#include <lunaix/foptions.h>
#include <lunaix/fs.h>      /* assumed: struct v_mount, struct v_dnode, vfs_* */
#include <lunaix/mm/valloc.h>
#include <lunaix/process.h>
#include <lunaix/status.h>  /* assumed: ENOTDIR, EBUSY, EROFS, ... */
#include <lunaix/syscall.h> /* assumed: __DEFINE_LXSYSCALL* */
#include <lunaix/types.h>

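/* Every live mount in the system, chained through v_mount::list. */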
static struct llist_header all_mnts = { .next = &all_mnts, .prev = &all_mnts };

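/*
 * Allocate a mount node and wire it into the hierarchy: onto the global
 * mount list, onto the parent's sub-mount list (when there is a parent),
 * and take a reference on the mount-point dnode so it stays alive for as
 * long as something is mounted on it.
 */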
struct v_mount*
vfs_create_mount(struct v_mount* parent, struct v_dnode* mnt_point)
{
    struct v_mount* mnt = vzalloc(sizeof(struct v_mount));
    if (!mnt) {
        return NULL;
    }

    llist_init_head(&mnt->submnts);
    llist_append(&all_mnts, &mnt->list);
    mutex_init(&mnt->lock);

    mnt->parent = parent;
    mnt->mnt_point = mnt_point;
    mnt->super_block = mnt_point->super_block;

    if (parent) { // only the root mount has no parent
        mutex_lock(&mnt->parent->lock);
        llist_append(&parent->submnts, &mnt->sibmnts);
        mutex_unlock(&mnt->parent->lock);
    }

    atomic_fetch_add(&mnt_point->ref_count, 1);
    return mnt;
}

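/*
 * Tear a mount down: let the filesystem driver release the superblock,
 * unlink the mount node from the global and sibling lists, then orphan
 * the cached inodes so the lru policy can reclaim them at its own pace.
 */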
static int
__vfs_do_unmount(struct v_mount* mnt)
{
    int errno;
    struct v_superblock* sb = mnt->super_block;

    if ((errno = sb->fs->unmount(sb))) {
        return errno;
    }

    llist_delete(&mnt->list);
    llist_delete(&mnt->sibmnts);

    // detach the inodes from the cache, and let the lru policy recycle them
    for (size_t i = 0; i < VFS_HASHTABLE_SIZE; i++) {
        struct hbucket* bucket = &sb->i_cache[i];
        if (!bucket->head) {
            continue;
        }
        bucket->head->pprev = 0;
    }

    if (mnt->parent) {
        mnt_chillax(mnt->parent); // unpin the parent mount
    }

    vfs_d_free(mnt->mnt_point);
    vfs_sb_free(sb); // assumed counterpart of vfs_sb_alloc
    vfree(mnt);
    return errno;
}

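/*
 * Busy counters pin a mount: vfs_unmount_at refuses to run while the
 * counter is non-zero. The busy state is propagated upwards, so a busy
 * submount also keeps every ancestor mount in place.
 */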
void
mnt_mkbusy(struct v_mount* mnt)
{
    mutex_lock(&mnt->lock);
    mnt->busy_counter++;
    mutex_unlock(&mnt->lock);
    if (mnt->parent) {
        mnt_mkbusy(mnt->parent);
    }
}

void
mnt_chillax(struct v_mount* mnt)
{
    mutex_lock(&mnt->lock);
    mnt->busy_counter--;
    mutex_unlock(&mnt->lock);
    if (mnt->parent) {
        mnt_chillax(mnt->parent);
    }
}

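/*
 * Mount the system root. An existing root mount is unmounted first,
 * which makes this usable for remounting root as well.
 */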
int
vfs_mount_root(const char* fs_name, struct device* device)
{
    int errno;
    if (vfs_sysroot->mnt && (errno = vfs_unmount_at(vfs_sysroot))) {
        return errno;
    }

    return vfs_mount_at(fs_name, device, vfs_sysroot, 0);
}

int
vfs_mount(const char* target,
          const char* fs_name,
          struct device* device,
          int options)
{
    int errno;
    struct v_dnode* mnt;

    if (!(errno =
            vfs_walk(__current->cwd, target, &mnt, NULL, VFS_WALK_MKPARENT))) {
        errno = vfs_mount_at(fs_name, device, mnt, options);
    }

    return errno;
}

int
vfs_unmount(const char* target)
{
    int errno;
    struct v_dnode* mnt;

    if (!(errno = vfs_walk(__current->cwd, target, &mnt, NULL, 0))) {
        errno = vfs_unmount_at(mnt);
    }

    return errno;
}

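/*
 * The workhorse of mounting: swap the mount point's superblock for a
 * freshly allocated one, hand it to the filesystem driver, and roll
 * everything back if the driver or the mount-node allocation fails.
 */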
int
vfs_mount_at(const char* fs_name,
             struct device* device,
             struct v_dnode* mnt_point,
             int options)
{
    if (mnt_point->inode && !(mnt_point->inode->itype & VFS_IFDIR)) {
        return ENOTDIR;
    }

    struct filesystem* fs = fsm_get(fs_name);
    if (!fs) {
        return ENODEV; // assumed errno for an unregistered filesystem
    }

    struct v_mount* parent_mnt = mnt_point->mnt;
    struct v_superblock *sb = vfs_sb_alloc(), *old_sb = mnt_point->super_block;
    sb->dev = device; // assumed: the driver reaches its device through the sb
    mnt_point->super_block = sb;

    int errno;
    if (!(errno = fs->mount(sb, mnt_point))) {
        sb->fs = fs;
        sb->root = mnt_point;

        if (!(mnt_point->mnt = vfs_create_mount(parent_mnt, mnt_point))) {
            errno = ENOMEM;
            goto revert;
        }

        mnt_point->mnt->flags = options;
        return errno;
    }

revert:
    // failure: restore the old superblock so the dnode is left untouched
    mnt_point->super_block = old_sb;
    vfs_sb_free(sb); // assumed counterpart of vfs_sb_alloc
    return errno;
}

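/*
 * Unmounting is only legal at the root dnode of a mounted filesystem,
 * and only while no one holds the mount busy.
 */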
int
vfs_unmount_at(struct v_dnode* mnt_point)
{
    int errno = 0;
    struct v_superblock* sb = mnt_point->super_block;
    if (!sb) {
        return EINVAL; // nothing mounted here (assumed errno)
    }

    if (sb->root != mnt_point) {
        return EINVAL; // not the root of a mounted fs (assumed errno)
    }

    if (mnt_point->mnt->busy_counter) {
        return EBUSY;
    }

    if (!(errno = __vfs_do_unmount(mnt_point->mnt))) {
        atomic_fetch_sub(&mnt_point->ref_count, 1);
    }

    return errno;
}

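/*
 * Write access is gated per mount: anything on a filesystem mounted
 * with MNT_RO is rejected with EROFS.
 */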
int
vfs_check_writable(struct v_dnode* dnode)
{
    if ((dnode->mnt->flags & MNT_RO)) {
        return EROFS;
    }

    return 0;
}

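/*
 * sys_mount(source, target, fstype, options): resolve both paths,
 * refuse a target that is referenced elsewhere, extract the backing
 * device from the source's volume-device inode, then hand off to
 * vfs_mount_at.
 */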
__DEFINE_LXSYSCALL4(int,
                    mount,
                    const char*,
                    source,
                    const char*,
                    target,
                    const char*,
                    fstype,
                    int,
                    options)
{
    struct v_dnode *dev, *mnt;
    int errno = 0;

    if ((errno = vfs_walk(__current->cwd, source, &dev, NULL, 0))) {
        goto done;
    }

    if ((errno = vfs_walk(__current->cwd, target, &mnt, NULL, 0))) {
        goto done;
    }

    if (mnt->ref_count > 1) {
        errno = EBUSY;
        goto done;
    }

    // By our convention, the backing device is attached to inode::data.
    // XXX could we do better?
    struct device* device = (struct device*)dev->inode->data;

    if (!(dev->inode->itype & VFS_IFVOLDEV) || !device) {
        errno = ENOTDEV;
        goto done;
    }

    errno = vfs_mount_at(fstype, device, mnt, options);

done:
    return DO_STATUS(errno);
}

__DEFINE_LXSYSCALL1(int, unmount, const char*, target)
{
    return DO_STATUS(vfs_unmount(target));
}