#include <lunaix/fs.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/process.h>
#include <lunaix/syscall.h>
#include <lunaix/types.h>

static struct llist_header all_mnts = { .next = &all_mnts, .prev = &all_mnts };

struct v_mount*
vfs_create_mount(struct v_mount* parent, struct v_dnode* mnt_point)
{
    struct v_mount* mnt = vzalloc(sizeof(struct v_mount));
    if (!mnt) {
        return NULL;
    }

    llist_init_head(&mnt->submnts);
    llist_append(&all_mnts, &mnt->list);
    mutex_init(&mnt->lock);

    mnt->parent = parent;
    mnt->mnt_point = mnt_point;
    mnt->super_block = mnt_point->super_block;

    if (parent) {
        mutex_lock(&mnt->parent->lock);
        llist_append(&parent->submnts, &mnt->sibmnts);
        mutex_unlock(&mnt->parent->lock);
    }

    // the mount point is now pinned by this mount
    atomic_fetch_add(&mnt_point->ref_count, 1);

    return mnt;
}

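/*
 * A sketch of the resulting linkage, assuming two mounts built by the
 * function above (names illustrative): each mount sits on the global list
 * via `list` and under its parent via `sibmnts`,
 *
 *   all_mnts <-> mnt_a.list <-> mnt_b.list    // whole-system traversal
 *   mnt_a.submnts <-> mnt_b.sibmnts           // mnt_b mounted under mnt_a
 *
 * so enumerating every mount walks `all_mnts`, while tearing down a
 * subtree only touches `submnts`/`sibmnts`.
 */
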
static int
__vfs_do_unmount(struct v_mount* mnt)
{
    int errno;
    struct v_superblock* sb = mnt->super_block;

    if ((errno = sb->fs->unmount(sb))) {
        return errno;
    }

    llist_delete(&mnt->list);
    llist_delete(&mnt->sibmnts);

    // detach the inodes from the cache, and let the lru policy recycle them
    for (size_t i = 0; i < VFS_HASHTABLE_SIZE; i++) {
        struct hbucket* bucket = &sb->i_cache[i];
        if (!bucket->head) {
            continue;
        }
        // orphan the chain: the inodes can no longer be looked up,
        // and will be reclaimed once their last reference drops
        bucket->head->pprev = 0;
    }

    // drop the pin placed on the parent when this mount was created
    if (mnt->parent) {
        mnt_chillax(mnt->parent);
    }

    vfs_d_free(mnt->mnt_point);
    vfs_sb_free(sb);
    vfree(mnt);

    return errno;
}

void
mnt_mkbusy(struct v_mount* mnt)
{
    mutex_lock(&mnt->lock);
    mnt->busy_counter++;
    mutex_unlock(&mnt->lock);
}

void
mnt_chillax(struct v_mount* mnt)
{
    mutex_lock(&mnt->lock);
    mnt->busy_counter--;
    mutex_unlock(&mnt->lock);
}

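/*
 * A minimal usage sketch (hypothetical caller, not from this file): pin
 * the mount around an operation so that a concurrent vfs_unmount_at()
 * observes a non-zero busy_counter and backs off with EBUSY instead of
 * tearing the mount down mid-operation.
 *
 *   mnt_mkbusy(dnode->mnt);
 *   // ... read or write through dnode ...
 *   mnt_chillax(dnode->mnt);
 */
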
int
vfs_mount_root(const char* fs_name, struct device* device)
{
    int errno;
    if (vfs_sysroot->mnt && (errno = vfs_unmount_at(vfs_sysroot))) {
        return errno;
    }

    return vfs_mount_at(fs_name, device, vfs_sysroot);
}

int
vfs_mount(const char* target, const char* fs_name, struct device* device)
{
    int errno;
    struct v_dnode* mnt;

    if (!(errno =
            vfs_walk(__current->cwd, target, &mnt, NULL, VFS_WALK_MKPARENT))) {
        errno = vfs_mount_at(fs_name, device, mnt);
    }

    return errno;
}

int
vfs_unmount(const char* target)
{
    int errno;
    struct v_dnode* mnt;

    if (!(errno = vfs_walk(__current->cwd, target, &mnt, NULL, 0))) {
        errno = vfs_unmount_at(mnt);
    }

    return errno;
}

int
vfs_mount_at(const char* fs_name,
             struct device* device,
             struct v_dnode* mnt_point)
{
    if (mnt_point->inode && !(mnt_point->inode->itype & VFS_IFDIR)) {
        return ENOTDIR;
    }

    struct filesystem* fs = fsm_get(fs_name);
    if (!fs) {
        return ENODEV;
    }

    int errno;
    struct v_mount* parent_mnt = mnt_point->mnt;
    struct v_superblock* sb = vfs_sb_alloc();
    sb->dev = device;

    if (!(errno = fs->mount(sb, mnt_point))) {
        mnt_point->super_block = sb;
        sb->fs = fs;
        sb->root = mnt_point;

        if (!(mnt_point->mnt = vfs_create_mount(parent_mnt, mnt_point))) {
            errno = ENOMEM;
        } else if (parent_mnt) {
            // a child mount pins its parent; released in __vfs_do_unmount
            mnt_mkbusy(parent_mnt);
        }
    }

    if (errno) {
        vfs_sb_free(sb);
    }

    return errno;
}

int
vfs_unmount_at(struct v_dnode* mnt_point)
{
    int errno;
    struct v_superblock* sb = mnt_point->super_block;
    if (!sb) {
        return EINVAL;
    }

    // only the root of a mounted filesystem can be unmounted
    if (sb->root != mnt_point) {
        return EINVAL;
    }

    // refuse while someone still holds the mount busy
    if (mnt_point->mnt->busy_counter) {
        return EBUSY;
    }

    if (!(errno = __vfs_do_unmount(mnt_point->mnt))) {
        atomic_fetch_sub(&mnt_point->ref_count, 1);
    }

    return errno;
}

__DEFINE_LXSYSCALL3(int,
                    mount,
                    const char*, source,
                    const char*, target,
                    const char*, fstype)
{
    struct v_dnode *dev, *mnt;
    int errno;

    if ((errno = vfs_walk(__current->cwd, source, &dev, NULL, 0))) {
        goto done;
    }

    if ((errno = vfs_walk(__current->cwd, target, &mnt, NULL, 0))) {
        goto done;
    }

    if (mnt->ref_count > 1) {
        errno = EBUSY;
        goto done;
    }

    // By our convention, a volume device publishes its struct device
    // through the dnode's `data` field.
    // XXX could we do better?
    struct device* device = (struct device*)dev->data;

    if (!(dev->inode->itype & VFS_IFVOLDEV) || !device) {
        errno = ENOTDEV;
        goto done;
    }

    errno = vfs_mount_at(fstype, device, mnt);

done:
    return DO_STATUS(errno);
}

__DEFINE_LXSYSCALL1(int, unmount, const char*, target)
{
    return DO_STATUS(vfs_unmount(target));
}
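
/*
 * A minimal sketch of the intended call flow, with hypothetical
 * filesystem names (assumes such drivers are registered with fsm and
 * tolerate a NULL backing device):
 *
 *   vfs_mount_root("ramfs", NULL);     // boot: establish the root mount
 *   vfs_mount("/dev", "devfs", NULL);  // attach a mount beneath it
 *   vfs_unmount("/dev");               // EBUSY while mnt_mkbusy() pins it
 */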