1 #include <lunaix/foptions.h>
3 #include <lunaix/mm/valloc.h>
4 #include <lunaix/process.h>
5 #include <lunaix/spike.h>
6 #include <lunaix/syscall_utils.h>
7 #include <lunaix/syslog.h>
8 #include <lunaix/types.h>
/* Global registry of every active mount; initialized as an empty
 * self-linked circular list. */
12 struct llist_header all_mnts = { .next = &all_mnts, .prev = &all_mnts };
/*
 * Allocate and initialize a v_mount attached at mnt_point, link it into
 * the global mount list and into parent's submount list, and pin the
 * mount-point dnode with a reference.
 * NOTE(review): this excerpt elides several lines (return type,
 * allocation-failure check, the assignment of mnt->parent, and the
 * return statement) — confirm against the full source.
 */
15 vfs_create_mount(struct v_mount* parent, struct v_dnode* mnt_point)
/* Zero-initialized allocation, so all list/lock fields start clean. */
17 struct v_mount* mnt = vzalloc(sizeof(struct v_mount));
22 llist_init_head(&mnt->submnts);
/* Register on the global mount list. */
23 llist_append(&all_mnts, &mnt->list);
24 mutex_init(&mnt->lock);
27 mnt->mnt_point = mnt_point;
/* Inherit the superblock currently installed on the mount point. */
28 mnt->super_block = mnt_point->super_block;
/* Insert into the parent's submount list under the parent's lock.
 * NOTE(review): locks mnt->parent->lock but appends to parent->submnts;
 * presumably mnt->parent == parent was assigned on an elided line —
 * verify, otherwise this locks the wrong mutex. */
32 mutex_lock(&mnt->parent->lock);
33 llist_append(&parent->submnts, &mnt->sibmnts);
34 mutex_unlock(&mnt->parent->lock);
/* Pin the mount point so it cannot be released while mounted over
 * (dropped in vfs_unmount_at on successful unmount). */
37 atomic_fetch_add(&mnt_point->ref_count, 1);
/*
 * Tear down a mount: ask the filesystem to unmount its superblock,
 * unlink the mount from the global and sibling lists, detach cached
 * inodes, release the parent's busy reference, and free the
 * mount-point dnode.
 * NOTE(review): error-path details and the closing of several braces
 * are elided in this excerpt.
 */
43 __vfs_do_unmount(struct v_mount* mnt)
46 struct v_superblock* sb = mnt->super_block;
/* Filesystem-specific unmount; abort (propagating errno) on failure. */
48 if ((errno = sb->fs->unmount(sb))) {
/* Unlink from the global mount list and from the parent's submounts. */
52 llist_delete(&mnt->list);
53 llist_delete(&mnt->sibmnts);
55 // Detach the inodes from the cache and let the LRU policy recycle them.
56 for (size_t i = 0; i < VFS_HASHTABLE_SIZE; i++) {
57 struct hbucket* bucket = &sb->i_cache[i];
/* Sever the bucket head's back-link so cached inodes float free.
 * NOTE(review): an empty-bucket (NULL head) guard is presumably on an
 * elided line — confirm before relying on this deref. */
61 bucket->head->pprev = 0;
/* Drop the busy reference taken on the parent when this mount was
 * made busy. */
64 mnt_chillax(mnt->parent);
67 vfs_d_free(mnt->mnt_point);
/*
 * Mark a mount busy under its lock, preventing it from being unmounted
 * (see the busy_counter check in vfs_unmount_at).
 * NOTE(review): the counter update itself (presumably
 * mnt->busy_counter++ and/or propagation to ancestors) sits on elided
 * lines between the lock and unlock — confirm in the full source.
 */
74 mnt_mkbusy(struct v_mount* mnt)
76 mutex_lock(&mnt->lock);
78 mutex_unlock(&mnt->lock);
/*
 * Counterpart of mnt_mkbusy: release one busy reference under the
 * mount's lock.
 * NOTE(review): the decrement itself is on elided lines between the
 * lock and unlock — confirm in the full source.
 */
82 mnt_chillax(struct v_mount* mnt)
84 mutex_lock(&mnt->lock);
86 mutex_unlock(&mnt->lock);
/*
 * (Re)mount the filesystem root: if something is already mounted at the
 * system root, unmount it first (failing with its errno), then mount
 * the named filesystem from `device` at the root with no options.
 */
90 vfs_mount_root(const char* fs_name, struct device* device)
92 extern struct v_dnode* vfs_sysroot;
/* An existing root mount must be torn down before remounting. */
94 if (vfs_sysroot->mnt && (errno = vfs_unmount_at(vfs_sysroot))) {
97 return vfs_mount_at(fs_name, device, vfs_sysroot, 0);
/*
 * Path-based mount entry point: resolve `target` relative to the
 * caller's cwd (creating missing parent directories, per
 * VFS_WALK_MKPARENT) and mount the filesystem there.
 * NOTE(review): parts of the parameter list and surrounding control
 * flow are elided in this excerpt.
 */
101 vfs_mount(const char* target,
103 struct device* device,
110 vfs_walk(__current->cwd, target, &mnt, NULL, VFS_WALK_MKPARENT))) {
111 errno = vfs_mount_at(fs_name, device, mnt, options);
/*
 * Path-based unmount entry point: resolve `target` relative to the
 * caller's cwd and unmount whatever is mounted at that dnode.
 */
118 vfs_unmount(const char* target)
123 if (!(errno = vfs_walk(__current->cwd, target, &mnt, NULL, 0))) {
124 errno = vfs_unmount_at(mnt);
/*
 * Mount filesystem `fs_name`, backed by `device` (may be NULL for
 * pseudo filesystems), onto `mnt_point`.
 * Validates: the backing device (if any) must be a volumetric device,
 * and the mount point (if it has an inode) must be a directory.
 * On success installs a fresh superblock and a new v_mount on the
 * mount point; on failure restores the previous superblock.
 * NOTE(review): the error-return statements inside each guard, the
 * FSTYPE_ROFS branch body (presumably forcing a read-only option), and
 * the cleanup path are elided in this excerpt.
 */
131 vfs_mount_at(const char* fs_name,
132 struct device* device,
133 struct v_dnode* mnt_point,
/* Only volumetric (block-like) devices can back a mount. */
136 if (device && device->dev_type != DEV_IFVOL) {
/* The mount point must be a directory if it already has an inode. */
140 if (mnt_point->inode && !(mnt_point->inode->itype & VFS_IFDIR)) {
144 struct filesystem* fs = fsm_get(fs_name);
/* Read-only filesystem type — handled on elided lines (presumably
 * forces MNT_RO into options; TODO confirm). */
149 if (fs->types == FSTYPE_ROFS) {
/* Default device name for device-less (pseudo) filesystems. */
153 char* dev_name = "sys";
154 struct v_mount* parent_mnt = mnt_point->mnt;
/* Keep old_sb so the superblock can be restored if mounting fails. */
155 struct v_superblock *sb = vfs_sb_alloc(), *old_sb = mnt_point->super_block;
157 mnt_point->super_block = sb;
160 dev_name = device->name_val;
/* Delegate to the filesystem driver to populate the superblock. */
164 if (!(errno = fs->mount(sb, mnt_point))) {
166 sb->root = mnt_point;
/* Link a new v_mount under the previous mount at this point. */
168 if (!(mnt_point->mnt = vfs_create_mount(parent_mnt, mnt_point))) {
173 kprintf("mount: dev=%s, fs=%s, mode=%d\n", dev_name, fs_name, options);
175 mnt_point->mnt->flags = options;
/* Failure path: log and roll back to the previous superblock. */
183 kprintf(KERROR "mount: dev=%s, fs=%s, mode=%d, err=%d\n",
188 mnt_point->super_block = old_sb;
/*
 * Unmount the filesystem rooted at `mnt_point`.
 * Refuses if the dnode is not the root of its superblock, or if the
 * mount is currently busy. On success drops the reference taken by
 * vfs_create_mount.
 * NOTE(review): the error-return statements inside the guards are on
 * elided lines.
 */
194 vfs_unmount_at(struct v_dnode* mnt_point)
197 struct v_superblock* sb = mnt_point->super_block;
/* Only the root dnode of a mounted filesystem may be unmounted. */
202 if (sb->root != mnt_point) {
/* A busy mount (see mnt_mkbusy) cannot be torn down. */
206 if (mnt_point->mnt->busy_counter) {
210 if (!(errno = __vfs_do_unmount(mnt_point->mnt))) {
/* Release the pin placed on the mount point at mount time. */
211 atomic_fetch_sub(&mnt_point->ref_count, 1);
/*
 * Check whether writes are permitted on the mount containing `dnode`;
 * fails when the mount was made with the MNT_RO flag.
 * NOTE(review): the error-return inside the guard is elided here.
 */
218 vfs_check_writable(struct v_dnode* dnode)
220 if ((dnode->mnt->flags & MNT_RO)) {
/*
 * mount(2)-style syscall: resolve the source device node and the target
 * directory, validate them, and mount `fstype` from the device onto the
 * target.
 * NOTE(review): the remaining syscall parameters (source, target,
 * fstype, options) and the error-return bodies are on elided lines.
 */
226 __DEFINE_LXSYSCALL4(int,
237 struct v_dnode *dev, *mnt;
/* Resolve the device node backing the mount. */
240 if ((errno = vfs_walk(__current->cwd, source, &dev, NULL, 0))) {
/* Resolve the directory to mount onto. */
244 if ((errno = vfs_walk(__current->cwd, target, &mnt, NULL, 0))) {
/* Refuse to mount over a dnode that others are still referencing. */
248 if (mnt->ref_count > 1) {
/* The device object is stashed in the inode's private data field. */
253 // By our convention.
254 // XXX could we do better?
255 struct device* device = (struct device*)dev->inode->data;
/* Source must be a volumetric device node with an attached device. */
257 if (!(dev->inode->itype & VFS_IFVOLDEV) || !device) {
262 errno = vfs_mount_at(fstype, device, mnt, options);
265 return DO_STATUS(errno);
/* unmount(2)-style syscall: thin wrapper over the path-based
 * vfs_unmount. */
268 __DEFINE_LXSYSCALL1(int, unmount, const char*, target)
270 return vfs_unmount(target);