1 #include <lunaix/foptions.h>
2 #include <lunaix/fs/api.h>
3 #include <lunaix/mm/valloc.h>
4 #include <lunaix/process.h>
5 #include <lunaix/spike.h>
6 #include <lunaix/syscall_utils.h>
7 #include <lunaix/syslog.h>
8 #include <lunaix/types.h>
/* Global registry of every active mount in the system; a circular
 * doubly-linked list, statically initialized to empty (self-referential). */
12 struct llist_header all_mnts = { .next = &all_mnts, .prev = &all_mnts };
/*
 * Allocate and initialize a new v_mount attached at @mnt_point, inheriting
 * the superblock already assigned to the mount point's dnode, and link it
 * into both the global mount list (all_mnts) and @parent's submount list.
 * Takes a reference on @mnt_point for the lifetime of the mount.
 * NOTE(review): the return-type line, alloc-failure handling, and the
 * returned value are elided from this view — presumably returns `mnt`
 * (NULL on allocation failure, see NULL check at the call site); confirm.
 */
15 vfs_create_mount(struct v_mount* parent, struct v_dnode* mnt_point)
17 struct v_mount* mnt = vzalloc(sizeof(struct v_mount));
22 llist_init_head(&mnt->submnts);
23 llist_init_head(&mnt->sibmnts);
24 llist_append(&all_mnts, &mnt->list);
25 mutex_init(&mnt->lock);
28 mnt->mnt_point = mnt_point;
29 vfs_vmnt_assign_sb(mnt, mnt_point->super_block);
/* Link under the parent mount; parent's lock guards its submount list. */
33 mutex_lock(&mnt->parent->lock);
34 llist_append(&parent->submnts, &mnt->sibmnts);
35 mutex_unlock(&mnt->parent->lock);
/* Pin the mount point dnode while this mount exists
 * (released in __vfs_release_vmnt). */
38 atomic_fetch_add(&mnt_point->ref_count, 1);
/*
 * Tear down a v_mount: requires all submounts to be gone, releases the
 * busy reference held on the parent mount, unlinks this mount from its
 * sibling list and the global all_mnts list, and drops the reference taken
 * on the mount-point dnode in vfs_create_mount.
 * NOTE(review): the actual vfree of `mnt` is not visible in this view —
 * confirm it happens in the elided tail of this function.
 */
44 __vfs_release_vmnt(struct v_mount* mnt)
46 assert(llist_empty(&mnt->submnts));
/* Undo the busy count this mount contributed to its parent. */
49 mnt_chillax(mnt->parent);
52 llist_delete(&mnt->sibmnts);
53 llist_delete(&mnt->list);
54 atomic_fetch_sub(&mnt->mnt_point->ref_count, 1);
/*
 * Perform the filesystem-level unmount: invoke the fs driver's unmount
 * hook, detach all cached inodes of the superblock so the LRU policy can
 * reclaim them, restore the mount point's mount pointer to the parent
 * mount, and release the v_mount.
 * NOTE(review): error-path body after the unmount-hook failure and loop
 * interior lines are elided from this view.
 */
59 __vfs_do_unmount(struct v_mount* mnt)
62 struct v_superblock* sb = mnt->super_block;
/* Filesystem driver gets first refusal; abort unmount on its error. */
64 if ((errno = sb->fs->unmount(sb))) {
68 // detached the inodes from cache, and let lru policy to recycle them
69 for (size_t i = 0; i < VFS_HASHTABLE_SIZE; i++) {
70 struct hbucket* bucket = &sb->i_cache[i];
/* Sever the bucket chain's back-link so the cache no longer owns it. */
74 bucket->head->pprev = 0;
/* The mount point now belongs to the parent mount again. */
77 mnt->mnt_point->mnt = mnt->parent;
80 __vfs_release_vmnt(mnt);
/*
 * Mark a mount as busy under its lock.
 * NOTE(review): the locked body is elided — presumably increments
 * mnt->busy_counter (checked in vfs_unmount_at); confirm.
 */
86 mnt_mkbusy(struct v_mount* mnt)
88 mutex_lock(&mnt->lock);
90 mutex_unlock(&mnt->lock);
/*
 * Release a busy reference on a mount under its lock (inverse of
 * mnt_mkbusy).
 * NOTE(review): the locked body is elided — presumably decrements
 * mnt->busy_counter; confirm.
 */
94 mnt_chillax(struct v_mount* mnt)
96 mutex_lock(&mnt->lock);
98 mutex_unlock(&mnt->lock);
/*
 * (Re)mount the filesystem root: if the system root dnode already carries
 * a mount, unmount it first, then mount @fs_name backed by @device at the
 * root with default (0) options. Returns an errno-style code.
 */
102 vfs_mount_root(const char* fs_name, struct device* device)
104 extern struct v_dnode* vfs_sysroot;
/* Existing root mount must come off cleanly before replacing it. */
106 if (vfs_sysroot->mnt && (errno = vfs_unmount_at(vfs_sysroot))) {
109 return vfs_mount_at(fs_name, device, vfs_sysroot, 0);
/*
 * Path-based mount: resolve @target relative to the current process's cwd
 * (creating missing parent directories, per VFS_WALK_MKPARENT), then mount
 * the filesystem at the resolved dnode.
 */
113 vfs_mount(const char* target,
115 struct device* device,
122 vfs_walk(__current->cwd, target, &mnt, NULL, VFS_WALK_MKPARENT))) {
123 errno = vfs_mount_at(fs_name, device, mnt, options);
/*
 * Path-based unmount: resolve @target relative to the current process's
 * cwd and unmount whatever is mounted at the resolved dnode.
 */
130 vfs_unmount(const char* target)
135 if (!(errno = vfs_walk(__current->cwd, target, &mnt, NULL, 0))) {
136 errno = vfs_unmount_at(mnt);
/*
 * Core mount routine for a resolved filesystem @fs at @mnt_point.
 * Validates the request (device must be a volume device when given; the
 * mount point's inode, if present, must be a directory; non-pseudo
 * filesystems require a backing device), allocates a fresh superblock,
 * creates the v_mount, and invokes the driver's mount hook. On failure the
 * previous superblock and parent mount are restored.
 * NOTE(review): several `if` bodies (the early-error returns) and the
 * FSTYPE_ROFS branch body are elided from this view — the exact errno
 * values returned on each rejection cannot be confirmed here.
 */
143 vfs_mount_fsat(struct filesystem* fs,
144 struct device* device,
145 struct v_dnode* mnt_point,
/* A backing device, when supplied, must be a volume-class device. */
149 if (device && device->dev_type != DEV_IFVOL) {
/* Can only mount over a directory (an inode-less dnode is acceptable). */
153 if (mnt_point->inode && !check_directory_node(mnt_point->inode)) {
/* NOTE(review): branch body elided — presumably forces read-only
 * options for FSTYPE_ROFS filesystems; confirm. */
157 if ((fs->types & FSTYPE_ROFS)) {
/* Non-pseudo filesystems must have real backing storage. */
161 if (!(fs->types & FSTYPE_PSEUDO) && !device) {
/* "sys" stands in as the device name for pseudo (device-less) mounts. */
166 char* dev_name = "sys";
167 char* fsname = HSTR_VAL(fs->fs_name);
/* Remember the pre-mount state so a failed mount can be rolled back. */
169 struct v_mount* parent_mnt = mnt_point->mnt;
170 struct v_superblock *sb = vfs_sb_alloc(),
171 *old_sb = mnt_point->super_block;
174 dev_name = device->name_val;
177 // prepare v_superblock for fs::mount invoke
180 sb->root = mnt_point;
181 vfs_d_assign_sb(mnt_point, sb);
183 if (!(mnt_point->mnt = vfs_create_mount(parent_mnt, mnt_point))) {
188 mnt_point->mnt->flags = options;
189 if (!(errno = fs->mount(sb, mnt_point))) {
190 kprintf("mount: dev=%s, fs=%s, mode=%d",
191 dev_name, fsname, options);
200 ERROR("failed mount: dev=%s, fs=%s, mode=%d, err=%d",
201 dev_name, fsname, options, errno);
/* Roll back: restore the old superblock and reattach the parent mount. */
203 vfs_d_assign_sb(mnt_point, old_sb);
205 __vfs_release_vmnt(mnt_point->mnt);
207 mnt_point->mnt = parent_mnt;
/*
 * Mount by filesystem name: if @fs_name resolves via fsm_get, mount that
 * filesystem directly; otherwise probe every registered filesystem in
 * turn until one mounts successfully.
 * NOTE(review): iterator setup and the loop's flow-control lines are
 * elided — the FSTYPE_PSEUDO branch presumably skips pseudo filesystems
 * during auto-detection; confirm.
 */
213 vfs_mount_at(const char* fs_name,
214 struct device* device,
215 struct v_dnode* mnt_point,
219 struct filesystem* fs = fsm_get(fs_name);
224 return vfs_mount_fsat(fs, device, mnt_point, options);
/* Named lookup failed: auto-detect by trying each registered fs. */
231 while (fsm_itnext(&fsi))
233 if ((fsi.fs->types & FSTYPE_PSEUDO)) {
237 INFO("mount attempt: %s", HSTR_VAL(fsi.fs->fs_name));
238 errno = vfs_mount_fsat(fsi.fs, device, mnt_point, options);
/*
 * Unmount the mount rooted at @mnt_point. Rejects the request when
 * @mnt_point is not the root of its superblock or the mount still has
 * busy users; on success drops the dnode reference held by the mount.
 * NOTE(review): the errno values returned by the rejection branches are
 * in elided lines and cannot be confirmed from this view.
 */
248 vfs_unmount_at(struct v_dnode* mnt_point)
251 struct v_superblock* sb = mnt_point->super_block;
/* Only the root dnode of a mounted superblock may be unmounted. */
256 if (sb->root != mnt_point) {
/* Refuse while anyone still holds the mount busy (see mnt_mkbusy). */
260 if (mnt_point->mnt->busy_counter) {
264 if (!(errno = __vfs_do_unmount(mnt_point->mnt))) {
265 atomic_fetch_sub(&mnt_point->ref_count, 1);
/*
 * Check whether writes are permitted on the mount that owns @dnode.
 * NOTE(review): branch body and success return are elided — presumably
 * returns an errno (e.g. read-only error) when MNT_RO is set and 0
 * otherwise; confirm.
 */
272 vfs_check_writable(struct v_dnode* dnode)
274 if ((dnode->mnt->flags & MNT_RO)) {
/*
 * mount(2)-style syscall: resolve the (optional) source device path and
 * the mandatory target path, validate the target is free for mounting,
 * extract the backing device from the source dnode's inode, and mount.
 * NOTE(review): the other three syscall parameter declarations and the
 * rejection-branch bodies are elided from this view.
 */
280 __DEFINE_LXSYSCALL4(int,
291 struct v_dnode *dev = NULL, *mnt = NULL;
294 // It is fine if source is not exist, as some mounting don't require it
295 vfs_walk(__current->cwd, source, &dev, NULL, 0);
297 if ((errno = vfs_walk(__current->cwd, target, &mnt, NULL, 0))) {
/* Target dnode still referenced elsewhere: refuse to mount over it. */
301 if (mnt->ref_count > 1) {
/* Target is already the root of an existing mount: refuse. */
306 if (mnt->mnt->mnt_point == mnt) {
311 // By our convention.
312 // XXX could we do better?
313 struct device* device = NULL;
/* Source, when given, must resolve to a volume-device node whose inode
 * data carries the struct device pointer. */
316 if (!check_voldev_node(dev->inode)) {
320 device = (struct device*)dev->inode->data;
323 errno = vfs_mount_at(fstype, device, mnt, options);
326 return DO_STATUS(errno);
/* unmount(2)-style syscall: thin wrapper over the path-based vfs_unmount. */
329 __DEFINE_LXSYSCALL1(int, unmount, const char*, target)
331 return vfs_unmount(target);