#include <lunaix/foptions.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/process.h>
#include <lunaix/spike.h>
#include <lunaix/syscall_utils.h>
#include <lunaix/syslog.h>
#include <lunaix/types.h>

struct llist_header all_mnts = { .next = &all_mnts, .prev = &all_mnts };
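/*
 * Allocate and initialise a v_mount for the file system mounted at mnt_point:
 * link it onto the global mount list and its parent's submnts list, and pin
 * the mount point dnode with an extra reference.
 */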
vfs_create_mount(struct v_mount* parent, struct v_dnode* mnt_point)
{
    struct v_mount* mnt = vzalloc(sizeof(struct v_mount));

    llist_init_head(&mnt->submnts);
    llist_append(&all_mnts, &mnt->list);
    mutex_init(&mnt->lock);

    mnt->parent = parent;
    mnt->mnt_point = mnt_point;
    mnt->super_block = mnt_point->super_block;

    mutex_lock(&mnt->parent->lock);
    llist_append(&parent->submnts, &mnt->sibmnts);
    mutex_unlock(&mnt->parent->lock);

    atomic_fetch_add(&mnt_point->ref_count, 1);
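/*
 * Tear a mount down: let the file system driver unmount the superblock, unlink
 * the v_mount from the global and sibling lists, detach cached inodes so the
 * LRU policy can reclaim them, then point the mount point back at the parent
 * mount and drop the reference taken when it was mounted.
 */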
__vfs_do_unmount(struct v_mount* mnt)
{
    struct v_superblock* sb = mnt->super_block;

    if ((errno = sb->fs->unmount(sb))) {

    llist_delete(&mnt->list);
    llist_delete(&mnt->sibmnts);
    // detach the inodes from the cache and let the LRU policy recycle them
    for (size_t i = 0; i < VFS_HASHTABLE_SIZE; i++) {
        struct hbucket* bucket = &sb->i_cache[i];
        if (!bucket->head)
            continue;
        bucket->head->pprev = 0;
    }

    mnt_chillax(mnt->parent);
    mnt->mnt_point->mnt = mnt->parent;

    atomic_fetch_sub(&mnt->mnt_point->ref_count, 1);
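/*
 * mnt_mkbusy()/mnt_chillax() raise and lower the mount's busy_counter under
 * mnt->lock; vfs_unmount_at() refuses to unmount while the counter is
 * non-zero.
 */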
mnt_mkbusy(struct v_mount* mnt)
{
    mutex_lock(&mnt->lock);
    mnt->busy_counter++;
    mutex_unlock(&mnt->lock);

mnt_chillax(struct v_mount* mnt)
{
    mutex_lock(&mnt->lock);
    mnt->busy_counter--;
    mutex_unlock(&mnt->lock);
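/*
 * Mount fs_name on the VFS root (vfs_sysroot), unmounting whatever is
 * currently mounted there first.
 */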
vfs_mount_root(const char* fs_name, struct device* device)
{
    extern struct v_dnode* vfs_sysroot;

    if (vfs_sysroot->mnt && (errno = vfs_unmount_at(vfs_sysroot))) {

    return vfs_mount_at(fs_name, device, vfs_sysroot, 0);
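/*
 * Resolve target against the caller's cwd (with VFS_WALK_MKPARENT) and mount
 * fs_name there via vfs_mount_at().
 */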
vfs_mount(const char* target,
          struct device* device,

            vfs_walk(__current->cwd, target, &mnt, NULL, VFS_WALK_MKPARENT))) {
        errno = vfs_mount_at(fs_name, device, mnt, options);
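/* Resolve target against the caller's cwd and hand it to vfs_unmount_at(). */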
vfs_unmount(const char* target)
{
    if (!(errno = vfs_walk(__current->cwd, target, &mnt, NULL, 0))) {
        errno = vfs_unmount_at(mnt);
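/*
 * Core mount path: checks that the backing device (if any) is a volume device
 * and that the mount point is not a regular file, allocates a fresh superblock
 * for the requested file system driver and calls its mount(); on success the
 * new v_mount is created and options are stored in mnt->flags, otherwise the
 * previous superblock is restored.
 */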
vfs_mount_at(const char* fs_name,
             struct device* device,
             struct v_dnode* mnt_point,

    if (device && device->dev_type != DEV_IFVOL) {

    if (mnt_point->inode && (mnt_point->inode->itype & F_MFILE)) {

    struct filesystem* fs = fsm_get(fs_name);

    if (fs->types == FSTYPE_ROFS) {

    char* dev_name = "sys";
    struct v_mount* parent_mnt = mnt_point->mnt;
    struct v_superblock *sb = vfs_sb_alloc(), *old_sb = mnt_point->super_block;

    mnt_point->super_block = sb;

        dev_name = device->name_val;

    if (!(errno = fs->mount(sb, mnt_point))) {
        sb->root = mnt_point;

        if (!(mnt_point->mnt = vfs_create_mount(parent_mnt, mnt_point))) {

        kprintf("mount: dev=%s, fs=%s, mode=%d", dev_name, fs_name, options);

        mnt_point->mnt->flags = options;

        ERROR("mount: dev=%s, fs=%s, mode=%d, err=%d",

        mnt_point->super_block = old_sb;
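/*
 * Unmount the file system rooted at mnt_point: checks that mnt_point is the
 * root dnode of its superblock and that the mount is not busy before calling
 * __vfs_do_unmount().
 */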
vfs_unmount_at(struct v_dnode* mnt_point)
{
    struct v_superblock* sb = mnt_point->super_block;

    if (sb->root != mnt_point) {

    if (mnt_point->mnt->busy_counter) {

    if (!(errno = __vfs_do_unmount(mnt_point->mnt))) {
        atomic_fetch_sub(&mnt_point->ref_count, 1);
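/* Check whether dnode may be written; dnodes on a read-only (MNT_RO) mount
 * are rejected. */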
vfs_check_writable(struct v_dnode* dnode)
{
    if ((dnode->mnt->flags & MNT_RO)) {
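/*
 * The mount(source, target, fstype, options) syscall: resolves both paths,
 * checks that the target dnode is not otherwise referenced and is not already
 * a mount point, recovers the backing volume device from the source dnode,
 * then calls vfs_mount_at().
 */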
__DEFINE_LXSYSCALL4(int,

    struct v_dnode *dev = NULL, *mnt = NULL;
    // It is fine if the source does not exist, since some mounts don't require one
    vfs_walk(__current->cwd, source, &dev, NULL, 0);

    if ((errno = vfs_walk(__current->cwd, target, &mnt, NULL, 0))) {

    if (mnt->ref_count > 1) {

    if (mnt->mnt->mnt_point == mnt) {
    // By our convention, the source must name a volume device and its
    // struct device is recovered from the inode's data field.
    // XXX could we do better?
    struct device* device = NULL;

    if (!(dev->inode->itype & VFS_IFVOLDEV)) {

    device = (struct device*)dev->inode->data;

    errno = vfs_mount_at(fstype, device, mnt, options);

    return DO_STATUS(errno);
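/* The unmount(target) syscall: a thin wrapper around vfs_unmount(). */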
__DEFINE_LXSYSCALL1(int, unmount, const char*, target)
{
    return vfs_unmount(target);