3 * @author Lunaixsky (zelong56@gmail.com)
4 * @brief Lunaix virtual file system - an abstraction layer for all file systems.
8 * @copyright Copyright (c) 2022
12 // Welcome to The Mountain O'Shit! :)
15 TODO vfs & device todos checklist
17 It is overseen by Twilight Sparkle ;)
19 1. Get inodes hooked into lru (CHECKED)
20 2. Get dnodes hooked into lru (CHECKED)
21 3. Get inodes properly hashed so they can be reused by the underlying fs (CHECKED)
22 4. (lru) Add a callback function (or destructor) for eviction. (CHECKED)
23 [good idea] or a constructor/destructor pattern in the cake allocator?
24 5. (mount) Figure out a way to identify a busy mount point before unmounting.
25 Maybe a unified mount_point structure that maintains a reference
26 counter on any dnodes within the subtree? Such a counter would only
27 increment when a file is opened or a dnode is used as a working
28 directory, and decrement conversely. (CHECKED)
29 6. (mount) Ability to track all mount points (including sub-mounts)
30 so we can confidently clean up everything when we unmount.
32 7. (mount) Figure out a way to acquire the device represented by a dnode,
33 so it can be used to mount. (e.g. we wish to get `struct device*`
34 out of the dnode at /dev/sda)
35 [tip] we should pay attention to twifs and add a private_data field
36 under struct v_dnode? (CHECKED)
37 8. (mount) Then, we should refactor the mount/unmount mechanism. (CHECKED)
38 9. (mount) (future) Ability to mount anything? e.g. Linux can mount a disk
39 image file using a so-called "loopback" pseudo device. Maybe
40 we can do a similar thing in Lunaix? A block device emulation
41 layered above the regular file when we mount it.
42 10. (device) device number (dev_t) allocation
43 [good idea] <class>:<subclass>:<uniq_id> composition
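[sketch] a hypothetical packing of this composition into a 32-bit dev_t
(illustration only, not a decided layout):
    dev_t = (class << 24) | (subclass << 16) | uniq_id;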
46 #include <klibc/string.h>
47 #include <lunaix/dirent.h>
48 #include <lunaix/foptions.h>
49 #include <lunaix/fs.h>
50 #include <lunaix/mm/cake.h>
51 #include <lunaix/mm/page.h>
52 #include <lunaix/mm/valloc.h>
53 #include <lunaix/process.h>
54 #include <lunaix/spike.h>
55 #include <lunaix/syscall.h>
57 #include <lunaix/fs/twifs.h>
59 #define PATH_DELIM '/'
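// Locking an inode/dnode also bumps it in its LRU zone, so objects that are
// actively locked are treated as recently used by the evictors.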
61 #define unlock_inode(inode) mutex_unlock(&inode->lock)
62 #define lock_inode(inode) \
64 mutex_lock(&inode->lock); \
65 lru_use_one(inode_lru, &inode->lru); \
68 #define unlock_dnode(dnode) mutex_unlock(&dnode->lock)
69 #define lock_dnode(dnode) \
71 mutex_lock(&dnode->lock); \
72 lru_use_one(dnode_lru, &dnode->lru); \
75 static struct cake_pile* dnode_pile;
76 static struct cake_pile* inode_pile;
77 static struct cake_pile* file_pile;
78 static struct cake_pile* superblock_pile;
79 static struct cake_pile* fd_pile;
81 struct v_dnode* vfs_sysroot;
82 static struct hbucket* dnode_cache;
84 static struct lru_zone *dnode_lru, *inode_lru;
86 struct hstr vfs_ddot = HSTR("..", 2);
87 struct hstr vfs_dot = HSTR(".", 1);
88 struct hstr vfs_empty = HSTR("", 0);
94 vfs_sb_free(struct v_superblock* sb);
97 __vfs_try_evict_dnode(struct lru_node* obj);
100 __vfs_try_evict_inode(struct lru_node* obj);
105 // Create a dedicated cake pile for these objects instead of using valloc,
105 // so we can minimize internal fragmentation.
106 dnode_pile = cake_new_pile("dnode_cache", sizeof(struct v_dnode), 1, 0);
107 inode_pile = cake_new_pile("inode_cache", sizeof(struct v_inode), 1, 0);
108 file_pile = cake_new_pile("file_cache", sizeof(struct v_file), 1, 0);
109 fd_pile = cake_new_pile("fd_cache", sizeof(struct v_fd), 1, 0);
111 cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
113 dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
115 dnode_lru = lru_new_zone(__vfs_try_evict_dnode);
116 inode_lru = lru_new_zone(__vfs_try_evict_inode);
118 hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
119 hstr_rehash(&vfs_dot, HSTR_FULL_HASH);
122 vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
123 atomic_fetch_add(&vfs_sysroot->ref_count, 1);
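// Pick the dcache bucket for a (parent, name-hash) pair.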
126 inline struct hbucket*
127 __dcache_hash(struct v_dnode* parent, uint32_t* hash)
129 uint32_t _hash = *hash;
130 // Add in the parent's pointer value to reduce the chance of collisions.
131 _hash += (uint32_t)parent;
133 _hash = _hash ^ (_hash >> VFS_HASHBITS);
135 return &dnode_cache[_hash & VFS_HASH_MASK];
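// Look up a cached child of `parent` by name. "." and the empty name resolve
// to `parent` itself, ".." to its parent; anything else is searched in the
// hashed dcache bucket.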
139 vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
141 if (!str->len || HSTR_EQ(str, &vfs_dot))
144 if (HSTR_EQ(str, &vfs_ddot)) {
145 return parent->parent ? parent->parent : parent;
148 uint32_t hash = str->hash;
149 struct hbucket* slot = __dcache_hash(parent, &hash);
151 struct v_dnode *pos, *n;
152 hashtable_bucket_foreach(slot, pos, n, hash_list)
154 if (pos->name.hash == hash) {
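// Attach `dnode` under `parent`: take a reference on it, link it into the
// parent's children list and hash it into the dcache.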
162 vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
164 atomic_fetch_add(&dnode->ref_count, 1);
165 dnode->parent = parent;
166 llist_append(&parent->children, &dnode->siblings);
168 struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
169 hlist_add(&bucket->head, &dnode->hash_list);
173 vfs_dcache_remove(struct v_dnode* dnode)
175 assert(dnode->ref_count == 1);
177 llist_delete(&dnode->siblings);
178 hlist_delete(&dnode->hash_list);
180 dnode->parent = NULL;
181 atomic_fetch_sub(&dnode->ref_count, 1);
185 vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
187 hstr_rehash(&dnode->name, HSTR_FULL_HASH);
188 vfs_dcache_remove(dnode);
189 vfs_dcache_add(new_parent, dnode);
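// Core path walker: consume `path` one component at a time starting from
// `start` (or a root for absolute paths), consulting the dcache first and
// falling back to the filesystem's dir_lookup. VFS_WALK_PARENT stops at the
// last component; VFS_WALK_MKPARENT creates components found missing on the way.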
193 __vfs_walk(struct v_dnode* start,
195 struct v_dnode** dentry,
196 struct hstr* component,
202 if (path[0] == PATH_DELIM || !start) {
203 if ((walk_options & VFS_WALK_FSRELATIVE) && start) {
204 start = start->super_block->root;
207 if (!vfs_sysroot->mnt) {
208 panick("vfs: no root");
214 struct v_dnode* dnode;
215 struct v_dnode* current_level = start;
217 char name_content[VFS_NAME_MAXLEN];
218 struct hstr name = HSTR(name_content, 0);
220 char current = path[i++], lookahead;
221 while (current && current_level) {
222 lookahead = path[i++];
223 if (current != PATH_DELIM) {
224 if (j >= VFS_NAME_MAXLEN - 1) {
227 if (!VFS_VALID_CHAR(current)) {
230 name_content[j++] = current;
236 // handling cases like /^.*(\/+).*$/
237 if (lookahead == PATH_DELIM) {
241 lock_dnode(current_level);
245 hstr_rehash(&name, HSTR_FULL_HASH);
247 if (!lookahead && (walk_options & VFS_WALK_PARENT)) {
249 component->hash = name.hash;
251 strcpy(component->value, name_content);
253 unlock_dnode(current_level);
257 dnode = vfs_dcache_lookup(current_level, &name);
260 dnode = vfs_d_alloc(current_level, &name);
267 struct v_inode* current_inode = current_level->inode;
269 lock_inode(current_inode);
271 errno = current_inode->ops->dir_lookup(current_inode, dnode);
273 if (errno == ENOENT && (walk_options & VFS_WALK_MKPARENT)) {
274 if (!current_inode->ops->mkdir) {
277 errno = current_inode->ops->mkdir(current_inode, dnode);
281 vfs_dcache_add(current_level, dnode);
282 unlock_inode(current_inode);
285 unlock_dnode(current_level);
290 unlock_dnode(current_level);
293 current_level = dnode;
298 *dentry = current_level;
308 #define VFS_MAX_SYMLINK 16
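// Wrapper around __vfs_walk that additionally resolves trailing symlinks by
// re-walking their targets, capped at VFS_MAX_SYMLINK hops (see FIXME below).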
311 vfs_walk(struct v_dnode* start,
313 struct v_dnode** dentry,
314 struct hstr* component,
317 struct v_dnode* interim;
318 const char* pathname = path;
319 int errno = __vfs_walk(start, path, &interim, component, options);
322 // FIXME This is NOT a correct way to resolve symlink!
323 while (!errno && interim->inode && (options & VFS_WALK_NOFOLLOW)) {
324 if (counter >= VFS_MAX_SYMLINK) {
328 if ((interim->inode->itype & VFS_IFSYMLINK) &&
329 interim->inode->ops->read_symlink) {
331 lock_inode(interim->inode);
333 interim->inode->ops->read_symlink(interim->inode, &pathname);
334 unlock_inode(interim->inode);
342 errno = __vfs_walk(start, pathname, &interim, component, options);
346 *dentry = errno ? 0 : interim;
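// Build an open file object on top of `dnode`: allocate a v_file, attach a
// page cache to regular files that lack one, invoke the inode's open hook,
// then pin the dnode and mark its mount point busy.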
352 vfs_open(struct v_dnode* dnode, struct v_file** file)
354 if (!dnode->inode || !dnode->inode->ops->open) {
358 struct v_inode* inode = dnode->inode;
362 struct v_file* vfile = cake_grab(file_pile);
363 memset(vfile, 0, sizeof(*vfile));
365 vfile->dnode = dnode;
366 vfile->inode = inode;
367 vfile->ref_count = ATOMIC_VAR_INIT(1);
368 vfile->ops = inode->default_fops;
370 if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
371 struct pcache* pcache = vzalloc(sizeof(struct pcache));
373 pcache->master = inode;
374 inode->pg_cache = pcache;
377 int errno = inode->ops->open(inode, vfile);
379 cake_release(file_pile, vfile);
381 atomic_fetch_add(&dnode->ref_count, 1);
383 mnt_mkbusy(dnode->mnt);
394 vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
396 if (assign_to->inode) {
397 assign_to->inode->link_count--;
399 assign_to->inode = inode;
404 vfs_link(struct v_dnode* to_link, struct v_dnode* name)
408 lock_inode(to_link->inode);
409 if (to_link->super_block->root != name->super_block->root) {
411 } else if (!to_link->inode->ops->link) {
413 } else if (!(errno = to_link->inode->ops->link(to_link->inode, name))) {
414 vfs_assign_inode(name, to_link->inode);
416 unlock_inode(to_link->inode);
422 vfs_close(struct v_file* file)
425 if (!(errno = file->ops->close(file))) {
426 atomic_fetch_sub(&file->dnode->ref_count, 1);
427 file->inode->open_count--;
428 mnt_chillax(file->dnode->mnt);
430 pcache_commit_all(file->inode);
431 cake_release(file_pile, file);
437 vfs_fsync(struct v_file* file)
439 lock_inode(file->inode);
442 pcache_commit_all(file->inode);
444 if (file->ops->sync) {
445 errno = file->ops->sync(file);
448 unlock_inode(file->inode);
454 vfs_alloc_fdslot(int* fd)
456 for (size_t i = 0; i < VFS_MAX_FD; i++) {
457 if (!__current->fdtable->fds[i]) {
468 struct v_superblock* sb = cake_grab(superblock_pile);
469 memset(sb, 0, sizeof(*sb));
470 llist_init_head(&sb->sb_list);
471 sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
476 vfs_sb_free(struct v_superblock* sb)
479 cake_release(superblock_pile, sb);
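// LRU eviction callbacks: an object may only be reclaimed when nothing is
// using it (zero refs for a dnode; no links and no opens for an inode).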
483 __vfs_try_evict_dnode(struct lru_node* obj)
485 struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);
487 if (!dnode->ref_count) {
495 __vfs_try_evict_inode(struct lru_node* obj)
497 struct v_inode* inode = container_of(obj, struct v_inode, lru);
499 if (!inode->link_count && !inode->open_count) {
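// Allocate and initialise a dnode from the cake pile, evicting half of the
// dnode LRU and retrying once if the pile is exhausted.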
507 vfs_d_alloc(struct v_dnode* parent, struct hstr* name)
509 struct v_dnode* dnode = cake_grab(dnode_pile);
511 lru_evict_half(dnode_lru);
513 if (!(dnode = cake_grab(dnode_pile))) {
518 memset(dnode, 0, sizeof(*dnode));
519 llist_init_head(&dnode->children);
520 llist_init_head(&dnode->siblings);
521 mutex_init(&dnode->lock);
523 dnode->ref_count = ATOMIC_VAR_INIT(0);
524 dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
526 hstrcpy(&dnode->name, name);
529 dnode->super_block = parent->super_block;
532 lru_use_one(dnode_lru, &dnode->lru);
538 vfs_d_free(struct v_dnode* dnode)
540 assert(dnode->ref_count == 1);
543 assert(dnode->inode->link_count > 0);
544 dnode->inode->link_count--;
547 vfs_dcache_remove(dnode);
548 // Make sure the children de-reference their parent.
549 // With the LRU in place, eviction will eventually propagate over the
550 // entire detached subtree.
551 struct v_dnode *pos, *n;
552 llist_for_each(pos, n, &dnode->children, siblings)
554 vfs_dcache_remove(pos);
557 vfree(dnode->name.value);
558 cake_release(dnode_pile, dnode);
562 vfs_i_find(struct v_superblock* sb, uint32_t i_id)
564 struct hbucket* slot = &sb->i_cache[i_id & VFS_HASH_MASK];
565 struct v_inode *pos, *n;
566 hashtable_bucket_foreach(slot, pos, n, hash_list)
568 if (pos->id == i_id) {
569 lru_use_one(inode_lru, &pos->lru);
578 vfs_i_addhash(struct v_inode* inode)
580 struct hbucket* slot = &inode->sb->i_cache[inode->id & VFS_HASH_MASK];
582 hlist_delete(&inode->hash_list);
583 hlist_add(&slot->head, &inode->hash_list);
587 vfs_i_alloc(struct v_superblock* sb)
589 assert(sb->ops.init_inode);
591 struct v_inode* inode;
592 if (!(inode = cake_grab(inode_pile))) {
593 lru_evict_half(inode_lru);
594 if (!(inode = cake_grab(inode_pile))) {
599 memset(inode, 0, sizeof(*inode));
600 mutex_init(&inode->lock);
602 sb->ops.init_inode(sb, inode);
605 inode->ctime = clock_unixtime();
606 inode->atime = inode->ctime;
607 inode->mtime = inode->ctime;
610 lru_use_one(inode_lru, &inode->lru);
615 vfs_i_free(struct v_inode* inode)
617 if (inode->pg_cache) {
618 pcache_release(inode->pg_cache);
619 vfree(inode->pg_cache);
621 inode->ops->sync(inode);
622 hlist_delete(&inode->hash_list);
623 cake_release(inode_pile, inode);
626 /* ---- System call definition and support ---- */
628 #define FLOCATE_CREATE_EMPTY 1
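// Translate a numeric file descriptor into its v_fd entry in the calling
// process's fd table.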
631 __vfs_getfd(int fd, struct v_fd** fd_s)
633 if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {
640 __vfs_try_locate_file(const char* path,
641 struct v_dnode** fdir,
642 struct v_dnode** file,
645 char name_str[VFS_NAME_MAXLEN];
646 struct hstr name = HSTR(name_str, 0);
649 vfs_walk(__current->cwd, path, fdir, &name, VFS_WALK_PARENT))) {
653 errno = vfs_walk(*fdir, name.value, file, NULL, 0);
654 if (errno != ENOENT || !(options & FLOCATE_CREATE_EMPTY)) {
658 struct v_dnode* parent = *fdir;
659 struct v_dnode* file_new = vfs_d_alloc(parent, &name);
667 if (!(errno = parent->inode->ops->create(parent->inode, file_new))) {
668 vfs_dcache_add(parent, file_new);
671 vfs_d_free(file_new);
674 unlock_dnode(parent);
680 vfs_do_open(const char* path, int options)
683 struct v_dnode *dentry, *file;
684 struct v_file* ofile = 0;
686 errno = __vfs_try_locate_file(
687 path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);
689 if (errno || (errno = vfs_open(file, &ofile))) {
693 struct v_inode* o_inode = ofile->inode;
695 if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
696 struct v_fd* fd_s = vzalloc(sizeof(*fd_s));
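// Branchless: start f_pos at fsize when FO_APPEND is requested, else at 0.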
697 ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);
699 fd_s->flags = options;
700 __current->fdtable->fds[fd] = fd_s;
707 __DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
709 int errno = vfs_do_open(path, options);
710 return DO_STATUS_OR_RETURN(errno);
713 __DEFINE_LXSYSCALL1(int, close, int, fd)
717 if ((errno = __vfs_getfd(fd, &fd_s))) {
721 if (fd_s->file->ref_count > 1) {
722 fd_s->file->ref_count--;
723 } else if ((errno = vfs_close(fd_s->file))) {
728 __current->fdtable->fds[fd] = 0;
731 return DO_STATUS(errno);
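// Callback handed to the fs readdir implementation: copies a single entry's
// name and type into the user dirent carried in dctx->cb_data.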
735 __vfs_readdir_callback(struct dir_context* dctx,
740 struct dirent* dent = (struct dirent*)dctx->cb_data;
741 strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);
743 dent->d_type = dtype;
746 __DEFINE_LXSYSCALL2(int, readdir, int, fd, struct dirent*, dent)
751 if ((errno = __vfs_getfd(fd, &fd_s))) {
755 struct v_inode* inode = fd_s->file->inode;
759 if (!(inode->itype & VFS_IFDIR)) {
762 struct dir_context dctx =
763 (struct dir_context){ .cb_data = dent,
764 .index = dent->d_offset,
765 .read_complete_callback =
766 __vfs_readdir_callback };
768 if (dent->d_offset == 0) {
769 __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, 0);
770 } else if (dent->d_offset == 1) {
771 __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, 0);
774 if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {
785 return DO_STATUS_OR_RETURN(errno);
788 __DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
792 if ((errno = __vfs_getfd(fd, &fd_s))) {
796 struct v_file* file = fd_s->file;
797 if ((file->inode->itype & VFS_IFDIR)) {
802 lock_inode(file->inode);
804 file->inode->atime = clock_unixtime();
806 __SYSCALL_INTERRUPTIBLE({
807 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
808 errno = file->ops->read(file->inode, buf, count, file->f_pos);
810 errno = pcache_read(file->inode, buf, count, file->f_pos);
815 file->f_pos += errno;
816 unlock_inode(file->inode);
820 unlock_inode(file->inode);
823 return DO_STATUS(errno);
826 __DEFINE_LXSYSCALL3(int, write, int, fd, void*, buf, size_t, count)
830 if ((errno = __vfs_getfd(fd, &fd_s))) {
834 struct v_file* file = fd_s->file;
835 if ((file->inode->itype & VFS_IFDIR)) {
840 lock_inode(file->inode);
842 file->inode->mtime = clock_unixtime();
844 __SYSCALL_INTERRUPTIBLE({
845 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
846 errno = file->ops->write(file->inode, buf, count, file->f_pos);
848 errno = pcache_write(file->inode, buf, count, file->f_pos);
853 file->f_pos += errno;
854 unlock_inode(file->inode);
858 unlock_inode(file->inode);
861 return DO_STATUS(errno);
864 __DEFINE_LXSYSCALL3(int, lseek, int, fd, int, offset, int, options)
868 if ((errno = __vfs_getfd(fd, &fd_s))) {
872 struct v_file* file = fd_s->file;
874 lock_inode(file->inode);
876 size_t fpos = file->f_pos;
879 fpos = (size_t)((int)file->f_pos + offset);
882 fpos = (size_t)((int)file->inode->fsize + offset);
888 if (!(errno = file->ops->seek(file->inode, fpos))) {
892 unlock_inode(file->inode);
895 return DO_STATUS(errno);
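// Reconstruct the path of `dnode` into `buf`, recursing to emit ancestors
// first and then appending this dnode's own name; returns the length written.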
899 vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
909 size_t len = vfs_get_path(dnode->parent, buf, size, depth + 1);
915 size_t cpy_size = MIN(dnode->name.len, size - len);
916 strncpy(buf + len, dnode->name.value, cpy_size);
920 buf[len++] = PATH_DELIM;
927 vfs_readlink(struct v_dnode* dnode, char* buf, size_t size)
930 struct v_inode* inode = dnode->inode;
931 if (inode->ops->read_symlink) {
934 int errno = inode->ops->read_symlink(inode, &link);
935 strncpy(buf, link, size);
943 __DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
947 if ((errno = __vfs_getfd(fd, &fd_s))) {
951 struct v_dnode* dnode;
952 errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);
959 return DO_STATUS(errno);
962 __DEFINE_LXSYSCALL3(int, readlink, const char*, path, char*, buf, size_t, size)
965 struct v_dnode* dnode;
967 vfs_walk(__current->cwd, path, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
968 errno = vfs_readlink(dnode, buf, size);
975 return DO_STATUS(errno);
978 __DEFINE_LXSYSCALL4(int,
991 if ((errno = __vfs_getfd(dirfd, &fd_s))) {
995 struct v_dnode* dnode;
996 if (!(errno = vfs_walk(
997 fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
998 errno = vfs_readlink(fd_s->file->dnode, buf, size);
1006 return DO_STATUS(errno);
1011 When we perform an operation that could affect the layout of a
1012 directory (i.e., rename, mkdir, rmdir), we must lock the parent dir
1013 whenever possible. This blocks any ongoing path walk from reaching
1014 it, hence avoiding any partial state.
1017 __DEFINE_LXSYSCALL1(int, rmdir, const char*, pathname)
1020 struct v_dnode* dnode;
1021 if ((errno = vfs_walk(__current->cwd, pathname, &dnode, NULL, 0))) {
1022 return DO_STATUS(errno);
1027 if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
1032 if (dnode->ref_count > 1 || dnode->inode->open_count) {
1037 if (!llist_empty(&dnode->children)) {
1042 struct v_dnode* parent = dnode->parent;
1050 lock_inode(parent->inode);
1052 if ((dnode->inode->itype & VFS_IFDIR)) {
1053 errno = parent->inode->ops->rmdir(parent->inode, dnode);
1055 vfs_dcache_remove(dnode);
1061 unlock_inode(parent->inode);
1062 unlock_dnode(parent);
1065 unlock_dnode(dnode);
1066 return DO_STATUS(errno);
1069 __DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
1072 struct v_dnode *parent, *dir;
1073 char name_value[VFS_NAME_MAXLEN];
1074 struct hstr name = HHSTR(name_value, 0, 0);
1082 vfs_walk(__current->cwd, path, &parent, &name, VFS_WALK_PARENT))) {
1086 dir = vfs_d_alloc(parent, &name);
1089 lock_inode(parent->inode);
1091 if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
1093 } else if (!parent->inode->ops->mkdir) {
1095 } else if (!(parent->inode->itype & VFS_IFDIR)) {
1097 } else if (!(errno = parent->inode->ops->mkdir(parent->inode, dir))) {
1098 vfs_dcache_add(parent, dir);
1105 unlock_inode(parent->inode);
1106 unlock_dnode(parent);
1108 return DO_STATUS(errno);
1112 __vfs_do_unlink(struct v_dnode* dnode)
1114 struct v_inode* inode = dnode->inode;
1116 if (dnode->ref_count > 1) {
1123 if (inode->open_count) {
1125 } else if (!(inode->itype & VFS_IFDIR)) {
1126 // The underlying unlink implementation should handle
1128 errno = inode->ops->unlink(inode);
1136 unlock_inode(inode);
1141 __DEFINE_LXSYSCALL1(int, unlink, const char*, pathname)
1144 struct v_dnode* dnode;
1145 if ((errno = vfs_walk(__current->cwd, pathname, &dnode, NULL, 0))) {
1148 if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
1153 errno = __vfs_do_unlink(dnode);
1156 return DO_STATUS(errno);
1159 __DEFINE_LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
1163 if ((errno = __vfs_getfd(fd, &fd_s))) {
1167 struct v_dnode* dnode;
1168 if (!(errno = vfs_walk(fd_s->file->dnode, pathname, &dnode, NULL, 0))) {
1169 errno = __vfs_do_unlink(dnode);
1173 return DO_STATUS(errno);
1176 __DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
1179 struct v_dnode *dentry, *to_link, *name_dentry, *name_file;
1181 errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
1183 errno = __vfs_try_locate_file(
1184 newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
1187 } else if (name_file) {
1188 errno = vfs_link(to_link, name_file);
1191 return DO_STATUS(errno);
1194 __DEFINE_LXSYSCALL1(int, fsync, int, fildes)
1198 if (!(errno = __vfs_getfd(fildes, &fd_s))) {
1199 errno = vfs_fsync(fd_s->file);
1202 return DO_STATUS(errno);
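// Duplicate a descriptor slot: copy the v_fd and take an extra reference on
// the underlying open file.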
1206 vfs_dup_fd(struct v_fd* old, struct v_fd** new)
1209 struct v_fd* copied = cake_grab(fd_pile);
1211 memcpy(copied, old, sizeof(struct v_fd));
1213 atomic_fetch_add(&old->file->ref_count, 1);
1221 vfs_dup2(int oldfd, int newfd)
1223 if (newfd == oldfd) {
1228 struct v_fd *oldfd_s, *newfd_s;
1229 if ((errno = __vfs_getfd(oldfd, &oldfd_s))) {
1233 if (!TEST_FD(newfd)) {
1238 newfd_s = __current->fdtable->fds[newfd];
1239 if (newfd_s && (errno = vfs_close(newfd_s->file))) {
1243 if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1244 __current->fdtable->fds[newfd] = newfd_s;
1249 return DO_STATUS(errno);
1252 __DEFINE_LXSYSCALL2(int, dup2, int, oldfd, int, newfd)
1254 return vfs_dup2(oldfd, newfd);
1257 __DEFINE_LXSYSCALL1(int, dup, int, oldfd)
1260 struct v_fd *oldfd_s, *newfd_s;
1261 if ((errno = __vfs_getfd(oldfd, &oldfd_s))) {
1265 if (!(errno = vfs_alloc_fdslot(&newfd)) &&
1266 !(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1267 __current->fdtable->fds[newfd] = newfd_s;
1272 return DO_STATUS(errno);
1275 __DEFINE_LXSYSCALL2(int,
1283 struct v_dnode* dnode;
1284 if ((errno = vfs_walk(__current->cwd, pathname, &dnode, NULL, 0))) {
1287 if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
1291 if (!dnode->inode->ops->set_symlink) {
1296 lock_inode(dnode->inode);
1298 errno = dnode->inode->ops->set_symlink(dnode->inode, link_target);
1300 unlock_inode(dnode->inode);
1303 return DO_STATUS(errno);
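// Switch the working directory of the current process: the target must be a
// directory; drop the reference and mount pin of the old cwd, then pin the new one.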
1307 __vfs_do_chdir(struct v_dnode* dnode)
1313 if (!(dnode->inode->itype & VFS_IFDIR)) {
1318 if (__current->cwd) {
1319 atomic_fetch_sub(&__current->cwd->ref_count, 1);
1320 mnt_chillax(__current->cwd->mnt);
1323 atomic_fetch_add(&dnode->ref_count, 1);
1324 mnt_mkbusy(dnode->mnt);
1325 __current->cwd = dnode;
1327 unlock_dnode(dnode);
1333 __DEFINE_LXSYSCALL1(int, chdir, const char*, path)
1335 struct v_dnode* dnode;
1338 if ((errno = vfs_walk(__current->cwd, path, &dnode, NULL, 0))) {
1342 errno = __vfs_do_chdir(dnode);
1345 return DO_STATUS(errno);
1348 __DEFINE_LXSYSCALL1(int, fchdir, int, fd)
1353 if ((errno = __vfs_getfd(fd, &fd_s))) {
1357 errno = __vfs_do_chdir(fd_s->file->dnode);
1360 return DO_STATUS(errno);
1363 __DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
1374 if (!__current->cwd) {
1378 len = vfs_get_path(__current->cwd, buf, size, 0);
1385 buf[len + 1] = '\0';
1390 __current->k_status = errno;
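// Rename `current` onto `target`: both must live on the same superblock and
// be otherwise unreferenced. Once the fs-level rename succeeds, the dnode
// takes over the target's name and is rehashed under the new parent.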
1395 vfs_do_rename(struct v_dnode* current, struct v_dnode* target)
1397 if (current->inode->id == target->inode->id) {
1402 if (current->ref_count > 1 || target->ref_count > 1) {
1406 if (current->super_block != target->super_block) {
1412 struct v_dnode* oldparent = current->parent;
1413 struct v_dnode* newparent = target->parent;
1415 lock_dnode(current);
1418 lock_dnode(oldparent);
1420 lock_dnode(newparent);
1422 if (!llist_empty(&target->children)) {
1424 unlock_dnode(target);
1429 current->inode->ops->rename(current->inode, current, target))) {
1430 unlock_dnode(target);
1434 // re-position current
1435 hstrcpy(&current->name, &target->name);
1436 vfs_dcache_rehash(newparent, current);
1441 unlock_dnode(target);
1444 unlock_dnode(current);
1446 unlock_dnode(oldparent);
1448 unlock_dnode(newparent);
1453 __DEFINE_LXSYSCALL2(int, rename, const char*, oldpath, const char*, newpath)
1455 struct v_dnode *cur, *target_parent, *target;
1456 struct hstr name = HSTR(valloc(VFS_NAME_MAXLEN), 0);
1459 if ((errno = vfs_walk(__current->cwd, oldpath, &cur, NULL, 0))) {
1463 if ((errno = vfs_walk(
1464 __current->cwd, newpath, &target_parent, &name, VFS_WALK_PARENT))) {
1468 errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
1469 if (errno == ENOENT) {
1470 target = vfs_d_alloc(target_parent, &name);
1471 vfs_dcache_add(target_parent, target);
1481 errno = vfs_do_rename(cur, target);
1485 return DO_STATUS(errno);