3 * @author Lunaixsky (zelong56@gmail.com)
4 * @brief Lunaix virtual file system - an abstraction layer for all file system.
8 * @copyright Copyright (c) 2022
12 // Welcome to The Mountain O'Shit! :)
15 TODO vfs & device todos checklist
17 It is overseen by Twilight Sparkle ;)
19 1. Get inodes hooked into lru (CHECKED)
20 2. Get dnodes hooked into lru (CHECKED)
21 3. Get inodes properly hashed so they can be reused by underlying fs (CHECKED)
22 4. (lru) Add a callback function (or destructor) for eviction. (CHECKED)
23 [good idea] or a constructor/destructor pattern in cake allocator ?
24 5. (mount) Figure out a way to identify a busy mount point before unmount
25 maybe a unified mount_point structure that maintain a referencing
26 counter on any dnodes within the subtree? Such a counter will only
27 increment if a file is opened or a dnode is being used as working
28 directory and decrement conversely. (CHECKED)
29 6. (mount) Ability to track all mount points (including sub-mounts)
30 so we can be confident to clean up everything when we
32 7. (mount) Figure out a way to acquire the device represented by a dnode.
33 so it can be used to mount. (e.g. we wish to get `struct device*`
34 out of the dnode at /dev/sda)
35 [tip] we should pay attention at twifs and add a private_data field
36 under struct v_dnode? (CHECKED)
37 8. (mount) Then, we should refactor on mount/unmount mechanism. (CHECKED)
38 9. (mount) (future) Ability to mount any thing? e.g. Linux can mount a disk
39 image file using a so called "loopback" pseudo device. Maybe
40 we can do similar thing in Lunaix? A block device emulation
41 above the regular file when we mount it on.
42 10. (device) device number (dev_t) allocation
43 [good idea] <class>:<subclass>:<uniq_id> composition
46 #include <klibc/string.h>
47 #include <lunaix/dirent.h>
48 #include <lunaix/foptions.h>
49 #include <lunaix/fs.h>
50 #include <lunaix/mm/cake.h>
51 #include <lunaix/mm/page.h>
52 #include <lunaix/mm/valloc.h>
53 #include <lunaix/process.h>
54 #include <lunaix/spike.h>
55 #include <lunaix/syscall.h>
57 #include <lunaix/fs/twifs.h>
59 #define PATH_DELIM '/'

/*
 * inode/dnode lock helpers. Taking a lock also refreshes the object's
 * position in its LRU zone, keeping in-use objects away from eviction.
 * NOTE(review): the brace lines of the multi-line macros are elided in
 * this excerpt; comments are intentionally placed outside the macros.
 */
61 #define unlock_inode(inode) mutex_unlock(&inode->lock)
62 #define lock_inode(inode) \
64 mutex_lock(&inode->lock); \
65 lru_use_one(inode_lru, &inode->lru); \
68 #define unlock_dnode(dnode) mutex_unlock(&dnode->lock)
69 #define lock_dnode(dnode) \
71 mutex_lock(&dnode->lock); \
72 lru_use_one(dnode_lru, &dnode->lru); \
/* Dedicated cake (slab-style) allocator piles for core VFS objects. */
75 static struct cake_pile* dnode_pile;
76 static struct cake_pile* inode_pile;
77 static struct cake_pile* file_pile;
78 static struct cake_pile* superblock_pile;
79 static struct cake_pile* fd_pile;

/* Root of the VFS tree and the global dnode hash cache. */
81 struct v_dnode* vfs_sysroot;
82 static struct hbucket* dnode_cache;

/* LRU zones driving eviction of cached dnodes and inodes. */
84 static struct lru_zone *dnode_lru, *inode_lru;

/* Pre-hashed canonical path components: "..", "." and the empty name. */
86 struct hstr vfs_ddot = HSTR("..", 2);
87 struct hstr vfs_dot = HSTR(".", 1);
88 struct hstr vfs_empty = HSTR("", 0);

/* Forward declarations (return-type lines elided in this excerpt). */
94 vfs_sb_free(struct v_superblock* sb);
97 __vfs_try_evict_dnode(struct lru_node* obj);
100 __vfs_try_evict_inode(struct lru_node* obj);
/*
 * VFS bootstrap (enclosing function signature elided in this excerpt):
 * creates object piles, the dnode hash cache, the LRU zones, and the
 * root dnode of the whole tree.
 */
105 // Create dedicated cake piles for these objects instead of using valloc, so internal fragmentation is minimized
106 dnode_pile = cake_new_pile("dnode_cache", sizeof(struct v_dnode), 1, 0);
107 inode_pile = cake_new_pile("inode_cache", sizeof(struct v_inode), 1, 0);
108 file_pile = cake_new_pile("file_cache", sizeof(struct v_file), 1, 0);
109 fd_pile = cake_new_pile("fd_cache", sizeof(struct v_fd), 1, 0);
/* presumably assigned to superblock_pile on an elided line — confirm */
111 cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
113 dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
115 dnode_lru = lru_new_zone(__vfs_try_evict_dnode);
116 inode_lru = lru_new_zone(__vfs_try_evict_inode);
/* Pre-compute the hashes of "." and ".." so lookups can compare them. */
118 hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
119 hstr_rehash(&vfs_dot, HSTR_FULL_HASH);
/* The root dnode has no parent and an empty name; pin it with a ref. */
122 vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
123 atomic_fetch_add(&vfs_sysroot->ref_count, 1);
/*
 * Map a (parent dnode, name hash) pair to its bucket in the global
 * dnode cache hash table.
 */
126 inline struct hbucket*
127 __dcache_hash(struct v_dnode* parent, uint32_t* hash)
129     uint32_t _hash = *hash;
130 // Mix in the parent's pointer value to reduce the chance of collisions.
131     _hash += (uint32_t)parent;
/* Fold the high bits down before masking to the table size. */
133     _hash = _hash ^ (_hash >> VFS_HASHBITS);
135     return &dnode_cache[_hash & VFS_HASH_MASK];
/*
 * Look up a child dnode of `parent` by name in the dnode cache.
 * "" and "." resolve to the parent itself; ".." resolves to the
 * grandparent (or the parent when it has no parent, i.e. at the root).
 */
139 vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
141     if (!str->len || HSTR_EQ(str, &vfs_dot))
144     if (HSTR_EQ(str, &vfs_ddot)) {
145         return parent->parent ? parent->parent : parent;
/* Regular name: walk the hash bucket and match on the name hash. */
148     uint32_t hash = str->hash;
149     struct hbucket* slot = __dcache_hash(parent, &hash);
151     struct v_dnode *pos, *n;
152     hashtable_bucket_foreach(slot, pos, n, hash_list)
154         if (pos->name.hash == hash) {
/*
 * Insert `dnode` into the cache as a child of `parent`: takes a
 * reference, links it into the parent's child list, and hashes it
 * into the global dnode cache.
 */
162 vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
166     atomic_fetch_add(&dnode->ref_count, 1);
167     dnode->parent = parent;
168     llist_append(&parent->children, &dnode->siblings);
170     struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
171     hlist_add(&bucket->head, &dnode->hash_list);
/*
 * Detach `dnode` from the cache: unlink from sibling list and hash
 * table, clear its parent, and drop the reference taken by
 * vfs_dcache_add. Caller must hold the only outstanding reference.
 */
175 vfs_dcache_remove(struct v_dnode* dnode)
178     assert(dnode->ref_count == 1);
180     llist_delete(&dnode->siblings);
181     hlist_delete(&dnode->hash_list);
183     dnode->parent = NULL;
184     atomic_fetch_sub(&dnode->ref_count, 1);
/*
 * Re-home `dnode` under `new_parent`: recompute its name hash, remove
 * it from its current slot, then re-insert under the new parent.
 */
188 vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
192     hstr_rehash(&dnode->name, HSTR_FULL_HASH);
193     vfs_dcache_remove(dnode);
194     vfs_dcache_add(new_parent, dnode);
/* Maximum symlink-resolution recursion depth before the walk fails. */
197 #define VFS_SYMLINK_DEPTH 16

/*
 * Core path-walking routine. Resolves `path` starting from `start`,
 * consulting the dnode cache first and falling back to the file
 * system's dir_lookup. Recurses (bounded by VFS_SYMLINK_DEPTH) to
 * follow symlinks. With VFS_WALK_PARENT the final component is copied
 * into `component` instead of being resolved.
 * NOTE(review): many interior lines are elided in this excerpt;
 * comments below describe only the visible logic.
 */
200 __vfs_walk(struct v_dnode* start,
202            struct v_dnode** dentry,
203            struct hstr* component,
/* Refuse to recurse further once the symlink budget is spent. */
211     if (depth >= VFS_SYMLINK_DEPTH) {
/* Absolute path (or no start given): pick the walk origin. */
215     if (path[0] == PATH_DELIM || !start) {
216         if ((walk_options & VFS_WALK_FSRELATIVE) && start) {
/* FS-relative walk starts from the superblock's own root. */
217             start = start->super_block->root;
220             if (!vfs_sysroot->mnt) {
221                 panick("vfs: no root");
227     struct v_dnode* dnode;
228     struct v_inode* current_inode;
229     struct v_dnode* current_level = start;
/* `name` aliases the caller-provided scratch buffer for one component. */
231     struct hstr name = HSTR(fname_buffer, 0);
233     char current = path[i++], lookahead;
234     while (current && current_level) {
235         lookahead = path[i++];
236         if (current != PATH_DELIM) {
/* Accumulate one path component, bounded by VFS_NAME_MAXLEN. */
237             if (j >= VFS_NAME_MAXLEN - 1) {
240             if (!VFS_VALID_CHAR(current)) {
243             fname_buffer[j++] = current;
249             // handling cases like /^.*(\/+).*$/  (collapse repeated '/')
250             if (lookahead == PATH_DELIM) {
256         hstr_rehash(&name, HSTR_FULL_HASH);
/* Last component + PARENT mode: hand the name back, don't resolve it. */
258         if (!lookahead && (walk_options & VFS_WALK_PARENT)) {
260             component->hash = name.hash;
262             strcpy(component->value, fname_buffer);
267         current_inode = current_level->inode;
/* Symlink: read the target and re-walk it from the parent directory. */
269         if ((current_inode->itype & VFS_IFSYMLINK)) {
272             lock_inode(current_inode);
274                   current_inode->ops->read_symlink(current_inode, &link))) {
275                 unlock_inode(current_inode);
278             unlock_inode(current_inode);
/* Recurse on the link target; scratch space advances past `name`. */
280             errno = __vfs_walk(current_level->parent,
286                                fname_buffer + name.len + 1);
292             // reposition the resolved subtree pointed by symlink
293             vfs_dcache_rehash(current_level->parent, dnode);
294             current_level = dnode;
295             current_inode = dnode->inode;
298         lock_dnode(current_level);
/* Try the cache first; on miss allocate and ask the fs to look up. */
300         dnode = vfs_dcache_lookup(current_level, &name);
303             dnode = vfs_d_alloc(current_level, &name);
310             lock_inode(current_inode);
312             errno = current_inode->ops->dir_lookup(current_inode, dnode);
/* Optionally create missing intermediate directories (MKPARENT). */
314             if (errno == ENOENT && (walk_options & VFS_WALK_MKPARENT)) {
315                 if (!current_inode->ops->mkdir) {
318                     errno = current_inode->ops->mkdir(current_inode, dnode);
322                 vfs_dcache_add(current_level, dnode);
323                 unlock_inode(current_inode);
326                 unlock_dnode(current_level);
331         unlock_dnode(current_level);
/* Descend one level and continue with the next component. */
334         current_level = dnode;
339     *dentry = current_level;
/*
 * Public wrapper around __vfs_walk: allocates the shared name-buffer
 * scratch space used by the walk and its symlink recursion, then
 * starts the walk at depth 0.
 */
350 vfs_walk(struct v_dnode* start,
352          struct v_dnode** dentry,
353          struct hstr* component,
356     // allocate a file name stack for path walking and recursion to resolve
/* 2048 bytes shared by all recursion levels (see fname_buffer offsets). */
358     char* name_buffer = valloc(2048);
361         __vfs_walk(start, path, dentry, component, options, 0, name_buffer);
/*
 * Open the file behind `dnode`: allocates a v_file from the pile,
 * wires it to the dnode/inode, lazily creates the inode's page cache
 * for regular files, and invokes the fs-specific open. On success the
 * dnode gains a reference and its mount is marked busy.
 */
368 vfs_open(struct v_dnode* dnode, struct v_file** file)
370     if (!dnode->inode || !dnode->inode->ops->open) {
374     struct v_inode* inode = dnode->inode;
378     struct v_file* vfile = cake_grab(file_pile);
379     memset(vfile, 0, sizeof(*vfile));
381     vfile->dnode = dnode;
382     vfile->inode = inode;
383     vfile->ref_count = ATOMIC_VAR_INIT(1);
384     vfile->ops = inode->default_fops;
/* Lazily attach a page cache to regular files on first open. */
386     if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
387         struct pcache* pcache = vzalloc(sizeof(struct pcache));
389         pcache->master = inode;
390         inode->pg_cache = pcache;
393     int errno = inode->ops->open(inode, vfile);
/* fs open failed: return the v_file to its pile. */
395         cake_release(file_pile, vfile);
397         atomic_fetch_add(&dnode->ref_count, 1);
399         mnt_mkbusy(dnode->mnt);
/*
 * Point `assign_to` at `inode`, dropping one link from any inode it
 * previously referenced.
 */
410 vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
412     if (assign_to->inode) {
413         assign_to->inode->link_count--;
415     assign_to->inode = inode;
/*
 * Create a hard link `name` to the inode of `to_link`. Fails when the
 * two dnodes live on different file systems (cross-device link) or
 * when the fs does not implement link.
 */
420 vfs_link(struct v_dnode* to_link, struct v_dnode* name)
424     lock_inode(to_link->inode);
/* Hard links cannot cross file-system boundaries. */
425     if (to_link->super_block->root != name->super_block->root) {
427     } else if (!to_link->inode->ops->link) {
429     } else if (!(errno = to_link->inode->ops->link(to_link->inode, name))) {
430         vfs_assign_inode(name, to_link->inode);
432     unlock_inode(to_link->inode);
/*
 * Close a v_file: delegates to the fs close, then drops the dnode
 * reference, decrements the inode open count, releases the mount
 * busy mark, flushes the page cache, and frees the v_file.
 */
438 vfs_close(struct v_file* file)
441     if (!(errno = file->ops->close(file))) {
442         atomic_fetch_sub(&file->dnode->ref_count, 1);
443         file->inode->open_count--;
444         mnt_chillax(file->dnode->mnt);
/* Flush dirty cached pages back before the handle disappears. */
446         pcache_commit_all(file->inode);
447         cake_release(file_pile, file);
/*
 * Synchronize a file to its backing store: commit all cached pages,
 * then invoke the file's sync op when the fs provides one.
 */
453 vfs_fsync(struct v_file* file)
455     lock_inode(file->inode);
458     pcache_commit_all(file->inode);
460     if (file->ops->sync) {
461         errno = file->ops->sync(file);
464     unlock_inode(file->inode);
/*
 * Find the lowest free slot in the current process's fd table and
 * return its index through `fd`.
 */
470 vfs_alloc_fdslot(int* fd)
472     for (size_t i = 0; i < VFS_MAX_FD; i++) {
473         if (!__current->fdtable->fds[i]) {
/*
 * Superblock allocation (enclosing signature elided in this excerpt):
 * grab a zeroed v_superblock from the pile and give it a fresh
 * per-superblock inode cache.
 */
484     struct v_superblock* sb = cake_grab(superblock_pile);
485     memset(sb, 0, sizeof(*sb));
486     llist_init_head(&sb->sb_list);
487     sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));

/* Return a superblock to its pile (i_cache release is on elided lines —
   confirm it is freed there). */
492 vfs_sb_free(struct v_superblock* sb)
495     cake_release(superblock_pile, sb);
/*
 * LRU eviction callback for dnodes: only unreferenced dnodes may be
 * evicted.
 */
499 __vfs_try_evict_dnode(struct lru_node* obj)
501     struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);
503     if (!dnode->ref_count) {

/*
 * LRU eviction callback for inodes: evictable only when no links and
 * no open file handles remain.
 */
511 __vfs_try_evict_inode(struct lru_node* obj)
513     struct v_inode* inode = container_of(obj, struct v_inode, lru);
515     if (!inode->link_count && !inode->open_count) {
/*
 * Allocate and initialize a dnode. On pile exhaustion, evicts half of
 * the dnode LRU zone and retries once. The new dnode copies `name`
 * into its own buffer, inherits the parent's superblock, and is
 * enrolled in the LRU zone with a zero reference count.
 */
523 vfs_d_alloc(struct v_dnode* parent, struct hstr* name)
525     struct v_dnode* dnode = cake_grab(dnode_pile);
/* First grab failed (guard on an elided line): evict and retry once. */
527         lru_evict_half(dnode_lru);
529         if (!(dnode = cake_grab(dnode_pile))) {
534     memset(dnode, 0, sizeof(*dnode));
535     llist_init_head(&dnode->children);
536     llist_init_head(&dnode->siblings);
537     mutex_init(&dnode->lock);
539     dnode->ref_count = ATOMIC_VAR_INIT(0);
/* Own name storage; contents copied from the caller's hstr. */
540     dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
542     hstrcpy(&dnode->name, name);
545         dnode->super_block = parent->super_block;
548     lru_use_one(dnode_lru, &dnode->lru);
/*
 * Free a dnode holding exactly one reference: drop its inode link,
 * detach it (and its immediate children) from the dnode cache, then
 * release name storage and the dnode itself.
 */
554 vfs_d_free(struct v_dnode* dnode)
556     assert(dnode->ref_count == 1);
559         assert(dnode->inode->link_count > 0);
560         dnode->inode->link_count--;
563     vfs_dcache_remove(dnode);
564     // Make the children drop the reference to their parent.
565     // With the LRU present, eviction will eventually propagate over
566     // the entire detached subtree.
567     struct v_dnode *pos, *n;
568     llist_for_each(pos, n, &dnode->children, siblings)
570         vfs_dcache_remove(pos);
573     vfree(dnode->name.value);
574     cake_release(dnode_pile, dnode);
/*
 * Look up a cached inode by id in the superblock's inode hash cache;
 * a hit refreshes the inode's LRU position.
 */
578 vfs_i_find(struct v_superblock* sb, uint32_t i_id)
580     struct hbucket* slot = &sb->i_cache[i_id & VFS_HASH_MASK];
581     struct v_inode *pos, *n;
582     hashtable_bucket_foreach(slot, pos, n, hash_list)
584         if (pos->id == i_id) {
585             lru_use_one(inode_lru, &pos->lru);

/*
 * (Re-)insert an inode into its superblock's hash cache, keyed by
 * inode id. The prior unconditional delete makes this idempotent.
 */
594 vfs_i_addhash(struct v_inode* inode)
596     struct hbucket* slot = &inode->sb->i_cache[inode->id & VFS_HASH_MASK];
598     hlist_delete(&inode->hash_list);
599     hlist_add(&slot->head, &inode->hash_list);
/*
 * Allocate and initialize an inode for superblock `sb`. On pile
 * exhaustion, evicts half of the inode LRU zone and retries once.
 * The fs-specific init_inode hook is mandatory and is invoked after
 * basic initialization; all timestamps start at the current unix time.
 */
603 vfs_i_alloc(struct v_superblock* sb)
605     assert(sb->ops.init_inode);
607     struct v_inode* inode;
608     if (!(inode = cake_grab(inode_pile))) {
609         lru_evict_half(inode_lru);
610         if (!(inode = cake_grab(inode_pile))) {
615     memset(inode, 0, sizeof(*inode));
616     mutex_init(&inode->lock);
/* Let the concrete file system finish constructing its inode. */
618     sb->ops.init_inode(sb, inode);
621     inode->ctime = clock_unixtime();
622     inode->atime = inode->ctime;
623     inode->mtime = inode->ctime;
626     lru_use_one(inode_lru, &inode->lru);
/*
 * Destroy an inode: release and free its page cache if present, sync
 * it back to the file system, unhash it, and return it to the pile.
 */
631 vfs_i_free(struct v_inode* inode)
633     if (inode->pg_cache) {
634         pcache_release(inode->pg_cache);
635         vfree(inode->pg_cache);
/* NOTE(review): ops->sync is called unconditionally here — confirm every
   fs provides it (vfs_fsync checks for NULL before calling file sync). */
637     inode->ops->sync(inode);
638     hlist_delete(&inode->hash_list);
639     cake_release(inode_pile, inode);
642 /* ---- System call definition and support ---- */

/* Flag for __vfs_try_locate_file: create the file when it is absent. */
644 #define FLOCATE_CREATE_EMPTY 1

/*
 * Fetch the v_fd record for numeric descriptor `fd` of the current
 * process; succeeds only when fd is in range and the slot is occupied.
 */
647 __vfs_getfd(int fd, struct v_fd** fd_s)
649     if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {
/*
 * Resolve `path` into its parent directory (`fdir`) and the file
 * itself (`file`). When the final component does not exist and
 * FLOCATE_CREATE_EMPTY is set, asks the fs to create an empty file
 * and inserts the new dnode into the cache.
 */
656 __vfs_try_locate_file(const char* path,
657                       struct v_dnode** fdir,
658                       struct v_dnode** file,
661     char name_str[VFS_NAME_MAXLEN];
662     struct hstr name = HSTR(name_str, 0);
/* First walk: resolve the parent directory, capturing the last name. */
665           vfs_walk(__current->cwd, path, fdir, &name, VFS_WALK_PARENT))) {
/* Second walk: resolve the final component under the parent. */
669     errno = vfs_walk(*fdir, name.value, file, NULL, 0);
670     if (errno != ENOENT || !(options & FLOCATE_CREATE_EMPTY)) {
/* Missing file + CREATE_EMPTY: allocate a dnode and ask fs to create. */
674     struct v_dnode* parent = *fdir;
675     struct v_dnode* file_new = vfs_d_alloc(parent, &name);
683     if (!(errno = parent->inode->ops->create(parent->inode, file_new))) {
684         vfs_dcache_add(parent, file_new);
/* Creation failed: discard the speculative dnode. */
687         vfs_d_free(file_new);
690     unlock_dnode(parent);
/*
 * Open `path` with `options` for the current process: locate (or, with
 * FO_CREATE, create) the file, open it, allocate a descriptor slot,
 * and install the v_fd record. FO_APPEND starts f_pos at the file size.
 */
696 vfs_do_open(const char* path, int options)
699     struct v_dnode *dentry, *file;
700     struct v_file* ofile = 0;
702     errno = __vfs_try_locate_file(
703       path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);
705     if (errno || (errno = vfs_open(file, &ofile))) {
709     struct v_inode* o_inode = ofile->inode;
711     if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
712         struct v_fd* fd_s = vzalloc(sizeof(*fd_s));
/* Branch-free append: f_pos = fsize when FO_APPEND is set, else 0. */
713         ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);
715         fd_s->flags = options;
716         __current->fdtable->fds[fd] = fd_s;
/* open(2): thin syscall shim over vfs_do_open. */
723 __DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
725     int errno = vfs_do_open(path, options);
726     return DO_STATUS_OR_RETURN(errno);

/*
 * close(2): drop one reference to the open file; the last reference
 * performs the real vfs_close. The fd slot is released either way.
 */
729 __DEFINE_LXSYSCALL1(int, close, int, fd)
733     if ((errno = __vfs_getfd(fd, &fd_s))) {
737     if (fd_s->file->ref_count > 1) {
738         fd_s->file->ref_count--;
739     } else if ((errno = vfs_close(fd_s->file))) {
744     __current->fdtable->fds[fd] = 0;
747     return DO_STATUS(errno);
/*
 * dir_context callback: copy one directory entry (name + type) into
 * the user's dirent referenced by dctx->cb_data.
 */
751 __vfs_readdir_callback(struct dir_context* dctx,
756     struct dirent* dent = (struct dirent*)dctx->cb_data;
757     strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);
759     dent->d_type = dtype;

/*
 * readdir(2): emit one entry per call, tracked by dent->d_offset.
 * Offsets 0 and 1 synthesize "." and ".."; later offsets delegate to
 * the file system's readdir through the dir_context.
 */
762 __DEFINE_LXSYSCALL2(int, readdir, int, fd, struct dirent*, dent)
767     if ((errno = __vfs_getfd(fd, &fd_s))) {
771     struct v_inode* inode = fd_s->file->inode;
/* Only directories may be read with readdir. */
775     if (!(inode->itype & VFS_IFDIR)) {
778     struct dir_context dctx =
779       (struct dir_context){ .cb_data = dent,
780                             .index = dent->d_offset,
781                             .read_complete_callback =
782                               __vfs_readdir_callback };
/* Synthesize the "." and ".." entries for the first two offsets. */
784     if (dent->d_offset == 0) {
785         __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, 0);
786     } else if (dent->d_offset == 1) {
787         __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, 0);
790         if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {
801     return DO_STATUS_OR_RETURN(errno);
/*
 * read(2): reject directories, update atime, then read either directly
 * through the fs (sequential devices, FO_DIRECT) or via the page
 * cache. A non-negative errno is the byte count and advances f_pos.
 */
804 __DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
808     if ((errno = __vfs_getfd(fd, &fd_s))) {
812     struct v_file* file = fd_s->file;
813     if ((file->inode->itype & VFS_IFDIR)) {
818     lock_inode(file->inode);
820     file->inode->atime = clock_unixtime();
/* The read may block; allow the syscall to be interrupted. */
822     __SYSCALL_INTERRUPTIBLE({
823         if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
824             errno = file->ops->read(file->inode, buf, count, file->f_pos);
826             errno = pcache_read(file->inode, buf, count, file->f_pos);
/* Success path: errno holds the byte count read. */
831         file->f_pos += errno;
832         unlock_inode(file->inode);
836     unlock_inode(file->inode);
839     return DO_STATUS(errno);
/*
 * write(2): mirror of read(2) — reject directories, update mtime,
 * write directly (sequential devices, FO_DIRECT) or via the page
 * cache, and advance f_pos by the byte count on success.
 */
842 __DEFINE_LXSYSCALL3(int, write, int, fd, void*, buf, size_t, count)
846     if ((errno = __vfs_getfd(fd, &fd_s))) {
850     struct v_file* file = fd_s->file;
851     if ((file->inode->itype & VFS_IFDIR)) {
856     lock_inode(file->inode);
858     file->inode->mtime = clock_unixtime();
/* The write may block; allow the syscall to be interrupted. */
860     __SYSCALL_INTERRUPTIBLE({
861         if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
862             errno = file->ops->write(file->inode, buf, count, file->f_pos);
864             errno = pcache_write(file->inode, buf, count, file->f_pos);
/* Success path: errno holds the byte count written. */
869         file->f_pos += errno;
870         unlock_inode(file->inode);
874     unlock_inode(file->inode);
877     return DO_STATUS(errno);
/*
 * lseek(2): compute the new position from `options` (visible cases:
 * relative to current position and relative to file size), validate it
 * through the fs seek hook, then commit it to f_pos.
 */
880 __DEFINE_LXSYSCALL3(int, lseek, int, fd, int, offset, int, options)
884     if ((errno = __vfs_getfd(fd, &fd_s))) {
888     struct v_file* file = fd_s->file;
890     lock_inode(file->inode);
892     size_t fpos = file->f_pos;
/* SEEK_CUR-style: offset from the current position. */
895             fpos = (size_t)((int)file->f_pos + offset);
/* SEEK_END-style: offset from the end of the file. */
898             fpos = (size_t)((int)file->inode->fsize + offset);
904     if (!(errno = file->ops->seek(file->inode, fpos))) {
908     unlock_inode(file->inode);
911     return DO_STATUS(errno);
/*
 * Recursively build the absolute path of `dnode` into `buf` (root
 * first), appending this dnode's name and a trailing PATH_DELIM.
 * Returns the number of bytes written so far.
 */
915 vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
925     size_t len = vfs_get_path(dnode->parent, buf, size, depth + 1);
931     size_t cpy_size = MIN(dnode->name.len, size - len);
932     strncpy(buf + len, dnode->name.value, cpy_size);
936     buf[len++] = PATH_DELIM;

/*
 * Read the target of the symlink behind `dnode` into `buf` (truncated
 * to `size`), when the fs implements read_symlink.
 */
943 vfs_readlink(struct v_dnode* dnode, char* buf, size_t size)
946     struct v_inode* inode = dnode->inode;
947     if (inode->ops->read_symlink) {
950         int errno = inode->ops->read_symlink(inode, &link);
951         strncpy(buf, link, size);
/* realpathat(2): write the absolute path of fd's dnode into buf. */
959 __DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
963     if ((errno = __vfs_getfd(fd, &fd_s))) {
967     struct v_dnode* dnode;
968     errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);
975     return DO_STATUS(errno);

/* readlink(2): resolve path without following the final symlink, then
   read the link target. */
978 __DEFINE_LXSYSCALL3(int, readlink, const char*, path, char*, buf, size_t, size)
981     struct v_dnode* dnode;
983           vfs_walk(__current->cwd, path, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
984         errno = vfs_readlink(dnode, buf, size);
991     return DO_STATUS(errno);

/* readlinkat(2): same as readlink but relative to dirfd. */
994 __DEFINE_LXSYSCALL4(int,
1007     if ((errno = __vfs_getfd(dirfd, &fd_s))) {
1011     struct v_dnode* dnode;
1012     if (!(errno = vfs_walk(
1013             fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
/* NOTE(review): the freshly resolved `dnode` is unused — the link is
   read from fd_s->file->dnode instead. This looks like it should be
   vfs_readlink(dnode, buf, size); confirm against readlink(2) above. */
1014         errno = vfs_readlink(fd_s->file->dnode, buf, size);
1022     return DO_STATUS(errno);
1027 When we perform an operation that could affect the layout of a
1028 directory (i.e., rename, mkdir, rmdir), we must lock the parent dir
1029 whenever possible. This will block any ongoing path walk from reaching
1030 it, hence avoiding any partial state.
/*
 * rmdir(2): remove an empty, unused directory. Rejects read-only file
 * systems, busy dnodes (extra refs or open files), and non-empty
 * directories; delegates the removal to the parent inode's rmdir op.
 */
1033 __DEFINE_LXSYSCALL1(int, rmdir, const char*, pathname)
1036     struct v_dnode* dnode;
1037     if ((errno = vfs_walk(__current->cwd, pathname, &dnode, NULL, 0))) {
1038         return DO_STATUS(errno);
/* Read-only file system: nothing may be removed. */
1043     if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
/* Busy: someone else holds a reference or has the inode open. */
1048     if (dnode->ref_count > 1 || dnode->inode->open_count) {
/* Only empty directories may be removed. */
1053     if (!llist_empty(&dnode->children)) {
1058     struct v_dnode* parent = dnode->parent;
/* Lock the parent inode so concurrent walks can't see partial state. */
1066     lock_inode(parent->inode);
1068     if ((dnode->inode->itype & VFS_IFDIR)) {
1069         errno = parent->inode->ops->rmdir(parent->inode, dnode);
1071             vfs_dcache_remove(dnode);
1077     unlock_inode(parent->inode);
1078     unlock_dnode(parent);
1081     unlock_dnode(dnode);
1082     return DO_STATUS(errno);
/*
 * mkdir(2): walk to the parent of the final component, allocate a
 * dnode for the new directory, and delegate creation to the parent
 * inode's mkdir op. Rejects read-only fs, missing mkdir support, and
 * non-directory parents.
 */
1085 __DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
1088     struct v_dnode *parent, *dir;
1089     char name_value[VFS_NAME_MAXLEN];
1090     struct hstr name = HHSTR(name_value, 0, 0);
/* Resolve the parent; the final component lands in `name`. */
1098           vfs_walk(__current->cwd, path, &parent, &name, VFS_WALK_PARENT))) {
1102     dir = vfs_d_alloc(parent, &name);
1105     lock_inode(parent->inode);
1107     if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
1109     } else if (!parent->inode->ops->mkdir) {
1111     } else if (!(parent->inode->itype & VFS_IFDIR)) {
1113     } else if (!(errno = parent->inode->ops->mkdir(parent->inode, dir))) {
1114         vfs_dcache_add(parent, dir);
1121     unlock_inode(parent->inode);
1122     unlock_dnode(parent);
1124     return DO_STATUS(errno);
/*
 * Shared unlink core: refuses busy dnodes (extra refs) and inodes
 * with open handles; refuses directories (use rmdir); delegates the
 * removal to the inode's unlink op.
 */
1128 __vfs_do_unlink(struct v_dnode* dnode)
1130     struct v_inode* inode = dnode->inode;
/* Someone else still references this dnode — refuse. */
1132     if (dnode->ref_count > 1) {
1139     if (inode->open_count) {
1141     } else if (!(inode->itype & VFS_IFDIR)) {
1142         // The underlying unlink implementation should handle
1144         errno = inode->ops->unlink(inode);
1152     unlock_inode(inode);

/* unlink(2): resolve the path, reject read-only fs, then unlink. */
1157 __DEFINE_LXSYSCALL1(int, unlink, const char*, pathname)
1160     struct v_dnode* dnode;
1161     if ((errno = vfs_walk(__current->cwd, pathname, &dnode, NULL, 0))) {
1164     if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
1169     errno = __vfs_do_unlink(dnode);
1172     return DO_STATUS(errno);

/* unlinkat(2): like unlink, but pathname is relative to fd's dnode.
   NOTE(review): no FSTYPE_ROFS check is visible here, unlike unlink —
   confirm whether it lives on an elided line. */
1175 __DEFINE_LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
1179     if ((errno = __vfs_getfd(fd, &fd_s))) {
1183     struct v_dnode* dnode;
1184     if (!(errno = vfs_walk(fd_s->file->dnode, pathname, &dnode, NULL, 0))) {
1185         errno = __vfs_do_unlink(dnode);
1189     return DO_STATUS(errno);
/*
 * link(2): locate the existing file at oldpath, create an empty entry
 * for newpath, and hard-link the two via vfs_link.
 */
1192 __DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
1195     struct v_dnode *dentry, *to_link, *name_dentry, *name_file;
1197     errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
1199         errno = __vfs_try_locate_file(
1200           newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
1203         } else if (name_file) {
1204             errno = vfs_link(to_link, name_file);
1207     return DO_STATUS(errno);

/* fsync(2): thin shim over vfs_fsync for a valid descriptor. */
1210 __DEFINE_LXSYSCALL1(int, fsync, int, fildes)
1214     if (!(errno = __vfs_getfd(fildes, &fd_s))) {
1215         errno = vfs_fsync(fd_s->file);
1218     return DO_STATUS(errno);
/*
 * Duplicate a v_fd record: shallow-copy the descriptor state and take
 * an extra reference on the shared open file.
 */
1222 vfs_dup_fd(struct v_fd* old, struct v_fd** new)
1225     struct v_fd* copied = cake_grab(fd_pile);
1227     memcpy(copied, old, sizeof(struct v_fd));
1229     atomic_fetch_add(&old->file->ref_count, 1);

/*
 * dup2 core: no-op when newfd == oldfd; otherwise validates both fds,
 * closes whatever currently occupies newfd, and installs a duplicate
 * of oldfd there.
 */
1237 vfs_dup2(int oldfd, int newfd)
1239     if (newfd == oldfd) {
1244     struct v_fd *oldfd_s, *newfd_s;
1245     if ((errno = __vfs_getfd(oldfd, &oldfd_s))) {
1249     if (!TEST_FD(newfd)) {
/* newfd already open: close it first, propagating any close error. */
1254     newfd_s = __current->fdtable->fds[newfd];
1255     if (newfd_s && (errno = vfs_close(newfd_s->file))) {
1259     if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1260         __current->fdtable->fds[newfd] = newfd_s;
1265     return DO_STATUS(errno);

/* dup2(2): direct shim over vfs_dup2. */
1268 __DEFINE_LXSYSCALL2(int, dup2, int, oldfd, int, newfd)
1270     return vfs_dup2(oldfd, newfd);

/* dup(2): duplicate oldfd into the lowest free descriptor slot. */
1273 __DEFINE_LXSYSCALL1(int, dup, int, oldfd)
1276     struct v_fd *oldfd_s, *newfd_s;
1277     if ((errno = __vfs_getfd(oldfd, &oldfd_s))) {
1281     if (!(errno = vfs_alloc_fdslot(&newfd)) &&
1282         !(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1283         __current->fdtable->fds[newfd] = newfd_s;
1288     return DO_STATUS(errno);
/*
 * symlink-style syscall (full parameter list elided in this excerpt):
 * resolve pathname, reject read-only fs and file systems without
 * set_symlink, then store the link target via the inode op.
 */
1291 __DEFINE_LXSYSCALL2(int,
1299     struct v_dnode* dnode;
1300     if ((errno = vfs_walk(__current->cwd, pathname, &dnode, NULL, 0))) {
1303     if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
1307     if (!dnode->inode->ops->set_symlink) {
1312     lock_inode(dnode->inode);
1314     errno = dnode->inode->ops->set_symlink(dnode->inode, link_target);
1316     unlock_inode(dnode->inode);
1319     return DO_STATUS(errno);
/*
 * chdir core: verify the target is a directory, release the previous
 * cwd's reference and mount-busy mark, then adopt the new dnode as
 * the current working directory.
 */
1323 __vfs_do_chdir(struct v_dnode* dnode)
1329     if (!(dnode->inode->itype & VFS_IFDIR)) {
/* Drop the old cwd's pin before switching. */
1334     if (__current->cwd) {
1335         atomic_fetch_sub(&__current->cwd->ref_count, 1);
1336         mnt_chillax(__current->cwd->mnt);
1339     atomic_fetch_add(&dnode->ref_count, 1);
1340     mnt_mkbusy(dnode->mnt);
1341     __current->cwd = dnode;
1343     unlock_dnode(dnode);

/* chdir(2): resolve path then delegate to __vfs_do_chdir. */
1349 __DEFINE_LXSYSCALL1(int, chdir, const char*, path)
1351     struct v_dnode* dnode;
1354     if ((errno = vfs_walk(__current->cwd, path, &dnode, NULL, 0))) {
1358     errno = __vfs_do_chdir(dnode);
1361     return DO_STATUS(errno);

/* fchdir(2): change cwd to the directory behind an open descriptor. */
1364 __DEFINE_LXSYSCALL1(int, fchdir, int, fd)
1369     if ((errno = __vfs_getfd(fd, &fd_s))) {
1373     errno = __vfs_do_chdir(fd_s->file->dnode);
1376     return DO_STATUS(errno);
/*
 * getcwd(2): render the current working directory's absolute path
 * into the user buffer and NUL-terminate it.
 */
1379 __DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
1390     if (!__current->cwd) {
1394     len = vfs_get_path(__current->cwd, buf, size, 0);
/* NOTE(review): terminator written at len + 1, leaving buf[len]
   untouched unless an elided line sets it — confirm this offset. */
1401     buf[len + 1] = '\0';
1406     __current->k_status = errno;
/*
 * Rename core: move `current` over `target`. Rejects busy dnodes,
 * cross-superblock moves, and non-empty targets; a same-inode rename
 * is a no-op. Locks child before parents (current, old parent, new
 * parent) and rehashes `current` under the new parent on success.
 */
1411 vfs_do_rename(struct v_dnode* current, struct v_dnode* target)
/* Renaming onto the very same inode: nothing to do. */
1413     if (current->inode->id == target->inode->id) {
1418     if (current->ref_count > 1 || target->ref_count > 1) {
/* Rename may not cross file-system boundaries. */
1422     if (current->super_block != target->super_block) {
1428     struct v_dnode* oldparent = current->parent;
1429     struct v_dnode* newparent = target->parent;
1431     lock_dnode(current);
1434     lock_dnode(oldparent);
1436     lock_dnode(newparent);
/* Target must be an empty subtree to be replaced. */
1438     if (!llist_empty(&target->children)) {
1440         unlock_dnode(target);
1445           current->inode->ops->rename(current->inode, current, target))) {
1446         unlock_dnode(target);
1450     // re-position current
/* NOTE(review): "¤t" below is a mojibake of "&current" (HTML
   entity corruption) — restore to hstrcpy(&current->name, ...). */
1451     hstrcpy(¤t->name, &target->name);
1452     vfs_dcache_rehash(newparent, current);
1457     unlock_dnode(target);
1460     unlock_dnode(current);
1462     unlock_dnode(oldparent);
1464     unlock_dnode(newparent);
1469 __DEFINE_LXSYSCALL2(int, rename, const char*, oldpath, const char*, newpath)
1471 struct v_dnode *cur, *target_parent, *target;
1472 struct hstr name = HSTR(valloc(VFS_NAME_MAXLEN), 0);
1475 if ((errno = vfs_walk(__current->cwd, oldpath, &cur, NULL, 0))) {
1479 if ((errno = vfs_walk(
1480 __current->cwd, newpath, &target_parent, &name, VFS_WALK_PARENT))) {
1484 errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
1485 if (errno == ENOENT) {
1486 target = vfs_d_alloc(target_parent, &name);
1487 vfs_dcache_add(target_parent, target);
1497 errno = vfs_do_rename(cur, target);
1501 return DO_STATUS(errno);