/**
 * @author Lunaixsky (zelong56@gmail.com)
 * @brief Lunaix virtual file system - an abstraction layer for all file systems.
 *
 * @copyright Copyright (c) 2022
 *
 */

// Welcome to The Mountain O'Shit! :)
/*
    TODO vfs & device todos checklist

    It is overseen by Twilight Sparkle ;)

    1. Get inodes hooked into lru (CHECKED)
    2. Get dnodes hooked into lru (CHECKED)
    3. Get inodes properly hashed so they can be reused by the underlying fs (CHECKED)
    4. (lru) Add a callback function (or destructor) for eviction. (CHECKED)
        [good idea] or a constructor/destructor pattern in the cake allocator?
    5. (mount) Figure out a way to identify a busy mount point before unmounting.
        Maybe a unified mount_point structure that maintains a reference
        counter on any dnode within the subtree? Such a counter would only
        increment when a file is opened or a dnode is used as a working
        directory, and decrement conversely. (CHECKED)
    6. (mount) Ability to track all mount points (including sub-mounts),
        so we can be confident to clean up everything when we unmount.
    7. (mount) Figure out a way to acquire the device represented by a dnode,
        so it can be used to mount (e.g. we wish to get the `struct device*`
        out of the dnode at /dev/sda).
        [tip] we should pay attention to twifs and add a private_data field
        under struct v_dnode? (CHECKED)
    8. (mount) Then, we should refactor the mount/unmount mechanism. (CHECKED)
    9. (mount) (future) Ability to mount anything? e.g. Linux can mount a
        disk image file using a so-called "loopback" pseudo device. Maybe we
        can do a similar thing in Lunaix? That is, a block device emulated
        on top of a regular file that we mount.
    10. (device) device number (dev_t) allocation
        [good idea] <class>:<subclass>:<uniq_id> composition
        (a sketch follows this comment)
*/
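/*
 * [illustration] A minimal sketch of the <class>:<subclass>:<uniq_id>
 * composition from item 10 above, assuming a 32-bit dev_t split 8:8:16.
 * The split and the macro names are assumptions for illustration only,
 * not a settled design:
 *
 *     #define DEV_MKDEV(cls, sub, uniq)                                  \
 *         ((((cls) & 0xffU) << 24) | (((sub) & 0xffU) << 16) |           \
 *          ((uniq) & 0xffffU))
 *
 *     #define DEV_CLASS(dev)    (((dev) >> 24) & 0xffU)
 *     #define DEV_SUBCLASS(dev) (((dev) >> 16) & 0xffU)
 *     #define DEV_UNIQ(dev)     ((dev) & 0xffffU)
 */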
#include <klibc/string.h>
#include <lunaix/dirent.h>
#include <lunaix/foptions.h>
#include <lunaix/fs.h>
#include <lunaix/mm/cake.h>
#include <lunaix/mm/page.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/process.h>
#include <lunaix/spike.h>
#include <lunaix/syscall.h>

#include <lunaix/fs/twifs.h>
static struct cake_pile* dnode_pile;
static struct cake_pile* inode_pile;
static struct cake_pile* file_pile;
static struct cake_pile* superblock_pile;
static struct cake_pile* fd_pile;

struct v_dnode* vfs_sysroot;
static struct hbucket* dnode_cache;

struct lru_zone *dnode_lru, *inode_lru;

struct hstr vfs_ddot = HSTR("..", 2);
struct hstr vfs_dot = HSTR(".", 1);
struct hstr vfs_empty = HSTR("", 0);
static void
vfs_sb_free(struct v_superblock* sb);

static int
__vfs_try_evict_dnode(struct lru_node* obj);

static int
__vfs_try_evict_inode(struct lru_node* obj);
void
vfs_init()
{
    // Create dedicated cake piles for these objects instead of using
    // valloc, so we can minimize internal fragmentation.
    dnode_pile = cake_new_pile("dnode_cache", sizeof(struct v_dnode), 1, 0);
    inode_pile = cake_new_pile("inode_cache", sizeof(struct v_inode), 1, 0);
    file_pile = cake_new_pile("file_cache", sizeof(struct v_file), 1, 0);
    fd_pile = cake_new_pile("fd_cache", sizeof(struct v_fd), 1, 0);
    superblock_pile =
      cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);

    dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));

    dnode_lru = lru_new_zone(__vfs_try_evict_dnode);
    inode_lru = lru_new_zone(__vfs_try_evict_inode);

    hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
    hstr_rehash(&vfs_dot, HSTR_FULL_HASH);

    // the root of the file system tree is its own parent
    vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
    vfs_sysroot->parent = vfs_sysroot;
    atomic_fetch_add(&vfs_sysroot->ref_count, 1);
}
inline struct hbucket*
__dcache_hash(struct v_dnode* parent, uint32_t* hash)
{
    uint32_t _hash = *hash;
    _hash = _hash ^ (_hash >> VFS_HASHBITS);
    // Mix in the parent's pointer value to reduce the chance of collision.
    _hash += (uint32_t)parent;
    *hash = _hash;

    return &dnode_cache[_hash & VFS_HASH_MASK];
}
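/*
 * Note: vfs_dcache_add() and vfs_dcache_lookup() both route their hashes
 * through __dcache_hash(), so the value stored in dnode->name.hash at
 * insertion time and the value probed at lookup time are mixed with the
 * same parent pointer and remain comparable.
 */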
struct v_dnode*
vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
{
    if (!str->len || HSTR_EQ(str, &vfs_dot))
        return parent;

    if (HSTR_EQ(str, &vfs_ddot)) {
        return parent->parent;
    }

    uint32_t hash = str->hash;
    struct hbucket* slot = __dcache_hash(parent, &hash);

    struct v_dnode *pos, *n;
    hashtable_bucket_foreach(slot, pos, n, hash_list)
    {
        if (pos->name.hash == hash) {
            return pos;
        }
    }
    return NULL;
}
void
vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
{
    atomic_fetch_add(&dnode->ref_count, 1);
    dnode->parent = parent;
    llist_append(&parent->children, &dnode->siblings);

    struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
    hlist_add(&bucket->head, &dnode->hash_list);
}
void
vfs_dcache_remove(struct v_dnode* dnode)
{
    assert(dnode->ref_count == 1);

    llist_delete(&dnode->siblings);
    llist_delete(&dnode->aka_list);
    hlist_delete(&dnode->hash_list);

    dnode->parent = NULL;
    atomic_fetch_sub(&dnode->ref_count, 1);
}
void
vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
{
    hstr_rehash(&dnode->name, HSTR_FULL_HASH);
    vfs_dcache_remove(dnode);
    vfs_dcache_add(new_parent, dnode);
}
int
vfs_open(struct v_dnode* dnode, struct v_file** file)
{
    if (!dnode->inode || !dnode->inode->ops->open) {
        return ENOTSUP;
    }

    struct v_inode* inode = dnode->inode;

    struct v_file* vfile = cake_grab(file_pile);
    memset(vfile, 0, sizeof(*vfile));

    vfile->dnode = dnode;
    vfile->inode = inode;
    vfile->ref_count = ATOMIC_VAR_INIT(1);
    vfile->ops = inode->default_fops;

    // lazily attach a page cache to regular files on first open
    if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
        struct pcache* pcache = vzalloc(sizeof(struct pcache));
        pcache->master = inode;
        inode->pg_cache = pcache;
    }

    int errno = inode->ops->open(inode, vfile);
    if (errno) {
        cake_release(file_pile, vfile);
    } else {
        atomic_fetch_add(&dnode->ref_count, 1);
        inode->open_count++;
        mnt_mkbusy(dnode->mnt);
        *file = vfile;
    }

    return errno;
}
void
vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
{
    if (assign_to->inode) {
        llist_delete(&assign_to->aka_list);
        assign_to->inode->link_count--;
    }
    llist_append(&inode->aka_dnodes, &assign_to->aka_list);
    assign_to->inode = inode;
    inode->link_count++;
}
int
vfs_link(struct v_dnode* to_link, struct v_dnode* name)
{
    int errno;
    if ((errno = vfs_check_writable(to_link))) {
        return errno;
    }

    lock_inode(to_link->inode);
    if (to_link->super_block->root != name->super_block->root) {
        errno = EXDEV;
    } else if (!to_link->inode->ops->link) {
        errno = ENOTSUP;
    } else if (!(errno = to_link->inode->ops->link(to_link->inode, name))) {
        vfs_assign_inode(name, to_link->inode);
    }
    unlock_inode(to_link->inode);

    return errno;
}
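/*
 * [illustration] After a successful vfs_link(a, b), both dnodes alias the
 * same inode: b joins a's inode->aka_dnodes list via vfs_assign_inode()
 * and the inode's link_count grows by one. Two names, one inode:
 *
 *   inode (link_count == 2)
 *     aka_dnodes -> a ("oldname"), b ("newname")
 */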
int
vfs_pclose(struct v_file* file, pid_t pid)
{
    int errno = 0;
    if (file->ref_count > 1) {
        atomic_fetch_sub(&file->ref_count, 1);
    } else if (!(errno = file->ops->close(file))) {
        atomic_fetch_sub(&file->dnode->ref_count, 1);
        file->inode->open_count--;

        // Prevent deadlock.
        // This happens when a process is terminated while blocking on a
        // read. In that case, the process is still holding the inode lock,
        // which would otherwise never get released.
        /*
         * The unlocking should also include an ownership check.
         *
         * To see why, consider two processes that both open the same file x:
         *   Process A: busy reading x
         *   Process B: does nothing with x
         * Assume that, after a very short time, process B gets terminated
         * while process A is still busy with its read. By this design, the
         * inode lock of file x is released by B rather than A, and this can
         * cause a race condition on A if another process writes to the file
         * after B exits.
         */
        if (mutex_on_hold(&file->inode->lock)) {
            mutex_unlock_for(&file->inode->lock, pid);
        }
        mnt_chillax(file->dnode->mnt);

        pcache_commit_all(file->inode);
        cake_release(file_pile, file);
    }
    return errno;
}
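/*
 * [illustration] A possible shape of the ownership check suggested in the
 * comment above, assuming a hypothetical mutex_held_by(lock, pid) helper
 * (not an existing API):
 *
 *   if (mutex_on_hold(&file->inode->lock) &&
 *       mutex_held_by(&file->inode->lock, pid)) {
 *       mutex_unlock_for(&file->inode->lock, pid);
 *   }
 */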
int
vfs_close(struct v_file* file)
{
    return vfs_pclose(file, __current->pid);
}

void
vfs_free_fd(struct v_fd* fd)
{
    cake_release(fd_pile, fd);
}
int
vfs_fsync(struct v_file* file)
{
    int errno;
    if ((errno = vfs_check_writable(file->dnode))) {
        return errno;
    }

    lock_inode(file->inode);

    pcache_commit_all(file->inode);

    if (file->ops->sync) {
        errno = file->ops->sync(file);
    }

    unlock_inode(file->inode);

    return errno;
}
int
vfs_alloc_fdslot(int* fd)
{
    for (size_t i = 0; i < VFS_MAX_FD; i++) {
        if (!__current->fdtable->fds[i]) {
            *fd = i;
            return 0;
        }
    }
    return EMFILE;
}
struct v_superblock*
vfs_sb_alloc()
{
    struct v_superblock* sb = cake_grab(superblock_pile);
    memset(sb, 0, sizeof(*sb));
    llist_init_head(&sb->sb_list);
    sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));

    return sb;
}
static void
vfs_sb_free(struct v_superblock* sb)
{
    cake_release(superblock_pile, sb);
}
static int
__vfs_try_evict_dnode(struct lru_node* obj)
{
    struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);

    if (!dnode->ref_count) {
        vfs_d_free(dnode);
        return 1;
    }
    return 0;
}

static int
__vfs_try_evict_inode(struct lru_node* obj)
{
    struct v_inode* inode = container_of(obj, struct v_inode, lru);

    if (!inode->link_count && !inode->open_count) {
        vfs_i_free(inode);
        return 1;
    }
    return 0;
}
struct v_dnode*
vfs_d_alloc(struct v_dnode* parent, struct hstr* name)
{
    struct v_dnode* dnode = cake_grab(dnode_pile);
    if (!dnode) {
        // the pile is exhausted: evict half the LRU and retry once
        lru_evict_half(dnode_lru);

        if (!(dnode = cake_grab(dnode_pile))) {
            return NULL;
        }
    }

    memset(dnode, 0, sizeof(*dnode));
    llist_init_head(&dnode->children);
    llist_init_head(&dnode->siblings);
    llist_init_head(&dnode->aka_list);
    mutex_init(&dnode->lock);

    dnode->ref_count = ATOMIC_VAR_INIT(0);
    dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);

    hstrcpy(&dnode->name, name);

    if (parent) {
        dnode->super_block = parent->super_block;
        dnode->mnt = parent->mnt;
    }

    lru_use_one(dnode_lru, &dnode->lru);

    return dnode;
}
void
vfs_d_free(struct v_dnode* dnode)
{
    assert(dnode->ref_count == 1);

    if (dnode->inode) {
        assert(dnode->inode->link_count > 0);
        dnode->inode->link_count--;
    }

    vfs_dcache_remove(dnode);
    // Make sure the children de-reference their parent.
    // With the LRU in place, the eviction will eventually propagate
    // over the entire detached subtree.
    struct v_dnode *pos, *n;
    llist_for_each(pos, n, &dnode->children, siblings)
    {
        vfs_dcache_remove(pos);
    }

    vfree(dnode->name.value);
    cake_release(dnode_pile, dnode);
}
struct v_inode*
vfs_i_find(struct v_superblock* sb, uint32_t i_id)
{
    struct hbucket* slot = &sb->i_cache[i_id & VFS_HASH_MASK];
    struct v_inode *pos, *n;
    hashtable_bucket_foreach(slot, pos, n, hash_list)
    {
        if (pos->id == i_id) {
            lru_use_one(inode_lru, &pos->lru);
            return pos;
        }
    }
    return NULL;
}
void
vfs_i_addhash(struct v_inode* inode)
{
    struct hbucket* slot = &inode->sb->i_cache[inode->id & VFS_HASH_MASK];

    hlist_delete(&inode->hash_list);
    hlist_add(&slot->head, &inode->hash_list);
}
struct v_inode*
vfs_i_alloc(struct v_superblock* sb)
{
    // every file system should implement its own inode initializer
    assert(sb->ops.init_inode);

    struct v_inode* inode;
    if (!(inode = cake_grab(inode_pile))) {
        lru_evict_half(inode_lru);
        if (!(inode = cake_grab(inode_pile))) {
            return NULL;
        }
    }

    memset(inode, 0, sizeof(*inode));
    mutex_init(&inode->lock);
    llist_init_head(&inode->xattrs);
    llist_init_head(&inode->aka_dnodes);

    sb->ops.init_inode(sb, inode);

    inode->ctime = clock_unixtime();
    inode->atime = inode->ctime;
    inode->mtime = inode->ctime;
    inode->sb = sb;

    lru_use_one(inode_lru, &inode->lru);

    return inode;
}
void
vfs_i_free(struct v_inode* inode)
{
    if (inode->pg_cache) {
        pcache_release(inode->pg_cache);
        vfree(inode->pg_cache);
    }
    inode->ops->sync(inode);
    hlist_delete(&inode->hash_list);
    cake_release(inode_pile, inode);
}
/* ---- System call definition and support ---- */

#define FLOCATE_CREATE_EMPTY 1
int
vfs_getfd(int fd, struct v_fd** fd_s)
{
    if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {
        return 0;
    }
    return EBADF;
}
int
__vfs_try_locate_file(const char* path,
                      struct v_dnode** fdir,
                      struct v_dnode** file,
                      int options)
{
    char name_str[VFS_NAME_MAXLEN];
    struct hstr name = HSTR(name_str, 0);

    int errno;
    if ((errno = vfs_walk_proc(path, fdir, &name, VFS_WALK_PARENT))) {
        return errno;
    }

    errno = vfs_walk(*fdir, name.value, file, NULL, 0);
    if (errno != ENOENT || !(options & FLOCATE_CREATE_EMPTY)) {
        return errno;
    }

    struct v_dnode* parent = *fdir;
    struct v_dnode* file_new = vfs_d_alloc(parent, &name);
    if (!file_new) {
        return ENOMEM;
    }

    lock_dnode(parent);

    if (!(errno = parent->inode->ops->create(parent->inode, file_new))) {
        vfs_dcache_add(parent, file_new);
        *file = file_new;
    } else {
        vfs_d_free(file_new);
    }

    unlock_dnode(parent);

    return errno;
}
int
vfs_do_open(const char* path, int options)
{
    int errno, fd;
    struct v_dnode *dentry, *file;
    struct v_file* ofile = 0;

    errno = __vfs_try_locate_file(
      path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);

    if (errno || (errno = vfs_open(file, &ofile))) {
        return errno;
    }

    struct v_inode* o_inode = ofile->inode;

    if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
        struct v_fd* fd_s = cake_grab(fd_pile);
        memset(fd_s, 0, sizeof(*fd_s));

        // branchless: -(x != 0) is an all-ones mask when x is non-zero,
        // so f_pos starts at fsize (append mode) or at 0
        ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);

        fd_s->file = ofile;
        fd_s->flags = options;
        __current->fdtable->fds[fd] = fd_s;

        return fd;
    }

    return errno;
}
__DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
{
    int errno = vfs_do_open(path, options);
    return DO_STATUS_OR_RETURN(errno);
}
__DEFINE_LXSYSCALL1(int, close, int, fd)
{
    int errno;
    struct v_fd* fd_s;
    if ((errno = vfs_getfd(fd, &fd_s))) {
        return DO_STATUS(errno);
    }

    if ((errno = vfs_close(fd_s->file))) {
        return DO_STATUS(errno);
    }

    cake_release(fd_pile, fd_s);
    __current->fdtable->fds[fd] = 0;

    return DO_STATUS(errno);
}
void
__vfs_readdir_callback(struct dir_context* dctx,
                       const char* name,
                       const int len,
                       const int dtype)
{
    struct dirent* dent = (struct dirent*)dctx->cb_data;
    strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);
    dent->d_type = dtype;
}
__DEFINE_LXSYSCALL2(int, readdir, int, fd, struct dirent*, dent)
{
    int errno;
    struct v_fd* fd_s;
    if ((errno = vfs_getfd(fd, &fd_s))) {
        goto done;
    }

    struct v_inode* inode = fd_s->file->inode;

    if (!(inode->itype & VFS_IFDIR)) {
        errno = ENOTDIR;
        goto done;
    }

    struct dir_context dctx =
      (struct dir_context){ .cb_data = dent,
                            .index = dent->d_offset,
                            .read_complete_callback =
                              __vfs_readdir_callback };

    // entries 0 and 1 of every directory are the synthetic "." and ".."
    if (dent->d_offset == 0) {
        __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, DT_DIR);
    } else if (dent->d_offset == 1) {
        __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, DT_DIR);
    } else if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {
        goto done;
    }

    dent->d_offset++;
    errno = 0;

done:
    return DO_STATUS_OR_RETURN(errno);
}
__DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
{
    int errno;
    struct v_fd* fd_s;
    if ((errno = vfs_getfd(fd, &fd_s))) {
        goto done;
    }

    struct v_file* file = fd_s->file;
    if ((file->inode->itype & VFS_IFDIR)) {
        errno = EISDIR;
        goto done;
    }

    lock_inode(file->inode);

    file->inode->atime = clock_unixtime();

    // sequential devices and FO_DIRECT requests bypass the page cache
    if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
        errno = file->ops->read(file->inode, buf, count, file->f_pos);
    } else {
        errno = pcache_read(file->inode, buf, count, file->f_pos);
    }

    if (errno >= 0) {
        file->f_pos += errno;
        unlock_inode(file->inode);
        return errno;
    }

    unlock_inode(file->inode);

done:
    return DO_STATUS(errno);
}
__DEFINE_LXSYSCALL3(int, write, int, fd, void*, buf, size_t, count)
{
    int errno;
    struct v_fd* fd_s;
    if ((errno = vfs_getfd(fd, &fd_s))) {
        goto done;
    }

    struct v_file* file = fd_s->file;

    if ((errno = vfs_check_writable(file->dnode))) {
        goto done;
    }

    if ((file->inode->itype & VFS_IFDIR)) {
        errno = EISDIR;
        goto done;
    }

    lock_inode(file->inode);

    file->inode->mtime = clock_unixtime();

    // sequential devices and FO_DIRECT requests bypass the page cache
    if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
        errno = file->ops->write(file->inode, buf, count, file->f_pos);
    } else {
        errno = pcache_write(file->inode, buf, count, file->f_pos);
    }

    if (errno >= 0) {
        file->f_pos += errno;
        unlock_inode(file->inode);
        return errno;
    }

    unlock_inode(file->inode);

done:
    return DO_STATUS(errno);
}
__DEFINE_LXSYSCALL3(int, lseek, int, fd, int, offset, int, options)
{
    int errno;
    struct v_fd* fd_s;
    if ((errno = vfs_getfd(fd, &fd_s))) {
        goto done;
    }

    struct v_file* file = fd_s->file;

    if (!file->ops->seek) {
        errno = ENOTSUP;
        goto done;
    }

    lock_inode(file->inode);

    int overflow = 0;
    int fpos = file->f_pos;

    // __builtin_sadd_overflow computes the new position and reports
    // signed overflow in one step
    switch (options) {
        case FSEEK_CUR:
            overflow =
              __builtin_sadd_overflow((int)file->f_pos, offset, &fpos);
            break;
        case FSEEK_END:
            overflow =
              __builtin_sadd_overflow((int)file->inode->fsize, offset, &fpos);
            break;
        case FSEEK_SET:
            fpos = offset;
            break;
    }

    if (overflow) {
        errno = EOVERFLOW;
    } else if (!(errno = file->ops->seek(file->inode, fpos))) {
        file->f_pos = fpos;
    }

    unlock_inode(file->inode);

done:
    return DO_STATUS(errno);
}
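/*
 * [illustration] __builtin_sadd_overflow(a, b, &r) stores a + b into r
 * and returns true iff the signed addition wrapped around. Seeking by
 * offset = 1 from f_pos = INT_MAX, for instance, is reported as overflow
 * instead of silently yielding a negative file position.
 */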
int
vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
{
    size_t len = 0;

    if (dnode->parent != dnode) {
        len = vfs_get_path(dnode->parent, buf, size, depth + 1);
    }

    if (len >= size) {
        return len;
    }

    if (!len || buf[len - 1] != VFS_PATH_DELIM) {
        buf[len++] = VFS_PATH_DELIM;
    }

    size_t cpy_size = MIN(dnode->name.len, size - len);
    strncpy(buf + len, dnode->name.value, cpy_size);

    return len + cpy_size;
}
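/*
 * [illustration] vfs_get_path() emits the path root-first: for a dnode
 * at /usr/bin the recursion ascends to the root, then writes "/", "usr",
 * "/", "bin" on the way back and returns the byte count (8 here). The
 * terminating NUL is the caller's job.
 */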
int
vfs_readlink(struct v_dnode* dnode, char* buf, size_t size)
{
    struct v_inode* inode = dnode->inode;
    if (inode->ops->read_symlink) {
        const char* link;
        int errno = inode->ops->read_symlink(inode, &link);
        strncpy(buf, link, size);
        return errno;
    }
    return ENOTSUP;
}
int
vfs_get_dtype(int itype)
{
    // map an inode type onto a dirent type; the full mapping is elided in
    // this excerpt, so only the directory case (used above) is certain
    if ((itype & VFS_IFDIR)) {
        return DT_DIR;
    }
    return DT_FILE;
}
__DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
{
    int errno;
    struct v_fd* fd_s;
    if ((errno = vfs_getfd(fd, &fd_s))) {
        return DO_STATUS(errno);
    }

    struct v_dnode* dnode;
    errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);

    return DO_STATUS_OR_RETURN(errno);
}
__DEFINE_LXSYSCALL3(int, readlink, const char*, path, char*, buf, size_t, size)
{
    int errno;
    struct v_dnode* dnode;
    if (!(errno = vfs_walk_proc(path, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
        errno = vfs_readlink(dnode, buf, size);
    }

    return DO_STATUS_OR_RETURN(errno);
}
__DEFINE_LXSYSCALL4(int,
                    readlinkat,
                    int,
                    dirfd,
                    const char*,
                    pathname,
                    char*,
                    buf,
                    size_t,
                    size)
{
    int errno;
    struct v_fd* fd_s;
    if ((errno = vfs_getfd(dirfd, &fd_s))) {
        return DO_STATUS(errno);
    }

    struct v_dnode* dnode;
    if (!(errno = vfs_walk(
            fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
        errno = vfs_readlink(dnode, buf, size);
    }

    return DO_STATUS_OR_RETURN(errno);
}
/*
    When we perform an operation that could affect the layout of a
    directory (i.e., rename, mkdir, rmdir), we must lock the parent
    directory whenever possible. This blocks any ongoing path walk from
    reaching it, hence avoiding partially updated state.
*/
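/*
 * [illustration] mkdir and rmdir below follow this discipline: the parent
 * dnode is locked before the parent inode, and both are held across the
 * layout change (see the paired unlock_inode()/unlock_dnode() calls at
 * their exits). Keeping one consistent order avoids deadlocks between
 * concurrent layout changes.
 */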
__DEFINE_LXSYSCALL1(int, rmdir, const char*, pathname)
{
    int errno;
    struct v_dnode* dnode;
    if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
        return DO_STATUS(errno);
    }

    lock_dnode(dnode);

    if ((errno = vfs_check_writable(dnode))) {
        goto done;
    }

    if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
        errno = EROFS;
        goto done;
    }

    if (dnode->ref_count > 1 || dnode->inode->open_count) {
        errno = EBUSY;
        goto done;
    }

    if (!llist_empty(&dnode->children)) {
        errno = ENOTEMPTY;
        goto done;
    }

    struct v_dnode* parent = dnode->parent;

    lock_dnode(parent);
    lock_inode(parent->inode);

    if ((dnode->inode->itype & VFS_IFDIR)) {
        errno = parent->inode->ops->rmdir(parent->inode, dnode);
        if (!errno) {
            vfs_dcache_remove(dnode);
        }
    } else {
        errno = ENOTDIR;
    }

    unlock_inode(parent->inode);
    unlock_dnode(parent);

done:
    unlock_dnode(dnode);
    return DO_STATUS(errno);
}
__DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
{
    int errno;
    struct v_dnode *parent, *dir;
    char name_value[VFS_NAME_MAXLEN];
    struct hstr name = HHSTR(name_value, 0, 0);

    if ((errno = vfs_walk_proc(path, &parent, &name, VFS_WALK_PARENT))) {
        goto done;
    }

    if ((errno = vfs_check_writable(parent))) {
        goto done;
    }

    if (!(dir = vfs_d_alloc(parent, &name))) {
        errno = ENOMEM;
        goto done;
    }

    lock_dnode(parent);
    lock_inode(parent->inode);

    if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
        errno = EROFS;
    } else if (!parent->inode->ops->mkdir) {
        errno = ENOTSUP;
    } else if (!(parent->inode->itype & VFS_IFDIR)) {
        errno = ENOTDIR;
    } else if (!(errno = parent->inode->ops->mkdir(parent->inode, dir))) {
        vfs_dcache_add(parent, dir);
    }

    if (errno) {
        vfs_d_free(dir);
    }

    unlock_inode(parent->inode);
    unlock_dnode(parent);

done:
    return DO_STATUS(errno);
}
int
__vfs_do_unlink(struct v_dnode* dnode)
{
    int errno;
    struct v_inode* inode = dnode->inode;

    if (dnode->ref_count > 1) {
        return EBUSY;
    }

    if ((errno = vfs_check_writable(dnode))) {
        return errno;
    }

    lock_inode(inode);

    if (inode->open_count) {
        errno = EBUSY;
    } else if (!(inode->itype & VFS_IFDIR)) {
        // The underlying unlink implementation should handle the
        // removal of the on-disk file content.
        errno = inode->ops->unlink(inode);
        if (!errno) {
            vfs_d_free(dnode);
        }
    } else {
        errno = EISDIR;
    }

    unlock_inode(inode);
    return errno;
}
__DEFINE_LXSYSCALL1(int, unlink, const char*, pathname)
{
    int errno;
    struct v_dnode* dnode;
    if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
        return DO_STATUS(errno);
    }

    errno = __vfs_do_unlink(dnode);

    return DO_STATUS(errno);
}
__DEFINE_LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
{
    int errno;
    struct v_fd* fd_s;
    if ((errno = vfs_getfd(fd, &fd_s))) {
        return DO_STATUS(errno);
    }

    struct v_dnode* dnode;
    if (!(errno = vfs_walk(fd_s->file->dnode, pathname, &dnode, NULL, 0))) {
        errno = __vfs_do_unlink(dnode);
    }

    return DO_STATUS(errno);
}
__DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
{
    int errno;
    struct v_dnode *dentry, *to_link, *name_dentry, *name_file;

    errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
    if (!errno) {
        errno = __vfs_try_locate_file(
          newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
        if (!errno && name_file) {
            errno = vfs_link(to_link, name_file);
        }
    }

    return DO_STATUS(errno);
}
__DEFINE_LXSYSCALL1(int, fsync, int, fildes)
{
    int errno;
    struct v_fd* fd_s;
    if (!(errno = vfs_getfd(fildes, &fd_s))) {
        errno = vfs_fsync(fd_s->file);
    }

    return DO_STATUS(errno);
}
int
vfs_dup_fd(struct v_fd* old, struct v_fd** new)
{
    struct v_fd* copied = cake_grab(fd_pile);

    memcpy(copied, old, sizeof(struct v_fd));

    atomic_fetch_add(&old->file->ref_count, 1);

    *new = copied;
    return 0;
}
int
vfs_dup2(int oldfd, int newfd)
{
    if (newfd == oldfd) {
        return newfd;
    }

    int errno;
    struct v_fd *oldfd_s, *newfd_s;
    if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
        goto done;
    }

    if (!TEST_FD(newfd)) {
        errno = EBADF;
        goto done;
    }

    newfd_s = __current->fdtable->fds[newfd];
    if (newfd_s && (errno = vfs_close(newfd_s->file))) {
        goto done;
    }

    if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
        __current->fdtable->fds[newfd] = newfd_s;
        return newfd;
    }

done:
    return DO_STATUS(errno);
}
__DEFINE_LXSYSCALL2(int, dup2, int, oldfd, int, newfd)
{
    return vfs_dup2(oldfd, newfd);
}
__DEFINE_LXSYSCALL1(int, dup, int, oldfd)
{
    int errno, newfd;
    struct v_fd *oldfd_s, *newfd_s;
    if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
        goto done;
    }

    if (!(errno = vfs_alloc_fdslot(&newfd)) &&
        !(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
        __current->fdtable->fds[newfd] = newfd_s;
        return newfd;
    }

done:
    return DO_STATUS(errno);
}
__DEFINE_LXSYSCALL2(int,
                    symlink,
                    const char*,
                    pathname,
                    const char*,
                    link_target)
{
    int errno;
    struct v_dnode* dnode;
    if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
        return DO_STATUS(errno);
    }

    if ((errno = vfs_check_writable(dnode))) {
        return DO_STATUS(errno);
    }

    if (!dnode->inode->ops->set_symlink) {
        return DO_STATUS(ENOTSUP);
    }

    lock_inode(dnode->inode);

    errno = dnode->inode->ops->set_symlink(dnode->inode, link_target);

    unlock_inode(dnode->inode);

    return DO_STATUS(errno);
}
void
vfs_ref_dnode(struct v_dnode* dnode)
{
    atomic_fetch_add(&dnode->ref_count, 1);
    mnt_mkbusy(dnode->mnt);
}

void
vfs_unref_dnode(struct v_dnode* dnode)
{
    atomic_fetch_sub(&dnode->ref_count, 1);
    mnt_chillax(dnode->mnt);
}
int
vfs_do_chdir(struct proc_info* proc, struct v_dnode* dnode)
{
    int errno = 0;

    lock_dnode(dnode);

    if (!(dnode->inode->itype & VFS_IFDIR)) {
        errno = ENOTDIR;
        goto done;
    }

    if (proc->cwd) {
        vfs_unref_dnode(proc->cwd);
    }

    vfs_ref_dnode(dnode);
    proc->cwd = dnode;

done:
    unlock_dnode(dnode);
    return errno;
}
__DEFINE_LXSYSCALL1(int, chdir, const char*, path)
{
    struct v_dnode* dnode;
    int errno;

    if ((errno = vfs_walk_proc(path, &dnode, NULL, 0))) {
        goto done;
    }

    errno = vfs_do_chdir(__current, dnode);

done:
    return DO_STATUS(errno);
}
__DEFINE_LXSYSCALL1(int, fchdir, int, fd)
{
    int errno;
    struct v_fd* fd_s;

    if ((errno = vfs_getfd(fd, &fd_s))) {
        goto done;
    }

    errno = vfs_do_chdir(__current, fd_s->file->dnode);

done:
    return DO_STATUS(errno);
}
__DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
{
    int errno = 0;
    char* ret_val = NULL;
    size_t len = 0;

    if (!__current->cwd) {
        *buf = VFS_PATH_DELIM;
    } else {
        len = vfs_get_path(__current->cwd, buf, size, 0);
        if (len >= size) {
            errno = ERANGE;
            goto done;
        }
    }

    buf[len + 1] = '\0';
    ret_val = buf;

done:
    __current->k_status = errno;
    return ret_val;
}
int
vfs_do_rename(struct v_dnode* current, struct v_dnode* target)
{
    int errno;
    if (current->inode->id == target->inode->id) {
        // hard links to the same inode: nothing to rename
        return 0;
    }

    if ((errno = vfs_check_writable(current))) {
        return errno;
    }

    if (current->ref_count > 1 || target->ref_count > 1) {
        return EBUSY;
    }

    if (current->super_block != target->super_block) {
        return EXDEV;
    }

    struct v_dnode* oldparent = current->parent;
    struct v_dnode* newparent = target->parent;

    lock_dnode(current);
    lock_dnode(target);

    lock_dnode(oldparent);

    lock_dnode(newparent);

    if (!llist_empty(&target->children)) {
        errno = ENOTEMPTY;
        unlock_dnode(target);
        goto done;
    }

    if ((errno =
           current->inode->ops->rename(current->inode, current, target))) {
        unlock_dnode(target);
        goto done;
    }

    // re-position current
    hstrcpy(&current->name, &target->name);
    vfs_dcache_rehash(newparent, current);

    // the old name is gone; drop target from the dcache
    vfs_dcache_remove(target);

    unlock_dnode(target);

done:
    unlock_dnode(current);

    unlock_dnode(oldparent);

    unlock_dnode(newparent);

    return errno;
}
__DEFINE_LXSYSCALL2(int, rename, const char*, oldpath, const char*, newpath)
{
    struct v_dnode *cur, *target_parent, *target;
    struct hstr name = HSTR(valloc(VFS_NAME_MAXLEN), 0);
    int errno;

    if ((errno = vfs_walk_proc(oldpath, &cur, NULL, 0))) {
        goto done;
    }

    if ((errno = vfs_walk(
          __current->cwd, newpath, &target_parent, &name, VFS_WALK_PARENT))) {
        goto done;
    }

    errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
    if (errno == ENOENT) {
        target = vfs_d_alloc(target_parent, &name);
        vfs_dcache_add(target_parent, target);
    } else if (errno) {
        goto done;
    }

    errno = vfs_do_rename(cur, target);

done:
    vfree(name.value);
    return DO_STATUS(errno);
}