3 * @author Lunaixsky (zelong56@gmail.com)
4 * @brief Lunaix virtual file system - an abstraction layer for all file systems.
8 * @copyright Copyright (c) 2022
12 // Welcome to The Mountain O'Shit! :)
15 TODO vfs & device todos checklist
17 It is overseen by Twilight Sparkle ;)
19 1. Get inodes hooked into lru (CHECKED)
20 2. Get dnodes hooked into lru (CHECKED)
21 3. Get inodes properly hashed so they can be reused by underlying fs (CHECKED)
22 4. (lru) Add a callback function (or destructor) for eviction. (CHECKED)
23 [good idea] or a constructor/destructor pattern in cake allocator ?
24 5. (mount) Figure out a way to identify a busy mount point before unmount
25 maybe a unified mount_point structure that maintain a referencing
26 counter on any dnodes within the subtree? Such a counter will only
27 increment if a file is opened or a dnode is being used as working
28 directory and decrement conversely. (CHECKED)
29 6. (mount) Ability to track all mount points (including sub-mounts)
30 so we can be confident to clean up everything when we
32 7. (mount) Figure out a way to acquire the device represented by a dnode.
33 so it can be used to mount. (e.g. we wish to get `struct device*`
34 out of the dnode at /dev/sda)
35 [tip] we should pay attention at twifs and add a private_data field
36 under struct v_dnode? (CHECKED)
37 8. (mount) Then, we should refactor on mount/unmount mechanism. (CHECKED)
38 9. (mount) (future) Ability to mount any thing? e.g. Linux can mount a disk
39 image file using a so called "loopback" pseudo device. Maybe
40 we can do similar thing in Lunaix? A block device emulation
41 above the regular file when we mount it on.
42 10. (device) device number (dev_t) allocation
43 [good idea] <class>:<subclass>:<uniq_id> composition
46 #include <klibc/string.h>
47 #include <lunaix/dirent.h>
48 #include <lunaix/foptions.h>
49 #include <lunaix/fs.h>
50 #include <lunaix/mm/cake.h>
51 #include <lunaix/mm/page.h>
52 #include <lunaix/mm/valloc.h>
53 #include <lunaix/process.h>
54 #include <lunaix/spike.h>
55 #include <lunaix/syscall.h>
56 #include <lunaix/syscall_utils.h>
58 #include <lunaix/fs/twifs.h>
// Slab-style allocator piles for the core VFS object types (set up in init).
60 static struct cake_pile* dnode_pile;
61 static struct cake_pile* inode_pile;
62 static struct cake_pile* file_pile;
63 static struct cake_pile* superblock_pile;
64 static struct cake_pile* fd_pile;
// Root of the global dnode tree and the hash table caching dnodes by
// (parent, name-hash); see __dcache_hash.
66 struct v_dnode* vfs_sysroot;
67 static struct hbucket* dnode_cache;
// LRU zones driving eviction of cached dnodes/inodes under memory pressure.
69 struct lru_zone *dnode_lru, *inode_lru;
// Pre-hashed canonical path components: "..", "." and the empty string.
71 struct hstr vfs_ddot = HSTR("..", 2);
72 struct hstr vfs_dot = HSTR(".", 1);
73 struct hstr vfs_empty = HSTR("", 0);
// Forward declarations: superblock destructor and the two LRU eviction
// callbacks registered with lru_new_zone in init.
79 vfs_sb_free(struct v_superblock* sb);
82 __vfs_try_evict_dnode(struct lru_node* obj);
85 __vfs_try_evict_inode(struct lru_node* obj);
90 // Create dedicated cake piles for these objects instead of using valloc,
// so that internal fragmentation is minimized.
91 dnode_pile = cake_new_pile("dnode_cache", sizeof(struct v_dnode), 1, 0);
92 inode_pile = cake_new_pile("inode_cache", sizeof(struct v_inode), 1, 0);
93 file_pile = cake_new_pile("file_cache", sizeof(struct v_file), 1, 0);
94 fd_pile = cake_new_pile("fd_cache", sizeof(struct v_fd), 1, 0);
// NOTE(review): result presumably assigned to superblock_pile on an elided
// preceding line — confirm against the full file.
96 cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
98 dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
// Register eviction callbacks so LRU pressure can reclaim cached nodes.
100 dnode_lru = lru_new_zone(__vfs_try_evict_dnode);
101 inode_lru = lru_new_zone(__vfs_try_evict_inode);
// Pre-compute the hashes of ".." and "." once, at boot.
103 hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
104 hstr_rehash(&vfs_dot, HSTR_FULL_HASH);
// The system root is its own parent and is pinned by an extra reference
// so it can never be evicted.
107 vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
108 vfs_sysroot->parent = vfs_sysroot;
109 atomic_fetch_add(&vfs_sysroot->ref_count, 1);
// Compute the dnode-cache bucket for a name hash scoped to `parent`.
112 inline struct hbucket*
113 __dcache_hash(struct v_dnode* parent, u32_t* hash)
// Fold high bits into low bits of the pre-computed name hash.
117 _hash = _hash ^ (_hash >> VFS_HASHBITS);
118 // Mix in the parent pointer value to reduce the chance of collisions.
119 _hash += (u32_t)parent;
121 return &dnode_cache[_hash & VFS_HASH_MASK];
// Look up a child dnode of `parent` by name in the dnode cache.
125 vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
// Empty name and "." resolve trivially (elided branch presumably
// returns `parent` itself).
127 if (!str->len || HSTR_EQ(str, &vfs_dot))
130 if (HSTR_EQ(str, &vfs_ddot)) {
131 return parent->parent;
134 u32_t hash = str->hash;
135 struct hbucket* slot = __dcache_hash(parent, &hash);
137 struct v_dnode *pos, *n;
138 hashtable_bucket_foreach(slot, pos, n, hash_list)
// NOTE(review): matches on the full name hash only — no string compare
// visible here; confirm hash collisions are acceptable by design.
140 if (pos->name.hash == hash) {
// Insert `dnode` into the cache as a child of `parent`.
148 vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
// The cache holds one reference to each cached child.
152 atomic_fetch_add(&dnode->ref_count, 1);
153 dnode->parent = parent;
154 llist_append(&parent->children, &dnode->siblings);
// Hash the child into the global dnode cache under (parent, name hash).
156 struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
157 hlist_add(&bucket->head, &dnode->hash_list);
// Detach `dnode` from the cache: unlink from parent, aliases and hash.
161 vfs_dcache_remove(struct v_dnode* dnode)
// Only the cache's own reference may remain at this point.
164 assert(dnode->ref_count == 1);
166 llist_delete(&dnode->siblings);
167 llist_delete(&dnode->aka_list);
168 hlist_delete(&dnode->hash_list);
// Drop the reference taken by vfs_dcache_add.
170 dnode->parent = NULL;
171 atomic_fetch_sub(&dnode->ref_count, 1);
// Re-hash a (possibly renamed) dnode and move it under `new_parent`.
175 vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
179 hstr_rehash(&dnode->name, HSTR_FULL_HASH);
180 vfs_dcache_remove(dnode);
181 vfs_dcache_add(new_parent, dnode);
// Open the file behind `dnode`, returning a fresh v_file through `file`.
185 vfs_open(struct v_dnode* dnode, struct v_file** file)
// Refuse dnodes without a backing inode or without an open op.
187 if (!dnode->inode || !dnode->inode->ops->open) {
191 struct v_inode* inode = dnode->inode;
195 struct v_file* vfile = cake_grab(file_pile);
196 memset(vfile, 0, sizeof(*vfile));
198 vfile->dnode = dnode;
199 vfile->inode = inode;
200 vfile->ref_count = ATOMIC_VAR_INIT(1);
201 vfile->ops = inode->default_fops;
// Lazily attach a page cache to regular files on first open.
203 if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
204 struct pcache* pcache = vzalloc(sizeof(struct pcache));
206 pcache->master = inode;
207 inode->pg_cache = pcache;
// Delegate to the fs-specific open; on failure the fresh v_file is
// returned to its pile (error branch partially elided in this view).
210 int errno = inode->ops->open(inode, vfile);
212 cake_release(file_pile, vfile);
// Success: the open file pins the dnode and marks its mount busy.
214 atomic_fetch_add(&dnode->ref_count, 1);
216 mnt_mkbusy(dnode->mnt);
// Bind `inode` to `assign_to`, replacing any previous binding.
227 vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
// Detach from the previously assigned inode first.
229 if (assign_to->inode) {
230 llist_delete(&assign_to->aka_list);
231 assign_to->inode->link_count--;
// Record this dnode as an alias (name) of the inode.
233 llist_append(&inode->aka_dnodes, &assign_to->aka_list);
234 assign_to->inode = inode;
// Create a hard link: make `name` an additional name for `to_link`'s inode.
239 vfs_link(struct v_dnode* to_link, struct v_dnode* name)
243 if ((errno = vfs_check_writable(to_link))) {
247 lock_inode(to_link->inode);
// Hard links must stay within one file system (same superblock root).
248 if (to_link->super_block->root != name->super_block->root) {
250 } else if (!to_link->inode->ops->link) {
252 } else if (!(errno = to_link->inode->ops->link(to_link->inode, name))) {
253 vfs_assign_inode(name, to_link->inode);
255 unlock_inode(to_link->inode);
// Close `file` on behalf of process `pid`; the last closer flushes and frees.
261 vfs_pclose(struct v_file* file, pid_t pid)
// More holders remain: just drop one reference.
264 if (file->ref_count > 1) {
265 atomic_fetch_sub(&file->ref_count, 1);
266 } else if (!(errno = file->ops->close(file))) {
267 atomic_fetch_sub(&file->dnode->ref_count, 1);
268 file->inode->open_count--;
272 * This happens when a process is terminated while blocking on read.
273 * In that case, the process is still holding the inode lock and it
274 will never get released.
275 * The unlocking should also include an ownership check.
277 * To see why, consider two processes that both open the same file, both with
279 * Process A: busy on reading x
280 * Process B: do nothing with x
281 * Assuming that, after a very short time, process B gets terminated
282 * while process A is still busy in its reading business. By this
283 * design, the inode lock of this file x is released by B rather
284 * than A. And this will cause a probable race condition on A if other
285 * process is writing to this file later after B exit.
287 if (mutex_on_hold(&file->inode->lock)) {
// Force-release the inode lock on behalf of the dying owner `pid`.
288 mutex_unlock_for(&file->inode->lock, pid);
290 mnt_chillax(file->dnode->mnt);
// Flush dirty pages before returning the v_file to its pile.
292 pcache_commit_all(file->inode);
293 cake_release(file_pile, file);
// Close `file` on behalf of the calling process.
299 vfs_close(struct v_file* file)
301 return vfs_pclose(file, __current->pid);
// Return a v_fd descriptor object to its cake pile.
305 vfs_free_fd(struct v_fd* fd)
307 cake_release(fd_pile, fd);
// Synchronize `file`: commit cached pages, then run the fs-specific sync.
311 vfs_fsync(struct v_file* file)
314 if ((errno = vfs_check_writable(file->dnode))) {
318 lock_inode(file->inode);
// Flush the page cache first, under the inode lock.
320 pcache_commit_all(file->inode);
323 if (file->ops->sync) {
324 errno = file->ops->sync(file);
327 unlock_inode(file->inode);
// Find the first free slot in the current process' fd table.
333 vfs_alloc_fdslot(int* fd)
335 for (size_t i = 0; i < VFS_MAX_FD; i++) {
336 if (!__current->fdtable->fds[i]) {
// Allocate and zero a superblock, with its own per-sb inode hash cache.
347 struct v_superblock* sb = cake_grab(superblock_pile);
348 memset(sb, 0, sizeof(*sb));
349 llist_init_head(&sb->sb_list);
350 sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
// Destroy a superblock and return it to its pile.
355 vfs_sb_free(struct v_superblock* sb)
// NOTE(review): i_cache presumably freed on an elided line before this
// release — verify against the full file.
358 cake_release(superblock_pile, sb);
// LRU callback: try to evict a cached dnode.
362 __vfs_try_evict_dnode(struct lru_node* obj)
364 struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);
// Only unreferenced dnodes are eligible for eviction.
366 if (!dnode->ref_count) {
// LRU callback: try to evict a cached inode.
374 __vfs_try_evict_inode(struct lru_node* obj)
376 struct v_inode* inode = container_of(obj, struct v_inode, lru);
// Evict only inodes that are neither linked by any name nor open.
378 if (!inode->link_count && !inode->open_count) {
// Allocate and initialize a dnode named `name` under `parent` (parent may
// be NULL for the system root).
386 vfs_d_alloc(struct v_dnode* parent, struct hstr* name)
388 struct v_dnode* dnode = cake_grab(dnode_pile);
// Under memory pressure: evict half of the dnode LRU and retry once
// (the failure check around this retry is elided in this view).
390 lru_evict_half(dnode_lru);
392 if (!(dnode = cake_grab(dnode_pile))) {
397 memset(dnode, 0, sizeof(*dnode));
398 llist_init_head(&dnode->children);
399 llist_init_head(&dnode->siblings);
400 llist_init_head(&dnode->aka_list);
401 mutex_init(&dnode->lock);
403 dnode->ref_count = ATOMIC_VAR_INIT(0);
// Name storage is a fixed-size heap buffer; the caller's name is copied in.
404 dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
406 hstrcpy(&dnode->name, name);
// Inherit superblock and mount point from the parent (when given).
409 dnode->super_block = parent->super_block;
410 dnode->mnt = parent->mnt;
413 lru_use_one(dnode_lru, &dnode->lru);
// Destroy a dnode whose only remaining reference is the cache's own.
419 vfs_d_free(struct v_dnode* dnode)
421 assert(dnode->ref_count == 1);
// Unaccount the name link this dnode held on its inode (if any).
424 assert(dnode->inode->link_count > 0);
425 dnode->inode->link_count--;
428 vfs_dcache_remove(dnode);
429 // Make sure the children de-reference their parent.
430 // With lru presented, the eviction will be propagated over the entire
431 // detached subtree eventually
432 struct v_dnode *pos, *n;
433 llist_for_each(pos, n, &dnode->children, siblings)
435 vfs_dcache_remove(pos);
438 vfree(dnode->name.value);
439 cake_release(dnode_pile, dnode);
// Look up a cached inode by id in this superblock's per-sb hash cache.
443 vfs_i_find(struct v_superblock* sb, u32_t i_id)
445 struct hbucket* slot = &sb->i_cache[i_id & VFS_HASH_MASK];
446 struct v_inode *pos, *n;
447 hashtable_bucket_foreach(slot, pos, n, hash_list)
449 if (pos->id == i_id) {
// A cache hit keeps the inode warm in the LRU.
450 lru_use_one(inode_lru, &pos->lru);
// (Re-)insert an inode into its superblock's hash cache keyed by id.
459 vfs_i_addhash(struct v_inode* inode)
461 struct hbucket* slot = &inode->sb->i_cache[inode->id & VFS_HASH_MASK];
// Remove any stale hash linkage before re-inserting.
463 hlist_delete(&inode->hash_list);
464 hlist_add(&slot->head, &inode->hash_list);
// Allocate and initialize an inode for superblock `sb`.
468 vfs_i_alloc(struct v_superblock* sb)
// The file system must supply an inode initializer.
470 assert(sb->ops.init_inode);
472 struct v_inode* inode;
// Under memory pressure: evict half of the inode LRU and retry once.
473 if (!(inode = cake_grab(inode_pile))) {
474 lru_evict_half(inode_lru);
475 if (!(inode = cake_grab(inode_pile))) {
480 memset(inode, 0, sizeof(*inode));
481 mutex_init(&inode->lock);
482 llist_init_head(&inode->xattrs);
483 llist_init_head(&inode->aka_dnodes);
// Let the fs fill in type-specific fields.
485 sb->ops.init_inode(sb, inode);
// All three timestamps start out identical at creation time.
488 inode->ctime = clock_unixtime();
489 inode->atime = inode->ctime;
490 inode->mtime = inode->ctime;
493 lru_use_one(inode_lru, &inode->lru);
// Destroy an inode: release its page cache, run the destructor, unhash.
498 vfs_i_free(struct v_inode* inode)
500 if (inode->pg_cache) {
501 pcache_release(inode->pg_cache);
502 vfree(inode->pg_cache);
504 // we don't need to sync inode.
505 // If an inode can be freed, then it must be properly closed.
506 // Hence it must be synced already!
507 if (inode->destruct) {
508 inode->destruct(inode);
510 hlist_delete(&inode->hash_list);
511 cake_release(inode_pile, inode);
514 /* ---- System call definition and support ---- */
// Flag for __vfs_try_locate_file: create an empty node when the final
// path component does not exist.
516 #define FLOCATE_CREATE_EMPTY 1
// Validate an fd number and fetch its descriptor from the current
// process' fd table.
519 vfs_getfd(int fd, struct v_fd** fd_s)
521 if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {
// Resolve `path` into its parent dir (*fdir) and target (*file);
// optionally create an empty target when FLOCATE_CREATE_EMPTY is set.
528 __vfs_try_locate_file(const char* path,
529 struct v_dnode** fdir,
530 struct v_dnode** file,
533 char name_str[VFS_NAME_MAXLEN];
534 struct hstr name = HSTR(name_str, 0);
// Resolve the parent directory first, capturing the final component name.
538 if ((errno = vfs_walk_proc(path, fdir, &name, VFS_WALK_PARENT))) {
542 errno = vfs_walk(*fdir, name.value, file, NULL, 0);
// Anything other than "not found + creation requested" is final.
543 if (errno != ENOENT || !(options & FLOCATE_CREATE_EMPTY)) {
547 struct v_dnode* parent = *fdir;
548 struct v_dnode* file_new = vfs_d_alloc(parent, &name);
// Delegate creation to the fs; only cache the dnode on success,
// otherwise give it back.
556 if (!(errno = parent->inode->ops->create(parent->inode, file_new))) {
557 vfs_dcache_add(parent, file_new);
560 vfs_d_free(file_new);
563 unlock_dnode(parent);
// Core of open(2): locate (optionally create) the file, open it and
// install a descriptor into the current process' fd table.
569 vfs_do_open(const char* path, int options)
572 struct v_dnode *dentry, *file;
573 struct v_file* ofile = NULL;
575 errno = __vfs_try_locate_file(
576 path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);
578 if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
580 if (errno || (errno = vfs_open(file, &ofile))) {
584 struct v_fd* fd_s = cake_grab(fd_pile);
585 memset(fd_s, 0, sizeof(*fd_s));
// Branchless: start at EOF iff FO_APPEND is requested, else at 0.
587 ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);
589 fd_s->flags = options;
590 __current->fdtable->fds[fd] = fd_s;
// open(2): thin wrapper over vfs_do_open.
597 __DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
599 int errno = vfs_do_open(path, options);
600 return DO_STATUS_OR_RETURN(errno);
// close(2): close the file, then release the descriptor slot.
603 __DEFINE_LXSYSCALL1(int, close, int, fd)
607 if ((errno = vfs_getfd(fd, &fd_s))) {
611 if ((errno = vfs_close(fd_s->file))) {
// Only release the slot once the file is successfully closed.
615 cake_release(fd_pile, fd_s);
616 __current->fdtable->fds[fd] = 0;
619 return DO_STATUS(errno);
// readdir callback: copy one directory entry into the caller's dirent
// stashed in dctx->cb_data.
623 __vfs_readdir_callback(struct dir_context* dctx,
628 struct dirent* dent = (struct dirent*)dctx->cb_data;
// NOTE(review): strncpy may leave d_name unterminated when the name
// reaches DIRENT_NAME_MAX_LEN — confirm a terminator is guaranteed.
629 strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);
631 dent->d_type = dtype;
// readdir(2): read one directory entry at dent->d_offset.
634 __DEFINE_LXSYSCALL2(int, sys_readdir, int, fd, struct dirent*, dent)
639 if ((errno = vfs_getfd(fd, &fd_s))) {
643 struct v_inode* inode = fd_s->file->inode;
// Only directories may be enumerated.
647 if (!(inode->itype & VFS_IFDIR)) {
650 struct dir_context dctx =
651 (struct dir_context){ .cb_data = dent,
652 .index = dent->d_offset,
653 .read_complete_callback =
654 __vfs_readdir_callback };
// Offsets 0 and 1 synthesize the "." and ".." entries respectively.
656 if (dent->d_offset == 0) {
657 __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, DT_DIR);
658 } else if (dent->d_offset == 1) {
659 __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, DT_DIR);
// All later offsets are served by the fs-specific readdir.
662 if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {
673 return DO_STATUS_OR_RETURN(errno);
// read(2): read `count` bytes at the current file position.
676 __DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
680 if ((errno = vfs_getfd(fd, &fd_s))) {
684 struct v_file* file = fd_s->file;
// Directories cannot be read through read(2).
685 if ((file->inode->itype & VFS_IFDIR)) {
690 lock_inode(file->inode);
692 file->inode->atime = clock_unixtime();
// Sequential devices and FO_DIRECT opens bypass the page cache.
694 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
695 errno = file->ops->read(file->inode, buf, count, file->f_pos);
697 errno = pcache_read(file->inode, buf, count, file->f_pos);
// A non-negative result is the byte count; advance the file position.
701 file->f_pos += errno;
702 unlock_inode(file->inode);
706 unlock_inode(file->inode);
709 return DO_STATUS(errno);
// write(2): write `count` bytes at the current file position.
712 __DEFINE_LXSYSCALL3(int, write, int, fd, void*, buf, size_t, count)
716 if ((errno = vfs_getfd(fd, &fd_s))) {
720 struct v_file* file = fd_s->file;
722 if ((errno = vfs_check_writable(file->dnode))) {
// Directories cannot be written through write(2).
726 if ((file->inode->itype & VFS_IFDIR)) {
731 lock_inode(file->inode);
733 file->inode->mtime = clock_unixtime();
// Sequential devices and FO_DIRECT opens bypass the page cache.
735 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
736 errno = file->ops->write(file->inode, buf, count, file->f_pos);
738 errno = pcache_write(file->inode, buf, count, file->f_pos);
// A non-negative result is the byte count; advance the file position.
742 file->f_pos += errno;
743 unlock_inode(file->inode);
747 unlock_inode(file->inode);
750 return DO_STATUS(errno);
// lseek(2): reposition the file offset with overflow-checked arithmetic.
753 __DEFINE_LXSYSCALL3(int, lseek, int, fd, int, offset, int, options)
757 if ((errno = vfs_getfd(fd, &fd_s))) {
761 struct v_file* file = fd_s->file;
763 if (!file->ops->seek) {
768 lock_inode(file->inode);
771 int fpos = file->f_pos;
// Overflow-checked bases: current position and end-of-file (the
// branch selecting between them is elided in this view).
774 overflow = __builtin_sadd_overflow((int)file->f_pos, offset, &fpos);
778 __builtin_sadd_overflow((int)file->inode->fsize, offset, &fpos);
// Only commit the position via the fs seek hook when it succeeds.
786 } else if (!(errno = file->ops->seek(file->inode, fpos))) {
790 unlock_inode(file->inode);
793 return DO_STATUS(errno);
// Build the absolute path of `dnode` into `buf` by recursing to the root
// (the root is its own parent), appending one component per level.
797 vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
809 if (dnode->parent != dnode) {
810 len = vfs_get_path(dnode->parent, buf, size, depth + 1);
// Ensure exactly one path delimiter between components.
817 if (!len || buf[len - 1] != VFS_PATH_DELIM) {
818 buf[len++] = VFS_PATH_DELIM;
// Copy the component, truncated to the space remaining in buf.
821 size_t cpy_size = MIN(dnode->name.len, size - len);
822 strncpy(buf + len, dnode->name.value, cpy_size);
// Read the target of a symlink dnode into `buf`.
829 vfs_readlink(struct v_dnode* dnode, char* buf, size_t size)
832 struct v_inode* inode = dnode->inode;
833 if (inode->ops->read_symlink) {
836 int errno = inode->ops->read_symlink(inode, &link);
// NOTE(review): strncpy leaves buf unterminated when the link is
// >= size bytes — confirm callers tolerate that (readlink(2) does
// not require termination, but in-kernel users might).
837 strncpy(buf, link, size);
// Presumably maps a VFS inode type (VFS_IF*) to a dirent d_type (DT_*);
// body elided in this view — confirm against the full file.
846 vfs_get_dtype(int itype)
// realpathat: reconstruct the absolute path of the file behind `fd`.
858 __DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
862 if ((errno = vfs_getfd(fd, &fd_s))) {
866 struct v_dnode* dnode;
867 errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);
874 return DO_STATUS_OR_RETURN(errno);
// readlink(2): NOFOLLOW resolves to the symlink itself, not its target.
877 __DEFINE_LXSYSCALL3(int, readlink, const char*, path, char*, buf, size_t, size)
880 struct v_dnode* dnode;
881 if (!(errno = vfs_walk_proc(path, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
882 errno = vfs_readlink(dnode, buf, size);
889 return DO_STATUS(errno);
// readlinkat: resolve `pathname` relative to the directory behind `dirfd`.
892 __DEFINE_LXSYSCALL4(int,
905 if ((errno = vfs_getfd(dirfd, &fd_s))) {
909 struct v_dnode* dnode;
910 if (!(errno = vfs_walk(
911 fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
// BUG(review): this reads the link of the *dirfd* dnode, not the
// freshly walked `dnode` — should almost certainly be
// vfs_readlink(dnode, buf, size).
912 errno = vfs_readlink(fd_s->file->dnode, buf, size);
920 return DO_STATUS(errno);
925 When we perform an operation that could affect the layout of a
926 directory (i.e., rename, mkdir, rmdir), we must lock the parent dir
927 whenever possible. This will block any ongoing path walking from reaching
928 it, hence avoiding any partial state.
// rmdir(2): remove an empty, unused directory.
931 __DEFINE_LXSYSCALL1(int, rmdir, const char*, pathname)
934 struct v_dnode* dnode;
935 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
936 return DO_STATUS(errno);
941 if ((errno = vfs_check_writable(dnode))) {
// Read-only file systems cannot drop directories.
945 if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
// Busy check: still referenced elsewhere (e.g. cwd) or held open.
950 if (dnode->ref_count > 1 || dnode->inode->open_count) {
// Only empty directories may be removed.
955 if (!llist_empty(&dnode->children)) {
960 struct v_dnode* parent = dnode->parent;
968 lock_inode(parent->inode);
// Delegate to the fs, then drop the dnode from the cache on success.
970 if ((dnode->inode->itype & VFS_IFDIR)) {
971 errno = parent->inode->ops->rmdir(parent->inode, dnode);
973 vfs_dcache_remove(dnode);
979 unlock_inode(parent->inode);
980 unlock_dnode(parent);
984 return DO_STATUS(errno);
// mkdir(2): create a directory named by the final path component.
987 __DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
990 struct v_dnode *parent, *dir;
991 char name_value[VFS_NAME_MAXLEN];
992 struct hstr name = HHSTR(name_value, 0, 0);
// Resolve the parent directory; `name` receives the new leaf component.
994 if ((errno = vfs_walk_proc(path, &parent, &name, VFS_WALK_PARENT))) {
998 if ((errno = vfs_check_writable(parent))) {
1002 if (!(dir = vfs_d_alloc(parent, &name))) {
1008 lock_inode(parent->inode);
// Validate fs capability and parent type before delegating to the fs;
// only cache the new dnode on success.
1010 if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
1012 } else if (!parent->inode->ops->mkdir) {
1014 } else if (!(parent->inode->itype & VFS_IFDIR)) {
1016 } else if (!(errno = parent->inode->ops->mkdir(parent->inode, dir))) {
1017 vfs_dcache_add(parent, dir);
1024 unlock_inode(parent->inode);
1025 unlock_dnode(parent);
1027 return DO_STATUS(errno);
// Shared core of unlink(2)/unlinkat(2): remove a name from the fs.
1031 __vfs_do_unlink(struct v_dnode* dnode)
1034 struct v_inode* inode = dnode->inode;
// A dnode still referenced elsewhere must not be unlinked.
1036 if (dnode->ref_count > 1) {
1040 if ((errno = vfs_check_writable(dnode))) {
// Open files and directories are rejected (directories go via rmdir).
1046 if (inode->open_count) {
1048 } else if (!(inode->itype & VFS_IFDIR)) {
1049 // The underlying unlink implementation should handle
1051 errno = inode->ops->unlink(inode);
1059 unlock_inode(inode);
// unlink(2): thin wrapper over the shared unlink core.
1064 __DEFINE_LXSYSCALL1(int, unlink, const char*, pathname)
1067 struct v_dnode* dnode;
1068 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
1072 errno = __vfs_do_unlink(dnode);
1075 return DO_STATUS(errno);
// unlinkat: resolve `pathname` relative to the directory behind `fd`.
1078 __DEFINE_LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
1082 if ((errno = vfs_getfd(fd, &fd_s))) {
1086 struct v_dnode* dnode;
1087 if (!(errno = vfs_walk(fd_s->file->dnode, pathname, &dnode, NULL, 0))) {
1088 errno = __vfs_do_unlink(dnode);
1092 return DO_STATUS(errno);
// link(2): hard-link oldpath to a newly created name at newpath.
1095 __DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
1098 struct v_dnode *dentry, *to_link, *name_dentry, *name_file;
// Locate the existing file, then create an empty name slot at newpath.
1100 errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
1102 errno = __vfs_try_locate_file(
1103 newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
1106 } else if (name_file) {
1107 errno = vfs_link(to_link, name_file);
1110 return DO_STATUS(errno);
// fsync(2): flush the file behind `fildes` to its backing store.
1113 __DEFINE_LXSYSCALL1(int, fsync, int, fildes)
1118 if (!(errno = vfs_getfd(fildes, &fd_s))) {
1119 errno = vfs_fsync(fd_s->file);
1122 return DO_STATUS(errno);
// Clone a descriptor object; the copy shares (and pins) the open file.
1126 vfs_dup_fd(struct v_fd* old, struct v_fd** new)
1129 struct v_fd* copied = cake_grab(fd_pile);
1131 memcpy(copied, old, sizeof(struct v_fd));
1133 atomic_fetch_add(&old->file->ref_count, 1);
// Core of dup2(2): duplicate oldfd onto newfd.
1141 vfs_dup2(int oldfd, int newfd)
// Duplicating a descriptor onto itself requires no work.
1143 if (newfd == oldfd) {
1148 struct v_fd *oldfd_s, *newfd_s;
1149 if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
1153 if (!TEST_FD(newfd)) {
// An occupied target slot is closed first.
1158 newfd_s = __current->fdtable->fds[newfd];
1159 if (newfd_s && (errno = vfs_close(newfd_s->file))) {
1163 if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1164 __current->fdtable->fds[newfd] = newfd_s;
1169 return DO_STATUS(errno);
// dup2(2): logic shared with the in-kernel vfs_dup2 helper.
1172 __DEFINE_LXSYSCALL2(int, dup2, int, oldfd, int, newfd)
1174 return vfs_dup2(oldfd, newfd);
// dup(2): clone oldfd into the lowest free fd slot.
1177 __DEFINE_LXSYSCALL1(int, dup, int, oldfd)
1180 struct v_fd *oldfd_s, *newfd_s;
1181 if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
// Allocate a free slot, then clone the descriptor into it.
1185 if (!(errno = vfs_alloc_fdslot(&newfd)) &&
1186 !(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1187 __current->fdtable->fds[newfd] = newfd_s;
1192 return DO_STATUS(errno);
// symlink syscall: set the link target on an existing node via the
// fs-specific set_symlink op.
1195 __DEFINE_LXSYSCALL2(int,
1203 struct v_dnode* dnode;
1204 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
// NOTE(review): assignment-in-condition without the conventional extra
// parens; consider `if ((errno = ...))` for clarity.
1208 if (errno = vfs_check_writable(dnode)) {
1212 if (!dnode->inode->ops->set_symlink) {
1217 lock_inode(dnode->inode);
1219 errno = dnode->inode->ops->set_symlink(dnode->inode, link_target);
1221 unlock_inode(dnode->inode);
1224 return DO_STATUS(errno);
// Pin a dnode and mark its mount point busy.
1228 vfs_ref_dnode(struct v_dnode* dnode)
1230 atomic_fetch_add(&dnode->ref_count, 1);
1231 mnt_mkbusy(dnode->mnt);
// Unpin a dnode and relax its mount point (inverse of vfs_ref_dnode).
1235 vfs_unref_dnode(struct v_dnode* dnode)
1237 atomic_fetch_sub(&dnode->ref_count, 1);
1238 mnt_chillax(dnode->mnt);
// Change `proc`'s working directory to `dnode`.
1242 vfs_do_chdir(struct proc_info* proc, struct v_dnode* dnode)
// The new cwd must be a directory.
1248 if (!(dnode->inode->itype & VFS_IFDIR)) {
// Swap references: release the old cwd, pin the new one.
1254 vfs_unref_dnode(proc->cwd);
1257 vfs_ref_dnode(dnode);
1260 unlock_dnode(dnode);
// chdir(2): resolve the path and delegate to vfs_do_chdir.
1266 __DEFINE_LXSYSCALL1(int, chdir, const char*, path)
1268 struct v_dnode* dnode;
1271 if ((errno = vfs_walk_proc(path, &dnode, NULL, 0))) {
1275 errno = vfs_do_chdir(__current, dnode);
1278 return DO_STATUS(errno);
// fchdir(2): change cwd to the directory behind an open fd.
1281 __DEFINE_LXSYSCALL1(int, fchdir, int, fd)
1286 if ((errno = vfs_getfd(fd, &fd_s))) {
1290 errno = vfs_do_chdir(__current, fd_s->file->dnode);
1293 return DO_STATUS(errno);
// getcwd(2): write the current working directory path into `buf`.
1296 __DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
// No cwd set yet: report the root path "/".
1307 if (!__current->cwd) {
1308 *buf = VFS_PATH_DELIM;
1311 len = vfs_get_path(__current->cwd, buf, size, 0);
// NOTE(review): terminator at len+1 leaves buf[len] untouched — looks
// like an off-by-one; confirm whether buf[len] = '\0' was intended.
1318 buf[len + 1] = '\0';
1323 __current->k_status = errno;
// Core of rename(2): move/rename `current` onto `target`.
1328 vfs_do_rename(struct v_dnode* current, struct v_dnode* target)
// Renaming onto the very same inode is handled as a trivial case.
1331 if (current->inode->id == target->inode->id) {
1336 if (errno = vfs_check_writable(current)) {
// Neither end may be in active use elsewhere.
1340 if (current->ref_count > 1 || target->ref_count > 1) {
// Cross-file-system rename is not supported.
1344 if (current->super_block != target->super_block) {
1348 struct v_dnode* oldparent = current->parent;
1349 struct v_dnode* newparent = target->parent;
// Lock the dnode and both parents to freeze the directory layout
// (see the locking note above this function).
1351 lock_dnode(current);
1354 lock_dnode(oldparent);
1356 lock_dnode(newparent);
// A non-empty target directory must not be clobbered.
1358 if (!llist_empty(&target->children)) {
1360 unlock_dnode(target);
1365 current->inode->ops->rename(current->inode, current, target))) {
1366 unlock_dnode(target);
1370 // re-position current
// NOTE(review): "¤t" below is mojibake of "&current" (the byte
// sequence "&curren" was decoded as the HTML entity for '¤') —
// restore to: hstrcpy(&current->name, &target->name);
1371 hstrcpy(¤t->name, &target->name);
1372 vfs_dcache_rehash(newparent, current);
1377 unlock_dnode(target);
1380 unlock_dnode(current);
1382 unlock_dnode(oldparent);
1384 unlock_dnode(newparent);
// rename(2): resolve both ends, creating the destination dnode if absent.
1389 __DEFINE_LXSYSCALL2(int, rename, const char*, oldpath, const char*, newpath)
1391 struct v_dnode *cur, *target_parent, *target;
// NOTE(review): heap-allocated name buffer — confirm it is freed on
// every exit path (frees are elided in this view).
1392 struct hstr name = HSTR(valloc(VFS_NAME_MAXLEN), 0);
1395 if ((errno = vfs_walk_proc(oldpath, &cur, NULL, 0))) {
// Resolve the destination's parent; `name` receives the leaf component.
1399 if ((errno = vfs_walk(
1400 __current->cwd, newpath, &target_parent, &name, VFS_WALK_PARENT))) {
// Create a fresh dnode for the destination when it does not exist yet.
1404 errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
1405 if (errno == ENOENT) {
1406 target = vfs_d_alloc(target_parent, &name);
1407 vfs_dcache_add(target_parent, target);
1417 errno = vfs_do_rename(cur, target);
1421 return DO_STATUS(errno);