3 * @author Lunaixsky (zelong56@gmail.com)
4 * @brief Lunaix virtual file system - an abstraction layer for all file system.
8 * @copyright Copyright (c) 2022
12 // Welcome to The Mountain O'Shit! :)
15 TODO vfs & device todos checklist
17 It is overseen by Twilight Sparkle ;)
19 1. Get inodes hooked into lru (CHECKED)
20 2. Get dnodes hooked into lru (CHECKED)
3. Get inodes properly hashed so they can be reused by underlying fs (CHECKED)
22 4. (lru) Add a callback function (or destructor) for eviction. (CHECKED)
23 [good idea] or a constructor/destructor pattern in cake allocator ?
24 5. (mount) Figure out a way to identify a busy mount point before unmount
25 maybe a unified mount_point structure that maintain a referencing
26 counter on any dnodes within the subtree? Such a counter will only
increment if a file is opened or a dnode is being used as working
directory, and decrement conversely. (CHECKED)
29 6. (mount) Ability to track all mount points (including sub-mounts)
30 so we can be confident to clean up everything when we
32 7. (mount) Figure out a way to acquire the device represented by a dnode.
33 so it can be used to mount. (e.g. we wish to get `struct device*`
34 out of the dnode at /dev/sda)
35 [tip] we should pay attention at twifs and add a private_data field
36 under struct v_dnode? (CHECKED)
37 8. (mount) Then, we should refactor on mount/unmount mechanism. (CHECKED)
38 9. (mount) (future) Ability to mount any thing? e.g. Linux can mount a disk
39 image file using a so called "loopback" pseudo device. Maybe
40 we can do similar thing in Lunaix? A block device emulation
41 above the regular file when we mount it on.
42 10. (device) device number (dev_t) allocation
43 [good idea] <class>:<subclass>:<uniq_id> composition
46 #include <klibc/string.h>
47 #include <lunaix/dirent.h>
48 #include <lunaix/foptions.h>
49 #include <lunaix/fs.h>
50 #include <lunaix/mm/cake.h>
51 #include <lunaix/mm/page.h>
52 #include <lunaix/mm/valloc.h>
53 #include <lunaix/process.h>
54 #include <lunaix/spike.h>
55 #include <lunaix/syscall.h>
57 #include <lunaix/fs/twifs.h>
/* Dedicated slab ("cake") piles for the core VFS object types. */
static struct cake_pile* dnode_pile;
static struct cake_pile* inode_pile;
static struct cake_pile* file_pile;
static struct cake_pile* superblock_pile;
static struct cake_pile* fd_pile;
/* Root dnode of the whole VFS tree; dnode_cache is the dentry hash table. */
struct v_dnode* vfs_sysroot;
static struct hbucket* dnode_cache;
/* LRU zones driving eviction of cached dnodes and inodes. */
struct lru_zone *dnode_lru, *inode_lru;
/* Pre-hashed well-known path components: "..", "." and the empty name. */
struct hstr vfs_ddot = HSTR("..", 2);
struct hstr vfs_dot = HSTR(".", 1);
struct hstr vfs_empty = HSTR("", 0);
/* Forward declarations — definitions appear later in this file. */
vfs_sb_free(struct v_superblock* sb);
__vfs_try_evict_dnode(struct lru_node* obj);
__vfs_try_evict_inode(struct lru_node* obj);
    // Create a dedicated cake pile for each of these types instead of using
    // valloc, so internal fragmentation is minimised.
    dnode_pile = cake_new_pile("dnode_cache", sizeof(struct v_dnode), 1, 0);
    inode_pile = cake_new_pile("inode_cache", sizeof(struct v_inode), 1, 0);
    file_pile = cake_new_pile("file_cache", sizeof(struct v_file), 1, 0);
    fd_pile = cake_new_pile("fd_cache", sizeof(struct v_fd), 1, 0);
    // NOTE(review): the result is not assigned to superblock_pile on this
    // line — presumably assigned nearby; confirm superblock_pile is set.
    cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
    // Dentry cache hash table, zero-initialised.
    dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
    dnode_lru = lru_new_zone(__vfs_try_evict_dnode);
    inode_lru = lru_new_zone(__vfs_try_evict_inode);
    // Pre-compute the hashes of "." and ".." used throughout path walking.
    hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
    hstr_rehash(&vfs_dot, HSTR_FULL_HASH);
    // The root is its own parent and is pinned with a permanent reference.
    vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
    vfs_sysroot->parent = vfs_sysroot;
    atomic_fetch_add(&vfs_sysroot->ref_count, 1);
/* Select the dentry-cache bucket for a (parent, name-hash) pair.
 * NOTE(review): `hash` is a pointer — the mixed value may be written back
 * on an elided line; callers pass &name.hash. Verify. */
inline struct hbucket*
__dcache_hash(struct v_dnode* parent, uint32_t* hash)
    uint32_t _hash = *hash;
    // Add the parent's pointer value to reduce the chance of collision.
    _hash += (uint32_t)parent;
    _hash = _hash ^ (_hash >> VFS_HASHBITS);
    return &dnode_cache[_hash & VFS_HASH_MASK];
/* Dentry-cache lookup of component `str` under `parent`.
 * Empty and "." resolve to `parent`; ".." resolves to parent->parent. */
vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
    if (!str->len || HSTR_EQ(str, &vfs_dot))
    if (HSTR_EQ(str, &vfs_ddot)) {
        return parent->parent;
    uint32_t hash = str->hash;
    struct hbucket* slot = __dcache_hash(parent, &hash);
    // Scan the bucket for an entry with a matching name hash.
    struct v_dnode *pos, *n;
    hashtable_bucket_foreach(slot, pos, n, hash_list)
        if (pos->name.hash == hash) {
/* Insert `dnode` as a cached child of `parent`; the cache takes one ref. */
vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
    atomic_fetch_add(&dnode->ref_count, 1);
    dnode->parent = parent;
    llist_append(&parent->children, &dnode->siblings);
    struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
    hlist_add(&bucket->head, &dnode->hash_list);
/* Detach `dnode` from the dentry cache and drop the cache's reference.
 * The cache's ref must be the only one remaining (asserted). */
vfs_dcache_remove(struct v_dnode* dnode)
    assert(dnode->ref_count == 1);
    llist_delete(&dnode->siblings);
    hlist_delete(&dnode->hash_list);
    dnode->parent = NULL;
    atomic_fetch_sub(&dnode->ref_count, 1);
/* Move `dnode` under `new_parent`, re-hashing its (possibly renamed) name. */
vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
    hstr_rehash(&dnode->name, HSTR_FULL_HASH);
    vfs_dcache_remove(dnode);
    vfs_dcache_add(new_parent, dnode);
/* Open `dnode` and hand back a freshly allocated v_file through `*file`.
 * Requires an inode with an open() op; on success pins the dnode and
 * marks its mount point busy. */
vfs_open(struct v_dnode* dnode, struct v_file** file)
    if (!dnode->inode || !dnode->inode->ops->open) {
    struct v_inode* inode = dnode->inode;
    struct v_file* vfile = cake_grab(file_pile);
    memset(vfile, 0, sizeof(*vfile));
    vfile->dnode = dnode;
    vfile->inode = inode;
    vfile->ref_count = ATOMIC_VAR_INIT(1);
    vfile->ops = inode->default_fops;
    // Lazily attach a page cache to regular files on first open; the
    // pcache stays with the inode, so it is not torn down on failure below.
    if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
        struct pcache* pcache = vzalloc(sizeof(struct pcache));
        pcache->master = inode;
        inode->pg_cache = pcache;
    int errno = inode->ops->open(inode, vfile);
        // fs-specific open failed: return the v_file to its pile
        cake_release(file_pile, vfile);
    // success: pin the dnode and its mount point
    atomic_fetch_add(&dnode->ref_count, 1);
    mnt_mkbusy(dnode->mnt);
/* Bind `inode` to `assign_to`, dropping one link of any inode it replaces. */
vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
    if (assign_to->inode) {
        assign_to->inode->link_count--;
    assign_to->inode = inode;
/* Create a hard link `name` referring to `to_link`'s inode.
 * Fails across superblocks or when the fs provides no link() op. */
vfs_link(struct v_dnode* to_link, struct v_dnode* name)
    if ((errno = vfs_check_writable(to_link))) {
    lock_inode(to_link->inode);
    // hard links must stay within a single file system (same sb root)
    if (to_link->super_block->root != name->super_block->root) {
    } else if (!to_link->inode->ops->link) {
    } else if (!(errno = to_link->inode->ops->link(to_link->inode, name))) {
        vfs_assign_inode(name, to_link->inode);
    unlock_inode(to_link->inode);
/* Close `file`: unpin its dnode/mount, flush the page cache, free it. */
vfs_close(struct v_file* file)
    if (!(errno = file->ops->close(file))) {
        atomic_fetch_sub(&file->dnode->ref_count, 1);
        // NOTE(review): open_count is decremented non-atomically while
        // ref_count uses atomics — confirm external locking makes this safe.
        file->inode->open_count--;
        mnt_chillax(file->dnode->mnt);
        pcache_commit_all(file->inode);
        cake_release(file_pile, file);
/* Flush `file`'s page cache and invoke the fs-specific sync op, if any. */
vfs_fsync(struct v_file* file)
    if ((errno = vfs_check_writable(file->dnode))) {
    lock_inode(file->inode);
    pcache_commit_all(file->inode);
    if (file->ops->sync) {
        errno = file->ops->sync(file);
    unlock_inode(file->inode);
/* Find the lowest free slot in the current process' fd table. */
vfs_alloc_fdslot(int* fd)
    for (size_t i = 0; i < VFS_MAX_FD; i++) {
        if (!__current->fdtable->fds[i]) {
    /* Allocate and zero a superblock, with its own inode hash table. */
    struct v_superblock* sb = cake_grab(superblock_pile);
    memset(sb, 0, sizeof(*sb));
    llist_init_head(&sb->sb_list);
    sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
/* Release a superblock back to its pile.
 * NOTE(review): i_cache allocated above is not vfreed on this visible
 * path — verify it is released elsewhere (potential leak). */
vfs_sb_free(struct v_superblock* sb)
    cake_release(superblock_pile, sb);
/* LRU eviction callback: free a dnode once nothing references it. */
__vfs_try_evict_dnode(struct lru_node* obj)
    struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);
    if (!dnode->ref_count) {
/* LRU eviction callback: free an inode with no links and no open files. */
__vfs_try_evict_inode(struct lru_node* obj)
    struct v_inode* inode = container_of(obj, struct v_inode, lru);
    if (!inode->link_count && !inode->open_count) {
/* Allocate a dnode named `name` under `parent` (NULL parent only for the
 * root). Under pile exhaustion, evicts half the dnode LRU and retries. */
vfs_d_alloc(struct v_dnode* parent, struct hstr* name)
    struct v_dnode* dnode = cake_grab(dnode_pile);
    // allocation pressure: evict and retry once before giving up
    lru_evict_half(dnode_lru);
    if (!(dnode = cake_grab(dnode_pile))) {
    memset(dnode, 0, sizeof(*dnode));
    llist_init_head(&dnode->children);
    llist_init_head(&dnode->siblings);
    mutex_init(&dnode->lock);
    dnode->ref_count = ATOMIC_VAR_INIT(0);
    // the name buffer is owned by the dnode; released in vfs_d_free
    dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
    hstrcpy(&dnode->name, name);
    // inherit superblock and mount point from the parent
    dnode->super_block = parent->super_block;
    dnode->mnt = parent->mnt;
    lru_use_one(dnode_lru, &dnode->lru);
/* Free `dnode`: drop its inode link, detach it (and its children) from the
 * dentry cache, then release the name buffer and the dnode itself. */
vfs_d_free(struct v_dnode* dnode)
    assert(dnode->ref_count == 1);
    assert(dnode->inode->link_count > 0);
    dnode->inode->link_count--;
    vfs_dcache_remove(dnode);
    // Ensure the children de-reference their parent. With the LRU in
    // place, eviction will eventually propagate over the entire
    // detached subtree.
    struct v_dnode *pos, *n;
    llist_for_each(pos, n, &dnode->children, siblings)
        vfs_dcache_remove(pos);
    vfree(dnode->name.value);
    cake_release(dnode_pile, dnode);
/* Look up inode `i_id` in `sb`'s inode cache; refreshes its LRU slot. */
vfs_i_find(struct v_superblock* sb, uint32_t i_id)
    struct hbucket* slot = &sb->i_cache[i_id & VFS_HASH_MASK];
    struct v_inode *pos, *n;
    hashtable_bucket_foreach(slot, pos, n, hash_list)
        if (pos->id == i_id) {
            lru_use_one(inode_lru, &pos->lru);
/* (Re)insert `inode` into its superblock's inode hash table. */
vfs_i_addhash(struct v_inode* inode)
    struct hbucket* slot = &inode->sb->i_cache[inode->id & VFS_HASH_MASK];
    // delete first so re-hashing an already-hashed inode is safe
    hlist_delete(&inode->hash_list);
    hlist_add(&slot->head, &inode->hash_list);
/* Allocate an inode for `sb`, evicting from the inode LRU under pressure.
 * The superblock must provide the fs-specific init_inode hook. */
vfs_i_alloc(struct v_superblock* sb)
    assert(sb->ops.init_inode);
    struct v_inode* inode;
    if (!(inode = cake_grab(inode_pile))) {
        lru_evict_half(inode_lru);
        if (!(inode = cake_grab(inode_pile))) {
    memset(inode, 0, sizeof(*inode));
    mutex_init(&inode->lock);
    llist_init_head(&inode->xattrs);
    sb->ops.init_inode(sb, inode);
    // stamp creation time; access/modify times start equal to it
    inode->ctime = clock_unixtime();
    inode->atime = inode->ctime;
    inode->mtime = inode->ctime;
    lru_use_one(inode_lru, &inode->lru);
/* Free `inode`: drop its page cache, sync it out, unhash and release. */
vfs_i_free(struct v_inode* inode)
    if (inode->pg_cache) {
        pcache_release(inode->pg_cache);
        vfree(inode->pg_cache);
    inode->ops->sync(inode);
    hlist_delete(&inode->hash_list);
    cake_release(inode_pile, inode);
468 /* ---- System call definition and support ---- */
/* Flag for __vfs_try_locate_file: create the final component if absent. */
#define FLOCATE_CREATE_EMPTY 1
/* Fetch and validate the v_fd for `fd` from the current fd table. */
vfs_getfd(int fd, struct v_fd** fd_s)
    if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {
/* Resolve `path` into (*fdir, *file). With FLOCATE_CREATE_EMPTY, a missing
 * final component is created through the parent inode's create() op. */
__vfs_try_locate_file(const char* path,
                      struct v_dnode** fdir,
                      struct v_dnode** file,
    char name_str[VFS_NAME_MAXLEN];
    struct hstr name = HSTR(name_str, 0);
    // walk to the parent directory, capturing the final component in `name`
    if ((errno = vfs_walk_proc(path, fdir, &name, VFS_WALK_PARENT))) {
    errno = vfs_walk(*fdir, name.value, file, NULL, 0);
    if (errno != ENOENT || !(options & FLOCATE_CREATE_EMPTY)) {
    // final component absent: create an empty entry under the parent
    struct v_dnode* parent = *fdir;
    struct v_dnode* file_new = vfs_d_alloc(parent, &name);
    if (!(errno = parent->inode->ops->create(parent->inode, file_new))) {
        vfs_dcache_add(parent, file_new);
        // creation failed: discard the unused dnode
        vfs_d_free(file_new);
    unlock_dnode(parent);
/* Open `path` (optionally creating it) and install a new fd for it. */
vfs_do_open(const char* path, int options)
    struct v_dnode *dentry, *file;
    struct v_file* ofile = 0;
    errno = __vfs_try_locate_file(
        path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);
    if (errno || (errno = vfs_open(file, &ofile))) {
    struct v_inode* o_inode = ofile->inode;
    if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
        struct v_fd* fd_s = vzalloc(sizeof(*fd_s));
        // FO_APPEND: start at end of file (mask is all-ones when set, 0 else)
        ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);
        fd_s->flags = options;
        __current->fdtable->fds[fd] = fd_s;
/* open(2) */
__DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
    int errno = vfs_do_open(path, options);
    return DO_STATUS_OR_RETURN(errno);
/* close(2): drop one file reference; full teardown on the last one. */
__DEFINE_LXSYSCALL1(int, close, int, fd)
    if ((errno = vfs_getfd(fd, &fd_s))) {
    if (fd_s->file->ref_count > 1) {
        // NOTE(review): non-atomic decrement of a refcount that is
        // manipulated with atomics elsewhere — confirm lock protection.
        fd_s->file->ref_count--;
    } else if ((errno = vfs_close(fd_s->file))) {
    __current->fdtable->fds[fd] = 0;
    return DO_STATUS(errno);
/* dir_context callback: copy one entry's name/type into the user dirent. */
__vfs_readdir_callback(struct dir_context* dctx,
    struct dirent* dent = (struct dirent*)dctx->cb_data;
    strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);
    dent->d_type = dtype;
/* readdir(2): emit synthetic "." and ".." first, then real fs entries. */
__DEFINE_LXSYSCALL2(int, readdir, int, fd, struct dirent*, dent)
    if ((errno = vfs_getfd(fd, &fd_s))) {
    struct v_inode* inode = fd_s->file->inode;
    if (!(inode->itype & VFS_IFDIR)) {
    struct dir_context dctx =
      (struct dir_context){ .cb_data = dent,
                            .index = dent->d_offset,
                            .read_complete_callback =
                              __vfs_readdir_callback };
    // offsets 0 and 1 are the synthetic "." and ".." entries
    if (dent->d_offset == 0) {
        __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, DT_DIR);
    } else if (dent->d_offset == 1) {
        __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, DT_DIR);
    if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {
    return DO_STATUS_OR_RETURN(errno);
/* read(2): direct read for sequential devices / FO_DIRECT, else via the
 * page cache. Directories are rejected. */
__DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
    if ((errno = vfs_getfd(fd, &fd_s))) {
    struct v_file* file = fd_s->file;
    if ((file->inode->itype & VFS_IFDIR)) {
    lock_inode(file->inode);
    file->inode->atime = clock_unixtime();
    if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
        errno = file->ops->read(file->inode, buf, count, file->f_pos);
        errno = pcache_read(file->inode, buf, count, file->f_pos);
    // on success `errno` holds the number of bytes read; advance f_pos
    file->f_pos += errno;
    unlock_inode(file->inode);
    unlock_inode(file->inode);
    return DO_STATUS(errno);
/* write(2): interruptible; direct write for sequential devices /
 * FO_DIRECT, else through the page cache. Directories are rejected. */
__DEFINE_LXSYSCALL3(int, write, int, fd, void*, buf, size_t, count)
    if ((errno = vfs_getfd(fd, &fd_s))) {
    struct v_file* file = fd_s->file;
    if ((errno = vfs_check_writable(file->dnode))) {
    if ((file->inode->itype & VFS_IFDIR)) {
    lock_inode(file->inode);
    file->inode->mtime = clock_unixtime();
    __SYSCALL_INTERRUPTIBLE({
        if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
            errno = file->ops->write(file->inode, buf, count, file->f_pos);
            errno = pcache_write(file->inode, buf, count, file->f_pos);
    // on success `errno` holds the number of bytes written; advance f_pos
    file->f_pos += errno;
    unlock_inode(file->inode);
    unlock_inode(file->inode);
    return DO_STATUS(errno);
/* lseek(2): compute the new position with signed-overflow checks, then
 * delegate to the file's seek op. */
__DEFINE_LXSYSCALL3(int, lseek, int, fd, int, offset, int, options)
    if ((errno = vfs_getfd(fd, &fd_s))) {
    struct v_file* file = fd_s->file;
    if (!file->ops->seek) {
    lock_inode(file->inode);
    int fpos = file->f_pos;
    // offset arithmetic guarded against signed overflow
    overflow = __builtin_sadd_overflow((int)file->f_pos, offset, &fpos);
    __builtin_sadd_overflow((int)file->inode->fsize, offset, &fpos);
    } else if (!(errno = file->ops->seek(file->inode, fpos))) {
    unlock_inode(file->inode);
    return DO_STATUS(errno);
/* Recursively build the absolute path of `dnode` into `buf` (root first);
 * returns the number of bytes written so far. */
vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
    if (!dnode || dnode->parent == dnode) {
    size_t len = vfs_get_path(dnode->parent, buf, size, depth + 1);
    buf[len++] = VFS_PATH_DELIM;
    // clamp the component copy to the space remaining in buf
    size_t cpy_size = MIN(dnode->name.len, size - len);
    strncpy(buf + len, dnode->name.value, cpy_size);
/* Read a symlink's target into `buf` via the inode's read_symlink op. */
vfs_readlink(struct v_dnode* dnode, char* buf, size_t size)
    struct v_inode* inode = dnode->inode;
    if (inode->ops->read_symlink) {
        int errno = inode->ops->read_symlink(inode, &link);
        // NOTE(review): strncpy may leave buf unterminated when the target
        // fills `size` exactly — confirm callers tolerate this.
        strncpy(buf, link, size);
797 vfs_get_dtype(int itype)
/* realpathat(2): write the absolute path of `fd`'s dnode into `buf`. */
__DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
    if ((errno = vfs_getfd(fd, &fd_s))) {
    struct v_dnode* dnode;
    errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);
    return DO_STATUS(errno);
/* readlink(2): resolve `path` without following the final symlink. */
__DEFINE_LXSYSCALL3(int, readlink, const char*, path, char*, buf, size_t, size)
    struct v_dnode* dnode;
    if (!(errno = vfs_walk_proc(path, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
        errno = vfs_readlink(dnode, buf, size);
    return DO_STATUS(errno);
843 __DEFINE_LXSYSCALL4(int,
856 if ((errno = vfs_getfd(dirfd, &fd_s))) {
860 struct v_dnode* dnode;
861 if (!(errno = vfs_walk(
862 fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
863 errno = vfs_readlink(fd_s->file->dnode, buf, size);
871 return DO_STATUS(errno);
876 When we perform operation that could affect the layout of
877 directory (i.e., rename, mkdir, rmdir). We must lock the parent dir
whenever possible. This will block any ongoing path walk from reaching
it, hence avoiding any partial state.
/* rmdir(2): remove an empty, unreferenced directory. */
__DEFINE_LXSYSCALL1(int, rmdir, const char*, pathname)
    struct v_dnode* dnode;
    if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
        return DO_STATUS(errno);
    if ((errno = vfs_check_writable(dnode))) {
    // reject read-only file systems
    if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
    // busy: directory is referenced or has open files
    if (dnode->ref_count > 1 || dnode->inode->open_count) {
    // must be empty
    if (!llist_empty(&dnode->children)) {
    struct v_dnode* parent = dnode->parent;
    // lock the parent to fence off concurrent path walks (see note above)
    lock_inode(parent->inode);
    if ((dnode->inode->itype & VFS_IFDIR)) {
        errno = parent->inode->ops->rmdir(parent->inode, dnode);
        vfs_dcache_remove(dnode);
    unlock_inode(parent->inode);
    unlock_dnode(parent);
    return DO_STATUS(errno);
/* mkdir(2): create a directory under the resolved parent. */
__DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
    struct v_dnode *parent, *dir;
    char name_value[VFS_NAME_MAXLEN];
    struct hstr name = HHSTR(name_value, 0, 0);
    // resolve the parent; the new directory's name lands in `name`
    if ((errno = vfs_walk_proc(path, &parent, &name, VFS_WALK_PARENT))) {
    if ((errno = vfs_check_writable(parent))) {
    if (!(dir = vfs_d_alloc(parent, &name))) {
    lock_inode(parent->inode);
    // reject read-only fs, missing mkdir op, or a non-directory parent
    if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
    } else if (!parent->inode->ops->mkdir) {
    } else if (!(parent->inode->itype & VFS_IFDIR)) {
    } else if (!(errno = parent->inode->ops->mkdir(parent->inode, dir))) {
        vfs_dcache_add(parent, dir);
    unlock_inode(parent->inode);
    unlock_dnode(parent);
    return DO_STATUS(errno);
/* Common unlink path: reject busy dnodes, open inodes and directories,
 * then delegate to the inode's unlink op. */
__vfs_do_unlink(struct v_dnode* dnode)
    struct v_inode* inode = dnode->inode;
    if (dnode->ref_count > 1) {
    if ((errno = vfs_check_writable(dnode))) {
    if (inode->open_count) {
    } else if (!(inode->itype & VFS_IFDIR)) {
        // The underlying unlink implementation should handle
        errno = inode->ops->unlink(inode);
    unlock_inode(inode);
/* unlink(2) */
__DEFINE_LXSYSCALL1(int, unlink, const char*, pathname)
    struct v_dnode* dnode;
    if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
    errno = __vfs_do_unlink(dnode);
    return DO_STATUS(errno);
/* unlinkat(2): like unlink, but relative to the directory at `fd`. */
__DEFINE_LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
    if ((errno = vfs_getfd(fd, &fd_s))) {
    struct v_dnode* dnode;
    if (!(errno = vfs_walk(fd_s->file->dnode, pathname, &dnode, NULL, 0))) {
        errno = __vfs_do_unlink(dnode);
    return DO_STATUS(errno);
/* link(2): locate the old target, create the new name, hard-link them. */
__DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
    struct v_dnode *dentry, *to_link, *name_dentry, *name_file;
    errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
    errno = __vfs_try_locate_file(
        newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
    } else if (name_file) {
        errno = vfs_link(to_link, name_file);
    return DO_STATUS(errno);
/* fsync(2) */
__DEFINE_LXSYSCALL1(int, fsync, int, fildes)
    if (!(errno = vfs_getfd(fildes, &fd_s))) {
        errno = vfs_fsync(fd_s->file);
    return DO_STATUS(errno);
/* Duplicate fd bookkeeping: copy the v_fd and take a file reference. */
vfs_dup_fd(struct v_fd* old, struct v_fd** new)
    struct v_fd* copied = cake_grab(fd_pile);
    memcpy(copied, old, sizeof(struct v_fd));
    atomic_fetch_add(&old->file->ref_count, 1);
/* dup2 semantics: no-op when oldfd == newfd; close newfd if occupied. */
vfs_dup2(int oldfd, int newfd)
    if (newfd == oldfd) {
    struct v_fd *oldfd_s, *newfd_s;
    if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
    if (!TEST_FD(newfd)) {
    // close whatever currently occupies newfd before reusing the slot
    newfd_s = __current->fdtable->fds[newfd];
    if (newfd_s && (errno = vfs_close(newfd_s->file))) {
    if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
        __current->fdtable->fds[newfd] = newfd_s;
    return DO_STATUS(errno);
/* dup2(2) */
__DEFINE_LXSYSCALL2(int, dup2, int, oldfd, int, newfd)
    return vfs_dup2(oldfd, newfd);
/* dup(2): duplicate onto the lowest free fd slot. */
__DEFINE_LXSYSCALL1(int, dup, int, oldfd)
    struct v_fd *oldfd_s, *newfd_s;
    if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
    if (!(errno = vfs_alloc_fdslot(&newfd)) &&
        !(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
        __current->fdtable->fds[newfd] = newfd_s;
    return DO_STATUS(errno);
/* symlink(2)-style call: set the link target via set_symlink. */
__DEFINE_LXSYSCALL2(int,
    struct v_dnode* dnode;
    if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
    // NOTE(review): assignment used as condition without the customary
    // extra parentheses — compilers warn here (-Wparentheses).
    if (errno = vfs_check_writable(dnode)) {
    if (!dnode->inode->ops->set_symlink) {
    lock_inode(dnode->inode);
    errno = dnode->inode->ops->set_symlink(dnode->inode, link_target);
    unlock_inode(dnode->inode);
    return DO_STATUS(errno);
/* Shared chdir core: validate the directory, swap __current->cwd
 * references and mount busy-marks. */
__vfs_do_chdir(struct v_dnode* dnode)
    if (!(dnode->inode->itype & VFS_IFDIR)) {
    // release the old cwd's pin before taking the new one
    if (__current->cwd) {
        atomic_fetch_sub(&__current->cwd->ref_count, 1);
        mnt_chillax(__current->cwd->mnt);
    atomic_fetch_add(&dnode->ref_count, 1);
    mnt_mkbusy(dnode->mnt);
    __current->cwd = dnode;
    unlock_dnode(dnode);
/* chdir(2) */
__DEFINE_LXSYSCALL1(int, chdir, const char*, path)
    struct v_dnode* dnode;
    if ((errno = vfs_walk_proc(path, &dnode, NULL, 0))) {
    errno = __vfs_do_chdir(dnode);
    return DO_STATUS(errno);
/* fchdir(2) */
__DEFINE_LXSYSCALL1(int, fchdir, int, fd)
    if ((errno = vfs_getfd(fd, &fd_s))) {
    errno = __vfs_do_chdir(fd_s->file->dnode);
    return DO_STATUS(errno);
/* getcwd(2): "/" when no cwd is set, else rebuild the path from dnodes. */
__DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
    if (!__current->cwd) {
        *buf = VFS_PATH_DELIM;
    len = vfs_get_path(__current->cwd, buf, size, 0);
    // NOTE(review): terminator written at len + 1 leaves buf[len]
    // untouched on this path — verify the offset is intended (off-by-one?).
    buf[len + 1] = '\0';
    __current->k_status = errno;
/* Rename `current` onto `target`: same superblock only, neither may be
 * referenced elsewhere; locks the child then both parents to fence off
 * concurrent path walks. */
vfs_do_rename(struct v_dnode* current, struct v_dnode* target)
    // renaming onto the same inode is a no-op
    if (current->inode->id == target->inode->id) {
    if (errno = vfs_check_writable(current)) {
    if (current->ref_count > 1 || target->ref_count > 1) {
    // cross-filesystem rename is not supported
    if (current->super_block != target->super_block) {
    struct v_dnode* oldparent = current->parent;
    struct v_dnode* newparent = target->parent;
    lock_dnode(current);
    lock_dnode(oldparent);
    lock_dnode(newparent);
    // the existing target entry must have no children
    if (!llist_empty(&target->children)) {
        unlock_dnode(target);
    current->inode->ops->rename(current->inode, current, target))) {
        unlock_dnode(target);
    // re-position current under the new parent with the target's name
    // (fixed mojibake: "&current" had been mangled into "¤t")
    hstrcpy(&current->name, &target->name);
    vfs_dcache_rehash(newparent, current);
    unlock_dnode(target);
    unlock_dnode(current);
    unlock_dnode(oldparent);
    unlock_dnode(newparent);
/* rename(2): resolve the old path and the target's parent, create the
 * target dnode on ENOENT, then delegate to vfs_do_rename. */
__DEFINE_LXSYSCALL2(int, rename, const char*, oldpath, const char*, newpath)
    struct v_dnode *cur, *target_parent, *target;
    // NOTE(review): heap-allocated name buffer — confirm it is vfreed on
    // every exit path (potential leak).
    struct hstr name = HSTR(valloc(VFS_NAME_MAXLEN), 0);
    if ((errno = vfs_walk_proc(oldpath, &cur, NULL, 0))) {
    if ((errno = vfs_walk(
          __current->cwd, newpath, &target_parent, &name, VFS_WALK_PARENT))) {
    errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
    if (errno == ENOENT) {
        // target does not exist yet: allocate a fresh dnode for it
        target = vfs_d_alloc(target_parent, &name);
        vfs_dcache_add(target_parent, target);
    errno = vfs_do_rename(cur, target);
    return DO_STATUS(errno);