3 * @author Lunaixsky (zelong56@gmail.com)
4 * @brief Lunaix virtual file system - an abstraction layer for all file system.
8 * @copyright Copyright (c) 2022
12 // Welcome to The Mountain O'Shit! :)
15 TODO vfs & device todos checklist
17 It is overseen by Twilight Sparkle ;)
19 1. Get inodes hooked into lru (CHECKED)
20 2. Get dnodes hooked into lru (CHECKED)
21 3. Get inodes properly hashed so they can be reused by underlying fs (CHECKED)
22 4. (lru) Add a callback function (or destructor) for eviction. (CHECKED)
23 [good idea] or a constructor/destructor pattern in cake allocator ?
24 5. (mount) Figure out a way to identify a busy mount point before unmount
25 maybe a unified mount_point structure that maintain a referencing
26 counter on any dnodes within the subtree? Such a counter will only
27 increment if a file is opened or a dnode is being used as working
28 directory and decrement conversely. (CHECKED)
29 6. (mount) Ability to track all mount points (including sub-mounts)
30 so we can be confident to clean up everything when we
32 7. (mount) Figure out a way to acquire the device represented by a dnode.
33 so it can be used to mount. (e.g. we wish to get `struct device*`
34 out of the dnode at /dev/sda)
35 [tip] we should pay attention at twifs and add a private_data field
36 under struct v_dnode? (CHECKED)
37 8. (mount) Then, we should refactor on mount/unmount mechanism. (CHECKED)
38 9. (mount) (future) Ability to mount any thing? e.g. Linux can mount a disk
39 image file using a so called "loopback" pseudo device. Maybe
40 we can do similar thing in Lunaix? A block device emulation
41 above the regular file when we mount it on.
42 10. (device) device number (dev_t) allocation
43 [good idea] <class>:<subclass>:<uniq_id> composition
46 #include <klibc/string.h>
47 #include <lunaix/dirent.h>
48 #include <lunaix/foptions.h>
49 #include <lunaix/fs.h>
50 #include <lunaix/mm/cake.h>
51 #include <lunaix/mm/page.h>
52 #include <lunaix/mm/valloc.h>
53 #include <lunaix/process.h>
54 #include <lunaix/spike.h>
55 #include <lunaix/syscall.h>
57 #include <lunaix/fs/twifs.h>
// Slab-style "cake" allocator piles, one per core VFS object type, so each
// object class recycles its own fixed-size cakes (see vfs_init below).
59 static struct cake_pile* dnode_pile;
60 static struct cake_pile* inode_pile;
61 static struct cake_pile* file_pile;
62 static struct cake_pile* superblock_pile;
63 static struct cake_pile* fd_pile;
// Root of the global dnode tree; vfs_init makes it its own parent.
65 struct v_dnode* vfs_sysroot;
// Hash table of cached dnodes, keyed on (parent pointer, name hash) —
// see __dcache_hash.
66 static struct hbucket* dnode_cache;
// LRU zones driving eviction of cached dnodes/inodes via the
// __vfs_try_evict_* callbacks declared below.
68 struct lru_zone *dnode_lru, *inode_lru;
// Pre-hashed well-known path components ("..", ".", "") — rehashed once in
// vfs_init so lookups can compare hashes cheaply.
70 struct hstr vfs_ddot = HSTR("..", 2);
71 struct hstr vfs_dot = HSTR(".", 1);
72 struct hstr vfs_empty = HSTR("", 0);
// Forward declarations (storage class / return type lines not visible in
// this view).
78 vfs_sb_free(struct v_superblock* sb);
81 __vfs_try_evict_dnode(struct lru_node* obj);
84 __vfs_try_evict_inode(struct lru_node* obj);
89 // Create a dedicated cake pile for each of these object types instead of
89 // using valloc, so that internal fragmentation is minimized.
90 dnode_pile = cake_new_pile("dnode_cache", sizeof(struct v_dnode), 1, 0);
91 inode_pile = cake_new_pile("inode_cache", sizeof(struct v_inode), 1, 0);
92 file_pile = cake_new_pile("file_cache", sizeof(struct v_file), 1, 0);
93 fd_pile = cake_new_pile("fd_cache", sizeof(struct v_fd), 1, 0);
// NOTE(review): the pile below is presumably assigned to superblock_pile on
// an elided portion of this line/nearby line — confirm it is not leaked.
95 cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
// Zero-initialized bucket array for the global dnode cache.
97 dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
// Register eviction callbacks with the LRU zones.
99 dnode_lru = lru_new_zone(__vfs_try_evict_dnode);
100 inode_lru = lru_new_zone(__vfs_try_evict_inode);
// Pre-compute full hashes for "." and ".." once, at init time.
102 hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
103 hstr_rehash(&vfs_dot, HSTR_FULL_HASH);
// The system root is its own parent; pin it with one reference so it can
// never be evicted.
106 vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
107 vfs_sysroot->parent = vfs_sysroot;
108 atomic_fetch_add(&vfs_sysroot->ref_count, 1);
// Compute the dnode-cache bucket for a (parent, name-hash) pair.
// `hash` is read (and folded with the parent pointer) to select a bucket.
111 inline struct hbucket*
112 __dcache_hash(struct v_dnode* parent, uint32_t* hash)
114 uint32_t _hash = *hash;
115 // Mix in the parent's pointer value to reduce the chance of collisions
115 // between identical names under different directories.
// NOTE(review): (uint32_t) cast of a pointer — fine on a 32-bit target,
// truncates on 64-bit; confirm the kernel is 32-bit only.
116 _hash += (uint32_t)parent;
// Fold high bits down before masking.
118 _hash = _hash ^ (_hash >> VFS_HASHBITS);
120 return &dnode_cache[_hash & VFS_HASH_MASK];
// Look up a cached child dnode of `parent` named `str`.
// "" and "." resolve to `parent` itself; ".." resolves to its parent.
124 vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
126 if (!str->len || HSTR_EQ(str, &vfs_dot))
129 if (HSTR_EQ(str, &vfs_ddot)) {
130 return parent->parent;
// Bucket selection folds in the parent pointer (see __dcache_hash).
133 uint32_t hash = str->hash;
134 struct hbucket* slot = __dcache_hash(parent, &hash);
136 struct v_dnode *pos, *n;
137 hashtable_bucket_foreach(slot, pos, n, hash_list)
// NOTE(review): only the 32-bit name hash is compared on this line; the
// full-string comparison (if any) is on lines not visible here — confirm
// hash collisions are handled.
139 if (pos->name.hash == hash) {
// Insert `dnode` into the cache as a child of `parent`.
// Takes one reference on `dnode`, links it into the parent's child list and
// into the hash table.
147 vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
151 atomic_fetch_add(&dnode->ref_count, 1);
152 dnode->parent = parent;
153 llist_append(&parent->children, &dnode->siblings);
155 struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
156 hlist_add(&bucket->head, &dnode->hash_list);
// Detach `dnode` from the cache: unlink from sibling list and hash table,
// drop the reference taken by vfs_dcache_add. Caller must hold the sole
// cache reference (asserted below).
160 vfs_dcache_remove(struct v_dnode* dnode)
163 assert(dnode->ref_count == 1);
165 llist_delete(&dnode->siblings);
166 hlist_delete(&dnode->hash_list);
168 dnode->parent = NULL;
169 atomic_fetch_sub(&dnode->ref_count, 1);
// Move `dnode` under `new_parent`, recomputing its name hash first so the
// re-insert lands in the right bucket.
173 vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
177 hstr_rehash(&dnode->name, HSTR_FULL_HASH);
178 vfs_dcache_remove(dnode);
179 vfs_dcache_add(new_parent, dnode);
// Open the file backing `dnode`, producing a new v_file in `*file`.
// Requires an inode with an `open` op; lazily attaches a page cache to
// regular files; pins the dnode and marks its mount busy on success.
183 vfs_open(struct v_dnode* dnode, struct v_file** file)
185 if (!dnode->inode || !dnode->inode->ops->open) {
189 struct v_inode* inode = dnode->inode;
// NOTE(review): cake_grab result is used unchecked here — confirm the
// allocator cannot return NULL on this path.
193 struct v_file* vfile = cake_grab(file_pile);
194 memset(vfile, 0, sizeof(*vfile));
196 vfile->dnode = dnode;
197 vfile->inode = inode;
198 vfile->ref_count = ATOMIC_VAR_INIT(1);
199 vfile->ops = inode->default_fops;
// Lazily create the page cache on first open of a regular file.
201 if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
202 struct pcache* pcache = vzalloc(sizeof(struct pcache));
204 pcache->master = inode;
205 inode->pg_cache = pcache;
// Delegate to the filesystem driver; on failure (lines elided) the vfile
// is released, otherwise the dnode gains a reference.
208 int errno = inode->ops->open(inode, vfile);
210 cake_release(file_pile, vfile);
212 atomic_fetch_add(&dnode->ref_count, 1);
214 mnt_mkbusy(dnode->mnt);
// Bind `inode` to `assign_to`, dropping one link from any inode previously
// bound to that dnode. (Link-count increment for `inode` is on lines not
// visible here — confirm.)
225 vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
227 if (assign_to->inode) {
228 assign_to->inode->link_count--;
230 assign_to->inode = inode;
// Create a hard link: make dnode `name` refer to `to_link`'s inode.
// Fails if the target fs is not writable, the two dnodes live under
// different superblock roots (no cross-fs hard links), or the fs has no
// `link` op.
235 vfs_link(struct v_dnode* to_link, struct v_dnode* name)
239 if ((errno = vfs_check_writable(to_link))) {
243 lock_inode(to_link->inode);
244 if (to_link->super_block->root != name->super_block->root) {
246 } else if (!to_link->inode->ops->link) {
248 } else if (!(errno = to_link->inode->ops->link(to_link->inode, name))) {
249 vfs_assign_inode(name, to_link->inode);
251 unlock_inode(to_link->inode);
// Close `file`: on driver-level success, unpin the dnode, decrement the
// inode's open count, un-busy the mount, flush the page cache and release
// the v_file back to its pile.
257 vfs_close(struct v_file* file)
260 if (!(errno = file->ops->close(file))) {
261 atomic_fetch_sub(&file->dnode->ref_count, 1);
262 file->inode->open_count--;
263 mnt_chillax(file->dnode->mnt);
265 pcache_commit_all(file->inode);
266 cake_release(file_pile, file);
// Flush `file` to backing store: commit all dirty pcache pages, then give
// the driver a chance to sync, under the inode lock. Requires a writable
// mount.
272 vfs_fsync(struct v_file* file)
275 if ((errno = vfs_check_writable(file->dnode))) {
279 lock_inode(file->inode);
281 pcache_commit_all(file->inode);
// `sync` is optional for drivers.
284 if (file->ops->sync) {
285 errno = file->ops->sync(file);
288 unlock_inode(file->inode);
// Find the lowest-numbered free slot in the current process's fd table.
// Linear scan; result returned through `fd` (return path not visible here).
294 vfs_alloc_fdslot(int* fd)
296 for (size_t i = 0; i < VFS_MAX_FD; i++) {
297 if (!__current->fdtable->fds[i]) {
// (superblock allocator — signature on lines not visible here)
// Grab a zeroed superblock from the pile and give it an inode hash table.
308 struct v_superblock* sb = cake_grab(superblock_pile);
309 memset(sb, 0, sizeof(*sb));
310 llist_init_head(&sb->sb_list);
311 sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
// Return a superblock to its pile.
// NOTE(review): the vfree of sb->i_cache is not visible in this view —
// confirm it happens (here or in a caller) to avoid a leak.
316 vfs_sb_free(struct v_superblock* sb)
319 cake_release(superblock_pile, sb);
// LRU eviction callback: a dnode is evictable only when nothing references
// it.
323 __vfs_try_evict_dnode(struct lru_node* obj)
325 struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);
327 if (!dnode->ref_count) {
// LRU eviction callback: an inode is evictable only when it has no links
// and no open files.
335 __vfs_try_evict_inode(struct lru_node* obj)
337 struct v_inode* inode = container_of(obj, struct v_inode, lru);
339 if (!inode->link_count && !inode->open_count) {
// Allocate and initialize a fresh dnode named `name` under `parent`
// (NULL parent allowed — used for the system root). New dnodes start with
// ref_count 0 and are registered with the dnode LRU.
347 vfs_d_alloc(struct v_dnode* parent, struct hstr* name)
349 struct v_dnode* dnode = cake_grab(dnode_pile);
// On allocation pressure, evict half the LRU and retry once (the guard
// around this retry path is on lines not visible here).
351 lru_evict_half(dnode_lru);
353 if (!(dnode = cake_grab(dnode_pile))) {
358 memset(dnode, 0, sizeof(*dnode));
359 llist_init_head(&dnode->children);
360 llist_init_head(&dnode->siblings);
361 mutex_init(&dnode->lock);
363 dnode->ref_count = ATOMIC_VAR_INIT(0);
// Name storage is a dedicated vzalloc'd buffer, freed in vfs_d_free.
364 dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
366 hstrcpy(&dnode->name, name);
// Inherit superblock and mount from the parent when one is given.
369 dnode->super_block = parent->super_block;
370 dnode->mnt = parent->mnt;
373 lru_use_one(dnode_lru, &dnode->lru);
// Destroy a dnode holding exactly one (the cache's) reference: drop the
// inode link, detach from the cache, orphan the children and free storage.
379 vfs_d_free(struct v_dnode* dnode)
381 assert(dnode->ref_count == 1);
384 assert(dnode->inode->link_count > 0);
385 dnode->inode->link_count--;
388 vfs_dcache_remove(dnode);
389 // Make sure the children stop referencing their parent.
390 // With the LRU present, eviction will propagate over the entire
391 // detached subtree eventually.
392 struct v_dnode *pos, *n;
393 llist_for_each(pos, n, &dnode->children, siblings)
395 vfs_dcache_remove(pos);
398 vfree(dnode->name.value);
399 cake_release(dnode_pile, dnode);
// Look up a cached inode by id in `sb`'s per-superblock inode hash table;
// a hit refreshes the inode's LRU position.
403 vfs_i_find(struct v_superblock* sb, uint32_t i_id)
405 struct hbucket* slot = &sb->i_cache[i_id & VFS_HASH_MASK];
406 struct v_inode *pos, *n;
407 hashtable_bucket_foreach(slot, pos, n, hash_list)
409 if (pos->id == i_id) {
410 lru_use_one(inode_lru, &pos->lru);
// (Re)insert `inode` into its superblock's inode hash table; the delete
// first makes this idempotent if the inode was already hashed.
419 vfs_i_addhash(struct v_inode* inode)
421 struct hbucket* slot = &inode->sb->i_cache[inode->id & VFS_HASH_MASK];
423 hlist_delete(&inode->hash_list);
424 hlist_add(&slot->head, &inode->hash_list);
// Allocate a fresh inode for `sb`, letting the fs driver initialize it via
// sb->ops.init_inode (mandatory — asserted). Retries once after evicting
// half the inode LRU if the pile is exhausted.
428 vfs_i_alloc(struct v_superblock* sb)
430 assert(sb->ops.init_inode);
432 struct v_inode* inode;
433 if (!(inode = cake_grab(inode_pile))) {
434 lru_evict_half(inode_lru);
435 if (!(inode = cake_grab(inode_pile))) {
440 memset(inode, 0, sizeof(*inode));
441 mutex_init(&inode->lock);
442 llist_init_head(&inode->xattrs);
444 sb->ops.init_inode(sb, inode);
// All three timestamps start at creation time.
447 inode->ctime = clock_unixtime();
448 inode->atime = inode->ctime;
449 inode->mtime = inode->ctime;
452 lru_use_one(inode_lru, &inode->lru);
// Destroy an inode: tear down its page cache, sync it one last time,
// unhash it and return it to the pile.
// NOTE(review): ops->sync is called unconditionally here while vfs_fsync
// treats file->ops->sync as optional — confirm inode ops->sync is always
// populated.
457 vfs_i_free(struct v_inode* inode)
459 if (inode->pg_cache) {
460 pcache_release(inode->pg_cache);
461 vfree(inode->pg_cache);
463 inode->ops->sync(inode);
464 hlist_delete(&inode->hash_list);
465 cake_release(inode_pile, inode);
468 /* ---- System call definition and support ---- */
// Option flag for __vfs_try_locate_file: create an empty file if the final
// path component does not exist.
470 #define FLOCATE_CREATE_EMPTY 1
// Translate a numeric fd into the current process's v_fd slot; fails for
// out-of-range or unopened descriptors.
473 vfs_getfd(int fd, struct v_fd** fd_s)
475 if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {
// Resolve `path` into its parent directory (*fdir) and final dnode (*file).
// With FLOCATE_CREATE_EMPTY, an ENOENT on the last component triggers
// creation of an empty file through the parent inode's `create` op.
482 __vfs_try_locate_file(const char* path,
483 struct v_dnode** fdir,
484 struct v_dnode** file,
// Name buffer lives on the stack; hstr wraps it for hashed comparisons.
487 char name_str[VFS_NAME_MAXLEN];
488 struct hstr name = HSTR(name_str, 0);
// First walk to the parent, capturing the final component in `name`.
492 if ((errno = vfs_walk_proc(path, fdir, &name, VFS_WALK_PARENT))) {
496 errno = vfs_walk(*fdir, name.value, file, NULL, 0);
// Only ENOENT + the create option proceeds to creation below.
497 if (errno != ENOENT || !(options & FLOCATE_CREATE_EMPTY)) {
501 struct v_dnode* parent = *fdir;
502 struct v_dnode* file_new = vfs_d_alloc(parent, &name);
// Driver-side creation; on success the new dnode enters the cache, on
// failure it is freed (error branch lines elided).
510 if (!(errno = parent->inode->ops->create(parent->inode, file_new))) {
511 vfs_dcache_add(parent, file_new);
514 vfs_d_free(file_new);
517 unlock_dnode(parent);
// Open `path` with FO_* options and install the resulting file into a free
// fd slot of the current process. Returns the fd or a negative status
// (exact return plumbing on elided lines).
523 vfs_do_open(const char* path, int options)
526 struct v_dnode *dentry, *file;
527 struct v_file* ofile = 0;
529 errno = __vfs_try_locate_file(
530 path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);
532 if (errno || (errno = vfs_open(file, &ofile))) {
536 struct v_inode* o_inode = ofile->inode;
538 if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
539 struct v_fd* fd_s = vzalloc(sizeof(*fd_s));
// Branchless: f_pos = fsize when FO_APPEND is set, else 0 (the comparison
// yields 0/1; negating gives an all-zeros/all-ones mask).
540 ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);
542 fd_s->flags = options;
543 __current->fdtable->fds[fd] = fd_s;
// sys_open: thin wrapper over vfs_do_open.
550 __DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
552 int errno = vfs_do_open(path, options);
553 return DO_STATUS_OR_RETURN(errno);
// sys_close: drop one reference on the open file; only the last reference
// actually closes it. The fd slot is cleared on success.
556 __DEFINE_LXSYSCALL1(int, close, int, fd)
560 if ((errno = vfs_getfd(fd, &fd_s))) {
564 if (fd_s->file->ref_count > 1) {
565 fd_s->file->ref_count--;
566 } else if ((errno = vfs_close(fd_s->file))) {
571 __current->fdtable->fds[fd] = 0;
574 return DO_STATUS(errno);
// dir_context callback: copy one directory entry into the user-supplied
// struct dirent stashed in cb_data.
// NOTE(review): strncpy does not NUL-terminate when `name` is
// >= DIRENT_NAME_MAX_LEN — confirm termination is guaranteed elsewhere.
578 __vfs_readdir_callback(struct dir_context* dctx,
583 struct dirent* dent = (struct dirent*)dctx->cb_data;
584 strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);
586 dent->d_type = dtype;
// sys_readdir: read the entry at dent->d_offset from an open directory fd.
589 __DEFINE_LXSYSCALL2(int, readdir, int, fd, struct dirent*, dent)
594 if ((errno = vfs_getfd(fd, &fd_s))) {
598 struct v_inode* inode = fd_s->file->inode;
// Only directories can be readdir'd.
602 if (!(inode->itype & VFS_IFDIR)) {
605 struct dir_context dctx =
606 (struct dir_context){ .cb_data = dent,
607 .index = dent->d_offset,
608 .read_complete_callback =
609 __vfs_readdir_callback };
// Offsets 0 and 1 are synthesized "." and ".." entries; real entries come
// from the driver's readdir op below.
611 if (dent->d_offset == 0) {
612 __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, 0);
613 } else if (dent->d_offset == 1) {
614 __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, 0);
// Driver readdir returns 1 on a produced entry (per this check) — other
// values are treated as status (surrounding lines elided).
617 if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {
628 return DO_STATUS_OR_RETURN(errno);
// sys_read: read up to `count` bytes at the file cursor. Sequential
// devices and FO_DIRECT bypass the page cache; regular reads go through
// pcache_read. On success `errno` holds the byte count (>= 0) and advances
// f_pos.
631 __DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
635 if ((errno = vfs_getfd(fd, &fd_s))) {
639 struct v_file* file = fd_s->file;
// Directories cannot be read through read(2).
640 if ((file->inode->itype & VFS_IFDIR)) {
645 lock_inode(file->inode);
647 file->inode->atime = clock_unixtime();
// The actual transfer may block; allow interruption by signals.
649 __SYSCALL_INTERRUPTIBLE({
650 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
651 errno = file->ops->read(file->inode, buf, count, file->f_pos);
653 errno = pcache_read(file->inode, buf, count, file->f_pos);
// NOTE(review): f_pos advance assumes errno >= 0 on this path; the
// negative-errno guard is on lines not visible here — confirm.
658 file->f_pos += errno;
659 unlock_inode(file->inode);
663 unlock_inode(file->inode);
666 return DO_STATUS(errno);
// sys_write: mirror image of read — checks writability first, stamps
// mtime, routes through driver write or pcache_write.
669 __DEFINE_LXSYSCALL3(int, write, int, fd, void*, buf, size_t, count)
673 if ((errno = vfs_getfd(fd, &fd_s))) {
677 struct v_file* file = fd_s->file;
679 if ((errno = vfs_check_writable(file->dnode))) {
683 if ((file->inode->itype & VFS_IFDIR)) {
688 lock_inode(file->inode);
690 file->inode->mtime = clock_unixtime();
692 __SYSCALL_INTERRUPTIBLE({
693 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
694 errno = file->ops->write(file->inode, buf, count, file->f_pos);
696 errno = pcache_write(file->inode, buf, count, file->f_pos);
701 file->f_pos += errno;
702 unlock_inode(file->inode);
706 unlock_inode(file->inode);
709 return DO_STATUS(errno);
// sys_lseek: reposition the file cursor. Uses checked signed addition so a
// huge offset cannot silently wrap (overflow handling on elided lines).
712 __DEFINE_LXSYSCALL3(int, lseek, int, fd, int, offset, int, options)
716 if ((errno = vfs_getfd(fd, &fd_s))) {
720 struct v_file* file = fd_s->file;
// Seek is optional; not every file type supports it.
722 if (!file->ops->seek) {
727 lock_inode(file->inode);
730 int fpos = file->f_pos;
// SEEK_CUR-style: current position + offset, overflow-checked.
733 overflow = __builtin_sadd_overflow((int)file->f_pos, offset, &fpos);
// SEEK_END-style: file size + offset (the assignment of this call's result
// is on a line not visible here — confirm the overflow flag is captured).
737 __builtin_sadd_overflow((int)file->inode->fsize, offset, &fpos);
// Driver validates/applies the new position; success updates f_pos
// (update line elided).
745 } else if (!(errno = file->ops->seek(file->inode, fpos))) {
749 unlock_inode(file->inode);
752 return DO_STATUS(errno);
// Recursively build the absolute path of `dnode` into `buf` (parent first,
// then delimiter, then this component). Recursion terminates at the
// self-parented root.
756 vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
758 if (!dnode || dnode->parent == dnode) {
766 size_t len = vfs_get_path(dnode->parent, buf, size, depth + 1);
772 buf[len++] = VFS_PATH_DELIM;
// Copy is bounded by remaining buffer space; may truncate long names.
774 size_t cpy_size = MIN(dnode->name.len, size - len);
775 strncpy(buf + len, dnode->name.value, cpy_size);
// Read the target of a symlink dnode into `buf` via the fs driver's
// read_symlink op.
// NOTE(review): strncpy here will not NUL-terminate when the link text is
// >= size — confirm callers tolerate that (readlink(2) semantics do not
// require termination, but verify this is intentional).
782 vfs_readlink(struct v_dnode* dnode, char* buf, size_t size)
785 struct v_inode* inode = dnode->inode;
786 if (inode->ops->read_symlink) {
789 int errno = inode->ops->read_symlink(inode, &link);
790 strncpy(buf, link, size);
// sys_realpathat: resolve an open fd back to its absolute path.
798 __DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
802 if ((errno = vfs_getfd(fd, &fd_s))) {
806 struct v_dnode* dnode;
807 errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);
814 return DO_STATUS(errno);
// sys_readlink: walk without following the final symlink, then read it.
817 __DEFINE_LXSYSCALL3(int, readlink, const char*, path, char*, buf, size_t, size)
820 struct v_dnode* dnode;
821 if (!(errno = vfs_walk_proc(path, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
822 errno = vfs_readlink(dnode, buf, size);
829 return DO_STATUS(errno);
// sys_readlinkat (parameter lines elided by this view).
832 __DEFINE_LXSYSCALL4(int,
845 if ((errno = vfs_getfd(dirfd, &fd_s))) {
849 struct v_dnode* dnode;
850 if (!(errno = vfs_walk(
851 fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
// FIXME(review): this reads the link of the *directory* dnode
// (fd_s->file->dnode) instead of the freshly resolved `dnode` from the
// walk above — almost certainly should be vfs_readlink(dnode, buf, size).
852 errno = vfs_readlink(fd_s->file->dnode, buf, size);
860 return DO_STATUS(errno);
865 When we perform operation that could affect the layout of
866 directory (i.e., rename, mkdir, rmdir). We must lock the parent dir
867 whenever possible. This will block any ongoing path walking from reaching
868 it, hence avoiding any partial state.
// sys_rmdir: remove an empty, unused directory. Rejected when the fs is
// read-only, the dnode is referenced elsewhere / has open files, or it has
// cached children.
871 __DEFINE_LXSYSCALL1(int, rmdir, const char*, pathname)
874 struct v_dnode* dnode;
875 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
876 return DO_STATUS(errno);
881 if ((errno = vfs_check_writable(dnode))) {
885 if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
// Busy check: more than the cache's own reference, or open files.
890 if (dnode->ref_count > 1 || dnode->inode->open_count) {
// Only empty directories may be removed.
895 if (!llist_empty(&dnode->children)) {
900 struct v_dnode* parent = dnode->parent;
// Lock the parent (dir-layout mutation — see the note above this block).
908 lock_inode(parent->inode);
910 if ((dnode->inode->itype & VFS_IFDIR)) {
911 errno = parent->inode->ops->rmdir(parent->inode, dnode);
913 vfs_dcache_remove(dnode);
919 unlock_inode(parent->inode);
920 unlock_dnode(parent);
924 return DO_STATUS(errno);
// sys_mkdir: create a directory under the walked parent. The new dnode is
// allocated first and only enters the cache once the driver's mkdir
// succeeds (failure cleanup on elided lines).
927 __DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
930 struct v_dnode *parent, *dir;
931 char name_value[VFS_NAME_MAXLEN];
932 struct hstr name = HHSTR(name_value, 0, 0);
934 if ((errno = vfs_walk_proc(path, &parent, &name, VFS_WALK_PARENT))) {
938 if ((errno = vfs_check_writable(parent))) {
942 if (!(dir = vfs_d_alloc(parent, &name))) {
948 lock_inode(parent->inode);
// Ordered guards: read-only fs, missing mkdir op, non-directory parent.
950 if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
952 } else if (!parent->inode->ops->mkdir) {
954 } else if (!(parent->inode->itype & VFS_IFDIR)) {
956 } else if (!(errno = parent->inode->ops->mkdir(parent->inode, dir))) {
957 vfs_dcache_add(parent, dir);
964 unlock_inode(parent->inode);
965 unlock_dnode(parent);
967 return DO_STATUS(errno);
// Core unlink: refuse when the dnode is referenced elsewhere, the fs is
// not writable, the inode is open, or it is a directory (use rmdir).
971 __vfs_do_unlink(struct v_dnode* dnode)
974 struct v_inode* inode = dnode->inode;
976 if (dnode->ref_count > 1) {
980 if ((errno = vfs_check_writable(dnode))) {
986 if (inode->open_count) {
988 } else if (!(inode->itype & VFS_IFDIR)) {
989 // The underlying unlink implementation should handle
991 errno = inode->ops->unlink(inode);
// sys_unlink: path-based wrapper over __vfs_do_unlink.
1004 __DEFINE_LXSYSCALL1(int, unlink, const char*, pathname)
1007 struct v_dnode* dnode;
1008 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
1012 errno = __vfs_do_unlink(dnode);
1015 return DO_STATUS(errno);
// sys_unlinkat: like unlink but relative to an open directory fd.
1018 __DEFINE_LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
1022 if ((errno = vfs_getfd(fd, &fd_s))) {
1026 struct v_dnode* dnode;
1027 if (!(errno = vfs_walk(fd_s->file->dnode, pathname, &dnode, NULL, 0))) {
1028 errno = __vfs_do_unlink(dnode);
1032 return DO_STATUS(errno);
// sys_link: locate the source, create (empty) destination entry, then hard
// link them via vfs_link.
1035 __DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
1038 struct v_dnode *dentry, *to_link, *name_dentry, *name_file;
1040 errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
1042 errno = __vfs_try_locate_file(
1043 newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
1046 } else if (name_file) {
1047 errno = vfs_link(to_link, name_file);
1050 return DO_STATUS(errno);
// sys_fsync: flush the file referenced by fildes.
1053 __DEFINE_LXSYSCALL1(int, fsync, int, fildes)
1058 if (!(errno = vfs_getfd(fildes, &fd_s))) {
1059 errno = vfs_fsync(fd_s->file);
1062 return DO_STATUS(errno);
// Duplicate an fd slot: shallow-copy the v_fd and take a reference on the
// shared open file.
// NOTE(review): cake_grab result is used unchecked — confirm NULL is
// impossible or handled on elided lines.
1066 vfs_dup_fd(struct v_fd* old, struct v_fd** new)
1069 struct v_fd* copied = cake_grab(fd_pile);
1071 memcpy(copied, old, sizeof(struct v_fd));
1073 atomic_fetch_add(&old->file->ref_count, 1);
// dup2 semantics: no-op when oldfd == newfd; otherwise close whatever
// occupies newfd, then install a duplicate of oldfd there.
1081 vfs_dup2(int oldfd, int newfd)
1083 if (newfd == oldfd) {
1088 struct v_fd *oldfd_s, *newfd_s;
1089 if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
1093 if (!TEST_FD(newfd)) {
1098 newfd_s = __current->fdtable->fds[newfd];
// If newfd is open, it must close cleanly before being replaced.
1099 if (newfd_s && (errno = vfs_close(newfd_s->file))) {
1103 if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1104 __current->fdtable->fds[newfd] = newfd_s;
1109 return DO_STATUS(errno);
// sys_dup2: thin wrapper.
1112 __DEFINE_LXSYSCALL2(int, dup2, int, oldfd, int, newfd)
1114 return vfs_dup2(oldfd, newfd);
// sys_dup: duplicate oldfd into the lowest free slot.
1117 __DEFINE_LXSYSCALL1(int, dup, int, oldfd)
1120 struct v_fd *oldfd_s, *newfd_s;
1121 if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
1125 if (!(errno = vfs_alloc_fdslot(&newfd)) &&
1126 !(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1127 __current->fdtable->fds[newfd] = newfd_s;
1132 return DO_STATUS(errno);
// sys_symlink (parameter lines elided): set a symlink target through the
// fs driver's set_symlink op.
1135 __DEFINE_LXSYSCALL2(int,
1143 struct v_dnode* dnode;
1144 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
// NOTE(review): deliberate assignment-in-condition, but unlike the rest of
// this file it lacks the extra parentheses — consider ((errno = ...)) for
// consistency and to silence -Wparentheses.
1148 if (errno = vfs_check_writable(dnode)) {
// set_symlink is optional per-fs.
1152 if (!dnode->inode->ops->set_symlink) {
1157 lock_inode(dnode->inode);
1159 errno = dnode->inode->ops->set_symlink(dnode->inode, link_target);
1161 unlock_inode(dnode->inode);
1164 return DO_STATUS(errno);
// Core chdir: validate the target is a directory, release the old cwd's
// pin (reference + mount busy count), then pin the new one.
1168 __vfs_do_chdir(struct v_dnode* dnode)
1174 if (!(dnode->inode->itype & VFS_IFDIR)) {
1179 if (__current->cwd) {
1180 atomic_fetch_sub(&__current->cwd->ref_count, 1);
1181 mnt_chillax(__current->cwd->mnt);
1184 atomic_fetch_add(&dnode->ref_count, 1);
1185 mnt_mkbusy(dnode->mnt);
1186 __current->cwd = dnode;
1188 unlock_dnode(dnode);
// sys_chdir: path-based wrapper.
1194 __DEFINE_LXSYSCALL1(int, chdir, const char*, path)
1196 struct v_dnode* dnode;
1199 if ((errno = vfs_walk_proc(path, &dnode, NULL, 0))) {
1203 errno = __vfs_do_chdir(dnode);
1206 return DO_STATUS(errno);
// sys_fchdir: fd-based wrapper.
1209 __DEFINE_LXSYSCALL1(int, fchdir, int, fd)
1214 if ((errno = vfs_getfd(fd, &fd_s))) {
1218 errno = __vfs_do_chdir(fd_s->file->dnode);
1221 return DO_STATUS(errno);
// sys_getcwd: write the current working directory path into buf. A process
// without a cwd reports the root delimiter.
1224 __DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
1235 if (!__current->cwd) {
1236 *buf = VFS_PATH_DELIM;
1239 len = vfs_get_path(__current->cwd, buf, size, 0);
// NOTE(review): terminating at len + 1 leaves buf[len] unset on this
// visible path and can write one past `len` — looks like an off-by-one;
// confirm against the elided lines (expected: buf[len] = '\0').
1246 buf[len + 1] = '\0';
1251 __current->k_status = errno;
// Core rename: move/rename `current` on top of `target`. Rejects
// same-inode no-ops, read-only fs, busy dnodes and cross-superblock moves;
// takes dnode locks on both nodes and both parents before mutating.
1256 vfs_do_rename(struct v_dnode* current, struct v_dnode* target)
1259 if (current->inode->id == target->inode->id) {
// NOTE(review): assignment-in-condition without the file's usual extra
// parentheses — consider ((errno = ...)).
1264 if (errno = vfs_check_writable(current)) {
1268 if (current->ref_count > 1 || target->ref_count > 1) {
// No cross-filesystem rename.
1272 if (current->super_block != target->super_block) {
1276 struct v_dnode* oldparent = current->parent;
1277 struct v_dnode* newparent = target->parent;
1279 lock_dnode(current);
1282 lock_dnode(oldparent);
1284 lock_dnode(newparent);
// Cannot replace a non-empty directory.
1286 if (!llist_empty(&target->children)) {
1288 unlock_dnode(target);
// Driver-level rename; failure path unlocks and bails (lines elided).
1293 current->inode->ops->rename(current->inode, current, target))) {
1294 unlock_dnode(target);
1298 // re-position current
1299 hstrcpy(&current->name, &target->name);
// Re-home `current` under the new parent in the dnode cache (rehashes its
// freshly copied name), then release all four dnode locks.
1300 vfs_dcache_rehash(newparent, current);
1305 unlock_dnode(target);
1308 unlock_dnode(current);
1310 unlock_dnode(oldparent);
1312 unlock_dnode(newparent);
// sys_rename: resolve the source, walk to the destination's parent, reuse
// an existing destination dnode or fabricate a fresh one for ENOENT, then
// delegate to vfs_do_rename.
1317 __DEFINE_LXSYSCALL2(int, rename, const char*, oldpath, const char*, newpath)
1319 struct v_dnode *cur, *target_parent, *target;
// NOTE(review): `name` wraps a valloc'd buffer; its vfree is on lines not
// visible here — confirm it is released on every exit path.
1320 struct hstr name = HSTR(valloc(VFS_NAME_MAXLEN), 0);
1323 if ((errno = vfs_walk_proc(oldpath, &cur, NULL, 0))) {
1327 if ((errno = vfs_walk(
1328 __current->cwd, newpath, &target_parent, &name, VFS_WALK_PARENT))) {
1332 errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
// Destination missing: create a placeholder dnode for the rename target.
1333 if (errno == ENOENT) {
1334 target = vfs_d_alloc(target_parent, &name);
1335 vfs_dcache_add(target_parent, target);
1345 errno = vfs_do_rename(cur, target);
1349 return DO_STATUS(errno);