3 * @author Lunaixsky (zelong56@gmail.com)
4 * @brief Lunaix virtual file system - an abstraction layer for all file systems.
8 * @copyright Copyright (c) 2022
12 // Welcome to The Mountain O'Shit! :)
15 TODO vfs & device todos checklist
17 It is overseen by Twilight Sparkle ;)
19 1. Get inodes hooked into lru (CHECKED)
20 2. Get dnodes hooked into lru (CHECKED)
21 3. Get inodes properly hashed so they can be reused by the underlying fs (CHECKED)
22 4. (lru) Add a callback function (or destructor) for eviction. (CHECKED)
23 [good idea] or a constructor/destructor pattern in the cake allocator?
24 5. (mount) Figure out a way to identify a busy mount point before unmounting
25 maybe a unified mount_point structure that maintains a reference
26 counter on any dnodes within the subtree? Such a counter will only
27 increment if a file is opened or a dnode is being used as a working
28 directory, and decrement conversely. (CHECKED)
29 6. (mount) Ability to track all mount points (including sub-mounts)
30 so we can confidently clean up everything when we
32 7. (mount) Figure out a way to acquire the device represented by a dnode,
33 so it can be used for mounting. (e.g. we wish to get `struct device*`
34 out of the dnode at /dev/sda)
35 [tip] we should pay attention to twifs and add a private_data field
36 under struct v_dnode? (CHECKED)
37 8. (mount) Then, we should refactor on mount/unmount mechanism. (CHECKED)
38 9. (mount) (future) Ability to mount anything? e.g. Linux can mount a disk
39 image file using a so-called "loopback" pseudo device. Maybe
40 we can do a similar thing in Lunaix? A block device emulation
41 layered above the regular file when we mount it.
42 10. (device) device number (dev_t) allocation
43 [good idea] <class>:<subclass>:<uniq_id> composition
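/*
 * A minimal sketch of the <class>:<subclass>:<uniq_id> composition from
 * item 10 above. The field widths and the DEV_* names here are assumptions
 * made purely for illustration; they are not an existing Lunaix API.
 */
#define DEV_CLASS_SHIFT 24
#define DEV_SUBCLASS_SHIFT 16
#define DEV_UNIQ_MASK 0xffffU

/* pack class, subclass and unique id into a single dev_t-like integer */
#define DEV_MKDEV(class, subclass, uniq)                                       \
    ((((class)&0xffU) << DEV_CLASS_SHIFT) |                                    \
     (((subclass)&0xffU) << DEV_SUBCLASS_SHIFT) | ((uniq)&DEV_UNIQ_MASK))

/* extract the individual fields back out */
#define DEV_CLASS(dev) (((dev) >> DEV_CLASS_SHIFT) & 0xffU)
#define DEV_SUBCLASS(dev) (((dev) >> DEV_SUBCLASS_SHIFT) & 0xffU)
#define DEV_UNIQ(dev) ((dev)&DEV_UNIQ_MASK)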
46 #include <klibc/string.h>
47 #include <lunaix/dirent.h>
48 #include <lunaix/foptions.h>
49 #include <lunaix/fs.h>
50 #include <lunaix/mm/cake.h>
51 #include <lunaix/mm/page.h>
52 #include <lunaix/mm/valloc.h>
53 #include <lunaix/process.h>
54 #include <lunaix/spike.h>
55 #include <lunaix/syscall.h>
57 #include <lunaix/fs/twifs.h>
59 static struct cake_pile* dnode_pile;
60 static struct cake_pile* inode_pile;
61 static struct cake_pile* file_pile;
62 static struct cake_pile* superblock_pile;
63 static struct cake_pile* fd_pile;
65 struct v_dnode* vfs_sysroot;
66 static struct hbucket* dnode_cache;
68 struct lru_zone *dnode_lru, *inode_lru;
70 struct hstr vfs_ddot = HSTR("..", 2);
71 struct hstr vfs_dot = HSTR(".", 1);
72 struct hstr vfs_empty = HSTR("", 0);
78 vfs_sb_free(struct v_superblock* sb);
81 __vfs_try_evict_dnode(struct lru_node* obj);
84 __vfs_try_evict_inode(struct lru_node* obj);
89 // Create a dedicated cake pile for these objects instead of using valloc,
   // so that internal fragmentation is minimized.
90 dnode_pile = cake_new_pile("dnode_cache", sizeof(struct v_dnode), 1, 0);
91 inode_pile = cake_new_pile("inode_cache", sizeof(struct v_inode), 1, 0);
92 file_pile = cake_new_pile("file_cache", sizeof(struct v_file), 1, 0);
93 fd_pile = cake_new_pile("fd_cache", sizeof(struct v_fd), 1, 0);
95 cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
97 dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
99 dnode_lru = lru_new_zone(__vfs_try_evict_dnode);
100 inode_lru = lru_new_zone(__vfs_try_evict_inode);
102 hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
103 hstr_rehash(&vfs_dot, HSTR_FULL_HASH);
106 vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
107 vfs_sysroot->parent = vfs_sysroot;
108 atomic_fetch_add(&vfs_sysroot->ref_count, 1);
111 inline struct hbucket*
112 __dcache_hash(struct v_dnode* parent, uint32_t* hash)
114 uint32_t _hash = *hash;
115 // Mix in the pointer value of parent to reduce the chance of collision.
116 _hash += (uint32_t)parent;
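// fold the upper bits down so they also contribute to the bucket index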
118 _hash = _hash ^ (_hash >> VFS_HASHBITS);
120 return &dnode_cache[_hash & VFS_HASH_MASK];
124 vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
126 if (!str->len || HSTR_EQ(str, &vfs_dot))
129 if (HSTR_EQ(str, &vfs_ddot)) {
130 return parent->parent;
133 uint32_t hash = str->hash;
134 struct hbucket* slot = __dcache_hash(parent, &hash);
136 struct v_dnode *pos, *n;
137 hashtable_bucket_foreach(slot, pos, n, hash_list)
139 if (pos->name.hash == hash) {
147 vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
151 atomic_fetch_add(&dnode->ref_count, 1);
152 dnode->parent = parent;
153 llist_append(&parent->children, &dnode->siblings);
155 struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
156 hlist_add(&bucket->head, &dnode->hash_list);
160 vfs_dcache_remove(struct v_dnode* dnode)
163 assert(dnode->ref_count == 1);
165 llist_delete(&dnode->siblings);
166 hlist_delete(&dnode->hash_list);
168 dnode->parent = NULL;
169 atomic_fetch_sub(&dnode->ref_count, 1);
173 vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
177 hstr_rehash(&dnode->name, HSTR_FULL_HASH);
178 vfs_dcache_remove(dnode);
179 vfs_dcache_add(new_parent, dnode);
183 vfs_open(struct v_dnode* dnode, struct v_file** file)
185 if (!dnode->inode || !dnode->inode->ops->open) {
189 struct v_inode* inode = dnode->inode;
193 struct v_file* vfile = cake_grab(file_pile);
194 memset(vfile, 0, sizeof(*vfile));
196 vfile->dnode = dnode;
197 vfile->inode = inode;
198 vfile->ref_count = ATOMIC_VAR_INIT(1);
199 vfile->ops = inode->default_fops;
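// lazily attach a page cache the first time a regular file is opened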
201 if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
202 struct pcache* pcache = vzalloc(sizeof(struct pcache));
204 pcache->master = inode;
205 inode->pg_cache = pcache;
208 int errno = inode->ops->open(inode, vfile);
210 cake_release(file_pile, vfile);
212 atomic_fetch_add(&dnode->ref_count, 1);
214 mnt_mkbusy(dnode->mnt);
225 vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
227 if (assign_to->inode) {
228 assign_to->inode->link_count--;
230 assign_to->inode = inode;
235 vfs_link(struct v_dnode* to_link, struct v_dnode* name)
239 if ((errno = vfs_check_writable(to_link))) {
243 lock_inode(to_link->inode);
244 if (to_link->super_block->root != name->super_block->root) {
246 } else if (!to_link->inode->ops->link) {
248 } else if (!(errno = to_link->inode->ops->link(to_link->inode, name))) {
249 vfs_assign_inode(name, to_link->inode);
251 unlock_inode(to_link->inode);
257 vfs_close(struct v_file* file)
260 if (!(errno = file->ops->close(file))) {
261 atomic_fetch_sub(&file->dnode->ref_count, 1);
262 file->inode->open_count--;
263 mnt_chillax(file->dnode->mnt);
265 pcache_commit_all(file->inode);
266 cake_release(file_pile, file);
272 vfs_fsync(struct v_file* file)
275 if ((errno = vfs_check_writable(file->dnode))) {
279 lock_inode(file->inode);
281 pcache_commit_all(file->inode);
284 if (file->ops->sync) {
285 errno = file->ops->sync(file);
288 unlock_inode(file->inode);
294 vfs_alloc_fdslot(int* fd)
296 for (size_t i = 0; i < VFS_MAX_FD; i++) {
297 if (!__current->fdtable->fds[i]) {
308 struct v_superblock* sb = cake_grab(superblock_pile);
309 memset(sb, 0, sizeof(*sb));
310 llist_init_head(&sb->sb_list);
311 sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
316 vfs_sb_free(struct v_superblock* sb)
319 cake_release(superblock_pile, sb);
323 __vfs_try_evict_dnode(struct lru_node* obj)
325 struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);
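// only dnodes that nobody references any more are eligible for eviction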
327 if (!dnode->ref_count) {
335 __vfs_try_evict_inode(struct lru_node* obj)
337 struct v_inode* inode = container_of(obj, struct v_inode, lru);
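// an inode can only be dropped once it has no links and no open files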
339 if (!inode->link_count && !inode->open_count) {
347 vfs_d_alloc(struct v_dnode* parent, struct hstr* name)
349 struct v_dnode* dnode = cake_grab(dnode_pile);
351 lru_evict_half(dnode_lru);
353 if (!(dnode = cake_grab(dnode_pile))) {
358 memset(dnode, 0, sizeof(*dnode));
359 llist_init_head(&dnode->children);
360 llist_init_head(&dnode->siblings);
361 mutex_init(&dnode->lock);
363 dnode->ref_count = ATOMIC_VAR_INIT(0);
364 dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
366 hstrcpy(&dnode->name, name);
369 dnode->super_block = parent->super_block;
372 lru_use_one(dnode_lru, &dnode->lru);
378 vfs_d_free(struct v_dnode* dnode)
380 assert(dnode->ref_count == 1);
383 assert(dnode->inode->link_count > 0);
384 dnode->inode->link_count--;
387 vfs_dcache_remove(dnode);
388 // Make sure the children de-reference their parent.
389 // With the LRU in place, the eviction will eventually be propagated
390 // over the entire detached subtree.
391 struct v_dnode *pos, *n;
392 llist_for_each(pos, n, &dnode->children, siblings)
394 vfs_dcache_remove(pos);
397 vfree(dnode->name.value);
398 cake_release(dnode_pile, dnode);
402 vfs_i_find(struct v_superblock* sb, uint32_t i_id)
404 struct hbucket* slot = &sb->i_cache[i_id & VFS_HASH_MASK];
405 struct v_inode *pos, *n;
406 hashtable_bucket_foreach(slot, pos, n, hash_list)
408 if (pos->id == i_id) {
409 lru_use_one(inode_lru, &pos->lru);
418 vfs_i_addhash(struct v_inode* inode)
420 struct hbucket* slot = &inode->sb->i_cache[inode->id & VFS_HASH_MASK];
422 hlist_delete(&inode->hash_list);
423 hlist_add(&slot->head, &inode->hash_list);
427 vfs_i_alloc(struct v_superblock* sb)
429 assert(sb->ops.init_inode);
431 struct v_inode* inode;
432 if (!(inode = cake_grab(inode_pile))) {
433 lru_evict_half(inode_lru);
434 if (!(inode = cake_grab(inode_pile))) {
439 memset(inode, 0, sizeof(*inode));
440 mutex_init(&inode->lock);
441 llist_init_head(&inode->xattrs);
443 sb->ops.init_inode(sb, inode);
446 inode->ctime = clock_unixtime();
447 inode->atime = inode->ctime;
448 inode->mtime = inode->ctime;
451 lru_use_one(inode_lru, &inode->lru);
456 vfs_i_free(struct v_inode* inode)
458 if (inode->pg_cache) {
459 pcache_release(inode->pg_cache);
460 vfree(inode->pg_cache);
462 inode->ops->sync(inode);
463 hlist_delete(&inode->hash_list);
464 cake_release(inode_pile, inode);
467 /* ---- System call definition and support ---- */
469 #define FLOCATE_CREATE_EMPTY 1
472 vfs_getfd(int fd, struct v_fd** fd_s)
474 if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {
481 __vfs_try_locate_file(const char* path,
482 struct v_dnode** fdir,
483 struct v_dnode** file,
486 char name_str[VFS_NAME_MAXLEN];
487 struct hstr name = HSTR(name_str, 0);
489 if ((errno = vfs_walk_proc(path, fdir, &name, VFS_WALK_PARENT))) {
493 errno = vfs_walk(*fdir, name.value, file, NULL, 0);
494 if (errno != ENOENT || !(options & FLOCATE_CREATE_EMPTY)) {
498 struct v_dnode* parent = *fdir;
499 struct v_dnode* file_new = vfs_d_alloc(parent, &name);
507 if (!(errno = parent->inode->ops->create(parent->inode, file_new))) {
508 vfs_dcache_add(parent, file_new);
511 vfs_d_free(file_new);
514 unlock_dnode(parent);
520 vfs_do_open(const char* path, int options)
523 struct v_dnode *dentry, *file;
524 struct v_file* ofile = 0;
526 errno = __vfs_try_locate_file(
527 path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);
529 if (errno || (errno = vfs_open(file, &ofile))) {
533 struct v_inode* o_inode = ofile->inode;
535 if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
536 struct v_fd* fd_s = vzalloc(sizeof(*fd_s));
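// branchless select: start f_pos at fsize when FO_APPEND is set, otherwise at 0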
537 ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);
539 fd_s->flags = options;
540 __current->fdtable->fds[fd] = fd_s;
547 __DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
549 int errno = vfs_do_open(path, options);
550 return DO_STATUS_OR_RETURN(errno);
553 __DEFINE_LXSYSCALL1(int, close, int, fd)
557 if ((errno = vfs_getfd(fd, &fd_s))) {
561 if (fd_s->file->ref_count > 1) {
562 fd_s->file->ref_count--;
563 } else if ((errno = vfs_close(fd_s->file))) {
568 __current->fdtable->fds[fd] = 0;
571 return DO_STATUS(errno);
575 __vfs_readdir_callback(struct dir_context* dctx,
580 struct dirent* dent = (struct dirent*)dctx->cb_data;
581 strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);
583 dent->d_type = dtype;
586 __DEFINE_LXSYSCALL2(int, readdir, int, fd, struct dirent*, dent)
591 if ((errno = vfs_getfd(fd, &fd_s))) {
595 struct v_inode* inode = fd_s->file->inode;
599 if (!(inode->itype & VFS_IFDIR)) {
602 struct dir_context dctx =
603 (struct dir_context){ .cb_data = dent,
604 .index = dent->d_offset,
605 .read_complete_callback =
606 __vfs_readdir_callback };
608 if (dent->d_offset == 0) {
609 __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, 0);
610 } else if (dent->d_offset == 1) {
611 __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, 0);
614 if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {
625 return DO_STATUS_OR_RETURN(errno);
628 __DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
632 if ((errno = vfs_getfd(fd, &fd_s))) {
636 struct v_file* file = fd_s->file;
637 if ((file->inode->itype & VFS_IFDIR)) {
642 lock_inode(file->inode);
644 file->inode->atime = clock_unixtime();
646 __SYSCALL_INTERRUPTIBLE({
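// sequential devices and FO_DIRECT opens bypass the page cache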
647 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
648 errno = file->ops->read(file->inode, buf, count, file->f_pos);
650 errno = pcache_read(file->inode, buf, count, file->f_pos);
655 file->f_pos += errno;
656 unlock_inode(file->inode);
660 unlock_inode(file->inode);
663 return DO_STATUS(errno);
666 __DEFINE_LXSYSCALL3(int, write, int, fd, void*, buf, size_t, count)
670 if ((errno = vfs_getfd(fd, &fd_s))) {
674 struct v_file* file = fd_s->file;
676 if ((errno = vfs_check_writable(file->dnode))) {
680 if ((file->inode->itype & VFS_IFDIR)) {
685 lock_inode(file->inode);
687 file->inode->mtime = clock_unixtime();
689 __SYSCALL_INTERRUPTIBLE({
690 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
691 errno = file->ops->write(file->inode, buf, count, file->f_pos);
693 errno = pcache_write(file->inode, buf, count, file->f_pos);
698 file->f_pos += errno;
699 unlock_inode(file->inode);
703 unlock_inode(file->inode);
706 return DO_STATUS(errno);
709 __DEFINE_LXSYSCALL3(int, lseek, int, fd, int, offset, int, options)
713 if ((errno = vfs_getfd(fd, &fd_s))) {
717 struct v_file* file = fd_s->file;
719 if (!file->ops->seek) {
724 lock_inode(file->inode);
727 int fpos = file->f_pos;
730 overflow = __builtin_sadd_overflow((int)file->f_pos, offset, &fpos);
734 __builtin_sadd_overflow((int)file->inode->fsize, offset, &fpos);
742 } else if (!(errno = file->ops->seek(file->inode, fpos))) {
746 unlock_inode(file->inode);
749 return DO_STATUS(errno);
753 vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
755 if (!dnode || dnode->parent == dnode) {
763 size_t len = vfs_get_path(dnode->parent, buf, size, depth + 1);
769 buf[len++] = VFS_PATH_DELIM;
771 size_t cpy_size = MIN(dnode->name.len, size - len);
772 strncpy(buf + len, dnode->name.value, cpy_size);
779 vfs_readlink(struct v_dnode* dnode, char* buf, size_t size)
782 struct v_inode* inode = dnode->inode;
783 if (inode->ops->read_symlink) {
786 int errno = inode->ops->read_symlink(inode, &link);
787 strncpy(buf, link, size);
795 __DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
799 if ((errno = vfs_getfd(fd, &fd_s))) {
803 struct v_dnode* dnode;
804 errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);
811 return DO_STATUS(errno);
814 __DEFINE_LXSYSCALL3(int, readlink, const char*, path, char*, buf, size_t, size)
817 struct v_dnode* dnode;
818 if (!(errno = vfs_walk_proc(path, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
819 errno = vfs_readlink(dnode, buf, size);
826 return DO_STATUS(errno);
829 __DEFINE_LXSYSCALL4(int,
842 if ((errno = vfs_getfd(dirfd, &fd_s))) {
846 struct v_dnode* dnode;
847 if (!(errno = vfs_walk(
848 fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
849 errno = vfs_readlink(dnode, buf, size);
857 return DO_STATUS(errno);
862 When we perform an operation that could affect the layout of a
863 directory (i.e., rename, mkdir, rmdir), we must lock the parent dir
864 whenever possible. This blocks any ongoing path walk from reaching
865 it, hence avoiding any partial state.
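/*
 * A minimal sketch of that protocol, not taken verbatim from the syscalls
 * below: the parent is locked before the child entry is altered, so a
 * concurrent path walk cannot observe a half-removed entry. The function
 * __with_parent_locked and its remove_entry callback are hypothetical and
 * stand in for the concrete rmdir/unlink/rename work.
 */
static inline int
__with_parent_locked(struct v_dnode* dnode,
                     int (*remove_entry)(struct v_dnode*))
{
    struct v_dnode* parent = dnode->parent;
    int errno;

    lock_dnode(parent);
    lock_inode(parent->inode);

    // the directory layout may only change while both locks are held
    errno = remove_entry(dnode);

    unlock_inode(parent->inode);
    unlock_dnode(parent);

    return errno;
}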
868 __DEFINE_LXSYSCALL1(int, rmdir, const char*, pathname)
871 struct v_dnode* dnode;
872 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
873 return DO_STATUS(errno);
878 if ((errno = vfs_check_writable(dnode))) {
882 if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
887 if (dnode->ref_count > 1 || dnode->inode->open_count) {
892 if (!llist_empty(&dnode->children)) {
897 struct v_dnode* parent = dnode->parent;
905 lock_inode(parent->inode);
907 if ((dnode->inode->itype & VFS_IFDIR)) {
908 errno = parent->inode->ops->rmdir(parent->inode, dnode);
910 vfs_dcache_remove(dnode);
916 unlock_inode(parent->inode);
917 unlock_dnode(parent);
921 return DO_STATUS(errno);
924 __DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
927 struct v_dnode *parent, *dir;
928 char name_value[VFS_NAME_MAXLEN];
929 struct hstr name = HHSTR(name_value, 0, 0);
931 if ((errno = vfs_walk_proc(path, &parent, &name, VFS_WALK_PARENT))) {
935 if ((errno = vfs_check_writable(parent))) {
939 if (!(dir = vfs_d_alloc(parent, &name))) {
945 lock_inode(parent->inode);
947 if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
949 } else if (!parent->inode->ops->mkdir) {
951 } else if (!(parent->inode->itype & VFS_IFDIR)) {
953 } else if (!(errno = parent->inode->ops->mkdir(parent->inode, dir))) {
954 vfs_dcache_add(parent, dir);
961 unlock_inode(parent->inode);
962 unlock_dnode(parent);
964 return DO_STATUS(errno);
968 __vfs_do_unlink(struct v_dnode* dnode)
971 struct v_inode* inode = dnode->inode;
973 if (dnode->ref_count > 1) {
977 if ((errno = vfs_check_writable(dnode))) {
983 if (inode->open_count) {
985 } else if (!(inode->itype & VFS_IFDIR)) {
986 // The underlying unlink implementation should handle
988 errno = inode->ops->unlink(inode);
1001 __DEFINE_LXSYSCALL1(int, unlink, const char*, pathname)
1004 struct v_dnode* dnode;
1005 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
1009 errno = __vfs_do_unlink(dnode);
1012 return DO_STATUS(errno);
1015 __DEFINE_LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
1019 if ((errno = vfs_getfd(fd, &fd_s))) {
1023 struct v_dnode* dnode;
1024 if (!(errno = vfs_walk(fd_s->file->dnode, pathname, &dnode, NULL, 0))) {
1025 errno = __vfs_do_unlink(dnode);
1029 return DO_STATUS(errno);
1032 __DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
1035 struct v_dnode *dentry, *to_link, *name_dentry, *name_file;
1037 errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
1039 errno = __vfs_try_locate_file(
1040 newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
1043 } else if (name_file) {
1044 errno = vfs_link(to_link, name_file);
1047 return DO_STATUS(errno);
1050 __DEFINE_LXSYSCALL1(int, fsync, int, fildes)
1055 if (!(errno = vfs_getfd(fildes, &fd_s))) {
1056 errno = vfs_fsync(fd_s->file);
1059 return DO_STATUS(errno);
1063 vfs_dup_fd(struct v_fd* old, struct v_fd** new)
1066 struct v_fd* copied = cake_grab(fd_pile);
1068 memcpy(copied, old, sizeof(struct v_fd));
1070 atomic_fetch_add(&old->file->ref_count, 1);
1078 vfs_dup2(int oldfd, int newfd)
1080 if (newfd == oldfd) {
1085 struct v_fd *oldfd_s, *newfd_s;
1086 if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
1090 if (!TEST_FD(newfd)) {
1095 newfd_s = __current->fdtable->fds[newfd];
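// an already-open newfd is closed first, per the usual dup2 semantics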
1096 if (newfd_s && (errno = vfs_close(newfd_s->file))) {
1100 if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1101 __current->fdtable->fds[newfd] = newfd_s;
1106 return DO_STATUS(errno);
1109 __DEFINE_LXSYSCALL2(int, dup2, int, oldfd, int, newfd)
1111 return vfs_dup2(oldfd, newfd);
1114 __DEFINE_LXSYSCALL1(int, dup, int, oldfd)
1117 struct v_fd *oldfd_s, *newfd_s;
1118 if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
1122 if (!(errno = vfs_alloc_fdslot(&newfd)) &&
1123 !(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1124 __current->fdtable->fds[newfd] = newfd_s;
1129 return DO_STATUS(errno);
1132 __DEFINE_LXSYSCALL2(int,
1140 struct v_dnode* dnode;
1141 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
1145 if ((errno = vfs_check_writable(dnode))) {
1149 if (!dnode->inode->ops->set_symlink) {
1154 lock_inode(dnode->inode);
1156 errno = dnode->inode->ops->set_symlink(dnode->inode, link_target);
1158 unlock_inode(dnode->inode);
1161 return DO_STATUS(errno);
1165 __vfs_do_chdir(struct v_dnode* dnode)
1171 if (!(dnode->inode->itype & VFS_IFDIR)) {
1176 if (__current->cwd) {
1177 atomic_fetch_sub(&__current->cwd->ref_count, 1);
1178 mnt_chillax(__current->cwd->mnt);
1181 atomic_fetch_add(&dnode->ref_count, 1);
1182 mnt_mkbusy(dnode->mnt);
1183 __current->cwd = dnode;
1185 unlock_dnode(dnode);
1191 __DEFINE_LXSYSCALL1(int, chdir, const char*, path)
1193 struct v_dnode* dnode;
1196 if ((errno = vfs_walk_proc(path, &dnode, NULL, 0))) {
1200 errno = __vfs_do_chdir(dnode);
1203 return DO_STATUS(errno);
1206 __DEFINE_LXSYSCALL1(int, fchdir, int, fd)
1211 if ((errno = vfs_getfd(fd, &fd_s))) {
1215 errno = __vfs_do_chdir(fd_s->file->dnode);
1218 return DO_STATUS(errno);
1221 __DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
1232 if (!__current->cwd) {
1233 *buf = VFS_PATH_DELIM;
1236 len = vfs_get_path(__current->cwd, buf, size, 0);
1243 buf[len + 1] = '\0';
1248 __current->k_status = errno;
1253 vfs_do_rename(struct v_dnode* current, struct v_dnode* target)
1256 if (current->inode->id == target->inode->id) {
1261 if ((errno = vfs_check_writable(current))) {
1265 if (current->ref_count > 1 || target->ref_count > 1) {
1269 if (current->super_block != target->super_block) {
1273 struct v_dnode* oldparent = current->parent;
1274 struct v_dnode* newparent = target->parent;
1276 lock_dnode(current);
1279 lock_dnode(oldparent);
1281 lock_dnode(newparent);
1283 if (!llist_empty(&target->children)) {
1285 unlock_dnode(target);
1290 current->inode->ops->rename(current->inode, current, target))) {
1291 unlock_dnode(target);
1295 // re-position current
1296 hstrcpy(&current->name, &target->name);
1297 vfs_dcache_rehash(newparent, current);
1302 unlock_dnode(target);
1305 unlock_dnode(current);
1307 unlock_dnode(oldparent);
1309 unlock_dnode(newparent);
1314 __DEFINE_LXSYSCALL2(int, rename, const char*, oldpath, const char*, newpath)
1316 struct v_dnode *cur, *target_parent, *target;
1317 struct hstr name = HSTR(valloc(VFS_NAME_MAXLEN), 0);
1320 if ((errno = vfs_walk_proc(oldpath, &cur, NULL, 0))) {
1324 if ((errno = vfs_walk(
1325 __current->cwd, newpath, &target_parent, &name, VFS_WALK_PARENT))) {
1329 errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
1330 if (errno == ENOENT) {
1331 target = vfs_d_alloc(target_parent, &name);
1332 vfs_dcache_add(target_parent, target);
1342 errno = vfs_do_rename(cur, target);
1346 return DO_STATUS(errno);