3 * @author Lunaixsky (zelong56@gmail.com)
4 * @brief Lunaix virtual file system - an abstraction layer for all file systems.
8 * @copyright Copyright (c) 2022
12 // Welcome to The Mountain O'Shit! :)
15 TODO vfs & device todos checklist
17 It is overseen by Twilight Sparkle ;)
19 1. Get inodes hooked into lru (CHECKED)
20 2. Get dnodes hooked into lru (CHECKED)
21 3. Get inodes properly hashed so they can be reused by underlying fs (CHECKED)
22 4. (lru) Add a callback function (or destructor) for eviction. (CHECKED)
23 [good idea] or a constructor/destructor pattern in cake allocator ?
24 5. (mount) Figure out a way to identify a busy mount point before unmount
25 maybe a unified mount_point structure that maintain a referencing
26 counter on any dnodes within the subtree? Such a counter will only
27 increment if a file is opened or a dnode is being used as working
28 directory and decrement conversely. (CHECKED)
29 6. (mount) Ability to track all mount points (including sub-mounts)
30 so we can be confident to clean up everything when we
32 7. (mount) Figure out a way to acquire the device represented by a dnode.
33 so it can be used to mount. (e.g. we wish to get `struct device*`
34 out of the dnode at /dev/sda)
35 [tip] we should pay attention at twifs and add a private_data field
36 under struct v_dnode? (CHECKED)
37 8. (mount) Then, we should refactor on mount/unmount mechanism. (CHECKED)
38 9. (mount) (future) Ability to mount anything? e.g. Linux can mount a disk
39 image file using a so called "loopback" pseudo device. Maybe
40 we can do similar thing in Lunaix? A block device emulation
41 above the regular file when we mount it on.
42 10. (device) device number (dev_t) allocation
43 [good idea] <class>:<subclass>:<uniq_id> composition
46 #include <klibc/string.h>
47 #include <lunaix/dirent.h>
48 #include <lunaix/foptions.h>
49 #include <lunaix/fs.h>
50 #include <lunaix/mm/cake.h>
51 #include <lunaix/mm/page.h>
52 #include <lunaix/mm/valloc.h>
53 #include <lunaix/process.h>
54 #include <lunaix/spike.h>
55 #include <lunaix/syscall.h>
57 #include <lunaix/fs/twifs.h>
// Slab ("cake") piles backing the core VFS object types.
59 static struct cake_pile* dnode_pile;
60 static struct cake_pile* inode_pile;
61 static struct cake_pile* file_pile;
62 static struct cake_pile* superblock_pile;
63 static struct cake_pile* fd_pile;
// Root of the global dnode tree; self-parented (see vfs_init).
65 struct v_dnode* vfs_sysroot;
// Hash table serving as the directory-entry (dnode) cache.
66 static struct hbucket* dnode_cache;
// LRU zones driving eviction of cached dnodes/inodes under memory pressure.
68 struct lru_zone *dnode_lru, *inode_lru;
// Canonical path components: "..", "." and the empty string.
70 struct hstr vfs_ddot = HSTR("..", 2);
71 struct hstr vfs_dot = HSTR(".", 1);
72 struct hstr vfs_empty = HSTR("", 0);
// Forward declarations (return types fall outside this view).
78 vfs_sb_free(struct v_superblock* sb);
81 __vfs_try_evict_dnode(struct lru_node* obj);
84 __vfs_try_evict_inode(struct lru_node* obj);
// vfs_init body: one-time VFS bootstrap. NOTE(review): the function header
// and some body lines are not visible in this view.
89 // Create a dedicated cake (slab) pile for these objects instead of using valloc, so internal fragmentation is minimized.
90 dnode_pile = cake_new_pile("dnode_cache", sizeof(struct v_dnode), 1, 0);
91 inode_pile = cake_new_pile("inode_cache", sizeof(struct v_inode), 1, 0);
92 file_pile = cake_new_pile("file_cache", sizeof(struct v_file), 1, 0);
93 fd_pile = cake_new_pile("fd_cache", sizeof(struct v_fd), 1, 0);
// NOTE(review): the assignment target (presumably superblock_pile) is on a
// line not visible here — confirm the result is not dropped.
95 cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
// Zero-initialized dnode hash table.
97 dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
// Register eviction callbacks for the two LRU zones.
99 dnode_lru = lru_new_zone(__vfs_try_evict_dnode);
100 inode_lru = lru_new_zone(__vfs_try_evict_inode);
// Precompute hashes for the "." and ".." components.
102 hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
103 hstr_rehash(&vfs_dot, HSTR_FULL_HASH);
// The system root is its own parent and is permanently pinned (ref >= 1).
106 vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
107 vfs_sysroot->parent = vfs_sysroot;
108 atomic_fetch_add(&vfs_sysroot->ref_count, 1);
// Map (parent dnode, name hash) to the dnode-cache bucket holding that entry.
111 inline struct hbucket*
112 __dcache_hash(struct v_dnode* parent, uint32_t* hash)
114 uint32_t _hash = *hash;
115 // Mix in the parent's pointer value to reduce the chance of collision.
116 _hash += (uint32_t)parent;
// Fold high bits down before masking so they contribute to bucket choice.
118 _hash = _hash ^ (_hash >> VFS_HASHBITS);
120 return &dnode_cache[_hash & VFS_HASH_MASK];
// Look up a child dnode of `parent` by name in the dnode cache.
// "" and "." resolve to the parent itself; ".." resolves to its parent.
124 vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
126 if (!str->len || HSTR_EQ(str, &vfs_dot))
129 if (HSTR_EQ(str, &vfs_ddot)) {
130 return parent->parent;
133 uint32_t hash = str->hash;
134 struct hbucket* slot = __dcache_hash(parent, &hash);
// Scan the bucket; entries are matched by full name hash.
136 struct v_dnode *pos, *n;
137 hashtable_bucket_foreach(slot, pos, n, hash_list)
139 if (pos->name.hash == hash) {
// Insert `dnode` into the cache as a child of `parent`.
// Takes a reference on the dnode on behalf of the cache.
147 vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
151 atomic_fetch_add(&dnode->ref_count, 1);
152 dnode->parent = parent;
153 llist_append(&parent->children, &dnode->siblings);
// Hash by (parent, name) and link into the bucket chain.
155 struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
156 hlist_add(&bucket->head, &dnode->hash_list);
// Detach `dnode` from the cache: unlink from parent's child list and the
// hash bucket, then drop the cache's reference. Caller must hold the only
// outstanding reference (the cache's own) — asserted below.
160 vfs_dcache_remove(struct v_dnode* dnode)
163 assert(dnode->ref_count == 1);
165 llist_delete(&dnode->siblings);
166 hlist_delete(&dnode->hash_list);
168 dnode->parent = NULL;
169 atomic_fetch_sub(&dnode->ref_count, 1);
// Move `dnode` under `new_parent`: recompute its name hash, then
// remove + re-add so it lands in the correct bucket.
173 vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
177 hstr_rehash(&dnode->name, HSTR_FULL_HASH);
178 vfs_dcache_remove(dnode);
179 vfs_dcache_add(new_parent, dnode);
// Open the file backing `dnode`, producing a new v_file in *file.
// Fails if the dnode has no inode or the fs provides no open op.
183 vfs_open(struct v_dnode* dnode, struct v_file** file)
185 if (!dnode->inode || !dnode->inode->ops->open) {
189 struct v_inode* inode = dnode->inode;
// Fresh v_file from the slab, zeroed, with one initial reference.
193 struct v_file* vfile = cake_grab(file_pile);
194 memset(vfile, 0, sizeof(*vfile));
196 vfile->dnode = dnode;
197 vfile->inode = inode;
198 vfile->ref_count = ATOMIC_VAR_INIT(1);
199 vfile->ops = inode->default_fops;
// Lazily attach a page cache for regular files.
201 if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
202 struct pcache* pcache = vzalloc(sizeof(struct pcache));
204 pcache->master = inode;
205 inode->pg_cache = pcache;
// Delegate to the fs-specific open; NOTE(review): the error-branch
// condition around cake_release is on a line not visible here.
208 int errno = inode->ops->open(inode, vfile);
210 cake_release(file_pile, vfile);
// Success path: pin the dnode and mark its mount busy.
212 atomic_fetch_add(&dnode->ref_count, 1);
214 mnt_mkbusy(dnode->mnt);
// Bind `inode` to `assign_to`, releasing the link held on any inode it
// previously pointed at.
225 vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
227 if (assign_to->inode) {
228 assign_to->inode->link_count--;
230 assign_to->inode = inode;
// Create a hard link `name` to the inode of `to_link`.
// Rejects cross-filesystem links and filesystems without a link op.
235 vfs_link(struct v_dnode* to_link, struct v_dnode* name)
239 if ((errno = vfs_check_writable(to_link))) {
243 lock_inode(to_link->inode);
// Hard links must stay within one super block.
244 if (to_link->super_block->root != name->super_block->root) {
246 } else if (!to_link->inode->ops->link) {
248 } else if (!(errno = to_link->inode->ops->link(to_link->inode, name))) {
249 vfs_assign_inode(name, to_link->inode);
251 unlock_inode(to_link->inode);
// Close `file`: fs close hook first, then drop the dnode reference,
// the inode open count and the mount busy count; flush the page cache
// and return the v_file to its slab.
257 vfs_close(struct v_file* file)
260 if (!(errno = file->ops->close(file))) {
261 atomic_fetch_sub(&file->dnode->ref_count, 1);
262 file->inode->open_count--;
263 mnt_chillax(file->dnode->mnt);
265 pcache_commit_all(file->inode);
266 cake_release(file_pile, file);
// Flush `file` to backing storage: commit dirty page-cache pages, then
// invoke the fs-specific sync hook if one exists.
272 vfs_fsync(struct v_file* file)
275 if ((errno = vfs_check_writable(file->dnode))) {
279 lock_inode(file->inode);
281 pcache_commit_all(file->inode);
284 if (file->ops->sync) {
285 errno = file->ops->sync(file);
288 unlock_inode(file->inode);
// Find the lowest-numbered free slot in the current process' fd table.
294 vfs_alloc_fdslot(int* fd)
296 for (size_t i = 0; i < VFS_MAX_FD; i++) {
297 if (!__current->fdtable->fds[i]) {
// Allocate and zero a super block, with its own inode hash table.
// NOTE(review): the function header is on a line not visible here.
308 struct v_superblock* sb = cake_grab(superblock_pile);
309 memset(sb, 0, sizeof(*sb));
310 llist_init_head(&sb->sb_list);
311 sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
// Release a super block back to its slab.
// NOTE(review): i_cache appears not to be vfree'd in the visible lines —
// confirm it is released elsewhere.
316 vfs_sb_free(struct v_superblock* sb)
319 cake_release(superblock_pile, sb);
// LRU eviction callback: a dnode may be evicted only when unreferenced.
323 __vfs_try_evict_dnode(struct lru_node* obj)
325 struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);
327 if (!dnode->ref_count) {
// LRU eviction callback: an inode may be evicted only when it has no
// remaining links and no open files.
335 __vfs_try_evict_inode(struct lru_node* obj)
337 struct v_inode* inode = container_of(obj, struct v_inode, lru);
339 if (!inode->link_count && !inode->open_count) {
// Allocate and initialize a new dnode named `name` under `parent`
// (parent may be NULL for the root). On slab exhaustion, evict half of
// the dnode LRU and retry once.
347 vfs_d_alloc(struct v_dnode* parent, struct hstr* name)
// NOTE(review): the result of this first cake_grab looks unchecked/overwritten
// by the retry below — the intervening failure-check line is not visible here;
// confirm the first grab's result is tested before eviction.
349 struct v_dnode* dnode = cake_grab(dnode_pile);
351 lru_evict_half(dnode_lru);
353 if (!(dnode = cake_grab(dnode_pile))) {
358 memset(dnode, 0, sizeof(*dnode));
359 llist_init_head(&dnode->children);
360 llist_init_head(&dnode->siblings);
361 mutex_init(&dnode->lock);
// Name storage is a fixed-capacity heap buffer; copy the given name in.
363 dnode->ref_count = ATOMIC_VAR_INIT(0);
364 dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
366 hstrcpy(&dnode->name, name);
// Inherit super block and mount point from the parent.
369 dnode->super_block = parent->super_block;
370 dnode->mnt = parent->mnt;
// Register with the LRU so the dnode becomes evictable.
373 lru_use_one(dnode_lru, &dnode->lru);
// Destroy a dnode whose only remaining reference is the cache's own.
// Drops one inode link, detaches the dnode and all direct children from
// the cache, then frees the name buffer and the dnode itself.
379 vfs_d_free(struct v_dnode* dnode)
381 assert(dnode->ref_count == 1);
384 assert(dnode->inode->link_count > 0);
385 dnode->inode->link_count--;
388 vfs_dcache_remove(dnode);
389 // Make sure the children de-reference their parent.
390 // With lru presented, the eviction will be propagated over the entire
391 // detached subtree eventually
392 struct v_dnode *pos, *n;
393 llist_for_each(pos, n, &dnode->children, siblings)
395 vfs_dcache_remove(pos);
398 vfree(dnode->name.value);
399 cake_release(dnode_pile, dnode);
// Find a cached inode by id within super block `sb`; a hit refreshes the
// inode's position in the LRU.
403 vfs_i_find(struct v_superblock* sb, uint32_t i_id)
405 struct hbucket* slot = &sb->i_cache[i_id & VFS_HASH_MASK];
406 struct v_inode *pos, *n;
407 hashtable_bucket_foreach(slot, pos, n, hash_list)
409 if (pos->id == i_id) {
410 lru_use_one(inode_lru, &pos->lru);
// (Re-)insert `inode` into its super block's inode hash table, keyed by
// inode id. The prior hlist_delete makes the operation idempotent.
419 vfs_i_addhash(struct v_inode* inode)
421 struct hbucket* slot = &inode->sb->i_cache[inode->id & VFS_HASH_MASK];
423 hlist_delete(&inode->hash_list);
424 hlist_add(&slot->head, &inode->hash_list);
// Allocate and initialize an inode for super block `sb`. On slab
// exhaustion, evict half of the inode LRU and retry once.
// Requires the fs to provide an init_inode hook.
428 vfs_i_alloc(struct v_superblock* sb)
430 assert(sb->ops.init_inode);
432 struct v_inode* inode;
433 if (!(inode = cake_grab(inode_pile))) {
434 lru_evict_half(inode_lru);
435 if (!(inode = cake_grab(inode_pile))) {
440 memset(inode, 0, sizeof(*inode));
441 mutex_init(&inode->lock);
442 llist_init_head(&inode->xattrs);
// Filesystem-specific initialization.
444 sb->ops.init_inode(sb, inode);
// All three timestamps start at "now".
447 inode->ctime = clock_unixtime();
448 inode->atime = inode->ctime;
449 inode->mtime = inode->ctime;
// Register with the LRU so the inode becomes evictable.
452 lru_use_one(inode_lru, &inode->lru);
// Destroy an inode: tear down its page cache (if any), sync it one last
// time, unhash it, and return it to the slab.
457 vfs_i_free(struct v_inode* inode)
459 if (inode->pg_cache) {
460 pcache_release(inode->pg_cache);
461 vfree(inode->pg_cache);
// NOTE(review): ops->sync is called unconditionally here, unlike the
// NULL-checked file->ops->sync in vfs_fsync — confirm every fs sets it.
463 inode->ops->sync(inode);
464 hlist_delete(&inode->hash_list);
465 cake_release(inode_pile, inode);
468 /* ---- System call definition and support ---- */
// Flag for __vfs_try_locate_file: create the file if it does not exist.
470 #define FLOCATE_CREATE_EMPTY 1
// Translate a numeric fd to its v_fd entry in the current process'
// fd table; validates the fd range via TEST_FD.
473 vfs_getfd(int fd, struct v_fd** fd_s)
475 if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {
// Resolve `path` into its parent directory (*fdir) and the target dnode
// (*file). With FLOCATE_CREATE_EMPTY, a missing final component is
// created through the parent inode's create op.
482 __vfs_try_locate_file(const char* path,
483 struct v_dnode** fdir,
484 struct v_dnode** file,
// Name of the final component is extracted into a stack buffer.
487 char name_str[VFS_NAME_MAXLEN];
488 struct hstr name = HSTR(name_str, 0);
// First walk to the parent directory, capturing the last component name.
490 if ((errno = vfs_walk_proc(path, fdir, &name, VFS_WALK_PARENT))) {
// Then resolve the final component under the parent.
494 errno = vfs_walk(*fdir, name.value, file, NULL, 0);
495 if (errno != ENOENT || !(options & FLOCATE_CREATE_EMPTY)) {
// Creation path: allocate a fresh dnode and ask the fs to materialize it.
499 struct v_dnode* parent = *fdir;
500 struct v_dnode* file_new = vfs_d_alloc(parent, &name);
508 if (!(errno = parent->inode->ops->create(parent->inode, file_new))) {
509 vfs_dcache_add(parent, file_new);
// Creation failed: release the speculative dnode.
512 vfs_d_free(file_new);
515 unlock_dnode(parent);
// Open `path` with FO_* option bits: locate (optionally creating) the
// dnode, open a v_file on it, then bind the file to a fresh fd slot.
521 vfs_do_open(const char* path, int options)
524 struct v_dnode *dentry, *file;
525 struct v_file* ofile = 0;
527 errno = __vfs_try_locate_file(
528 path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);
530 if (errno || (errno = vfs_open(file, &ofile))) {
534 struct v_inode* o_inode = ofile->inode;
536 if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
537 struct v_fd* fd_s = vzalloc(sizeof(*fd_s));
// Branch-free append handling: the mask is all-ones iff FO_APPEND is set,
// so f_pos starts at fsize for append opens and 0 otherwise.
538 ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);
540 fd_s->flags = options;
541 __current->fdtable->fds[fd] = fd_s;
// open(2): thin syscall wrapper over vfs_do_open.
548 __DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
550 int errno = vfs_do_open(path, options);
551 return DO_STATUS_OR_RETURN(errno);
// close(2): drop one file reference; only the last reference actually
// closes the file. The fd slot is released either way on success.
554 __DEFINE_LXSYSCALL1(int, close, int, fd)
558 if ((errno = vfs_getfd(fd, &fd_s))) {
562 if (fd_s->file->ref_count > 1) {
563 fd_s->file->ref_count--;
564 } else if ((errno = vfs_close(fd_s->file))) {
569 __current->fdtable->fds[fd] = 0;
572 return DO_STATUS(errno);
// dir_context callback: copy one directory entry into the user-supplied
// struct dirent stashed in cb_data.
576 __vfs_readdir_callback(struct dir_context* dctx,
581 struct dirent* dent = (struct dirent*)dctx->cb_data;
// NOTE(review): strncpy may leave d_name unterminated at DIRENT_NAME_MAX_LEN —
// confirm the dirent buffer guarantees a trailing NUL.
582 strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);
584 dent->d_type = dtype;
// readdir(2): emit the synthetic "." and ".." entries for offsets 0/1,
// then delegate to the fs readdir op for real entries.
587 __DEFINE_LXSYSCALL2(int, readdir, int, fd, struct dirent*, dent)
592 if ((errno = vfs_getfd(fd, &fd_s))) {
596 struct v_inode* inode = fd_s->file->inode;
// Only directories can be read as directories.
600 if (!(inode->itype & VFS_IFDIR)) {
603 struct dir_context dctx =
604 (struct dir_context){ .cb_data = dent,
605 .index = dent->d_offset,
606 .read_complete_callback =
607 __vfs_readdir_callback };
609 if (dent->d_offset == 0) {
610 __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, 0);
611 } else if (dent->d_offset == 1) {
612 __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, 0);
615 if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {
626 return DO_STATUS_OR_RETURN(errno);
// read(2): sequential devices and FO_DIRECT bypass the page cache;
// everything else reads through pcache_read. A non-negative errno here
// is the byte count, which advances f_pos.
629 __DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
633 if ((errno = vfs_getfd(fd, &fd_s))) {
637 struct v_file* file = fd_s->file;
// Directories cannot be read with read(2); use readdir.
638 if ((file->inode->itype & VFS_IFDIR)) {
643 lock_inode(file->inode);
645 file->inode->atime = clock_unixtime();
// The actual transfer may block; allow it to be interrupted by signals.
647 __SYSCALL_INTERRUPTIBLE({
648 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
649 errno = file->ops->read(file->inode, buf, count, file->f_pos);
651 errno = pcache_read(file->inode, buf, count, file->f_pos);
// Success path: advance the file position by the bytes transferred.
656 file->f_pos += errno;
657 unlock_inode(file->inode);
661 unlock_inode(file->inode);
664 return DO_STATUS(errno);
// write(2): mirror of read(2) — direct/sequential-device writes bypass
// the page cache, others go through pcache_write; updates mtime and
// advances f_pos by the bytes written.
667 __DEFINE_LXSYSCALL3(int, write, int, fd, void*, buf, size_t, count)
671 if ((errno = vfs_getfd(fd, &fd_s))) {
675 struct v_file* file = fd_s->file;
// Refuse writes on read-only mounts/filesystems.
677 if ((errno = vfs_check_writable(file->dnode))) {
681 if ((file->inode->itype & VFS_IFDIR)) {
686 lock_inode(file->inode);
688 file->inode->mtime = clock_unixtime();
// The actual transfer may block; allow it to be interrupted by signals.
690 __SYSCALL_INTERRUPTIBLE({
691 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
692 errno = file->ops->write(file->inode, buf, count, file->f_pos);
694 errno = pcache_write(file->inode, buf, count, file->f_pos);
699 file->f_pos += errno;
700 unlock_inode(file->inode);
704 unlock_inode(file->inode);
707 return DO_STATUS(errno);
// lseek(2): compute the new position (relative to current position or
// file size, depending on `options`) with signed-overflow checking, then
// let the fs seek op validate/apply it.
710 __DEFINE_LXSYSCALL3(int, lseek, int, fd, int, offset, int, options)
714 if ((errno = vfs_getfd(fd, &fd_s))) {
718 struct v_file* file = fd_s->file;
720 if (!file->ops->seek) {
725 lock_inode(file->inode);
728 int fpos = file->f_pos;
// __builtin_sadd_overflow reports (rather than invokes UB on) overflow.
731 overflow = __builtin_sadd_overflow((int)file->f_pos, offset, &fpos);
735 __builtin_sadd_overflow((int)file->inode->fsize, offset, &fpos);
743 } else if (!(errno = file->ops->seek(file->inode, fpos))) {
747 unlock_inode(file->inode);
750 return DO_STATUS(errno);
// Recursively build the absolute path of `dnode` into `buf` (capacity
// `size`), emitting ancestors first. Recursion terminates at the root,
// which is its own parent. Returns the length written so far.
754 vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
756 if (!dnode || dnode->parent == dnode) {
764 size_t len = vfs_get_path(dnode->parent, buf, size, depth + 1);
// Separator between the parent prefix and this component.
770 buf[len++] = VFS_PATH_DELIM;
// Copy as much of this component as the remaining space allows.
772 size_t cpy_size = MIN(dnode->name.len, size - len);
773 strncpy(buf + len, dnode->name.value, cpy_size);
// Read the symlink target of `dnode` into `buf` (at most `size` bytes),
// via the fs read_symlink op.
780 vfs_readlink(struct v_dnode* dnode, char* buf, size_t size)
783 struct v_inode* inode = dnode->inode;
784 if (inode->ops->read_symlink) {
787 int errno = inode->ops->read_symlink(inode, &link);
// NOTE(review): strncpy may leave buf unterminated if the target is
// >= size bytes — confirm callers tolerate this.
788 strncpy(buf, link, size);
// realpathat(2): write the absolute path of the file behind `fd` into
// the user buffer.
796 __DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
800 if ((errno = vfs_getfd(fd, &fd_s))) {
804 struct v_dnode* dnode;
805 errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);
812 return DO_STATUS(errno);
// readlink(2): resolve `path` without following the final symlink
// (VFS_WALK_NOFOLLOW), then read that symlink's target.
815 __DEFINE_LXSYSCALL3(int, readlink, const char*, path, char*, buf, size_t, size)
818 struct v_dnode* dnode;
819 if (!(errno = vfs_walk_proc(path, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
820 errno = vfs_readlink(dnode, buf, size);
827 return DO_STATUS(errno);
// readlinkat(2): resolve `pathname` relative to the directory fd without
// following the final symlink, then read that symlink's target.
830 __DEFINE_LXSYSCALL4(int,
843 if ((errno = vfs_getfd(dirfd, &fd_s))) {
847 struct v_dnode* dnode;
848 if (!(errno = vfs_walk(
849 fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
// BUGFIX: read the link of the walked target `dnode`, not of the base
// directory fd — the previous code ignored the walk result entirely.
850 errno = vfs_readlink(dnode, buf, size);
858 return DO_STATUS(errno);
863 When we perform an operation that could affect the layout of a
864 directory (i.e., rename, mkdir, rmdir), we must lock the parent dir
865 whenever possible. This blocks any ongoing path walk from reaching
866 it and hence avoids any partial state.
// rmdir(2): remove an empty, unused directory. Busy (referenced/open),
// non-empty, or read-only targets are rejected before touching the fs.
869 __DEFINE_LXSYSCALL1(int, rmdir, const char*, pathname)
872 struct v_dnode* dnode;
873 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
874 return DO_STATUS(errno);
879 if ((errno = vfs_check_writable(dnode))) {
// Read-only filesystems cannot be modified.
883 if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
// Busy check: other holders of the dnode or open files on its inode.
888 if (dnode->ref_count > 1 || dnode->inode->open_count) {
// A directory with cached children is not empty.
893 if (!llist_empty(&dnode->children)) {
898 struct v_dnode* parent = dnode->parent;
// Lock the parent (see the locking note above) before mutating layout.
906 lock_inode(parent->inode);
908 if ((dnode->inode->itype & VFS_IFDIR)) {
909 errno = parent->inode->ops->rmdir(parent->inode, dnode);
911 vfs_dcache_remove(dnode);
917 unlock_inode(parent->inode);
918 unlock_dnode(parent);
922 return DO_STATUS(errno);
// mkdir(2): walk to the parent, allocate a child dnode for the final
// component, and ask the fs to create the directory. The dnode is
// only published into the dcache if the fs mkdir succeeds.
925 __DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
928 struct v_dnode *parent, *dir;
929 char name_value[VFS_NAME_MAXLEN];
930 struct hstr name = HHSTR(name_value, 0, 0);
932 if ((errno = vfs_walk_proc(path, &parent, &name, VFS_WALK_PARENT))) {
936 if ((errno = vfs_check_writable(parent))) {
940 if (!(dir = vfs_d_alloc(parent, &name))) {
// Lock the parent (see the locking note above) before mutating layout.
946 lock_inode(parent->inode);
948 if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
950 } else if (!parent->inode->ops->mkdir) {
952 } else if (!(parent->inode->itype & VFS_IFDIR)) {
954 } else if (!(errno = parent->inode->ops->mkdir(parent->inode, dir))) {
955 vfs_dcache_add(parent, dir);
962 unlock_inode(parent->inode);
963 unlock_dnode(parent);
965 return DO_STATUS(errno);
// Shared unlink worker: refuses busy dnodes, open inodes and directories
// (directories go through rmdir); otherwise delegates to the fs unlink op.
969 __vfs_do_unlink(struct v_dnode* dnode)
972 struct v_inode* inode = dnode->inode;
// Someone else still holds this dnode (open file or cwd).
974 if (dnode->ref_count > 1) {
978 if ((errno = vfs_check_writable(dnode))) {
984 if (inode->open_count) {
986 } else if (!(inode->itype & VFS_IFDIR)) {
987 // The underlying unlink implementation should handle
989 errno = inode->ops->unlink(inode);
// unlink(2): resolve the path (following symlinks) and unlink it.
1002 __DEFINE_LXSYSCALL1(int, unlink, const char*, pathname)
1005 struct v_dnode* dnode;
1006 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
1010 errno = __vfs_do_unlink(dnode);
1013 return DO_STATUS(errno);
// unlinkat(2): same as unlink but relative to a directory fd.
1016 __DEFINE_LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
1020 if ((errno = vfs_getfd(fd, &fd_s))) {
1024 struct v_dnode* dnode;
1025 if (!(errno = vfs_walk(fd_s->file->dnode, pathname, &dnode, NULL, 0))) {
1026 errno = __vfs_do_unlink(dnode);
1030 return DO_STATUS(errno);
// link(2): locate the existing file, create an empty dnode for the new
// name (FLOCATE_CREATE_EMPTY), then hard-link the two via vfs_link.
1033 __DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
1036 struct v_dnode *dentry, *to_link, *name_dentry, *name_file;
1038 errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
1040 errno = __vfs_try_locate_file(
1041 newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
1044 } else if (name_file) {
1045 errno = vfs_link(to_link, name_file);
1048 return DO_STATUS(errno);
// fsync(2): flush the file behind `fildes` via vfs_fsync.
1051 __DEFINE_LXSYSCALL1(int, fsync, int, fildes)
1056 if (!(errno = vfs_getfd(fildes, &fd_s))) {
1057 errno = vfs_fsync(fd_s->file);
1060 return DO_STATUS(errno);
// Duplicate an fd entry: shallow-copy the v_fd and take an extra
// reference on the shared v_file.
1064 vfs_dup_fd(struct v_fd* old, struct v_fd** new)
1067 struct v_fd* copied = cake_grab(fd_pile);
1069 memcpy(copied, old, sizeof(struct v_fd));
1071 atomic_fetch_add(&old->file->ref_count, 1);
// dup2 worker: duplicating an fd onto itself is a no-op; otherwise any
// file currently occupying newfd is closed first, then oldfd's entry is
// duplicated into the newfd slot.
1079 vfs_dup2(int oldfd, int newfd)
1081 if (newfd == oldfd) {
1086 struct v_fd *oldfd_s, *newfd_s;
1087 if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
1091 if (!TEST_FD(newfd)) {
1096 newfd_s = __current->fdtable->fds[newfd];
1097 if (newfd_s && (errno = vfs_close(newfd_s->file))) {
1101 if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1102 __current->fdtable->fds[newfd] = newfd_s;
1107 return DO_STATUS(errno);
// dup2(2): thin wrapper over vfs_dup2.
1110 __DEFINE_LXSYSCALL2(int, dup2, int, oldfd, int, newfd)
1112 return vfs_dup2(oldfd, newfd);
// dup(2): duplicate oldfd into the lowest free fd slot.
1115 __DEFINE_LXSYSCALL1(int, dup, int, oldfd)
1118 struct v_fd *oldfd_s, *newfd_s;
1119 if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
1123 if (!(errno = vfs_alloc_fdslot(&newfd)) &&
1124 !(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1125 __current->fdtable->fds[newfd] = newfd_s;
1130 return DO_STATUS(errno);
// symlink-style syscall (parameter list continues on lines outside this
// view): set the symlink target of the resolved dnode via set_symlink.
1133 __DEFINE_LXSYSCALL2(int,
1141 struct v_dnode* dnode;
1142 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
// NOTE(review): assignment-in-condition without extra parens — intended,
// but inconsistent with the `if ((errno = ...))` style used elsewhere.
1146 if (errno = vfs_check_writable(dnode)) {
1150 if (!dnode->inode->ops->set_symlink) {
1155 lock_inode(dnode->inode);
1157 errno = dnode->inode->ops->set_symlink(dnode->inode, link_target);
1159 unlock_inode(dnode->inode);
1162 return DO_STATUS(errno);
// chdir worker: the target must be a directory. The old cwd (if any)
// is un-pinned and its mount un-busied before the new cwd is pinned.
1166 __vfs_do_chdir(struct v_dnode* dnode)
1172 if (!(dnode->inode->itype & VFS_IFDIR)) {
1177 if (__current->cwd) {
1178 atomic_fetch_sub(&__current->cwd->ref_count, 1);
1179 mnt_chillax(__current->cwd->mnt);
// Pin the new working directory and mark its mount busy.
1182 atomic_fetch_add(&dnode->ref_count, 1);
1183 mnt_mkbusy(dnode->mnt);
1184 __current->cwd = dnode;
1186 unlock_dnode(dnode);
// chdir(2): resolve the path then delegate to the worker.
1192 __DEFINE_LXSYSCALL1(int, chdir, const char*, path)
1194 struct v_dnode* dnode;
1197 if ((errno = vfs_walk_proc(path, &dnode, NULL, 0))) {
1201 errno = __vfs_do_chdir(dnode);
1204 return DO_STATUS(errno);
// fchdir(2): same, but the target comes from an open directory fd.
1207 __DEFINE_LXSYSCALL1(int, fchdir, int, fd)
1212 if ((errno = vfs_getfd(fd, &fd_s))) {
1216 errno = __vfs_do_chdir(fd_s->file->dnode);
1219 return DO_STATUS(errno);
// getcwd(2): no cwd set means the process is at the root ("/");
// otherwise the path is rebuilt from the cwd dnode chain.
1222 __DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
1233 if (!__current->cwd) {
1234 *buf = VFS_PATH_DELIM;
1237 len = vfs_get_path(__current->cwd, buf, size, 0);
// NOTE(review): terminator written at len+1, not len — the intervening
// lines are not visible here; verify this is not an off-by-one.
1244 buf[len + 1] = '\0';
1249 __current->k_status = errno;
// Rename worker: same-inode renames are a no-op; cross-super-block and
// busy (ref_count > 1) renames are rejected. Both parents and both
// endpoints are dnode-locked while the fs rename op runs, after which
// `current` is renamed and rehashed under the new parent.
1254 vfs_do_rename(struct v_dnode* current, struct v_dnode* target)
1257 if (current->inode->id == target->inode->id) {
1262 if (errno = vfs_check_writable(current)) {
// Either endpoint being held elsewhere makes the rename unsafe.
1266 if (current->ref_count > 1 || target->ref_count > 1) {
// Renames never cross filesystem boundaries.
1270 if (current->super_block != target->super_block) {
1274 struct v_dnode* oldparent = current->parent;
1275 struct v_dnode* newparent = target->parent;
// Lock ordering: endpoint first, then old parent, then new parent.
1277 lock_dnode(current);
1280 lock_dnode(oldparent);
1282 lock_dnode(newparent);
// Cannot clobber a non-empty target directory.
1284 if (!llist_empty(&target->children)) {
1286 unlock_dnode(target);
1291 current->inode->ops->rename(current->inode, current, target))) {
1292 unlock_dnode(target);
1296 // re-position current
1297 hstrcpy(&current->name, &target->name);
1298 vfs_dcache_rehash(newparent, current);
1303 unlock_dnode(target);
1306 unlock_dnode(current);
1308 unlock_dnode(oldparent);
1310 unlock_dnode(newparent);
// rename(2): resolve the source, then the target's parent; if the
// target name does not exist yet a placeholder dnode is created for it
// before delegating to vfs_do_rename.
// NOTE(review): `name` is valloc'd — the matching vfree is on a line not
// visible here; confirm it is released on all paths.
1315 __DEFINE_LXSYSCALL2(int, rename, const char*, oldpath, const char*, newpath)
1317 struct v_dnode *cur, *target_parent, *target;
1318 struct hstr name = HSTR(valloc(VFS_NAME_MAXLEN), 0);
1321 if ((errno = vfs_walk_proc(oldpath, &cur, NULL, 0))) {
// Walk to the target's parent, capturing the final component name.
1325 if ((errno = vfs_walk(
1326 __current->cwd, newpath, &target_parent, &name, VFS_WALK_PARENT))) {
1330 errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
// Missing target: fabricate a dnode to rename onto.
1331 if (errno == ENOENT) {
1332 target = vfs_d_alloc(target_parent, &name);
1333 vfs_dcache_add(target_parent, target);
1343 errno = vfs_do_rename(cur, target);
1347 return DO_STATUS(errno);