3 * @author Lunaixsky (zelong56@gmail.com)
4 * @brief Lunaix virtual file system - an abstraction layer for all file system.
8 * @copyright Copyright (c) 2022
12 // Welcome to The Mountain O'Shit! :)
15 TODO vfs & device todos checklist
17 It is overseen by Twilight Sparkle ;)
19 1. Get inodes hooked into lru (CHECKED)
20 2. Get dnodes hooked into lru (CHECKED)
21 3. Get inodes properly hashed so they can be reused by underlying fs (CHECKED)
22 4. (lru) Add a callback function (or destructor) for eviction. (CHECKED)
23 [good idea] or a constructor/destructor pattern in cake allocator ?
24 5. (mount) Figure out a way to identify a busy mount point before unmount
25 maybe a unified mount_point structure that maintain a referencing
26 counter on any dnodes within the subtree? Such a counter will only
27 increment if a file is opened or a dnode is being used as working
28 directory and decrement conversely. (CHECKED)
29 6. (mount) Ability to track all mount points (including sub-mounts)
30 so we can be confident to clean up everything when we
32 7. (mount) Figure out a way to acquire the device represented by a dnode.
33 so it can be used to mount. (e.g. we wish to get `struct device*`
34 out of the dnode at /dev/sda)
35 [tip] we should pay attention at twifs and add a private_data field
36 under struct v_dnode? (CHECKED)
37 8. (mount) Then, we should refactor on mount/unmount mechanism. (CHECKED)
38 9. (mount) (future) Ability to mount anything? e.g. Linux can mount a disk
39 image file using a so called "loopback" pseudo device. Maybe
40 we can do similar thing in Lunaix? A block device emulation
41 above the regular file when we mount it on.
42 10. (device) device number (dev_t) allocation
43 [good idea] <class>:<subclass>:<uniq_id> composition
46 #include <klibc/string.h>
47 #include <lunaix/dirent.h>
48 #include <lunaix/foptions.h>
49 #include <lunaix/fs.h>
50 #include <lunaix/mm/cake.h>
51 #include <lunaix/mm/page.h>
52 #include <lunaix/mm/valloc.h>
53 #include <lunaix/process.h>
54 #include <lunaix/spike.h>
55 #include <lunaix/syscall.h>
57 #include <lunaix/fs/twifs.h>
// Slab-style ("cake") allocator piles, one per VFS object type.
59 static struct cake_pile* dnode_pile;
60 static struct cake_pile* inode_pile;
61 static struct cake_pile* file_pile;
62 static struct cake_pile* superblock_pile;
63 static struct cake_pile* fd_pile;
// Root of the whole VFS tree and the global dnode hash cache.
65 struct v_dnode* vfs_sysroot;
66 static struct hbucket* dnode_cache;
// LRU zones driving eviction of cached dnodes and inodes.
68 struct lru_zone *dnode_lru, *inode_lru;
// Canonical "..", "." and empty-name strings; hashed once during init.
70 struct hstr vfs_ddot = HSTR("..", 2);
71 struct hstr vfs_dot = HSTR(".", 1);
72 struct hstr vfs_empty = HSTR("", 0);
// Forward declarations (return types are on lines not visible in this chunk).
// The two evictors are the LRU callbacks registered in vfs_init below.
78 vfs_sb_free(struct v_superblock* sb);
81 __vfs_try_evict_dnode(struct lru_node* obj);
84 __vfs_try_evict_inode(struct lru_node* obj);
// One-time VFS bootstrap (enclosing signature is not visible in this chunk).
89 // Create dedicated cake piles for these objects instead of using valloc,
// so internal fragmentation is minimized.
90 dnode_pile = cake_new_pile("dnode_cache", sizeof(struct v_dnode), 1, 0);
91 inode_pile = cake_new_pile("inode_cache", sizeof(struct v_inode), 1, 0);
92 file_pile = cake_new_pile("file_cache", sizeof(struct v_file), 1, 0);
93 fd_pile = cake_new_pile("fd_cache", sizeof(struct v_fd), 1, 0);
// NOTE(review): the result is presumably assigned to superblock_pile on a
// line not visible here — confirm against the full file.
95 cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
// Global dnode hash table, zero-initialized.
97 dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
99 dnode_lru = lru_new_zone(__vfs_try_evict_dnode);
100 inode_lru = lru_new_zone(__vfs_try_evict_inode);
// Pre-hash the well-known "." and ".." names.
102 hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
103 hstr_rehash(&vfs_dot, HSTR_FULL_HASH);
// Allocate the root dnode and pin it with one permanent reference.
106 vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
107 atomic_fetch_add(&vfs_sysroot->ref_count, 1);
// Map (parent dnode, name hash) to a bucket of the global dnode cache.
110 inline struct hbucket*
111 __dcache_hash(struct v_dnode* parent, uint32_t* hash)
113 uint32_t _hash = *hash;
// Mix in the parent's pointer value to reduce collisions between
// identically-named entries under different directories.
114 // (translated) add the parent pointer value to lower collision odds
115 _hash += (uint32_t)parent;
117 _hash = _hash ^ (_hash >> VFS_HASHBITS);
119 return &dnode_cache[_hash & VFS_HASH_MASK];
// Look up a child dnode of `parent` by name in the dcache.
// "" and "." resolve trivially; ".." resolves to the parent's parent
// (or parent itself at the root).
123 vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
125 if (!str->len || HSTR_EQ(str, &vfs_dot))
128 if (HSTR_EQ(str, &vfs_ddot)) {
129 return parent->parent ? parent->parent : parent;
132 uint32_t hash = str->hash;
133 struct hbucket* slot = __dcache_hash(parent, &hash);
// Scan the bucket; entries are matched by full name hash.
135 struct v_dnode *pos, *n;
136 hashtable_bucket_foreach(slot, pos, n, hash_list)
138 if (pos->name.hash == hash) {
// Insert `dnode` into the dcache as a child of `parent`.
// Takes one reference on the dnode for the cache's own linkage.
146 vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
150 atomic_fetch_add(&dnode->ref_count, 1);
151 dnode->parent = parent;
152 llist_append(&parent->children, &dnode->siblings);
154 struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
155 hlist_add(&bucket->head, &dnode->hash_list);
// Unlink `dnode` from its parent and the hash cache, dropping the
// reference taken by vfs_dcache_add. Caller must hold the only other ref.
159 vfs_dcache_remove(struct v_dnode* dnode)
162 assert(dnode->ref_count == 1);
164 llist_delete(&dnode->siblings);
165 hlist_delete(&dnode->hash_list);
167 dnode->parent = NULL;
168 atomic_fetch_sub(&dnode->ref_count, 1);
// Re-hash `dnode` (after a rename) and move it under `new_parent`.
172 vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
176 hstr_rehash(&dnode->name, HSTR_FULL_HASH);
177 vfs_dcache_remove(dnode);
178 vfs_dcache_add(new_parent, dnode);
// Open the file behind `dnode`, producing a new v_file in `*file`.
// Fails early when the dnode has no inode or the fs provides no open op.
182 vfs_open(struct v_dnode* dnode, struct v_file** file)
184 if (!dnode->inode || !dnode->inode->ops->open) {
188 struct v_inode* inode = dnode->inode;
192 struct v_file* vfile = cake_grab(file_pile);
193 memset(vfile, 0, sizeof(*vfile));
195 vfile->dnode = dnode;
196 vfile->inode = inode;
197 vfile->ref_count = ATOMIC_VAR_INIT(1);
198 vfile->ops = inode->default_fops;
// Lazily attach a page cache to regular files on first open.
200 if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
201 struct pcache* pcache = vzalloc(sizeof(struct pcache));
203 pcache->master = inode;
204 inode->pg_cache = pcache;
// Delegate to the fs-specific open; on failure the v_file is released
// (error path lines are partially outside this view).
207 int errno = inode->ops->open(inode, vfile);
209 cake_release(file_pile, vfile);
// Success: the open file pins the dnode and marks its mount busy.
211 atomic_fetch_add(&dnode->ref_count, 1);
213 mnt_mkbusy(dnode->mnt);
// Bind `inode` to `assign_to`, dropping the link held on any inode that
// was previously attached to this dnode.
224 vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
226 if (assign_to->inode) {
227 assign_to->inode->link_count--;
229 assign_to->inode = inode;
// Create a hard link: make dnode `name` reference to_link's inode.
// Cross-filesystem links are rejected (different superblock roots), as are
// filesystems whose inode ops lack a link implementation.
234 vfs_link(struct v_dnode* to_link, struct v_dnode* name)
238 lock_inode(to_link->inode);
239 if (to_link->super_block->root != name->super_block->root) {
241 } else if (!to_link->inode->ops->link) {
243 } else if (!(errno = to_link->inode->ops->link(to_link->inode, name))) {
244 vfs_assign_inode(name, to_link->inode);
246 unlock_inode(to_link->inode);
// Close an open file: after the fs close op succeeds, release the
// dnode/mount pins taken in vfs_open, flush the page cache, and free it.
252 vfs_close(struct v_file* file)
255 if (!(errno = file->ops->close(file))) {
256 atomic_fetch_sub(&file->dnode->ref_count, 1);
257 file->inode->open_count--;
258 mnt_chillax(file->dnode->mnt);
260 pcache_commit_all(file->inode);
261 cake_release(file_pile, file);
// Synchronize a file to backing storage: commit all dirty page-cache
// pages, then invoke the optional fs-specific sync op under the inode lock.
267 vfs_fsync(struct v_file* file)
269 lock_inode(file->inode);
272 pcache_commit_all(file->inode);
274 if (file->ops->sync) {
275 errno = file->ops->sync(file);
278 unlock_inode(file->inode);
// Find the lowest free slot in the current process's fd table and return
// its index via `*fd` (return/error lines are outside this view).
284 vfs_alloc_fdslot(int* fd)
286 for (size_t i = 0; i < VFS_MAX_FD; i++) {
287 if (!__current->fdtable->fds[i]) {
// Superblock allocation body (enclosing signature not visible here;
// presumably vfs_sb_alloc — confirm). Grabs a zeroed superblock and gives
// it an empty per-sb inode hash cache.
298 struct v_superblock* sb = cake_grab(superblock_pile);
299 memset(sb, 0, sizeof(*sb));
300 llist_init_head(&sb->sb_list);
301 sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
// Release a superblock back to its pile.
// NOTE(review): sb->i_cache appears vzalloc'ed at allocation; the vfree
// for it is not visible in this chunk — verify it is freed elsewhere.
306 vfs_sb_free(struct v_superblock* sb)
309 cake_release(superblock_pile, sb);
// LRU eviction callback: a dnode is evictable only when nothing
// references it any more.
313 __vfs_try_evict_dnode(struct lru_node* obj)
315 struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);
317 if (!dnode->ref_count) {
// LRU eviction callback: an inode is evictable once it has neither links
// nor open files.
325 __vfs_try_evict_inode(struct lru_node* obj)
327 struct v_inode* inode = container_of(obj, struct v_inode, lru);
329 if (!inode->link_count && !inode->open_count) {
// Allocate and initialize a fresh dnode named `name` under `parent`.
// On pile exhaustion, evict half the dnode LRU and retry once
// (the retry branching lines are partially outside this view).
337 vfs_d_alloc(struct v_dnode* parent, struct hstr* name)
339 struct v_dnode* dnode = cake_grab(dnode_pile);
341 lru_evict_half(dnode_lru);
343 if (!(dnode = cake_grab(dnode_pile))) {
348 memset(dnode, 0, sizeof(*dnode));
349 llist_init_head(&dnode->children);
350 llist_init_head(&dnode->siblings);
351 mutex_init(&dnode->lock);
// Name storage is heap-allocated (freed in vfs_d_free).
353 dnode->ref_count = ATOMIC_VAR_INIT(0);
354 dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
356 hstrcpy(&dnode->name, name);
// Inherit the parent's superblock (guarded by a parent check not
// visible here — vfs_init calls this with parent == NULL).
359 dnode->super_block = parent->super_block;
362 lru_use_one(dnode_lru, &dnode->lru);
// Destroy a dnode: drop its inode link, detach it and its direct children
// from the dcache, then free the name buffer and the dnode itself.
368 vfs_d_free(struct v_dnode* dnode)
370 assert(dnode->ref_count == 1)
373 assert(dnode->inode->link_count > 0);
374 dnode->inode->link_count--;
377 vfs_dcache_remove(dnode);
378 // Make sure the children de-referencing their parent.
379 // With lru presented, the eviction will be propagated over the entire
380 // detached subtree eventually
381 struct v_dnode *pos, *n;
382 llist_for_each(pos, n, &dnode->children, siblings)
384 vfs_dcache_remove(pos);
387 vfree(dnode->name.value);
388 cake_release(dnode_pile, dnode);
// Find a cached inode of superblock `sb` by inode id; a hit also
// refreshes the inode's LRU position.
392 vfs_i_find(struct v_superblock* sb, uint32_t i_id)
394 struct hbucket* slot = &sb->i_cache[i_id & VFS_HASH_MASK];
395 struct v_inode *pos, *n;
396 hashtable_bucket_foreach(slot, pos, n, hash_list)
398 if (pos->id == i_id) {
399 lru_use_one(inode_lru, &pos->lru);
// (Re-)insert an inode into its superblock's inode hash cache.
// The delete-first makes the operation idempotent for already-hashed inodes.
408 vfs_i_addhash(struct v_inode* inode)
410 struct hbucket* slot = &inode->sb->i_cache[inode->id & VFS_HASH_MASK];
412 hlist_delete(&inode->hash_list);
413 hlist_add(&slot->head, &inode->hash_list);
// Allocate a new inode for superblock `sb`, retrying once after an LRU
// purge when the pile is exhausted. The fs must supply init_inode.
417 vfs_i_alloc(struct v_superblock* sb)
419 assert(sb->ops.init_inode);
421 struct v_inode* inode;
422 if (!(inode = cake_grab(inode_pile))) {
423 lru_evict_half(inode_lru);
424 if (!(inode = cake_grab(inode_pile))) {
429 memset(inode, 0, sizeof(*inode));
430 mutex_init(&inode->lock);
431 llist_init_head(&inode->xattrs);
// Filesystem-specific initialization, then stamp creation times.
433 sb->ops.init_inode(sb, inode);
436 inode->ctime = clock_unixtime();
437 inode->atime = inode->ctime;
438 inode->mtime = inode->ctime;
441 lru_use_one(inode_lru, &inode->lru);
// Destroy an inode: tear down its page cache if present, give the fs a
// final sync, unhash it, and return it to the pile.
446 vfs_i_free(struct v_inode* inode)
448 if (inode->pg_cache) {
449 pcache_release(inode->pg_cache);
450 vfree(inode->pg_cache);
// NOTE(review): ops->sync is called unconditionally here while
// file-level sync checks for NULL — confirm every fs provides it.
452 inode->ops->sync(inode);
453 hlist_delete(&inode->hash_list);
454 cake_release(inode_pile, inode);
457 /* ---- System call definition and support ---- */
459 #define FLOCATE_CREATE_EMPTY 1
// Translate a numeric fd into the current process's v_fd entry; valid
// only when the fd is in range and the slot is occupied.
462 vfs_getfd(int fd, struct v_fd** fd_s)
464 if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {
// Resolve `path` into its parent dir (*fdir) and target dnode (*file).
// With FLOCATE_CREATE_EMPTY, a missing final component is created via the
// parent inode's create op and inserted into the dcache.
471 __vfs_try_locate_file(const char* path,
472 struct v_dnode** fdir,
473 struct v_dnode** file,
// Name of the final path component, filled by the parent walk.
476 char name_str[VFS_NAME_MAXLEN];
477 struct hstr name = HSTR(name_str, 0);
480 vfs_walk(__current->cwd, path, fdir, &name, VFS_WALK_PARENT))) {
// Second walk resolves the last component relative to the parent.
484 errno = vfs_walk(*fdir, name.value, file, NULL, 0);
485 if (errno != ENOENT || !(options & FLOCATE_CREATE_EMPTY)) {
489 struct v_dnode* parent = *fdir;
490 struct v_dnode* file_new = vfs_d_alloc(parent, &name);
498 if (!(errno = parent->inode->ops->create(parent->inode, file_new))) {
499 vfs_dcache_add(parent, file_new);
// Creation failed: discard the speculative dnode.
502 vfs_d_free(file_new);
505 unlock_dnode(parent);
// Open `path` with the given FO_* options and install the resulting file
// into a freshly allocated fd slot of the current process.
511 vfs_do_open(const char* path, int options)
514 struct v_dnode *dentry, *file;
515 struct v_file* ofile = 0;
517 errno = __vfs_try_locate_file(
518 path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);
520 if (errno || (errno = vfs_open(file, &ofile))) {
524 struct v_inode* o_inode = ofile->inode;
526 if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
527 struct v_fd* fd_s = vzalloc(sizeof(*fd_s));
// Branchless append handling: f_pos = fsize when FO_APPEND, else 0
// (the mask is all-ones or all-zeros).
528 ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);
530 fd_s->flags = options;
531 __current->fdtable->fds[fd] = fd_s;
// open(2): thin syscall wrapper over vfs_do_open.
538 __DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
540 int errno = vfs_do_open(path, options);
541 return DO_STATUS_OR_RETURN(errno);
// close(2): drop one reference on the open file; the actual vfs_close
// happens only when this fd held the last reference. On success the
// fd slot is cleared.
544 __DEFINE_LXSYSCALL1(int, close, int, fd)
548 if ((errno = vfs_getfd(fd, &fd_s))) {
552 if (fd_s->file->ref_count > 1) {
553 fd_s->file->ref_count--;
554 } else if ((errno = vfs_close(fd_s->file))) {
559 __current->fdtable->fds[fd] = 0;
562 return DO_STATUS(errno);
// dir_context callback: copy one directory entry's name/type into the
// user dirent stashed in cb_data.
566 __vfs_readdir_callback(struct dir_context* dctx,
571 struct dirent* dent = (struct dirent*)dctx->cb_data;
// NOTE(review): strncpy may leave d_name unterminated at max length —
// verify DIRENT_NAME_MAX_LEN accounts for the NUL.
572 strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);
574 dent->d_type = dtype;
// readdir(2): emit one entry per call, using dent->d_offset as cursor.
// Offsets 0 and 1 synthesize "." and ".." before delegating to the fs.
577 __DEFINE_LXSYSCALL2(int, readdir, int, fd, struct dirent*, dent)
582 if ((errno = vfs_getfd(fd, &fd_s))) {
586 struct v_inode* inode = fd_s->file->inode;
// Only directories may be read this way.
590 if (!(inode->itype & VFS_IFDIR)) {
593 struct dir_context dctx =
594 (struct dir_context){ .cb_data = dent,
595 .index = dent->d_offset,
596 .read_complete_callback =
597 __vfs_readdir_callback };
599 if (dent->d_offset == 0) {
600 __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, 0);
601 } else if (dent->d_offset == 1) {
602 __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, 0);
// fs readdir returns 1 on a produced entry (per this check) — other
// values propagate as status.
605 if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {
616 return DO_STATUS_OR_RETURN(errno);
// read(2): route through the page cache for regular files, or straight to
// the driver for sequential devices / FO_DIRECT fds. Advances f_pos by the
// byte count the lower layer reports via errno (>= 0 on success).
619 __DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
623 if ((errno = vfs_getfd(fd, &fd_s))) {
627 struct v_file* file = fd_s->file;
// Directories cannot be read as byte streams.
628 if ((file->inode->itype & VFS_IFDIR)) {
633 lock_inode(file->inode);
635 file->inode->atime = clock_unixtime();
// Body may block; the syscall is marked interruptible for that window.
637 __SYSCALL_INTERRUPTIBLE({
638 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
639 errno = file->ops->read(file->inode, buf, count, file->f_pos);
641 errno = pcache_read(file->inode, buf, count, file->f_pos);
646 file->f_pos += errno;
647 unlock_inode(file->inode);
651 unlock_inode(file->inode);
654 return DO_STATUS(errno);
// write(2): mirror of read(2) — page cache for regular files, direct
// driver write for sequential devices / FO_DIRECT; updates mtime and f_pos.
657 __DEFINE_LXSYSCALL3(int, write, int, fd, void*, buf, size_t, count)
661 if ((errno = vfs_getfd(fd, &fd_s))) {
665 struct v_file* file = fd_s->file;
666 if ((file->inode->itype & VFS_IFDIR)) {
671 lock_inode(file->inode);
673 file->inode->mtime = clock_unixtime();
675 __SYSCALL_INTERRUPTIBLE({
676 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
677 errno = file->ops->write(file->inode, buf, count, file->f_pos);
679 errno = pcache_write(file->inode, buf, count, file->f_pos);
684 file->f_pos += errno;
685 unlock_inode(file->inode);
689 unlock_inode(file->inode);
692 return DO_STATUS(errno);
// lseek(2): compute the new position (relative to current position or
// file end, per `options` — the SEEK_SET-style branch is outside this
// view) and let the fs seek op validate/commit it.
695 __DEFINE_LXSYSCALL3(int, lseek, int, fd, int, offset, int, options)
699 if ((errno = vfs_getfd(fd, &fd_s))) {
703 struct v_file* file = fd_s->file;
705 lock_inode(file->inode);
707 size_t fpos = file->f_pos;
710 fpos = (size_t)((int)file->f_pos + offset);
713 fpos = (size_t)((int)file->inode->fsize + offset);
719 if (!(errno = file->ops->seek(file->inode, fpos))) {
723 unlock_inode(file->inode);
726 return DO_STATUS(errno);
// Build the absolute path of `dnode` into `buf` by recursing to the root
// first, then appending this component and a trailing delimiter.
// Returns the number of bytes written so far.
730 vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
740 size_t len = vfs_get_path(dnode->parent, buf, size, depth + 1);
// Copy at most the remaining space (may truncate the name).
746 size_t cpy_size = MIN(dnode->name.len, size - len);
747 strncpy(buf + len, dnode->name.value, cpy_size);
751 buf[len++] = VFS_PATH_DELIM;
// Read a symlink's target into `buf` via the fs read_symlink op.
// NOTE(review): strncpy here may leave buf unterminated when the link is
// >= size bytes — confirm callers tolerate that.
758 vfs_readlink(struct v_dnode* dnode, char* buf, size_t size)
761 struct v_inode* inode = dnode->inode;
762 if (inode->ops->read_symlink) {
765 int errno = inode->ops->read_symlink(inode, &link);
766 strncpy(buf, link, size);
// realpathat: resolve the absolute path of the dnode behind `fd`.
774 __DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
778 if ((errno = vfs_getfd(fd, &fd_s))) {
782 struct v_dnode* dnode;
783 errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);
790 return DO_STATUS(errno);
// readlink(2): walk without following the final symlink, then read its
// target into the user buffer.
793 __DEFINE_LXSYSCALL3(int, readlink, const char*, path, char*, buf, size_t, size)
796 struct v_dnode* dnode;
798 vfs_walk(__current->cwd, path, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
799 errno = vfs_readlink(dnode, buf, size);
806 return DO_STATUS(errno);
// readlinkat: like readlink(2) but resolves `pathname` relative to the
// directory behind `dirfd`.
809 __DEFINE_LXSYSCALL4(int,
822 if ((errno = vfs_getfd(dirfd, &fd_s))) {
826 struct v_dnode* dnode;
827 if (!(errno = vfs_walk(
828 fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
// NOTE(review): probable bug — this readlinks the *base directory*
// (fd_s->file->dnode) instead of the freshly walked `dnode`, leaving
// the walk result unused. Compare with readlink(2) above; verify and
// likely change the first argument to `dnode`.
829 errno = vfs_readlink(fd_s->file->dnode, buf, size);
837 return DO_STATUS(errno);
842 When we perform an operation that could affect the layout of a
843 directory (i.e., rename, mkdir, rmdir), we must lock the parent dir
844 whenever possible. This will block any ongoing path walking from
845 reaching it, thus avoiding any partial state.
// rmdir(2): refuse on read-only fs, busy dnodes (extra refs or open
// files), or non-empty directories; otherwise delegate to the parent
// inode's rmdir op and drop the dnode from the dcache.
848 __DEFINE_LXSYSCALL1(int, rmdir, const char*, pathname)
851 struct v_dnode* dnode;
852 if ((errno = vfs_walk(__current->cwd, pathname, &dnode, NULL, 0))) {
853 return DO_STATUS(errno);
858 if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
863 if (dnode->ref_count > 1 || dnode->inode->open_count) {
868 if (!llist_empty(&dnode->children)) {
873 struct v_dnode* parent = dnode->parent;
// Lock the parent dir to block concurrent path walks (see comment
// above about directory-layout-changing operations).
881 lock_inode(parent->inode);
883 if ((dnode->inode->itype & VFS_IFDIR)) {
884 errno = parent->inode->ops->rmdir(parent->inode, dnode);
886 vfs_dcache_remove(dnode);
892 unlock_inode(parent->inode);
893 unlock_dnode(parent);
897 return DO_STATUS(errno);
// mkdir(2): walk to the parent, allocate a dnode for the new component,
// then create it via the parent inode's mkdir op. Rejects read-only fs,
// missing mkdir op, and non-directory parents.
900 __DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
903 struct v_dnode *parent, *dir;
904 char name_value[VFS_NAME_MAXLEN];
905 struct hstr name = HHSTR(name_value, 0, 0);
913 vfs_walk(__current->cwd, path, &parent, &name, VFS_WALK_PARENT))) {
917 dir = vfs_d_alloc(parent, &name);
920 lock_inode(parent->inode);
922 if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
924 } else if (!parent->inode->ops->mkdir) {
926 } else if (!(parent->inode->itype & VFS_IFDIR)) {
928 } else if (!(errno = parent->inode->ops->mkdir(parent->inode, dir))) {
929 vfs_dcache_add(parent, dir);
936 unlock_inode(parent->inode);
937 unlock_dnode(parent);
939 return DO_STATUS(errno);
// Common unlink path for unlink(2)/unlinkat(2): the dnode must not be
// referenced elsewhere, its inode must not be open, and directories are
// rejected (use rmdir instead).
943 __vfs_do_unlink(struct v_dnode* dnode)
945 struct v_inode* inode = dnode->inode;
947 if (dnode->ref_count > 1) {
954 if (inode->open_count) {
956 } else if (!(inode->itype & VFS_IFDIR)) {
957 // The underlying unlink implementation should handle
959 errno = inode->ops->unlink(inode);
// unlink(2): walk the path and remove the entry unless the fs is
// read-only.
972 __DEFINE_LXSYSCALL1(int, unlink, const char*, pathname)
975 struct v_dnode* dnode;
976 if ((errno = vfs_walk(__current->cwd, pathname, &dnode, NULL, 0))) {
979 if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
984 errno = __vfs_do_unlink(dnode);
987 return DO_STATUS(errno);
// unlinkat: unlink(2) with the walk rooted at the directory behind `fd`.
// NOTE(review): unlike unlink(2) above, no FSTYPE_ROFS check is visible
// here — confirm whether that is intentional.
990 __DEFINE_LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
994 if ((errno = vfs_getfd(fd, &fd_s))) {
998 struct v_dnode* dnode;
999 if (!(errno = vfs_walk(fd_s->file->dnode, pathname, &dnode, NULL, 0))) {
1000 errno = __vfs_do_unlink(dnode);
1004 return DO_STATUS(errno);
// link(2): locate the existing file, create an empty dnode for the new
// name, then hard-link them via vfs_link.
1007 __DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
1010 struct v_dnode *dentry, *to_link, *name_dentry, *name_file;
1012 errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
1014 errno = __vfs_try_locate_file(
1015 newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
1018 } else if (name_file) {
1019 errno = vfs_link(to_link, name_file);
1022 return DO_STATUS(errno);
// fsync(2): thin wrapper over vfs_fsync for the file behind `fildes`.
1025 __DEFINE_LXSYSCALL1(int, fsync, int, fildes)
1029 if (!(errno = vfs_getfd(fildes, &fd_s))) {
1030 errno = vfs_fsync(fd_s->file);
1033 return DO_STATUS(errno);
// Duplicate a v_fd entry: shallow-copy the descriptor and take an extra
// reference on the shared open file.
1037 vfs_dup_fd(struct v_fd* old, struct v_fd** new)
1040 struct v_fd* copied = cake_grab(fd_pile);
1042 memcpy(copied, old, sizeof(struct v_fd));
1044 atomic_fetch_add(&old->file->ref_count, 1);
// dup2 semantics: no-op when newfd == oldfd; otherwise close whatever
// occupies newfd, then install a duplicate of oldfd there.
1052 vfs_dup2(int oldfd, int newfd)
1054 if (newfd == oldfd) {
1059 struct v_fd *oldfd_s, *newfd_s;
1060 if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
1064 if (!TEST_FD(newfd)) {
// Close the currently installed file at newfd, if any.
1069 newfd_s = __current->fdtable->fds[newfd];
1070 if (newfd_s && (errno = vfs_close(newfd_s->file))) {
1074 if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1075 __current->fdtable->fds[newfd] = newfd_s;
1080 return DO_STATUS(errno);
// dup2(2): direct wrapper over vfs_dup2.
1083 __DEFINE_LXSYSCALL2(int, dup2, int, oldfd, int, newfd)
1085 return vfs_dup2(oldfd, newfd);
// dup(2): duplicate oldfd into the lowest free fd slot.
1088 __DEFINE_LXSYSCALL1(int, dup, int, oldfd)
1091 struct v_fd *oldfd_s, *newfd_s;
1092 if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
1096 if (!(errno = vfs_alloc_fdslot(&newfd)) &&
1097 !(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1098 __current->fdtable->fds[newfd] = newfd_s;
1103 return DO_STATUS(errno);
// symlink syscall (full parameter list is on lines not visible here):
// sets the symlink target on an existing dnode via set_symlink, rejecting
// read-only filesystems and filesystems without the op.
1106 __DEFINE_LXSYSCALL2(int,
1114 struct v_dnode* dnode;
1115 if ((errno = vfs_walk(__current->cwd, pathname, &dnode, NULL, 0))) {
1118 if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
1122 if (!dnode->inode->ops->set_symlink) {
1127 lock_inode(dnode->inode);
1129 errno = dnode->inode->ops->set_symlink(dnode->inode, link_target);
1131 unlock_inode(dnode->inode);
1134 return DO_STATUS(errno);
// Change the current process's working directory to `dnode`: must be a
// directory; the old cwd (if any) is unpinned and the new one pinned
// (ref count + mount busy count).
1138 __vfs_do_chdir(struct v_dnode* dnode)
1144 if (!(dnode->inode->itype & VFS_IFDIR)) {
1149 if (__current->cwd) {
1150 atomic_fetch_sub(&__current->cwd->ref_count, 1);
1151 mnt_chillax(__current->cwd->mnt);
1154 atomic_fetch_add(&dnode->ref_count, 1);
1155 mnt_mkbusy(dnode->mnt);
1156 __current->cwd = dnode;
1158 unlock_dnode(dnode);
// chdir(2): resolve `path` and delegate to __vfs_do_chdir.
1164 __DEFINE_LXSYSCALL1(int, chdir, const char*, path)
1166 struct v_dnode* dnode;
1169 if ((errno = vfs_walk(__current->cwd, path, &dnode, NULL, 0))) {
1173 errno = __vfs_do_chdir(dnode);
1176 return DO_STATUS(errno);
// fchdir(2): chdir to the directory behind an open fd.
1179 __DEFINE_LXSYSCALL1(int, fchdir, int, fd)
1184 if ((errno = vfs_getfd(fd, &fd_s))) {
1188 errno = __vfs_do_chdir(fd_s->file->dnode);
1191 return DO_STATUS(errno);
// getcwd(2): write the current working directory path into `buf`; a NULL
// cwd means the root, rendered as a single path delimiter.
1194 __DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
1205 if (!__current->cwd) {
1206 *buf = VFS_PATH_DELIM;
1209 len = vfs_get_path(__current->cwd, buf, size, 0);
// NOTE(review): terminating at len + 1 looks like an off-by-one (leaves
// buf[len] unset and may write one past the copied path) — verify
// against vfs_get_path's return convention and the missing lines.
1216 buf[len + 1] = '\0';
// Record the status for the caller (pointer-returning syscall).
1221 __current->k_status = errno;
// Rename `current` onto `target`. Rejects: same inode (no-op case),
// busy dnodes, and cross-superblock renames. Locks current, both parents,
// and target (lock lines partially outside this view) before delegating
// to the fs rename op, then re-hashes `current` under the new parent.
1226 vfs_do_rename(struct v_dnode* current, struct v_dnode* target)
1228 if (current->inode->id == target->inode->id) {
1233 if (current->ref_count > 1 || target->ref_count > 1) {
1237 if (current->super_block != target->super_block) {
1243 struct v_dnode* oldparent = current->parent;
1244 struct v_dnode* newparent = target->parent;
1246 lock_dnode(current);
1249 lock_dnode(oldparent);
1251 lock_dnode(newparent);
// A non-empty target directory cannot be replaced.
1253 if (!llist_empty(&target->children)) {
1255 unlock_dnode(target);
1260 current->inode->ops->rename(current->inode, current, target))) {
1261 unlock_dnode(target);
1265 // re-position current
1266 hstrcpy(&current->name, &target->name);
1267 vfs_dcache_rehash(newparent, current);
1272 unlock_dnode(target);
1275 unlock_dnode(current);
1277 unlock_dnode(oldparent);
1279 unlock_dnode(newparent);
// rename(2): resolve source and destination-parent, create a placeholder
// dnode when the destination does not exist, then run vfs_do_rename.
1284 __DEFINE_LXSYSCALL2(int, rename, const char*, oldpath, const char*, newpath)
// Heap-allocated name buffer (freed on a line outside this view —
// verify all exit paths release it).
1286 struct v_dnode *cur, *target_parent, *target;
1287 struct hstr name = HSTR(valloc(VFS_NAME_MAXLEN), 0);
1290 if ((errno = vfs_walk(__current->cwd, oldpath, &cur, NULL, 0))) {
1294 if ((errno = vfs_walk(
1295 __current->cwd, newpath, &target_parent, &name, VFS_WALK_PARENT))) {
1299 errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
1300 if (errno == ENOENT) {
1301 target = vfs_d_alloc(target_parent, &name);
1302 vfs_dcache_add(target_parent, target);
1312 errno = vfs_do_rename(cur, target);
1316 return DO_STATUS(errno);