3 * @author Lunaixsky (zelong56@gmail.com)
4 * @brief Lunaix virtual file system - an abstraction layer for all file system.
8 * @copyright Copyright (c) 2022
12 // Welcome to The Mountain O'Shit! :)
15 TODO vfs & device todos checklist
17 It is overseen by Twilight Sparkle ;)
19 1. Get inodes hooked into lru (CHECKED)
20 2. Get dnodes hooked into lru (CHECKED)
21 3. Get inodes properly hashed so they can be reused by underlying fs (CHECKED)
22 4. (lru) Add a callback function (or destructor) for eviction. (CHECKED)
23 [good idea] or a constructor/destructor pattern in cake allocator ?
24 5. (mount) Figure out a way to identify a busy mount point before unmount
25 maybe a unified mount_point structure that maintain a referencing
26 counter on any dnodes within the subtree? Such a counter will only
27 increment if a file is opened or a dnode is being used as working
28 directory and decrement conversely. (CHECKED)
29 6. (mount) Ability to track all mount points (including sub-mounts)
30 so we can be confident to clean up everything when we
32 7. (mount) Figure out a way to acquire the device represented by a dnode.
33 so it can be used to mount. (e.g. we wish to get `struct device*`
34 out of the dnode at /dev/sda)
35 [tip] we should pay attention at twifs and add a private_data field
36 under struct v_dnode? (CHECKED)
37 8. (mount) Then, we should refactor on mount/unmount mechanism. (CHECKED)
38 9. (mount) (future) Ability to mount anything? e.g. Linux can mount a disk
39 image file using a so called "loopback" pseudo device. Maybe
40 we can do similar thing in Lunaix? A block device emulation
41 above the regular file when we mount it on.
42 10. (device) device number (dev_t) allocation
43 [good idea] <class>:<subclass>:<uniq_id> composition
46 #include <klibc/string.h>
47 #include <lunaix/dirent.h>
48 #include <lunaix/foptions.h>
49 #include <lunaix/fs.h>
50 #include <lunaix/mm/cake.h>
51 #include <lunaix/mm/page.h>
52 #include <lunaix/mm/valloc.h>
53 #include <lunaix/process.h>
54 #include <lunaix/spike.h>
55 #include <lunaix/syscall.h>
57 #include <lunaix/fs/twifs.h>
// --- VFS global state (excerpt; original line numbers retained in listing) ---

// Dedicated slab ("cake") allocators for the core VFS object types.
59 static struct cake_pile* dnode_pile;
60 static struct cake_pile* inode_pile;
61 static struct cake_pile* file_pile;
62 static struct cake_pile* superblock_pile;
63 static struct cake_pile* fd_pile;

// Root of the VFS tree and the global dnode hash cache.
65 struct v_dnode* vfs_sysroot;
66 static struct hbucket* dnode_cache;

// LRU zones that drive eviction of cached dnodes and inodes.
68 struct lru_zone *dnode_lru, *inode_lru;

// Interned well-known names: "..", "." and the empty string.
70 struct hstr vfs_ddot = HSTR("..", 2);
71 struct hstr vfs_dot = HSTR(".", 1);
72 struct hstr vfs_empty = HSTR("", 0);

// Forward declarations (return types sit on lines elided from this excerpt).
78 vfs_sb_free(struct v_superblock* sb);
81 __vfs_try_evict_dnode(struct lru_node* obj);
84 __vfs_try_evict_inode(struct lru_node* obj);
// vfs_init (fragment — interior lines elided in this excerpt): sets up the
// VFS slab piles, the global dnode hash table, the LRU zones, and the
// self-parented, pinned root dnode.
89 // Create a dedicated cake pile for these objects instead of using valloc, so we can minimize internal fragmentation.
90 dnode_pile = cake_new_pile("dnode_cache", sizeof(struct v_dnode), 1, 0);
91 inode_pile = cake_new_pile("inode_cache", sizeof(struct v_inode), 1, 0);
92 file_pile = cake_new_pile("file_cache", sizeof(struct v_file), 1, 0);
93 fd_pile = cake_new_pile("fd_cache", sizeof(struct v_fd), 1, 0);
// NOTE(review): no assignment is visible for the call below — presumably
// `superblock_pile =` sits on an elided line; verify against full source.
95 cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
97 dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
99 dnode_lru = lru_new_zone(__vfs_try_evict_dnode);
100 inode_lru = lru_new_zone(__vfs_try_evict_inode);
// Pre-compute hashes of the well-known ".." and "." names.
102 hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
103 hstr_rehash(&vfs_dot, HSTR_FULL_HASH);
// The root dnode is its own parent; pin it with an extra reference so the
// LRU can never evict it.
106 vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
107 vfs_sysroot->parent = vfs_sysroot;
108 atomic_fetch_add(&vfs_sysroot->ref_count, 1);
// __dcache_hash (fragment): map (parent dnode, name hash) to a bucket of the
// global dnode cache. Mixing in the parent pointer separates identically
// named children of different directories.
111 inline struct hbucket*
112 __dcache_hash(struct v_dnode* parent, uint32_t* hash)
114 uint32_t _hash = *hash;
115 // Add the parent's pointer value to reduce the chance of collision.
116 _hash += (uint32_t)parent;
// xor-fold the high bits down so the mask below still sees them.
118 _hash = _hash ^ (_hash >> VFS_HASHBITS);
120 return &dnode_cache[_hash & VFS_HASH_MASK];
// vfs_dcache_lookup (fragment): resolve `str` relative to `parent` using the
// dnode cache. "" and "." resolve to parent itself; ".." to parent->parent.
124 vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
126 if (!str->len || HSTR_EQ(str, &vfs_dot))
129 if (HSTR_EQ(str, &vfs_ddot)) {
130 return parent->parent;
// Bucket is derived from both the name hash and the parent pointer.
133 uint32_t hash = str->hash;
134 struct hbucket* slot = __dcache_hash(parent, &hash);
136 struct v_dnode *pos, *n;
137 hashtable_bucket_foreach(slot, pos, n, hash_list)
139 if (pos->name.hash == hash) {

// vfs_dcache_add (fragment): link `dnode` under `parent` (sibling list and
// hash bucket) and take one reference on behalf of the cache.
147 vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
151 atomic_fetch_add(&dnode->ref_count, 1);
152 dnode->parent = parent;
153 llist_append(&parent->children, &dnode->siblings);
155 struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
156 hlist_add(&bucket->head, &dnode->hash_list);

// vfs_dcache_remove (fragment): inverse of vfs_dcache_add. The cache's
// reference must be the only one outstanding (asserted below).
160 vfs_dcache_remove(struct v_dnode* dnode)
163 assert(dnode->ref_count == 1);
165 llist_delete(&dnode->siblings);
166 hlist_delete(&dnode->hash_list);
168 dnode->parent = NULL;
169 atomic_fetch_sub(&dnode->ref_count, 1);

// vfs_dcache_rehash (fragment): re-home `dnode` under `new_parent`,
// recomputing its name hash so it lands in the correct bucket.
173 vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
177 hstr_rehash(&dnode->name, HSTR_FULL_HASH);
178 vfs_dcache_remove(dnode);
179 vfs_dcache_add(new_parent, dnode);
// vfs_open (fragment): materialize a struct v_file for `dnode`, lazily
// attaching a page cache for regular files, then delegate to the
// filesystem's open hook. On success the dnode gains a reference and its
// mount is marked busy.
183 vfs_open(struct v_dnode* dnode, struct v_file** file)
185 if (!dnode->inode || !dnode->inode->ops->open) {
189 struct v_inode* inode = dnode->inode;
193 struct v_file* vfile = cake_grab(file_pile);
194 memset(vfile, 0, sizeof(*vfile));
196 vfile->dnode = dnode;
197 vfile->inode = inode;
198 vfile->ref_count = ATOMIC_VAR_INIT(1);
199 vfile->ops = inode->default_fops;
// Regular files get a page cache on first open.
201 if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
202 struct pcache* pcache = vzalloc(sizeof(struct pcache));
204 pcache->master = inode;
205 inode->pg_cache = pcache;
// Filesystem-specific open; the v_file is released again on failure.
208 int errno = inode->ops->open(inode, vfile);
210 cake_release(file_pile, vfile);
212 atomic_fetch_add(&dnode->ref_count, 1);
214 mnt_mkbusy(dnode->mnt);

// vfs_assign_inode (fragment): point `assign_to` at `inode`, dropping the
// link count of any inode it previously referenced.
225 vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
227 if (assign_to->inode) {
228 assign_to->inode->link_count--;
230 assign_to->inode = inode;
// vfs_link (fragment): create a hard link `name` to `to_link`'s inode.
// Fails when the target is not writable, when the two dnodes live on
// different superblocks, or when the fs provides no link hook.
235 vfs_link(struct v_dnode* to_link, struct v_dnode* name)
239 if ((errno = vfs_check_writable(to_link))) {
243 lock_inode(to_link->inode);
// Hard links must stay within a single filesystem instance.
244 if (to_link->super_block->root != name->super_block->root) {
246 } else if (!to_link->inode->ops->link) {
248 } else if (!(errno = to_link->inode->ops->link(to_link->inode, name))) {
249 vfs_assign_inode(name, to_link->inode);
251 unlock_inode(to_link->inode);
// vfs_pclose (fragment): drop one reference on `file` on behalf of process
// `pid`; on the last reference, invoke the fs close hook, release the dnode
// reference, force-release the inode lock if `pid` still holds it, flush the
// page cache and free the v_file.
257 vfs_pclose(struct v_file* file, pid_t pid)
260 if (file->ref_count > 1) {
261 atomic_fetch_sub(&file->ref_count, 1);
262 } else if (!(errno = file->ops->close(file))) {
263 atomic_fetch_sub(&file->dnode->ref_count, 1);
264 file->inode->open_count--;
266 // Prevent dead lock.
267 // This happens when a process is terminated while blocking on read.
268 // In that case, the process is still holding the inode lock and it will
269 // never get released.
271 * The unlocking should also include an ownership check.
273 * To see why, consider two processes that both open the same file:
275 * Process A: busy reading x
276 * Process B: does nothing with x
277 * Assume that, after a very short time, process B gets terminated
278 * while process A is still busy with its read. By this
279 * design, the inode lock of file x would be released by B rather
280 * than A, and this will cause a probable race condition on A if another
281 * process writes to this file after B exits.
283 if (mutex_on_hold(&file->inode->lock)) {
284 mutex_unlock_for(&file->inode->lock, pid);
286 mnt_chillax(file->dnode->mnt);
288 pcache_commit_all(file->inode);
289 cake_release(file_pile, file);

// vfs_close (fragment): vfs_pclose on behalf of the current process.
295 vfs_close(struct v_file* file)
297 return vfs_pclose(file, __current->pid);

// vfs_fsync (fragment): flush the file's page cache and invoke the optional
// per-file sync hook, under the inode lock.
301 vfs_fsync(struct v_file* file)
304 if ((errno = vfs_check_writable(file->dnode))) {
308 lock_inode(file->inode);
310 pcache_commit_all(file->inode);
313 if (file->ops->sync) {
314 errno = file->ops->sync(file);
317 unlock_inode(file->inode);

// vfs_alloc_fdslot (fragment): linear scan of the current process's fd
// table for the first free slot.
323 vfs_alloc_fdslot(int* fd)
325 for (size_t i = 0; i < VFS_MAX_FD; i++) {
326 if (!__current->fdtable->fds[i]) {
// Superblock allocation (fragment — the enclosing function's signature is on
// an elided line; presumably vfs_sb_alloc): grab a zeroed superblock from
// the pile and give it a private inode hash cache.
337 struct v_superblock* sb = cake_grab(superblock_pile);
338 memset(sb, 0, sizeof(*sb));
339 llist_init_head(&sb->sb_list);
340 sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));

// vfs_sb_free (fragment): return the superblock to its pile.
345 vfs_sb_free(struct v_superblock* sb)
348 cake_release(superblock_pile, sb);

// LRU eviction callback for dnodes (fragment): only unreferenced dnodes are
// eligible for eviction.
352 __vfs_try_evict_dnode(struct lru_node* obj)
354 struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);
356 if (!dnode->ref_count) {

// LRU eviction callback for inodes (fragment): only inodes with no links
// and no open files are eligible.
364 __vfs_try_evict_inode(struct lru_node* obj)
366 struct v_inode* inode = container_of(obj, struct v_inode, lru);
368 if (!inode->link_count && !inode->open_count) {
// vfs_d_alloc (fragment): allocate and initialize a dnode named `name` under
// `parent` (parent may be NULL for the root). On pile exhaustion, half of
// the dnode LRU is evicted and the grab retried.
376 vfs_d_alloc(struct v_dnode* parent, struct hstr* name)
378 struct v_dnode* dnode = cake_grab(dnode_pile);
380 lru_evict_half(dnode_lru);
382 if (!(dnode = cake_grab(dnode_pile))) {
387 memset(dnode, 0, sizeof(*dnode));
388 llist_init_head(&dnode->children);
389 llist_init_head(&dnode->siblings);
390 mutex_init(&dnode->lock);
392 dnode->ref_count = ATOMIC_VAR_INIT(0);
// Name storage is a separate valloc'd buffer (freed in vfs_d_free).
393 dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
395 hstrcpy(&dnode->name, name);
// Inherit superblock and mount point from the parent.
398 dnode->super_block = parent->super_block;
399 dnode->mnt = parent->mnt;
402 lru_use_one(dnode_lru, &dnode->lru);

// vfs_d_free (fragment): tear down a dnode whose only reference is the
// cache's, dropping its inode link and detaching all children.
408 vfs_d_free(struct v_dnode* dnode)
410 assert(dnode->ref_count == 1);
413 assert(dnode->inode->link_count > 0);
414 dnode->inode->link_count--;
417 vfs_dcache_remove(dnode);
418 // Make sure the children de-reference their parent.
419 // With the LRU present, eviction will be propagated over the entire
420 // detached subtree eventually.
421 struct v_dnode *pos, *n;
422 llist_for_each(pos, n, &dnode->children, siblings)
424 vfs_dcache_remove(pos);
427 vfree(dnode->name.value);
428 cake_release(dnode_pile, dnode);

// vfs_i_find (fragment): look up inode `i_id` in the superblock's inode
// cache; a hit is refreshed in the inode LRU.
432 vfs_i_find(struct v_superblock* sb, uint32_t i_id)
434 struct hbucket* slot = &sb->i_cache[i_id & VFS_HASH_MASK];
435 struct v_inode *pos, *n;
436 hashtable_bucket_foreach(slot, pos, n, hash_list)
438 if (pos->id == i_id) {
439 lru_use_one(inode_lru, &pos->lru);

// vfs_i_addhash (fragment): (re)insert the inode into its superblock's hash
// cache; the delete first makes the operation idempotent.
448 vfs_i_addhash(struct v_inode* inode)
450 struct hbucket* slot = &inode->sb->i_cache[inode->id & VFS_HASH_MASK];
452 hlist_delete(&inode->hash_list);
453 hlist_add(&slot->head, &inode->hash_list);

// vfs_i_alloc (fragment): allocate a zeroed inode, let the filesystem
// initialize it via sb->ops.init_inode, and stamp creation times. Same
// evict-and-retry strategy as vfs_d_alloc on pile exhaustion.
457 vfs_i_alloc(struct v_superblock* sb)
459 assert(sb->ops.init_inode);
461 struct v_inode* inode;
462 if (!(inode = cake_grab(inode_pile))) {
463 lru_evict_half(inode_lru);
464 if (!(inode = cake_grab(inode_pile))) {
469 memset(inode, 0, sizeof(*inode));
470 mutex_init(&inode->lock);
471 llist_init_head(&inode->xattrs);
473 sb->ops.init_inode(sb, inode);
476 inode->ctime = clock_unixtime();
477 inode->atime = inode->ctime;
478 inode->mtime = inode->ctime;
481 lru_use_one(inode_lru, &inode->lru);

// vfs_i_free (fragment): release page cache, sync the inode out through the
// fs hook, unhash it and return it to the pile.
486 vfs_i_free(struct v_inode* inode)
488 if (inode->pg_cache) {
489 pcache_release(inode->pg_cache);
490 vfree(inode->pg_cache);
492 inode->ops->sync(inode);
493 hlist_delete(&inode->hash_list);
494 cake_release(inode_pile, inode);
497 /* ---- System call definition and support ---- */

// Option bit for __vfs_try_locate_file: create an empty file if absent.
499 #define FLOCATE_CREATE_EMPTY 1

// vfs_getfd (fragment): validate `fd` and fetch the current process's
// v_fd entry for it.
502 vfs_getfd(int fd, struct v_fd** fd_s)
504 if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {

// __vfs_try_locate_file (fragment): walk `path`, returning the parent dir in
// *fdir and the file in *file. With FLOCATE_CREATE_EMPTY, an ENOENT result
// triggers creation of an empty file via the parent's create hook.
511 __vfs_try_locate_file(const char* path,
512 struct v_dnode** fdir,
513 struct v_dnode** file,
516 char name_str[VFS_NAME_MAXLEN];
517 struct hstr name = HSTR(name_str, 0);
521 if ((errno = vfs_walk_proc(path, fdir, &name, VFS_WALK_PARENT))) {
525 errno = vfs_walk(*fdir, name.value, file, NULL, 0);
// Any error other than ENOENT, or ENOENT without create option: bail out.
526 if (errno != ENOENT || !(options & FLOCATE_CREATE_EMPTY)) {
530 struct v_dnode* parent = *fdir;
531 struct v_dnode* file_new = vfs_d_alloc(parent, &name);
539 if (!(errno = parent->inode->ops->create(parent->inode, file_new))) {
540 vfs_dcache_add(parent, file_new);
// Creation failed: the freshly allocated dnode is released again.
543 vfs_d_free(file_new);
546 unlock_dnode(parent);

// vfs_do_open (fragment): locate (optionally creating) the file, open it,
// allocate an fd slot and populate the process fd table. FO_APPEND places
// f_pos at EOF via the branchless mask `fsize & -(flag != 0)`.
552 vfs_do_open(const char* path, int options)
555 struct v_dnode *dentry, *file;
556 struct v_file* ofile = 0;
558 errno = __vfs_try_locate_file(
559 path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);
561 if (errno || (errno = vfs_open(file, &ofile))) {
565 struct v_inode* o_inode = ofile->inode;
567 if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
568 struct v_fd* fd_s = vzalloc(sizeof(*fd_s));
// f_pos = fsize when FO_APPEND is set, 0 otherwise (branchless).
569 ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);
571 fd_s->flags = options;
572 __current->fdtable->fds[fd] = fd_s;
// sys_open (fragment): thin wrapper over vfs_do_open.
579 __DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
581 int errno = vfs_do_open(path, options);
582 return DO_STATUS_OR_RETURN(errno);

// sys_close (fragment): close the file then clear the fd slot.
585 __DEFINE_LXSYSCALL1(int, close, int, fd)
589 if ((errno = vfs_getfd(fd, &fd_s))) {
593 if ((errno = vfs_close(fd_s->file))) {
598 __current->fdtable->fds[fd] = 0;
601 return DO_STATUS(errno);

// readdir callback (fragment): copy one entry's name/type into the
// user-provided struct dirent carried in dctx->cb_data.
605 __vfs_readdir_callback(struct dir_context* dctx,
610 struct dirent* dent = (struct dirent*)dctx->cb_data;
// NOTE(review): strncpy may leave d_name unterminated when name is
// exactly DIRENT_NAME_MAX_LEN long — verify the buffer contract.
611 strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);
613 dent->d_type = dtype;

// sys_readdir (fragment): emit synthetic "." and ".." for offsets 0/1,
// otherwise delegate to the fs readdir hook.
616 __DEFINE_LXSYSCALL2(int, readdir, int, fd, struct dirent*, dent)
621 if ((errno = vfs_getfd(fd, &fd_s))) {
625 struct v_inode* inode = fd_s->file->inode;
629 if (!(inode->itype & VFS_IFDIR)) {
632 struct dir_context dctx =
633 (struct dir_context){ .cb_data = dent,
634 .index = dent->d_offset,
635 .read_complete_callback =
636 __vfs_readdir_callback };
638 if (dent->d_offset == 0) {
639 __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, DT_DIR);
640 } else if (dent->d_offset == 1) {
641 __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, DT_DIR);
644 if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {
655 return DO_STATUS_OR_RETURN(errno);

// sys_read (fragment): sequential devices and FO_DIRECT bypass the page
// cache; a non-negative result advances f_pos by the byte count.
658 __DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
662 if ((errno = vfs_getfd(fd, &fd_s))) {
666 struct v_file* file = fd_s->file;
667 if ((file->inode->itype & VFS_IFDIR)) {
672 lock_inode(file->inode);
674 file->inode->atime = clock_unixtime();
676 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
677 errno = file->ops->read(file->inode, buf, count, file->f_pos);
679 errno = pcache_read(file->inode, buf, count, file->f_pos);
683 file->f_pos += errno;
684 unlock_inode(file->inode);
688 unlock_inode(file->inode);
691 return DO_STATUS(errno);

// sys_write (fragment): mirror of sys_read for the write path, with an
// additional writability check and mtime update.
694 __DEFINE_LXSYSCALL3(int, write, int, fd, void*, buf, size_t, count)
698 if ((errno = vfs_getfd(fd, &fd_s))) {
702 struct v_file* file = fd_s->file;
704 if ((errno = vfs_check_writable(file->dnode))) {
708 if ((file->inode->itype & VFS_IFDIR)) {
713 lock_inode(file->inode);
715 file->inode->mtime = clock_unixtime();
717 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
718 errno = file->ops->write(file->inode, buf, count, file->f_pos);
720 errno = pcache_write(file->inode, buf, count, file->f_pos);
724 file->f_pos += errno;
725 unlock_inode(file->inode);
729 unlock_inode(file->inode);
732 return DO_STATUS(errno);

// sys_lseek (fragment): compute the new position with overflow-checked
// signed addition (relative to f_pos or fsize depending on `options`),
// then let the fs seek hook validate it.
735 __DEFINE_LXSYSCALL3(int, lseek, int, fd, int, offset, int, options)
739 if ((errno = vfs_getfd(fd, &fd_s))) {
743 struct v_file* file = fd_s->file;
745 if (!file->ops->seek) {
750 lock_inode(file->inode);
753 int fpos = file->f_pos;
756 overflow = __builtin_sadd_overflow((int)file->f_pos, offset, &fpos);
760 __builtin_sadd_overflow((int)file->inode->fsize, offset, &fpos);
768 } else if (!(errno = file->ops->seek(file->inode, fpos))) {
772 unlock_inode(file->inode);
775 return DO_STATUS(errno);
// vfs_get_path (fragment): recursively build the absolute path of `dnode`
// into `buf` (root detected by dnode->parent == dnode), separating
// components with VFS_PATH_DELIM.
779 vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
791 if (dnode->parent != dnode) {
792 len = vfs_get_path(dnode->parent, buf, size, depth + 1);
// Append a delimiter unless the parent path already ends with one.
799 if (!len || buf[len - 1] != VFS_PATH_DELIM) {
800 buf[len++] = VFS_PATH_DELIM;
803 size_t cpy_size = MIN(dnode->name.len, size - len);
804 strncpy(buf + len, dnode->name.value, cpy_size);

// vfs_readlink (fragment): fetch the symlink target through the fs hook and
// copy it to the caller's buffer.
811 vfs_readlink(struct v_dnode* dnode, char* buf, size_t size)
814 struct v_inode* inode = dnode->inode;
815 if (inode->ops->read_symlink) {
818 int errno = inode->ops->read_symlink(inode, &link);
// NOTE(review): strncpy may leave buf unterminated if the target fills
// `size` exactly — confirm callers tolerate that.
819 strncpy(buf, link, size);

// vfs_get_dtype (fragment): map a VFS inode type to a dirent DT_* value
// (body elided from this excerpt).
828 vfs_get_dtype(int itype)

// sys_realpathat (fragment): resolve an fd's dnode to an absolute path.
840 __DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
844 if ((errno = vfs_getfd(fd, &fd_s))) {
848 struct v_dnode* dnode;
849 errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);
856 return DO_STATUS(errno);

// sys_readlink (fragment): walk without following the final symlink, then
// read its target.
859 __DEFINE_LXSYSCALL3(int, readlink, const char*, path, char*, buf, size_t, size)
862 struct v_dnode* dnode;
863 if (!(errno = vfs_walk_proc(path, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
864 errno = vfs_readlink(dnode, buf, size);
871 return DO_STATUS(errno);

// sys_readlinkat (fragment): same as readlink, relative to dirfd.
874 __DEFINE_LXSYSCALL4(int,
887 if ((errno = vfs_getfd(dirfd, &fd_s))) {
891 struct v_dnode* dnode;
892 if (!(errno = vfs_walk(
893 fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
// NOTE(review): this reads the symlink of the *directory* dnode, not the
// freshly walked `dnode` — looks like it should be vfs_readlink(dnode, ...);
// verify against the full source.
894 errno = vfs_readlink(fd_s->file->dnode, buf, size);
902 return DO_STATUS(errno);
907 When we perform operation that could affect the layout of
908 directory (i.e., rename, mkdir, rmdir). We must lock the parent dir
909 whenever possible. This will block any ongoing path walking to reach
910 it, hence avoiding any partial state.
// sys_rmdir (fragment): remove an empty, unreferenced, writable directory
// via the parent's rmdir hook, then drop it from the dnode cache.
913 __DEFINE_LXSYSCALL1(int, rmdir, const char*, pathname)
916 struct v_dnode* dnode;
917 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
918 return DO_STATUS(errno);
923 if ((errno = vfs_check_writable(dnode))) {
927 if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
// Busy check: extra dnode references or open files forbid removal.
932 if (dnode->ref_count > 1 || dnode->inode->open_count) {
// Only empty directories may be removed.
937 if (!llist_empty(&dnode->children)) {
942 struct v_dnode* parent = dnode->parent;
950 lock_inode(parent->inode);
952 if ((dnode->inode->itype & VFS_IFDIR)) {
953 errno = parent->inode->ops->rmdir(parent->inode, dnode);
955 vfs_dcache_remove(dnode);
961 unlock_inode(parent->inode);
962 unlock_dnode(parent);
966 return DO_STATUS(errno);

// sys_mkdir (fragment): allocate a dnode for the new name and create the
// directory through the parent's mkdir hook; cache it on success.
969 __DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
972 struct v_dnode *parent, *dir;
973 char name_value[VFS_NAME_MAXLEN];
974 struct hstr name = HHSTR(name_value, 0, 0);
976 if ((errno = vfs_walk_proc(path, &parent, &name, VFS_WALK_PARENT))) {
980 if ((errno = vfs_check_writable(parent))) {
984 if (!(dir = vfs_d_alloc(parent, &name))) {
990 lock_inode(parent->inode);
992 if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
994 } else if (!parent->inode->ops->mkdir) {
996 } else if (!(parent->inode->itype & VFS_IFDIR)) {
998 } else if (!(errno = parent->inode->ops->mkdir(parent->inode, dir))) {
999 vfs_dcache_add(parent, dir);
1006 unlock_inode(parent->inode);
1007 unlock_dnode(parent);
1009 return DO_STATUS(errno);

// __vfs_do_unlink (fragment): shared helper of unlink/unlinkat. Refuses
// busy dnodes, open inodes and directories; otherwise defers to the fs
// unlink hook.
1013 __vfs_do_unlink(struct v_dnode* dnode)
1016 struct v_inode* inode = dnode->inode;
1018 if (dnode->ref_count > 1) {
1022 if ((errno = vfs_check_writable(dnode))) {
1028 if (inode->open_count) {
1030 } else if (!(inode->itype & VFS_IFDIR)) {
1031 // The underlying unlink implementation should handle
1033 errno = inode->ops->unlink(inode);
1041 unlock_inode(inode);

// sys_unlink (fragment): path walk + __vfs_do_unlink.
1046 __DEFINE_LXSYSCALL1(int, unlink, const char*, pathname)
1049 struct v_dnode* dnode;
1050 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
1054 errno = __vfs_do_unlink(dnode);
1057 return DO_STATUS(errno);

// sys_unlinkat (fragment): unlink relative to an open directory fd.
1060 __DEFINE_LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
1064 if ((errno = vfs_getfd(fd, &fd_s))) {
1068 struct v_dnode* dnode;
1069 if (!(errno = vfs_walk(fd_s->file->dnode, pathname, &dnode, NULL, 0))) {
1070 errno = __vfs_do_unlink(dnode);
1074 return DO_STATUS(errno);

// sys_link (fragment): locate the source, create an empty placeholder at
// the destination, then hard-link them with vfs_link.
1077 __DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
1080 struct v_dnode *dentry, *to_link, *name_dentry, *name_file;
1082 errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
1084 errno = __vfs_try_locate_file(
1085 newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
1088 } else if (name_file) {
1089 errno = vfs_link(to_link, name_file);
1092 return DO_STATUS(errno);
// sys_fsync (fragment): look up the fd and delegate to vfs_fsync.
1095 __DEFINE_LXSYSCALL1(int, fsync, int, fildes)
1100 if (!(errno = vfs_getfd(fildes, &fd_s))) {
1101 errno = vfs_fsync(fd_s->file);
1104 return DO_STATUS(errno);

// vfs_dup_fd (fragment): clone a v_fd entry; the shared v_file gains a
// reference.
1108 vfs_dup_fd(struct v_fd* old, struct v_fd** new)
1111 struct v_fd* copied = cake_grab(fd_pile);
1113 memcpy(copied, old, sizeof(struct v_fd));
1115 atomic_fetch_add(&old->file->ref_count, 1);

// vfs_dup2 (fragment): POSIX dup2 semantics — no-op when newfd == oldfd,
// otherwise close whatever occupies newfd and install a duplicate of oldfd.
1123 vfs_dup2(int oldfd, int newfd)
1125 if (newfd == oldfd) {
1130 struct v_fd *oldfd_s, *newfd_s;
1131 if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
1135 if (!TEST_FD(newfd)) {
1140 newfd_s = __current->fdtable->fds[newfd];
1141 if (newfd_s && (errno = vfs_close(newfd_s->file))) {
1145 if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1146 __current->fdtable->fds[newfd] = newfd_s;
1151 return DO_STATUS(errno);

// sys_dup2 (fragment): direct wrapper over vfs_dup2.
1154 __DEFINE_LXSYSCALL2(int, dup2, int, oldfd, int, newfd)
1156 return vfs_dup2(oldfd, newfd);

// sys_dup (fragment): duplicate oldfd into the lowest free slot.
1159 __DEFINE_LXSYSCALL1(int, dup, int, oldfd)
1162 struct v_fd *oldfd_s, *newfd_s;
1163 if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
1167 if (!(errno = vfs_alloc_fdslot(&newfd)) &&
1168 !(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1169 __current->fdtable->fds[newfd] = newfd_s;
1174 return DO_STATUS(errno);

// sys_symlink (fragment — remaining parameters elided): set a symlink
// target through the fs set_symlink hook, under the inode lock.
1177 __DEFINE_LXSYSCALL2(int,
1185 struct v_dnode* dnode;
1186 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
// NOTE(review): assignment-as-condition without double parens — compilers
// will warn; consider `if ((errno = ...))` for consistency with the rest.
1190 if (errno = vfs_check_writable(dnode)) {
1194 if (!dnode->inode->ops->set_symlink) {
1199 lock_inode(dnode->inode);
1201 errno = dnode->inode->ops->set_symlink(dnode->inode, link_target);
1203 unlock_inode(dnode->inode);
1206 return DO_STATUS(errno);

// vfs_ref_dnode (fragment): take a reference and mark the mount busy.
1210 vfs_ref_dnode(struct v_dnode* dnode)
1212 atomic_fetch_add(&dnode->ref_count, 1);
1213 mnt_mkbusy(dnode->mnt);

// vfs_unref_dnode (fragment): inverse of vfs_ref_dnode.
1217 vfs_unref_dnode(struct v_dnode* dnode)
1219 atomic_fetch_sub(&dnode->ref_count, 1);
1220 mnt_chillax(dnode->mnt);
// vfs_do_chdir (fragment): switch `proc`'s working directory to `dnode`
// (must be a directory), swapping the reference from the old cwd.
1224 vfs_do_chdir(struct proc_info* proc, struct v_dnode* dnode)
1230 if (!(dnode->inode->itype & VFS_IFDIR)) {
1236 vfs_unref_dnode(proc->cwd);
1239 vfs_ref_dnode(dnode);
1242 unlock_dnode(dnode);

// sys_chdir (fragment): path walk + vfs_do_chdir for the current process.
1248 __DEFINE_LXSYSCALL1(int, chdir, const char*, path)
1250 struct v_dnode* dnode;
1253 if ((errno = vfs_walk_proc(path, &dnode, NULL, 0))) {
1257 errno = vfs_do_chdir(__current, dnode);
1260 return DO_STATUS(errno);

// sys_fchdir (fragment): chdir to the directory behind an open fd.
1263 __DEFINE_LXSYSCALL1(int, fchdir, int, fd)
1268 if ((errno = vfs_getfd(fd, &fd_s))) {
1272 errno = vfs_do_chdir(__current, fd_s->file->dnode);
1275 return DO_STATUS(errno);

// sys_getcwd (fragment): a NULL cwd means the process is at the root; the
// path delimiter alone is returned in that case.
1278 __DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
1289 if (!__current->cwd) {
1290 *buf = VFS_PATH_DELIM;
1293 len = vfs_get_path(__current->cwd, buf, size, 0);
1300 buf[len + 1] = '\0';
1305 __current->k_status = errno;

// vfs_do_rename (fragment): same-superblock rename. Rejects busy dnodes and
// non-empty targets; on success the fs rename hook runs and `current` is
// renamed and rehashed under the target's parent.
1310 vfs_do_rename(struct v_dnode* current, struct v_dnode* target)
1313 if (current->inode->id == target->inode->id) {
// NOTE(review): assignment-as-condition without double parens (cf. the
// `if ((errno = ...))` style used elsewhere in this file).
1318 if (errno = vfs_check_writable(current)) {
1322 if (current->ref_count > 1 || target->ref_count > 1) {
// Cross-filesystem rename is not supported.
1326 if (current->super_block != target->super_block) {
1330 struct v_dnode* oldparent = current->parent;
1331 struct v_dnode* newparent = target->parent;
1333 lock_dnode(current);
1336 lock_dnode(oldparent);
1338 lock_dnode(newparent);
// An existing, non-empty target directory cannot be replaced.
1340 if (!llist_empty(&target->children)) {
1342 unlock_dnode(target);
1347 current->inode->ops->rename(current->inode, current, target))) {
1348 unlock_dnode(target);
1352 // re-position current
// NOTE(review): "¤t" below is mojibake for "&current" (HTML-entity
// corruption of "&curren"); restore `hstrcpy(&current->name, ...)`.
1353 hstrcpy(¤t->name, &target->name);
1354 vfs_dcache_rehash(newparent, current);
1359 unlock_dnode(target);
1362 unlock_dnode(current);
1364 unlock_dnode(oldparent);
1366 unlock_dnode(newparent);

// sys_rename (fragment): resolve the source, resolve (or allocate) the
// target under its parent, then delegate to vfs_do_rename.
1371 __DEFINE_LXSYSCALL2(int, rename, const char*, oldpath, const char*, newpath)
1373 struct v_dnode *cur, *target_parent, *target;
// Name buffer is heap-allocated here (valloc), unlike the stack buffers
// used by mkdir/__vfs_try_locate_file.
1374 struct hstr name = HSTR(valloc(VFS_NAME_MAXLEN), 0);
1377 if ((errno = vfs_walk_proc(oldpath, &cur, NULL, 0))) {
1381 if ((errno = vfs_walk(
1382 __current->cwd, newpath, &target_parent, &name, VFS_WALK_PARENT))) {
1386 errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
// Target does not exist yet: fabricate a dnode for the new name.
1387 if (errno == ENOENT) {
1388 target = vfs_d_alloc(target_parent, &name);
1389 vfs_dcache_add(target_parent, target);
1399 errno = vfs_do_rename(cur, target);
1403 return DO_STATUS(errno);