3 * @author Lunaixsky (zelong56@gmail.com)
4 * @brief Lunaix virtual file system - an abstraction layer for all file system.
8 * @copyright Copyright (c) 2022
12 // Welcome to The Mountain O'Shit! :)
15 TODO vfs & device todos checklist
17 It is overseen by Twilight Sparkle ;)
19 1. Get inodes hooked into lru (CHECKED)
20 2. Get dnodes hooked into lru (CHECKED)
21 3. Get inodes properly hashed so they can be reused by underlying fs (CHECKED)
22 4. (lru) Add a callback function (or destructor) for eviction. (CHECKED)
23 [good idea] or a constructor/destructor pattern in cake allocator ?
24 5. (mount) Figure out a way to identify a busy mount point before unmount
25 maybe a unified mount_point structure that maintain a referencing
26 counter on any dnodes within the subtree? Such a counter will only
27 increment if a file is opened or a dnode is being used as working
28 directory and decrementing conversely. (CHECKED)
29 6. (mount) Ability to track all mount points (including sub-mounts)
30 so we can be confident to clean up everything when we
32 7. (mount) Figure out a way to acquire the device represented by a dnode.
33 so it can be used to mount. (e.g. we wish to get `struct device*`
34 out of the dnode at /dev/sda)
35 [tip] we should pay attention at twifs and add a private_data field
36 under struct v_dnode? (CHECKED)
37 8. (mount) Then, we should refactor on mount/unmount mechanism. (CHECKED)
38 9. (mount) (future) Ability to mount any thing? e.g. Linux can mount a disk
39 image file using a so called "loopback" pseudo device. Maybe
40 we can do similar thing in Lunaix? A block device emulation
41 above the regular file when we mount it on.
42 10. (device) device number (dev_t) allocation
43 [good idea] <class>:<subclass>:<uniq_id> composition
46 #include <klibc/string.h>
47 #include <lunaix/dirent.h>
48 #include <lunaix/foptions.h>
49 #include <lunaix/fs.h>
50 #include <lunaix/mm/cake.h>
51 #include <lunaix/mm/page.h>
52 #include <lunaix/mm/valloc.h>
53 #include <lunaix/process.h>
54 #include <lunaix/spike.h>
55 #include <lunaix/syscall.h>
57 #include <lunaix/fs/twifs.h>
59 static struct cake_pile* dnode_pile;
60 static struct cake_pile* inode_pile;
61 static struct cake_pile* file_pile;
62 static struct cake_pile* superblock_pile;
63 static struct cake_pile* fd_pile;
65 struct v_dnode* vfs_sysroot;
66 static struct hbucket* dnode_cache;
68 struct lru_zone *dnode_lru, *inode_lru;
70 struct hstr vfs_ddot = HSTR("..", 2);
71 struct hstr vfs_dot = HSTR(".", 1);
72 struct hstr vfs_empty = HSTR("", 0);
// Forward declarations for file-local helpers referenced before definition.
78 vfs_sb_free(struct v_superblock* sb);
81 __vfs_try_evict_dnode(struct lru_node* obj);
84 __vfs_try_evict_inode(struct lru_node* obj);
89 // Create a dedicated cake (slab) pile for each object type instead of
// using valloc, so that internal fragmentation is minimised.
90 dnode_pile = cake_new_pile("dnode_cache", sizeof(struct v_dnode), 1, 0);
91 inode_pile = cake_new_pile("inode_cache", sizeof(struct v_inode), 1, 0);
92 file_pile = cake_new_pile("file_cache", sizeof(struct v_file), 1, 0);
93 fd_pile = cake_new_pile("fd_cache", sizeof(struct v_fd), 1, 0);
// NOTE(review): result of this pile is not stored on the visible line —
// presumably assigned to superblock_pile on an elided line; confirm.
95 cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
// Global dnode hash table (the dcache buckets), zero-initialised.
97 dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
// LRU zones with per-type eviction callbacks.
99 dnode_lru = lru_new_zone(__vfs_try_evict_dnode);
100 inode_lru = lru_new_zone(__vfs_try_evict_inode);
// Pre-compute hashes for the "." and ".." path components.
102 hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
103 hstr_rehash(&vfs_dot, HSTR_FULL_HASH);
// Allocate the filesystem root dnode; it is its own parent and is
// pinned with an extra reference so it can never be evicted.
106 vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
107 vfs_sysroot->parent = vfs_sysroot;
108 atomic_fetch_add(&vfs_sysroot->ref_count, 1);
// Compute the dcache bucket for a (parent dnode, name hash) pair.
111 inline struct hbucket*
112 __dcache_hash(struct v_dnode* parent, uint32_t* hash)
114 uint32_t _hash = *hash;
115 // Mix in the parent's pointer value to reduce the chance of collision
// between identically-named entries under different directories.
116 _hash += (uint32_t)parent;
// Fold high bits down so they participate in the bucket index.
118 _hash = _hash ^ (_hash >> VFS_HASHBITS);
120 return &dnode_cache[_hash & VFS_HASH_MASK];
// Look up a child of `parent` named `str` in the dcache.
// Empty name and "." resolve to parent itself (return presumably on an
// elided line); ".." resolves to parent->parent.
124 vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
126 if (!str->len || HSTR_EQ(str, &vfs_dot))
129 if (HSTR_EQ(str, &vfs_ddot)) {
130 return parent->parent;
133 uint32_t hash = str->hash;
134 struct hbucket* slot = __dcache_hash(parent, &hash);
136 struct v_dnode *pos, *n;
137 hashtable_bucket_foreach(slot, pos, n, hash_list)
// Hash equality is the visible match criterion; a full string compare,
// if any, is not visible in this view — TODO confirm.
139 if (pos->name.hash == hash) {
// Insert `dnode` under `parent` in the dcache: the cache takes one
// reference, links the sibling list, and hashes the entry into a bucket.
147 vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
151 atomic_fetch_add(&dnode->ref_count, 1);
152 dnode->parent = parent;
153 llist_append(&parent->children, &dnode->siblings);
155 struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
156 hlist_add(&bucket->head, &dnode->hash_list);
// Remove `dnode` from the dcache. The assert requires that the cache's
// own reference is the only one left, then that reference is dropped.
160 vfs_dcache_remove(struct v_dnode* dnode)
163 assert(dnode->ref_count == 1);
165 llist_delete(&dnode->siblings);
166 hlist_delete(&dnode->hash_list);
168 dnode->parent = NULL;
169 atomic_fetch_sub(&dnode->ref_count, 1);
// Re-home `dnode` under `new_parent`: recompute the name hash, then
// remove and re-add so it lands in the correct bucket.
173 vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
177 hstr_rehash(&dnode->name, HSTR_FULL_HASH);
178 vfs_dcache_remove(dnode);
179 vfs_dcache_add(new_parent, dnode);
// Open the file backing `dnode`, producing a new v_file in `*file`.
// Requires the dnode to have an inode with an open op.
183 vfs_open(struct v_dnode* dnode, struct v_file** file)
185 if (!dnode->inode || !dnode->inode->ops->open) {
189 struct v_inode* inode = dnode->inode;
// Grab a zeroed v_file from the slab and wire it to dnode/inode.
193 struct v_file* vfile = cake_grab(file_pile);
194 memset(vfile, 0, sizeof(*vfile));
196 vfile->dnode = dnode;
197 vfile->inode = inode;
198 vfile->ref_count = ATOMIC_VAR_INIT(1);
199 vfile->ops = inode->default_fops;
// Lazily create the page cache on first open of a regular file.
201 if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
202 struct pcache* pcache = vzalloc(sizeof(struct pcache));
204 pcache->master = inode;
205 inode->pg_cache = pcache;
// Delegate to the filesystem's open; on failure (branch presumably on an
// elided line) the v_file is released back to the slab.
208 int errno = inode->ops->open(inode, vfile);
210 cake_release(file_pile, vfile);
// Success path: the open file holds a dnode reference and marks the
// mount busy so it cannot be unmounted underneath us.
212 atomic_fetch_add(&dnode->ref_count, 1);
214 mnt_mkbusy(dnode->mnt);
// Bind `inode` to `assign_to`, dropping a link count from any inode it
// previously pointed at.
225 vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
227 if (assign_to->inode) {
228 assign_to->inode->link_count--;
230 assign_to->inode = inode;
// Create a hard link: make dnode `name` refer to `to_link`'s inode.
// Fails when the target fs is not writable, when the link would cross
// superblocks, or when the fs provides no link op.
235 vfs_link(struct v_dnode* to_link, struct v_dnode* name)
239 if ((errno = vfs_check_writable(to_link))) {
243 lock_inode(to_link->inode);
244 if (to_link->super_block->root != name->super_block->root) {
246 } else if (!to_link->inode->ops->link) {
248 } else if (!(errno = to_link->inode->ops->link(to_link->inode, name))) {
249 vfs_assign_inode(name, to_link->inode);
251 unlock_inode(to_link->inode);
// Close `file`: either drop one reference, or (last reference) invoke the
// fs close op, release the dnode reference and open count.
257 vfs_close(struct v_file* file)
260 if (file->ref_count > 1) {
261 atomic_fetch_sub(&file->ref_count, 1);
262 } else if (!(errno = file->ops->close(file))) {
263 atomic_fetch_sub(&file->dnode->ref_count, 1);
264 file->inode->open_count--;
267 // This happened when process is terminated while blocking on read.
268 // In that case, the process is still holding the inode lock and it will
269 // never get released.
270 // FIXME is this a good solution?
272 * Consider two process both open the same file both with fd=x.
273 * Process A: busy on reading x
274 * Process B: do nothing with x
275 * Assume that, after a very short time, process B get terminated while
276 * process A is still busy in its reading business. By this design, the
277 * inode lock of this file x is get released by B rather than A. And
278 * this will cause a probable race condition on A if other process is
279 * writing to this file later after B exit.
281 * A possible solution is to add a owner identification in the lock
282 * context, so only the lock holder can do the release.
// Best-effort unlock of a lock possibly left held by a killed reader;
// see the race-condition caveat above.
284 if (mutex_on_hold(&file->inode->lock)) {
285 unlock_inode(file->inode);
287 mnt_chillax(file->dnode->mnt);
// Flush dirty page-cache data before the v_file is recycled.
289 pcache_commit_all(file->inode);
290 cake_release(file_pile, file);
// Synchronise `file` to backing storage: commit the page cache, then run
// the fs-specific sync op if one exists. Requires a writable target.
296 vfs_fsync(struct v_file* file)
299 if ((errno = vfs_check_writable(file->dnode))) {
303 lock_inode(file->inode);
305 pcache_commit_all(file->inode);
308 if (file->ops->sync) {
309 errno = file->ops->sync(file);
312 unlock_inode(file->inode);
// Find the lowest free slot in the current process's fd table and store
// its index into `*fd` (store presumably on an elided line).
318 vfs_alloc_fdslot(int* fd)
320 for (size_t i = 0; i < VFS_MAX_FD; i++) {
321 if (!__current->fdtable->fds[i]) {
// Allocate and zero a superblock, with its own per-sb inode hash table.
332 struct v_superblock* sb = cake_grab(superblock_pile);
333 memset(sb, 0, sizeof(*sb));
334 llist_init_head(&sb->sb_list);
335 sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
// Release a superblock back to its slab.
// NOTE(review): freeing of sb->i_cache is not visible here — presumably
// done on an elided line; confirm to rule out a leak.
340 vfs_sb_free(struct v_superblock* sb)
343 cake_release(superblock_pile, sb);
// LRU eviction callback for dnodes: only unreferenced entries may go.
347 __vfs_try_evict_dnode(struct lru_node* obj)
349 struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);
351 if (!dnode->ref_count) {
// LRU eviction callback for inodes: evictable only when there is no
// remaining link and no open file on it.
359 __vfs_try_evict_inode(struct lru_node* obj)
361 struct v_inode* inode = container_of(obj, struct v_inode, lru);
363 if (!inode->link_count && !inode->open_count) {
// Allocate and initialise a dnode named `name` under `parent`.
371 vfs_d_alloc(struct v_dnode* parent, struct hstr* name)
373 struct v_dnode* dnode = cake_grab(dnode_pile);
// On allocation pressure (failure check presumably on an elided line),
// evict half the dnode LRU and retry once.
375 lru_evict_half(dnode_lru);
377 if (!(dnode = cake_grab(dnode_pile))) {
382 memset(dnode, 0, sizeof(*dnode));
383 llist_init_head(&dnode->children);
384 llist_init_head(&dnode->siblings);
385 mutex_init(&dnode->lock);
// Fresh dnode starts unreferenced, with a heap-allocated name buffer.
387 dnode->ref_count = ATOMIC_VAR_INIT(0);
388 dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
390 hstrcpy(&dnode->name, name);
// Inherit superblock and mount point from the parent (parent != NULL
// guard presumably on an elided line — the sysroot passes NULL).
393 dnode->super_block = parent->super_block;
394 dnode->mnt = parent->mnt;
397 lru_use_one(dnode_lru, &dnode->lru);
// Destroy a dnode whose only outstanding reference is the dcache's.
403 vfs_d_free(struct v_dnode* dnode)
405 assert(dnode->ref_count == 1);
// Drop the inode link held by this name (inode != NULL guard presumably
// on an elided line).
408 assert(dnode->inode->link_count > 0);
409 dnode->inode->link_count--;
412 vfs_dcache_remove(dnode);
413 // Make sure the children de-referencing their parent.
414 // With lru presented, the eviction will be propagated over the entire
415 // detached subtree eventually
416 struct v_dnode *pos, *n;
417 llist_for_each(pos, n, &dnode->children, siblings)
419 vfs_dcache_remove(pos);
// Free the heap-allocated name buffer, then the dnode itself.
422 vfree(dnode->name.value);
423 cake_release(dnode_pile, dnode);
// Find a cached inode by id in the superblock's inode hash table; a hit
// also refreshes the inode's LRU position.
427 vfs_i_find(struct v_superblock* sb, uint32_t i_id)
429 struct hbucket* slot = &sb->i_cache[i_id & VFS_HASH_MASK];
430 struct v_inode *pos, *n;
431 hashtable_bucket_foreach(slot, pos, n, hash_list)
433 if (pos->id == i_id) {
434 lru_use_one(inode_lru, &pos->lru);
// (Re-)insert `inode` into its superblock's hash table; the delete first
// makes the operation idempotent if the inode was already hashed.
443 vfs_i_addhash(struct v_inode* inode)
445 struct hbucket* slot = &inode->sb->i_cache[inode->id & VFS_HASH_MASK];
447 hlist_delete(&inode->hash_list);
448 hlist_add(&slot->head, &inode->hash_list);
// Allocate an inode for superblock `sb`, letting the fs initialise it.
452 vfs_i_alloc(struct v_superblock* sb)
// Every filesystem must provide an init_inode hook.
454 assert(sb->ops.init_inode);
456 struct v_inode* inode;
// On allocation pressure, evict half the inode LRU and retry once.
457 if (!(inode = cake_grab(inode_pile))) {
458 lru_evict_half(inode_lru);
459 if (!(inode = cake_grab(inode_pile))) {
464 memset(inode, 0, sizeof(*inode));
465 mutex_init(&inode->lock);
466 llist_init_head(&inode->xattrs);
468 sb->ops.init_inode(sb, inode);
// Stamp creation time; access/modify times start equal to it.
471 inode->ctime = clock_unixtime();
472 inode->atime = inode->ctime;
473 inode->mtime = inode->ctime;
476 lru_use_one(inode_lru, &inode->lru);
// Destroy an inode: tear down its page cache, sync it, unhash, release.
481 vfs_i_free(struct v_inode* inode)
483 if (inode->pg_cache) {
484 pcache_release(inode->pg_cache);
485 vfree(inode->pg_cache);
// NOTE(review): sync is called unconditionally here, unlike vfs_fsync
// which checks for a NULL op — confirm every fs supplies ops->sync.
487 inode->ops->sync(inode);
488 hlist_delete(&inode->hash_list);
489 cake_release(inode_pile, inode);
492 /* ---- System call definition and support ---- */
494 #define FLOCATE_CREATE_EMPTY 1
// Translate a numeric fd into the current process's v_fd entry;
// validates the range with TEST_FD and rejects empty slots.
497 vfs_getfd(int fd, struct v_fd** fd_s)
499 if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {
// Resolve `path` to its parent directory (*fdir) and target dnode
// (*file). With FLOCATE_CREATE_EMPTY, a missing final component is
// created through the parent inode's create op.
506 __vfs_try_locate_file(const char* path,
507 struct v_dnode** fdir,
508 struct v_dnode** file,
// Stack buffer for the final path component.
511 char name_str[VFS_NAME_MAXLEN];
512 struct hstr name = HSTR(name_str, 0);
// First walk to the parent directory, capturing the last component.
516 if ((errno = vfs_walk_proc(path, fdir, &name, VFS_WALK_PARENT))) {
520 errno = vfs_walk(*fdir, name.value, file, NULL, 0);
// Only ENOENT plus the create flag proceeds to creation; any other
// outcome is returned as-is.
521 if (errno != ENOENT || !(options & FLOCATE_CREATE_EMPTY)) {
525 struct v_dnode* parent = *fdir;
526 struct v_dnode* file_new = vfs_d_alloc(parent, &name);
// Ask the fs to create the file; on success cache the new dnode,
// otherwise free it again.
534 if (!(errno = parent->inode->ops->create(parent->inode, file_new))) {
535 vfs_dcache_add(parent, file_new);
538 vfs_d_free(file_new);
541 unlock_dnode(parent);
// Open `path` with FO_* options, allocating an fd in the current process.
547 vfs_do_open(const char* path, int options)
550 struct v_dnode *dentry, *file;
551 struct v_file* ofile = 0;
// Locate (and with FO_CREATE, possibly create) the target, then open it.
553 errno = __vfs_try_locate_file(
554 path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);
556 if (errno || (errno = vfs_open(file, &ofile))) {
560 struct v_inode* o_inode = ofile->inode;
562 if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
563 struct v_fd* fd_s = vzalloc(sizeof(*fd_s));
// Branchless positioning: f_pos = fsize when FO_APPEND is set
// (mask is all-ones), 0 otherwise (mask is zero).
564 ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);
566 fd_s->flags = options;
567 __current->fdtable->fds[fd] = fd_s;
// open(2): thin syscall wrapper over vfs_do_open.
574 __DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
576 int errno = vfs_do_open(path, options);
577 return DO_STATUS_OR_RETURN(errno);
// close(2): close the file behind `fd` and clear its fd-table slot.
580 __DEFINE_LXSYSCALL1(int, close, int, fd)
584 if ((errno = vfs_getfd(fd, &fd_s))) {
588 if ((errno = vfs_close(fd_s->file))) {
// Slot is cleared only after a successful close.
593 __current->fdtable->fds[fd] = 0;
596 return DO_STATUS(errno);
// Directory-iteration callback: copy one entry's name and type into the
// user dirent carried in dctx->cb_data.
600 __vfs_readdir_callback(struct dir_context* dctx,
605 struct dirent* dent = (struct dirent*)dctx->cb_data;
// NOTE(review): strncpy does not guarantee NUL-termination when the name
// reaches DIRENT_NAME_MAX_LEN — confirm the buffer handling upstream.
606 strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);
608 dent->d_type = dtype;
// readdir(2): emit the entry at dent->d_offset for directory fd.
611 __DEFINE_LXSYSCALL2(int, readdir, int, fd, struct dirent*, dent)
616 if ((errno = vfs_getfd(fd, &fd_s))) {
620 struct v_inode* inode = fd_s->file->inode;
// Only directories may be enumerated.
624 if (!(inode->itype & VFS_IFDIR)) {
627 struct dir_context dctx =
628 (struct dir_context){ .cb_data = dent,
629 .index = dent->d_offset,
630 .read_complete_callback =
631 __vfs_readdir_callback };
// Offsets 0 and 1 are the synthetic "." and ".." entries; only later
// offsets hit the filesystem's readdir op.
633 if (dent->d_offset == 0) {
634 __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, DT_DIR);
635 } else if (dent->d_offset == 1) {
636 __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, DT_DIR);
639 if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {
650 return DO_STATUS_OR_RETURN(errno);
// read(2): read `count` bytes at the file's current position.
653 __DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
657 if ((errno = vfs_getfd(fd, &fd_s))) {
661 struct v_file* file = fd_s->file;
// Directories cannot be read with read(2).
662 if ((file->inode->itype & VFS_IFDIR)) {
667 lock_inode(file->inode);
669 file->inode->atime = clock_unixtime();
// Sequential devices and FO_DIRECT bypass the page cache.
671 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
672 errno = file->ops->read(file->inode, buf, count, file->f_pos);
674 errno = pcache_read(file->inode, buf, count, file->f_pos);
// On success, `errno` holds the byte count and advances the position
// (success check presumably on an elided line).
678 file->f_pos += errno;
679 unlock_inode(file->inode);
683 unlock_inode(file->inode);
686 return DO_STATUS(errno);
// write(2): write `count` bytes at the file's current position.
689 __DEFINE_LXSYSCALL3(int, write, int, fd, void*, buf, size_t, count)
693 if ((errno = vfs_getfd(fd, &fd_s))) {
697 struct v_file* file = fd_s->file;
// Refuse writes to read-only mounts/filesystems.
699 if ((errno = vfs_check_writable(file->dnode))) {
// Directories cannot be written with write(2).
703 if ((file->inode->itype & VFS_IFDIR)) {
708 lock_inode(file->inode);
710 file->inode->mtime = clock_unixtime();
// Sequential devices and FO_DIRECT bypass the page cache.
712 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
713 errno = file->ops->write(file->inode, buf, count, file->f_pos);
715 errno = pcache_write(file->inode, buf, count, file->f_pos);
// On success, `errno` holds the byte count and advances the position
// (success check presumably on an elided line).
719 file->f_pos += errno;
720 unlock_inode(file->inode);
724 unlock_inode(file->inode);
727 return DO_STATUS(errno);
// lseek(2): reposition the file offset; `options` selects the whence
// mode (branching largely elided from this view).
730 __DEFINE_LXSYSCALL3(int, lseek, int, fd, int, offset, int, options)
734 if ((errno = vfs_getfd(fd, &fd_s))) {
738 struct v_file* file = fd_s->file;
// Seeking requires a fs-provided seek op.
740 if (!file->ops->seek) {
745 lock_inode(file->inode);
748 int fpos = file->f_pos;
// Overflow-checked additions for the relative (SEEK_CUR-style) and
// end-relative (SEEK_END-style) cases.
751 overflow = __builtin_sadd_overflow((int)file->f_pos, offset, &fpos);
755 __builtin_sadd_overflow((int)file->inode->fsize, offset, &fpos);
763 } else if (!(errno = file->ops->seek(file->inode, fpos))) {
767 unlock_inode(file->inode);
770 return DO_STATUS(errno);
// Recursively build the absolute path of `dnode` into `buf`, parent
// first; recursion stops at the self-parented root. Returns the length
// written so far.
774 vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
776 if (!dnode || dnode->parent == dnode) {
784 size_t len = vfs_get_path(dnode->parent, buf, size, depth + 1);
// Separator between the parent prefix and this component.
790 buf[len++] = VFS_PATH_DELIM;
// Copy at most the remaining space; truncates silently when `buf` is
// too small.
792 size_t cpy_size = MIN(dnode->name.len, size - len);
793 strncpy(buf + len, dnode->name.value, cpy_size);
// Read the symlink target of `dnode` into `buf` (at most `size` bytes).
800 vfs_readlink(struct v_dnode* dnode, char* buf, size_t size)
803 struct v_inode* inode = dnode->inode;
804 if (inode->ops->read_symlink) {
807 int errno = inode->ops->read_symlink(inode, &link);
// NOTE(review): strncpy may leave `buf` unterminated when the link is
// `size` bytes or longer — confirm callers handle that.
808 strncpy(buf, link, size);
817 vfs_get_dtype(int itype)
// realpathat(2): write the absolute path of the file behind `fd`.
829 __DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
833 if ((errno = vfs_getfd(fd, &fd_s))) {
837 struct v_dnode* dnode;
838 errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);
845 return DO_STATUS(errno);
// readlink(2): resolve `path` without following the final symlink, then
// copy its target into `buf`.
848 __DEFINE_LXSYSCALL3(int, readlink, const char*, path, char*, buf, size_t, size)
851 struct v_dnode* dnode;
852 if (!(errno = vfs_walk_proc(path, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
853 errno = vfs_readlink(dnode, buf, size);
860 return DO_STATUS(errno);
// readlinkat(2): like readlink, but `pathname` is resolved relative to
// the directory behind `dirfd`.
863 __DEFINE_LXSYSCALL4(int,
876 if ((errno = vfs_getfd(dirfd, &fd_s))) {
880 struct v_dnode* dnode;
881 if (!(errno = vfs_walk(
882 fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
// NOTE(review): this reads the link of the *directory* dnode, not the
// walked `dnode` — almost certainly should be vfs_readlink(dnode, ...).
883 errno = vfs_readlink(fd_s->file->dnode, buf, size);
891 return DO_STATUS(errno);
896 When we perform operation that could affect the layout of
897 directory (i.e., rename, mkdir, rmdir). We must lock the parent dir
898 whenever possible. This will block any ongoing path walking to reach
899 it hence avoid any partial state.
// rmdir(2): remove an empty, unused directory.
902 __DEFINE_LXSYSCALL1(int, rmdir, const char*, pathname)
905 struct v_dnode* dnode;
906 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
907 return DO_STATUS(errno);
912 if ((errno = vfs_check_writable(dnode))) {
// Read-only filesystems cannot remove directories.
916 if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
// Busy check: extra dnode references or open files forbid removal.
921 if (dnode->ref_count > 1 || dnode->inode->open_count) {
// Only empty directories (no cached children) may be removed.
926 if (!llist_empty(&dnode->children)) {
931 struct v_dnode* parent = dnode->parent;
// Lock the parent dir to block concurrent path walks (see the layout-
// mutation note above this syscall).
939 lock_inode(parent->inode);
941 if ((dnode->inode->itype & VFS_IFDIR)) {
942 errno = parent->inode->ops->rmdir(parent->inode, dnode);
// On success, drop the dnode from the dcache (success check presumably
// on an elided line).
944 vfs_dcache_remove(dnode);
950 unlock_inode(parent->inode);
951 unlock_dnode(parent);
955 return DO_STATUS(errno);
// mkdir(2): create a directory at `path`.
958 __DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
961 struct v_dnode *parent, *dir;
962 char name_value[VFS_NAME_MAXLEN];
963 struct hstr name = HHSTR(name_value, 0, 0);
// Walk to the parent directory, capturing the new directory's name.
965 if ((errno = vfs_walk_proc(path, &parent, &name, VFS_WALK_PARENT))) {
969 if ((errno = vfs_check_writable(parent))) {
// Pre-allocate the dnode for the new directory.
973 if (!(dir = vfs_d_alloc(parent, &name))) {
979 lock_inode(parent->inode);
// Reject read-only fs, fs without mkdir support, and non-directory
// parents; otherwise create and cache the new entry.
981 if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
983 } else if (!parent->inode->ops->mkdir) {
985 } else if (!(parent->inode->itype & VFS_IFDIR)) {
987 } else if (!(errno = parent->inode->ops->mkdir(parent->inode, dir))) {
988 vfs_dcache_add(parent, dir);
995 unlock_inode(parent->inode);
996 unlock_dnode(parent);
998 return DO_STATUS(errno);
// Shared unlink core: remove a non-directory name that is neither
// referenced nor open.
1002 __vfs_do_unlink(struct v_dnode* dnode)
1005 struct v_inode* inode = dnode->inode;
// Busy dnode (other holders besides the dcache) — refuse.
1007 if (dnode->ref_count > 1) {
1011 if ((errno = vfs_check_writable(dnode))) {
// Open files forbid unlinking; directories must use rmdir instead.
1017 if (inode->open_count) {
1019 } else if (!(inode->itype & VFS_IFDIR)) {
1020 // The underlying unlink implementation should handle
1022 errno = inode->ops->unlink(inode);
1030 unlock_inode(inode);
// unlink(2): resolve `pathname` then delegate to the core above.
1035 __DEFINE_LXSYSCALL1(int, unlink, const char*, pathname)
1038 struct v_dnode* dnode;
1039 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
1043 errno = __vfs_do_unlink(dnode);
1046 return DO_STATUS(errno);
// unlinkat(2): resolve relative to the directory behind `fd`.
1049 __DEFINE_LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
1053 if ((errno = vfs_getfd(fd, &fd_s))) {
1057 struct v_dnode* dnode;
1058 if (!(errno = vfs_walk(fd_s->file->dnode, pathname, &dnode, NULL, 0))) {
1059 errno = __vfs_do_unlink(dnode);
1063 return DO_STATUS(errno);
// link(2): create `newpath` as a hard link to `oldpath`. The new name is
// created empty first, then bound to the old inode via vfs_link.
1066 __DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
1069 struct v_dnode *dentry, *to_link, *name_dentry, *name_file;
1071 errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
1073 errno = __vfs_try_locate_file(
1074 newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
1077 } else if (name_file) {
1078 errno = vfs_link(to_link, name_file);
1081 return DO_STATUS(errno);
// fsync(2): flush the file behind `fildes` to storage.
1084 __DEFINE_LXSYSCALL1(int, fsync, int, fildes)
1089 if (!(errno = vfs_getfd(fildes, &fd_s))) {
1090 errno = vfs_fsync(fd_s->file);
1093 return DO_STATUS(errno);
// Duplicate a v_fd entry: shallow-copy the descriptor and take an extra
// reference on the shared open file.
1097 vfs_dup_fd(struct v_fd* old, struct v_fd** new)
1100 struct v_fd* copied = cake_grab(fd_pile);
1102 memcpy(copied, old, sizeof(struct v_fd));
1104 atomic_fetch_add(&old->file->ref_count, 1);
// dup2(2) core: make `newfd` refer to the same open file as `oldfd`,
// closing whatever `newfd` held before.
1112 vfs_dup2(int oldfd, int newfd)
// dup2 of an fd onto itself is a no-op (returns oldfd).
1114 if (newfd == oldfd) {
1119 struct v_fd *oldfd_s, *newfd_s;
1120 if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
1124 if (!TEST_FD(newfd)) {
// Close any file currently installed at newfd before reuse.
1129 newfd_s = __current->fdtable->fds[newfd];
1130 if (newfd_s && (errno = vfs_close(newfd_s->file))) {
1134 if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1135 __current->fdtable->fds[newfd] = newfd_s;
1140 return DO_STATUS(errno);
// dup2(2): thin wrapper over vfs_dup2.
1143 __DEFINE_LXSYSCALL2(int, dup2, int, oldfd, int, newfd)
1145 return vfs_dup2(oldfd, newfd);
// dup(2): duplicate oldfd into the lowest free slot.
1148 __DEFINE_LXSYSCALL1(int, dup, int, oldfd)
1151 struct v_fd *oldfd_s, *newfd_s;
1152 if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
1156 if (!(errno = vfs_alloc_fdslot(&newfd)) &&
1157 !(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1158 __current->fdtable->fds[newfd] = newfd_s;
1163 return DO_STATUS(errno);
// symlink(2)-style syscall: set the symlink target on the dnode at
// `pathname` via the fs's set_symlink op.
1166 __DEFINE_LXSYSCALL2(int,
1174 struct v_dnode* dnode;
1175 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
// NOTE(review): assignment used directly as condition — parenthesize as
// ((errno = ...)) to match the rest of the file and silence -Wparentheses.
1179 if (errno = vfs_check_writable(dnode)) {
1183 if (!dnode->inode->ops->set_symlink) {
1188 lock_inode(dnode->inode);
1190 errno = dnode->inode->ops->set_symlink(dnode->inode, link_target);
1192 unlock_inode(dnode->inode);
1195 return DO_STATUS(errno);
// Take a reference on `dnode` and mark its mount busy (paired with
// vfs_unref_dnode below).
1199 vfs_ref_dnode(struct v_dnode* dnode)
1201 atomic_fetch_add(&dnode->ref_count, 1);
1202 mnt_mkbusy(dnode->mnt);
// Drop a reference on `dnode` and relax its mount's busy count.
1206 vfs_unref_dnode(struct v_dnode* dnode)
1208 atomic_fetch_sub(&dnode->ref_count, 1);
1209 mnt_chillax(dnode->mnt);
// Change `proc`'s working directory to `dnode`; the old cwd's reference
// is released and the new one pinned.
1213 vfs_do_chdir(struct proc_info* proc, struct v_dnode* dnode)
// Only directories can become the cwd.
1219 if (!(dnode->inode->itype & VFS_IFDIR)) {
1225 vfs_unref_dnode(proc->cwd);
1228 vfs_ref_dnode(dnode);
1231 unlock_dnode(dnode);
// chdir(2): resolve the path and delegate to vfs_do_chdir.
1237 __DEFINE_LXSYSCALL1(int, chdir, const char*, path)
1239 struct v_dnode* dnode;
1242 if ((errno = vfs_walk_proc(path, &dnode, NULL, 0))) {
1246 errno = vfs_do_chdir(__current, dnode);
1249 return DO_STATUS(errno);
// fchdir(2): chdir to the directory behind an open fd.
1252 __DEFINE_LXSYSCALL1(int, fchdir, int, fd)
1257 if ((errno = vfs_getfd(fd, &fd_s))) {
1261 errno = vfs_do_chdir(__current, fd_s->file->dnode);
1264 return DO_STATUS(errno);
// getcwd(2): write the current working directory path into `buf`.
1267 __DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
// No cwd set yet: report the root ("/").
1278 if (!__current->cwd) {
1279 *buf = VFS_PATH_DELIM;
1282 len = vfs_get_path(__current->cwd, buf, size, 0);
// NOTE(review): terminator written at len+1, leaving buf[len]
// unset on this path — looks like it should be buf[len]; confirm.
1289 buf[len + 1] = '\0';
1294 __current->k_status = errno;
// Rename core: move/overwrite `current` onto `target`. Both must be on
// the same superblock, unreferenced beyond their caches, and the target
// (if a directory) empty.
1299 vfs_do_rename(struct v_dnode* current, struct v_dnode* target)
// Renaming a name onto its own inode is a no-op.
1302 if (current->inode->id == target->inode->id) {
// NOTE(review): assignment used directly as condition — parenthesize as
// ((errno = ...)) for consistency and to silence -Wparentheses.
1307 if (errno = vfs_check_writable(current)) {
1311 if (current->ref_count > 1 || target->ref_count > 1) {
// Cross-filesystem rename is not supported.
1315 if (current->super_block != target->super_block) {
1319 struct v_dnode* oldparent = current->parent;
1320 struct v_dnode* newparent = target->parent;
// Lock order: the dnodes being moved, then both parent directories.
1322 lock_dnode(current);
1325 lock_dnode(oldparent);
1327 lock_dnode(newparent);
// An occupied target directory cannot be overwritten.
1329 if (!llist_empty(&target->children)) {
1331 unlock_dnode(target);
// Delegate the actual move to the filesystem's rename op.
1336 current->inode->ops->rename(current->inode, current, target))) {
1337 unlock_dnode(target);
1341 // re-position current
// NOTE(review): "¤t" is extraction mojibake for "&current" (the
// HTML entity &curren;); the call is hstrcpy(&current->name, &target->name).
1342 hstrcpy(¤t->name, &target->name);
1343 vfs_dcache_rehash(newparent, current);
1348 unlock_dnode(target);
1351 unlock_dnode(current);
1353 unlock_dnode(oldparent);
1355 unlock_dnode(newparent);
// rename(2): resolve both paths, creating the target dnode if the new
// name does not exist yet, then delegate to vfs_do_rename.
1360 __DEFINE_LXSYSCALL2(int, rename, const char*, oldpath, const char*, newpath)
1362 struct v_dnode *cur, *target_parent, *target;
// Heap-allocated scratch for the final component of newpath.
// NOTE(review): the matching vfree is not visible in this view — confirm
// it is freed on all exit paths.
1363 struct hstr name = HSTR(valloc(VFS_NAME_MAXLEN), 0);
1366 if ((errno = vfs_walk_proc(oldpath, &cur, NULL, 0))) {
1370 if ((errno = vfs_walk(
1371 __current->cwd, newpath, &target_parent, &name, VFS_WALK_PARENT))) {
1375 errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
// Missing target name: fabricate a fresh dnode to rename onto.
1376 if (errno == ENOENT) {
1377 target = vfs_d_alloc(target_parent, &name);
1378 vfs_dcache_add(target_parent, target);
1388 errno = vfs_do_rename(cur, target);
1392 return DO_STATUS(errno);