3 * @author Lunaixsky (zelong56@gmail.com)
4 * @brief Lunaix virtual file system - an abstraction layer for all file system.
8 * @copyright Copyright (c) 2022
12 // Welcome to The Mountain O'Shit! :)
15 TODO vfs & device todos checklist
17 It is overseen by Twilight Sparkle ;)
19 1. Get inodes hooked into lru (CHECKED)
20 2. Get dnodes hooked into lru (CHECKED)
22 3. Get inodes properly hashed so they can be reused by underlying fs (CHECKED)
22 4. (lru) Add a callback function (or destructor) for eviction. (CHECKED)
23 [good idea] or a constructor/destructor pattern in cake allocator ?
24 5. (mount) Figure out a way to identify a busy mount point before unmount
25 maybe a unified mount_point structure that maintains a reference
26 counter on any dnodes within the subtree? Such a counter will only
27 increment if a file is opened or a dnode is being used as working
28 directory and decrement conversely. (CHECKED)
29 6. (mount) Ability to track all mount points (including sub-mounts)
30 so we can be confident to clean up everything when we
32 7. (mount) Figure out a way to acquire the device represented by a dnode.
33 so it can be used to mount. (e.g. we wish to get `struct device*`
34 out of the dnode at /dev/sda)
35 [tip] we should pay attention at twifs and add a private_data field
36 under struct v_dnode? (CHECKED)
37 8. (mount) Then, we should refactor on mount/unmount mechanism. (CHECKED)
38 9. (mount) (future) Ability to mount any thing? e.g. Linux can mount a disk
39 image file using a so called "loopback" pseudo device. Maybe
40 we can do similar thing in Lunaix? A block device emulation
41 above the regular file when we mount it on.
42 10. (device) device number (dev_t) allocation
43 [good idea] <class>:<subclass>:<uniq_id> composition
46 #include <klibc/string.h>
47 #include <lunaix/dirent.h>
48 #include <lunaix/foptions.h>
49 #include <lunaix/fs.h>
50 #include <lunaix/mm/cake.h>
51 #include <lunaix/mm/page.h>
52 #include <lunaix/mm/valloc.h>
53 #include <lunaix/process.h>
54 #include <lunaix/spike.h>
55 #include <lunaix/syscall.h>
57 #include <lunaix/fs/twifs.h>
59 static struct cake_pile* dnode_pile;
60 static struct cake_pile* inode_pile;
61 static struct cake_pile* file_pile;
62 static struct cake_pile* superblock_pile;
63 static struct cake_pile* fd_pile;
65 struct v_dnode* vfs_sysroot;
66 static struct hbucket* dnode_cache;
68 struct lru_zone *dnode_lru, *inode_lru;
70 struct hstr vfs_ddot = HSTR("..", 2);
71 struct hstr vfs_dot = HSTR(".", 1);
72 struct hstr vfs_empty = HSTR("", 0);
78 vfs_sb_free(struct v_superblock* sb);
81 __vfs_try_evict_dnode(struct lru_node* obj);
84 __vfs_try_evict_inode(struct lru_node* obj);
89 // 为他们专门创建一个蛋糕堆,而不使用valloc,这样我们可以最小化内碎片的产生
90 dnode_pile = cake_new_pile("dnode_cache", sizeof(struct v_dnode), 1, 0);
91 inode_pile = cake_new_pile("inode_cache", sizeof(struct v_inode), 1, 0);
92 file_pile = cake_new_pile("file_cache", sizeof(struct v_file), 1, 0);
93 fd_pile = cake_new_pile("fd_cache", sizeof(struct v_fd), 1, 0);
95 cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
97 dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
99 dnode_lru = lru_new_zone(__vfs_try_evict_dnode);
100 inode_lru = lru_new_zone(__vfs_try_evict_inode);
102 hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
103 hstr_rehash(&vfs_dot, HSTR_FULL_HASH);
106 vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
107 vfs_sysroot->parent = vfs_sysroot;
108 atomic_fetch_add(&vfs_sysroot->ref_count, 1);
111 inline struct hbucket*
112 __dcache_hash(struct v_dnode* parent, uint32_t* hash)
114 uint32_t _hash = *hash;
116 _hash = _hash ^ (_hash >> VFS_HASHBITS);
117 // 与parent的指针值做加法,来减小碰撞的可能性。
118 _hash += (uint32_t)parent;
120 return &dnode_cache[_hash & VFS_HASH_MASK];
124 vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
126 if (!str->len || HSTR_EQ(str, &vfs_dot))
129 if (HSTR_EQ(str, &vfs_ddot)) {
130 return parent->parent;
133 uint32_t hash = str->hash;
134 struct hbucket* slot = __dcache_hash(parent, &hash);
136 struct v_dnode *pos, *n;
137 hashtable_bucket_foreach(slot, pos, n, hash_list)
139 if (pos->name.hash == hash) {
147 vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
151 atomic_fetch_add(&dnode->ref_count, 1);
152 dnode->parent = parent;
153 llist_append(&parent->children, &dnode->siblings);
155 struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
156 hlist_add(&bucket->head, &dnode->hash_list);
160 vfs_dcache_remove(struct v_dnode* dnode)
163 assert(dnode->ref_count == 1);
165 llist_delete(&dnode->siblings);
166 llist_delete(&dnode->aka_list);
167 hlist_delete(&dnode->hash_list);
169 dnode->parent = NULL;
170 atomic_fetch_sub(&dnode->ref_count, 1);
174 vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
178 hstr_rehash(&dnode->name, HSTR_FULL_HASH);
179 vfs_dcache_remove(dnode);
180 vfs_dcache_add(new_parent, dnode);
184 vfs_open(struct v_dnode* dnode, struct v_file** file)
186 if (!dnode->inode || !dnode->inode->ops->open) {
190 struct v_inode* inode = dnode->inode;
194 struct v_file* vfile = cake_grab(file_pile);
195 memset(vfile, 0, sizeof(*vfile));
197 vfile->dnode = dnode;
198 vfile->inode = inode;
199 vfile->ref_count = ATOMIC_VAR_INIT(1);
200 vfile->ops = inode->default_fops;
202 if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
203 struct pcache* pcache = vzalloc(sizeof(struct pcache));
205 pcache->master = inode;
206 inode->pg_cache = pcache;
209 int errno = inode->ops->open(inode, vfile);
211 cake_release(file_pile, vfile);
213 atomic_fetch_add(&dnode->ref_count, 1);
215 mnt_mkbusy(dnode->mnt);
226 vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
228 if (assign_to->inode) {
229 llist_delete(&assign_to->aka_list);
230 assign_to->inode->link_count--;
232 llist_append(&inode->aka_dnodes, &assign_to->aka_list);
233 assign_to->inode = inode;
238 vfs_link(struct v_dnode* to_link, struct v_dnode* name)
242 if ((errno = vfs_check_writable(to_link))) {
246 lock_inode(to_link->inode);
247 if (to_link->super_block->root != name->super_block->root) {
249 } else if (!to_link->inode->ops->link) {
251 } else if (!(errno = to_link->inode->ops->link(to_link->inode, name))) {
252 vfs_assign_inode(name, to_link->inode);
254 unlock_inode(to_link->inode);
260 vfs_pclose(struct v_file* file, pid_t pid)
263 if (file->ref_count > 1) {
264 atomic_fetch_sub(&file->ref_count, 1);
265 } else if (!(errno = file->ops->close(file))) {
266 atomic_fetch_sub(&file->dnode->ref_count, 1);
267 file->inode->open_count--;
269 // Prevent dead lock.
270 // This happened when process is terminated while blocking on read.
271 // In that case, the process is still holding the inode lock and it will
272 // never get released.
274 * The unlocking should also include ownership check.
276 * To see why, consider two process both open the same file both with
278 * Process A: busy on reading x
279 * Process B: do nothing with x
280 * Assuming that, after a very short time, process B get terminated
281 * while process A is still busy in it's reading business. By this
282 * design, the inode lock of this file x is get released by B rather
283 * than A. And this will cause a probable race condition on A if other
284 * process is writing to this file later after B exit.
286 if (mutex_on_hold(&file->inode->lock)) {
287 mutex_unlock_for(&file->inode->lock, pid);
289 mnt_chillax(file->dnode->mnt);
291 pcache_commit_all(file->inode);
292 cake_release(file_pile, file);
298 vfs_close(struct v_file* file)
300 return vfs_pclose(file, __current->pid);
304 vfs_fsync(struct v_file* file)
307 if ((errno = vfs_check_writable(file->dnode))) {
311 lock_inode(file->inode);
313 pcache_commit_all(file->inode);
316 if (file->ops->sync) {
317 errno = file->ops->sync(file);
320 unlock_inode(file->inode);
326 vfs_alloc_fdslot(int* fd)
328 for (size_t i = 0; i < VFS_MAX_FD; i++) {
329 if (!__current->fdtable->fds[i]) {
340 struct v_superblock* sb = cake_grab(superblock_pile);
341 memset(sb, 0, sizeof(*sb));
342 llist_init_head(&sb->sb_list);
343 sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
348 vfs_sb_free(struct v_superblock* sb)
351 cake_release(superblock_pile, sb);
355 __vfs_try_evict_dnode(struct lru_node* obj)
357 struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);
359 if (!dnode->ref_count) {
367 __vfs_try_evict_inode(struct lru_node* obj)
369 struct v_inode* inode = container_of(obj, struct v_inode, lru);
371 if (!inode->link_count && !inode->open_count) {
379 vfs_d_alloc(struct v_dnode* parent, struct hstr* name)
381 struct v_dnode* dnode = cake_grab(dnode_pile);
383 lru_evict_half(dnode_lru);
385 if (!(dnode = cake_grab(dnode_pile))) {
390 memset(dnode, 0, sizeof(*dnode));
391 llist_init_head(&dnode->children);
392 llist_init_head(&dnode->siblings);
393 llist_init_head(&dnode->aka_list);
394 mutex_init(&dnode->lock);
396 dnode->ref_count = ATOMIC_VAR_INIT(0);
397 dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
399 hstrcpy(&dnode->name, name);
402 dnode->super_block = parent->super_block;
403 dnode->mnt = parent->mnt;
406 lru_use_one(dnode_lru, &dnode->lru);
412 vfs_d_free(struct v_dnode* dnode)
414 assert(dnode->ref_count == 1);
417 assert(dnode->inode->link_count > 0);
418 dnode->inode->link_count--;
421 vfs_dcache_remove(dnode);
422 // Make sure the children de-referencing their parent.
423 // With lru presented, the eviction will be propagated over the entire
424 // detached subtree eventually
425 struct v_dnode *pos, *n;
426 llist_for_each(pos, n, &dnode->children, siblings)
428 vfs_dcache_remove(pos);
431 vfree(dnode->name.value);
432 cake_release(dnode_pile, dnode);
436 vfs_i_find(struct v_superblock* sb, uint32_t i_id)
438 struct hbucket* slot = &sb->i_cache[i_id & VFS_HASH_MASK];
439 struct v_inode *pos, *n;
440 hashtable_bucket_foreach(slot, pos, n, hash_list)
442 if (pos->id == i_id) {
443 lru_use_one(inode_lru, &pos->lru);
452 vfs_i_addhash(struct v_inode* inode)
454 struct hbucket* slot = &inode->sb->i_cache[inode->id & VFS_HASH_MASK];
456 hlist_delete(&inode->hash_list);
457 hlist_add(&slot->head, &inode->hash_list);
461 vfs_i_alloc(struct v_superblock* sb)
463 assert(sb->ops.init_inode);
465 struct v_inode* inode;
466 if (!(inode = cake_grab(inode_pile))) {
467 lru_evict_half(inode_lru);
468 if (!(inode = cake_grab(inode_pile))) {
473 memset(inode, 0, sizeof(*inode));
474 mutex_init(&inode->lock);
475 llist_init_head(&inode->xattrs);
476 llist_init_head(&inode->aka_dnodes);
478 sb->ops.init_inode(sb, inode);
481 inode->ctime = clock_unixtime();
482 inode->atime = inode->ctime;
483 inode->mtime = inode->ctime;
486 lru_use_one(inode_lru, &inode->lru);
491 vfs_i_free(struct v_inode* inode)
493 if (inode->pg_cache) {
494 pcache_release(inode->pg_cache);
495 vfree(inode->pg_cache);
497 inode->ops->sync(inode);
498 hlist_delete(&inode->hash_list);
499 cake_release(inode_pile, inode);
502 /* ---- System call definition and support ---- */
504 #define FLOCATE_CREATE_EMPTY 1
507 vfs_getfd(int fd, struct v_fd** fd_s)
509 if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {
516 __vfs_try_locate_file(const char* path,
517 struct v_dnode** fdir,
518 struct v_dnode** file,
521 char name_str[VFS_NAME_MAXLEN];
522 struct hstr name = HSTR(name_str, 0);
526 if ((errno = vfs_walk_proc(path, fdir, &name, VFS_WALK_PARENT))) {
530 errno = vfs_walk(*fdir, name.value, file, NULL, 0);
531 if (errno != ENOENT || !(options & FLOCATE_CREATE_EMPTY)) {
535 struct v_dnode* parent = *fdir;
536 struct v_dnode* file_new = vfs_d_alloc(parent, &name);
544 if (!(errno = parent->inode->ops->create(parent->inode, file_new))) {
545 vfs_dcache_add(parent, file_new);
548 vfs_d_free(file_new);
551 unlock_dnode(parent);
557 vfs_do_open(const char* path, int options)
560 struct v_dnode *dentry, *file;
561 struct v_file* ofile = 0;
563 errno = __vfs_try_locate_file(
564 path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);
566 if (errno || (errno = vfs_open(file, &ofile))) {
570 struct v_inode* o_inode = ofile->inode;
572 if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
573 struct v_fd* fd_s = vzalloc(sizeof(*fd_s));
574 ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);
576 fd_s->flags = options;
577 __current->fdtable->fds[fd] = fd_s;
584 __DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
586 int errno = vfs_do_open(path, options);
587 return DO_STATUS_OR_RETURN(errno);
590 __DEFINE_LXSYSCALL1(int, close, int, fd)
594 if ((errno = vfs_getfd(fd, &fd_s))) {
598 if ((errno = vfs_close(fd_s->file))) {
603 __current->fdtable->fds[fd] = 0;
606 return DO_STATUS(errno);
610 __vfs_readdir_callback(struct dir_context* dctx,
615 struct dirent* dent = (struct dirent*)dctx->cb_data;
616 strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);
618 dent->d_type = dtype;
621 __DEFINE_LXSYSCALL2(int, readdir, int, fd, struct dirent*, dent)
626 if ((errno = vfs_getfd(fd, &fd_s))) {
630 struct v_inode* inode = fd_s->file->inode;
634 if (!(inode->itype & VFS_IFDIR)) {
637 struct dir_context dctx =
638 (struct dir_context){ .cb_data = dent,
639 .index = dent->d_offset,
640 .read_complete_callback =
641 __vfs_readdir_callback };
643 if (dent->d_offset == 0) {
644 __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, DT_DIR);
645 } else if (dent->d_offset == 1) {
646 __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, DT_DIR);
649 if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {
660 return DO_STATUS_OR_RETURN(errno);
663 __DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
667 if ((errno = vfs_getfd(fd, &fd_s))) {
671 struct v_file* file = fd_s->file;
672 if ((file->inode->itype & VFS_IFDIR)) {
677 lock_inode(file->inode);
679 file->inode->atime = clock_unixtime();
681 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
682 errno = file->ops->read(file->inode, buf, count, file->f_pos);
684 errno = pcache_read(file->inode, buf, count, file->f_pos);
688 file->f_pos += errno;
689 unlock_inode(file->inode);
693 unlock_inode(file->inode);
696 return DO_STATUS(errno);
699 __DEFINE_LXSYSCALL3(int, write, int, fd, void*, buf, size_t, count)
703 if ((errno = vfs_getfd(fd, &fd_s))) {
707 struct v_file* file = fd_s->file;
709 if ((errno = vfs_check_writable(file->dnode))) {
713 if ((file->inode->itype & VFS_IFDIR)) {
718 lock_inode(file->inode);
720 file->inode->mtime = clock_unixtime();
722 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
723 errno = file->ops->write(file->inode, buf, count, file->f_pos);
725 errno = pcache_write(file->inode, buf, count, file->f_pos);
729 file->f_pos += errno;
730 unlock_inode(file->inode);
734 unlock_inode(file->inode);
737 return DO_STATUS(errno);
740 __DEFINE_LXSYSCALL3(int, lseek, int, fd, int, offset, int, options)
744 if ((errno = vfs_getfd(fd, &fd_s))) {
748 struct v_file* file = fd_s->file;
750 if (!file->ops->seek) {
755 lock_inode(file->inode);
758 int fpos = file->f_pos;
761 overflow = __builtin_sadd_overflow((int)file->f_pos, offset, &fpos);
765 __builtin_sadd_overflow((int)file->inode->fsize, offset, &fpos);
773 } else if (!(errno = file->ops->seek(file->inode, fpos))) {
777 unlock_inode(file->inode);
780 return DO_STATUS(errno);
784 vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
796 if (dnode->parent != dnode) {
797 len = vfs_get_path(dnode->parent, buf, size, depth + 1);
804 if (!len || buf[len - 1] != VFS_PATH_DELIM) {
805 buf[len++] = VFS_PATH_DELIM;
808 size_t cpy_size = MIN(dnode->name.len, size - len);
809 strncpy(buf + len, dnode->name.value, cpy_size);
816 vfs_readlink(struct v_dnode* dnode, char* buf, size_t size)
819 struct v_inode* inode = dnode->inode;
820 if (inode->ops->read_symlink) {
823 int errno = inode->ops->read_symlink(inode, &link);
824 strncpy(buf, link, size);
833 vfs_get_dtype(int itype)
845 __DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
849 if ((errno = vfs_getfd(fd, &fd_s))) {
853 struct v_dnode* dnode;
854 errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);
861 return DO_STATUS(errno);
864 __DEFINE_LXSYSCALL3(int, readlink, const char*, path, char*, buf, size_t, size)
867 struct v_dnode* dnode;
868 if (!(errno = vfs_walk_proc(path, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
869 errno = vfs_readlink(dnode, buf, size);
876 return DO_STATUS(errno);
879 __DEFINE_LXSYSCALL4(int,
892 if ((errno = vfs_getfd(dirfd, &fd_s))) {
896 struct v_dnode* dnode;
897 if (!(errno = vfs_walk(
898 fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
899 errno = vfs_readlink(fd_s->file->dnode, buf, size);
907 return DO_STATUS(errno);
912 When we perform an operation that could affect the layout of a
913 directory (i.e., rename, mkdir, rmdir), we must lock the parent dir
914 whenever possible. This will block any ongoing path walking from reaching
915 it, hence avoiding any partial state.
918 __DEFINE_LXSYSCALL1(int, rmdir, const char*, pathname)
921 struct v_dnode* dnode;
922 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
923 return DO_STATUS(errno);
928 if ((errno = vfs_check_writable(dnode))) {
932 if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
937 if (dnode->ref_count > 1 || dnode->inode->open_count) {
942 if (!llist_empty(&dnode->children)) {
947 struct v_dnode* parent = dnode->parent;
955 lock_inode(parent->inode);
957 if ((dnode->inode->itype & VFS_IFDIR)) {
958 errno = parent->inode->ops->rmdir(parent->inode, dnode);
960 vfs_dcache_remove(dnode);
966 unlock_inode(parent->inode);
967 unlock_dnode(parent);
971 return DO_STATUS(errno);
974 __DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
977 struct v_dnode *parent, *dir;
978 char name_value[VFS_NAME_MAXLEN];
979 struct hstr name = HHSTR(name_value, 0, 0);
981 if ((errno = vfs_walk_proc(path, &parent, &name, VFS_WALK_PARENT))) {
985 if ((errno = vfs_check_writable(parent))) {
989 if (!(dir = vfs_d_alloc(parent, &name))) {
995 lock_inode(parent->inode);
997 if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
999 } else if (!parent->inode->ops->mkdir) {
1001 } else if (!(parent->inode->itype & VFS_IFDIR)) {
1003 } else if (!(errno = parent->inode->ops->mkdir(parent->inode, dir))) {
1004 vfs_dcache_add(parent, dir);
1011 unlock_inode(parent->inode);
1012 unlock_dnode(parent);
1014 return DO_STATUS(errno);
1018 __vfs_do_unlink(struct v_dnode* dnode)
1021 struct v_inode* inode = dnode->inode;
1023 if (dnode->ref_count > 1) {
1027 if ((errno = vfs_check_writable(dnode))) {
1033 if (inode->open_count) {
1035 } else if (!(inode->itype & VFS_IFDIR)) {
1036 // The underlying unlink implementation should handle
1038 errno = inode->ops->unlink(inode);
1046 unlock_inode(inode);
1051 __DEFINE_LXSYSCALL1(int, unlink, const char*, pathname)
1054 struct v_dnode* dnode;
1055 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
1059 errno = __vfs_do_unlink(dnode);
1062 return DO_STATUS(errno);
1065 __DEFINE_LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
1069 if ((errno = vfs_getfd(fd, &fd_s))) {
1073 struct v_dnode* dnode;
1074 if (!(errno = vfs_walk(fd_s->file->dnode, pathname, &dnode, NULL, 0))) {
1075 errno = __vfs_do_unlink(dnode);
1079 return DO_STATUS(errno);
1082 __DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
1085 struct v_dnode *dentry, *to_link, *name_dentry, *name_file;
1087 errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
1089 errno = __vfs_try_locate_file(
1090 newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
1093 } else if (name_file) {
1094 errno = vfs_link(to_link, name_file);
1097 return DO_STATUS(errno);
1100 __DEFINE_LXSYSCALL1(int, fsync, int, fildes)
1105 if (!(errno = vfs_getfd(fildes, &fd_s))) {
1106 errno = vfs_fsync(fd_s->file);
1109 return DO_STATUS(errno);
1113 vfs_dup_fd(struct v_fd* old, struct v_fd** new)
1116 struct v_fd* copied = cake_grab(fd_pile);
1118 memcpy(copied, old, sizeof(struct v_fd));
1120 atomic_fetch_add(&old->file->ref_count, 1);
1128 vfs_dup2(int oldfd, int newfd)
1130 if (newfd == oldfd) {
1135 struct v_fd *oldfd_s, *newfd_s;
1136 if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
1140 if (!TEST_FD(newfd)) {
1145 newfd_s = __current->fdtable->fds[newfd];
1146 if (newfd_s && (errno = vfs_close(newfd_s->file))) {
1150 if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1151 __current->fdtable->fds[newfd] = newfd_s;
1156 return DO_STATUS(errno);
1159 __DEFINE_LXSYSCALL2(int, dup2, int, oldfd, int, newfd)
1161 return vfs_dup2(oldfd, newfd);
1164 __DEFINE_LXSYSCALL1(int, dup, int, oldfd)
1167 struct v_fd *oldfd_s, *newfd_s;
1168 if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
1172 if (!(errno = vfs_alloc_fdslot(&newfd)) &&
1173 !(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1174 __current->fdtable->fds[newfd] = newfd_s;
1179 return DO_STATUS(errno);
1182 __DEFINE_LXSYSCALL2(int,
1190 struct v_dnode* dnode;
1191 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
1195 if (errno = vfs_check_writable(dnode)) {
1199 if (!dnode->inode->ops->set_symlink) {
1204 lock_inode(dnode->inode);
1206 errno = dnode->inode->ops->set_symlink(dnode->inode, link_target);
1208 unlock_inode(dnode->inode);
1211 return DO_STATUS(errno);
1215 vfs_ref_dnode(struct v_dnode* dnode)
1217 atomic_fetch_add(&dnode->ref_count, 1);
1218 mnt_mkbusy(dnode->mnt);
1222 vfs_unref_dnode(struct v_dnode* dnode)
1224 atomic_fetch_sub(&dnode->ref_count, 1);
1225 mnt_chillax(dnode->mnt);
1229 vfs_do_chdir(struct proc_info* proc, struct v_dnode* dnode)
1235 if (!(dnode->inode->itype & VFS_IFDIR)) {
1241 vfs_unref_dnode(proc->cwd);
1244 vfs_ref_dnode(dnode);
1247 unlock_dnode(dnode);
1253 __DEFINE_LXSYSCALL1(int, chdir, const char*, path)
1255 struct v_dnode* dnode;
1258 if ((errno = vfs_walk_proc(path, &dnode, NULL, 0))) {
1262 errno = vfs_do_chdir(__current, dnode);
1265 return DO_STATUS(errno);
1268 __DEFINE_LXSYSCALL1(int, fchdir, int, fd)
1273 if ((errno = vfs_getfd(fd, &fd_s))) {
1277 errno = vfs_do_chdir(__current, fd_s->file->dnode);
1280 return DO_STATUS(errno);
1283 __DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
1294 if (!__current->cwd) {
1295 *buf = VFS_PATH_DELIM;
1298 len = vfs_get_path(__current->cwd, buf, size, 0);
1305 buf[len + 1] = '\0';
1310 __current->k_status = errno;
1315 vfs_do_rename(struct v_dnode* current, struct v_dnode* target)
1318 if (current->inode->id == target->inode->id) {
1323 if (errno = vfs_check_writable(current)) {
1327 if (current->ref_count > 1 || target->ref_count > 1) {
1331 if (current->super_block != target->super_block) {
1335 struct v_dnode* oldparent = current->parent;
1336 struct v_dnode* newparent = target->parent;
1338 lock_dnode(current);
1341 lock_dnode(oldparent);
1343 lock_dnode(newparent);
1345 if (!llist_empty(&target->children)) {
1347 unlock_dnode(target);
1352 current->inode->ops->rename(current->inode, current, target))) {
1353 unlock_dnode(target);
1357 // re-position current
1358 hstrcpy(¤t->name, &target->name);
1359 vfs_dcache_rehash(newparent, current);
1364 unlock_dnode(target);
1367 unlock_dnode(current);
1369 unlock_dnode(oldparent);
1371 unlock_dnode(newparent);
1376 __DEFINE_LXSYSCALL2(int, rename, const char*, oldpath, const char*, newpath)
1378 struct v_dnode *cur, *target_parent, *target;
1379 struct hstr name = HSTR(valloc(VFS_NAME_MAXLEN), 0);
1382 if ((errno = vfs_walk_proc(oldpath, &cur, NULL, 0))) {
1386 if ((errno = vfs_walk(
1387 __current->cwd, newpath, &target_parent, &name, VFS_WALK_PARENT))) {
1391 errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
1392 if (errno == ENOENT) {
1393 target = vfs_d_alloc(target_parent, &name);
1394 vfs_dcache_add(target_parent, target);
1404 errno = vfs_do_rename(cur, target);
1408 return DO_STATUS(errno);