 * @author Lunaixsky (zelong56@gmail.com)
 * @brief Lunaix virtual file system - an abstraction layer for all file systems.
 * @copyright Copyright (c) 2022

// Welcome to The Mountain O'Shit! :)
TODO: vfs & device todo checklist

It is overseen by Twilight Sparkle ;)

1. Get inodes hooked into lru (CHECKED)
2. Get dnodes hooked into lru (CHECKED)
3. Get inodes properly hashed so they can be reused by the underlying fs (CHECKED)
4. (lru) Add a callback function (or destructor) for eviction. (CHECKED)
   [good idea] or a constructor/destructor pattern in the cake allocator?
5. (mount) Figure out a way to identify a busy mount point before unmount.
   Maybe a unified mount_point structure that maintains a reference
   counter on any dnodes within the subtree? Such a counter will only
   increment if a file is opened or a dnode is being used as a working
   directory, and decrement conversely. (CHECKED)
6. (mount) Ability to track all mount points (including sub-mounts)
   so we can be confident to clean up everything when we
7. (mount) Figure out a way to acquire the device represented by a dnode,
   so it can be used to mount (e.g. we wish to get `struct device*`
   out of the dnode at /dev/sda).
   [tip] we should pay attention to twifs and add a private_data field
   under struct v_dnode? (CHECKED)
8. (mount) Then, we should refactor the mount/unmount mechanism. (CHECKED)
9. (mount) (future) Ability to mount anything? e.g. Linux can mount a disk
   image file using a so-called "loopback" pseudo device. Maybe
   we can do a similar thing in Lunaix? A block-device emulation
   layered above the regular file that we mount.
10. (device) device number (dev_t) allocation
    [good idea] <class>:<subclass>:<uniq_id> composition
    (see the sketch below)
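A minimal sketch of the composition idea in item 10. The macro names and
field widths here are illustrative assumptions, not a settled ABI:

    #define DEV_UNIQ_BITS     16
    #define DEV_SUBCLASS_BITS 8

    #define DEV_MKDEV(class, subclass, uniq)                                \
        ((dev_t)(((class) << (DEV_SUBCLASS_BITS + DEV_UNIQ_BITS)) |         \
                 ((subclass) << DEV_UNIQ_BITS) | (uniq)))

    #define DEV_CLASS(dev)    ((dev) >> (DEV_SUBCLASS_BITS + DEV_UNIQ_BITS))
    #define DEV_SUBCLASS(dev) (((dev) >> DEV_UNIQ_BITS) &                   \
                               ((1u << DEV_SUBCLASS_BITS) - 1))
    #define DEV_UNIQ(dev)     ((dev) & ((1u << DEV_UNIQ_BITS) - 1))

Composing the fields this way keeps dev_t allocation trivially unique per
class/subclass, and each field can be recovered independently for dispatch.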
#include <klibc/string.h>
#include <lunaix/foptions.h>
#include <lunaix/fs.h>
#include <lunaix/mm/cake.h>
#include <lunaix/mm/page.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/process.h>
#include <lunaix/spike.h>
#include <lunaix/syscall.h>
#include <lunaix/syscall_utils.h>

#include <lunaix/fs/twifs.h>

#include <sys/dirent_defs.h>
static struct cake_pile* dnode_pile;
static struct cake_pile* inode_pile;
static struct cake_pile* file_pile;
static struct cake_pile* superblock_pile;
static struct cake_pile* fd_pile;

struct v_dnode* vfs_sysroot;
static struct hbucket* dnode_cache;

struct lru_zone *dnode_lru, *inode_lru;

struct hstr vfs_ddot = HSTR("..", 2);
struct hstr vfs_dot = HSTR(".", 1);
struct hstr vfs_empty = HSTR("", 0);
vfs_sb_free(struct v_superblock* sb);

__vfs_try_evict_dnode(struct lru_node* obj);

__vfs_try_evict_inode(struct lru_node* obj);
    // Create a dedicated cake pile for these objects rather than using
    // valloc, so we can minimize internal fragmentation.
    dnode_pile = cake_new_pile("dnode_cache", sizeof(struct v_dnode), 1, 0);
    inode_pile = cake_new_pile("inode_cache", sizeof(struct v_inode), 1, 0);
    file_pile = cake_new_pile("file_cache", sizeof(struct v_file), 1, 0);
    fd_pile = cake_new_pile("fd_cache", sizeof(struct v_fd), 1, 0);
        cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);

    dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));

    dnode_lru = lru_new_zone(__vfs_try_evict_dnode);
    inode_lru = lru_new_zone(__vfs_try_evict_inode);

    hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
    hstr_rehash(&vfs_dot, HSTR_FULL_HASH);

    vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
    vfs_sysroot->parent = vfs_sysroot;
    atomic_fetch_add(&vfs_sysroot->ref_count, 1);
inline struct hbucket*
__dcache_hash(struct v_dnode* parent, u32_t* hash)
    _hash = _hash ^ (_hash >> VFS_HASHBITS);
    // Mix in the parent's pointer value to reduce the chance of collision.
    _hash += (u32_t)parent;

    return &dnode_cache[_hash & VFS_HASH_MASK];
vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
    if (!str->len || HSTR_EQ(str, &vfs_dot))

    if (HSTR_EQ(str, &vfs_ddot)) {
        return parent->parent;

    u32_t hash = str->hash;
    struct hbucket* slot = __dcache_hash(parent, &hash);

    struct v_dnode *pos, *n;
    hashtable_bucket_foreach(slot, pos, n, hash_list)
        if (pos->name.hash == hash) {

vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
    atomic_fetch_add(&dnode->ref_count, 1);
    dnode->parent = parent;
    llist_append(&parent->children, &dnode->siblings);

    struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
    hlist_add(&bucket->head, &dnode->hash_list);

vfs_dcache_remove(struct v_dnode* dnode)
    assert(dnode->ref_count == 1);

    llist_delete(&dnode->siblings);
    llist_delete(&dnode->aka_list);
    hlist_delete(&dnode->hash_list);

    dnode->parent = NULL;
    atomic_fetch_sub(&dnode->ref_count, 1);

vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
    hstr_rehash(&dnode->name, HSTR_FULL_HASH);
    vfs_dcache_remove(dnode);
    vfs_dcache_add(new_parent, dnode);
vfs_open(struct v_dnode* dnode, struct v_file** file)
    if (!dnode->inode || !dnode->inode->ops->open) {

    struct v_inode* inode = dnode->inode;

    struct v_file* vfile = cake_grab(file_pile);
    memset(vfile, 0, sizeof(*vfile));

    vfile->dnode = dnode;
    vfile->inode = inode;
    vfile->ref_count = ATOMIC_VAR_INIT(1);
    vfile->ops = inode->default_fops;

    if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
        struct pcache* pcache = vzalloc(sizeof(struct pcache));
        pcache->master = inode;
        inode->pg_cache = pcache;

    int errno = inode->ops->open(inode, vfile);
        cake_release(file_pile, vfile);
        atomic_fetch_add(&dnode->ref_count, 1);
        mnt_mkbusy(dnode->mnt);
vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
    if (assign_to->inode) {
        llist_delete(&assign_to->aka_list);
        assign_to->inode->link_count--;

    llist_append(&inode->aka_dnodes, &assign_to->aka_list);
    assign_to->inode = inode;

vfs_link(struct v_dnode* to_link, struct v_dnode* name)
    if ((errno = vfs_check_writable(to_link))) {

    lock_inode(to_link->inode);
    if (to_link->super_block->root != name->super_block->root) {
    } else if (!to_link->inode->ops->link) {
    } else if (!(errno = to_link->inode->ops->link(to_link->inode, name))) {
        vfs_assign_inode(name, to_link->inode);
    unlock_inode(to_link->inode);
vfs_pclose(struct v_file* file, pid_t pid)
    if (file->ref_count > 1) {
        atomic_fetch_sub(&file->ref_count, 1);
    } else if (!(errno = file->ops->close(file))) {
        atomic_fetch_sub(&file->dnode->ref_count, 1);
        file->inode->open_count--;

     * This happens when a process is terminated while blocking on a read.
     * In that case, the process is still holding the inode lock, and it
     * will never get released.
     * The unlocking should also include an ownership check.
     * To see why, consider two processes that both open the same file, both with
     * Process A: busy reading x
     * Process B: doing nothing with x
     * Assuming that, after a very short time, process B gets terminated
     * while process A is still busy with its reading business. By this
     * design, the inode lock of this file x is released by B rather
     * than A. And this will cause a probable race condition on A if another
     * process writes to this file after B exits.
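     *
     * A minimal sketch of the ownership-aware unlock described above.
     * The `owner` field and the mutex internals here are assumptions
     * for illustration, not the actual Lunaix mutex implementation:
     *
     *     void mutex_unlock_for(mutex_t* mtx, pid_t pid)
     *     {
     *         if (mtx->owner != pid)  // assumed owner-tracking field
     *             return;             // only the holder may release it
     *         mutex_unlock(mtx);
     *     }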
        if (mutex_on_hold(&file->inode->lock)) {
            mutex_unlock_for(&file->inode->lock, pid);
        mnt_chillax(file->dnode->mnt);

        pcache_commit_all(file->inode);
        cake_release(file_pile, file);
vfs_close(struct v_file* file)
    return vfs_pclose(file, __current->pid);

vfs_free_fd(struct v_fd* fd)
    cake_release(fd_pile, fd);

vfs_fsync(struct v_file* file)
    if ((errno = vfs_check_writable(file->dnode))) {

    lock_inode(file->inode);

    pcache_commit_all(file->inode);

    if (file->ops->sync) {
        errno = file->ops->sync(file);

    unlock_inode(file->inode);

vfs_alloc_fdslot(int* fd)
    for (size_t i = 0; i < VFS_MAX_FD; i++) {
        if (!__current->fdtable->fds[i]) {

    struct v_superblock* sb = cake_grab(superblock_pile);
    memset(sb, 0, sizeof(*sb));
    llist_init_head(&sb->sb_list);
    sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));

vfs_sb_free(struct v_superblock* sb)
    cake_release(superblock_pile, sb);

__vfs_try_evict_dnode(struct lru_node* obj)
    struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);

    if (!dnode->ref_count) {

__vfs_try_evict_inode(struct lru_node* obj)
    struct v_inode* inode = container_of(obj, struct v_inode, lru);

    if (!inode->link_count && !inode->open_count) {
vfs_d_alloc(struct v_dnode* parent, struct hstr* name)
    struct v_dnode* dnode = cake_grab(dnode_pile);
        lru_evict_half(dnode_lru);
        if (!(dnode = cake_grab(dnode_pile))) {

    memset(dnode, 0, sizeof(*dnode));
    llist_init_head(&dnode->children);
    llist_init_head(&dnode->siblings);
    llist_init_head(&dnode->aka_list);
    mutex_init(&dnode->lock);

    dnode->ref_count = ATOMIC_VAR_INIT(0);
    dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);

    hstrcpy(&dnode->name, name);

        dnode->super_block = parent->super_block;
        dnode->mnt = parent->mnt;

    lru_use_one(dnode_lru, &dnode->lru);
vfs_d_free(struct v_dnode* dnode)
    assert(dnode->ref_count == 1);

        assert(dnode->inode->link_count > 0);
        dnode->inode->link_count--;

    vfs_dcache_remove(dnode);
    // Make sure the children de-reference their parent.
    // With the lru present, eviction will eventually propagate over the
    // entire detached subtree.
    struct v_dnode *pos, *n;
    llist_for_each(pos, n, &dnode->children, siblings)
        vfs_dcache_remove(pos);

    vfree(dnode->name.value);
    cake_release(dnode_pile, dnode);
vfs_i_find(struct v_superblock* sb, u32_t i_id)
    struct hbucket* slot = &sb->i_cache[i_id & VFS_HASH_MASK];
    struct v_inode *pos, *n;
    hashtable_bucket_foreach(slot, pos, n, hash_list)
        if (pos->id == i_id) {
            lru_use_one(inode_lru, &pos->lru);

vfs_i_addhash(struct v_inode* inode)
    struct hbucket* slot = &inode->sb->i_cache[inode->id & VFS_HASH_MASK];

    hlist_delete(&inode->hash_list);
    hlist_add(&slot->head, &inode->hash_list);

vfs_i_alloc(struct v_superblock* sb)
    assert(sb->ops.init_inode);

    struct v_inode* inode;
    if (!(inode = cake_grab(inode_pile))) {
        lru_evict_half(inode_lru);
        if (!(inode = cake_grab(inode_pile))) {

    memset(inode, 0, sizeof(*inode));
    mutex_init(&inode->lock);
    llist_init_head(&inode->xattrs);
    llist_init_head(&inode->aka_dnodes);

    sb->ops.init_inode(sb, inode);

    inode->ctime = clock_unixtime();
    inode->atime = inode->ctime;
    inode->mtime = inode->ctime;

    lru_use_one(inode_lru, &inode->lru);
vfs_i_free(struct v_inode* inode)
    if (inode->pg_cache) {
        pcache_release(inode->pg_cache);
        vfree(inode->pg_cache);

    // We don't need to sync the inode here:
    // if an inode can be freed, then it must have been properly closed,
    // hence it must already be synced!
    if (inode->destruct) {
        inode->destruct(inode);

    hlist_delete(&inode->hash_list);
    cake_release(inode_pile, inode);
/* ---- System call definition and support ---- */

#define FLOCATE_CREATE_EMPTY 1
#define FLOCATE_CREATE_ONLY 2

vfs_getfd(int fd, struct v_fd** fd_s)
    if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {

__vfs_try_locate_file(const char* path,
                      struct v_dnode** fdir,
                      struct v_dnode** file,
    char name_str[VFS_NAME_MAXLEN];
    struct hstr name = HSTR(name_str, 0);

    if ((errno = vfs_walk_proc(path, fdir, &name, VFS_WALK_PARENT))) {

    errno = vfs_walk(*fdir, name.value, file, NULL, 0);

    if (errno != ENOENT && (options & FLOCATE_CREATE_ONLY)) {

    if (errno != ENOENT ||
        !(options & (FLOCATE_CREATE_EMPTY | FLOCATE_CREATE_ONLY))) {

    struct v_dnode* parent = *fdir;
    struct v_dnode* file_new = vfs_d_alloc(parent, &name);

    if (!(errno = parent->inode->ops->create(parent->inode, file_new))) {
        vfs_dcache_add(parent, file_new);
        vfs_d_free(file_new);

    unlock_dnode(parent);
vfs_do_open(const char* path, int options)
    struct v_dnode *dentry, *file;
    struct v_file* ofile = NULL;

    errno = __vfs_try_locate_file(
        path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);

    if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {

        if (errno || (errno = vfs_open(file, &ofile))) {

        struct v_fd* fd_s = cake_grab(fd_pile);
        memset(fd_s, 0, sizeof(*fd_s));

        // branchless: start at EOF iff FO_APPEND is set, otherwise at 0
        ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);

        fd_s->flags = options;
        __current->fdtable->fds[fd] = fd_s;
__DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
    int errno = vfs_do_open(path, options);
    return DO_STATUS_OR_RETURN(errno);

__DEFINE_LXSYSCALL1(int, close, int, fd)
    if ((errno = vfs_getfd(fd, &fd_s))) {

    if ((errno = vfs_close(fd_s->file))) {

    cake_release(fd_pile, fd_s);
    __current->fdtable->fds[fd] = 0;

    return DO_STATUS(errno);

__vfs_readdir_callback(struct dir_context* dctx,
    struct lx_dirent* dent = (struct lx_dirent*)dctx->cb_data;
    strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);
    dent->d_type = dtype;

__DEFINE_LXSYSCALL2(int, sys_readdir, int, fd, struct lx_dirent*, dent)
    if ((errno = vfs_getfd(fd, &fd_s))) {

    struct v_inode* inode = fd_s->file->inode;

    if (!(inode->itype & VFS_IFDIR)) {

    struct dir_context dctx =
        (struct dir_context){ .cb_data = dent,
                              .index = dent->d_offset,
                              .read_complete_callback =
                                  __vfs_readdir_callback };

    if (dent->d_offset == 0) {
        __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, DT_DIR);
    } else if (dent->d_offset == 1) {
        __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, DT_DIR);

    if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {

    return DO_STATUS_OR_RETURN(errno);
__DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
    if ((errno = vfs_getfd(fd, &fd_s))) {

    struct v_file* file = fd_s->file;
    if ((file->inode->itype & VFS_IFDIR)) {

    lock_inode(file->inode);

    file->inode->atime = clock_unixtime();

    if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
        errno = file->ops->read(file->inode, buf, count, file->f_pos);
        errno = pcache_read(file->inode, buf, count, file->f_pos);

        file->f_pos += errno;
        unlock_inode(file->inode);

    unlock_inode(file->inode);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL3(int, write, int, fd, void*, buf, size_t, count)
    if ((errno = vfs_getfd(fd, &fd_s))) {

    struct v_file* file = fd_s->file;

    if ((errno = vfs_check_writable(file->dnode))) {

    if ((file->inode->itype & VFS_IFDIR)) {

    lock_inode(file->inode);

    file->inode->mtime = clock_unixtime();

    if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
        errno = file->ops->write(file->inode, buf, count, file->f_pos);
        errno = pcache_write(file->inode, buf, count, file->f_pos);

        file->f_pos += errno;
        unlock_inode(file->inode);

    unlock_inode(file->inode);

    return DO_STATUS(errno);
__DEFINE_LXSYSCALL3(int, lseek, int, fd, int, offset, int, options)
    if ((errno = vfs_getfd(fd, &fd_s))) {

    struct v_file* file = fd_s->file;

    if (!file->ops->seek) {

    lock_inode(file->inode);

    int fpos = file->f_pos;

        overflow = __builtin_sadd_overflow((int)file->f_pos, offset, &fpos);
            __builtin_sadd_overflow((int)file->inode->fsize, offset, &fpos);

    } else if (!(errno = file->ops->seek(file->inode, fpos))) {

    unlock_inode(file->inode);

    return DO_STATUS(errno);
vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
    if (dnode->parent != dnode) {
        len = vfs_get_path(dnode->parent, buf, size, depth + 1);

    if (!len || buf[len - 1] != VFS_PATH_DELIM) {
        buf[len++] = VFS_PATH_DELIM;

    size_t cpy_size = MIN(dnode->name.len, size - len);
    strncpy(buf + len, dnode->name.value, cpy_size);

vfs_readlink(struct v_dnode* dnode, char* buf, size_t size)
    struct v_inode* inode = dnode->inode;
    if (inode->ops->read_symlink) {
        int errno = inode->ops->read_symlink(inode, &link);
        strncpy(buf, link, size);

vfs_get_dtype(int itype)
__DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
    if ((errno = vfs_getfd(fd, &fd_s))) {

    struct v_dnode* dnode;
    errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL3(int, readlink, const char*, path, char*, buf, size_t, size)
    struct v_dnode* dnode;
    if (!(errno = vfs_walk_proc(path, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
        errno = vfs_readlink(dnode, buf, size);

    return DO_STATUS(errno);
__DEFINE_LXSYSCALL4(int,
    if ((errno = vfs_getfd(dirfd, &fd_s))) {

    struct v_dnode* dnode;
    if (!(errno = vfs_walk(
            fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
        // read the link we just resolved, not the base directory itself
        errno = vfs_readlink(dnode, buf, size);

    return DO_STATUS(errno);
    When we perform an operation that could affect the layout of a
    directory (e.g., rename, mkdir, rmdir), we must lock the parent dir
    whenever possible. This blocks any ongoing path walk from reaching
    it, and hence avoids exposing any partial state.
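    A minimal sketch of that discipline (a hypothetical helper for
    illustration; the syscalls below inline the same pattern directly):

        static inline void
        lock_dir_for_layout_change(struct v_dnode* parent)
        {
            lock_dnode(parent);        // stop new path walks from entering
            lock_inode(parent->inode); // serialize the layout mutation
        }

        static inline void
        unlock_dir_for_layout_change(struct v_dnode* parent)
        {
            unlock_inode(parent->inode);
            unlock_dnode(parent);
        }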
__DEFINE_LXSYSCALL1(int, rmdir, const char*, pathname)
    struct v_dnode* dnode;
    if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
        return DO_STATUS(errno);

    if ((errno = vfs_check_writable(dnode))) {

    if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {

    if (dnode->ref_count > 1 || dnode->inode->open_count) {

    if (!llist_empty(&dnode->children)) {

    struct v_dnode* parent = dnode->parent;

    lock_inode(parent->inode);

    if ((dnode->inode->itype & VFS_IFDIR)) {
        errno = parent->inode->ops->rmdir(parent->inode, dnode);
            vfs_dcache_remove(dnode);

    unlock_inode(parent->inode);
    unlock_dnode(parent);

    return DO_STATUS(errno);
__DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
    struct v_dnode *parent, *dir;
    char name_value[VFS_NAME_MAXLEN];
    struct hstr name = HHSTR(name_value, 0, 0);

    if ((errno = vfs_walk_proc(path, &parent, &name, VFS_WALK_PARENT))) {

    if ((errno = vfs_check_writable(parent))) {

    if (!(dir = vfs_d_alloc(parent, &name))) {

    lock_inode(parent->inode);

    if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
    } else if (!parent->inode->ops->mkdir) {
    } else if (!(parent->inode->itype & VFS_IFDIR)) {
    } else if (!(errno = parent->inode->ops->mkdir(parent->inode, dir))) {
        vfs_dcache_add(parent, dir);

    unlock_inode(parent->inode);
    unlock_dnode(parent);

    return DO_STATUS(errno);
__vfs_do_unlink(struct v_dnode* dnode)
    struct v_inode* inode = dnode->inode;

    if (dnode->ref_count > 1) {

    if ((errno = vfs_check_writable(dnode))) {

    if (inode->open_count) {
    } else if (!(inode->itype & VFS_IFDIR)) {
        // The underlying unlink implementation should handle
        errno = inode->ops->unlink(inode);

    unlock_inode(inode);

__DEFINE_LXSYSCALL1(int, unlink, const char*, pathname)
    struct v_dnode* dnode;
    if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {

    errno = __vfs_do_unlink(dnode);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
    if ((errno = vfs_getfd(fd, &fd_s))) {

    struct v_dnode* dnode;
    if (!(errno = vfs_walk(fd_s->file->dnode, pathname, &dnode, NULL, 0))) {
        errno = __vfs_do_unlink(dnode);

    return DO_STATUS(errno);
__DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
    struct v_dnode *dentry, *to_link, *name_dentry, *name_file;

    errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
        errno = __vfs_try_locate_file(
            newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
    } else if (name_file) {
        errno = vfs_link(to_link, name_file);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL1(int, fsync, int, fildes)
    if (!(errno = vfs_getfd(fildes, &fd_s))) {
        errno = vfs_fsync(fd_s->file);

    return DO_STATUS(errno);
vfs_dup_fd(struct v_fd* old, struct v_fd** new)
    struct v_fd* copied = cake_grab(fd_pile);

    memcpy(copied, old, sizeof(struct v_fd));

    atomic_fetch_add(&old->file->ref_count, 1);

vfs_dup2(int oldfd, int newfd)
    if (newfd == oldfd) {

    struct v_fd *oldfd_s, *newfd_s;
    if ((errno = vfs_getfd(oldfd, &oldfd_s))) {

    if (!TEST_FD(newfd)) {

    newfd_s = __current->fdtable->fds[newfd];
    if (newfd_s && (errno = vfs_close(newfd_s->file))) {

    if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
        __current->fdtable->fds[newfd] = newfd_s;

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL2(int, dup2, int, oldfd, int, newfd)
    return vfs_dup2(oldfd, newfd);

__DEFINE_LXSYSCALL1(int, dup, int, oldfd)
    struct v_fd *oldfd_s, *newfd_s;
    if ((errno = vfs_getfd(oldfd, &oldfd_s))) {

    if (!(errno = vfs_alloc_fdslot(&newfd)) &&
        !(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
        __current->fdtable->fds[newfd] = newfd_s;

    return DO_STATUS(errno);
__DEFINE_LXSYSCALL2(int,
    struct v_dnode *dnode, *file;
    if ((errno = __vfs_try_locate_file(
            pathname, &dnode, &file, FLOCATE_CREATE_ONLY))) {

    if ((errno = vfs_check_writable(file))) {

    if (!file->inode->ops->set_symlink) {

    lock_inode(file->inode);

    errno = file->inode->ops->set_symlink(file->inode, link_target);

    unlock_inode(file->inode);

    return DO_STATUS(errno);
vfs_ref_file(struct v_file* file)
    atomic_fetch_add(&file->ref_count, 1);

vfs_ref_dnode(struct v_dnode* dnode)
    atomic_fetch_add(&dnode->ref_count, 1);
    mnt_mkbusy(dnode->mnt);

vfs_unref_dnode(struct v_dnode* dnode)
    atomic_fetch_sub(&dnode->ref_count, 1);
    mnt_chillax(dnode->mnt);

vfs_do_chdir(struct proc_info* proc, struct v_dnode* dnode)
    if (!(dnode->inode->itype & VFS_IFDIR)) {

        vfs_unref_dnode(proc->cwd);

    vfs_ref_dnode(dnode);

    unlock_dnode(dnode);

__DEFINE_LXSYSCALL1(int, chdir, const char*, path)
    struct v_dnode* dnode;

    if ((errno = vfs_walk_proc(path, &dnode, NULL, 0))) {

    errno = vfs_do_chdir(__current, dnode);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL1(int, fchdir, int, fd)
    if ((errno = vfs_getfd(fd, &fd_s))) {

    errno = vfs_do_chdir(__current, fd_s->file->dnode);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
    if (!__current->cwd) {
        *buf = VFS_PATH_DELIM;

        len = vfs_get_path(__current->cwd, buf, size, 0);

    __current->k_status = errno;
vfs_do_rename(struct v_dnode* current, struct v_dnode* target)
    if (current->inode->id == target->inode->id) {

    if ((errno = vfs_check_writable(current))) {

    if (current->ref_count > 1 || target->ref_count > 1) {

    if (current->super_block != target->super_block) {

    struct v_dnode* oldparent = current->parent;
    struct v_dnode* newparent = target->parent;

    lock_dnode(current);

    lock_dnode(oldparent);

    lock_dnode(newparent);

    if (!llist_empty(&target->children)) {
        unlock_dnode(target);

            current->inode->ops->rename(current->inode, current, target))) {
        unlock_dnode(target);

    // re-position current
    hstrcpy(&current->name, &target->name);
    vfs_dcache_rehash(newparent, current);

    unlock_dnode(target);

    unlock_dnode(current);

    unlock_dnode(oldparent);

    unlock_dnode(newparent);
__DEFINE_LXSYSCALL2(int, rename, const char*, oldpath, const char*, newpath)
    struct v_dnode *cur, *target_parent, *target;
    struct hstr name = HSTR(valloc(VFS_NAME_MAXLEN), 0);

    if ((errno = vfs_walk_proc(oldpath, &cur, NULL, 0))) {

    if ((errno = vfs_walk(
            __current->cwd, newpath, &target_parent, &name, VFS_WALK_PARENT))) {

    errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
    if (errno == ENOENT) {
        target = vfs_d_alloc(target_parent, &name);
        vfs_dcache_add(target_parent, target);

    errno = vfs_do_rename(cur, target);

    return DO_STATUS(errno);