3 * @author Lunaixsky (zelong56@gmail.com)
4 * @brief Lunaix virtual file system - an abstraction layer for all file system.
8 * @copyright Copyright (c) 2022
12 // Welcome to The Mountain O'Shit! :)
15 TODO vfs & device todos checklist
17 It is overseen by Twilight Sparkle ;)
19 1. Get inodes hooked into lru (CHECKED)
20 2. Get dnodes hooked into lru (CHECKED)
21 3. Get inodes properly hashed so they can be reused by underlying fs (CHECKED)
22 4. (lru) Add a callback function (or destructor) for eviction. (CHECKED)
23 [good idea] or a constructor/destructor pattern in cake allocator ?
24 5. (mount) Figure out a way to identify a busy mount point before unmount
25 maybe a unified mount_point structure that maintain a referencing
26 counter on any dnodes within the subtree? Such a counter will only
27 increment if a file is opened or a dnode is being used as working
28 directory, and decrement conversely. (CHECKED)
29 6. (mount) Ability to track all mount points (including sub-mounts)
30 so we can be confident to clean up everything when we
32 7. (mount) Figure out a way to acquire the device represented by a dnode.
33 so it can be used to mount. (e.g. we wish to get `struct device*`
34 out of the dnode at /dev/sda)
35 [tip] we should pay attention at twifs and add a private_data field
36 under struct v_dnode? (CHECKED)
37 8. (mount) Then, we should refactor on mount/unmount mechanism. (CHECKED)
38 9. (mount) (future) Ability to mount anything? e.g. Linux can mount a disk
39 image file using a so called "loopback" pseudo device. Maybe
40 we can do similar thing in Lunaix? A block device emulation
41 above the regular file when we mount it on.
42 10. (device) device number (dev_t) allocation
43 [good idea] <class>:<subclass>:<uniq_id> composition
46 #include <klibc/string.h>
47 #include <lunaix/dirent.h>
48 #include <lunaix/foptions.h>
49 #include <lunaix/fs.h>
50 #include <lunaix/mm/cake.h>
51 #include <lunaix/mm/page.h>
52 #include <lunaix/mm/valloc.h>
53 #include <lunaix/process.h>
54 #include <lunaix/spike.h>
55 #include <lunaix/syscall.h>
57 #include <lunaix/fs/twifs.h>
59 static struct cake_pile* dnode_pile;
60 static struct cake_pile* inode_pile;
61 static struct cake_pile* file_pile;
62 static struct cake_pile* superblock_pile;
63 static struct cake_pile* fd_pile;
65 struct v_dnode* vfs_sysroot;
66 static struct hbucket* dnode_cache;
68 struct lru_zone *dnode_lru, *inode_lru;
70 struct hstr vfs_ddot = HSTR("..", 2);
71 struct hstr vfs_dot = HSTR(".", 1);
72 struct hstr vfs_empty = HSTR("", 0);
78 vfs_sb_free(struct v_superblock* sb);
81 __vfs_try_evict_dnode(struct lru_node* obj);
84 __vfs_try_evict_inode(struct lru_node* obj);
89 // 为他们专门创建一个蛋糕堆,而不使用valloc,这样我们可以最小化内碎片的产生
90 dnode_pile = cake_new_pile("dnode_cache", sizeof(struct v_dnode), 1, 0);
91 inode_pile = cake_new_pile("inode_cache", sizeof(struct v_inode), 1, 0);
92 file_pile = cake_new_pile("file_cache", sizeof(struct v_file), 1, 0);
93 fd_pile = cake_new_pile("fd_cache", sizeof(struct v_fd), 1, 0);
95 cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
97 dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
99 dnode_lru = lru_new_zone(__vfs_try_evict_dnode);
100 inode_lru = lru_new_zone(__vfs_try_evict_inode);
102 hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
103 hstr_rehash(&vfs_dot, HSTR_FULL_HASH);
106 vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
107 vfs_sysroot->parent = vfs_sysroot;
108 atomic_fetch_add(&vfs_sysroot->ref_count, 1);
111 inline struct hbucket*
112 __dcache_hash(struct v_dnode* parent, u32_t* hash)
116 _hash = _hash ^ (_hash >> VFS_HASHBITS);
117 // 与parent的指针值做加法,来减小碰撞的可能性。
118 _hash += (u32_t)parent;
120 return &dnode_cache[_hash & VFS_HASH_MASK];
124 vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
126 if (!str->len || HSTR_EQ(str, &vfs_dot))
129 if (HSTR_EQ(str, &vfs_ddot)) {
130 return parent->parent;
133 u32_t hash = str->hash;
134 struct hbucket* slot = __dcache_hash(parent, &hash);
136 struct v_dnode *pos, *n;
137 hashtable_bucket_foreach(slot, pos, n, hash_list)
139 if (pos->name.hash == hash) {
147 vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
151 atomic_fetch_add(&dnode->ref_count, 1);
152 dnode->parent = parent;
153 llist_append(&parent->children, &dnode->siblings);
155 struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
156 hlist_add(&bucket->head, &dnode->hash_list);
160 vfs_dcache_remove(struct v_dnode* dnode)
163 assert(dnode->ref_count == 1);
165 llist_delete(&dnode->siblings);
166 llist_delete(&dnode->aka_list);
167 hlist_delete(&dnode->hash_list);
169 dnode->parent = NULL;
170 atomic_fetch_sub(&dnode->ref_count, 1);
174 vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
178 hstr_rehash(&dnode->name, HSTR_FULL_HASH);
179 vfs_dcache_remove(dnode);
180 vfs_dcache_add(new_parent, dnode);
184 vfs_open(struct v_dnode* dnode, struct v_file** file)
186 if (!dnode->inode || !dnode->inode->ops->open) {
190 struct v_inode* inode = dnode->inode;
194 struct v_file* vfile = cake_grab(file_pile);
195 memset(vfile, 0, sizeof(*vfile));
197 vfile->dnode = dnode;
198 vfile->inode = inode;
199 vfile->ref_count = ATOMIC_VAR_INIT(1);
200 vfile->ops = inode->default_fops;
202 if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
203 struct pcache* pcache = vzalloc(sizeof(struct pcache));
205 pcache->master = inode;
206 inode->pg_cache = pcache;
209 int errno = inode->ops->open(inode, vfile);
211 cake_release(file_pile, vfile);
213 atomic_fetch_add(&dnode->ref_count, 1);
215 mnt_mkbusy(dnode->mnt);
226 vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
228 if (assign_to->inode) {
229 llist_delete(&assign_to->aka_list);
230 assign_to->inode->link_count--;
232 llist_append(&inode->aka_dnodes, &assign_to->aka_list);
233 assign_to->inode = inode;
238 vfs_link(struct v_dnode* to_link, struct v_dnode* name)
242 if ((errno = vfs_check_writable(to_link))) {
246 lock_inode(to_link->inode);
247 if (to_link->super_block->root != name->super_block->root) {
249 } else if (!to_link->inode->ops->link) {
251 } else if (!(errno = to_link->inode->ops->link(to_link->inode, name))) {
252 vfs_assign_inode(name, to_link->inode);
254 unlock_inode(to_link->inode);
260 vfs_pclose(struct v_file* file, pid_t pid)
263 if (file->ref_count > 1) {
264 atomic_fetch_sub(&file->ref_count, 1);
265 } else if (!(errno = file->ops->close(file))) {
266 atomic_fetch_sub(&file->dnode->ref_count, 1);
267 file->inode->open_count--;
269 // Prevent dead lock.
270 // This happened when process is terminated while blocking on read.
271 // In that case, the process is still holding the inode lock and it will
272 // never get released.
274 * The unlocking should also include ownership check.
276 * To see why, consider two process both open the same file both with
278 * Process A: busy on reading x
279 * Process B: do nothing with x
280 * Assuming that, after a very short time, process B get terminated
281 * while process A is still busy in it's reading business. By this
282 * design, the inode lock of this file x is get released by B rather
283 * than A. And this will cause a probable race condition on A if other
284 * process is writing to this file later after B exit.
286 if (mutex_on_hold(&file->inode->lock)) {
287 mutex_unlock_for(&file->inode->lock, pid);
289 mnt_chillax(file->dnode->mnt);
291 pcache_commit_all(file->inode);
292 cake_release(file_pile, file);
298 vfs_close(struct v_file* file)
300 return vfs_pclose(file, __current->pid);
304 vfs_free_fd(struct v_fd* fd)
306 cake_release(fd_pile, fd);
310 vfs_fsync(struct v_file* file)
313 if ((errno = vfs_check_writable(file->dnode))) {
317 lock_inode(file->inode);
319 pcache_commit_all(file->inode);
322 if (file->ops->sync) {
323 errno = file->ops->sync(file);
326 unlock_inode(file->inode);
332 vfs_alloc_fdslot(int* fd)
334 for (size_t i = 0; i < VFS_MAX_FD; i++) {
335 if (!__current->fdtable->fds[i]) {
346 struct v_superblock* sb = cake_grab(superblock_pile);
347 memset(sb, 0, sizeof(*sb));
348 llist_init_head(&sb->sb_list);
349 sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
354 vfs_sb_free(struct v_superblock* sb)
357 cake_release(superblock_pile, sb);
361 __vfs_try_evict_dnode(struct lru_node* obj)
363 struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);
365 if (!dnode->ref_count) {
373 __vfs_try_evict_inode(struct lru_node* obj)
375 struct v_inode* inode = container_of(obj, struct v_inode, lru);
377 if (!inode->link_count && !inode->open_count) {
385 vfs_d_alloc(struct v_dnode* parent, struct hstr* name)
387 struct v_dnode* dnode = cake_grab(dnode_pile);
389 lru_evict_half(dnode_lru);
391 if (!(dnode = cake_grab(dnode_pile))) {
396 memset(dnode, 0, sizeof(*dnode));
397 llist_init_head(&dnode->children);
398 llist_init_head(&dnode->siblings);
399 llist_init_head(&dnode->aka_list);
400 mutex_init(&dnode->lock);
402 dnode->ref_count = ATOMIC_VAR_INIT(0);
403 dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
405 hstrcpy(&dnode->name, name);
408 dnode->super_block = parent->super_block;
409 dnode->mnt = parent->mnt;
412 lru_use_one(dnode_lru, &dnode->lru);
418 vfs_d_free(struct v_dnode* dnode)
420 assert(dnode->ref_count == 1);
423 assert(dnode->inode->link_count > 0);
424 dnode->inode->link_count--;
427 vfs_dcache_remove(dnode);
428 // Make sure the children de-referencing their parent.
429 // With lru presented, the eviction will be propagated over the entire
430 // detached subtree eventually
431 struct v_dnode *pos, *n;
432 llist_for_each(pos, n, &dnode->children, siblings)
434 vfs_dcache_remove(pos);
437 vfree(dnode->name.value);
438 cake_release(dnode_pile, dnode);
442 vfs_i_find(struct v_superblock* sb, u32_t i_id)
444 struct hbucket* slot = &sb->i_cache[i_id & VFS_HASH_MASK];
445 struct v_inode *pos, *n;
446 hashtable_bucket_foreach(slot, pos, n, hash_list)
448 if (pos->id == i_id) {
449 lru_use_one(inode_lru, &pos->lru);
458 vfs_i_addhash(struct v_inode* inode)
460 struct hbucket* slot = &inode->sb->i_cache[inode->id & VFS_HASH_MASK];
462 hlist_delete(&inode->hash_list);
463 hlist_add(&slot->head, &inode->hash_list);
467 vfs_i_alloc(struct v_superblock* sb)
469 assert(sb->ops.init_inode);
471 struct v_inode* inode;
472 if (!(inode = cake_grab(inode_pile))) {
473 lru_evict_half(inode_lru);
474 if (!(inode = cake_grab(inode_pile))) {
479 memset(inode, 0, sizeof(*inode));
480 mutex_init(&inode->lock);
481 llist_init_head(&inode->xattrs);
482 llist_init_head(&inode->aka_dnodes);
484 sb->ops.init_inode(sb, inode);
487 inode->ctime = clock_unixtime();
488 inode->atime = inode->ctime;
489 inode->mtime = inode->ctime;
492 lru_use_one(inode_lru, &inode->lru);
497 vfs_i_free(struct v_inode* inode)
499 if (inode->pg_cache) {
500 pcache_release(inode->pg_cache);
501 vfree(inode->pg_cache);
503 // we don't need to sync inode.
504 // If an inode can be free, then it must be properly closed.
505 // Hence it must be synced already!
506 if (inode->destruct) {
507 inode->destruct(inode);
509 hlist_delete(&inode->hash_list);
510 cake_release(inode_pile, inode);
513 /* ---- System call definition and support ---- */
515 #define FLOCATE_CREATE_EMPTY 1
518 vfs_getfd(int fd, struct v_fd** fd_s)
520 if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {
527 __vfs_try_locate_file(const char* path,
528 struct v_dnode** fdir,
529 struct v_dnode** file,
532 char name_str[VFS_NAME_MAXLEN];
533 struct hstr name = HSTR(name_str, 0);
537 if ((errno = vfs_walk_proc(path, fdir, &name, VFS_WALK_PARENT))) {
541 errno = vfs_walk(*fdir, name.value, file, NULL, 0);
542 if (errno != ENOENT || !(options & FLOCATE_CREATE_EMPTY)) {
546 struct v_dnode* parent = *fdir;
547 struct v_dnode* file_new = vfs_d_alloc(parent, &name);
555 if (!(errno = parent->inode->ops->create(parent->inode, file_new))) {
556 vfs_dcache_add(parent, file_new);
559 vfs_d_free(file_new);
562 unlock_dnode(parent);
568 vfs_do_open(const char* path, int options)
571 struct v_dnode *dentry, *file;
572 struct v_file* ofile = 0;
574 errno = __vfs_try_locate_file(
575 path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);
577 if (errno || (errno = vfs_open(file, &ofile))) {
581 struct v_inode* o_inode = ofile->inode;
583 if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
584 struct v_fd* fd_s = cake_grab(fd_pile);
585 memset(fd_s, 0, sizeof(*fd_s));
587 ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);
589 fd_s->flags = options;
590 __current->fdtable->fds[fd] = fd_s;
597 __DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
599 int errno = vfs_do_open(path, options);
600 return DO_STATUS_OR_RETURN(errno);
603 __DEFINE_LXSYSCALL1(int, close, int, fd)
607 if ((errno = vfs_getfd(fd, &fd_s))) {
611 if ((errno = vfs_close(fd_s->file))) {
615 cake_release(fd_pile, fd_s);
616 __current->fdtable->fds[fd] = 0;
619 return DO_STATUS(errno);
623 __vfs_readdir_callback(struct dir_context* dctx,
628 struct dirent* dent = (struct dirent*)dctx->cb_data;
629 strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);
631 dent->d_type = dtype;
634 __DEFINE_LXSYSCALL2(int, readdir, int, fd, struct dirent*, dent)
639 if ((errno = vfs_getfd(fd, &fd_s))) {
643 struct v_inode* inode = fd_s->file->inode;
647 if (!(inode->itype & VFS_IFDIR)) {
650 struct dir_context dctx =
651 (struct dir_context){ .cb_data = dent,
652 .index = dent->d_offset,
653 .read_complete_callback =
654 __vfs_readdir_callback };
656 if (dent->d_offset == 0) {
657 __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, DT_DIR);
658 } else if (dent->d_offset == 1) {
659 __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, DT_DIR);
662 if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {
673 return DO_STATUS_OR_RETURN(errno);
676 __DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
680 if ((errno = vfs_getfd(fd, &fd_s))) {
684 struct v_file* file = fd_s->file;
685 if ((file->inode->itype & VFS_IFDIR)) {
690 lock_inode(file->inode);
692 file->inode->atime = clock_unixtime();
694 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
695 errno = file->ops->read(file->inode, buf, count, file->f_pos);
697 errno = pcache_read(file->inode, buf, count, file->f_pos);
701 file->f_pos += errno;
702 unlock_inode(file->inode);
706 unlock_inode(file->inode);
709 return DO_STATUS(errno);
712 __DEFINE_LXSYSCALL3(int, write, int, fd, void*, buf, size_t, count)
716 if ((errno = vfs_getfd(fd, &fd_s))) {
720 struct v_file* file = fd_s->file;
722 if ((errno = vfs_check_writable(file->dnode))) {
726 if ((file->inode->itype & VFS_IFDIR)) {
731 lock_inode(file->inode);
733 file->inode->mtime = clock_unixtime();
735 if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
736 errno = file->ops->write(file->inode, buf, count, file->f_pos);
738 errno = pcache_write(file->inode, buf, count, file->f_pos);
742 file->f_pos += errno;
743 unlock_inode(file->inode);
747 unlock_inode(file->inode);
750 return DO_STATUS(errno);
753 __DEFINE_LXSYSCALL3(int, lseek, int, fd, int, offset, int, options)
757 if ((errno = vfs_getfd(fd, &fd_s))) {
761 struct v_file* file = fd_s->file;
763 if (!file->ops->seek) {
768 lock_inode(file->inode);
771 int fpos = file->f_pos;
774 overflow = __builtin_sadd_overflow((int)file->f_pos, offset, &fpos);
778 __builtin_sadd_overflow((int)file->inode->fsize, offset, &fpos);
786 } else if (!(errno = file->ops->seek(file->inode, fpos))) {
790 unlock_inode(file->inode);
793 return DO_STATUS(errno);
797 vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
809 if (dnode->parent != dnode) {
810 len = vfs_get_path(dnode->parent, buf, size, depth + 1);
817 if (!len || buf[len - 1] != VFS_PATH_DELIM) {
818 buf[len++] = VFS_PATH_DELIM;
821 size_t cpy_size = MIN(dnode->name.len, size - len);
822 strncpy(buf + len, dnode->name.value, cpy_size);
829 vfs_readlink(struct v_dnode* dnode, char* buf, size_t size)
832 struct v_inode* inode = dnode->inode;
833 if (inode->ops->read_symlink) {
836 int errno = inode->ops->read_symlink(inode, &link);
837 strncpy(buf, link, size);
846 vfs_get_dtype(int itype)
858 __DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
862 if ((errno = vfs_getfd(fd, &fd_s))) {
866 struct v_dnode* dnode;
867 errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);
874 return DO_STATUS(errno);
877 __DEFINE_LXSYSCALL3(int, readlink, const char*, path, char*, buf, size_t, size)
880 struct v_dnode* dnode;
881 if (!(errno = vfs_walk_proc(path, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
882 errno = vfs_readlink(dnode, buf, size);
889 return DO_STATUS(errno);
892 __DEFINE_LXSYSCALL4(int,
905 if ((errno = vfs_getfd(dirfd, &fd_s))) {
909 struct v_dnode* dnode;
910 if (!(errno = vfs_walk(
911 fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
912 errno = vfs_readlink(fd_s->file->dnode, buf, size);
920 return DO_STATUS(errno);
925 When we perform operation that could affect the layout of
926 directory (i.e., rename, mkdir, rmdir). We must lock the parent dir
927 whenever possible. This will block any ongoing path walking to reach
928 it hence avoid any partial state.
931 __DEFINE_LXSYSCALL1(int, rmdir, const char*, pathname)
934 struct v_dnode* dnode;
935 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
936 return DO_STATUS(errno);
941 if ((errno = vfs_check_writable(dnode))) {
945 if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
950 if (dnode->ref_count > 1 || dnode->inode->open_count) {
955 if (!llist_empty(&dnode->children)) {
960 struct v_dnode* parent = dnode->parent;
968 lock_inode(parent->inode);
970 if ((dnode->inode->itype & VFS_IFDIR)) {
971 errno = parent->inode->ops->rmdir(parent->inode, dnode);
973 vfs_dcache_remove(dnode);
979 unlock_inode(parent->inode);
980 unlock_dnode(parent);
984 return DO_STATUS(errno);
987 __DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
990 struct v_dnode *parent, *dir;
991 char name_value[VFS_NAME_MAXLEN];
992 struct hstr name = HHSTR(name_value, 0, 0);
994 if ((errno = vfs_walk_proc(path, &parent, &name, VFS_WALK_PARENT))) {
998 if ((errno = vfs_check_writable(parent))) {
1002 if (!(dir = vfs_d_alloc(parent, &name))) {
1008 lock_inode(parent->inode);
1010 if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
1012 } else if (!parent->inode->ops->mkdir) {
1014 } else if (!(parent->inode->itype & VFS_IFDIR)) {
1016 } else if (!(errno = parent->inode->ops->mkdir(parent->inode, dir))) {
1017 vfs_dcache_add(parent, dir);
1024 unlock_inode(parent->inode);
1025 unlock_dnode(parent);
1027 return DO_STATUS(errno);
1031 __vfs_do_unlink(struct v_dnode* dnode)
1034 struct v_inode* inode = dnode->inode;
1036 if (dnode->ref_count > 1) {
1040 if ((errno = vfs_check_writable(dnode))) {
1046 if (inode->open_count) {
1048 } else if (!(inode->itype & VFS_IFDIR)) {
1049 // The underlying unlink implementation should handle
1051 errno = inode->ops->unlink(inode);
1059 unlock_inode(inode);
1064 __DEFINE_LXSYSCALL1(int, unlink, const char*, pathname)
1067 struct v_dnode* dnode;
1068 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
1072 errno = __vfs_do_unlink(dnode);
1075 return DO_STATUS(errno);
1078 __DEFINE_LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
1082 if ((errno = vfs_getfd(fd, &fd_s))) {
1086 struct v_dnode* dnode;
1087 if (!(errno = vfs_walk(fd_s->file->dnode, pathname, &dnode, NULL, 0))) {
1088 errno = __vfs_do_unlink(dnode);
1092 return DO_STATUS(errno);
1095 __DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
1098 struct v_dnode *dentry, *to_link, *name_dentry, *name_file;
1100 errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
1102 errno = __vfs_try_locate_file(
1103 newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
1106 } else if (name_file) {
1107 errno = vfs_link(to_link, name_file);
1110 return DO_STATUS(errno);
1113 __DEFINE_LXSYSCALL1(int, fsync, int, fildes)
1118 if (!(errno = vfs_getfd(fildes, &fd_s))) {
1119 errno = vfs_fsync(fd_s->file);
1122 return DO_STATUS(errno);
1126 vfs_dup_fd(struct v_fd* old, struct v_fd** new)
1129 struct v_fd* copied = cake_grab(fd_pile);
1131 memcpy(copied, old, sizeof(struct v_fd));
1133 atomic_fetch_add(&old->file->ref_count, 1);
1141 vfs_dup2(int oldfd, int newfd)
1143 if (newfd == oldfd) {
1148 struct v_fd *oldfd_s, *newfd_s;
1149 if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
1153 if (!TEST_FD(newfd)) {
1158 newfd_s = __current->fdtable->fds[newfd];
1159 if (newfd_s && (errno = vfs_close(newfd_s->file))) {
1163 if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1164 __current->fdtable->fds[newfd] = newfd_s;
1169 return DO_STATUS(errno);
1172 __DEFINE_LXSYSCALL2(int, dup2, int, oldfd, int, newfd)
1174 return vfs_dup2(oldfd, newfd);
1177 __DEFINE_LXSYSCALL1(int, dup, int, oldfd)
1180 struct v_fd *oldfd_s, *newfd_s;
1181 if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
1185 if (!(errno = vfs_alloc_fdslot(&newfd)) &&
1186 !(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1187 __current->fdtable->fds[newfd] = newfd_s;
1192 return DO_STATUS(errno);
1195 __DEFINE_LXSYSCALL2(int,
1203 struct v_dnode* dnode;
1204 if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
1208 if (errno = vfs_check_writable(dnode)) {
1212 if (!dnode->inode->ops->set_symlink) {
1217 lock_inode(dnode->inode);
1219 errno = dnode->inode->ops->set_symlink(dnode->inode, link_target);
1221 unlock_inode(dnode->inode);
1224 return DO_STATUS(errno);
1228 vfs_ref_dnode(struct v_dnode* dnode)
1230 atomic_fetch_add(&dnode->ref_count, 1);
1231 mnt_mkbusy(dnode->mnt);
1235 vfs_unref_dnode(struct v_dnode* dnode)
1237 atomic_fetch_sub(&dnode->ref_count, 1);
1238 mnt_chillax(dnode->mnt);
1242 vfs_do_chdir(struct proc_info* proc, struct v_dnode* dnode)
1248 if (!(dnode->inode->itype & VFS_IFDIR)) {
1254 vfs_unref_dnode(proc->cwd);
1257 vfs_ref_dnode(dnode);
1260 unlock_dnode(dnode);
1266 __DEFINE_LXSYSCALL1(int, chdir, const char*, path)
1268 struct v_dnode* dnode;
1271 if ((errno = vfs_walk_proc(path, &dnode, NULL, 0))) {
1275 errno = vfs_do_chdir(__current, dnode);
1278 return DO_STATUS(errno);
1281 __DEFINE_LXSYSCALL1(int, fchdir, int, fd)
1286 if ((errno = vfs_getfd(fd, &fd_s))) {
1290 errno = vfs_do_chdir(__current, fd_s->file->dnode);
1293 return DO_STATUS(errno);
1296 __DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
1307 if (!__current->cwd) {
1308 *buf = VFS_PATH_DELIM;
1311 len = vfs_get_path(__current->cwd, buf, size, 0);
1318 buf[len + 1] = '\0';
1323 __current->k_status = errno;
1328 vfs_do_rename(struct v_dnode* current, struct v_dnode* target)
1331 if (current->inode->id == target->inode->id) {
1336 if (errno = vfs_check_writable(current)) {
1340 if (current->ref_count > 1 || target->ref_count > 1) {
1344 if (current->super_block != target->super_block) {
1348 struct v_dnode* oldparent = current->parent;
1349 struct v_dnode* newparent = target->parent;
1351 lock_dnode(current);
1354 lock_dnode(oldparent);
1356 lock_dnode(newparent);
1358 if (!llist_empty(&target->children)) {
1360 unlock_dnode(target);
1365 current->inode->ops->rename(current->inode, current, target))) {
1366 unlock_dnode(target);
1370 // re-position current
1371 hstrcpy(¤t->name, &target->name);
1372 vfs_dcache_rehash(newparent, current);
1377 unlock_dnode(target);
1380 unlock_dnode(current);
1382 unlock_dnode(oldparent);
1384 unlock_dnode(newparent);
1389 __DEFINE_LXSYSCALL2(int, rename, const char*, oldpath, const char*, newpath)
1391 struct v_dnode *cur, *target_parent, *target;
1392 struct hstr name = HSTR(valloc(VFS_NAME_MAXLEN), 0);
1395 if ((errno = vfs_walk_proc(oldpath, &cur, NULL, 0))) {
1399 if ((errno = vfs_walk(
1400 __current->cwd, newpath, &target_parent, &name, VFS_WALK_PARENT))) {
1404 errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
1405 if (errno == ENOENT) {
1406 target = vfs_d_alloc(target_parent, &name);
1407 vfs_dcache_add(target_parent, target);
1417 errno = vfs_do_rename(cur, target);
1421 return DO_STATUS(errno);