 * @author Lunaixsky (zelong56@gmail.com)
 * @brief Lunaix virtual file system - an abstraction layer for all file systems.
 * @copyright Copyright (c) 2022

// Welcome to The Mountain O'Shit! :)

TODO vfs & device todos checklist

It is overseen by Twilight Sparkle ;)

1. Get inodes hooked into lru (CHECKED)
2. Get dnodes hooked into lru (CHECKED)
3. Get inodes properly hashed so they can be reused by the underlying fs (CHECKED)
4. (lru) Add a callback function (or destructor) for eviction. (CHECKED)
    [good idea] or a constructor/destructor pattern in the cake allocator?
5. (mount) Figure out a way to identify a busy mount point before unmount.
    Maybe a unified mount_point structure that maintains a reference
    counter on any dnodes within the subtree? Such a counter would only
    increment when a file is opened or a dnode is used as a working
    directory, and decrement conversely. (CHECKED)
6. (mount) Ability to track all mount points (including sub-mounts)
    so we can be confident to clean up everything when we
7. (mount) Figure out a way to acquire the device represented by a dnode,
    so it can be used to mount. (e.g. we wish to get `struct device*`
    out of the dnode at /dev/sda)
    [tip] we should pay attention to twifs and add a private_data field
    under struct v_dnode? (CHECKED)
8. (mount) Then, we should refactor the mount/unmount mechanism. (CHECKED)
9. (mount) (future) Ability to mount anything? e.g. Linux can mount a disk
    image file using a so-called "loopback" pseudo device. Maybe
    we can do a similar thing in Lunaix? A block device emulated
    on top of a regular file that we mount.
10. (device) device number (dev_t) allocation
    [good idea] <class>:<subclass>:<uniq_id> composition (see the sketch below)
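
    A minimal sketch of that composition (hypothetical macro names and
    field widths, assuming a 32-bit dev_t; not a committed encoding):

        // dev_t layout: [class:8][subclass:8][uniq_id:16]
        #define DEV_MAKE(class, sub, id)                         \
            ((dev_t)((((class) & 0xffU) << 24) |                 \
                     (((sub) & 0xffU) << 16) | ((id) & 0xffffU)))
        #define DEV_CLASS(dev)    (((dev) >> 24) & 0xffU)
        #define DEV_SUBCLASS(dev) (((dev) >> 16) & 0xffU)
        #define DEV_UNIQ(dev)     ((dev) & 0xffffU)
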
#include <klibc/string.h>
#include <lunaix/dirent.h>
#include <lunaix/foptions.h>
#include <lunaix/fs.h>
#include <lunaix/mm/cake.h>
#include <lunaix/mm/page.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/process.h>
#include <lunaix/spike.h>
#include <lunaix/syscall.h>

#include <lunaix/fs/twifs.h>

static struct cake_pile* dnode_pile;
static struct cake_pile* inode_pile;
static struct cake_pile* file_pile;
static struct cake_pile* superblock_pile;
static struct cake_pile* fd_pile;

struct v_dnode* vfs_sysroot;
static struct hbucket* dnode_cache;

struct lru_zone *dnode_lru, *inode_lru;

struct hstr vfs_ddot = HSTR("..", 2);
struct hstr vfs_dot = HSTR(".", 1);
struct hstr vfs_empty = HSTR("", 0);

static void
vfs_sb_free(struct v_superblock* sb);

static int
__vfs_try_evict_dnode(struct lru_node* obj);

static int
__vfs_try_evict_inode(struct lru_node* obj);

    // Create dedicated cake piles for these objects instead of using
    // valloc, so that internal fragmentation is minimized.
    dnode_pile = cake_new_pile("dnode_cache", sizeof(struct v_dnode), 1, 0);
    inode_pile = cake_new_pile("inode_cache", sizeof(struct v_inode), 1, 0);
    file_pile = cake_new_pile("file_cache", sizeof(struct v_file), 1, 0);
    fd_pile = cake_new_pile("fd_cache", sizeof(struct v_fd), 1, 0);
    superblock_pile =
        cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);

    dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));

    dnode_lru = lru_new_zone(__vfs_try_evict_dnode);
    inode_lru = lru_new_zone(__vfs_try_evict_inode);

    hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
    hstr_rehash(&vfs_dot, HSTR_FULL_HASH);
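
    // The sysroot is its own parent, so a ".." lookup at the root
    // resolves back to the root itself (see vfs_dcache_lookup).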
    vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
    vfs_sysroot->parent = vfs_sysroot;
    atomic_fetch_add(&vfs_sysroot->ref_count, 1);

inline struct hbucket*
__dcache_hash(struct v_dnode* parent, u32_t* hash)

    _hash = _hash ^ (_hash >> VFS_HASHBITS);
    // Add in the parent's pointer value to reduce the chance of collision.
    _hash += (u32_t)parent;

    return &dnode_cache[_hash & VFS_HASH_MASK];

vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
    if (!str->len || HSTR_EQ(str, &vfs_dot))

    if (HSTR_EQ(str, &vfs_ddot)) {
        return parent->parent;

    u32_t hash = str->hash;
    struct hbucket* slot = __dcache_hash(parent, &hash);

    struct v_dnode *pos, *n;
    hashtable_bucket_foreach(slot, pos, n, hash_list)
        if (pos->name.hash == hash) {

vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
    atomic_fetch_add(&dnode->ref_count, 1);
    dnode->parent = parent;
    llist_append(&parent->children, &dnode->siblings);

    struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
    hlist_add(&bucket->head, &dnode->hash_list);

vfs_dcache_remove(struct v_dnode* dnode)
    assert(dnode->ref_count == 1);

    llist_delete(&dnode->siblings);
    llist_delete(&dnode->aka_list);
    hlist_delete(&dnode->hash_list);

    dnode->parent = NULL;
    atomic_fetch_sub(&dnode->ref_count, 1);

vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
    hstr_rehash(&dnode->name, HSTR_FULL_HASH);
    vfs_dcache_remove(dnode);
    vfs_dcache_add(new_parent, dnode);

vfs_open(struct v_dnode* dnode, struct v_file** file)
    if (!dnode->inode || !dnode->inode->ops->open) {

    struct v_inode* inode = dnode->inode;

    struct v_file* vfile = cake_grab(file_pile);
    memset(vfile, 0, sizeof(*vfile));

    vfile->dnode = dnode;
    vfile->inode = inode;
    vfile->ref_count = ATOMIC_VAR_INIT(1);
    vfile->ops = inode->default_fops;
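
    // Lazily attach a page cache on the first open of a regular file;
    // subsequent opens reuse inode->pg_cache.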
    if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
        struct pcache* pcache = vzalloc(sizeof(struct pcache));

        pcache->master = inode;
        inode->pg_cache = pcache;

    int errno = inode->ops->open(inode, vfile);

        cake_release(file_pile, vfile);

    atomic_fetch_add(&dnode->ref_count, 1);

    mnt_mkbusy(dnode->mnt);

vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
    if (assign_to->inode) {
        llist_delete(&assign_to->aka_list);
        assign_to->inode->link_count--;

    llist_append(&inode->aka_dnodes, &assign_to->aka_list);
    assign_to->inode = inode;

vfs_link(struct v_dnode* to_link, struct v_dnode* name)
    if ((errno = vfs_check_writable(to_link))) {

    lock_inode(to_link->inode);

    if (to_link->super_block->root != name->super_block->root) {
    } else if (!to_link->inode->ops->link) {
    } else if (!(errno = to_link->inode->ops->link(to_link->inode, name))) {
        vfs_assign_inode(name, to_link->inode);

    unlock_inode(to_link->inode);

vfs_pclose(struct v_file* file, pid_t pid)
    if (file->ref_count > 1) {
        atomic_fetch_sub(&file->ref_count, 1);
    } else if (!(errno = file->ops->close(file))) {
        atomic_fetch_sub(&file->dnode->ref_count, 1);
        file->inode->open_count--;

        /*
         * This happens when a process is terminated while blocking on a
         * read. In that case, the process is still holding the inode
         * lock, which would otherwise never get released.
         * The unlocking must therefore include an ownership check.

         * To see why, consider two processes that both open the same
         * file, both with

         *   Process A: busy reading x
         *   Process B: doing nothing with x
         * Assume that, after a very short time, process B gets terminated
         * while process A is still busy with its read. Without the
         * ownership check, the inode lock on file x would be released by
         * B rather than A, causing a likely race condition on A if some
         * other process writes to this file after B exits.
         */
        if (mutex_on_hold(&file->inode->lock)) {
            mutex_unlock_for(&file->inode->lock, pid);

        mnt_chillax(file->dnode->mnt);

        pcache_commit_all(file->inode);
        cake_release(file_pile, file);

vfs_close(struct v_file* file)
    return vfs_pclose(file, __current->pid);

vfs_free_fd(struct v_fd* fd)
    cake_release(fd_pile, fd);

vfs_fsync(struct v_file* file)
    if ((errno = vfs_check_writable(file->dnode))) {

    lock_inode(file->inode);

    pcache_commit_all(file->inode);

    if (file->ops->sync) {
        errno = file->ops->sync(file);

    unlock_inode(file->inode);

vfs_alloc_fdslot(int* fd)
    for (size_t i = 0; i < VFS_MAX_FD; i++) {
        if (!__current->fdtable->fds[i]) {

    struct v_superblock* sb = cake_grab(superblock_pile);
    memset(sb, 0, sizeof(*sb));
    llist_init_head(&sb->sb_list);
    sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));

vfs_sb_free(struct v_superblock* sb)
    cake_release(superblock_pile, sb);

__vfs_try_evict_dnode(struct lru_node* obj)
    struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);

    if (!dnode->ref_count) {

__vfs_try_evict_inode(struct lru_node* obj)
    struct v_inode* inode = container_of(obj, struct v_inode, lru);

    if (!inode->link_count && !inode->open_count) {

vfs_d_alloc(struct v_dnode* parent, struct hstr* name)
    struct v_dnode* dnode = cake_grab(dnode_pile);
    if (!dnode) {
        lru_evict_half(dnode_lru);

        if (!(dnode = cake_grab(dnode_pile))) {

    memset(dnode, 0, sizeof(*dnode));
    llist_init_head(&dnode->children);
    llist_init_head(&dnode->siblings);
    llist_init_head(&dnode->aka_list);
    mutex_init(&dnode->lock);

    dnode->ref_count = ATOMIC_VAR_INIT(0);
    dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);

    hstrcpy(&dnode->name, name);

    if (parent) {
        dnode->super_block = parent->super_block;
        dnode->mnt = parent->mnt;

    lru_use_one(dnode_lru, &dnode->lru);

vfs_d_free(struct v_dnode* dnode)
    assert(dnode->ref_count == 1);

    if (dnode->inode) {
        assert(dnode->inode->link_count > 0);
        dnode->inode->link_count--;

    vfs_dcache_remove(dnode);
    // Make sure the children de-reference their parent.
    // With the lru present, the eviction will propagate over the entire
    // detached subtree eventually.
    struct v_dnode *pos, *n;
    llist_for_each(pos, n, &dnode->children, siblings)
        vfs_dcache_remove(pos);

    vfree(dnode->name.value);
    cake_release(dnode_pile, dnode);

vfs_i_find(struct v_superblock* sb, u32_t i_id)
    struct hbucket* slot = &sb->i_cache[i_id & VFS_HASH_MASK];
    struct v_inode *pos, *n;
    hashtable_bucket_foreach(slot, pos, n, hash_list)
        if (pos->id == i_id) {
            lru_use_one(inode_lru, &pos->lru);

vfs_i_addhash(struct v_inode* inode)
    struct hbucket* slot = &inode->sb->i_cache[inode->id & VFS_HASH_MASK];

    hlist_delete(&inode->hash_list);
    hlist_add(&slot->head, &inode->hash_list);

vfs_i_alloc(struct v_superblock* sb)
    assert(sb->ops.init_inode);

    struct v_inode* inode;
    if (!(inode = cake_grab(inode_pile))) {
        lru_evict_half(inode_lru);
        if (!(inode = cake_grab(inode_pile))) {

    memset(inode, 0, sizeof(*inode));
    mutex_init(&inode->lock);
    llist_init_head(&inode->xattrs);
    llist_init_head(&inode->aka_dnodes);

    sb->ops.init_inode(sb, inode);

    inode->ctime = clock_unixtime();
    inode->atime = inode->ctime;
    inode->mtime = inode->ctime;

    lru_use_one(inode_lru, &inode->lru);

vfs_i_free(struct v_inode* inode)
    if (inode->pg_cache) {
        pcache_release(inode->pg_cache);
        vfree(inode->pg_cache);

    // We don't need to sync the inode here:
    // if an inode can be freed, then it must have been properly closed,
    // hence it must have been synced already!
    if (inode->destruct) {
        inode->destruct(inode);

    hlist_delete(&inode->hash_list);
    cake_release(inode_pile, inode);

/* ---- System call definition and support ---- */

#define FLOCATE_CREATE_EMPTY 1

vfs_getfd(int fd, struct v_fd** fd_s)
    if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {

__vfs_try_locate_file(const char* path,
                      struct v_dnode** fdir,
                      struct v_dnode** file,
                      int options)
    char name_str[VFS_NAME_MAXLEN];
    struct hstr name = HSTR(name_str, 0);

    if ((errno = vfs_walk_proc(path, fdir, &name, VFS_WALK_PARENT))) {

    errno = vfs_walk(*fdir, name.value, file, NULL, 0);
    if (errno != ENOENT || !(options & FLOCATE_CREATE_EMPTY)) {

    struct v_dnode* parent = *fdir;
    struct v_dnode* file_new = vfs_d_alloc(parent, &name);

    if (!(errno = parent->inode->ops->create(parent->inode, file_new))) {
        vfs_dcache_add(parent, file_new);

        vfs_d_free(file_new);

    unlock_dnode(parent);

vfs_do_open(const char* path, int options)
    struct v_dnode *dentry, *file;
    struct v_file* ofile = NULL;

    errno = __vfs_try_locate_file(
        path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);

    if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {

        if (errno || (errno = vfs_open(file, &ofile))) {

        struct v_fd* fd_s = cake_grab(fd_pile);
        memset(fd_s, 0, sizeof(*fd_s));
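
        // Branchless FO_APPEND handling: -(x != 0) is all-ones when the
        // flag is set, so f_pos starts at fsize, and at 0 otherwise.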
        ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);

        fd_s->flags = options;
        __current->fdtable->fds[fd] = fd_s;

__DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
    int errno = vfs_do_open(path, options);
    return DO_STATUS_OR_RETURN(errno);

__DEFINE_LXSYSCALL1(int, close, int, fd)
    if ((errno = vfs_getfd(fd, &fd_s))) {

    if ((errno = vfs_close(fd_s->file))) {

    cake_release(fd_pile, fd_s);
    __current->fdtable->fds[fd] = 0;

    return DO_STATUS(errno);

__vfs_readdir_callback(struct dir_context* dctx,
                       const char* name,
                       const int len,
                       const int dtype)
    struct dirent* dent = (struct dirent*)dctx->cb_data;
    strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);

    dent->d_type = dtype;

__DEFINE_LXSYSCALL2(int, readdir, int, fd, struct dirent*, dent)
    if ((errno = vfs_getfd(fd, &fd_s))) {

    struct v_inode* inode = fd_s->file->inode;

    if (!(inode->itype & VFS_IFDIR)) {

    struct dir_context dctx =
      (struct dir_context){ .cb_data = dent,
                            .index = dent->d_offset,
                            .read_complete_callback = __vfs_readdir_callback };
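
    // Directory offsets 0 and 1 are synthesized here as "." and "..";
    // everything beyond that is delegated to the filesystem's own readdir.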
    if (dent->d_offset == 0) {
        __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, DT_DIR);
    } else if (dent->d_offset == 1) {
        __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, DT_DIR);

        if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {

    return DO_STATUS_OR_RETURN(errno);

__DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
    if ((errno = vfs_getfd(fd, &fd_s))) {

    struct v_file* file = fd_s->file;
    if ((file->inode->itype & VFS_IFDIR)) {

    lock_inode(file->inode);

    file->inode->atime = clock_unixtime();
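
    // Sequential devices and FO_DIRECT opens bypass the page cache and
    // call into the driver directly; regular reads are served by pcache.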
    if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
        errno = file->ops->read(file->inode, buf, count, file->f_pos);
    } else {
        errno = pcache_read(file->inode, buf, count, file->f_pos);

        file->f_pos += errno;
        unlock_inode(file->inode);

    unlock_inode(file->inode);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL3(int, write, int, fd, void*, buf, size_t, count)
    if ((errno = vfs_getfd(fd, &fd_s))) {

    struct v_file* file = fd_s->file;

    if ((errno = vfs_check_writable(file->dnode))) {

    if ((file->inode->itype & VFS_IFDIR)) {

    lock_inode(file->inode);

    file->inode->mtime = clock_unixtime();

    if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
        errno = file->ops->write(file->inode, buf, count, file->f_pos);
    } else {
        errno = pcache_write(file->inode, buf, count, file->f_pos);

        file->f_pos += errno;
        unlock_inode(file->inode);

    unlock_inode(file->inode);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL3(int, lseek, int, fd, int, offset, int, options)
    if ((errno = vfs_getfd(fd, &fd_s))) {

    struct v_file* file = fd_s->file;

    if (!file->ops->seek) {

    lock_inode(file->inode);

    int fpos = file->f_pos;
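
    // Compute the new position with explicit overflow checks: seeks
    // relative to the current position use f_pos as the base, while
    // end-relative seeks use the file size.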
        overflow = __builtin_sadd_overflow((int)file->f_pos, offset, &fpos);

        __builtin_sadd_overflow((int)file->inode->fsize, offset, &fpos);

    } else if (!(errno = file->ops->seek(file->inode, fpos))) {

    unlock_inode(file->inode);

    return DO_STATUS(errno);
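
/*
 * vfs_get_path recurses towards the root first, then appends each
 * component on the way back. For a hypothetical dnode at /mnt/data/file,
 * buf grows as "/" -> "/mnt" -> "/mnt/data" -> "/mnt/data/file".
 */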
vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
    if (dnode->parent != dnode) {
        len = vfs_get_path(dnode->parent, buf, size, depth + 1);

    if (!len || buf[len - 1] != VFS_PATH_DELIM) {
        buf[len++] = VFS_PATH_DELIM;

    size_t cpy_size = MIN(dnode->name.len, size - len);
    strncpy(buf + len, dnode->name.value, cpy_size);

vfs_readlink(struct v_dnode* dnode, char* buf, size_t size)
    struct v_inode* inode = dnode->inode;
    if (inode->ops->read_symlink) {

        int errno = inode->ops->read_symlink(inode, &link);
        strncpy(buf, link, size);

vfs_get_dtype(int itype)

__DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
    if ((errno = vfs_getfd(fd, &fd_s))) {

    struct v_dnode* dnode;
    errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);

    return DO_STATUS_OR_RETURN(errno);

__DEFINE_LXSYSCALL3(int, readlink, const char*, path, char*, buf, size_t, size)
    struct v_dnode* dnode;
    if (!(errno = vfs_walk_proc(path, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
        errno = vfs_readlink(dnode, buf, size);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL4(int,
                    readlinkat,
                    int, dirfd,
                    const char*, pathname,
                    char*, buf,
                    size_t, size)
    if ((errno = vfs_getfd(dirfd, &fd_s))) {

    struct v_dnode* dnode;
    if (!(errno = vfs_walk(
            fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
        errno = vfs_readlink(dnode, buf, size);

    return DO_STATUS(errno);

When we perform an operation that could affect the layout of a
directory (i.e., rename, mkdir, rmdir), we must lock the parent
directory whenever possible. This blocks any ongoing path walk from
reaching it, and thus avoids exposing any partial state.
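
A minimal sketch of that discipline (using the lock helpers seen in this
file; locks are taken dnode first, then inode, and released in reverse,
as rmdir/mkdir below do):

    lock_dnode(parent);
    lock_inode(parent->inode);
    ... mutate the directory layout ...
    unlock_inode(parent->inode);
    unlock_dnode(parent);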
__DEFINE_LXSYSCALL1(int, rmdir, const char*, pathname)
    struct v_dnode* dnode;
    if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
        return DO_STATUS(errno);

    if ((errno = vfs_check_writable(dnode))) {

    if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {

    if (dnode->ref_count > 1 || dnode->inode->open_count) {

    if (!llist_empty(&dnode->children)) {

    struct v_dnode* parent = dnode->parent;

    lock_inode(parent->inode);

    if ((dnode->inode->itype & VFS_IFDIR)) {
        errno = parent->inode->ops->rmdir(parent->inode, dnode);
            vfs_dcache_remove(dnode);

    unlock_inode(parent->inode);
    unlock_dnode(parent);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
    struct v_dnode *parent, *dir;
    char name_value[VFS_NAME_MAXLEN];
    struct hstr name = HHSTR(name_value, 0, 0);

    if ((errno = vfs_walk_proc(path, &parent, &name, VFS_WALK_PARENT))) {

    if ((errno = vfs_check_writable(parent))) {

    if (!(dir = vfs_d_alloc(parent, &name))) {

    lock_inode(parent->inode);

    if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
    } else if (!parent->inode->ops->mkdir) {
    } else if (!(parent->inode->itype & VFS_IFDIR)) {
    } else if (!(errno = parent->inode->ops->mkdir(parent->inode, dir))) {
        vfs_dcache_add(parent, dir);

    unlock_inode(parent->inode);
    unlock_dnode(parent);

    return DO_STATUS(errno);

__vfs_do_unlink(struct v_dnode* dnode)
    struct v_inode* inode = dnode->inode;

    if (dnode->ref_count > 1) {

    if ((errno = vfs_check_writable(dnode))) {

    if (inode->open_count) {
    } else if (!(inode->itype & VFS_IFDIR)) {
        // The underlying unlink implementation should handle
        errno = inode->ops->unlink(inode);

    unlock_inode(inode);

__DEFINE_LXSYSCALL1(int, unlink, const char*, pathname)
    struct v_dnode* dnode;
    if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {

    errno = __vfs_do_unlink(dnode);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
    if ((errno = vfs_getfd(fd, &fd_s))) {

    struct v_dnode* dnode;
    if (!(errno = vfs_walk(fd_s->file->dnode, pathname, &dnode, NULL, 0))) {
        errno = __vfs_do_unlink(dnode);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
    struct v_dnode *dentry, *to_link, *name_dentry, *name_file;

    errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);

        errno = __vfs_try_locate_file(
            newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
    } else if (name_file) {
        errno = vfs_link(to_link, name_file);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL1(int, fsync, int, fildes)
    if (!(errno = vfs_getfd(fildes, &fd_s))) {
        errno = vfs_fsync(fd_s->file);

    return DO_STATUS(errno);

vfs_dup_fd(struct v_fd* old, struct v_fd** new)
    struct v_fd* copied = cake_grab(fd_pile);

    memcpy(copied, old, sizeof(struct v_fd));

    atomic_fetch_add(&old->file->ref_count, 1);

vfs_dup2(int oldfd, int newfd)
    if (newfd == oldfd) {

    struct v_fd *oldfd_s, *newfd_s;
    if ((errno = vfs_getfd(oldfd, &oldfd_s))) {

    if (!TEST_FD(newfd)) {

    newfd_s = __current->fdtable->fds[newfd];
    if (newfd_s && (errno = vfs_close(newfd_s->file))) {

    if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
        __current->fdtable->fds[newfd] = newfd_s;

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL2(int, dup2, int, oldfd, int, newfd)
    return vfs_dup2(oldfd, newfd);

__DEFINE_LXSYSCALL1(int, dup, int, oldfd)
    struct v_fd *oldfd_s, *newfd_s;
    if ((errno = vfs_getfd(oldfd, &oldfd_s))) {

    if (!(errno = vfs_alloc_fdslot(&newfd)) &&
        !(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
        __current->fdtable->fds[newfd] = newfd_s;

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL2(int,
                    symlink,
                    const char*, pathname,
                    const char*, link_target)
    struct v_dnode* dnode;
    if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {

    if ((errno = vfs_check_writable(dnode))) {

    if (!dnode->inode->ops->set_symlink) {

    lock_inode(dnode->inode);

    errno = dnode->inode->ops->set_symlink(dnode->inode, link_target);

    unlock_inode(dnode->inode);

    return DO_STATUS(errno);

vfs_ref_dnode(struct v_dnode* dnode)
    atomic_fetch_add(&dnode->ref_count, 1);
    mnt_mkbusy(dnode->mnt);

vfs_unref_dnode(struct v_dnode* dnode)
    atomic_fetch_sub(&dnode->ref_count, 1);
    mnt_chillax(dnode->mnt);

vfs_do_chdir(struct proc_info* proc, struct v_dnode* dnode)
    if (!(dnode->inode->itype & VFS_IFDIR)) {

        vfs_unref_dnode(proc->cwd);

    vfs_ref_dnode(dnode);

    unlock_dnode(dnode);

__DEFINE_LXSYSCALL1(int, chdir, const char*, path)
    struct v_dnode* dnode;

    if ((errno = vfs_walk_proc(path, &dnode, NULL, 0))) {

    errno = vfs_do_chdir(__current, dnode);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL1(int, fchdir, int, fd)
    if ((errno = vfs_getfd(fd, &fd_s))) {

    errno = vfs_do_chdir(__current, fd_s->file->dnode);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
    if (!__current->cwd) {
        *buf = VFS_PATH_DELIM;

    len = vfs_get_path(__current->cwd, buf, size, 0);
    buf[len] = '\0';

    __current->k_status = errno;

vfs_do_rename(struct v_dnode* current, struct v_dnode* target)
    if (current->inode->id == target->inode->id) {

    if ((errno = vfs_check_writable(current))) {

    if (current->ref_count > 1 || target->ref_count > 1) {

    if (current->super_block != target->super_block) {

    struct v_dnode* oldparent = current->parent;
    struct v_dnode* newparent = target->parent;

    lock_dnode(current);

    lock_dnode(oldparent);

    lock_dnode(newparent);

    if (!llist_empty(&target->children)) {

        unlock_dnode(target);

            current->inode->ops->rename(current->inode, current, target))) {
        unlock_dnode(target);

    // re-position current
    hstrcpy(&current->name, &target->name);
    vfs_dcache_rehash(newparent, current);

    unlock_dnode(target);

    unlock_dnode(current);

    unlock_dnode(oldparent);

    unlock_dnode(newparent);

__DEFINE_LXSYSCALL2(int, rename, const char*, oldpath, const char*, newpath)
    struct v_dnode *cur, *target_parent, *target;
    struct hstr name = HSTR(valloc(VFS_NAME_MAXLEN), 0);

    if ((errno = vfs_walk_proc(oldpath, &cur, NULL, 0))) {

    if ((errno = vfs_walk(
            __current->cwd, newpath, &target_parent, &name, VFS_WALK_PARENT))) {

    errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
    if (errno == ENOENT) {
        target = vfs_d_alloc(target_parent, &name);
        vfs_dcache_add(target_parent, target);

    errno = vfs_do_rename(cur, target);

    return DO_STATUS(errno);