3 * @author Lunaixsky (zelong56@gmail.com)
4 * @brief Lunaix virtual file system - an abstraction layer for all file system.
8 * @copyright Copyright (c) 2022
12 // Welcome to The Mountain O'Shit! :)
15 TODO vfs & device todos checklist
17 It is overseen by Twilight Sparkle ;)
19 1. Get inodes hooked into lru
20 2. Get dnodes hooked into lru
3. Get inodes properly hashed so they can be reused by the underlying fs (CHECKED)
22 4. (lru) Add a callback function (or destructor) for eviction.
23 [good idea] or a constructor/destructor pattern in cake allocator ?
24 5. (mount) Figure out a way to identify a busy mount point before unmount
25 maybe a unified mount_point structure that maintain a referencing
26 counter on any dnodes within the subtree? Such a counter will only
increment if a file is opened or a dnode is being used as working
directory, and decrement conversely.
29 6. (mount) Ability to track all mount points (including sub-mounts)
30 so we can be confident to clean up everything when we unmount.
31 7. (mount) Figure out a way to acquire the device represented by a dnode.
32 so it can be used to mount. (e.g. we wish to get `struct device*`
33 out of the dnode at /dev/sda)
34 [tip] we should pay attention at twifs and add a private_data field
36 8. (mount) Then, we should refactor on mount/unmount mechanism.
9. (mount) (future) Ability to mount anything? e.g. Linux can mount a disk
38 image file using a so called "loopback" pseudo device. Maybe
39 we can do similar thing in Lunaix? A block device emulation
40 above the regular file when we mount it on.
41 10. (device) device number (dev_t) allocation
42 [good idea] <class>:<subclass>:<uniq_id> composition
45 #include <klibc/string.h>
46 #include <lunaix/dirent.h>
47 #include <lunaix/foptions.h>
48 #include <lunaix/fs.h>
49 #include <lunaix/mm/cake.h>
50 #include <lunaix/mm/page.h>
51 #include <lunaix/mm/valloc.h>
52 #include <lunaix/process.h>
53 #include <lunaix/spike.h>
54 #include <lunaix/syscall.h>
56 #include <lunaix/fs/twifs.h>
#define PATH_DELIM '/'

// dcache/icache hash tables: 2^HASHTABLE_BITS buckets.
#define HASHTABLE_BITS 10
#define HASHTABLE_SIZE (1 << HASHTABLE_BITS)
// Mask to reduce a 32-bit hash to a bucket index.
#define HASH_MASK (HASHTABLE_SIZE - 1)
// Shift used to fold the high bits of a 32-bit hash into the low bits.
#define HASHBITS (32 - HASHTABLE_BITS)
/*
 * Serialize access to an inode's / dnode's mutable state; every lock_* call
 * must be paired with the matching unlock_* on all paths.
 *
 * FIX: the macro arguments are now parenthesized so the macros stay correct
 * for any pointer-valued expression (CERT PRE01-C), not just plain
 * identifiers.
 */
#define lock_inode(inode) mutex_lock(&(inode)->lock)
#define unlock_inode(inode) mutex_unlock(&(inode)->lock)

#define lock_dnode(dnode) mutex_lock(&(dnode)->lock)
#define unlock_dnode(dnode) mutex_unlock(&(dnode)->lock)
// Slab-style ("cake") allocators for the core VFS object types.
static struct cake_pile* dnode_pile;
static struct cake_pile* inode_pile;
static struct cake_pile* file_pile;
static struct cake_pile* superblock_pile;
static struct cake_pile* fd_pile;

// Superblock anchoring the root ("/") of the whole VFS tree.
static struct v_superblock* root_sb;
// Bucket arrays for the dnode and inode hash caches (HASHTABLE_SIZE each).
static struct hbucket *dnode_cache, *inode_cache;

// Pre-interned path components: "..", "." and the empty string.
struct hstr vfs_ddot = HSTR("..", 2);
struct hstr vfs_dot = HSTR(".", 1);
struct hstr vfs_empty = HSTR("", 0);

// Forward declaration (return type/storage on a line not visible here).
vfs_sb_free(struct v_superblock* sb);
92 // 为他们专门创建一个蛋糕堆,而不使用valloc,这样我们可以最小化内碎片的产生
93 dnode_pile = cake_new_pile("dnode_cache", sizeof(struct v_dnode), 1, 0);
94 inode_pile = cake_new_pile("inode_cache", sizeof(struct v_inode), 1, 0);
95 file_pile = cake_new_pile("file_cache", sizeof(struct v_file), 1, 0);
96 fd_pile = cake_new_pile("fd_cache", sizeof(struct v_fd), 1, 0);
98 cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
100 dnode_cache = vzalloc(HASHTABLE_SIZE * sizeof(struct hbucket));
101 inode_cache = vzalloc(HASHTABLE_SIZE * sizeof(struct hbucket));
103 hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
104 hstr_rehash(&vfs_dot, HSTR_FULL_HASH);
106 // 创建一个根superblock,用来蕴含我们的根目录。
107 root_sb = vfs_sb_alloc();
108 root_sb->root = vfs_d_alloc();
109 root_sb->root->inode = vfs_i_alloc(root_sb, 0);
// Select the dcache bucket for a (parent directory, name hash) pair.
inline struct hbucket*
__dcache_hash(struct v_dnode* parent, uint32_t* hash)
    uint32_t _hash = *hash;
    // Add the parent's pointer value to reduce the chance of collisions
    // between identical names under different directories. (translated)
    _hash += (uint32_t)parent;
    // NOTE(review): pointer-to-uint32_t cast assumes a 32-bit target.
    // Fold high bits down before masking to a bucket index.
    _hash = _hash ^ (_hash >> HASHBITS);
    return &dnode_cache[_hash & HASH_MASK];
// Resolve one path component under `parent` via the dcache.
// "" and "." resolve to parent itself; ".." to the grandparent (or parent
// at the root); otherwise the hash bucket is scanned.
vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
    if (!str->len || HSTR_EQ(str, &vfs_dot))
    if (HSTR_EQ(str, &vfs_ddot)) {
        return parent->parent ? parent->parent : parent;
    uint32_t hash = str->hash;
    struct hbucket* slot = __dcache_hash(parent, &hash);
    struct v_dnode *pos, *n;
    hashtable_bucket_foreach(slot, pos, n, hash_list)
        // NOTE(review): matches on the hash value alone here; confirm the
        // (not shown) hit path also compares the string, otherwise two
        // names with colliding hashes would alias each other.
        if (pos->name.hash == hash) {
// Insert `dnode` under `parent`: takes one reference, links it into the
// parent's children list and hashes it into its dcache bucket.
vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
    atomic_fetch_add(&dnode->ref_count, 1);
    dnode->parent = parent;
    llist_append(&parent->children, &dnode->siblings);
    struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
    hlist_add(&bucket->head, &dnode->hash_list);
// Unlink `dnode` from the dcache (sibling list + hash bucket) and drop the
// reference taken by vfs_dcache_add. Caller must hold the only reference.
vfs_dcache_remove(struct v_dnode* dnode)
    assert(dnode->ref_count == 1);
    llist_delete(&dnode->siblings);
    hlist_delete(&dnode->hash_list);
    dnode->parent = NULL;
    atomic_fetch_sub(&dnode->ref_count, 1);
// Move `dnode` to `new_parent`, recomputing its name hash first so it lands
// in the correct bucket (used by rename).
vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
    hstr_rehash(&dnode->name, HSTR_FULL_HASH);
    vfs_dcache_remove(dnode);
    vfs_dcache_add(new_parent, dnode);
// Core path-walking loop: resolve `path` starting at `start` into *dentry,
// one component at a time, consulting the dcache first and falling back to
// the fs's dir_lookup. Options control parent-stop, mkdir-on-missing, etc.
__vfs_walk(struct v_dnode* start,
           struct v_dnode** dentry,
           struct hstr* component,
    // Absolute path (or no start): restart from the fs root or global root.
    if (path[0] == PATH_DELIM || !start) {
        if ((walk_options & VFS_WALK_FSRELATIVE) && start) {
            start = start->super_block->root;
            start = root_sb->root;
    struct v_dnode* dnode;
    struct v_dnode* current_level = start;
    // Scratch buffer for the component currently being accumulated.
    char name_content[VFS_NAME_MAXLEN];
    struct hstr name = HSTR(name_content, 0);
    char current = path[i++], lookahead;
        lookahead = path[i++];
        if (current != PATH_DELIM) {
            // Component too long / illegal character: abort the walk.
            if (j >= VFS_NAME_MAXLEN - 1) {
            if (!VFS_VALID_CHAR(current)) {
            name_content[j++] = current;
        // handling cases like /^.*(\/+).*$/ (collapse repeated delimiters)
        if (lookahead == PATH_DELIM) {
        lock_dnode(current_level);
        hstr_rehash(&name, HSTR_FULL_HASH);
        // VFS_WALK_PARENT: stop before the last component, hand it back.
        if (!lookahead && (walk_options & VFS_WALK_PARENT)) {
            component->hash = name.hash;
            strcpy(component->value, name_content);
            unlock_dnode(current_level);
        dnode = vfs_dcache_lookup(current_level, &name);
            // dcache miss: allocate a dnode and ask the fs to look it up.
            dnode = vfs_d_alloc();
            hstrcpy(&dnode->name, &name);
            lock_inode(current_level->inode);
            current_level->inode->ops.dir_lookup(current_level->inode, dnode);
            // Optionally create missing intermediate directories.
            if (errno == ENOENT && (walk_options & VFS_WALK_MKPARENT)) {
                if (!current_level->inode->ops.mkdir) {
                errno = current_level->inode->ops.mkdir(
                    current_level->inode, dnode);
            unlock_inode(current_level->inode);
            // Lookup failed: release the half-built dnode and bail out.
            unlock_dnode(current_level);
            vfree(dnode->name.value);
            vfs_dcache_add(current_level, dnode);
        unlock_dnode(current_level);
        // Descend into the freshly resolved component.
        current_level = dnode;
    *dentry = current_level;
288 #define VFS_MAX_SYMLINK 16
// Public walk wrapper: resolve `path`, then repeatedly follow symlinks
// (unless VFS_WALK_NOFOLLOW), bounded by VFS_MAX_SYMLINK to stop loops.
vfs_walk(struct v_dnode* start,
         struct v_dnode** dentry,
         struct hstr* component,
    struct v_dnode* interim;
    const char* pathname = path;
    int errno = __vfs_walk(start, path, &interim, component, options);
    // NOTE(review): `counter` is declared outside this view; it tracks the
    // number of symlink hops taken so far.
    if (counter >= VFS_MAX_SYMLINK) {
    if ((interim->inode->itype & VFS_IFSYMLINK) &&
        !(options & VFS_WALK_NOFOLLOW) &&
        interim->inode->ops.read_symlink) {
        // Substitute the link target and walk again from `start`.
        errno = interim->inode->ops.read_symlink(interim->inode, &pathname);
    errno = __vfs_walk(start, pathname, &interim, component, options);
    // On error hand back NULL rather than a stale pointer.
    *dentry = errno ? 0 : interim;
// Mount `fs_name` backed by `device` at path `target` (resolved from the
// current process's cwd).
vfs_mount(const char* target, const char* fs_name, struct device* device)
    if (!(errno = vfs_walk(__current->cwd, target, &mnt, NULL, 0))) {
        errno = vfs_mount_at(fs_name, device, mnt);
// Unmount whatever is mounted at path `target`.
vfs_unmount(const char* target)
    if (!(errno = vfs_walk(__current->cwd, target, &mnt, NULL, 0))) {
        errno = vfs_unmount_at(mnt);
// Mount filesystem `fs_name` from `device` onto directory dnode `mnt_point`.
// On success the new superblock's root is grafted onto the mount point and
// the superblock is tracked on root_sb's list.
vfs_mount_at(const char* fs_name,
             struct device* device,
             struct v_dnode* mnt_point)
    // Mount point must be a directory.
    if (!(mnt_point->inode->itype & VFS_IFDIR)) {
    struct filesystem* fs = fsm_get(fs_name);
    struct v_superblock* sb = vfs_sb_alloc();
    sb->fs_id = fs->fs_id;
    if (!(errno = fs->mount(sb, mnt_point))) {
        sb->root = mnt_point;
        mnt_point->super_block = sb;
        llist_append(&root_sb->sb_list, &sb->sb_list);
    // NOTE(review): on mount failure `sb` appears to leak — confirm a
    // vfs_sb_free on the (not shown) error path.
// Tear down the mount whose root is `mnt_point`: delegate to the fs's
// unmount, then detach the dcache entry and untrack the superblock.
vfs_unmount_at(struct v_dnode* mnt_point)
    // FIXME deal with the detached dcache subtree
    struct v_superblock* sb = mnt_point->super_block;
    // Refuse to unmount anything that is not a mount root.
    if (sb->root != mnt_point) {
    if (!(errno = sb->fs->unmount(sb))) {
        struct v_dnode* fs_root = sb->root;
        vfs_dcache_remove(fs_root);
        llist_delete(&sb->sb_list);
// Open `dnode` into a fresh v_file (*file). Installs the inode's default
// fops, lazily attaches a page cache to regular files, and bumps the dnode
// reference on success.
vfs_open(struct v_dnode* dnode, struct v_file** file)
    if (!dnode->inode || !dnode->inode->ops.open) {
    struct v_inode* inode = dnode->inode;
    // NOTE(review): cake_grab result is used unchecked — confirm it cannot
    // return NULL under memory pressure.
    struct v_file* vfile = cake_grab(file_pile);
    memset(vfile, 0, sizeof(*vfile));
    vfile->dnode = dnode;
    vfile->inode = inode;
    vfile->ref_count = ATOMIC_VAR_INIT(1);
    vfile->ops = inode->default_fops;
    // First open of a regular file: create its page cache.
    if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
        struct pcache* pcache = vzalloc(sizeof(struct pcache));
        pcache->master = inode;
        inode->pg_cache = pcache;
    int errno = inode->ops.open(inode, vfile);
    // fs open() failed: release the v_file (pg_cache stays on the inode).
    cake_release(file_pile, vfile);
    atomic_fetch_add(&dnode->ref_count, 1);
// Create a hard link: make dnode `name` refer to `to_link`'s inode.
// Cross-filesystem links are rejected; link_count tracks the new reference.
vfs_link(struct v_dnode* to_link, struct v_dnode* name)
    lock_inode(to_link->inode);
    // Both dnodes must live on the same mounted filesystem.
    if (to_link->super_block->root != name->super_block->root) {
    } else if (!to_link->inode->ops.link) {
    } else if (!(errno = to_link->inode->ops.link(to_link->inode, name))) {
        name->inode = to_link->inode;
        to_link->inode->link_count++;
    unlock_inode(to_link->inode);
// Close a v_file: run the fs close hook, drop the dnode reference, flush
// the page cache and recycle the v_file object.
vfs_close(struct v_file* file)
    if (!file->ops.close || !(errno = file->ops.close(file))) {
        atomic_fetch_sub(&file->dnode->ref_count, 1);
        // NOTE(review): open_count-- is a plain decrement while ref_count
        // uses atomics — confirm open_count is lock-protected elsewhere.
        file->inode->open_count--;
        pcache_commit_all(file->inode);
        cake_release(file_pile, file);
// Flush a file's dirty page-cache pages and ask the fs to sync its inode.
vfs_fsync(struct v_file* file)
    lock_inode(file->inode);
    pcache_commit_all(file->inode);
    if (file->ops.sync) {
        errno = file->ops.sync(file->inode);
    unlock_inode(file->inode);
// Find the lowest free slot in the current process's fd table; store its
// index through `fd`.
vfs_alloc_fdslot(int* fd)
    for (size_t i = 0; i < VFS_MAX_FD; i++) {
        if (!__current->fdtable->fds[i]) {
    // Allocate and zero a v_superblock from its cake pile and ready its
    // list head. (Enclosing function header is outside this view.)
    struct v_superblock* sb = cake_grab(superblock_pile);
    memset(sb, 0, sizeof(*sb));
    llist_init_head(&sb->sb_list);
// Return a superblock to its cake pile. Ownership: caller must have
// already unlinked it from any superblock list.
vfs_sb_free(struct v_superblock* sb)
    cake_release(superblock_pile, sb);
    // Allocate and zero a v_dnode, initialize its lists/lock, and give it
    // an owned VFS_NAME_MAXLEN name buffer (freed in vfs_d_free).
    struct v_dnode* dnode = cake_grab(dnode_pile);
    memset(dnode, 0, sizeof(*dnode));
    llist_init_head(&dnode->children);
    llist_init_head(&dnode->siblings);
    mutex_init(&dnode->lock);
    dnode->ref_count = ATOMIC_VAR_INIT(0);
    // NOTE(review): vzalloc result unchecked — confirm it cannot fail here.
    dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
// Free a dnode: only legal when nothing references it. Drops one inode
// link, detaches all children (eviction propagates via the lru later),
// then releases the name buffer and the dnode itself.
vfs_d_free(struct v_dnode* dnode)
    if (dnode->ref_count) {
        // it can be only freed if no one is refering
    if (dnode->inode && dnode->inode->link_count) {
        dnode->inode->link_count--;
    // Make sure the children de-referencing their parent.
    // With lru presented, the eviction will be propagated over the entire
    // detached subtree eventually
    struct v_dnode *pos, *n;
    llist_for_each(pos, n, &dnode->children, siblings)
        vfs_dcache_remove(pos);
    vfree(dnode->name.value);
    cake_release(dnode_pile, dnode);
// Get (or create) the cached v_inode for (device_id, inode_id).
// NOTE(review): the bootstrap call in vfs init passes a superblock pointer
// as device_id — confirm that is intentional.
vfs_i_alloc(dev_t device_id, uint32_t inode_id)
    // We assume filesystem and device are in 1:1 correspondence (a
    // partition cannot host two filesystems), so v_inode caches produced
    // by one filesystem must be kept distinct from every other's: each
    // v_inode id is composed from the device ID plus the physical inode's
    // id relative to its filesystem. (translated from Chinese)
    inode_id = hash_32(inode_id ^ device_id, HASH_SIZE_BITS);
    inode_id = (inode_id >> HASHBITS) ^ inode_id;
    // Probe the inode cache first; on hit the cached object is reused.
    struct hbucket* slot = &inode_cache[inode_id & HASH_MASK];
    struct v_inode *pos, *n;
    hashtable_bucket_foreach(slot, pos, n, hash_list)
        if (pos->id == inode_id) {
    // Cache miss: build a fresh inode and hash it in.
    pos = cake_grab(inode_pile);
    memset(pos, 0, sizeof(*pos));
    mutex_init(&pos->lock);
    hlist_add(&slot->head, &pos->hash_list);
// Remove an inode from the inode cache and return it to its pile.
vfs_i_free(struct v_inode* inode)
    hlist_delete(&inode->hash_list);
    cake_release(inode_pile, inode);
/* ---- System call definition and support ---- */

// __vfs_try_locate_file option: create the final component if missing.
#define FLOCATE_CREATE_EMPTY 1

/*
 * Translate an errno into the syscall return convention, recording it in
 * the process's k_status.
 *
 * FIX: macro arguments parenthesized (CERT PRE01-C) so expressions such as
 * TEST_FD(a + b) or DO_STATUS_OR_RETURN(x ? y : z) expand correctly.
 */
#define DO_STATUS(errno) SYSCALL_ESTATUS(__current->k_status = (errno))
#define DO_STATUS_OR_RETURN(errno) ({ (errno) < 0 ? DO_STATUS(errno) : (errno); })

// An fd is valid iff it indexes into the fixed-size fd table.
#define TEST_FD(fd) ((fd) >= 0 && (fd) < VFS_MAX_FD)
// Validate `fd` and fetch its v_fd entry from the current fd table.
__vfs_getfd(int fd, struct v_fd** fd_s)
    if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {
// Resolve `path` into its parent directory (*fdir) and target (*file).
// Walks to the parent first, then resolves the final component separately
// so the create-if-missing path (below) knows the component name.
__vfs_try_locate_file(const char* path,
                      struct v_dnode** fdir,
                      struct v_dnode** file,
    char name_str[VFS_NAME_MAXLEN];
    struct hstr name = HSTR(name_str, 0);
    vfs_walk(__current->cwd, path, fdir, &name, VFS_WALK_PARENT))) {
    errno = vfs_walk(*fdir, name.value, file, NULL, 0);
    // Only ENOENT + FLOCATE_CREATE_EMPTY falls through to creation.
    if (errno != ENOENT || !(options & FLOCATE_CREATE_EMPTY)) {
634 struct v_dnode* parent = *fdir;
635 struct v_dnode* file_new = vfs_d_alloc();
636 hstrcpy(&file_new->name, &name);
638 if (!(errno = parent->inode->ops.create(parent->inode, file_new))) {
641 vfs_dcache_add(parent, file_new);
642 llist_append(&parent->children, &file_new->siblings);
644 vfs_d_free(file_new);
// Open (optionally creating) `path` and install the resulting v_file into
// a freshly allocated fd slot. Returns the fd or a negative errno.
vfs_do_open(const char* path, int options)
    struct v_dnode *dentry, *file;
    struct v_file* ofile = 0;
    errno = __vfs_try_locate_file(
        path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);
    if (errno || (errno = vfs_open(file, &ofile))) {
    struct v_inode* o_inode = ofile->inode;
    // Route reads/writes through the page cache unless the target is a
    // character device or O_DIRECT-style access was requested.
    if (!(o_inode->itype & VFS_IFSEQDEV) && !(options & FO_DIRECT)) {
        // XXX Change here accordingly when signature of pcache_r/w changed.
        ofile->ops.read = pcache_read;
        ofile->ops.write = pcache_write;
    if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
        struct v_fd* fd_s = vzalloc(sizeof(*fd_s));
        // FO_APPEND: start at EOF; the mask trick yields fsize or 0.
        ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);
        fd_s->flags = options;
        __current->fdtable->fds[fd] = fd_s;
// open(2): thin syscall wrapper over vfs_do_open.
__DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
    int errno = vfs_do_open(path, options);
    return DO_STATUS_OR_RETURN(errno);
// close(2): drop one reference to the open file; only the last reference
// actually closes it. The fd slot is cleared either way on success.
__DEFINE_LXSYSCALL1(int, close, int, fd)
    if ((errno = __vfs_getfd(fd, &fd_s))) {
    if (fd_s->file->ref_count > 1) {
        // NOTE(review): plain decrement here vs atomic ops elsewhere on
        // ref_count — confirm this is under a lock.
        fd_s->file->ref_count--;
    } else if ((errno = vfs_close(fd_s->file))) {
    __current->fdtable->fds[fd] = 0;
    return DO_STATUS(errno);
// readdir callback: copy one directory entry into the user's dirent.
__vfs_readdir_callback(struct dir_context* dctx,
    struct dirent* dent = (struct dirent*)dctx->cb_data;
    // NOTE(review): strncpy does not NUL-terminate when name is exactly
    // DIRENT_NAME_MAX_LEN long — confirm the buffer has a reserved byte.
    strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);
    dent->d_type = dtype;
// readdir(2): emit one entry per call, synthesizing "." and ".." for the
// first two offsets, then delegating to the fs's readdir op.
__DEFINE_LXSYSCALL2(int, readdir, int, fd, struct dirent*, dent)
    if ((errno = __vfs_getfd(fd, &fd_s))) {
    struct v_inode* inode = fd_s->file->inode;
    // Only directories can be read this way.
    if (!(fd_s->file->inode->itype & VFS_IFDIR)) {
    struct dir_context dctx =
        (struct dir_context){ .cb_data = dent,
                              .index = dent->d_offset,
                              .read_complete_callback =
                                  __vfs_readdir_callback };
    if (dent->d_offset == 0) {
        __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, 0);
    } else if (dent->d_offset == 1) {
        __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, 0);
    if ((errno = fd_s->file->ops.readdir(inode, &dctx))) {
    return DO_STATUS(errno);
// read(2): refuse directories, bump atime, perform an interruptible read
// through the file's read op, then advance f_pos by the bytes returned.
__DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
    if ((errno = __vfs_getfd(fd, &fd_s))) {
    struct v_file* file = fd_s->file;
    if ((file->inode->itype & VFS_IFDIR)) {
    lock_inode(file->inode);
    file->inode->atime = clock_unixtime();
    // `errno` doubles as the byte count on success (>= 0).
    __SYSCALL_INTERRUPTIBLE(
        { errno = file->ops.read(file->inode, buf, count, file->f_pos); })
    unlock_inode(file->inode);
    file->f_pos += errno;
    return DO_STATUS(errno);
// write(2): mirror of read(2) — refuse directories, bump mtime, perform an
// interruptible write, advance f_pos by bytes written.
__DEFINE_LXSYSCALL3(int, write, int, fd, void*, buf, size_t, count)
    if ((errno = __vfs_getfd(fd, &fd_s))) {
    struct v_file* file = fd_s->file;
    if ((file->inode->itype & VFS_IFDIR)) {
    lock_inode(file->inode);
    file->inode->mtime = clock_unixtime();
    // `errno` doubles as the byte count on success (>= 0).
    __SYSCALL_INTERRUPTIBLE(
        { errno = file->ops.write(file->inode, buf, count, file->f_pos); })
    unlock_inode(file->inode);
    file->f_pos += errno;
    return DO_STATUS(errno);
// lseek(2): compute the new position (relative to current / end per
// `options`), let the fs validate it via seek(), then commit it.
__DEFINE_LXSYSCALL3(int, lseek, int, fd, int, offset, int, options)
    if ((errno = __vfs_getfd(fd, &fd_s))) {
    struct v_file* file = fd_s->file;
    lock_inode(file->inode);
    size_t fpos = file->f_pos;
    // SEEK_CUR-style: offset from the current position (signed math).
    fpos = (size_t)((int)file->f_pos + offset);
    // SEEK_END-style: offset from the file size.
    fpos = (size_t)((int)file->inode->fsize + offset);
    // A missing seek op means plain position update is always allowed.
    if (!file->ops.seek || !(errno = file->ops.seek(file->inode, fpos))) {
    unlock_inode(file->inode);
    return DO_STATUS(errno);
// Recursively build the absolute path of `dnode` into buf, root-first.
// Returns the number of characters written so far.
vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
    size_t len = vfs_get_path(dnode->parent, buf, size, depth + 1);
    // Append this component after the parent's path, clamped to `size`.
    size_t cpy_size = MIN(dnode->name.len, size - len);
    strncpy(buf + len, dnode->name.value, cpy_size);
    buf[len++] = PATH_DELIM;
// Read a symlink's target into `buf` via the inode's read_symlink op.
vfs_readlink(struct v_dnode* dnode, char* buf, size_t size)
    struct v_inode* inode = dnode->inode;
    if (inode->ops.read_symlink) {
        int errno = inode->ops.read_symlink(inode, &link);
        // NOTE(review): strncpy may leave buf unterminated when the link
        // is >= size chars — confirm callers account for this.
        strncpy(buf, link, size);
// realpathat(2): reconstruct the absolute path of the file behind `fd`.
__DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
    if ((errno = __vfs_getfd(fd, &fd_s))) {
    // NOTE(review): `dnode` is declared but unused in the visible code.
    struct v_dnode* dnode;
    errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);
    return DO_STATUS(errno);
// readlink(2): resolve `path` without following the final symlink, then
// copy its target into `buf`.
__DEFINE_LXSYSCALL3(int, readlink, const char*, path, char*, buf, size_t, size)
    struct v_dnode* dnode;
    vfs_walk(__current->cwd, path, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
    errno = vfs_readlink(dnode, buf, size);
    return DO_STATUS(errno);
942 __DEFINE_LXSYSCALL4(int,
955 if ((errno = __vfs_getfd(dirfd, &fd_s))) {
959 struct v_dnode* dnode;
960 if (!(errno = vfs_walk(
961 fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
962 errno = vfs_readlink(fd_s->file->dnode, buf, size);
970 return DO_STATUS(errno);
975 When we perform operation that could affect the layout of
976 directory (i.e., rename, mkdir, rmdir). We must lock the parent dir
whenever possible. This blocks any ongoing path walk from reaching
it, hence avoiding any partially-visible state.
// rmdir(2): remove an empty, unused directory. The parent dnode is locked
// first (see the locking note above this function) so concurrent walks
// cannot observe a half-removed entry.
__DEFINE_LXSYSCALL1(int, rmdir, const char*, pathname)
    struct v_dnode* dnode;
    if ((errno = vfs_walk(__current->cwd, pathname, &dnode, NULL, 0))) {
        return DO_STATUS(errno);
    lock_dnode(dnode->parent);
    // Read-only filesystems cannot be modified.
    if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
    // Busy: someone holds a reference or has it open.
    if (dnode->ref_count || dnode->inode->open_count) {
    // Non-empty directories cannot be removed.
    if (!llist_empty(&dnode->children)) {
    lock_inode(dnode->inode);
    if ((dnode->inode->itype & VFS_IFDIR)) {
        errno = dnode->inode->ops.rmdir(dnode->inode);
        vfs_dcache_remove(dnode);
        unlock_inode(dnode->inode);
    unlock_inode(dnode->inode);
    unlock_dnode(dnode);
    unlock_dnode(dnode->parent);
    return DO_STATUS(errno);
// mkdir(2): walk to the parent of `path`, then create the final component
// through the parent inode's mkdir op.
__DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
    // NOTE(review): if the walk fails, `dir` appears to leak — confirm a
    // vfs_d_free exists on the (not shown) error path.
    struct v_dnode *parent, *dir = vfs_d_alloc();
    vfs_walk(__current->cwd, path, &parent, &dir->name, VFS_WALK_PARENT);
    lock_inode(parent->inode);
    if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
    } else if (!parent->inode->ops.mkdir) {
    } else if (!(parent->inode->itype & VFS_IFDIR)) {
    } else if (!(errno = parent->inode->ops.mkdir(parent->inode, dir))) {
        llist_append(&parent->children, &dir->siblings);
    unlock_inode(parent->inode);
    unlock_dnode(parent);
    return DO_STATUS(errno);
// Shared unlink worker: refuse busy dnodes/open inodes and directories,
// then delegate to the fs unlink op and detach the dcache entry.
__vfs_do_unlink(struct v_dnode* dnode)
    struct v_inode* inode = dnode->inode;
    // Someone else still references this dnode.
    if (dnode->ref_count > 1) {
    if (inode->open_count) {
    } else if (!(inode->itype & VFS_IFDIR)) {
        // The underlying unlink implementation should handle
        errno = inode->ops.unlink(inode);
        inode->link_count--;
        vfs_dcache_remove(dnode);
    unlock_inode(inode);
// unlink(2): resolve the path and hand off to __vfs_do_unlink, rejecting
// read-only filesystems.
__DEFINE_LXSYSCALL1(int, unlink, const char*, pathname)
    struct v_dnode* dnode;
    if ((errno = vfs_walk(__current->cwd, pathname, &dnode, NULL, 0))) {
    if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
    errno = __vfs_do_unlink(dnode);
    return DO_STATUS(errno);
// unlinkat(2): like unlink(2) but resolves `pathname` relative to the
// directory behind `fd`.
__DEFINE_LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
    if ((errno = __vfs_getfd(fd, &fd_s))) {
    struct v_dnode* dnode;
    if (!(errno = vfs_walk(fd_s->file->dnode, pathname, &dnode, NULL, 0))) {
        errno = __vfs_do_unlink(dnode);
    return DO_STATUS(errno);
// link(2): locate the existing file, create an empty dnode for the new
// name, then hard-link them via vfs_link.
__DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
    struct v_dnode *dentry, *to_link, *name_dentry, *name_file;
    errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
    errno = __vfs_try_locate_file(
        newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
    } else if (name_file) {
        errno = vfs_link(to_link, name_file);
    return DO_STATUS(errno);
// fsync(2): flush the file behind `fildes` via vfs_fsync.
__DEFINE_LXSYSCALL1(int, fsync, int, fildes)
    if (!(errno = __vfs_getfd(fildes, &fd_s))) {
        errno = vfs_fsync(fd_s->file);
    return DO_STATUS(errno);
// Duplicate a v_fd entry; both fds then share the same v_file, whose
// ref_count is bumped accordingly.
vfs_dup_fd(struct v_fd* old, struct v_fd** new)
    // NOTE(review): cake_grab result used unchecked — confirm non-NULL.
    struct v_fd* copied = cake_grab(fd_pile);
    memcpy(copied, old, sizeof(struct v_fd));
    atomic_fetch_add(&old->file->ref_count, 1);
// dup2 worker: make `newfd` refer to `oldfd`'s file, closing whatever
// `newfd` previously referenced. dup2(fd, fd) is a no-op per POSIX.
vfs_dup2(int oldfd, int newfd)
    if (newfd == oldfd) {
    struct v_fd *oldfd_s, *newfd_s;
    if ((errno = __vfs_getfd(oldfd, &oldfd_s))) {
    if (!TEST_FD(newfd)) {
    // Close the file currently occupying the destination slot, if any.
    newfd_s = __current->fdtable->fds[newfd];
    if (newfd_s && (errno = vfs_close(newfd_s->file))) {
    if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
        __current->fdtable->fds[newfd] = newfd_s;
    return DO_STATUS(errno);
// dup2(2): thin syscall wrapper over vfs_dup2.
__DEFINE_LXSYSCALL2(int, dup2, int, oldfd, int, newfd)
    return vfs_dup2(oldfd, newfd);
// dup(2): duplicate `oldfd` into the lowest free fd slot.
__DEFINE_LXSYSCALL1(int, dup, int, oldfd)
    struct v_fd *oldfd_s, *newfd_s;
    if ((errno = __vfs_getfd(oldfd, &oldfd_s))) {
    if (!(errno = vfs_alloc_fdslot(&newfd)) &&
        !(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
        __current->fdtable->fds[newfd] = newfd_s;
    return DO_STATUS(errno);
// symlink-style syscall: resolve the path, then ask its inode to create a
// symlink pointing at `link_target`. Rejected on read-only filesystems or
// filesystems without a symlink op.
__DEFINE_LXSYSCALL2(int,
    struct v_dnode* dnode;
    if ((errno = vfs_walk(__current->cwd, pathname, &dnode, NULL, 0))) {
    if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
    if (!dnode->inode->ops.symlink) {
    lock_inode(dnode->inode);
    errno = dnode->inode->ops.symlink(dnode->inode, link_target);
    unlock_inode(dnode->inode);
    return DO_STATUS(errno);
1263 __vfs_do_chdir(struct v_dnode* dnode)
1269 if (!(dnode->inode->itype & VFS_IFDIR)) {
1274 if (__current->cwd) {
1275 atomic_fetch_add(&__current->cwd->ref_count, 1);
1278 atomic_fetch_sub(&dnode->ref_count, 1);
1279 __current->cwd = dnode;
1281 unlock_dnode(dnode);
// chdir(2): resolve `path` then delegate to __vfs_do_chdir.
__DEFINE_LXSYSCALL1(int, chdir, const char*, path)
    struct v_dnode* dnode;
    if ((errno = vfs_walk(__current->cwd, path, &dnode, NULL, 0))) {
    errno = __vfs_do_chdir(dnode);
    return DO_STATUS(errno);
// fchdir(2): change cwd to the directory behind an open fd.
__DEFINE_LXSYSCALL1(int, fchdir, int, fd)
    if ((errno = __vfs_getfd(fd, &fd_s))) {
    errno = __vfs_do_chdir(fd_s->file->dnode);
    return DO_STATUS(errno);
1317 __DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
1328 if (!__current->cwd) {
1332 len = vfs_get_path(__current->cwd, buf, size, 0);
1339 buf[len + 1] = '\0';
1344 __current->k_status = errno;
// Rename worker: move/overwrite `current` onto `target`. Same-inode is a
// no-op, busy dnodes and cross-superblock moves are rejected; both parents
// and both dnodes are locked around the fs rename op.
vfs_do_rename(struct v_dnode* current, struct v_dnode* target)
    // NOTE(review): a freshly allocated `target` (ENOENT path in rename)
    // has no inode — confirm target->inode cannot be NULL here.
    if (current->inode->id == target->inode->id) {
    if (current->ref_count > 1 || target->ref_count > 1) {
    // Renames never cross filesystem boundaries.
    if (current->super_block != target->super_block) {
    struct v_dnode* oldparent = current->parent;
    struct v_dnode* newparent = target->parent;
    lock_dnode(current);
    lock_dnode(oldparent);
    lock_dnode(newparent);
    // Cannot overwrite a non-empty directory.
    if (!llist_empty(&target->children)) {
        unlock_dnode(target);
    if ((errno = current->inode->ops.rename(current->inode, current, target))) {
        unlock_dnode(target);
    // re-position current
    hstrcpy(&current->name, &target->name);
    vfs_dcache_rehash(newparent, current);
    // The overwritten target is dropped from the dcache.
    vfs_dcache_remove(target);
    unlock_dnode(target);
    unlock_dnode(current);
    unlock_dnode(oldparent);
    unlock_dnode(newparent);
// rename(2): resolve source and destination-parent, create a placeholder
// dnode when the destination does not exist, then run vfs_do_rename.
__DEFINE_LXSYSCALL2(int, rename, const char*, oldpath, const char*, newpath)
    struct v_dnode *cur, *target_parent, *target;
    // NOTE(review): valloc'd name buffer — confirm it is freed on every
    // exit path (not visible here).
    struct hstr name = HSTR(valloc(VFS_NAME_MAXLEN), 0);
    if ((errno = vfs_walk(__current->cwd, oldpath, &cur, NULL, 0))) {
    if ((errno = vfs_walk(
            __current->cwd, newpath, &target_parent, &name, VFS_WALK_PARENT))) {
    errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
    if (errno == ENOENT) {
        // Destination missing: fabricate a dnode carrying the new name.
        target = vfs_d_alloc();
        hstrcpy(&target->name, &name);
    if (!(errno = vfs_do_rename(cur, target))) {
    return DO_STATUS(errno);
// mount(2)-style syscall: resolve the device node and the mount point,
// validate both, then mount `fstype` from the device at the target.
__DEFINE_LXSYSCALL3(int,
    struct v_dnode *dev, *mnt;
    if ((errno = vfs_walk(__current->cwd, source, &dev, NULL, 0))) {
    if ((errno = vfs_walk(__current->cwd, target, &mnt, NULL, 0))) {
    // Source must be a volume (block) device node.
    if (!(dev->inode->itype & VFS_IFVOLDEV)) {
    // Mount point must not be busy.
    if (mnt->ref_count > 1) {
    // FIXME should not touch the underlying fs!
    struct device* device =
        (struct device*)((struct twifs_node*)dev->inode->data)->data;
    errno = vfs_mount_at(fstype, device, mnt);
    return DO_STATUS(errno);
// unmount(2): thin syscall wrapper over vfs_unmount.
__DEFINE_LXSYSCALL1(int, unmount, const char*, target)
    return vfs_unmount(target);