3 * @author Lunaixsky (zelong56@gmail.com)
4 * @brief Lunaix virtual file system - an abstraction layer for all file systems.
8 * @copyright Copyright (c) 2022
12 #include <klibc/string.h>
13 #include <lunaix/dirent.h>
14 #include <lunaix/foptions.h>
15 #include <lunaix/fs.h>
16 #include <lunaix/mm/cake.h>
17 #include <lunaix/mm/page.h>
18 #include <lunaix/mm/valloc.h>
19 #include <lunaix/process.h>
20 #include <lunaix/spike.h>
21 #include <lunaix/syscall.h>
23 #include <lunaix/fs/twifs.h>
25 #define PATH_DELIM '/'
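// The dnode cache (dcache) is a hash table of DNODE_HASHTABLE_SIZE (2^10)
// buckets. The bucket index is taken from the low DNODE_HASHTABLE_BITS of
// the name hash after __dcache_hash folds the upper bits down (shift by
// DNODE_HASHBITS).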
26 #define DNODE_HASHTABLE_BITS 10
27 #define DNODE_HASHTABLE_SIZE (1 << DNODE_HASHTABLE_BITS)
28 #define DNODE_HASH_MASK (DNODE_HASHTABLE_SIZE - 1)
29 #define DNODE_HASHBITS (32 - DNODE_HASHTABLE_BITS)
31 #define lock_inode(inode) mutex_lock(&inode->lock)
32 #define unlock_inode(inode) mutex_unlock(&inode->lock)
34 #define lock_dnode(dnode) mutex_lock(&dnode->lock)
35 #define unlock_dnode(dnode) mutex_unlock(&dnode->lock)
37 static struct cake_pile* dnode_pile;
38 static struct cake_pile* inode_pile;
39 static struct cake_pile* file_pile;
40 static struct cake_pile* superblock_pile;
41 static struct cake_pile* fd_pile;
43 static struct v_superblock* root_sb;
44 static struct hbucket* dnode_cache;
48 struct hstr vfs_ddot = HSTR("..", 2);
49 struct hstr vfs_dot = HSTR(".", 1);
50 struct hstr vfs_empty = HSTR("", 0);
56 vfs_d_free(struct v_dnode* dnode);
62 vfs_sb_free(struct v_superblock* sb);
67 // Create a dedicated cake pile for each of these object types instead of using valloc, so we can minimize internal fragmentation.
68 dnode_pile = cake_new_pile("dnode_cache", sizeof(struct v_dnode), 1, 0);
69 inode_pile = cake_new_pile("inode_cache", sizeof(struct v_inode), 1, 0);
70 file_pile = cake_new_pile("file_cache", sizeof(struct v_file), 1, 0);
71 fd_pile = cake_new_pile("fd_cache", sizeof(struct v_fd), 1, 0);
73 cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
75 dnode_cache = vzalloc(DNODE_HASHTABLE_SIZE * sizeof(struct hbucket));
77 hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
78 hstr_rehash(&vfs_dot, HSTR_FULL_HASH);
80 // Create a root superblock to hold our root directory.
81 root_sb = vfs_sb_alloc();
82 root_sb->root = vfs_d_alloc();
83 root_sb->root->inode = vfs_i_alloc();
86 inline struct hbucket*
87 __dcache_hash(struct v_dnode* parent, uint32_t* hash)
89 uint32_t _hash = *hash;
90 // Mix in the parent's pointer value to reduce the chance of collisions.
91 _hash += (uint32_t)parent;
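// Fold the upper bits into the lower bits so the entire 32-bit hash
// contributes to the bucket index.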
93 _hash = _hash ^ (_hash >> DNODE_HASHBITS);
95 return &dnode_cache[_hash & DNODE_HASH_MASK];
99 vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
101 if (!str->len || HSTR_EQ(str, &vfs_dot))
104 if (HSTR_EQ(str, &vfs_ddot)) {
105 return parent->parent ? parent->parent : parent;
108 uint32_t hash = str->hash;
109 struct hbucket* slot = __dcache_hash(parent, &hash);
111 struct v_dnode *pos, *n;
112 hashtable_bucket_foreach(slot, pos, n, hash_list)
114 if (pos->name.hash == hash) {
122 vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
124 struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
125 hlist_add(&bucket->head, &dnode->hash_list);
129 vfs_dcache_rehash(struct v_dnode* parent, struct v_dnode* dnode)
131 hlist_delete(&dnode->hash_list);
132 hstr_rehash(&dnode->name, HSTR_FULL_HASH);
133 vfs_dcache_add(parent, dnode);
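/*
 * Resolve `path` one component at a time, starting from `start` (or from
 * the filesystem/global root for absolute paths). Each component is looked
 * up in the dcache first; on a miss a fresh dnode is allocated and the
 * filesystem's dir_lookup is consulted, optionally creating missing
 * directories along the way when VFS_WALK_MKPARENT is given.
 */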
137 __vfs_walk(struct v_dnode* start,
139 struct v_dnode** dentry,
140 struct hstr* component,
146 if (path[0] == PATH_DELIM || !start) {
147 if ((walk_options & VFS_WALK_FSRELATIVE) && start) {
148 start = start->super_block->root;
150 start = root_sb->root;
155 struct v_dnode* dnode;
156 struct v_dnode* current_level = start;
158 char name_content[VFS_NAME_MAXLEN];
159 struct hstr name = HSTR(name_content, 0);
161 char current = path[i++], lookahead;
163 lookahead = path[i++];
164 if (current != PATH_DELIM) {
165 if (j >= VFS_NAME_MAXLEN - 1) {
168 if (!VFS_VALID_CHAR(current)) {
171 name_content[j++] = current;
177 // handle runs of path delimiters, i.e. cases like /^.*(\/+).*$/
178 if (lookahead == PATH_DELIM) {
182 lock_dnode(current_level);
186 hstr_rehash(&name, HSTR_FULL_HASH);
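// With VFS_WALK_PARENT, stop at the final component: hand its name and
// hash back through `component` and return the parent directory instead
// of resolving it.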
188 if (!lookahead && (walk_options & VFS_WALK_PARENT)) {
190 component->hash = name.hash;
192 strcpy(component->value, name_content);
194 unlock_dnode(current_level);
198 dnode = vfs_dcache_lookup(current_level, &name);
201 dnode = vfs_d_alloc();
203 hstrcpy(&dnode->name, &name);
205 lock_inode(current_level->inode);
208 current_level->inode->ops.dir_lookup(current_level->inode, dnode);
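// The component is not in the dcache and dir_lookup reported ENOENT:
// with VFS_WALK_MKPARENT, ask the filesystem to create the missing
// intermediate directory on the fly.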
210 if (errno == ENOENT && (walk_options & VFS_WALK_MKPARENT)) {
211 if (!current_level->inode->ops.mkdir) {
214 errno = current_level->inode->ops.mkdir(
215 current_level->inode, dnode);
219 unlock_inode(current_level->inode);
222 unlock_dnode(current_level);
223 vfree(dnode->name.value);
227 vfs_dcache_add(current_level, dnode);
229 dnode->parent = current_level;
230 llist_append(&current_level->children, &dnode->siblings);
233 unlock_dnode(current_level);
236 current_level = dnode;
241 *dentry = current_level;
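// Upper bound on how many symbolic links a single walk may follow,
// guarding vfs_walk below against symlink loops.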
250 #define VFS_MAX_SYMLINK 16
253 vfs_walk(struct v_dnode* start,
255 struct v_dnode** dentry,
256 struct hstr* component,
259 struct v_dnode* interim;
260 const char* pathname = path;
261 int errno = __vfs_walk(start, path, &interim, component, options);
265 if (counter >= VFS_MAX_SYMLINK) {
269 if ((interim->inode->itype & VFS_IFSYMLINK) &&
270 !(options & VFS_WALK_NOFOLLOW) &&
271 interim->inode->ops.read_symlink) {
272 errno = interim->inode->ops.read_symlink(interim->inode, &pathname);
279 errno = __vfs_walk(start, pathname, &interim, component, options);
283 *dentry = errno ? 0 : interim;
289 vfs_mount(const char* target, const char* fs_name, struct device* device)
294 if (!(errno = vfs_walk(__current->cwd, target, &mnt, NULL, 0))) {
295 errno = vfs_mount_at(fs_name, device, mnt);
302 vfs_unmount(const char* target)
307 if (!(errno = vfs_walk(__current->cwd, target, &mnt, NULL, 0))) {
308 errno = vfs_unmount_at(mnt);
315 vfs_mount_at(const char* fs_name,
316 struct device* device,
317 struct v_dnode* mnt_point)
319 if (!(mnt_point->inode->itype & VFS_IFDIR)) {
323 struct filesystem* fs = fsm_get(fs_name);
328 struct v_superblock* sb = vfs_sb_alloc();
333 if (!(errno = fs->mount(sb, mnt_point))) {
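// Mounted successfully: the mount point becomes the root dnode of the
// new superblock, which is then linked onto the global superblock list.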
335 sb->root = mnt_point;
336 mnt_point->super_block = sb;
337 llist_append(&root_sb->sb_list, &sb->sb_list);
344 vfs_unmount_at(struct v_dnode* mnt_point)
346 // FIXME mnt point check & deal with the detached dcache subtree
348 struct v_superblock* sb = mnt_point->super_block;
352 if (!(errno = sb->fs->unmount(sb))) {
353 struct v_dnode* fs_root = sb->root;
354 llist_delete(&fs_root->siblings);
355 llist_delete(&sb->sb_list);
356 hlist_delete(&fs_root->hash_list);
364 vfs_open(struct v_dnode* dnode, struct v_file** file)
366 if (!dnode->inode || !dnode->inode->ops.open) {
370 struct v_inode* inode = dnode->inode;
371 struct v_file* vfile = cake_grab(file_pile);
372 memset(vfile, 0, sizeof(*vfile));
374 vfile->dnode = dnode;
375 vfile->inode = inode;
376 vfile->ref_count = ATOMIC_VAR_INIT(1);
377 vfile->ops = inode->default_fops;
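// Lazily attach a page cache the first time a regular file is opened.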
379 if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
380 struct pcache* pcache = vzalloc(sizeof(struct pcache));
382 pcache->master = inode;
383 inode->pg_cache = pcache;
386 int errno = inode->ops.open(inode, vfile);
388 cake_release(file_pile, vfile);
390 atomic_fetch_add(&dnode->ref_count, 1);
400 vfs_link(struct v_dnode* to_link, struct v_dnode* name)
404 lock_inode(to_link->inode);
405 if (to_link->super_block->root != name->super_block->root) {
407 } else if (!to_link->inode->ops.link) {
409 } else if (!(errno = to_link->inode->ops.link(to_link->inode, name))) {
410 name->inode = to_link->inode;
411 atomic_fetch_add(&to_link->inode->link_count, 1);
413 unlock_inode(to_link->inode);
419 vfs_close(struct v_file* file)
422 if (!file->ops.close || !(errno = file->ops.close(file))) {
423 atomic_fetch_sub(&file->dnode->ref_count, 1);
424 file->inode->open_count--;
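// Write back any dirty pages in the page cache before the file object
// is released.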
426 pcache_commit_all(file->inode);
427 cake_release(file_pile, file);
433 vfs_fsync(struct v_file* file)
435 lock_inode(file->inode);
438 pcache_commit_all(file->inode);
439 if (file->ops.sync) {
440 errno = file->ops.sync(file->inode);
443 unlock_inode(file->inode);
449 vfs_alloc_fdslot(int* fd)
451 for (size_t i = 0; i < VFS_MAX_FD; i++) {
452 if (!__current->fdtable->fds[i]) {
463 struct v_superblock* sb = cake_grab(superblock_pile);
464 memset(sb, 0, sizeof(*sb));
465 llist_init_head(&sb->sb_list);
470 vfs_sb_free(struct v_superblock* sb)
472 cake_release(superblock_pile, sb);
478 struct v_dnode* dnode = cake_grab(dnode_pile);
479 memset(dnode, 0, sizeof(*dnode));
480 llist_init_head(&dnode->children);
481 llist_init_head(&dnode->siblings);
482 mutex_init(&dnode->lock);
484 dnode->ref_count = ATOMIC_VAR_INIT(0);
485 dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
491 vfs_d_free(struct v_dnode* dnode)
493 if (dnode->inode && dnode->inode->link_count) {
494 dnode->inode->link_count--;
496 vfree(dnode->name.value);
497 cake_release(dnode_pile, dnode);
503 struct v_inode* inode = cake_grab(inode_pile);
504 memset(inode, 0, sizeof(*inode));
505 inode->link_count = 1;
506 mutex_init(&inode->lock);
512 vfs_i_free(struct v_inode* inode)
514 cake_release(inode_pile, inode);
517 /* ---- System call definition and support ---- */
519 #define FLOCATE_CREATE_EMPTY 1
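// DO_STATUS records errno in the caller's k_status and yields the syscall
// status code; DO_STATUS_OR_RETURN passes non-negative results (byte
// counts, fd numbers, ...) through unchanged.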
521 #define DO_STATUS(errno) SYSCALL_ESTATUS(__current->k_status = errno)
522 #define DO_STATUS_OR_RETURN(errno) ({ errno < 0 ? DO_STATUS(errno) : errno; })
524 #define TEST_FD(fd) (fd >= 0 && fd < VFS_MAX_FD)
527 __vfs_getfd(int fd, struct v_fd** fd_s)
529 if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {
536 __vfs_try_locate_file(const char* path,
537 struct v_dnode** fdir,
538 struct v_dnode** file,
541 char name_str[VFS_NAME_MAXLEN];
542 struct hstr name = HSTR(name_str, 0);
545 vfs_walk(__current->cwd, path, fdir, &name, VFS_WALK_PARENT))) {
549 errno = vfs_walk(*fdir, name.value, file, NULL, 0);
550 if (errno != ENOENT || !(options & FLOCATE_CREATE_EMPTY)) {
554 struct v_dnode* parent = *fdir;
555 struct v_dnode* file_new = vfs_d_alloc();
556 hstrcpy(&file_new->name, &name);
558 if (!(errno = parent->inode->ops.create(parent->inode, file_new))) {
561 vfs_dcache_add(parent, file_new);
562 llist_append(&parent->children, &file_new->siblings);
564 vfs_d_free(file_new);
571 vfs_do_open(const char* path, int options)
574 struct v_dnode *dentry, *file;
575 struct v_file* ofile = 0;
577 errno = __vfs_try_locate_file(
578 path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);
580 if (errno || (errno = vfs_open(file, &ofile))) {
584 struct v_inode* o_inode = ofile->inode;
585 if (!(o_inode->itype & VFS_IFSEQDEV) && !(options & FO_DIRECT)) {
586 // XXX Change here accordingly when the signature of pcache_read/pcache_write changes.
587 ofile->ops.read = pcache_read;
588 ofile->ops.write = pcache_write;
591 if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
592 struct v_fd* fd_s = vzalloc(sizeof(*fd_s));
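// Branchless FO_APPEND handling: the mask is all ones when FO_APPEND is
// set (f_pos starts at end-of-file) and zero otherwise (f_pos starts at 0).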
593 ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);
595 fd_s->flags = options;
596 __current->fdtable->fds[fd] = fd_s;
603 __DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
605 int errno = vfs_do_open(path, options);
606 return DO_STATUS_OR_RETURN(errno);
609 __DEFINE_LXSYSCALL1(int, close, int, fd)
613 if ((errno = __vfs_getfd(fd, &fd_s))) {
617 if (fd_s->file->ref_count > 1) {
618 fd_s->file->ref_count--;
619 } else if ((errno = vfs_close(fd_s->file))) {
624 __current->fdtable->fds[fd] = 0;
627 return DO_STATUS(errno);
631 __vfs_readdir_callback(struct dir_context* dctx,
636 struct dirent* dent = (struct dirent*)dctx->cb_data;
637 strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);
639 dent->d_type = dtype;
642 __DEFINE_LXSYSCALL2(int, readdir, int, fd, struct dirent*, dent)
647 if ((errno = __vfs_getfd(fd, &fd_s))) {
651 struct v_inode* inode = fd_s->file->inode;
655 if (!(fd_s->file->inode->itype & VFS_IFDIR)) {
658 struct dir_context dctx =
659 (struct dir_context){ .cb_data = dent,
660 .index = dent->d_offset,
661 .read_complete_callback =
662 __vfs_readdir_callback };
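// The first two directory offsets are synthesized as "." and ".."; later
// entries come from the filesystem's own readdir.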
663 if (dent->d_offset == 0) {
664 __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, 0);
665 } else if (dent->d_offset == 1) {
666 __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, 0);
669 if ((errno = fd_s->file->ops.readdir(inode, &dctx))) {
681 return DO_STATUS(errno);
684 __DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
688 if ((errno = __vfs_getfd(fd, &fd_s))) {
692 struct v_file* file = fd_s->file;
693 if ((file->inode->itype & VFS_IFDIR)) {
698 lock_inode(file->inode);
700 file->inode->atime = clock_unixtime();
702 __SYSCALL_INTERRUPTIBLE(
703 { errno = file->ops.read(file->inode, buf, count, file->f_pos); })
705 unlock_inode(file->inode);
708 file->f_pos += errno;
713 return DO_STATUS(errno);
716 __DEFINE_LXSYSCALL3(int, write, int, fd, void*, buf, size_t, count)
720 if ((errno = __vfs_getfd(fd, &fd_s))) {
724 struct v_file* file = fd_s->file;
725 if ((file->inode->itype & VFS_IFDIR)) {
730 lock_inode(file->inode);
732 file->inode->mtime = clock_unixtime();
734 __SYSCALL_INTERRUPTIBLE(
735 { errno = file->ops.write(file->inode, buf, count, file->f_pos); })
737 unlock_inode(file->inode);
740 file->f_pos += errno;
745 return DO_STATUS(errno);
748 __DEFINE_LXSYSCALL3(int, lseek, int, fd, int, offset, int, options)
752 if ((errno = __vfs_getfd(fd, &fd_s))) {
756 struct v_file* file = fd_s->file;
758 lock_inode(file->inode);
760 size_t fpos = file->f_pos;
763 fpos = (size_t)((int)file->f_pos + offset);
766 fpos = (size_t)((int)file->inode->fsize + offset);
772 if (!file->ops.seek || !(errno = file->ops.seek(file->inode, fpos))) {
776 unlock_inode(file->inode);
779 return DO_STATUS(errno);
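/*
 * Rebuild the path of `dnode` into `buf`: recurse to emit the ancestor
 * components first, then copy this dnode's own name; returns the number
 * of characters written so far.
 */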
783 vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
793 size_t len = vfs_get_path(dnode->parent, buf, size, depth + 1);
799 size_t cpy_size = MIN(dnode->name.len, size - len);
800 strncpy(buf + len, dnode->name.value, cpy_size);
804 buf[len++] = PATH_DELIM;
811 vfs_readlink(struct v_dnode* dnode, char* buf, size_t size)
814 struct v_inode* inode = dnode->inode;
815 if (inode->ops.read_symlink) {
818 int errno = inode->ops.read_symlink(inode, &link);
819 strncpy(buf, link, size);
827 __DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
831 if ((errno = __vfs_getfd(fd, &fd_s))) {
835 struct v_dnode* dnode;
836 errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);
843 return DO_STATUS(errno);
846 __DEFINE_LXSYSCALL3(int, readlink, const char*, path, char*, buf, size_t, size)
849 struct v_dnode* dnode;
851 vfs_walk(__current->cwd, path, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
852 errno = vfs_readlink(dnode, buf, size);
859 return DO_STATUS(errno);
862 __DEFINE_LXSYSCALL4(int,
875 if ((errno = __vfs_getfd(dirfd, &fd_s))) {
879 struct v_dnode* dnode;
880 if (!(errno = vfs_walk(
881 fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
882 errno = vfs_readlink(dnode, buf, size);
890 return DO_STATUS(errno);
895 When we perform an operation that could affect the layout of a
896 directory (i.e., rename, mkdir, rmdir), we must lock the parent directory
897 whenever possible. This blocks any ongoing path walk from reaching the
898 directory, and hence avoids exposing any partial state.
901 __DEFINE_LXSYSCALL1(int, rmdir, const char*, pathname)
904 struct v_dnode* dnode;
905 if ((errno = vfs_walk(__current->cwd, pathname, &dnode, NULL, 0))) {
906 return DO_STATUS(errno);
912 lock_dnode(dnode->parent);
914 if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
919 if (dnode->ref_count || dnode->inode->open_count) {
924 if (!llist_empty(&dnode->children)) {
929 lock_inode(dnode->inode);
931 if ((dnode->inode->itype & VFS_IFDIR)) {
932 errno = dnode->inode->ops.rmdir(dnode->inode);
934 llist_delete(&dnode->siblings);
935 hlist_delete(&dnode->hash_list);
936 unlock_inode(dnode->inode);
945 unlock_inode(dnode->inode);
950 unlock_dnode(dnode->parent);
951 return DO_STATUS(errno);
954 __DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
956 struct v_dnode *parent, *dir = vfs_d_alloc();
958 vfs_walk(__current->cwd, path, &parent, &dir->name, VFS_WALK_PARENT);
964 lock_inode(parent->inode);
966 if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
968 } else if (!parent->inode->ops.mkdir) {
970 } else if (!(parent->inode->itype & VFS_IFDIR)) {
972 } else if (!(errno = parent->inode->ops.mkdir(parent->inode, dir))) {
973 llist_append(&parent->children, &dir->siblings);
980 unlock_inode(parent->inode);
981 unlock_dnode(parent);
983 return DO_STATUS(errno);
987 __vfs_do_unlink(struct v_dnode* dnode)
989 struct v_inode* inode = dnode->inode;
991 if (dnode->ref_count) {
998 if (inode->open_count) {
1000 } else if (!(inode->itype & VFS_IFDIR)) {
1001 // The underlying unlink implementation should handle
1003 errno = inode->ops.unlink(inode);
1005 inode->link_count--;
1006 llist_delete(&dnode->siblings);
1007 hlist_delete(&dnode->hash_list);
1014 unlock_inode(inode);
1019 __DEFINE_LXSYSCALL1(int, unlink, const char*, pathname)
1022 struct v_dnode* dnode;
1023 if ((errno = vfs_walk(__current->cwd, pathname, &dnode, NULL, 0))) {
1026 if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
1031 errno = __vfs_do_unlink(dnode);
1034 return DO_STATUS(errno);
1037 __DEFINE_LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
1041 if ((errno = __vfs_getfd(fd, &fd_s))) {
1045 struct v_dnode* dnode;
1046 if (!(errno = vfs_walk(fd_s->file->dnode, pathname, &dnode, NULL, 0))) {
1047 errno = __vfs_do_unlink(dnode);
1051 return DO_STATUS(errno);
1054 __DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
1057 struct v_dnode *dentry, *to_link, *name_dentry, *name_file;
1059 errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
1061 errno = __vfs_try_locate_file(
1062 newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
1065 } else if (name_file) {
1066 errno = vfs_link(to_link, name_file);
1069 return DO_STATUS(errno);
1072 __DEFINE_LXSYSCALL1(int, fsync, int, fildes)
1076 if (!(errno = __vfs_getfd(fildes, &fd_s))) {
1077 errno = vfs_fsync(fd_s->file);
1080 return DO_STATUS(errno);
1084 vfs_dup_fd(struct v_fd* old, struct v_fd** new)
1087 struct v_fd* copied = cake_grab(fd_pile);
1089 memcpy(copied, old, sizeof(struct v_fd));
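// The duplicated descriptor shares the same open file; take an extra
// reference on it.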
1091 atomic_fetch_add(&old->file->ref_count, 1);
1099 vfs_dup2(int oldfd, int newfd)
1101 if (newfd == oldfd) {
1106 struct v_fd *oldfd_s, *newfd_s;
1107 if ((errno = __vfs_getfd(oldfd, &oldfd_s))) {
1111 if (!TEST_FD(newfd)) {
1116 newfd_s = __current->fdtable->fds[newfd];
1117 if (newfd_s && (errno = vfs_close(newfd_s->file))) {
1121 if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1122 __current->fdtable->fds[newfd] = newfd_s;
1127 return DO_STATUS(errno);
1130 __DEFINE_LXSYSCALL2(int, dup2, int, oldfd, int, newfd)
1132 return vfs_dup2(oldfd, newfd);
1135 __DEFINE_LXSYSCALL1(int, dup, int, oldfd)
1138 struct v_fd *oldfd_s, *newfd_s;
1139 if ((errno = __vfs_getfd(oldfd, &oldfd_s))) {
1143 if (!(errno = vfs_alloc_fdslot(&newfd)) &&
1144 !(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
1145 __current->fdtable->fds[newfd] = newfd_s;
1150 return DO_STATUS(errno);
1153 __DEFINE_LXSYSCALL2(int,
1161 struct v_dnode* dnode;
1162 if ((errno = vfs_walk(__current->cwd, pathname, &dnode, NULL, 0))) {
1165 if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
1169 if (!dnode->inode->ops.symlink) {
1174 lock_inode(dnode->inode);
1176 errno = dnode->inode->ops.symlink(dnode->inode, link_target);
1178 unlock_inode(dnode->inode);
1181 return DO_STATUS(errno);
1185 __vfs_do_chdir(struct v_dnode* dnode)
1191 if (!(dnode->inode->itype & VFS_IFDIR)) {
1196 if (__current->cwd) {
1197 __current->cwd->ref_count--;
1201 __current->cwd = dnode;
1203 unlock_dnode(dnode);
1209 __DEFINE_LXSYSCALL1(int, chdir, const char*, path)
1211 struct v_dnode* dnode;
1214 if ((errno = vfs_walk(__current->cwd, path, &dnode, NULL, 0))) {
1218 errno = __vfs_do_chdir(dnode);
1221 return DO_STATUS(errno);
1224 __DEFINE_LXSYSCALL1(int, fchdir, int, fd)
1229 if ((errno = __vfs_getfd(fd, &fd_s))) {
1233 errno = __vfs_do_chdir(fd_s->file->dnode);
1236 return DO_STATUS(errno);
1239 __DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
1250 if (!__current->cwd) {
1254 len = vfs_get_path(__current->cwd, buf, size, 0);
1261 buf[len + 1] = '\0';
1266 __current->k_status = errno;
1271 vfs_do_rename(struct v_dnode* current, struct v_dnode* target)
1273 if (current->inode->id == target->inode->id) {
1278 if (current->ref_count || target->ref_count) {
1282 if (current->super_block != target->super_block) {
1288 struct v_dnode* oldparent = current->parent;
1289 struct v_dnode* newparent = target->parent;
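// Lock the dnode being moved, then its old and new parent directories,
// so the affected directory layouts stay put while entries are relocated.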
1291 lock_dnode(current);
1294 lock_dnode(oldparent);
1296 lock_dnode(newparent);
1298 if (!llist_empty(&target->children)) {
1300 unlock_dnode(target);
1304 if ((errno = current->inode->ops.rename(current->inode, current, target))) {
1305 unlock_dnode(target);
1309 // re-position current
1310 current->parent = newparent;
1311 hstrcpy(&current->name, &target->name);
1312 llist_delete(&current->siblings);
1313 llist_append(&newparent->children, &current->siblings);
1314 vfs_dcache_rehash(newparent, current);
1317 llist_delete(&target->siblings);
1318 hlist_delete(&target->hash_list);
1320 unlock_dnode(target);
1323 unlock_dnode(current);
1325 unlock_dnode(oldparent);
1327 unlock_dnode(newparent);
1332 __DEFINE_LXSYSCALL2(int, rename, const char*, oldpath, const char*, newpath)
1334 struct v_dnode *cur, *target_parent, *target;
1335 struct hstr name = HSTR(valloc(VFS_NAME_MAXLEN), 0);
1338 if ((errno = vfs_walk(__current->cwd, oldpath, &cur, NULL, 0))) {
1342 if ((errno = vfs_walk(
1343 __current->cwd, newpath, &target_parent, &name, VFS_WALK_PARENT))) {
1347 errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
1348 if (errno == ENOENT) {
1349 target = vfs_d_alloc();
1350 hstrcpy(&target->name, &name);
1355 if (!(errno = vfs_do_rename(cur, target))) {
1361 return DO_STATUS(errno);
1364 __DEFINE_LXSYSCALL3(int,
1373 struct v_dnode *dev, *mnt;
1376 if ((errno = vfs_walk(__current->cwd, source, &dev, NULL, 0))) {
1380 if ((errno = vfs_walk(__current->cwd, target, &mnt, NULL, 0))) {
1384 if (!(dev->inode->itype & VFS_IFVOLDEV)) {
1389 // FIXME should not touch the underlying fs!
1390 struct device* device =
1391 (struct device*)((struct twifs_node*)dev->inode->data)->data;
1393 errno = vfs_mount_at(fstype, device, mnt);
1396 return DO_STATUS(errno);
1399 __DEFINE_LXSYSCALL1(int, unmount, const char*, target)
1401 return vfs_unmount(target);