 * @author Lunaixsky (zelong56@gmail.com)
 * @brief Lunaix virtual file system - an abstraction layer for all file systems.
 * @copyright Copyright (c) 2022

// Welcome to The Mountain O'Shit! :)
TODO vfs & device todos checklist

It is overseen by Twilight Sparkle ;)

1. Get inodes hooked into lru (CHECKED)
2. Get dnodes hooked into lru (CHECKED)
3. Get inodes properly hashed so they can be reused by underlying fs (CHECKED)
4. (lru) Add a callback function (or destructor) for eviction. (CHECKED)
      [good idea] or a constructor/destructor pattern in cake allocator ?
5. (mount) Figure out a way to identify a busy mount point before unmount.
      Maybe a unified mount_point structure that maintains a reference
      counter on any dnode within the subtree? Such a counter is
      incremented only when a file is opened or a dnode is used as a
      working directory, and decremented conversely. (CHECKED)
6. (mount) Ability to track all mount points (including sub-mounts)
      so we can be confident to clean up everything when we unmount.
7. (mount) Figure out a way to acquire the device represented by a dnode,
      so it can be used to mount. (e.g. we wish to get `struct device*`
      out of the dnode at /dev/sda)
      [tip] we should pay attention to twifs and add a private_data field
      under struct v_dnode? (CHECKED)
8. (mount) Then, we should refactor the mount/unmount mechanism. (CHECKED)
9. (mount) (future) Ability to mount anything? e.g. Linux can mount a disk
      image file using a so-called "loopback" pseudo device. Maybe we can
      do a similar thing in Lunaix: emulate a block device on top of the
      regular file we mount it on.
10. (device) device number (dev_t) allocation
      [good idea] <class>:<subclass>:<uniq_id> composition
      (see the sketch right after this list)
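/*
 * A minimal sketch of the <class>:<subclass>:<uniq_id> idea from todo
 * item 10. Nothing here is implemented; the field widths, macro names
 * and the 32-bit dev_t are all assumptions:
 *
 *      #define DEV_UNIQ_BITS     16
 *      #define DEV_SUBCLASS_BITS 8
 *
 *      // pack class:subclass:uniq_id into a single dev_t
 *      #define DEV_MKDEV(class, subclass, uniq)                          \
 *          (((dev_t)(class) << (DEV_SUBCLASS_BITS + DEV_UNIQ_BITS)) |    \
 *           ((dev_t)(subclass) << DEV_UNIQ_BITS) | (dev_t)(uniq))
 *
 *      #define DEV_CLASS(dev) ((dev) >> (DEV_SUBCLASS_BITS + DEV_UNIQ_BITS))
 *      #define DEV_UNIQ(dev)  ((dev) & ((1 << DEV_UNIQ_BITS) - 1))
 */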
#include <klibc/string.h>
#include <lunaix/dirent.h>
#include <lunaix/foptions.h>
#include <lunaix/fs.h>
#include <lunaix/mm/cake.h>
#include <lunaix/mm/page.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/process.h>
#include <lunaix/spike.h>
#include <lunaix/syscall.h>

#include <lunaix/fs/twifs.h>
#define PATH_DELIM '/'

#define unlock_inode(inode) mutex_unlock(&inode->lock)
#define lock_inode(inode)                                                      \
    ({                                                                         \
        mutex_lock(&inode->lock);                                              \
        lru_use_one(inode_lru, &inode->lru);                                   \
    })

#define unlock_dnode(dnode) mutex_unlock(&dnode->lock)
#define lock_dnode(dnode)                                                      \
    ({                                                                         \
        mutex_lock(&dnode->lock);                                              \
        lru_use_one(dnode_lru, &dnode->lru);                                   \
    })
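/*
 * Illustrative pairing (assumed usage, mirroring the call sites below):
 *
 *      lock_inode(inode);
 *      ... critical section touching the inode ...
 *      unlock_inode(inode);
 *
 * Note that taking a lock also refreshes the node's position in its lru
 * zone, so frequently locked nodes are the last to be evicted.
 */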
static struct cake_pile* dnode_pile;
static struct cake_pile* inode_pile;
static struct cake_pile* file_pile;
static struct cake_pile* superblock_pile;
static struct cake_pile* fd_pile;

static struct v_dnode* sysroot;
static struct hbucket* dnode_cache;

static struct lru_zone *dnode_lru, *inode_lru;

struct hstr vfs_ddot = HSTR("..", 2);
struct hstr vfs_dot = HSTR(".", 1);
struct hstr vfs_empty = HSTR("", 0);
vfs_sb_free(struct v_superblock* sb);

__vfs_try_evict_dnode(struct lru_node* obj);

__vfs_try_evict_inode(struct lru_node* obj);

    // Give these objects their own dedicated cake piles instead of using
    // valloc, so we can minimize internal fragmentation.
    dnode_pile = cake_new_pile("dnode_cache", sizeof(struct v_dnode), 1, 0);
    inode_pile = cake_new_pile("inode_cache", sizeof(struct v_inode), 1, 0);
    file_pile = cake_new_pile("file_cache", sizeof(struct v_file), 1, 0);
    fd_pile = cake_new_pile("fd_cache", sizeof(struct v_fd), 1, 0);
    superblock_pile =
        cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);

    dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));

    dnode_lru = lru_new_zone(__vfs_try_evict_dnode);
    inode_lru = lru_new_zone(__vfs_try_evict_inode);

    hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
    hstr_rehash(&vfs_dot, HSTR_FULL_HASH);

    sysroot = vfs_d_alloc(NULL, &vfs_empty);
    atomic_fetch_add(&sysroot->ref_count, 1);
inline struct hbucket*
__dcache_hash(struct v_dnode* parent, uint32_t* hash)
    uint32_t _hash = *hash;
    // fold in the parent's pointer value to reduce the chance of collisions
    _hash += (uint32_t)parent;
    _hash = _hash ^ (_hash >> VFS_HASHBITS);
    return &dnode_cache[_hash & VFS_HASH_MASK];
vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
    if (!str->len || HSTR_EQ(str, &vfs_dot))

    if (HSTR_EQ(str, &vfs_ddot)) {
        return parent->parent ? parent->parent : parent;

    uint32_t hash = str->hash;
    struct hbucket* slot = __dcache_hash(parent, &hash);

    struct v_dnode *pos, *n;
    hashtable_bucket_foreach(slot, pos, n, hash_list)
        if (pos->name.hash == hash) {

vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
    atomic_fetch_add(&dnode->ref_count, 1);
    dnode->parent = parent;
    llist_append(&parent->children, &dnode->siblings);

    struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
    hlist_add(&bucket->head, &dnode->hash_list);

vfs_dcache_remove(struct v_dnode* dnode)
    assert(dnode->ref_count == 1);

    llist_delete(&dnode->siblings);
    hlist_delete(&dnode->hash_list);

    dnode->parent = NULL;
    atomic_fetch_sub(&dnode->ref_count, 1);

vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
    hstr_rehash(&dnode->name, HSTR_FULL_HASH);
    vfs_dcache_remove(dnode);
    vfs_dcache_add(new_parent, dnode);
__vfs_walk(struct v_dnode* start,
           struct v_dnode** dentry,
           struct hstr* component,

    if (path[0] == PATH_DELIM || !start) {
        if ((walk_options & VFS_WALK_FSRELATIVE) && start) {
            start = start->super_block->root;

    struct v_dnode* dnode;
    struct v_dnode* current_level = start;

    char name_content[VFS_NAME_MAXLEN];
    struct hstr name = HSTR(name_content, 0);

    char current = path[i++], lookahead;
    while (current && current_level) {
        lookahead = path[i++];
        if (current != PATH_DELIM) {
            if (j >= VFS_NAME_MAXLEN - 1) {

            if (!VFS_VALID_CHAR(current)) {

            name_content[j++] = current;

        // handling cases like /^.*(\/+).*$/
        if (lookahead == PATH_DELIM) {

        lock_dnode(current_level);

        hstr_rehash(&name, HSTR_FULL_HASH);

        if (!lookahead && (walk_options & VFS_WALK_PARENT)) {
            component->hash = name.hash;
            strcpy(component->value, name_content);

            unlock_dnode(current_level);

        dnode = vfs_dcache_lookup(current_level, &name);

            dnode = vfs_d_alloc(current_level, &name);

            lock_inode(current_level->inode);

            errno =
              current_level->inode->ops.dir_lookup(current_level->inode, dnode);

            if (errno == ENOENT && (walk_options & VFS_WALK_MKPARENT)) {
                if (!current_level->inode->ops.mkdir) {

                errno = current_level->inode->ops.mkdir(
                  current_level->inode, dnode);

                vfs_dcache_add(current_level, dnode);
            unlock_inode(current_level->inode);

            unlock_dnode(current_level);

        unlock_dnode(current_level);

        current_level = dnode;

    *dentry = current_level;
#define VFS_MAX_SYMLINK 16

vfs_walk(struct v_dnode* start,
         struct v_dnode** dentry,
         struct hstr* component,

    struct v_dnode* interim;
    const char* pathname = path;
    int errno = __vfs_walk(start, path, &interim, component, options);

    // follow symlinks (unless asked not to), bounded by VFS_MAX_SYMLINK
    while (!errno && interim->inode && !(options & VFS_WALK_NOFOLLOW)) {
        if (counter >= VFS_MAX_SYMLINK) {

        if ((interim->inode->itype & VFS_IFSYMLINK) &&
            interim->inode->ops.read_symlink) {

            lock_inode(interim->inode);
            errno = interim->inode->ops.read_symlink(interim->inode, &pathname);
            unlock_inode(interim->inode);

        errno = __vfs_walk(start, pathname, &interim, component, options);

    *dentry = errno ? 0 : interim;
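/*
 * Illustrative call (mirrors the call sites below; `path` and the
 * buffers are made-up names):
 *
 *      struct v_dnode* parent;
 *      char name_buf[VFS_NAME_MAXLEN];
 *      struct hstr name = HSTR(name_buf, 0);
 *
 *      // resolve everything but the last component of `path`, leaving
 *      // that component in `name` for the caller to create or look up
 *      int errno =
 *          vfs_walk(__current->cwd, path, &parent, &name, VFS_WALK_PARENT);
 */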
vfs_open(struct v_dnode* dnode, struct v_file** file)
    if (!dnode->inode || !dnode->inode->ops.open) {

    struct v_inode* inode = dnode->inode;

    struct v_file* vfile = cake_grab(file_pile);
    memset(vfile, 0, sizeof(*vfile));

    vfile->dnode = dnode;
    vfile->inode = inode;
    vfile->ref_count = ATOMIC_VAR_INIT(1);
    vfile->ops = inode->default_fops;

    if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
        struct pcache* pcache = vzalloc(sizeof(struct pcache));

        pcache->master = inode;
        inode->pg_cache = pcache;

    int errno = inode->ops.open(inode, vfile);
        cake_release(file_pile, vfile);

        atomic_fetch_add(&dnode->ref_count, 1);

        mnt_mkbusy(dnode->mnt);
vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
    if (assign_to->inode) {
        assign_to->inode->link_count--;

    assign_to->inode = inode;

vfs_link(struct v_dnode* to_link, struct v_dnode* name)
    lock_inode(to_link->inode);

    if (to_link->super_block->root != name->super_block->root) {
    } else if (!to_link->inode->ops.link) {
    } else if (!(errno = to_link->inode->ops.link(to_link->inode, name))) {
        vfs_assign_inode(name, to_link->inode);

    unlock_inode(to_link->inode);

vfs_close(struct v_file* file)
    if (!file->ops.close || !(errno = file->ops.close(file))) {
        atomic_fetch_sub(&file->dnode->ref_count, 1);
        file->inode->open_count--;
        mnt_chillax(file->dnode->mnt);

        pcache_commit_all(file->inode);
        cake_release(file_pile, file);

vfs_fsync(struct v_file* file)
    lock_inode(file->inode);

    pcache_commit_all(file->inode);
    if (file->ops.sync) {
        errno = file->ops.sync(file->inode);

    unlock_inode(file->inode);
vfs_alloc_fdslot(int* fd)
    for (size_t i = 0; i < VFS_MAX_FD; i++) {
        if (!__current->fdtable->fds[i]) {

    struct v_superblock* sb = cake_grab(superblock_pile);
    memset(sb, 0, sizeof(*sb));
    llist_init_head(&sb->sb_list);
    sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));

vfs_sb_free(struct v_superblock* sb)
    cake_release(superblock_pile, sb);
__vfs_try_evict_dnode(struct lru_node* obj)
    struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);

    if (!dnode->ref_count) {

__vfs_try_evict_inode(struct lru_node* obj)
    struct v_inode* inode = container_of(obj, struct v_inode, lru);

    if (!inode->link_count && !inode->open_count) {

vfs_d_alloc(struct v_dnode* parent, struct hstr* name)
    struct v_dnode* dnode = cake_grab(dnode_pile);
        // out of cakes: evict half of the lru zone, then retry once
        lru_evict_half(dnode_lru);

        if (!(dnode = cake_grab(dnode_pile))) {

    memset(dnode, 0, sizeof(*dnode));
    llist_init_head(&dnode->children);
    llist_init_head(&dnode->siblings);
    mutex_init(&dnode->lock);

    dnode->ref_count = ATOMIC_VAR_INIT(0);
    dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);

    hstrcpy(&dnode->name, name);

        dnode->super_block = parent->super_block;

    lru_use_one(dnode_lru, &dnode->lru);
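/*
 * The allocation pattern above (and in vfs_i_alloc below) is: grab from
 * the pile, and on exhaustion evict half of the corresponding lru zone,
 * then retry once before giving up. A condensed sketch, with the failure
 * value assumed to be NULL:
 *
 *      struct v_dnode* dnode = cake_grab(dnode_pile);
 *      if (!dnode) {
 *          lru_evict_half(dnode_lru);
 *          if (!(dnode = cake_grab(dnode_pile))) {
 *              return NULL;
 *          }
 *      }
 */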
vfs_d_free(struct v_dnode* dnode)
    assert(dnode->ref_count == 1);

    assert(dnode->inode->link_count > 0);
    dnode->inode->link_count--;

    vfs_dcache_remove(dnode);
    // Make sure the children de-reference their parent.
    // With the lru in place, eviction will eventually propagate
    // over the entire detached subtree.
    struct v_dnode *pos, *n;
    llist_for_each(pos, n, &dnode->children, siblings)
        vfs_dcache_remove(pos);

    vfree(dnode->name.value);
    cake_release(dnode_pile, dnode);
vfs_i_alloc(struct v_superblock* sb,
            void (*init)(struct v_inode* inode, void* data),

    // Each superblock maintains its own inode cache hash table,
    // keyed naturally by the inode id.
    struct hbucket* slot = &sb->i_cache[inode_id & VFS_HASH_MASK];
    struct v_inode *pos, *n;
    hashtable_bucket_foreach(slot, pos, n, hash_list)
        if (pos->id == inode_id) {

    if (!(pos = cake_grab(inode_pile))) {
        lru_evict_half(inode_lru);
        if (!(pos = cake_grab(inode_pile))) {

    memset(pos, 0, sizeof(*pos));

    mutex_init(&pos->lock);

    hlist_add(&slot->head, &pos->hash_list);

    lru_use_one(inode_lru, &pos->lru);

vfs_i_free(struct v_inode* inode)
    if (inode->pg_cache) {
        pcache_release(inode->pg_cache);
        vfree(inode->pg_cache);

    inode->ops.sync(inode);
    hlist_delete(&inode->hash_list);
    cake_release(inode_pile, inode);
/* ---- System call definition and support ---- */

#define FLOCATE_CREATE_EMPTY 1

#define DO_STATUS(errno) SYSCALL_ESTATUS(__current->k_status = errno)
#define DO_STATUS_OR_RETURN(errno) ({ errno < 0 ? DO_STATUS(errno) : errno; })

#define TEST_FD(fd) (fd >= 0 && fd < VFS_MAX_FD)
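/*
 * Illustrative expansion (assumed semantics): a syscall body computes a
 * negative errno on failure, and zero or a meaningful value on success:
 *
 *      int errno = vfs_do_open(path, options);
 *      return DO_STATUS_OR_RETURN(errno);
 *
 * i.e. negative values are folded into __current->k_status and reported
 * through SYSCALL_ESTATUS, while non-negative results (such as a fresh
 * fd) are passed through to the caller unchanged.
 */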
__vfs_getfd(int fd, struct v_fd** fd_s)
    if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {

__vfs_try_locate_file(const char* path,
                      struct v_dnode** fdir,
                      struct v_dnode** file,

    char name_str[VFS_NAME_MAXLEN];
    struct hstr name = HSTR(name_str, 0);

          vfs_walk(__current->cwd, path, fdir, &name, VFS_WALK_PARENT))) {

    errno = vfs_walk(*fdir, name.value, file, NULL, 0);
    if (errno != ENOENT || !(options & FLOCATE_CREATE_EMPTY)) {

    struct v_dnode* parent = *fdir;
    struct v_dnode* file_new = vfs_d_alloc(parent, &name);

    if (!(errno = parent->inode->ops.create(parent->inode, file_new))) {
        vfs_dcache_add(parent, file_new);

        vfs_d_free(file_new);

    unlock_dnode(parent);
vfs_do_open(const char* path, int options)
    struct v_dnode *dentry, *file;
    struct v_file* ofile = 0;

    errno = __vfs_try_locate_file(
      path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);

    if (errno || (errno = vfs_open(file, &ofile))) {

    struct v_inode* o_inode = ofile->inode;
    if (!(o_inode->itype & VFS_IFSEQDEV) && !(options & FO_DIRECT)) {
        // XXX Change here accordingly when the signatures of
        //     pcache_read/pcache_write change.
        ofile->ops.read = pcache_read;
        ofile->ops.write = pcache_write;

    if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
        struct v_fd* fd_s = vzalloc(sizeof(*fd_s));
        // branchless select: f_pos starts at fsize when FO_APPEND is
        // set, otherwise at 0
        ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);

        fd_s->flags = options;
        __current->fdtable->fds[fd] = fd_s;
__DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
    int errno = vfs_do_open(path, options);
    return DO_STATUS_OR_RETURN(errno);

__DEFINE_LXSYSCALL1(int, close, int, fd)
    if ((errno = __vfs_getfd(fd, &fd_s))) {

    if (fd_s->file->ref_count > 1) {
        fd_s->file->ref_count--;
    } else if ((errno = vfs_close(fd_s->file))) {

    __current->fdtable->fds[fd] = 0;

    return DO_STATUS(errno);
__vfs_readdir_callback(struct dir_context* dctx,

    struct dirent* dent = (struct dirent*)dctx->cb_data;
    strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);

    dent->d_type = dtype;

__DEFINE_LXSYSCALL2(int, readdir, int, fd, struct dirent*, dent)
    if ((errno = __vfs_getfd(fd, &fd_s))) {

    struct v_inode* inode = fd_s->file->inode;

    if (!(fd_s->file->inode->itype & VFS_IFDIR)) {

    struct dir_context dctx =
      (struct dir_context){ .cb_data = dent,
                            .index = dent->d_offset,
                            .read_complete_callback =
                              __vfs_readdir_callback };

    // the first two entries are always the synthesized "." and ".."
    if (dent->d_offset == 0) {
        __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, 0);
    } else if (dent->d_offset == 1) {
        __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, 0);

    if ((errno = fd_s->file->ops.readdir(inode, &dctx))) {

    return DO_STATUS(errno);
__DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
    if ((errno = __vfs_getfd(fd, &fd_s))) {

    struct v_file* file = fd_s->file;
    if ((file->inode->itype & VFS_IFDIR)) {

    lock_inode(file->inode);

    file->inode->atime = clock_unixtime();

    __SYSCALL_INTERRUPTIBLE(
      { errno = file->ops.read(file->inode, buf, count, file->f_pos); })

        file->f_pos += errno;
        unlock_inode(file->inode);

    unlock_inode(file->inode);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL3(int, write, int, fd, void*, buf, size_t, count)
    if ((errno = __vfs_getfd(fd, &fd_s))) {

    struct v_file* file = fd_s->file;
    if ((file->inode->itype & VFS_IFDIR)) {

    lock_inode(file->inode);

    file->inode->mtime = clock_unixtime();

    __SYSCALL_INTERRUPTIBLE(
      { errno = file->ops.write(file->inode, buf, count, file->f_pos); })

        file->f_pos += errno;
        unlock_inode(file->inode);

    unlock_inode(file->inode);

    return DO_STATUS(errno);
__DEFINE_LXSYSCALL3(int, lseek, int, fd, int, offset, int, options)
    if ((errno = __vfs_getfd(fd, &fd_s))) {

    struct v_file* file = fd_s->file;

    lock_inode(file->inode);

    size_t fpos = file->f_pos;
        fpos = (size_t)((int)file->f_pos + offset);
        fpos = (size_t)((int)file->inode->fsize + offset);

    if (!file->ops.seek || !(errno = file->ops.seek(file->inode, fpos))) {

    unlock_inode(file->inode);

    return DO_STATUS(errno);
vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
    // ascend to the root first; components are emitted on the unwind
    size_t len = vfs_get_path(dnode->parent, buf, size, depth + 1);

    size_t cpy_size = MIN(dnode->name.len, size - len);
    strncpy(buf + len, dnode->name.value, cpy_size);

    buf[len++] = PATH_DELIM;

vfs_readlink(struct v_dnode* dnode, char* buf, size_t size)
    struct v_inode* inode = dnode->inode;
    if (inode->ops.read_symlink) {
        int errno = inode->ops.read_symlink(inode, &link);
        strncpy(buf, link, size);
__DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
    if ((errno = __vfs_getfd(fd, &fd_s))) {

    struct v_dnode* dnode;
    errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL3(int, readlink, const char*, path, char*, buf, size_t, size)
    struct v_dnode* dnode;
          vfs_walk(__current->cwd, path, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
        errno = vfs_readlink(dnode, buf, size);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL4(int,

    if ((errno = __vfs_getfd(dirfd, &fd_s))) {

    struct v_dnode* dnode;
    if (!(errno = vfs_walk(
            fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
        errno = vfs_readlink(dnode, buf, size);

    return DO_STATUS(errno);
When we perform an operation that could affect the layout of a
directory (i.e., rename, mkdir, rmdir), we must lock the parent dir
whenever possible. This blocks any ongoing path walk from reaching it,
hence avoiding any partially visible state.
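/*
 * A minimal sketch of that locking discipline, as it plays out in rmdir
 * below (order: victim first, then its parent, then the parent's inode):
 *
 *      lock_dnode(dnode);            // pin the victim
 *      lock_dnode(parent);           // freeze the parent's layout
 *      lock_inode(parent->inode);    // then the backing inode
 *      ... mutate the directory ...
 *      unlock_inode(parent->inode);
 *      unlock_dnode(parent);
 *      unlock_dnode(dnode);
 */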
__DEFINE_LXSYSCALL1(int, rmdir, const char*, pathname)
    struct v_dnode* dnode;
    if ((errno = vfs_walk(__current->cwd, pathname, &dnode, NULL, 0))) {
        return DO_STATUS(errno);

    if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {

    if (dnode->ref_count > 1 || dnode->inode->open_count) {

    if (!llist_empty(&dnode->children)) {

    struct v_dnode* parent = dnode->parent;

    lock_inode(parent->inode);

    if ((dnode->inode->itype & VFS_IFDIR)) {
        errno = parent->inode->ops.rmdir(parent->inode, dnode);
            vfs_dcache_remove(dnode);

    unlock_inode(parent->inode);
    unlock_dnode(parent);

    unlock_dnode(dnode);
    return DO_STATUS(errno);
__DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
    struct v_dnode *parent, *dir;
    char name_value[VFS_NAME_MAXLEN];
    struct hstr name = HHSTR(name_value, 0, 0);

          vfs_walk(__current->cwd, path, &parent, &name, VFS_WALK_PARENT))) {

    dir = vfs_d_alloc(parent, &name);

    lock_inode(parent->inode);

    if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
    } else if (!parent->inode->ops.mkdir) {
    } else if (!(parent->inode->itype & VFS_IFDIR)) {
    } else if (!(errno = parent->inode->ops.mkdir(parent->inode, dir))) {
        vfs_dcache_add(parent, dir);

    unlock_inode(parent->inode);
    unlock_dnode(parent);

    return DO_STATUS(errno);
__vfs_do_unlink(struct v_dnode* dnode)
    struct v_inode* inode = dnode->inode;

    if (dnode->ref_count > 1) {

    if (inode->open_count) {
    } else if (!(inode->itype & VFS_IFDIR)) {
        // The underlying unlink implementation should handle
        errno = inode->ops.unlink(inode);

    unlock_inode(inode);

__DEFINE_LXSYSCALL1(int, unlink, const char*, pathname)
    struct v_dnode* dnode;
    if ((errno = vfs_walk(__current->cwd, pathname, &dnode, NULL, 0))) {

    if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {

    errno = __vfs_do_unlink(dnode);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
    if ((errno = __vfs_getfd(fd, &fd_s))) {

    struct v_dnode* dnode;
    if (!(errno = vfs_walk(fd_s->file->dnode, pathname, &dnode, NULL, 0))) {
        errno = __vfs_do_unlink(dnode);

    return DO_STATUS(errno);
__DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
    struct v_dnode *dentry, *to_link, *name_dentry, *name_file;

    errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
        errno = __vfs_try_locate_file(
          newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
    } else if (name_file) {
        errno = vfs_link(to_link, name_file);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL1(int, fsync, int, fildes)
    if (!(errno = __vfs_getfd(fildes, &fd_s))) {
        errno = vfs_fsync(fd_s->file);

    return DO_STATUS(errno);
vfs_dup_fd(struct v_fd* old, struct v_fd** new)
    struct v_fd* copied = cake_grab(fd_pile);

    memcpy(copied, old, sizeof(struct v_fd));

    atomic_fetch_add(&old->file->ref_count, 1);

vfs_dup2(int oldfd, int newfd)
    if (newfd == oldfd) {

    struct v_fd *oldfd_s, *newfd_s;
    if ((errno = __vfs_getfd(oldfd, &oldfd_s))) {

    if (!TEST_FD(newfd)) {

    newfd_s = __current->fdtable->fds[newfd];
    if (newfd_s && (errno = vfs_close(newfd_s->file))) {

    if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
        __current->fdtable->fds[newfd] = newfd_s;

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL2(int, dup2, int, oldfd, int, newfd)
    return vfs_dup2(oldfd, newfd);

__DEFINE_LXSYSCALL1(int, dup, int, oldfd)
    struct v_fd *oldfd_s, *newfd_s;
    if ((errno = __vfs_getfd(oldfd, &oldfd_s))) {

    if (!(errno = vfs_alloc_fdslot(&newfd)) &&
        !(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
        __current->fdtable->fds[newfd] = newfd_s;

    return DO_STATUS(errno);
__DEFINE_LXSYSCALL2(int,

    struct v_dnode* dnode;
    if ((errno = vfs_walk(__current->cwd, pathname, &dnode, NULL, 0))) {

    if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {

    if (!dnode->inode->ops.set_symlink) {

    lock_inode(dnode->inode);

    errno = dnode->inode->ops.set_symlink(dnode->inode, link_target);

    unlock_inode(dnode->inode);

    return DO_STATUS(errno);
__vfs_do_chdir(struct v_dnode* dnode)
    if (!(dnode->inode->itype & VFS_IFDIR)) {

    if (__current->cwd) {
        atomic_fetch_sub(&__current->cwd->ref_count, 1);
        mnt_chillax(__current->cwd->mnt);

    atomic_fetch_add(&dnode->ref_count, 1);
    mnt_mkbusy(dnode->mnt);
    __current->cwd = dnode;

    unlock_dnode(dnode);

__DEFINE_LXSYSCALL1(int, chdir, const char*, path)
    struct v_dnode* dnode;

    if ((errno = vfs_walk(__current->cwd, path, &dnode, NULL, 0))) {

    errno = __vfs_do_chdir(dnode);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL1(int, fchdir, int, fd)
    if ((errno = __vfs_getfd(fd, &fd_s))) {

    errno = __vfs_do_chdir(fd_s->file->dnode);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
    if (!__current->cwd) {

    len = vfs_get_path(__current->cwd, buf, size, 0);

    buf[len + 1] = '\0';

    __current->k_status = errno;
vfs_do_rename(struct v_dnode* current, struct v_dnode* target)
    if (current->inode->id == target->inode->id) {

    if (current->ref_count > 1 || target->ref_count > 1) {

    if (current->super_block != target->super_block) {

    struct v_dnode* oldparent = current->parent;
    struct v_dnode* newparent = target->parent;

    lock_dnode(current);

    lock_dnode(oldparent);

    lock_dnode(newparent);

    if (!llist_empty(&target->children)) {

        unlock_dnode(target);

    if ((errno = current->inode->ops.rename(current->inode, current, target))) {
        unlock_dnode(target);

    // re-position current
    hstrcpy(&current->name, &target->name);
    vfs_dcache_rehash(newparent, current);

    unlock_dnode(target);

    unlock_dnode(current);

    unlock_dnode(oldparent);

    unlock_dnode(newparent);
__DEFINE_LXSYSCALL2(int, rename, const char*, oldpath, const char*, newpath)
    struct v_dnode *cur, *target_parent, *target;
    struct hstr name = HSTR(valloc(VFS_NAME_MAXLEN), 0);

    if ((errno = vfs_walk(__current->cwd, oldpath, &cur, NULL, 0))) {

    if ((errno = vfs_walk(
          __current->cwd, newpath, &target_parent, &name, VFS_WALK_PARENT))) {

    errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
    if (errno == ENOENT) {
        target = vfs_d_alloc(target_parent, &name);
        vfs_dcache_add(target_parent, target);

    errno = vfs_do_rename(cur, target);

    return DO_STATUS(errno);
__DEFINE_LXSYSCALL3(int,

    struct v_dnode *dev, *mnt;

    if ((errno = vfs_walk(__current->cwd, source, &dev, NULL, 0))) {

    if ((errno = vfs_walk(__current->cwd, target, &mnt, NULL, 0))) {

    if (mnt->ref_count > 1) {

    // By our convention.
    // XXX could we do better?
    struct device* device = (struct device*)dev->data;

    if (!(dev->inode->itype & VFS_IFVOLDEV) || !device) {

    errno = vfs_mount_at(fstype, device, mnt);

    return DO_STATUS(errno);

__DEFINE_LXSYSCALL1(int, unmount, const char*, target)
    return vfs_unmount(target);