*
*/
+// Welcome to The Mountain O'Shit! :)
+
+/*
+ TODO vfs & device todos checklist
+
+ It is overseen by Twilight Sparkle ;)
+
+ 1. Get inodes hooked into lru (CHECKED)
+ 2. Get dnodes hooked into lru (CHECKED)
+ 3. Get inodes properly hashed so they can be reused by the underlying fs (CHECKED)
+ 4. (lru) Add a callback function (or destructor) for eviction. (CHECKED)
+    [good idea] or a constructor/destructor pattern in the cake allocator?
+ 5. (mount) Figure out a way to identify a busy mount point before unmount
+    maybe a unified mount_point structure that maintains a reference
+    counter on any dnodes within the subtree? Such a counter would only be
+    incremented when a file is opened or a dnode is used as a working
+    directory, and decremented conversely.
+ 6. (mount) Ability to track all mount points (including sub-mounts)
+    so we can be confident that everything is cleaned up when we unmount.
+ 7. (mount) Figure out a way to acquire the device represented by a dnode,
+    so it can be used for mounting (e.g. we wish to get a `struct device*`
+    out of the dnode at /dev/sda).
+    [tip] we should pay attention to twifs and add a private_data field
+    to struct v_dnode?
+ 8. (mount) Then, we should refactor the mount/unmount mechanism.
+ 9. (mount) (future) Ability to mount anything? e.g. Linux can mount a disk
+    image file using a so-called "loopback" pseudo device. Maybe we can
+    do a similar thing in Lunaix? e.g. emulate a block device on top of
+    a regular file when we mount it.
+ 10. (device) device number (dev_t) allocation
+    [good idea] <class>:<subclass>:<uniq_id> composition (see the sketch below)
+*/
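+
+/*
+ A minimal sketch of the <class>:<subclass>:<uniq_id> composition from item 10.
+ The field widths and macro names below are illustrative assumptions only and
+ are not part of the current code:
+
+    #define DEV_UNIQ_BITS  20
+    #define DEV_SUB_BITS   6
+    #define DEV_CLASS_BITS 6
+
+    #define DEV_ID(class, sub, uniq)                                          \
+        ((dev_t)(((class) << (DEV_SUB_BITS + DEV_UNIQ_BITS)) |                \
+                 ((sub) << DEV_UNIQ_BITS) |                                   \
+                 ((uniq) & ((1 << DEV_UNIQ_BITS) - 1))))
+
+    #define DEV_CLASS(dev) ((dev) >> (DEV_SUB_BITS + DEV_UNIQ_BITS))
+    #define DEV_SUB(dev)   (((dev) >> DEV_UNIQ_BITS) & ((1 << DEV_SUB_BITS) - 1))
+    #define DEV_UNIQ(dev)  ((dev) & ((1 << DEV_UNIQ_BITS) - 1))
+*/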
+
#include <klibc/string.h>
#include <lunaix/dirent.h>
#include <lunaix/foptions.h>
#include <lunaix/fs/twifs.h>
#define PATH_DELIM '/'
-#define DNODE_HASHTABLE_BITS 10
-#define DNODE_HASHTABLE_SIZE (1 << DNODE_HASHTABLE_BITS)
-#define DNODE_HASH_MASK (DNODE_HASHTABLE_SIZE - 1)
-#define DNODE_HASHBITS (32 - DNODE_HASHTABLE_BITS)
+#define HASHTABLE_BITS 10
+#define HASHTABLE_SIZE (1 << HASHTABLE_BITS)
+#define HASH_MASK (HASHTABLE_SIZE - 1)
+#define HASHBITS (32 - HASHTABLE_BITS)
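+
+// Locking an inode/dnode also marks it as recently used in its LRU zone, so
+// objects that are actively in use are less likely to be evicted.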
-#define lock_inode(inode) mutex_lock(&inode->lock)
#define unlock_inode(inode) mutex_unlock(&inode->lock)
+#define lock_inode(inode) \
+ ({ \
+ mutex_lock(&inode->lock); \
+ lru_use_one(inode_lru, &inode->lru); \
+ })
-#define lock_dnode(dnode) mutex_lock(&dnode->lock)
#define unlock_dnode(dnode) mutex_unlock(&dnode->lock)
+#define lock_dnode(dnode) \
+ ({ \
+ mutex_lock(&dnode->lock); \
+ lru_use_one(dnode_lru, &dnode->lru); \
+ })
static struct cake_pile* dnode_pile;
static struct cake_pile* inode_pile;
static struct cake_pile* fd_pile;
static struct v_superblock* root_sb;
-static struct hbucket* dnode_cache;
+static struct hbucket *dnode_cache, *inode_cache;
-static int fs_id = 0;
+static struct lru_zone *dnode_lru, *inode_lru;
struct hstr vfs_ddot = HSTR("..", 2);
struct hstr vfs_dot = HSTR(".", 1);
struct hstr vfs_empty = HSTR("", 0);
-struct v_dnode*
-vfs_d_alloc();
-
-void
-vfs_d_free(struct v_dnode* dnode);
-
struct v_superblock*
vfs_sb_alloc();
void
vfs_sb_free(struct v_superblock* sb);
+static int
+__vfs_try_evict_dnode(struct lru_node* obj);
+
+static int
+__vfs_try_evict_inode(struct lru_node* obj);
+
void
vfs_init()
{
superblock_pile =
cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
- dnode_cache = vzalloc(DNODE_HASHTABLE_SIZE * sizeof(struct hbucket));
+ dnode_cache = vzalloc(HASHTABLE_SIZE * sizeof(struct hbucket));
+ inode_cache = vzalloc(HASHTABLE_SIZE * sizeof(struct hbucket));
+
+ dnode_lru = lru_new_zone(__vfs_try_evict_dnode);
+ inode_lru = lru_new_zone(__vfs_try_evict_inode);
hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
hstr_rehash(&vfs_dot, HSTR_FULL_HASH);
    // Create a root superblock to hold our root directory.
root_sb = vfs_sb_alloc();
root_sb->root = vfs_d_alloc();
- root_sb->root->inode = vfs_i_alloc();
}
inline struct hbucket*
    // Add the parent's pointer value to reduce the chance of collision.
_hash += (uint32_t)parent;
    // Make the low bits more random.
- _hash = _hash ^ (_hash >> DNODE_HASHBITS);
+ _hash = _hash ^ (_hash >> HASHBITS);
*hash = _hash;
- return &dnode_cache[_hash & DNODE_HASH_MASK];
+ return &dnode_cache[_hash & HASH_MASK];
}
struct v_dnode*
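+
+// Insert a dnode into the dcache under the given parent. The dcache holds one
+// reference on the dnode for as long as it stays cached.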
void
vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
{
+ atomic_fetch_add(&dnode->ref_count, 1);
+ dnode->parent = parent;
+ llist_append(&parent->children, &dnode->siblings);
struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
hlist_add(&bucket->head, &dnode->hash_list);
}
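+
+// Detach a dnode from the dcache: unlink it from its parent's children list
+// and from the hash table, then drop the reference taken by vfs_dcache_add().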
void
-vfs_dcache_rehash(struct v_dnode* parent, struct v_dnode* dnode)
+vfs_dcache_remove(struct v_dnode* dnode)
{
+ assert(dnode->ref_count == 1);
+
+ llist_delete(&dnode->siblings);
hlist_delete(&dnode->hash_list);
+
+ dnode->parent = NULL;
+ atomic_fetch_sub(&dnode->ref_count, 1);
+}
+
+void
+vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
+{
hstr_rehash(&dnode->name, HSTR_FULL_HASH);
- vfs_dcache_add(parent, dnode);
+ vfs_dcache_remove(dnode);
+ vfs_dcache_add(new_parent, dnode);
}
int
if (!dnode) {
dnode = vfs_d_alloc();
+ if (!dnode) {
+ errno = ENOMEM;
+ goto error;
+ }
+
hstrcpy(&dnode->name, &name);
lock_inode(current_level->inode);
if (errno) {
unlock_dnode(current_level);
-            vfree(dnode->name.value);
-            goto error;
+            goto cleanup;
}
vfs_dcache_add(current_level, dnode);
-
- dnode->parent = current_level;
-    llist_append(&current_level->children, &dnode->siblings);
}
unlock_dnode(current_level);
*dentry = current_level;
return 0;
-error:
+cleanup:
vfs_d_free(dnode);
+error:
*dentry = NULL;
return errno;
}
int errno = __vfs_walk(start, path, &interim, component, options);
int counter = 0;
- while (!errno) {
+    while (!errno && interim->inode && !(options & VFS_WALK_NOFOLLOW)) {
if (counter >= VFS_MAX_SYMLINK) {
errno = ELOOP;
continue;
}
if ((interim->inode->itype & VFS_IFSYMLINK) &&
- !(options & VFS_WALK_NOFOLLOW) &&
interim->inode->ops.read_symlink) {
+
+ lock_inode(interim->inode);
errno = interim->inode->ops.read_symlink(interim->inode, &pathname);
+ unlock_inode(interim->inode);
+
if (errno) {
break;
}
struct device* device,
struct v_dnode* mnt_point)
{
- if (!(mnt_point->inode->itype & VFS_IFDIR)) {
+ if (mnt_point->inode && !(mnt_point->inode->itype & VFS_IFDIR)) {
return ENOTDIR;
}
struct v_superblock* sb = vfs_sb_alloc();
sb->dev = device;
- sb->fs_id = fs_id++;
+ sb->fs_id = fs->fs_id;
int errno = 0;
if (!(errno = fs->mount(sb, mnt_point))) {
int
vfs_unmount_at(struct v_dnode* mnt_point)
{
- // FIXME mnt point check & deal with the detached dcache subtree
+ // FIXME deal with the detached dcache subtree
int errno = 0;
struct v_superblock* sb = mnt_point->super_block;
if (!sb) {
return EINVAL;
}
+
+ if (sb->root != mnt_point) {
+ return EINVAL;
+ }
+
if (!(errno = sb->fs->unmount(sb))) {
struct v_dnode* fs_root = sb->root;
- llist_delete(&fs_root->siblings);
+ vfs_dcache_remove(fs_root);
+
llist_delete(&sb->sb_list);
- hlist_delete(&fs_root->hash_list);
vfs_sb_free(sb);
vfs_d_free(fs_root);
}
return errno;
}
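+
+// Attach an inode to a dnode, transferring the link count: the previously
+// assigned inode (if any) loses one link and the new inode gains one.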
+void
+vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
+{
+ if (assign_to->inode) {
+ assign_to->inode->link_count--;
+ }
+ assign_to->inode = inode;
+ inode->link_count++;
+}
+
int
vfs_link(struct v_dnode* to_link, struct v_dnode* name)
{
} else if (!to_link->inode->ops.link) {
errno = ENOTSUP;
} else if (!(errno = to_link->inode->ops.link(to_link->inode, name))) {
- name->inode = to_link->inode;
- atomic_fetch_add(&to_link->inode->link_count, 1);
+ vfs_assign_inode(name, to_link->inode);
}
unlock_inode(to_link->inode);
cake_release(superblock_pile, sb);
}
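+
+// LRU eviction callbacks registered in vfs_init(): they return 1 when the
+// object was unreferenced and has been freed, and 0 when it is still in use
+// and must be kept.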
+static int
+__vfs_try_evict_dnode(struct lru_node* obj)
+{
+ struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);
+
+ if (!dnode->ref_count) {
+ vfs_d_free(dnode);
+ return 1;
+ }
+ return 0;
+}
+
+static int
+__vfs_try_evict_inode(struct lru_node* obj)
+{
+ struct v_inode* inode = container_of(obj, struct v_inode, lru);
+
+ if (!inode->link_count && !inode->open_count) {
+ vfs_i_free(inode);
+ return 1;
+ }
+ return 0;
+}
+
struct v_dnode*
vfs_d_alloc()
{
struct v_dnode* dnode = cake_grab(dnode_pile);
+ if (!dnode) {
+ lru_evict_half(dnode_lru);
+
+ if (!(dnode = cake_grab(dnode_pile))) {
+ return NULL;
+ }
+ }
+
memset(dnode, 0, sizeof(*dnode));
llist_init_head(&dnode->children);
llist_init_head(&dnode->siblings);
dnode->ref_count = ATOMIC_VAR_INIT(0);
dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
+ lru_use_one(dnode_lru, &dnode->lru);
+
return dnode;
}
void
vfs_d_free(struct v_dnode* dnode)
{
- if (dnode->inode && dnode->inode->link_count) {
+ assert(dnode->ref_count == 0);
+
+ if (dnode->inode) {
+ assert(dnode->inode->link_count > 0);
dnode->inode->link_count--;
}
+
+    // Make sure the children de-reference their parent.
+    // With the LRU in place, eviction will eventually propagate over the
+    // entire detached subtree.
+ struct v_dnode *pos, *n;
+ llist_for_each(pos, n, &dnode->children, siblings)
+ {
+ vfs_dcache_remove(pos);
+ }
+
vfree(dnode->name.value);
cake_release(dnode_pile, dnode);
}
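+
+// Look up an inode in the global inode cache by its composed id; if it is not
+// cached yet, grab a fresh one from the cake pile (evicting half of the inode
+// LRU under memory pressure) and insert it into the cache.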
struct v_inode*
-vfs_i_alloc()
+vfs_i_alloc(dev_t device_id, uint32_t inode_id)
{
- struct v_inode* inode = cake_grab(inode_pile);
- memset(inode, 0, sizeof(*inode));
- inode->link_count = 1;
- mutex_init(&inode->lock);
+    // We assume here that each file system maps one-to-one to a device
+    // (after all, a partition cannot hold two different file systems), and
+    // the v_inode cache produced by one file system must be kept separate
+    // from the caches of other file systems. That is, each v_inode id must
+    // be composed of the device ID and the id of the cached physical inode
+    // relative to its own file system.
+ inode_id = hash_32(inode_id ^ (-device_id), HASH_SIZE_BITS);
+ inode_id = (inode_id >> HASHBITS) ^ inode_id;
+
+ struct hbucket* slot = &inode_cache[inode_id & HASH_MASK];
+ struct v_inode *pos, *n;
+ hashtable_bucket_foreach(slot, pos, n, hash_list)
+ {
+ if (pos->id == inode_id) {
+ goto done;
+ }
+ }
- return inode;
+ if (!(pos = cake_grab(inode_pile))) {
+ lru_evict_half(inode_lru);
+ if (!(pos = cake_grab(inode_pile))) {
+ return NULL;
+ }
+ }
+
+ memset(pos, 0, sizeof(*pos));
+
+ pos->id = inode_id;
+
+ mutex_init(&pos->lock);
+
+ hlist_add(&slot->head, &pos->hash_list);
+
+done:
+ lru_use_one(inode_lru, &pos->lru);
+ return pos;
}
void
vfs_i_free(struct v_inode* inode)
{
+ hlist_delete(&inode->hash_list);
cake_release(inode_pile, inode);
}
struct v_dnode* parent = *fdir;
struct v_dnode* file_new = vfs_d_alloc();
+
+ if (!file_new) {
+ return ENOMEM;
+ }
+
hstrcpy(&file_new->name, &name);
+ lock_dnode(parent);
+
if (!(errno = parent->inode->ops.create(parent->inode, file_new))) {
        *file = file_new;
    } else {
        vfs_d_free(file_new);
}
+ unlock_dnode(parent);
+
return errno;
}
__SYSCALL_INTERRUPTIBLE(
{ errno = file->ops.read(file->inode, buf, count, file->f_pos); })
- unlock_inode(file->inode);
-
if (errno > 0) {
file->f_pos += errno;
+ unlock_inode(file->inode);
return errno;
}
+ unlock_inode(file->inode);
+
done:
return DO_STATUS(errno);
}
__SYSCALL_INTERRUPTIBLE(
{ errno = file->ops.write(file->inode, buf, count, file->f_pos); })
- unlock_inode(file->inode);
-
if (errno > 0) {
file->f_pos += errno;
+ unlock_inode(file->inode);
return errno;
}
+ unlock_inode(file->inode);
+
done:
return DO_STATUS(errno);
}
lock_dnode(dnode);
- if (dnode->parent)
- lock_dnode(dnode->parent);
-
if ((dnode->super_block->fs->types & FSTYPE_ROFS)) {
errno = EROFS;
goto done;
}
- if (dnode->ref_count || dnode->inode->open_count) {
+ if (dnode->ref_count > 1 || dnode->inode->open_count) {
errno = EBUSY;
goto done;
}
goto done;
}
- lock_inode(dnode->inode);
+ struct v_dnode* parent = dnode->parent;
+
+ if (!parent) {
+ errno = EINVAL;
+ goto done;
+ }
+
+ lock_dnode(parent);
+ lock_inode(parent->inode);
if ((dnode->inode->itype & VFS_IFDIR)) {
- errno = dnode->inode->ops.rmdir(dnode->inode);
+ errno = parent->inode->ops.rmdir(parent->inode, dnode);
if (!errno) {
- llist_delete(&dnode->siblings);
- hlist_delete(&dnode->hash_list);
- unlock_inode(dnode->inode);
- vfs_d_free(dnode);
-
- goto done;
+ vfs_dcache_remove(dnode);
}
} else {
errno = ENOTDIR;
}
- unlock_inode(dnode->inode);
+ unlock_inode(parent->inode);
+ unlock_dnode(parent);
done:
unlock_dnode(dnode);
- if (dnode->parent)
- unlock_dnode(dnode->parent);
return DO_STATUS(errno);
}
__DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
{
+ int errno = 0;
struct v_dnode *parent, *dir = vfs_d_alloc();
- int errno =
- vfs_walk(__current->cwd, path, &parent, &dir->name, VFS_WALK_PARENT);
- if (errno) {
+
+ if (!dir) {
+ errno = ENOMEM;
+ goto done;
+ }
+
+ if ((errno = vfs_walk(
+ __current->cwd, path, &parent, &dir->name, VFS_WALK_PARENT))) {
goto done;
}
{
struct v_inode* inode = dnode->inode;
- if (dnode->ref_count) {
+ if (dnode->ref_count > 1) {
return EBUSY;
}
// symlink case
errno = inode->ops.unlink(inode);
if (!errno) {
- inode->link_count--;
- llist_delete(&dnode->siblings);
- hlist_delete(&dnode->hash_list);
+ vfs_dcache_remove(dnode);
vfs_d_free(dnode);
}
} else {
errno = EROFS;
goto done;
}
- if (!dnode->inode->ops.symlink) {
+ if (!dnode->inode->ops.set_symlink) {
errno = ENOTSUP;
goto done;
}
lock_inode(dnode->inode);
- errno = dnode->inode->ops.symlink(dnode->inode, link_target);
+ errno = dnode->inode->ops.set_symlink(dnode->inode, link_target);
unlock_inode(dnode->inode);
}
if (__current->cwd) {
- __current->cwd->ref_count--;
+        atomic_fetch_sub(&__current->cwd->ref_count, 1);
}
- dnode->ref_count--;
+    atomic_fetch_add(&dnode->ref_count, 1);
__current->cwd = dnode;
unlock_dnode(dnode);
return 0;
}
- if (current->ref_count || target->ref_count) {
+ if (current->ref_count > 1 || target->ref_count > 1) {
return EBUSY;
}
}
// re-position current
- current->parent = newparent;
    hstrcpy(&current->name, &target->name);
-    llist_delete(&current->siblings);
-    llist_append(&newparent->children, &current->siblings);
vfs_dcache_rehash(newparent, current);
// detach target
- llist_delete(&target->siblings);
- hlist_delete(&target->hash_list);
+ vfs_dcache_remove(target);
unlock_dnode(target);
errno = vfs_walk(target_parent, name.value, &target, NULL, 0);
if (errno == ENOENT) {
target = vfs_d_alloc();
- hstrcpy(&target->name, &name);
} else if (errno) {
goto done;
}
+ if (!target) {
+ errno = ENOMEM;
+ goto done;
+ }
+
+ hstrcpy(&target->name, &name);
+
if (!(errno = vfs_do_rename(cur, target))) {
vfs_d_free(target);
}
goto done;
}
+ if (mnt->ref_count > 1) {
+ errno = EBUSY;
+ goto done;
+ }
+
// FIXME should not touch the underlying fs!
struct device* device =
(struct device*)((struct twifs_node*)dev->inode->data)->data;