static struct cake_pile* fd_pile;
struct v_dnode* vfs_sysroot;
-static struct hbucket* dnode_cache;
struct lru_zone *dnode_lru, *inode_lru;
superblock_pile =
cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
- dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
-
dnode_lru = lru_new_zone("vfs_dnode", __vfs_try_evict_dnode);
inode_lru = lru_new_zone("vfs_inode", __vfs_try_evict_inode);
// 创建一个根dnode。
vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
vfs_sysroot->parent = vfs_sysroot;
- atomic_fetch_add(&vfs_sysroot->ref_count, 1);
+
+ vfs_ref_dnode(vfs_sysroot);
}
static inline struct hbucket*
__dcache_hash(struct v_dnode* parent, u32_t* hash)
{
- u32_t _hash = *hash;
- // 确保低位更加随机
+ struct hbucket* d_cache;
+ u32_t _hash;
+
+ d_cache = parent->super_block->d_cache;
+ _hash = *hash;
_hash = _hash ^ (_hash >> VFS_HASHBITS);
- // 与parent的指针值做加法,来减小碰撞的可能性。
_hash += (u32_t)__ptr(parent);
+
*hash = _hash;
- return &dnode_cache[_hash & VFS_HASH_MASK];
+ return &d_cache[_hash & VFS_HASH_MASK];
}
static inline int
{
assert(parent);
- atomic_fetch_add(&dnode->ref_count, 1);
+ dnode->ref_count = 1;
dnode->parent = parent;
llist_append(&parent->children, &dnode->siblings);
hlist_delete(&dnode->hash_list);
dnode->parent = NULL;
- atomic_fetch_sub(&dnode->ref_count, 1);
+ dnode->ref_count = 0;
}
void
vfile->dnode = dnode;
vfile->inode = inode;
- vfile->ref_count = ATOMIC_VAR_INIT(1);
+ vfile->ref_count = 1;
vfile->ops = inode->default_fops;
- if (check_file_node(inode) && !inode->pg_cache) {
+ if (check_regfile_node(inode) && !inode->pg_cache) {
struct pcache* pcache = vzalloc(sizeof(struct pcache));
pcache_init(pcache);
pcache->master = inode;
if (errno) {
cake_release(file_pile, vfile);
} else {
- atomic_fetch_add(&dnode->ref_count, 1);
+ vfs_ref_dnode(dnode);
inode->open_count++;
- mnt_mkbusy(dnode->mnt);
*file = vfile;
}
{
struct v_inode* inode;
int errno = 0;
-
- if (file->ref_count > 1) {
- atomic_fetch_sub(&file->ref_count, 1);
- return 0;
- }
-
+
inode = file->inode;
/*
* process is writing to this file later after B exit.
*/
- if (mutex_on_hold(&inode->lock)) {
- mutex_unlock_for(&inode->lock, pid);
+ mutex_unlock_for(&inode->lock, pid);
+
+ if (vfs_check_duped_file(file)) {
+ vfs_unref_file(file);
+ return 0;
}
- lock_inode(inode);
-
- pcache_commit_all(inode);
if ((errno = file->ops->close(file))) {
- goto unlock;
+ goto done;
}
- atomic_fetch_sub(&file->dnode->ref_count, 1);
+ vfs_unref_dnode(file->dnode);
+ cake_release(file_pile, file);
+
+    /*
+        If the current inode is not locked by other
+        threads that do not share the same open context,
+        then we can try to sync opportunistically.
+    */
+ if (mutex_on_hold(&inode->lock)) {
+ goto done;
+ }
+
+ lock_inode(inode);
+
+ pcache_commit_all(inode);
inode->open_count--;
if (!inode->open_count) {
__sync_inode_nolock(inode);
}
- mnt_chillax(file->dnode->mnt);
- cake_release(file_pile, file);
-
-unlock:
unlock_inode(inode);
+
+done:
return errno;
}
struct v_superblock* sb = cake_grab(superblock_pile);
memset(sb, 0, sizeof(*sb));
llist_init_head(&sb->sb_list);
+
sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
+ sb->d_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
+
sb->ref_count = 1;
return sb;
}
}
void
-vfs_sb_free(struct v_superblock* sb)
+vfs_sb_unref(struct v_superblock* sb)
{
assert(sb->ref_count);
sb->ref_count--;
- if (sb->ref_count) {
+ if (likely(sb->ref_count)) {
return;
}
}
vfree(sb->i_cache);
+ vfree(sb->d_cache);
+
cake_release(superblock_pile, sb);
}
llist_init_head(&dnode->aka_list);
mutex_init(&dnode->lock);
- dnode->ref_count = ATOMIC_VAR_INIT(0);
dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
hstrcpy(&dnode->name, name);
dnode->destruct(dnode);
}
- vfs_sb_free(dnode->super_block);
+ vfs_sb_unref(dnode->super_block);
vfree((void*)dnode->name.value);
cake_release(dnode_pile, dnode);
}
void
vfs_i_free(struct v_inode* inode)
{
+ assert(inode->link_count == 0);
+
if (inode->pg_cache) {
pcache_release(inode->pg_cache);
vfree(inode->pg_cache);
inode->destruct(inode);
}
- vfs_sb_free(inode->sb);
+ vfs_sb_unref(inode->sb);
hlist_delete(&inode->hash_list);
cake_release(inode_pile, inode);
}
const int dtype)
{
struct lx_dirent* dent = (struct lx_dirent*)dctx->cb_data;
- strncpy(dent->d_name, name, MIN(len, DIRENT_NAME_MAX_LEN));
- dent->d_nlen = len;
+ int len_ = MIN(len, DIRENT_NAME_MAX_LEN - 1);
+
+ strncpy(dent->d_name, name, len_);
+ dent->d_name[len_] = 0;
+
+ dent->d_nlen = len_;
dent->d_type = dtype;
}
return DO_STATUS_OR_RETURN(errno);
}
+static inline bool
+check_pcache_eligibility(struct v_fd* fd_s)
+{
+ struct v_inode* inode;
+
+ inode = fd_s->file->inode;
+ return !check_seqdev_node(inode) \
+ && !fsm_check_pseudo_fs(inode->sb->fs) \
+ && !(fd_s->flags & FO_DIRECT);
+}
+
__DEFINE_LXSYSCALL3(int, read, int, fd, void*, buf, size_t, count)
{
int errno = 0;
file->inode->atime = clock_unixtime();
- if (check_seqdev_node(file->inode) || (fd_s->flags & FO_DIRECT)) {
+ if (!check_pcache_eligibility(fd_s)) {
errno = file->ops->read(file->inode, buf, count, file->f_pos);
} else {
errno = pcache_read(file->inode, buf, count, file->f_pos);
file->f_pos = inode->fsize;
}
- if (check_seqdev_node(inode) || (fd_s->flags & FO_DIRECT)) {
+ if (!check_pcache_eligibility(fd_s)) {
errno = file->ops->write(inode, buf, count, file->f_pos);
} else {
errno = pcache_write(inode, buf, count, file->f_pos);
size_t cpy_size = MIN(dnode->name.len, size - len);
strncpy(buf + len, dnode->name.value, cpy_size);
+    buf[MIN(len + cpy_size, size - 1)] = 0;
+
len += cpy_size;
return len;
memcpy(copied, old, sizeof(struct v_fd));
- atomic_fetch_add(&old->file->ref_count, 1);
+ vfs_ref_file(old->file);
*new = copied;
return DO_STATUS(errno);
}
-void
-vfs_ref_file(struct v_file* file)
-{
- atomic_fetch_add(&file->ref_count, 1);
-}
-
-void
-vfs_ref_dnode(struct v_dnode* dnode)
-{
- atomic_fetch_add(&dnode->ref_count, 1);
-
- if (dnode->mnt) {
- mnt_mkbusy(dnode->mnt);
- }
-}
-
-void
-vfs_unref_dnode(struct v_dnode* dnode)
-{
- atomic_fetch_sub(&dnode->ref_count, 1);
- if (dnode->mnt) {
- mnt_chillax(dnode->mnt);
- }
-}
-
int
vfs_do_chdir(struct proc_info* proc, struct v_dnode* dnode)
{
vfs_ref_dnode(dnode);
proc->cwd = dnode;
+done:
unlock_dnode(dnode);
-done:
return errno;
}
if (check_device_node(vino)) {
struct device* rdev = resolve_device(vino->data);
- if (!rdev || rdev->magic != DEV_STRUCT_MAGIC) {
+ if (!rdev) {
errno = EINVAL;
goto done;
}
stat->st_rdev = (dev_t){.meta = rdev->ident.fn_grp,
.unique = rdev->ident.unique,
- .index = rdev->dev_uid};
+ .index = dev_uid(rdev) };
}
if (fdev) {
stat->st_dev = (dev_t){.meta = fdev->ident.fn_grp,
.unique = fdev->ident.unique,
- .index = fdev->dev_uid};
+ .index = dev_uid(fdev) };
}
done: