static struct cake_pile* fd_pile;
struct v_dnode* vfs_sysroot;
-static struct hbucket* dnode_cache;
struct lru_zone *dnode_lru, *inode_lru;
superblock_pile =
cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
- dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
-
dnode_lru = lru_new_zone("vfs_dnode", __vfs_try_evict_dnode);
inode_lru = lru_new_zone("vfs_inode", __vfs_try_evict_inode);
// 创建一个根dnode。
vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
vfs_sysroot->parent = vfs_sysroot;
- atomic_fetch_add(&vfs_sysroot->ref_count, 1);
+
+ vfs_ref_dnode(vfs_sysroot);
}
/*
 * Locate the dnode-cache hash bucket for a child name under `parent`.
 *
 * The caller passes the raw name hash in *hash; this routine mixes it
 * (fold + parent-pointer offset) and writes the mixed value back through
 * *hash before indexing the per-superblock d_cache table.
 *
 * NOTE(review): reconstructed post-patch form of this hunk — the file is a
 * unified-diff fragment; verify against the applied tree.
 */
static inline struct hbucket*
__dcache_hash(struct v_dnode* parent, u32_t* hash)
{
    struct hbucket* d_cache;
    u32_t _hash;

    /* d_cache is per-superblock (see sb alloc: vzalloc of VFS_HASHTABLE_SIZE
       buckets), so dnodes of different filesystems never share buckets. */
    d_cache = parent->super_block->d_cache;
    _hash = *hash;

    /* fold the high bits down so the low (masked) bits are better mixed */
    _hash = _hash ^ (_hash >> VFS_HASHBITS);

    /* add the parent's pointer value to reduce collisions between
       identically-named entries in different directories */
    _hash += (u32_t)__ptr(parent);

    *hash = _hash;
    return &d_cache[_hash & VFS_HASH_MASK];
}
static inline int
{
assert(parent);
- atomic_fetch_add(&dnode->ref_count, 1);
+ dnode->ref_count = 1;
dnode->parent = parent;
llist_append(&parent->children, &dnode->siblings);
hlist_delete(&dnode->hash_list);
dnode->parent = NULL;
- atomic_fetch_sub(&dnode->ref_count, 1);
+ dnode->ref_count = 0;
}
void
vfile->dnode = dnode;
vfile->inode = inode;
- vfile->ref_count = ATOMIC_VAR_INIT(1);
+ vfile->ref_count = 1;
vfile->ops = inode->default_fops;
- if (check_file_node(inode) && !inode->pg_cache) {
+ if (check_regfile_node(inode) && !inode->pg_cache) {
struct pcache* pcache = vzalloc(sizeof(struct pcache));
pcache_init(pcache);
pcache->master = inode;
if (errno) {
cake_release(file_pile, vfile);
} else {
- atomic_fetch_add(&dnode->ref_count, 1);
+ vfs_ref_dnode(dnode);
inode->open_count++;
- mnt_mkbusy(dnode->mnt);
*file = vfile;
}
{
struct v_inode* inode;
int errno = 0;
-
- if (file->ref_count > 1) {
- atomic_fetch_sub(&file->ref_count, 1);
- return 0;
- }
-
+
inode = file->inode;
/*
* process is writing to this file later after B exit.
*/
- if (mutex_on_hold(&inode->lock)) {
- mutex_unlock_for(&inode->lock, pid);
+ mutex_unlock_for(&inode->lock, pid);
+
+ if (vfs_check_duped_file(file)) {
+ vfs_unref_file(file);
+ return 0;
}
- lock_inode(inode);
-
- pcache_commit_all(inode);
if ((errno = file->ops->close(file))) {
- goto unlock;
+ goto done;
+ }
+
+ vfs_unref_dnode(file->dnode);
+ cake_release(file_pile, file);
+
+ /*
+ if the current inode is not being locked by other
+ threads that does not share same open context,
+ then we can try to do sync opportunistically
+ */
+ if (mutex_on_hold(&inode->lock)) {
+ goto done;
}
+
+ lock_inode(inode);
- atomic_fetch_sub(&file->dnode->ref_count, 1);
+ pcache_commit_all(inode);
inode->open_count--;
if (!inode->open_count) {
__sync_inode_nolock(inode);
}
- mnt_chillax(file->dnode->mnt);
- cake_release(file_pile, file);
-
-unlock:
unlock_inode(inode);
+
+done:
return errno;
}
struct v_superblock* sb = cake_grab(superblock_pile);
memset(sb, 0, sizeof(*sb));
llist_init_head(&sb->sb_list);
+
sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
+ sb->d_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
+
sb->ref_count = 1;
return sb;
}
}
void
-vfs_sb_free(struct v_superblock* sb)
+vfs_sb_unref(struct v_superblock* sb)
{
assert(sb->ref_count);
sb->ref_count--;
- if (sb->ref_count) {
+ if (likely(sb->ref_count)) {
return;
}
}
vfree(sb->i_cache);
+ vfree(sb->d_cache);
+
cake_release(superblock_pile, sb);
}
llist_init_head(&dnode->aka_list);
mutex_init(&dnode->lock);
- dnode->ref_count = ATOMIC_VAR_INIT(0);
dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
hstrcpy(&dnode->name, name);
dnode->destruct(dnode);
}
- vfs_sb_free(dnode->super_block);
+ vfs_sb_unref(dnode->super_block);
vfree((void*)dnode->name.value);
cake_release(dnode_pile, dnode);
}
inode->destruct(inode);
}
- vfs_sb_free(inode->sb);
+ vfs_sb_unref(inode->sb);
hlist_delete(&inode->hash_list);
cake_release(inode_pile, inode);
}
memcpy(copied, old, sizeof(struct v_fd));
- atomic_fetch_add(&old->file->ref_count, 1);
+ vfs_ref_file(old->file);
*new = copied;
return DO_STATUS(errno);
}
-void
-vfs_ref_file(struct v_file* file)
-{
- atomic_fetch_add(&file->ref_count, 1);
-}
-
-void
-vfs_ref_dnode(struct v_dnode* dnode)
-{
- atomic_fetch_add(&dnode->ref_count, 1);
-
- if (dnode->mnt) {
- mnt_mkbusy(dnode->mnt);
- }
-}
-
-void
-vfs_unref_dnode(struct v_dnode* dnode)
-{
- atomic_fetch_sub(&dnode->ref_count, 1);
- if (dnode->mnt) {
- mnt_chillax(dnode->mnt);
- }
-}
-
int
vfs_do_chdir(struct proc_info* proc, struct v_dnode* dnode)
{
if (check_device_node(vino)) {
struct device* rdev = resolve_device(vino->data);
- if (!rdev || rdev->magic != DEV_STRUCT_MAGIC) {
+ if (!rdev) {
errno = EINVAL;
goto done;
}
stat->st_rdev = (dev_t){.meta = rdev->ident.fn_grp,
.unique = rdev->ident.unique,
- .index = rdev->dev_uid};
+ .index = dev_uid(rdev) };
}
if (fdev) {
stat->st_dev = (dev_t){.meta = fdev->ident.fn_grp,
.unique = fdev->ident.unique,
- .index = fdev->dev_uid};
+ .index = dev_uid(fdev) };
}
done: