}
inline struct hbucket*
-__dcache_hash(struct v_dnode* parent, uint32_t* hash)
+__dcache_hash(struct v_dnode* parent, u32_t* hash)
{
- uint32_t _hash = *hash;
- // 与parent的指针值做加法,来减小碰撞的可能性。
- _hash += (uint32_t)parent;
+ u32_t _hash = *hash;
// 确保低位更加随机
_hash = _hash ^ (_hash >> VFS_HASHBITS);
+    // Add the parent pointer value into the hash to reduce the chance
+    // of collisions between identical names under different parents.
+ _hash += (u32_t)parent;
*hash = _hash;
return &dnode_cache[_hash & VFS_HASH_MASK];
}
return parent->parent;
}
- uint32_t hash = str->hash;
+ u32_t hash = str->hash;
struct hbucket* slot = __dcache_hash(parent, &hash);
struct v_dnode *pos, *n;
assert(dnode->ref_count == 1);
llist_delete(&dnode->siblings);
+ llist_delete(&dnode->aka_list);
hlist_delete(&dnode->hash_list);
dnode->parent = NULL;
vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
{
if (assign_to->inode) {
+ llist_delete(&assign_to->aka_list);
assign_to->inode->link_count--;
}
+ llist_append(&inode->aka_dnodes, &assign_to->aka_list);
assign_to->inode = inode;
inode->link_count++;
}
}
int
-vfs_close(struct v_file* file)
+vfs_pclose(struct v_file* file, pid_t pid)
{
int errno = 0;
if (file->ref_count > 1) {
atomic_fetch_sub(&file->dnode->ref_count, 1);
file->inode->open_count--;
- // Remove dead lock.
+        // Prevent deadlock.
// This happened when process is terminated while blocking on read.
// In that case, the process is still holding the inode lock and it will
// never get released.
- // FIXME is this a good solution?
/*
- * Consider two process both open the same file both with fd=x.
+         * The unlocking should also include an ownership check.
+         *
+         * To see why, consider two processes that both open the same
+         * file, both with fd=x.
* Process A: busy on reading x
* Process B: do nothing with x
- * Assume that, after a very short time, process B get terminated while
- * process A is still busy in it's reading business. By this design, the
- * inode lock of this file x is get released by B rather than A. And
- * this will cause a probable race condition on A if other process is
- * writing to this file later after B exit.
- *
- * A possible solution is to add a owner identification in the lock
- * context, so only the lock holder can do the release.
+         * Assume that, after a very short time, process B gets
+         * terminated while process A is still busy with its read. By
+         * this design, the inode lock of file x gets released by B
+         * rather than A. This will cause a probable race condition on A
+         * if another process writes to this file after B exits.
*/
if (mutex_on_hold(&file->inode->lock)) {
- unlock_inode(file->inode);
+ mutex_unlock_for(&file->inode->lock, pid);
}
mnt_chillax(file->dnode->mnt);
return errno;
}
+int
+vfs_close(struct v_file* file)
+{
+ return vfs_pclose(file, __current->pid);
+}
+
+void
+vfs_free_fd(struct v_fd* fd)
+{
+ cake_release(fd_pile, fd);
+}
+
int
vfs_fsync(struct v_file* file)
{
memset(dnode, 0, sizeof(*dnode));
llist_init_head(&dnode->children);
llist_init_head(&dnode->siblings);
+ llist_init_head(&dnode->aka_list);
mutex_init(&dnode->lock);
dnode->ref_count = ATOMIC_VAR_INIT(0);
}
struct v_inode*
-vfs_i_find(struct v_superblock* sb, uint32_t i_id)
+vfs_i_find(struct v_superblock* sb, u32_t i_id)
{
struct hbucket* slot = &sb->i_cache[i_id & VFS_HASH_MASK];
struct v_inode *pos, *n;
memset(inode, 0, sizeof(*inode));
mutex_init(&inode->lock);
llist_init_head(&inode->xattrs);
+ llist_init_head(&inode->aka_dnodes);
sb->ops.init_inode(sb, inode);
pcache_release(inode->pg_cache);
vfree(inode->pg_cache);
}
- inode->ops->sync(inode);
+    // We don't need to sync the inode here.
+    // If an inode can be freed, it must have been properly closed,
+    // and hence must already be synced.
+ if (inode->destruct) {
+ inode->destruct(inode);
+ }
hlist_delete(&inode->hash_list);
cake_release(inode_pile, inode);
}
struct v_inode* o_inode = ofile->inode;
if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
- struct v_fd* fd_s = vzalloc(sizeof(*fd_s));
+ struct v_fd* fd_s = cake_grab(fd_pile);
+ memset(fd_s, 0, sizeof(*fd_s));
+
ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);
fd_s->file = ofile;
fd_s->flags = options;
goto done_err;
}
- vfree(fd_s);
+ cake_release(fd_pile, fd_s);
__current->fdtable->fds[fd] = 0;
done_err:
int
vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
{
- if (!dnode || dnode->parent == dnode) {
+ if (!dnode) {
return 0;
}
return ENAMETOOLONG;
}
- size_t len = vfs_get_path(dnode->parent, buf, size, depth + 1);
+ size_t len = 0;
+
+ if (dnode->parent != dnode) {
+ len = vfs_get_path(dnode->parent, buf, size, depth + 1);
+ }
if (len >= size) {
return len;
}
- buf[len++] = VFS_PATH_DELIM;
+ if (!len || buf[len - 1] != VFS_PATH_DELIM) {
+ buf[len++] = VFS_PATH_DELIM;
+ }
size_t cpy_size = MIN(dnode->name.len, size - len);
strncpy(buf + len, dnode->name.value, cpy_size);