__dcache_hash(struct v_dnode* parent, uint32_t* hash)
{
uint32_t _hash = *hash;
- // add the parent's pointer value to reduce the chance of collisions
- _hash += (uint32_t)parent;
// make the low-order bits more random
_hash = _hash ^ (_hash >> VFS_HASHBITS);
+ // add the parent's pointer value to reduce the chance of collisions
+ _hash += (uint32_t)parent;
*hash = _hash;
return &dnode_cache[_hash & VFS_HASH_MASK];
}
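
For reference, a standalone userspace illustration of the mixing above (the VFS_HASHBITS/VFS_HASH_MASK values and the slot-index return are assumptions made for the example, not the kernel's definitions):

#include <stdint.h>
#include <stdio.h>

#define VFS_HASHBITS  10
#define VFS_HASH_MASK ((1u << VFS_HASHBITS) - 1)

/* mirror of the __dcache_hash mixing, returning the bucket index */
static uint32_t
dcache_slot(uint32_t name_hash, uintptr_t parent_ptr)
{
    uint32_t h = name_hash;
    h = h ^ (h >> VFS_HASHBITS);   /* fold higher bits into the low bits */
    h += (uint32_t)parent_ptr;     /* perturb by the parent's identity */
    return h & VFS_HASH_MASK;
}

int main(void)
{
    /* the same name hash under two different parents typically lands in
     * different slots */
    printf("%u %u\n",
           dcache_slot(0xdeadbeefu, 0x1000),
           dcache_slot(0xdeadbeefu, 0x2040));
    return 0;
}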
assert(dnode->ref_count == 1);
llist_delete(&dnode->siblings);
+ llist_delete(&dnode->aka_list);
hlist_delete(&dnode->hash_list);
dnode->parent = NULL;
vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
{
if (assign_to->inode) {
+ llist_delete(&assign_to->aka_list);
assign_to->inode->link_count--;
}
+ llist_append(&inode->aka_dnodes, &assign_to->aka_list);
assign_to->inode = inode;
inode->link_count++;
}
}
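
The aka_dnodes/aka_list pair effectively records every dnode (hard link) that aliases a given inode. A minimal traversal sketch, assuming the usual circular doubly-linked llist_header layout (the helper below is illustrative, not the project's list API):

#include <stddef.h>

/* hypothetical: visit each dnode currently aliasing `inode` */
#define aka_entry(ptr) \
    ((struct v_dnode*)((char*)(ptr) - offsetof(struct v_dnode, aka_list)))

static void
__for_each_alias_sketch(struct v_inode* inode, void (*visit)(struct v_dnode*))
{
    struct llist_header* pos = inode->aka_dnodes.next;
    while (pos != &inode->aka_dnodes) {
        struct llist_header* next = pos->next;   /* visit() may unlink pos */
        visit(aka_entry(pos));
        pos = next;
    }
}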
int
-vfs_close(struct v_file* file)
+vfs_pclose(struct v_file* file, pid_t pid)
{
int errno = 0;
- if (!(errno = file->ops->close(file))) {
+ if (file->ref_count > 1) {
+ atomic_fetch_sub(&file->ref_count, 1);
+ } else if (!(errno = file->ops->close(file))) {
atomic_fetch_sub(&file->dnode->ref_count, 1);
file->inode->open_count--;
+
+ // Prevent deadlock.
+ // This happens when a process is terminated while it is blocked on a
+ // read: the process still holds the inode lock, which would otherwise
+ // never be released.
+ /*
+ * The unlock must also check ownership.
+ *
+ * To see why, consider two processes that both open the same file,
+ * both with fd=x.
+ * Process A: busy reading x
+ * Process B: does nothing with x
+ * Suppose that, shortly after, process B is terminated while process A
+ * is still in the middle of its read. Without an ownership check, B
+ * would release the inode lock on file x even though A is the holder,
+ * and that invites a race on A if another process writes to the file
+ * after B exits.
+ */
+ if (mutex_on_hold(&file->inode->lock)) {
+ mutex_unlock_for(&file->inode->lock, pid);
+ }
mnt_chillax(file->dnode->mnt);
pcache_commit_all(file->inode);
return errno;
}
+int
+vfs_close(struct v_file* file)
+{
+ return vfs_pclose(file, __current->pid);
+}
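
A minimal sketch of the ownership-checked unlock that the comment above relies on; the owner-tracking mutex below is an assumption for illustration, not the project's mutex implementation:

struct owned_mutex {
    volatile int locked;
    pid_t owner;
};

/* release only if `pid` is actually the holder (cf. mutex_unlock_for) */
static void
mutex_unlock_for_sketch(struct owned_mutex* m, pid_t pid)
{
    if (!m->locked || m->owner != pid) {
        return;   /* held by someone else (e.g. process A): leave it alone */
    }
    m->owner = 0;
    m->locked = 0;
}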
+
+void
+vfs_free_fd(struct v_fd* fd)
+{
+ cake_release(fd_pile, fd);
+}
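
fd_pile is presumably a cake (slab) pile created once at VFS init; the constructor call below is an assumption about its shape, shown only to make the cake_grab()/cake_release() pairing concrete:

/* hypothetical one-time setup, e.g. during VFS initialization */
static struct cake_pile* fd_pile;

void
fd_pile_init_sketch(void)
{
    /* name, object size, pages per cake, flags -- signature assumed */
    fd_pile = cake_new_pile("fd", sizeof(struct v_fd), 1, 0);
}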
+
int
vfs_fsync(struct v_file* file)
{
memset(dnode, 0, sizeof(*dnode));
llist_init_head(&dnode->children);
llist_init_head(&dnode->siblings);
+ llist_init_head(&dnode->aka_list);
mutex_init(&dnode->lock);
dnode->ref_count = ATOMIC_VAR_INIT(0);
memset(inode, 0, sizeof(*inode));
mutex_init(&inode->lock);
llist_init_head(&inode->xattrs);
+ llist_init_head(&inode->aka_dnodes);
sb->ops.init_inode(sb, inode);
struct v_inode* o_inode = ofile->inode;
if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
- struct v_fd* fd_s = vzalloc(sizeof(*fd_s));
+ struct v_fd* fd_s = cake_grab(fd_pile);
+ memset(fd_s, 0, sizeof(*fd_s));
+
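// branchless select: start at end of file when FO_APPEND is set, else at offset 0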
ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);
fd_s->file = ofile;
fd_s->flags = options;
goto done_err;
}
- if (fd_s->file->ref_count > 1) {
- fd_s->file->ref_count--;
- } else if ((errno = vfs_close(fd_s->file))) {
+ if ((errno = vfs_close(fd_s->file))) {
goto done_err;
}
- vfree(fd_s);
+ cake_release(fd_pile, fd_s);
__current->fdtable->fds[fd] = 0;
done_err:
__vfs_readdir_callback };
errno = 1;
if (dent->d_offset == 0) {
- __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, 0);
+ __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, DT_DIR);
} else if (dent->d_offset == 1) {
- __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, 0);
+ __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, DT_DIR);
} else {
dctx.index -= 2;
if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {
file->inode->atime = clock_unixtime();
- __SYSCALL_INTERRUPTIBLE({
- if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
- errno = file->ops->read(file->inode, buf, count, file->f_pos);
- } else {
- errno = pcache_read(file->inode, buf, count, file->f_pos);
- }
- })
+ if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
+ errno = file->ops->read(file->inode, buf, count, file->f_pos);
+ } else {
+ errno = pcache_read(file->inode, buf, count, file->f_pos);
+ }
if (errno > 0) {
file->f_pos += errno;
file->inode->mtime = clock_unixtime();
- __SYSCALL_INTERRUPTIBLE({
- if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
- errno = file->ops->write(file->inode, buf, count, file->f_pos);
- } else {
- errno = pcache_write(file->inode, buf, count, file->f_pos);
- }
- })
+ if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
+ errno = file->ops->write(file->inode, buf, count, file->f_pos);
+ } else {
+ errno = pcache_write(file->inode, buf, count, file->f_pos);
+ }
if (errno > 0) {
file->f_pos += errno;
int
vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
{
- if (!dnode || dnode->parent == dnode) {
+ if (!dnode) {
return 0;
}
return ENAMETOOLONG;
}
- size_t len = vfs_get_path(dnode->parent, buf, size, depth + 1);
+ size_t len = 0;
+
+ if (dnode->parent != dnode) {
+ len = vfs_get_path(dnode->parent, buf, size, depth + 1);
+ }
if (len >= size) {
return len;
}
- buf[len++] = VFS_PATH_DELIM;
+ if (!len || buf[len - 1] != VFS_PATH_DELIM) {
+ buf[len++] = VFS_PATH_DELIM;
+ }
size_t cpy_size = MIN(dnode->name.len, size - len);
strncpy(buf + len, dnode->name.value, cpy_size);
return 0;
}
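
To see what the delimiter guard buys, here is a standalone userspace sketch of the same recursion (illustrative only; it mirrors the control flow, not the kernel function's exact return contract):

#include <stdio.h>

struct node { const char* name; struct node* parent; };

/* root is its own parent; emit '/' only when not already present */
static size_t build(struct node* n, char* buf, size_t size)
{
    size_t len = 0;
    if (n->parent != n)
        len = build(n->parent, buf, size);
    if (!len || buf[len - 1] != '/')
        buf[len++] = '/';
    len += (size_t)snprintf(buf + len, size - len, "%s", n->name);
    return len;
}

int main(void)
{
    struct node root = { "", NULL }, usr = { "usr", &root }, bin = { "bin", &usr };
    root.parent = &root;

    char buf[64];
    build(&bin, buf, sizeof(buf));
    printf("%s\n", buf);   /* "/usr/bin", not "//usr/bin" */
    return 0;
}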
+int
+vfs_get_dtype(int itype)
+{
+ switch (itype) {
+ case VFS_IFDIR:
+ return DT_DIR;
+ case VFS_IFSYMLINK:
+ return DT_SYMLINK;
+ default:
+ return DT_PIPE;
+ }
+}
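
Presumably this lets filesystem drivers report the real entry type when invoking the readdir callback, mirroring the dot/dot-dot calls above (hypothetical call site; the dnode/inode fields used are assumptions):

__vfs_readdir_callback(dctx, dnode->name.value, dnode->name.len,
                       vfs_get_dtype(dnode->inode->itype));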
+
__DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
{
int errno;
return DO_STATUS(errno);
}
+void
+vfs_ref_dnode(struct v_dnode* dnode)
+{
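+ // pin the dnode and keep its mount point busy so it cannot be unmounted from under us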
+ atomic_fetch_add(&dnode->ref_count, 1);
+ mnt_mkbusy(dnode->mnt);
+}
+
+void
+vfs_unref_dnode(struct v_dnode* dnode)
+{
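+ // drop the pin taken by vfs_ref_dnode() and let the mount point go idle again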
+ atomic_fetch_sub(&dnode->ref_count, 1);
+ mnt_chillax(dnode->mnt);
+}
+
int
-__vfs_do_chdir(struct v_dnode* dnode)
+vfs_do_chdir(struct proc_info* proc, struct v_dnode* dnode)
{
int errno = 0;
goto done;
}
- if (__current->cwd) {
- atomic_fetch_sub(&__current->cwd->ref_count, 1);
- mnt_chillax(__current->cwd->mnt);
+ if (proc->cwd) {
+ vfs_unref_dnode(proc->cwd);
}
- atomic_fetch_add(&dnode->ref_count, 1);
- mnt_mkbusy(dnode->mnt);
- __current->cwd = dnode;
+ vfs_ref_dnode(dnode);
+ proc->cwd = dnode;
unlock_dnode(dnode);
goto done;
}
- errno = __vfs_do_chdir(dnode);
+ errno = vfs_do_chdir(__current, dnode);
done:
return DO_STATUS(errno);
goto done;
}
- errno = __vfs_do_chdir(fd_s->file->dnode);
+ errno = vfs_do_chdir(__current, fd_s->file->dnode);
done:
return DO_STATUS(errno);