assert(dnode->ref_count == 1);
llist_delete(&dnode->siblings);
+ llist_delete(&dnode->aka_list);
hlist_delete(&dnode->hash_list);
dnode->parent = NULL;
vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
{
if (assign_to->inode) {
+ llist_delete(&assign_to->aka_list);
assign_to->inode->link_count--;
}
+ llist_append(&inode->aka_dnodes, &assign_to->aka_list);
assign_to->inode = inode;
inode->link_count++;
}
}
int
-vfs_close(struct v_file* file)
+vfs_pclose(struct v_file* file, pid_t pid)
{
int errno = 0;
if (file->ref_count > 1) {
atomic_fetch_sub(&file->dnode->ref_count, 1);
file->inode->open_count--;
- // Remove dead lock.
+ // Prevent deadlock.
// This happened when process is terminated while blocking on read.
// In that case, the process is still holding the inode lock and it will
// never get released.
- // FIXME is this a good solution?
/*
- * Consider two process both open the same file both with fd=x.
+ * The unlocking should also include an ownership check.
+ *
+ * To see why, consider two process both open the same file both with
+ * fd=x.
* Process A: busy on reading x
* Process B: do nothing with x
- * Assume that, after a very short time, process B get terminated while
- * process A is still busy in it's reading business. By this design, the
- * inode lock of this file x is get released by B rather than A. And
- * this will cause a probable race condition on A if other process is
- * writing to this file later after B exit.
- *
- * A possible solution is to add a owner identification in the lock
- * context, so only the lock holder can do the release.
+ * Assume that, after a very short time, process B gets terminated
+ * while process A is still busy with its reading. By this design,
+ * the inode lock of this file x gets released by B rather than A,
+ * and this will cause a probable race condition on A if another
+ * process writes to this file after B exits.
*/
if (mutex_on_hold(&file->inode->lock)) {
- unlock_inode(file->inode);
+ mutex_unlock_for(&file->inode->lock, pid);
}
mnt_chillax(file->dnode->mnt);
return errno;
}
+int
+vfs_close(struct v_file* file)
+{
+ return vfs_pclose(file, __current->pid);
+}
+
int
vfs_fsync(struct v_file* file)
{
memset(dnode, 0, sizeof(*dnode));
llist_init_head(&dnode->children);
llist_init_head(&dnode->siblings);
+ llist_init_head(&dnode->aka_list);
mutex_init(&dnode->lock);
dnode->ref_count = ATOMIC_VAR_INIT(0);
memset(inode, 0, sizeof(*inode));
mutex_init(&inode->lock);
llist_init_head(&inode->xattrs);
+ llist_init_head(&inode->aka_dnodes);
sb->ops.init_inode(sb, inode);
int
vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
{
- if (!dnode || dnode->parent == dnode) {
+ if (!dnode) {
return 0;
}
return ENAMETOOLONG;
}
- size_t len = vfs_get_path(dnode->parent, buf, size, depth + 1);
+ size_t len = 0;
+
+ if (dnode->parent != dnode) {
+ len = vfs_get_path(dnode->parent, buf, size, depth + 1);
+ }
if (len >= size) {
return len;
}
- buf[len++] = VFS_PATH_DELIM;
+ if (!len || buf[len - 1] != VFS_PATH_DELIM) {
+ buf[len++] = VFS_PATH_DELIM;
+ }
size_t cpy_size = MIN(dnode->name.len, size - len);
strncpy(buf + len, dnode->name.value, cpy_size);