we can do a similar thing in Lunaix: a block-device emulation
layered above a regular file when it is mounted.
10. (device) device number (dev_t) allocation
- [good idea] <class>:<subclass>:<uniq_id> composition
+ [good idea] <class>:<subclass>:<uniq_id> composition (CHECKED)
*/
#include <klibc/string.h>
#include <lunaix/foptions.h>
#include <lunaix/fs.h>
#include <lunaix/mm/cake.h>
-#include <lunaix/mm/page.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/process.h>
#include <lunaix/spike.h>
#include <lunaix/fs/twifs.h>
-#include <usr/sys/dirent_defs.h>
+#include <usr/lunaix/dirent.h>
+
+#define INODE_ACCESSED 0
+#define INODE_MODIFY 1
static struct cake_pile* dnode_pile;
static struct cake_pile* inode_pile;
static struct cake_pile* superblock_pile;
static struct cake_pile* fd_pile;
-struct v_dnode* vfs_sysroot;
-static struct hbucket* dnode_cache;
+struct v_dnode* vfs_sysroot = NULL;
struct lru_zone *dnode_lru, *inode_lru;
struct hstr vfs_dot = HSTR(".", 1);
struct hstr vfs_empty = HSTR("", 0);
-struct v_superblock*
-vfs_sb_alloc();
-
-void
-vfs_sb_free(struct v_superblock* sb);
-
static int
__vfs_try_evict_dnode(struct lru_node* obj);
superblock_pile =
cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
- dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
-
- dnode_lru = lru_new_zone(__vfs_try_evict_dnode);
- inode_lru = lru_new_zone(__vfs_try_evict_inode);
+ dnode_lru = lru_new_zone("vfs_dnode", __vfs_try_evict_dnode);
+ inode_lru = lru_new_zone("vfs_inode", __vfs_try_evict_inode);
hstr_rehash(&vfs_ddot, HSTR_FULL_HASH);
hstr_rehash(&vfs_dot, HSTR_FULL_HASH);
// 创建一个根dnode。
vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
vfs_sysroot->parent = vfs_sysroot;
- atomic_fetch_add(&vfs_sysroot->ref_count, 1);
+
+ vfs_ref_dnode(vfs_sysroot);
+ lru_remove(dnode_lru, &vfs_sysroot->lru);
+}
+
+void
+vfs_vncache_init(struct vncache* cache)
+{
+ cache->pool = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
+ rwlock_init(&cache->lock);
+}
+
+void
+vfs_vncache_free(struct vncache* cache)
+{
+ // clear all other reader/writer
+ rwlock_begin_write(&cache->lock);
+ vfree(cache->pool);
+
+ // already freed, so as the lock
+}
+
+void
+vfs_vncache_add(struct vncache* cache, size_t key, struct hlist_node* node)
+{
+ struct hbucket* slot;
+
+ cache_atomic_write(cache,
+ {
+ slot = &cache->pool[key & VFS_HASH_MASK];
+ hlist_delete(node);
+ hlist_add(&slot->head, node);
+ });
}
-inline struct hbucket*
-__dcache_hash(struct v_dnode* parent, u32_t* hash)
+static inline struct hbucket*
+__dcache_hash_nolock(struct v_dnode* parent, u32_t* hash)
{
- u32_t _hash = *hash;
- // 确保低位更加随机
+ struct v_superblock* sb;
+ struct hbucket* d_cache;
+ u32_t _hash;
+
+ sb = parent->super_block;
+
+ _hash = *hash;
_hash = _hash ^ (_hash >> VFS_HASHBITS);
- // 与parent的指针值做加法,来减小碰撞的可能性。
- _hash += (u32_t)parent;
+ _hash += (u32_t)__ptr(parent);
+
*hash = _hash;
- return &dnode_cache[_hash & VFS_HASH_MASK];
+ return &sb->d_cache.pool[_hash & VFS_HASH_MASK];
+}
+
+static inline int
+__sync_inode_nolock(struct v_inode* inode)
+{
+ pcache_commit_all(inode);
+
+ int errno = ENOTSUP;
+ if (inode->ops->sync) {
+ errno = inode->ops->sync(inode);
+ }
+
+ return errno;
}
struct v_dnode*
vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
{
+ u32_t hash;
+ struct hbucket* slot;
+ struct v_dnode *pos, *n;
+ struct vncache *dcache;
+
if (!str->len || HSTR_EQ(str, &vfs_dot))
return parent;
return parent->parent;
}
- u32_t hash = str->hash;
- struct hbucket* slot = __dcache_hash(parent, &hash);
+ hash = str->hash;
+ dcache = dnode_cache(parent);
+
+ vncache_lock_read(dcache);
- struct v_dnode *pos, *n;
+ slot = __dcache_hash_nolock(parent, &hash);
hashtable_bucket_foreach(slot, pos, n, hash_list)
{
- if (pos->name.hash == hash) {
- return pos;
+ if (pos->name.hash != hash || pos->parent != parent) {
+ continue;
}
+
+ vncache_unlock_read(dcache);
+ return pos;
}
+
+ vncache_unlock_read(dcache);
return NULL;
}
+static void
+__vfs_touch_inode(struct v_inode* inode, const int type)
+{
+ if (type == INODE_MODIFY) {
+ inode->mtime = clock_unixtime();
+ }
+
+ else if (type == INODE_ACCESSED) {
+ inode->atime = clock_unixtime();
+ }
+
+ lru_use_one(inode_lru, &inode->lru);
+}
+
void
vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
{
+ struct hbucket* bucket;
+ struct vncache* cache;
+
assert(parent);
+ assert(locked_node(parent));
- atomic_fetch_add(&dnode->ref_count, 1);
+ dnode->ref_count = 1;
dnode->parent = parent;
llist_append(&parent->children, &dnode->siblings);
- struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
- hlist_add(&bucket->head, &dnode->hash_list);
+ cache_atomic_write(dnode_cache(parent),
+ {
+ bucket = __dcache_hash_nolock(parent, &dnode->name.hash);
+ hlist_add(&bucket->head, &dnode->hash_list);
+ });
}
void
llist_delete(&dnode->siblings);
llist_delete(&dnode->aka_list);
- hlist_delete(&dnode->hash_list);
+ lru_remove(dnode_lru, &dnode->lru);
+
+ cache_atomic_write(dnode_cache(dnode),
+ {
+ hlist_delete(&dnode->hash_list);
+ });
dnode->parent = NULL;
- atomic_fetch_sub(&dnode->ref_count, 1);
+ dnode->ref_count = 0;
}
void
vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
{
assert(new_parent);
+ assert(locked_node(new_parent));
- hstr_rehash(&dnode->name, HSTR_FULL_HASH);
- vfs_dcache_remove(dnode);
- vfs_dcache_add(new_parent, dnode);
+ dnode_atomic(dnode,
+ {
+ hstr_rehash(&dnode->name, HSTR_FULL_HASH);
+ vfs_dcache_remove(dnode);
+ vfs_dcache_add(new_parent, dnode);
+ });
}
int
vfs_open(struct v_dnode* dnode, struct v_file** file)
{
- if (!dnode->inode || !dnode->inode->ops->open) {
+ struct v_inode* inode = dnode->inode;
+
+ if (!inode || !inode->ops->open) {
return ENOTSUP;
}
- struct v_inode* inode = dnode->inode;
-
lock_inode(inode);
struct v_file* vfile = cake_grab(file_pile);
vfile->dnode = dnode;
vfile->inode = inode;
- vfile->ref_count = ATOMIC_VAR_INIT(1);
+ vfile->ref_count = 1;
vfile->ops = inode->default_fops;
- if ((inode->itype & VFS_IFFILE) && !inode->pg_cache) {
+ if (check_regfile_node(inode) && !inode->pg_cache) {
struct pcache* pcache = vzalloc(sizeof(struct pcache));
pcache_init(pcache);
pcache->master = inode;
if (errno) {
cake_release(file_pile, vfile);
} else {
- atomic_fetch_add(&dnode->ref_count, 1);
+ vfs_ref_dnode(dnode);
inode->open_count++;
- mnt_mkbusy(dnode->mnt);
*file = vfile;
}
void
vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
{
+ lock_dnode(assign_to);
+
if (assign_to->inode) {
llist_delete(&assign_to->aka_list);
assign_to->inode->link_count--;
}
+
llist_append(&inode->aka_dnodes, &assign_to->aka_list);
assign_to->inode = inode;
inode->link_count++;
+
+ unlock_dnode(assign_to);
}
int
vfs_link(struct v_dnode* to_link, struct v_dnode* name)
{
int errno;
+ struct v_inode* inode;
+
+ inode = to_link->inode;
if ((errno = vfs_check_writable(to_link))) {
return errno;
}
- lock_inode(to_link->inode);
+ lock_inode(inode);
+
if (to_link->super_block->root != name->super_block->root) {
errno = EXDEV;
- } else if (!to_link->inode->ops->link) {
+ } else if (!inode->ops->link) {
errno = ENOTSUP;
- } else if (!(errno = to_link->inode->ops->link(to_link->inode, name))) {
- vfs_assign_inode(name, to_link->inode);
+ } else if (!(errno = inode->ops->link(inode, name))) {
+ vfs_assign_inode(name, inode);
}
- unlock_inode(to_link->inode);
+
+ unlock_inode(inode);
return errno;
}
int
vfs_pclose(struct v_file* file, pid_t pid)
{
+ struct v_inode* inode;
int errno = 0;
- if (file->ref_count > 1) {
- atomic_fetch_sub(&file->ref_count, 1);
- } else if (!(errno = file->ops->close(file))) {
- atomic_fetch_sub(&file->dnode->ref_count, 1);
- file->inode->open_count--;
-
- /*
- * Prevent dead lock.
- * This happened when process is terminated while blocking on read.
- * In that case, the process is still holding the inode lock and it
- will never get released.
- * The unlocking should also include ownership check.
- *
- * To see why, consider two process both open the same file both with
- * fd=x.
- * Process A: busy on reading x
- * Process B: do nothing with x
- * Assuming that, after a very short time, process B get terminated
- * while process A is still busy in it's reading business. By this
- * design, the inode lock of this file x is get released by B rather
- * than A. And this will cause a probable race condition on A if other
- * process is writing to this file later after B exit.
- */
- if (mutex_on_hold(&file->inode->lock)) {
- mutex_unlock_for(&file->inode->lock, pid);
- }
- mnt_chillax(file->dnode->mnt);
- pcache_commit_all(file->inode);
- cake_release(file_pile, file);
+ inode = file->inode;
+
+ if (vfs_check_duped_file(file)) {
+ vfs_unref_file(file);
+ return 0;
}
+
+ /*
+ * Prevent dead lock.
+ * This happened when process is terminated while blocking on read.
+ * In that case, the process is still holding the inode lock and it
+ will never get released.
+ * The unlocking should also include ownership check.
+ *
+ * To see why, consider two process both open the same file both with
+ * fd=x.
+ * Process A: busy on reading x
+ * Process B: do nothing with x
+ * Assuming that, after a very short time, process B get terminated
+ * while process A is still busy in it's reading business. By this
+ * design, the inode lock of this file x is get released by B rather
+ * than A. And this will cause a probable race condition on A if other
+ * process is writing to this file later after B exit.
+ */
+ mutex_unlock_for(&inode->lock, pid);
+
+ // now regain lock for inode syncing
+
+ lock_inode(inode);
+
+ if ((errno = file->ops->close(file))) {
+ goto done;
+ }
+
+ vfs_unref_dnode(file->dnode);
+ cake_release(file_pile, file);
+
+ pcache_commit_all(inode);
+ inode->open_count--;
+
+ if (!inode->open_count) {
+ __sync_inode_nolock(inode);
+ }
+
+done:
+ unlock_inode(inode);
return errno;
}
cake_release(fd_pile, fd);
}
int
vfs_isync(struct v_inode* inode)
{
    int errno;

    // serialise against concurrent users of this inode
    lock_inode(inode);
    errno = __sync_inode_nolock(inode);
    unlock_inode(inode);

    return errno;
}
+
int
vfs_fsync(struct v_file* file)
{
return errno;
}
- lock_inode(file->inode);
-
- pcache_commit_all(file->inode);
-
- errno = ENOTSUP;
- if (file->ops->sync) {
- errno = file->ops->sync(file);
- }
-
- unlock_inode(file->inode);
-
- return errno;
+ return vfs_isync(file->inode);
}
int
vfs_alloc_fdslot(int* fd)
{
+ struct v_fdtable* fdtab;
+
+ fdtab = __current->fdtable;
+ lock_fdtable(fdtab);
+
for (size_t i = 0; i < VFS_MAX_FD; i++) {
- if (!__current->fdtable->fds[i]) {
- *fd = i;
- return 0;
+ if (__current->fdtable->fds[i]) {
+ continue;
}
+
+ *fd = i;
+ unlock_fdtable(fdtab);
+ return 0;
}
+
+ unlock_fdtable(fdtab);
return EMFILE;
}
struct v_superblock* sb = cake_grab(superblock_pile);
memset(sb, 0, sizeof(*sb));
llist_init_head(&sb->sb_list);
- sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
+
+ vfs_vncache_init(&sb->i_cache);
+ vfs_vncache_init(&sb->d_cache);
+
+ sb->ref_count = 1;
return sb;
}
void
-vfs_sb_free(struct v_superblock* sb)
+vfs_sb_ref(struct v_superblock* sb)
+{
+ sb->ref_count++;
+}
+
+void
+vfs_sb_unref(struct v_superblock* sb)
{
- vfree(sb->i_cache);
+ assert(sb->ref_count);
+
+ sb->ref_count--;
+ if (likely(sb->ref_count)) {
+ return;
+ }
+
+ if (sb->ops.release) {
+ sb->ops.release(sb);
+ }
+
+ vfs_vncache_free(&sb->i_cache);
+ vfs_vncache_free(&sb->d_cache);
+
cake_release(superblock_pile, sb);
}
-static int
+static inline bool
+__dnode_evictable(struct v_dnode* dnode)
+{
+ return dnode->ref_count == 1
+ && llist_empty(&dnode->children);
+}
+
+static bool
__vfs_try_evict_dnode(struct lru_node* obj)
{
struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);
- if (!dnode->ref_count) {
- vfs_d_free(dnode);
- return 1;
+ if (mutex_on_hold(&dnode->lock))
+ return false;
+
+ if (!__dnode_evictable(dnode)) {
+ return false;
}
- return 0;
+
+ vfs_d_free(dnode);
+ return true;
}
-static int
+static bool
__vfs_try_evict_inode(struct lru_node* obj)
{
struct v_inode* inode = container_of(obj, struct v_inode, lru);
llist_init_head(&dnode->aka_list);
mutex_init(&dnode->lock);
- dnode->ref_count = ATOMIC_VAR_INIT(0);
dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
hstrcpy(&dnode->name, name);
if (parent) {
- dnode->super_block = parent->super_block;
+ vfs_d_assign_sb(dnode, parent->super_block);
dnode->mnt = parent->mnt;
}
vfs_d_free(struct v_dnode* dnode)
{
assert(dnode->ref_count == 1);
-
+
if (dnode->inode) {
assert(dnode->inode->link_count > 0);
dnode->inode->link_count--;
}
vfs_dcache_remove(dnode);
+
// Make sure the children de-referencing their parent.
// With lru presented, the eviction will be propagated over the entire
// detached subtree eventually
vfs_dcache_remove(pos);
}
- vfree(dnode->name.value);
+ if (dnode->destruct) {
+ dnode->destruct(dnode);
+ }
+
+ vfs_sb_unref(dnode->super_block);
+
+ vfree((void*)dnode->name.value);
cake_release(dnode_pile, dnode);
}
struct v_inode*
vfs_i_find(struct v_superblock* sb, u32_t i_id)
{
    struct hbucket* bucket;
    struct v_inode *pos, *n;
    struct v_inode* hit = NULL;

    // look up the inode by id in this superblock's inode cache;
    // a hit also refreshes its LRU position
    cache_atomic_read(&sb->i_cache,
    {
        bucket = &sb->i_cache.pool[i_id & VFS_HASH_MASK];

        hashtable_bucket_foreach(bucket, pos, n, hash_list)
        {
            if (pos->id != i_id) {
                continue;
            }

            lru_use_one(inode_lru, &pos->lru);
            hit = pos;
            break;
        }
    });

    return hit;
}
void
vfs_i_addhash(struct v_inode* inode)
{
    // (re)insert this inode into its superblock's inode cache,
    // keyed by inode id
    vfs_vncache_add(inode_cache(inode), inode->id, &inode->hash_list);
}
struct v_inode*
sb->ops.init_inode(sb, inode);
- inode->sb = sb;
inode->ctime = clock_unixtime();
inode->atime = inode->ctime;
inode->mtime = inode->ctime;
-done:
+ vfs_i_assign_sb(inode, sb);
lru_use_one(inode_lru, &inode->lru);
+
return inode;
}
pcache_release(inode->pg_cache);
vfree(inode->pg_cache);
}
+
// we don't need to sync inode.
// If an inode can be free, then it must be properly closed.
// Hence it must be synced already!
if (inode->destruct) {
inode->destruct(inode);
}
+
+ vfs_sb_unref(inode->sb);
+
hlist_delete(&inode->hash_list);
+ lru_remove(inode_lru, &inode->lru);
+
cake_release(inode_pile, inode);
}
/* ---- System call definition and support ---- */
-#define FLOCATE_CREATE_EMPTY 1
+// make a new name when not exists
+#define FLOC_MAYBE_MKNAME 1
+
+// name must be non-exist and made.
+#define FLOC_MKNAME 2
+
+// no follow symlink
+#define FLOC_NOFOLLOW 4
int
vfs_getfd(int fd, struct v_fd** fd_s)
{
- if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {
- return 0;
+ struct v_fdtable* fdtab;
+
+ if (!TEST_FD(fd)) {
+ return EBADF;
}
- return EBADF;
+
+ fdtab = __current->fdtable;
+
+ lock_fdtable(fdtab);
+ *fd_s = __current->fdtable->fds[fd];
+ unlock_fdtable(fdtab);
+
+ return !*fd_s ? EBADF : 0;
}
-int
+static int
+__vfs_mknod(struct v_inode* parent, struct v_dnode* dnode,
+ unsigned int itype, dev_t* dev)
+{
+ int errno;
+
+ errno = parent->ops->create(parent, dnode, itype);
+ if (errno) {
+ return errno;
+ }
+
+ return 0;
+}
+
+struct file_locator {
+ struct v_dnode* dir;
+ struct v_dnode* file;
+ bool fresh;
+};
+
+/**
+ * @brief unlock the file locator (floc) if possible.
+ * If the file to be located if not exists, and
+ * any FLOC_*MKNAME flag is set, then the parent
+ * dnode will be locked until the file has been properly
+ * finalised by subsequent logic.
+ *
+ * @param floc
+ */
+static inline void
+__floc_try_unlock(struct file_locator* floc)
+{
+ if (floc->fresh) {
+ assert(floc->dir);
+ unlock_dnode(floc->dir);
+ }
+}
+
+static int
__vfs_try_locate_file(const char* path,
- struct v_dnode** fdir,
- struct v_dnode** file,
+ struct file_locator* floc,
int options)
{
char name_str[VFS_NAME_MAXLEN];
+ struct v_dnode *fdir, *file;
struct hstr name = HSTR(name_str, 0);
- int errno;
+ int errno, woption = 0;
+ if ((options & FLOC_NOFOLLOW)) {
+ woption |= VFS_WALK_NOFOLLOW;
+ options &= ~FLOC_NOFOLLOW;
+ }
+
+ floc->fresh = false;
name_str[0] = 0;
- if ((errno = vfs_walk_proc(path, fdir, &name, VFS_WALK_PARENT))) {
+ errno = vfs_walk_proc(path, &fdir, &name, woption | VFS_WALK_PARENT);
+ if (errno) {
return errno;
}
- errno = vfs_walk(*fdir, name.value, file, NULL, 0);
- if (errno != ENOENT || !(options & FLOCATE_CREATE_EMPTY)) {
- return errno;
+ lock_dnode(fdir);
+
+ errno = vfs_walk(fdir, name.value, &file, NULL, woption);
+
+ if (errno && errno != ENOENT) {
+ goto error;
}
- struct v_dnode* parent = *fdir;
- struct v_dnode* file_new = vfs_d_alloc(parent, &name);
+ if (!errno && (options & FLOC_MKNAME)) {
+ errno = EEXIST;
+ goto error;
+ }
+
+ if (!errno) {
+ // the file present, no need to hold the directory lock
+ unlock_dnode(fdir);
+ goto done;
+ }
- if (!file_new) {
- return ENOMEM;
+ // errno == ENOENT
+ if (!options) {
+ goto error;
}
- lock_dnode(parent);
+ errno = vfs_check_writable(fdir);
+ if (errno) {
+ goto error;
+ }
- if (!(errno = parent->inode->ops->create(parent->inode, file_new))) {
- vfs_dcache_add(parent, file_new);
- *file = file_new;
- } else {
- vfs_d_free(file_new);
+ floc->fresh = true;
+
+ file = vfs_d_alloc(fdir, &name);
+
+ if (!file) {
+ errno = ENOMEM;
+ goto error;
}
- unlock_dnode(parent);
+ vfs_dcache_add(fdir, file);
+done:
+ floc->dir = fdir;
+ floc->file = file;
+
+ return errno;
+
+error:
+ unlock_dnode(fdir);
return errno;
}
+
+static bool
+__check_unlinkable(struct v_dnode* dnode)
+{
+ int acl;
+ bool wr_self, wr_parent;
+ struct v_dnode* parent;
+
+ parent = dnode->parent;
+ acl = dnode->inode->acl;
+
+ wr_self = check_allow_write(dnode->inode);
+ wr_parent = check_allow_write(parent->inode);
+
+ if (!fsacl_test(acl, svtx)) {
+ return wr_self;
+ }
+
+ if (current_euid() == dnode->inode->uid) {
+ return true;
+ }
+
+ return wr_self && wr_parent;
+}
+
int
vfs_do_open(const char* path, int options)
{
- int errno, fd;
+ int errno, fd, loptions = 0;
struct v_dnode *dentry, *file;
struct v_file* ofile = NULL;
+ struct file_locator floc;
+ struct v_inode* inode;
- errno = __vfs_try_locate_file(
- path, &dentry, &file, (options & FO_CREATE) ? FLOCATE_CREATE_EMPTY : 0);
+ if ((options & FO_CREATE)) {
+ loptions |= FLOC_MAYBE_MKNAME;
+ } else if ((options & FO_NOFOLLOW)) {
+ loptions |= FLOC_NOFOLLOW;
+ }
+
+ errno = __vfs_try_locate_file(path, &floc, loptions);
- if (!errno && !(errno = vfs_alloc_fdslot(&fd))) {
+ if (errno || (errno = vfs_alloc_fdslot(&fd))) {
+ return errno;
+ }
- if (errno || (errno = vfs_open(file, &ofile))) {
+ file = floc.file;
+ dentry = floc.dir;
+
+ if (floc.fresh) {
+ errno = __vfs_mknod(dentry->inode, file, VFS_IFFILE, NULL);
+ if (errno) {
+ vfs_d_free(file);
+ __floc_try_unlock(&floc);
return errno;
}
- struct v_fd* fd_s = cake_grab(fd_pile);
- memset(fd_s, 0, sizeof(*fd_s));
+ __floc_try_unlock(&floc);
+ }
+
- ofile->f_pos = ofile->inode->fsize & -((options & FO_APPEND) != 0);
- fd_s->file = ofile;
- fd_s->flags = options;
- __current->fdtable->fds[fd] = fd_s;
- return fd;
+ if ((errno = vfs_open(file, &ofile))) {
+ return errno;
}
- return errno;
+ inode = ofile->inode;
+ lock_inode(inode);
+
+ struct v_fd* fd_s = cake_grab(fd_pile);
+ memset(fd_s, 0, sizeof(*fd_s));
+
+ if ((options & O_TRUNC)) {
+ file->inode->fsize = 0;
+ }
+
+ if (vfs_get_dtype(inode->itype) == DT_DIR) {
+ ofile->f_pos = 0;
+ }
+
+ fd_s->file = ofile;
+ fd_s->flags = options;
+ __current->fdtable->fds[fd] = fd_s;
+
+ unlock_inode(inode);
+
+ return fd;
}
__DEFINE_LXSYSCALL2(int, open, const char*, path, int, options)
const int dtype)
{
struct lx_dirent* dent = (struct lx_dirent*)dctx->cb_data;
- strncpy(dent->d_name, name, DIRENT_NAME_MAX_LEN);
+ strncpy(dent->d_name, name, MIN(len, DIRENT_NAME_MAX_LEN));
dent->d_nlen = len;
dent->d_type = dtype;
}
lock_inode(inode);
- if (!(inode->itype & VFS_IFDIR)) {
+ if (!check_directory_node(inode)) {
errno = ENOTDIR;
- } else {
- struct dir_context dctx =
- (struct dir_context){ .cb_data = dent,
- .index = dent->d_offset,
- .read_complete_callback =
- __vfs_readdir_callback };
- errno = 1;
- if (dent->d_offset == 0) {
- __vfs_readdir_callback(&dctx, vfs_dot.value, vfs_dot.len, DT_DIR);
- } else if (dent->d_offset == 1) {
- __vfs_readdir_callback(&dctx, vfs_ddot.value, vfs_ddot.len, DT_DIR);
- } else {
- dctx.index -= 2;
- if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {
- unlock_inode(inode);
- goto done;
- }
- }
- dent->d_offset++;
+ goto unlock;
+ }
+
+ if (!check_allow_read(inode)) {
+ errno = EPERM;
+ goto unlock;
}
+ struct dir_context dctx = (struct dir_context) {
+ .cb_data = dent,
+ .read_complete_callback = __vfs_readdir_callback
+ };
+
+ if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {
+ goto unlock;
+ }
+
+ dent->d_offset++;
+ fd_s->file->f_pos++;
+
+unlock:
unlock_inode(inode);
done:
{
int errno = 0;
struct v_fd* fd_s;
+ struct v_inode* inode;
+
if ((errno = vfs_getfd(fd, &fd_s))) {
goto done;
}
struct v_file* file = fd_s->file;
- if ((file->inode->itype & VFS_IFDIR)) {
+ if (check_directory_node(file->inode)) {
errno = EISDIR;
goto done;
}
- lock_inode(file->inode);
+ if (!check_allow_read(file->inode)) {
+ errno = EPERM;
+ goto done;
+ }
+
+ inode = file->inode;
+ lock_inode(inode);
- file->inode->atime = clock_unixtime();
+ __vfs_touch_inode(inode, INODE_ACCESSED);
- if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
- errno = file->ops->read(file->inode, buf, count, file->f_pos);
+ if (check_seqdev_node(inode) || (fd_s->flags & FO_DIRECT)) {
+ errno = file->ops->read(inode, buf, count, file->f_pos);
} else {
- errno = pcache_read(file->inode, buf, count, file->f_pos);
+ errno = pcache_read(inode, buf, count, file->f_pos);
}
if (errno > 0) {
file->f_pos += errno;
- unlock_inode(file->inode);
+ unlock_inode(inode);
return errno;
}
- unlock_inode(file->inode);
+ unlock_inode(inode);
done:
return DO_STATUS(errno);
goto done;
}
+ struct v_inode* inode;
struct v_file* file = fd_s->file;
if ((errno = vfs_check_writable(file->dnode))) {
goto done;
}
- if ((file->inode->itype & VFS_IFDIR)) {
+ if (check_directory_node(file->inode)) {
errno = EISDIR;
goto done;
}
- lock_inode(file->inode);
+ inode = file->inode;
+ lock_inode(inode);
- file->inode->mtime = clock_unixtime();
+ __vfs_touch_inode(inode, INODE_MODIFY);
+ if ((fd_s->flags & O_APPEND)) {
+ file->f_pos = inode->fsize;
+ }
- if ((file->inode->itype & VFS_IFSEQDEV) || (fd_s->flags & FO_DIRECT)) {
- errno = file->ops->write(file->inode, buf, count, file->f_pos);
+ if (check_seqdev_node(inode) || (fd_s->flags & FO_DIRECT)) {
+ errno = file->ops->write(inode, buf, count, file->f_pos);
} else {
- errno = pcache_write(file->inode, buf, count, file->f_pos);
+ errno = pcache_write(inode, buf, count, file->f_pos);
}
if (errno > 0) {
file->f_pos += errno;
- unlock_inode(file->inode);
+ inode->fsize = MAX(inode->fsize, file->f_pos);
+
+ unlock_inode(inode);
return errno;
}
- unlock_inode(file->inode);
+ unlock_inode(inode);
done:
return DO_STATUS(errno);
}
struct v_file* file = fd_s->file;
+ struct v_inode* inode = file->inode;
if (!file->ops->seek) {
errno = ENOTSUP;
goto done;
}
- lock_inode(file->inode);
+ if (!check_allow_read(inode)) {
+ errno = EPERM;
+ goto done;
+ }
+
+ lock_inode(inode);
int overflow = 0;
int fpos = file->f_pos;
+
+ if (vfs_get_dtype(inode->itype) == DT_DIR) {
+ options = (options != FSEEK_END) ? options : FSEEK_SET;
+ }
+
switch (options) {
case FSEEK_CUR:
- overflow = __builtin_sadd_overflow((int)file->f_pos, offset, &fpos);
+ overflow = sadd_of((int)file->f_pos, offset, &fpos);
break;
case FSEEK_END:
- overflow =
- __builtin_sadd_overflow((int)file->inode->fsize, offset, &fpos);
+ overflow = sadd_of((int)inode->fsize, offset, &fpos);
break;
case FSEEK_SET:
fpos = offset;
break;
}
+
if (overflow) {
errno = EOVERFLOW;
- } else if (!(errno = file->ops->seek(file->inode, fpos))) {
- file->f_pos = fpos;
+ }
+ else {
+ errno = file->ops->seek(file, fpos);
}
- unlock_inode(file->inode);
+ unlock_inode(inode);
done:
return DO_STATUS(errno);
{
const char* link;
struct v_inode* inode = dnode->inode;
- if (inode->ops->read_symlink) {
- lock_inode(inode);
- int errno = inode->ops->read_symlink(inode, &link);
- strncpy(buf, link, size);
+ if (!check_symlink_node(inode)) {
+ return EINVAL;
+ }
- unlock_inode(inode);
- return errno;
+ if (!inode->ops->read_symlink) {
+ return ENOTSUP;
}
- return 0;
+
+ if (!check_allow_read(inode)) {
+ return EPERM;
+ }
+
+ lock_inode(inode);
+
+ int errno = inode->ops->read_symlink(inode, &link);
+ if (errno >= 0) {
+ strncpy(buf, link, MIN(size, (size_t)errno));
+ }
+
+ unlock_inode(inode);
+ return errno;
}
int
vfs_get_dtype(int itype)
{
- switch (itype) {
- case VFS_IFDIR:
- return DT_DIR;
- case VFS_IFSYMLINK:
- return DT_SYMLINK;
- default:
- return DT_PIPE;
+ int dtype = DT_FILE;
+ if (check_itype(itype, VFS_IFSYMLINK)) {
+ dtype |= DT_SYMLINK;
+ }
+
+ if (check_itype(itype, VFS_IFDIR)) {
+ dtype |= DT_DIR;
+ return dtype;
}
+
+ // TODO other types
+
+ return dtype;
+}
+
+struct v_fdtable*
+fdtable_create()
+{
+ struct v_fdtable* fdtab;
+
+ fdtab = vzalloc(sizeof(struct v_fdtable));
+ mutex_init(&fdtab->lock);
+
+ return fdtab;
+}
+
+void
+fdtable_copy(struct v_fdtable* dest, struct v_fdtable* src)
+{
+ lock_fdtable(dest);
+ lock_fdtable(src);
+
+ for (size_t i = 0; i < VFS_MAX_FD; i++) {
+ struct v_fd* fd = src->fds[i];
+ if (!fd)
+ continue;
+ vfs_dup_fd(fd, &dest->fds[i]);
+ }
+
+ unlock_fdtable(dest);
+ unlock_fdtable(src);
+}
+
+void
+fdtable_free(struct v_fdtable* table)
+{
+ assert(!mutex_on_hold(&table->lock));
+
+ vfree(table);
}
__DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
}
struct v_dnode* dnode;
- errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);
- if (errno >= 0) {
- return errno;
- }
+ dnode = fd_s->file->dnode;
+
+ lock_dnode(dnode);
+ errno = vfs_get_path(dnode, buf, size, 0);
+ unlock_dnode(dnode);
done:
return DO_STATUS(errno);
return DO_STATUS(errno);
}
-__DEFINE_LXSYSCALL4(int,
- readlinkat,
- int,
- dirfd,
- const char*,
- pathname,
- char*,
- buf,
- size_t,
- size)
+__DEFINE_LXSYSCALL4(
+ int, readlinkat, int, dirfd, const char*, pathname, char*, buf, size_t, size)
{
int errno;
struct v_fd* fd_s;
goto done;
}
+ pathname = pathname ? pathname : "";
+
struct v_dnode* dnode;
if (!(errno = vfs_walk(
fd_s->file->dnode, pathname, &dnode, NULL, VFS_WALK_NOFOLLOW))) {
lock_dnode(dnode);
+ if (!__check_unlinkable(dnode)) {
+ errno = EPERM;
+ goto done;
+ }
+
if ((errno = vfs_check_writable(dnode))) {
goto done;
}
lock_dnode(parent);
lock_inode(parent->inode);
- if ((dnode->inode->itype & VFS_IFDIR)) {
+ if (check_directory_node(dnode->inode)) {
errno = parent->inode->ops->rmdir(parent->inode, dnode);
if (!errno) {
vfs_dcache_remove(dnode);
__DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
{
- int errno = 0;
+ int errno;
+ struct hstr name;
+ struct v_inode* inode;
struct v_dnode *parent, *dir;
char name_value[VFS_NAME_MAXLEN];
- struct hstr name = HHSTR(name_value, 0, 0);
+
+ name = HHSTR(name_value, 0, 0);
if ((errno = vfs_walk_proc(path, &parent, &name, VFS_WALK_PARENT))) {
goto done;
}
+ if (!(errno = vfs_walk(parent, name_value, &dir, NULL, 0))) {
+ errno = EEXIST;
+ goto done;
+ }
+
if ((errno = vfs_check_writable(parent))) {
goto done;
}
goto done;
}
+ inode = parent->inode;
+
lock_dnode(parent);
- lock_inode(parent->inode);
+ lock_inode(inode);
if ((parent->super_block->fs->types & FSTYPE_ROFS)) {
errno = ENOTSUP;
- } else if (!parent->inode->ops->mkdir) {
+ } else if (!inode->ops->mkdir) {
errno = ENOTSUP;
- } else if (!(parent->inode->itype & VFS_IFDIR)) {
+ } else if (!check_directory_node(inode)) {
errno = ENOTDIR;
- } else if (!(errno = parent->inode->ops->mkdir(parent->inode, dir))) {
+ } else if (!(errno = inode->ops->mkdir(inode, dir))) {
vfs_dcache_add(parent, dir);
goto cleanup;
}
vfs_d_free(dir);
cleanup:
- unlock_inode(parent->inode);
+ unlock_inode(inode);
unlock_dnode(parent);
done:
return DO_STATUS(errno);
}
-int
+static int
__vfs_do_unlink(struct v_dnode* dnode)
{
int errno;
return EBUSY;
}
+ if (!__check_unlinkable(dnode)) {
+ return EPERM;
+ }
+
if ((errno = vfs_check_writable(dnode))) {
return errno;
}
if (inode->open_count) {
errno = EBUSY;
- } else if (!(inode->itype & VFS_IFDIR)) {
- // The underlying unlink implementation should handle
- // symlink case
- errno = inode->ops->unlink(inode);
+ } else if (!check_directory_node(inode)) {
+ errno = inode->ops->unlink(inode, dnode);
if (!errno) {
vfs_d_free(dnode);
}
__DEFINE_LXSYSCALL2(int, link, const char*, oldpath, const char*, newpath)
{
int errno;
- struct v_dnode *dentry, *to_link, *name_dentry, *name_file;
+ struct file_locator floc;
+ struct v_dnode *to_link, *name_file;
- errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
+ errno = __vfs_try_locate_file(oldpath, &floc, 0);
+ if (errno) {
+ goto done;
+ }
+
+ __floc_try_unlock(&floc);
+
+ to_link = floc.file;
+ errno = __vfs_try_locate_file(newpath, &floc, FLOC_MKNAME);
if (!errno) {
- errno = __vfs_try_locate_file(
- newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
- if (!errno) {
- errno = EEXIST;
- } else if (name_file) {
- errno = vfs_link(to_link, name_file);
- }
+ goto done;
+ }
+
+ name_file = floc.file;
+ errno = vfs_link(to_link, name_file);
+ if (errno) {
+ vfs_d_free(name_file);
}
+
+done:
+ __floc_try_unlock(&floc);
return DO_STATUS(errno);
}
memcpy(copied, old, sizeof(struct v_fd));
- atomic_fetch_add(&old->file->ref_count, 1);
+ vfs_ref_file(old->file);
*new = copied;
int
vfs_dup2(int oldfd, int newfd)
{
+ int errno;
+ struct v_fdtable* fdtab;
+ struct v_fd *oldfd_s, *newfd_s;
+
if (newfd == oldfd) {
return newfd;
}
- int errno;
- struct v_fd *oldfd_s, *newfd_s;
if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
goto done;
}
goto done;
}
- newfd_s = __current->fdtable->fds[newfd];
+ fdtab = __current->fdtable;
+ lock_fdtable(fdtab);
+
+ newfd_s = fdtab->fds[newfd];
if (newfd_s && (errno = vfs_close(newfd_s->file))) {
- goto done;
+ goto unlock_and_done;
}
- if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
- __current->fdtable->fds[newfd] = newfd_s;
- return newfd;
+ if ((errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
+ goto unlock_and_done;
}
+ fdtab->fds[newfd] = newfd_s;
+
+ unlock_fdtable(fdtab);
+ return newfd;
+
+unlock_and_done:
+ unlock_fdtable(fdtab);
+
done:
return DO_STATUS(errno);
}
return DO_STATUS(errno);
}
-__DEFINE_LXSYSCALL2(int,
- symlink,
- const char*,
- pathname,
- const char*,
- link_target)
+__DEFINE_LXSYSCALL2(
+ int, symlink, const char*, pathname, const char*, link_target)
{
int errno;
- struct v_dnode* dnode;
- if ((errno = vfs_walk_proc(pathname, &dnode, NULL, 0))) {
+ struct file_locator floc;
+ struct v_dnode *file;
+ struct v_inode *f_ino;
+
+ errno = __vfs_try_locate_file(pathname, &floc, FLOC_MKNAME);
+ if (errno) {
goto done;
}
- if (errno = vfs_check_writable(dnode)) {
+ file = floc.file;
+ errno = __vfs_mknod(floc.dir->inode, file, VFS_IFSYMLINK, NULL);
+ if (errno) {
+ vfs_d_free(file);
+ goto done;
+ }
+
+ f_ino = file->inode;
+
+ assert(f_ino);
+
+ errno = vfs_check_writable(file);
+ if (errno) {
goto done;
}
- if (!dnode->inode->ops->set_symlink) {
+ if (!f_ino->ops->set_symlink) {
errno = ENOTSUP;
goto done;
}
- lock_inode(dnode->inode);
+ lock_inode(f_ino);
- errno = dnode->inode->ops->set_symlink(dnode->inode, link_target);
+ errno = f_ino->ops->set_symlink(f_ino, link_target);
- unlock_inode(dnode->inode);
+ unlock_inode(f_ino);
done:
+ __floc_try_unlock(&floc);
return DO_STATUS(errno);
}
-void
-vfs_ref_dnode(struct v_dnode* dnode)
+static int
+vfs_do_chdir_nolock(struct proc_info* proc, struct v_dnode* dnode)
{
- atomic_fetch_add(&dnode->ref_count, 1);
- mnt_mkbusy(dnode->mnt);
-}
+ if (!check_directory_node(dnode->inode)) {
+ return ENOTDIR;
+ }
-void
-vfs_unref_dnode(struct v_dnode* dnode)
-{
- atomic_fetch_sub(&dnode->ref_count, 1);
- mnt_chillax(dnode->mnt);
+ if (proc->cwd) {
+ vfs_unref_dnode(proc->cwd);
+ }
+
+ vfs_ref_dnode(dnode);
+ proc->cwd = dnode;
+
+ return 0;
}
static int
vfs_do_chdir(struct proc_info* proc, struct v_dnode* dnode)
{
    int errno;

    // hold the dnode stable while the cwd swap happens
    lock_dnode(dnode);
    errno = vfs_do_chdir_nolock(proc, dnode);
    unlock_dnode(dnode);

    return errno;
}
goto done;
}
- errno = vfs_do_chdir(__current, dnode);
+ errno = vfs_do_chdir((struct proc_info*)__current, dnode);
done:
return DO_STATUS(errno);
goto done;
}
- errno = vfs_do_chdir(__current, fd_s->file->dnode);
+ errno = vfs_do_chdir((struct proc_info*)__current, fd_s->file->dnode);
+
+done:
+ return DO_STATUS(errno);
+}
+
+
+/*
+ * chroot(2): change the calling process's root directory to `path`.
+ * Also re-points cwd at the new root (which validates it is a directory).
+ */
+__DEFINE_LXSYSCALL1(int, chroot, const char*, path)
+{
+    int errno;
+    struct v_dnode* dnode;
+
+    /* Fixed: walk failure previously did `return errno;`, leaking the
+     * raw errno value instead of translating it through DO_STATUS like
+     * every other VFS syscall in this file. */
+    if ((errno = vfs_walk_proc(path, &dnode, NULL, 0))) {
+        goto done;
+    }
+
+    lock_dnode(dnode);
+
+    // cast matches the other vfs_do_chdir* call sites in this file
+    errno = vfs_do_chdir_nolock((struct proc_info*)__current, dnode);
+    if (errno) {
+        unlock_dnode(dnode);
+        goto done;
+    }
+
+    /* NOTE(review): the old root's reference is not dropped here and the
+     * new root shares the reference taken for cwd — confirm the intended
+     * refcount policy for proc->root. */
+    __current->root = dnode;
+
+    unlock_dnode(dnode);
+
+done:
+    return DO_STATUS(errno);
+}
}
- buf[len + 1] = '\0';
+ buf[len] = '\0';
ret_ptr = buf;
done:
- __current->k_status = errno;
+ syscall_result(errno);
return ret_ptr;
}
return 0;
}
- if (errno = vfs_check_writable(current)) {
+ if ((errno = vfs_check_writable(current))) {
return errno;
}
lock_dnode(current);
lock_dnode(target);
+
if (oldparent)
lock_dnode(oldparent);
if (newparent)
cleanup:
unlock_dnode(current);
+
if (oldparent)
unlock_dnode(oldparent);
if (newparent)
errno = vfs_do_rename(cur, target);
done:
- vfree(name.value);
+ vfree((void*)name.value);
+ return DO_STATUS(errno);
+}
+
+/*
+ * fstat(2): populate `stat` from the inode behind file descriptor `fd`.
+ * Encodes itype in the upper 16 bits of st_mode and ACL bits below;
+ * st_dev/st_rdev are composed from the Lunaix device identity tuple
+ * (<class>:<subclass>:<uniq_id> — see header note on dev_t allocation).
+ */
+__DEFINE_LXSYSCALL2(int, fstat, int, fd, struct file_stat*, stat)
+{
+    int errno = 0;
+    struct v_fd* fds;
+
+    if ((errno = vfs_getfd(fd, &fds))) {
+        goto done;
+    }
+
+    /* NOTE(review): inode fields are read without lock_inode(); confirm
+     * torn reads are acceptable for stat snapshots. */
+    struct v_inode* vino = fds->file->inode;
+    struct device* fdev = vino->sb->dev;
+
+    stat->st_ino = vino->id;
+    stat->st_blocks = vino->lb_usage;
+    stat->st_size = vino->fsize;
+    stat->st_blksize = vino->sb->blksize;
+    stat->st_nlink = vino->link_count;
+    stat->st_uid = vino->uid;
+    stat->st_gid = vino->gid;
+
+    stat->st_ctim = vino->ctime;
+    stat->st_atim = vino->atime;
+    stat->st_mtim = vino->mtime;
+
+    // itype in high half, ACL permission bits in low half
+    stat->st_mode = (vino->itype << 16) | vino->acl;
+
+    // preferred I/O granularity reported as one page
+    stat->st_ioblksize = PAGE_SIZE;
+
+    // for device nodes, resolve and report the underlying device identity
+    if (check_device_node(vino)) {
+        struct device* rdev = resolve_device(vino->data);
+        if (!rdev) {
+            errno = EINVAL;
+            goto done;
+        }
+
+        stat->st_rdev = (dev_t){.meta = rdev->ident.fn_grp,
+                                .unique = rdev->ident.unique,
+                                .index = dev_uid(rdev) };
+    }
+
+    // device backing the filesystem, if any (NULL for e.g. pseudo fs)
+    if (fdev) {
+        stat->st_dev = (dev_t){.meta = fdev->ident.fn_grp,
+                               .unique = fdev->ident.unique,
+                               .index = dev_uid(fdev) };
+    }
+
+done:
+    return DO_STATUS(errno);
+}
+
+/*
+ * fchmodat(2): change the ACL (mode bits) of the file named by `path`,
+ * resolved relative to directory fd `fd` per `flags`.
+ * Non-root callers may only set the rwx permission bits.
+ */
+__DEFINE_LXSYSCALL4(int, fchmodat, int, fd,
+                    const char*, path, int, mode, int, flags)
+{
+    int errno;
+    struct v_dnode *dnode;
+    struct v_inode* inode;
+
+    errno = vfs_walkat(fd, path, flags, &dnode);
+    if (errno) {
+        goto done;
+    }
+
+    /* Fixed: was `return errno;`, which skipped the DO_STATUS
+     * translation applied by every other error path in this syscall. */
+    errno = vfs_check_writable(dnode);
+    if (errno) {
+        goto done;
+    }
+
+    inode = dnode->inode;
+    lock_inode(inode);
+
+    // only root may set bits outside the rwx permission mask
+    if (!current_is_root()) {
+        mode = mode & FSACL_RWXMASK;
+    }
+
+    inode->acl = mode;
+    __vfs_touch_inode(inode, INODE_MODIFY);
+
+    unlock_inode(inode);
+
+done:
+    return DO_STATUS(errno);
+}
+
+/*
+ * fchownat(2): set the owning uid/gid of the file named by `path`,
+ * resolved relative to directory fd `fd` per `flags`.
+ */
+__DEFINE_LXSYSCALL5(int, fchownat, int, fd,
+                    const char*, path, uid_t, uid, gid_t, gid, int, flags)
+{
+    int errno;
+    struct v_dnode *dnode;
+    struct v_inode *inode;
+
+    errno = vfs_walkat(fd, path, flags, &dnode);
+    if (errno) {
+        goto done;
+    }
+
+    /* Fixed: was `return errno;`, which skipped the DO_STATUS
+     * translation applied by every other error path in this syscall. */
+    errno = vfs_check_writable(dnode);
+    if (errno) {
+        goto done;
+    }
+
+    inode = dnode->inode;
+    lock_inode(inode);
+
+    inode->uid = uid;
+    inode->gid = gid;
+    __vfs_touch_inode(inode, INODE_MODIFY);
+
+    unlock_inode(inode);
+
+done:
+    return DO_STATUS(errno);
+}
+
+/*
+ * faccessat(2): check whether the caller may access the file named by
+ * `path` (relative to fd `fd`) with the permissions in `amode`.
+ * AT_EACCESS selects the effective ids; otherwise the real ids are used.
+ */
+__DEFINE_LXSYSCALL4(int, faccessat, int, fd,
+                    const char*, path, int, amode, int, flags)
+{
+    int errno, acl;
+    struct v_dnode *dnode;
+    struct v_inode *inode;
+    struct user_scope* uscope;
+
+    uid_t tuid;
+    gid_t tgid;
+
+    errno = vfs_walkat(fd, path, flags, &dnode);
+    if (errno) {
+        goto done;
+    }
+
+    if ((flags & AT_EACCESS)) {
+        tuid = current_euid();
+        tgid = current_egid();
+    }
+    else {
+        uscope = current_user_scope();
+        tuid = uscope->ruid;
+        tgid = uscope->rgid;
+    }
+
+    inode = dnode->inode;
+
+    /* Fixed: access must be granted only when ALL requested bits are
+     * permitted. The previous `acl &= amode; if (!acl)` test succeeded
+     * when ANY single requested bit was allowed, and always failed for
+     * amode == F_OK (0) even though the successful walk above already
+     * proves existence. */
+    acl = inode->acl;
+    acl &= check_acl_between(inode->uid, inode->gid, tuid, tgid);
+    if ((acl & amode) != amode) {
+        errno = EACCESS;
+    }
+
+done:
+    return DO_STATUS(errno);
+}
\ No newline at end of file