#include <usr/lunaix/dirent_defs.h>
+#define INODE_ACCESSED 0
+#define INODE_MODIFY 1
+
static struct cake_pile* dnode_pile;
static struct cake_pile* inode_pile;
static struct cake_pile* file_pile;
static struct cake_pile* superblock_pile;
static struct cake_pile* fd_pile;
-struct v_dnode* vfs_sysroot;
-static struct hbucket* dnode_cache;
+struct v_dnode* vfs_sysroot = NULL;
struct lru_zone *dnode_lru, *inode_lru;
superblock_pile =
cake_new_pile("sb_cache", sizeof(struct v_superblock), 1, 0);
- dnode_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
-
dnode_lru = lru_new_zone("vfs_dnode", __vfs_try_evict_dnode);
inode_lru = lru_new_zone("vfs_inode", __vfs_try_evict_inode);
    // Create the root dnode.
vfs_sysroot = vfs_d_alloc(NULL, &vfs_empty);
vfs_sysroot->parent = vfs_sysroot;
- atomic_fetch_add(&vfs_sysroot->ref_count, 1);
+
+ vfs_ref_dnode(vfs_sysroot);
+ lru_remove(dnode_lru, &vfs_sysroot->lru);
+}
+
+/*
+ * Initialise a vncache: allocate a zeroed pool of VFS_HASHTABLE_SIZE
+ * hash buckets and set up the reader/writer lock that guards it.
+ */
+void
+vfs_vncache_init(struct vncache* cache)
+{
+    cache->pool = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
+    rwlock_init(&cache->lock);
+}
+
+/*
+ * Tear down a vncache. Taking the write lock first drains any
+ * concurrent readers/writers before the bucket pool is released.
+ */
+void
+vfs_vncache_free(struct vncache* cache)
+{
+    // wait out all other readers/writers before freeing the pool
+    rwlock_begin_write(&cache->lock);
+    vfree(cache->pool);
+
+    // pool is freed; the lock is embedded in *cache and dies with it,
+    // so it is intentionally never released here.
+    // NOTE(review): any later lock attempt on this cache will hang —
+    // confirm the cache is never touched after this call.
+}
+
+/*
+ * Hash `node` into `cache` under `key`. The node is unlinked from any
+ * bucket it may already be on first, so re-adding an existing node is
+ * safe. The whole update runs under the cache write lock.
+ */
+void
+vfs_vncache_add(struct vncache* cache, size_t key, struct hlist_node* node)
+{
+    struct hbucket* slot;
+
+    cache_atomic_write(cache,
+    {
+        slot = &cache->pool[key & VFS_HASH_MASK];
+        hlist_delete(node);
+        hlist_add(&slot->head, node);
+    });
}
static inline struct hbucket*
-__dcache_hash(struct v_dnode* parent, u32_t* hash)
+/*
+ * Compute the dcache bucket for (parent, *hash) in parent's
+ * superblock-local d_cache. The mixed hash is written back through
+ * `hash` so callers compare against the stored value consistently.
+ * _nolock: the caller must already hold the d_cache lock.
+ */
+__dcache_hash_nolock(struct v_dnode* parent, u32_t* hash)
{
-    u32_t _hash = *hash;
-    // 确保低位更加随机
+    struct v_superblock* sb;
+    u32_t _hash;
+
+    sb = parent->super_block;
+
+    // fold high bits into the low bits so bucket selection is less
+    // biased, then mix in the parent pointer to separate siblings of
+    // different directories that share a name hash
+    _hash = *hash;
    _hash = _hash ^ (_hash >> VFS_HASHBITS);
-    // 与parent的指针值做加法,来减小碰撞的可能性。
    _hash += (u32_t)__ptr(parent);
+
    *hash = _hash;
-    return &dnode_cache[_hash & VFS_HASH_MASK];
+    return &sb->d_cache.pool[_hash & VFS_HASH_MASK];
}
static inline int
struct v_dnode*
vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str)
{
+    u32_t hash;
+    struct hbucket* slot;
+    struct v_dnode *pos, *n;
+    struct vncache *dcache;
+
    if (!str->len || HSTR_EQ(str, &vfs_dot))
        return parent;
        return parent->parent;
    }
-    u32_t hash = str->hash;
-    struct hbucket* slot = __dcache_hash(parent, &hash);
+    hash = str->hash;
+    dcache = dnode_cache(parent);
+
+    // hold the cache read lock across both bucket computation and the
+    // bucket scan so a concurrent rehash cannot move entries under us
+    vncache_lock_read(dcache);
-    struct v_dnode *pos, *n;
+    slot = __dcache_hash_nolock(parent, &hash);
    hashtable_bucket_foreach(slot, pos, n, hash_list)
    {
-        if (pos->name.hash == hash && pos->parent == parent) {
-            return pos;
+        if (pos->name.hash != hash || pos->parent != parent) {
+            continue;
        }
+
+        // NOTE(review): pos is returned after the read lock is dropped
+        // without taking a reference — confirm eviction cannot race
+        // with the caller's use of the result.
+        vncache_unlock_read(dcache);
+        return pos;
    }
+
+    vncache_unlock_read(dcache);
    return NULL;
}
+/*
+ * Update an inode's timestamp (mtime for INODE_MODIFY, atime for
+ * INODE_ACCESSED) to the current unix time and mark the inode as
+ * recently used in the LRU.
+ * NOTE(review): callers appear to hold the inode lock — confirm.
+ */
+static void
+__vfs_touch_inode(struct v_inode* inode, const int type)
+{
+    if (type == INODE_MODIFY) {
+        inode->mtime = clock_unixtime();
+    }
+
+    else if (type == INODE_ACCESSED) {
+        inode->atime = clock_unixtime();
+    }
+
+    lru_use_one(inode_lru, &inode->lru);
+}
+
+/*
+ * Insert `dnode` under `parent`: link it into the sibling list and
+ * hash it into parent's superblock d_cache. The parent dnode must be
+ * locked by the caller.
+ */
void
vfs_dcache_add(struct v_dnode* parent, struct v_dnode* dnode)
{
+    struct hbucket* bucket;
+
    assert(parent);
+    assert(locked_node(parent));
-    atomic_fetch_add(&dnode->ref_count, 1);
+    // NOTE(review): overwrites rather than increments the refcount —
+    // valid only for freshly allocated dnodes; confirm no caller adds
+    // a dnode that already carries references.
+    dnode->ref_count = 1;
    dnode->parent = parent;
    llist_append(&parent->children, &dnode->siblings);
-    struct hbucket* bucket = __dcache_hash(parent, &dnode->name.hash);
-    hlist_add(&bucket->head, &dnode->hash_list);
+    cache_atomic_write(dnode_cache(parent),
+    {
+        bucket = __dcache_hash_nolock(parent, &dnode->name.hash);
+        hlist_add(&bucket->head, &dnode->hash_list);
+    });
}
void
llist_delete(&dnode->siblings);
llist_delete(&dnode->aka_list);
- hlist_delete(&dnode->hash_list);
+ lru_remove(dnode_lru, &dnode->lru);
+
+ cache_atomic_write(dnode_cache(dnode),
+ {
+ hlist_delete(&dnode->hash_list);
+ });
dnode->parent = NULL;
- atomic_fetch_sub(&dnode->ref_count, 1);
+ dnode->ref_count = 0;
}
+/*
+ * Re-home `dnode` under `new_parent`: recompute its full name hash,
+ * detach it from the old parent/bucket and insert it under the new
+ * parent, atomically with respect to the dnode itself. The new parent
+ * must be locked by the caller.
+ */
void
vfs_dcache_rehash(struct v_dnode* new_parent, struct v_dnode* dnode)
{
    assert(new_parent);
+    assert(locked_node(new_parent));
-    hstr_rehash(&dnode->name, HSTR_FULL_HASH);
-    vfs_dcache_remove(dnode);
-    vfs_dcache_add(new_parent, dnode);
+    dnode_atomic(dnode,
+    {
+        hstr_rehash(&dnode->name, HSTR_FULL_HASH);
+        vfs_dcache_remove(dnode);
+        vfs_dcache_add(new_parent, dnode);
+    });
}
int
vfs_open(struct v_dnode* dnode, struct v_file** file)
{
- if (!dnode->inode || !dnode->inode->ops->open) {
+ struct v_inode* inode = dnode->inode;
+
+ if (!inode || !inode->ops->open) {
return ENOTSUP;
}
- struct v_inode* inode = dnode->inode;
-
lock_inode(inode);
struct v_file* vfile = cake_grab(file_pile);
vfile->dnode = dnode;
vfile->inode = inode;
- vfile->ref_count = ATOMIC_VAR_INIT(1);
+ vfile->ref_count = 1;
vfile->ops = inode->default_fops;
- if (check_file_node(inode) && !inode->pg_cache) {
+ if (check_regfile_node(inode) && !inode->pg_cache) {
struct pcache* pcache = vzalloc(sizeof(struct pcache));
pcache_init(pcache);
pcache->master = inode;
if (errno) {
cake_release(file_pile, vfile);
} else {
- atomic_fetch_add(&dnode->ref_count, 1);
+ vfs_ref_dnode(dnode);
inode->open_count++;
- mnt_mkbusy(dnode->mnt);
*file = vfile;
}
void
vfs_assign_inode(struct v_dnode* assign_to, struct v_inode* inode)
{
+ lock_dnode(assign_to);
+
if (assign_to->inode) {
llist_delete(&assign_to->aka_list);
assign_to->inode->link_count--;
llist_append(&inode->aka_dnodes, &assign_to->aka_list);
assign_to->inode = inode;
inode->link_count++;
+
+ unlock_dnode(assign_to);
}
+/*
+ * Create a hard link `name` to the inode behind `to_link`.
+ * Fails with EXDEV for cross-filesystem links and ENOTSUP when the
+ * filesystem provides no link op. On success the inode is assigned to
+ * the new dnode (bumping its link count).
+ */
int
vfs_link(struct v_dnode* to_link, struct v_dnode* name)
{
    int errno;
+    struct v_inode* inode;
+
+    inode = to_link->inode;
    if ((errno = vfs_check_writable(to_link))) {
        return errno;
    }
-    lock_inode(to_link->inode);
+    lock_inode(inode);
+
    if (to_link->super_block->root != name->super_block->root) {
        errno = EXDEV;
-    } else if (!to_link->inode->ops->link) {
+    } else if (!inode->ops->link) {
        errno = ENOTSUP;
-    } else if (!(errno = to_link->inode->ops->link(to_link->inode, name))) {
-        vfs_assign_inode(name, to_link->inode);
+    } else if (!(errno = inode->ops->link(inode, name))) {
+        vfs_assign_inode(name, inode);
    }
-    unlock_inode(to_link->inode);
+
+    unlock_inode(inode);
    return errno;
}
inode = file->inode;
+ if (vfs_check_duped_file(file)) {
+ vfs_unref_file(file);
+ return 0;
+ }
+
/*
* Prevent dead lock.
* This happened when process is terminated while blocking on read.
* than A. And this will cause a probable race condition on A if other
* process is writing to this file later after B exit.
*/
-
mutex_unlock_for(&inode->lock, pid);
-
- if (file->ref_count > 1) {
- atomic_fetch_sub(&file->ref_count, 1);
- return 0;
- }
+
+ // now regain lock for inode syncing
+
+ lock_inode(inode);
if ((errno = file->ops->close(file))) {
goto done;
}
- atomic_fetch_sub(&file->dnode->ref_count, 1);
- mnt_chillax(file->dnode->mnt);
+ vfs_unref_dnode(file->dnode);
cake_release(file_pile, file);
- /*
- if the current inode is not being locked by other
- threads that does not share same open context,
- then we can try to do sync opportunistically
- */
- if (mutex_on_hold(&inode->lock)) {
- goto done;
- }
-
- lock_inode(inode);
-
pcache_commit_all(inode);
inode->open_count--;
__sync_inode_nolock(inode);
}
- unlock_inode(inode);
-
done:
+ unlock_inode(inode);
return errno;
}
+/*
+ * Find the lowest free slot in the current process's fd table and
+ * store its index in *fd. Returns 0 on success, EMFILE when the table
+ * is full. The scan runs under the fd-table lock.
+ * NOTE(review): the slot is not reserved before the lock is dropped —
+ * confirm callers fill it before another thread can race the scan.
+ */
int
vfs_alloc_fdslot(int* fd)
{
+    struct v_fdtable* fdtab;
+
+    fdtab = __current->fdtable;
+    lock_fdtable(fdtab);
+
    for (size_t i = 0; i < VFS_MAX_FD; i++) {
-        if (!__current->fdtable->fds[i]) {
-            *fd = i;
-            return 0;
+        // use the locked local table handle for consistency
+        if (fdtab->fds[i]) {
+            continue;
        }
+
+        *fd = i;
+        unlock_fdtable(fdtab);
+        return 0;
    }
+
+    unlock_fdtable(fdtab);
    return EMFILE;
}
struct v_superblock* sb = cake_grab(superblock_pile);
memset(sb, 0, sizeof(*sb));
llist_init_head(&sb->sb_list);
- sb->i_cache = vzalloc(VFS_HASHTABLE_SIZE * sizeof(struct hbucket));
+
+ vfs_vncache_init(&sb->i_cache);
+ vfs_vncache_init(&sb->d_cache);
+
sb->ref_count = 1;
return sb;
}
}
void
-vfs_sb_free(struct v_superblock* sb)
+vfs_sb_unref(struct v_superblock* sb)
{
assert(sb->ref_count);
sb->ref_count--;
- if (sb->ref_count) {
+ if (likely(sb->ref_count)) {
return;
}
sb->ops.release(sb);
}
- vfree(sb->i_cache);
+ vfs_vncache_free(&sb->i_cache);
+ vfs_vncache_free(&sb->d_cache);
+
cake_release(superblock_pile, sb);
}
-static int
+/*
+ * A dnode may be evicted when the cache holds the only reference
+ * (ref_count == 1) and it has no cached children.
+ */
+static inline bool
+__dnode_evictable(struct v_dnode* dnode)
+{
+    return dnode->ref_count == 1
+        && llist_empty(&dnode->children);
+}
+
+/*
+ * LRU eviction callback: free the dnode if nobody holds its lock and
+ * it is evictable. Returns true when the node was reclaimed.
+ */
+static bool
 __vfs_try_evict_dnode(struct lru_node* obj)
 {
     struct v_dnode* dnode = container_of(obj, struct v_dnode, lru);
-    if (!dnode->ref_count) {
-        vfs_d_free(dnode);
-        return 1;
+    // never free a dnode someone is actively working with
+    if (mutex_on_hold(&dnode->lock))
+        return false;
+
+    if (!__dnode_evictable(dnode)) {
+        return false;
    }
-    return 0;
+
+    vfs_d_free(dnode);
+    return true;
 }
-static int
+static bool
__vfs_try_evict_inode(struct lru_node* obj)
{
struct v_inode* inode = container_of(obj, struct v_inode, lru);
llist_init_head(&dnode->aka_list);
mutex_init(&dnode->lock);
- dnode->ref_count = ATOMIC_VAR_INIT(0);
dnode->name = HHSTR(vzalloc(VFS_NAME_MAXLEN), 0, 0);
hstrcpy(&dnode->name, name);
vfs_d_free(struct v_dnode* dnode)
{
assert(dnode->ref_count == 1);
-
+
if (dnode->inode) {
assert(dnode->inode->link_count > 0);
dnode->inode->link_count--;
}
vfs_dcache_remove(dnode);
+
// Make sure the children de-referencing their parent.
// With lru presented, the eviction will be propagated over the entire
// detached subtree eventually
dnode->destruct(dnode);
}
- vfs_sb_free(dnode->super_block);
+ vfs_sb_unref(dnode->super_block);
+
vfree((void*)dnode->name.value);
cake_release(dnode_pile, dnode);
}
+/*
+ * Look up the inode with id `i_id` in sb's inode cache. On a hit the
+ * inode is refreshed in the LRU and returned; NULL on miss. The whole
+ * bucket scan runs under the cache read lock.
+ */
struct v_inode*
vfs_i_find(struct v_superblock* sb, u32_t i_id)
{
-    struct hbucket* slot = &sb->i_cache[i_id & VFS_HASH_MASK];
-    struct v_inode *pos, *n;
-    hashtable_bucket_foreach(slot, pos, n, hash_list)
+    struct hbucket* slot;
+    struct v_inode *pos, *n, *found = NULL;
+
+    cache_atomic_read(&sb->i_cache,
    {
-        if (pos->id == i_id) {
+        slot = &sb->i_cache.pool[i_id & VFS_HASH_MASK];
+
+        hashtable_bucket_foreach(slot, pos, n, hash_list)
+        {
+            if (pos->id != i_id) {
+                continue;
+            }
+
            lru_use_one(inode_lru, &pos->lru);
-            return pos;
+            found = pos;
+            break;
        }
-    }
+    });
-    return NULL;
+    return found;
}
+/*
+ * (Re-)hash an inode into its superblock's inode cache, keyed by
+ * inode id. vfs_vncache_add unlinks any previous placement first and
+ * performs the update under the cache write lock.
+ */
void
vfs_i_addhash(struct v_inode* inode)
{
-    struct hbucket* slot = &inode->sb->i_cache[inode->id & VFS_HASH_MASK];
-
-    hlist_delete(&inode->hash_list);
-    hlist_add(&slot->head, &inode->hash_list);
+    vfs_vncache_add(inode_cache(inode), inode->id, &inode->hash_list);
}
struct v_inode*
vfs_i_assign_sb(inode, sb);
lru_use_one(inode_lru, &inode->lru);
+
return inode;
}
pcache_release(inode->pg_cache);
vfree(inode->pg_cache);
}
+
// we don't need to sync inode.
// If an inode can be free, then it must be properly closed.
// Hence it must be synced already!
inode->destruct(inode);
}
- vfs_sb_free(inode->sb);
+ vfs_sb_unref(inode->sb);
+
hlist_delete(&inode->hash_list);
+ lru_remove(inode_lru, &inode->lru);
+
cake_release(inode_pile, inode);
}
+/*
+ * Resolve fd number `fd` in the current process's fd table.
+ * Stores the slot in *fd_s; returns EBADF for out-of-range or empty
+ * slots. The table read is done under the fd-table lock.
+ */
int
vfs_getfd(int fd, struct v_fd** fd_s)
{
-    if (TEST_FD(fd) && (*fd_s = __current->fdtable->fds[fd])) {
-        return 0;
+    struct v_fdtable* fdtab;
+
+    if (!TEST_FD(fd)) {
+        return EBADF;
    }
-    return EBADF;
+
+    fdtab = __current->fdtable;
+
+    lock_fdtable(fdtab);
+    // read through the locked local handle for consistency
+    *fd_s = fdtab->fds[fd];
+    unlock_fdtable(fdtab);
+
+    return !*fd_s ? EBADF : 0;
}
static int
return errno;
}
+ lock_dnode(fdir);
+
errno = vfs_walk(fdir, name.value, &file, NULL, woption);
if (errno && errno != ENOENT) {
- goto done;
+ goto error;
+ }
+
+ if (!errno && (options & FLOC_MKNAME)) {
+ errno = EEXIST;
+ goto error;
}
if (!errno) {
- if ((options & FLOC_MKNAME)) {
- errno = EEXIST;
- }
+ // the file present, no need to hold the directory lock
+ unlock_dnode(fdir);
goto done;
}
// errno == ENOENT
if (!options) {
- goto done;
+ goto error;
}
errno = vfs_check_writable(fdir);
if (errno) {
- goto done;
+ goto error;
}
floc->fresh = true;
file = vfs_d_alloc(fdir, &name);
if (!file) {
- return ENOMEM;
+ errno = ENOMEM;
+ goto error;
}
- lock_dnode(fdir);
-
vfs_dcache_add(fdir, file);
done:
floc->dir = fdir;
floc->file = file;
+
+ return errno;
+error:
+ unlock_dnode(fdir);
return errno;
}
+
+/*
+ * Decide whether the current user may unlink `dnode`.
+ * Without the sticky bit (svtx) on the node, write permission on the
+ * node itself suffices. With the sticky bit set, the owner may always
+ * unlink; anyone else needs write permission on both the node and its
+ * parent directory.
+ * NOTE(review): POSIX bases unlink permission on write access to the
+ * containing directory, not the file itself — confirm the non-sticky
+ * branch matches Lunaix's intended semantics.
+ */
+static bool
+__check_unlinkable(struct v_dnode* dnode)
+{
+    int acl;
+    bool wr_self, wr_parent;
+    struct v_dnode* parent;
+
+    parent = dnode->parent;
+    acl = dnode->inode->acl;
+
+    wr_self = check_allow_write(dnode->inode);
+    wr_parent = check_allow_write(parent->inode);
+
+    if (!fsacl_test(acl, svtx)) {
+        return wr_self;
+    }
+
+    // sticky directory semantics: the owner can always remove its own
+    if (current_euid() == dnode->inode->uid) {
+        return true;
+    }
+
+    return wr_self && wr_parent;
+}
+
int
vfs_do_open(const char* path, int options)
{
goto unlock;
}
+ if (!check_allow_read(inode)) {
+ errno = EPERM;
+ goto unlock;
+ }
+
struct dir_context dctx = (struct dir_context) {
.cb_data = dent,
.read_complete_callback = __vfs_readdir_callback
if ((errno = fd_s->file->ops->readdir(fd_s->file, &dctx)) != 1) {
goto unlock;
}
+
dent->d_offset++;
fd_s->file->f_pos++;
{
int errno = 0;
struct v_fd* fd_s;
+ struct v_inode* inode;
+
if ((errno = vfs_getfd(fd, &fd_s))) {
goto done;
}
goto done;
}
- lock_inode(file->inode);
+ if (!check_allow_read(file->inode)) {
+ errno = EPERM;
+ goto done;
+ }
+
+ inode = file->inode;
+ lock_inode(inode);
- file->inode->atime = clock_unixtime();
+ __vfs_touch_inode(inode, INODE_ACCESSED);
- if (check_seqdev_node(file->inode) || (fd_s->flags & FO_DIRECT)) {
- errno = file->ops->read(file->inode, buf, count, file->f_pos);
+ if (check_seqdev_node(inode) || (fd_s->flags & FO_DIRECT)) {
+ errno = file->ops->read(inode, buf, count, file->f_pos);
} else {
- errno = pcache_read(file->inode, buf, count, file->f_pos);
+ errno = pcache_read(inode, buf, count, file->f_pos);
}
if (errno > 0) {
file->f_pos += errno;
- unlock_inode(file->inode);
+ unlock_inode(inode);
return errno;
}
- unlock_inode(file->inode);
+ unlock_inode(inode);
done:
return DO_STATUS(errno);
inode = file->inode;
lock_inode(inode);
- inode->mtime = clock_unixtime();
+ __vfs_touch_inode(inode, INODE_MODIFY);
if ((fd_s->flags & O_APPEND)) {
file->f_pos = inode->fsize;
}
goto done;
}
+ if (!check_allow_read(inode)) {
+ errno = EPERM;
+ goto done;
+ }
+
lock_inode(inode);
int overflow = 0;
return ENOTSUP;
}
+ if (!check_allow_read(inode)) {
+ return EPERM;
+ }
+
lock_inode(inode);
int errno = inode->ops->read_symlink(inode, &link);
return dtype;
}
+/*
+ * Allocate and initialise an empty, zeroed fd table with its mutex
+ * ready. Returns NULL if allocation fails; caller owns the result and
+ * releases it with fdtable_free().
+ */
+struct v_fdtable*
+fdtable_create(void)
+{
+    struct v_fdtable* fdtab;
+
+    fdtab = vzalloc(sizeof(struct v_fdtable));
+    if (!fdtab) {
+        return NULL;
+    }
+
+    mutex_init(&fdtab->lock);
+
+    return fdtab;
+}
+
+/*
+ * Duplicate every open fd from `src` into the same slot of `dest`,
+ * with both tables locked (dest first, then src).
+ * NOTE(review): the return value of vfs_dup_fd is ignored — a failed
+ * dup leaves a silent hole in dest; confirm this is acceptable.
+ * NOTE(review): fixed dest-then-src lock order can deadlock if two
+ * threads copy in opposite directions — confirm callers cannot race.
+ */
+void
+fdtable_copy(struct v_fdtable* dest, struct v_fdtable* src)
+{
+    lock_fdtable(dest);
+    lock_fdtable(src);
+
+    for (size_t i = 0; i < VFS_MAX_FD; i++) {
+        struct v_fd* fd = src->fds[i];
+        if (!fd)
+            continue;
+        vfs_dup_fd(fd, &dest->fds[i]);
+    }
+
+    unlock_fdtable(dest);
+    unlock_fdtable(src);
+}
+
+/*
+ * Release an fd table. The table must be unlocked (asserted) and,
+ * presumably, already emptied — the fds[] entries themselves are not
+ * freed here. NOTE(review): confirm all fds are closed before free.
+ */
+void
+fdtable_free(struct v_fdtable* table)
+{
+    assert(!mutex_on_hold(&table->lock));
+
+    vfree(table);
+}
+
__DEFINE_LXSYSCALL3(int, realpathat, int, fd, char*, buf, size_t, size)
{
int errno;
}
struct v_dnode* dnode;
- errno = vfs_get_path(fd_s->file->dnode, buf, size, 0);
- if (errno >= 0) {
- return errno;
- }
+ dnode = fd_s->file->dnode;
+
+ lock_dnode(dnode);
+ errno = vfs_get_path(dnode, buf, size, 0);
+ unlock_dnode(dnode);
done:
return DO_STATUS(errno);
lock_dnode(dnode);
+ if (!__check_unlinkable(dnode)) {
+ errno = EPERM;
+ goto done;
+ }
+
if ((errno = vfs_check_writable(dnode))) {
goto done;
}
__DEFINE_LXSYSCALL1(int, mkdir, const char*, path)
{
- int errno = 0;
+ int errno;
+ struct hstr name;
+ struct v_inode* inode;
struct v_dnode *parent, *dir;
char name_value[VFS_NAME_MAXLEN];
- struct hstr name = HHSTR(name_value, 0, 0);
+
+ name = HHSTR(name_value, 0, 0);
if ((errno = vfs_walk_proc(path, &parent, &name, VFS_WALK_PARENT))) {
goto done;
goto done;
}
- struct v_inode* inode = parent->inode;
+ inode = parent->inode;
lock_dnode(parent);
lock_inode(inode);
return DO_STATUS(errno);
}
-int
+static int
__vfs_do_unlink(struct v_dnode* dnode)
{
int errno;
return EBUSY;
}
+ if (!__check_unlinkable(dnode)) {
+ return EPERM;
+ }
+
if ((errno = vfs_check_writable(dnode))) {
return errno;
}
memcpy(copied, old, sizeof(struct v_fd));
- atomic_fetch_add(&old->file->ref_count, 1);
+ vfs_ref_file(old->file);
*new = copied;
int
vfs_dup2(int oldfd, int newfd)
{
+ int errno;
+ struct v_fdtable* fdtab;
+ struct v_fd *oldfd_s, *newfd_s;
+
if (newfd == oldfd) {
return newfd;
}
- int errno;
- struct v_fd *oldfd_s, *newfd_s;
if ((errno = vfs_getfd(oldfd, &oldfd_s))) {
goto done;
}
goto done;
}
- newfd_s = __current->fdtable->fds[newfd];
+ fdtab = __current->fdtable;
+ lock_fdtable(fdtab);
+
+ newfd_s = fdtab->fds[newfd];
if (newfd_s && (errno = vfs_close(newfd_s->file))) {
- goto done;
+ goto unlock_and_done;
}
- if (!(errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
- __current->fdtable->fds[newfd] = newfd_s;
- return newfd;
+ if ((errno = vfs_dup_fd(oldfd_s, &newfd_s))) {
+ goto unlock_and_done;
}
+ fdtab->fds[newfd] = newfd_s;
+
+ unlock_fdtable(fdtab);
+ return newfd;
+
+unlock_and_done:
+ unlock_fdtable(fdtab);
+
done:
return DO_STATUS(errno);
}
return DO_STATUS(errno);
}
-void
-vfs_ref_file(struct v_file* file)
-{
-    atomic_fetch_add(&file->ref_count, 1);
-}
-
-void
-vfs_ref_dnode(struct v_dnode* dnode)
+// NOTE(review): the definitions of vfs_ref_file / vfs_ref_dnode /
+// vfs_unref_dnode are removed here yet still called elsewhere in this
+// patch — presumably relocated (e.g. as header inlines); verify.
+/*
+ * Switch proc's cwd to `dnode` without taking the dnode lock; the
+ * caller must hold it. Returns ENOTDIR if dnode is not a directory.
+ * Drops the reference pinned by the previous cwd and pins the new one.
+ */
+static int
+vfs_do_chdir_nolock(struct proc_info* proc, struct v_dnode* dnode)
{
-    atomic_fetch_add(&dnode->ref_count, 1);
-
-    if (dnode->mnt) {
-        mnt_mkbusy(dnode->mnt);
+    if (!check_directory_node(dnode->inode)) {
+        return ENOTDIR;
    }
-}
-void
-vfs_unref_dnode(struct v_dnode* dnode)
-{
-    atomic_fetch_sub(&dnode->ref_count, 1);
-    if (dnode->mnt) {
-        mnt_chillax(dnode->mnt);
+    if (proc->cwd) {
+        vfs_unref_dnode(proc->cwd);
    }
+
+    vfs_ref_dnode(dnode);
+    proc->cwd = dnode;
+
+    return 0;
}
-int
+/*
+ * Locked wrapper around vfs_do_chdir_nolock: change proc's cwd to
+ * dnode under the dnode lock. Returns 0 or ENOTDIR.
+ */
+static int
 vfs_do_chdir(struct proc_info* proc, struct v_dnode* dnode)
 {
     int errno = 0;
     lock_dnode(dnode);
-    if (!check_directory_node(dnode->inode)) {
-        errno = ENOTDIR;
-        goto done;
-    }
-
-    if (proc->cwd) {
-        vfs_unref_dnode(proc->cwd);
-    }
-
-    vfs_ref_dnode(dnode);
-    proc->cwd = dnode;
+    errno = vfs_do_chdir_nolock(proc, dnode);
     unlock_dnode(dnode);
-done:
     return errno;
 }
return DO_STATUS(errno);
}
+
+/*
+ * chroot(2): walk `path`, make it both the cwd and the root of the
+ * calling process. Returns via DO_STATUS on all paths.
+ * NOTE(review): no privilege check — POSIX restricts chroot to
+ * privileged processes; confirm intent.
+ * NOTE(review): __current->root is assigned without an extra
+ * reference and the old root is not released — verify refcounting.
+ */
+__DEFINE_LXSYSCALL1(int, chroot, const char*, path)
+{
+    int errno;
+    struct v_dnode* dnode;
+    if ((errno = vfs_walk_proc(path, &dnode, NULL, 0))) {
+        // keep the syscall return convention consistent: every exit
+        // path converts errno through DO_STATUS
+        return DO_STATUS(errno);
+    }
+
+    lock_dnode(dnode);
+
+    errno = vfs_do_chdir_nolock(__current, dnode);
+    if (errno) {
+        unlock_dnode(dnode);
+        goto done;
+    }
+
+    __current->root = dnode;
+
+    unlock_dnode(dnode);
+
+done:
+    return DO_STATUS(errno);
+}
+
__DEFINE_LXSYSCALL2(char*, getcwd, char*, buf, size_t, size)
{
int errno = 0;
lock_dnode(current);
lock_dnode(target);
+
if (oldparent)
lock_dnode(oldparent);
if (newparent)
cleanup:
unlock_dnode(current);
+
if (oldparent)
unlock_dnode(oldparent);
if (newparent)
struct v_inode* vino = fds->file->inode;
struct device* fdev = vino->sb->dev;
- *stat = (struct file_stat){.st_ino = vino->id,
- .st_blocks = vino->lb_usage,
- .st_size = vino->fsize,
- .mode = vino->itype,
- .st_ioblksize = PAGE_SIZE,
- .st_blksize = vino->sb->blksize};
+ stat->st_ino = vino->id;
+ stat->st_blocks = vino->lb_usage;
+ stat->st_size = vino->fsize;
+ stat->st_blksize = vino->sb->blksize;
+ stat->st_nlink = vino->link_count;
+ stat->st_uid = vino->uid;
+ stat->st_gid = vino->gid;
+
+ stat->st_ctim = vino->ctime;
+ stat->st_atim = vino->atime;
+ stat->st_mtim = vino->mtime;
+
+ stat->st_mode = (vino->itype << 16) | vino->acl;
+
+ stat->st_ioblksize = PAGE_SIZE;
if (check_device_node(vino)) {
struct device* rdev = resolve_device(vino->data);
.index = dev_uid(fdev) };
}
+done:
+ return DO_STATUS(errno);
+}
+
+/*
+ * fchmodat(2): set the access-control bits of the inode at `path`
+ * (resolved relative to `fd` per `flags`). Non-root callers may only
+ * set the rwx bits (FSACL_RWXMASK).
+ * NOTE(review): there is no ownership check (owner-or-root) before
+ * the mode is applied — confirm intent.
+ * NOTE(review): this touches mtime; POSIX chmod updates ctime.
+ */
+__DEFINE_LXSYSCALL4(int, fchmodat, int, fd,
+    const char*, path, int, mode, int, flags)
+{
+    int errno;
+    struct v_dnode *dnode;
+    struct v_inode* inode;
+
+    errno = vfs_walkat(fd, path, flags, &dnode);
+    if (errno) {
+        goto done;
+    }
+
+    errno = vfs_check_writable(dnode);
+    if (errno) {
+        // route through DO_STATUS like every other exit path
+        goto done;
+    }
+
+    inode = dnode->inode;
+    lock_inode(inode);
+
+    if (!current_is_root()) {
+        mode = mode & FSACL_RWXMASK;
+    }
+
+    inode->acl = mode;
+    __vfs_touch_inode(inode, INODE_MODIFY);
+
+    unlock_inode(inode);
+
+done:
+    return DO_STATUS(errno);
+}
+
+/*
+ * fchownat(2): set the owning uid/gid of the inode at `path`
+ * (resolved relative to `fd` per `flags`).
+ * NOTE(review): no privilege check — POSIX restricts chown to
+ * privileged processes (or owner for gid changes); confirm intent.
+ * NOTE(review): this touches mtime; POSIX chown updates ctime.
+ */
+__DEFINE_LXSYSCALL5(int, fchownat, int, fd,
+    const char*, path, uid_t, uid, gid_t, gid, int, flags)
+{
+    int errno;
+    struct v_dnode *dnode;
+    struct v_inode *inode;
+
+    errno = vfs_walkat(fd, path, flags, &dnode);
+    if (errno) {
+        goto done;
+    }
+
+    errno = vfs_check_writable(dnode);
+    if (errno) {
+        // route through DO_STATUS like every other exit path
+        goto done;
+    }
+
+    inode = dnode->inode;
+    lock_inode(inode);
+
+    inode->uid = uid;
+    inode->gid = gid;
+    __vfs_touch_inode(inode, INODE_MODIFY);
+
+    unlock_inode(inode);
+
+done:
+    return DO_STATUS(errno);
+}
+
+/*
+ * faccessat(2): check whether the target of `path` grants the
+ * permission bits in `amode` to the calling user. With AT_EACCESS the
+ * effective ids are used, otherwise the real ids.
+ */
+__DEFINE_LXSYSCALL4(int, faccessat, int, fd,
+    const char*, path, int, amode, int, flags)
+{
+    int errno, acl;
+    struct v_dnode *dnode;
+    struct v_inode *inode;
+    struct user_scope* uscope;
+
+    uid_t tuid;
+    gid_t tgid;
+
+    errno = vfs_walkat(fd, path, flags, &dnode);
+    if (errno) {
+        goto done;
+    }
+
+    if ((flags & AT_EACCESS)) {
+        tuid = current_euid();
+        tgid = current_egid();
+    }
+    else {
+        uscope = current_user_scope();
+        tuid = uscope->ruid;
+        tgid = uscope->rgid;
+    }
+
+    inode = dnode->inode;
+
+    // POSIX: *all* requested bits must be granted, not just any one.
+    // amode == 0 (F_OK) degenerates to a pure existence check, which
+    // the successful walk above has already satisfied.
+    acl = inode->acl;
+    acl &= check_acl_between(inode->uid, inode->gid, tuid, tgid);
+    if ((acl & amode) != amode) {
+        errno = EACCESS;
+    }
+
+done:
+    return DO_STATUS(errno);
+}
\ No newline at end of file