#define __LUNAIX_MUTEX_H
#include "semaphore.h"
-
-// TODO: implement mutex lock
-
-typedef struct sem_t mutex_t;
-
-static inline void mutex_init(mutex_t *mutex) {
- sem_init(mutex, 1);
+#include <lunaix/types.h>
+
+/*
+ * Mutex built on a binary semaphore, extended with an owner pid so that
+ * only the lock holder (or a cleanup path acting on its behalf, see
+ * mutex_unlock_for) can release it.
+ */
+typedef struct mutex_s
+{
+ struct sem_t sem;
+ pid_t owner;
+} mutex_t;
+
+/*
+ * Initialize the mutex to the unlocked state (semaphore counter = 1).
+ * NOTE(review): `owner` is left uninitialized here — confirm no caller
+ * consults it before the first mutex_lock().
+ */
+static inline void
+mutex_init(mutex_t* mutex)
+{
+ sem_init(&mutex->sem, 1);
}
-static inline unsigned int mutex_on_hold(mutex_t *mutex) {
- return !atomic_load(&mutex->counter);
+/*
+ * Non-blocking query: non-zero when the mutex is currently held
+ * (semaphore counter is 0).
+ */
+static inline int
+mutex_on_hold(mutex_t* mutex)
+{
+ return !atomic_load(&mutex->sem.counter);
}
-static inline void mutex_lock(mutex_t *mutex) {
- sem_wait(mutex);
-}
+/* Acquire the mutex, blocking the caller until it is available; records
+ * the calling process as the owner. */
+void
+mutex_lock(mutex_t* mutex);
-static inline void mutex_unlock(mutex_t *mutex) {
- sem_post(mutex);
-}
+/* Release the mutex on behalf of the current process (no-op when the
+ * caller is not the owner). */
+void
+mutex_unlock(mutex_t* mutex);
+
+/* Release the mutex on behalf of `pid`; only effective when `pid` is the
+ * recorded owner. Intended for process-cleanup paths (e.g. vfs_pclose). */
+void
+mutex_unlock_for(mutex_t* mutex, pid_t pid);
#endif /* __LUNAIX_MUTEX_H */
--- /dev/null
+#ifndef __LUNAIX_RWLOCK_H
+#define __LUNAIX_RWLOCK_H
+
+#include "mutex.h"
+#include "waitq.h"
+#include <stdatomic.h>
+
+/*
+ * Reader-writer lock: multiple concurrent readers, single exclusive
+ * writer. Blocked processes park on the two wait queues.
+ */
+typedef struct rwlock_s
+{
+ atomic_uint readers; /* number of readers currently inside */
+ atomic_flag writer; /* set while a writer owns (or is acquiring) */
+ waitq_t waiting_readers;
+ waitq_t waiting_writers;
+} rwlock_t;
+
+/* Initialize the lock to the released state. Must be called before any
+ * other rwlock_* operation. (This prototype was missing although
+ * rwlock.c defines the function — external callers could not see it.) */
+void
+rwlock_init(rwlock_t* rwlock);
+
+void
+rwlock_begin_read(rwlock_t* rwlock);
+
+void
+rwlock_end_read(rwlock_t* rwlock);
+
+void
+rwlock_begin_write(rwlock_t* rwlock);
+
+void
+rwlock_end_write(rwlock_t* rwlock);
+
+#endif /* __LUNAIX_RWLOCK_H */
struct llist_header waiters;
} waitq_t;
-inline void
+static inline void
waitq_init(waitq_t* waitq)
{
llist_init_head(&waitq->waiters);
}
+/* Non-blocking query: non-zero when no process is parked on this wait
+ * queue. */
+static inline int
+waitq_empty(waitq_t* waitq)
+{
+ return llist_empty(&waitq->waiters);
+}
+
void
pwait(waitq_t* queue);
void
vfs_init();
+void
+vfs_export_attributes();
+
struct v_dnode*
vfs_dcache_lookup(struct v_dnode* parent, struct hstr* str);
int
vfs_open(struct v_dnode* dnode, struct v_file** file);
+int
+vfs_pclose(struct v_file* file, pid_t pid);
+
int
vfs_close(struct v_file* file);
void
vfs_unref_dnode(struct v_dnode* dnode);
+int
+vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth);
+
void
pcache_init(struct pcache* pcache);
pid_t p;
signal(_SIGINT, sigint_handle);
- // set our shell as foreground process (unistd.h:tcsetpgrp is wrapper of
- // this)
+ // set our shell as foreground process
+ // (unistd.h:tcsetpgrp is essentially a wrapper of this)
// stdout (by default, unless user did smth) is the tty we are currently at
ioctl(stdout, TIOCSPGRP, getpgid());
--- /dev/null
+#include <lunaix/ds/mutex.h>
+#include <lunaix/process.h>
+
+/*
+ * Acquire the mutex, blocking until available, then record the caller
+ * as the owner.
+ * NOTE(review): there is a window between sem_wait() returning and the
+ * owner store below in which the lock is held but `owner` still carries
+ * its previous value; confirm mutex_on_hold()/mutex_unlock_for() users
+ * (e.g. vfs_pclose) tolerate this.
+ */
+void
+mutex_lock(mutex_t* mutex)
+{
+ sem_wait(&mutex->sem);
+ mutex->owner = __current->pid;
+}
+
+/* Release on behalf of the current process; delegates the ownership
+ * check to mutex_unlock_for(). */
+void
+mutex_unlock(mutex_t* mutex)
+{
+ mutex_unlock_for(mutex, __current->pid);
+}
+
+/*
+ * Release the mutex on behalf of `pid`. Silently ignored when `pid` is
+ * not the recorded owner, so a stale reference (e.g. a terminated
+ * process that never held the lock) cannot corrupt the semaphore.
+ */
+void
+mutex_unlock_for(mutex_t* mutex, pid_t pid)
+{
+    if (mutex->owner != pid) {
+        return;
+    }
+    // Invalidate ownership *before* posting: previously `owner` stayed
+    // stale after release, so the same pid could unlock twice in a row
+    // (double sem_post => counter of 2 => two holders admitted at once).
+    // NOTE(review): assumes -1 is never a valid pid — confirm.
+    mutex->owner = -1;
+    sem_post(&mutex->sem);
+}
\ No newline at end of file
--- /dev/null
+#include <lunaix/ds/rwlock.h>
+#include <lunaix/spike.h>
+
+/*
+ * Initialize `rwlock` to the released state: empty wait queues, zero
+ * readers, writer flag clear. Must run before any other rwlock_* call.
+ * NOTE(review): this function is not declared in rwlock.h — add the
+ * prototype there so external callers can see it.
+ */
+void
+rwlock_init(rwlock_t* rwlock)
+{
+ waitq_init(&rwlock->waiting_readers);
+ waitq_init(&rwlock->waiting_writers);
+ atomic_init(&rwlock->readers, 0);
+ atomic_flag_clear(&rwlock->writer);
+}
+
+/*
+ * Enter the read side. The writer flag doubles as a short-lived entry
+ * latch: a reader grabs it, bumps the reader count, releases it, then
+ * wakes any readers that were parked behind it.
+ * NOTE(review): between a failed test_and_set and the pwait() below,
+ * the flag may be cleared and pwake_all() issued — a lost wakeup unless
+ * pwait/pwake are level- rather than edge-triggered. Confirm.
+ */
+void
+rwlock_begin_read(rwlock_t* rwlock)
+{
+ while (atomic_flag_test_and_set(&rwlock->writer)) {
+ pwait(&rwlock->waiting_readers);
+ }
+ atomic_fetch_add(&rwlock->readers, 1);
+ atomic_flag_clear(&rwlock->writer);
+ pwake_all(&rwlock->waiting_readers);
+}
+
+/*
+ * Leave the read side; the last reader out wakes one pending writer.
+ */
+void
+rwlock_end_read(rwlock_t* rwlock)
+{
+    // atomic_fetch_sub returns the value *prior* to the decrement, so
+    // "prev == 1" identifies the last reader atomically. The previous
+    // separate load after the decrement was racy: two readers leaving
+    // concurrently could both observe a non-zero count (missed writer
+    // wakeup) or both observe zero (duplicate wakeup).
+    unsigned int prev = atomic_fetch_sub(&rwlock->readers, 1);
+    assert(prev > 0);
+
+    if (prev == 1) {
+        pwake_one(&rwlock->waiting_writers);
+    }
+}
+
+/*
+ * Enter the write side: exclusive access once both loops complete.
+ */
+void
+rwlock_begin_write(rwlock_t* rwlock)
+{
+ // first, acquire the writer flag: blocks new readers from entering
+ while (atomic_flag_test_and_set(&rwlock->writer)) {
+ pwait(&rwlock->waiting_writers);
+ }
+
+ // then, wait for in-flight readers to drain.
+ // NOTE(review): the last reader's pwake_one() may fire between the
+ // load below and pwait() — confirm the primitives cover this window.
+ while (atomic_load(&rwlock->readers)) {
+ pwait(&rwlock->waiting_writers);
+ }
+}
+
+/*
+ * Leave the write side. Writers are preferred on release: hand off to a
+ * queued writer when one is waiting, otherwise wake every parked reader.
+ * NOTE(review): a writer that fails test_and_set just before the clear
+ * below, but enqueues after the waitq_empty() check, sleeps with no one
+ * left to wake it — confirm pwait/pwake close this window.
+ */
+void
+rwlock_end_write(rwlock_t* rwlock)
+{
+ atomic_flag_clear(&rwlock->writer);
+ if (waitq_empty(&rwlock->waiting_writers)) {
+ pwake_all(&rwlock->waiting_readers);
+ } else {
+ pwake_one(&rwlock->waiting_writers);
+ }
+}
\ No newline at end of file
--- /dev/null
+#include <lunaix/foptions.h>
+#include <lunaix/fs.h>
+#include <lunaix/fs/twifs.h>
+
+extern struct llist_header all_mnts;
+
+/*
+ * twimap read callback: emits one line per mount in the form
+ * "<fs_name> at <path>, ro|rw".
+ * NOTE(review): vfs_get_path() can return ENAMETOOLONG instead of a
+ * length (see its definition), and that value would be used as an index
+ * below — presumably still inside the 512-byte buffer, but the entry
+ * would be garbled. Consider handling the error return explicitly.
+ */
+void
+__mount_read(struct twimap* map)
+{
+ char path[512];
+ struct v_mount* mnt = twimap_index(map, struct v_mount*);
+ size_t len = vfs_get_path(mnt->mnt_point, path, 511, 0);
+ path[len] = '\0';
+ twimap_printf(map, "%s at %s", mnt->super_block->fs->fs_name.value, path);
+ if ((mnt->flags & MNT_RO)) {
+ twimap_printf(map, ", ro");
+ } else {
+ twimap_printf(map, ", rw");
+ }
+ twimap_printf(map, "\n");
+}
+
+/* Advance the mount iterator. Returns 0 once the list wraps back to its
+ * global head (no more entries), 1 otherwise. */
+int
+__mount_next(struct twimap* map)
+{
+    struct v_mount* current = twimap_index(map, struct v_mount*);
+    struct llist_header* succ = current->list.next;
+
+    if (succ == &all_mnts) {
+        return 0;
+    }
+
+    map->index = container_of(succ, struct v_mount, list);
+    return 1;
+}
+
+/* Rewind the mount iterator to the first entry of the global list. */
+void
+__mount_reset(struct twimap* map)
+{
+    struct llist_header* first = all_mnts.next;
+    map->index = container_of(first, struct v_mount, list);
+}
+
+/*
+ * Publish VFS attributes through twifs: creates the "mounts" mapping
+ * whose callbacks list every mounted file system, its mount point and
+ * its ro/rw flag.
+ */
+void
+vfs_export_attributes()
+{
+    struct twimap* map = twifs_mapping(NULL, NULL, "mounts");
+    if (!map) {
+        // Mapping creation failed (twifs unavailable or allocation
+        // failure): skip the export instead of faulting on NULL below.
+        return;
+    }
+    map->read = __mount_read;
+    map->go_next = __mount_next;
+    map->reset = __mount_reset;
+}
\ No newline at end of file
#include <lunaix/process.h>
#include <lunaix/types.h>
-static struct llist_header all_mnts = { .next = &all_mnts, .prev = &all_mnts };
+struct llist_header all_mnts = { .next = &all_mnts, .prev = &all_mnts };
struct v_mount*
vfs_create_mount(struct v_mount* parent, struct v_dnode* mnt_point)
}
int
-vfs_close(struct v_file* file)
+vfs_pclose(struct v_file* file, pid_t pid)
{
int errno = 0;
if (file->ref_count > 1) {
atomic_fetch_sub(&file->dnode->ref_count, 1);
file->inode->open_count--;
- // Remove dead lock.
+        // Prevent deadlock.
// This happened when process is terminated while blocking on read.
// In that case, the process is still holding the inode lock and it will
// never get released.
- // FIXME is this a good solution?
/*
- * Consider two process both open the same file both with fd=x.
+     * The unlocking should also include an ownership check.
+     *
+     * To see why, consider two processes that both open the same file,
+     * both with fd=x.
* Process A: busy on reading x
* Process B: do nothing with x
- * Assume that, after a very short time, process B get terminated while
- * process A is still busy in it's reading business. By this design, the
- * inode lock of this file x is get released by B rather than A. And
- * this will cause a probable race condition on A if other process is
- * writing to this file later after B exit.
- *
- * A possible solution is to add a owner identification in the lock
- * context, so only the lock holder can do the release.
+     * Assuming that, after a very short time, process B gets terminated
+     * while process A is still busy in its reading business. By this
+     * design, the inode lock of this file x gets released by B rather
+     * than A. And this will cause a probable race condition on A if
+     * another process writes to this file later, after B exits.
*/
if (mutex_on_hold(&file->inode->lock)) {
- unlock_inode(file->inode);
+ mutex_unlock_for(&file->inode->lock, pid);
}
mnt_chillax(file->dnode->mnt);
return errno;
}
+int
+vfs_close(struct v_file* file)
+{
+ return vfs_pclose(file, __current->pid);
+}
+
int
vfs_fsync(struct v_file* file)
{
int
vfs_get_path(struct v_dnode* dnode, char* buf, size_t size, int depth)
{
- if (!dnode || dnode->parent == dnode) {
+ if (!dnode) {
return 0;
}
return ENAMETOOLONG;
}
- size_t len = vfs_get_path(dnode->parent, buf, size, depth + 1);
+ size_t len = 0;
+
+ if (dnode->parent != dnode) {
+ len = vfs_get_path(dnode->parent, buf, size, depth + 1);
+ }
if (len >= size) {
return len;
}
- buf[len++] = VFS_PATH_DELIM;
+ if (!len || buf[len - 1] != VFS_PATH_DELIM) {
+ buf[len++] = VFS_PATH_DELIM;
+ }
size_t cpy_size = MIN(dnode->name.len, size - len);
strncpy(buf + len, dnode->name.value, cpy_size);
fsm_init();
input_init();
+ vfs_export_attributes();
+
if ((errno = vfs_mount_root("ramfs", NULL))) {
panickf("Fail to mount root. (errno=%d)", errno);
}
for (size_t i = 0; i < VFS_MAX_FD; i++) {
struct v_fd* fd = proc->fdtable->fds[i];
if (fd)
- vfs_close(fd->file);
+ vfs_pclose(fd->file, pid);
}
vfree(proc->fdtable);