bcobj_t cobj;
struct llist_header dirty;
struct blkio_req* breq;
+
+ unsigned int dirty_count;
+ mutex_t lock;
};
typedef void* bbuf_t;
--- /dev/null
+#ifndef __LUNAIX_KTHREAD_H
+#define __LUNAIX_KTHREAD_H
+
+#include <lunaix/types.h>
+#include <lunaix/threads.h>
+
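+/*
+ * Spawn a detached kernel thread running `entry`.
+ * Must be called from a kernel process.
+ */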
+void
+kthread_spawn(ptr_t entry);
+
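+/*
+ * Block the calling kernel thread for `seconds` seconds,
+ * then yield to the scheduler.
+ */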
+void
+kthread_sleep(int seconds);
+
+#endif /* __LUNAIX_KTHREAD_H */
void
start_thread(struct thread* th, ptr_t entry);
-static inline void
-spawn_kthread(ptr_t entry) {
- assert(kernel_process(__current));
-
- struct thread* th = create_thread(__current, false);
-
- assert(th);
- start_thread(th, entry);
- detach_thread(th);
-}
-
void
exit_thread(void* val);
void
cleanup_detached_threads();
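+/* Accessor for the global scheduler context. */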
+static inline struct scheduler*
+scheduler()
+{
+ extern struct scheduler sched_ctx;
+ return &sched_ctx;
+}
+
#endif /* __LUNAIX_SCHEDULER_H */
#include <lunaix/syslog.h>
#include <asm/muldiv64.h>
-LOG_MODULE("blkbuf")
+LOG_MODULE("blkbuf")
+
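+// how many times a buffer may be re-dirtied before it is
+// flushed to the block device eagerly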
+#define MAX_DIRTY_COUNT 16
#define bb_cache_obj(bcache) \
container_of(bcache, struct blkbuf_cache, cached)
cache->blkdev->blk_size);
}
+static inline void
+__blkbuf_lock(struct blk_buf* buf)
+{
+ mutex_lock(&buf->lock);
+}
+
+static inline void
+__blkbuf_unlock(struct blk_buf* buf)
+{
+ mutex_unlock(&buf->lock);
+}
+
static void
__blkbuf_do_sync(struct bcache* bc, unsigned long tag, void* data)
{
    // give dirty a known state
llist_init_head(&buf->dirty);
+ mutex_init(&buf->lock);
blkio_setread(req);
blkio_bindctx(req, bc->blkdev->blkio);
bcache_return(bbuf->cobj);
}
+
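+/*
+ * Commit the buffer's pending write and clear its dirty state.
+ * Caller must hold bbuf->lock.
+ */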
+static void
+__schedule_sync_nolock(struct blk_buf* bbuf, bool wait)
+{
+ struct blkio_req* blkio;
+
+ blkio = bbuf->breq;
+
+ blkio_setwrite(blkio);
+ blkio_commit(blkio, wait ? BLKIO_WAIT : BLKIO_NOWAIT);
+
+ llist_delete(&bbuf->dirty);
+ bbuf->dirty_count = 0;
+}
+
void
blkbuf_dirty(bbuf_t buf)
{
bbuf = ((struct blk_buf*)buf);
bc = bcache_holder_embed(bbuf->cobj, struct blkbuf_cache, cached);
- mutex_lock(&bc->lock);
+ __blkbuf_lock(bbuf);
- if (llist_empty(&bbuf->dirty)) {
+ if (llist_empty(&bbuf->dirty))
+ {
+ mutex_lock(&bc->lock);
llist_append(&bc->dirty, &bbuf->dirty);
+ mutex_unlock(&bc->lock);
}
- mutex_unlock(&bc->lock);
-}
-
-static inline void
-__schedule_sync_event(struct blk_buf* bbuf, bool wait)
-{
- struct blkio_req* blkio;
-
- blkio = bbuf->breq;
-
- blkio_setwrite(blkio);
- blkio_commit(blkio, wait ? BLKIO_WAIT : BLKIO_NOWAIT);
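+ // flush eagerly once the buffer has been re-dirtied enough times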
+ bbuf->dirty_count++;
+ if (bbuf->dirty_count >= MAX_DIRTY_COUNT) {
+ __schedule_sync_nolock(bbuf, false);
+ }
- llist_delete(&bbuf->dirty);
+ __blkbuf_unlock(bbuf);
}
void
struct blk_buf* bbuf;
bbuf = to_blkbuf(buf);
- __schedule_sync_event(bbuf, false);
+ __blkbuf_lock(bbuf);
+ __schedule_sync_nolock(bbuf, false);
+ __blkbuf_unlock(bbuf);
}
bool
mutex_lock(&bc->lock);
- llist_for_each(pos, n, &bc->dirty, dirty) {
- __schedule_sync_event(pos, !async);
+ llist_for_each(pos, n, &bc->dirty, dirty)
+ {
+ __blkbuf_lock(pos);
+ __schedule_sync_nolock(pos, !async);
+ __blkbuf_unlock(pos);
}
mutex_unlock(&bc->lock);
#include <lunaix/spike.h>
#include <lunaix/fs/twimap.h>
#include <lunaix/fs/twifs.h>
+#include <lunaix/kthread.h>
+#include <lunaix/owloysius.h>
#include <klibc/string.h>
static struct llist_header zone_lead = { .next = &zone_lead, .prev = &zone_lead };
DEFINE_SPINLOCK_OPS(struct lru_zone*, lock);
-
+static DEFINE_SPINLOCK(zone_list_lock);
static void
-__do_evict_lockless(struct lru_zone* zone, struct llist_header* elem)
+__do_evict_nolock(struct lru_zone* zone, struct llist_header* elem)
{
llist_delete(elem);
if (!zone->try_evict(container_of(elem, struct lru_node, lru_nodes))) {
}
static void
-__lru_evict_all_lockness(struct lru_zone* zone)
+__lru_evict_all_nolock(struct lru_zone* zone)
{
- struct llist_header* tail = zone->lead_node.prev;
- while (tail != &zone->lead_node) {
- __do_evict_lockless(zone, tail);
+ struct llist_header* tail, *curr;
+
+ tail = zone->lead_node.prev;
+ while (tail != &zone->lead_node)
+ {
+ curr = tail;
tail = tail->prev;
+ __do_evict_nolock(zone, curr);
}
}
strncpy(zone->name, name, sizeof(zone->name) - 1);
llist_init_head(&zone->lead_node);
- llist_append(&zone_lead, &zone->zones);
spinlock_init(&zone->lock);
+ spinlock_acquire(&zone_list_lock);
+ llist_append(&zone_lead, &zone->zones);
+ spinlock_release(&zone_list_lock);
+
return zone;
}
{
lock(zone);
- __lru_evict_all_lockness(zone);
+ __lru_evict_all_nolock(zone);
- if (llist_empty(&zone->lead_node)) {
+ if (llist_empty(&zone->lead_node))
+ {
llist_delete(&zone->zones);
+ unlock(zone);
vfree(zone);
return;
}
struct llist_header* tail = zone->lead_node.prev;
if (tail == &zone->lead_node) {
- return;
+ goto done;
}
- __do_evict_lockless(zone, tail);
+ __do_evict_nolock(zone, tail);
+done:
unlock(zone);
}
void
lru_evict_half(struct lru_zone* zone)
{
+ int target;
+ struct llist_header *tail, *curr;
+
lock(zone);
- int target = (int)(zone->objects / 2);
- struct llist_header* tail = zone->lead_node.prev;
+ target = (int)(zone->objects / 2);
+ tail = zone->lead_node.prev;
+
while (tail != &zone->lead_node && target > 0) {
- __do_evict_lockless(zone, tail);
+ curr = tail;
tail = tail->prev;
+
+ __do_evict_nolock(zone, curr);
target--;
}
{
lock(zone);
- __lru_evict_all_lockness(zone);
+ __lru_evict_all_nolock(zone);
zone->evict_stats.n_full++;
void
lru_remove(struct lru_zone* zone, struct lru_node* node)
{
- lock(zone);
+ if (llist_empty(&node->lru_nodes))
+ return;
- if (node->lru_nodes.next && node->lru_nodes.prev) {
- llist_delete(&node->lru_nodes);
- }
+ lock(zone);
+
+ llist_delete(&node->lru_nodes);
zone->objects--;
unlock(zone);
}
+
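+/*
+ * Background daemon: periodically walk every registered LRU zone
+ * and evict half of the objects in each.
+ */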
+static void
+__lru_pool_daemon()
+{
+ struct lru_zone *pos, *n;
+
+ while (true)
+ {
+ spinlock_acquire(&zone_list_lock);
+
+ // TODO add a watermark check before doing eviction
+ llist_for_each(pos, n, &zone_lead, zones) {
+ lru_evict_half(pos);
+ }
+
+ spinlock_release(&zone_list_lock);
+
+ kthread_sleep(10);
+ }
+}
+
+static void
+__lru_pool_init()
+{
+ // TODO make sure others are thread-safe first
+
+ // kthread_spawn((ptr_t)__lru_pool_daemon);
+}
+owloysius_fetch_init(__lru_pool_init, on_postboot)
+
+
static void
__twimap_read_lru_pool(struct twimap* map)
{
#include <lunaix/sched.h>
#include <lunaix/kpreempt.h>
#include <lunaix/kcmd.h>
+#include <lunaix/kthread.h>
#include <klibc/string.h>
}
}
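+/*
+ * Kernel thread that periodically reaps threads which exited
+ * in the detached state.
+ */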
+static void
+__thread_cleaner()
+{
+ while (true)
+ {
+ cleanup_detached_threads();
+ kthread_sleep(30);
+ }
+}
+
+
/**
 * @brief The LunaixOS kernel process; this process is always runnable.
*
void
lunad_main()
{
- spawn_kthread((ptr_t)init_platform);
-
- /*
- NOTE Kernel preemption after this point.
+ kthread_spawn((ptr_t)init_platform);
+ kthread_spawn((ptr_t)__thread_cleaner);
- More specifically, it is not a real kernel preemption (as in preemption
- happened at any point of kernel, except those marked explicitly).
- In Lunaix, things are designed in an non-preemptive fashion, we implement
- kernel preemption the other way around: only selected kernel functions which,
- of course, with great care of preemptive assumption, will goes into kernel
- thread (which is preemptive!)
- */
-
- set_preemption();
while (1)
{
- cleanup_detached_threads();
yield_current();
}
}
fail("unexpected return from scheduler");
}
-__DEFINE_LXSYSCALL1(unsigned int, sleep, unsigned int, seconds)
-{
- if (!seconds) {
- return 0;
- }
-
- time_t systime = clock_systime() / 1000;
- struct haybed* bed = &current_thread->sleep;
-
- if (bed->wakeup_time) {
- return (bed->wakeup_time - systime);
- }
-
- bed->wakeup_time = systime + seconds;
-
- if (llist_empty(&bed->sleepers)) {
- llist_append(&sched_ctx.sleepers, &bed->sleepers);
- }
-
- store_retval(seconds);
-
- block_current_thread();
- schedule();
-
- return 0;
-}
-
__DEFINE_LXSYSCALL1(unsigned int, alarm, unsigned int, seconds)
{
struct haybed* bed = &current_thread->sleep;
proc->root = vfs_sysroot;
proc->sigreg = vzalloc(sizeof(struct sigregistry));
- proc->fdtable = vzalloc(sizeof(struct v_fdtable));
+ proc->fdtable = fdtable_create();
proc->mm = procvm_create(proc);
// If the parent process is either terminated or being destroyed,
// or if the parent was created after this process, then this process is an orphan.
return proc_terminated(parent) || parent->created > proc->created;
-}
\ No newline at end of file
+}
+
return align_stack(ptep_va(ptep, LFT_SIZE) - 1);
}
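+/*
+ * Put the current thread to sleep for `seconds`. Returns the requested
+ * duration, the remaining time if the thread is already sleeping, or 0
+ * if `seconds` is 0. The caller must invoke the scheduler to actually
+ * give up the cpu.
+ */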
+static int
+__thread_putsleep(int seconds)
+{
+ if (!seconds) {
+ return 0;
+ }
+
+ struct scheduler* sched;
+ time_t systime;
+ struct haybed* bed;
+
+ sched = scheduler();
+ systime = clock_systime() / 1000;
+ bed = &current_thread->sleep;
+
+ if (bed->wakeup_time) {
+ return (bed->wakeup_time - systime);
+ }
+
+ bed->wakeup_time = systime + seconds;
+
+ if (llist_empty(&bed->sleepers)) {
+ llist_append(&sched->sleepers, &bed->sleepers);
+ }
+
+ block_current_thread();
+ return seconds;
+}
+
void
thread_release_mem(struct thread* thread)
{
stats->last_entry = now;
}
+void
+kthread_spawn(ptr_t entry)
+{
+ assert(kernel_process(__current));
+
+ struct thread* th = create_thread(__current, false);
+
+ assert(th);
+ start_thread(th, entry);
+ detach_thread(th);
+}
+
+void
+kthread_sleep(int seconds)
+{
+ if (__thread_putsleep(seconds))
+ yield_current();
+}
+
__DEFINE_LXSYSCALL3(int, th_create, tid_t*, tid,
struct uthread_param*, thparam, void*, entry)
{
return 0;
}
+
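+/*
+ * sleep(2): the remaining sleep time is reported through the
+ * stored return value.
+ */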
+__DEFINE_LXSYSCALL1(unsigned int, sleep, unsigned int, seconds)
+{
+ int sec;
+
+ sec = __thread_putsleep(seconds);
+ store_retval(sec);
+
+ if (sec)
+ schedule();
+
+ return 0;
+}
\ No newline at end of file