#include <lunaix/syscall.h>
#include <lunaix/syscall_utils.h>
#include <lunaix/mm/mmap.h>
-#include <lunaix/mm/vmm.h>
-#include <lunaix/mm/pmm.h>
+#include <lunaix/mm/page.h>
#include <lunaix/syslog.h>
+#include <lunaix/kpreempt.h>
#include <usr/lunaix/threads.h>
-#include <sys/abi.h>
-#include <sys/mm/mm_defs.h>
+#include <asm/abi.h>
+#include <asm/mm_defs.h>
LOG_MODULE("THREAD")
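+/*
+ * Carve out the stack for a new user thread: the slot sits below
+ * USR_STACK_END, indexed by the process's current thread count, and is
+ * mapped as an anonymous private region of USR_STACK_SIZE_THREAD bytes
+ * with a guard PTE planted at its lowest page.
+ */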
static ptr_t
-__alloc_user_thread_stack(struct proc_info* proc, struct mm_region** stack_region, ptr_t vm_mnt)
+__alloc_user_thread_stack(struct proc_info* proc,
+ struct mm_region** stack_region, ptr_t vm_mnt)
{
- ptr_t th_stack_top = (proc->thread_count + 1) * USR_STACK_SIZE;
- th_stack_top = ROUNDUP(USR_STACK_END - th_stack_top, MEM_PAGE);
+ ptr_t th_stack_top = (proc->thread_count + 1) * USR_STACK_SIZE_THREAD;
+ th_stack_top = ROUNDUP(USR_STACK_END - th_stack_top, PAGE_SIZE);
struct mm_region* vmr;
struct proc_mm* mm = vmspace(proc);
struct mmap_param param = { .vms_mnt = vm_mnt,
.pvms = mm,
- .mlen = USR_STACK_SIZE,
+ .mlen = USR_STACK_SIZE_THREAD,
.proct = PROT_READ | PROT_WRITE,
.flags = MAP_ANON | MAP_PRIVATE,
.type = REGION_TYPE_STACK };
- int errno = mmap_user((void**)&th_stack_top, &vmr, th_stack_top, NULL, &param);
-
+ int errno;
+
+ errno = mmap_user((void**)&th_stack_top, &vmr, th_stack_top, NULL, &param);
if (errno) {
WARN("failed to create user thread stack: %d", errno);
return 0;
}
- set_pte(mkptep_va(vm_mnt, vmr->start), guard_pte);
+ pte_t* guardp = mkptep_va(vm_mnt, vmr->start);
+ set_pte(guardp, guard_pte);
*stack_region = vmr;
- ptr_t stack_top = align_stack(th_stack_top + USR_STACK_SIZE - 1);
+ ptr_t stack_top = align_stack(th_stack_top + USR_STACK_SIZE_THREAD - 1);
return stack_top;
}
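+/*
+ * Find and map a kernel stack inside the kernel stack area, scanning
+ * top-down in strides of KSTACK_PAGES + 1 PTEs (one guard page plus
+ * the stack body) until a slot with a null guard PTE is found.
+ */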
static ptr_t
__alloc_kernel_thread_stack(struct proc_info* proc, ptr_t vm_mnt)
{
- pfn_t kstack_top = leaf_count(KSTACK_AREA_END);
+ pfn_t kstack_top = pfn(KSTACK_AREA_END);
pfn_t kstack_end = pfn(KSTACK_AREA);
pte_t* ptep = mkptep_pn(vm_mnt, kstack_top);
while (ptep_pfn(ptep) > kstack_end) {
ptep -= KSTACK_PAGES;
- // first page in the kernel stack is guardian page
- pte_t pte = *(ptep + 1);
+ pte_t pte = pte_at(ptep);
if (pte_isnull(pte)) {
goto found;
}
+
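+ // slot occupied: also step over its guard PTE to keep the KSTACK_PAGES + 1 stride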
+ ptep--;
}
WARN("failed to create kernel stack: max stack num reached\n");
return 0;
found:;
- ptr_t pa = pmm_alloc_cpage(KSTACK_PAGES - 1, 0);
+ unsigned int po = count_order(KSTACK_PAGES);
+ struct leaflet* leaflet = alloc_leaflet(po);
- if (!pa) {
+ if (!leaflet) {
WARN("failed to create kernel stack: nomem\n");
return 0;
}
- set_pte(ptep, guard_pte);
-
- pte_t pte = mkpte(pa, KERNEL_DATA);
- vmm_set_ptes_contig(ptep + 1, pte, LFT_SIZE, KSTACK_PAGES - 1);
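+ // plant the guard page at the lowest PTE, then map the stack body just above it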
+ set_pte(ptep++, guard_pte);
+ ptep_map_leaflet(ptep, mkpte_prot(KERNEL_DATA), leaflet);
ptep += KSTACK_PAGES;
return align_stack(ptep_va(ptep, LFT_SIZE) - 1);
}
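+/*
+ * Put the current thread to sleep for the given number of seconds:
+ * record the wake-up time on its haybed, enlist it on the scheduler's
+ * sleeper queue and block it. Returns 0 when no sleep is needed, or
+ * the time left if a wake-up is already pending.
+ */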
+static int
+__thread_putsleep(int seconds)
+{
+ if (!seconds) {
+ return 0;
+ }
+
+ struct scheduler* sched;
+ time_t systime;
+ struct haybed* bed;
+
+ sched = scheduler();
+ systime = clock_systime() / 1000;
+ bed = &current_thread->sleep;
+
+ if (bed->wakeup_time) {
+ return (bed->wakeup_time - systime);
+ }
+
+ bed->wakeup_time = systime + seconds;
+
+ if (llist_empty(&bed->sleepers)) {
+ llist_append(&sched->sleepers, &bed->sleepers);
+ }
+
+ block_current_thread();
+ return seconds;
+}
+
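+/*
+ * Release the memory held by a thread: clear the kernel stack's guard
+ * PTE, unmap and return its backing leaflet, then free the user stack
+ * region if one exists.
+ */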
void
thread_release_mem(struct thread* thread)
{
+ struct leaflet* leaflet;
struct proc_mm* mm = vmspace(thread->process);
ptr_t vm_mnt = mm->vm_mnt;
assert(vm_mnt);
pte_t* ptep = mkptep_va(vm_mnt, thread->kstack);
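+ // recover the backing leaflet from the stack-top PTE before tearing it down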
+ leaflet = pte_leaflet(*ptep);
- ptep -= KSTACK_PAGES - 1;
- vmm_unset_ptes(ptep, KSTACK_PAGES);
+ ptep -= KSTACK_PAGES;
+ set_pte(ptep, null_pte);
+ ptep_unmap_leaflet(ptep + 1, leaflet);
+
+ leaflet_return(leaflet);
if (thread->ustack) {
if ((thread->ustack->start & 0xfff)) {
th->kstack = kstack;
th->ustack = ustack_region;
+
+ if (ustack_region) {
+ th->ustack_top = align_stack(ustack_region->end - 1);
+ }
return th;
}
assert(mm->vm_mnt);
- struct transfer_context transfer;
+ struct hart_transition transition;
if (!kernel_addr(entry)) {
assert(th->ustack);
- ptr_t ustack_top = align_stack(th->ustack->end - 1);
- ustack_top -= 16; // pre_allocate a 16 byte for inject parameter
- thread_create_user_transfer(&transfer, th->kstack, ustack_top, entry);
-
- th->ustack_top = ustack_top;
+ hart_user_transfer(&transition, th->kstack, th->ustack_top, entry);
}
else {
- thread_create_kernel_transfer(&transfer, th->kstack, entry);
+ hart_kernel_transfer(&transition, th->kstack, entry);
}
- inject_transfer_context(mm->vm_mnt, &transfer);
- th->intr_ctx = (isr_param*)transfer.inject;
+ install_hart_transition(mm->vm_mnt, &transition);
+ th->hstate = (struct hart_state*)transition.inject;
commit_thread(th);
}
return NULL;
}
-__DEFINE_LXSYSCALL4(int, th_create, tid_t*, tid, struct uthread_info*, thinfo,
- void*, entry, void*, param)
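+/*
+ * Update per-thread scheduling statistics on every kernel boundary
+ * crossing. inbound tells whether the thread is entering or leaving
+ * the kernel; voluntary marks entries the thread initiated itself
+ * (e.g. a syscall) rather than ones forced upon it by an interrupt.
+ */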
+void
+thread_stats_update(bool inbound, bool voluntary)
{
+ struct thread_stats* stats;
+ time_t now;
+
+ now = clock_systime();
+ stats = &current_thread->stats;
+
+ stats->at_user = !kernel_context(current_thread->hstate);
+
+ if (!inbound) {
+ if (kernel_process(current_thread->process) ||
+ stats->at_user)
+ {
+ // leaving to user space, or a kernel thread exiting: a graceful exit
+ stats->last_leave = now;
+ }
+ else {
+ // exiting back into kernel code, effectively a reentry
+ stats->last_reentry = now;
+ }
+
+ stats->last_resume = now;
+ return;
+ }
+
+ stats->last_reentry = now;
+
+ if (!stats->at_user)
+ {
+ // entering from kernel, it is a kernel preempt
+ thread_stats_update_kpreempt();
+ return;
+ }
+
+ // entering from user space, a clean entrance.
+
+ if (!voluntary) {
+ stats->entry_count_invol++;
+ }
+ else {
+ stats->entry_count_vol++;
+ }
+
+ thread_stats_reset_kpreempt();
+ stats->last_entry = now;
+}
+
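+/*
+ * Spawn a detached kernel thread executing entry within the current
+ * (kernel) process.
+ */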
+void
+kthread_spawn(ptr_t entry)
+{
+ assert(kernel_process(__current));
+
+ struct thread* th = create_thread(__current, false);
+
+ assert(th);
+ start_thread(th, entry);
+ detach_thread(th);
+}
+
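+/* Sleep helper for kernel threads: block, then yield the processor. */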
+void
+kthread_sleep(int seconds)
+{
+ if (__thread_putsleep(seconds))
+ yield_current();
+}
+
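+/*
+ * th_create: create a user thread, copy the caller's uthread_param
+ * block onto the top of the new thread's user stack, then start it at
+ * entry and hand the new tid back through *tid.
+ */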
+__DEFINE_LXSYSCALL3(int, th_create, tid_t*, tid,
+ struct uthread_param*, thparam, void*, entry)
+{
+ no_preemption();
+
struct thread* th = create_thread(__current, true);
if (!th) {
return EAGAIN;
}
- start_thread(th, (ptr_t)entry);
+ ptr_t ustack_top;
+
+ ustack_top = th->ustack_top;
+ ustack_top = align_stack(ustack_top - sizeof(*thparam));
- ptr_t ustack_top = th->ustack_top;
- *((void**)ustack_top) = param;
+ memcpy((void*)ustack_top, thparam, sizeof(*thparam));
- thinfo->th_stack_sz = region_size(th->ustack);
- thinfo->th_stack_top = (void*)ustack_top;
+ th->ustack_top = ustack_top;
+ start_thread(th, (ptr_t)entry);
if (tid) {
*tid = th->tid;
}
while (!proc_terminated(th)) {
- sched_pass();
+ yield_current();
}
if (val_ptr) {
*val_ptr = (void*)th->exit_val;
}
+ no_preemption();
destory_thread(th);
return 0;
}
+
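+/*
+ * sleep: stage the requested duration as the syscall return value,
+ * then reschedule if the thread was actually put to sleep.
+ */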
+__DEFINE_LXSYSCALL1(unsigned int, sleep, unsigned int, seconds)
+{
+ int sec;
+
+ sec = __thread_putsleep(seconds);
+ store_retval(seconds);
+
+ if (sec)
+ schedule();
+
+ return 0;
+}
\ No newline at end of file