#include <lunaix/process.h>
#include <lunaix/sched.h>
#include <lunaix/syscall.h>
#include <lunaix/syscall_utils.h>
#include <lunaix/mm/mmap.h>
#include <lunaix/mm/page.h>
#include <lunaix/syslog.h>
#include <lunaix/kpreempt.h>

#include <usr/lunaix/threads.h>

#include <asm/mm_defs.h>
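
/*
 * Carve out the user-mode stack for a new thread: pick a slot below
 * USR_STACK_END based on the process's thread count, map it as a private
 * anonymous region, and pin the lowest page of the region as a guard page.
 */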
static ptr_t
__alloc_user_thread_stack(struct proc_info* proc,
                          struct mm_region** stack_region, ptr_t vm_mnt)
{
    ptr_t th_stack_top = (proc->thread_count + 1) * USR_STACK_SIZE_THREAD;
    th_stack_top = ROUNDUP(USR_STACK_END - th_stack_top, PAGE_SIZE);

    int errno;
    struct mm_region* vmr;
    struct proc_mm* mm = vmspace(proc);
    struct mmap_param param = { .vms_mnt = vm_mnt,
                                .pvms = mm,
                                .mlen = USR_STACK_SIZE_THREAD,
                                .proct = PROT_READ | PROT_WRITE,
                                .flags = MAP_ANON | MAP_PRIVATE,
                                .type = REGION_TYPE_STACK };

    errno = mmap_user((void**)&th_stack_top, &vmr, th_stack_top, NULL, &param);
    if (errno) {
        WARN("failed to create user thread stack: %d", errno);
        return 0;
    }

    // the lowest page of the stack region acts as the guard page
    pte_t* guardp = mkptep_va(vm_mnt, vmr->start);
    set_pte(guardp, guard_pte);

    *stack_region = vmr;

    ptr_t stack_top = align_stack(th_stack_top + USR_STACK_SIZE_THREAD - 1);
    return stack_top;
}
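
/*
 * Allocate a kernel stack for a new thread: scan the KSTACK_AREA window
 * top-down for a free slot, install a guard page, then map KSTACK_PAGES of
 * freshly allocated pages above it. Returns the aligned stack top, or 0 on
 * failure.
 */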
static ptr_t
__alloc_kernel_thread_stack(struct proc_info* proc, ptr_t vm_mnt)
{
    pfn_t kstack_top = pfn(KSTACK_AREA_END);
    pfn_t kstack_end = pfn(KSTACK_AREA);
    pte_t* ptep = mkptep_pn(vm_mnt, kstack_top);

    // walk down one stack-sized slot at a time until a free one is found
    while (ptep_pfn(ptep) > kstack_end) {
        ptep -= KSTACK_PAGES;

        pte_t pte = pte_at(ptep);
        if (pte_isnull(pte)) {
            break;
        }
    }

    if (ptep_pfn(ptep) <= kstack_end) {
        WARN("failed to create kernel stack: max stack num reached\n");
        return 0;
    }

    unsigned int po = count_order(KSTACK_PAGES);
    struct leaflet* leaflet = alloc_leaflet(po);
    if (!leaflet) {
        WARN("failed to create kernel stack: nomem\n");
        return 0;
    }

    set_pte(ptep++, guard_pte);
    ptep_map_leaflet(ptep, mkpte_prot(KERNEL_DATA), leaflet);

    return align_stack(ptep_va(ptep, LFT_SIZE) - 1);
}
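
/*
 * Put the calling thread to sleep for the given number of seconds. If a
 * wakeup is already pending, return the time remaining; otherwise record
 * the wakeup time, queue the thread on the scheduler's sleeper list and
 * block it.
 */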
static int
__thread_putsleep(int seconds)
{
    time_t systime;
    struct scheduler* sched;
    typeof(&current_thread->sleep) bed;

    systime = clock_systime() / 1000;
    bed = &current_thread->sleep;

    if (bed->wakeup_time) {
        return (bed->wakeup_time - systime);
    }

    bed->wakeup_time = systime + seconds;

    if (llist_empty(&bed->sleepers)) {
        llist_append(&sched->sleepers, &bed->sleepers);
    }

    block_current_thread();
    return 0;
}
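
/*
 * Release the memory held by a dead thread: unmap and free its kernel stack
 * together with the guard entry, then unmap its user stack region if it has
 * one.
 */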
void
thread_release_mem(struct thread* thread)
{
    struct leaflet* leaflet;
    struct proc_mm* mm = vmspace(thread->process);
    ptr_t vm_mnt = mm->vm_mnt;

    // Ensure we have mounted
    assert(vm_mnt);

    pte_t* ptep = mkptep_va(vm_mnt, thread->kstack);
    leaflet = pte_leaflet(*ptep);

    // clear the guard entry, then unmap and free the kernel stack pages
    ptep -= KSTACK_PAGES;
    set_pte(ptep, null_pte);
    ptep_unmap_leaflet(ptep + 1, leaflet);

    leaflet_return(leaflet);

    if (thread->ustack) {
        if ((thread->ustack->start & 0xfff)) {
            fail("invalid ustack struct");
        }

        mem_unmap_region(vm_mnt, thread->ustack);
    }
}
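
/*
 * Create a thread for the given process: allocate a kernel stack and, when
 * with_ustack is set, a user stack region. Returns NULL if any allocation
 * fails.
 */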
struct thread*
create_thread(struct proc_info* proc, bool with_ustack)
{
    struct proc_mm* mm = vmspace(proc);
    ptr_t vm_mnt = mm->vm_mnt;
    struct mm_region* ustack_region = NULL;

    if (with_ustack &&
        !(__alloc_user_thread_stack(proc, &ustack_region, vm_mnt)))
    {
        return NULL;
    }

    ptr_t kstack = __alloc_kernel_thread_stack(proc, vm_mnt);
    if (!kstack) {
        if (ustack_region) {
            mem_unmap_region(vm_mnt, ustack_region);
        }
        return NULL;
    }

    struct thread* th = alloc_thread(proc);
    if (!th) {
        return NULL;
    }

    th->kstack = kstack;
    th->ustack = ustack_region;

    if (with_ustack) {
        th->ustack_top = align_stack(ustack_region->end - 1);
    }

    return th;
}
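
/*
 * Arm a freshly created thread for its first dispatch by installing a hart
 * transition frame: user entry points start in user mode on the thread's
 * user stack, kernel entry points stay in kernel mode on the kernel stack.
 */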
void
start_thread(struct thread* th, ptr_t entry)
{
    struct proc_mm* mm = vmspace(th->process);

    struct hart_transition transition;
    if (!kernel_addr(entry)) {
        hart_user_transfer(&transition, th->kstack, th->ustack_top, entry);
    }
    else {
        hart_kernel_transfer(&transition, th->kstack, entry);
    }

    install_hart_transition(mm->vm_mnt, &transition);
    th->hstate = (struct hart_state*)transition.inject;
}

void
exit_thread(void* val)
{
    terminate_current_thread((ptr_t)val);
}
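
/* Find a thread of the given process by tid, or NULL if it does not exist. */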
struct thread*
thread_find(struct proc_info* proc, tid_t tid)
{
    struct thread *pos, *n;
    llist_for_each(pos, n, &proc->threads, proc_sibs) {
        if (pos->tid == tid) {
            return pos;
        }
    }

    return NULL;
}
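
/*
 * Account a kernel entry or exit against the current thread's statistics:
 * leave/reentry/resume timestamps, voluntary vs involuntary entry counts,
 * and kernel-preemption bookkeeping.
 */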
void
thread_stats_update(bool inbound, bool voluntary)
{
    struct thread_stats* stats;
    time_t now;

    now = clock_systime();
    stats = &current_thread->stats;

    stats->at_user = !kernel_context(current_thread->hstate);

    if (!inbound) {
        if (kernel_process(current_thread->process) || stats->at_user) {
            // exiting to user or kernel (kernel thread only), how graceful
            stats->last_leave = now;
        }
        else {
            // exiting to kernel, effectively reentry
            stats->last_reentry = now;
        }

        stats->last_resume = now;
        return;
    }

    stats->last_reentry = now;

    if (!stats->at_user) {
        // entering from kernel, it is a kernel preempt
        thread_stats_update_kpreempt();
        return;
    }

    // entering from user space, a clean entrance.
    if (!voluntary) {
        stats->entry_count_invol++;
    } else {
        stats->entry_count_vol++;
    }

    thread_stats_reset_kpreempt();
    stats->last_entry = now;
}
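
/* Spawn a kernel thread that begins execution at the given entry point. */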
struct thread*
kthread_spawn(ptr_t entry)
{
    assert(kernel_process(__current));

    struct thread* th = create_thread(__current, false);
    start_thread(th, entry);

    return th;
}

void
kthread_sleep(int seconds)
{
    if (__thread_putsleep(seconds))
        return;
}
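
/*
 * th_create syscall: create a user thread, copy the caller-supplied
 * uthread_param onto the top of its new user stack, and start it at the
 * given entry point.
 */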
__DEFINE_LXSYSCALL3(int, th_create, tid_t*, tid,
                    struct uthread_param*, thparam, void*, entry)
{
    struct thread* th = create_thread(__current, true);
    if (!th) {
        return -1;
    }

    ptr_t ustack_top;
    ustack_top = th->ustack_top;
    ustack_top = align_stack(ustack_top - sizeof(*thparam));

    memcpy((void*)ustack_top, thparam, sizeof(*thparam));

    th->ustack_top = ustack_top;
    start_thread(th, (ptr_t)entry);

    if (tid) {
        *tid = th->tid;
    }

    return 0;
}

__DEFINE_LXSYSCALL(tid_t, th_self)
{
    return current_thread->tid;
}

__DEFINE_LXSYSCALL1(void, th_exit, void*, val)
{
    exit_thread(val);
}
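
/*
 * th_join syscall: wait for the target thread to terminate and collect its
 * exit value.
 */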
__DEFINE_LXSYSCALL2(int, th_join, tid_t, tid, void**, val_ptr)
{
    struct thread* th = thread_find(__current, tid);
    if (!th) {
        return -1;
    }

    // joining oneself would deadlock
    if (th == current_thread) {
        return -1;
    }

    while (!proc_terminated(th)) {
        sched_pass();
    }

    if (val_ptr) {
        *val_ptr = (void*)th->exit_val;
    }

    return 0;
}

__DEFINE_LXSYSCALL1(int, th_detach, tid_t, tid)
{
    // can not detach the only thread
    if (__current->thread_count == 1) {
        return -1;
    }

    struct thread* th = thread_find(__current, tid);
    if (!th) {
        return -1;
    }

    return 0;
}

__DEFINE_LXSYSCALL2(int, th_kill, tid_t, tid, int, signum)
{
    struct thread* target = thread_find(__current, tid);
    if (!target) {
        return -1;
    }

    if (signum > _SIG_NUM) {
        return -1;
    }

    thread_setsignal(target, signum);

    return 0;
}

__DEFINE_LXSYSCALL1(unsigned int, sleep, unsigned int, seconds)
{
    unsigned int sec;

    sec = __thread_putsleep(seconds);
    store_retval(seconds);

    return sec;
}