#include <lunaix/process.h>
#include <lunaix/sched.h>
#include <lunaix/syscall.h>
#include <lunaix/syscall_utils.h>
#include <lunaix/mm/mmap.h>
#include <lunaix/mm/page.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/syslog.h>

#include <usr/lunaix/threads.h>

#include <sys/mm/mm_defs.h>
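
/*
 * Thread lifecycle management: per-thread user and kernel stack
 * allocation, thread creation and startup, teardown, and the th_*
 * syscall family exposed through usr/lunaix/threads.h.
 */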
static void
inject_guardian_page(ptr_t vm_mnt, ptr_t va)
{
    // map the page with no backing frame, so any access traps
    vmm_set_mapping(vm_mnt, PG_ALIGN(va), 0, 0, VMAP_GUARDPAGE);
}
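
/*
 * Carve out a user stack region for the next thread. Stacks are laid
 * out downwards from USR_STACK_END, one USR_STACK_SIZE slot per thread,
 * indexed by the process's current thread count.
 */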
static ptr_t
__alloc_user_thread_stack(struct proc_info* proc,
                          struct mm_region** stack_region, ptr_t vm_mnt)
{
    ptr_t th_stack_top = (proc->thread_count + 1) * USR_STACK_SIZE;
    th_stack_top = ROUNDUP(USR_STACK_END - th_stack_top, MEM_PAGE);

    struct mm_region* vmr;
    struct proc_mm* mm = vmspace(proc);
    struct mmap_param param = { .vms_mnt = vm_mnt,
                                .pvms = mm,
                                .mlen = USR_STACK_SIZE,
                                .proct = PROT_READ | PROT_WRITE,
                                .flags = MAP_ANON | MAP_PRIVATE,
                                .type = REGION_TYPE_STACK };

    int errno = mmap_user((void**)&th_stack_top, &vmr,
                          th_stack_top, NULL, &param);
    if (errno) {
        WARN("failed to create user thread stack: %d", errno);
        return 0;
    }

    // Pre-allocate the page containing the stack top, to avoid an
    // immediate trap into the kernel upon thread execution
    ptr_t pa = pmm_alloc_page(0);
    ptr_t stack_top = align_stack(th_stack_top + USR_STACK_SIZE - 1);

    vmm_set_mapping(vm_mnt, PG_ALIGN(stack_top),
                    pa, region_ptattr(vmr), 0);

    // the lowest page of the region catches stack overflow
    inject_guardian_page(vm_mnt, vmr->start);

    *stack_region = vmr;
    return stack_top;
}
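
/*
 * Find a free kernel-stack slot within [KSTACK_AREA, KSTACK_AREA_END),
 * scanning from the top. Each slot spans KSTACK_SIZE with its lowest
 * page reserved as a guardian page; the remaining pages are backed by
 * one contiguous physical allocation.
 */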
static ptr_t
__alloc_kernel_thread_stack(struct proc_info* proc, ptr_t vm_mnt)
{
    struct v_mapping mapping;
    ptr_t kstack = PG_ALIGN(KSTACK_AREA_END - KSTACK_SIZE);

    while (kstack >= KSTACK_AREA) {
        // the first page in a kernel stack is the guardian page
        if (!vmm_lookupat(vm_mnt, kstack + MEM_PAGE, &mapping)
            || !PG_IS_PRESENT(mapping.flags))
        {
            break;
        }

        kstack -= KSTACK_SIZE;
    }

    if (kstack < KSTACK_AREA) {
        WARN("failed to create kernel stack: max stack num reached\n");
        return 0;
    }

    ptr_t pa = pmm_alloc_cpage(PN(KSTACK_SIZE) - 1, 0);
    if (!pa) {
        WARN("failed to create kernel stack: nomem\n");
        return 0;
    }

    inject_guardian_page(vm_mnt, kstack);
    for (size_t i = MEM_PAGE, j = 0; i < KSTACK_SIZE;
         i += MEM_PAGE, j += MEM_PAGE)
    {
        vmm_set_mapping(vm_mnt, kstack + i, pa + j, PG_PREM_RW, 0);
    }

    return align_stack(kstack + KSTACK_SIZE - 1);
}
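
/*
 * Release all stack memory owned by a thread: unmap every page of its
 * kernel stack (guardian page included) and, if present, the entire
 * user stack region.
 */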
void
thread_release_mem(struct thread* thread, ptr_t vm_mnt)
{
    for (size_t i = 0; i < KSTACK_SIZE; i += MEM_PAGE) {
        ptr_t stack_page = PG_ALIGN(thread->kstack - i);
        vmm_del_mapping(vm_mnt, stack_page);
    }

    if (thread->ustack) {
        // stack regions are always page aligned
        if ((thread->ustack->start & 0xfff)) {
            fail("invalid ustack struct");
        }

        mem_unmap_region(vm_mnt, thread->ustack);
    }
}
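
/*
 * Create a thread for proc: optionally allocate a user stack, always a
 * kernel stack, then the thread control block itself. Returns NULL on
 * failure.
 */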
struct thread*
create_thread(struct proc_info* proc, ptr_t vm_mnt, bool with_ustack)
{
    struct mm_region* ustack_region = NULL;
    if (with_ustack
        && !(__alloc_user_thread_stack(proc, &ustack_region, vm_mnt))) {
        return NULL;
    }

    ptr_t kstack = __alloc_kernel_thread_stack(proc, vm_mnt);
    if (!kstack) {
        if (ustack_region) {
            mem_unmap_region(vm_mnt, ustack_region);
        }
        return NULL;
    }

    struct thread* th = alloc_thread(proc);
    if (!th) {
        return NULL;
    }

    th->kstack = kstack;
    th->ustack = ustack_region;

    return th;
}
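
/*
 * Prepare the thread's initial execution context. A user-space entry
 * point gets a user-mode transfer frame anchored at the top of its user
 * stack; a kernel-space entry runs entirely on the kernel stack.
 */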
void
start_thread(struct thread* th, ptr_t vm_mnt, ptr_t entry)
{
    struct transfer_context transfer;
    if (!kernel_addr(entry)) {
        ptr_t ustack_top = align_stack(th->ustack->end - 1);
        ustack_top -= 16; // reserve 16 bytes for the injected parameter
        thread_create_user_transfer(&transfer, th->kstack, ustack_top, entry);

        th->ustack_top = ustack_top;
    } else {
        thread_create_kernel_transfer(&transfer, th->kstack, entry);
    }

    inject_transfer_context(vm_mnt, &transfer);
    th->intr_ctx = (isr_param*)transfer.inject;
}
void
exit_thread(void* val) {
    terminate_current_thread((ptr_t)val);
    schedule();
}
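
/*
 * Look up a thread by tid via a linear scan of the process's
 * sibling-thread list; returns NULL if no such thread exists.
 */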
struct thread*
thread_find(struct proc_info* proc, tid_t tid)
{
    struct thread *pos, *n;
    llist_for_each(pos, n, &proc->threads, proc_sibs)
    {
        if (pos->tid == tid) {
            return pos;
        }
    }

    return NULL;
}
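
/* user-facing threading syscalls */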
__DEFINE_LXSYSCALL4(int, th_create, tid_t*, tid, struct uthread_info*, thinfo,
                    void*, entry, void*, param)
{
    struct thread* th = create_thread(__current, VMS_SELF, true);
    if (!th) {
        return ENOMEM;
    }

    start_thread(th, VMS_SELF, (ptr_t)entry);

    // place the caller-supplied parameter in the 16-byte slot reserved
    // above the user stack top
    ptr_t ustack_top = th->ustack_top;
    *((void**)ustack_top) = param;

    thinfo->th_stack_sz = region_size(th->ustack);
    thinfo->th_stack_top = (void*)ustack_top;

    *tid = th->tid;
    return 0;
}
__DEFINE_LXSYSCALL(tid_t, th_self)
{
    return current_thread->tid;
}
__DEFINE_LXSYSCALL1(void, th_exit, void*, val)
{
    exit_thread(val);
}
__DEFINE_LXSYSCALL2(int, th_join, tid_t, tid, void**, val_ptr)
{
    struct thread* th = thread_find(__current, tid);
    if (!th) {
        return EINVAL;
    }

    // joining oneself is a guaranteed deadlock
    if (th == current_thread) {
        return EDEADLK;
    }

    while (!proc_terminated(th)) {
        sched_pass();
    }

    if (val_ptr) {
        *val_ptr = (void*)th->exit_val;
    }

    destory_thread(VMS_SELF, th);
    return 0;
}
__DEFINE_LXSYSCALL1(int, th_detach, tid_t, tid)
{
    // cannot detach the only thread
    if (__current->thread_count == 1) {
        return EINVAL;
    }

    struct thread* th = thread_find(__current, tid);
    if (!th) {
        return EINVAL;
    }

    detach_thread(th);
    return 0;
}
__DEFINE_LXSYSCALL2(int, th_kill, tid_t, tid, int, signum)
{
    struct thread* target = thread_find(__current, tid);
    if (!target) {
        return EINVAL;
    }

    if (signum > _SIG_NUM) {
        return EINVAL;
    }

    thread_setsignal(target, signum);
    return 0;
}