#include <asm/mempart.h>

#include <lunaix/fs/taskfs.h>
#include <lunaix/mm/cake.h>
#include <lunaix/mm/mmap.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/mm/procvm.h>
#include <lunaix/process.h>
#include <lunaix/sched.h>
#include <lunaix/signal.h>
#include <lunaix/spike.h>
#include <lunaix/status.h>
#include <lunaix/syscall.h>
#include <lunaix/syslog.h>
#include <lunaix/hart_state.h>
#include <lunaix/kpreempt.h>

#include <klibc/string.h>
struct thread empty_thread_obj;

volatile struct proc_info* __current = NULL;
volatile struct thread* current_thread = &empty_thread_obj;

struct scheduler sched_ctx;

struct cake_pile *proc_pile, *thread_pile;

#define root_process (sched_ctx.procs[1])
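/*
 * Bootstrap the scheduler: create the cake allocator piles backing
 * struct proc_info and struct thread (zero-initialised on grab),
 * allocate the process table and initialise the sleeper list.
 */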
    proc_pile = cake_new_pile("proc", sizeof(struct proc_info), 1, 0);
    thread_pile = cake_new_pile("thread", sizeof(struct thread), 1, 0);
    cake_set_constructor(proc_pile, cake_ctor_zeroing);
    cake_set_constructor(thread_pile, cake_ctor_zeroing);

    sched_ctx = (struct scheduler){
        .procs = vzalloc(PROC_TABLE_SIZE), .ptable_len = 0, .procs_index = 0};

    llist_init_head(&sched_ctx.sleepers);
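/*
 * Hand the CPU over to the given thread: mark the thread and its owning
 * process as running, mount the process virtual memory space and switch
 * into it. This function does not return.
 */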
run(struct thread* thread)
{
    thread->state = PS_RUNNING;
    thread->process->state = PS_RUNNING;
    thread->process->th_active = thread;

    procvm_mount_self(vmspace(thread->process));
    set_current_executing(thread);

    fail("unexpected return from switching");
}
/*
 * Currently, we do not allow a thread to destroy itself: doing so would
 * tear down the kernel stack it is still running on, which is disastrous.
 * As a compromise, terminated detached threads are swept up periodically
 * from a preemptible kernel thread.
 */
cleanup_detached_threads()
{
    // XXX maybe a lock on sched_ctx would be more appropriate?
    cpu_disable_interrupt();

    struct thread *pos, *n;
    llist_for_each(pos, n, sched_ctx.threads, sched_sibs) {
        if (likely(!proc_terminated(pos) || !thread_detached(pos))) {
            continue;
        }

        struct proc_mm* mm = vmspace(pos->process);

    INFO("cleaned %d terminated detached thread(s)", i);

    cpu_enable_interrupt();
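/*
 * Decide whether a thread may be picked by the scheduler, taking the
 * thread state, pending signals (SIGSTOP/SIGCONT handling) and the
 * runnability of the owning process into account.
 */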
can_schedule(struct thread* thread)
{
    if (proc_terminated(thread)) {
        return false;
    }

    if (preempt_check_stalled(thread)) {
        thread_flags_set(thread, TH_STALLED);
    }

    if (unlikely(kernel_process(thread->process))) {
        // a kernel process is always runnable
        return thread->state == PS_READY;
    }

    struct sigctx* sh = &thread->sigctx;

    if ((thread->state & PS_PAUSED)) {
        return !!(sh->sig_pending & ~1);
    }

    if ((thread->state & PS_BLOCKED)) {
        return sigset_test(sh->sig_pending, _SIGINT);
    }

    if (sigset_test(sh->sig_pending, _SIGSTOP)) {
        // If one thread of a process receives SIGSTOP, all of its other
        // threads are stopped as well (as per POSIX.1-2008); in that case
        // the entire process is stopped.
        thread->state = PS_STOPPED;
    }

    if (sigset_test(sh->sig_pending, _SIGCONT)) {
        thread->state = PS_READY;
    }

    return (thread->state == PS_READY)
           && proc_runnable(thread->process);
}
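/*
 * Walk the sleeper list: wake threads whose sleep deadline has passed and
 * post SIGALRM for expired alarms; entries with neither a pending wakeup
 * nor an alarm are dropped from the list.
 */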
    struct thread *pos, *n;
    time_t now = clock_systime() / 1000;

    llist_for_each(pos, n, &sched_ctx.sleepers, sleep.sleepers)
    {
        if (proc_terminated(pos)) {
            continue;
        }

        time_t wtime = pos->sleep.wakeup_time;
        time_t atime = pos->sleep.alarm_time;

        if (wtime && now >= wtime) {
            pos->sleep.wakeup_time = 0;
            pos->state = PS_READY;
        }

        if (atime && now >= atime) {
            pos->sleep.alarm_time = 0;
            thread_setsignal(pos, _SIGALRM);
        }

        if (!wtime && !atime) {
            llist_delete(&pos->sleep.sleepers);
        }
    }
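/*
 * The round-robin scheduling pass: demote the currently running thread
 * back to ready, unmount its address space, then scan the global thread
 * list for the next schedulable thread and switch to it.
 */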
    assert(sched_ctx.ptable_len && sched_ctx.ttable_len);

    // Context switching is extremely sensitive! We do not want any
    // interrupt to disturb the ordering of the stack...
    if (!(current_thread->state & ~PS_RUNNING)) {
        current_thread->state = PS_READY;
        __current->state = PS_READY;
    }

    procvm_unmount_self(vmspace(__current));

    // round-robin scheduler
    struct thread* current = current_thread;
    struct thread* to_check = current;

    while (1) {
        to_check = list_next(to_check, struct thread, sched_sibs);

        if (can_schedule(to_check)) {
            break;
        }

        if (to_check == current) {
            // FIXME do something less lethal here
            fail("Ran out of threads!");
        }
    }

    sched_ctx.procs_index = to_check->process->pid;
    run(to_check);

    fail("unexpected return from scheduler");
}
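/*
 * sleep(2): record the wakeup deadline on the calling thread, put it on
 * the sleeper list and block until the deadline expires.
 */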
__DEFINE_LXSYSCALL1(unsigned int, sleep, unsigned int, seconds)
{
    time_t systime = clock_systime() / 1000;
    struct haybed* bed = &current_thread->sleep;

    if (bed->wakeup_time) {
        return (bed->wakeup_time - systime);
    }

    bed->wakeup_time = systime + seconds;

    if (llist_empty(&bed->sleepers)) {
        llist_append(&sched_ctx.sleepers, &bed->sleepers);
    }

    store_retval(seconds);

    block_current_thread();
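/*
 * alarm(2): arm (or cancel, when seconds == 0) a SIGALRM deadline for the
 * calling thread and return the seconds remaining on any previously armed
 * alarm.
 */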
__DEFINE_LXSYSCALL1(unsigned int, alarm, unsigned int, seconds)
{
    struct haybed* bed = &current_thread->sleep;
    time_t prev_ddl = bed->alarm_time;
    time_t now = clock_systime() / 1000;

    bed->alarm_time = seconds ? now + seconds : 0;

    if (llist_empty(&bed->sleepers)) {
        llist_append(&sched_ctx.sleepers, &bed->sleepers);
    }

    return prev_ddl ? (prev_ddl - now) : 0;
}
__DEFINE_LXSYSCALL1(void, exit, int, status)
{
    terminate_current(status);
}

__DEFINE_LXSYSCALL(void, yield)
_wait(pid_t wpid, int* status, int options);

__DEFINE_LXSYSCALL1(pid_t, wait, int*, status)
{
    return _wait(-1, status, 0);
}

__DEFINE_LXSYSCALL3(pid_t, waitpid, pid_t, pid, int*, status, int, options)
{
    return _wait(pid, status, options);
}

__DEFINE_LXSYSCALL(int, geterrno)
{
    return current_thread->syscall_ret;
}
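/*
 * Common back-end of wait(2) and waitpid(2): scan the children of the
 * calling process for one matching wpid that has terminated (or stopped,
 * with WUNTRACED), report its exit status and reap it.
 */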
_wait(pid_t wpid, int* status, int options)
{
    pid_t cur = __current->pid;
    int status_flags = 0;
    struct proc_info *proc, *n;

    if (llist_empty(&__current->children)) {

    wpid = wpid ? wpid : -__current->pgid;

    llist_for_each(proc, n, &__current->children, siblings)
    {
        if (!~wpid || proc->pid == wpid || proc->pgid == -wpid) {
            if (proc->state == PS_TERMNAT && !options) {
                status_flags |= PEXITTERM;

            if (proc->state == PS_READY && (options & WUNTRACED)) {
                status_flags |= PEXITSTOP;

    if ((options & WNOHANG)) {

    *status = PEXITNUM(status_flags, proc->exit_code);

    return destroy_process(proc->pid);
    for (; i < sched_ctx.ptable_len && sched_ctx.procs[i]; i++)
        ;

    if (unlikely(i == MAX_PROCESS)) {
        fail("Panic in Ponyville shimmer!");
    }
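/*
 * Allocate a thread object for the given process, assign a thread id and
 * leave it in the PS_CREATED state; the allocation is refused once the
 * per-process thread limit (MAX_THREAD_PP) is reached.
 */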
alloc_thread(struct proc_info* process) {
    if (process->thread_count >= MAX_THREAD_PP) {
        return NULL;
    }

    struct thread* th = cake_grab(thread_pile);

    th->process = process;
    th->created = clock_systime();

    // FIXME we need a better tid allocation method!
    th->tid = (th->created ^ ((ptr_t)th)) % MAX_THREAD_PP;

    th->state = PS_CREATED;

    llist_init_head(&th->sleep.sleepers);
    llist_init_head(&th->sched_sibs);
    llist_init_head(&th->proc_sibs);
    waitq_init(&th->waitqueue);
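/*
 * Allocate and pre-initialise a process object: grab a free pid slot,
 * set up the signal registry, file descriptor table and virtual memory
 * space, then register it into the global process table.
 */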
    pid_t i = get_free_pid();

    if (i == sched_ctx.ptable_len) {
        sched_ctx.ptable_len++;
    }

    struct proc_info* proc = cake_grab(proc_pile);

    proc->pid = i;
    proc->state = PS_CREATED;
    proc->created = clock_systime();
    proc->pgid = proc->pid;
    proc->root = vfs_sysroot;

    proc->sigreg = vzalloc(sizeof(struct sigregistry));
    proc->fdtable = vzalloc(sizeof(struct v_fdtable));
    proc->mm = procvm_create(proc);

    llist_init_head(&proc->tasks);
    llist_init_head(&proc->children);
    llist_init_head(&proc->grp_member);
    llist_init_head(&proc->threads);

    iopoll_init(&proc->pollctx);

    sched_ctx.procs[i] = proc;
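/*
 * Publish a newly created thread to the scheduler: attach it to its
 * process and to the global thread list, then mark it ready to run.
 */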
commit_thread(struct thread* thread) {
    struct proc_info* process = thread->process;

    assert(process && !proc_terminated(process));

    llist_append(&process->threads, &thread->proc_sibs);

    if (sched_ctx.threads) {
        llist_append(sched_ctx.threads, &thread->sched_sibs);
    } else {
        sched_ctx.threads = &thread->sched_sibs;
    }

    sched_ctx.ttable_len++;
    process->thread_count++;
    thread->state = PS_READY;
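/*
 * Publish a newly created process: give it a parent (defaulting to the
 * root process), link it into the global process list and its parent's
 * children, then mark it ready.
 */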
commit_process(struct proc_info* process)
{
    assert(process == sched_ctx.procs[process->pid]);
    assert(process->state == PS_CREATED);

    // a process with no explicit parent becomes a child of the
    // first process (pid=1)
    if (!process->parent) {
        if (likely(!kernel_process(process))) {
            process->parent = root_process;
        } else {
            process->parent = process;
        }
    }

    assert(!proc_terminated(process->parent));

    if (sched_ctx.proc_list) {
        llist_append(sched_ctx.proc_list, &process->tasks);
    } else {
        sched_ctx.proc_list = &process->tasks;
    }

    llist_append(&process->parent->children, &process->siblings);

    process->state = PS_READY;
destory_thread(struct thread* thread)
{
    cake_ensure_valid(thread);

    struct proc_info* proc = thread->process;

    llist_delete(&thread->sched_sibs);
    llist_delete(&thread->proc_sibs);
    llist_delete(&thread->sleep.sleepers);
    waitq_cancel_wait(&thread->waitqueue);

    thread_release_mem(thread);

    proc->thread_count--;
    sched_ctx.ttable_len--;

    cake_release(thread_pile, thread);
orphan_children(struct proc_info* proc)
{
    struct proc_info *root;
    struct proc_info *pos, *n;

    root = root_process;

    llist_for_each(pos, n, &proc->children, siblings) {
        llist_append(&root->children, &pos->siblings);
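/*
 * Final tear-down of a terminated process: detach it from all global
 * lists, invalidate its taskfs node, close open files, free the signal
 * registry, destroy its threads, re-parent its children and release the
 * address space and the process object itself.
 */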
delete_process(struct proc_info* proc)
{
    pid_t pid = proc->pid;
    struct proc_mm* mm = vmspace(proc);

    assert(pid); // long live the pid0 !!

    sched_ctx.procs[pid] = NULL;

    llist_delete(&proc->siblings);
    llist_delete(&proc->grp_member);
    llist_delete(&proc->tasks);

    taskfs_invalidate(pid);

    vfs_unref_dnode(proc->cwd);

    for (size_t i = 0; i < VFS_MAX_FD; i++) {
        struct v_fd* fd = proc->fdtable->fds[i];
        if (!fd)
            continue;

        vfs_pclose(fd->file, pid);
    }

    vfree(proc->fdtable);

    signal_free_registry(proc->sigreg);

    struct thread *pos, *n;
    llist_for_each(pos, n, &proc->threads, proc_sibs) {
        // terminate and destroy all threads unconditionally
        destory_thread(pos);
    }

    orphan_children(proc);

    procvm_unmount_release(mm);

    cake_release(proc_pile, proc);
destroy_process(pid_t pid)
{
    pid_t index = pid;
    if (index <= 0 || index > sched_ctx.ptable_len) {
        syscall_result(EINVAL);
        return -1;
    }

    struct proc_info* proc = sched_ctx.procs[index];
    delete_process(proc);
terminate_proc_only(struct proc_info* proc, int exit_code) {
    assert(proc->pid != 0);

    proc->state = PS_TERMNAT;
    proc->exit_code = exit_code;

    proc_setsignal(proc->parent, _SIGCHLD);
}

terminate_thread(struct thread* thread, ptr_t val) {
    thread->exit_val = val;
    thread->state = PS_TERMNAT;

    struct proc_info* proc = thread->process;
    if (proc->thread_count == 1) {
        terminate_proc_only(thread->process, 0);
    }
}

terminate_current_thread(ptr_t val) {
    terminate_thread(current_thread, val);
}
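/*
 * Terminate a whole process: mark it terminated, notify the parent with
 * SIGCHLD and flag every one of its threads for termination.
 */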
terminate_proccess(struct proc_info* proc, int exit_code) {
    assert(!kernel_process(proc));

    if (proc->pid == 1) {
        fail("Attempt to kill init");
    }

    terminate_proc_only(proc, exit_code);

    struct thread *pos, *n;
    llist_for_each(pos, n, &proc->threads, proc_sibs) {
        pos->state = PS_TERMNAT;
    }
}

terminate_current(int exit_code)
{
    terminate_proccess(__current, exit_code);
}
get_process(pid_t pid)
{
    pid_t index = pid;
    if (index < 0 || index > sched_ctx.ptable_len) {
        return NULL;
    }
    return sched_ctx.procs[index];
orphaned_proc(pid_t pid)
{
    if (pid >= sched_ctx.ptable_len)
        return false;

    struct proc_info* proc = sched_ctx.procs[pid];
    struct proc_info* parent = proc->parent;

    // If the parent is either terminated or being destroyed, or the
    // parent was created after this process, then this process is an
    // orphan.
    return proc_terminated(parent) || parent->created > proc->created;