#include <asm/mempart.h>
#include <lunaix/fs/taskfs.h>
#include <lunaix/mm/cake.h>
#include <lunaix/mm/mmap.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/mm/procvm.h>
#include <lunaix/process.h>
#include <lunaix/sched.h>
#include <lunaix/signal.h>
#include <lunaix/spike.h>
#include <lunaix/status.h>
#include <lunaix/syscall.h>
#include <lunaix/syslog.h>
#include <lunaix/hart_state.h>
#include <lunaix/kpreempt.h>

#include <asm-generic/isrm.h>

#include <klibc/string.h>
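/*
 * Scheduler-global state. `empty_thread_obj` is a placeholder so that
 * `current_thread` is never NULL before the first real thread runs.
 */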
struct thread empty_thread_obj;

volatile struct proc_info* __current;
volatile struct thread* current_thread = &empty_thread_obj;

struct scheduler sched_ctx;

struct cake_pile *proc_pile, *thread_pile;

#define root_process (sched_ctx.procs[1])

    proc_pile = cake_new_pile("proc", sizeof(struct proc_info), 1, 0);
    thread_pile = cake_new_pile("thread", sizeof(struct thread), 1, 0);
    cake_set_constructor(proc_pile, cake_ctor_zeroing);
    cake_set_constructor(thread_pile, cake_ctor_zeroing);

    sched_ctx = (struct scheduler){
        .procs = vzalloc(PROC_TABLE_SIZE), .ptable_len = 0, .procs_index = 0};

    llist_init_head(&sched_ctx.sleepers);
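/*
 * Hand the CPU over to `thread`: mark it and its process running, mount
 * the process's virtual memory space, and switch execution onto it.
 * Control never returns here.
 */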
run(struct thread* thread)
    thread->state = PS_RUNNING;
    thread->process->state = PS_RUNNING;
    thread->process->th_active = thread;

    procvm_mount_self(vmspace(thread->process));
    set_current_executing(thread);

    fail("unexpected return from switching");
 Currently, we do not allow a thread to destroy itself: doing so
 would tear down the kernel stack it is currently running on, which
 would be disastrous. A compromise is to periodically scan for and
 clean up such threads from a preemptible kernel thread.

cleanup_detached_threads()
    // XXX maybe a lock on sched_ctx would be the most appropriate?
    cpu_disable_interrupt();

    struct thread *pos, *n;
    llist_for_each(pos, n, sched_ctx.threads, sched_sibs) {
        if (likely(!proc_terminated(pos) || !thread_detached(pos))) {

        struct proc_mm* mm = vmspace(pos->process);

    INFO("cleaned %d terminated detached thread(s)", i);

    cpu_enable_interrupt();
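/*
 * Decide whether `thread` may be picked to run, taking termination,
 * stall detection and pending-signal semantics into account.
 */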
can_schedule(struct thread* thread)
    if (proc_terminated(thread)) {

    if (preempt_check_stalled(thread)) {
        thread_flags_set(thread, TH_STALLED);

    if (unlikely(kernel_process(thread->process))) {
        // a kernel process is always runnable
        return thread->state == PS_READY;

    struct sigctx* sh = &thread->sigctx;

    if ((thread->state & PS_PAUSED)) {
        return !!(sh->sig_pending & ~1);

    if ((thread->state & PS_BLOCKED)) {
        return sigset_test(sh->sig_pending, _SIGINT);

    if (sigset_test(sh->sig_pending, _SIGSTOP)) {
        // If one thread has a pending SIGSTOP, then we know all other
        // threads do too (as per POSIX.1-2008), in which case the
        // entire process is stopped.
        thread->state = PS_STOPPED;

    if (sigset_test(sh->sig_pending, _SIGCONT)) {
        thread->state = PS_READY;

    return (thread->state == PS_READY)
           && proc_runnable(thread->process);
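/*
 * Walk the sleeper list: wake threads whose sleep deadline has passed,
 * raise SIGALRM for expired alarms, and unlink entries that no longer
 * have any timer pending.
 */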
    struct thread *pos, *n;
    time_t now = clock_systime() / 1000;

    llist_for_each(pos, n, &sched_ctx.sleepers, sleep.sleepers)
        if (proc_terminated(pos)) {

        time_t wtime = pos->sleep.wakeup_time;
        time_t atime = pos->sleep.alarm_time;

        if (wtime && now >= wtime) {
            pos->sleep.wakeup_time = 0;
            pos->state = PS_READY;

        if (atime && now >= atime) {
            pos->sleep.alarm_time = 0;
            thread_setsignal(pos, _SIGALRM);

        if (!wtime && !atime) {

            llist_delete(&pos->sleep.sleepers);
    assert(sched_ctx.ptable_len && sched_ctx.ttable_len);

    // Context switching is extremely delicate! We don't want any
    // interrupt messing up the ordering of the stack...

    if (!(current_thread->state & ~PS_RUNNING)) {
        current_thread->state = PS_READY;
        __current->state = PS_READY;

    procvm_unmount_self(vmspace(__current));

    // round-robin scheduler

    struct thread* current = current_thread;
    struct thread* to_check = current;

        to_check = list_next(to_check, struct thread, sched_sibs);

        if (can_schedule(to_check)) {

        if (to_check == current) {
            // FIXME do something less lethal here
            fail("Ran out of threads!");

    sched_ctx.procs_index = to_check->process->pid;

    fail("unexpected return from scheduler");
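/*
 * sleep(2): put the calling thread to sleep for `seconds`. If a sleep
 * is already pending, just report the time remaining instead of
 * rearming the timer.
 */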
__DEFINE_LXSYSCALL1(unsigned int, sleep, unsigned int, seconds)

    time_t systime = clock_systime() / 1000;
    struct haybed* bed = &current_thread->sleep;

    if (bed->wakeup_time) {
        return (bed->wakeup_time - systime);

    bed->wakeup_time = systime + seconds;

    if (llist_empty(&bed->sleepers)) {
        llist_append(&sched_ctx.sleepers, &bed->sleepers);

    store_retval(seconds);

    block_current_thread();
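/*
 * alarm(2): arm (or, with seconds == 0, cancel) a SIGALRM timer for this
 * thread; returns the seconds left on any previously pending alarm.
 */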
__DEFINE_LXSYSCALL1(unsigned int, alarm, unsigned int, seconds)

    struct haybed* bed = &current_thread->sleep;
    time_t prev_ddl = bed->alarm_time;
    time_t now = clock_systime() / 1000;

    bed->alarm_time = seconds ? now + seconds : 0;

    if (llist_empty(&bed->sleepers)) {
        llist_append(&sched_ctx.sleepers, &bed->sleepers);

    return prev_ddl ? (prev_ddl - now) : 0;

__DEFINE_LXSYSCALL1(void, exit, int, status)

    terminate_current(status);

__DEFINE_LXSYSCALL(void, yield)

_wait(pid_t wpid, int* status, int options);

__DEFINE_LXSYSCALL1(pid_t, wait, int*, status)

    return _wait(-1, status, 0);

__DEFINE_LXSYSCALL3(pid_t, waitpid, pid_t, pid, int*, status, int, options)

    return _wait(pid, status, options);

__DEFINE_LXSYSCALL(int, geterrno)

    return current_thread->syscall_ret;
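/*
 * Reap a child, following waitpid(2) conventions for `wpid`:
 *   -1  : any child
 *    0  : any child within the caller's process group
 *   > 0 : the child with exactly that pid
 *   < -1: any child in the process group -wpid
 *
 * Hypothetical userspace usage, assuming POSIX-style libc wrappers over
 * the syscalls defined above:
 *
 *     int status;
 *     pid_t child = fork();
 *     if (child == 0)
 *         _exit(42);
 *     pid_t r = waitpid(child, &status, 0);  // blocks until child exits
 */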
_wait(pid_t wpid, int* status, int options)

    pid_t cur = __current->pid;
    int status_flags = 0;
    struct proc_info *proc, *n;
    if (llist_empty(&__current->children)) {

    wpid = wpid ? wpid : -__current->pgid;

    llist_for_each(proc, n, &__current->children, siblings)
        if (!~wpid || proc->pid == wpid || proc->pgid == -wpid) {
            if (proc->state == PS_TERMNAT && !options) {
                status_flags |= PEXITTERM;

            if (proc->state == PS_READY && (options & WUNTRACED)) {
                status_flags |= PEXITSTOP;

    if ((options & WNOHANG)) {

    *status = PEXITNUM(status_flags, proc->exit_code);

    return destroy_process(proc->pid);
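/*
 * Scan the process table for the lowest free pid slot.
 */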
    for (; i < sched_ctx.ptable_len && sched_ctx.procs[i]; i++)

    if (unlikely(i == MAX_PROCESS)) {
        fail("Panic in Ponyville shimmer!");
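/*
 * Allocate a thread object from the thread pile, presumably refusing
 * once the per-process cap MAX_THREAD_PP is reached.
 */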
alloc_thread(struct proc_info* process) {
    if (process->thread_count >= MAX_THREAD_PP) {

    struct thread* th = cake_grab(thread_pile);

    th->process = process;
    th->created = clock_systime();

    // FIXME we need a better tid allocation method!
    th->tid = (th->created ^ ((ptr_t)th)) % MAX_THREAD_PP;

    th->state = PS_CREATED;

    llist_init_head(&th->sleep.sleepers);
    llist_init_head(&th->sched_sibs);
    llist_init_head(&th->proc_sibs);
    waitq_init(&th->waitqueue);
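/*
 * Allocate a process slot: grab a zeroed proc_info from the pile, take
 * the next free pid, and set up its signal registry, fd table, address
 * space and bookkeeping lists.
 */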
    pid_t i = get_free_pid();

    if (i == sched_ctx.ptable_len) {
        sched_ctx.ptable_len++;

    struct proc_info* proc = cake_grab(proc_pile);

    proc->state = PS_CREATED;

    proc->created = clock_systime();
    proc->pgid = proc->pid;

    proc->sigreg = vzalloc(sizeof(struct sigregistry));
    proc->fdtable = vzalloc(sizeof(struct v_fdtable));

    proc->mm = procvm_create(proc);

    llist_init_head(&proc->tasks);
    llist_init_head(&proc->children);
    llist_init_head(&proc->grp_member);
    llist_init_head(&proc->threads);

    iopoll_init(&proc->pollctx);

    sched_ctx.procs[i] = proc;
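/*
 * Publish a freshly created thread: link it into its owning process and
 * the global scheduling ring, then mark it ready to run.
 */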
commit_thread(struct thread* thread) {
    struct proc_info* process = thread->process;

    assert(process && !proc_terminated(process));

    llist_append(&process->threads, &thread->proc_sibs);

    if (sched_ctx.threads) {
        llist_append(sched_ctx.threads, &thread->sched_sibs);

        sched_ctx.threads = &thread->sched_sibs;

    sched_ctx.ttable_len++;
    process->thread_count++;
    thread->state = PS_READY;
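/*
 * Publish a created process: give parentless processes a parent, link
 * the process into the global list and its parent's children, and mark
 * it ready.
 */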
commit_process(struct proc_info* process)

    assert(process == sched_ctx.procs[process->pid]);
    assert(process->state == PS_CREATED);

    // every parentless (user) process becomes a child of the first
    // process (pid=1); a kernel process is its own parent
    if (!process->parent) {
        if (likely(!kernel_process(process))) {
            process->parent = root_process;

            process->parent = process;

    assert(!proc_terminated(process->parent));

    if (sched_ctx.proc_list) {
        llist_append(sched_ctx.proc_list, &process->tasks);

        sched_ctx.proc_list = &process->tasks;

    llist_append(&process->parent->children, &process->siblings);

    process->state = PS_READY;
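/*
 * Tear down a thread: unlink it from the scheduler, process and sleeper
 * lists, cancel any pending wait, release its memory, and return the
 * object to the thread pile.
 */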
destory_thread(struct thread* thread)

    cake_ensure_valid(thread);

    struct proc_info* proc = thread->process;

    llist_delete(&thread->sched_sibs);
    llist_delete(&thread->proc_sibs);
    llist_delete(&thread->sleep.sleepers);
    waitq_cancel_wait(&thread->waitqueue);

    thread_release_mem(thread);

    proc->thread_count--;
    sched_ctx.ttable_len--;

    cake_release(thread_pile, thread);
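/*
 * Re-home all children of a dying process so that they can still be
 * reaped later.
 */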
orphan_children(struct proc_info* proc)

    struct proc_info *root;
    struct proc_info *pos, *n;

    llist_for_each(pos, n, &proc->children, siblings) {

        llist_append(&root->children, &pos->siblings);
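/*
 * Fully reclaim a terminated process: detach it from all bookkeeping
 * lists, invalidate its taskfs node, drop its cwd and open files, free
 * the signal registry, destroy its remaining threads, re-home its
 * children and release the address space.
 */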
delete_process(struct proc_info* proc)

    pid_t pid = proc->pid;
    struct proc_mm* mm = vmspace(proc);

    assert(pid); // long live the pid0 !!

    sched_ctx.procs[pid] = NULL;

    llist_delete(&proc->siblings);
    llist_delete(&proc->grp_member);
    llist_delete(&proc->tasks);

    taskfs_invalidate(pid);

    vfs_unref_dnode(proc->cwd);

    for (size_t i = 0; i < VFS_MAX_FD; i++) {
        struct v_fd* fd = proc->fdtable->fds[i];

        vfs_pclose(fd->file, pid);

    vfree(proc->fdtable);

    signal_free_registry(proc->sigreg);

    struct thread *pos, *n;
    llist_for_each(pos, n, &proc->threads, proc_sibs) {
        // terminate and destroy all threads unconditionally

    orphan_children(proc);

    procvm_unmount_release(mm);

    cake_release(proc_pile, proc);
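/*
 * Validate the pid, then delete the corresponding process; an
 * out-of-range pid is flagged as EINVAL via syscall_result().
 */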
destroy_process(pid_t pid)

    if (index <= 0 || index >= sched_ctx.ptable_len) {
        syscall_result(EINVAL);

    struct proc_info* proc = sched_ctx.procs[index];
    delete_process(proc);
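/*
 * Mark a process terminated and notify its parent with SIGCHLD, leaving
 * the process's threads untouched.
 */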
terminate_proc_only(struct proc_info* proc, int exit_code) {
    assert(proc->pid != 0);

    proc->state = PS_TERMNAT;
    proc->exit_code = exit_code;

    proc_setsignal(proc->parent, _SIGCHLD);
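/*
 * Mark a single thread terminated; if it is the last one standing, the
 * whole process goes down with it.
 */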
terminate_thread(struct thread* thread, ptr_t val) {
    thread->exit_val = val;
    thread->state = PS_TERMNAT;

    struct proc_info* proc = thread->process;
    if (proc->thread_count == 1) {
        terminate_proc_only(thread->process, 0);

terminate_current_thread(ptr_t val) {
    terminate_thread(current_thread, val);
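/*
 * Terminate a whole process: flag the process itself, then mark every
 * one of its threads for termination. Killing init (pid 1) is fatal.
 */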
terminate_proccess(struct proc_info* proc, int exit_code) {
    assert(!kernel_process(proc));

    if (proc->pid == 1) {
        fail("Attempt to kill init");

    terminate_proc_only(proc, exit_code);

    struct thread *pos, *n;
    llist_for_each(pos, n, &proc->threads, proc_sibs) {
        pos->state = PS_TERMNAT;

terminate_current(int exit_code)

    terminate_proccess(__current, exit_code);
get_process(pid_t pid)

    if (index < 0 || index >= sched_ctx.ptable_len) {

    return sched_ctx.procs[index];
orphaned_proc(pid_t pid)

    if (pid >= sched_ctx.ptable_len)

    struct proc_info* proc = sched_ctx.procs[pid];
    struct proc_info* parent = proc->parent;

    // A process is orphaned if its parent is terminated or mid-destruction,
    // or if its parent was created after it.
    return proc_terminated(parent) || parent->created > proc->created;