#include <sys/mm/mempart.h>

#include <lunaix/fs/taskfs.h>
#include <lunaix/mm/cake.h>
#include <lunaix/mm/mmap.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/mm/procvm.h>
#include <lunaix/process.h>
#include <lunaix/sched.h>
#include <lunaix/signal.h>
#include <lunaix/spike.h>
#include <lunaix/status.h>
#include <lunaix/syscall.h>
#include <lunaix/syslog.h>
#include <lunaix/pcontext.h>
#include <lunaix/kpreempt.h>

#include <klibc/string.h>
volatile struct proc_info* __current;
volatile struct thread* current_thread;

struct scheduler sched_ctx;

struct cake_pile *proc_pile, *thread_pile;
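/*
 * Backing caches for process and thread objects, served by the cake
 * (slab-style) allocator; the zeroing constructor below guarantees that
 * freshly grabbed objects start out all-zero.
 */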
    proc_pile = cake_new_pile("proc", sizeof(struct proc_info), 1, 0);
    thread_pile = cake_new_pile("thread", sizeof(struct thread), 1, 0);
    cake_set_constructor(proc_pile, cake_ctor_zeroing);
    cake_set_constructor(thread_pile, cake_ctor_zeroing);

    sched_ctx = (struct scheduler){
        .procs = vzalloc(PROC_TABLE_SIZE), .ptable_len = 0, .procs_index = 0
    };

    llist_init_head(&sched_ctx.sleepers);
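/*
 * Hand the CPU over to the chosen thread: mark it and its owning process
 * as running, then switch onto it. Control never comes back through here
 * on the normal path.
 */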
run(struct thread* thread)
{
    thread->state = PS_RUNNING;
    thread->process->state = PS_RUNNING;
    thread->process->th_active = thread;

    set_current_executing(thread);

    fail("unexpected return from switching");
}
/*
 * Currently, we do not allow a thread to destroy itself: doing so would
 * tear down the kernel stack it is still running on, which is disastrous.
 * As a compromise, a preemptible kernel thread performs a periodic scan
 * and cleans up terminated, detached threads.
 */
cleanup_detached_threads() {
    ensure_preempt_caller();

    // XXX maybe a lock on sched_ctx would be more appropriate?
    cpu_disable_interrupt();

    int i = 0;
    struct thread *pos, *n;
    llist_for_each(pos, n, sched_ctx.threads, sched_sibs) {
        if (likely(!proc_terminated(pos) || !thread_detached(pos))) {
            continue;
        }

        vmm_mount_pd(VMS_MOUNT_1, vmroot(pos->process));
        destroy_thread(VMS_MOUNT_1, pos);
        vmm_unmount_pd(VMS_MOUNT_1);
        i++;
    }

    INFO("cleaned %d terminated detached thread(s)", i);

    cpu_enable_interrupt();
}
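/*
 * Decide whether a thread is currently eligible to run, taking its state
 * and any pending signals into account.
 */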
can_schedule(struct thread* thread)
{
    if (unlikely(kernel_process(thread->process))) {
        // a kernel process is always runnable
        return thread->state == PS_READY;
    }

    struct sigctx* sh = &thread->sigctx;

    if ((thread->state & PS_PAUSED)) {
        // wake on any pending signal, ignoring bit 0
        return !!(sh->sig_pending & ~1);
    }
    if ((thread->state & PS_BLOCKED)) {
        return sigset_test(sh->sig_pending, _SIGINT);
    }

    if (sigset_test(sh->sig_pending, _SIGSTOP)) {
        // If one thread has a pending SIGSTOP, then all other threads of
        // the process do too (as per POSIX.1-2008), in which case the
        // entire process is stopped.
        thread->state = PS_STOPPED;
    }

    if (sigset_test(sh->sig_pending, _SIGCONT)) {
        thread->state = PS_READY;
    }

    return (thread->state == PS_READY)
           && proc_runnable(thread->process);
}
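/*
 * Sweep the sleeper list: wake threads whose sleep deadline has passed,
 * raise SIGALRM for expired alarms, and drop entries that have nothing
 * left to wait for.
 */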
    struct thread *pos, *n;
    time_t now = clock_systime() / 1000;

    llist_for_each(pos, n, &sched_ctx.sleepers, sleep.sleepers)
    {
        if (proc_terminated(pos)) {
            llist_delete(&pos->sleep.sleepers);
            continue;
        }

        time_t wtime = pos->sleep.wakeup_time;
        time_t atime = pos->sleep.alarm_time;

        if (wtime && now >= wtime) {
            pos->sleep.wakeup_time = 0;
            pos->state = PS_READY;
        }

        if (atime && now >= atime) {
            pos->sleep.alarm_time = 0;
            thread_setsignal(pos, _SIGALRM);
        }

        if (!wtime && !atime) {
            // nothing left to wait on; drop from the sleeper list
            llist_delete(&pos->sleep.sleepers);
        }
    }
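/*
 * The main scheduling path: pick the next runnable thread, round-robin
 * over the global thread ring, and switch to it.
 */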
    assert(sched_ctx.ptable_len && sched_ctx.ttable_len);

    // Context switching is extremely delicate! We do not want any
    // interrupt scrambling the ordering of the stack...
    cpu_disable_interrupt();

    if (!(current_thread->state & ~PS_RUNNING)) {
        current_thread->state = PS_READY;
        __current->state = PS_READY;
    }

    // round-robin scheduler

    struct thread* current = current_thread;
    struct thread* to_check = current;

    while (1) {
        to_check = list_next(to_check, struct thread, sched_sibs);

        if (can_schedule(to_check)) {
            break;
        }

        if (to_check == current) {
            // FIXME do something less lethal here
            fail("Ran out of threads!");
        }
    }

    sched_ctx.procs_index = to_check->process->pid;

    fail("unexpected return from scheduler");

    cpu_enable_interrupt();
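/*
 * sleep(2): park the calling thread for `seconds`. If the thread is
 * already sleeping, report the time remaining instead of re-arming.
 */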
__DEFINE_LXSYSCALL1(unsigned int, sleep, unsigned int, seconds)
{
    time_t systime = clock_systime() / 1000;
    struct haybed* bed = &current_thread->sleep;

    if (bed->wakeup_time) {
        return (bed->wakeup_time - systime);
    }

    bed->wakeup_time = systime + seconds;

    if (llist_empty(&bed->sleepers)) {
        llist_append(&sched_ctx.sleepers, &bed->sleepers);
    }

    store_retval(seconds);

    block_current_thread();
    sched_pass();

    // resumed only after the sleep has elapsed in full
    return 0;
}
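/*
 * alarm(2): arm (or, with seconds == 0, cancel) a SIGALRM delivery,
 * returning the seconds left on any previously pending alarm.
 */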
__DEFINE_LXSYSCALL1(unsigned int, alarm, unsigned int, seconds)
{
    struct haybed* bed = &current_thread->sleep;
    time_t prev_ddl = bed->alarm_time;
    time_t now = clock_systime() / 1000;

    bed->alarm_time = seconds ? now + seconds : 0;

    struct proc_info* root_proc = sched_ctx.procs[0];
    if (llist_empty(&bed->sleepers)) {
        llist_append(&sched_ctx.sleepers, &bed->sleepers);
    }

    return prev_ddl ? (prev_ddl - now) : 0;
}
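/*
 * Usage sketch from userspace, assuming the usual libc-style wrappers
 * over these syscalls:
 *
 *     alarm(5);                    // SIGALRM in ~5s; returns time left
 *                                  // on a previously armed alarm, if any
 *     unsigned int r = sleep(10);  // 0 once fully slept, else remainder
 */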
__DEFINE_LXSYSCALL1(void, exit, int, status)
{
    terminate_current(status);
}

__DEFINE_LXSYSCALL(void, yield)
{
    schedule();
}

pid_t
_wait(pid_t wpid, int* status, int options);

__DEFINE_LXSYSCALL1(pid_t, wait, int*, status)
{
    return _wait(-1, status, 0);
}

__DEFINE_LXSYSCALL3(pid_t, waitpid, pid_t, pid, int*, status, int, options)
{
    return _wait(pid, status, options);
}

__DEFINE_LXSYSCALL(int, geterrno)
{
    return current_thread->syscall_ret;
}
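/*
 * Core of wait(2)/waitpid(2). wpid follows the POSIX convention: -1 waits
 * for any child, 0 for the caller's process group, a negative value for
 * the process group -wpid, and a positive value for that exact pid.
 */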
pid_t
_wait(pid_t wpid, int* status, int options)
{
    pid_t cur = __current->pid;
    int status_flags = 0;
    struct proc_info *proc, *n;

    if (llist_empty(&__current->children)) {
        return -1;
    }

    wpid = wpid ? wpid : -__current->pgid;

repeat:
    llist_for_each(proc, n, &__current->children, siblings)
    {
        if (!~wpid || proc->pid == wpid || proc->pgid == -wpid) {
            if (proc->state == PS_TERMNAT && !options) {
                status_flags |= PEXITTERM;
                goto done;
            }

            if (proc->state == PS_READY && (options & WUNTRACED)) {
                status_flags |= PEXITSTOP;
                goto done;
            }
        }
    }

    if ((options & WNOHANG)) {
        return 0;
    }

    // no matching child has changed state yet: yield and look again
    sched_pass();
    goto repeat;

done:
    *status = proc->exit_code | status_flags;

    return destroy_process(proc->pid);
}
    for (; i < sched_ctx.ptable_len && sched_ctx.procs[i]; i++)
        ;

    if (unlikely(i == MAX_PROCESS)) {
        panick("Panic in Ponyville shimmer!");
    }

    return i;
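/*
 * Allocate a thread object for `process` from the thread cache; the tid
 * scheme below is provisional (see the FIXME).
 */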
alloc_thread(struct proc_info* process) {
    if (process->thread_count >= MAX_THREAD_PP) {
        return NULL;
    }

    struct thread* th = cake_grab(thread_pile);

    th->process = process;
    th->created = clock_systime();

    // FIXME we need a better tid allocation method!
    th->tid = (th->created ^ ((ptr_t)th)) % MAX_THREAD_PP;

    th->state = PS_CREATED;

    llist_init_head(&th->sleep.sleepers);
    llist_init_head(&th->sched_sibs);
    llist_init_head(&th->proc_sibs);
    waitq_init(&th->waitqueue);

    return th;
}
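/*
 * Allocate a process: claim a free pid slot, set up signal, file and
 * memory bookkeeping, and leave it in PS_CREATED until commit_process()
 * publishes it.
 */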
    pid_t i = get_free_pid();

    if (i == sched_ctx.ptable_len) {
        sched_ctx.ptable_len++;
    }

    struct proc_info* proc = cake_grab(proc_pile);

    proc->pid = i;
    proc->state = PS_CREATED;

    proc->created = clock_systime();
    proc->pgid = proc->pid;

    proc->sigreg = vzalloc(sizeof(struct sigregister));
    proc->fdtable = vzalloc(sizeof(struct v_fdtable));

    proc->mm = procvm_create(proc);

    llist_init_head(&proc->tasks);
    llist_init_head(&proc->children);
    llist_init_head(&proc->grp_member);
    llist_init_head(&proc->threads);

    iopoll_init(&proc->pollctx);

    sched_ctx.procs[i] = proc;

    return proc;
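/*
 * Publish an allocated thread: link it into its process and into the
 * scheduler's global thread ring, then mark it ready to run.
 */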
commit_thread(struct thread* thread) {
    struct proc_info* process = thread->process;

    assert(process && !proc_terminated(process));

    llist_append(&process->threads, &thread->proc_sibs);

    if (sched_ctx.threads) {
        llist_append(sched_ctx.threads, &thread->sched_sibs);
    } else {
        sched_ctx.threads = &thread->sched_sibs;
    }

    sched_ctx.ttable_len++;
    process->thread_count++;
    thread->state = PS_READY;
}
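/*
 * Publish a created process: parentless user processes are adopted by
 * init (pid 1), kernel processes parent themselves; the process is then
 * linked into the global list and its parent's children.
 */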
commit_process(struct proc_info* process)
{
    assert(process == sched_ctx.procs[process->pid]);
    assert(process->state == PS_CREATED);

    // every process is a child of the first process (pid=1)
    if (!process->parent) {
        if (likely(!kernel_process(process))) {
            process->parent = sched_ctx.procs[1];
        } else {
            process->parent = process;
        }
    }

    assert(!proc_terminated(process->parent));

    if (sched_ctx.proc_list) {
        llist_append(sched_ctx.proc_list, &process->tasks);
    } else {
        sched_ctx.proc_list = &process->tasks;
    }

    llist_append(&process->parent->children, &process->siblings);

    process->state = PS_READY;
}
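/*
 * Tear down a single thread. The owner's address space must already be
 * mounted at vm_mnt so the thread's memory can be released.
 */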
destroy_thread(ptr_t vm_mnt, struct thread* thread)
{
    cake_ensure_valid(thread);

    struct proc_info* proc = thread->process;

    llist_delete(&thread->sched_sibs);
    llist_delete(&thread->proc_sibs);
    llist_delete(&thread->sleep.sleepers);
    waitq_cancel_wait(&thread->waitqueue);

    thread_release_mem(thread, vm_mnt);

    proc->thread_count--;
    sched_ctx.ttable_len--;

    cake_release(thread_pile, thread);
}
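/*
 * Final reclamation of a process: unlink it everywhere, close its files,
 * destroy its remaining threads under a mounted address space, and return
 * the memory to the allocators.
 */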
delete_process(struct proc_info* proc)
{
    pid_t pid = proc->pid;

    assert(pid); // long live pid 0 !!

    sched_ctx.procs[pid] = NULL;

    llist_delete(&proc->siblings);
    llist_delete(&proc->grp_member);
    llist_delete(&proc->tasks);

    taskfs_invalidate(pid);

    if (proc->cwd) {
        vfs_unref_dnode(proc->cwd);
    }

    for (size_t i = 0; i < VFS_MAX_FD; i++) {
        struct v_fd* fd = proc->fdtable->fds[i];
        if (fd) {
            vfs_pclose(fd->file, pid);
        }
    }

    vfree(proc->fdtable);

    signal_free_registers(proc->sigreg);

    vmm_mount_pd(VMS_MOUNT_1, vmroot(proc));

    struct thread *pos, *n;
    llist_for_each(pos, n, &proc->threads, proc_sibs) {
        // terminate and destroy all threads unconditionally
        destroy_thread(VMS_MOUNT_1, pos);
    }

    procvm_cleanup(VMS_MOUNT_1, proc);

    vmm_unmount_pd(VMS_MOUNT_1);

    cake_release(proc_pile, proc);
}
destroy_process(pid_t pid)
{
    pid_t index = pid;

    if (index <= 0 || index >= sched_ctx.ptable_len) {
        syscall_result(EINVAL);
        return -1;
    }

    struct proc_info* proc = sched_ctx.procs[index];
    delete_process(proc);

    return pid;
}
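/*
 * Mark only the process itself as terminated and notify the parent via
 * SIGCHLD; its threads are dealt with by the callers below.
 */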
terminate_proc_only(struct proc_info* proc, int exit_code) {
    proc->state = PS_TERMNAT;
    proc->exit_code = exit_code;

    proc_setsignal(proc->parent, _SIGCHLD);
}

terminate_thread(struct thread* thread, ptr_t val) {
    thread->exit_val = val;
    thread->state = PS_TERMNAT;

    struct proc_info* proc = thread->process;
    if (proc->thread_count == 1) {
        terminate_proc_only(thread->process, 0);
    }
}

terminate_current_thread(ptr_t val) {
    terminate_thread(current_thread, val);
}
terminate_process(struct proc_info* proc, int exit_code) {
    assert(!kernel_process(proc));

    if (proc->pid == 1) {
        panick("Attempt to kill init");
    }

    terminate_proc_only(proc, exit_code);

    struct thread *pos, *n;
    llist_for_each(pos, n, &proc->threads, proc_sibs) {
        pos->state = PS_TERMNAT;
    }
}
terminate_current(int exit_code)
{
    terminate_process(__current, exit_code);
}
get_process(pid_t pid)
{
    pid_t index = pid;
    if (index < 0 || index >= sched_ctx.ptable_len) {
        return NULL;
    }

    return sched_ctx.procs[index];
}
orphaned_proc(pid_t pid)
{
    if (pid >= sched_ctx.ptable_len)
        return false;

    struct proc_info* proc = sched_ctx.procs[pid];
    struct proc_info* parent = proc->parent;

    // A process is orphaned if its parent is terminated (or is being
    // destroyed), or if its parent was created after it.
    return proc_terminated(parent) || parent->created > proc->created;
}