#include <arch/x86/interrupts.h>
#include <arch/x86/tss.h>

#include <lunaix/fs/taskfs.h>
#include <lunaix/mm/cake.h>
#include <lunaix/mm/kalloc.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/process.h>
#include <lunaix/sched.h>
#include <lunaix/signal.h>
#include <lunaix/spike.h>
#include <lunaix/status.h>
#include <lunaix/syscall.h>
#include <lunaix/syslog.h>
volatile struct proc_info* __current;

struct proc_info dummy;

struct scheduler sched_ctx;

struct cake_pile* proc_pile;
    // size_t pg_size = ROUNDUP(sizeof(struct proc_info) * MAX_PROCESS, 0x1000);

    // for (size_t i = 0; i <= pg_size; i += 4096) {
    //     uintptr_t pa = pmm_alloc_page(KERNEL_PID, PP_FGPERSIST);
    //         PD_REFERENCED, PROC_START + i, pa, PG_PREM_RW, VMAP_NULL);
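    // Process descriptors are served from a dedicated cake (slab-style) pile;
    // the zeroing constructor hands out pre-cleared proc_info structures.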
    proc_pile = cake_new_pile("proc", sizeof(struct proc_info), 1, 0);
    cake_set_constructor(proc_pile, cake_ctor_zeroing);

    sched_ctx = (struct scheduler){ ._procs = vzalloc(PROC_TABLE_SIZE),
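/*
 * Hand the CPU to `proc`: mark it running, point tss.esp0 at its saved
 * kernel-stack esp, signal end-of-interrupt to the APIC, and tail-jump
 * into the assembly routine switch_to to restore its context.
 */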
run(struct proc_info* proc)
    proc->state = PS_RUNNING;

    /*
     * Set tss.esp0 to the esp value saved before the last reschedule.
     *
     * While a signal is being handled, the interrupted context is not
     * restored; it is saved on the user stack and we jump straight into the
     * user-space sig_wrapper to run the handler. Only when the user-defined
     * handler returns does the sigreturn syscall restore that context.
     * Since no address-space switch happens in between, this second jump
     * reuses the same kernel stack, and tss.esp0 used to always point at the
     * very top of it, which could overwrite earlier saved contexts (for
     * example with nested signal handlers).
     */
    tss_update_esp(proc->intr_ctx.registers.esp);

    apic_done_servicing();

    asm volatile("pushl %0\n"
                 "jmp switch_to\n" ::"r"(proc)
                 : "memory"); // kernel/asm/x86/interrupt.S
can_schedule(struct proc_info* proc)
    if (__SIGTEST(proc->sig_pending, _SIGCONT)) {
        __SIGCLEAR(proc->sig_pending, _SIGSTOP);
    } else if (__SIGTEST(proc->sig_pending, _SIGSTOP)) {
        // If the process has received SIGSTOP, it is not given any CPU time.
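    /*
     * Sweep the global sleeper list, which is anchored at process 0:
     * wake processes whose wakeup_time has passed, raise SIGALRM for
     * expired alarms, and unlink entries that had neither a pending
     * wakeup nor an armed alarm.
     */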
    struct proc_info* leader = sched_ctx._procs[0];
    struct proc_info *pos, *n;
    time_t now = clock_systime();

    llist_for_each(pos, n, &leader->sleep.sleepers, sleep.sleepers)
        if (PROC_TERMINATED(pos->state)) {

        time_t wtime = pos->sleep.wakeup_time;
        time_t atime = pos->sleep.alarm_time;

        if (wtime && now >= wtime) {
            pos->sleep.wakeup_time = 0;
            pos->state = PS_READY;

        if (atime && now >= atime) {
            pos->sleep.alarm_time = 0;
            __SIGSET(pos->sig_pending, _SIGALRM);

        if (!wtime && !atime) {
            llist_delete(&pos->sleep.sleepers);
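    /*
     * Pick the next process to run: round-robin over the process table,
     * starting after the previously scheduled slot, skipping empty slots
     * and processes that are not PS_READY or that can_schedule() rejects.
     */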
    if (!sched_ctx.ptable_len) {

    // Context switching is extremely delicate! We do not want any interrupt
    // to disturb the stack layout here...
    cpu_disable_interrupt();

    struct proc_info* next;
    int prev_ptr = sched_ctx.procs_index;

    if (!(__current->state & ~PS_RUNNING)) {
        __current->state = PS_READY;

    // round-robin scheduler
        ptr = (ptr + 1) % sched_ctx.ptable_len;
        next = sched_ctx._procs[ptr];
    } while (!next || (next->state != PS_READY && ptr != prev_ptr));

    sched_ctx.procs_index = ptr;

    if (!can_schedule(next)) {
        // If this process is not to be scheduled, try picking another one

    cpu_enable_interrupt();
    cpu_int(LUNAIX_SCHED);
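/*
 * sleep(seconds): if the caller is already sleeping, report the remaining
 * time; otherwise record the wakeup deadline, enqueue the caller on the
 * root process's sleeper list, pre-load the saved eax as the syscall's
 * return value, and block until the sleeper sweep wakes it up.
 */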
__DEFINE_LXSYSCALL1(unsigned int, sleep, unsigned int, seconds)
    if (__current->sleep.wakeup_time) {
        return (__current->sleep.wakeup_time - clock_systime()) / 1000U;

    struct proc_info* root_proc = sched_ctx._procs[0];
    __current->sleep.wakeup_time = clock_systime() + seconds * 1000;
    llist_append(&root_proc->sleep.sleepers, &__current->sleep.sleepers);

    __current->intr_ctx.registers.eax = seconds;
    __current->state = PS_BLOCKED;
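/*
 * alarm(seconds): arm (or, with seconds == 0, cancel) an alarm that fires
 * `seconds` from now, make sure the caller sits on the sleeper list so the
 * sweep above can deliver SIGALRM, and return the seconds left on any
 * previously armed alarm.
 */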
__DEFINE_LXSYSCALL1(unsigned int, alarm, unsigned int, seconds)
    time_t prev_ddl = __current->sleep.alarm_time;
    time_t now = clock_systime();

    __current->sleep.alarm_time = seconds ? now + seconds * 1000 : 0;

    struct proc_info* root_proc = sched_ctx._procs[0];
    if (llist_empty(&__current->sleep.sleepers)) {
        llist_append(&root_proc->sleep.sleepers, &__current->sleep.sleepers);

    return prev_ddl ? (prev_ddl - now) / 1000 : 0;
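/*
 * Thin syscall wrappers: exit() funnels into terminate_proc(), wait() and
 * waitpid() share the _wait() helper below, and geterrno() exposes the
 * caller's saved kernel status code (k_status).
 */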
__DEFINE_LXSYSCALL1(void, exit, int, status)
    terminate_proc(status);

__DEFINE_LXSYSCALL(void, yield)

_wait(pid_t wpid, int* status, int options);

__DEFINE_LXSYSCALL1(pid_t, wait, int*, status)
    return _wait(-1, status, 0);

__DEFINE_LXSYSCALL3(pid_t, waitpid, pid_t, pid, int*, status, int, options)
    return _wait(pid, status, options);

__DEFINE_LXSYSCALL(int, geterrno)
    return __current->k_status;
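/*
 * Common backend for wait()/waitpid(): scan the caller's children for one
 * matching wpid (-1 matches any child, 0 the caller's process group, a
 * negative value the group -wpid, a positive value that exact pid), build
 * the status word from the exit code and the PEXIT* flags, and reap the
 * terminated child via destroy_process().
 */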
_wait(pid_t wpid, int* status, int options)
    pid_t cur = __current->pid;
    int status_flags = 0;
    struct proc_info *proc, *n;

    if (llist_empty(&__current->children)) {

    wpid = wpid ? wpid : -__current->pgid;

    llist_for_each(proc, n, &__current->children, siblings)
        if (!~wpid || proc->pid == wpid || proc->pgid == -wpid) {
            if (proc->state == PS_TERMNAT && !options) {
                status_flags |= PEXITTERM;

            if (proc->state == PS_READY && (options & WUNTRACED)) {
                status_flags |= PEXITSTOP;

        if ((options & WNOHANG)) {

    status_flags |= PEXITSIG * (proc->sig_inprogress != 0);

    *status = proc->exit_code | status_flags;

    return destroy_process(proc->pid);
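/*
 * Allocate a new process: take the first free slot in the process table
 * (growing ptable_len when the slot sits at the current end), grab a
 * zeroed proc_info from the cake pile, and initialize its bookkeeping
 * lists, wait queue, and creation time.
 */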
    for (; i < sched_ctx.ptable_len && sched_ctx._procs[i]; i++)

    if (i == MAX_PROCESS) {
        panick("Panic in Ponyville shimmer!");

    if (i == sched_ctx.ptable_len) {
        sched_ctx.ptable_len++;

    struct proc_info* proc = cake_grab(proc_pile);

    proc->state = PS_CREATED;
    proc->created = clock_systime();
    proc->pgid = proc->pid;
    proc->fdtable = vzalloc(sizeof(struct v_fdtable));

    llist_init_head(&proc->mm.regions.head);
    llist_init_head(&proc->tasks);
    llist_init_head(&proc->children);
    llist_init_head(&proc->grp_member);
    llist_init_head(&proc->sleep.sleepers);
    waitq_init(&proc->waitqueue);

    sched_ctx._procs[i] = proc;
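/*
 * Publish a freshly created process to the scheduler: it must still be in
 * PS_CREATED, is reparented to process 1 when it has no parent, gets linked
 * into its parent's children list and the global task list, and is finally
 * marked PS_READY.
 */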
commit_process(struct proc_info* process)
    assert(process == sched_ctx._procs[process->pid]);

    if (process->state != PS_CREATED) {
        __current->k_status = EINVAL;

    // every process is a child of the first process (pid 1)
    if (!process->parent) {
        process->parent = sched_ctx._procs[1];

    llist_append(&process->parent->children, &process->siblings);
    llist_append(&sched_ctx._procs[0]->tasks, &process->tasks);

    process->state = PS_READY;
// from <kernel/process.c>
__del_pagetable(pid_t pid, uintptr_t mount_point);
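/*
 * Tear a process down completely: unlink it from every list it sits on,
 * invalidate its taskfs entry, release its cwd, open file descriptors and
 * fd table, free its memory regions and page table (via __del_pagetable on
 * a temporary mount), and hand the proc_info back to the cake pile.
 */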
destroy_process(pid_t pid)
    // valid table slots are 0 .. ptable_len - 1
    if (index <= 0 || index >= sched_ctx.ptable_len) {
        __current->k_status = EINVAL;

    struct proc_info* proc = sched_ctx._procs[index];
    sched_ctx._procs[index] = 0;
    llist_delete(&proc->siblings);
    llist_delete(&proc->grp_member);
    llist_delete(&proc->tasks);
    llist_delete(&proc->sleep.sleepers);

    taskfs_invalidate(pid);

    vfs_unref_dnode(proc->cwd);

    for (size_t i = 0; i < VFS_MAX_FD; i++) {
        struct v_fd* fd = proc->fdtable->fds[i];

        vfs_pclose(fd->file, pid);

    vfree(proc->fdtable);

    struct mm_region *pos, *n;
    llist_for_each(pos, n, &proc->mm.regions.head, head)

    vmm_mount_pd(PD_MOUNT_1, proc->page_table);

    __del_pagetable(pid, PD_MOUNT_1);

    vmm_unmount_pd(PD_MOUNT_1);

    cake_release(proc_pile, proc);
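/*
 * Mark the calling process as terminated, record its exit code, and notify
 * the parent with SIGCHLD; the zombie is later reaped through wait() and
 * destroy_process().
 */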
terminate_proc(int exit_code)
    __current->state = PS_TERMNAT;
    __current->exit_code = exit_code;

    __SIGSET(__current->parent->sig_pending, _SIGCHLD);
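// Bounds-checked lookup of a proc_info slot by pid.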
get_process(pid_t pid)
    if (index < 0 || index >= sched_ctx.ptable_len) {

    return sched_ctx._procs[index];
orphaned_proc(pid_t pid)
    if (pid >= sched_ctx.ptable_len)

    struct proc_info* proc = sched_ctx._procs[pid];
    struct proc_info* parent = proc->parent;

    // A process is orphaned if its parent is either terminated or being
    // destroyed, or if its parent was created after this process.
    return PROC_TERMINATED(parent->state) || parent->created > proc->created;