-void
-run(struct proc_info* proc)
-{
- proc->state = PS_RUNNING;
-
- /*
- 将tss.esp0设置为上次调度前的esp值。
- 当处理信号时,上下文信息是不会恢复的,而是保存在用户栈中,然后直接跳转进位于用户空间的sig_wrapper进行
- 信号的处理。当用户自定义的信号处理函数返回时,sigreturn的系统调用才开始进行上下文的恢复(或者说是进行
- 另一次调度。
- 由于这中间没有进行地址空间的交换,所以第二次跳转使用的是同一个内核栈,而之前默认tss.esp0的值是永远指向最顶部
- 这样一来就有可能会覆盖更早的上下文信息(比如嵌套的信号捕获函数)
- */
- tss_update_esp(proc->intr_ctx.registers.esp);
-
- apic_done_servicing();
-
- asm volatile("pushl %0\n"
- "jmp switch_to\n" ::"r"(proc)
- : "memory"); // kernel/asm/x86/interrupt.S
+/*
+ Currently, we do not allow self-destroying threads: doing
+ so would eliminate the current kernel stack, which would be
+ a disaster. A compromise solution is to perform a regular
+ scan and clean-up of these threads in the preemptible
+ kernel thread.
+*/
+
+void _preemptible
+cleanup_detached_threads() {
+ ensure_preempt_caller();
+
+ // XXX maybe a lock on sched_context would be more appropriate?
+ cpu_disable_interrupt();
+
+ int i = 0;
+ struct thread *pos, *n;
+ llist_for_each(pos, n, sched_ctx.threads, sched_sibs) {
+ if (likely(!proc_terminated(pos) || !thread_detached(pos))) {
+ continue;
+ }
+
+ vmm_mount_pd(VMS_MOUNT_1, vmroot(pos->process));
+ destory_thread(VMS_MOUNT_1, pos);
+ vmm_unmount_pd(VMS_MOUNT_1);
+
+ i++;
+ }
+
+ if (i) {
+ INFO("cleaned %d terminated detached thread(s)", i);
+ }
+
+ cpu_enable_interrupt();