#include <lunaix/spike.h>
#include <lunaix/status.h>
#include <lunaix/syscall.h>
+#include <lunaix/syslog.h>
+#include <lunaix/mm/valloc.h>
#include <klibc/string.h>
+#include <sys/mm/mempart.h>
+
+LOG_MODULE("SIG")
+
extern struct scheduler sched_ctx; /* kernel/sched.c */
-static u32_t term_sigs =
- (1 << SIGSEGV) | (1 << SIGINT) | (1 << SIGKILL) | (1 << SIGTERM);
+#define UNMASKABLE (sigset(SIGKILL) | sigset(SIGTERM) | sigset(SIGILL))
+#define TERMSIG (sigset(SIGSEGV) | sigset(SIGINT) | UNMASKABLE)
+#define CORE (sigset(SIGSEGV))
+#define within_kstack(addr) \
+ (KSTACK_AREA <= (addr) && (addr) <= KSTACK_AREA_END)
+
+static inline void
+signal_terminate(int caused_by)
+{
+ terminate_current(caused_by | PEXITSIG);
+}
// Referenced in kernel/asm/x86/interrupt.S
void*
signal_dispatch()
{
- if (!__current->sig_pending) {
+ if (kernel_process(__current)) {
+ // signal is undefined under 'kernel process'
+ return 0;
+ }
+
+ if (!pending_sigs(current_thread)) {
// 没有待处理信号
return 0;
}
- int sig_selected =
- 31 - __builtin_clz(__current->sig_pending &
- ~(__current->sig_mask | __current->sig_inprogress));
+ struct sigregistry* sigreg = __current->sigreg;
+ struct sigctx* psig = &current_thread->sigctx;
+ struct sigact* prev_working = active_signal(current_thread);
+ sigset_t mask = psig->sig_mask | (prev_working ? prev_working->sa_mask : 0);
- __SIGCLEAR(__current->sig_pending, sig_selected);
+ int sig_selected = 31 - clz(psig->sig_pending & ~mask);
+ sigset_clear(psig->sig_pending, sig_selected);
- if (sig_selected == 0) {
+ if (!sig_selected) {
// SIG0 is reserved
return 0;
}
- if (!__current->sig_handler[sig_selected]) {
- if ((term_sigs & (1 << sig_selected))) {
- terminate_proc(sig_selected);
+ struct sigact* action = sigreg->signals[sig_selected];
+ if (!action || !action->sa_actor) {
+ if (sigset_test(TERMSIG, sig_selected)) {
+ signal_terminate(sig_selected);
schedule();
// never return
}
return 0;
}
- ptr_t ustack = __current->ustack_top & ~0xf;
-
- if ((int)(ustack - USTACK_END) < (int)sizeof(struct proc_sig)) {
+ ptr_t ustack = current_thread->ustack_top;
+ ptr_t ustack_start = current_thread->ustack->start;
+ if ((int)(ustack - ustack_start) < (int)sizeof(struct proc_sig)) {
// 用户栈没有空间存放信号上下文
return 0;
}
- struct proc_sig* sig_ctx =
- (struct proc_sig*)(ustack - sizeof(struct proc_sig));
+ struct proc_sig* sigframe =
+ (struct proc_sig*)((ustack - sizeof(struct proc_sig)) & ~0xf);
- /*
- 这是一个相当恶心的坑。
- 问题是出在原本的sig_ctx->prev_context = __current->intr_ctx的上面
- 这个语句会被gcc在编译时,用更加高效的 rep movsl 来代替。
+ sigframe->sig_num = sig_selected;
+ sigframe->sigact = action->sa_actor;
+ sigframe->sighand = action->sa_handler;
- 由于我们采用按需分页,所以在很多情况下,用户栈实际被分配的空间不允许我们进行完整的
- 注入,而需要走page fault handler进行动态分页。
+ sigframe->saved_hstate = current_thread->hstate;
- 竞态条件就出现在这里!
+ sigactive_push(current_thread, sig_selected);
- 假若我们的__current->intr_ctx注入了一半,然后产生page-fault中断,
- 那么这就会导致我们的__current->intr_ctx被这个page-fault中断导致的
- 上下文信息覆盖。那么当page-fault handler成功分配了一个页,返回,
- 拷贝也就得以进行。遗憾的是,只不过这次拷贝的内容和前面的拷贝是没有任何的关系
- (因为此时的intr_ctx已经不是之前的intr_ctx了!)
- 而这就会导致我们保存在信号上下文中的进程上下文信息不完整,从而在soft_iret时
- 触发#GP。
+ return sigframe;
+}
- 解决办法就是先吧intr_ctx拷贝到一个静态分配的区域里,然后再注入到用户栈。
- */
- static volatile struct proc_sigstate __temp_save;
- __temp_save.proc_regs = __current->intr_ctx;
- memcpy(__temp_save.fxstate, __current->fxstate, 512);
+static inline void must_inline
+__set_signal(struct thread* thread, signum_t signum)
+{
+ raise_signal(thread, signum);
- sig_ctx->prev_context = __temp_save;
+ // for these mutually exclusive signal
+ if (signum == SIGCONT || signum == SIGSTOP) {
+ sigset_clear(thread->sigctx.sig_pending, signum ^ 1);
+ }
+
+ struct sigact* sig = sigact_of(thread->process, signum);
+ if (sig) {
+ sig->sender = __current->pid;
+ }
+}
- sig_ctx->sig_num = sig_selected;
- sig_ctx->signal_handler = __current->sig_handler[sig_selected];
+static inline void must_inline
+__set_signal_all_threads(struct proc_info* proc, signum_t signum)
+{
+ struct thread *pos, *n;
+ llist_for_each(pos, n, &proc->threads, proc_sibs) {
+ __set_signal(pos, signum);
+ }
+}
- __SIGSET(__current->sig_inprogress, sig_selected);
+void
+thread_setsignal(struct thread* thread, signum_t signum)
+{
+ if (unlikely(kernel_process(thread->process))) {
+ return;
+ }
- return sig_ctx;
+ __set_signal(thread, signum);
+}
+
+void
+proc_setsignal(struct proc_info* proc, signum_t signum)
+{
+ if (unlikely(kernel_process(proc))) {
+ return;
+ }
+
+ // FIXME handle signal delivery at process level.
+ switch (signum)
+ {
+ case SIGKILL:
+ signal_terminate(signum);
+ break;
+ case SIGCONT:
+ case SIGSTOP:
+ __set_signal_all_threads(proc, signum);
+ default:
+ break;
+ }
+
+ __set_signal(proc->th_active, signum);
}
int
-signal_send(pid_t pid, int signum)
+signal_send(pid_t pid, signum_t signum)
{
- if (signum < 0 || signum >= _SIG_NUM) {
- __current->k_status = EINVAL;
+ if (signum >= _SIG_NUM) {
+ syscall_result(EINVAL);
return -1;
}
+ pid_t sender_pid = __current->pid;
struct proc_info* proc;
+
if (pid > 0) {
proc = get_process(pid);
goto send_single;
} else if (!pid) {
proc = __current;
goto send_grp;
- } else if (pid < -1) {
+ } else if (pid < 0) {
proc = get_process(-pid);
goto send_grp;
} else {
// TODO: send to all process.
// But I don't want to support it yet.
- __current->k_status = EINVAL;
+ syscall_result(EINVAL);
return -1;
}
-send_grp:
+send_grp: ;
struct proc_info *pos, *n;
llist_for_each(pos, n, &proc->grp_member, grp_member)
{
- __SIGSET(pos->sig_pending, signum);
+ proc_setsignal(pos, signum);
}
send_single:
- if (PROC_TERMINATED(proc->state)) {
- __current->k_status = EINVAL;
+ if (proc_terminated(proc)) {
+ syscall_result(EINVAL);
return -1;
}
- __SIGSET(proc->sig_pending, signum);
+
+ proc_setsignal(proc, signum);
+
return 0;
}
-__DEFINE_LXSYSCALL1(int, sigreturn, struct proc_sig, *sig_ctx)
+void
+signal_dup_context(struct sigctx* dest_ctx)
{
- memcpy(__current->fxstate, sig_ctx->prev_context.fxstate, 512);
- __current->intr_ctx = sig_ctx->prev_context.proc_regs;
- __current->flags &= ~PROC_FINPAUSE;
- __SIGCLEAR(__current->sig_inprogress, sig_ctx->sig_num);
- schedule();
+ struct sigctx* old_ctx = &current_thread->sigctx;
+ memcpy(dest_ctx, old_ctx, sizeof(struct sigctx));
+}
- // never reach!
- return 0;
+void
+signal_dup_registry(struct sigregistry* dest_reg)
+{
+ struct sigregistry* oldreg = __current->sigreg;
+ for (int i = 0; i < _SIG_NUM; i++) {
+ struct sigact* oldact = oldreg->signals[i];
+ if (!oldact) {
+ continue;
+ }
+
+ struct sigact* newact = valloc(sizeof(struct sigact));
+ memcpy(newact, oldact, sizeof(struct sigact));
+
+ dest_reg->signals[i] = newact;
+ }
+}
+
+void
+signal_reset_context(struct sigctx* sigctx) {
+ memset(sigctx, 0, sizeof(struct sigctx));
+}
+
+void
+signal_reset_registry(struct sigregistry* sigreg) {
+ for (int i = 0; i < _SIG_NUM; i++) {
+ struct sigact* act = sigreg->signals[i];
+ if (act) {
+ vfree(act);
+ sigreg->signals[i] = NULL;
+ }
+ }
+}
+
+void
+signal_free_registry(struct sigregistry* sigreg) {
+ signal_reset_registry(sigreg);
+ vfree(sigreg);
}
-__DEFINE_LXSYSCALL3(int,
- sigprocmask,
- int,
- how,
- const sigset_t,
- *set,
- sigset_t,
- *oldset)
+static bool
+signal_set_sigmask(struct thread* thread, int how, sigset_t* oldset, sigset_t* set)
{
- *oldset = __current->sig_mask;
+ struct sigctx* sh = &current_thread->sigctx;
+ *oldset = sh->sig_mask;
+
if (how == _SIG_BLOCK) {
- __current->sig_mask |= *set;
+ sigset_union(sh->sig_mask, *set);
} else if (how == _SIG_UNBLOCK) {
- __current->sig_mask &= ~(*set);
+ sigset_intersect(sh->sig_mask, ~(*set));
} else if (how == _SIG_SETMASK) {
- __current->sig_mask = *set;
+ sh->sig_mask = *set;
} else {
- return 0;
+ return false;
}
- __current->sig_mask &= ~_SIGNAL_UNMASKABLE;
- return 1;
+
+ sigset_intersect(sh->sig_mask, ~UNMASKABLE);
+ return true;
}
-__DEFINE_LXSYSCALL2(int, signal, int, signum, sighandler_t, handler)
+__DEFINE_LXSYSCALL1(int, sigreturn, struct proc_sig, *sig_ctx)
{
- if (signum <= 0 || signum >= _SIG_NUM) {
- return -1;
+ struct sigctx* sigctx = &current_thread->sigctx;
+ struct sigact* active = active_signal(current_thread);
+
+ /* We choose signal#0 as our base case, that is sig#0 means no signal.
+ Therefore, it is an ill situation to return from such sigctx.
+ */
+ if (!active) {
+ signal_terminate(SIGSEGV);
+ schedule();
}
- if ((__SIGNAL(signum) & _SIGNAL_UNMASKABLE)) {
- return -1;
+ current_thread->hstate = sig_ctx->saved_hstate;
+ if (proc_terminated(__current)) {
+ __current->exit_code |= PEXITSIG;
+ } else if (sigset_test(CORE, sig_ctx->sig_num)) {
+ signal_terminate(sig_ctx->sig_num);
}
- __current->sig_handler[signum] = (void*)handler;
+ ptr_t ictx = (ptr_t)current_thread->hstate;
+ /*
+ Ensure our restored context is within kernel stack
+
+ This prevent user to forge their own context such that arbitrary code
+ can be executed as supervisor level
+ */
+ if (!within_kstack(ictx)) {
+ signal_terminate(SIGSEGV);
+ }
+
+ sigactive_pop(current_thread);
+
+ schedule();
+
+ // never reach!
return 0;
}
-void
-__do_pause()
+__DEFINE_LXSYSCALL3(
+ int, sigprocmask, int, how, const sigset_t, *set, sigset_t, *oldset)
{
- __current->flags |= PROC_FINPAUSE;
+ // TODO maybe it is a good opportunity to introduce a process-wide
+ // signal mask?
+
+ if (signal_set_sigmask(current_thread, how, oldset, set)) {
+ return 0;
+ }
- while ((__current->flags & PROC_FINPAUSE)) {
- sched_yieldk();
+ syscall_result(EINVAL);
+ return -1;
+}
+
+__DEFINE_LXSYSCALL3(
+ int, th_sigmask, int, how, const sigset_t, *set, sigset_t, *oldset)
+{
+ if (signal_set_sigmask(current_thread, how, oldset, set)) {
+ return 0;
}
- __current->k_status = EINTR;
+ return EINVAL;
+}
+
+__DEFINE_LXSYSCALL2(int, sys_sigaction, int, signum, struct sigaction*, action)
+{
+ if (signum <= 0 || signum >= _SIG_NUM) {
+ return -1;
+ }
+
+ if (sigset_test(UNMASKABLE, signum)) {
+ return -1;
+ }
+
+ struct sigctx* sigctx = &current_thread->sigctx;
+ if (signum == sigctx->sig_active) {
+ return -1;
+ }
+
+ struct sigact* sa = sigact_of(__current, signum);
+
+ if (!sa) {
+ sa = vzalloc(sizeof(struct sigact));
+ set_sigact(__current, signum, sa);
+ }
+
+ sa->sa_actor = (void*)action->sa_sigaction;
+ sa->sa_handler = (void*)action->sa_handler;
+ sigset_union(sa->sa_mask, sigset(signum));
+
+ return 0;
}
__DEFINE_LXSYSCALL(int, pause)
{
- __do_pause();
+ pause_current_thread();
+ sched_pass();
+
+ syscall_result(EINTR);
return -1;
}
__DEFINE_LXSYSCALL1(int, sigpending, sigset_t, *sigset)
{
- *sigset = __current->sig_pending;
+ *sigset = pending_sigs(current_thread);
return 0;
}
__DEFINE_LXSYSCALL1(int, sigsuspend, sigset_t, *mask)
{
- sigset_t tmp = __current->sig_mask;
- __current->sig_mask = (*mask) & ~_SIGNAL_UNMASKABLE;
- __do_pause();
- __current->sig_mask = tmp;
+ struct sigctx* sigctx = &current_thread->sigctx;
+ sigset_t tmp = current_thread->sigctx.sig_mask;
+ sigctx->sig_mask = (*mask) & ~UNMASKABLE;
+
+ pause_current_thread();
+ sched_pass();
+
+ sigctx->sig_mask = tmp;
return -1;
}
\ No newline at end of file