trace_ctx.ksym_table->ksym_label_off + sym->label_off);
}
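+// a saved frame pointer is only walkable while it stays inside the kernel range (KERNEL_STACK, KERNEL_EXEC_END)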
+static inline bool valid_fp(ptr_t ptr) {
+ return KERNEL_STACK < ptr && ptr < KERNEL_EXEC_END;
+}
+
int
trace_walkback(struct trace_record* tb_buffer,
ptr_t fp,
struct ksym_entry* current = NULL;
int i = 0;
- while (frame && i < limit) {
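+    // walk the frame-pointer chain: *frame holds the caller's fp, *(frame + 1) the return address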
+ while (valid_fp((ptr_t)frame) && i < limit) {
ptr_t pc = *(frame + 1);
current = trace_sym_lookup(pc);
- tb_buffer[i] = (struct trace_record){ .pc = current ? current->pc : pc,
- .symbol = ksym_getstr(current) };
+ tb_buffer[i] =
+ (struct trace_record){ .pc = pc,
+ .sym_pc = current ? current->pc : 0,
+ .symbol = ksym_getstr(current) };
frame = (ptr_t*)*frame;
i++;
}
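+    // the chain ended on an implausible frame pointer: report no continuation point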
+ if (!valid_fp((ptr_t)frame)) {
+ frame = NULL;
+ }
+
if (last_fp) {
*last_fp = (ptr_t)frame;
}
return i;
}
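+// print one traceback entry as "<symbol start>+<offset>: <symbol name>"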
+static inline void
+trace_print_code_entry(ptr_t sym_pc, ptr_t inst_pc, char* sym)
+{
+ DEBUG("%p+%p: %s", sym_pc, inst_pc - sym_pc, sym);
+}
+
void
trace_printstack_of(ptr_t fp)
{
struct trace_record tbs[NB_TRACEBACK];
+    // Let our stack walker do its job ;)
int n = trace_walkback(tbs, fp, NB_TRACEBACK, &fp);
if (fp) {
- kprintf(KDEBUG "...<truncated>");
+ DEBUG("...<truncated>");
}
for (int i = 0; i < n; i++) {
- kprintf(KDEBUG "%p: %s", tbs[i].pc, tbs[i].symbol);
+ struct trace_record* tb = &tbs[i];
+ trace_print_code_entry(tb->sym_pc, tb->pc, tb->symbol);
}
}
struct ksym_entry* sym = trace_sym_lookup(p->execp->eip);
- kprintf(KDEBUG ">> (sw:%s) iv:%d, errno:%p <<",
- direction,
- p->execp->vector,
- p->execp->err_code);
- kprintf(KDEBUG "%p:%s", p->execp->eip, ksym_getstr(sym));
+ DEBUG(">> (sw:%s) iv:%d, errno:%p <<",
+ direction,
+ p->execp->vector,
+ p->execp->err_code);
+
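+    // fall back to the raw eip as the symbol base when the lookup fails (offset then prints as 0)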
+ ptr_t sym_pc = sym ? sym->pc : p->execp->eip;
+ trace_print_code_entry(sym_pc, p->execp->eip, ksym_getstr(sym));
}
void
ptr_t fp = cpu_get_fp();
int prev_fromusr = 0;
- kprintf(KDEBUG "stack trace (pid=%d)\n", __current->pid);
+ DEBUG("stack trace (pid=%d)\n", __current->pid);
trace_printstack_of(fp);