Merge branch 'sh/dwarf-unwinder' of git://github.com/mfleming/linux-2.6 into sh/dwarf-unwinder
commit c153a58e71
10 changed files with 252 additions and 105 deletions
@@ -1,6 +1,7 @@
#ifndef __ASM_SH_BUG_H
#define __ASM_SH_BUG_H

#define TRAPA_UNWINDER_BUG_OPCODE	0xc33b	/* trapa #0x3b */
#define TRAPA_BUG_OPCODE		0xc33e	/* trapa #0x3e */

#ifdef CONFIG_GENERIC_BUG

@@ -72,6 +73,30 @@ do {						\
	unlikely(__ret_warn_on);			\
})

#define UNWINDER_BUG()					\
do {							\
	__asm__ __volatile__ (				\
		"1:\t.short %O0\n"			\
		_EMIT_BUG_ENTRY				\
		 :					\
		 : "n" (TRAPA_UNWINDER_BUG_OPCODE),	\
		   "i" (__FILE__),			\
		   "i" (__LINE__), "i" (0),		\
		   "i" (sizeof(struct bug_entry)));	\
} while (0)

#define UNWINDER_BUG_ON(x) ({				\
	int __ret_unwinder_on = !!(x);			\
	if (__builtin_constant_p(__ret_unwinder_on)) {	\
		if (__ret_unwinder_on)			\
			UNWINDER_BUG();			\
	} else {					\
		if (unlikely(__ret_unwinder_on))	\
			UNWINDER_BUG();			\
	}						\
	unlikely(__ret_unwinder_on);			\
})

#endif /* CONFIG_GENERIC_BUG */

#include <asm-generic/bug.h>

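A quick, hypothetical sketch of how the new macro is meant to be used (the helper below is invented, not part of this commit): UNWINDER_BUG_ON() behaves like BUG_ON(), but emits the dedicated trapa #0x3b opcode so the trap handler can retire the faulting unwinder rather than kill the kernel.

/* Hypothetical example, not from this commit. */
static void check_frame(struct dwarf_frame *frame)
{
	/*
	 * Traps with TRAPA_UNWINDER_BUG_OPCODE (trapa #0x3b) when the
	 * condition is true, letting the unwinder trap handler switch
	 * to the next-best unwinder instead of panicking.
	 */
	UNWINDER_BUG_ON(frame == NULL);
}
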
@@ -265,10 +265,7 @@ struct dwarf_frame {

	unsigned long pc;

	struct dwarf_reg *regs;
	unsigned int num_regs;	/* how many regs are allocated? */

	unsigned int depth;	/* what level are we in the callstack? */
	struct list_head reg_list;

	unsigned long cfa;

@@ -292,20 +289,15 @@ struct dwarf_frame {
 * @flags: Describes how to calculate the value of this register
 */
struct dwarf_reg {
	struct list_head link;

	unsigned int number;

	unsigned long addr;
	unsigned long flags;
#define DWARF_REG_OFFSET	(1 << 0)
};

/**
 * dwarf_stack - a DWARF stack contains a collection of DWARF frames
 * @depth: the number of frames in the stack
 * @level: an array of DWARF frames, indexed by stack level
 *
 */
struct dwarf_stack {
	unsigned int depth;
	struct dwarf_frame **level;
#define DWARF_VAL_OFFSET	(1 << 1)
#define DWARF_UNDEFINED		(1 << 2)
};

/*

@@ -372,13 +364,14 @@ static inline unsigned int DW_CFA_operand(unsigned long insn)

extern struct dwarf_frame *dwarf_unwind_stack(unsigned long,
					      struct dwarf_frame *);
#endif /* __ASSEMBLY__ */
#endif /* !__ASSEMBLY__ */

#define CFI_STARTPROC	.cfi_startproc
#define CFI_ENDPROC	.cfi_endproc
#define CFI_DEF_CFA	.cfi_def_cfa
#define CFI_REGISTER	.cfi_register
#define CFI_REL_OFFSET	.cfi_rel_offset
#define CFI_UNDEFINED	.cfi_undefined

#else

@@ -392,6 +385,7 @@ extern struct dwarf_frame *dwarf_unwind_stack(unsigned long,
#define CFI_DEF_CFA	CFI_IGNORE
#define CFI_REGISTER	CFI_IGNORE
#define CFI_REL_OFFSET	CFI_IGNORE
#define CFI_UNDEFINED	CFI_IGNORE

#ifndef __ASSEMBLY__
static inline void dwarf_unwinder_init(void)

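To make the three flag bits above concrete, here is a rough, hypothetical helper (not part of the commit): DWARF_REG_OFFSET means the register was saved in memory at CFA + addr, DWARF_VAL_OFFSET means the register's value is CFA + addr itself, and DWARF_UNDEFINED means the register cannot be recovered.

/* Hypothetical sketch of how a register rule could be resolved. */
static int resolve_reg(struct dwarf_frame *frame, struct dwarf_reg *reg,
		       unsigned long *value)
{
	if (reg->flags & DWARF_REG_OFFSET) {
		/* Saved in memory at CFA + offset; load it back. */
		*value = __raw_readl(frame->cfa + reg->addr);
		return 0;
	}

	if (reg->flags & DWARF_VAL_OFFSET) {
		/* The value itself is CFA + offset. */
		*value = frame->cfa + reg->addr;
		return 0;
	}

	/* DWARF_UNDEFINED or no rule: not recoverable. */
	return -EINVAL;
}
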
@@ -181,6 +181,11 @@ BUILD_TRAP_HANDLER(breakpoint);
BUILD_TRAP_HANDLER(singlestep);
BUILD_TRAP_HANDLER(fpu_error);
BUILD_TRAP_HANDLER(fpu_state_restore);
BUILD_TRAP_HANDLER(unwinder);

#ifdef CONFIG_BUG
extern void handle_BUG(struct pt_regs *);
#endif

#define arch_align_stack(x) (x)

@@ -22,4 +22,10 @@ extern void stack_reader_dump(struct task_struct *, struct pt_regs *,
			      unsigned long *, const struct stacktrace_ops *,
			      void *);

/*
 * Used by fault handling code to signal to the unwinder code that it
 * should switch to a different unwinder.
 */
extern int unwinder_faulted;

#endif /* _LINUX_UNWINDER_H */

@@ -508,6 +508,8 @@ ENTRY(handle_interrupt)
	bsr	save_regs	! needs original pr value in k3
	mov	#-1, k2		! default vector kept in k2

	setup_frame_reg

	! Setup return address and jump to do_IRQ
	mov.l	4f, r9		! fetch return address
	lds	r9, pr		! put return address in pr

@@ -19,6 +19,10 @@

#if !defined(CONFIG_SH_STANDARD_BIOS)
#define sh_bios_handler	debug_trap_handler
#endif

#if !defined(CONFIG_DWARF_UNWINDER)
#define unwinder_trap_handler	debug_trap_handler
#endif

	.data

@@ -35,7 +39,7 @@ ENTRY(debug_trap_table)
	.long debug_trap_handler	/* 0x38 */
	.long debug_trap_handler	/* 0x39 */
	.long debug_trap_handler	/* 0x3a */
	.long debug_trap_handler	/* 0x3b */
	.long unwinder_trap_handler	/* 0x3b */
	.long breakpoint_trap_handler	/* 0x3c */
	.long singlestep_trap_handler	/* 0x3d */
	.long bug_trap_handler		/* 0x3e */

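The new table slot is what connects UNWINDER_BUG() to this handler: on SH, trapa #imm encodes as 0xc3 in the upper byte with the immediate in the lower byte, so 0xc33b selects unwinder_trap_handler and 0xc33e selects bug_trap_handler. A small standalone check of that encoding (illustrative only, not kernel code):

#include <stdio.h>

/* SH "trapa #imm" encodes as 0xc3 in the high byte, imm in the low byte. */
#define TRAPA_OPCODE(imm)	(0xc300 | ((imm) & 0xff))

int main(void)
{
	/* Matches TRAPA_UNWINDER_BUG_OPCODE and TRAPA_BUG_OPCODE above. */
	printf("trapa #0x3b -> 0x%04x\n", TRAPA_OPCODE(0x3b)); /* 0xc33b */
	printf("trapa #0x3e -> 0x%04x\n", TRAPA_OPCODE(0x3e)); /* 0xc33e */
	return 0;
}
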
@@ -11,12 +11,14 @@
 *
 * TODO:
 *	- DWARF64 doesn't work.
 *	- Registers with DWARF_VAL_OFFSET rules aren't handled properly.
 */

/* #define DEBUG */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <asm/dwarf.h>
#include <asm/unwinder.h>

@@ -25,6 +27,17 @@
#include <asm/dwarf.h>
#include <asm/stacktrace.h>

/* Reserve enough memory for two stack frames */
#define DWARF_FRAME_MIN_REQ	2
/* ... with 4 registers per frame. */
#define DWARF_REG_MIN_REQ	(DWARF_FRAME_MIN_REQ * 4)

static struct kmem_cache *dwarf_frame_cachep;
static mempool_t *dwarf_frame_pool;

static struct kmem_cache *dwarf_reg_cachep;
static mempool_t *dwarf_reg_pool;

static LIST_HEAD(dwarf_cie_list);
static DEFINE_SPINLOCK(dwarf_cie_lock);

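The reservations above matter because the unwinder may run in atomic context, where GFP_ATOMIC allocations can fail; a mempool keeps a minimum number of preallocated elements to fall back on. A generic sketch of the pattern follows (the object and pool names are invented; the real setup appears later in dwarf_unwinder_init()).

/* Generic mempool pattern sketch; names are hypothetical. */
static struct kmem_cache *obj_cachep;
static mempool_t *obj_pool;

static int __init obj_pool_init(void)
{
	obj_cachep = kmem_cache_create("objs", sizeof(struct dwarf_reg),
				       0, SLAB_PANIC, NULL);

	/* Keep at least DWARF_REG_MIN_REQ objects in reserve. */
	obj_pool = mempool_create(DWARF_REG_MIN_REQ, mempool_alloc_slab,
				  mempool_free_slab, obj_cachep);

	return obj_pool ? 0 : -ENOMEM;
}
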
@@ -33,47 +46,70 @@ static DEFINE_SPINLOCK(dwarf_fde_lock);

static struct dwarf_cie *cached_cie;

/*
 * Figure out whether we need to allocate some dwarf registers. If dwarf
 * registers have already been allocated then we may need to realloc
 * them. "reg" is a register number that we need to be able to access
 * after this call.
/**
 * dwarf_frame_alloc_reg - allocate memory for a DWARF register
 * @frame: the DWARF frame whose list of registers we insert on
 * @reg_num: the register number
 *
 * Register numbers start at zero, therefore we need to allocate space
 * for "reg" + 1 registers.
 * Allocate space for, and initialise, a dwarf reg from
 * dwarf_reg_pool and insert it onto the (unsorted) linked-list of
 * dwarf registers for @frame.
 *
 * Return the initialised DWARF reg.
 */
static void dwarf_frame_alloc_regs(struct dwarf_frame *frame,
				   unsigned int reg)
static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
					       unsigned int reg_num)
{
	struct dwarf_reg *regs;
	unsigned int num_regs = reg + 1;
	size_t new_size;
	size_t old_size;
	struct dwarf_reg *reg;

	new_size = num_regs * sizeof(*regs);
	old_size = frame->num_regs * sizeof(*regs);

	/* Fast path: don't allocate any regs if we've already got enough. */
	if (frame->num_regs >= num_regs)
		return;

	regs = kzalloc(new_size, GFP_ATOMIC);
	if (!regs) {
		printk(KERN_WARNING "Unable to allocate DWARF registers\n");
	reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
	if (!reg) {
		printk(KERN_WARNING "Unable to allocate a DWARF register\n");
		/*
		 * Let's just bomb hard here, we have no way to
		 * gracefully recover.
		 */
		BUG();
		UNWINDER_BUG();
	}

	if (frame->regs) {
		memcpy(regs, frame->regs, old_size);
		kfree(frame->regs);
	reg->number = reg_num;
	reg->addr = 0;
	reg->flags = 0;

	list_add(&reg->link, &frame->reg_list);

	return reg;
}

static void dwarf_frame_free_regs(struct dwarf_frame *frame)
{
	struct dwarf_reg *reg, *n;

	list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
		list_del(&reg->link);
		mempool_free(reg, dwarf_reg_pool);
	}
}

/**
 * dwarf_frame_reg - return a DWARF register
 * @frame: the DWARF frame to search in for @reg_num
 * @reg_num: the register number to search for
 *
 * Lookup and return the dwarf reg @reg_num for this frame. Return
 * NULL if @reg_num is an invalid register number.
 */
static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
					 unsigned int reg_num)
{
	struct dwarf_reg *reg;

	list_for_each_entry(reg, &frame->reg_list, link) {
		if (reg->number == reg_num)
			return reg;
	}

	frame->regs = regs;
	frame->num_regs = num_regs;
	return NULL;
}

/**

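To make the new per-frame register bookkeeping concrete, here is a minimal, hypothetical sketch (not part of the commit) of the intended lifecycle: allocate a rule for a register while executing CFA instructions, look it up when computing the frame, and release everything back to the mempool when the frame is discarded. The register number 17 is an arbitrary choice for illustration.

/* Hypothetical usage sketch of the per-frame register list. */
static void example_frame_regs(struct dwarf_frame *frame)
{
	struct dwarf_reg *reg;

	/* Record a rule: register 17 is saved at CFA + 4. */
	reg = dwarf_frame_alloc_reg(frame, 17);
	reg->flags |= DWARF_REG_OFFSET;
	reg->addr = 4;

	/* Later, look the rule up by register number... */
	reg = dwarf_frame_reg(frame, 17);
	if (reg && (reg->flags & DWARF_REG_OFFSET))
		pr_debug("reg 17 saved at CFA + %lu\n", reg->addr);

	/* ...and release all rules back to dwarf_reg_pool. */
	dwarf_frame_free_regs(frame);
}
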
@@ -196,7 +232,7 @@ static int dwarf_read_encoded_value(char *addr, unsigned long *val,
		break;
	default:
		pr_debug("encoding=0x%x\n", (encoding & 0x70));
		BUG();
		UNWINDER_BUG();
	}

	if ((encoding & 0x07) == 0x00)

@@ -211,7 +247,7 @@ static int dwarf_read_encoded_value(char *addr, unsigned long *val,
		break;
	default:
		pr_debug("encoding=0x%x\n", encoding);
		BUG();
		UNWINDER_BUG();
	}

	return count;

@@ -347,6 +383,7 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
	unsigned char insn;
	unsigned char *current_insn;
	unsigned int count, delta, reg, expr_len, offset;
	struct dwarf_reg *regp;

	current_insn = insn_start;

@@ -369,9 +406,9 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			dwarf_frame_alloc_regs(frame, reg);
			frame->regs[reg].addr = offset;
			frame->regs[reg].flags |= DWARF_REG_OFFSET;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->addr = offset;
			regp->flags |= DWARF_REG_OFFSET;
			continue;
			/* NOTREACHED */
		case DW_CFA_restore:

@@ -415,6 +452,8 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
		case DW_CFA_undefined:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_UNDEFINED;
			break;
		case DW_CFA_def_cfa:
			count = dwarf_read_uleb128(current_insn,

@@ -453,17 +492,18 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
			count = dwarf_read_leb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			dwarf_frame_alloc_regs(frame, reg);
			frame->regs[reg].flags |= DWARF_REG_OFFSET;
			frame->regs[reg].addr = offset;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_val_offset:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, &offset);
			offset *= cie->data_alignment_factor;
			frame->regs[reg].flags |= DWARF_REG_OFFSET;
			frame->regs[reg].addr = offset;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_VAL_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_GNU_args_size:
			count = dwarf_read_uleb128(current_insn, &offset);

@@ -474,12 +514,14 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			offset *= cie->data_alignment_factor;
			dwarf_frame_alloc_regs(frame, reg);
			frame->regs[reg].flags |= DWARF_REG_OFFSET;
			frame->regs[reg].addr = -offset;

			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = -offset;
			break;
		default:
			pr_debug("unhandled DWARF instruction 0x%x\n", insn);
			UNWINDER_BUG();
			break;
		}
	}

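The register numbers and offsets consumed above are LEB128-encoded in the CFA instruction stream. As a reminder of what dwarf_read_uleb128() has to do (this is a generic, standalone sketch of the DWARF ULEB128 format, not the kernel's implementation): each byte contributes 7 payload bits, least-significant group first, and the high bit marks continuation.

#include <stddef.h>

/* Standalone ULEB128 decoder sketch; returns the number of bytes consumed. */
static size_t uleb128_decode(const unsigned char *p, unsigned long *out)
{
	unsigned long result = 0;
	unsigned int shift = 0;
	size_t count = 0;

	for (;;) {
		unsigned char byte = p[count++];

		result |= (unsigned long)(byte & 0x7f) << shift;
		if (!(byte & 0x80))
			break;		/* high bit clear: last byte */
		shift += 7;
	}

	*out = result;
	return count;
}
/* Example: the bytes { 0xe5, 0x8e, 0x26 } decode to 624485. */
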
@@ -496,14 +538,14 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 * on the callstack. Each of the lower (older) stack frames are
 * linked via the "prev" member.
 */
struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
				       struct dwarf_frame *prev)
struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
					struct dwarf_frame *prev)
{
	struct dwarf_frame *frame;
	struct dwarf_cie *cie;
	struct dwarf_fde *fde;
	struct dwarf_reg *reg;
	unsigned long addr;
	int i, offset;

	/*
	 * If this is the first invocation of this recursive function we

@@ -516,11 +558,16 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
	if (!pc && !prev)
		pc = (unsigned long)current_text_addr();

	frame = kzalloc(sizeof(*frame), GFP_ATOMIC);
	if (!frame)
		return NULL;
	frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
	if (!frame) {
		printk(KERN_ERR "Unable to allocate a dwarf frame\n");
		UNWINDER_BUG();
	}

	INIT_LIST_HEAD(&frame->reg_list);
	frame->flags = 0;
	frame->prev = prev;
	frame->return_addr = 0;

	fde = dwarf_lookup_fde(pc);
	if (!fde) {

@@ -540,7 +587,7 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
		 * case above, which sucks because we could print a
		 * warning here.
		 */
		return NULL;
		goto bail;
	}

	cie = dwarf_lookup_cie(fde->cie_pointer);

@@ -560,10 +607,11 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
	switch (frame->flags) {
	case DWARF_FRAME_CFA_REG_OFFSET:
		if (prev) {
			BUG_ON(!prev->regs[frame->cfa_register].flags);
			reg = dwarf_frame_reg(prev, frame->cfa_register);
			UNWINDER_BUG_ON(!reg);
			UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);

			addr = prev->cfa;
			addr += prev->regs[frame->cfa_register].addr;
			addr = prev->cfa + reg->addr;
			frame->cfa = __raw_readl(addr);

		} else {

@@ -580,27 +628,30 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
		frame->cfa += frame->cfa_offset;
		break;
	default:
		BUG();
		UNWINDER_BUG();
	}

	/* If we haven't seen the return address reg, we're screwed. */
	BUG_ON(!frame->regs[DWARF_ARCH_RA_REG].flags);
	reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);

	for (i = 0; i <= frame->num_regs; i++) {
		struct dwarf_reg *reg = &frame->regs[i];
	/*
	 * If we haven't seen the return address register or the return
	 * address column is undefined then we must assume that this is
	 * the end of the callstack.
	 */
	if (!reg || reg->flags == DWARF_UNDEFINED)
		goto bail;

		if (!reg->flags)
			continue;
	UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);

		offset = reg->addr;
		offset += frame->cfa;
	}

	addr = frame->cfa + frame->regs[DWARF_ARCH_RA_REG].addr;
	addr = frame->cfa + reg->addr;
	frame->return_addr = __raw_readl(addr);

	frame->next = dwarf_unwind_stack(frame->return_addr, frame);
	return frame;

bail:
	dwarf_frame_free_regs(frame);
	mempool_free(frame, dwarf_frame_pool);
	return NULL;
}

static int dwarf_parse_cie(void *entry, void *p, unsigned long len,

|
|||
cie->cie_pointer = (unsigned long)entry;
|
||||
|
||||
cie->version = *(char *)p++;
|
||||
BUG_ON(cie->version != 1);
|
||||
UNWINDER_BUG_ON(cie->version != 1);
|
||||
|
||||
cie->augmentation = p;
|
||||
p += strlen(cie->augmentation) + 1;
|
||||
|
@ -655,7 +706,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
|
|||
count = dwarf_read_uleb128(p, &length);
|
||||
p += count;
|
||||
|
||||
BUG_ON((unsigned char *)p > end);
|
||||
UNWINDER_BUG_ON((unsigned char *)p > end);
|
||||
|
||||
cie->initial_instructions = p + length;
|
||||
cie->augmentation++;
|
||||
|
@ -683,16 +734,16 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
|
|||
* routine in the CIE
|
||||
* augmentation.
|
||||
*/
|
||||
BUG();
|
||||
UNWINDER_BUG();
|
||||
} else if (*cie->augmentation == 'S') {
|
||||
BUG();
|
||||
UNWINDER_BUG();
|
||||
} else {
|
||||
/*
|
||||
* Unknown augmentation. Assume
|
||||
* 'z' augmentation.
|
||||
*/
|
||||
p = cie->initial_instructions;
|
||||
BUG_ON(!p);
|
||||
UNWINDER_BUG_ON(!p);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -709,7 +760,8 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
|
|||
}
|
||||
|
||||
static int dwarf_parse_fde(void *entry, u32 entry_type,
|
||||
void *start, unsigned long len)
|
||||
void *start, unsigned long len,
|
||||
unsigned char *end)
|
||||
{
|
||||
struct dwarf_fde *fde;
|
||||
struct dwarf_cie *cie;
|
||||
|
@ -756,7 +808,7 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
|
|||
|
||||
/* Call frame instructions. */
|
||||
fde->instructions = p;
|
||||
fde->end = start + len;
|
||||
fde->end = end;
|
||||
|
||||
/* Add to list. */
|
||||
spin_lock_irqsave(&dwarf_fde_lock, flags);
|
||||
|
@ -766,17 +818,33 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs,
|
||||
static void dwarf_unwinder_dump(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned long *sp,
|
||||
const struct stacktrace_ops *ops, void *data)
|
||||
const struct stacktrace_ops *ops,
|
||||
void *data)
|
||||
{
|
||||
struct dwarf_frame *frame;
|
||||
struct dwarf_frame *frame, *_frame;
|
||||
unsigned long return_addr;
|
||||
|
||||
frame = dwarf_unwind_stack(0, NULL);
|
||||
_frame = NULL;
|
||||
return_addr = 0;
|
||||
|
||||
while (frame && frame->return_addr) {
|
||||
ops->address(data, frame->return_addr, 1);
|
||||
frame = frame->next;
|
||||
while (1) {
|
||||
frame = dwarf_unwind_stack(return_addr, _frame);
|
||||
|
||||
if (_frame) {
|
||||
dwarf_frame_free_regs(_frame);
|
||||
mempool_free(_frame, dwarf_frame_pool);
|
||||
}
|
||||
|
||||
_frame = frame;
|
||||
|
||||
if (!frame || !frame->return_addr)
|
||||
break;
|
||||
|
||||
return_addr = frame->return_addr;
|
||||
ops->address(data, return_addr, 1);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -801,6 +869,9 @@ static void dwarf_unwinder_cleanup(void)

	list_for_each_entry(fde, &dwarf_fde_list, link)
		kfree(fde);

	kmem_cache_destroy(dwarf_reg_cachep);
	kmem_cache_destroy(dwarf_frame_cachep);
}

/**

@@ -827,6 +898,21 @@ static int __init dwarf_unwinder_init(void)
	f_entries = 0;
	entry = &__start_eh_frame;

	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
			sizeof(struct dwarf_frame), 0, SLAB_PANIC, NULL);
	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
			sizeof(struct dwarf_reg), 0, SLAB_PANIC, NULL);

	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
					  mempool_alloc_slab,
					  mempool_free_slab,
					  dwarf_frame_cachep);

	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
					mempool_alloc_slab,
					mempool_free_slab,
					dwarf_reg_cachep);

	while ((char *)entry < __stop_eh_frame) {
		p = entry;

@@ -856,7 +942,7 @@ static int __init dwarf_unwinder_init(void)
		else
			c_entries++;
	} else {
		err = dwarf_parse_fde(entry, entry_type, p, len);
		err = dwarf_parse_fde(entry, entry_type, p, len, end);
		if (err < 0)
			goto out;
		else

@@ -8,7 +8,7 @@
#include <asm/system.h>

#ifdef CONFIG_BUG
static void handle_BUG(struct pt_regs *regs)
void handle_BUG(struct pt_regs *regs)
{
	enum bug_trap_type tt;
	tt = report_bug(regs->pc, regs);

@@ -29,7 +29,10 @@ int is_valid_bugaddr(unsigned long addr)
	if (probe_kernel_address((insn_size_t *)addr, opcode))
		return 0;

	return opcode == TRAPA_BUG_OPCODE;
	if (opcode == TRAPA_BUG_OPCODE || opcode == TRAPA_UNWINDER_BUG_OPCODE)
		return 1;

	return 0;
}
#endif

@@ -136,6 +136,7 @@ static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
			regs->pc = fixup->fixup;
			return;
		}

		die(str, regs, err);
	}
}

@@ -54,8 +54,6 @@ static struct list_head unwinder_list = {

static DEFINE_SPINLOCK(unwinder_lock);

static atomic_t unwinder_running = ATOMIC_INIT(0);

/**
 * select_unwinder - Select the best registered stack unwinder.
 *

@@ -123,6 +121,8 @@ int unwinder_register(struct unwinder *u)
	return ret;
}

int unwinder_faulted = 0;

/*
 * Unwind the call stack and pass information to the stacktrace_ops
 * functions. Also handle the case where we need to switch to a new

@@ -145,20 +145,41 @@ void unwind_stack(struct task_struct *task, struct pt_regs *regs,
	 * Hopefully this will give us a semi-reliable stacktrace so we
	 * can diagnose why curr_unwinder->dump() faulted.
	 */
	if (atomic_inc_return(&unwinder_running) != 1) {
	if (unwinder_faulted) {
		spin_lock_irqsave(&unwinder_lock, flags);

		if (!list_is_singular(&unwinder_list)) {
		/* Make sure no one beat us to changing the unwinder */
		if (unwinder_faulted && !list_is_singular(&unwinder_list)) {
			list_del(&curr_unwinder->list);
			curr_unwinder = select_unwinder();

			unwinder_faulted = 0;
		}

		spin_unlock_irqrestore(&unwinder_lock, flags);
		atomic_dec(&unwinder_running);
	}

	curr_unwinder->dump(task, regs, sp, ops, data);
}

	atomic_dec(&unwinder_running);
/*
 * Trap handler for UNWINDER_BUG() statements. We must switch to the
 * unwinder with the next highest rating.
 */
BUILD_TRAP_HANDLER(unwinder)
{
	insn_size_t insn;
	TRAP_HANDLER_DECL;

	/* Rewind */
	regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
	insn = *(insn_size_t *)instruction_pointer(regs);

	/* Switch unwinders when unwind_stack() is called */
	unwinder_faulted = 1;

#ifdef CONFIG_BUG
	handle_BUG(regs);
#endif
}
EXPORT_SYMBOL_GPL(unwind_stack);

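For context on how unwind_stack() is reached (a hypothetical sketch, not part of this commit; print_entry and the ops wiring are invented for illustration): callers pass a stacktrace_ops whose address() callback receives each return address, and the fallback path above only triggers after a previous dump() hit an UNWINDER_BUG().

/* Hypothetical caller of unwind_stack(), for illustration only. */
static void print_entry(void *data, unsigned long addr, int reliable)
{
	printk("%s[<%08lx>]\n", reliable ? "" : "? ", addr);
}

static const struct stacktrace_ops example_ops = {
	.address = print_entry,	/* called once per return address */
};

static void example_backtrace(struct pt_regs *regs)
{
	unwind_stack(current, regs, NULL, &example_ops, NULL);
}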