[MIPS] Fix potential latency problem due to non-atomic cpu_wait.

If an interrupt happens between the check of NEED_RESCHED and the WAIT
instruction, adjust the EPC so execution restarts from the NEED_RESCHED
check.

Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 6657fe0a02
commit c65a5480ff

3 changed files with 55 additions and 20 deletions
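
For context, the latency problem lives in the idle path's check-then-sleep pattern. The sketch below is illustrative only and is not the actual arch/mips idle loop of this kernel version; the stub definitions exist solely so the sketch compiles on its own, while need_resched(), cpu_wait, schedule() and TIF_NEED_RESCHED are the real kernel names.

/*
 * Illustrative sketch of the race this commit closes; not kernel code.
 * The flag test and the WAIT instruction are two separate steps, so
 * they are not atomic with respect to interrupts.
 */
static volatile int resched_flag;                 /* set by the "interrupt" */
static int need_resched(void) { return resched_flag; }
static void wait_stub(void) { /* stands in for the WAIT instruction */ }
static void (*cpu_wait)(void) = wait_stub;
static void schedule(void) { }

static void idle_sketch(void)
{
        while (!need_resched()) {   /* (1) TIF_NEED_RESCHED is clear       */
                                    /* (2) an interrupt here can set the
                                     *     flag, but ...                   */
                (*cpu_wait)();      /* (3) ... WAIT still executes and the
                                     *     CPU sleeps until the *next*
                                     *     interrupt arrives               */
        }
        schedule();
}

With this commit, r4k_wait re-tests TI_FLAGS immediately before WAIT inside a 32-byte region, and the interrupt entry code rolls EPC back to the start of that region, so a reschedule request set in the window above is seen before the CPU goes to sleep.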
@@ -45,18 +45,7 @@ static void r39xx_wait(void)
                local_irq_enable();
 }
 
-/*
- * There is a race when the WAIT instruction is executed with interrupts
- * enabled.
- * But it is implementation-dependent whether the pipeline restarts when
- * a non-enabled interrupt is requested.
- */
-static void r4k_wait(void)
-{
-        __asm__("       .set    mips3           \n"
-                "       wait                    \n"
-                "       .set    mips0           \n");
-}
+extern void r4k_wait(void);
 
 /*
  * This variant is preferable as it allows testing need_resched and going to
@@ -128,7 +117,7 @@ static int __init wait_disable(char *s)
 
 __setup("nowait", wait_disable);
 
-static inline void check_wait(void)
+void __init check_wait(void)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
 
@@ -242,7 +231,6 @@ static inline void check_errata(void)
 
 void __init check_bugs32(void)
 {
-       check_wait();
        check_errata();
 }
 
@@ -20,6 +20,7 @@
 #include <asm/stackframe.h>
 #include <asm/war.h>
 #include <asm/page.h>
+#include <asm/thread_info.h>
 
 #define PANIC_PIC(msg)                                  \
                .set push;                              \
@@ -126,7 +127,42 @@ handle_vcei:
 
        __FINIT
 
+       .align  5       /* 32 byte rollback region */
+LEAF(r4k_wait)
+       .set    push
+       .set    noreorder
+       /* start of rollback region */
+       LONG_L  t0, TI_FLAGS($28)
+       nop
+       andi    t0, _TIF_NEED_RESCHED
+       bnez    t0, 1f
+        nop
+       nop
+       nop
+       .set    mips3
+       wait
+       /* end of rollback region (the region size must be power of two) */
+       .set    pop
+1:
+       jr      ra
+       END(r4k_wait)
+
+       .macro  BUILD_ROLLBACK_PROLOGUE handler
+       FEXPORT(rollback_\handler)
+       .set    push
+       .set    noat
+       MFC0    k0, CP0_EPC
+       PTR_LA  k1, r4k_wait
+       ori     k0, 0x1f        /* 32 byte rollback region */
+       xori    k0, 0x1f
+       bne     k0, k1, 9f
+        MTC0   k0, CP0_EPC
+9:
+       .set    pop
+       .endm
+
        .align  5
+BUILD_ROLLBACK_PROLOGUE handle_int
 NESTED(handle_int, PT_SIZE, sp)
 #ifdef CONFIG_TRACE_IRQFLAGS
        /*
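
The ori/xori pair in BUILD_ROLLBACK_PROLOGUE is an align-down: setting and then clearing the low five bits rounds the interrupted EPC down to its enclosing 32-byte boundary, which is why r4k_wait is placed with .align 5 and why the region size must be a power of two. A minimal C sketch of the same test follows (illustrative only; interrupted_in_rollback_region() is not a kernel function):

#include <stdint.h>

/*
 * Sketch of what the MFC0/ori/xori/bne sequence above computes.
 * Illustration only, not kernel code.
 */
static int interrupted_in_rollback_region(uintptr_t epc, uintptr_t r4k_wait_addr)
{
        /* (epc | 0x1f) ^ 0x1f clears the low 5 bits: align down to 32 bytes */
        uintptr_t region_start = (epc | 0x1f) ^ 0x1f;

        /* r4k_wait is .align 5, so its address is the region start */
        return region_start == r4k_wait_addr;
}

When the test succeeds, the prologue writes r4k_wait back into CP0_EPC, so returning from the interrupt re-executes the TI_FLAGS test before WAIT can run.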
@@ -201,6 +237,7 @@ NESTED(except_vec_ejtag_debug, 0, sp)
  * This prototype is copied to ebase + n*IntCtl.VS and patched
  * to invoke the handler
  */
+BUILD_ROLLBACK_PROLOGUE except_vec_vi
 NESTED(except_vec_vi, 0, sp)
        SAVE_SOME
        SAVE_AT
@@ -46,6 +46,9 @@
 #include <asm/types.h>
 #include <asm/stacktrace.h>
 
+extern void check_wait(void);
+extern asmlinkage void r4k_wait(void);
+extern asmlinkage void rollback_handle_int(void);
 extern asmlinkage void handle_int(void);
 extern asmlinkage void handle_tlbm(void);
 extern asmlinkage void handle_tlbl(void);
@@ -1251,6 +1254,9 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
 
        extern char except_vec_vi, except_vec_vi_lui;
        extern char except_vec_vi_ori, except_vec_vi_end;
+       extern char rollback_except_vec_vi;
+       char *vec_start = (cpu_wait == r4k_wait) ?
+               &rollback_except_vec_vi : &except_vec_vi;
 #ifdef CONFIG_MIPS_MT_SMTC
        /*
         * We need to provide the SMTC vectored interrupt handler
@@ -1258,11 +1264,11 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
         * Status.IM bit to be masked before going there.
         */
        extern char except_vec_vi_mori;
-       const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
+       const int mori_offset = &except_vec_vi_mori - vec_start;
 #endif /* CONFIG_MIPS_MT_SMTC */
-       const int handler_len = &except_vec_vi_end - &except_vec_vi;
-       const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
-       const int ori_offset = &except_vec_vi_ori - &except_vec_vi;
+       const int handler_len = &except_vec_vi_end - vec_start;
+       const int lui_offset = &except_vec_vi_lui - vec_start;
+       const int ori_offset = &except_vec_vi_ori - vec_start;
 
        if (handler_len > VECTORSPACING) {
                /*
@@ -1272,7 +1278,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
                panic("VECTORSPACING too small");
        }
 
-       memcpy(b, &except_vec_vi, handler_len);
+       memcpy(b, vec_start, handler_len);
 #ifdef CONFIG_MIPS_MT_SMTC
        BUG_ON(n > 7);  /* Vector index %d exceeds SMTC maximum. */
 
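
The offset changes above matter because set_vi_srs_handler() copies a handler template into the vector slot and then patches instructions inside the copy; once the rollback prologue can be the first thing copied, every offset has to be measured from vec_start, or the patches would land short of their targets by the prologue length. A hedged sketch of that idea (patch_vector_copy() and its parameters are illustrative, not kernel API):

#include <stddef.h>
#include <string.h>

/*
 * Illustration only: patching a word inside a copied handler template.
 * The patch position inside the copy is (symbol - vec_start), i.e. it is
 * relative to the first byte that was actually copied, not to a symbol
 * that may now sit in the middle of the template.
 */
static void patch_vector_copy(void *slot, const char *vec_start,
                              const char *patch_sym, unsigned int insn)
{
        size_t offset = patch_sym - vec_start;  /* position inside the copy */

        memcpy((char *)slot + offset, &insn, sizeof(insn));
}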
@@ -1554,6 +1560,10 @@ void __init trap_init(void)
        extern char except_vec3_generic, except_vec3_r4000;
        extern char except_vec4;
        unsigned long i;
+       int rollback;
+
+       check_wait();
+       rollback = (cpu_wait == r4k_wait);
 
 #if defined(CONFIG_KGDB)
        if (kgdb_early_setup)
@@ -1618,7 +1628,7 @@ void __init trap_init(void)
        if (board_be_init)
                board_be_init();
 
-       set_except_vector(0, handle_int);
+       set_except_vector(0, rollback ? rollback_handle_int : handle_int);
        set_except_vector(1, handle_tlbm);
        set_except_vector(2, handle_tlbl);
        set_except_vector(3, handle_tlbs);