diff --git a/sys/mips/include/md_var.h b/sys/mips/include/md_var.h
index c2a61559ff1a..6f65a0f8f248 100644
--- a/sys/mips/include/md_var.h
+++ b/sys/mips/include/md_var.h
@@ -56,6 +56,7 @@ void	MipsSwitchFPState(struct thread *, struct trapframe *);
 u_long	kvtop(void *addr);
 int	is_cacheable_mem(vm_paddr_t addr);
 void	mips_generic_reset(void);
+void	mips_wait(void);
 
 #define	MIPS_DEBUG	0
 
diff --git a/sys/mips/mips/exception.S b/sys/mips/mips/exception.S
index 729391e1efcf..8b7307c7cdb2 100644
--- a/sys/mips/mips/exception.S
+++ b/sys/mips/mips/exception.S
@@ -557,6 +557,33 @@ NNON_LEAF(MipsUserGenException, CALLFRAME_SIZ, ra)
 	.set	at
 END(MipsUserGenException)
 
+	.set push
+	.set noat
+NON_LEAF(mips_wait, CALLFRAME_SIZ, ra)
+	PTR_SUBU	sp, sp, CALLFRAME_SIZ
+	.mask	0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
+	REG_S	ra, CALLFRAME_RA(sp)	# save RA
+	mfc0	t0, MIPS_COP_0_STATUS
+	xori	t1, t0, MIPS_SR_INT_IE
+	mtc0	t1, MIPS_COP_0_STATUS
+	COP0_SYNC
+	jal	sched_runnable
+	nop
+	REG_L	ra, CALLFRAME_RA(sp)
+	mfc0	t0, MIPS_COP_0_STATUS
+	ori	t1, t0, MIPS_SR_INT_IE
+	.align 4
+GLOBAL(MipsWaitStart)		# this is 16-byte aligned
+	mtc0	t1, MIPS_COP_0_STATUS
+	bnez	v0, MipsWaitEnd
+	nop
+	wait
+GLOBAL(MipsWaitEnd)		# MipsWaitStart + 16
+	jr	ra
+	PTR_ADDU	sp, sp, CALLFRAME_SIZ
+END(mips_wait)
+	.set pop
+
 /*----------------------------------------------------------------------------
  *
  *	MipsKernIntr --
@@ -578,6 +605,19 @@ NNON_LEAF(MipsKernIntr, KERN_EXC_FRAME_SIZE, ra)
 	.set	noat
 	PTR_SUBU	sp, sp, KERN_EXC_FRAME_SIZE
 	.mask	0x80000000, (CALLFRAME_RA - KERN_EXC_FRAME_SIZE)
+
+/*
+ * Check for an interrupt taken just before the wait instruction
+ */
+	MFC0	k0, MIPS_COP_0_EXC_PC
+	ori	k0, 0xf
+	xori	k0, 0xf			# 16-byte align
+	PTR_LA	k1, MipsWaitStart
+	bne	k0, k1, 1f
+	nop
+	PTR_ADDU	k1, 16		# skip over the wait
+	MTC0	k1, MIPS_COP_0_EXC_PC
+1:
 /*
  * Save CPU state, building 'frame'.
  */
diff --git a/sys/mips/mips/machdep.c b/sys/mips/mips/machdep.c
index e348e41d7c85..405d457b966b 100644
--- a/sys/mips/mips/machdep.c
+++ b/sys/mips/mips/machdep.c
@@ -163,6 +163,9 @@ extern char MipsTLBMiss[], MipsTLBMissEnd[];
 /* Cache error handler */
 extern char MipsCache[], MipsCacheEnd[];
 
+/* MIPS wait skip region */
+extern char MipsWaitStart[], MipsWaitEnd[];
+
 extern char edata[], end[];
 #ifdef DDB
 extern vm_offset_t ksym_start, ksym_end;
@@ -326,6 +329,12 @@ struct msgbuf *msgbufp=0;
 void
 mips_vector_init(void)
 {
+	/*
+	 * Make sure that the wait region logic has not
+	 * been changed
+	 */
+	if (MipsWaitEnd - MipsWaitStart != 16)
+		panic("startup: MIPS wait region not correct");
 	/*
 	 * Copy down exception vector code.
 	 */
@@ -485,24 +494,9 @@ spinlock_exit(void)
 /*
  * call platform specific code to halt (until next interrupt) for the idle loop
  */
-/*
- * This is disabled because of three issues:
- *
- * + By calling critical_enter(), any interrupt which occurs after that but
- *   before the wait instruction will be handled but not serviced (in the case
- *   of a netisr) because preemption is not allowed at this point;
- * + Any fast interrupt handler which schedules an immediate or fast callout
- *   will not occur until the wait instruction is interrupted, as the clock
- *   has already been set by cpu_idleclock();
- * + There is currently no known way to atomically enable interrupts and call
- *   wait, which is how the i386/amd64 code gets around (1).  Thus even if
- *   interrupts were disabled and reenabled just before the wait call, any
- *   interrupt that did occur may not interrupt wait.
- */
 void
 cpu_idle(int busy)
 {
-#if 0
 	KASSERT((mips_rd_status() & MIPS_SR_INT_IE) != 0,
 	    ("interrupts disabled in idle process."));
 	KASSERT((mips_rd_status() & MIPS_INT_MASK) != 0,
@@ -512,12 +506,11 @@ cpu_idle(int busy)
 		critical_enter();
 		cpu_idleclock();
 	}
-	__asm __volatile ("wait");
+	mips_wait();
 	if (!busy) {
 		cpu_activeclock();
 		critical_exit();
 	}
-#endif
 }
 
 int
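At the C level, mips_wait() behaves roughly like the sketch below. This is
illustrative only and not part of the patch: the function name is
hypothetical, intr_disable()/intr_restore() stand in for the mfc0/mtc0
status updates, and the real routine has to be assembly so that the
enable-interrupts/wait pair occupies a fixed, recognizable code window.

	/*
	 * Hypothetical C rendering of mips_wait() (kernel context assumed).
	 */
	void
	mips_wait_sketch(void)
	{
		register_t s;
		int runnable;

		s = intr_disable();	/* the mfc0/xori/mtc0 sequence */
		runnable = sched_runnable();
		/*
		 * MipsWaitStart: written in C, the next two statements can
		 * race; an interrupt arriving between them would be slept
		 * through until the following interrupt.
		 */
		intr_restore(s);	/* the mfc0/ori/mtc0 sequence */
		if (!runnable)
			__asm __volatile ("wait");
		/* MipsWaitEnd */
	}

The assembly closes that race without needing an atomic enable-and-wait
instruction: mtc0/bnez/nop/wait is exactly four 4-byte instructions, and
".align 4" pads it to a 16-byte-aligned start so the interrupt path can
recognize the window from the EPC alone.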
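The handler side of the trick looks like this in C (again illustrative: the
helper name is hypothetical, and in MipsKernIntr the check runs on k0/k1
before any CPU state is saved):

	/*
	 * Hypothetical C rendering of the MipsKernIntr prologue check.
	 */
	static void
	mips_wait_fixup_epc(struct trapframe *tf)
	{
		uintptr_t epc;

		/*
		 * Round EPC down to a 16-byte boundary (the ori/xori pair).
		 * MipsWaitStart is 16-byte aligned and the region is exactly
		 * 16 bytes, so every PC inside the window maps to it.
		 */
		epc = (uintptr_t)tf->pc & ~(uintptr_t)0xf;
		if (epc == (uintptr_t)MipsWaitStart)
			tf->pc = (register_t)(uintptr_t)(MipsWaitStart + 16);
	}

When the interrupt lands inside the window, the idle thread resumes just
past the wait instruction instead of sleeping until the next interrupt.
That is also why mips_vector_init() panics when MipsWaitEnd - MipsWaitStart
!= 16: an extra instruction in the region would break the alignment
assumption and quietly reintroduce the race.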