- As it is not possible for sched_bind(9) to context switch with
  td_critnest > 1 when not already running on the desired CPU, read the
  TICK counter of the BSP via a direct cross trap request in that case
  instead.
- Treat the STICK based timecounter the same way as the TICK based one
  regarding its quality and obtaining the counter value from the BSP.
  Like the TICK timers the STICK ones also are only synchronized during
  their startup (which might not result in good synchronicity in the
  first place) but not afterwards and might drift over time, causing
  problems when the time is read from different CPUs (see r135972).
Marius Strobl 2010-08-08 14:00:21 +00:00
parent dd41ceb14b
commit 553cf1a13c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=211071
5 changed files with 114 additions and 35 deletions
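
In outline, the read path described in the first item above reduces to the
following (an annotated rendition of the new tick_get_timecount_mp() from
the tick.c hunk below; the comments are explanatory and not part of the
commit):

static u_int
tick_get_timecount_mp(struct timecounter *tc)
{
	u_long tick;

	/*
	 * sched_pin() merely prevents migration, so unlike sched_bind(9)
	 * it is usable with td_critnest > 1.
	 */
	sched_pin();
	if (curcpu == 0)
		tick = rd(tick);	/* on the BSP: read %tick directly */
	else
		/* Cross trap the BSP and spin until it has stored %tick. */
		ipi_wait(ipi_rd(0, tl_ipi_tick_rd, &tick));
	sched_unpin();
	return (tick);
}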

View file

@@ -81,6 +81,11 @@ struct ipi_cache_args {
vm_paddr_t ica_pa;
};
+struct ipi_rd_args {
+u_int ira_mask;
+register_t *ira_val;
+};
struct ipi_tlb_args {
u_int ita_mask;
struct pmap *ita_pmap;
@@ -105,6 +110,7 @@ void mp_init(u_int cpu_impl);
extern struct mtx ipi_mtx;
extern struct ipi_cache_args ipi_cache_args;
+extern struct ipi_rd_args ipi_rd_args;
extern struct ipi_tlb_args ipi_tlb_args;
extern char *mp_tramp_code;
@@ -119,6 +125,10 @@ extern char tl_ipi_spitfire_dcache_page_inval[];
extern char tl_ipi_spitfire_icache_page_inval[];
extern char tl_ipi_level[];
+extern char tl_ipi_stick_rd[];
+extern char tl_ipi_tick_rd[];
extern char tl_ipi_tlb_context_demap[];
extern char tl_ipi_tlb_page_demap[];
extern char tl_ipi_tlb_range_demap[];
@@ -178,6 +188,22 @@ ipi_icache_page_inval(void *func, vm_paddr_t pa)
return (&ica->ica_mask);
}
static __inline void *
+ipi_rd(u_int cpu, void *func, u_long *val)
+{
+struct ipi_rd_args *ira;
+if (smp_cpus == 1)
+return (NULL);
+sched_pin();
+ira = &ipi_rd_args;
+mtx_lock_spin(&ipi_mtx);
+ira->ira_mask = 1 << cpu | PCPU_GET(cpumask);
+ira->ira_val = val;
+cpu_ipi_single(cpu, 0, (u_long)func, (u_long)ira);
+return (&ira->ira_mask);
+}
+static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{
@@ -282,6 +308,13 @@ ipi_icache_page_inval(void *func __unused, vm_paddr_t pa __unused)
return (NULL);
}
static __inline void *
+ipi_rd(u_int cpu __unused, void *func __unused, u_long *val __unused)
+{
+return (NULL);
+}
+static __inline void *
ipi_tlb_context_demap(struct pmap *pm __unused)
{
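The pointer ipi_rd() returns (&ira->ira_mask, or NULL when there is only
one CPU) serves as a completion cookie for ipi_wait(). The contract
assumed here is roughly the following sketch; the real ipi_wait() lives
elsewhere in smp.h and is not part of this diff, so treat the details as
recalled rather than authoritative:

static __inline void
ipi_wait(void *cookie)
{
	volatile u_int *mask;

	if ((mask = cookie) != NULL) {
		/* Clear our own bit; ipi_rd() included it in the mask. */
		atomic_clear_int(mask, PCPU_GET(cpumask));
		/* Spin until the target's IPI_DONE clears its bit. */
		while (*mask != 0)
			;
		mtx_unlock_spin(&ipi_mtx);	/* locked in ipi_rd() */
		sched_unpin();			/* pinned in ipi_rd() */
	}
}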

View file

@@ -216,6 +216,9 @@ ASSYM(IR_PRI, offsetof(struct intr_request, ir_pri));
ASSYM(IR_VEC, offsetof(struct intr_request, ir_vec));
#ifdef SMP
+ASSYM(IRA_MASK, offsetof(struct ipi_rd_args, ira_mask));
+ASSYM(IRA_VAL, offsetof(struct ipi_rd_args, ira_val));
ASSYM(ITA_MASK, offsetof(struct ipi_tlb_args, ita_mask));
ASSYM(ITA_PMAP, offsetof(struct ipi_tlb_args, ita_pmap));
ASSYM(ITA_START, offsetof(struct ipi_tlb_args, ita_start));

View file

@@ -47,7 +47,7 @@ __FBSDID("$FreeBSD$");
*/
ENTRY(tl_ipi_spitfire_dcache_page_inval)
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "ipi_dcache_page_inval: pa=%#lx"
CATR(KTR_SMP, "tl_ipi_spitfire_dcache_page_inval: pa=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
ldx [%g5 + ICA_PA], %g2
stx %g2, [%g1 + KTR_PARM1]
@@ -87,7 +87,7 @@ END(tl_ipi_spitfire_dcache_page_inval)
*/
ENTRY(tl_ipi_spitfire_icache_page_inval)
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "ipi_icache_page_inval: pa=%#lx"
CATR(KTR_SMP, "tl_ipi_spitfire_icache_page_inval: pa=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
ldx [%g5 + ICA_PA], %g2
stx %g2, [%g1 + KTR_PARM1]
@@ -126,7 +126,7 @@ END(tl_ipi_spitfire_icache_page_inval)
*/
ENTRY(tl_ipi_cheetah_dcache_page_inval)
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "ipi_dcache_page_inval: pa=%#lx"
CATR(KTR_SMP, "tl_ipi_cheetah_dcache_page_inval: pa=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
ldx [%g5 + ICA_PA], %g2
stx %g2, [%g1 + KTR_PARM1]
@@ -256,7 +256,7 @@ END(tl_ipi_tlb_range_demap)
*/
ENTRY(tl_ipi_tlb_context_demap)
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "ipi_tlb_page_demap: pm=%p va=%#lx"
CATR(KTR_SMP, "tl_ipi_tlb_context_demap: pm=%p va=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
ldx [%g5 + ITA_PMAP], %g2
stx %g2, [%g1 + KTR_PARM1]
@@ -274,3 +274,27 @@ ENTRY(tl_ipi_tlb_context_demap)
IPI_DONE(%g5, %g1, %g2, %g3)
retry
END(tl_ipi_tlb_context_demap)
+/*
+ * Read %stick.
+ */
+ENTRY(tl_ipi_stick_rd)
+ldx [%g5 + IRA_VAL], %g1
+rd %asr24, %g2
+stx %g2, [%g1]
+IPI_DONE(%g5, %g1, %g2, %g3)
+retry
+END(tl_ipi_stick_rd)
+/*
+ * Read %tick.
+ */
+ENTRY(tl_ipi_tick_rd)
+ldx [%g5 + IRA_VAL], %g1
+rd %tick, %g2
+stx %g2, [%g1]
+IPI_DONE(%g5, %g1, %g2, %g3)
+retry
+END(tl_ipi_tick_rd)
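
In C terms each handler does roughly the following on the receiving CPU
(an illustrative paraphrase of the assembly above; ipi_done() is a
hypothetical stand-in for the IPI_DONE macro, and %asr24 is the %stick
register on the CPUs that have one):

static void
tl_ipi_tick_rd_c(struct ipi_rd_args *ira)
{
	/* ldx [%g5 + IRA_VAL], %g1; rd %tick, %g2; stx %g2, [%g1] */
	*ira->ira_val = rd(tick);
	/* IPI_DONE: clear this CPU's bit in ira->ira_mask so the sender
	   spinning in ipi_wait() can proceed; then retry from the trap. */
	ipi_done(&ira->ira_mask);
}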

View file

@@ -109,6 +109,7 @@ static ih_func_t cpu_ipi_stop;
*/
struct cpu_start_args cpu_start_args = { 0, -1, -1, 0, 0, 0 };
struct ipi_cache_args ipi_cache_args;
+struct ipi_rd_args ipi_rd_args;
struct ipi_tlb_args ipi_tlb_args;
struct pcb stoppcbs[MAXCPU];

View file

@@ -43,13 +43,16 @@ __FBSDID("$FreeBSD$");
#include <dev/ofw/openfirm.h>
#include <vm/vm.h>
#include <vm/pmap.h>
+#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/intr_machdep.h>
+#include <machine/smp.h>
#include <machine/tick.h>
#include <machine/ver.h>
-#define STICK_QUALITY -500
#define TICK_QUALITY_MP 10
#define TICK_QUALITY_UP 1000
@@ -80,7 +83,10 @@ static struct timecounter tick_tc;
static struct eventtimer tick_et;
static uint64_t tick_cputicks(void);
-static timecounter_get_t stick_get_timecount;
+static timecounter_get_t stick_get_timecount_up;
+#ifdef SMP
+static timecounter_get_t stick_get_timecount_mp;
+#endif
static timecounter_get_t tick_get_timecount_up;
#ifdef SMP
static timecounter_get_t tick_get_timecount_mp;
@@ -135,8 +141,14 @@ cpu_initclocks(void)
}
/*
-* Initialize the TICK-based timecounter. This must not happen
-* before SI_SUB_INTRINSIC for tick_get_timecount_mp() to work.
+* Initialize the (S)TICK-based timecounter(s).
+* Note that we (try to) sync the (S)TICK timers of APs with the BSP
+* during their startup but not afterwards. The resulting drift can
+* cause problems when the time is calculated based on (S)TICK values
+* read on different CPUs. Thus we always read the register on the
+* BSP (if necessary via an IPI as sched_bind(9) isn't available in
+* all circumstances) and use a low quality for the otherwise high
+* quality (S)TICK timers in the MP case.
*/
tick_tc.tc_get_timecount = tick_get_timecount_up;
tick_tc.tc_poll_pps = NULL;
@@ -146,14 +158,6 @@ cpu_initclocks(void)
tick_tc.tc_quality = TICK_QUALITY_UP;
tick_tc.tc_priv = NULL;
#ifdef SMP
-/*
-* We (try to) sync the (S)TICK timers of APs with the BSP during
-* their startup but not afterwards. The resulting drift can
-* cause problems when the time is calculated based on (S)TICK
-* values read on different CPUs. Thus we bind to the BSP for
-* reading the register and use a low quality for the otherwise
-* high quality (S)TICK timers in the MP case.
-*/
if (cpu_mp_probe()) {
tick_tc.tc_get_timecount = tick_get_timecount_mp;
tick_tc.tc_quality = TICK_QUALITY_MP;
@@ -161,17 +165,23 @@ cpu_initclocks(void)
#endif
tc_init(&tick_tc);
if (sclock != 0) {
-stick_tc.tc_get_timecount = stick_get_timecount;
+stick_tc.tc_get_timecount = stick_get_timecount_up;
stick_tc.tc_poll_pps = NULL;
stick_tc.tc_counter_mask = ~0u;
stick_tc.tc_frequency = sclock;
stick_tc.tc_name = "stick";
-stick_tc.tc_quality = STICK_QUALITY;
+stick_tc.tc_quality = TICK_QUALITY_UP;
stick_tc.tc_priv = NULL;
+#ifdef SMP
+if (cpu_mp_probe()) {
+stick_tc.tc_get_timecount = stick_get_timecount_mp;
+stick_tc.tc_quality = TICK_QUALITY_MP;
+}
+#endif
tc_init(&stick_tc);
}
tick_et.et_name = hardclock_use_stick ? "stick" : "tick";
-tick_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT | 
+tick_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
ET_FLAGS_PERCPU;
tick_et.et_quality = 1000;
tick_et.et_frequency = hardclock_use_stick ? sclock : clock;
@@ -183,7 +193,7 @@ cpu_initclocks(void)
tick_et.et_stop = tick_et_stop;
tick_et.et_priv = NULL;
et_register(&tick_et);
cpu_initclocks_bsp();
}
@@ -307,7 +317,7 @@ tick_hardclock_common(struct trapframe *tf, u_long tick, u_long adj)
}
static u_int
-stick_get_timecount(struct timecounter *tc)
+stick_get_timecount_up(struct timecounter *tc)
{
return ((u_int)rdstick());
@@ -321,23 +331,31 @@ tick_get_timecount_up(struct timecounter *tc)
}
#ifdef SMP
static u_int
+stick_get_timecount_mp(struct timecounter *tc)
+{
+u_long stick;
+sched_pin();
+if (curcpu == 0)
+stick = rdstick();
+else
+ipi_wait(ipi_rd(0, tl_ipi_stick_rd, &stick));
+sched_unpin();
+return (stick);
+}
+static u_int
tick_get_timecount_mp(struct timecounter *tc)
{
-struct thread *td;
-u_int tick;
-td = curthread;
-thread_lock(td);
-sched_bind(td, 0);
-thread_unlock(td);
-tick = tick_get_timecount_up(tc);
-thread_lock(td);
-sched_unbind(td);
-thread_unlock(td);
+u_long tick;
+sched_pin();
+if (curcpu == 0)
+tick = rd(tick);
+else
+ipi_wait(ipi_rd(0, tl_ipi_tick_rd, &tick));
+sched_unpin();
return (tick);
}
#endif
@@ -360,7 +378,7 @@ tick_et_start(struct eventtimer *et,
fdiv = (tick_et.et_frequency * (first->frac >> 32)) >> 32;
if (first->sec != 0)
fdiv += tick_et.et_frequency * first->sec;
-} else 
+} else
fdiv = div;
PCPU_SET(tickincrement, div);