Use per-cpu values for base and last in tc_cpu_ticks(). The values

are updated locklessly; each CPU writes its own view of the timecounter
state.  The critical section is entered for safety; callers of
tc_cpu_ticks() are supposed to already be in a critical section, or to
own a spinlock.

The change fixes sporadic reports of too-high values reported for
the (W)CPU on platforms that do not provide a cpu ticker and use
tc_cpu_ticks(), in particular, arm*.

Diagnosed and reviewed by:	jhb
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
This commit is contained in:
Konstantin Belousov 2015-09-25 13:03:57 +00:00
parent e675024a02
commit b2557db607
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=288216

View file

@ -1924,20 +1924,27 @@ SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
static int cpu_tick_variable;
static uint64_t cpu_tick_frequency;
/*
 * Per-CPU state for tc_cpu_ticks(): the accumulated wrap base and the
 * last raw counter value observed on this CPU.  Updated locklessly;
 * each CPU writes only its own copy (see tc_cpu_ticks()).
 */
static DPCPU_DEFINE(uint64_t, tc_cpu_ticks_base);
static DPCPU_DEFINE(unsigned, tc_cpu_ticks_last);
/*
 * Fallback cpu-tick source: extend the current timecounter's (possibly
 * narrow) hardware counter into a monotonically increasing 64-bit
 * value by accumulating a per-CPU wrap base.
 *
 * The per-CPU base/last values are updated locklessly; the critical
 * section here is only for safety, callers are expected to already be
 * in a critical section or to hold a spinlock, which pins the thread
 * to one CPU across the DPCPU_PTR() lookups and updates.
 *
 * Returns the extended 64-bit tick count for the current CPU.
 */
static uint64_t
tc_cpu_ticks(void)
{
	struct timecounter *tc;
	uint64_t res, *base;
	unsigned u, *last;

	critical_enter();
	base = DPCPU_PTR(tc_cpu_ticks_base);
	last = DPCPU_PTR(tc_cpu_ticks_last);
	tc = timehands->th_counter;
	u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
	if (u < *last)		/* hardware counter wrapped around */
		*base += (uint64_t)tc->tc_counter_mask + 1;
	*last = u;
	res = u + *base;
	critical_exit();
	return (res);
}
void