linux/arch/blackfin/kernel/time-ts.c
Nicolai Stange 18154c5c97 blackfin: time-ts: Set ->min_delta_ticks and ->max_delta_ticks
In preparation for making the clockevents core NTP correction aware,
all clockevent device drivers must set ->min_delta_ticks and
->max_delta_ticks rather than ->min_delta_ns and ->max_delta_ns: a
clockevent device's rate is going to change dynamically and thus the
ratio of ns to ticks will no longer stay invariant.

Make the blackfin arch's clockevent driver initialize these fields
properly.

This patch alone doesn't introduce any change in functionality as the
clockevents core still looks exclusively at the (untouched) ->min_delta_ns
and ->max_delta_ns. As soon as this has changed, a followup patch will
purge the initialization of ->min_delta_ns and ->max_delta_ns from this
driver.

Cc: Ingo Molnar <mingo@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Richard Cochran <richardcochran@gmail.com>
Cc: Prarit Bhargava <prarit@redhat.com>
Cc: Stephen Boyd <sboyd@codeaurora.org>
Cc: Steven Miao <realmz6@gmail.com>
Signed-off-by: Nicolai Stange <nicstange@gmail.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
2017-04-14 13:11:18 -07:00
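
For reference, below is a minimal sketch of the initialization pattern this change establishes, assuming a driver whose event timer runs at clock_tick Hz. The helper name example_clockevent_init() is hypothetical; clockevent_delta2ns(), div_sc() and the clock_event_device fields are the existing clockevents API, and the delta bounds (100 ticks minimum, full-range maximum) mirror what the two init functions in this file use.

#include <linux/init.h>
#include <linux/clockchips.h>

/*
 * Sketch only: during the transition the driver keeps the legacy
 * ->min/max_delta_ns fields populated for today's clockevents core and
 * mirrors the same bounds in ->min/max_delta_ticks, which stay valid
 * even when the device's rate is corrected at runtime.
 */
static void __init example_clockevent_init(struct clock_event_device *evt,
					   unsigned long clock_tick)
{
	evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);

	/* still consumed by the current clockevents core ... */
	evt->max_delta_ns = clockevent_delta2ns(-1, evt);
	evt->min_delta_ns = clockevent_delta2ns(100, evt);

	/* ... and the rate-independent tick bounds that will replace them */
	evt->max_delta_ticks = (unsigned long)-1;
	evt->min_delta_ticks = 100;

	clockevents_register_device(evt);
}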


/*
 * Based on arm clockevents implementation and old bfin time tick.
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *                2008 GeoTechnologies
 *                     Vitja Makarov
 *
 * Licensed under the GPL-2
 */

#include <linux/module.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/irq.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpufreq.h>
#include <asm/blackfin.h>
#include <asm/time.h>
#include <asm/gptimers.h>
#include <asm/nmi.h>

#if defined(CONFIG_CYCLES_CLOCKSOURCE)

/* free-running CCLK cycle counter, compensated for cpufreq transitions */
static notrace u64 bfin_read_cycles(struct clocksource *cs)
{
#ifdef CONFIG_CPU_FREQ
	return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
#else
	return get_cycles();
#endif
}

static struct clocksource bfin_cs_cycles = {
	.name		= "bfin_cs_cycles",
	.rating		= 400,
	.read		= bfin_read_cycles,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static inline unsigned long long bfin_cs_cycles_sched_clock(void)
{
	return clocksource_cyc2ns(bfin_read_cycles(&bfin_cs_cycles),
		bfin_cs_cycles.mult, bfin_cs_cycles.shift);
}

static int __init bfin_cs_cycles_init(void)
{
	if (clocksource_register_hz(&bfin_cs_cycles, get_cclk()))
		panic("failed to register clocksource");

	return 0;
}
#else
# define bfin_cs_cycles_init()
#endif

#ifdef CONFIG_GPTMR0_CLOCKSOURCE

void __init setup_gptimer0(void)
{
	disable_gptimers(TIMER0bit);

#ifdef CONFIG_BF60x
	bfin_write16(TIMER_DATA_IMSK, 0);
	set_gptimer_config(TIMER0_id, TIMER_OUT_DIS
		| TIMER_MODE_PWM_CONT | TIMER_PULSE_HI | TIMER_IRQ_PER);
#else
	set_gptimer_config(TIMER0_id,
		TIMER_OUT_DIS | TIMER_PERIOD_CNT | TIMER_MODE_PWM);
#endif
	/* free-run over the full 32-bit range of the timer */
	set_gptimer_period(TIMER0_id, -1);
	set_gptimer_pwidth(TIMER0_id, -2);
	SSYNC();
	enable_gptimers(TIMER0bit);
}

static u64 bfin_read_gptimer0(struct clocksource *cs)
{
	return bfin_read_TIMER0_COUNTER();
}

static struct clocksource bfin_cs_gptimer0 = {
	.name		= "bfin_cs_gptimer0",
	.rating		= 350,
	.read		= bfin_read_gptimer0,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static inline unsigned long long bfin_cs_gptimer0_sched_clock(void)
{
	return clocksource_cyc2ns(bfin_read_TIMER0_COUNTER(),
		bfin_cs_gptimer0.mult, bfin_cs_gptimer0.shift);
}

static int __init bfin_cs_gptimer0_init(void)
{
	setup_gptimer0();

	if (clocksource_register_hz(&bfin_cs_gptimer0, get_sclk()))
		panic("failed to register clocksource");

	return 0;
}
#else
# define bfin_cs_gptimer0_init()
#endif

#if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE)
/* prefer to use cycles since it has higher rating */
notrace unsigned long long sched_clock(void)
{
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
	return bfin_cs_cycles_sched_clock();
#else
	return bfin_cs_gptimer0_sched_clock();
#endif
}
#endif

#if defined(CONFIG_TICKSOURCE_GPTMR0)
static int bfin_gptmr0_set_next_event(unsigned long cycles,
				      struct clock_event_device *evt)
{
	disable_gptimers(TIMER0bit);

	/* it starts counting three SCLK cycles after the TIMENx bit is set */
	set_gptimer_pwidth(TIMER0_id, cycles - 3);
	enable_gptimers(TIMER0bit);
	return 0;
}

static int bfin_gptmr0_set_periodic(struct clock_event_device *evt)
{
#ifndef CONFIG_BF60x
	set_gptimer_config(TIMER0_id,
			   TIMER_OUT_DIS | TIMER_IRQ_ENA |
			   TIMER_PERIOD_CNT | TIMER_MODE_PWM);
#else
	set_gptimer_config(TIMER0_id,
			   TIMER_OUT_DIS | TIMER_MODE_PWM_CONT |
			   TIMER_PULSE_HI | TIMER_IRQ_PER);
#endif
	set_gptimer_period(TIMER0_id, get_sclk() / HZ);
	set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1);
	enable_gptimers(TIMER0bit);
	return 0;
}

static int bfin_gptmr0_set_oneshot(struct clock_event_device *evt)
{
	disable_gptimers(TIMER0bit);
#ifndef CONFIG_BF60x
	set_gptimer_config(TIMER0_id,
			   TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM);
#else
	set_gptimer_config(TIMER0_id,
			   TIMER_OUT_DIS | TIMER_MODE_PWM | TIMER_PULSE_HI |
			   TIMER_IRQ_WID_DLY);
#endif
	set_gptimer_period(TIMER0_id, 0);
	return 0;
}

static int bfin_gptmr0_shutdown(struct clock_event_device *evt)
{
	disable_gptimers(TIMER0bit);
	return 0;
}

static void bfin_gptmr0_ack(void)
{
	clear_gptimer_intr(TIMER0_id);
}

static void __init bfin_gptmr0_init(void)
{
	disable_gptimers(TIMER0bit);
}

#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	smp_mb();
	/*
	 * We want to ACK before we handle so that we can handle smaller timer
	 * intervals. This way if the timer expires again while we're handling
	 * things, we're more likely to see that 2nd int rather than swallowing
	 * it by ACKing the int at the end of this handler.
	 */
	bfin_gptmr0_ack();
	evt->event_handler(evt);
	return IRQ_HANDLED;
}

static struct irqaction gptmr0_irq = {
	.name		= "Blackfin GPTimer0",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL | IRQF_PERCPU,
	.handler	= bfin_gptmr0_interrupt,
};

static struct clock_event_device clockevent_gptmr0 = {
	.name		= "bfin_gptimer0",
	.rating		= 300,
	.irq		= IRQ_TIMER0,
	.shift		= 32,
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event	= bfin_gptmr0_set_next_event,
	.set_state_shutdown = bfin_gptmr0_shutdown,
	.set_state_periodic = bfin_gptmr0_set_periodic,
	.set_state_oneshot = bfin_gptmr0_set_oneshot,
};

static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt)
{
	unsigned long clock_tick;

	clock_tick = get_sclk();
	evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(-1, evt);
	evt->max_delta_ticks = (unsigned long)-1;
	evt->min_delta_ns = clockevent_delta2ns(100, evt);
	evt->min_delta_ticks = 100;

	evt->cpumask = cpumask_of(0);

	clockevents_register_device(evt);
}
#endif /* CONFIG_TICKSOURCE_GPTMR0 */

#if defined(CONFIG_TICKSOURCE_CORETMR)
/* per-cpu local core timer */
DEFINE_PER_CPU(struct clock_event_device, coretmr_events);

static int bfin_coretmr_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	bfin_write_TCNTL(TMPWR);
	CSYNC();
	bfin_write_TCOUNT(cycles);
	CSYNC();
	bfin_write_TCNTL(TMPWR | TMREN);
	return 0;
}

static int bfin_coretmr_set_periodic(struct clock_event_device *evt)
{
	unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);

	bfin_write_TCNTL(TMPWR);
	CSYNC();
	bfin_write_TSCALE(TIME_SCALE - 1);
	bfin_write_TPERIOD(tcount);
	bfin_write_TCOUNT(tcount);
	CSYNC();
	/* TAUTORLD reloads TCOUNT from TPERIOD on expiry for the periodic tick */
	bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
	return 0;
}

static int bfin_coretmr_set_oneshot(struct clock_event_device *evt)
{
	bfin_write_TCNTL(TMPWR);
	CSYNC();
	bfin_write_TSCALE(TIME_SCALE - 1);
	bfin_write_TPERIOD(0);
	bfin_write_TCOUNT(0);
	return 0;
}

static int bfin_coretmr_shutdown(struct clock_event_device *evt)
{
	bfin_write_TCNTL(0);
	CSYNC();
	return 0;
}

void bfin_coretmr_init(void)
{
	/* power up the timer, but don't enable it just yet */
	bfin_write_TCNTL(TMPWR);
	CSYNC();

	/* the TSCALE prescaler counter. */
	bfin_write_TSCALE(TIME_SCALE - 1);
	bfin_write_TPERIOD(0);
	bfin_write_TCOUNT(0);
	CSYNC();
}

#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);

	smp_mb();
	evt->event_handler(evt);

	touch_nmi_watchdog();

	return IRQ_HANDLED;
}

static struct irqaction coretmr_irq = {
	.name		= "Blackfin CoreTimer",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL | IRQF_PERCPU,
	.handler	= bfin_coretmr_interrupt,
};

void bfin_coretmr_clockevent_init(void)
{
	unsigned long clock_tick;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);

#ifdef CONFIG_SMP
	evt->broadcast = smp_timer_broadcast;
#endif

	evt->name = "bfin_core_timer";
	evt->rating = 350;
	evt->irq = -1;
	evt->shift = 32;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->set_next_event = bfin_coretmr_set_next_event;
	evt->set_state_shutdown = bfin_coretmr_shutdown;
	evt->set_state_periodic = bfin_coretmr_set_periodic;
	evt->set_state_oneshot = bfin_coretmr_set_oneshot;

	clock_tick = get_cclk() / TIME_SCALE;
	evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(-1, evt);
	evt->max_delta_ticks = (unsigned long)-1;
	evt->min_delta_ns = clockevent_delta2ns(100, evt);
	evt->min_delta_ticks = 100;

	evt->cpumask = cpumask_of(cpu);

	clockevents_register_device(evt);
}
#endif /* CONFIG_TICKSOURCE_CORETMR */

void read_persistent_clock(struct timespec *ts)
{
	time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */
	ts->tv_sec = secs_since_1970;
	ts->tv_nsec = 0;
}

void __init time_init(void)
{
#ifdef CONFIG_RTC_DRV_BFIN
	/* [#2663] hack to filter junk RTC values that would cause
	 * userspace to have to deal with time values greater than
	 * 2^31 seconds (which uClibc cannot cope with yet)
	 */
	if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) {
		printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n");
		bfin_write_RTC_STAT(0);
	}
#endif

	bfin_cs_cycles_init();
	bfin_cs_gptimer0_init();

#if defined(CONFIG_TICKSOURCE_CORETMR)
	bfin_coretmr_init();
	setup_irq(IRQ_CORETMR, &coretmr_irq);
	bfin_coretmr_clockevent_init();
#endif

#if defined(CONFIG_TICKSOURCE_GPTMR0)
	bfin_gptmr0_init();
	setup_irq(IRQ_TIMER0, &gptmr0_irq);
	gptmr0_irq.dev_id = &clockevent_gptmr0;
	bfin_gptmr0_clockevent_init(&clockevent_gptmr0);
#endif

#if !defined(CONFIG_TICKSOURCE_CORETMR) && !defined(CONFIG_TICKSOURCE_GPTMR0)
# error at least one clock event device is required
#endif
}