linux/arch/arm/plat-omap/dmtimer.c
Jon Hunter 1eaff71017 ARM: OMAP: Don't restore DMTIMER interrupt status register
Restoring the timer interrupt status is not possible because writing a 1 to any
bit in the register clears that bit if it is set, and writing a 0 has no effect.
Furthermore, if an interrupt is pending when someone attempts to disable a
timer, the timer will fail to transition to the idle state and hence its
context will not be lost. Users should take care to service all interrupts
before disabling the timer.

Signed-off-by: Jon Hunter <jon-hunter@ti.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
2012-11-12 16:23:54 -06:00
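
A minimal sketch of the caller-side handling recommended above (illustrative
only; the helpers used are the ones exported by this file):

	u32 status = omap_dm_timer_read_status(timer);
	if (status)					/* service pending events first */
		omap_dm_timer_write_status(timer, status);	/* write 1s to clear */
	omap_dm_timer_stop(timer);			/* now safe to stop the timer */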

/*
* linux/arch/arm/plat-omap/dmtimer.c
*
* OMAP Dual-Mode Timers
*
* Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
* Tarun Kanti DebBarma <tarun.kanti@ti.com>
* Thara Gopinath <thara@ti.com>
*
* dmtimer adaptation to platform_driver.
*
* Copyright (C) 2005 Nokia Corporation
* OMAP2 support by Juha Yrjola
* API improvements and OMAP2 clock framework support by Timo Teras
*
* Copyright (C) 2009 Texas Instruments
* Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <plat/dmtimer.h>
static u32 omap_reserved_systimers;
static LIST_HEAD(omap_timer_list);
static DEFINE_SPINLOCK(dm_timer_lock);
/**
* omap_dm_timer_read_reg - read timer registers in posted and non-posted mode
* @timer: timer pointer on which the read operation is performed
* @reg: lowest byte holds the register offset
*
* The posted mode bit is encoded in reg. Note that in posted mode the write
* pending bit must be checked. Otherwise a read of a register whose write has
* not yet completed will produce an error.
*/
static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg)
{
WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET);
return __omap_dm_timer_read(timer, reg, timer->posted);
}
/**
* omap_dm_timer_write_reg - write timer registers in posted and non-posted mode
* @timer: timer pointer on which the write operation is performed
* @reg: lowest byte holds the register offset
* @value: data to write into the register
*
* The posted mode bit is encoded in reg. Note that in posted mode the write
* pending bit must be checked. Otherwise a write on a register which has a
* pending write will be lost.
*/
static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
u32 value)
{
WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET);
__omap_dm_timer_write(timer, reg, value, timer->posted);
}
static void omap_timer_restore_context(struct omap_dm_timer *timer)
{
omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG,
timer->context.twer);
omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG,
timer->context.tcrr);
omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG,
timer->context.tldr);
omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG,
timer->context.tmar);
omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG,
timer->context.tsicr);
__raw_writel(timer->context.tier, timer->irq_ena);
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG,
timer->context.tclr);
}
static void omap_dm_timer_wait_for_reset(struct omap_dm_timer *timer)
{
int c;
if (!timer->sys_stat)
return;
c = 0;
while (!(__raw_readl(timer->sys_stat) & 1)) {
c++;
if (c > 100000) {
printk(KERN_ERR "Timer failed to reset\n");
return;
}
}
}
static void omap_dm_timer_reset(struct omap_dm_timer *timer)
{
omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
omap_dm_timer_wait_for_reset(timer);
__omap_dm_timer_reset(timer, 0, 0);
}
int omap_dm_timer_prepare(struct omap_dm_timer *timer)
{
/*
* FIXME: OMAP1 devices do not use the clock framework for dmtimers so
* do not call clk_get() for these devices.
*/
if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
timer->fclk = clk_get(&timer->pdev->dev, "fck");
if (WARN_ON_ONCE(IS_ERR_OR_NULL(timer->fclk))) {
timer->fclk = NULL;
dev_err(&timer->pdev->dev, ": No fclk handle.\n");
return -EINVAL;
}
}
omap_dm_timer_enable(timer);
if (timer->capability & OMAP_TIMER_NEEDS_RESET)
omap_dm_timer_reset(timer);
__omap_dm_timer_enable_posted(timer);
omap_dm_timer_disable(timer);
return omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
}
static inline u32 omap_dm_timer_reserved_systimer(int id)
{
return (omap_reserved_systimers & (1 << (id - 1))) ? 1 : 0;
}
int omap_dm_timer_reserve_systimer(int id)
{
if (omap_dm_timer_reserved_systimer(id))
return -ENODEV;
omap_reserved_systimers |= (1 << (id - 1));
return 0;
}
struct omap_dm_timer *omap_dm_timer_request(void)
{
struct omap_dm_timer *timer = NULL, *t;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&dm_timer_lock, flags);
list_for_each_entry(t, &omap_timer_list, node) {
if (t->reserved)
continue;
timer = t;
timer->reserved = 1;
break;
}
spin_unlock_irqrestore(&dm_timer_lock, flags);
if (timer) {
ret = omap_dm_timer_prepare(timer);
if (ret) {
timer->reserved = 0;
timer = NULL;
}
}
if (!timer)
pr_debug("%s: timer request failed!\n", __func__);
return timer;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_request);
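/*
 * Illustrative usage only (not part of this driver): a typical client
 * requests a timer, programs it and releases it roughly as follows; the
 * load value below is an arbitrary example and error handling is trimmed:
 *
 *	struct omap_dm_timer *gpt = omap_dm_timer_request();
 *
 *	if (gpt) {
 *		omap_dm_timer_set_load(gpt, 1, 0xffffe000);
 *		omap_dm_timer_start(gpt);
 *		...
 *		omap_dm_timer_stop(gpt);
 *		omap_dm_timer_free(gpt);
 *	}
 */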
struct omap_dm_timer *omap_dm_timer_request_specific(int id)
{
struct omap_dm_timer *timer = NULL, *t;
unsigned long flags;
int ret = 0;
/* Requesting timer by ID is not supported when device tree is used */
if (of_have_populated_dt()) {
pr_warn("%s: Please use omap_dm_timer_request_by_cap()\n",
__func__);
return NULL;
}
spin_lock_irqsave(&dm_timer_lock, flags);
list_for_each_entry(t, &omap_timer_list, node) {
if (t->pdev->id == id && !t->reserved) {
timer = t;
timer->reserved = 1;
break;
}
}
spin_unlock_irqrestore(&dm_timer_lock, flags);
if (timer) {
ret = omap_dm_timer_prepare(timer);
if (ret) {
timer->reserved = 0;
timer = NULL;
}
}
if (!timer)
pr_debug("%s: timer%d request failed!\n", __func__, id);
return timer;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_request_specific);
/**
* omap_dm_timer_request_by_cap - Request a timer by capability
* @cap: Bit mask of capabilities to match
*
* Find a timer based upon capabilities bit mask. Callers of this function
* should use the definitions found in the plat/dmtimer.h file under the
* comment "timer capabilities used in hwmod database". Returns pointer to
* timer handle on success and a NULL pointer on failure.
*/
struct omap_dm_timer *omap_dm_timer_request_by_cap(u32 cap)
{
struct omap_dm_timer *timer = NULL, *t;
unsigned long flags;
if (!cap)
return NULL;
spin_lock_irqsave(&dm_timer_lock, flags);
list_for_each_entry(t, &omap_timer_list, node) {
if ((!t->reserved) && ((t->capability & cap) == cap)) {
/*
* If timer is not NULL, we have already found one timer
* but it was not an exact match because it had more
* capabilities than were required. Therefore,
* unreserve the last timer found and see if this one
* is a better match.
*/
if (timer)
timer->reserved = 0;
timer = t;
timer->reserved = 1;
/* Exit loop early if we find an exact match */
if (t->capability == cap)
break;
}
}
spin_unlock_irqrestore(&dm_timer_lock, flags);
if (timer && omap_dm_timer_prepare(timer)) {
timer->reserved = 0;
timer = NULL;
}
if (!timer)
pr_debug("%s: timer request failed!\n", __func__);
return timer;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_request_by_cap);
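/*
 * Illustrative example (not part of this driver): a client needing a
 * PWM-capable timer could request one using the capability flags from
 * plat/dmtimer.h, for instance:
 *
 *	struct omap_dm_timer *pwm_timer;
 *
 *	pwm_timer = omap_dm_timer_request_by_cap(OMAP_TIMER_HAS_PWM);
 *	if (!pwm_timer)
 *		return -ENODEV;
 */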
int omap_dm_timer_free(struct omap_dm_timer *timer)
{
if (unlikely(!timer))
return -EINVAL;
clk_put(timer->fclk);
WARN_ON(!timer->reserved);
timer->reserved = 0;
return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_free);
void omap_dm_timer_enable(struct omap_dm_timer *timer)
{
pm_runtime_get_sync(&timer->pdev->dev);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_enable);
void omap_dm_timer_disable(struct omap_dm_timer *timer)
{
pm_runtime_put_sync(&timer->pdev->dev);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_disable);
int omap_dm_timer_get_irq(struct omap_dm_timer *timer)
{
if (timer)
return timer->irq;
return -EINVAL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_get_irq);
#if defined(CONFIG_ARCH_OMAP1)
#include <mach/hardware.h>
/**
* omap_dm_timer_modify_idlect_mask - Check if any running timers use ARMXOR
* @inputmask: current value of idlect mask
*/
__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
{
int i = 0;
struct omap_dm_timer *timer = NULL;
unsigned long flags;
/* If ARMXOR cannot be idled this function call is unnecessary */
if (!(inputmask & (1 << 1)))
return inputmask;
/* If any active timer is using ARMXOR return modified mask */
spin_lock_irqsave(&dm_timer_lock, flags);
list_for_each_entry(timer, &omap_timer_list, node) {
u32 l;
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
if (l & OMAP_TIMER_CTRL_ST) {
if (((omap_readl(MOD_CONF_CTRL_1) >> (i * 2)) & 0x03) == 0)
inputmask &= ~(1 << 1);
else
inputmask &= ~(1 << 2);
}
i++;
}
spin_unlock_irqrestore(&dm_timer_lock, flags);
return inputmask;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_modify_idlect_mask);
#else
struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
{
if (timer)
return timer->fclk;
return NULL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_get_fclk);
__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
{
BUG();
return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_modify_idlect_mask);
#endif
int omap_dm_timer_trigger(struct omap_dm_timer *timer)
{
if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
pr_err("%s: timer not available or enabled.\n", __func__);
return -EINVAL;
}
omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_trigger);
int omap_dm_timer_start(struct omap_dm_timer *timer)
{
u32 l;
if (unlikely(!timer))
return -EINVAL;
omap_dm_timer_enable(timer);
if (!(timer->capability & OMAP_TIMER_ALWON)) {
if (timer->get_context_loss_count &&
timer->get_context_loss_count(&timer->pdev->dev) !=
timer->ctx_loss_count)
omap_timer_restore_context(timer);
}
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
if (!(l & OMAP_TIMER_CTRL_ST)) {
l |= OMAP_TIMER_CTRL_ST;
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
}
/* Save the context */
timer->context.tclr = l;
return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_start);
int omap_dm_timer_stop(struct omap_dm_timer *timer)
{
unsigned long rate = 0;
if (unlikely(!timer))
return -EINVAL;
if (!(timer->capability & OMAP_TIMER_NEEDS_RESET))
rate = clk_get_rate(timer->fclk);
__omap_dm_timer_stop(timer, timer->posted, rate);
if (!(timer->capability & OMAP_TIMER_ALWON)) {
if (timer->get_context_loss_count)
timer->ctx_loss_count =
timer->get_context_loss_count(&timer->pdev->dev);
}
/*
* Since the register values are computed and written within
* __omap_dm_timer_stop, we must read the control register back to
* retrieve the context.
*/
timer->context.tclr =
omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
omap_dm_timer_disable(timer);
return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_stop);
int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
{
int ret;
char *parent_name = NULL;
struct clk *fclk, *parent;
struct dmtimer_platform_data *pdata;
if (unlikely(!timer))
return -EINVAL;
pdata = timer->pdev->dev.platform_data;
if (source < 0 || source >= 3)
return -EINVAL;
/*
* FIXME: Used for OMAP1 devices only because they do not currently
* use the clock framework to set the parent clock. To be removed
* once OMAP1 has migrated to using the clock framework for dmtimers.
*/
if (pdata && pdata->set_timer_src)
return pdata->set_timer_src(timer->pdev, source);
fclk = clk_get(&timer->pdev->dev, "fck");
if (IS_ERR_OR_NULL(fclk)) {
pr_err("%s: fck not found\n", __func__);
return -EINVAL;
}
switch (source) {
case OMAP_TIMER_SRC_SYS_CLK:
parent_name = "timer_sys_ck";
break;
case OMAP_TIMER_SRC_32_KHZ:
parent_name = "timer_32k_ck";
break;
case OMAP_TIMER_SRC_EXT_CLK:
parent_name = "timer_ext_ck";
break;
}
parent = clk_get(&timer->pdev->dev, parent_name);
if (IS_ERR_OR_NULL(parent)) {
pr_err("%s: %s not found\n", __func__, parent_name);
ret = -EINVAL;
goto out;
}
ret = clk_set_parent(fclk, parent);
if (IS_ERR_VALUE(ret))
pr_err("%s: failed to set %s as parent\n", __func__,
parent_name);
clk_put(parent);
out:
clk_put(fclk);
return ret;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_source);
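/*
 * Illustrative example: selecting the 32 kHz clock as the timer's
 * functional clock source (the source constants are defined in
 * plat/dmtimer.h):
 *
 *	if (omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ))
 *		pr_err("could not select the 32 kHz source\n");
 */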
int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
unsigned int load)
{
u32 l;
if (unlikely(!timer))
return -EINVAL;
omap_dm_timer_enable(timer);
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
if (autoreload)
l |= OMAP_TIMER_CTRL_AR;
else
l &= ~OMAP_TIMER_CTRL_AR;
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
/* Save the context */
timer->context.tclr = l;
timer->context.tldr = load;
omap_dm_timer_disable(timer);
return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_load);
/* Optimized set_load which removes the costly spin wait in timer_start */
int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
unsigned int load)
{
u32 l;
if (unlikely(!timer))
return -EINVAL;
omap_dm_timer_enable(timer);
if (!(timer->capability & OMAP_TIMER_ALWON)) {
if (timer->get_context_loss_count &&
timer->get_context_loss_count(&timer->pdev->dev) !=
timer->ctx_loss_count)
omap_timer_restore_context(timer);
}
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
if (autoreload) {
l |= OMAP_TIMER_CTRL_AR;
omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
} else {
l &= ~OMAP_TIMER_CTRL_AR;
}
l |= OMAP_TIMER_CTRL_ST;
__omap_dm_timer_load_start(timer, l, load, timer->posted);
/* Save the context */
timer->context.tclr = l;
timer->context.tldr = load;
timer->context.tcrr = load;
return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_load_start);
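/*
 * Illustrative note: compared with omap_dm_timer_set_load() followed by
 * omap_dm_timer_start(), this helper programs the load value and the
 * start bit in one sequence, avoiding the costly spin wait mentioned
 * above, e.g. for a periodic tick (load value is an arbitrary example):
 *
 *	omap_dm_timer_set_load_start(timer, 1, 0xffff0000);
 */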
int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
unsigned int match)
{
u32 l;
if (unlikely(!timer))
return -EINVAL;
omap_dm_timer_enable(timer);
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
if (enable)
l |= OMAP_TIMER_CTRL_CE;
else
l &= ~OMAP_TIMER_CTRL_CE;
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match);
/* Save the context */
timer->context.tclr = l;
timer->context.tmar = match;
omap_dm_timer_disable(timer);
return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_match);
int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
int toggle, int trigger)
{
u32 l;
if (unlikely(!timer))
return -EINVAL;
omap_dm_timer_enable(timer);
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM |
OMAP_TIMER_CTRL_PT | (0x03 << 10));
if (def_on)
l |= OMAP_TIMER_CTRL_SCPWM;
if (toggle)
l |= OMAP_TIMER_CTRL_PT;
l |= trigger << 10;
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
/* Save the context */
timer->context.tclr = l;
omap_dm_timer_disable(timer);
return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_pwm);
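/*
 * Illustrative example (assumes the OMAP_TIMER_TRIGGER_* constants from
 * plat/dmtimer.h): configure a PWM output that toggles on both overflow
 * and match events, with the default output level high:
 *
 *	omap_dm_timer_set_pwm(timer, 1, 1,
 *			OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE);
 */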
int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler)
{
u32 l;
if (unlikely(!timer))
return -EINVAL;
omap_dm_timer_enable(timer);
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2));
if (prescaler >= 0x00 && prescaler <= 0x07) {
l |= OMAP_TIMER_CTRL_PRE;
l |= prescaler << 2;
}
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
/* Save the context */
timer->context.tclr = l;
omap_dm_timer_disable(timer);
return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_prescaler);
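/*
 * Note (stated here for reference, not taken from this file): with the
 * prescaler enabled, a value n in the range 0..7 is understood to divide
 * the timer functional clock by 2^(n + 1), so
 *
 *	omap_dm_timer_set_prescaler(timer, 0);
 *
 * clocks the counter at fclk / 2, while a negative value disables the
 * prescaler and the counter runs at fclk.
 */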
int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
unsigned int value)
{
if (unlikely(!timer))
return -EINVAL;
omap_dm_timer_enable(timer);
__omap_dm_timer_int_enable(timer, value);
/* Save the context */
timer->context.tier = value;
timer->context.twer = value;
omap_dm_timer_disable(timer);
return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_int_enable);
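/*
 * Illustrative example (assumes the OMAP_TIMER_INT_* bit definitions
 * from plat/dmtimer.h): enable only the overflow interrupt:
 *
 *	omap_dm_timer_set_int_enable(timer, OMAP_TIMER_INT_OVERFLOW);
 */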
unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer)
{
unsigned int l;
if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
pr_err("%s: timer not available or enabled.\n", __func__);
return 0;
}
l = __raw_readl(timer->irq_stat);
return l;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_read_status);
int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
{
if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev)))
return -EINVAL;
__omap_dm_timer_write_status(timer, value);
return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_write_status);
unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
{
if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
pr_err("%s: timer not available or enabled.\n", __func__);
return 0;
}
return __omap_dm_timer_read_counter(timer, timer->posted);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_read_counter);
int omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value)
{
if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
pr_err("%s: timer not available or enabled.\n", __func__);
return -EINVAL;
}
omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, value);
/* Save the context */
timer->context.tcrr = value;
return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_write_counter);
int omap_dm_timers_active(void)
{
struct omap_dm_timer *timer;
list_for_each_entry(timer, &omap_timer_list, node) {
if (!timer->reserved)
continue;
if (omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG) &
OMAP_TIMER_CTRL_ST) {
return 1;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timers_active);
/**
* omap_dm_timer_probe - probe function called for every registered device
* @pdev: pointer to current timer platform device
*
* Called by the driver framework at the end of device registration for all
* timer devices.
*/
static int __devinit omap_dm_timer_probe(struct platform_device *pdev)
{
unsigned long flags;
struct omap_dm_timer *timer;
struct resource *mem, *irq;
struct device *dev = &pdev->dev;
struct dmtimer_platform_data *pdata = pdev->dev.platform_data;
if (!pdata && !dev->of_node) {
dev_err(dev, "%s: no platform data.\n", __func__);
return -ENODEV;
}
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (unlikely(!irq)) {
dev_err(dev, "%s: no IRQ resource.\n", __func__);
return -ENODEV;
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(!mem)) {
dev_err(dev, "%s: no memory resource.\n", __func__);
return -ENODEV;
}
timer = devm_kzalloc(dev, sizeof(struct omap_dm_timer), GFP_KERNEL);
if (!timer) {
dev_err(dev, "%s: memory alloc failed!\n", __func__);
return -ENOMEM;
}
timer->io_base = devm_request_and_ioremap(dev, mem);
if (!timer->io_base) {
dev_err(dev, "%s: region already claimed.\n", __func__);
return -ENOMEM;
}
if (dev->of_node) {
if (of_find_property(dev->of_node, "ti,timer-alwon", NULL))
timer->capability |= OMAP_TIMER_ALWON;
if (of_find_property(dev->of_node, "ti,timer-dsp", NULL))
timer->capability |= OMAP_TIMER_HAS_DSP_IRQ;
if (of_find_property(dev->of_node, "ti,timer-pwm", NULL))
timer->capability |= OMAP_TIMER_HAS_PWM;
if (of_find_property(dev->of_node, "ti,timer-secure", NULL))
timer->capability |= OMAP_TIMER_SECURE;
} else {
timer->id = pdev->id;
timer->errata = pdata->timer_errata;
timer->capability = pdata->timer_capability;
timer->reserved = omap_dm_timer_reserved_systimer(timer->id);
timer->get_context_loss_count = pdata->get_context_loss_count;
}
timer->irq = irq->start;
timer->pdev = pdev;
/* Skip pm_runtime_enable for OMAP1 */
if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
pm_runtime_enable(dev);
pm_runtime_irq_safe(dev);
}
if (!timer->reserved) {
pm_runtime_get_sync(dev);
__omap_dm_timer_init_regs(timer);
pm_runtime_put(dev);
}
/* add the timer element to the list */
spin_lock_irqsave(&dm_timer_lock, flags);
list_add_tail(&timer->node, &omap_timer_list);
spin_unlock_irqrestore(&dm_timer_lock, flags);
dev_dbg(dev, "Device Probed.\n");
return 0;
}
/**
* omap_dm_timer_remove - cleanup a registered timer device
* @pdev: pointer to current timer platform device
*
* Called by the driver framework whenever a timer device is unregistered.
* In addition to freeing platform resources it also deletes the timer
* entry from the local list.
*/
static int __devexit omap_dm_timer_remove(struct platform_device *pdev)
{
struct omap_dm_timer *timer;
unsigned long flags;
int ret = -EINVAL;
spin_lock_irqsave(&dm_timer_lock, flags);
list_for_each_entry(timer, &omap_timer_list, node)
if (!strcmp(dev_name(&timer->pdev->dev),
dev_name(&pdev->dev))) {
list_del(&timer->node);
ret = 0;
break;
}
spin_unlock_irqrestore(&dm_timer_lock, flags);
return ret;
}
static const struct of_device_id omap_timer_match[] = {
{ .compatible = "ti,omap2-timer", },
{},
};
MODULE_DEVICE_TABLE(of, omap_timer_match);
static struct platform_driver omap_dm_timer_driver = {
.probe = omap_dm_timer_probe,
.remove = __devexit_p(omap_dm_timer_remove),
.driver = {
.name = "omap_timer",
.of_match_table = of_match_ptr(omap_timer_match),
},
};
static int __init omap_dm_timer_driver_init(void)
{
return platform_driver_register(&omap_dm_timer_driver);
}
static void __exit omap_dm_timer_driver_exit(void)
{
platform_driver_unregister(&omap_dm_timer_driver);
}
early_platform_init("earlytimer", &omap_dm_timer_driver);
module_init(omap_dm_timer_driver_init);
module_exit(omap_dm_timer_driver_exit);
MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");