linux/arch/arm/mach-ux500/cpu.c
Linus Walleij 1bf6d2c1bb ARM: mach-ux500: unlock I&D l2x0 caches before init
Apparently U8500 U-Boot versions may leave the l2x0 locked down
before executing the kernel. Make sure we unlock it before we
initialize the l2x0. This fixes a performance problem reported
by Jan Rinze.

The l2x0 core has been modified to unlock the l2x0 by default,
but it will not touch the lockdown registers if the l2x0 was
already enabled, as it is on the ux500, so we need this quirk to
make sure the lockdown is properly cleared.

Cc: stable@kernel.org
Cc: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
Cc: Rabin Vincent <rabin.vincent@stericsson.com>
Cc: Adrian Bunk <adrian.bunk@movial.com>
Reported-by: Jan Rinze <janrinze@gmail.com>
Tested-by: Robert Marklund <robert.marklund@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
2011-09-22 14:07:34 +02:00

/*
 * Copyright (C) ST-Ericsson SA 2010
 *
 * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>

#include <linux/mfd/db8500-prcmu.h>
#include <linux/mfd/db5500-prcmu.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
#include <asm/mach/map.h>
#include <asm/localtimer.h>

#include <plat/mtu.h>
#include <mach/hardware.h>
#include <mach/setup.h>
#include <mach/devices.h>

#include "clock.h"

void __iomem *_PRCMU_BASE;

#ifdef CONFIG_CACHE_L2X0
static void __iomem *l2x0_base;
#endif
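
/*
 * Pick the GIC distributor and CPU interface bases for the detected SoC,
 * initialize the GIC, and do the early PRCMU and clock setup that must
 * happen before the system timer is initialized.
 */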
void __init ux500_init_irq(void)
{
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (cpu_is_u5500()) {
		dist_base = __io_address(U5500_GIC_DIST_BASE);
		cpu_base = __io_address(U5500_GIC_CPU_BASE);
	} else if (cpu_is_u8500()) {
		dist_base = __io_address(U8500_GIC_DIST_BASE);
		cpu_base = __io_address(U8500_GIC_CPU_BASE);
	} else
		ux500_unknown_soc();

	gic_init(0, 29, dist_base, cpu_base);

	/*
	 * Init clocks here so that they are available for system timer
	 * initialization.
	 */
	if (cpu_is_u5500())
		db5500_prcmu_early_init();
	if (cpu_is_u8500())
		prcmu_early_init();
	clk_init();
}
#ifdef CONFIG_CACHE_L2X0

static inline void ux500_cache_wait(void __iomem *reg, unsigned long mask)
{
	/* wait for the operation to complete */
	while (readl_relaxed(reg) & mask)
		;
}
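
/* Issue a cache sync to drain the PL310 buffers and wait for completion. */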
static inline void ux500_cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	ux500_cache_wait(base + L2X0_CACHE_SYNC, 1);
}

/*
 * The L2 cache cannot be turned off in the non-secure world.
 * Dummy until a secure service is in place.
 */
static void ux500_l2x0_disable(void)
{
}

/*
 * This is only called when doing a kexec, just after turning off the L2
 * and L1 cache, and it is surrounded by a spinlock in the generic version.
 * However, we're not really turning off the L2 cache right now and the
 * PL310 does not support exclusive accesses (used to implement the spinlock).
 * So, the invalidation needs to be done without the spinlock.
 */
static void ux500_l2x0_inv_all(void)
{
	void __iomem *base = l2x0_base;
	uint32_t l2x0_way_mask = (1<<16) - 1;	/* Bitmask of active ways */

	/* invalidate all ways */
	writel_relaxed(l2x0_way_mask, base + L2X0_INV_WAY);
	ux500_cache_wait(base + L2X0_INV_WAY, l2x0_way_mask);
	ux500_cache_sync();
}

static int __init ux500_l2x0_unlock(void)
{
	int i;

	/*
	 * Unlock Data and Instruction Lock if locked. Ux500 U-Boot versions
	 * apparently lock both caches before jumping to the kernel. The
	 * l2x0 core will not touch the unlock registers if the l2x0 is
	 * already enabled, so we do it right here instead. The PL310 has
	 * 8 sets of registers, one per possible CPU.
	 */
	for (i = 0; i < 8; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}

	return 0;
}
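
/*
 * Map the PL310 for the detected SoC, clear any lockdown left behind by
 * the boot loader, initialize the l2x0 core, and override the disable
 * and inv_all hooks with the ux500-specific versions above.
 */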
static int __init ux500_l2x0_init(void)
{
	if (cpu_is_u5500())
		l2x0_base = __io_address(U5500_L2CC_BASE);
	else if (cpu_is_u8500())
		l2x0_base = __io_address(U8500_L2CC_BASE);
	else
		ux500_unknown_soc();

	/* Unlock before init */
	ux500_l2x0_unlock();

	/* 64KB way size, 8 way associativity, force WA */
	l2x0_init(l2x0_base, 0x3e060000, 0xc0000fff);

	/* Override invalidate function */
	outer_cache.disable = ux500_l2x0_disable;
	outer_cache.inv_all = ux500_l2x0_inv_all;

	return 0;
}

early_initcall(ux500_l2x0_init);
#endif
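
/*
 * Set up the TWD local timer base (when local timers are enabled) and the
 * MTU base for the detected SoC, then register the Nomadik MTU as the
 * system timer.
 */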
static void __init ux500_timer_init(void)
{
#ifdef CONFIG_LOCAL_TIMERS
	/* Setup the local timer base */
	if (cpu_is_u5500())
		twd_base = __io_address(U5500_TWD_BASE);
	else if (cpu_is_u8500())
		twd_base = __io_address(U8500_TWD_BASE);
	else
		ux500_unknown_soc();
#endif
	if (cpu_is_u5500())
		mtu_base = __io_address(U5500_MTU0_BASE);
	else if (cpu_is_u8500ed())
		mtu_base = __io_address(U8500_MTU0_BASE_ED);
	else if (cpu_is_u8500())
		mtu_base = __io_address(U8500_MTU0_BASE);
	else
		ux500_unknown_soc();

	nmdk_timer_init();
}

struct sys_timer ux500_timer = {
	.init	= ux500_timer_init,
};