Merge tag 'mips_5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux

Pull MIPS updates from Paul Burton:

 - Support for the MIPSr6 MemoryMapID register & Global INValidate TLB
   (GINVT) instructions, allowing for more efficient TLB maintenance
   when running on a CPU such as the I6500 that supports these.

 - Enable huge page support for MIPS64r6.

 - Optimize post-DMA cache sync by removing that code entirely for
   kernel configurations in which we know it won't be needed.

 - The number of pages allocated for interrupt stacks is now calculated
   correctly, where before we would wastefully allocate too much memory
   in some configurations.

 - The ath79 platform migrates to devicetree (an illustrative devicetree
   fragment follows this list).

 - The bcm47xx platform sees fixes for the Buffalo WHR-G54S board.

 - The ingenic/jz4740 platform gains support for appended devicetrees.

 - The cavium_octeon, lantiq, loongson32 & sgi-ip27 platforms all see
   cleanups as do various pieces of core architecture code.
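
   The ath79 devicetree migration noted above replaces board code such as
   the mach-ap121.c file removed later in this diff with devicetree
   descriptions. The fragment below is a minimal, hypothetical sketch using
   the generic gpio-leds and gpio-keys-polled bindings; the GPIO numbers,
   labels and poll interval mirror the removed AP121 board code, while the
   node names and the &gpio phandle are placeholders, not taken from this
   merge.

	leds {
		compatible = "gpio-leds";

		led-wlan {
			/* AP121_GPIO_LED_WLAN was GPIO 0, active high */
			label = "ap121:green:wlan";
			gpios = <&gpio 0 GPIO_ACTIVE_HIGH>;
		};
	};

	keys {
		compatible = "gpio-keys-polled";
		poll-interval = <20>;

		button-reset {
			/* AP121_GPIO_BTN_RESET was GPIO 12, active low */
			label = "reset";
			linux,code = <KEY_RESTART>;
			gpios = <&gpio 12 GPIO_ACTIVE_LOW>;
		};
	};

   The GPIO_ACTIVE_* and KEY_RESTART constants are assumed to come from the
   usual <dt-bindings/gpio/gpio.h> and <dt-bindings/input/input.h> includes
   in a complete board .dts file.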

* tag 'mips_5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux: (66 commits)
  MIPS: lantiq: Remove separate GPHY Firmware loader
  MIPS: ingenic: Add support for appended devicetree
  MIPS: SGI-IP27: rework HUB interrupts
  MIPS: SGI-IP27: do boot CPU init later
  MIPS: SGI-IP27: do xtalk scanning later
  MIPS: SGI-IP27: use pr_info/pr_emerg and pr_cont to fix output
  MIPS: SGI-IP27: clean up bridge access and header files
  MIPS: SGI-IP27: get rid of volatile and hubreg_t
  MIPS: irq: Allocate accurate order pages for irq stack
  MIPS: dma-noncoherent: Remove bogus condition in dma_sync_phys()
  MIPS: eBPF: Remove REG_32BIT_ZERO_EX
  MIPS: eBPF: Always return sign extended 32b values
  MIPS: CM: Fix indentation
  MIPS: BCM47XX: Fix/improve Buffalo WHR-G54S support
  MIPS: OCTEON: program rx/tx-delay always from DT
  MIPS: OCTEON: delete board-specific link status
  MIPS: OCTEON: don't lie about interface type of CN3005 board
  MIPS: OCTEON: warn if deprecated link status is being used
  MIPS: OCTEON: add fixed-link nodes to in-kernel device tree
  MIPS: Delete unused flush_cache_sigtramp()
  ...
Linus Torvalds 2019-03-05 11:28:25 -08:00
commit d9862cfbe2
119 changed files with 1615 additions and 4620 deletions

View File

@@ -1,36 +0,0 @@
Lantiq XWAY SoC GPHY binding
============================
This binding describes a software-defined ethernet PHY, provided by the RCU
module on newer Lantiq XWAY SoCs (xRX200 and newer).
-------------------------------------------------------------------------------
Required properties:
- compatible : Should be one of
"lantiq,xrx200a1x-gphy"
"lantiq,xrx200a2x-gphy"
"lantiq,xrx300-gphy"
"lantiq,xrx330-gphy"
- reg : Address of the GPHY FW load address register
- resets : Must reference the RCU GPHY reset bit
- reset-names : Must be "gphy"; a second, optional entry must be named "gphy2"
- clocks : A reference to the (PMU) GPHY clock gate
Optional properties:
- lantiq,gphy-mode : GPHY_MODE_GE (default) or GPHY_MODE_FE as defined in
<dt-bindings/mips/lantiq_xway_gphy.h>
-------------------------------------------------------------------------------
Example for the GPHYs on the xRX200 SoCs:
#include <dt-bindings/mips/lantiq_rcu_gphy.h>
gphy0: gphy@20 {
compatible = "lantiq,xrx200a2x-gphy";
reg = <0x20 0x4>;
resets = <&reset0 31 30>, <&reset1 7 7>;
reset-names = "gphy", "gphy2";
clocks = <&pmu0 XRX200_PMU_GATE_GPHY>;
lantiq,gphy-mode = <GPHY_MODE_GE>;
};

View File

@@ -26,24 +26,6 @@ Example of the RCU bindings on a xRX200 SoC:
ranges = <0x0 0x203000 0x100>;
big-endian;
gphy0: gphy@20 {
compatible = "lantiq,xrx200a2x-gphy";
reg = <0x20 0x4>;
resets = <&reset0 31 30>, <&reset1 7 7>;
reset-names = "gphy", "gphy2";
lantiq,gphy-mode = <GPHY_MODE_GE>;
};
gphy1: gphy@68 {
compatible = "lantiq,xrx200a2x-gphy";
reg = <0x68 0x4>;
resets = <&reset0 29 28>, <&reset1 6 6>;
reset-names = "gphy", "gphy2";
lantiq,gphy-mode = <GPHY_MODE_GE>;
};
reset0: reset-controller@10 {
compatible = "lantiq,xrx200-reset";
reg = <0x10 4>, <0x14 4>;

View File

@@ -206,7 +206,6 @@ config ATH79
select COMMON_CLK
select CLKDEV_LOOKUP
select IRQ_MIPS_CPU
select MIPS_MACHINE
select SYS_HAS_CPU_MIPS32_R2
select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_32BIT_KERNEL
@@ -391,7 +390,7 @@ config MACH_INGENIC
select GPIOLIB
select COMMON_CLK
select GENERIC_IRQ_CHIP
select BUILTIN_DTB
select BUILTIN_DTB if MIPS_NO_APPENDED_DTB
select USE_OF
select LIBFDT
@@ -676,6 +675,7 @@ config SGI_IP27
select DEFAULT_SGI_PARTITION
select SYS_HAS_EARLY_PRINTK
select HAVE_PCI
select IRQ_MIPS_CPU
select NR_CPUS_DEFAULT_64
select SYS_HAS_CPU_R10000
select SYS_SUPPORTS_64BIT_KERNEL
@@ -1124,7 +1124,6 @@ config DMA_NONCOHERENT
bool
select ARCH_HAS_DMA_MMAP_PGPROT
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_SYNC_DMA_FOR_CPU
select NEED_DMA_MAP_STATE
select ARCH_HAS_DMA_COHERENT_TO_PFN
select DMA_NONCOHERENT_CACHE_SYNC
@@ -1556,6 +1555,7 @@ config CPU_MIPS64_R6
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES
select CPU_SUPPORTS_MSA
select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
select HAVE_KVM
@@ -1881,7 +1881,7 @@ config CPU_LOONGSON2
config CPU_LOONGSON1
bool
select CPU_MIPS32
select CPU_MIPSR1
select CPU_MIPSR2
select CPU_HAS_PREFETCH
select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
@@ -1943,9 +1943,11 @@ config SYS_HAS_CPU_MIPS32_R3_5
config SYS_HAS_CPU_MIPS32_R5
bool
select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT
config SYS_HAS_CPU_MIPS32_R6
bool
select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT
config SYS_HAS_CPU_MIPS64_R1
bool
@@ -1955,6 +1957,7 @@ config SYS_HAS_CPU_MIPS64_R2
config SYS_HAS_CPU_MIPS64_R6
bool
select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT
config SYS_HAS_CPU_R3000
bool
@@ -1991,6 +1994,7 @@ config SYS_HAS_CPU_R8000
config SYS_HAS_CPU_R10000
bool
select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT
config SYS_HAS_CPU_RM7000
bool
@@ -2019,6 +2023,7 @@ config SYS_HAS_CPU_BMIPS4380
config SYS_HAS_CPU_BMIPS5000
bool
select SYS_HAS_CPU_BMIPS
select ARCH_HAS_SYNC_DMA_FOR_CPU
config SYS_HAS_CPU_XLR
bool

View File

@@ -233,6 +233,8 @@ toolchain-crc := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mcrc)
cflags-$(toolchain-crc) += -DTOOLCHAIN_SUPPORTS_CRC
toolchain-dsp := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mdsp)
cflags-$(toolchain-dsp) += -DTOOLCHAIN_SUPPORTS_DSP
toolchain-ginv := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mginv)
cflags-$(toolchain-ginv) += -DTOOLCHAIN_SUPPORTS_GINV
#
# Firmware support

View File

@@ -1,79 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
if ATH79
menu "Atheros AR71XX/AR724X/AR913X machine selection"
config ATH79_MACH_AP121
bool "Atheros AP121 reference board"
select SOC_AR933X
select ATH79_DEV_GPIO_BUTTONS
select ATH79_DEV_LEDS_GPIO
select ATH79_DEV_SPI
select ATH79_DEV_USB
select ATH79_DEV_WMAC
help
Say 'Y' here if you want your kernel to support the
Atheros AP121 reference board.
config ATH79_MACH_AP136
bool "Atheros AP136 reference board"
select SOC_QCA955X
select ATH79_DEV_GPIO_BUTTONS
select ATH79_DEV_LEDS_GPIO
select ATH79_DEV_SPI
select ATH79_DEV_USB
select ATH79_DEV_WMAC
help
Say 'Y' here if you want your kernel to support the
Atheros AP136 reference board.
config ATH79_MACH_AP81
bool "Atheros AP81 reference board"
select SOC_AR913X
select ATH79_DEV_GPIO_BUTTONS
select ATH79_DEV_LEDS_GPIO
select ATH79_DEV_SPI
select ATH79_DEV_USB
select ATH79_DEV_WMAC
help
Say 'Y' here if you want your kernel to support the
Atheros AP81 reference board.
config ATH79_MACH_DB120
bool "Atheros DB120 reference board"
select SOC_AR934X
select ATH79_DEV_GPIO_BUTTONS
select ATH79_DEV_LEDS_GPIO
select ATH79_DEV_SPI
select ATH79_DEV_USB
select ATH79_DEV_WMAC
help
Say 'Y' here if you want your kernel to support the
Atheros DB120 reference board.
config ATH79_MACH_PB44
bool "Atheros PB44 reference board"
select SOC_AR71XX
select ATH79_DEV_GPIO_BUTTONS
select ATH79_DEV_LEDS_GPIO
select ATH79_DEV_SPI
select ATH79_DEV_USB
help
Say 'Y' here if you want your kernel to support the
Atheros PB44 reference board.
config ATH79_MACH_UBNT_XM
bool "Ubiquiti Networks XM (rev 1.0) board"
select SOC_AR724X
select ATH79_DEV_GPIO_BUTTONS
select ATH79_DEV_LEDS_GPIO
select ATH79_DEV_SPI
help
Say 'Y' here if you want your kernel to support the
Ubiquiti Networks XM (rev 1.0) board.
endmenu
config SOC_AR71XX
select HAVE_PCI
def_bool n

View File

@@ -8,27 +8,6 @@
# under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation.
obj-y := prom.o setup.o irq.o common.o clock.o
obj-y := prom.o setup.o common.o clock.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_PCI) += pci.o
#
# Devices
#
obj-y += dev-common.o
obj-$(CONFIG_ATH79_DEV_GPIO_BUTTONS) += dev-gpio-buttons.o
obj-$(CONFIG_ATH79_DEV_LEDS_GPIO) += dev-leds-gpio.o
obj-$(CONFIG_ATH79_DEV_SPI) += dev-spi.o
obj-$(CONFIG_ATH79_DEV_USB) += dev-usb.o
obj-$(CONFIG_ATH79_DEV_WMAC) += dev-wmac.o
#
# Machines
#
obj-$(CONFIG_ATH79_MACH_AP121) += mach-ap121.o
obj-$(CONFIG_ATH79_MACH_AP136) += mach-ap136.o
obj-$(CONFIG_ATH79_MACH_AP81) += mach-ap81.o
obj-$(CONFIG_ATH79_MACH_DB120) += mach-db120.o
obj-$(CONFIG_ATH79_MACH_PB44) += mach-pb44.o
obj-$(CONFIG_ATH79_MACH_UBNT_XM) += mach-ubnt-xm.o

View File

@@ -26,7 +26,6 @@
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include "common.h"
#include "machtypes.h"
#define AR71XX_BASE_FREQ 40000000
#define AR724X_BASE_FREQ 40000000
@@ -37,24 +36,63 @@ static struct clk_onecell_data clk_data = {
.clk_num = ARRAY_SIZE(clks),
};
static struct clk *__init ath79_add_sys_clkdev(
const char *id, unsigned long rate)
static const char * const clk_names[ATH79_CLK_END] = {
[ATH79_CLK_CPU] = "cpu",
[ATH79_CLK_DDR] = "ddr",
[ATH79_CLK_AHB] = "ahb",
[ATH79_CLK_REF] = "ref",
[ATH79_CLK_MDIO] = "mdio",
};
static const char * __init ath79_clk_name(int type)
{
struct clk *clk;
int err;
BUG_ON(type >= ARRAY_SIZE(clk_names) || !clk_names[type]);
return clk_names[type];
}
clk = clk_register_fixed_rate(NULL, id, NULL, 0, rate);
static void __init __ath79_set_clk(int type, const char *name, struct clk *clk)
{
if (IS_ERR(clk))
panic("failed to allocate %s clock structure", id);
panic("failed to allocate %s clock structure", clk_names[type]);
err = clk_register_clkdev(clk, id, NULL);
if (err)
panic("unable to register %s clock device", id);
clks[type] = clk;
clk_register_clkdev(clk, name, NULL);
}
static struct clk * __init ath79_set_clk(int type, unsigned long rate)
{
const char *name = ath79_clk_name(type);
struct clk *clk;
clk = clk_register_fixed_rate(NULL, name, NULL, 0, rate);
__ath79_set_clk(type, name, clk);
return clk;
}
static void __init ar71xx_clocks_init(void)
static struct clk * __init ath79_set_ff_clk(int type, const char *parent,
unsigned int mult, unsigned int div)
{
const char *name = ath79_clk_name(type);
struct clk *clk;
clk = clk_register_fixed_factor(NULL, name, parent, 0, mult, div);
__ath79_set_clk(type, name, clk);
return clk;
}
static unsigned long __init ath79_setup_ref_clk(unsigned long rate)
{
struct clk *clk = clks[ATH79_CLK_REF];
if (clk)
rate = clk_get_rate(clk);
else
clk = ath79_set_clk(ATH79_CLK_REF, rate);
return rate;
}
static void __init ar71xx_clocks_init(void __iomem *pll_base)
{
unsigned long ref_rate;
unsigned long cpu_rate;
@@ -64,9 +102,9 @@ static void __init ar71xx_clocks_init(void)
u32 freq;
u32 div;
ref_rate = AR71XX_BASE_FREQ;
ref_rate = ath79_setup_ref_clk(AR71XX_BASE_FREQ);
pll = ath79_pll_rr(AR71XX_PLL_REG_CPU_CONFIG);
pll = __raw_readl(pll_base + AR71XX_PLL_REG_CPU_CONFIG);
div = ((pll >> AR71XX_PLL_FB_SHIFT) & AR71XX_PLL_FB_MASK) + 1;
freq = div * ref_rate;
@@ -80,31 +118,17 @@ static void __init ar71xx_clocks_init(void)
div = (((pll >> AR71XX_AHB_DIV_SHIFT) & AR71XX_AHB_DIV_MASK) + 1) * 2;
ahb_rate = cpu_rate / div;
ath79_add_sys_clkdev("ref", ref_rate);
clks[ATH79_CLK_CPU] = ath79_add_sys_clkdev("cpu", cpu_rate);
clks[ATH79_CLK_DDR] = ath79_add_sys_clkdev("ddr", ddr_rate);
clks[ATH79_CLK_AHB] = ath79_add_sys_clkdev("ahb", ahb_rate);
clk_add_alias("wdt", NULL, "ahb", NULL);
clk_add_alias("uart", NULL, "ahb", NULL);
ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
}
static struct clk * __init ath79_reg_ffclk(const char *name,
const char *parent_name, unsigned int mult, unsigned int div)
static void __init ar724x_clocks_init(void __iomem *pll_base)
{
struct clk *clk;
clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div);
if (IS_ERR(clk))
panic("failed to allocate %s clock structure", name);
return clk;
}
static void __init ar724x_clk_init(struct clk *ref_clk, void __iomem *pll_base)
{
u32 pll;
u32 mult, div, ddr_div, ahb_div;
u32 pll;
ath79_setup_ref_clk(AR71XX_BASE_FREQ);
pll = __raw_readl(pll_base + AR724X_PLL_REG_CPU_CONFIG);
@@ -114,30 +138,14 @@ static void __init ar724x_clk_init(struct clk *ref_clk, void __iomem *pll_base)
ddr_div = ((pll >> AR724X_DDR_DIV_SHIFT) & AR724X_DDR_DIV_MASK) + 1;
ahb_div = (((pll >> AR724X_AHB_DIV_SHIFT) & AR724X_AHB_DIV_MASK) + 1) * 2;
clks[ATH79_CLK_CPU] = ath79_reg_ffclk("cpu", "ref", mult, div);
clks[ATH79_CLK_DDR] = ath79_reg_ffclk("ddr", "ref", mult, div * ddr_div);
clks[ATH79_CLK_AHB] = ath79_reg_ffclk("ahb", "ref", mult, div * ahb_div);
ath79_set_ff_clk(ATH79_CLK_CPU, "ref", mult, div);
ath79_set_ff_clk(ATH79_CLK_DDR, "ref", mult, div * ddr_div);
ath79_set_ff_clk(ATH79_CLK_AHB, "ref", mult, div * ahb_div);
}
static void __init ar724x_clocks_init(void)
{
struct clk *ref_clk;
ref_clk = ath79_add_sys_clkdev("ref", AR724X_BASE_FREQ);
ar724x_clk_init(ref_clk, ath79_pll_base);
/* just make happy plat_time_init() from arch/mips/ath79/setup.c */
clk_register_clkdev(clks[ATH79_CLK_CPU], "cpu", NULL);
clk_register_clkdev(clks[ATH79_CLK_DDR], "ddr", NULL);
clk_register_clkdev(clks[ATH79_CLK_AHB], "ahb", NULL);
clk_add_alias("wdt", NULL, "ahb", NULL);
clk_add_alias("uart", NULL, "ahb", NULL);
}
static void __init ar9330_clk_init(struct clk *ref_clk, void __iomem *pll_base)
static void __init ar933x_clocks_init(void __iomem *pll_base)
{
unsigned long ref_rate;
u32 clock_ctrl;
u32 ref_div;
u32 ninit_mul;
@@ -146,6 +154,15 @@ static void __init ar9330_clk_init(struct clk *ref_clk, void __iomem *pll_base)
u32 cpu_div;
u32 ddr_div;
u32 ahb_div;
u32 t;
t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
if (t & AR933X_BOOTSTRAP_REF_CLK_40)
ref_rate = (40 * 1000 * 1000);
else
ref_rate = (25 * 1000 * 1000);
ath79_setup_ref_clk(ref_rate);
clock_ctrl = __raw_readl(pll_base + AR933X_PLL_CLOCK_CTRL_REG);
if (clock_ctrl & AR933X_PLL_CLOCK_CTRL_BYPASS) {
@@ -186,37 +203,12 @@ static void __init ar9330_clk_init(struct clk *ref_clk, void __iomem *pll_base)
AR933X_PLL_CLOCK_CTRL_AHB_DIV_MASK) + 1;
}
clks[ATH79_CLK_CPU] = ath79_reg_ffclk("cpu", "ref",
ninit_mul, ref_div * out_div * cpu_div);
clks[ATH79_CLK_DDR] = ath79_reg_ffclk("ddr", "ref",
ninit_mul, ref_div * out_div * ddr_div);
clks[ATH79_CLK_AHB] = ath79_reg_ffclk("ahb", "ref",
ninit_mul, ref_div * out_div * ahb_div);
}
static void __init ar933x_clocks_init(void)
{
struct clk *ref_clk;
unsigned long ref_rate;
u32 t;
t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
if (t & AR933X_BOOTSTRAP_REF_CLK_40)
ref_rate = (40 * 1000 * 1000);
else
ref_rate = (25 * 1000 * 1000);
ref_clk = ath79_add_sys_clkdev("ref", ref_rate);
ar9330_clk_init(ref_clk, ath79_pll_base);
/* just make happy plat_time_init() from arch/mips/ath79/setup.c */
clk_register_clkdev(clks[ATH79_CLK_CPU], "cpu", NULL);
clk_register_clkdev(clks[ATH79_CLK_DDR], "ddr", NULL);
clk_register_clkdev(clks[ATH79_CLK_AHB], "ahb", NULL);
clk_add_alias("wdt", NULL, "ahb", NULL);
clk_add_alias("uart", NULL, "ref", NULL);
ath79_set_ff_clk(ATH79_CLK_CPU, "ref", ninit_mul,
ref_div * out_div * cpu_div);
ath79_set_ff_clk(ATH79_CLK_DDR, "ref", ninit_mul,
ref_div * out_div * ddr_div);
ath79_set_ff_clk(ATH79_CLK_AHB, "ref", ninit_mul,
ref_div * out_div * ahb_div);
}
static u32 __init ar934x_get_pll_freq(u32 ref, u32 ref_div, u32 nint, u32 nfrac,
@@ -239,7 +231,7 @@ static u32 __init ar934x_get_pll_freq(u32 ref, u32 ref_div, u32 nint, u32 nfrac,
return ret;
}
static void __init ar934x_clocks_init(void)
static void __init ar934x_clocks_init(void __iomem *pll_base)
{
unsigned long ref_rate;
unsigned long cpu_rate;
@@ -258,6 +250,8 @@ static void __init ar934x_clocks_init(void)
else
ref_rate = 25 * 1000 * 1000;
ref_rate = ath79_setup_ref_clk(ref_rate);
pll = __raw_readl(dpll_base + AR934X_SRIF_CPU_DPLL2_REG);
if (pll & AR934X_SRIF_DPLL2_LOCAL_PLL) {
out_div = (pll >> AR934X_SRIF_DPLL2_OUTDIV_SHIFT) &
@@ -270,7 +264,7 @@ static void __init ar934x_clocks_init(void)
AR934X_SRIF_DPLL1_REFDIV_MASK;
frac = 1 << 18;
} else {
pll = ath79_pll_rr(AR934X_PLL_CPU_CONFIG_REG);
pll = __raw_readl(pll_base + AR934X_PLL_CPU_CONFIG_REG);
out_div = (pll >> AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
AR934X_PLL_CPU_CONFIG_OUTDIV_MASK;
ref_div = (pll >> AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
@@ -297,7 +291,7 @@ static void __init ar934x_clocks_init(void)
AR934X_SRIF_DPLL1_REFDIV_MASK;
frac = 1 << 18;
} else {
pll = ath79_pll_rr(AR934X_PLL_DDR_CONFIG_REG);
pll = __raw_readl(pll_base + AR934X_PLL_DDR_CONFIG_REG);
out_div = (pll >> AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
AR934X_PLL_DDR_CONFIG_OUTDIV_MASK;
ref_div = (pll >> AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
@@ -312,7 +306,7 @@ static void __init ar934x_clocks_init(void)
ddr_pll = ar934x_get_pll_freq(ref_rate, ref_div, nint,
nfrac, frac, out_div);
clk_ctrl = ath79_pll_rr(AR934X_PLL_CPU_DDR_CLK_CTRL_REG);
clk_ctrl = __raw_readl(pll_base + AR934X_PLL_CPU_DDR_CLK_CTRL_REG);
postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_SHIFT) &
AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_MASK;
@@ -344,18 +338,18 @@ static void __init ar934x_clocks_init(void)
else
ahb_rate = cpu_pll / (postdiv + 1);
ath79_add_sys_clkdev("ref", ref_rate);
clks[ATH79_CLK_CPU] = ath79_add_sys_clkdev("cpu", cpu_rate);
clks[ATH79_CLK_DDR] = ath79_add_sys_clkdev("ddr", ddr_rate);
clks[ATH79_CLK_AHB] = ath79_add_sys_clkdev("ahb", ahb_rate);
ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
clk_add_alias("wdt", NULL, "ref", NULL);
clk_add_alias("uart", NULL, "ref", NULL);
clk_ctrl = __raw_readl(pll_base + AR934X_PLL_SWITCH_CLOCK_CONTROL_REG);
if (clk_ctrl & AR934X_PLL_SWITCH_CLOCK_CONTROL_MDIO_CLK_SEL)
ath79_set_clk(ATH79_CLK_MDIO, 100 * 1000 * 1000);
iounmap(dpll_base);
}
static void __init qca953x_clocks_init(void)
static void __init qca953x_clocks_init(void __iomem *pll_base)
{
unsigned long ref_rate;
unsigned long cpu_rate;
@@ -371,7 +365,9 @@ static void __init qca953x_clocks_init(void)
else
ref_rate = 25 * 1000 * 1000;
pll = ath79_pll_rr(QCA953X_PLL_CPU_CONFIG_REG);
ref_rate = ath79_setup_ref_clk(ref_rate);
pll = __raw_readl(pll_base + QCA953X_PLL_CPU_CONFIG_REG);
out_div = (pll >> QCA953X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
QCA953X_PLL_CPU_CONFIG_OUTDIV_MASK;
ref_div = (pll >> QCA953X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
@@ -385,7 +381,7 @@ static void __init qca953x_clocks_init(void)
cpu_pll += frac * (ref_rate >> 6) / ref_div;
cpu_pll /= (1 << out_div);
pll = ath79_pll_rr(QCA953X_PLL_DDR_CONFIG_REG);
pll = __raw_readl(pll_base + QCA953X_PLL_DDR_CONFIG_REG);
out_div = (pll >> QCA953X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
QCA953X_PLL_DDR_CONFIG_OUTDIV_MASK;
ref_div = (pll >> QCA953X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
@@ -399,7 +395,7 @@ static void __init qca953x_clocks_init(void)
ddr_pll += frac * (ref_rate >> 6) / (ref_div << 4);
ddr_pll /= (1 << out_div);
clk_ctrl = ath79_pll_rr(QCA953X_PLL_CLK_CTRL_REG);
clk_ctrl = __raw_readl(pll_base + QCA953X_PLL_CLK_CTRL_REG);
postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
@@ -431,16 +427,12 @@ static void __init qca953x_clocks_init(void)
else
ahb_rate = cpu_pll / (postdiv + 1);
ath79_add_sys_clkdev("ref", ref_rate);
ath79_add_sys_clkdev("cpu", cpu_rate);
ath79_add_sys_clkdev("ddr", ddr_rate);
ath79_add_sys_clkdev("ahb", ahb_rate);
clk_add_alias("wdt", NULL, "ref", NULL);
clk_add_alias("uart", NULL, "ref", NULL);
ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
}
static void __init qca955x_clocks_init(void)
static void __init qca955x_clocks_init(void __iomem *pll_base)
{
unsigned long ref_rate;
unsigned long cpu_rate;
@@ -456,7 +448,9 @@ static void __init qca955x_clocks_init(void)
else
ref_rate = 25 * 1000 * 1000;
pll = ath79_pll_rr(QCA955X_PLL_CPU_CONFIG_REG);
ref_rate = ath79_setup_ref_clk(ref_rate);
pll = __raw_readl(pll_base + QCA955X_PLL_CPU_CONFIG_REG);
out_div = (pll >> QCA955X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
QCA955X_PLL_CPU_CONFIG_OUTDIV_MASK;
ref_div = (pll >> QCA955X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
@@ -470,7 +464,7 @@ static void __init qca955x_clocks_init(void)
cpu_pll += frac * ref_rate / (ref_div * (1 << 6));
cpu_pll /= (1 << out_div);
pll = ath79_pll_rr(QCA955X_PLL_DDR_CONFIG_REG);
pll = __raw_readl(pll_base + QCA955X_PLL_DDR_CONFIG_REG);
out_div = (pll >> QCA955X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
QCA955X_PLL_DDR_CONFIG_OUTDIV_MASK;
ref_div = (pll >> QCA955X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
@@ -484,7 +478,7 @@ static void __init qca955x_clocks_init(void)
ddr_pll += frac * ref_rate / (ref_div * (1 << 10));
ddr_pll /= (1 << out_div);
clk_ctrl = ath79_pll_rr(QCA955X_PLL_CLK_CTRL_REG);
clk_ctrl = __raw_readl(pll_base + QCA955X_PLL_CLK_CTRL_REG);
postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
@@ -516,16 +510,12 @@ static void __init qca955x_clocks_init(void)
else
ahb_rate = cpu_pll / (postdiv + 1);
ath79_add_sys_clkdev("ref", ref_rate);
clks[ATH79_CLK_CPU] = ath79_add_sys_clkdev("cpu", cpu_rate);
clks[ATH79_CLK_DDR] = ath79_add_sys_clkdev("ddr", ddr_rate);
clks[ATH79_CLK_AHB] = ath79_add_sys_clkdev("ahb", ahb_rate);
clk_add_alias("wdt", NULL, "ref", NULL);
clk_add_alias("uart", NULL, "ref", NULL);
ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
}
static void __init qca956x_clocks_init(void)
static void __init qca956x_clocks_init(void __iomem *pll_base)
{
unsigned long ref_rate;
unsigned long cpu_rate;
@@ -551,13 +541,15 @@ static void __init qca956x_clocks_init(void)
else
ref_rate = 25 * 1000 * 1000;
pll = ath79_pll_rr(QCA956X_PLL_CPU_CONFIG_REG);
ref_rate = ath79_setup_ref_clk(ref_rate);
pll = __raw_readl(pll_base + QCA956X_PLL_CPU_CONFIG_REG);
out_div = (pll >> QCA956X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
QCA956X_PLL_CPU_CONFIG_OUTDIV_MASK;
ref_div = (pll >> QCA956X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
QCA956X_PLL_CPU_CONFIG_REFDIV_MASK;
pll = ath79_pll_rr(QCA956X_PLL_CPU_CONFIG1_REG);
pll = __raw_readl(pll_base + QCA956X_PLL_CPU_CONFIG1_REG);
nint = (pll >> QCA956X_PLL_CPU_CONFIG1_NINT_SHIFT) &
QCA956X_PLL_CPU_CONFIG1_NINT_MASK;
hfrac = (pll >> QCA956X_PLL_CPU_CONFIG1_NFRAC_H_SHIFT) &
@@ -570,12 +562,12 @@ static void __init qca956x_clocks_init(void)
cpu_pll += (hfrac >> 13) * ref_rate / ref_div;
cpu_pll /= (1 << out_div);
pll = ath79_pll_rr(QCA956X_PLL_DDR_CONFIG_REG);
pll = __raw_readl(pll_base + QCA956X_PLL_DDR_CONFIG_REG);
out_div = (pll >> QCA956X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
QCA956X_PLL_DDR_CONFIG_OUTDIV_MASK;
ref_div = (pll >> QCA956X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
QCA956X_PLL_DDR_CONFIG_REFDIV_MASK;
pll = ath79_pll_rr(QCA956X_PLL_DDR_CONFIG1_REG);
pll = __raw_readl(pll_base + QCA956X_PLL_DDR_CONFIG1_REG);
nint = (pll >> QCA956X_PLL_DDR_CONFIG1_NINT_SHIFT) &
QCA956X_PLL_DDR_CONFIG1_NINT_MASK;
hfrac = (pll >> QCA956X_PLL_DDR_CONFIG1_NFRAC_H_SHIFT) &
@@ -588,7 +580,7 @@ static void __init qca956x_clocks_init(void)
ddr_pll += (hfrac >> 13) * ref_rate / ref_div;
ddr_pll /= (1 << out_div);
clk_ctrl = ath79_pll_rr(QCA956X_PLL_CLK_CTRL_REG);
clk_ctrl = __raw_readl(pll_base + QCA956X_PLL_CLK_CTRL_REG);
postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
@@ -620,72 +612,19 @@ static void __init qca956x_clocks_init(void)
else
ahb_rate = cpu_pll / (postdiv + 1);
ath79_add_sys_clkdev("ref", ref_rate);
ath79_add_sys_clkdev("cpu", cpu_rate);
ath79_add_sys_clkdev("ddr", ddr_rate);
ath79_add_sys_clkdev("ahb", ahb_rate);
clk_add_alias("wdt", NULL, "ref", NULL);
clk_add_alias("uart", NULL, "ref", NULL);
ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
}
void __init ath79_clocks_init(void)
{
if (soc_is_ar71xx())
ar71xx_clocks_init();
else if (soc_is_ar724x() || soc_is_ar913x())
ar724x_clocks_init();
else if (soc_is_ar933x())
ar933x_clocks_init();
else if (soc_is_ar934x())
ar934x_clocks_init();
else if (soc_is_qca953x())
qca953x_clocks_init();
else if (soc_is_qca955x())
qca955x_clocks_init();
else if (soc_is_qca956x() || soc_is_tp9343())
qca956x_clocks_init();
else
BUG();
}
unsigned long __init
ath79_get_sys_clk_rate(const char *id)
{
struct clk *clk;
unsigned long rate;
clk = clk_get(NULL, id);
if (IS_ERR(clk))
panic("unable to get %s clock, err=%d", id, (int) PTR_ERR(clk));
rate = clk_get_rate(clk);
clk_put(clk);
return rate;
}
#ifdef CONFIG_OF
static void __init ath79_clocks_init_dt(struct device_node *np)
{
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}
CLK_OF_DECLARE(ar7100, "qca,ar7100-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar7240, "qca,ar7240-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar9340, "qca,ar9340-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar9550, "qca,qca9550-pll", ath79_clocks_init_dt);
static void __init ath79_clocks_init_dt_ng(struct device_node *np)
{
struct clk *ref_clk;
void __iomem *pll_base;
ref_clk = of_clk_get(np, 0);
if (IS_ERR(ref_clk)) {
pr_err("%pOF: of_clk_get failed\n", np);
goto err;
}
if (!IS_ERR(ref_clk))
clks[ATH79_CLK_REF] = ref_clk;
pll_base = of_iomap(np, 0);
if (!pll_base) {
@@ -693,14 +632,24 @@ static void __init ath79_clocks_init_dt_ng(struct device_node *np)
goto err_clk;
}
if (of_device_is_compatible(np, "qca,ar9130-pll"))
ar724x_clk_init(ref_clk, pll_base);
if (of_device_is_compatible(np, "qca,ar7100-pll"))
ar71xx_clocks_init(pll_base);
else if (of_device_is_compatible(np, "qca,ar7240-pll") ||
of_device_is_compatible(np, "qca,ar9130-pll"))
ar724x_clocks_init(pll_base);
else if (of_device_is_compatible(np, "qca,ar9330-pll"))
ar9330_clk_init(ref_clk, pll_base);
else {
pr_err("%pOF: could not find any appropriate clk_init()\n", np);
goto err_iounmap;
}
ar933x_clocks_init(pll_base);
else if (of_device_is_compatible(np, "qca,ar9340-pll"))
ar934x_clocks_init(pll_base);
else if (of_device_is_compatible(np, "qca,qca9530-pll"))
qca953x_clocks_init(pll_base);
else if (of_device_is_compatible(np, "qca,qca9550-pll"))
qca955x_clocks_init(pll_base);
else if (of_device_is_compatible(np, "qca,qca9560-pll"))
qca956x_clocks_init(pll_base);
if (!clks[ATH79_CLK_MDIO])
clks[ATH79_CLK_MDIO] = clks[ATH79_CLK_REF];
if (of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data)) {
pr_err("%pOF: could not register clk provider\n", np);
@@ -714,10 +663,13 @@ static void __init ath79_clocks_init_dt_ng(struct device_node *np)
err_clk:
clk_put(ref_clk);
err:
return;
}
CLK_OF_DECLARE(ar9130_clk, "qca,ar9130-pll", ath79_clocks_init_dt_ng);
CLK_OF_DECLARE(ar9330_clk, "qca,ar9330-pll", ath79_clocks_init_dt_ng);
#endif
CLK_OF_DECLARE(ar7100_clk, "qca,ar7100-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar7240_clk, "qca,ar7240-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar9130_clk, "qca,ar9130-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar9330_clk, "qca,ar9330-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar9340_clk, "qca,ar9340-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar9530_clk, "qca,qca9530-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar9550_clk, "qca,qca9550-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar9560_clk, "qca,qca9560-pll", ath79_clocks_init_dt);

View File

@@ -19,11 +19,6 @@
#define ATH79_MEM_SIZE_MIN (2 * 1024 * 1024)
#define ATH79_MEM_SIZE_MAX (256 * 1024 * 1024)
void ath79_clocks_init(void);
unsigned long ath79_get_sys_clk_rate(const char *id);
void ath79_ddr_ctrl_init(void);
void ath79_gpio_init(void);
#endif /* __ATH79_COMMON_H */

View File

@@ -1,159 +0,0 @@
/*
* Atheros AR71XX/AR724X/AR913X common devices
*
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* Parts of this file are based on Atheros' 2.6.15 BSP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/platform_data/gpio-ath79.h>
#include <linux/serial_8250.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include "common.h"
#include "dev-common.h"
static struct resource ath79_uart_resources[] = {
{
.start = AR71XX_UART_BASE,
.end = AR71XX_UART_BASE + AR71XX_UART_SIZE - 1,
.flags = IORESOURCE_MEM,
},
};
#define AR71XX_UART_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP)
static struct plat_serial8250_port ath79_uart_data[] = {
{
.mapbase = AR71XX_UART_BASE,
.irq = ATH79_MISC_IRQ(3),
.flags = AR71XX_UART_FLAGS,
.iotype = UPIO_MEM32,
.regshift = 2,
}, {
/* terminating entry */
}
};
static struct platform_device ath79_uart_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.resource = ath79_uart_resources,
.num_resources = ARRAY_SIZE(ath79_uart_resources),
.dev = {
.platform_data = ath79_uart_data
},
};
static struct resource ar933x_uart_resources[] = {
{
.start = AR933X_UART_BASE,
.end = AR933X_UART_BASE + AR71XX_UART_SIZE - 1,
.flags = IORESOURCE_MEM,
},
{
.start = ATH79_MISC_IRQ(3),
.end = ATH79_MISC_IRQ(3),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device ar933x_uart_device = {
.name = "ar933x-uart",
.id = -1,
.resource = ar933x_uart_resources,
.num_resources = ARRAY_SIZE(ar933x_uart_resources),
};
void __init ath79_register_uart(void)
{
unsigned long uart_clk_rate;
uart_clk_rate = ath79_get_sys_clk_rate("uart");
if (soc_is_ar71xx() ||
soc_is_ar724x() ||
soc_is_ar913x() ||
soc_is_ar934x() ||
soc_is_qca955x()) {
ath79_uart_data[0].uartclk = uart_clk_rate;
platform_device_register(&ath79_uart_device);
} else if (soc_is_ar933x()) {
platform_device_register(&ar933x_uart_device);
} else {
BUG();
}
}
void __init ath79_register_wdt(void)
{
struct resource res;
memset(&res, 0, sizeof(res));
res.flags = IORESOURCE_MEM;
res.start = AR71XX_RESET_BASE + AR71XX_RESET_REG_WDOG_CTRL;
res.end = res.start + 0x8 - 1;
platform_device_register_simple("ath79-wdt", -1, &res, 1);
}
static struct ath79_gpio_platform_data ath79_gpio_pdata;
static struct resource ath79_gpio_resources[] = {
{
.flags = IORESOURCE_MEM,
.start = AR71XX_GPIO_BASE,
.end = AR71XX_GPIO_BASE + AR71XX_GPIO_SIZE - 1,
},
{
.start = ATH79_MISC_IRQ(2),
.end = ATH79_MISC_IRQ(2),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device ath79_gpio_device = {
.name = "ath79-gpio",
.id = -1,
.resource = ath79_gpio_resources,
.num_resources = ARRAY_SIZE(ath79_gpio_resources),
.dev = {
.platform_data = &ath79_gpio_pdata
},
};
void __init ath79_gpio_init(void)
{
if (soc_is_ar71xx()) {
ath79_gpio_pdata.ngpios = AR71XX_GPIO_COUNT;
} else if (soc_is_ar7240()) {
ath79_gpio_pdata.ngpios = AR7240_GPIO_COUNT;
} else if (soc_is_ar7241() || soc_is_ar7242()) {
ath79_gpio_pdata.ngpios = AR7241_GPIO_COUNT;
} else if (soc_is_ar913x()) {
ath79_gpio_pdata.ngpios = AR913X_GPIO_COUNT;
} else if (soc_is_ar933x()) {
ath79_gpio_pdata.ngpios = AR933X_GPIO_COUNT;
} else if (soc_is_ar934x()) {
ath79_gpio_pdata.ngpios = AR934X_GPIO_COUNT;
ath79_gpio_pdata.oe_inverted = 1;
} else if (soc_is_qca955x()) {
ath79_gpio_pdata.ngpios = QCA955X_GPIO_COUNT;
ath79_gpio_pdata.oe_inverted = 1;
} else {
BUG();
}
platform_device_register(&ath79_gpio_device);
}

View File

@@ -1,18 +0,0 @@
/*
* Atheros AR71XX/AR724X/AR913X common devices
*
* Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#ifndef _ATH79_DEV_COMMON_H
#define _ATH79_DEV_COMMON_H
void ath79_register_uart(void);
void ath79_register_wdt(void);
#endif /* _ATH79_DEV_COMMON_H */

View File

@@ -1,56 +0,0 @@
/*
* Atheros AR71XX/AR724X/AR913X GPIO button support
*
* Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "linux/init.h"
#include "linux/slab.h"
#include <linux/platform_device.h>
#include "dev-gpio-buttons.h"
void __init ath79_register_gpio_keys_polled(int id,
unsigned poll_interval,
unsigned nbuttons,
struct gpio_keys_button *buttons)
{
struct platform_device *pdev;
struct gpio_keys_platform_data pdata;
struct gpio_keys_button *p;
int err;
p = kmemdup(buttons, nbuttons * sizeof(*p), GFP_KERNEL);
if (!p)
return;
pdev = platform_device_alloc("gpio-keys-polled", id);
if (!pdev)
goto err_free_buttons;
memset(&pdata, 0, sizeof(pdata));
pdata.poll_interval = poll_interval;
pdata.nbuttons = nbuttons;
pdata.buttons = p;
err = platform_device_add_data(pdev, &pdata, sizeof(pdata));
if (err)
goto err_put_pdev;
err = platform_device_add(pdev);
if (err)
goto err_put_pdev;
return;
err_put_pdev:
platform_device_put(pdev);
err_free_buttons:
kfree(p);
}

View File

@@ -1,23 +0,0 @@
/*
* Atheros AR71XX/AR724X/AR913X GPIO button support
*
* Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#ifndef _ATH79_DEV_GPIO_BUTTONS_H
#define _ATH79_DEV_GPIO_BUTTONS_H
#include <linux/input.h>
#include <linux/gpio_keys.h>
void ath79_register_gpio_keys_polled(int id,
unsigned poll_interval,
unsigned nbuttons,
struct gpio_keys_button *buttons);
#endif /* _ATH79_DEV_GPIO_BUTTONS_H */

View File

@@ -1,54 +0,0 @@
/*
* Atheros AR71XX/AR724X/AR913X common GPIO LEDs support
*
* Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include "dev-leds-gpio.h"
void __init ath79_register_leds_gpio(int id,
unsigned num_leds,
struct gpio_led *leds)
{
struct platform_device *pdev;
struct gpio_led_platform_data pdata;
struct gpio_led *p;
int err;
p = kmemdup(leds, num_leds * sizeof(*p), GFP_KERNEL);
if (!p)
return;
pdev = platform_device_alloc("leds-gpio", id);
if (!pdev)
goto err_free_leds;
memset(&pdata, 0, sizeof(pdata));
pdata.num_leds = num_leds;
pdata.leds = p;
err = platform_device_add_data(pdev, &pdata, sizeof(pdata));
if (err)
goto err_put_pdev;
err = platform_device_add(pdev);
if (err)
goto err_put_pdev;
return;
err_put_pdev:
platform_device_put(pdev);
err_free_leds:
kfree(p);
}

View File

@@ -1,21 +0,0 @@
/*
* Atheros AR71XX/AR724X/AR913X common GPIO LEDs support
*
* Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#ifndef _ATH79_DEV_LEDS_GPIO_H
#define _ATH79_DEV_LEDS_GPIO_H
#include <linux/leds.h>
void ath79_register_leds_gpio(int id,
unsigned num_leds,
struct gpio_led *leds);
#endif /* _ATH79_DEV_LEDS_GPIO_H */

View File

@@ -1,38 +0,0 @@
/*
* Atheros AR71XX/AR724X/AR913X SPI controller device
*
* Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/platform_device.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include "dev-spi.h"
static struct resource ath79_spi_resources[] = {
{
.start = AR71XX_SPI_BASE,
.end = AR71XX_SPI_BASE + AR71XX_SPI_SIZE - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device ath79_spi_device = {
.name = "ath79-spi",
.id = -1,
.resource = ath79_spi_resources,
.num_resources = ARRAY_SIZE(ath79_spi_resources),
};
void __init ath79_register_spi(struct ath79_spi_platform_data *pdata,
struct spi_board_info const *info,
unsigned n)
{
spi_register_board_info(info, n);
ath79_spi_device.dev.platform_data = pdata;
platform_device_register(&ath79_spi_device);
}

View File

@@ -1,22 +0,0 @@
/*
* Atheros AR71XX/AR724X/AR913X SPI controller device
*
* Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#ifndef _ATH79_DEV_SPI_H
#define _ATH79_DEV_SPI_H
#include <linux/spi/spi.h>
#include <linux/platform_data/spi-ath79.h>
void ath79_register_spi(struct ath79_spi_platform_data *pdata,
struct spi_board_info const *info,
unsigned n);
#endif /* _ATH79_DEV_SPI_H */

View File

@@ -1,242 +0,0 @@
/*
* Atheros AR7XXX/AR9XXX USB Host Controller device
*
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* Parts of this file are based on Atheros' 2.6.15 BSP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/usb/ehci_pdriver.h>
#include <linux/usb/ohci_pdriver.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include "common.h"
#include "dev-usb.h"
static u64 ath79_usb_dmamask = DMA_BIT_MASK(32);
static struct usb_ohci_pdata ath79_ohci_pdata = {
};
static struct usb_ehci_pdata ath79_ehci_pdata_v1 = {
.has_synopsys_hc_bug = 1,
};
static struct usb_ehci_pdata ath79_ehci_pdata_v2 = {
.caps_offset = 0x100,
.has_tt = 1,
};
static void __init ath79_usb_register(const char *name, int id,
unsigned long base, unsigned long size,
int irq, const void *data,
size_t data_size)
{
struct resource res[2];
struct platform_device *pdev;
memset(res, 0, sizeof(res));
res[0].flags = IORESOURCE_MEM;
res[0].start = base;
res[0].end = base + size - 1;
res[1].flags = IORESOURCE_IRQ;
res[1].start = irq;
res[1].end = irq;
pdev = platform_device_register_resndata(NULL, name, id,
res, ARRAY_SIZE(res),
data, data_size);
if (IS_ERR(pdev)) {
pr_err("ath79: unable to register USB at %08lx, err=%d\n",
base, (int) PTR_ERR(pdev));
return;
}
pdev->dev.dma_mask = &ath79_usb_dmamask;
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
#define AR71XX_USB_RESET_MASK (AR71XX_RESET_USB_HOST | \
AR71XX_RESET_USB_PHY | \
AR71XX_RESET_USB_OHCI_DLL)
static void __init ath79_usb_setup(void)
{
void __iomem *usb_ctrl_base;
ath79_device_reset_set(AR71XX_USB_RESET_MASK);
mdelay(1000);
ath79_device_reset_clear(AR71XX_USB_RESET_MASK);
usb_ctrl_base = ioremap(AR71XX_USB_CTRL_BASE, AR71XX_USB_CTRL_SIZE);
/* Turning on the Buff and Desc swap bits */
__raw_writel(0xf0000, usb_ctrl_base + AR71XX_USB_CTRL_REG_CONFIG);
/* WAR for HW bug. Here it adjusts the duration between two SOFS */
__raw_writel(0x20c00, usb_ctrl_base + AR71XX_USB_CTRL_REG_FLADJ);
iounmap(usb_ctrl_base);
mdelay(900);
ath79_usb_register("ohci-platform", -1,
AR71XX_OHCI_BASE, AR71XX_OHCI_SIZE,
ATH79_MISC_IRQ(6),
&ath79_ohci_pdata, sizeof(ath79_ohci_pdata));
ath79_usb_register("ehci-platform", -1,
AR71XX_EHCI_BASE, AR71XX_EHCI_SIZE,
ATH79_CPU_IRQ(3),
&ath79_ehci_pdata_v1, sizeof(ath79_ehci_pdata_v1));
}
static void __init ar7240_usb_setup(void)
{
void __iomem *usb_ctrl_base;
ath79_device_reset_clear(AR7240_RESET_OHCI_DLL);
ath79_device_reset_set(AR7240_RESET_USB_HOST);
mdelay(1000);
ath79_device_reset_set(AR7240_RESET_OHCI_DLL);
ath79_device_reset_clear(AR7240_RESET_USB_HOST);
usb_ctrl_base = ioremap(AR7240_USB_CTRL_BASE, AR7240_USB_CTRL_SIZE);
/* WAR for HW bug. Here it adjusts the duration between two SOFS */
__raw_writel(0x3, usb_ctrl_base + AR71XX_USB_CTRL_REG_FLADJ);
iounmap(usb_ctrl_base);
ath79_usb_register("ohci-platform", -1,
AR7240_OHCI_BASE, AR7240_OHCI_SIZE,
ATH79_CPU_IRQ(3),
&ath79_ohci_pdata, sizeof(ath79_ohci_pdata));
}
static void __init ar724x_usb_setup(void)
{
ath79_device_reset_set(AR724X_RESET_USBSUS_OVERRIDE);
mdelay(10);
ath79_device_reset_clear(AR724X_RESET_USB_HOST);
mdelay(10);
ath79_device_reset_clear(AR724X_RESET_USB_PHY);
mdelay(10);
ath79_usb_register("ehci-platform", -1,
AR724X_EHCI_BASE, AR724X_EHCI_SIZE,
ATH79_CPU_IRQ(3),
&ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
}
static void __init ar913x_usb_setup(void)
{
ath79_device_reset_set(AR913X_RESET_USBSUS_OVERRIDE);
mdelay(10);
ath79_device_reset_clear(AR913X_RESET_USB_HOST);
mdelay(10);
ath79_device_reset_clear(AR913X_RESET_USB_PHY);
mdelay(10);
ath79_usb_register("ehci-platform", -1,
AR913X_EHCI_BASE, AR913X_EHCI_SIZE,
ATH79_CPU_IRQ(3),
&ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
}
static void __init ar933x_usb_setup(void)
{
ath79_device_reset_set(AR933X_RESET_USBSUS_OVERRIDE);
mdelay(10);
ath79_device_reset_clear(AR933X_RESET_USB_HOST);
mdelay(10);
ath79_device_reset_clear(AR933X_RESET_USB_PHY);
mdelay(10);
ath79_usb_register("ehci-platform", -1,
AR933X_EHCI_BASE, AR933X_EHCI_SIZE,
ATH79_CPU_IRQ(3),
&ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
}
static void __init ar934x_usb_setup(void)
{
u32 bootstrap;
bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
if (bootstrap & AR934X_BOOTSTRAP_USB_MODE_DEVICE)
return;
ath79_device_reset_set(AR934X_RESET_USBSUS_OVERRIDE);
udelay(1000);
ath79_device_reset_clear(AR934X_RESET_USB_PHY);
udelay(1000);
ath79_device_reset_clear(AR934X_RESET_USB_PHY_ANALOG);
udelay(1000);
ath79_device_reset_clear(AR934X_RESET_USB_HOST);
udelay(1000);
ath79_usb_register("ehci-platform", -1,
AR934X_EHCI_BASE, AR934X_EHCI_SIZE,
ATH79_CPU_IRQ(3),
&ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
}
static void __init qca955x_usb_setup(void)
{
ath79_usb_register("ehci-platform", 0,
QCA955X_EHCI0_BASE, QCA955X_EHCI_SIZE,
ATH79_IP3_IRQ(0),
&ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
ath79_usb_register("ehci-platform", 1,
QCA955X_EHCI1_BASE, QCA955X_EHCI_SIZE,
ATH79_IP3_IRQ(1),
&ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
}
void __init ath79_register_usb(void)
{
if (soc_is_ar71xx())
ath79_usb_setup();
else if (soc_is_ar7240())
ar7240_usb_setup();
else if (soc_is_ar7241() || soc_is_ar7242())
ar724x_usb_setup();
else if (soc_is_ar913x())
ar913x_usb_setup();
else if (soc_is_ar933x())
ar933x_usb_setup();
else if (soc_is_ar934x())
ar934x_usb_setup();
else if (soc_is_qca955x())
qca955x_usb_setup();
else
BUG();
}

View File

@@ -1,17 +0,0 @@
/*
* Atheros AR71XX/AR724X/AR913X USB Host Controller support
*
* Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#ifndef _ATH79_DEV_USB_H
#define _ATH79_DEV_USB_H
void ath79_register_usb(void);
#endif /* _ATH79_DEV_USB_H */

View File

@@ -1,155 +0,0 @@
/*
* Atheros AR913X/AR933X SoC built-in WMAC device support
*
* Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* Parts of this file are based on Atheros 2.6.15/2.6.31 BSP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/ath9k_platform.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include "dev-wmac.h"
static struct ath9k_platform_data ath79_wmac_data;
static struct resource ath79_wmac_resources[] = {
{
/* .start and .end fields are filled dynamically */
.flags = IORESOURCE_MEM,
}, {
/* .start and .end fields are filled dynamically */
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device ath79_wmac_device = {
.name = "ath9k",
.id = -1,
.resource = ath79_wmac_resources,
.num_resources = ARRAY_SIZE(ath79_wmac_resources),
.dev = {
.platform_data = &ath79_wmac_data,
},
};
static void __init ar913x_wmac_setup(void)
{
/* reset the WMAC */
ath79_device_reset_set(AR913X_RESET_AMBA2WMAC);
mdelay(10);
ath79_device_reset_clear(AR913X_RESET_AMBA2WMAC);
mdelay(10);
ath79_wmac_resources[0].start = AR913X_WMAC_BASE;
ath79_wmac_resources[0].end = AR913X_WMAC_BASE + AR913X_WMAC_SIZE - 1;
ath79_wmac_resources[1].start = ATH79_CPU_IRQ(2);
ath79_wmac_resources[1].end = ATH79_CPU_IRQ(2);
}
static int ar933x_wmac_reset(void)
{
ath79_device_reset_set(AR933X_RESET_WMAC);
ath79_device_reset_clear(AR933X_RESET_WMAC);
return 0;
}
static int ar933x_r1_get_wmac_revision(void)
{
return ath79_soc_rev;
}
static void __init ar933x_wmac_setup(void)
{
u32 t;
ar933x_wmac_reset();
ath79_wmac_device.name = "ar933x_wmac";
ath79_wmac_resources[0].start = AR933X_WMAC_BASE;
ath79_wmac_resources[0].end = AR933X_WMAC_BASE + AR933X_WMAC_SIZE - 1;
ath79_wmac_resources[1].start = ATH79_CPU_IRQ(2);
ath79_wmac_resources[1].end = ATH79_CPU_IRQ(2);
t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
if (t & AR933X_BOOTSTRAP_REF_CLK_40)
ath79_wmac_data.is_clk_25mhz = false;
else
ath79_wmac_data.is_clk_25mhz = true;
if (ath79_soc_rev == 1)
ath79_wmac_data.get_mac_revision = ar933x_r1_get_wmac_revision;
ath79_wmac_data.external_reset = ar933x_wmac_reset;
}
static void ar934x_wmac_setup(void)
{
u32 t;
ath79_wmac_device.name = "ar934x_wmac";
ath79_wmac_resources[0].start = AR934X_WMAC_BASE;
ath79_wmac_resources[0].end = AR934X_WMAC_BASE + AR934X_WMAC_SIZE - 1;
ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1);
ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1);
t = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
if (t & AR934X_BOOTSTRAP_REF_CLK_40)
ath79_wmac_data.is_clk_25mhz = false;
else
ath79_wmac_data.is_clk_25mhz = true;
}
static void qca955x_wmac_setup(void)
{
u32 t;
ath79_wmac_device.name = "qca955x_wmac";
ath79_wmac_resources[0].start = QCA955X_WMAC_BASE;
ath79_wmac_resources[0].end = QCA955X_WMAC_BASE + QCA955X_WMAC_SIZE - 1;
ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1);
ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1);
t = ath79_reset_rr(QCA955X_RESET_REG_BOOTSTRAP);
if (t & QCA955X_BOOTSTRAP_REF_CLK_40)
ath79_wmac_data.is_clk_25mhz = false;
else
ath79_wmac_data.is_clk_25mhz = true;
}
void __init ath79_register_wmac(u8 *cal_data)
{
if (soc_is_ar913x())
ar913x_wmac_setup();
else if (soc_is_ar933x())
ar933x_wmac_setup();
else if (soc_is_ar934x())
ar934x_wmac_setup();
else if (soc_is_qca955x())
qca955x_wmac_setup();
else
BUG();
if (cal_data)
memcpy(ath79_wmac_data.eeprom_data, cal_data,
sizeof(ath79_wmac_data.eeprom_data));
platform_device_register(&ath79_wmac_device);
}

View File

@@ -1,17 +0,0 @@
/*
* Atheros AR913X/AR933X SoC built-in WMAC device support
*
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#ifndef _ATH79_DEV_WMAC_H
#define _ATH79_DEV_WMAC_H
void ath79_register_wmac(u8 *cal_data);
#endif /* _ATH79_DEV_WMAC_H */

View File

@@ -1,169 +0,0 @@
/*
* Atheros AR71xx/AR724x/AR913x specific interrupt handling
*
* Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/of_irq.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include "common.h"
#include "machtypes.h"
static void ar934x_ip2_irq_dispatch(struct irq_desc *desc)
{
u32 status;
status = ath79_reset_rr(AR934X_RESET_REG_PCIE_WMAC_INT_STATUS);
if (status & AR934X_PCIE_WMAC_INT_PCIE_ALL) {
ath79_ddr_wb_flush(3);
generic_handle_irq(ATH79_IP2_IRQ(0));
} else if (status & AR934X_PCIE_WMAC_INT_WMAC_ALL) {
ath79_ddr_wb_flush(4);
generic_handle_irq(ATH79_IP2_IRQ(1));
} else {
spurious_interrupt();
}
}
static void ar934x_ip2_irq_init(void)
{
int i;
for (i = ATH79_IP2_IRQ_BASE;
i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++)
irq_set_chip_and_handler(i, &dummy_irq_chip,
handle_level_irq);
irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch);
}
static void qca955x_ip2_irq_dispatch(struct irq_desc *desc)
{
u32 status;
status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
status &= QCA955X_EXT_INT_PCIE_RC1_ALL | QCA955X_EXT_INT_WMAC_ALL;
if (status == 0) {
spurious_interrupt();
return;
}
if (status & QCA955X_EXT_INT_PCIE_RC1_ALL) {
/* TODO: flush DDR? */
generic_handle_irq(ATH79_IP2_IRQ(0));
}
if (status & QCA955X_EXT_INT_WMAC_ALL) {
/* TODO: flush DDR? */
generic_handle_irq(ATH79_IP2_IRQ(1));
}
}
static void qca955x_ip3_irq_dispatch(struct irq_desc *desc)
{
u32 status;
status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
status &= QCA955X_EXT_INT_PCIE_RC2_ALL |
QCA955X_EXT_INT_USB1 |
QCA955X_EXT_INT_USB2;
if (status == 0) {
spurious_interrupt();
return;
}
if (status & QCA955X_EXT_INT_USB1) {
/* TODO: flush DDR? */
generic_handle_irq(ATH79_IP3_IRQ(0));
}
if (status & QCA955X_EXT_INT_USB2) {
/* TODO: flush DDR? */
generic_handle_irq(ATH79_IP3_IRQ(1));
}
if (status & QCA955X_EXT_INT_PCIE_RC2_ALL) {
/* TODO: flush DDR? */
generic_handle_irq(ATH79_IP3_IRQ(2));
}
}
static void qca955x_irq_init(void)
{
int i;
for (i = ATH79_IP2_IRQ_BASE;
i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++)
irq_set_chip_and_handler(i, &dummy_irq_chip,
handle_level_irq);
irq_set_chained_handler(ATH79_CPU_IRQ(2), qca955x_ip2_irq_dispatch);
for (i = ATH79_IP3_IRQ_BASE;
i < ATH79_IP3_IRQ_BASE + ATH79_IP3_IRQ_COUNT; i++)
irq_set_chip_and_handler(i, &dummy_irq_chip,
handle_level_irq);
irq_set_chained_handler(ATH79_CPU_IRQ(3), qca955x_ip3_irq_dispatch);
}
void __init arch_init_irq(void)
{
unsigned irq_wb_chan2 = -1;
unsigned irq_wb_chan3 = -1;
bool misc_is_ar71xx;
if (mips_machtype == ATH79_MACH_GENERIC_OF) {
irqchip_init();
return;
}
if (soc_is_ar71xx() || soc_is_ar724x() ||
soc_is_ar913x() || soc_is_ar933x()) {
irq_wb_chan2 = 3;
irq_wb_chan3 = 2;
} else if (soc_is_ar934x()) {
irq_wb_chan3 = 2;
}
ath79_cpu_irq_init(irq_wb_chan2, irq_wb_chan3);
if (soc_is_ar71xx() || soc_is_ar913x())
misc_is_ar71xx = true;
else if (soc_is_ar724x() ||
soc_is_ar933x() ||
soc_is_ar934x() ||
soc_is_qca955x())
misc_is_ar71xx = false;
else
BUG();
ath79_misc_irq_init(
ath79_reset_base + AR71XX_RESET_REG_MISC_INT_STATUS,
ATH79_CPU_IRQ(6), ATH79_MISC_IRQ_BASE, misc_is_ar71xx);
if (soc_is_ar934x())
ar934x_ip2_irq_init();
else if (soc_is_qca955x())
qca955x_irq_init();
}

View File

@@ -1,92 +0,0 @@
/*
* Atheros AP121 board support
*
* Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "machtypes.h"
#include "dev-gpio-buttons.h"
#include "dev-leds-gpio.h"
#include "dev-spi.h"
#include "dev-usb.h"
#include "dev-wmac.h"
#define AP121_GPIO_LED_WLAN 0
#define AP121_GPIO_LED_USB 1
#define AP121_GPIO_BTN_JUMPSTART 11
#define AP121_GPIO_BTN_RESET 12
#define AP121_KEYS_POLL_INTERVAL 20 /* msecs */
#define AP121_KEYS_DEBOUNCE_INTERVAL (3 * AP121_KEYS_POLL_INTERVAL)
#define AP121_CAL_DATA_ADDR 0x1fff1000
static struct gpio_led ap121_leds_gpio[] __initdata = {
{
.name = "ap121:green:usb",
.gpio = AP121_GPIO_LED_USB,
.active_low = 0,
},
{
.name = "ap121:green:wlan",
.gpio = AP121_GPIO_LED_WLAN,
.active_low = 0,
},
};
static struct gpio_keys_button ap121_gpio_keys[] __initdata = {
{
.desc = "jumpstart button",
.type = EV_KEY,
.code = KEY_WPS_BUTTON,
.debounce_interval = AP121_KEYS_DEBOUNCE_INTERVAL,
.gpio = AP121_GPIO_BTN_JUMPSTART,
.active_low = 1,
},
{
.desc = "reset button",
.type = EV_KEY,
.code = KEY_RESTART,
.debounce_interval = AP121_KEYS_DEBOUNCE_INTERVAL,
.gpio = AP121_GPIO_BTN_RESET,
.active_low = 1,
}
};
static struct spi_board_info ap121_spi_info[] = {
{
.bus_num = 0,
.chip_select = 0,
.max_speed_hz = 25000000,
.modalias = "mx25l1606e",
}
};
static struct ath79_spi_platform_data ap121_spi_data = {
.bus_num = 0,
.num_chipselect = 1,
};
static void __init ap121_setup(void)
{
u8 *cal_data = (u8 *) KSEG1ADDR(AP121_CAL_DATA_ADDR);
ath79_register_leds_gpio(-1, ARRAY_SIZE(ap121_leds_gpio),
ap121_leds_gpio);
ath79_register_gpio_keys_polled(-1, AP121_KEYS_POLL_INTERVAL,
ARRAY_SIZE(ap121_gpio_keys),
ap121_gpio_keys);
ath79_register_spi(&ap121_spi_data, ap121_spi_info,
ARRAY_SIZE(ap121_spi_info));
ath79_register_usb();
ath79_register_wmac(cal_data);
}
MIPS_MACHINE(ATH79_MACH_AP121, "AP121", "Atheros AP121 reference board",
ap121_setup);

View File

@@ -1,156 +0,0 @@
/*
* Qualcomm Atheros AP136 reference board support
*
* Copyright (c) 2012 Qualcomm Atheros
* Copyright (c) 2012-2013 Gabor Juhos <juhosg@openwrt.org>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
#include <linux/pci.h>
#include <linux/ath9k_platform.h>
#include "machtypes.h"
#include "dev-gpio-buttons.h"
#include "dev-leds-gpio.h"
#include "dev-spi.h"
#include "dev-usb.h"
#include "dev-wmac.h"
#include "pci.h"
#define AP136_GPIO_LED_STATUS_RED 14
#define AP136_GPIO_LED_STATUS_GREEN 19
#define AP136_GPIO_LED_USB 4
#define AP136_GPIO_LED_WLAN_2G 13
#define AP136_GPIO_LED_WLAN_5G 12
#define AP136_GPIO_LED_WPS_RED 15
#define AP136_GPIO_LED_WPS_GREEN 20
#define AP136_GPIO_BTN_WPS 16
#define AP136_GPIO_BTN_RFKILL 21
#define AP136_KEYS_POLL_INTERVAL 20 /* msecs */
#define AP136_KEYS_DEBOUNCE_INTERVAL (3 * AP136_KEYS_POLL_INTERVAL)
#define AP136_WMAC_CALDATA_OFFSET 0x1000
#define AP136_PCIE_CALDATA_OFFSET 0x5000
static struct gpio_led ap136_leds_gpio[] __initdata = {
{
.name = "qca:green:status",
.gpio = AP136_GPIO_LED_STATUS_GREEN,
.active_low = 1,
},
{
.name = "qca:red:status",
.gpio = AP136_GPIO_LED_STATUS_RED,
.active_low = 1,
},
{
.name = "qca:green:wps",
.gpio = AP136_GPIO_LED_WPS_GREEN,
.active_low = 1,
},
{
.name = "qca:red:wps",
.gpio = AP136_GPIO_LED_WPS_RED,
.active_low = 1,
},
{
.name = "qca:red:wlan-2g",
.gpio = AP136_GPIO_LED_WLAN_2G,
.active_low = 1,
},
{
.name = "qca:red:usb",
.gpio = AP136_GPIO_LED_USB,
.active_low = 1,
}
};
static struct gpio_keys_button ap136_gpio_keys[] __initdata = {
{
.desc = "WPS button",
.type = EV_KEY,
.code = KEY_WPS_BUTTON,
.debounce_interval = AP136_KEYS_DEBOUNCE_INTERVAL,
.gpio = AP136_GPIO_BTN_WPS,
.active_low = 1,
},
{
.desc = "RFKILL button",
.type = EV_KEY,
.code = KEY_RFKILL,
.debounce_interval = AP136_KEYS_DEBOUNCE_INTERVAL,
.gpio = AP136_GPIO_BTN_RFKILL,
.active_low = 1,
},
};
static struct spi_board_info ap136_spi_info[] = {
{
.bus_num = 0,
.chip_select = 0,
.max_speed_hz = 25000000,
.modalias = "mx25l6405d",
}
};
static struct ath79_spi_platform_data ap136_spi_data = {
.bus_num = 0,
.num_chipselect = 1,
};
#ifdef CONFIG_PCI
static struct ath9k_platform_data ap136_ath9k_data;
static int ap136_pci_plat_dev_init(struct pci_dev *dev)
{
if (dev->bus->number == 1 && (PCI_SLOT(dev->devfn)) == 0)
dev->dev.platform_data = &ap136_ath9k_data;
return 0;
}
static void __init ap136_pci_init(u8 *eeprom)
{
memcpy(ap136_ath9k_data.eeprom_data, eeprom,
sizeof(ap136_ath9k_data.eeprom_data));
ath79_pci_set_plat_dev_init(ap136_pci_plat_dev_init);
ath79_register_pci();
}
#else
static inline void ap136_pci_init(u8 *eeprom) {}
#endif /* CONFIG_PCI */
static void __init ap136_setup(void)
{
u8 *art = (u8 *) KSEG1ADDR(0x1fff0000);
ath79_register_leds_gpio(-1, ARRAY_SIZE(ap136_leds_gpio),
ap136_leds_gpio);
ath79_register_gpio_keys_polled(-1, AP136_KEYS_POLL_INTERVAL,
ARRAY_SIZE(ap136_gpio_keys),
ap136_gpio_keys);
ath79_register_spi(&ap136_spi_data, ap136_spi_info,
ARRAY_SIZE(ap136_spi_info));
ath79_register_usb();
ath79_register_wmac(art + AP136_WMAC_CALDATA_OFFSET);
ap136_pci_init(art + AP136_PCIE_CALDATA_OFFSET);
}
MIPS_MACHINE(ATH79_MACH_AP136_010, "AP136-010",
"Atheros AP136-010 reference board",
ap136_setup);


@@ -1,100 +0,0 @@
/*
* Atheros AP81 board support
*
* Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2009 Imre Kaloz <kaloz@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "machtypes.h"
#include "dev-wmac.h"
#include "dev-gpio-buttons.h"
#include "dev-leds-gpio.h"
#include "dev-spi.h"
#include "dev-usb.h"
#define AP81_GPIO_LED_STATUS 1
#define AP81_GPIO_LED_AOSS 3
#define AP81_GPIO_LED_WLAN 6
#define AP81_GPIO_LED_POWER 14
#define AP81_GPIO_BTN_SW4 12
#define AP81_GPIO_BTN_SW1 21
#define AP81_KEYS_POLL_INTERVAL 20 /* msecs */
#define AP81_KEYS_DEBOUNCE_INTERVAL (3 * AP81_KEYS_POLL_INTERVAL)
#define AP81_CAL_DATA_ADDR 0x1fff1000
static struct gpio_led ap81_leds_gpio[] __initdata = {
{
.name = "ap81:green:status",
.gpio = AP81_GPIO_LED_STATUS,
.active_low = 1,
}, {
.name = "ap81:amber:aoss",
.gpio = AP81_GPIO_LED_AOSS,
.active_low = 1,
}, {
.name = "ap81:green:wlan",
.gpio = AP81_GPIO_LED_WLAN,
.active_low = 1,
}, {
.name = "ap81:green:power",
.gpio = AP81_GPIO_LED_POWER,
.active_low = 1,
}
};
static struct gpio_keys_button ap81_gpio_keys[] __initdata = {
{
.desc = "sw1",
.type = EV_KEY,
.code = BTN_0,
.debounce_interval = AP81_KEYS_DEBOUNCE_INTERVAL,
.gpio = AP81_GPIO_BTN_SW1,
.active_low = 1,
} , {
.desc = "sw4",
.type = EV_KEY,
.code = BTN_1,
.debounce_interval = AP81_KEYS_DEBOUNCE_INTERVAL,
.gpio = AP81_GPIO_BTN_SW4,
.active_low = 1,
}
};
static struct spi_board_info ap81_spi_info[] = {
{
.bus_num = 0,
.chip_select = 0,
.max_speed_hz = 25000000,
.modalias = "m25p64",
}
};
static struct ath79_spi_platform_data ap81_spi_data = {
.bus_num = 0,
.num_chipselect = 1,
};
static void __init ap81_setup(void)
{
u8 *cal_data = (u8 *) KSEG1ADDR(AP81_CAL_DATA_ADDR);
ath79_register_leds_gpio(-1, ARRAY_SIZE(ap81_leds_gpio),
ap81_leds_gpio);
ath79_register_gpio_keys_polled(-1, AP81_KEYS_POLL_INTERVAL,
ARRAY_SIZE(ap81_gpio_keys),
ap81_gpio_keys);
ath79_register_spi(&ap81_spi_data, ap81_spi_info,
ARRAY_SIZE(ap81_spi_info));
ath79_register_wmac(cal_data);
ath79_register_usb();
}
MIPS_MACHINE(ATH79_MACH_AP81, "AP81", "Atheros AP81 reference board",
ap81_setup);


@@ -1,136 +0,0 @@
/*
* Atheros DB120 reference board support
*
* Copyright (c) 2011 Qualcomm Atheros
* Copyright (c) 2011 Gabor Juhos <juhosg@openwrt.org>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
#include <linux/pci.h>
#include <linux/ath9k_platform.h>
#include "machtypes.h"
#include "dev-gpio-buttons.h"
#include "dev-leds-gpio.h"
#include "dev-spi.h"
#include "dev-usb.h"
#include "dev-wmac.h"
#include "pci.h"
#define DB120_GPIO_LED_WLAN_5G 12
#define DB120_GPIO_LED_WLAN_2G 13
#define DB120_GPIO_LED_STATUS 14
#define DB120_GPIO_LED_WPS 15
#define DB120_GPIO_BTN_WPS 16
#define DB120_KEYS_POLL_INTERVAL 20 /* msecs */
#define DB120_KEYS_DEBOUNCE_INTERVAL (3 * DB120_KEYS_POLL_INTERVAL)
#define DB120_WMAC_CALDATA_OFFSET 0x1000
#define DB120_PCIE_CALDATA_OFFSET 0x5000
static struct gpio_led db120_leds_gpio[] __initdata = {
{
.name = "db120:green:status",
.gpio = DB120_GPIO_LED_STATUS,
.active_low = 1,
},
{
.name = "db120:green:wps",
.gpio = DB120_GPIO_LED_WPS,
.active_low = 1,
},
{
.name = "db120:green:wlan-5g",
.gpio = DB120_GPIO_LED_WLAN_5G,
.active_low = 1,
},
{
.name = "db120:green:wlan-2g",
.gpio = DB120_GPIO_LED_WLAN_2G,
.active_low = 1,
},
};
static struct gpio_keys_button db120_gpio_keys[] __initdata = {
{
.desc = "WPS button",
.type = EV_KEY,
.code = KEY_WPS_BUTTON,
.debounce_interval = DB120_KEYS_DEBOUNCE_INTERVAL,
.gpio = DB120_GPIO_BTN_WPS,
.active_low = 1,
},
};
static struct spi_board_info db120_spi_info[] = {
{
.bus_num = 0,
.chip_select = 0,
.max_speed_hz = 25000000,
.modalias = "s25sl064a",
}
};
static struct ath79_spi_platform_data db120_spi_data = {
.bus_num = 0,
.num_chipselect = 1,
};
#ifdef CONFIG_PCI
static struct ath9k_platform_data db120_ath9k_data;
static int db120_pci_plat_dev_init(struct pci_dev *dev)
{
switch (PCI_SLOT(dev->devfn)) {
case 0:
dev->dev.platform_data = &db120_ath9k_data;
break;
}
return 0;
}
static void __init db120_pci_init(u8 *eeprom)
{
memcpy(db120_ath9k_data.eeprom_data, eeprom,
sizeof(db120_ath9k_data.eeprom_data));
ath79_pci_set_plat_dev_init(db120_pci_plat_dev_init);
ath79_register_pci();
}
#else
static inline void db120_pci_init(u8 *eeprom) {}
#endif /* CONFIG_PCI */
static void __init db120_setup(void)
{
u8 *art = (u8 *) KSEG1ADDR(0x1fff0000);
ath79_register_leds_gpio(-1, ARRAY_SIZE(db120_leds_gpio),
db120_leds_gpio);
ath79_register_gpio_keys_polled(-1, DB120_KEYS_POLL_INTERVAL,
ARRAY_SIZE(db120_gpio_keys),
db120_gpio_keys);
ath79_register_spi(&db120_spi_data, db120_spi_info,
ARRAY_SIZE(db120_spi_info));
ath79_register_usb();
ath79_register_wmac(art + DB120_WMAC_CALDATA_OFFSET);
db120_pci_init(art + DB120_PCIE_CALDATA_OFFSET);
}
MIPS_MACHINE(ATH79_MACH_DB120, "DB120", "Atheros DB120 reference board",
db120_setup);


@@ -1,128 +0,0 @@
/*
* Atheros PB44 reference board support
*
* Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/gpio/machine.h>
#include <linux/platform_data/pcf857x.h>
#include "machtypes.h"
#include "dev-gpio-buttons.h"
#include "dev-leds-gpio.h"
#include "dev-spi.h"
#include "dev-usb.h"
#include "pci.h"
#define PB44_GPIO_I2C_SCL 0
#define PB44_GPIO_I2C_SDA 1
#define PB44_GPIO_EXP_BASE 16
#define PB44_GPIO_SW_RESET (PB44_GPIO_EXP_BASE + 6)
#define PB44_GPIO_SW_JUMP (PB44_GPIO_EXP_BASE + 8)
#define PB44_GPIO_LED_JUMP1 (PB44_GPIO_EXP_BASE + 9)
#define PB44_GPIO_LED_JUMP2 (PB44_GPIO_EXP_BASE + 10)
#define PB44_KEYS_POLL_INTERVAL 20 /* msecs */
#define PB44_KEYS_DEBOUNCE_INTERVAL (3 * PB44_KEYS_POLL_INTERVAL)
static struct gpiod_lookup_table pb44_i2c_gpiod_table = {
.dev_id = "i2c-gpio.0",
.table = {
GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SDA,
NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SCL,
NULL, 1, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
},
};
static struct platform_device pb44_i2c_gpio_device = {
.name = "i2c-gpio",
.id = 0,
.dev = {
.platform_data = NULL,
}
};
static struct pcf857x_platform_data pb44_pcf857x_data = {
.gpio_base = PB44_GPIO_EXP_BASE,
};
static struct i2c_board_info pb44_i2c_board_info[] __initdata = {
{
I2C_BOARD_INFO("pcf8575", 0x20),
.platform_data = &pb44_pcf857x_data,
},
};
static struct gpio_led pb44_leds_gpio[] __initdata = {
{
.name = "pb44:amber:jump1",
.gpio = PB44_GPIO_LED_JUMP1,
.active_low = 1,
}, {
.name = "pb44:green:jump2",
.gpio = PB44_GPIO_LED_JUMP2,
.active_low = 1,
},
};
static struct gpio_keys_button pb44_gpio_keys[] __initdata = {
{
.desc = "soft_reset",
.type = EV_KEY,
.code = KEY_RESTART,
.debounce_interval = PB44_KEYS_DEBOUNCE_INTERVAL,
.gpio = PB44_GPIO_SW_RESET,
.active_low = 1,
} , {
.desc = "jumpstart",
.type = EV_KEY,
.code = KEY_WPS_BUTTON,
.debounce_interval = PB44_KEYS_DEBOUNCE_INTERVAL,
.gpio = PB44_GPIO_SW_JUMP,
.active_low = 1,
}
};
static struct spi_board_info pb44_spi_info[] = {
{
.bus_num = 0,
.chip_select = 0,
.max_speed_hz = 25000000,
.modalias = "m25p64",
},
};
static struct ath79_spi_platform_data pb44_spi_data = {
.bus_num = 0,
.num_chipselect = 1,
};
static void __init pb44_init(void)
{
gpiod_add_lookup_table(&pb44_i2c_gpiod_table);
i2c_register_board_info(0, pb44_i2c_board_info,
ARRAY_SIZE(pb44_i2c_board_info));
platform_device_register(&pb44_i2c_gpio_device);
ath79_register_leds_gpio(-1, ARRAY_SIZE(pb44_leds_gpio),
pb44_leds_gpio);
ath79_register_gpio_keys_polled(-1, PB44_KEYS_POLL_INTERVAL,
ARRAY_SIZE(pb44_gpio_keys),
pb44_gpio_keys);
ath79_register_spi(&pb44_spi_data, pb44_spi_info,
ARRAY_SIZE(pb44_spi_info));
ath79_register_usb();
ath79_register_pci();
}
MIPS_MACHINE(ATH79_MACH_PB44, "PB44", "Atheros PB44 reference board",
pb44_init);


@@ -1,126 +0,0 @@
/*
* Ubiquiti Networks XM (rev 1.0) board support
*
* Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
*
* Derived from: mach-pb44.c
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/ath9k_platform.h>
#include <asm/mach-ath79/irq.h>
#include "machtypes.h"
#include "dev-gpio-buttons.h"
#include "dev-leds-gpio.h"
#include "dev-spi.h"
#include "pci.h"
#define UBNT_XM_GPIO_LED_L1 0
#define UBNT_XM_GPIO_LED_L2 1
#define UBNT_XM_GPIO_LED_L3 11
#define UBNT_XM_GPIO_LED_L4 7
#define UBNT_XM_GPIO_BTN_RESET 12
#define UBNT_XM_KEYS_POLL_INTERVAL 20
#define UBNT_XM_KEYS_DEBOUNCE_INTERVAL (3 * UBNT_XM_KEYS_POLL_INTERVAL)
#define UBNT_XM_EEPROM_ADDR (u8 *) KSEG1ADDR(0x1fff1000)
static struct gpio_led ubnt_xm_leds_gpio[] __initdata = {
{
.name = "ubnt-xm:red:link1",
.gpio = UBNT_XM_GPIO_LED_L1,
.active_low = 0,
}, {
.name = "ubnt-xm:orange:link2",
.gpio = UBNT_XM_GPIO_LED_L2,
.active_low = 0,
}, {
.name = "ubnt-xm:green:link3",
.gpio = UBNT_XM_GPIO_LED_L3,
.active_low = 0,
}, {
.name = "ubnt-xm:green:link4",
.gpio = UBNT_XM_GPIO_LED_L4,
.active_low = 0,
},
};
static struct gpio_keys_button ubnt_xm_gpio_keys[] __initdata = {
{
.desc = "reset",
.type = EV_KEY,
.code = KEY_RESTART,
.debounce_interval = UBNT_XM_KEYS_DEBOUNCE_INTERVAL,
.gpio = UBNT_XM_GPIO_BTN_RESET,
.active_low = 1,
}
};
static struct spi_board_info ubnt_xm_spi_info[] = {
{
.bus_num = 0,
.chip_select = 0,
.max_speed_hz = 25000000,
.modalias = "mx25l6405d",
}
};
static struct ath79_spi_platform_data ubnt_xm_spi_data = {
.bus_num = 0,
.num_chipselect = 1,
};
#ifdef CONFIG_PCI
static struct ath9k_platform_data ubnt_xm_eeprom_data;
static int ubnt_xm_pci_plat_dev_init(struct pci_dev *dev)
{
switch (PCI_SLOT(dev->devfn)) {
case 0:
dev->dev.platform_data = &ubnt_xm_eeprom_data;
break;
}
return 0;
}
static void __init ubnt_xm_pci_init(void)
{
memcpy(ubnt_xm_eeprom_data.eeprom_data, UBNT_XM_EEPROM_ADDR,
sizeof(ubnt_xm_eeprom_data.eeprom_data));
ath79_pci_set_plat_dev_init(ubnt_xm_pci_plat_dev_init);
ath79_register_pci();
}
#else
static inline void ubnt_xm_pci_init(void) {}
#endif /* CONFIG_PCI */
static void __init ubnt_xm_init(void)
{
ath79_register_leds_gpio(-1, ARRAY_SIZE(ubnt_xm_leds_gpio),
ubnt_xm_leds_gpio);
ath79_register_gpio_keys_polled(-1, UBNT_XM_KEYS_POLL_INTERVAL,
ARRAY_SIZE(ubnt_xm_gpio_keys),
ubnt_xm_gpio_keys);
ath79_register_spi(&ubnt_xm_spi_data, ubnt_xm_spi_info,
ARRAY_SIZE(ubnt_xm_spi_info));
ubnt_xm_pci_init();
}
MIPS_MACHINE(ATH79_MACH_UBNT_XM,
"UBNT-XM",
"Ubiquiti Networks XM (rev 1.0) board",
ubnt_xm_init);


@@ -1,28 +0,0 @@
/*
* Atheros AR71XX/AR724X/AR913X machine type definitions
*
* Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#ifndef _ATH79_MACHTYPE_H
#define _ATH79_MACHTYPE_H
#include <asm/mips_machine.h>
enum ath79_mach_type {
ATH79_MACH_GENERIC_OF = -1, /* Device tree board */
ATH79_MACH_GENERIC = 0,
ATH79_MACH_AP121, /* Atheros AP121 reference board */
ATH79_MACH_AP136_010, /* Atheros AP136-010 reference board */
ATH79_MACH_AP81, /* Atheros AP81 reference board */
ATH79_MACH_DB120, /* Atheros DB120 reference board */
ATH79_MACH_PB44, /* Atheros PB44 reference board */
ATH79_MACH_UBNT_XM, /* Ubiquiti Networks XM board rev 1.0 */
};
#endif /* _ATH79_MACHTYPE_H */


@@ -1,273 +0,0 @@
/*
* Atheros AR71XX/AR724X specific PCI setup code
*
* Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* Parts of this file are based on Atheros' 2.6.15 BSP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/resource.h>
#include <linux/platform_device.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/irq.h>
#include "pci.h"
static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev);
static const struct ath79_pci_irq *ath79_pci_irq_map;
static unsigned ath79_pci_nr_irqs;
static const struct ath79_pci_irq ar71xx_pci_irq_map[] = {
{
.slot = 17,
.pin = 1,
.irq = ATH79_PCI_IRQ(0),
}, {
.slot = 18,
.pin = 1,
.irq = ATH79_PCI_IRQ(1),
}, {
.slot = 19,
.pin = 1,
.irq = ATH79_PCI_IRQ(2),
}
};
static const struct ath79_pci_irq ar724x_pci_irq_map[] = {
{
.slot = 0,
.pin = 1,
.irq = ATH79_PCI_IRQ(0),
}
};
static const struct ath79_pci_irq qca955x_pci_irq_map[] = {
{
.bus = 0,
.slot = 0,
.pin = 1,
.irq = ATH79_PCI_IRQ(0),
},
{
.bus = 1,
.slot = 0,
.pin = 1,
.irq = ATH79_PCI_IRQ(1),
},
};
int pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
{
int irq = -1;
int i;
if (ath79_pci_nr_irqs == 0 ||
ath79_pci_irq_map == NULL) {
if (soc_is_ar71xx()) {
ath79_pci_irq_map = ar71xx_pci_irq_map;
ath79_pci_nr_irqs = ARRAY_SIZE(ar71xx_pci_irq_map);
} else if (soc_is_ar724x() ||
soc_is_ar9342() ||
soc_is_ar9344()) {
ath79_pci_irq_map = ar724x_pci_irq_map;
ath79_pci_nr_irqs = ARRAY_SIZE(ar724x_pci_irq_map);
} else if (soc_is_qca955x()) {
ath79_pci_irq_map = qca955x_pci_irq_map;
ath79_pci_nr_irqs = ARRAY_SIZE(qca955x_pci_irq_map);
} else {
pr_crit("pci %s: invalid irq map\n",
pci_name((struct pci_dev *) dev));
return irq;
}
}
for (i = 0; i < ath79_pci_nr_irqs; i++) {
const struct ath79_pci_irq *entry;
entry = &ath79_pci_irq_map[i];
if (entry->bus == dev->bus->number &&
entry->slot == slot &&
entry->pin == pin) {
irq = entry->irq;
break;
}
}
if (irq < 0)
pr_crit("pci %s: no irq found for pin %u\n",
pci_name((struct pci_dev *) dev), pin);
else
pr_info("pci %s: using irq %d for pin %u\n",
pci_name((struct pci_dev *) dev), irq, pin);
return irq;
}
int pcibios_plat_dev_init(struct pci_dev *dev)
{
if (ath79_pci_plat_dev_init)
return ath79_pci_plat_dev_init(dev);
return 0;
}
void __init ath79_pci_set_irq_map(unsigned nr_irqs,
const struct ath79_pci_irq *map)
{
ath79_pci_nr_irqs = nr_irqs;
ath79_pci_irq_map = map;
}
void __init ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev))
{
ath79_pci_plat_dev_init = func;
}
static struct platform_device *
ath79_register_pci_ar71xx(void)
{
struct platform_device *pdev;
struct resource res[4];
memset(res, 0, sizeof(res));
res[0].name = "cfg_base";
res[0].flags = IORESOURCE_MEM;
res[0].start = AR71XX_PCI_CFG_BASE;
res[0].end = AR71XX_PCI_CFG_BASE + AR71XX_PCI_CFG_SIZE - 1;
res[1].flags = IORESOURCE_IRQ;
res[1].start = ATH79_CPU_IRQ(2);
res[1].end = ATH79_CPU_IRQ(2);
res[2].name = "io_base";
res[2].flags = IORESOURCE_IO;
res[2].start = 0;
res[2].end = 0;
res[3].name = "mem_base";
res[3].flags = IORESOURCE_MEM;
res[3].start = AR71XX_PCI_MEM_BASE;
res[3].end = AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1;
pdev = platform_device_register_simple("ar71xx-pci", -1,
res, ARRAY_SIZE(res));
return pdev;
}
static struct platform_device *
ath79_register_pci_ar724x(int id,
unsigned long cfg_base,
unsigned long ctrl_base,
unsigned long crp_base,
unsigned long mem_base,
unsigned long mem_size,
unsigned long io_base,
int irq)
{
struct platform_device *pdev;
struct resource res[6];
memset(res, 0, sizeof(res));
res[0].name = "cfg_base";
res[0].flags = IORESOURCE_MEM;
res[0].start = cfg_base;
res[0].end = cfg_base + AR724X_PCI_CFG_SIZE - 1;
res[1].name = "ctrl_base";
res[1].flags = IORESOURCE_MEM;
res[1].start = ctrl_base;
res[1].end = ctrl_base + AR724X_PCI_CTRL_SIZE - 1;
res[2].flags = IORESOURCE_IRQ;
res[2].start = irq;
res[2].end = irq;
res[3].name = "mem_base";
res[3].flags = IORESOURCE_MEM;
res[3].start = mem_base;
res[3].end = mem_base + mem_size - 1;
res[4].name = "io_base";
res[4].flags = IORESOURCE_IO;
res[4].start = io_base;
res[4].end = io_base;
res[5].name = "crp_base";
res[5].flags = IORESOURCE_MEM;
res[5].start = crp_base;
res[5].end = crp_base + AR724X_PCI_CRP_SIZE - 1;
pdev = platform_device_register_simple("ar724x-pci", id,
res, ARRAY_SIZE(res));
return pdev;
}
int __init ath79_register_pci(void)
{
struct platform_device *pdev = NULL;
if (soc_is_ar71xx()) {
pdev = ath79_register_pci_ar71xx();
} else if (soc_is_ar724x()) {
pdev = ath79_register_pci_ar724x(-1,
AR724X_PCI_CFG_BASE,
AR724X_PCI_CTRL_BASE,
AR724X_PCI_CRP_BASE,
AR724X_PCI_MEM_BASE,
AR724X_PCI_MEM_SIZE,
0,
ATH79_CPU_IRQ(2));
} else if (soc_is_ar9342() ||
soc_is_ar9344()) {
u32 bootstrap;
bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
if ((bootstrap & AR934X_BOOTSTRAP_PCIE_RC) == 0)
return -ENODEV;
pdev = ath79_register_pci_ar724x(-1,
AR724X_PCI_CFG_BASE,
AR724X_PCI_CTRL_BASE,
AR724X_PCI_CRP_BASE,
AR724X_PCI_MEM_BASE,
AR724X_PCI_MEM_SIZE,
0,
ATH79_IP2_IRQ(0));
} else if (soc_is_qca9558()) {
pdev = ath79_register_pci_ar724x(0,
QCA955X_PCI_CFG_BASE0,
QCA955X_PCI_CTRL_BASE0,
QCA955X_PCI_CRP_BASE0,
QCA955X_PCI_MEM_BASE0,
QCA955X_PCI_MEM_SIZE,
0,
ATH79_IP2_IRQ(0));
pdev = ath79_register_pci_ar724x(1,
QCA955X_PCI_CFG_BASE1,
QCA955X_PCI_CTRL_BASE1,
QCA955X_PCI_CRP_BASE1,
QCA955X_PCI_MEM_BASE1,
QCA955X_PCI_MEM_SIZE,
1,
ATH79_IP3_IRQ(2));
} else {
/* No PCI support */
return -ENODEV;
}
if (!pdev)
pr_err("unable to register PCI controller device\n");
return pdev ? 0 : -ENODEV;
}


@@ -1,35 +0,0 @@
/*
* Atheros AR71XX/AR724X PCI support
*
* Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#ifndef _ATH79_PCI_H
#define _ATH79_PCI_H
struct ath79_pci_irq {
int bus;
u8 slot;
u8 pin;
int irq;
};
#ifdef CONFIG_PCI
void ath79_pci_set_irq_map(unsigned nr_irqs, const struct ath79_pci_irq *map);
void ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev));
int ath79_register_pci(void);
#else
static inline void
ath79_pci_set_irq_map(unsigned nr_irqs, const struct ath79_pci_irq *map) {}
static inline void
ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *)) {}
static inline int ath79_register_pci(void) { return 0; }
#endif
#endif /* _ATH79_PCI_H */


@@ -19,6 +19,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of_fdt.h>
#include <linux/irqchip.h>
#include <asm/bootinfo.h>
#include <asm/idle.h>
@@ -31,8 +32,6 @@
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include "common.h"
#include "dev-common.h"
#include "machtypes.h"
#define ATH79_SYS_TYPE_LEN 64
@@ -235,25 +234,21 @@ void __init plat_mem_setup(void)
else if (fw_passed_dtb)
__dt_setup_arch((void *)KSEG0ADDR(fw_passed_dtb));
if (mips_machtype != ATH79_MACH_GENERIC_OF) {
ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE,
AR71XX_RESET_SIZE);
ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
AR71XX_PLL_SIZE);
ath79_detect_sys_type();
ath79_ddr_ctrl_init();
ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE,
AR71XX_RESET_SIZE);
ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
AR71XX_PLL_SIZE);
ath79_detect_sys_type();
ath79_ddr_ctrl_init();
detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
/* OF machines should use the reset driver */
_machine_restart = ath79_restart;
}
detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
_machine_restart = ath79_restart;
_machine_halt = ath79_halt;
pm_power_off = ath79_halt;
}
static void __init ath79_of_plat_time_init(void)
void __init plat_time_init(void)
{
struct device_node *np;
struct clk *clk;
@@ -283,61 +278,12 @@ static void __init ath79_of_plat_time_init(void)
clk_put(clk);
}
void __init plat_time_init(void)
void __init arch_init_irq(void)
{
unsigned long cpu_clk_rate;
unsigned long ahb_clk_rate;
unsigned long ddr_clk_rate;
unsigned long ref_clk_rate;
if (IS_ENABLED(CONFIG_OF) && mips_machtype == ATH79_MACH_GENERIC_OF) {
ath79_of_plat_time_init();
return;
}
ath79_clocks_init();
cpu_clk_rate = ath79_get_sys_clk_rate("cpu");
ahb_clk_rate = ath79_get_sys_clk_rate("ahb");
ddr_clk_rate = ath79_get_sys_clk_rate("ddr");
ref_clk_rate = ath79_get_sys_clk_rate("ref");
pr_info("Clocks: CPU:%lu.%03luMHz, DDR:%lu.%03luMHz, AHB:%lu.%03luMHz, Ref:%lu.%03luMHz\n",
cpu_clk_rate / 1000000, (cpu_clk_rate / 1000) % 1000,
ddr_clk_rate / 1000000, (ddr_clk_rate / 1000) % 1000,
ahb_clk_rate / 1000000, (ahb_clk_rate / 1000) % 1000,
ref_clk_rate / 1000000, (ref_clk_rate / 1000) % 1000);
mips_hpt_frequency = cpu_clk_rate / 2;
irqchip_init();
}
static int __init ath79_setup(void)
{
if (mips_machtype == ATH79_MACH_GENERIC_OF)
return 0;
ath79_gpio_init();
ath79_register_uart();
ath79_register_wdt();
mips_machine_setup();
return 0;
}
arch_initcall(ath79_setup);
void __init device_tree_init(void)
{
unflatten_and_copy_device_tree();
}
MIPS_MACHINE(ATH79_MACH_GENERIC,
"Generic",
"Generic AR71XX/AR724X/AR913X based board",
NULL);
MIPS_MACHINE(ATH79_MACH_GENERIC_OF,
"DTB",
"Generic AR71XX/AR724X/AR913X based board (DT)",
NULL);


@@ -147,7 +147,7 @@ bcm47xx_buttons_buffalo_whr_g125[] __initconst = {
static const struct gpio_keys_button
bcm47xx_buttons_buffalo_whr_g54s[] __initconst = {
BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON),
BCM47XX_GPIO_KEY(4, KEY_RESTART),
BCM47XX_GPIO_KEY_H(4, KEY_RESTART),
BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode switch */
};


@@ -152,11 +152,11 @@ bcm47xx_leds_buffalo_whr_g125[] __initconst = {
static const struct gpio_led
bcm47xx_leds_buffalo_whr_g54s[] __initconst = {
BCM47XX_GPIO_LED(1, "unk", "bridge", 1, LEDS_GPIO_DEFSTATE_OFF),
BCM47XX_GPIO_LED(2, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
BCM47XX_GPIO_LED(3, "unk", "internal", 1, LEDS_GPIO_DEFSTATE_OFF),
BCM47XX_GPIO_LED(6, "unk", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
BCM47XX_GPIO_LED(1, "green", "bridge", 1, LEDS_GPIO_DEFSTATE_OFF),
BCM47XX_GPIO_LED(2, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
BCM47XX_GPIO_LED(3, "green", "internal", 1, LEDS_GPIO_DEFSTATE_OFF),
BCM47XX_GPIO_LED(6, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
BCM47XX_GPIO_LED(7, "red", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
};
static const struct gpio_led


@@ -180,14 +180,28 @@ interface@0 {
ethernet@0 {
phy-handle = <&phy2>;
cavium,alt-phy-handle = <&phy100>;
rx-delay = <0>;
tx-delay = <0>;
fixed-link {
speed = <1000>;
full-duplex;
};
};
ethernet@1 {
phy-handle = <&phy3>;
cavium,alt-phy-handle = <&phy101>;
rx-delay = <0>;
tx-delay = <0>;
fixed-link {
speed = <1000>;
full-duplex;
};
};
ethernet@2 {
phy-handle = <&phy4>;
cavium,alt-phy-handle = <&phy102>;
rx-delay = <0>;
tx-delay = <0>;
};
ethernet@3 {
compatible = "cavium,octeon-3860-pip-port";


@@ -33,12 +33,18 @@ pip: pip@11800a0000000 {
interface@0 {
ethernet@0 {
phy-handle = <&phy7>;
rx-delay = <0>;
tx-delay = <0x10>;
};
ethernet@1 {
phy-handle = <&phy6>;
rx-delay = <0>;
tx-delay = <0x10>;
};
ethernet@2 {
phy-handle = <&phy5>;
rx-delay = <0>;
tx-delay = <0x10>;
};
};
};


@@ -31,6 +31,7 @@
* network ports from the rest of the cvmx-helper files.
*/
#include <linux/bug.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-bootinfo.h>
@@ -210,56 +211,18 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
{
cvmx_helper_link_info_t result;
WARN(!octeon_is_simulation(),
"Using deprecated link status - please update your DT");
/* Unless we fix it later, all links are defaulted to down */
result.u64 = 0;
/*
* This switch statement should handle all ports that either don't use
* Marvell PHYS, or don't support in-band status.
*/
switch (cvmx_sysinfo_get()->board_type) {
case CVMX_BOARD_TYPE_SIM:
if (octeon_is_simulation()) {
/* The simulator gives you a simulated 1Gbps full duplex link */
result.s.link_up = 1;
result.s.full_duplex = 1;
result.s.speed = 1000;
return result;
case CVMX_BOARD_TYPE_EBH3100:
case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
/* Port 1 on these boards is always Gigabit */
if (ipd_port == 1) {
result.s.link_up = 1;
result.s.full_duplex = 1;
result.s.speed = 1000;
return result;
}
/* Fall through to the generic code below */
break;
case CVMX_BOARD_TYPE_CUST_NB5:
/* Port 1 on these boards is always Gigabit */
if (ipd_port == 1) {
result.s.link_up = 1;
result.s.full_duplex = 1;
result.s.speed = 1000;
return result;
}
break;
case CVMX_BOARD_TYPE_BBGW_REF:
/* Port 1 on these boards is always Gigabit */
if (ipd_port == 2) {
/* Port 2 is not hooked up */
result.u64 = 0;
return result;
} else {
/* Ports 0 and 1 connect to the switch */
result.s.link_up = 1;
result.s.full_duplex = 1;
result.s.speed = 1000;
return result;
}
break;
}
if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
@@ -357,45 +320,6 @@ int __cvmx_helper_board_interface_probe(int interface, int supported_ports)
return supported_ports;
}
/**
* Enable packet input/output from the hardware. This function is
* called after by cvmx_helper_packet_hardware_enable() to
* perform board specific initialization. For most boards
* nothing is needed.
*
* @interface: Interface to enable
*
* Returns Zero on success, negative on failure
*/
int __cvmx_helper_board_hardware_enable(int interface)
{
if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5) {
if (interface == 0) {
/* Different config for switch port */
cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(1, interface), 0);
cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(1, interface), 0);
/*
* Boards with gigabit WAN ports need a
* different setting that is compatible with
* 100 Mbit settings
*/
cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface),
0xc);
cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface),
0xc);
}
} else if (cvmx_sysinfo_get()->board_type ==
CVMX_BOARD_TYPE_UBNT_E100) {
cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface), 0);
cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface), 0x10);
cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(1, interface), 0);
cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(1, interface), 0x10);
cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(2, interface), 0);
cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(2, interface), 0x10);
}
return 0;
}
/**
* Get the clock type used for the USB block based on board type.
* Used by the USB code for auto configuration of clock type.


@@ -30,6 +30,7 @@
* Helper functions for common, but complicated tasks.
*
*/
#include <linux/bug.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-config.h>
@@ -43,7 +44,6 @@
#include <asm/octeon/cvmx-helper-board.h>
#include <asm/octeon/cvmx-pip-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>
#include <asm/octeon/cvmx-asxx-defs.h>
/* Port count per interface */
@@ -317,22 +317,6 @@ cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
}
if (interface == 0
&& cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5
&& cvmx_sysinfo_get()->board_rev_major == 1) {
/*
* Lie about interface type of CN3005 board. This
* board has a switch on port 1 like the other
* evaluation boards, but it is connected over RGMII
* instead of GMII. Report GMII mode so that the
* speed is forced to 1 Gbit full duplex. Other than
* some initial configuration (which does not use the
* output of this function) there is no difference in
* setup between GMII and RGMII modes.
*/
return CVMX_HELPER_INTERFACE_MODE_GMII;
}
/* Interface 1 is always disabled on CN31XX and CN30XX */
if ((interface == 1)
&& (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN30XX)
@@ -778,7 +762,6 @@ static int __cvmx_helper_packet_hardware_enable(int interface)
result = __cvmx_helper_loop_enable(interface);
break;
}
result |= __cvmx_helper_board_hardware_enable(interface);
return result;
}
@@ -1026,7 +1009,6 @@ int cvmx_helper_initialize_packet_io_global(void)
int result = 0;
int interface;
union cvmx_l2c_cfg l2c_cfg;
union cvmx_smix_en smix_en;
const int num_interfaces = cvmx_helper_get_number_of_interfaces();
/*
@@ -1046,24 +1028,6 @@ int cvmx_helper_initialize_packet_io_global(void)
l2c_cfg.s.rfb_arb_mode = 0;
cvmx_write_csr(CVMX_L2C_CFG, l2c_cfg.u64);
/* Make sure SMI/MDIO is enabled so we can query PHYs */
smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(0));
if (!smix_en.s.en) {
smix_en.s.en = 1;
cvmx_write_csr(CVMX_SMIX_EN(0), smix_en.u64);
}
/* Newer chips actually have two SMI/MDIO interfaces */
if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
!OCTEON_IS_MODEL(OCTEON_CN58XX) &&
!OCTEON_IS_MODEL(OCTEON_CN50XX)) {
smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(1));
if (!smix_en.s.en) {
smix_en.s.en = 1;
cvmx_write_csr(CVMX_SMIX_EN(1), smix_en.u64);
}
}
cvmx_pko_initialize_global();
for (interface = 0; interface < num_interfaces; interface++) {
result |= cvmx_helper_interface_probe(interface);
@@ -1136,6 +1100,7 @@ cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port)
if (index == 0)
result = __cvmx_helper_rgmii_link_get(ipd_port);
else {
WARN(1, "Using deprecated link status - please update your DT");
result.s.full_duplex = 1;
result.s.link_up = 1;
result.s.speed = 1000;


@@ -63,31 +63,11 @@ static int reset_statistics(void *data, u64 value)
DEFINE_SIMPLE_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n");
static int init_debufs(void)
static void init_debugfs(void)
{
struct dentry *show_dentry;
dir = debugfs_create_dir("oct_ilm", 0);
if (!dir) {
pr_err("oct_ilm: failed to create debugfs entry oct_ilm\n");
return -1;
}
show_dentry = debugfs_create_file("statistics", 0222, dir, NULL,
&oct_ilm_ops);
if (!show_dentry) {
pr_err("oct_ilm: failed to create debugfs entry oct_ilm/statistics\n");
return -1;
}
show_dentry = debugfs_create_file("reset", 0222, dir, NULL,
&reset_statistics_ops);
if (!show_dentry) {
pr_err("oct_ilm: failed to create debugfs entry oct_ilm/reset\n");
return -1;
}
return 0;
debugfs_create_file("statistics", 0222, dir, NULL, &oct_ilm_ops);
debugfs_create_file("reset", 0222, dir, NULL, &reset_statistics_ops);
}
static void init_latency_info(struct latency_info *li, int startup)
@@ -169,11 +149,7 @@ static __init int oct_ilm_module_init(void)
int rc;
int irq = OCTEON_IRQ_TIMER0 + TIMER_NUM;
rc = init_debufs();
if (rc) {
WARN(1, "Could not create debugfs entries");
return rc;
}
init_debugfs();
rc = request_irq(irq, cvm_oct_ciu_timer_interrupt, IRQF_NO_THREAD,
"oct_ilm", 0);


@@ -458,6 +458,23 @@ static bool __init octeon_has_88e1145(void)
!OCTEON_IS_MODEL(OCTEON_CN56XX);
}
static bool __init octeon_has_fixed_link(int ipd_port)
{
switch (cvmx_sysinfo_get()->board_type) {
case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
case CVMX_BOARD_TYPE_CUST_NB5:
case CVMX_BOARD_TYPE_EBH3100:
/* Port 1 on these boards is always gigabit. */
return ipd_port == 1;
case CVMX_BOARD_TYPE_BBGW_REF:
/* Ports 0 and 1 connect to the switch. */
return ipd_port == 0 || ipd_port == 1;
}
return false;
}
static void __init octeon_fdt_set_phy(int eth, int phy_addr)
{
const __be32 *phy_handle;
@@ -586,12 +603,52 @@ static void __init octeon_fdt_rm_ethernet(int node)
fdt_nop_node(initial_boot_params, node);
}
static void __init _octeon_rx_tx_delay(int eth, int rx_delay, int tx_delay)
{
fdt_setprop_inplace_cell(initial_boot_params, eth, "rx-delay",
rx_delay);
fdt_setprop_inplace_cell(initial_boot_params, eth, "tx-delay",
tx_delay);
}
static void __init octeon_rx_tx_delay(int eth, int iface, int port)
{
switch (cvmx_sysinfo_get()->board_type) {
case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
if (iface == 0) {
if (port == 0) {
/*
* Boards with gigabit WAN ports need a
* different setting that is compatible with
* 100 Mbit settings
*/
_octeon_rx_tx_delay(eth, 0xc, 0x0c);
return;
} else if (port == 1) {
/* Different config for switch port. */
_octeon_rx_tx_delay(eth, 0x0, 0x0);
return;
}
}
break;
case CVMX_BOARD_TYPE_UBNT_E100:
if (iface == 0 && port <= 2) {
_octeon_rx_tx_delay(eth, 0x0, 0x10);
return;
}
break;
}
fdt_nop_property(initial_boot_params, eth, "rx-delay");
fdt_nop_property(initial_boot_params, eth, "tx-delay");
}
static void __init octeon_fdt_pip_port(int iface, int i, int p, int max)
{
char name_buffer[20];
int eth;
int phy_addr;
int ipd_port;
int fixed_link;
snprintf(name_buffer, sizeof(name_buffer), "ethernet@%x", p);
eth = fdt_subnode_offset(initial_boot_params, iface, name_buffer);
@@ -609,6 +666,13 @@ static void __init octeon_fdt_pip_port(int iface, int i, int p, int max)
phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
octeon_fdt_set_phy(eth, phy_addr);
fixed_link = fdt_subnode_offset(initial_boot_params, eth, "fixed-link");
if (fixed_link < 0)
WARN_ON(octeon_has_fixed_link(ipd_port));
else if (!octeon_has_fixed_link(ipd_port))
fdt_nop_node(initial_boot_params, fixed_link);
octeon_rx_tx_delay(eth, i, p);
}
static void __init octeon_fdt_pip_iface(int pip, int idx)


@@ -13,7 +13,6 @@ CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_LANTIQ=y
CONFIG_PCI_LANTIQ=y
CONFIG_XRX200_PHY_FW=y
CONFIG_CPU_MIPS32_R2=y
CONFIG_MIPS_VPE_LOADER=y
CONFIG_NR_CPUS=2


@@ -3,7 +3,6 @@ generated-y += syscall_table_32_o32.h
generated-y += syscall_table_64_n32.h
generated-y += syscall_table_64_n64.h
generated-y += syscall_table_64_o32.h
generic-$(CONFIG_GENERIC_CSUM) += checksum.h
generic-y += current.h
generic-y += device.h
generic-y += dma-contiguous.h


@@ -105,6 +105,20 @@
*/
#define STYPE_SYNC_MB 0x10
/*
* stype 0x14 - A completion barrier specific to global invalidations
*
* When a sync instruction of this type completes any preceding GINVI or GINVT
* operation has been globalized & completed on all coherent CPUs. Anything
* that the GINV* instruction should invalidate will have been invalidated on
* all coherent CPUs when this instruction completes. It is implementation
* specific whether the GINV* instructions themselves will ensure completion,
* or this sync type will.
*
* In systems implementing global invalidates (ie. with Config5.GI == 2 or 3)
* this sync type also requires that previous SYNCI operations have completed.
*/
#define STYPE_GINV 0x14
#ifdef CONFIG_CPU_HAS_SYNC
#define __sync() \
@@ -258,6 +272,11 @@
#define loongson_llsc_mb() do { } while (0)
#endif
static inline void sync_ginv(void)
{
asm volatile("sync\t%0" :: "i"(STYPE_GINV));
}
#include <asm-generic/barrier.h>
#endif /* __ASM_BARRIER_H */


@@ -25,7 +25,6 @@
*
* MIPS specific flush operations:
*
* - flush_cache_sigtramp() flush signal trampoline
* - flush_icache_all() flush the entire instruction cache
* - flush_data_cache_page() flushes a page from the data cache
* - __flush_icache_user_range(start, end) flushes range of user instructions
@@ -110,7 +109,6 @@ extern void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len);
extern void (*flush_cache_sigtramp)(unsigned long addr);
extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void * addr);
extern void (*flush_data_cache_page)(unsigned long addr);


@@ -36,6 +36,8 @@
*/
extern unsigned long __cmpxchg_called_with_bad_pointer(void)
__compiletime_error("Bad argument size for cmpxchg");
extern unsigned long __cmpxchg64_unsupported(void)
__compiletime_error("cmpxchg64 not available; cpu_has_64bits may be false");
extern unsigned long __xchg_called_with_bad_pointer(void)
__compiletime_error("Bad argument size for xchg");
@@ -204,12 +206,102 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
cmpxchg((ptr), (o), (n)); \
})
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#ifndef CONFIG_SMP
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
#endif
#endif
# include <asm-generic/cmpxchg-local.h>
# define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
# ifdef CONFIG_SMP
static inline unsigned long __cmpxchg64(volatile void *ptr,
unsigned long long old,
unsigned long long new)
{
unsigned long long tmp, ret;
unsigned long flags;
/*
* The assembly below has to combine 32 bit values into a 64 bit
* register, and split 64 bit values from one register into two. If we
* were to take an interrupt in the middle of this we'd only save the
* least significant 32 bits of each register & probably clobber the
* most significant 32 bits of the 64 bit values we're using. In order
* to avoid this we must disable interrupts.
*/
local_irq_save(flags);
asm volatile(
" .set push \n"
" .set " MIPS_ISA_ARCH_LEVEL " \n"
/* Load 64 bits from ptr */
"1: lld %L0, %3 # __cmpxchg64 \n"
/*
* Split the 64 bit value we loaded into the 2 registers that hold the
* ret variable.
*/
" dsra %M0, %L0, 32 \n"
" sll %L0, %L0, 0 \n"
/*
* Compare ret against old, breaking out of the loop if they don't
* match.
*/
" bne %M0, %M4, 2f \n"
" bne %L0, %L4, 2f \n"
/*
* Combine the 32 bit halves from the 2 registers that hold the new
* variable into a single 64 bit register.
*/
# if MIPS_ISA_REV >= 2
" move %L1, %L5 \n"
" dins %L1, %M5, 32, 32 \n"
# else
" dsll %L1, %L5, 32 \n"
" dsrl %L1, %L1, 32 \n"
" .set noat \n"
" dsll $at, %M5, 32 \n"
" or %L1, %L1, $at \n"
" .set at \n"
# endif
/* Attempt to store new at ptr */
" scd %L1, %2 \n"
/* If we failed, loop! */
"\t" __scbeqz " %L1, 1b \n"
" .set pop \n"
"2: \n"
: "=&r"(ret),
"=&r"(tmp),
"=" GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr)
: GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr),
"r" (old),
"r" (new)
: "memory");
local_irq_restore(flags);
return ret;
}
# define cmpxchg64(ptr, o, n) ({ \
unsigned long long __old = (__typeof__(*(ptr)))(o); \
unsigned long long __new = (__typeof__(*(ptr)))(n); \
__typeof__(*(ptr)) __res; \
\
/* \
* We can only use cmpxchg64 if we know that the CPU supports \
* 64-bits, ie. lld & scd. Our call to __cmpxchg64_unsupported \
* will cause a build error unless cpu_has_64bits is a \
* compile-time constant 1. \
*/ \
if (cpu_has_64bits && kernel_uses_llsc) \
__res = __cmpxchg64((ptr), __old, __new); \
else \
__res = __cmpxchg64_unsupported(); \
\
__res; \
})
# else /* !CONFIG_SMP */
# define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
# endif /* !CONFIG_SMP */
#endif /* !CONFIG_64BIT */
#undef __scbeqz
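
Purely for illustration and not part of this diff: a minimal sketch of how a caller could use the cmpxchg64() helper introduced above to update a 64-bit value atomically on a 32-bit kernel. The event_count variable and the function name are invented for the example.

/* Hypothetical 64-bit counter, shown only to demonstrate cmpxchg64(). */
static u64 event_count;

static void event_count_inc(void)
{
	u64 old, new;

	/* Retry until no other CPU raced with us between the read and the swap. */
	do {
		old = READ_ONCE(event_count);
		new = old + 1;
	} while (cmpxchg64(&event_count, old, new) != old);
}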


@@ -590,6 +590,19 @@
# define cpu_has_mipsmt_pertccounters 0
#endif /* CONFIG_MIPS_MT_SMP */
/*
* We only enable MMID support for configurations which natively support 64 bit
* atomics because getting good performance from the allocator relies upon
* efficient atomic64_*() functions.
*/
#ifndef cpu_has_mmid
# ifdef CONFIG_GENERIC_ATOMIC64
# define cpu_has_mmid 0
# else
# define cpu_has_mmid __isa_ge_and_opt(6, MIPS_CPU_MMID)
# endif
#endif
/*
* Guest capabilities
*/
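
As a hedged sketch only (the helper name is invented, and real callers also mask the value, as the mmu_context.h changes later in this diff show), the cpu_has_mmid test added above is meant to let context-handling code pick between the new MemoryMapID register and the classic EntryHi ASID field at run time:

static inline void example_write_context_id(u64 ctx)
{
	if (cpu_has_mmid)
		write_c0_memorymapid(ctx);	/* MMID lives in CP0 MemoryMapID ($4, sel 5) */
	else
		write_c0_entryhi(ctx);		/* classic ASID lives in CP0 EntryHi */
}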


@@ -422,6 +422,7 @@ enum cpu_type_enum {
MBIT_ULL(55) /* CPU shares FTLB entries with another */
#define MIPS_CPU_MT_PER_TC_PERF_COUNTERS \
MBIT_ULL(56) /* CPU has perf counters implemented per TC (MIPSMT ASE) */
#define MIPS_CPU_MMID MBIT_ULL(57) /* CPU supports MemoryMapIDs */
/*
* CPU ASE encodings


@@ -0,0 +1,56 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MIPS_ASM_GINVT_H__
#define __MIPS_ASM_GINVT_H__
#include <asm/mipsregs.h>
enum ginvt_type {
GINVT_FULL,
GINVT_VA,
GINVT_MMID,
};
#ifdef TOOLCHAIN_SUPPORTS_GINV
# define _ASM_SET_GINV ".set ginv\n"
#else
_ASM_MACRO_1R1I(ginvt, rs, type,
_ASM_INSN_IF_MIPS(0x7c0000bd | (__rs << 21) | (\\type << 8))
_ASM_INSN32_IF_MM(0x0000717c | (__rs << 16) | (\\type << 9)));
# define _ASM_SET_GINV
#endif
static inline void ginvt(unsigned long addr, enum ginvt_type type)
{
asm volatile(
".set push\n"
_ASM_SET_GINV
" ginvt %0, %1\n"
".set pop"
: /* no outputs */
: "r"(addr), "i"(type)
: "memory");
}
static inline void ginvt_full(void)
{
ginvt(0, GINVT_FULL);
}
static inline void ginvt_va(unsigned long addr)
{
addr &= PAGE_MASK << 1;
ginvt(addr, GINVT_VA);
}
static inline void ginvt_mmid(void)
{
ginvt(0, GINVT_MMID);
}
static inline void ginvt_va_mmid(unsigned long addr)
{
addr &= PAGE_MASK << 1;
ginvt(addr, GINVT_VA | GINVT_MMID);
}
#endif /* __MIPS_ASM_GINVT_H__ */
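
Condensed from the drop_mmu_context() rework later in this diff, a sketch of how these helpers are expected to pair with sync_ginv() and the hazard barriers; the wrapper function name is hypothetical.

static void example_invalidate_mmid(u32 mmid)
{
	u32 old_mmid = read_c0_memorymapid();

	write_c0_memorymapid(mmid);	/* select the MMID the GINVT should match */
	mtc0_tlbw_hazard();		/* make sure the CP0 write has taken effect */
	ginvt_mmid();			/* globally invalidate matching TLB entries */
	sync_ginv();			/* wait for the invalidation to complete everywhere */
	write_c0_memorymapid(old_mmid);
	instruction_hazard();
}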


@@ -41,7 +41,7 @@ static inline unsigned long arch_local_irq_save(void)
" .set push \n"
" .set reorder \n"
" .set noat \n"
#if defined(CONFIG_CPU_LOONGSON3)
#if defined(CONFIG_CPU_LOONGSON3) || defined (CONFIG_CPU_LOONGSON1)
" mfc0 %[flags], $12 \n"
" di \n"
#else


@@ -178,8 +178,4 @@ static inline u32 ath79_reset_rr(unsigned reg)
void ath79_device_reset_set(u32 mask);
void ath79_device_reset_clear(u32 mask);
void ath79_cpu_irq_init(unsigned irq_wb_chan2, unsigned irq_wb_chan3);
void ath79_misc_irq_init(void __iomem *regs, int irq,
int irq_base, bool is_ar71xx);
#endif /* __ASM_MACH_ATH79_H */


@@ -10,13 +10,15 @@
#ifndef __ASM_MACH_IP27_IRQ_H
#define __ASM_MACH_IP27_IRQ_H
/*
* A hardwired interrupt number is completely stupid for this system - a
* large configuration might have thousands if not tenthousands of
* interrupts.
*/
#define NR_IRQS 256
#include_next <irq.h>
#define IP27_HUB_PEND0_IRQ (MIPS_CPU_IRQ_BASE + 2)
#define IP27_HUB_PEND1_IRQ (MIPS_CPU_IRQ_BASE + 3)
#define IP27_RT_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 4)
#define IP27_HUB_IRQ_BASE (MIPS_CPU_IRQ_BASE + 8)
#define IP27_HUB_IRQ_COUNT 128
#endif /* __ASM_MACH_IP27_IRQ_H */


@@ -8,20 +8,11 @@
#define pa_to_nid(addr) NASID_TO_COMPACT_NODEID(NASID_GET(addr))
#define LEVELS_PER_SLICE 128
struct slice_data {
unsigned long irq_enable_mask[2];
int level_to_irq[LEVELS_PER_SLICE];
};
struct hub_data {
kern_vars_t kern_vars;
DECLARE_BITMAP(h_bigwin_used, HUB_NUM_BIG_WINDOW);
cpumask_t h_cpus;
unsigned long slice_map;
unsigned long irq_alloc_mask[2];
struct slice_data slice[2];
};
struct node_data {


@@ -17,19 +17,15 @@
extern struct platform_device ls1x_uart_pdev;
extern struct platform_device ls1x_cpufreq_pdev;
extern struct platform_device ls1x_dma_pdev;
extern struct platform_device ls1x_eth0_pdev;
extern struct platform_device ls1x_eth1_pdev;
extern struct platform_device ls1x_ehci_pdev;
extern struct platform_device ls1x_gpio0_pdev;
extern struct platform_device ls1x_gpio1_pdev;
extern struct platform_device ls1x_nand_pdev;
extern struct platform_device ls1x_rtc_pdev;
extern struct platform_device ls1x_wdt_pdev;
void __init ls1x_clk_init(void);
void __init ls1x_dma_set_platdata(struct plat_ls1x_dma *pdata);
void __init ls1x_nand_set_platdata(struct plat_ls1x_nand *pdata);
void __init ls1x_rtc_set_extclk(struct platform_device *pdev);
void __init ls1x_serial_set_uartclk(struct platform_device *pdev);


@@ -667,6 +667,7 @@
#define MIPS_CONF5_FRE (_ULCAST_(1) << 8)
#define MIPS_CONF5_UFE (_ULCAST_(1) << 9)
#define MIPS_CONF5_CA2 (_ULCAST_(1) << 14)
#define MIPS_CONF5_MI (_ULCAST_(1) << 17)
#define MIPS_CONF5_CRCP (_ULCAST_(1) << 18)
#define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27)
#define MIPS_CONF5_EVA (_ULCAST_(1) << 28)
@@ -1247,6 +1248,13 @@ __asm__(".macro parse_r var r\n\t"
ENC \
".endm")
/* Instructions with 1 register operand & 1 immediate operand */
#define _ASM_MACRO_1R1I(OP, R1, I2, ENC) \
__asm__(".macro " #OP " " #R1 ", " #I2 "\n\t" \
"parse_r __" #R1 ", \\" #R1 "\n\t" \
ENC \
".endm")
/* Instructions with 2 register operands */
#define _ASM_MACRO_2R(OP, R1, R2, ENC) \
__asm__(".macro " #OP " " #R1 ", " #R2 "\n\t" \
@@ -1603,6 +1611,9 @@ do { \
#define read_c0_xcontextconfig() __read_ulong_c0_register($4, 3)
#define write_c0_xcontextconfig(val) __write_ulong_c0_register($4, 3, val)
#define read_c0_memorymapid() __read_32bit_c0_register($4, 5)
#define write_c0_memorymapid(val) __write_32bit_c0_register($4, 5, val)
#define read_c0_pagemask() __read_32bit_c0_register($5, 0)
#define write_c0_pagemask(val) __write_32bit_c0_register($5, 0, val)


@@ -7,7 +7,11 @@
#include <linux/wait.h>
typedef struct {
u64 asid[NR_CPUS];
union {
u64 asid[NR_CPUS];
atomic64_t mmid;
};
void *vdso;
/* lock to be held whilst modifying fp_bd_emupage_allocmap */


@@ -17,8 +17,10 @@
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/dsemul.h>
#include <asm/ginvt.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
@@ -72,6 +74,19 @@ extern unsigned long pgd_current[];
TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
/*
* The ginvt instruction will invalidate wired entries when its type field
* targets anything other than the entire TLB. That means that if we were to
* allow the kernel to create wired entries with the MMID of current->active_mm
* then those wired entries could be invalidated when we later use ginvt to
* invalidate TLB entries with that MMID.
*
* In order to prevent ginvt from trashing wired entries, we reserve one MMID
* for use by the kernel when creating wired entries. This MMID will never be
* assigned to a struct mm, and we'll never target it with a ginvt instruction.
*/
#define MMID_KERNEL_WIRED 0
/*
* All unused by hardware upper bits will be considered
* as a software asid extension.
@@ -88,7 +103,23 @@ static inline u64 asid_first_version(unsigned int cpu)
return ~asid_version_mask(cpu) + 1;
}
#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
{
if (cpu_has_mmid)
return atomic64_read(&mm->context.mmid);
return mm->context.asid[cpu];
}
static inline void set_cpu_context(unsigned int cpu,
struct mm_struct *mm, u64 ctx)
{
if (cpu_has_mmid)
atomic64_set(&mm->context.mmid, ctx);
else
mm->context.asid[cpu] = ctx;
}
#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
#define cpu_asid(cpu, mm) \
(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
@@ -97,21 +128,9 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
/* Normal, classic MIPS get_new_mmu_context */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
u64 asid = asid_cache(cpu);
if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
if (cpu_has_vtag_icache)
flush_icache_all();
local_flush_tlb_all(); /* start new asid cycle */
}
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
extern void get_new_mmu_context(struct mm_struct *mm);
extern void check_mmu_context(struct mm_struct *mm);
extern void check_switch_mmu_context(struct mm_struct *mm);
/*
* Initialize the context related info for a new mm_struct
@@ -122,8 +141,12 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
int i;
for_each_possible_cpu(i)
cpu_context(i, mm) = 0;
if (cpu_has_mmid) {
set_cpu_context(0, mm, 0);
} else {
for_each_possible_cpu(i)
set_cpu_context(i, mm, 0);
}
mm->context.bd_emupage_allocmap = NULL;
spin_lock_init(&mm->context.bd_emupage_lock);
@@ -140,11 +163,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
local_irq_save(flags);
htw_stop();
/* Check if our ASID is of an older version and thus invalid */
if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & asid_version_mask(cpu))
get_new_mmu_context(next, cpu);
write_c0_entryhi(cpu_asid(cpu, next));
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
check_switch_mmu_context(next);
/*
* Mark current->active_mm as not "active" anymore.
@@ -166,55 +185,55 @@ static inline void destroy_context(struct mm_struct *mm)
dsemul_mm_cleanup(mm);
}
#define activate_mm(prev, next) switch_mm(prev, next, current)
#define deactivate_mm(tsk, mm) do { } while (0)
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
drop_mmu_context(struct mm_struct *mm)
{
unsigned long flags;
unsigned int cpu = smp_processor_id();
unsigned int cpu;
u32 old_mmid;
u64 ctx;
local_irq_save(flags);
htw_stop();
/* Unconditionally get a new ASID. */
get_new_mmu_context(next, cpu);
cpu = smp_processor_id();
ctx = cpu_context(cpu, mm);
write_c0_entryhi(cpu_asid(cpu, next));
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
/* mark mmu ownership change */
cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next));
htw_start();
local_irq_restore(flags);
}
/*
* If mm is currently active_mm, we can't really drop it. Instead,
* we will get a new one for it.
*/
static inline void
drop_mmu_context(struct mm_struct *mm, unsigned cpu)
{
unsigned long flags;
local_irq_save(flags);
htw_stop();
if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
get_new_mmu_context(mm, cpu);
if (!ctx) {
/* no-op */
} else if (cpu_has_mmid) {
/*
* Globally invalidating TLB entries associated with the MMID
* is pretty cheap using the GINVT instruction, so we'll do
* that rather than incur the overhead of allocating a new
* MMID. The latter would be especially difficult since MMIDs
* are global & other CPUs may be actively using ctx.
*/
htw_stop();
old_mmid = read_c0_memorymapid();
write_c0_memorymapid(ctx & cpu_asid_mask(&cpu_data[cpu]));
mtc0_tlbw_hazard();
ginvt_mmid();
sync_ginv();
write_c0_memorymapid(old_mmid);
instruction_hazard();
htw_start();
} else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
/*
* mm is currently active, so we can't really drop it.
* Instead we bump the ASID.
*/
htw_stop();
get_new_mmu_context(mm);
write_c0_entryhi(cpu_asid(cpu, mm));
htw_start();
} else {
/* will get a new context next time */
cpu_context(cpu, mm) = 0;
set_cpu_context(cpu, mm, 0);
}
htw_start();
local_irq_restore(flags);
}


@@ -119,18 +119,6 @@ extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
extern int __cvmx_helper_board_interface_probe(int interface,
int supported_ports);
/**
* Enable packet input/output from the hardware. This function is
* called after by cvmx_helper_packet_hardware_enable() to
* perform board specific initialization. For most boards
* nothing is needed.
*
* @interface: Interface to enable
*
* Returns Zero on success, negative on failure
*/
extern int __cvmx_helper_board_hardware_enable(int interface);
enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(void);
#endif /* __CVMX_HELPER_BOARD_H__ */


@@ -1,276 +0,0 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_SMIX_DEFS_H__
#define __CVMX_SMIX_DEFS_H__
static inline uint64_t CVMX_SMIX_CLK(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001818ull) + (offset) * 256;
case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001818ull) + (offset) * 256;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000003818ull) + (offset) * 128;
}
return CVMX_ADD_IO_SEG(0x0001180000001818ull) + (offset) * 256;
}
static inline uint64_t CVMX_SMIX_CMD(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001800ull) + (offset) * 256;
case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001800ull) + (offset) * 256;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000003800ull) + (offset) * 128;
}
return CVMX_ADD_IO_SEG(0x0001180000001800ull) + (offset) * 256;
}
static inline uint64_t CVMX_SMIX_EN(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001820ull) + (offset) * 256;
case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001820ull) + (offset) * 256;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000003820ull) + (offset) * 128;
}
return CVMX_ADD_IO_SEG(0x0001180000001820ull) + (offset) * 256;
}
static inline uint64_t CVMX_SMIX_RD_DAT(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001810ull) + (offset) * 256;
case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001810ull) + (offset) * 256;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000003810ull) + (offset) * 128;
}
return CVMX_ADD_IO_SEG(0x0001180000001810ull) + (offset) * 256;
}
static inline uint64_t CVMX_SMIX_WR_DAT(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001808ull) + (offset) * 256;
case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001808ull) + (offset) * 256;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000003808ull) + (offset) * 128;
}
return CVMX_ADD_IO_SEG(0x0001180000001808ull) + (offset) * 256;
}
union cvmx_smix_clk {
uint64_t u64;
struct cvmx_smix_clk_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_25_63:39;
uint64_t mode:1;
uint64_t reserved_21_23:3;
uint64_t sample_hi:5;
uint64_t sample_mode:1;
uint64_t reserved_14_14:1;
uint64_t clk_idle:1;
uint64_t preamble:1;
uint64_t sample:4;
uint64_t phase:8;
#else
uint64_t phase:8;
uint64_t sample:4;
uint64_t preamble:1;
uint64_t clk_idle:1;
uint64_t reserved_14_14:1;
uint64_t sample_mode:1;
uint64_t sample_hi:5;
uint64_t reserved_21_23:3;
uint64_t mode:1;
uint64_t reserved_25_63:39;
#endif
} s;
struct cvmx_smix_clk_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_21_63:43;
uint64_t sample_hi:5;
uint64_t sample_mode:1;
uint64_t reserved_14_14:1;
uint64_t clk_idle:1;
uint64_t preamble:1;
uint64_t sample:4;
uint64_t phase:8;
#else
uint64_t phase:8;
uint64_t sample:4;
uint64_t preamble:1;
uint64_t clk_idle:1;
uint64_t reserved_14_14:1;
uint64_t sample_mode:1;
uint64_t sample_hi:5;
uint64_t reserved_21_63:43;
#endif
} cn30xx;
};
union cvmx_smix_cmd {
uint64_t u64;
struct cvmx_smix_cmd_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63:46;
uint64_t phy_op:2;
uint64_t reserved_13_15:3;
uint64_t phy_adr:5;
uint64_t reserved_5_7:3;
uint64_t reg_adr:5;
#else
uint64_t reg_adr:5;
uint64_t reserved_5_7:3;
uint64_t phy_adr:5;
uint64_t reserved_13_15:3;
uint64_t phy_op:2;
uint64_t reserved_18_63:46;
#endif
} s;
struct cvmx_smix_cmd_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_17_63:47;
uint64_t phy_op:1;
uint64_t reserved_13_15:3;
uint64_t phy_adr:5;
uint64_t reserved_5_7:3;
uint64_t reg_adr:5;
#else
uint64_t reg_adr:5;
uint64_t reserved_5_7:3;
uint64_t phy_adr:5;
uint64_t reserved_13_15:3;
uint64_t phy_op:1;
uint64_t reserved_17_63:47;
#endif
} cn30xx;
};
union cvmx_smix_en {
uint64_t u64;
struct cvmx_smix_en_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_1_63:63;
uint64_t en:1;
#else
uint64_t en:1;
uint64_t reserved_1_63:63;
#endif
} s;
};
union cvmx_smix_rd_dat {
uint64_t u64;
struct cvmx_smix_rd_dat_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63:46;
uint64_t pending:1;
uint64_t val:1;
uint64_t dat:16;
#else
uint64_t dat:16;
uint64_t val:1;
uint64_t pending:1;
uint64_t reserved_18_63:46;
#endif
} s;
};
union cvmx_smix_wr_dat {
uint64_t u64;
struct cvmx_smix_wr_dat_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63:46;
uint64_t pending:1;
uint64_t val:1;
uint64_t dat:16;
#else
uint64_t dat:16;
uint64_t val:1;
uint64_t pending:1;
uint64_t reserved_18_63:46;
#endif
} s;
};
#endif
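For reference, a clause-22 MDIO read drives these SMI_X registers roughly as sketched below. This is an illustrative sketch only: the cvmx_write_csr()/cvmx_read_csr() accessors are assumed from the Octeon SDK headers, and the phy_op encoding (1 = read) is assumed to match the register layouts above; the in-tree MDIO helper may differ in detail.

	/* Illustrative clause-22 MDIO read using the SMI_X registers above. */
	static int smix_mdio_read(int bus, int phy, int reg)
	{
		union cvmx_smix_cmd cmd;
		union cvmx_smix_rd_dat rd;
		int timeout = 1000;

		cmd.u64 = 0;
		cmd.s.phy_op = 1;		/* clause-22 read (assumed encoding) */
		cmd.s.phy_adr = phy;
		cmd.s.reg_adr = reg;
		cvmx_write_csr(CVMX_SMIX_CMD(bus), cmd.u64);

		do {
			rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus));
		} while (rd.s.pending && timeout--);

		return rd.s.val ? rd.s.dat : -1;
	}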


@ -45,18 +45,21 @@
#ifndef __ASSEMBLY__
/*
* All accesses to bridge hardware registers must be done
* using 32-bit loads and stores.
*/
typedef u32 bridgereg_t;
#define ATE_V 0x01
#define ATE_CO 0x02
#define ATE_PREC 0x04
#define ATE_PREF 0x08
#define ATE_BAR 0x10
typedef u64 bridge_ate_t;
#define ATE_PFNSHIFT 12
#define ATE_TIDSHIFT 8
#define ATE_RMFSHIFT 48
/* pointers to bridge ATEs
* are always "pointer to volatile"
*/
typedef volatile bridge_ate_t *bridge_ate_p;
#define mkate(xaddr, xid, attr) (((xaddr) & 0x0000fffffffff000ULL) | \
((xid)<<ATE_TIDSHIFT) | \
(attr))
#define BRIDGE_INTERNAL_ATES 128
/*
* It is generally preferred that hardware registers on the bridge
@ -65,7 +68,7 @@ typedef volatile bridge_ate_t *bridge_ate_p;
* Generated from Bridge spec dated 04oct95
*/
typedef volatile struct bridge_s {
struct bridge_regs {
/* Local Registers 0x000000-0x00FFFF */
/* standard widget configuration 0x000000-0x000057 */
@ -86,105 +89,105 @@ typedef volatile struct bridge_s {
#define b_wid_tflush b_widget.w_tflush
/* bridge-specific widget configuration 0x000058-0x00007F */
bridgereg_t _pad_000058;
bridgereg_t b_wid_aux_err; /* 0x00005C */
bridgereg_t _pad_000060;
bridgereg_t b_wid_resp_upper; /* 0x000064 */
bridgereg_t _pad_000068;
bridgereg_t b_wid_resp_lower; /* 0x00006C */
bridgereg_t _pad_000070;
bridgereg_t b_wid_tst_pin_ctrl; /* 0x000074 */
bridgereg_t _pad_000078[2];
u32 _pad_000058;
u32 b_wid_aux_err; /* 0x00005C */
u32 _pad_000060;
u32 b_wid_resp_upper; /* 0x000064 */
u32 _pad_000068;
u32 b_wid_resp_lower; /* 0x00006C */
u32 _pad_000070;
u32 b_wid_tst_pin_ctrl; /* 0x000074 */
u32 _pad_000078[2];
/* PMU & Map 0x000080-0x00008F */
bridgereg_t _pad_000080;
bridgereg_t b_dir_map; /* 0x000084 */
bridgereg_t _pad_000088[2];
u32 _pad_000080;
u32 b_dir_map; /* 0x000084 */
u32 _pad_000088[2];
/* SSRAM 0x000090-0x00009F */
bridgereg_t _pad_000090;
bridgereg_t b_ram_perr; /* 0x000094 */
bridgereg_t _pad_000098[2];
u32 _pad_000090;
u32 b_ram_perr; /* 0x000094 */
u32 _pad_000098[2];
/* Arbitration 0x0000A0-0x0000AF */
bridgereg_t _pad_0000A0;
bridgereg_t b_arb; /* 0x0000A4 */
bridgereg_t _pad_0000A8[2];
u32 _pad_0000A0;
u32 b_arb; /* 0x0000A4 */
u32 _pad_0000A8[2];
/* Number In A Can 0x0000B0-0x0000BF */
bridgereg_t _pad_0000B0;
bridgereg_t b_nic; /* 0x0000B4 */
bridgereg_t _pad_0000B8[2];
u32 _pad_0000B0;
u32 b_nic; /* 0x0000B4 */
u32 _pad_0000B8[2];
/* PCI/GIO 0x0000C0-0x0000FF */
bridgereg_t _pad_0000C0;
bridgereg_t b_bus_timeout; /* 0x0000C4 */
u32 _pad_0000C0;
u32 b_bus_timeout; /* 0x0000C4 */
#define b_pci_bus_timeout b_bus_timeout
bridgereg_t _pad_0000C8;
bridgereg_t b_pci_cfg; /* 0x0000CC */
bridgereg_t _pad_0000D0;
bridgereg_t b_pci_err_upper; /* 0x0000D4 */
bridgereg_t _pad_0000D8;
bridgereg_t b_pci_err_lower; /* 0x0000DC */
bridgereg_t _pad_0000E0[8];
u32 _pad_0000C8;
u32 b_pci_cfg; /* 0x0000CC */
u32 _pad_0000D0;
u32 b_pci_err_upper; /* 0x0000D4 */
u32 _pad_0000D8;
u32 b_pci_err_lower; /* 0x0000DC */
u32 _pad_0000E0[8];
#define b_gio_err_lower b_pci_err_lower
#define b_gio_err_upper b_pci_err_upper
/* Interrupt 0x000100-0x0001FF */
bridgereg_t _pad_000100;
bridgereg_t b_int_status; /* 0x000104 */
bridgereg_t _pad_000108;
bridgereg_t b_int_enable; /* 0x00010C */
bridgereg_t _pad_000110;
bridgereg_t b_int_rst_stat; /* 0x000114 */
bridgereg_t _pad_000118;
bridgereg_t b_int_mode; /* 0x00011C */
bridgereg_t _pad_000120;
bridgereg_t b_int_device; /* 0x000124 */
bridgereg_t _pad_000128;
bridgereg_t b_int_host_err; /* 0x00012C */
u32 _pad_000100;
u32 b_int_status; /* 0x000104 */
u32 _pad_000108;
u32 b_int_enable; /* 0x00010C */
u32 _pad_000110;
u32 b_int_rst_stat; /* 0x000114 */
u32 _pad_000118;
u32 b_int_mode; /* 0x00011C */
u32 _pad_000120;
u32 b_int_device; /* 0x000124 */
u32 _pad_000128;
u32 b_int_host_err; /* 0x00012C */
struct {
bridgereg_t __pad; /* 0x0001{30,,,68} */
bridgereg_t addr; /* 0x0001{34,,,6C} */
u32 __pad; /* 0x0001{30,,,68} */
u32 addr; /* 0x0001{34,,,6C} */
} b_int_addr[8]; /* 0x000130 */
bridgereg_t _pad_000170[36];
u32 _pad_000170[36];
/* Device 0x000200-0x0003FF */
struct {
bridgereg_t __pad; /* 0x0002{00,,,38} */
bridgereg_t reg; /* 0x0002{04,,,3C} */
u32 __pad; /* 0x0002{00,,,38} */
u32 reg; /* 0x0002{04,,,3C} */
} b_device[8]; /* 0x000200 */
struct {
bridgereg_t __pad; /* 0x0002{40,,,78} */
bridgereg_t reg; /* 0x0002{44,,,7C} */
u32 __pad; /* 0x0002{40,,,78} */
u32 reg; /* 0x0002{44,,,7C} */
} b_wr_req_buf[8]; /* 0x000240 */
struct {
bridgereg_t __pad; /* 0x0002{80,,,88} */
bridgereg_t reg; /* 0x0002{84,,,8C} */
u32 __pad; /* 0x0002{80,,,88} */
u32 reg; /* 0x0002{84,,,8C} */
} b_rrb_map[2]; /* 0x000280 */
#define b_even_resp b_rrb_map[0].reg /* 0x000284 */
#define b_odd_resp b_rrb_map[1].reg /* 0x00028C */
bridgereg_t _pad_000290;
bridgereg_t b_resp_status; /* 0x000294 */
bridgereg_t _pad_000298;
bridgereg_t b_resp_clear; /* 0x00029C */
u32 _pad_000290;
u32 b_resp_status; /* 0x000294 */
u32 _pad_000298;
u32 b_resp_clear; /* 0x00029C */
bridgereg_t _pad_0002A0[24];
u32 _pad_0002A0[24];
char _pad_000300[0x10000 - 0x000300];
/* Internal Address Translation Entry RAM 0x010000-0x0103FF */
union {
bridge_ate_t wr; /* write-only */
u64 wr; /* write-only */
struct {
bridgereg_t _p_pad;
bridgereg_t rd; /* read-only */
u32 _p_pad;
u32 rd; /* read-only */
} hi;
} b_int_ate_ram[128];
@ -192,8 +195,8 @@ typedef volatile struct bridge_s {
/* Internal Address Translation Entry RAM LOW 0x011000-0x0113FF */
struct {
bridgereg_t _p_pad;
bridgereg_t rd; /* read-only */
u32 _p_pad;
u32 rd; /* read-only */
} b_int_ate_ram_lo[128];
char _pad_011400[0x20000 - 0x011400];
@ -212,7 +215,7 @@ typedef volatile struct bridge_s {
} f[8];
} b_type0_cfg_dev[8]; /* 0x020000 */
/* PCI Type 1 Configuration Space 0x028000-0x028FFF */
/* PCI Type 1 Configuration Space 0x028000-0x028FFF */
union { /* make all access sizes available. */
u8 c[0x1000 / 1];
u16 s[0x1000 / 2];
@ -233,7 +236,7 @@ typedef volatile struct bridge_s {
u8 _pad_030007[0x04fff8]; /* 0x030008-0x07FFFF */
/* External Address Translation Entry RAM 0x080000-0x0FFFFF */
bridge_ate_t b_ext_ate_ram[0x10000];
u64 b_ext_ate_ram[0x10000];
/* Reserved 0x100000-0x1FFFFF */
char _pad_100000[0x200000-0x100000];
@ -259,13 +262,13 @@ typedef volatile struct bridge_s {
u32 l[0x400000 / 4]; /* read-only */
u64 d[0x400000 / 8]; /* read-only */
} b_external_flash; /* 0xC00000 */
} bridge_t;
};
/*
* Field formats for Error Command Word and Auxiliary Error Command Word
* of bridge.
*/
typedef struct bridge_err_cmdword_s {
struct bridge_err_cmdword {
union {
u32 cmd_word;
struct {
@ -282,7 +285,7 @@ typedef struct bridge_err_cmdword_s {
rsvd:8;
} berr_st;
} berr_un;
} bridge_err_cmdword_t;
};
#define berr_field berr_un.berr_st
#endif /* !__ASSEMBLY__ */
@ -290,7 +293,7 @@ typedef struct bridge_err_cmdword_s {
/*
* The values of these macros can and should be crosschecked
* regularly against the offsets of the like-named fields
* within the "bridge_t" structure above.
* within the bridge_regs structure above.
*/
/* Byte offset macros for Bridge internal registers */
@ -797,49 +800,14 @@ typedef struct bridge_err_cmdword_s {
#define PCI64_ATTR_RMF_MASK 0x00ff000000000000
#define PCI64_ATTR_RMF_SHFT 48
#ifndef __ASSEMBLY__
/* Address translation entry for mapped pci32 accesses */
typedef union ate_u {
u64 ent;
struct ate_s {
u64 rmf:16;
u64 addr:36;
u64 targ:4;
u64 reserved:3;
u64 barrier:1;
u64 prefetch:1;
u64 precise:1;
u64 coherent:1;
u64 valid:1;
} field;
} ate_t;
#endif /* !__ASSEMBLY__ */
#define ATE_V 0x01
#define ATE_CO 0x02
#define ATE_PREC 0x04
#define ATE_PREF 0x08
#define ATE_BAR 0x10
#define ATE_PFNSHIFT 12
#define ATE_TIDSHIFT 8
#define ATE_RMFSHIFT 48
#define mkate(xaddr, xid, attr) ((xaddr) & 0x0000fffffffff000ULL) | \
((xid)<<ATE_TIDSHIFT) | \
(attr)
#define BRIDGE_INTERNAL_ATES 128
struct bridge_controller {
struct pci_controller pc;
struct resource mem;
struct resource io;
struct resource busn;
bridge_t *base;
struct bridge_regs *base;
nasid_t nasid;
unsigned int widget_id;
unsigned int irq_cpu;
u64 baddr;
unsigned int pci_int[8];
};
@ -847,8 +815,14 @@ struct bridge_controller {
#define BRIDGE_CONTROLLER(bus) \
((struct bridge_controller *)((bus)->sysdata))
extern void register_bridge_irq(unsigned int irq);
extern int request_bridge_irq(struct bridge_controller *bc);
#define bridge_read(bc, reg) __raw_readl(&bc->base->reg)
#define bridge_write(bc, reg, val) __raw_writel(val, &bc->base->reg)
#define bridge_set(bc, reg, val) \
__raw_writel(__raw_readl(&bc->base->reg) | (val), &bc->base->reg)
#define bridge_clr(bc, reg, val) \
__raw_writel(__raw_readl(&bc->base->reg) & ~(val), &bc->base->reg)
extern int request_bridge_irq(struct bridge_controller *bc, int pin);
extern struct pci_ops bridge_pci_ops;
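The bridge_read/bridge_write/bridge_set/bridge_clr accessors above replace direct dereferences of the old volatile bridge_t; a typical use is masking or unmasking one interrupt pin in b_int_enable, roughly as in this illustrative sketch (the actual IP27 interrupt code may differ in detail):

	/* Illustrative: enable or disable a single bridge interrupt pin. */
	static void bridge_int_unmask(struct bridge_controller *bc, int pin)
	{
		bridge_set(bc, b_int_enable, 1 << pin);
	}

	static void bridge_int_mask(struct bridge_controller *bc, int pin)
	{
		bridge_clr(bc, b_int_enable, 1 << pin);
	}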


@ -17,6 +17,7 @@
#include <asm/pgtable-64.h>
#endif
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
@ -204,51 +205,11 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
#ifdef CONFIG_SMP
/*
* For SMP, multiple CPUs can race, so we need to do
* this atomically.
*/
unsigned long page_global = _PAGE_GLOBAL;
unsigned long tmp;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__ (
" .set push \n"
" .set arch=r4000 \n"
" .set noreorder \n"
"1:" __LL "%[tmp], %[buddy] \n"
" bnez %[tmp], 2f \n"
" or %[tmp], %[tmp], %[global] \n"
__SC "%[tmp], %[buddy] \n"
" beqzl %[tmp], 1b \n"
" nop \n"
"2: \n"
" .set pop \n"
: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
: [global] "r" (page_global));
} else if (kernel_uses_llsc) {
loongson_llsc_mb();
__asm__ __volatile__ (
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" .set noreorder \n"
"1:" __LL "%[tmp], %[buddy] \n"
" bnez %[tmp], 2f \n"
" or %[tmp], %[tmp], %[global] \n"
__SC "%[tmp], %[buddy] \n"
" beqz %[tmp], 1b \n"
" nop \n"
"2: \n"
" .set pop \n"
: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
: [global] "r" (page_global));
loongson_llsc_mb();
}
#else /* !CONFIG_SMP */
if (pte_none(*buddy))
pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
# else
cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
# endif
}
#endif
}
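Note that the cmpxchg above is effectively equivalent to the removed branches: it stores _PAGE_GLOBAL into the buddy PTE only if that PTE is currently zero (i.e. none), and it does so atomically, which is why both the hand-written R10000/LLSC assembly and the separate non-SMP path could be dropped.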


@ -29,6 +29,7 @@ struct plat_smp_ops {
int (*boot_secondary)(int cpu, struct task_struct *idle);
void (*smp_setup)(void);
void (*prepare_cpus)(unsigned int max_cpus);
void (*prepare_boot_cpu)(void);
#ifdef CONFIG_HOTPLUG_CPU
int (*cpu_disable)(void);
void (*cpu_die)(unsigned int cpu);


@ -27,16 +27,11 @@
#ifndef __ASSEMBLY__
#define PS_UINT_CAST (unsigned long)
#define UINT64_CAST (unsigned long)
#define HUBREG_CAST (volatile hubreg_t *)
#else /* __ASSEMBLY__ */
#define PS_UINT_CAST
#define UINT64_CAST
#define HUBREG_CAST
#endif /* __ASSEMBLY__ */
@ -256,41 +251,22 @@
* Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
* They're always safe.
*/
#define LOCAL_HUB_ADDR(_x) (HUBREG_CAST (IALIAS_BASE + (_x)))
#define REMOTE_HUB_ADDR(_n, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \
0x800000 + (_x)))
#ifdef CONFIG_SGI_IP27
#define REMOTE_HUB_PI_ADDR(_n, _sn, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \
0x800000 + (_x)))
#endif /* CONFIG_SGI_IP27 */
#define LOCAL_HUB_ADDR(_x) (IALIAS_BASE + (_x))
#define REMOTE_HUB_ADDR(_n, _x) ((NODE_SWIN_BASE(_n, 1) + 0x800000 + (_x)))
#ifndef __ASSEMBLY__
#define HUB_L(_a) *(_a)
#define HUB_S(_a, _d) *(_a) = (_d)
#define LOCAL_HUB_PTR(_x) ((u64 *)LOCAL_HUB_ADDR((_x)))
#define REMOTE_HUB_PTR(_n, _x) ((u64 *)REMOTE_HUB_ADDR((_n), (_x)))
#define LOCAL_HUB_L(_r) HUB_L(LOCAL_HUB_ADDR(_r))
#define LOCAL_HUB_S(_r, _d) HUB_S(LOCAL_HUB_ADDR(_r), (_d))
#define REMOTE_HUB_L(_n, _r) HUB_L(REMOTE_HUB_ADDR((_n), (_r)))
#define REMOTE_HUB_S(_n, _r, _d) HUB_S(REMOTE_HUB_ADDR((_n), (_r)), (_d))
#define REMOTE_HUB_PI_L(_n, _sn, _r) HUB_L(REMOTE_HUB_PI_ADDR((_n), (_sn), (_r)))
#define REMOTE_HUB_PI_S(_n, _sn, _r, _d) HUB_S(REMOTE_HUB_PI_ADDR((_n), (_sn), (_r)), (_d))
#define LOCAL_HUB_L(_r) __raw_readq(LOCAL_HUB_PTR(_r))
#define LOCAL_HUB_S(_r, _d) __raw_writeq((_d), LOCAL_HUB_PTR(_r))
#define REMOTE_HUB_L(_n, _r) __raw_readq(REMOTE_HUB_PTR((_n), (_r)))
#define REMOTE_HUB_S(_n, _r, _d) __raw_writeq((_d), \
REMOTE_HUB_PTR((_n), (_r)))
#endif /* !__ASSEMBLY__ */
/*
* The following macros are used to get to a hub/bridge register, given
* the base of the register space.
*/
#define HUB_REG_PTR(_base, _off) \
(HUBREG_CAST((__psunsigned_t)(_base) + (__psunsigned_t)(_off)))
#define HUB_REG_PTR_L(_base, _off) \
HUB_L(HUB_REG_PTR((_base), (_off)))
#define HUB_REG_PTR_S(_base, _off, _data) \
HUB_S(HUB_REG_PTR((_base), (_off)), (_data))
/*
* Software structure locations -- permanently fixed
* See diagram in kldir.h
@ -387,44 +363,14 @@
#define SYMMON_STK_END(nasid) (SYMMON_STK_ADDR(nasid, 0) + KLD_SYMMON_STK(nasid)->size)
/* loading symmon 4k below UNIX. the arcs loader needs the topaddr for a
* relocatable program
*/
#define UNIX_DEBUG_LOADADDR 0x300000
#define SYMMON_LOADADDR(nasid) \
TO_NODE(nasid, PHYS_TO_K0(UNIX_DEBUG_LOADADDR - 0x1000))
#define FREEMEM_OFFSET(nasid) KLD_FREEMEM(nasid)->offset
#define FREEMEM_ADDR(nasid) SYMMON_STK_END(nasid)
/*
* XXX
* Fix this. FREEMEM_ADDR should be aware of if symmon is loaded.
* Also, it should take into account what prom thinks to be a safe
* address
PHYS_TO_K0(NODE_OFFSET(nasid) + FREEMEM_OFFSET(nasid))
*/
#define FREEMEM_SIZE(nasid) KLD_FREEMEM(nasid)->size
#define PI_ERROR_OFFSET(nasid) KLD_PI_ERROR(nasid)->offset
#define PI_ERROR_ADDR(nasid) \
TO_NODE_UNCAC((nasid), PI_ERROR_OFFSET(nasid))
#define PI_ERROR_SIZE(nasid) KLD_PI_ERROR(nasid)->size
#define NODE_OFFSET_TO_K0(_nasid, _off) \
PHYS_TO_K0((NODE_OFFSET(_nasid) + (_off)) | CAC_BASE)
#define NODE_OFFSET_TO_K1(_nasid, _off) \
TO_UNCAC((NODE_OFFSET(_nasid) + (_off)) | UNCAC_BASE)
#define K0_TO_NODE_OFFSET(_k0addr) \
((__psunsigned_t)(_k0addr) & NODE_ADDRSPACE_MASK)
#define KERN_VARS_ADDR(nasid) KLD_KERN_VARS(nasid)->pointer
#define KERN_VARS_SIZE(nasid) KLD_KERN_VARS(nasid)->size
#define KERN_XP_ADDR(nasid) KLD_KERN_XP(nasid)->pointer
#define KERN_XP_SIZE(nasid) KLD_KERN_XP(nasid)->size
#define GPDA_ADDR(nasid) TO_NODE_CAC(nasid, GPDA_OFFSET)
#endif /* !__ASSEMBLY__ */


@ -17,8 +17,6 @@
#include <asm/sn/sn0/arch.h>
#endif
typedef u64 hubreg_t;
#define cputonasid(cpu) (sn_cpu_info[(cpu)].p_nasid)
#define cputoslice(cpu) (sn_cpu_info[(cpu)].p_slice)
#define makespnum(_nasid, _slice) \


@ -44,7 +44,7 @@
IIO_ITTE_PUT((nasid), HUB_PIO_MAP_TO_MEM, \
(bigwin), IIO_ITTE_INVALID_WIDGET, 0)
#define IIO_ITTE_GET(nasid, bigwin) REMOTE_HUB_ADDR((nasid), IIO_ITTE(bigwin))
#define IIO_ITTE_GET(nasid, bigwin) REMOTE_HUB_PTR((nasid), IIO_ITTE(bigwin))
/*
* Macro which takes the widget number, and returns the


@ -134,11 +134,6 @@
#define CALIAS_BASE CAC_BASE
#define BRIDGE_REG_PTR(_base, _off) ((volatile bridgereg_t *) \
((__psunsigned_t)(_base) + (__psunsigned_t)(_off)))
#define SN0_WIDGET_BASE(_nasid, _wid) (NODE_SWIN_BASE((_nasid), (_wid)))
/* Turn on sable logging for the processors whose bits are set. */


@ -14,7 +14,6 @@
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
*/
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void local_flush_tlb_kernel_range(unsigned long start,
@ -23,6 +22,8 @@ extern void local_flush_tlb_page(struct vm_area_struct *vma,
unsigned long page);
extern void local_flush_tlb_one(unsigned long vaddr);
#include <asm/mmu_context.h>
#ifdef CONFIG_SMP
extern void flush_tlb_all(void);
@ -36,7 +37,7 @@ extern void flush_tlb_one(unsigned long vaddr);
#else /* CONFIG_SMP */
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
#define flush_tlb_mm(mm) drop_mmu_context(mm)
#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end)
#define flush_tlb_kernel_range(vmaddr,end) \
local_flush_tlb_kernel_range(vmaddr, end)


@ -31,7 +31,6 @@
#define JZ4740_EMC_SDRAM_CTRL 0x80
static void __init jz4740_detect_mem(void)
{
void __iomem *jz_emc_base;
@ -66,15 +65,22 @@ static unsigned long __init get_board_mach_type(const void *fdt)
void __init plat_mem_setup(void)
{
int offset;
void *dtb;
jz4740_reset_init();
__dt_setup_arch(__dtb_start);
offset = fdt_path_offset(__dtb_start, "/memory");
if (__dtb_start != __dtb_end)
dtb = __dtb_start;
else
dtb = (void *)fw_passed_dtb;
__dt_setup_arch(dtb);
offset = fdt_path_offset(dtb, "/memory");
if (offset < 0)
jz4740_detect_mem();
mips_machtype = get_board_mach_type(__dtb_start);
mips_machtype = get_board_mach_type(dtb);
}
void __init device_tree_init(void)


@ -872,10 +872,19 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
static inline unsigned int decode_config5(struct cpuinfo_mips *c)
{
unsigned int config5;
unsigned int config5, max_mmid_width;
unsigned long asid_mask;
config5 = read_c0_config5();
config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE);
if (cpu_has_mips_r6) {
if (!__builtin_constant_p(cpu_has_mmid) || cpu_has_mmid)
config5 |= MIPS_CONF5_MI;
else
config5 &= ~MIPS_CONF5_MI;
}
write_c0_config5(config5);
if (config5 & MIPS_CONF5_EVA)
@ -894,6 +903,50 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
if (config5 & MIPS_CONF5_CRCP)
elf_hwcap |= HWCAP_MIPS_CRC32;
if (cpu_has_mips_r6) {
/* Ensure the write to config5 above takes effect */
back_to_back_c0_hazard();
/* Check whether we successfully enabled MMID support */
config5 = read_c0_config5();
if (config5 & MIPS_CONF5_MI)
c->options |= MIPS_CPU_MMID;
/*
* Warn if we've hardcoded cpu_has_mmid to a value unsuitable
* for the CPU we're running on, or if CPUs in an SMP system
* have inconsistent MMID support.
*/
WARN_ON(!!cpu_has_mmid != !!(config5 & MIPS_CONF5_MI));
if (cpu_has_mmid) {
write_c0_memorymapid(~0ul);
back_to_back_c0_hazard();
asid_mask = read_c0_memorymapid();
/*
* We maintain a bitmap to track MMID allocation, and
* need a sensible upper bound on the size of that
* bitmap. The initial CPU with MMID support (I6500)
* supports 16 bit MMIDs, which gives us an 8KiB
* bitmap. The architecture recommends that hardware
* support 32 bit MMIDs, which would give us a 512MiB
* bitmap - that's too big in most cases.
*
* Cap MMID width at 16 bits for now & we can revisit
* this if & when hardware supports anything wider.
*/
max_mmid_width = 16;
if (asid_mask > GENMASK(max_mmid_width - 1, 0)) {
pr_info("Capping MMID width at %d bits",
max_mmid_width);
asid_mask = GENMASK(max_mmid_width - 1, 0);
}
set_cpu_asid_mask(c, asid_mask);
}
}
return config5 & MIPS_CONF_M;
}
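To make the bitmap sizes in the comment above concrete: a 16-bit MMID space provides 2^16 = 65,536 IDs, so a one-bit-per-ID allocation bitmap takes 65,536 / 8 = 8 KiB, whereas a full 32-bit MMID space would need 2^32 / 8 bytes = 512 MiB, hence the cap at 16 bits.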


@ -52,6 +52,7 @@ asmlinkage void spurious_interrupt(void)
void __init init_IRQ(void)
{
int i;
unsigned int order = get_order(IRQ_STACK_SIZE);
for (i = 0; i < NR_IRQS; i++)
irq_set_noprobe(i);
@ -62,8 +63,7 @@ void __init init_IRQ(void)
arch_init_irq();
for_each_possible_cpu(i) {
int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
void *s = (void *)__get_free_pages(GFP_KERNEL, order);
irq_stack[i] = s;
pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
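As a worked example of the allocation fix (the exact sizes depend on configuration): with 4 KiB pages and a 16 KiB IRQ stack, IRQ_STACK_SIZE / PAGE_SIZE is 4, but __get_free_pages() treats its second argument as an order, so the old code handed back 2^4 = 16 pages (64 KiB) per CPU; get_order(IRQ_STACK_SIZE) is 2, which yields the intended 4 pages.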


@ -382,8 +382,8 @@ void mips_cm_error_report(void)
sc_bit ? "True" : "False",
cm2_cmd[cmd_bits], sport_bits);
}
pr_err("CM_ERROR=%08llx %s <%s>\n", cm_error,
cm2_causes[cause], buf);
pr_err("CM_ERROR=%08llx %s <%s>\n", cm_error,
cm2_causes[cause], buf);
pr_err("CM_ADDR =%08llx\n", cm_addr);
pr_err("CM_OTHER=%08llx %s\n", cm_other, cm2_causes[ocause]);
} else { /* CM3 */


@ -2351,23 +2351,10 @@ DEFINE_SHOW_ATTRIBUTE(mipsr2_clear);
static int __init mipsr2_init_debugfs(void)
{
struct dentry *mipsr2_emul;
if (!mips_debugfs_dir)
return -ENODEV;
mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO,
mips_debugfs_dir, NULL,
&mipsr2_emul_fops);
if (!mipsr2_emul)
return -ENOMEM;
mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO,
mips_debugfs_dir, NULL,
&mipsr2_clear_fops);
if (!mipsr2_emul)
return -ENOMEM;
debugfs_create_file("r2_emul_stats", S_IRUGO, mips_debugfs_dir, NULL,
&mipsr2_emul_fops);
debugfs_create_file("r2_emul_stats_clear", S_IRUGO, mips_debugfs_dir,
NULL, &mipsr2_clear_fops);
return 0;
}


@ -95,18 +95,9 @@ static const struct file_operations segments_fops = {
static int __init segments_info(void)
{
struct dentry *segments;
if (cpu_has_segments) {
if (!mips_debugfs_dir)
return -ENODEV;
segments = debugfs_create_file("segments", S_IRUGO,
mips_debugfs_dir, NULL,
&segments_fops);
if (!segments)
return -ENOMEM;
}
if (cpu_has_segments)
debugfs_create_file("segments", S_IRUGO, mips_debugfs_dir, NULL,
&segments_fops);
return 0;
}


@ -1011,12 +1011,7 @@ unsigned long fw_passed_dtb;
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
struct dentry *d;
d = debugfs_create_dir("mips", NULL);
if (!d)
return -ENOMEM;
mips_debugfs_dir = d;
mips_debugfs_dir = debugfs_create_dir("mips", NULL);
return 0;
}
arch_initcall(debugfs_mips);


@ -39,6 +39,7 @@
#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/ginvt.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
@ -443,6 +444,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
if (mp_ops->prepare_boot_cpu)
mp_ops->prepare_boot_cpu();
set_cpu_possible(0, true);
set_cpu_online(0, true);
}
@ -482,12 +485,21 @@ static void flush_tlb_all_ipi(void *info)
void flush_tlb_all(void)
{
if (cpu_has_mmid) {
htw_stop();
ginvt_full();
sync_ginv();
instruction_hazard();
htw_start();
return;
}
on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}
static void flush_tlb_mm_ipi(void *mm)
{
local_flush_tlb_mm((struct mm_struct *)mm);
drop_mmu_context((struct mm_struct *)mm);
}
/*
@ -530,17 +542,22 @@ void flush_tlb_mm(struct mm_struct *mm)
{
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
if (cpu_has_mmid) {
/*
* No need to worry about other CPUs - the ginvt in
* drop_mmu_context() will be globalized.
*/
} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
} else {
unsigned int cpu;
for_each_online_cpu(cpu) {
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
cpu_context(cpu, mm) = 0;
set_cpu_context(cpu, mm, 0);
}
}
local_flush_tlb_mm(mm);
drop_mmu_context(mm);
preempt_enable();
}
@ -561,9 +578,26 @@ static void flush_tlb_range_ipi(void *info)
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long addr;
u32 old_mmid;
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
if (cpu_has_mmid) {
htw_stop();
old_mmid = read_c0_memorymapid();
write_c0_memorymapid(cpu_asid(0, mm));
mtc0_tlbw_hazard();
addr = round_down(start, PAGE_SIZE * 2);
end = round_up(end, PAGE_SIZE * 2);
do {
ginvt_va_mmid(addr);
sync_ginv();
addr += PAGE_SIZE * 2;
} while (addr < end);
write_c0_memorymapid(old_mmid);
instruction_hazard();
htw_start();
} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
struct flush_tlb_data fd = {
.vma = vma,
.addr1 = start,
@ -571,6 +605,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
};
smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
local_flush_tlb_range(vma, start, end);
} else {
unsigned int cpu;
int exec = vma->vm_flags & VM_EXEC;
@ -583,10 +618,10 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
* mm has been completely unused by that CPU.
*/
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
cpu_context(cpu, mm) = !exec;
set_cpu_context(cpu, mm, !exec);
}
local_flush_tlb_range(vma, start, end);
}
local_flush_tlb_range(vma, start, end);
preempt_enable();
}
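A note on the rounding in the GINVT path above: each MIPS TLB entry maps an even/odd pair of pages, so invalidating by virtual address works at a 2 * PAGE_SIZE granularity; rounding start down and end up to that boundary ensures the loop hits every entry that could overlap the requested range.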
@ -616,14 +651,28 @@ static void flush_tlb_page_ipi(void *info)
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
u32 old_mmid;
preempt_disable();
if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
if (cpu_has_mmid) {
htw_stop();
old_mmid = read_c0_memorymapid();
write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
mtc0_tlbw_hazard();
ginvt_va_mmid(page);
sync_ginv();
write_c0_memorymapid(old_mmid);
instruction_hazard();
htw_start();
} else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
(current->mm != vma->vm_mm)) {
struct flush_tlb_data fd = {
.vma = vma,
.addr1 = page,
};
smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
local_flush_tlb_page(vma, page);
} else {
unsigned int cpu;
@ -635,10 +684,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
* by that CPU.
*/
if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
cpu_context(cpu, vma->vm_mm) = 1;
set_cpu_context(cpu, vma->vm_mm, 1);
}
local_flush_tlb_page(vma, page);
}
local_flush_tlb_page(vma, page);
preempt_enable();
}


@ -118,23 +118,10 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n");
static int __init spinlock_test(void)
{
struct dentry *d;
if (!mips_debugfs_dir)
return -ENODEV;
d = debugfs_create_file("spin_single", S_IRUGO,
mips_debugfs_dir, NULL,
&fops_ss);
if (!d)
return -ENOMEM;
d = debugfs_create_file("spin_multi", S_IRUGO,
mips_debugfs_dir, NULL,
&fops_multi);
if (!d)
return -ENOMEM;
debugfs_create_file("spin_single", S_IRUGO, mips_debugfs_dir, NULL,
&fops_ss);
debugfs_create_file("spin_multi", S_IRUGO, mips_debugfs_dir, NULL,
&fops_multi);
return 0;
}
device_initcall(spinlock_test);


@ -2223,7 +2223,9 @@ void per_cpu_trap_init(bool is_boot_cpu)
cp0_fdc_irq = -1;
}
if (!cpu_data[cpu].asid_cache)
if (cpu_has_mmid)
cpu_data[cpu].asid_cache = 0;
else if (!cpu_data[cpu].asid_cache)
cpu_data[cpu].asid_cache = asid_first_version(cpu);
mmgrab(&init_mm);


@ -89,6 +89,7 @@
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#define STR(x) __STR(x)
@ -2374,18 +2375,10 @@ asmlinkage void do_ade(struct pt_regs *regs)
#ifdef CONFIG_DEBUG_FS
static int __init debugfs_unaligned(void)
{
struct dentry *d;
if (!mips_debugfs_dir)
return -ENODEV;
d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
mips_debugfs_dir, &unaligned_instructions);
if (!d)
return -ENOMEM;
d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
mips_debugfs_dir, &unaligned_action);
if (!d)
return -ENOMEM;
debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir,
&unaligned_instructions);
debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
mips_debugfs_dir, &unaligned_action);
return 0;
}
arch_initcall(debugfs_unaligned);


@ -1016,10 +1016,10 @@ static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
*/
preempt_disable();
cpu = smp_processor_id();
get_new_mmu_context(kern_mm, cpu);
get_new_mmu_context(kern_mm);
for_each_possible_cpu(i)
if (i != cpu)
cpu_context(i, kern_mm) = 0;
set_cpu_context(i, kern_mm, 0);
preempt_enable();
}
kvm_write_c0_guest_entryhi(cop0, entryhi);
@ -1090,8 +1090,8 @@ static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
if (i == cpu)
continue;
if (user)
cpu_context(i, user_mm) = 0;
cpu_context(i, kern_mm) = 0;
set_cpu_context(i, user_mm, 0);
set_cpu_context(i, kern_mm, 0);
}
preempt_enable();


@ -1723,6 +1723,11 @@ static int __init kvm_mips_init(void)
{
int ret;
if (cpu_has_mmid) {
pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
return -EOPNOTSUPP;
}
ret = kvm_mips_entry_setup();
if (ret)
return ret;


@ -1056,11 +1056,7 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
*/
if (current->flags & PF_VCPU) {
mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
asid_version_mask(cpu))
get_new_mmu_context(mm, cpu);
write_c0_entryhi(cpu_asid(cpu, mm));
TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
check_switch_mmu_context(mm);
kvm_mips_suspend_mm(cpu);
ehb();
}
@ -1074,11 +1070,7 @@ static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
if (current->flags & PF_VCPU) {
/* Restore normal Linux process memory map */
if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
asid_version_mask(cpu)))
get_new_mmu_context(current->mm, cpu);
write_c0_entryhi(cpu_asid(cpu, current->mm));
TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
check_switch_mmu_context(current->mm);
kvm_mips_resume_mm(cpu);
ehb();
}
@ -1106,14 +1098,14 @@ static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
for_each_possible_cpu(i) {
cpu_context(i, kern_mm) = 0;
cpu_context(i, user_mm) = 0;
set_cpu_context(i, kern_mm, 0);
set_cpu_context(i, user_mm, 0);
}
/* Generate new ASID for current mode */
if (reload_asid) {
mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
get_new_mmu_context(mm, cpu);
get_new_mmu_context(mm);
htw_stop();
write_c0_entryhi(cpu_asid(cpu, mm));
TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
@ -1219,7 +1211,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
if (gasid != vcpu->arch.last_user_gasid) {
kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
for_each_possible_cpu(i)
cpu_context(i, user_mm) = 0;
set_cpu_context(i, user_mm, 0);
vcpu->arch.last_user_gasid = gasid;
}
}
@ -1228,9 +1220,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
* Check if ASID is stale. This may happen due to a TLB flush request or
* a lazy user MM invalidation.
*/
if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
asid_version_mask(cpu))
get_new_mmu_context(mm, cpu);
check_mmu_context(mm);
}
static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
@ -1266,11 +1256,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
cpu = smp_processor_id();
/* Restore normal Linux process memory map */
if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
asid_version_mask(cpu)))
get_new_mmu_context(current->mm, cpu);
write_c0_entryhi(cpu_asid(cpu, current->mm));
TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
check_switch_mmu_context(current->mm);
kvm_mips_resume_mm(cpu);
htw_start();


@ -2454,10 +2454,10 @@ static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
* Root ASID dealiases guest GPA mappings in the root TLB.
* Allocate new root ASID if needed.
*/
if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)
|| (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) &
asid_version_mask(cpu))
get_new_mmu_context(gpa_mm, cpu);
if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
get_new_mmu_context(gpa_mm);
else
check_mmu_context(gpa_mm);
}
}


@ -52,8 +52,4 @@ config PCI_LANTIQ
bool "PCI Support"
depends on SOC_XWAY && PCI
config XRX200_PHY_FW
bool "XRX200 PHY firmware loader"
depends on SOC_XWAY
endif


@ -10,6 +10,7 @@
#include <asm/hazards.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
@ -73,12 +74,13 @@ static inline const char *msk2str(unsigned int mask)
static void dump_tlb(int first, int last)
{
unsigned long s_entryhi, entryhi, asid;
unsigned long s_entryhi, entryhi, asid, mmid;
unsigned long long entrylo0, entrylo1, pa;
unsigned int s_index, s_pagemask, s_guestctl1 = 0;
unsigned int pagemask, guestctl1 = 0, c0, c1, i;
unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
unsigned long uninitialized_var(s_mmid);
#ifdef CONFIG_32BIT
bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
int pwidth = xpa ? 11 : 8;
@ -92,7 +94,12 @@ static void dump_tlb(int first, int last)
s_pagemask = read_c0_pagemask();
s_entryhi = read_c0_entryhi();
s_index = read_c0_index();
asid = s_entryhi & asidmask;
if (cpu_has_mmid)
asid = s_mmid = read_c0_memorymapid();
else
asid = s_entryhi & asidmask;
if (cpu_has_guestid)
s_guestctl1 = read_c0_guestctl1();
@ -105,6 +112,12 @@ static void dump_tlb(int first, int last)
entryhi = read_c0_entryhi();
entrylo0 = read_c0_entrylo0();
entrylo1 = read_c0_entrylo1();
if (cpu_has_mmid)
mmid = read_c0_memorymapid();
else
mmid = entryhi & asidmask;
if (cpu_has_guestid)
guestctl1 = read_c0_guestctl1();
@ -124,8 +137,7 @@ static void dump_tlb(int first, int last)
* leave only a single G bit set after a machine check exception
* due to duplicate TLB entry.
*/
if (!((entrylo0 | entrylo1) & ENTRYLO_G) &&
(entryhi & asidmask) != asid)
if (!((entrylo0 | entrylo1) & ENTRYLO_G) && (mmid != asid))
continue;
/*
@ -138,7 +150,7 @@ static void dump_tlb(int first, int last)
pr_cont("va=%0*lx asid=%0*lx",
vwidth, (entryhi & ~0x1fffUL),
asidwidth, entryhi & asidmask);
asidwidth, mmid);
if (cpu_has_guestid)
pr_cont(" gid=%02lx",
(guestctl1 & MIPS_GCTL1_RID)


@ -15,7 +15,6 @@ config LOONGSON1_LS1B
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_MIPS16
select SYS_HAS_EARLY_PRINTK
select USE_GENERIC_EARLY_PRINTK_8250
select COMMON_CLK
@ -31,7 +30,6 @@ config LOONGSON1_LS1C
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_MIPS16
select SYS_HAS_EARLY_PRINTK
select USE_GENERIC_EARLY_PRINTK_8250
select COMMON_CLK


@ -1,4 +1,4 @@
cflags-$(CONFIG_CPU_LOONGSON1) += -march=mips32 -Wa,--trap
cflags-$(CONFIG_CPU_LOONGSON1) += -march=mips32r2 -Wa,--trap
platform-$(CONFIG_MACH_LOONGSON32) += loongson32/
cflags-$(CONFIG_MACH_LOONGSON32) += -I$(srctree)/arch/mips/include/asm/mach-loongson32
load-$(CONFIG_CPU_LOONGSON1) += 0xffffffff80100000
load-$(CONFIG_CPU_LOONGSON1) += 0xffffffff80200000


@ -81,42 +81,6 @@ struct platform_device ls1x_cpufreq_pdev = {
},
};
/* DMA */
static struct resource ls1x_dma_resources[] = {
[0] = {
.start = LS1X_DMAC_BASE,
.end = LS1X_DMAC_BASE + SZ_4 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = LS1X_DMA0_IRQ,
.end = LS1X_DMA0_IRQ,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = LS1X_DMA1_IRQ,
.end = LS1X_DMA1_IRQ,
.flags = IORESOURCE_IRQ,
},
[3] = {
.start = LS1X_DMA2_IRQ,
.end = LS1X_DMA2_IRQ,
.flags = IORESOURCE_IRQ,
},
};
struct platform_device ls1x_dma_pdev = {
.name = "ls1x-dma",
.id = -1,
.num_resources = ARRAY_SIZE(ls1x_dma_resources),
.resource = ls1x_dma_resources,
};
void __init ls1x_dma_set_platdata(struct plat_ls1x_dma *pdata)
{
ls1x_dma_pdev.dev.platform_data = pdata;
}
/* Synopsys Ethernet GMAC */
static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = {
.phy_mask = 0,
@ -291,33 +255,6 @@ struct platform_device ls1x_gpio1_pdev = {
.resource = ls1x_gpio1_resources,
};
/* NAND Flash */
static struct resource ls1x_nand_resources[] = {
[0] = {
.start = LS1X_NAND_BASE,
.end = LS1X_NAND_BASE + SZ_32 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
/* DMA channel 0 is dedicated to NAND */
.start = LS1X_DMA_CHANNEL0,
.end = LS1X_DMA_CHANNEL0,
.flags = IORESOURCE_DMA,
},
};
struct platform_device ls1x_nand_pdev = {
.name = "ls1x-nand",
.id = -1,
.num_resources = ARRAY_SIZE(ls1x_nand_resources),
.resource = ls1x_nand_resources,
};
void __init ls1x_nand_set_platdata(struct plat_ls1x_nand *pdata)
{
ls1x_nand_pdev.dev.platform_data = pdata;
}
/* USB EHCI */
static u64 ls1x_ehci_dmamask = DMA_BIT_MASK(32);


@ -16,30 +16,6 @@
#include <nand.h>
#include <platform.h>
struct plat_ls1x_dma ls1x_dma_pdata = {
.nr_channels = 3,
};
static struct mtd_partition ls1x_nand_parts[] = {
{
.name = "kernel",
.offset = 0,
.size = SZ_16M,
},
{
.name = "rootfs",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
},
};
struct plat_ls1x_nand ls1x_nand_pdata = {
.parts = ls1x_nand_parts,
.nr_parts = ARRAY_SIZE(ls1x_nand_parts),
.hold_cycle = 0x2,
.wait_cycle = 0xc,
};
static const struct gpio_led ls1x_gpio_leds[] __initconst = {
{
.name = "LED9",
@ -64,13 +40,11 @@ static const struct gpio_led_platform_data ls1x_led_pdata __initconst = {
static struct platform_device *ls1b_platform_devices[] __initdata = {
&ls1x_uart_pdev,
&ls1x_cpufreq_pdev,
&ls1x_dma_pdev,
&ls1x_eth0_pdev,
&ls1x_eth1_pdev,
&ls1x_ehci_pdev,
&ls1x_gpio0_pdev,
&ls1x_gpio1_pdev,
&ls1x_nand_pdev,
&ls1x_rtc_pdev,
&ls1x_wdt_pdev,
};
@ -78,8 +52,6 @@ static struct platform_device *ls1b_platform_devices[] __initdata = {
static int __init ls1b_platform_init(void)
{
ls1x_serial_set_uartclk(&ls1x_uart_pdev);
ls1x_dma_set_platdata(&ls1x_dma_pdata);
ls1x_nand_set_platdata(&ls1x_nand_pdata);
gpio_led_register_device(-1, &ls1x_led_pdata);


@ -189,32 +189,21 @@ static int __init debugfs_fpuemu(void)
{
struct dentry *fpuemu_debugfs_base_dir;
struct dentry *fpuemu_debugfs_inst_dir;
struct dentry *d, *reset_file;
if (!mips_debugfs_dir)
return -ENODEV;
fpuemu_debugfs_base_dir = debugfs_create_dir("fpuemustats",
mips_debugfs_dir);
if (!fpuemu_debugfs_base_dir)
return -ENOMEM;
reset_file = debugfs_create_file("fpuemustats_clear", 0444,
mips_debugfs_dir, NULL,
&fpuemustats_clear_fops);
if (!reset_file)
return -ENOMEM;
debugfs_create_file("fpuemustats_clear", 0444, mips_debugfs_dir, NULL,
&fpuemustats_clear_fops);
#define FPU_EMU_STAT_OFFSET(m) \
offsetof(struct mips_fpu_emulator_stats, m)
#define FPU_STAT_CREATE(m) \
do { \
d = debugfs_create_file(#m, 0444, fpuemu_debugfs_base_dir, \
debugfs_create_file(#m, 0444, fpuemu_debugfs_base_dir, \
(void *)FPU_EMU_STAT_OFFSET(m), \
&fops_fpuemu_stat); \
if (!d) \
return -ENOMEM; \
} while (0)
FPU_STAT_CREATE(emulated);
@ -233,8 +222,6 @@ do { \
fpuemu_debugfs_inst_dir = debugfs_create_dir("instructions",
fpuemu_debugfs_base_dir);
if (!fpuemu_debugfs_inst_dir)
return -ENOMEM;
#define FPU_STAT_CREATE_EX(m) \
do { \
@ -242,11 +229,9 @@ do { \
\
adjust_instruction_counter_name(name, #m); \
\
d = debugfs_create_file(name, 0444, fpuemu_debugfs_inst_dir, \
debugfs_create_file(name, 0444, fpuemu_debugfs_inst_dir, \
(void *)FPU_EMU_STAT_OFFSET(m), \
&fops_fpuemu_stat); \
if (!d) \
return -ENOMEM; \
} while (0)
FPU_STAT_CREATE_EX(abs_s);


@ -3,9 +3,19 @@
# Makefile for the Linux/MIPS-specific parts of the memory manager.
#
obj-y += cache.o extable.o fault.o \
gup.o init.o mmap.o page.o page-funcs.o \
pgtable.o tlbex.o tlbex-fault.o tlb-funcs.o
obj-y += cache.o
obj-y += context.o
obj-y += extable.o
obj-y += fault.o
obj-y += gup.o
obj-y += init.o
obj-y += mmap.o
obj-y += page.o
obj-y += page-funcs.o
obj-y += pgtable.o
obj-y += tlbex.o
obj-y += tlbex-fault.o
obj-y += tlb-funcs.o
ifdef CONFIG_CPU_MICROMIPS
obj-y += uasm-micromips.o


@ -127,23 +127,6 @@ static void octeon_flush_icache_range(unsigned long start, unsigned long end)
}
/**
* Flush the icache for a trampoline. These are used for interrupt
* and exception hooking.
*
* @addr: Address to flush
*/
static void octeon_flush_cache_sigtramp(unsigned long addr)
{
struct vm_area_struct *vma;
down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, addr);
octeon_flush_icache_all_cores(vma);
up_read(&current->mm->mmap_sem);
}
/**
* Flush a range out of a vma
*
@ -289,7 +272,6 @@ void octeon_cache_init(void)
flush_cache_mm = octeon_flush_cache_mm;
flush_cache_page = octeon_flush_cache_page;
flush_cache_range = octeon_flush_cache_range;
flush_cache_sigtramp = octeon_flush_cache_sigtramp;
flush_icache_all = octeon_flush_icache_all;
flush_data_cache_page = octeon_flush_data_cache_page;
flush_icache_range = octeon_flush_icache_range;


@ -274,30 +274,6 @@ static void r3k_flush_data_cache_page(unsigned long addr)
{
}
static void r3k_flush_cache_sigtramp(unsigned long addr)
{
unsigned long flags;
pr_debug("csigtramp[%08lx]\n", addr);
flags = read_c0_status();
write_c0_status(flags&~ST0_IEC);
/* Fill the TLB to avoid an exception with caches isolated. */
asm( "lw\t$0, 0x000(%0)\n\t"
"lw\t$0, 0x004(%0)\n\t"
: : "r" (addr) );
write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
asm( "sb\t$0, 0x000(%0)\n\t"
"sb\t$0, 0x004(%0)\n\t"
: : "r" (addr) );
write_c0_status(flags);
}
static void r3k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
BUG();
@ -331,7 +307,6 @@ void r3k_cache_init(void)
__flush_kernel_vmap_range = r3k_flush_kernel_vmap_range;
flush_cache_sigtramp = r3k_flush_cache_sigtramp;
local_flush_data_cache_page = local_r3k_flush_data_cache_page;
flush_data_cache_page = r3k_flush_data_cache_page;


@ -540,6 +540,9 @@ static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type)
unsigned int i;
const cpumask_t *mask = cpu_present_mask;
if (cpu_has_mmid)
return cpu_context(0, mm) != 0;
/* cpu_sibling_map[] undeclared when !CONFIG_SMP */
#ifdef CONFIG_SMP
/*
@ -697,10 +700,7 @@ static inline void local_r4k_flush_cache_page(void *args)
}
if (exec) {
if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
int cpu = smp_processor_id();
if (cpu_context(cpu, mm) != 0)
drop_mmu_context(mm, cpu);
drop_mmu_context(mm);
} else
vaddr ? r4k_blast_icache_page(addr) :
r4k_blast_icache_user_page(addr);
@ -937,119 +937,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
}
#endif /* CONFIG_DMA_NONCOHERENT */
struct flush_cache_sigtramp_args {
struct mm_struct *mm;
struct page *page;
unsigned long addr;
};
/*
* While we're protected against bad userland addresses we don't care
* very much about what happens in that case. Usually a segmentation
* fault will dump the process later on anyway ...
*/
static void local_r4k_flush_cache_sigtramp(void *args)
{
struct flush_cache_sigtramp_args *fcs_args = args;
unsigned long addr = fcs_args->addr;
struct page *page = fcs_args->page;
struct mm_struct *mm = fcs_args->mm;
int map_coherent = 0;
void *vaddr;
unsigned long ic_lsize = cpu_icache_line_size();
unsigned long dc_lsize = cpu_dcache_line_size();
unsigned long sc_lsize = cpu_scache_line_size();
/*
* If owns no valid ASID yet, cannot possibly have gotten
* this page into the cache.
*/
if (!has_valid_asid(mm, R4K_HIT))
return;
if (mm == current->active_mm) {
vaddr = NULL;
} else {
/*
* Use kmap_coherent or kmap_atomic to do flushes for
* another ASID than the current one.
*/
map_coherent = (cpu_has_dc_aliases &&
page_mapcount(page) &&
!Page_dcache_dirty(page));
if (map_coherent)
vaddr = kmap_coherent(page, addr);
else
vaddr = kmap_atomic(page);
addr = (unsigned long)vaddr + (addr & ~PAGE_MASK);
}
R4600_HIT_CACHEOP_WAR_IMPL;
if (!cpu_has_ic_fills_f_dc) {
if (dc_lsize)
vaddr ? flush_dcache_line(addr & ~(dc_lsize - 1))
: protected_writeback_dcache_line(
addr & ~(dc_lsize - 1));
if (!cpu_icache_snoops_remote_store && scache_size)
vaddr ? flush_scache_line(addr & ~(sc_lsize - 1))
: protected_writeback_scache_line(
addr & ~(sc_lsize - 1));
}
if (ic_lsize)
vaddr ? flush_icache_line(addr & ~(ic_lsize - 1))
: protected_flush_icache_line(addr & ~(ic_lsize - 1));
if (vaddr) {
if (map_coherent)
kunmap_coherent();
else
kunmap_atomic(vaddr);
}
if (MIPS4K_ICACHE_REFILL_WAR) {
__asm__ __volatile__ (
".set push\n\t"
".set noat\n\t"
".set "MIPS_ISA_LEVEL"\n\t"
#ifdef CONFIG_32BIT
"la $at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
"dla $at,1f\n\t"
#endif
"cache %0,($at)\n\t"
"nop; nop; nop\n"
"1:\n\t"
".set pop"
:
: "i" (Hit_Invalidate_I));
}
if (MIPS_CACHE_SYNC_WAR)
__asm__ __volatile__ ("sync");
}
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
struct flush_cache_sigtramp_args args;
int npages;
down_read(&current->mm->mmap_sem);
npages = get_user_pages_fast(addr, 1, 0, &args.page);
if (npages < 1)
goto out;
args.mm = current->mm;
args.addr = addr;
r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_sigtramp, &args);
put_page(args.page);
out:
up_read(&current->mm->mmap_sem);
}
static void r4k_flush_icache_all(void)
{
if (cpu_has_vtag_icache)
@ -1978,7 +1865,6 @@ void r4k_cache_init(void)
__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
flush_cache_sigtramp = r4k_flush_cache_sigtramp;
flush_icache_all = r4k_flush_icache_all;
local_flush_data_cache_page = local_r4k_flush_data_cache_page;
flush_data_cache_page = r4k_flush_data_cache_page;
@ -2033,7 +1919,6 @@ void r4k_cache_init(void)
/* I$ fills from D$ just by emptying the write buffers */
flush_cache_page = (void *)b5k_instruction_hazard;
flush_cache_range = (void *)b5k_instruction_hazard;
flush_cache_sigtramp = (void *)b5k_instruction_hazard;
local_flush_data_cache_page = (void *)b5k_instruction_hazard;
flush_data_cache_page = (void *)b5k_instruction_hazard;
flush_icache_range = (void *)b5k_instruction_hazard;
@ -2052,7 +1937,6 @@ void r4k_cache_init(void)
flush_cache_mm = (void *)cache_noop;
flush_cache_page = (void *)cache_noop;
flush_cache_range = (void *)cache_noop;
flush_cache_sigtramp = (void *)cache_noop;
flush_icache_all = (void *)cache_noop;
flush_data_cache_page = (void *)cache_noop;
local_flush_data_cache_page = (void *)cache_noop;


@ -290,25 +290,6 @@ static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
}
}
static void tx39_flush_cache_sigtramp(unsigned long addr)
{
unsigned long ic_lsize = current_cpu_data.icache.linesz;
unsigned long dc_lsize = current_cpu_data.dcache.linesz;
unsigned long config;
unsigned long flags;
protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
/* disable icache (set ICE#) */
local_irq_save(flags);
config = read_c0_conf();
write_c0_conf(config & ~TX39_CONF_ICE);
TX39_STOP_STREAMING();
protected_flush_icache_line(addr & ~(ic_lsize - 1));
write_c0_conf(config);
local_irq_restore(flags);
}
static __init void tx39_probe_cache(void)
{
unsigned long config;
@ -368,7 +349,6 @@ void tx39_cache_init(void)
flush_icache_range = (void *) tx39h_flush_icache_all;
local_flush_icache_range = (void *) tx39h_flush_icache_all;
flush_cache_sigtramp = (void *) tx39h_flush_icache_all;
local_flush_data_cache_page = (void *) tx39h_flush_icache_all;
flush_data_cache_page = (void *) tx39h_flush_icache_all;
@ -397,7 +377,6 @@ void tx39_cache_init(void)
__flush_kernel_vmap_range = tx39_flush_kernel_vmap_range;
flush_cache_sigtramp = tx39_flush_cache_sigtramp;
local_flush_data_cache_page = local_tx39_flush_data_cache_page;
flush_data_cache_page = tx39_flush_data_cache_page;


@ -47,7 +47,6 @@ void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

arch/mips/mm/context.c (new file, 291 lines)

@ -0,0 +1,291 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/atomic.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
static DEFINE_RAW_SPINLOCK(cpu_mmid_lock);
static atomic64_t mmid_version;
static unsigned int num_mmids;
static unsigned long *mmid_map;
static DEFINE_PER_CPU(u64, reserved_mmids);
static cpumask_t tlb_flush_pending;
static bool asid_versions_eq(int cpu, u64 a, u64 b)
{
return ((a ^ b) & asid_version_mask(cpu)) == 0;
}
void get_new_mmu_context(struct mm_struct *mm)
{
unsigned int cpu;
u64 asid;
/*
* This function is specific to ASIDs, and should not be called when
* MMIDs are in use.
*/
if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
return;
cpu = smp_processor_id();
asid = asid_cache(cpu);
if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
if (cpu_has_vtag_icache)
flush_icache_all();
local_flush_tlb_all(); /* start new asid cycle */
}
set_cpu_context(cpu, mm, asid);
asid_cache(cpu) = asid;
}
EXPORT_SYMBOL_GPL(get_new_mmu_context);
void check_mmu_context(struct mm_struct *mm)
{
unsigned int cpu = smp_processor_id();
/*
* This function is specific to ASIDs, and should not be called when
* MMIDs are in use.
*/
if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
return;
/* Check if our ASID is of an older version and thus invalid */
if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
get_new_mmu_context(mm);
}
EXPORT_SYMBOL_GPL(check_mmu_context);
static void flush_context(void)
{
u64 mmid;
int cpu;
/* Update the list of reserved MMIDs and the MMID bitmap */
bitmap_clear(mmid_map, 0, num_mmids);
/* Reserve an MMID for kmap/wired entries */
__set_bit(MMID_KERNEL_WIRED, mmid_map);
for_each_possible_cpu(cpu) {
mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);
/*
* If this CPU has already been through a
* rollover, but hasn't run another task in
* the meantime, we must preserve its reserved
* MMID, as this is the only trace we have of
* the process it is still running.
*/
if (mmid == 0)
mmid = per_cpu(reserved_mmids, cpu);
__set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);
per_cpu(reserved_mmids, cpu) = mmid;
}
/*
* Queue a TLB invalidation for each CPU to perform on next
* context-switch
*/
cpumask_setall(&tlb_flush_pending);
}
static bool check_update_reserved_mmid(u64 mmid, u64 newmmid)
{
bool hit;
int cpu;
/*
* Iterate over the set of reserved MMIDs looking for a match.
* If we find one, then we can update our mm to use newmmid
* (i.e. the same MMID in the current generation) but we can't
* exit the loop early, since we need to ensure that all copies
* of the old MMID are updated to reflect the mm. Failure to do
* so could result in us missing the reserved MMID in a future
* generation.
*/
hit = false;
for_each_possible_cpu(cpu) {
if (per_cpu(reserved_mmids, cpu) == mmid) {
hit = true;
per_cpu(reserved_mmids, cpu) = newmmid;
}
}
return hit;
}
static u64 get_new_mmid(struct mm_struct *mm)
{
static u32 cur_idx = MMID_KERNEL_WIRED + 1;
u64 mmid, version, mmid_mask;
mmid = cpu_context(0, mm);
version = atomic64_read(&mmid_version);
mmid_mask = cpu_asid_mask(&boot_cpu_data);
if (!asid_versions_eq(0, mmid, 0)) {
u64 newmmid = version | (mmid & mmid_mask);
/*
* If our current MMID was active during a rollover, we
* can continue to use it and this was just a false alarm.
*/
if (check_update_reserved_mmid(mmid, newmmid)) {
mmid = newmmid;
goto set_context;
}
/*
* We had a valid MMID in a previous life, so try to re-use
* it if possible.
*/
if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) {
mmid = newmmid;
goto set_context;
}
}
/* Allocate a free MMID */
mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx);
if (mmid != num_mmids)
goto reserve_mmid;
/* We're out of MMIDs, so increment the global version */
version = atomic64_add_return_relaxed(asid_first_version(0),
&mmid_version);
/* Note currently active MMIDs & mark TLBs as requiring flushes */
flush_context();
/* We have more MMIDs than CPUs, so this will always succeed */
mmid = find_first_zero_bit(mmid_map, num_mmids);
reserve_mmid:
__set_bit(mmid, mmid_map);
cur_idx = mmid;
mmid |= version;
set_context:
set_cpu_context(0, mm, mmid);
return mmid;
}
void check_switch_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	u64 ctx, old_active_mmid;
	unsigned long flags;

	if (!cpu_has_mmid) {
		check_mmu_context(mm);
		write_c0_entryhi(cpu_asid(cpu, mm));
		goto setup_pgd;
	}

	/*
	 * MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's
	 * unnecessary.
	 *
	 * The memory ordering here is subtle. If our active_mmids is non-zero
	 * and the MMID matches the current version, then we update the CPU's
	 * asid_cache with a relaxed cmpxchg. Racing with a concurrent rollover
	 * means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on
	 *   cpu_mmid_lock in check_mmu_context(). Taking the lock synchronises
	 *   with the rollover and so we are forced to see the updated
	 *   generation.
	 *
	 * - We get a valid MMID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	ctx = cpu_context(cpu, mm);
	old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);
	if (!old_active_mmid ||
	    !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) ||
	    !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {
		raw_spin_lock_irqsave(&cpu_mmid_lock, flags);

		ctx = cpu_context(cpu, mm);
		if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)))
			ctx = get_new_mmid(mm);

		WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);
		raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags);
	}

	/*
	 * Invalidate the local TLB if needed. Note that we must only clear our
	 * bit in tlb_flush_pending after this is complete, so that the
	 * cpu_has_shared_ftlb_entries case below isn't misled.
	 */
	if (cpumask_test_cpu(cpu, &tlb_flush_pending)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();
		cpumask_clear_cpu(cpu, &tlb_flush_pending);
	}

	write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data));

	/*
	 * If this CPU shares FTLB entries with its siblings and one or more of
	 * those siblings hasn't yet invalidated its TLB following a version
	 * increase then we need to invalidate any TLB entries for our MMID
	 * that we might otherwise pick up from a sibling.
	 *
	 * We ifdef on CONFIG_SMP because cpu_sibling_map isn't defined in
	 * CONFIG_SMP=n kernels.
	 */
#ifdef CONFIG_SMP
	if (cpu_has_shared_ftlb_entries &&
	    cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {
		/* Ensure we operate on the new MMID */
		mtc0_tlbw_hazard();

		/*
		 * Invalidate all TLB entries associated with the new
		 * MMID, and wait for the invalidation to complete.
		 */
		ginvt_mmid();
		sync_ginv();
	}
#endif

setup_pgd:
	TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
}
EXPORT_SYMBOL_GPL(check_switch_mmu_context);
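
/*
 * Hedged usage sketch (simplified; not the exact arch/mips switch_mm()
 * implementation): callers are expected to invoke check_switch_mmu_context()
 * for the incoming mm with interrupts disabled, relying on it to assign a
 * valid ASID/MMID and to program EntryHi or MemoryMapID before they return.
 */
static inline void example_switch_mm(struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	check_switch_mmu_context(next);
	local_irq_restore(flags);
}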
static int mmid_init(void)
{
	if (!cpu_has_mmid)
		return 0;

	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more MMID than CPUs.
	 */
	num_mmids = asid_first_version(0);
	WARN_ON(num_mmids <= num_possible_cpus());

	atomic64_set(&mmid_version, asid_first_version(0));
	mmid_map = kcalloc(BITS_TO_LONGS(num_mmids), sizeof(*mmid_map),
			   GFP_KERNEL);
	if (!mmid_map)
		panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids);

	/* Reserve an MMID for kmap/wired entries */
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	pr_info("MMID allocator initialised with %u entries\n", num_mmids);
	return 0;
}
early_initcall(mmid_init);
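
/*
 * Rough sizing aside, assuming a 16-bit MemoryMapID field (as on the I6500)
 * and that asid_first_version(0) therefore evaluates to 1 << 16: the
 * kcalloc() above then asks for BITS_TO_LONGS(65536) = 1024 longs, i.e. an
 * 8 KiB bitmap on a 64-bit kernel; a fixed, modest cost for a single global
 * MMID space instead of per-CPU ASID spaces.
 */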


@@ -120,13 +120,8 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
if (PageHighMem(page)) {
void *addr;
if (offset + len > PAGE_SIZE) {
if (offset >= PAGE_SIZE) {
page += offset >> PAGE_SHIFT;
offset &= ~PAGE_MASK;
}
if (offset + len > PAGE_SIZE)
len = PAGE_SIZE - offset;
}
addr = kmap_atomic(page);
dma_sync_virt(addr + offset, len, dir);
@@ -145,12 +140,14 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
dma_sync_phys(paddr, size, dir);
}
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
{
if (cpu_needs_post_dma_flush(dev))
dma_sync_phys(paddr, size, dir);
}
#endif
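
/*
 * Hedged sketch of the per-page highmem sync pattern that dma_sync_phys()
 * implements in the hunk above (illustrative only; dma_sync_virt() is the
 * helper visible in this file, while the loop structure here is an
 * assumption): each step maps at most one highmem page with kmap_atomic(),
 * syncs the part of the buffer that falls inside it, then advances.
 */
static void example_sync_highmem(struct page *page, unsigned long offset,
				 size_t size, enum dma_data_direction dir)
{
	do {
		size_t len = min_t(size_t, size, PAGE_SIZE - offset);
		void *addr = kmap_atomic(page);

		dma_sync_virt(addr + offset, len, dir);
		kunmap_atomic(addr);

		page++;
		offset = 0;
		size -= len;
	} while (size);
}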
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)


@@ -84,6 +84,7 @@ void setup_zero_pages(void)
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
enum fixed_addresses idx;
unsigned int uninitialized_var(old_mmid);
unsigned long vaddr, flags, entrylo;
unsigned long old_ctx;
pte_t pte;
@@ -110,6 +111,10 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
write_c0_entryhi(vaddr & (PAGE_MASK << 1));
write_c0_entrylo0(entrylo);
write_c0_entrylo1(entrylo);
if (cpu_has_mmid) {
old_mmid = read_c0_memorymapid();
write_c0_memorymapid(MMID_KERNEL_WIRED);
}
#ifdef CONFIG_XPA
if (cpu_has_xpa) {
entrylo = (pte.pte_low & _PFNX_MASK);
@@ -124,6 +129,8 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
tlb_write_indexed();
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
if (cpu_has_mmid)
write_c0_memorymapid(old_mmid);
local_irq_restore(flags);
return (void*) vaddr;
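
/*
 * Hedged helper sketch (illustrative only; the patch open-codes this around
 * the indexed TLB write above): kernel-owned entries are written under the
 * reserved MMID_KERNEL_WIRED ID on MMID-capable CPUs, with the previous
 * MemoryMapID saved beforehand and restored once the write is done.
 */
static inline unsigned int example_enter_kernel_mmid(void)
{
	unsigned int old_mmid = read_c0_memorymapid();

	write_c0_memorymapid(MMID_KERNEL_WIRED);
	mtc0_tlbw_hazard();	/* order the MMID write before the TLB write */
	return old_mmid;
}

static inline void example_leave_kernel_mmid(unsigned int old_mmid)
{
	write_c0_memorymapid(old_mmid);
}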


@@ -55,20 +55,11 @@ static const struct file_operations sc_prefetch_fops = {
static int __init sc_debugfs_init(void)
{
struct dentry *dir, *file;
if (!mips_debugfs_dir)
return -ENODEV;
struct dentry *dir;
dir = debugfs_create_dir("l2cache", mips_debugfs_dir);
if (IS_ERR(dir))
return PTR_ERR(dir);
file = debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir,
NULL, &sc_prefetch_fops);
if (!file)
return -ENOMEM;
debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir, NULL,
&sc_prefetch_fops);
return 0;
}
late_initcall(sc_debugfs_init);
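
/*
 * Hedged aside on the debugfs usage above: debugfs_create_file() reports its
 * own failures, and callers that never dereference the returned dentry are
 * expected to ignore its return value, which is why the 'file' variable and
 * its error check disappear while the directory dentry is still checked and
 * passed as the parent; the !mips_debugfs_dir guard can likewise go because
 * debugfs_create_dir() copes with a NULL parent by itself.
 */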


@@ -67,18 +67,6 @@ void local_flush_tlb_all(void)
local_irq_restore(flags);
}
void local_flush_tlb_mm(struct mm_struct *mm)
{
int cpu = smp_processor_id();
if (cpu_context(cpu, mm) != 0) {
#ifdef DEBUG_TLB
printk("[tlbmm<%lu>]", (unsigned long)cpu_context(cpu, mm));
#endif
drop_mmu_context(mm, cpu);
}
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
@@ -117,7 +105,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
}
write_c0_entryhi(oldpid);
} else {
drop_mmu_context(mm, cpu);
drop_mmu_context(mm);
}
local_irq_restore(flags);
}
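
/*
 * Hedged note on the hunks above: drop_mmu_context() loses its CPU argument,
 * and conceptually it now just invalidates the mm's current ASID/MMID so that
 * the next check_switch_mmu_context() allocates a fresh one; the identical
 * per-ISA local_flush_tlb_mm() bodies are removed at the same time,
 * presumably in favour of a single shared implementation.
 */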


@@ -104,23 +104,6 @@ void local_flush_tlb_all(void)
}
EXPORT_SYMBOL(local_flush_tlb_all);
/* All entries common to a mm share an asid. To effectively flush
these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
int cpu;
preempt_disable();
cpu = smp_processor_id();
if (cpu_context(cpu, mm) != 0) {
drop_mmu_context(mm, cpu);
}
preempt_enable();
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
@@ -137,14 +120,23 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
if (size <= (current_cpu_data.tlbsizeftlbsets ?
current_cpu_data.tlbsize / 8 :
current_cpu_data.tlbsize / 2)) {
int oldpid = read_c0_entryhi();
unsigned long old_entryhi, uninitialized_var(old_mmid);
int newpid = cpu_asid(cpu, mm);
old_entryhi = read_c0_entryhi();
if (cpu_has_mmid) {
old_mmid = read_c0_memorymapid();
write_c0_memorymapid(newpid);
}
htw_stop();
while (start < end) {
int idx;
write_c0_entryhi(start | newpid);
if (cpu_has_mmid)
write_c0_entryhi(start);
else
write_c0_entryhi(start | newpid);
start += (PAGE_SIZE << 1);
mtc0_tlbw_hazard();
tlb_probe();
@@ -160,10 +152,12 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
tlb_write_indexed();
}
tlbw_use_hazard();
write_c0_entryhi(oldpid);
write_c0_entryhi(old_entryhi);
if (cpu_has_mmid)
write_c0_memorymapid(old_mmid);
htw_start();
} else {
drop_mmu_context(mm, cpu);
drop_mmu_context(mm);
}
flush_micro_tlb();
local_irq_restore(flags);
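
/*
 * Hedged helper sketch capturing the pattern these tlb-r4k.c hunks repeat
 * (illustrative only; the patch open-codes it): with an MMID the address
 * space identifier lives in the MemoryMapID register and EntryHi carries
 * only the virtual address, whereas pre-MMID CPUs OR the ASID into EntryHi
 * before probing or writing the TLB.
 */
static inline void example_program_probe_context(unsigned long vaddr,
						 unsigned long id)
{
	if (cpu_has_mmid) {
		write_c0_entryhi(vaddr);
		write_c0_memorymapid(id);
	} else {
		write_c0_entryhi(vaddr | id);
	}
}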
@@ -220,15 +214,21 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
int cpu = smp_processor_id();
if (cpu_context(cpu, vma->vm_mm) != 0) {
unsigned long flags;
int oldpid, newpid, idx;
unsigned long uninitialized_var(old_mmid);
unsigned long flags, old_entryhi;
int idx;
newpid = cpu_asid(cpu, vma->vm_mm);
page &= (PAGE_MASK << 1);
local_irq_save(flags);
oldpid = read_c0_entryhi();
old_entryhi = read_c0_entryhi();
htw_stop();
write_c0_entryhi(page | newpid);
if (cpu_has_mmid) {
old_mmid = read_c0_memorymapid();
write_c0_entryhi(page);
write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
} else {
write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
}
mtc0_tlbw_hazard();
tlb_probe();
tlb_probe_hazard();
@@ -244,7 +244,9 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
tlbw_use_hazard();
finish:
write_c0_entryhi(oldpid);
write_c0_entryhi(old_entryhi);
if (cpu_has_mmid)
write_c0_memorymapid(old_mmid);
htw_start();
flush_micro_tlb_vm(vma);
local_irq_restore(flags);
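
/*
 * Hedged aside on uninitialized_var(old_mmid) in these hunks: the variable is
 * written and later restored under the same cpu_has_mmid condition, so it is
 * never read while uninitialized; the annotation (as used by kernels of this
 * vintage) only quietens a false-positive compiler warning.
 */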
@@ -307,9 +309,13 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
local_irq_save(flags);
htw_stop();
pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
address &= (PAGE_MASK << 1);
write_c0_entryhi(address | pid);
if (cpu_has_mmid) {
write_c0_entryhi(address);
} else {
pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
write_c0_entryhi(address | pid);
}
pgdp = pgd_offset(vma->vm_mm, address);
mtc0_tlbw_hazard();
tlb_probe();
@@ -375,12 +381,17 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
#ifdef CONFIG_XPA
panic("Broken for XPA kernels");
#else
unsigned int uninitialized_var(old_mmid);
unsigned long flags;
unsigned long wired;
unsigned long old_pagemask;
unsigned long old_ctx;
local_irq_save(flags);
if (cpu_has_mmid) {
old_mmid = read_c0_memorymapid();
write_c0_memorymapid(MMID_KERNEL_WIRED);
}
/* Save old context and create impossible VPN2 value */
old_ctx = read_c0_entryhi();
htw_stop();
@@ -398,6 +409,8 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
if (cpu_has_mmid)
write_c0_memorymapid(old_mmid);
tlbw_use_hazard(); /* What is the hazard here? */
htw_start();
write_c0_pagemask(old_pagemask);


@@ -50,14 +50,6 @@ void local_flush_tlb_all(void)
local_irq_restore(flags);
}
void local_flush_tlb_mm(struct mm_struct *mm)
{
int cpu = smp_processor_id();
if (cpu_context(cpu, mm) != 0)
drop_mmu_context(mm, cpu);
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
@@ -75,7 +67,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
local_irq_save(flags);
if (size > TFP_TLB_SIZE / 2) {
drop_mmu_context(mm, cpu);
drop_mmu_context(mm);
goto out_restore;
}

Some files were not shown because too many files have changed in this diff.