linux/arch/sparc/kernel/trampoline_32.S
Paul Gortmaker 2066aadd53 sparc: delete __cpuinit/__CPUINIT usage from all users
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications.  For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.

After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out.  Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.

Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings.  In any case, they are temporary and harmless.

This removes all the arch/sparc uses of the __cpuinit macros from
C files and removes __CPUINIT from assembly files.  Note that even
though arch/sparc/kernel/trampoline_64.S has instances of ".previous"
in it, they are all paired off against explicit ".section" directives,
and not implicitly paired with __CPUINIT (unlike mips and arm, where they were).

[1] https://lkml.org/lkml/2013/5/20/589

Cc: "David S. Miller" <davem@davemloft.net>
Cc: sparclinux@vger.kernel.org
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-07-14 19:36:52 -04:00

202 lines
3.8 KiB
ArmAsm

/*
* trampoline.S: SMP cpu boot-up trampoline code.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/init.h>
#include <asm/head.h>
#include <asm/psr.h>
#include <asm/page.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/vaddrs.h>
#include <asm/contregs.h>
#include <asm/thread_info.h>
.globl sun4m_cpu_startup
.globl sun4d_cpu_startup
.align 4
/* When we start up a cpu for the first time it enters this routine.
* This initializes the chip from whatever state the prom left it
* in and sets PIL in %psr to 15, no irqs.
*
* Each secondary cpu has its own entry stub (cpu1/2/3_startup) whose only
* job is to load that cpu's private trap-table base into %g3 before
* falling into the common path at 1:.  %g3 doubles as the cpu identity
* below (it is written to %tbr and used to index current_set).
*/
sun4m_cpu_startup:
cpu1_startup:
sethi %hi(trapbase_cpu1), %g3
b 1f
or %g3, %lo(trapbase_cpu1), %g3	! delay slot: completes %g3 before branch target
cpu2_startup:
sethi %hi(trapbase_cpu2), %g3
b 1f
or %g3, %lo(trapbase_cpu2), %g3	! delay slot
cpu3_startup:
sethi %hi(trapbase_cpu3), %g3
b 1f
or %g3, %lo(trapbase_cpu3), %g3	! delay slot
1:
/* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0>
* (supervisor mode, all maskable irqs blocked, PSR_ET clear so traps stay off).
*/
set (PSR_PIL | PSR_S | PSR_PS), %g1
wr %g1, 0x0, %psr ! traps off though
WRITE_PAUSE	! wr to a state register is delayed; settle before relying on it
/* Our %wim is one behind CWP */
mov 2, %g1
wr %g1, 0x0, %wim
WRITE_PAUSE
/* This identifies "this cpu": each cpu gets its own trap table base. */
wr %g3, 0x0, %tbr
WRITE_PAUSE
/* Give ourselves a stack and curptr.
* Index into the current_set pointer array: offset = (%g3 >> 10) & 0xc,
* i.e. a 4-byte-scaled index derived from the per-cpu trapbase in %g3.
* %g6 (curptr) = current_set[cpu]; stack sits at the top of the thread area.
*/
set current_set, %g5
srl %g3, 10, %g4
and %g4, 0xc, %g4
ld [%g5 + %g4], %g6
sethi %hi(THREAD_SIZE - STACKFRAME_SZ), %sp
or %sp, %lo(THREAD_SIZE - STACKFRAME_SZ), %sp
add %g6, %sp, %sp	! %sp = curptr + THREAD_SIZE - STACKFRAME_SZ
/* Turn on traps (PSR_ET). */
rd %psr, %g1
wr %g1, PSR_ET, %psr ! traps on
WRITE_PAUSE
/* Init our caches, etc.: poke_srmmu is a function pointer -- load and call. */
set poke_srmmu, %g5
ld [%g5], %g5
call %g5
nop	! delay slot
/* Start this processor: smp_callin is not expected to return. */
call smp_callin
nop	! delay slot
b,a smp_panic	! annulled branch: only reached if smp_callin returns
.text
.align 4
/* Last-resort landing pad shared by all the trampolines above:
* reached only if smp_callin ever returns.  Hands off to cpu_panic,
* which is presumably a dead end -- nothing follows the call.
*/
smp_panic:
call cpu_panic
nop	! delay slot
/* CPUID in bootbus can be found at PA 0xff0140000 */
#define SUN4D_BOOTBUS_CPUID 0xf0140000
.align 4
/* sun4d secondary-cpu trampoline.  Same overall recipe as sun4m above
* (psr -> wim -> tbr -> stack -> traps on -> caches -> smp_callin),
* except all cpus share one trap table and the cpu id comes from the
* bootbus instead of a per-cpu trapbase.
*/
sun4d_cpu_startup:
/* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
set (PSR_PIL | PSR_S | PSR_PS), %g1
wr %g1, 0x0, %psr ! traps off though
WRITE_PAUSE	! wr to a state register is delayed
/* Our %wim is one behind CWP */
mov 2, %g1
wr %g1, 0x0, %wim
WRITE_PAUSE
/* Set tbr - we use just one trap table. */
set trapbase, %g1
wr %g1, 0x0, %tbr
WRITE_PAUSE
/* Get our CPU id out of bootbus: alternate-space byte load via ASI_M_CTL.
* %g3 = raw byte & 0xf8 (cpuid << 3); %g1 = cpuid; stash it in the
* Viking MMU scratch register for later retrieval.
*/
set SUN4D_BOOTBUS_CPUID, %g3
lduba [%g3] ASI_M_CTL, %g3
and %g3, 0xf8, %g3
srl %g3, 3, %g1
sta %g1, [%g0] ASI_M_VIKING_TMP1
/* Give ourselves a stack and curptr.
* %g3 still holds cpuid << 3, so >> 1 yields cpuid * 4, the byte
* offset into the current_set pointer array.
*/
set current_set, %g5
srl %g3, 1, %g4
ld [%g5 + %g4], %g6
sethi %hi(THREAD_SIZE - STACKFRAME_SZ), %sp
or %sp, %lo(THREAD_SIZE - STACKFRAME_SZ), %sp
add %g6, %sp, %sp	! %sp = curptr + THREAD_SIZE - STACKFRAME_SZ
/* Turn on traps (PSR_ET). */
rd %psr, %g1
wr %g1, PSR_ET, %psr ! traps on
WRITE_PAUSE
/* Init our caches, etc.: poke_srmmu is a function pointer -- load and call. */
set poke_srmmu, %g5
ld [%g5], %g5
call %g5
nop	! delay slot
/* Start this processor: smp_callin is not expected to return. */
call smp_callin
nop	! delay slot
b,a smp_panic	! annulled branch: only reached if smp_callin returns
.align 4
.global leon_smp_cpu_startup, smp_penguin_ctable
/* LEON secondary-cpu trampoline.  Before the common psr/wim/tbr dance,
* it must program the SRMMU context-table pointer register so this cpu
* shares the boot MMU context set up by the master.
*/
leon_smp_cpu_startup:
/* Load word at smp_penguin_ctable+4, shift >> 4 to convert to the
* ctxtbl-pointer register format, and store it to SRMMU register
* 0x100 via the LEON MMU-register ASI.
*/
set smp_penguin_ctable,%g1
ld [%g1+4],%g1
srl %g1,4,%g1
set 0x00000100,%g5 /* SRMMU_CTXTBL_PTR */
sta %g1, [%g5] ASI_LEON_MMUREGS
/* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
set (PSR_PIL | PSR_S | PSR_PS), %g1
wr %g1, 0x0, %psr ! traps off though
WRITE_PAUSE	! wr to a state register is delayed
/* Our %wim is one behind CWP */
mov 2, %g1
wr %g1, 0x0, %wim
WRITE_PAUSE
/* Set tbr - we use just one trap table. */
set trapbase, %g1
wr %g1, 0x0, %tbr
WRITE_PAUSE
/* Get our CPU id: read %asr17; the id occupies the top bits
* (extracted below as bits 31:28 -- confirm against LEON docs).
*/
rd %asr17,%g3
/* Give ourselves a stack and curptr.
* offset = (%g3 >> 28) << 2 = cpuid * 4, the byte offset into the
* current_set pointer array.
*/
set current_set, %g5
srl %g3, 28, %g4
sll %g4, 2, %g4
ld [%g5 + %g4], %g6
sethi %hi(THREAD_SIZE - STACKFRAME_SZ), %sp
or %sp, %lo(THREAD_SIZE - STACKFRAME_SZ), %sp
add %g6, %sp, %sp	! %sp = curptr + THREAD_SIZE - STACKFRAME_SZ
/* Turn on traps (PSR_ET). */
rd %psr, %g1
wr %g1, PSR_ET, %psr ! traps on
WRITE_PAUSE
/* Init our caches, etc.: poke_srmmu is a function pointer -- load and call. */
set poke_srmmu, %g5
ld [%g5], %g5
call %g5
nop	! delay slot
/* Start this processor: smp_callin is not expected to return. */
call smp_callin
nop	! delay slot
b,a smp_panic	! annulled branch: only reached if smp_callin returns