Add initial smp support. This gets as far as allowing the secondary
cpu(s) into the kernel, and syncing them up to "kernel" mode so we can
send them ipis, which also work.

Thanks to John Baldwin for providing me with access to the hardware
that made this possible.

Parts obtained from:	bsd/os
This commit is contained in:
Jake Burkholder 2002-01-08 05:50:26 +00:00
parent 33bcea4eff
commit 6deb695c1d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=89051
7 changed files with 1093 additions and 0 deletions

View file

@ -41,6 +41,9 @@
/* Processor interrupt levels used for softints and ipis. */
#define PIL_LOW 1 /* stray interrupts */
#define PIL_ITHREAD 2 /* interrupts that use ithreads */
#define PIL_RENDEZVOUS 3 /* smp rendezvous ipi */
#define PIL_AST 4 /* ast ipi */
#define PIL_STOP 5 /* stop cpu ipi */
#define PIL_FAST 13 /* fast interrupts */
#define PIL_TICK 14 /* tick interrupt */

View file

@ -29,4 +29,160 @@
#ifndef _MACHINE_SMP_H_
#define _MACHINE_SMP_H_

#include <machine/intr_machdep.h>

/*
 * Hand-shake states used while bringing a secondary cpu from the loader
 * into the kernel; stored in cpu_start_args.csa_state.  These are outside
 * the LOCORE guard because the assembler startup code uses them too.
 */
#define CPU_INITING 1 /* presumably set while still in the loader — verify */
#define CPU_INITED 2 /* cpu has entered the kernel (_mp_start) */
#define CPU_REJECT 3
#define CPU_STARTING 4 /* boot cpu told this cpu to start */
#define CPU_STARTED 5 /* cpu is on its bootstrap stack */
#define CPU_BOOTSTRAPING 6 /* cpu may run cpu_mp_bootstrap() */
#define CPU_BOOTSTRAPPED 7 /* cpu_mp_bootstrap() sync-up done */

#ifndef LOCORE

/* Bits in the value read via ldxa(0, ASI_INTR_DISPATCH_STATUS). */
#define IDR_BUSY (1<<0) /* ipi dispatch still in progress */
#define IDR_NACK (1<<1) /* ipi dispatch was nacked */

/* IPIs are delivered as softints at the corresponding PIL. */
#define IPI_AST PIL_AST
#define IPI_RENDEZVOUS PIL_RENDEZVOUS
#define IPI_STOP PIL_STOP

#define IPI_RETRIES 100 /* dispatch attempts before cpu_ipi_send() panics */
/*
 * Argument area used by the boot processor to pass a per-cpu page to a
 * secondary cpu while it is starting; see cpu_mp_start() and _mp_start.
 */
struct cpu_start_args {
	u_int csa_mid;		/* module (upa port) id of the cpu to start */
	u_int csa_state;	/* CPU_* hand-shake state */
	u_long csa_data;	/* tte data mapping the per-cpu page */
	vm_offset_t csa_va;	/* va of the per-cpu page */
};
/* Argument area for softint-level ipis; read by tl_ipi_level. */
struct ipi_level_args {
	u_int ila_count;	/* cpus yet to acknowledge; see ipi_wait() */
	u_int ila_level;	/* pil of the softint to post */
};

/* Argument area for tlb demap ipis; read by the tl_ipi_tlb_* handlers. */
struct ipi_tlb_args {
	u_int ita_count;	/* cpus yet to acknowledge; see ipi_wait() */
	u_long ita_tlb;		/* TLB_DTLB and/or TLB_ITLB */
	u_long ita_ctx;		/* context number; 0 means nucleus context */
	u_long ita_start;	/* start va for a range demap */
	u_long ita_end;		/* end va for a range demap */
};
#define ita_va ita_start	/* a single-page demap reuses ita_start */
struct pcpu;

void cpu_mp_bootstrap(struct pcpu *pc);
void cpu_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2);
void cpu_ipi_send(u_int mid, u_long d0, u_long d1, u_long d2);

void ipi_selected(u_int cpus, u_int ipi);
void ipi_all(u_int ipi);
void ipi_all_but_self(u_int ipi);

/*
 * NOTE(review): these are tentative definitions, not extern declarations;
 * every translation unit including this header emits a common symbol.
 * Consider "extern" here with a single definition in mp_machdep.c.
 */
struct ipi_level_args ipi_level_args;
struct ipi_tlb_args ipi_tlb_args;

extern int mp_ncpus;

/* Trap-level ipi handlers (mp_exc.s); passed as d1 to cpu_ipi_send(). */
extern char tl_ipi_level[];
extern char tl_ipi_test[];
extern char tl_ipi_tlb_context_demap[];
extern char tl_ipi_tlb_page_demap[];
extern char tl_ipi_tlb_range_demap[];
#ifdef SMP

/*
 * Ask all other cpus to demap context ctx from their tlbs.  Returns a
 * cookie for ipi_wait(), or NULL on a uniprocessor where no ipi is
 * needed.  NOTE(review): the single global ipi_tlb_args is not locked;
 * presumably callers serialize tlb shootdowns — verify.
 */
static __inline void *
ipi_tlb_context_demap(u_int ctx)
{
	struct ipi_tlb_args *ita;

	if (mp_ncpus == 1)
		return (NULL);
	ita = &ipi_tlb_args;
	/* Each receiver decrements ita_count (IPI_WAIT in mp_exc.s). */
	ita->ita_count = mp_ncpus;
	ita->ita_ctx = ctx;
	cpu_ipi_selected(PCPU_GET(other_cpus), 0,
	    (u_long)tl_ipi_tlb_context_demap, (u_long)ita);
	return (&ita->ita_count);
}
/*
 * Ask all other cpus to demap page va in context ctx from the tlbs named
 * by tlb (TLB_DTLB and/or TLB_ITLB).  Returns a cookie for ipi_wait(),
 * or NULL on a uniprocessor.
 */
static __inline void *
ipi_tlb_page_demap(u_int tlb, u_int ctx, vm_offset_t va)
{
	struct ipi_tlb_args *ita;

	if (mp_ncpus == 1)
		return (NULL);
	ita = &ipi_tlb_args;
	/* Each receiver decrements ita_count (IPI_WAIT in mp_exc.s). */
	ita->ita_count = mp_ncpus;
	ita->ita_tlb = tlb;
	ita->ita_ctx = ctx;
	ita->ita_va = va;
	cpu_ipi_selected(PCPU_GET(other_cpus), 0,
	    (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
	return (&ita->ita_count);
}
/*
 * Ask all other cpus to demap the page range [start, end) in context ctx
 * from their tlbs.  Returns a cookie for ipi_wait(), or NULL on a
 * uniprocessor.
 */
static __inline void *
ipi_tlb_range_demap(u_int ctx, vm_offset_t start, vm_offset_t end)
{
	struct ipi_tlb_args *ita;

	if (mp_ncpus == 1)
		return (NULL);
	ita = &ipi_tlb_args;
	/* Each receiver decrements ita_count (IPI_WAIT in mp_exc.s). */
	ita->ita_count = mp_ncpus;
	ita->ita_ctx = ctx;
	ita->ita_start = start;
	ita->ita_end = end;
	cpu_ipi_selected(PCPU_GET(other_cpus), 0,
	    (u_long)tl_ipi_tlb_range_demap, (u_long)ita);
	return (&ita->ita_count);
}
static __inline void
ipi_wait(void *cookie)
{
u_int *count;
if ((count = cookie) != NULL) {
atomic_subtract_int(count, 1);
while (*count != 0)
membar(LoadStore);
}
}
#else

/* Uniprocessor stubs: there are no other cpus, so demap ipis are no-ops. */
static __inline void *
ipi_tlb_context_demap(u_int ctx)
{
	return (NULL);
}

static __inline void *
ipi_tlb_page_demap(u_int tlb, u_int ctx, vm_offset_t va)
{
	return (NULL);
}

static __inline void *
ipi_tlb_range_demap(u_int ctx, vm_offset_t start, vm_offset_t end)
{
	return (NULL);
}

/* Nothing to wait for: the demap stubs always return a NULL cookie. */
static __inline void
ipi_wait(void *cookie)
{
}

#endif /* SMP */

#endif /* !LOCORE */

#endif /* !_MACHINE_SMP_H_ */

View file

@ -0,0 +1,169 @@
/*-
* Copyright (c) 2002 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asi.h>
#include <machine/ktr.h>
#include <machine/asmacros.h>
#include <machine/pstate.h>
#include "assym.s"
.register %g2, #ignore
.register %g3, #ignore
/*
 * Acknowledge an ipi: atomically decrement the count word at [r1], then
 * spin until all cpus have done so (count reaches zero).  The initiator
 * waits on the same word in ipi_wait().  r2 and r3 are scratch.
 */
#define IPI_WAIT(r1, r2, r3) \
	ATOMIC_DEC_INT(r1, r2, r3) ; \
9:	membar #StoreLoad ; \
	lduw [r1], r2 ; \
	brnz,a,pn r2, 9b ; \
	 nop
/*
 * Trigger a softint at the desired level.
 * %g5 points to a struct ipi_level_args; writing 1 << level to %asr20
 * (the set-softint register) posts the softint on this cpu.
 */
ENTRY(tl_ipi_level)
	lduw	[%g5 + ILA_LEVEL], %g2
	mov	1, %g1
	sllx	%g1, %g2, %g1
	wr	%g1, 0, %asr20
	IPI_WAIT(%g5, %g1, %g2)
	retry
END(tl_ipi_level)
/*
 * Test ipi handler: just logs (via ktr) that the ipi arrived, with the
 * receiving cpu's id/mid and the two data words, then returns.
 */
ENTRY(tl_ipi_test)
#if KTR_COMPILE & KTR_SMP
	CATR(KTR_SMP, "ipi_test: cpuid=%d mid=%d d1=%#lx d2=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	lduw	[PCPU(CPUID)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	lduw	[PCPU(MID)], %g2
	stx	%g2, [%g1 + KTR_PARM2]
	stx	%g4, [%g1 + KTR_PARM3]
	stx	%g5, [%g1 + KTR_PARM4]
9:
#endif
	retry
END(tl_ipi_test)
/*
 * Demap a page from the dtlb and/or itlb.
 * %g5 points to a struct ipi_tlb_args.  A zero context means the nucleus
 * context; otherwise the secondary context register is temporarily set
 * to the target context and cleared again afterwards.
 */
ENTRY(tl_ipi_tlb_page_demap)
	ldx	[%g5 + ITA_TLB], %g1	! which tlbs: TLB_DTLB/TLB_ITLB
	ldx	[%g5 + ITA_CTX], %g2	! context number
	ldx	[%g5 + ITA_VA], %g3	! page va
	wr	%g0, ASI_DMMU, %asi
	brz,a,pt %g2, 1f		! ctx == 0: demap from nucleus
	 or	%g3, TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE, %g3
	stxa	%g2, [%g0 + AA_DMMU_SCXR] %asi	! install target context
	membar	#Sync
	or	%g3, TLB_DEMAP_SECONDARY | TLB_DEMAP_PAGE, %g3
1:	andcc	%g1, TLB_DTLB, %g0
	bz,a,pn	%xcc, 2f
	 nop
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
2:	andcc	%g1, TLB_ITLB, %g0
	bz,a,pn	%xcc, 3f
	 nop
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
3:	brz,a,pt %g2, 4f		! nucleus: nothing to restore
	 nop
	stxa	%g0, [%g0 + AA_DMMU_SCXR] %asi	! restore secondary ctx to 0
4:	membar	#Sync
	IPI_WAIT(%g5, %g1, %g2)
	retry
END(tl_ipi_tlb_page_demap)
/*
 * Demap a range of pages from the dtlb and itlb.
 *
 * NOTE(review): "set PAGE_SIZE, %g5" clobbers %g5, which still holds the
 * ipi_tlb_args pointer that the trailing IPI_WAIT dereferences — the
 * count word is never decremented and the initiator's ipi_wait() would
 * spin forever.  Also "or %g4, %g2, %g4" accumulates stale va bits in
 * %g4 across iterations instead of using a scratch register.  Fix needs
 * a free register; verify trap-level register conventions first.
 */
ENTRY(tl_ipi_tlb_range_demap)
	ldx	[%g5 + ITA_CTX], %g1	! context number
	ldx	[%g5 + ITA_START], %g2	! start va (loop cursor)
	ldx	[%g5 + ITA_END], %g3	! end va (exclusive)
	wr	%g0, ASI_DMMU, %asi
	brz,a,pt %g1, 1f		! ctx == 0: demap from nucleus
	 mov	TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE, %g4
	stxa	%g1, [%g0 + AA_DMMU_SCXR] %asi	! install target context
	membar	#Sync
	mov	TLB_DEMAP_SECONDARY | TLB_DEMAP_PAGE, %g4
1:	set	PAGE_SIZE, %g5		! NOTE(review): clobbers args pointer
2:	or	%g4, %g2, %g4		! NOTE(review): accumulates va bits
	stxa	%g0, [%g4] ASI_DMMU_DEMAP
	stxa	%g0, [%g4] ASI_IMMU_DEMAP
	add	%g2, %g5, %g2		! advance one page
	cmp	%g2, %g3
	bne,a,pt %xcc, 2b
	 nop
	brz,a,pt %g1, 3f		! nucleus: nothing to restore
	 nop
	stxa	%g0, [%g0 + AA_DMMU_SCXR] %asi	! restore secondary ctx to 0
3:	membar	#Sync
	IPI_WAIT(%g5, %g1, %g2)
	retry
END(tl_ipi_tlb_range_demap)
/*
 * Demap an entire context from the dtlb and itlb.
 * The target context is installed in the secondary context register, a
 * demap-context operation is issued to both mmus, and the register is
 * cleared again.
 */
ENTRY(tl_ipi_tlb_context_demap)
	ldx	[%g5 + ITA_CTX], %g1	! context number
	mov	AA_DMMU_SCXR, %g2
	stxa	%g1, [%g2] ASI_DMMU	! install target context
	membar	#Sync
	mov	TLB_DEMAP_SECONDARY | TLB_DEMAP_CONTEXT, %g3
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	stxa	%g0, [%g2] ASI_DMMU	! restore secondary ctx to 0
	membar	#Sync
	IPI_WAIT(%g5, %g1, %g2)
	retry
END(tl_ipi_tlb_context_demap)

View file

@ -0,0 +1,169 @@
/*-
* Copyright (c) 2002 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asi.h>
#include <machine/ktr.h>
#include <machine/asmacros.h>
#include <machine/pstate.h>
#include "assym.s"
.register %g2, #ignore
.register %g3, #ignore
/*
 * Acknowledge an ipi: atomically decrement the count word at [r1], then
 * spin until all cpus have done so (count reaches zero).  The initiator
 * waits on the same word in ipi_wait().  r2 and r3 are scratch.
 */
#define IPI_WAIT(r1, r2, r3) \
	ATOMIC_DEC_INT(r1, r2, r3) ; \
9:	membar #StoreLoad ; \
	lduw [r1], r2 ; \
	brnz,a,pn r2, 9b ; \
	 nop

/*
 * Trigger a softint at the desired level.
 * %g5 points to a struct ipi_level_args; writing 1 << level to %asr20
 * (the set-softint register) posts the softint on this cpu.
 */
ENTRY(tl_ipi_level)
	lduw	[%g5 + ILA_LEVEL], %g2
	mov	1, %g1
	sllx	%g1, %g2, %g1
	wr	%g1, 0, %asr20
	IPI_WAIT(%g5, %g1, %g2)
	retry
END(tl_ipi_level)

/*
 * Test ipi handler: just logs (via ktr) that the ipi arrived.
 */
ENTRY(tl_ipi_test)
#if KTR_COMPILE & KTR_SMP
	CATR(KTR_SMP, "ipi_test: cpuid=%d mid=%d d1=%#lx d2=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	lduw	[PCPU(CPUID)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	lduw	[PCPU(MID)], %g2
	stx	%g2, [%g1 + KTR_PARM2]
	stx	%g4, [%g1 + KTR_PARM3]
	stx	%g5, [%g1 + KTR_PARM4]
9:
#endif
	retry
END(tl_ipi_test)

/*
 * Demap a page from the dtlb and/or itlb.
 * %g5 points to a struct ipi_tlb_args.  A zero context means the nucleus
 * context; otherwise the secondary context register is temporarily set
 * to the target context and cleared again afterwards.
 */
ENTRY(tl_ipi_tlb_page_demap)
	ldx	[%g5 + ITA_TLB], %g1	! which tlbs: TLB_DTLB/TLB_ITLB
	ldx	[%g5 + ITA_CTX], %g2	! context number
	ldx	[%g5 + ITA_VA], %g3	! page va
	wr	%g0, ASI_DMMU, %asi
	brz,a,pt %g2, 1f		! ctx == 0: demap from nucleus
	 or	%g3, TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE, %g3
	stxa	%g2, [%g0 + AA_DMMU_SCXR] %asi	! install target context
	membar	#Sync
	or	%g3, TLB_DEMAP_SECONDARY | TLB_DEMAP_PAGE, %g3
1:	andcc	%g1, TLB_DTLB, %g0
	bz,a,pn	%xcc, 2f
	 nop
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
2:	andcc	%g1, TLB_ITLB, %g0
	bz,a,pn	%xcc, 3f
	 nop
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
3:	brz,a,pt %g2, 4f		! nucleus: nothing to restore
	 nop
	stxa	%g0, [%g0 + AA_DMMU_SCXR] %asi	! restore secondary ctx to 0
4:	membar	#Sync
	IPI_WAIT(%g5, %g1, %g2)
	retry
END(tl_ipi_tlb_page_demap)

/*
 * Demap a range of pages from the dtlb and itlb.
 *
 * NOTE(review): "set PAGE_SIZE, %g5" clobbers %g5, which still holds the
 * ipi_tlb_args pointer that the trailing IPI_WAIT dereferences — the
 * count word is never decremented and the initiator's ipi_wait() would
 * spin forever.  Also "or %g4, %g2, %g4" accumulates stale va bits in
 * %g4 across iterations instead of using a scratch register.
 */
ENTRY(tl_ipi_tlb_range_demap)
	ldx	[%g5 + ITA_CTX], %g1	! context number
	ldx	[%g5 + ITA_START], %g2	! start va (loop cursor)
	ldx	[%g5 + ITA_END], %g3	! end va (exclusive)
	wr	%g0, ASI_DMMU, %asi
	brz,a,pt %g1, 1f		! ctx == 0: demap from nucleus
	 mov	TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE, %g4
	stxa	%g1, [%g0 + AA_DMMU_SCXR] %asi	! install target context
	membar	#Sync
	mov	TLB_DEMAP_SECONDARY | TLB_DEMAP_PAGE, %g4
1:	set	PAGE_SIZE, %g5		! NOTE(review): clobbers args pointer
2:	or	%g4, %g2, %g4		! NOTE(review): accumulates va bits
	stxa	%g0, [%g4] ASI_DMMU_DEMAP
	stxa	%g0, [%g4] ASI_IMMU_DEMAP
	add	%g2, %g5, %g2		! advance one page
	cmp	%g2, %g3
	bne,a,pt %xcc, 2b
	 nop
	brz,a,pt %g1, 3f		! nucleus: nothing to restore
	 nop
	stxa	%g0, [%g0 + AA_DMMU_SCXR] %asi	! restore secondary ctx to 0
3:	membar	#Sync
	IPI_WAIT(%g5, %g1, %g2)
	retry
END(tl_ipi_tlb_range_demap)

/*
 * Demap an entire context from the dtlb and itlb.
 * The target context is installed in the secondary context register, a
 * demap-context operation is issued to both mmus, and the register is
 * cleared again.
 */
ENTRY(tl_ipi_tlb_context_demap)
	ldx	[%g5 + ITA_CTX], %g1	! context number
	mov	AA_DMMU_SCXR, %g2
	stxa	%g1, [%g2] ASI_DMMU	! install target context
	membar	#Sync
	mov	TLB_DEMAP_SECONDARY | TLB_DEMAP_CONTEXT, %g3
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	stxa	%g0, [%g2] ASI_DMMU	! restore secondary ctx to 0
	membar	#Sync
	IPI_WAIT(%g5, %g1, %g2)
	retry
END(tl_ipi_tlb_context_demap)

View file

@ -0,0 +1,137 @@
/*-
* Copyright (c) 2002 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asi.h>
#include <machine/asmacros.h>
#include <machine/ktr.h>
#include <machine/pstate.h>
#include "assym.s"
.register %g2, #ignore
.register %g3, #ignore
/*
 * void _mp_start(u_long o0, u_int *state, u_int mid, u_long o3, u_long o4)
 *
 * Entry point for non-boot cpus coming out of the loader.  Runs with no
 * stack until the boot cpu passes us a per-cpu page through the shared
 * cpu_start_args area (see cpu_mp_start()).
 */
ENTRY(_mp_start)
	/*
	 * Give away our stack to another processor that may be starting in the
	 * loader.
	 */
	clr	%sp

	/*
	 * Inform the boot processor which is waiting in the loader that we
	 * made it.
	 */
	mov	CPU_INITED, %l0
	stw	%l0, [%o1]
	membar	#StoreLoad

#if KTR_COMPILE & KTR_SMP
	CATR(KTR_SMP, "_mp_start: cpu %d entered kernel"
	    , %g1, %g2, %g3, 7, 8, 9)
	stx	%o2, [%g1 + KTR_PARM1]
9:
#endif

	SET(cpu_start_args, %l1, %l0)

	/*
	 * Wait until it's our turn to start: the boot cpu stores our module
	 * id into csa_mid to release us.
	 */
1:	membar	#StoreLoad
	lduw	[%l0 + CSA_MID], %l1
	cmp	%l1, %o2
	bne	%xcc, 1b
	 nop

#if KTR_COMPILE & KTR_SMP
	CATR(KTR_SMP, "_mp_start: cpu %d got start signal"
	    , %g1, %g2, %g3, 7, 8, 9)
	stx	%o2, [%g1 + KTR_PARM1]
9:
#endif

	/*
	 * Find our per-cpu page and the tte data that we will use to map it.
	 */
	ldx	[%l0 + CSA_DATA], %l1
	ldx	[%l0 + CSA_VA], %l2

	/*
	 * Map the per-cpu page.  It uses a locked tlb entry.
	 */
	wr	%g0, ASI_DMMU, %asi
	stxa	%l2, [%g0 + AA_DMMU_TAR] %asi
	stxa	%l1, [%g0] ASI_DTLB_DATA_IN_REG
	membar	#Sync

	/*
	 * Get onto our per-cpu panic stack, which precedes the struct pcpu
	 * in the per-cpu page.
	 */
	set	PAGE_SIZE - PC_SIZEOF, %l3
	add	%l2, %l3, %l2
	sub	%l2, SPOFF + CCFSZ, %sp

	/*
	 * Inform the boot processor that we're about to start.
	 */
	mov	CPU_STARTED, %l3
	stw	%l3, [%l0 + CSA_STATE]
	membar	#StoreLoad

	/*
	 * Enable interrupts.
	 */
	wrpr	%g0, PSTATE_KERNEL, %pstate

#if KTR_COMPILE & KTR_SMP
	CATR(KTR_SMP,
	    "_mp_start: bootstrap cpuid=%d mid=%d pcpu=%#lx data=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	lduw	[%l2 + PC_CPUID], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	lduw	[%l2 + PC_MID], %g2
	stx	%g2, [%g1 + KTR_PARM2]
	stx	%l2, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%sp, [%g1 + KTR_PARM5]
9:
#endif

	/*
	 * And away we go.  This doesn't return.
	 */
	call	cpu_mp_bootstrap
	 mov	%l2, %o0
	sir
	! NOTREACHED
END(_mp_start)

View file

@ -0,0 +1,137 @@
/*-
* Copyright (c) 2002 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asi.h>
#include <machine/asmacros.h>
#include <machine/ktr.h>
#include <machine/pstate.h>
#include "assym.s"
.register %g2, #ignore
.register %g3, #ignore
/*
 * void _mp_start(u_long o0, u_int *state, u_int mid, u_long o3, u_long o4)
 *
 * Entry point for non-boot cpus coming out of the loader.  Runs with no
 * stack until the boot cpu passes us a per-cpu page through the shared
 * cpu_start_args area (see cpu_mp_start()).
 */
ENTRY(_mp_start)
	/*
	 * Give away our stack to another processor that may be starting in the
	 * loader.
	 */
	clr	%sp

	/*
	 * Inform the boot processor which is waiting in the loader that we
	 * made it.
	 */
	mov	CPU_INITED, %l0
	stw	%l0, [%o1]
	membar	#StoreLoad

#if KTR_COMPILE & KTR_SMP
	CATR(KTR_SMP, "_mp_start: cpu %d entered kernel"
	    , %g1, %g2, %g3, 7, 8, 9)
	stx	%o2, [%g1 + KTR_PARM1]
9:
#endif

	SET(cpu_start_args, %l1, %l0)

	/*
	 * Wait until it's our turn to start: the boot cpu stores our module
	 * id into csa_mid to release us.
	 */
1:	membar	#StoreLoad
	lduw	[%l0 + CSA_MID], %l1
	cmp	%l1, %o2
	bne	%xcc, 1b
	 nop

#if KTR_COMPILE & KTR_SMP
	CATR(KTR_SMP, "_mp_start: cpu %d got start signal"
	    , %g1, %g2, %g3, 7, 8, 9)
	stx	%o2, [%g1 + KTR_PARM1]
9:
#endif

	/*
	 * Find our per-cpu page and the tte data that we will use to map it.
	 */
	ldx	[%l0 + CSA_DATA], %l1
	ldx	[%l0 + CSA_VA], %l2

	/*
	 * Map the per-cpu page.  It uses a locked tlb entry.
	 */
	wr	%g0, ASI_DMMU, %asi
	stxa	%l2, [%g0 + AA_DMMU_TAR] %asi
	stxa	%l1, [%g0] ASI_DTLB_DATA_IN_REG
	membar	#Sync

	/*
	 * Get onto our per-cpu panic stack, which precedes the struct pcpu
	 * in the per-cpu page.
	 */
	set	PAGE_SIZE - PC_SIZEOF, %l3
	add	%l2, %l3, %l2
	sub	%l2, SPOFF + CCFSZ, %sp

	/*
	 * Inform the boot processor that we're about to start.
	 */
	mov	CPU_STARTED, %l3
	stw	%l3, [%l0 + CSA_STATE]
	membar	#StoreLoad

	/*
	 * Enable interrupts.
	 */
	wrpr	%g0, PSTATE_KERNEL, %pstate

#if KTR_COMPILE & KTR_SMP
	CATR(KTR_SMP,
	    "_mp_start: bootstrap cpuid=%d mid=%d pcpu=%#lx data=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	lduw	[%l2 + PC_CPUID], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	lduw	[%l2 + PC_MID], %g2
	stx	%g2, [%g1 + KTR_PARM2]
	stx	%l2, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%sp, [%g1 + KTR_PARM5]
9:
#endif

	/*
	 * And away we go.  This doesn't return.
	 */
	call	cpu_mp_bootstrap
	 mov	%l2, %o0
	sir
	! NOTREACHED
END(_mp_start)

View file

@ -0,0 +1,322 @@
/*-
* Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Berkeley Software Design Inc's name may not be used to endorse or
* promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from BSDI: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp
*/
/*-
* Copyright (c) 2002 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <dev/ofw/openfirm.h>
#include <machine/asi.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <machine/tte.h>
static ih_func_t cpu_ipi_ast;
static ih_func_t cpu_ipi_stop;

/*
 * Argument area used to pass data to non-boot processors as they start up.
 * This must be statically initialized with a known invalid upa module id,
 * since the other processors will use it before the boot cpu enters the
 * kernel.
 */
struct cpu_start_args cpu_start_args = { -1, -1, 0, 0 };

/* Serializes the final stage of ap bootstrap (cpu_mp_bootstrap()). */
static struct mtx ap_boot_mtx;

/* Module (upa port) id of the boot processor; set in cpu_mp_probe(). */
u_int mp_boot_mid;
/*
 * Probe for other cpus: record the boot cpu's id and module id, then
 * count the "cpu" nodes in the OFW device tree.  Returns non-zero when
 * more than one cpu is present.
 */
int
cpu_mp_probe(void)
{
	char type[128];
	phandle_t node;
	int found;

	all_cpus = 1 << PCPU_GET(cpuid);
	mp_boot_mid = PCPU_GET(mid);
	mp_ncpus = 1;

	found = 0;
	for (node = OF_child(OF_peer(0)); node != 0; node = OF_peer(node)) {
		if (OF_getprop(node, "device_type", type, sizeof(type)) > 0 &&
		    strcmp(type, "cpu") == 0)
			found++;
	}
	return (found > 1);
}
/*
 * Fire up any non-boot processors.  For each "cpu" node in the OFW tree
 * other than the boot cpu, allocate a per-cpu page (used first as the
 * cpu's bootstrap stack) and hand-shake the cpu through the CPU_* states
 * via the shared cpu_start_args area.
 */
void
cpu_mp_start(void)
{
	volatile struct cpu_start_args *csa;
	struct pcpu *pc;
	phandle_t child;
	phandle_t root;
	vm_offset_t pa;
	vm_offset_t va;
	char buf[128];
	u_long data;
	u_int mid;
	int cpuid;

	mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);

	/* Register the ipi handlers before any cpu can send us an ipi. */
	intr_setup(PIL_AST, cpu_ipi_ast, -1, NULL, NULL);
	intr_setup(PIL_RENDEZVOUS, (ih_func_t *)smp_rendezvous_action,
	    -1, NULL, NULL);
	intr_setup(PIL_STOP, cpu_ipi_stop, -1, NULL, NULL);

	root = OF_peer(0);
	csa = &cpu_start_args;
	for (child = OF_child(root); child != 0; child = OF_peer(child)) {
		if (OF_getprop(child, "device_type", buf, sizeof(buf)) <= 0 ||
		    strcmp(buf, "cpu") != 0)
			continue;
		if (OF_getprop(child, "upa-portid", &mid, sizeof(mid)) <= 0)
			panic("cpu_mp_start: can't get module id");
		if (mid == mp_boot_mid)
			continue;

		/*
		 * Found a non-boot processor.  It is currently spinning in
		 * _mp_start, and it has no stack.  Allocate a per-cpu page
		 * for it, which it will use as a bootstrap stack, and pass
		 * it through the argument area.
		 */
		cpuid = mp_ncpus++;
		va = kmem_alloc(kernel_map, PAGE_SIZE);
		pa = pmap_kextract(va);
		if (pa == 0)
			panic("cpu_mp_start: pmap_kextract\n");
		pc = (struct pcpu *)(va + PAGE_SIZE) - 1;
		pcpu_init(pc, cpuid, sizeof(*pc));
		pc->pc_mid = mid;
		/*
		 * TD_L makes this a locked tlb entry, so the per-cpu page
		 * cannot be demapped out from under the cpu.
		 */
		data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
		    TD_L | TD_CP | TD_CV | TD_P | TD_W;

		/*
		 * Initialize the argument area to start this cpu.
		 * Note, order is important here.  We must set the pcpu pointer
		 * and the tte data before letting it loose.
		 */
		csa->csa_data = data;
		csa->csa_va = va;
		membar(StoreLoad);
		/* Writing the target's mid releases it from its spin loop. */
		csa->csa_mid = mid;
		csa->csa_state = CPU_STARTING;
		while (csa->csa_state == CPU_STARTING)
			membar(StoreLoad);
		if (csa->csa_state != CPU_STARTED)
			panic("cpu_mp_start: bad state %d for cpu %d\n",
			    csa->csa_state, mid);
		csa->csa_state = CPU_BOOTSTRAPING;
		while (csa->csa_state == CPU_BOOTSTRAPING)
			membar(StoreLoad);
		if (csa->csa_state != CPU_BOOTSTRAPPED)
			panic("cpu_mp_start: bad state %d for cpu %d\n",
			    csa->csa_state, mid);
		/* Send a test ipi to the freshly started cpu. */
		cpu_ipi_send(mid, 0, (u_long)tl_ipi_test, 0);
		all_cpus |= 1 << cpuid;
	}
	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
}
/*
 * Announce the attached cpus at boot.  Not yet implemented; TODO is
 * presumably a placeholder macro that panics — verify its definition.
 */
void
cpu_mp_announce(void)
{
	TODO;
}
/*
 * Finish bringing up a non-boot cpu.  Called from _mp_start on the cpu's
 * bootstrap stack, with its struct pcpu as argument.
 *
 * NOTE(review): everything after the "for (;;)" spin below is
 * unreachable.  This appears deliberate at this stage of smp bringup —
 * the cpu is parked after sync-up so the boot cpu can test ipi delivery;
 * the dead tail is the eventual scheduler entry path.
 */
void
cpu_mp_bootstrap(struct pcpu *pc)
{
	struct cpu_start_args *csa;

	csa = &cpu_start_args;
	CTR1(KTR_SMP, "cpu_mp_bootstrap: cpuid=%d", pc->pc_cpuid);
	/* Wait until the boot cpu moves the hand-shake to BOOTSTRAPING. */
	while (csa->csa_state != CPU_BOOTSTRAPING)
		membar(StoreLoad);
	cpu_setregs(pc);
	pmap_map_tsb();
	CTR0(KTR_SMP, "cpu_mp_bootstrap: spinning")	;
	/* Tell the boot cpu we are done, then park (see NOTE above). */
	csa->csa_state = CPU_BOOTSTRAPPED;
	membar(StoreLoad);
	for (;;)
		;

	mtx_lock_spin(&ap_boot_mtx);

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));

	smp_cpus++;

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));

	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	if (smp_cpus == mp_ncpus) {
		smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
		smp_active = 1;	 /* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the AP's are up */
	while (smp_started == 0)
		; /* nothing */

	microuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	/* ok, now grab sched_lock and enter the scheduler */
	mtx_lock_spin(&sched_lock);
	cpu_throw();	/* doesn't return */
}
/*
 * AST ipi handler.  Intentionally empty: presumably the pending ast is
 * noticed on return from the interrupt — verify against exception.s.
 */
static void
cpu_ipi_ast(struct trapframe *tf)
{
}
/*
 * Stop-cpu ipi handler.  Not yet implemented; TODO is presumably a
 * placeholder macro that panics — verify its definition.
 */
static void
cpu_ipi_stop(struct trapframe *tf)
{
	TODO;
}
/*
 * Deliver the ipi described by the data words d0-d2 to every cpu whose
 * bit is set in the mask cpus, translating each cpu id to its module id.
 */
void
cpu_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2)
{
	struct pcpu *target;
	u_int bit;

	for (; cpus != 0; cpus &= ~(1 << bit)) {
		/* Peel off the lowest set bit of the mask. */
		bit = ffs(cpus) - 1;
		target = pcpu_find(bit);
		cpu_ipi_send(target->pc_mid, d0, d1, d2);
	}
}
/*
 * Send an ipi to the cpu with module id mid: load the three data words
 * into the outgoing interrupt vector data registers and trigger the
 * dispatch.  On a nack, retry up to IPI_RETRIES times before panicing.
 * Interrupts are disabled around each attempt, presumably so an incoming
 * ipi cannot interleave with the dispatch — verify.
 */
void
cpu_ipi_send(u_int mid, u_long d0, u_long d1, u_long d2)
{
	u_long pstate;
	int i;

	KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) & IDR_BUSY) == 0,
	    ("ipi_send: outstanding dispatch"));
	pstate = rdpr(pstate);
	for (i = 0; i < IPI_RETRIES; i++) {
		/* wrpr xors its operands: pstate ^ PSTATE_IE clears IE. */
		if (pstate & PSTATE_IE)
			wrpr(pstate, pstate, PSTATE_IE);
		/* Set up the interrupt data and fire the dispatch. */
		stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
		stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
		stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
		/* mid << 14 places the target module id in the address. */
		stxa(AA_INTR_SEND | (mid << 14), ASI_SDB_INTR_W, 0);
		membar(Sync);
		/* Wait for the dispatch to complete (busy bit clears). */
		while (ldxa(0, ASI_INTR_DISPATCH_STATUS) & IDR_BUSY)
			;
		/* Restore the saved pstate (re-enables interrupts). */
		wrpr(pstate, pstate, 0);
		if ((ldxa(0, ASI_INTR_DISPATCH_STATUS) & IDR_NACK) == 0)
			return;
	}
	panic("ipi_send: couldn't send ipi");
}
/*
 * Post the softint ipi (a PIL_* level) on each cpu in the mask cpus,
 * using the generic tl_ipi_level trap-level handler.
 */
void
ipi_selected(u_int cpus, u_int ipi)
{
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
}
/*
 * Post a softint ipi on all cpus including ourselves.  Not yet
 * implemented; TODO is presumably a panic placeholder — verify.
 */
void
ipi_all(u_int ipi)
{
	TODO;
}
/*
 * Post the softint ipi on every cpu except the caller, via the
 * per-cpu other_cpus mask.
 */
void
ipi_all_but_self(u_int ipi)
{
	cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)tl_ipi_level, ipi);
}