- Use atomic operations rather than sched_lock for safely assigning pm_active
  and pc_pmap for SMP. This is a key prerequisite for adding support for
  SCHED_ULE.
  Thanks go to Peter Jeremy for additional testing.
- Add support for SCHED_ULE to cpu_switch().
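
For context, atomic_cmpset_rel_ptr() (see atomic(9)) as used in the
pmap changes below updates a pointer-sized word only if it still holds
an expected old value, returning non-zero on success. A minimal C model
of those semantics follows; cmpset_ptr_model is an illustrative name,
not the real machine-dependent implementation, which is a single atomic
instruction:

	#include <stdint.h>

	/*
	 * Model only: install new in *p if it still equals old and
	 * report success, else leave *p untouched.  The real primitive
	 * does the test and store as one atomic operation.
	 */
	static int
	cmpset_ptr_model(volatile uintptr_t *p, uintptr_t old, uintptr_t new)
	{

		if (*p != old)
			return (0);
		*p = new;
		return (1);
	}

The _rel (release) variant additionally orders all of the CPU's earlier
stores before the update becomes visible to other CPUs.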

Committed from:	201110DevSummit
Marius Strobl 2011-10-06 11:01:31 +00:00
parent 87ef283176
commit c95589317d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=226054
3 changed files with 130 additions and 28 deletions


@@ -1,5 +1,6 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* Copyright (c) 2011 Marius Strobl <marius@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -98,9 +99,67 @@
9: andn r2, bits, r3 ; \
casxa [r1] ASI_N, r2, r3 ; \
cmp r2, r3 ; \
bne,pn %xcc, 9b ; \
mov r3, r2
/*
* Atomically load an integer from memory.
*/
#define ATOMIC_LOAD_INT(r1, val) \
clr val ; \
casa [r1] ASI_N, %g0, val
/*
* Atomically load a long from memory.
*/
#define ATOMIC_LOAD_LONG(r1, val) \
clr val ; \
casxa [r1] ASI_N, %g0, val
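/*
 * Note on the load macros above: CASA/CASXA compare the word at [r1]
 * with %g0.  If it is zero, the cleared val is swapped in, rewriting
 * zero over zero; if not, only val is updated with the old contents.
 * Either way memory is unmodified and val holds an atomic snapshot.
 */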
/*
* Atomically set a number of bits of an integer in memory.
*/
#define ATOMIC_SET_INT(r1, r2, r3, bits) \
lduw [r1], r2 ; \
9: or r2, bits, r3 ; \
casa [r1] ASI_N, r2, r3 ; \
cmp r2, r3 ; \
bne,pn %icc, 9b ; \
mov r3, r2
/*
* Atomically set a number of bits of a long in memory.
*/
#define ATOMIC_SET_LONG(r1, r2, r3, bits) \
ldx [r1], r2 ; \
9: or r2, bits, r3 ; \
casxa [r1] ASI_N, r2, r3 ; \
cmp r2, r3 ; \
bne,pn %xcc, 9b ; \
mov r3, r2
/*
* Atomically store an integer in memory.
*/
#define ATOMIC_STORE_INT(r1, r2, r3, val) \
lduw [r1], r2 ; \
9: mov val, r3 ; \
casa [r1] ASI_N, r2, r3 ; \
cmp r2, r3 ; \
bne,pn %icc, 9b ; \
mov r3, r2
/*
* Atomically store a long in memory.
*/
#define ATOMIC_STORE_LONG(r1, r2, r3, val) \
ldx [r1], r2 ; \
9: mov val, r3 ; \
casxa [r1] ASI_N, r2, r3 ; \
cmp r2, r3 ; \
bne,pn %xcc, 9b ; \
mov r3, r2
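/*
 * The read-modify-write macros above retry the CAS until memory still
 * matches the value sampled into r2; on exit r2 holds the previous
 * contents of the word.
 */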
#define PCPU(member) PCPU_REG + PC_ ## member
#define PCPU_ADDR(member, reg) \
add PCPU_REG, PC_ ## member, reg


@@ -100,13 +100,6 @@ __FBSDID("$FreeBSD$");
#include <machine/tsb.h>
#include <machine/ver.h>
/* XXX */
#include "opt_sched.h"
#ifndef SCHED_4BSD
#error "sparc64 only works with SCHED_4BSD which uses a global scheduler lock."
#endif
extern struct mtx sched_lock;
/*
* Virtual address of message buffer
*/
@@ -1232,11 +1225,9 @@ pmap_pinit(pmap_t pm)
if (pm->pm_tsb_obj == NULL)
pm->pm_tsb_obj = vm_object_allocate(OBJT_PHYS, TSB_PAGES);
mtx_lock_spin(&sched_lock);
for (i = 0; i < MAXCPU; i++)
pm->pm_context[i] = -1;
CPU_ZERO(&pm->pm_active);
mtx_unlock_spin(&sched_lock);
VM_OBJECT_LOCK(pm->pm_tsb_obj);
for (i = 0; i < TSB_PAGES; i++) {
@@ -1263,7 +1254,9 @@ pmap_release(pmap_t pm)
{
vm_object_t obj;
vm_page_t m;
#ifdef SMP
struct pcpu *pc;
#endif
CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p",
pm->pm_context[curcpu], pm->pm_tsb);
@@ -1283,11 +1276,18 @@ pmap_release(pmap_t pm)
* - A process that referenced this pmap ran on a CPU, but we switched
* to a kernel thread, leaving the pmap pointer unchanged.
*/
mtx_lock_spin(&sched_lock);
#ifdef SMP
sched_pin();
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
if (pc->pc_pmap == pm)
pc->pc_pmap = NULL;
mtx_unlock_spin(&sched_lock);
atomic_cmpset_rel_ptr((uintptr_t *)&pc->pc_pmap,
(uintptr_t)pm, (uintptr_t)NULL);
sched_unpin();
#else
critical_enter();
if (PCPU_GET(pmap) == pm)
PCPU_SET(pmap, NULL);
critical_exit();
#endif
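/*
 * The cmpset above clears pc_pmap only if it still refers to the pmap
 * being destroyed, so a different pmap concurrently installed by
 * pmap_activate() on another CPU is left intact.
 */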
pmap_qremove((vm_offset_t)pm->pm_tsb, TSB_PAGES);
obj = pm->pm_tsb_obj;
@@ -2232,11 +2232,14 @@ pmap_activate(struct thread *td)
}
PCPU_SET(tlb_ctx, context + 1);
mtx_lock_spin(&sched_lock);
pm->pm_context[curcpu] = context;
#ifdef SMP
CPU_SET_ATOMIC(PCPU_GET(cpuid), &pm->pm_active);
atomic_store_ptr((uintptr_t *)PCPU_PTR(pmap), (uintptr_t)pm);
#else
CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
PCPU_SET(pmap, pm);
mtx_unlock_spin(&sched_lock);
#endif
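/*
 * The pm_active and pc_pmap updates above are done with atomics
 * rather than under the global scheduler lock, which is what allows
 * schedulers without such a lock, i.e. SCHED_ULE, to work.
 */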
stxa(AA_DMMU_TSB, ASI_DMMU, pm->pm_tsb);
stxa(AA_IMMU_TSB, ASI_IMMU, pm->pm_tsb);


@@ -1,5 +1,6 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* Copyright (c) 2011 Marius Strobl <marius@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,6 +36,7 @@ __FBSDID("$FreeBSD$");
#include <machine/tstate.h>
#include "assym.s"
#include "opt_sched.h"
.register %g2, #ignore
.register %g3, #ignore
@@ -66,7 +68,7 @@ ENTRY(cpu_switch)
nop
call savefpctx
add PCB_REG, PCB_KFP, %o0
ba,a %xcc, 2f
ba,a,pt %xcc, 2f
nop
/*
@@ -148,7 +150,7 @@ ENTRY(cpu_switch)
* If they are the same we are done.
*/
cmp %l2, %l1
be,a,pn %xcc, 7f
be,a,pn %xcc, 8f
nop
/*
@@ -157,7 +159,7 @@ ENTRY(cpu_switch)
*/
SET(vmspace0, %i4, %i3)
cmp %i5, %i3
be,a,pn %xcc, 7f
be,a,pn %xcc, 8f
nop
/*
@@ -180,9 +182,15 @@
sub %l3, %l5, %l5
mov 1, %l6
sllx %l6, %l5, %l5
#ifdef SMP
add %l2, %l4, %l4
membar #LoadStore | #StoreStore
ATOMIC_CLEAR_LONG(%l4, %l6, %l7, %l5)
#else
ldx [%l2 + %l4], %l6
andn %l6, %l5, %l6
stx %l6, [%l2 + %l4]
#endif
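/*
 * On SMP the pm_active bit must be cleared atomically because other
 * CPUs read and update the mask without a global lock; among other
 * things it selects the targets of TLB shootdown IPIs.  The membar
 * makes this CPU's prior stores visible before it leaves the mask.
 */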
/*
* Take away its context number.
@@ -194,14 +202,20 @@
3: cmp %i2, %g0
be,pn %xcc, 4f
lduw [PCPU(TLB_CTX_MAX)], %i4
stx %i2, [%i0 + TD_LOCK]
add %i0, TD_LOCK, %l4
#if defined(SCHED_ULE) && defined(SMP)
membar #LoadStore | #StoreStore
ATOMIC_STORE_LONG(%l4, %l6, %l7, %i2)
#else
stx %i2, [%l4]
#endif
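/*
 * Storing the mutex pointer into the old thread's td_lock releases
 * that thread; with SCHED_ULE another CPU may already be spinning on
 * its td_lock, so the store must be atomic and ordered after all of
 * our earlier stores by the membar.
 */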
/*
* Find a new TLB context. If we've run out we have to flush all
* user mappings from the TLB and reset the context numbers.
*/
4: lduw [PCPU(TLB_CTX)], %i3
lduw [PCPU(TLB_CTX_MAX)], %i4
cmp %i3, %i4
bne,a,pt %xcc, 5f
nop
@@ -237,14 +251,24 @@
sub %l3, %l5, %l5
mov 1, %l6
sllx %l6, %l5, %l5
#ifdef SMP
add %l1, %l4, %l4
ATOMIC_SET_LONG(%l4, %l6, %l7, %l5)
#else
ldx [%l1 + %l4], %l6
or %l6, %l5, %l6
stx %l6, [%l1 + %l4]
#endif
/*
* Make note of the change in pmap.
*/
#ifdef SMP
PCPU_ADDR(PMAP, %l4)
ATOMIC_STORE_LONG(%l4, %l5, %l6, %l1)
#else
stx %l1, [PCPU(PMAP)]
#endif
/*
* Fiddle the hardware bits. Set the TSB registers and install the
@@ -264,19 +288,35 @@
stxa %i3, [%i5] ASI_DMMU
flush %i4
6:
#if defined(SCHED_ULE) && defined(SMP)
SET(blocked_lock, %l2, %l1)
add %i1, TD_LOCK, %l2
7:
ATOMIC_LOAD_LONG(%l2, %l3)
cmp %l1, %l3
be,a,pn %xcc, 7b
nop
#endif
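/*
 * The loop above handles thread migration under SCHED_ULE: while the
 * new thread is still owned by another CPU its td_lock reads as
 * &blocked_lock, and we must not resume it until the real lock
 * pointer has been stored.
 */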
/*
* Done, return and load the new process's window from the stack.
*/
6: ret
restore
7: cmp %i2, %g0
be,a,pn %xcc, 6b
nop
stx %i2, [%i0 + TD_LOCK]
ret
restore
8: cmp %i2, %g0
be,pn %xcc, 6b
add %i0, TD_LOCK, %l4
#if defined(SCHED_ULE) && defined(SMP)
membar #LoadStore | #StoreStore
ATOMIC_STORE_LONG(%l4, %l6, %l7, %i2)
ba,pt %xcc, 6b
nop
#else
ba,pt %xcc, 6b
stx %i2, [%l4]
#endif
END(cpu_switch)
ENTRY(savectx)