x86: make EARLY_AP_STARTUP mandatory

When early AP startup was introduced in 2016, it was put behind a kernel
option, EARLY_AP_STARTUP, as a transition aid so that it could be turned
off if necessary.  For x86 the non-EARLY_AP_STARTUP case is no longer
functional, so disallow it.

Other archs are still incompatible with EARLY_AP_STARTUP, so the option
cannot yet be removed entirely.

Reported by:	wollman
Reviewed by:	markj
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D41351

(cherry picked from commit 792655abd6)
commit c2009f3b5b (parent f6fa4749f7)
Ed Maste, 2023-08-07 16:59:52 -04:00
5 changed files with 1 addition and 93 deletions
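The enforcement is a compile-time guard: rather than keeping a dead
!EARLY_AP_STARTUP code path, an x86 SMP build that lacks the option now
fails with a preprocessor #error (see the interrupt-code hunk below).
A minimal standalone sketch of the same pattern follows; it is a
hypothetical illustration, not kernel source:

/*
 * Hypothetical standalone illustration of retiring a transition option:
 * the legacy path is deleted and the option's absence becomes a hard
 * build error instead of a silently broken configuration.
 */
#include <stdio.h>

#define EARLY_AP_STARTUP        /* remove this line to make the build fail */

#if !defined(EARLY_AP_STARTUP)
#error "EARLY_AP_STARTUP is required on this platform"
#endif

int
main(void)
{
        /* Only the EARLY_AP_STARTUP code path remains. */
        printf("using early AP startup path\n");
        return (0);
}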

@@ -411,7 +411,6 @@ startrtclock(void)
 void
 cpu_initclocks(void)
 {
-#ifdef EARLY_AP_STARTUP
        struct thread *td;
        int i;
@@ -434,13 +433,6 @@ cpu_initclocks(void)
        if (sched_is_bound(td))
                sched_unbind(td);
        thread_unlock(td);
-#else
-       tsc_calibrate();
-#ifdef DEV_APIC
-       lapic_calibrate_timer();
-#endif
-       cpu_initclocks_bsp();
-#endif
 }

 static int

@@ -90,7 +90,7 @@ static TAILQ_HEAD(pics_head, pic) pics;
 u_int num_io_irqs;

 #if defined(SMP) && !defined(EARLY_AP_STARTUP)
-static int assign_cpu;
+#error EARLY_AP_STARTUP required on x86
 #endif

 #define INTRNAME_LEN (MAXCOMLEN + 1)
@@ -399,18 +399,10 @@ intr_assign_cpu(void *arg, int cpu)
        struct intsrc *isrc;
        int error;

-#ifdef EARLY_AP_STARTUP
        MPASS(mp_ncpus == 1 || smp_started);

        /* Nothing to do if there is only a single CPU. */
        if (mp_ncpus > 1 && cpu != NOCPU) {
-#else
-       /*
-        * Don't do anything during early boot.  We will pick up the
-        * assignment once the APs are started.
-        */
-       if (assign_cpu && cpu != NOCPU) {
-#endif
                isrc = arg;
                sx_xlock(&intrsrc_lock);
                error = isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]);
@@ -620,15 +612,9 @@ intr_next_cpu(int domain)
 {
        u_int apic_id;

-#ifdef EARLY_AP_STARTUP
        MPASS(mp_ncpus == 1 || smp_started);
        if (mp_ncpus == 1)
                return (PCPU_GET(apic_id));
-#else
-       /* Leave all interrupts on the BSP during boot. */
-       if (!assign_cpu)
-               return (PCPU_GET(apic_id));
-#endif

        if (intr_no_domain)
                domain = 0;
@@ -662,7 +648,6 @@ intr_add_cpu(u_int cpu)
        CPU_SET(cpu, &intr_cpus);
 }

-#ifdef EARLY_AP_STARTUP
 static void
 intr_smp_startup(void *arg __unused)
 {
@@ -673,52 +658,6 @@ intr_smp_startup(void *arg __unused)
 SYSINIT(intr_smp_startup, SI_SUB_SMP, SI_ORDER_SECOND, intr_smp_startup,
     NULL);
-#else
-/*
- * Distribute all the interrupt sources among the available CPUs once the
- * AP's have been launched.
- */
-static void
-intr_shuffle_irqs(void *arg __unused)
-{
-       struct intsrc *isrc;
-       u_int cpu, i;
-
-       intr_init_cpus();
-       /* Don't bother on UP. */
-       if (mp_ncpus == 1)
-               return;
-
-       /* Round-robin assign a CPU to each enabled source. */
-       sx_xlock(&intrsrc_lock);
-       assign_cpu = 1;
-       for (i = 0; i < num_io_irqs; i++) {
-               isrc = interrupt_sources[i];
-               if (isrc != NULL && isrc->is_handlers > 0) {
-                       /*
-                        * If this event is already bound to a CPU,
-                        * then assign the source to that CPU instead
-                        * of picking one via round-robin.  Note that
-                        * this is careful to only advance the
-                        * round-robin if the CPU assignment succeeds.
-                        */
-                       cpu = isrc->is_event->ie_cpu;
-                       if (cpu == NOCPU)
-                               cpu = current_cpu[isrc->is_domain];
-                       if (isrc->is_pic->pic_assign_cpu(isrc,
-                           cpu_apic_ids[cpu]) == 0) {
-                               isrc->is_cpu = cpu;
-                               if (isrc->is_event->ie_cpu == NOCPU)
-                                       intr_next_cpu(isrc->is_domain);
-                       }
-               }
-       }
-       sx_xunlock(&intrsrc_lock);
-}
-SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs,
-    NULL);
-#endif

 /*
  * TODO: Export this information in a non-MD fashion, integrate with vmstat -i.
  */

@@ -876,22 +876,8 @@ lapic_enable_pmc(void)
        lvts[APIC_LVT_PMC].lvt_masked = 0;

-#ifdef EARLY_AP_STARTUP
        MPASS(mp_ncpus == 1 || smp_started);
        smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
-#else
-#ifdef SMP
-       /*
-        * If hwpmc was loaded at boot time then the APs may not be
-        * started yet.  In that case, don't forward the request to
-        * them as they will program the lvt when they start.
-        */
-       if (smp_started)
-               smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
-       else
-#endif
-               lapic_update_pmc(NULL);
-#endif
        return (1);
 #else
        return (0);

@@ -1077,11 +1077,7 @@ mca_startup(void *dummy)
        taskqueue_enqueue_timeout_sbt(mca_tq, &mca_scan_task,
            mca_ticks * SBT_1S, 0, C_PREL(1));
 }
-#ifdef EARLY_AP_STARTUP
 SYSINIT(mca_startup, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, mca_startup, NULL);
-#else
-SYSINIT(mca_startup, SI_SUB_SMP, SI_ORDER_ANY, mca_startup, NULL);
-#endif

 #ifdef DEV_APIC
 static void

@@ -1133,11 +1133,6 @@ init_secondary_tail(void)
        while (atomic_load_acq_int(&smp_started) == 0)
                ia32_pause();

-#ifndef EARLY_AP_STARTUP
-       /* Start per-CPU event timers. */
-       cpu_initclocks_ap();
-#endif

        kcsan_cpu_init(cpuid);

        sched_ap_entry();