Merge tag 'ras_updates_for_v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull RAS updates from Borislav Petkov:

 - move therm_throt.c to the thermal framework, where it belongs.

 - identify CPUs which fail to enter the broadcast handler, as an
   additional debugging aid (a condensed sketch of the idea follows the
   commit list below).

* tag 'ras_updates_for_v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  thermal: Move therm_throt there from x86/mce
  x86/mce: Get rid of mcheck_intel_therm_init()
  x86/mce: Make mce_timed_out() identify holdout CPUs
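
For the holdout-CPU change, the idea (visible in the arch/x86/kernel/cpu/mce/core.c hunks
below) is to seed a cpumask with every CPU, have each CPU clear its own bit when it enters
the MCE broadcast rendezvous, and print whatever is still set once the monarch CPU times
out waiting. The following condensed sketch restates that pattern outside the diff; the
helper names (mce_rendezvous_enter() and friends) are illustrative, not the kernel's.

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/smp.h>

/* Seeded to "all CPUs"; whatever is still set at timeout are the holdouts. */
static cpumask_t mce_missing_cpus = CPU_MASK_ALL;

/* Each CPU clears its own bit on entry to the broadcast rendezvous. */
static void mce_rendezvous_enter(void)
{
        cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus);
}

/* The monarch CPU calls this when waiting for the other CPUs times out. */
static void mce_report_holdouts(void)
{
        /* Restrict to online CPUs so offlined ones are not reported. */
        if (cpumask_and(&mce_missing_cpus, cpu_online_mask, &mce_missing_cpus))
                pr_emerg("CPUs not responding to MCE broadcast (may include false positives): %*pbl\n",
                         cpumask_pr_args(&mce_missing_cpus));
}

/* Re-arm the mask once a machine-check event has been handled. */
static void mce_rendezvous_reset(void)
{
        cpumask_setall(&mce_missing_cpus);
}
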
Linus Torvalds 2021-02-20 19:06:34 -08:00
commit 3e89c7ea7a
13 changed files with 84 additions and 63 deletions

arch/x86/Kconfig

@@ -1158,10 +1158,6 @@ config X86_MCE_INJECT
If you don't know what a machine check is and you don't do kernel
QA it is safe to say n.
config X86_THERMAL_VECTOR
def_bool y
depends on X86_MCE_INTEL
source "arch/x86/events/Kconfig"
config X86_LEGACY_VM86

arch/x86/include/asm/mce.h

@@ -288,28 +288,6 @@ extern void (*mce_threshold_vector)(void);
/* Deferred error interrupt handler */
extern void (*deferred_error_int_vector)(void);
/*
* Thermal handler
*/
void intel_init_thermal(struct cpuinfo_x86 *c);
/* Interrupt Handler for core thermal thresholds */
extern int (*platform_thermal_notify)(__u64 msr_val);
/* Interrupt Handler for package thermal thresholds */
extern int (*platform_thermal_package_notify)(__u64 msr_val);
/* Callback support of rate control, return true, if
* callback has rate control */
extern bool (*platform_thermal_package_rate_control)(void);
#ifdef CONFIG_X86_THERMAL_VECTOR
extern void mcheck_intel_therm_init(void);
#else
static inline void mcheck_intel_therm_init(void) { }
#endif
/*
* Used by APEI to report memory error via /dev/mcelog
*/

arch/x86/include/asm/thermal.h

@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_THERMAL_H
#define _ASM_X86_THERMAL_H
#ifdef CONFIG_X86_THERMAL_VECTOR
void intel_init_thermal(struct cpuinfo_x86 *c);
bool x86_thermal_enabled(void);
void intel_thermal_interrupt(void);
#else
static inline void intel_init_thermal(struct cpuinfo_x86 *c) { }
#endif
#endif /* _ASM_X86_THERMAL_H */

arch/x86/kernel/cpu/intel.c

@@ -24,6 +24,7 @@
#include <asm/traps.h>
#include <asm/resctrl.h>
#include <asm/numa.h>
#include <asm/thermal.h>
#ifdef CONFIG_X86_64
#include <linux/topology.h>
@@ -719,6 +720,8 @@ static void init_intel(struct cpuinfo_x86 *c)
tsx_disable();
split_lock_init();
intel_init_thermal(c);
}
#ifdef CONFIG_X86_32

arch/x86/kernel/cpu/mce/Makefile

@@ -9,8 +9,6 @@ obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o
mce-inject-y := inject.o
obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o
obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o
obj-$(CONFIG_ACPI_APEI) += apei.o
obj-$(CONFIG_X86_MCELOG_LEGACY) += dev-mcelog.o

arch/x86/kernel/cpu/mce/core.c

@@ -877,6 +877,12 @@ static atomic_t mce_executing;
*/
static atomic_t mce_callin;
/*
* Track which CPUs entered the MCA broadcast synchronization and which not in
* order to print holdouts.
*/
static cpumask_t mce_missing_cpus = CPU_MASK_ALL;
/*
* Check if a timeout waiting for other CPUs happened.
*/
@@ -894,8 +900,12 @@ static int mce_timed_out(u64 *t, const char *msg)
if (!mca_cfg.monarch_timeout)
goto out;
if ((s64)*t < SPINUNIT) {
if (mca_cfg.tolerant <= 1)
if (mca_cfg.tolerant <= 1) {
if (cpumask_and(&mce_missing_cpus, cpu_online_mask, &mce_missing_cpus))
pr_emerg("CPUs not responding to MCE broadcast (may include false positives): %*pbl\n",
cpumask_pr_args(&mce_missing_cpus));
mce_panic(msg, NULL, NULL);
}
cpu_missing = 1;
return 1;
}
@@ -1006,6 +1016,7 @@ static int mce_start(int *no_way_out)
* is updated before mce_callin.
*/
order = atomic_inc_return(&mce_callin);
cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus);
/*
* Wait for everyone.
@@ -1114,6 +1125,7 @@ static int mce_end(int order)
reset:
atomic_set(&global_nwo, 0);
atomic_set(&mce_callin, 0);
cpumask_setall(&mce_missing_cpus);
barrier();
/*
@@ -2178,7 +2190,6 @@ __setup("mce", mcheck_enable);
int __init mcheck_init(void)
{
mcheck_intel_therm_init();
mce_register_decode_chain(&early_nb);
mce_register_decode_chain(&mce_uc_nb);
mce_register_decode_chain(&mce_default_nb);
@@ -2713,6 +2724,7 @@ static void mce_reset(void)
atomic_set(&mce_executing, 0);
atomic_set(&mce_callin, 0);
atomic_set(&global_nwo, 0);
cpumask_setall(&mce_missing_cpus);
}
static int fake_panic_get(void *data, u64 *val)

arch/x86/kernel/cpu/mce/intel.c

@@ -531,7 +531,6 @@ static void intel_imc_init(struct cpuinfo_x86 *c)
void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
intel_init_thermal(c);
intel_init_cmci();
intel_init_lmce();
intel_ppin_init(c);

arch/x86/kernel/irq.c

@@ -21,6 +21,7 @@
#include <asm/hw_irq.h>
#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/thermal.h>
#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>
@@ -374,3 +375,23 @@ void fixup_irqs(void)
}
}
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
static void smp_thermal_vector(void)
{
if (x86_thermal_enabled())
intel_thermal_interrupt();
else
pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
smp_processor_id());
}
DEFINE_IDTENTRY_SYSVEC(sysvec_thermal)
{
trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
inc_irq_stat(irq_thermal_count);
smp_thermal_vector();
trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
ack_APIC_irq();
}
#endif

drivers/thermal/intel/Kconfig

@@ -8,6 +8,10 @@ config INTEL_POWERCLAMP
enforce idle time which results in more package C-state residency. The
user interface is exposed via generic thermal framework.
config X86_THERMAL_VECTOR
def_bool y
depends on X86 && CPU_SUP_INTEL && X86_LOCAL_APIC
config X86_PKG_TEMP_THERMAL
tristate "X86 package temperature thermal driver"
depends on X86_THERMAL_VECTOR

drivers/thermal/intel/Makefile

@@ -10,3 +10,4 @@ obj-$(CONFIG_INTEL_QUARK_DTS_THERMAL) += intel_quark_dts_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o
obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o

drivers/thermal/intel/therm_throt.c

@@ -26,13 +26,13 @@
#include <linux/cpu.h>
#include <asm/processor.h>
#include <asm/thermal.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/irq.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>
#include "internal.h"
#include "thermal_interrupt.h"
/* How long to wait between reporting thermal events */
#define CHECK_INTERVAL (300 * HZ)
@@ -570,7 +570,7 @@ static void notify_thresholds(__u64 msr_val)
}
/* Thermal transition interrupt handler */
static void intel_thermal_interrupt(void)
void intel_thermal_interrupt(void)
{
__u64 msr_val;
@@ -606,23 +606,6 @@ static void intel_thermal_interrupt(void)
}
}
static void unexpected_thermal_interrupt(void)
{
pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
smp_processor_id());
}
static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
DEFINE_IDTENTRY_SYSVEC(sysvec_thermal)
{
trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
inc_irq_stat(irq_thermal_count);
smp_thermal_vector();
trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
ack_APIC_irq();
}
/* Thermal monitoring depends on APIC, ACPI and clock modulation */
static int intel_thermal_supported(struct cpuinfo_x86 *c)
{
@@ -633,15 +616,9 @@ static int intel_thermal_supported(struct cpuinfo_x86 *c)
return 1;
}
void __init mcheck_intel_therm_init(void)
bool x86_thermal_enabled(void)
{
/*
* This function is only called on boot CPU. Save the init thermal
* LVT value on BSP and use that value to restore APs' thermal LVT
* entry BIOS programmed later
*/
if (intel_thermal_supported(&boot_cpu_data))
lvtthmr_init = apic_read(APIC_LVTTHMR);
return atomic_read(&therm_throt_en);
}
void intel_init_thermal(struct cpuinfo_x86 *c)
@@ -653,6 +630,10 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
if (!intel_thermal_supported(c))
return;
/* On the BSP? */
if (c == &boot_cpu_data)
lvtthmr_init = apic_read(APIC_LVTTHMR);
/*
* First check if its enabled already, in which case there might
* be some SMM goo which handles it, so we can't even put a handler
@@ -726,8 +707,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
| PACKAGE_THERM_INT_HIGH_ENABLE), h);
}
smp_thermal_vector = intel_thermal_interrupt;
rdmsr(MSR_IA32_MISC_ENABLE, l, h);
wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);

drivers/thermal/intel/thermal_interrupt.h

@@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INTEL_THERMAL_INTERRUPT_H
#define _INTEL_THERMAL_INTERRUPT_H
/* Interrupt Handler for package thermal thresholds */
extern int (*platform_thermal_package_notify)(__u64 msr_val);
/* Interrupt Handler for core thermal thresholds */
extern int (*platform_thermal_notify)(__u64 msr_val);
/* Callback support of rate control, return true, if
* callback has rate control */
extern bool (*platform_thermal_package_rate_control)(void);
#endif /* _INTEL_THERMAL_INTERRUPT_H */
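
The new thermal_interrupt.h carries only the callback hooks that consumers under
drivers/thermal/intel (such as x86_pkg_temp_thermal.c, whose hunk follows) need, so they
no longer have to pull in <asm/mce.h>. Below is a minimal sketch of how a driver might
wire up the package-threshold callback through these pointers; the module and its
pkg_temp_notify() handler are hypothetical and only illustrate the interface.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

#include "thermal_interrupt.h"

/* Hypothetical handler: invoked from the thermal interrupt path with the
 * package thermal status MSR value when a package threshold trips. */
static int pkg_temp_notify(__u64 msr_val)
{
        pr_info("package thermal threshold event, status 0x%llx\n", msr_val);
        return 0;
}

static int __init pkg_temp_example_init(void)
{
        /* Publish the callback consumed by the thermal interrupt code. */
        platform_thermal_package_notify = pkg_temp_notify;
        return 0;
}

static void __exit pkg_temp_example_exit(void)
{
        platform_thermal_package_notify = NULL;
}

module_init(pkg_temp_example_init);
module_exit(pkg_temp_example_exit);
MODULE_LICENSE("GPL");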

drivers/thermal/intel/x86_pkg_temp_thermal.c

@@ -17,8 +17,10 @@
#include <linux/pm.h>
#include <linux/thermal.h>
#include <linux/debugfs.h>
#include <asm/cpu_device_id.h>
#include <asm/mce.h>
#include "thermal_interrupt.h"
/*
* Rate control delay: Idea is to introduce denounce effect