Merge branches 'acpi-prm', 'acpi-sysfs' and 'acpi-x86'

* acpi-prm:
  ACPI: PRM: make symbol 'prm_module_list' static
  ACPI: Add \_SB._OSC bit for PRM
  ACPI: PRM: implement OperationRegion handler for the PlatformRtMechanism subtype

* acpi-sysfs:
  ACPI: sysfs: Remove trailing return statement in void function
  ACPI: sysfs: Use __ATTR_RO() and __ATTR_RW() macros
  ACPI: sysfs: Sort headers alphabetically
  ACPI: sysfs: Refactor param_get_trace_state() to drop dead code
  ACPI: sysfs: Unify pattern of memory allocations
  ACPI: sysfs: Allow bitmap list to be supplied to acpi_mask_gpe
  ACPI: sysfs: Make sparse happy about address space in use
  ACPI: sysfs: fix doc warnings in device_sysfs.c
  ACPI: sysfs: Drop four redundant return statements
  ACPI: sysfs: Fix a buffer overrun problem with description_show()

* acpi-x86:
  x86/acpi: Switch to pr_xxx log functions
Rafael J. Wysocki 2021-06-29 15:48:08 +02:00
commit 3a616ec797
11 changed files with 413 additions and 121 deletions


@ -113,7 +113,7 @@
the GPE dispatcher.
This facility can be used to prevent such uncontrolled
GPE floodings.
Format: <byte>
Format: <byte> or <bitmap-list>
acpi_no_auto_serialize [HW,ACPI]
Disable auto-serialization of AML methods
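
For the acpi_mask_gpe format change above, a brief illustration with purely made-up GPE numbers: the original single-byte form masks one GPE, while the new bitmap-list form (parsed by bitmap_parselist()) masks several at once.

	acpi_mask_gpe=0x08
	acpi_mask_gpe=0,8-11

The first line masks GPE 0x08 only; the second masks GPE 0 and GPEs 8 through 11.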


@ -5,6 +5,7 @@
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/init.h>
#include <linux/acpi.h>
@ -42,8 +43,6 @@ EXPORT_SYMBOL(acpi_disabled);
# include <asm/proto.h>
#endif /* X86 */
#define PREFIX "ACPI: "
int acpi_noirq; /* skip ACPI IRQ initialization */
static int acpi_nobgrt; /* skip ACPI BGRT */
int acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */
@ -130,15 +129,14 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
madt = (struct acpi_table_madt *)table;
if (!madt) {
printk(KERN_WARNING PREFIX "Unable to map MADT\n");
pr_warn("Unable to map MADT\n");
return -ENODEV;
}
if (madt->address) {
acpi_lapic_addr = (u64) madt->address;
printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
madt->address);
pr_debug("Local APIC address 0x%08x\n", madt->address);
}
default_acpi_madt_oem_check(madt->header.oem_id,
@ -161,7 +159,7 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
int cpu;
if (id >= MAX_LOCAL_APIC) {
printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
pr_info("skipped apicid that is too big\n");
return -EINVAL;
}
@ -213,13 +211,13 @@ acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end)
*/
if (!apic->apic_id_valid(apic_id)) {
if (enabled)
pr_warn(PREFIX "x2apic entry ignored\n");
pr_warn("x2apic entry ignored\n");
return 0;
}
acpi_register_lapic(apic_id, processor->uid, enabled);
#else
printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
pr_warn("x2apic entry ignored\n");
#endif
return 0;
@ -306,7 +304,7 @@ acpi_parse_x2apic_nmi(union acpi_subtable_headers *header,
acpi_table_print_madt_entry(&header->common);
if (x2apic_nmi->lint != 1)
printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
pr_warn("NMI not connected to LINT 1!\n");
return 0;
}
@ -324,7 +322,7 @@ acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long e
acpi_table_print_madt_entry(&header->common);
if (lapic_nmi->lint != 1)
printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
pr_warn("NMI not connected to LINT 1!\n");
return 0;
}
@ -514,14 +512,14 @@ acpi_parse_int_src_ovr(union acpi_subtable_headers * header,
if (intsrc->source_irq == 0) {
if (acpi_skip_timer_override) {
printk(PREFIX "BIOS IRQ0 override ignored.\n");
pr_warn("BIOS IRQ0 override ignored.\n");
return 0;
}
if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
&& (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
pr_warn("BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
}
}
@ -597,7 +595,7 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
if (old == new)
return;
printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
pr_warn("setting ELCR to %04x (from %04x)\n", new, old);
outb(new, 0x4d0);
outb(new >> 8, 0x4d1);
}
@ -754,7 +752,7 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
cpu = acpi_register_lapic(physid, acpi_id, ACPI_MADT_ENABLED);
if (cpu < 0) {
pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
pr_info("Unable to map lapic to logical cpu number\n");
return cpu;
}
@ -870,8 +868,7 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;
if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
printk(KERN_WARNING PREFIX "HPET timers must be located in "
"memory.\n");
pr_warn("HPET timers must be located in memory.\n");
return -1;
}
@ -883,9 +880,7 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
* want to allocate a resource there.
*/
if (!hpet_address) {
printk(KERN_WARNING PREFIX
"HPET id: %#x base: %#lx is invalid\n",
hpet_tbl->id, hpet_address);
pr_warn("HPET id: %#x base: %#lx is invalid\n", hpet_tbl->id, hpet_address);
return 0;
}
#ifdef CONFIG_X86_64
@ -896,21 +891,17 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
*/
if (hpet_address == 0xfed0000000000000UL) {
if (!hpet_force_user) {
printk(KERN_WARNING PREFIX "HPET id: %#x "
"base: 0xfed0000000000000 is bogus\n "
"try hpet=force on the kernel command line to "
"fix it up to 0xfed00000.\n", hpet_tbl->id);
pr_warn("HPET id: %#x base: 0xfed0000000000000 is bogus, try hpet=force on the kernel command line to fix it up to 0xfed00000.\n",
hpet_tbl->id);
hpet_address = 0;
return 0;
}
printk(KERN_WARNING PREFIX
"HPET id: %#x base: 0xfed0000000000000 fixed up "
"to 0xfed00000.\n", hpet_tbl->id);
pr_warn("HPET id: %#x base: 0xfed0000000000000 fixed up to 0xfed00000.\n",
hpet_tbl->id);
hpet_address >>= 32;
}
#endif
printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
hpet_tbl->id, hpet_address);
pr_info("HPET id: %#x base: %#lx\n", hpet_tbl->id, hpet_address);
/*
* Allocate and initialize the HPET firmware resource for adding into
@ -955,24 +946,24 @@ late_initcall(hpet_insert_resource);
static int __init acpi_parse_fadt(struct acpi_table_header *table)
{
if (!(acpi_gbl_FADT.boot_flags & ACPI_FADT_LEGACY_DEVICES)) {
pr_debug("ACPI: no legacy devices present\n");
pr_debug("no legacy devices present\n");
x86_platform.legacy.devices.pnpbios = 0;
}
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
!(acpi_gbl_FADT.boot_flags & ACPI_FADT_8042) &&
x86_platform.legacy.i8042 != X86_LEGACY_I8042_PLATFORM_ABSENT) {
pr_debug("ACPI: i8042 controller is absent\n");
pr_debug("i8042 controller is absent\n");
x86_platform.legacy.i8042 = X86_LEGACY_I8042_FIRMWARE_ABSENT;
}
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) {
pr_debug("ACPI: not registering RTC platform device\n");
pr_debug("not registering RTC platform device\n");
x86_platform.legacy.rtc = 0;
}
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_VGA) {
pr_debug("ACPI: probing for VGA not safe\n");
pr_debug("probing for VGA not safe\n");
x86_platform.legacy.no_vga = 1;
}
@ -997,8 +988,7 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
}
if (pmtmr_ioport)
printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
pmtmr_ioport);
pr_info("PM-Timer IO Port: %#x\n", pmtmr_ioport);
#endif
return 0;
}
@ -1024,8 +1014,7 @@ static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
acpi_parse_lapic_addr_ovr, 0);
if (count < 0) {
printk(KERN_ERR PREFIX
"Error parsing LAPIC address override entry\n");
pr_err("Error parsing LAPIC address override entry\n");
return count;
}
@ -1057,8 +1046,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
sizeof(struct acpi_table_madt),
madt_proc, ARRAY_SIZE(madt_proc), MAX_LOCAL_APIC);
if (ret < 0) {
printk(KERN_ERR PREFIX
"Error parsing LAPIC/X2APIC entries\n");
pr_err("Error parsing LAPIC/X2APIC entries\n");
return ret;
}
@ -1066,11 +1054,11 @@ static int __init acpi_parse_madt_lapic_entries(void)
x2count = madt_proc[1].count;
}
if (!count && !x2count) {
printk(KERN_ERR PREFIX "No LAPIC entries present\n");
pr_err("No LAPIC entries present\n");
/* TBD: Cleanup to allow fallback to MPS */
return -ENODEV;
} else if (count < 0 || x2count < 0) {
printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
pr_err("Error parsing LAPIC entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@ -1080,7 +1068,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI,
acpi_parse_lapic_nmi, 0);
if (count < 0 || x2count < 0) {
printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
pr_err("Error parsing LAPIC NMI entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@ -1139,7 +1127,7 @@ static void __init mp_config_acpi_legacy_irqs(void)
}
if (idx != mp_irq_entries) {
printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
pr_debug("ACPI: IRQ%d used by override.\n", i);
continue; /* IRQ already used */
}
@ -1179,26 +1167,24 @@ static int __init acpi_parse_madt_ioapic_entries(void)
* if "noapic" boot option, don't look for IO-APICs
*/
if (skip_ioapic_setup) {
printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
"due to 'noapic' option.\n");
pr_info("Skipping IOAPIC probe due to 'noapic' option.\n");
return -ENODEV;
}
count = acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
MAX_IO_APICS);
if (!count) {
printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
pr_err("No IOAPIC entries present\n");
return -ENODEV;
} else if (count < 0) {
printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
pr_err("Error parsing IOAPIC entry\n");
return count;
}
count = acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
acpi_parse_int_src_ovr, nr_irqs);
if (count < 0) {
printk(KERN_ERR PREFIX
"Error parsing interrupt source overrides entry\n");
pr_err("Error parsing interrupt source overrides entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@ -1218,7 +1204,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE,
acpi_parse_nmi_src, nr_irqs);
if (count < 0) {
printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
pr_err("Error parsing NMI SRC entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@ -1251,8 +1237,7 @@ static void __init early_acpi_process_madt(void)
/*
* Dell Precision Workstation 410, 610 come here.
*/
printk(KERN_ERR PREFIX
"Invalid BIOS MADT, disabling ACPI\n");
pr_err("Invalid BIOS MADT, disabling ACPI\n");
disable_acpi();
}
}
@ -1289,8 +1274,7 @@ static void __init acpi_process_madt(void)
/*
* Dell Precision Workstation 410, 610 come here.
*/
printk(KERN_ERR PREFIX
"Invalid BIOS MADT, disabling ACPI\n");
pr_err("Invalid BIOS MADT, disabling ACPI\n");
disable_acpi();
}
} else {
@ -1300,8 +1284,7 @@ static void __init acpi_process_madt(void)
* Boot with "acpi=off" to use MPS on such a system.
*/
if (smp_found_config) {
printk(KERN_WARNING PREFIX
"No APIC-table, disabling MPS\n");
pr_warn("No APIC-table, disabling MPS\n");
smp_found_config = 0;
}
}
@ -1311,11 +1294,9 @@ static void __init acpi_process_madt(void)
* processors, where MPS only supports physical.
*/
if (acpi_lapic && acpi_ioapic)
printk(KERN_INFO "Using ACPI (MADT) for SMP configuration "
"information\n");
pr_info("Using ACPI (MADT) for SMP configuration information\n");
else if (acpi_lapic)
printk(KERN_INFO "Using ACPI for processor (LAPIC) "
"configuration information\n");
pr_info("Using ACPI for processor (LAPIC) configuration information\n");
#endif
return;
}
@ -1323,8 +1304,7 @@ static void __init acpi_process_madt(void)
static int __init disable_acpi_irq(const struct dmi_system_id *d)
{
if (!acpi_force) {
printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
d->ident);
pr_notice("%s detected: force use of acpi=noirq\n", d->ident);
acpi_noirq_set();
}
return 0;
@ -1333,8 +1313,7 @@ static int __init disable_acpi_irq(const struct dmi_system_id *d)
static int __init disable_acpi_pci(const struct dmi_system_id *d)
{
if (!acpi_force) {
printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
d->ident);
pr_notice("%s detected: force use of pci=noacpi\n", d->ident);
acpi_disable_pci();
}
return 0;
@ -1343,11 +1322,10 @@ static int __init disable_acpi_pci(const struct dmi_system_id *d)
static int __init dmi_disable_acpi(const struct dmi_system_id *d)
{
if (!acpi_force) {
printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
pr_notice("%s detected: acpi off\n", d->ident);
disable_acpi();
} else {
printk(KERN_NOTICE
"Warning: DMI blacklist says broken, but acpi forced\n");
pr_notice("Warning: DMI blacklist says broken, but acpi forced\n");
}
return 0;
}
@ -1574,9 +1552,9 @@ int __init early_acpi_boot_init(void)
*/
if (acpi_blacklisted()) {
if (acpi_force) {
printk(KERN_WARNING PREFIX "acpi=force override\n");
pr_warn("acpi=force override\n");
} else {
printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
pr_warn("Disabling ACPI support\n");
disable_acpi();
return 1;
}
@ -1692,9 +1670,7 @@ int __init acpi_mps_check(void)
#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
/* mptable code is not built-in*/
if (acpi_disabled || acpi_noirq) {
printk(KERN_WARNING "MPS support code is not built-in.\n"
"Using acpi=off or acpi=noirq or pci=noacpi "
"may have problem\n");
pr_warn("MPS support code is not built-in, using acpi=off or acpi=noirq or pci=noacpi may have problem\n");
return 1;
}
#endif
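
Most of this file's change is mechanical: the pr_fmt() definition added at the top makes the pr_*() helpers prepend "ACPI: " automatically, which is why the hand-rolled PREFIX concatenation disappears from every message. A minimal sketch of the expansion (paraphrasing include/linux/printk.h rather than quoting it):

	#define pr_fmt(fmt) "ACPI: " fmt
	/* pr_warn(fmt, ...) becomes printk(KERN_WARNING pr_fmt(fmt), ...), so */
	pr_warn("Unable to map MADT\n");
	/* is equivalent to the old */
	printk(KERN_WARNING "ACPI: " "Unable to map MADT\n");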


@ -543,3 +543,8 @@ config X86_PM_TIMER
You should nearly always say Y here because many modern
systems require this timer.
config ACPI_PRMT
bool "Platform Runtime Mechanism Support"
depends on EFI && X86_64
default y


@ -66,6 +66,7 @@ acpi-$(CONFIG_ACPI_FPDT) += acpi_fpdt.o
acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o
acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o
acpi-$(CONFIG_ACPI_PRMT) += prmt.o
# Address translation
acpi-$(CONFIG_ACPI_ADXL) += acpi_adxl.o


@ -30,6 +30,7 @@
#include <linux/pci.h>
#include <acpi/apei.h>
#include <linux/suspend.h>
#include <linux/prmt.h>
#include "internal.h"
@ -302,6 +303,7 @@ static void acpi_bus_osc_negotiate_platform_control(void)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PRM_SUPPORT;
#ifdef CONFIG_ARM64
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GENERIC_INITIATOR_SUPPORT;
@ -1318,6 +1320,7 @@ static int __init acpi_init(void)
acpi_kobj = NULL;
}
init_prmt();
result = acpi_bus_init();
if (result) {
disable_acpi();


@ -268,6 +268,8 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev,
/**
* acpi_device_uevent_modalias - uevent modalias for ACPI-enumerated devices.
* @dev: Struct device to get ACPI device node.
* @env: Environment variables of the kobject uevent.
*
* Create the uevent modalias field for ACPI-enumerated devices.
*
@ -313,6 +315,9 @@ static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size)
/**
* acpi_device_modalias - modalias sysfs attribute for ACPI-enumerated devices.
* @dev: Struct device to get ACPI device node.
* @buf: The buffer to save pnp_modalias and of_modalias.
* @size: Size of buffer.
*
* Create the modalias sysfs attribute for ACPI-enumerated devices.
*
@ -448,7 +453,7 @@ static ssize_t description_show(struct device *dev,
(wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
acpi_dev->pnp.str_obj->buffer.length,
UTF16_LITTLE_ENDIAN, buf,
PAGE_SIZE);
PAGE_SIZE - 1);
buf[result++] = '\n';

drivers/acpi/prmt.c (new file, 303 lines)

@ -0,0 +1,303 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Author: Erik Kaneda <erik.kaneda@intel.com>
* Copyright 2020 Intel Corporation
*
* prmt.c
*
* Each PRM service is an executable that is run in a restricted environment
* that is invoked by writing to the PlatformRtMechanism OperationRegion from
* AML bytecode.
*
* init_prmt initializes the Platform Runtime Mechanism (PRM) services by
* processing data in the PRMT as well as registering an ACPI OperationRegion
* handler for the PlatformRtMechanism subtype.
*
*/
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/acpi.h>
#include <linux/prmt.h>
#include <asm/efi.h>
#pragma pack(1)
struct prm_mmio_addr_range {
u64 phys_addr;
u64 virt_addr;
u32 length;
};
struct prm_mmio_info {
u64 mmio_count;
struct prm_mmio_addr_range addr_ranges[];
};
struct prm_buffer {
u8 prm_status;
u64 efi_status;
u8 prm_cmd;
guid_t handler_guid;
};
struct prm_context_buffer {
char signature[ACPI_NAMESEG_SIZE];
u16 revision;
u16 reserved;
guid_t identifier;
u64 static_data_buffer;
struct prm_mmio_info *mmio_ranges;
};
#pragma pack()
static LIST_HEAD(prm_module_list);
struct prm_handler_info {
guid_t guid;
u64 handler_addr;
u64 static_data_buffer_addr;
u64 acpi_param_buffer_addr;
struct list_head handler_list;
};
struct prm_module_info {
guid_t guid;
u16 major_rev;
u16 minor_rev;
u16 handler_count;
struct prm_mmio_info *mmio_info;
bool updatable;
struct list_head module_list;
struct prm_handler_info handlers[];
};
static u64 efi_pa_va_lookup(u64 pa)
{
efi_memory_desc_t *md;
u64 pa_offset = pa & ~PAGE_MASK;
u64 page = pa & PAGE_MASK;
for_each_efi_memory_desc(md) {
if (md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages)
return pa_offset + md->virt_addr + page - md->phys_addr;
}
return 0;
}
#define get_first_handler(a) ((struct acpi_prmt_handler_info *) ((char *) (a) + a->handler_info_offset))
#define get_next_handler(a) ((struct acpi_prmt_handler_info *) (sizeof(struct acpi_prmt_handler_info) + (char *) a))
static int __init
acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
{
struct acpi_prmt_module_info *module_info;
struct acpi_prmt_handler_info *handler_info;
struct prm_handler_info *th;
struct prm_module_info *tm;
u64 mmio_count = 0;
u64 cur_handler = 0;
u32 module_info_size = 0;
u64 mmio_range_size = 0;
void *temp_mmio;
module_info = (struct acpi_prmt_module_info *) header;
module_info_size = struct_size(tm, handlers, module_info->handler_info_count);
tm = kmalloc(module_info_size, GFP_KERNEL);
guid_copy(&tm->guid, (guid_t *) module_info->module_guid);
tm->major_rev = module_info->major_rev;
tm->minor_rev = module_info->minor_rev;
tm->handler_count = module_info->handler_info_count;
tm->updatable = true;
if (module_info->mmio_list_pointer) {
/*
* Each module is associated with a list of addr
* ranges that it can use during the service
*/
mmio_count = *(u64 *) memremap(module_info->mmio_list_pointer, 8, MEMREMAP_WB);
mmio_range_size = struct_size(tm->mmio_info, addr_ranges, mmio_count);
tm->mmio_info = kmalloc(mmio_range_size, GFP_KERNEL);
temp_mmio = memremap(module_info->mmio_list_pointer, mmio_range_size, MEMREMAP_WB);
memmove(tm->mmio_info, temp_mmio, mmio_range_size);
} else {
mmio_range_size = struct_size(tm->mmio_info, addr_ranges, mmio_count);
tm->mmio_info = kmalloc(mmio_range_size, GFP_KERNEL);
tm->mmio_info->mmio_count = 0;
}
INIT_LIST_HEAD(&tm->module_list);
list_add(&tm->module_list, &prm_module_list);
handler_info = get_first_handler(module_info);
do {
th = &tm->handlers[cur_handler];
guid_copy(&th->guid, (guid_t *)handler_info->handler_guid);
th->handler_addr = efi_pa_va_lookup(handler_info->handler_address);
th->static_data_buffer_addr = efi_pa_va_lookup(handler_info->static_data_buffer_address);
th->acpi_param_buffer_addr = efi_pa_va_lookup(handler_info->acpi_param_buffer_address);
} while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
return 0;
}
#define GET_MODULE 0
#define GET_HANDLER 1
static void *find_guid_info(const guid_t *guid, u8 mode)
{
struct prm_handler_info *cur_handler;
struct prm_module_info *cur_module;
int i = 0;
list_for_each_entry(cur_module, &prm_module_list, module_list) {
for (i = 0; i < cur_module->handler_count; ++i) {
cur_handler = &cur_module->handlers[i];
if (guid_equal(guid, &cur_handler->guid)) {
if (mode == GET_MODULE)
return (void *)cur_module;
else
return (void *)cur_handler;
}
}
}
return NULL;
}
static struct prm_module_info *find_prm_module(const guid_t *guid)
{
return (struct prm_module_info *)find_guid_info(guid, GET_MODULE);
}
static struct prm_handler_info *find_prm_handler(const guid_t *guid)
{
return (struct prm_handler_info *) find_guid_info(guid, GET_HANDLER);
}
/* In-coming PRM commands */
#define PRM_CMD_RUN_SERVICE 0
#define PRM_CMD_START_TRANSACTION 1
#define PRM_CMD_END_TRANSACTION 2
/* statuses that can be passed back to ASL */
#define PRM_HANDLER_SUCCESS 0
#define PRM_HANDLER_ERROR 1
#define INVALID_PRM_COMMAND 2
#define PRM_HANDLER_GUID_NOT_FOUND 3
#define UPDATE_LOCK_ALREADY_HELD 4
#define UPDATE_UNLOCK_WITHOUT_LOCK 5
/*
* This is the PlatformRtMechanism opregion space handler.
* @function: indicates the read/write. In fact as the PlatformRtMechanism
* message is driven by command, only write is meaningful.
*
* @addr : not used
* @bits : not used.
* @value : it is an in/out parameter. It points to the PRM message buffer.
* @handler_context: not used
*/
static acpi_status acpi_platformrt_space_handler(u32 function,
acpi_physical_address addr,
u32 bits, acpi_integer *value,
void *handler_context,
void *region_context)
{
struct prm_buffer *buffer = ACPI_CAST_PTR(struct prm_buffer, value);
struct prm_handler_info *handler;
struct prm_module_info *module;
efi_status_t status;
struct prm_context_buffer context;
/*
* The returned acpi_status will always be AE_OK. Error values will be
* saved in the first byte of the PRM message buffer to be used by ASL.
*/
switch (buffer->prm_cmd) {
case PRM_CMD_RUN_SERVICE:
handler = find_prm_handler(&buffer->handler_guid);
module = find_prm_module(&buffer->handler_guid);
if (!handler || !module)
goto invalid_guid;
ACPI_COPY_NAMESEG(context.signature, "PRMC");
context.revision = 0x0;
context.reserved = 0x0;
context.identifier = handler->guid;
context.static_data_buffer = handler->static_data_buffer_addr;
context.mmio_ranges = module->mmio_info;
status = efi_call_virt_pointer(handler, handler_addr,
handler->acpi_param_buffer_addr,
&context);
if (status == EFI_SUCCESS) {
buffer->prm_status = PRM_HANDLER_SUCCESS;
} else {
buffer->prm_status = PRM_HANDLER_ERROR;
buffer->efi_status = status;
}
break;
case PRM_CMD_START_TRANSACTION:
module = find_prm_module(&buffer->handler_guid);
if (!module)
goto invalid_guid;
if (module->updatable)
module->updatable = false;
else
buffer->prm_status = UPDATE_LOCK_ALREADY_HELD;
break;
case PRM_CMD_END_TRANSACTION:
module = find_prm_module(&buffer->handler_guid);
if (!module)
goto invalid_guid;
if (module->updatable)
buffer->prm_status = UPDATE_UNLOCK_WITHOUT_LOCK;
else
module->updatable = true;
break;
default:
buffer->prm_status = INVALID_PRM_COMMAND;
break;
}
return AE_OK;
invalid_guid:
buffer->prm_status = PRM_HANDLER_GUID_NOT_FOUND;
return AE_OK;
}
void __init init_prmt(void)
{
acpi_status status;
int mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
sizeof (struct acpi_table_prmt_header),
0, acpi_parse_prmt, 0);
pr_info("PRM: found %u modules\n", mc);
status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_PLATFORM_RT,
&acpi_platformrt_space_handler,
NULL, NULL);
if (ACPI_FAILURE(status))
pr_alert("PRM: OperationRegion handler could not be installed\n");
}
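
The opregion handler above implements a small command protocol driven entirely by writes of a struct prm_buffer from AML. The sketch below is illustrative only and not part of this commit (in practice the firmware's ASL performs these writes through the PlatformRtMechanism OperationRegion); the helper name is hypothetical.

	/* Illustrative sketch: run one PRM service under the update lock. */
	static void prm_run_service_example(struct prm_buffer *buf, const guid_t *guid)
	{
		guid_copy(&buf->handler_guid, guid);

		/* Block firmware updates to the module while the service runs. */
		buf->prm_cmd = PRM_CMD_START_TRANSACTION;
		acpi_platformrt_space_handler(ACPI_WRITE, 0, 0, (acpi_integer *)buf, NULL, NULL);

		/* Dispatch to the EFI runtime handler matching handler_guid. */
		buf->prm_cmd = PRM_CMD_RUN_SERVICE;
		acpi_platformrt_space_handler(ACPI_WRITE, 0, 0, (acpi_integer *)buf, NULL, NULL);
		if (buf->prm_status == PRM_HANDLER_ERROR)
			pr_warn("PRM: handler failed, efi_status %llu\n", buf->efi_status);

		/* Release the update lock. */
		buf->prm_cmd = PRM_CMD_END_TRANSACTION;
		acpi_platformrt_space_handler(ACPI_WRITE, 0, 0, (acpi_integer *)buf, NULL, NULL);
	}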


@ -5,10 +5,11 @@
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/acpi.h>
#include "internal.h"
@ -254,16 +255,12 @@ static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
{
if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
return sprintf(buffer, "disable\n");
else {
if (acpi_gbl_trace_method_name) {
if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
return sprintf(buffer, "method-once\n");
else
return sprintf(buffer, "method\n");
} else
return sprintf(buffer, "enable\n");
}
return 0;
if (!acpi_gbl_trace_method_name)
return sprintf(buffer, "enable\n");
if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
return sprintf(buffer, "method-once\n");
else
return sprintf(buffer, "method\n");
}
module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
@ -388,8 +385,7 @@ acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
switch (event) {
case ACPI_TABLE_EVENT_INSTALL:
table_attr =
kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
if (!table_attr)
return AE_NO_MEMORY;
@ -420,7 +416,7 @@ static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct acpi_data_attr *data_attr;
void __iomem *base;
void *base;
ssize_t rc;
data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
@ -582,8 +578,6 @@ static void delete_gpe_attr_array(void)
kfree(counter_attrs);
}
kfree(all_attrs);
return;
}
static void gpe_count(u32 gpe_number)
@ -598,8 +592,6 @@ static void gpe_count(u32 gpe_number)
else
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
COUNT_ERROR].count++;
return;
}
static void fixed_event_count(u32 event_number)
@ -612,8 +604,6 @@ static void fixed_event_count(u32 event_number)
else
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
COUNT_ERROR].count++;
return;
}
static void acpi_global_event_handler(u32 event_type, acpi_handle device,
@ -796,6 +786,7 @@ static ssize_t counter_set(struct kobject *kobj,
* the GPE flooding for GPE 00, they need to specify the following boot
* parameter:
* acpi_mask_gpe=0x00
* Note, the parameter can be a list (see bitmap_parselist() for the details).
* The masking status can be modified by the following runtime controlling
* interface:
* echo unmask > /sys/firmware/acpi/interrupts/gpe00
@ -805,11 +796,16 @@ static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
static int __init acpi_gpe_set_masked_gpes(char *val)
{
int ret;
u8 gpe;
if (kstrtou8(val, 0, &gpe))
return -EINVAL;
set_bit(gpe, acpi_masked_gpes_map);
ret = kstrtou8(val, 0, &gpe);
if (ret) {
ret = bitmap_parselist(val, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX);
if (ret)
return ret;
} else
set_bit(gpe, acpi_masked_gpes_map);
return 1;
}
@ -841,13 +837,11 @@ void acpi_irq_stats_init(void)
num_gpes = acpi_current_gpe_count;
num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
all_attrs = kcalloc(num_counters + 1, sizeof(struct attribute *),
GFP_KERNEL);
all_attrs = kcalloc(num_counters + 1, sizeof(*all_attrs), GFP_KERNEL);
if (all_attrs == NULL)
return;
all_counters = kcalloc(num_counters, sizeof(struct event_counter),
GFP_KERNEL);
all_counters = kcalloc(num_counters, sizeof(*all_counters), GFP_KERNEL);
if (all_counters == NULL)
goto fail;
@ -855,8 +849,7 @@ void acpi_irq_stats_init(void)
if (ACPI_FAILURE(status))
goto fail;
counter_attrs = kcalloc(num_counters, sizeof(struct kobj_attribute),
GFP_KERNEL);
counter_attrs = kcalloc(num_counters, sizeof(*counter_attrs), GFP_KERNEL);
if (counter_attrs == NULL)
goto fail;
@ -906,7 +899,6 @@ void acpi_irq_stats_init(void)
fail:
delete_gpe_attr_array();
return;
}
static void __exit interrupt_stats_exit(void)
@ -914,31 +906,24 @@ static void __exit interrupt_stats_exit(void)
sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
delete_gpe_attr_array();
return;
}
static ssize_t
acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
static ssize_t pm_profile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
}
static const struct kobj_attribute pm_profile_attr =
__ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
static const struct kobj_attribute pm_profile_attr = __ATTR_RO(pm_profile);
static ssize_t hotplug_enabled_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
return sprintf(buf, "%d\n", hotplug->enabled);
}
static ssize_t hotplug_enabled_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t size)
static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t size)
{
struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
unsigned int val;
@ -950,9 +935,7 @@ static ssize_t hotplug_enabled_store(struct kobject *kobj,
return size;
}
static struct kobj_attribute hotplug_enabled_attr =
__ATTR(enabled, S_IRUGO | S_IWUSR, hotplug_enabled_show,
hotplug_enabled_store);
static struct kobj_attribute hotplug_enabled_attr = __ATTR_RW(enabled);
static struct attribute *hotplug_profile_attrs[] = {
&hotplug_enabled_attr.attr,
@ -1010,9 +993,7 @@ static ssize_t force_remove_store(struct kobject *kobj,
return size;
}
static const struct kobj_attribute force_remove_attr =
__ATTR(force_remove, S_IRUGO | S_IWUSR, force_remove_show,
force_remove_store);
static const struct kobj_attribute force_remove_attr = __ATTR_RW(force_remove);
int __init acpi_sysfs_init(void)
{


@ -39,6 +39,7 @@ static int acpi_apic_instance __initdata;
enum acpi_subtable_type {
ACPI_SUBTABLE_COMMON,
ACPI_SUBTABLE_HMAT,
ACPI_SUBTABLE_PRMT,
};
struct acpi_subtable_entry {
@ -222,6 +223,8 @@ acpi_get_entry_type(struct acpi_subtable_entry *entry)
return entry->hdr->common.type;
case ACPI_SUBTABLE_HMAT:
return entry->hdr->hmat.type;
case ACPI_SUBTABLE_PRMT:
return 0;
}
return 0;
}
@ -234,6 +237,8 @@ acpi_get_entry_length(struct acpi_subtable_entry *entry)
return entry->hdr->common.length;
case ACPI_SUBTABLE_HMAT:
return entry->hdr->hmat.length;
case ACPI_SUBTABLE_PRMT:
return entry->hdr->prmt.length;
}
return 0;
}
@ -246,6 +251,8 @@ acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
return sizeof(entry->hdr->common);
case ACPI_SUBTABLE_HMAT:
return sizeof(entry->hdr->hmat);
case ACPI_SUBTABLE_PRMT:
return sizeof(entry->hdr->prmt);
}
return 0;
}
@ -255,6 +262,8 @@ acpi_get_subtable_type(char *id)
{
if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
return ACPI_SUBTABLE_HMAT;
if (strncmp(id, ACPI_SIG_PRMT, 4) == 0)
return ACPI_SUBTABLE_PRMT;
return ACPI_SUBTABLE_COMMON;
}


@ -132,6 +132,7 @@ enum acpi_address_range_id {
union acpi_subtable_headers {
struct acpi_subtable_header common;
struct acpi_hmat_structure hmat;
struct acpi_prmt_module_header prmt;
};
typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
@ -550,6 +551,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_OSLPI_SUPPORT 0x00000100
#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000
#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000
#define OSC_SB_PRM_SUPPORT 0x00020000
#define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000
extern bool osc_sb_apei_support_acked;

include/linux/prmt.h (new file, 7 lines)

@ -0,0 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifdef CONFIG_ACPI_PRMT
void init_prmt(void);
#else
static inline void init_prmt(void) { }
#endif