Merge branch irq/affinity-nosmp into irq/plic-masking

* irq/affinity-nosmp:
  : .
  : non-SMP IRQ affinity fixes courtesy of Samuel Holland:
  :
  : "This series solves some inconsistency with how IRQ affinity masks are
  : handled between SMP and non-SMP configurations.
  :
  : In non-SMP configs, an IRQ's true affinity is always cpumask_of(0), so
  : irq_{,data_}get_affinity_mask now return that, instead of returning an
  : uninitialized per-IRQ cpumask. This change makes iterating over the
  : affinity mask do the right thing in both SMP and non-SMP configurations.
  :
  : To accomplish that:
  :  - patches 1-3 disable some library code that was broken anyway on !SMP
  :  - patches 4-7 refactor the code so that irq_{,data_}get_affinity_mask
  :    can return a const cpumask, since that is what cpumask_of provides
  :  - patch 8 drops the per-IRQ cpumask and replaces it with cpumask_of(0)"
  : .
  PCI: hv: Take a const cpumask in hv_compose_msi_req_get_cpu()
  genirq: Provide an IRQ affinity mask in non-SMP configs
  genirq: Return a const cpumask from irq_data_get_affinity_mask
  genirq: Add and use an irq_data_update_affinity helper
  genirq: Refactor accessors to use irq_data_get_affinity_mask
  genirq: Drop redundant irq_init_effective_affinity
  genirq: GENERIC_IRQ_EFFECTIVE_AFF_MASK depends on SMP
  genirq: GENERIC_IRQ_IPI depends on SMP
  irqchip/mips-gic: Only register IPI domain when SMP is enabled

Signed-off-by: Marc Zyngier <maz@kernel.org>
This commit is contained in:
Marc Zyngier 2022-07-10 09:49:44 +01:00
commit 4f4b8f8f95
24 changed files with 143 additions and 97 deletions

View file

@@ -60,7 +60,7 @@ int irq_select_affinity(unsigned int irq)
cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
last_cpu = cpu; last_cpu = cpu;
cpumask_copy(irq_data_get_affinity_mask(data), cpumask_of(cpu)); irq_data_update_affinity(data, cpumask_of(cpu));
chip->irq_set_affinity(data, cpumask_of(cpu), false); chip->irq_set_affinity(data, cpumask_of(cpu), false);
return 0; return 0;
} }

View file

@@ -40,7 +40,7 @@ config ARCH_HIP04
select HAVE_ARM_ARCH_TIMER select HAVE_ARM_ARCH_TIMER
select MCPM if SMP select MCPM if SMP
select MCPM_QUAD_CLUSTER if SMP select MCPM_QUAD_CLUSTER if SMP
select GENERIC_IRQ_EFFECTIVE_AFF_MASK select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
help help
Support for Hisilicon HiP04 SoC family Support for Hisilicon HiP04 SoC family

View file

@@ -834,7 +834,7 @@ iosapic_unregister_intr (unsigned int gsi)
if (iosapic_intr_info[irq].count == 0) { if (iosapic_intr_info[irq].count == 0) {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* Clear affinity */ /* Clear affinity */
cpumask_setall(irq_get_affinity_mask(irq)); irq_data_update_affinity(irq_get_irq_data(irq), cpu_all_mask);
#endif #endif
/* Clear the interrupt information */ /* Clear the interrupt information */
iosapic_intr_info[irq].dest = 0; iosapic_intr_info[irq].dest = 0;

View file

@@ -57,8 +57,8 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
void set_irq_affinity_info (unsigned int irq, int hwid, int redir) void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
{ {
if (irq < NR_IRQS) { if (irq < NR_IRQS) {
cpumask_copy(irq_get_affinity_mask(irq), irq_data_update_affinity(irq_get_irq_data(irq),
cpumask_of(cpu_logical_id(hwid))); cpumask_of(cpu_logical_id(hwid)));
irq_redir[irq] = (char) (redir & 0xff); irq_redir[irq] = (char) (redir & 0xff);
} }
} }

View file

@@ -37,7 +37,7 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata,
msg.data = data; msg.data = data;
pci_write_msi_msg(irq, &msg); pci_write_msi_msg(irq, &msg);
cpumask_copy(irq_data_get_affinity_mask(idata), cpumask_of(cpu)); irq_data_update_affinity(idata, cpumask_of(cpu));
return 0; return 0;
} }
@@ -132,7 +132,7 @@ static int dmar_msi_set_affinity(struct irq_data *data,
msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu)); msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
dmar_msi_write(irq, &msg); dmar_msi_write(irq, &msg);
cpumask_copy(irq_data_get_affinity_mask(data), mask); irq_data_update_affinity(data, mask);
return 0; return 0;
} }

View file

@@ -263,7 +263,7 @@ static int next_cpu_for_irq(struct irq_data *data)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
int cpu; int cpu;
struct cpumask *mask = irq_data_get_affinity_mask(data); const struct cpumask *mask = irq_data_get_affinity_mask(data);
int weight = cpumask_weight(mask); int weight = cpumask_weight(mask);
struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data); struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
@@ -758,7 +758,7 @@ static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
cpumask_t new_affinity; cpumask_t new_affinity;
struct cpumask *mask = irq_data_get_affinity_mask(data); const struct cpumask *mask = irq_data_get_affinity_mask(data);
if (!cpumask_test_cpu(cpu, mask)) if (!cpumask_test_cpu(cpu, mask))
return; return;

View file

@@ -315,7 +315,7 @@ unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{ {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
struct irq_data *d = irq_get_irq_data(irq); struct irq_data *d = irq_get_irq_data(irq);
cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu)); irq_data_update_affinity(d, cpumask_of(cpu));
#endif #endif
return per_cpu(cpu_data, cpu).txn_addr; return per_cpu(cpu_data, cpu).txn_addr;

View file

@@ -230,16 +230,17 @@ void migrate_irqs(void)
struct irq_data *data = irq_get_irq_data(irq); struct irq_data *data = irq_get_irq_data(irq);
if (irq_data_get_node(data) == cpu) { if (irq_data_get_node(data) == cpu) {
struct cpumask *mask = irq_data_get_affinity_mask(data); const struct cpumask *mask = irq_data_get_affinity_mask(data);
unsigned int newcpu = cpumask_any_and(mask, unsigned int newcpu = cpumask_any_and(mask,
cpu_online_mask); cpu_online_mask);
if (newcpu >= nr_cpu_ids) { if (newcpu >= nr_cpu_ids) {
pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n", pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
irq, cpu); irq, cpu);
cpumask_setall(mask); irq_set_affinity(irq, cpu_all_mask);
} else {
irq_set_affinity(irq, mask);
} }
irq_set_affinity(irq, mask);
} }
} }
} }

View file

@@ -192,7 +192,7 @@ static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct pci_dev *dev; struct pci_dev *dev;
struct hv_interrupt_entry out_entry, *stored_entry; struct hv_interrupt_entry out_entry, *stored_entry;
struct irq_cfg *cfg = irqd_cfg(data); struct irq_cfg *cfg = irqd_cfg(data);
cpumask_t *affinity; const cpumask_t *affinity;
int cpu; int cpu;
u64 status; u64 status;

View file

@@ -169,7 +169,7 @@ void migrate_irqs(void)
for_each_active_irq(i) { for_each_active_irq(i) {
struct irq_data *data = irq_get_irq_data(i); struct irq_data *data = irq_get_irq_data(i);
struct cpumask *mask; const struct cpumask *mask;
unsigned int newcpu; unsigned int newcpu;
if (irqd_is_per_cpu(data)) if (irqd_is_per_cpu(data))
@@ -185,9 +185,10 @@ void migrate_irqs(void)
pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n", pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
i, cpu); i, cpu);
cpumask_setall(mask); irq_set_affinity(i, cpu_all_mask);
} else {
irq_set_affinity(i, mask);
} }
irq_set_affinity(i, mask);
} }
} }
#endif /* CONFIG_HOTPLUG_CPU */ #endif /* CONFIG_HOTPLUG_CPU */

View file

@@ -194,7 +194,7 @@ hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
u32 vector; u32 vector;
struct irq_cfg *cfg; struct irq_cfg *cfg;
int ioapic_id; int ioapic_id;
struct cpumask *affinity; const struct cpumask *affinity;
int cpu; int cpu;
struct hv_interrupt_entry entry; struct hv_interrupt_entry entry;
struct hyperv_root_ir_data *data = irq_data->chip_data; struct hyperv_root_ir_data *data = irq_data->chip_data;

View file

@@ -8,7 +8,7 @@ config IRQCHIP
config ARM_GIC config ARM_GIC
bool bool
select IRQ_DOMAIN_HIERARCHY select IRQ_DOMAIN_HIERARCHY
select GENERIC_IRQ_EFFECTIVE_AFF_MASK select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
config ARM_GIC_PM config ARM_GIC_PM
bool bool
@@ -34,7 +34,7 @@ config ARM_GIC_V3
bool bool
select IRQ_DOMAIN_HIERARCHY select IRQ_DOMAIN_HIERARCHY
select PARTITION_PERCPU select PARTITION_PERCPU
select GENERIC_IRQ_EFFECTIVE_AFF_MASK select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
config ARM_GIC_V3_ITS config ARM_GIC_V3_ITS
bool bool
@@ -76,7 +76,7 @@ config ARMADA_370_XP_IRQ
bool bool
select GENERIC_IRQ_CHIP select GENERIC_IRQ_CHIP
select PCI_MSI if PCI select PCI_MSI if PCI
select GENERIC_IRQ_EFFECTIVE_AFF_MASK select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
config ALPINE_MSI config ALPINE_MSI
bool bool
@@ -112,7 +112,7 @@ config BCM6345_L1_IRQ
bool bool
select GENERIC_IRQ_CHIP select GENERIC_IRQ_CHIP
select IRQ_DOMAIN select IRQ_DOMAIN
select GENERIC_IRQ_EFFECTIVE_AFF_MASK select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
config BCM7038_L1_IRQ config BCM7038_L1_IRQ
tristate "Broadcom STB 7038-style L1/L2 interrupt controller driver" tristate "Broadcom STB 7038-style L1/L2 interrupt controller driver"
@@ -120,7 +120,7 @@ config BCM7038_L1_IRQ
default ARCH_BRCMSTB || BMIPS_GENERIC default ARCH_BRCMSTB || BMIPS_GENERIC
select GENERIC_IRQ_CHIP select GENERIC_IRQ_CHIP
select IRQ_DOMAIN select IRQ_DOMAIN
select GENERIC_IRQ_EFFECTIVE_AFF_MASK select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
config BCM7120_L2_IRQ config BCM7120_L2_IRQ
tristate "Broadcom STB 7120-style L2 interrupt controller driver" tristate "Broadcom STB 7120-style L2 interrupt controller driver"
@@ -177,9 +177,9 @@ config MADERA_IRQ
config IRQ_MIPS_CPU config IRQ_MIPS_CPU
bool bool
select GENERIC_IRQ_CHIP select GENERIC_IRQ_CHIP
select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING select GENERIC_IRQ_IPI if SMP && SYS_SUPPORTS_MULTITHREADING
select IRQ_DOMAIN select IRQ_DOMAIN
select GENERIC_IRQ_EFFECTIVE_AFF_MASK select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
config CLPS711X_IRQCHIP config CLPS711X_IRQCHIP
bool bool
@@ -294,7 +294,7 @@ config VERSATILE_FPGA_IRQ_NR
config XTENSA_MX config XTENSA_MX
bool bool
select IRQ_DOMAIN select IRQ_DOMAIN
select GENERIC_IRQ_EFFECTIVE_AFF_MASK select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
config XILINX_INTC config XILINX_INTC
bool "Xilinx Interrupt Controller IP" bool "Xilinx Interrupt Controller IP"
@@ -322,7 +322,8 @@ config KEYSTONE_IRQ
config MIPS_GIC config MIPS_GIC
bool bool
select GENERIC_IRQ_IPI select GENERIC_IRQ_IPI if SMP
select IRQ_DOMAIN_HIERARCHY
select MIPS_CM select MIPS_CM
config INGENIC_IRQ config INGENIC_IRQ

View file

@@ -216,11 +216,11 @@ static int bcm6345_l1_set_affinity(struct irq_data *d,
enabled = intc->cpus[old_cpu]->enable_cache[word] & mask; enabled = intc->cpus[old_cpu]->enable_cache[word] & mask;
if (enabled) if (enabled)
__bcm6345_l1_mask(d); __bcm6345_l1_mask(d);
cpumask_copy(irq_data_get_affinity_mask(d), dest); irq_data_update_affinity(d, dest);
if (enabled) if (enabled)
__bcm6345_l1_unmask(d); __bcm6345_l1_unmask(d);
} else { } else {
cpumask_copy(irq_data_get_affinity_mask(d), dest); irq_data_update_affinity(d, dest);
} }
raw_spin_unlock_irqrestore(&intc->lock, flags); raw_spin_unlock_irqrestore(&intc->lock, flags);

View file

@@ -52,13 +52,15 @@ static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);
static DEFINE_SPINLOCK(gic_lock); static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain; static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs; static int gic_shared_intrs;
static unsigned int gic_cpu_pin; static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin; static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
#ifdef CONFIG_GENERIC_IRQ_IPI
static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS); static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS); static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
#endif /* CONFIG_GENERIC_IRQ_IPI */
static struct gic_all_vpes_chip_data { static struct gic_all_vpes_chip_data {
u32 map; u32 map;
@@ -472,9 +474,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
u32 map; u32 map;
if (hwirq >= GIC_SHARED_HWIRQ_BASE) { if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
#ifdef CONFIG_GENERIC_IRQ_IPI
/* verify that shared irqs don't conflict with an IPI irq */ /* verify that shared irqs don't conflict with an IPI irq */
if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv)) if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
return -EBUSY; return -EBUSY;
#endif /* CONFIG_GENERIC_IRQ_IPI */
err = irq_domain_set_hwirq_and_chip(d, virq, hwirq, err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
&gic_level_irq_controller, &gic_level_irq_controller,
@@ -567,6 +571,8 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
.map = gic_irq_domain_map, .map = gic_irq_domain_map,
}; };
#ifdef CONFIG_GENERIC_IRQ_IPI
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize, const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, irq_hw_number_t *out_hwirq,
@@ -670,6 +676,48 @@ static const struct irq_domain_ops gic_ipi_domain_ops = {
.match = gic_ipi_domain_match, .match = gic_ipi_domain_match,
}; };
static int gic_register_ipi_domain(struct device_node *node)
{
struct irq_domain *gic_ipi_domain;
unsigned int v[2], num_ipis;
gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
IRQ_DOMAIN_FLAG_IPI_PER_CPU,
GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
node, &gic_ipi_domain_ops, NULL);
if (!gic_ipi_domain) {
pr_err("Failed to add IPI domain");
return -ENXIO;
}
irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
if (node &&
!of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
bitmap_set(ipi_resrv, v[0], v[1]);
} else {
/*
* Reserve 2 interrupts per possible CPU/VP for use as IPIs,
* meeting the requirements of arch/mips SMP.
*/
num_ipis = 2 * num_possible_cpus();
bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
}
bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
return 0;
}
#else /* !CONFIG_GENERIC_IRQ_IPI */
static inline int gic_register_ipi_domain(struct device_node *node)
{
return 0;
}
#endif /* !CONFIG_GENERIC_IRQ_IPI */
static int gic_cpu_startup(unsigned int cpu) static int gic_cpu_startup(unsigned int cpu)
{ {
/* Enable or disable EIC */ /* Enable or disable EIC */
@@ -688,11 +736,12 @@ static int gic_cpu_startup(unsigned int cpu)
static int __init gic_of_init(struct device_node *node, static int __init gic_of_init(struct device_node *node,
struct device_node *parent) struct device_node *parent)
{ {
unsigned int cpu_vec, i, gicconfig, v[2], num_ipis; unsigned int cpu_vec, i, gicconfig;
unsigned long reserved; unsigned long reserved;
phys_addr_t gic_base; phys_addr_t gic_base;
struct resource res; struct resource res;
size_t gic_len; size_t gic_len;
int ret;
/* Find the first available CPU vector. */ /* Find the first available CPU vector. */
i = 0; i = 0;
@@ -780,30 +829,9 @@ static int __init gic_of_init(struct device_node *node,
return -ENXIO; return -ENXIO;
} }
gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain, ret = gic_register_ipi_domain(node);
IRQ_DOMAIN_FLAG_IPI_PER_CPU, if (ret)
GIC_NUM_LOCAL_INTRS + gic_shared_intrs, return ret;
node, &gic_ipi_domain_ops, NULL);
if (!gic_ipi_domain) {
pr_err("Failed to add IPI domain");
return -ENXIO;
}
irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
if (node &&
!of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
bitmap_set(ipi_resrv, v[0], v[1]);
} else {
/*
* Reserve 2 interrupts per possible CPU/VP for use as IPIs,
* meeting the requirements of arch/mips SMP.
*/
num_ipis = 2 * num_possible_cpus();
bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
}
bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
board_bind_eic_interrupt = &gic_bind_eic_interrupt; board_bind_eic_interrupt = &gic_bind_eic_interrupt;

View file

@@ -677,7 +677,7 @@ static int iosapic_set_affinity_irq(struct irq_data *d,
if (dest_cpu < 0) if (dest_cpu < 0)
return -1; return -1;
cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(dest_cpu)); irq_data_update_affinity(d, cpumask_of(dest_cpu));
vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu); vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu);
spin_lock_irqsave(&iosapic_lock, flags); spin_lock_irqsave(&iosapic_lock, flags);

View file

@@ -642,7 +642,7 @@ static void hv_arch_irq_unmask(struct irq_data *data)
struct hv_retarget_device_interrupt *params; struct hv_retarget_device_interrupt *params;
struct tran_int_desc *int_desc; struct tran_int_desc *int_desc;
struct hv_pcibus_device *hbus; struct hv_pcibus_device *hbus;
struct cpumask *dest; const struct cpumask *dest;
cpumask_var_t tmp; cpumask_var_t tmp;
struct pci_bus *pbus; struct pci_bus *pbus;
struct pci_dev *pdev; struct pci_dev *pdev;
@@ -1613,7 +1613,7 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp,
} }
static u32 hv_compose_msi_req_v1( static u32 hv_compose_msi_req_v1(
struct pci_create_interrupt *int_pkt, struct cpumask *affinity, struct pci_create_interrupt *int_pkt, const struct cpumask *affinity,
u32 slot, u8 vector, u8 vector_count) u32 slot, u8 vector, u8 vector_count)
{ {
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
@@ -1635,13 +1635,13 @@ static u32 hv_compose_msi_req_v1(
* Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
* by subsequent retarget in hv_irq_unmask(). * by subsequent retarget in hv_irq_unmask().
*/ */
static int hv_compose_msi_req_get_cpu(struct cpumask *affinity) static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
{ {
return cpumask_first_and(affinity, cpu_online_mask); return cpumask_first_and(affinity, cpu_online_mask);
} }
static u32 hv_compose_msi_req_v2( static u32 hv_compose_msi_req_v2(
struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity, struct pci_create_interrupt2 *int_pkt, const struct cpumask *affinity,
u32 slot, u8 vector, u8 vector_count) u32 slot, u8 vector, u8 vector_count)
{ {
int cpu; int cpu;
@@ -1660,7 +1660,7 @@ static u32 hv_compose_msi_req_v2(
} }
static u32 hv_compose_msi_req_v3( static u32 hv_compose_msi_req_v3(
struct pci_create_interrupt3 *int_pkt, struct cpumask *affinity, struct pci_create_interrupt3 *int_pkt, const struct cpumask *affinity,
u32 slot, u32 vector, u8 vector_count) u32 slot, u32 vector, u8 vector_count)
{ {
int cpu; int cpu;
@@ -1697,7 +1697,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct hv_pci_dev *hpdev; struct hv_pci_dev *hpdev;
struct pci_bus *pbus; struct pci_bus *pbus;
struct pci_dev *pdev; struct pci_dev *pdev;
struct cpumask *dest; const struct cpumask *dest;
struct compose_comp_ctxt comp; struct compose_comp_ctxt comp;
struct tran_int_desc *int_desc; struct tran_int_desc *int_desc;
struct msi_desc *msi_desc; struct msi_desc *msi_desc;

View file

@@ -72,7 +72,7 @@ static int intc_set_affinity(struct irq_data *data,
if (!cpumask_intersects(cpumask, cpu_online_mask)) if (!cpumask_intersects(cpumask, cpu_online_mask))
return -1; return -1;
cpumask_copy(irq_data_get_affinity_mask(data), cpumask); irq_data_update_affinity(data, cpumask);
return IRQ_SET_MASK_OK_NOCOPY; return IRQ_SET_MASK_OK_NOCOPY;
} }

View file

@@ -528,9 +528,10 @@ static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
BUG_ON(irq == -1); BUG_ON(irq == -1);
if (IS_ENABLED(CONFIG_SMP) && force_affinity) { if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu)); struct irq_data *data = irq_get_irq_data(irq);
cpumask_copy(irq_get_effective_affinity_mask(irq),
cpumask_of(cpu)); irq_data_update_affinity(data, cpumask_of(cpu));
irq_data_update_effective_affinity(data, cpumask_of(cpu));
} }
xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu); xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);

View file

@@ -151,7 +151,9 @@ struct irq_common_data {
#endif #endif
void *handler_data; void *handler_data;
struct msi_desc *msi_desc; struct msi_desc *msi_desc;
#ifdef CONFIG_SMP
cpumask_var_t affinity; cpumask_var_t affinity;
#endif
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
cpumask_var_t effective_affinity; cpumask_var_t effective_affinity;
#endif #endif
@@ -879,21 +881,34 @@ static inline int irq_data_get_node(struct irq_data *d)
return irq_common_data_get_node(d->common); return irq_common_data_get_node(d->common);
} }
static inline struct cpumask *irq_get_affinity_mask(int irq) static inline
const struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
#ifdef CONFIG_SMP
return d->common->affinity;
#else
return cpumask_of(0);
#endif
}
static inline void irq_data_update_affinity(struct irq_data *d,
const struct cpumask *m)
{
#ifdef CONFIG_SMP
cpumask_copy(d->common->affinity, m);
#endif
}
static inline const struct cpumask *irq_get_affinity_mask(int irq)
{ {
struct irq_data *d = irq_get_irq_data(irq); struct irq_data *d = irq_get_irq_data(irq);
return d ? d->common->affinity : NULL; return d ? irq_data_get_affinity_mask(d) : NULL;
}
static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
return d->common->affinity;
} }
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static inline static inline
struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{ {
return d->common->effective_affinity; return d->common->effective_affinity;
} }
@@ -908,13 +923,14 @@ static inline void irq_data_update_effective_affinity(struct irq_data *d,
{ {
} }
static inline static inline
struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{ {
return d->common->affinity; return irq_data_get_affinity_mask(d);
} }
#endif #endif
static inline struct cpumask *irq_get_effective_affinity_mask(unsigned int irq) static inline
const struct cpumask *irq_get_effective_affinity_mask(unsigned int irq)
{ {
struct irq_data *d = irq_get_irq_data(irq); struct irq_data *d = irq_get_irq_data(irq);

View file

@@ -24,6 +24,7 @@ config GENERIC_IRQ_SHOW_LEVEL
# Supports effective affinity mask # Supports effective affinity mask
config GENERIC_IRQ_EFFECTIVE_AFF_MASK config GENERIC_IRQ_EFFECTIVE_AFF_MASK
depends on SMP
bool bool
# Support for delayed migration from interrupt context # Support for delayed migration from interrupt context
@@ -82,6 +83,7 @@ config IRQ_FASTEOI_HIERARCHY_HANDLERS
# Generic IRQ IPI support # Generic IRQ IPI support
config GENERIC_IRQ_IPI config GENERIC_IRQ_IPI
bool bool
depends on SMP
select IRQ_DOMAIN_HIERARCHY select IRQ_DOMAIN_HIERARCHY
# Generic MSI interrupt support # Generic MSI interrupt support

View file

@@ -188,7 +188,8 @@ enum {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
static int static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force) __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
bool force)
{ {
struct irq_data *d = irq_desc_get_irq_data(desc); struct irq_data *d = irq_desc_get_irq_data(desc);
@@ -224,7 +225,8 @@ __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
} }
#else #else
static __always_inline int static __always_inline int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force) __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
bool force)
{ {
return IRQ_STARTUP_NORMAL; return IRQ_STARTUP_NORMAL;
} }
@@ -252,7 +254,7 @@ static int __irq_startup(struct irq_desc *desc)
int irq_startup(struct irq_desc *desc, bool resend, bool force) int irq_startup(struct irq_desc *desc, bool resend, bool force)
{ {
struct irq_data *d = irq_desc_get_irq_data(desc); struct irq_data *d = irq_desc_get_irq_data(desc);
struct cpumask *aff = irq_data_get_affinity_mask(d); const struct cpumask *aff = irq_data_get_affinity_mask(d);
int ret = 0; int ret = 0;
desc->depth = 0; desc->depth = 0;

View file

@@ -30,7 +30,7 @@ static void irq_debug_show_bits(struct seq_file *m, int ind, unsigned int state,
static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc) static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc)
{ {
struct irq_data *data = irq_desc_get_irq_data(desc); struct irq_data *data = irq_desc_get_irq_data(desc);
struct cpumask *msk; const struct cpumask *msk;
msk = irq_data_get_affinity_mask(data); msk = irq_data_get_affinity_mask(data);
seq_printf(m, "affinity: %*pbl\n", cpumask_pr_args(msk)); seq_printf(m, "affinity: %*pbl\n", cpumask_pr_args(msk));

View file

@@ -115,11 +115,11 @@ int irq_reserve_ipi(struct irq_domain *domain,
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest) int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
{ {
struct irq_data *data = irq_get_irq_data(irq); struct irq_data *data = irq_get_irq_data(irq);
struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL; const struct cpumask *ipimask;
struct irq_domain *domain; struct irq_domain *domain;
unsigned int nr_irqs; unsigned int nr_irqs;
if (!irq || !data || !ipimask) if (!irq || !data)
return -EINVAL; return -EINVAL;
domain = data->domain; domain = data->domain;
@@ -131,7 +131,8 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
return -EINVAL; return -EINVAL;
} }
if (WARN_ON(!cpumask_subset(dest, ipimask))) ipimask = irq_data_get_affinity_mask(data);
if (!ipimask || WARN_ON(!cpumask_subset(dest, ipimask)))
/* /*
* Must be destroying a subset of CPUs to which this IPI * Must be destroying a subset of CPUs to which this IPI
* was set up to target * was set up to target
@@ -162,12 +163,13 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu) irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
{ {
struct irq_data *data = irq_get_irq_data(irq); struct irq_data *data = irq_get_irq_data(irq);
struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL; const struct cpumask *ipimask;
if (!data || !ipimask || cpu >= nr_cpu_ids) if (!data || cpu >= nr_cpu_ids)
return INVALID_HWIRQ; return INVALID_HWIRQ;
if (!cpumask_test_cpu(cpu, ipimask)) ipimask = irq_data_get_affinity_mask(data);
if (!ipimask || !cpumask_test_cpu(cpu, ipimask))
return INVALID_HWIRQ; return INVALID_HWIRQ;
/* /*
@@ -186,7 +188,7 @@ EXPORT_SYMBOL_GPL(ipi_get_hwirq);
static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data, static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
const struct cpumask *dest, unsigned int cpu) const struct cpumask *dest, unsigned int cpu)
{ {
struct cpumask *ipimask = irq_data_get_affinity_mask(data); const struct cpumask *ipimask = irq_data_get_affinity_mask(data);
if (!chip || !ipimask) if (!chip || !ipimask)
return -EINVAL; return -EINVAL;

View file

@@ -205,16 +205,8 @@ static void irq_validate_effective_affinity(struct irq_data *data)
pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n", pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
chip->name, data->irq); chip->name, data->irq);
} }
static inline void irq_init_effective_affinity(struct irq_data *data,
const struct cpumask *mask)
{
cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
}
#else #else
static inline void irq_validate_effective_affinity(struct irq_data *data) { } static inline void irq_validate_effective_affinity(struct irq_data *data) { }
static inline void irq_init_effective_affinity(struct irq_data *data,
const struct cpumask *mask) { }
#endif #endif
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
@@ -347,7 +339,7 @@ static bool irq_set_affinity_deactivated(struct irq_data *data,
return false; return false;
cpumask_copy(desc->irq_common_data.affinity, mask); cpumask_copy(desc->irq_common_data.affinity, mask);
irq_init_effective_affinity(data, mask); irq_data_update_effective_affinity(data, mask);
irqd_set(data, IRQD_AFFINITY_SET); irqd_set(data, IRQD_AFFINITY_SET);
return true; return true;
} }