Mirror of https://github.com/torvalds/linux, synced 2024-11-05 18:23:50 +00:00
Commit f23f5bece6
The PCI interrupt vectors intended to be associated with a queue may not start at 0; a driver may allocate pre_vectors for special use. This patch adds an offset parameter so blk-mq may find the intended affinity mask and updates all drivers using this API accordingly.

Cc: Don Brace <don.brace@microsemi.com>
Cc: <qla2xxx-upstream@qlogic.com>
Cc: <linux-scsi@vger.kernel.org>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
55 lines
1.7 KiB
C
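Before the file itself, a minimal sketch of the allocation pattern the new offset parameter accommodates. Everything here is hypothetical (the foo_setup_irqs() name, the single pre_vector, the MSI-X-only flags are illustrative, not from this commit); it only shows a driver keeping vector 0 for its own use so that the affinity-spread queue vectors start at index 1:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Reserve vector 0 for non-queue work (e.g. config/admin interrupts); the
 * remaining vectors are spread across CPUs and serve the hardware queues,
 * so queue vectors start at index 1, not 0.
 */
static int foo_setup_irqs(struct pci_dev *pdev, unsigned int nr_queues)
{
        struct irq_affinity affd = {
                .pre_vectors = 1,       /* vector 0 is not affinity-managed */
        };

        return pci_alloc_irq_vectors_affinity(pdev, 2, nr_queues + 1,
                                               PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
                                               &affd);
}

With such an allocation, passing 0 as the offset would make blk-mq read the affinity of the reserved vector for queue 0; passing the pre_vector count keeps queues and vectors lined up.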
/*
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/kobject.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/pci.h>
#include <linux/module.h>

/**
 * blk_mq_pci_map_queues - provide a default queue mapping for PCI device
 * @set:	tagset to provide the mapping for
 * @pdev:	PCI device associated with @set.
 * @offset:	Offset to use for the pci irq vector
 *
 * This function assumes the PCI device @pdev has at least as many available
 * interrupt vectors as @set has queues. It will then query the vector
 * corresponding to each queue for its affinity mask and build a queue mapping
 * that maps a queue to the CPUs that have irq affinity for the corresponding
 * vector.
 */
int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
                          int offset)
{
        const struct cpumask *mask;
        unsigned int queue, cpu;

        for (queue = 0; queue < set->nr_hw_queues; queue++) {
                mask = pci_irq_get_affinity(pdev, queue + offset);
                if (!mask)
                        goto fallback;

                for_each_cpu(cpu, mask)
                        set->mq_map[cpu] = queue;
        }

        return 0;

fallback:
        WARN_ON_ONCE(set->nr_hw_queues > 1);
        for_each_possible_cpu(cpu)
                set->mq_map[cpu] = 0;
        return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
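A companion usage sketch for the function above, again with hypothetical names (foo_dev, foo_map_queues, foo_mq_ops) and a pre_vector count of 1, none of which come from this commit: the driver's ->map_queues callback passes its reserved-vector count as @offset, so queue 0 maps against vector 1, queue 1 against vector 2, and so on.

#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/pci.h>

struct foo_dev {
        struct pci_dev *pdev;
        /* ... */
};

/* Map each hardware queue to the CPUs that have irq affinity for its vector,
 * skipping the one pre_vector reserved at allocation time.
 */
static int foo_map_queues(struct blk_mq_tag_set *set)
{
        struct foo_dev *foo = set->driver_data;

        return blk_mq_pci_map_queues(set, foo->pdev, 1 /* pre_vectors */);
}

static const struct blk_mq_ops foo_mq_ops = {
        .map_queues     = foo_map_queues,
        /* .queue_rq and the rest of the ops go here */
};

If any vector has no affinity mask, the function above falls back to mapping every CPU to queue 0, which is why the WARN_ON_ONCE() only fires when more than one hardware queue was requested.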