Mirror of https://gitlab.com/qemu-project/qemu (synced 2024-11-05 20:35:44 +00:00)
msix: Split PBA into its own MemoryRegion
These don't have to be contiguous. Size them to only what they need and use separate MemoryRegions for the vector table and PBA.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
commit d35e428c84 (parent 2cf62ad742)
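The new msix_init() below sizes each region from the vector count alone: 16 bytes per vector for the table (PCI_MSIX_ENTRY_SIZE) and one pending bit per vector for the PBA, rounded up to a multiple of 64 bits. A minimal standalone sketch of that arithmetic, not part of the patch itself (plain C; the ALIGN_UP helper stands in for QEMU's QEMU_ALIGN_UP and the vector counts are only illustrative):

#include <stdio.h>

#define PCI_MSIX_ENTRY_SIZE 16                        /* one MSI-X table entry is 16 bytes */
#define ALIGN_UP(n, m) (((n) + (m) - 1) / (m) * (m))  /* stand-in for QEMU_ALIGN_UP */

int main(void)
{
    for (unsigned nentries = 1; nentries <= 256; nentries *= 4) {
        unsigned table_size = nentries * PCI_MSIX_ENTRY_SIZE;  /* vector table bytes */
        unsigned pba_size = ALIGN_UP(nentries, 64) / 8;        /* pending bits, 64-bit aligned */
        printf("%3u vectors: table %4u bytes, PBA %2u bytes\n",
               nentries, table_size, pba_size);
    }
    return 0;
}

Under the old layout both structures shared a single MSIX_PAGE_SIZE allocation and one MemoryRegion regardless of how few vectors the device exposed.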
2 changed files with 73 additions and 43 deletions

hw/msix.c (106 lines changed)
@@ -37,7 +37,7 @@
 
 static MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
 {
-    uint8_t *table_entry = dev->msix_table_page + vector * PCI_MSIX_ENTRY_SIZE;
+    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
     MSIMessage msg;
 
     msg.address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
@@ -93,7 +93,7 @@ static uint8_t msix_pending_mask(int vector)
 
 static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
 {
-    return dev->msix_table_page + MSIX_PAGE_PENDING + vector / 8;
+    return dev->msix_pba + vector / 8;
 }
 
 static int msix_is_pending(PCIDevice *dev, int vector)
@@ -114,7 +114,7 @@ static void msix_clr_pending(PCIDevice *dev, int vector)
 static bool msix_vector_masked(PCIDevice *dev, int vector, bool fmask)
 {
     unsigned offset = vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
-    return fmask || dev->msix_table_page[offset] & PCI_MSIX_ENTRY_CTRL_MASKBIT;
+    return fmask || dev->msix_table[offset] & PCI_MSIX_ENTRY_CTRL_MASKBIT;
 }
 
 static bool msix_is_masked(PCIDevice *dev, int vector)
@@ -193,37 +193,47 @@ void msix_write_config(PCIDevice *dev, uint32_t addr,
     }
 }
 
-static uint64_t msix_mmio_read(void *opaque, target_phys_addr_t addr,
-                               unsigned size)
+static uint64_t msix_table_mmio_read(void *opaque, target_phys_addr_t addr,
+                                     unsigned size)
 {
     PCIDevice *dev = opaque;
-    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
-    void *page = dev->msix_table_page;
 
-    return pci_get_long(page + offset);
+    return pci_get_long(dev->msix_table + addr);
 }
 
-static void msix_mmio_write(void *opaque, target_phys_addr_t addr,
-                            uint64_t val, unsigned size)
+static void msix_table_mmio_write(void *opaque, target_phys_addr_t addr,
+                                  uint64_t val, unsigned size)
 {
     PCIDevice *dev = opaque;
-    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
-    int vector = offset / PCI_MSIX_ENTRY_SIZE;
+    int vector = addr / PCI_MSIX_ENTRY_SIZE;
     bool was_masked;
 
-    /* MSI-X page includes a read-only PBA and a writeable Vector Control. */
-    if (vector >= dev->msix_entries_nr) {
-        return;
-    }
-
     was_masked = msix_is_masked(dev, vector);
-    pci_set_long(dev->msix_table_page + offset, val);
+    pci_set_long(dev->msix_table + addr, val);
     msix_handle_mask_update(dev, vector, was_masked);
 }
 
-static const MemoryRegionOps msix_mmio_ops = {
-    .read = msix_mmio_read,
-    .write = msix_mmio_write,
+static const MemoryRegionOps msix_table_mmio_ops = {
+    .read = msix_table_mmio_read,
+    .write = msix_table_mmio_write,
+    /* TODO: MSIX should be LITTLE_ENDIAN. */
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .valid = {
+        .min_access_size = 4,
+        .max_access_size = 4,
+    },
+};
+
+static uint64_t msix_pba_mmio_read(void *opaque, target_phys_addr_t addr,
+                                   unsigned size)
+{
+    PCIDevice *dev = opaque;
+
+    return pci_get_long(dev->msix_pba + addr);
+}
+
+static const MemoryRegionOps msix_pba_mmio_ops = {
+    .read = msix_pba_mmio_read,
     /* TODO: MSIX should be LITTLE_ENDIAN. */
     .endianness = DEVICE_NATIVE_ENDIAN,
     .valid = {
@@ -236,11 +246,14 @@ static void msix_mmio_setup(PCIDevice *d, MemoryRegion *bar)
 {
     uint8_t *config = d->config + d->msix_cap;
     uint32_t table = pci_get_long(config + PCI_MSIX_TABLE);
-    uint32_t offset = table & ~(MSIX_PAGE_SIZE - 1);
+    uint32_t table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
+    uint32_t pba = pci_get_long(config + PCI_MSIX_PBA);
+    uint32_t pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
     /* TODO: for assigned devices, we'll want to make it possible to map
      * pending bits separately in case they are in a separate bar. */
 
-    memory_region_add_subregion(bar, offset, &d->msix_mmio);
+    memory_region_add_subregion(bar, table_offset, &d->msix_table_mmio);
+    memory_region_add_subregion(bar, pba_offset, &d->msix_pba_mmio);
 }
 
 static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
@@ -252,7 +265,7 @@ static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
             vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
         bool was_masked = msix_is_masked(dev, vector);
 
-        dev->msix_table_page[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
+        dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
         msix_handle_mask_update(dev, vector, was_masked);
     }
 }
@@ -264,6 +277,7 @@ int msix_init(struct PCIDevice *dev, unsigned short nentries,
               unsigned bar_nr, unsigned bar_size)
 {
     int ret;
+    unsigned table_size, pba_size;
 
     /* Nothing to do if MSI is not supported by interrupt controller */
     if (!msi_supported) {
@@ -272,14 +286,20 @@ int msix_init(struct PCIDevice *dev, unsigned short nentries,
     if (nentries > MSIX_MAX_ENTRIES)
         return -EINVAL;
 
+    table_size = nentries * PCI_MSIX_ENTRY_SIZE;
+    pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;
+
     dev->msix_entry_used = g_malloc0(MSIX_MAX_ENTRIES *
                                      sizeof *dev->msix_entry_used);
 
-    dev->msix_table_page = g_malloc0(MSIX_PAGE_SIZE);
+    dev->msix_table = g_malloc0(table_size);
+    dev->msix_pba = g_malloc0(pba_size);
     msix_mask_all(dev, nentries);
 
-    memory_region_init_io(&dev->msix_mmio, &msix_mmio_ops, dev,
-                          "msix", MSIX_PAGE_SIZE);
+    memory_region_init_io(&dev->msix_table_mmio, &msix_table_mmio_ops, dev,
+                          "msix-table", table_size);
+    memory_region_init_io(&dev->msix_pba_mmio, &msix_pba_mmio_ops, dev,
+                          "msix-pba", pba_size);
 
     dev->msix_entries_nr = nentries;
     ret = msix_add_config(dev, nentries, bar_nr, bar_size);
@@ -292,9 +312,12 @@ int msix_init(struct PCIDevice *dev, unsigned short nentries,
 
 err_config:
     dev->msix_entries_nr = 0;
-    memory_region_destroy(&dev->msix_mmio);
-    g_free(dev->msix_table_page);
-    dev->msix_table_page = NULL;
+    memory_region_destroy(&dev->msix_pba_mmio);
+    g_free(dev->msix_pba);
+    dev->msix_pba = NULL;
+    memory_region_destroy(&dev->msix_table_mmio);
+    g_free(dev->msix_table);
+    dev->msix_table = NULL;
     g_free(dev->msix_entry_used);
     dev->msix_entry_used = NULL;
     return ret;
@@ -359,10 +382,14 @@ int msix_uninit(PCIDevice *dev, MemoryRegion *bar)
     dev->msix_cap = 0;
     msix_free_irq_entries(dev);
     dev->msix_entries_nr = 0;
-    memory_region_del_subregion(bar, &dev->msix_mmio);
-    memory_region_destroy(&dev->msix_mmio);
-    g_free(dev->msix_table_page);
-    dev->msix_table_page = NULL;
+    memory_region_del_subregion(bar, &dev->msix_pba_mmio);
+    memory_region_destroy(&dev->msix_pba_mmio);
+    g_free(dev->msix_pba);
+    dev->msix_pba = NULL;
+    memory_region_del_subregion(bar, &dev->msix_table_mmio);
+    memory_region_destroy(&dev->msix_table_mmio);
+    g_free(dev->msix_table);
+    dev->msix_table = NULL;
     g_free(dev->msix_entry_used);
     dev->msix_entry_used = NULL;
     dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
@@ -385,8 +412,8 @@ void msix_save(PCIDevice *dev, QEMUFile *f)
         return;
     }
 
-    qemu_put_buffer(f, dev->msix_table_page, n * PCI_MSIX_ENTRY_SIZE);
-    qemu_put_buffer(f, dev->msix_table_page + MSIX_PAGE_PENDING, (n + 7) / 8);
+    qemu_put_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
+    qemu_put_buffer(f, dev->msix_pba, (n + 7) / 8);
 }
 
 /* Should be called after restoring the config space. */
@@ -400,8 +427,8 @@ void msix_load(PCIDevice *dev, QEMUFile *f)
     }
 
     msix_free_irq_entries(dev);
-    qemu_get_buffer(f, dev->msix_table_page, n * PCI_MSIX_ENTRY_SIZE);
-    qemu_get_buffer(f, dev->msix_table_page + MSIX_PAGE_PENDING, (n + 7) / 8);
+    qemu_get_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
+    qemu_get_buffer(f, dev->msix_pba, (n + 7) / 8);
     msix_update_function_masked(dev);
 
     for (vector = 0; vector < n; vector++) {
@@ -448,7 +475,8 @@ void msix_reset(PCIDevice *dev)
     msix_free_irq_entries(dev);
     dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &=
             ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET];
-    memset(dev->msix_table_page, 0, MSIX_PAGE_SIZE);
+    memset(dev->msix_table, 0, dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);
+    memset(dev->msix_pba, 0, QEMU_ALIGN_UP(dev->msix_entries_nr, 64) / 8);
     msix_mask_all(dev, dev->msix_entries_nr);
 }
 

hw/pci.h (10 lines changed)
@@ -220,12 +220,14 @@ struct PCIDevice {
     /* MSI-X entries */
     int msix_entries_nr;
 
-    /* Space to store MSIX table */
-    uint8_t *msix_table_page;
+    /* Space to store MSIX table & pending bit array */
+    uint8_t *msix_table;
+    uint8_t *msix_pba;
     /* MemoryRegion container for msix exclusive BAR setup */
     MemoryRegion msix_exclusive_bar;
-    /* MMIO index used to map MSIX table and pending bit entries. */
-    MemoryRegion msix_mmio;
+    /* Memory Regions for MSIX table and pending bit entries. */
+    MemoryRegion msix_table_mmio;
+    MemoryRegion msix_pba_mmio;
     /* Reference-count for entries actually in use by driver. */
     unsigned *msix_entry_used;
     /* MSIX function mask set or MSIX disabled */
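For context on the masking in msix_mmio_setup() above: the MSI-X capability's Table and PBA registers each pack a BAR indicator (BIR) into their low three bits and the byte offset within that BAR into the remaining bits, so clearing PCI_MSIX_FLAGS_BIRMASK yields the offset at which each subregion is added. A small self-contained illustration of that decoding, using made-up register values rather than anything from the patch:

#include <stdio.h>

#define PCI_MSIX_FLAGS_BIRMASK 0x7u  /* low 3 bits: BAR Indicator Register (BIR) */

/* Decode one MSI-X "Offset/BIR" register into its BAR number and byte offset. */
static void decode(const char *name, unsigned reg)
{
    unsigned bir = reg & PCI_MSIX_FLAGS_BIRMASK;      /* which BAR holds the structure */
    unsigned offset = reg & ~PCI_MSIX_FLAGS_BIRMASK;  /* offset of the structure in that BAR */
    printf("%-5s -> BAR %u, offset 0x%x\n", name, bir, offset);
}

int main(void)
{
    /* Hypothetical example values; a real device reads these from its MSI-X capability. */
    decode("table", 0x00000004u);  /* vector table at offset 0x0 of BAR 4 */
    decode("PBA",   0x00000804u);  /* pending bit array at offset 0x800 of BAR 4 */
    return 0;
}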