Merge tag 'pull-riscv-to-apply-20240308-1' of https://github.com/alistair23/qemu into staging

RISC-V PR for 9.0

* Update $ra with current $pc in trans_cm_jalt
* Enable SPCR ACPI table for the virt machine
* Allow large kernels to boot by moving the initrd further away in RAM
* Sync hwprobe keys with kernel
* Named features in riscv,isa; 'svade' rework
* Fix xATP_MODE validation
* Add missing include guard in pmu.h
* Add SRAT and SLIT ACPI tables
* libqos fixes and add a riscv machine
* Add Ztso extension
* Use 'zfa' instead of 'Zfa'
* Update KVM exts to Linux 6.8
* move ratified/frozen exts to non-experimental
* Ensure mcountinhibit, mcounteren, scounteren, hcounteren are 32-bit
* mark_vs_dirty() before loads and stores
* Remove 'is_store' bool from load/store fns
* Fix shift count overflow
* Fix setipnum_le write emulation for APLIC MSI-mode
* Fix in_clrip[x] read emulation
* Fix privilege mode of G-stage translation for debugging
* Fix ACPI MCFG table for virt machine

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEaukCtqfKh31tZZKWr3yVEwxTgBMFAmXq8joACgkQr3yVEwxT
# gBMCRxAAvG1RsCxWhMjLYDEYuUhQkP2nd86PHJMrNxAeb5WUIrgrYyT6OLXpfuKN
# 8Hz5sR1bJnZSxKfGSFIHoPVxvW788fg7c5fIgg9txEssEQYhd7rCCPALxdiu/zlb
# 0fbfx9Ir0nb9qS6vQuT4dEddmljKYKPry8dH0anWI7SVIfdor2TTlP4xOAR88Yq7
# gdGIKvr7PrUhI5JdoWBe8R6w7vguT35EO6aiC+7PA+8AT1SXrxFrg86bYtDUffD+
# LktGcI7GhlIeosolodU8iNK7CdZTywRuyIQ+/KF4mmC7qaC8yJNVVTwX9/vXbH36
# dWIFkOv3VRnLBKt3/PW1DRZoUbSLpWjwH7WZsvQWPW5Ql717pGWId4eXSemxCFLN
# u7gGZ29/1jxMuYue+FImHTbBf6fSV25VOfo5ZfR/AfzxLBbugsP56CPwfK3K2OBq
# fQ+k3hl8iCx62rZrcAa69TuMBQJB5Q94NIFP1m5u/bPIZZhT311RVY1xx/dRCXnI
# 9/7DiPUyMeV4np2j4crvs4/QVTqYvpt4U9ElYrMWUxsTUyBEGEaSzpRsR5FCRmSH
# 2IrJsVWw5QsTlHukCMvxWIiWnYS5KIe764LsY31FNOlbl8eNJkyapc+BE8HWjgPa
# xYROCWOL7T/E1BPJ7wbEBlzPhuJOndZX91Zbfq0n+SAYq4YnNu4=
# =sCqM
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 08 Mar 2024 11:10:50 GMT
# gpg:                using RSA key 6AE902B6A7CA877D6D659296AF7C95130C538013
# gpg: Good signature from "Alistair Francis <alistair@alistair23.me>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 6AE9 02B6 A7CA 877D 6D65  9296 AF7C 9513 0C53 8013

* tag 'pull-riscv-to-apply-20240308-1' of https://github.com/alistair23/qemu: (34 commits)
  target/riscv: fix ACPI MCFG table
  target/riscv: Fix privilege mode of G-stage translation for debugging
  hw/intc/riscv_aplic: Fix in_clrip[x] read emulation
  hw/intc/riscv_aplic: Fix setipnum_le write emulation for APLIC MSI-mode
  target/riscv: Fix shift count overflow
  trans_rvv.c.inc: remove 'is_store' bool from load/store fns
  trans_rvv.c.inc: mark_vs_dirty() before loads and stores
  target/riscv: mcountinhibit, mcounteren, scounteren, hcounteren is 32-bit
  target/riscv: move ratified/frozen exts to non-experimental
  target/riscv/kvm: update KVM exts to Linux 6.8
  linux-headers: Update to Linux v6.8-rc6
  tests: riscv64: Use 'zfa' instead of 'Zfa'
  linux-user/riscv: Add Ztso extension to hwprobe
  RISC-V: Add support for Ztso
  tests/libqos: add riscv/virt machine nodes
  hw/riscv/virt.c: make aclint compatible with 'qtest' accel
  hw/riscv/virt.c: add virtio-iommu-pci hotplug support
  hw/riscv/virt.c: create '/soc/pci@...' fdt node earlier
  hw/riscv/virt-acpi-build.c: Add SRAT and SLIT ACPI tables
  target/riscv: Add missing include guard in pmu.h
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit cbccded4a2 (Peter Maydell, 2024-03-08 11:47:01 +00:00)
50 changed files with 1213 additions and 347 deletions

@@ -1994,6 +1994,59 @@ static void build_processor_hierarchy_node(GArray *tbl, uint32_t flags,
}
}
void build_spcr(GArray *table_data, BIOSLinker *linker,
const AcpiSpcrData *f, const uint8_t rev,
const char *oem_id, const char *oem_table_id)
{
AcpiTable table = { .sig = "SPCR", .rev = rev, .oem_id = oem_id,
.oem_table_id = oem_table_id };
acpi_table_begin(&table, table_data);
/* Interface type */
build_append_int_noprefix(table_data, f->interface_type, 1);
/* Reserved */
build_append_int_noprefix(table_data, 0, 3);
/* Base Address */
build_append_gas(table_data, f->base_addr.id, f->base_addr.width,
f->base_addr.offset, f->base_addr.size,
f->base_addr.addr);
/* Interrupt type */
build_append_int_noprefix(table_data, f->interrupt_type, 1);
/* IRQ */
build_append_int_noprefix(table_data, f->pc_interrupt, 1);
/* Global System Interrupt */
build_append_int_noprefix(table_data, f->interrupt, 4);
/* Baud Rate */
build_append_int_noprefix(table_data, f->baud_rate, 1);
/* Parity */
build_append_int_noprefix(table_data, f->parity, 1);
/* Stop Bits */
build_append_int_noprefix(table_data, f->stop_bits, 1);
/* Flow Control */
build_append_int_noprefix(table_data, f->flow_control, 1);
/* Language */
build_append_int_noprefix(table_data, f->language, 1);
/* Terminal Type */
build_append_int_noprefix(table_data, f->terminal_type, 1);
/* PCI Device ID */
build_append_int_noprefix(table_data, f->pci_device_id, 2);
/* PCI Vendor ID */
build_append_int_noprefix(table_data, f->pci_vendor_id, 2);
/* PCI Bus Number */
build_append_int_noprefix(table_data, f->pci_bus, 1);
/* PCI Device Number */
build_append_int_noprefix(table_data, f->pci_device, 1);
/* PCI Function Number */
build_append_int_noprefix(table_data, f->pci_function, 1);
/* PCI Flags */
build_append_int_noprefix(table_data, f->pci_flags, 4);
/* PCI Segment */
build_append_int_noprefix(table_data, f->pci_segment, 1);
/* Reserved */
build_append_int_noprefix(table_data, 0, 4);
acpi_table_end(linker, &table);
}
/*
* ACPI spec, Revision 6.3
* 5.2.29 Processor Properties Topology Table (PPTT)

@@ -431,48 +431,34 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
* Rev: 1.07
*/
static void
build_spcr(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
spcr_setup(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
AcpiTable table = { .sig = "SPCR", .rev = 2, .oem_id = vms->oem_id,
.oem_table_id = vms->oem_table_id };
AcpiSpcrData serial = {
.interface_type = 3, /* ARM PL011 UART */
.base_addr.id = AML_AS_SYSTEM_MEMORY,
.base_addr.width = 32,
.base_addr.offset = 0,
.base_addr.size = 3,
.base_addr.addr = vms->memmap[VIRT_UART].base,
.interrupt_type = (1 << 3),/* Bit[3] ARMH GIC interrupt*/
.pc_interrupt = 0, /* IRQ */
.interrupt = (vms->irqmap[VIRT_UART] + ARM_SPI_BASE),
.baud_rate = 3, /* 9600 */
.parity = 0, /* No Parity */
.stop_bits = 1, /* 1 Stop bit */
.flow_control = 1 << 1, /* RTS/CTS hardware flow control */
.terminal_type = 0, /* VT100 */
.language = 0, /* Language */
.pci_device_id = 0xffff, /* not a PCI device*/
.pci_vendor_id = 0xffff, /* not a PCI device*/
.pci_bus = 0,
.pci_device = 0,
.pci_function = 0,
.pci_flags = 0,
.pci_segment = 0,
};
acpi_table_begin(&table, table_data);
/* Interface Type */
build_append_int_noprefix(table_data, 3, 1); /* ARM PL011 UART */
build_append_int_noprefix(table_data, 0, 3); /* Reserved */
/* Base Address */
build_append_gas(table_data, AML_AS_SYSTEM_MEMORY, 32, 0, 3,
vms->memmap[VIRT_UART].base);
/* Interrupt Type */
build_append_int_noprefix(table_data,
(1 << 3) /* Bit[3] ARMH GIC interrupt */, 1);
build_append_int_noprefix(table_data, 0, 1); /* IRQ */
/* Global System Interrupt */
build_append_int_noprefix(table_data,
vms->irqmap[VIRT_UART] + ARM_SPI_BASE, 4);
build_append_int_noprefix(table_data, 3 /* 9600 */, 1); /* Baud Rate */
build_append_int_noprefix(table_data, 0 /* No Parity */, 1); /* Parity */
/* Stop Bits */
build_append_int_noprefix(table_data, 1 /* 1 Stop bit */, 1);
/* Flow Control */
build_append_int_noprefix(table_data,
(1 << 1) /* RTS/CTS hardware flow control */, 1);
/* Terminal Type */
build_append_int_noprefix(table_data, 0 /* VT100 */, 1);
build_append_int_noprefix(table_data, 0, 1); /* Language */
/* PCI Device ID */
build_append_int_noprefix(table_data, 0xffff /* not a PCI device*/, 2);
/* PCI Vendor ID */
build_append_int_noprefix(table_data, 0xffff /* not a PCI device*/, 2);
build_append_int_noprefix(table_data, 0, 1); /* PCI Bus Number */
build_append_int_noprefix(table_data, 0, 1); /* PCI Device Number */
build_append_int_noprefix(table_data, 0, 1); /* PCI Function Number */
build_append_int_noprefix(table_data, 0, 4); /* PCI Flags */
build_append_int_noprefix(table_data, 0, 1); /* PCI Segment */
build_append_int_noprefix(table_data, 0, 4); /* Reserved */
acpi_table_end(linker, &table);
build_spcr(table_data, linker, &serial, 2, vms->oem_id, vms->oem_table_id);
}
/*
@@ -938,7 +924,7 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
}
acpi_add_table(table_offsets, tables_blob);
build_spcr(tables_blob, tables->linker, vms);
spcr_setup(tables_blob, tables->linker, vms);
acpi_add_table(table_offsets, tables_blob);
build_dbg2(tables_blob, tables->linker, vms);

@@ -162,7 +162,7 @@ static bool is_kvm_aia(bool msimode)
static uint32_t riscv_aplic_read_input_word(RISCVAPLICState *aplic,
uint32_t word)
{
uint32_t i, irq, ret = 0;
uint32_t i, irq, sourcecfg, sm, raw_input, irq_inverted, ret = 0;
for (i = 0; i < 32; i++) {
irq = word * 32 + i;
@@ -170,7 +170,20 @@ static uint32_t riscv_aplic_read_input_word(RISCVAPLICState *aplic,
continue;
}
ret |= ((aplic->state[irq] & APLIC_ISTATE_INPUT) ? 1 : 0) << i;
sourcecfg = aplic->sourcecfg[irq];
if (sourcecfg & APLIC_SOURCECFG_D) {
continue;
}
sm = sourcecfg & APLIC_SOURCECFG_SM_MASK;
if (sm == APLIC_SOURCECFG_SM_INACTIVE) {
continue;
}
raw_input = (aplic->state[irq] & APLIC_ISTATE_INPUT) ? 1 : 0;
irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW ||
sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 1 : 0;
ret |= (raw_input ^ irq_inverted) << i;
}
return ret;
@@ -218,13 +231,25 @@ static void riscv_aplic_set_pending(RISCVAPLICState *aplic,
}
sm = sourcecfg & APLIC_SOURCECFG_SM_MASK;
if ((sm == APLIC_SOURCECFG_SM_INACTIVE) ||
((!aplic->msimode || (aplic->msimode && !pending)) &&
((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) ||
(sm == APLIC_SOURCECFG_SM_LEVEL_LOW)))) {
if (sm == APLIC_SOURCECFG_SM_INACTIVE) {
return;
}
if ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) ||
(sm == APLIC_SOURCECFG_SM_LEVEL_LOW)) {
if (!aplic->msimode || (aplic->msimode && !pending)) {
return;
}
if ((aplic->state[irq] & APLIC_ISTATE_INPUT) &&
(sm == APLIC_SOURCECFG_SM_LEVEL_LOW)) {
return;
}
if (!(aplic->state[irq] & APLIC_ISTATE_INPUT) &&
(sm == APLIC_SOURCECFG_SM_LEVEL_HIGH)) {
return;
}
}
riscv_aplic_set_pending_raw(aplic, irq, pending);
}
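
The in_clrip[x] fix above returns the rectified input value, i.e. the raw
wire state XORed with an active-low indicator, instead of the latched input
bit. A minimal stand-alone sketch of the rectification in C; the SM_* values
are illustrative stand-ins for QEMU's APLIC_SOURCECFG_SM_* constants, using
the encodings from the RISC-V AIA specification:

/* Source-mode encodings per the AIA spec (stand-ins, not QEMU's names). */
enum {
    SM_INACTIVE   = 0,
    SM_DETACHED   = 1,
    SM_EDGE_RISE  = 4,
    SM_EDGE_FALL  = 5,
    SM_LEVEL_HIGH = 6,
    SM_LEVEL_LOW  = 7,
};

/* Rectified input: invert the raw wire state for active-low modes,
 * exactly as the (raw_input ^ irq_inverted) expression above does. */
unsigned aplic_rectified_input(unsigned sm, unsigned raw_input)
{
    unsigned irq_inverted = (sm == SM_LEVEL_LOW || sm == SM_EDGE_FALL);
    return (raw_input & 1) ^ irq_inverted;
}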

@@ -189,13 +189,13 @@ static void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry)
* kernel is uncompressed it will not clobber the initrd. However
* on boards without much RAM we must ensure that we still leave
* enough room for a decent sized initrd, and on boards with large
* amounts of RAM we must avoid the initrd being so far up in RAM
* that it is outside lowmem and inaccessible to the kernel.
* So for boards with less than 256MB of RAM we put the initrd
* halfway into RAM, and for boards with 256MB of RAM or more we put
* the initrd at 128MB.
* amounts of RAM, we put the initrd at 512MB to allow large kernels
* to boot.
* So for boards with less than 1GB of RAM we put the initrd
* halfway into RAM, and for boards with 1GB of RAM or more we put
* the initrd at 512MB.
*/
start = kernel_entry + MIN(mem_size / 2, 128 * MiB);
start = kernel_entry + MIN(mem_size / 2, 512 * MiB);
size = load_ramdisk(filename, start, mem_size - start);
if (size == -1) {
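
For concreteness, the new placement is start = kernel_entry + MIN(mem_size / 2,
512 MiB): a 256 MiB board still puts the initrd halfway into RAM, while boards
with 1 GiB or more leave a fixed 512 MiB gap for large kernels. A self-contained
sketch of the arithmetic (the kernel entry value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define MiB (1024 * 1024ULL)
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    uint64_t kernel_entry = 0x80200000; /* illustrative entry point */
    uint64_t mem_sizes[] = { 256 * MiB, 512 * MiB, 1024 * MiB, 4096 * MiB };

    for (int i = 0; i < 4; i++) {
        uint64_t start = kernel_entry + MIN(mem_sizes[i] / 2, 512 * MiB);
        printf("mem=%4llu MiB -> initrd at entry + %llu MiB\n",
               (unsigned long long)(mem_sizes[i] / MiB),
               (unsigned long long)((start - kernel_entry) / MiB));
    }
    return 0;
}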

@@ -174,6 +174,42 @@ acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap,
aml_append(scope, dev);
}
/*
* Serial Port Console Redirection Table (SPCR)
* Rev: 1.07
*/
static void
spcr_setup(GArray *table_data, BIOSLinker *linker, RISCVVirtState *s)
{
AcpiSpcrData serial = {
.interface_type = 0, /* 16550 compatible */
.base_addr.id = AML_AS_SYSTEM_MEMORY,
.base_addr.width = 32,
.base_addr.offset = 0,
.base_addr.size = 1,
.base_addr.addr = s->memmap[VIRT_UART0].base,
.interrupt_type = (1 << 4),/* Bit[4] RISC-V PLIC/APLIC */
.pc_interrupt = 0,
.interrupt = UART0_IRQ,
.baud_rate = 7, /* 15200 */
.parity = 0,
.stop_bits = 1,
.flow_control = 0,
.terminal_type = 3, /* ANSI */
.language = 0, /* Language */
.pci_device_id = 0xffff, /* not a PCI device*/
.pci_vendor_id = 0xffff, /* not a PCI device*/
.pci_bus = 0,
.pci_device = 0,
.pci_function = 0,
.pci_flags = 0,
.pci_segment = 0,
};
build_spcr(table_data, linker, &serial, 2, s->oem_id, s->oem_table_id);
}
/* RHCT Node[N] starts at offset 56 */
#define RHCT_NODE_ARRAY_OFFSET 56
@@ -528,11 +564,61 @@ static void build_madt(GArray *table_data,
acpi_table_end(linker, &table);
}
/*
* ACPI spec, Revision 6.5+
* 5.2.16 System Resource Affinity Table (SRAT)
* REF: https://github.com/riscv-non-isa/riscv-acpi/issues/25
* https://drive.google.com/file/d/1YTdDx2IPm5IeZjAW932EYU-tUtgS08tX/view
*/
static void
build_srat(GArray *table_data, BIOSLinker *linker, RISCVVirtState *vms)
{
int i;
uint64_t mem_base;
MachineClass *mc = MACHINE_GET_CLASS(vms);
MachineState *ms = MACHINE(vms);
const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(ms);
AcpiTable table = { .sig = "SRAT", .rev = 3, .oem_id = vms->oem_id,
.oem_table_id = vms->oem_table_id };
acpi_table_begin(&table, table_data);
build_append_int_noprefix(table_data, 1, 4); /* Reserved */
build_append_int_noprefix(table_data, 0, 8); /* Reserved */
for (i = 0; i < cpu_list->len; ++i) {
uint32_t nodeid = cpu_list->cpus[i].props.node_id;
/*
* 5.2.16.8 RINTC Affinity Structure
*/
build_append_int_noprefix(table_data, 7, 1); /* Type */
build_append_int_noprefix(table_data, 20, 1); /* Length */
build_append_int_noprefix(table_data, 0, 2); /* Reserved */
build_append_int_noprefix(table_data, nodeid, 4); /* Proximity Domain */
build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */
/* Flags, Table 5-70 */
build_append_int_noprefix(table_data, 1 /* Flags: Enabled */, 4);
build_append_int_noprefix(table_data, 0, 4); /* Clock Domain */
}
mem_base = vms->memmap[VIRT_DRAM].base;
for (i = 0; i < ms->numa_state->num_nodes; ++i) {
if (ms->numa_state->nodes[i].node_mem > 0) {
build_srat_memory(table_data, mem_base,
ms->numa_state->nodes[i].node_mem, i,
MEM_AFFINITY_ENABLED);
mem_base += ms->numa_state->nodes[i].node_mem;
}
}
acpi_table_end(linker, &table);
}
static void virt_acpi_build(RISCVVirtState *s, AcpiBuildTables *tables)
{
GArray *table_offsets;
unsigned dsdt, xsdt;
GArray *tables_blob = tables->table_data;
MachineState *ms = MACHINE(s);
table_offsets = g_array_new(false, true,
sizeof(uint32_t));
@@ -555,16 +641,29 @@ static void virt_acpi_build(RISCVVirtState *s, AcpiBuildTables *tables)
acpi_add_table(table_offsets, tables_blob);
build_rhct(tables_blob, tables->linker, s);
acpi_add_table(table_offsets, tables_blob);
spcr_setup(tables_blob, tables->linker, s);
acpi_add_table(table_offsets, tables_blob);
{
AcpiMcfgInfo mcfg = {
.base = s->memmap[VIRT_PCIE_MMIO].base,
.size = s->memmap[VIRT_PCIE_MMIO].size,
.base = s->memmap[VIRT_PCIE_ECAM].base,
.size = s->memmap[VIRT_PCIE_ECAM].size,
};
build_mcfg(tables_blob, tables->linker, &mcfg, s->oem_id,
s->oem_table_id);
}
if (ms->numa_state->num_nodes > 0) {
acpi_add_table(table_offsets, tables_blob);
build_srat(tables_blob, tables->linker, s);
if (ms->numa_state->have_numa_distance) {
acpi_add_table(table_offsets, tables_blob);
build_slit(tables_blob, tables->linker, ms, s->oem_id,
s->oem_table_id);
}
}
/* XSDT is pointed to by RSDP */
xsdt = tables_blob->len;
build_xsdt(tables_blob, tables->linker, table_offsets, s->oem_id,
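
The MCFG change in the hunk above matters because the guest derives every PCIe
config-space access from the table's base address; advertising VIRT_PCIE_MMIO
instead of VIRT_PCIE_ECAM sent those accesses to the wrong window. A sketch of
the standard ECAM address calculation a guest performs from the MCFG base:

#include <stdint.h>

/* PCIe ECAM offset layout: bus[27:20] device[19:15] function[14:12]
 * register[11:0]. ecam_base must be the ECAM window advertised in MCFG. */
uint64_t ecam_addr(uint64_t ecam_base, uint8_t bus, uint8_t dev,
                   uint8_t fn, uint16_t reg)
{
    return ecam_base + ((uint64_t)bus << 20) + ((uint64_t)(dev & 0x1f) << 15) +
           ((uint64_t)(fn & 0x7) << 12) + (reg & 0xfff);
}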

@@ -48,11 +48,13 @@
#include "sysemu/tcg.h"
#include "sysemu/kvm.h"
#include "sysemu/tpm.h"
#include "sysemu/qtest.h"
#include "hw/pci/pci.h"
#include "hw/pci-host/gpex.h"
#include "hw/display/ramfb.h"
#include "hw/acpi/aml-build.h"
#include "qapi/qapi-visit-common.h"
#include "hw/virtio/virtio-iommu.h"
/* KVM AIA only supports APLIC MSI. APLIC Wired is always emulated by QEMU. */
static bool virt_use_kvm_aia(RISCVVirtState *s)
@@ -60,6 +62,11 @@ static bool virt_use_kvm_aia(RISCVVirtState *s)
return kvm_irqchip_in_kernel() && s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC;
}
static bool virt_aclint_allowed(void)
{
return tcg_enabled() || qtest_enabled();
}
static const MemMapEntry virt_memmap[] = {
[VIRT_DEBUG] = { 0x0, 0x100 },
[VIRT_MROM] = { 0x1000, 0xf000 },
@@ -724,14 +731,12 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
create_fdt_socket_memory(s, memmap, socket);
if (tcg_enabled()) {
if (s->have_aclint) {
create_fdt_socket_aclint(s, memmap, socket,
&intc_phandles[phandle_pos]);
} else {
create_fdt_socket_clint(s, memmap, socket,
&intc_phandles[phandle_pos]);
}
if (virt_aclint_allowed() && s->have_aclint) {
create_fdt_socket_aclint(s, memmap, socket,
&intc_phandles[phandle_pos]);
} else if (tcg_enabled()) {
create_fdt_socket_clint(s, memmap, socket,
&intc_phandles[phandle_pos]);
}
}
@@ -826,7 +831,6 @@ static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
name = g_strdup_printf("/soc/pci@%lx",
(long) memmap[VIRT_PCIE_ECAM].base);
qemu_fdt_add_subnode(ms->fdt, name);
qemu_fdt_setprop_cell(ms->fdt, name, "#address-cells",
FDT_PCI_ADDR_CELLS);
qemu_fdt_setprop_cell(ms->fdt, name, "#interrupt-cells",
@@ -972,6 +976,34 @@ static void create_fdt_fw_cfg(RISCVVirtState *s, const MemMapEntry *memmap)
qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0);
}
static void create_fdt_virtio_iommu(RISCVVirtState *s, uint16_t bdf)
{
const char compat[] = "virtio,pci-iommu\0pci1af4,1057";
void *fdt = MACHINE(s)->fdt;
uint32_t iommu_phandle;
g_autofree char *iommu_node = NULL;
g_autofree char *pci_node = NULL;
pci_node = g_strdup_printf("/soc/pci@%lx",
(long) virt_memmap[VIRT_PCIE_ECAM].base);
iommu_node = g_strdup_printf("%s/virtio_iommu@%x,%x", pci_node,
PCI_SLOT(bdf), PCI_FUNC(bdf));
iommu_phandle = qemu_fdt_alloc_phandle(fdt);
qemu_fdt_add_subnode(fdt, iommu_node);
qemu_fdt_setprop(fdt, iommu_node, "compatible", compat, sizeof(compat));
qemu_fdt_setprop_sized_cells(fdt, iommu_node, "reg",
1, bdf << 8, 1, 0, 1, 0,
1, 0, 1, 0);
qemu_fdt_setprop_cell(fdt, iommu_node, "#iommu-cells", 1);
qemu_fdt_setprop_cell(fdt, iommu_node, "phandle", iommu_phandle);
qemu_fdt_setprop_cells(fdt, pci_node, "iommu-map",
0, iommu_phandle, 0, bdf,
bdf + 1, iommu_phandle, bdf + 1, 0xffff - bdf);
}
static void finalize_fdt(RISCVVirtState *s)
{
uint32_t phandle = 1, irq_mmio_phandle = 1, msi_pcie_phandle = 1;
@@ -996,6 +1028,7 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap)
{
MachineState *ms = MACHINE(s);
uint8_t rng_seed[32];
g_autofree char *name = NULL;
ms->fdt = create_device_tree(&s->fdt_size);
if (!ms->fdt) {
@@ -1014,6 +1047,13 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap)
qemu_fdt_setprop_cell(ms->fdt, "/soc", "#size-cells", 0x2);
qemu_fdt_setprop_cell(ms->fdt, "/soc", "#address-cells", 0x2);
/*
* The "/soc/pci@..." node is needed for PCIE hotplugs
* that might happen before finalize_fdt().
*/
name = g_strdup_printf("/soc/pci@%lx", (long) memmap[VIRT_PCIE_ECAM].base);
qemu_fdt_add_subnode(ms->fdt, name);
qemu_fdt_add_subnode(ms->fdt, "/chosen");
/* Pass seed to RNG */
@@ -1373,7 +1413,7 @@ static void virt_machine_init(MachineState *machine)
exit(1);
}
if (!tcg_enabled() && s->have_aclint) {
if (!virt_aclint_allowed() && s->have_aclint) {
error_report("'aclint' is only available with TCG acceleration");
exit(1);
}
@@ -1410,23 +1450,22 @@
hart_count, &error_abort);
sysbus_realize(SYS_BUS_DEVICE(&s->soc[i]), &error_fatal);
if (tcg_enabled()) {
if (s->have_aclint) {
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
/* Per-socket ACLINT MTIMER */
riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
if (virt_aclint_allowed() && s->have_aclint) {
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
/* Per-socket ACLINT MTIMER */
riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
i * RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
base_hartid, hart_count,
RISCV_ACLINT_DEFAULT_MTIMECMP,
RISCV_ACLINT_DEFAULT_MTIME,
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
} else {
/* Per-socket ACLINT MSWI, MTIMER, and SSWI */
riscv_aclint_swi_create(memmap[VIRT_CLINT].base +
} else {
/* Per-socket ACLINT MSWI, MTIMER, and SSWI */
riscv_aclint_swi_create(memmap[VIRT_CLINT].base +
i * memmap[VIRT_CLINT].size,
base_hartid, hart_count, false);
riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
i * memmap[VIRT_CLINT].size +
RISCV_ACLINT_SWI_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
@@ -1434,21 +1473,20 @@
RISCV_ACLINT_DEFAULT_MTIMECMP,
RISCV_ACLINT_DEFAULT_MTIME,
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
riscv_aclint_swi_create(memmap[VIRT_ACLINT_SSWI].base +
riscv_aclint_swi_create(memmap[VIRT_ACLINT_SSWI].base +
i * memmap[VIRT_ACLINT_SSWI].size,
base_hartid, hart_count, true);
}
} else {
/* Per-socket SiFive CLINT */
riscv_aclint_swi_create(
}
} else if (tcg_enabled()) {
/* Per-socket SiFive CLINT */
riscv_aclint_swi_create(
memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size,
base_hartid, hart_count, false);
riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
i * memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count,
RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME,
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
}
}
/* Per-socket interrupt controller */
@@ -1673,7 +1711,8 @@ static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine,
{
MachineClass *mc = MACHINE_GET_CLASS(machine);
if (device_is_dynamic_sysbus(mc, dev)) {
if (device_is_dynamic_sysbus(mc, dev) ||
object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
return HOTPLUG_HANDLER(machine);
}
return NULL;
@@ -1692,6 +1731,10 @@ static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev,
SYS_BUS_DEVICE(dev));
}
}
if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
create_fdt_virtio_iommu(s, pci_get_bdf(PCI_DEVICE(dev)));
}
}
static void virt_machine_class_init(ObjectClass *oc, void *data)
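
The iommu-map property written by create_fdt_virtio_iommu() above is a list of
(rid_base, iommu_phandle, iommu_rid_base, length) tuples; the two entries cover
every requester ID except the virtio-iommu device's own BDF, which must not be
translated through itself. A hedged sketch of how a consumer walks such a map
(the struct is illustrative, not a QEMU type):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* One iommu-map entry, as encoded in the FDT property above. */
struct iommu_map_entry {
    uint32_t rid_base;
    uint32_t phandle;
    uint32_t iommu_rid_base;
    uint32_t length;
};

/* Translate a PCI requester ID; returns false for RIDs outside every
 * entry (here: the virtio-iommu's own BDF). */
bool resolve_rid(const struct iommu_map_entry *map, size_t n,
                 uint32_t rid, uint32_t *out_rid)
{
    for (size_t i = 0; i < n; i++) {
        if (rid >= map[i].rid_base &&
            rid < map[i].rid_base + map[i].length) {
            *out_rid = map[i].iommu_rid_base + (rid - map[i].rid_base);
            return true;
        }
    }
    return false;
}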

@@ -90,6 +90,39 @@ typedef struct AcpiFadtData {
unsigned *xdsdt_tbl_offset;
} AcpiFadtData;
typedef struct AcpiGas {
uint8_t id; /* Address space ID */
uint8_t width; /* Register bit width */
uint8_t offset; /* Register bit offset */
uint8_t size; /* Access size */
uint64_t addr; /* Address */
} AcpiGas;
/* SPCR (Serial Port Console Redirection table) */
typedef struct AcpiSpcrData {
uint8_t interface_type;
uint8_t reserved[3];
struct AcpiGas base_addr;
uint8_t interrupt_type;
uint8_t pc_interrupt;
uint32_t interrupt; /* Global system interrupt */
uint8_t baud_rate;
uint8_t parity;
uint8_t stop_bits;
uint8_t flow_control;
uint8_t terminal_type;
uint8_t language;
uint8_t reserved1;
uint16_t pci_device_id; /* Must be 0xffff if not PCI device */
uint16_t pci_vendor_id; /* Must be 0xffff if not PCI device */
uint8_t pci_bus;
uint8_t pci_device;
uint8_t pci_function;
uint32_t pci_flags;
uint8_t pci_segment;
uint32_t reserved2;
} AcpiSpcrData;
#define ACPI_FADT_ARM_PSCI_COMPLIANT (1 << 0)
#define ACPI_FADT_ARM_PSCI_USE_HVC (1 << 1)

@@ -497,4 +497,8 @@ void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f,
void build_tpm2(GArray *table_data, BIOSLinker *linker, GArray *tcpalog,
const char *oem_id, const char *oem_table_id);
void build_spcr(GArray *table_data, BIOSLinker *linker,
const AcpiSpcrData *f, const uint8_t rev,
const char *oem_id, const char *oem_table_id);
#endif

@@ -53,7 +53,7 @@ extern "C" {
* Format modifiers may change any property of the buffer, including the number
* of planes and/or the required allocation size. Format modifiers are
* vendor-namespaced, and as such the relationship between a fourcc code and a
* modifier is specific to the modifer being used. For example, some modifiers
* modifier is specific to the modifier being used. For example, some modifiers
* may preserve meaning - such as number of planes - from the fourcc code,
* whereas others may not.
*
@@ -78,7 +78,7 @@ extern "C" {
* format.
* - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users
* see modifiers as opaque tokens they can check for equality and intersect.
* These users musn't need to know to reason about the modifier value
* These users mustn't need to know to reason about the modifier value
* (i.e. they are not expected to extract information out of the modifier).
*
* Vendors should document their modifier usage in as much detail as
@@ -539,7 +539,7 @@ extern "C" {
* This is a tiled layout using 4Kb tiles in row-major layout.
* Within the tile pixels are laid out in 16 256 byte units / sub-tiles which
* are arranged in four groups (two wide, two high) with column-major layout.
* Each group therefore consits out of four 256 byte units, which are also laid
* Each group therefore consists out of four 256 byte units, which are also laid
* out as 2x2 column-major.
* 256 byte units are made out of four 64 byte blocks of pixels, producing
* either a square block or a 2:1 unit.
@@ -1102,7 +1102,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier)
*/
/*
* The top 4 bits (out of the 56 bits alloted for specifying vendor specific
* The top 4 bits (out of the 56 bits allotted for specifying vendor specific
* modifiers) denote the category for modifiers. Currently we have three
* categories of modifiers ie AFBC, MISC and AFRC. We can have a maximum of
* sixteen different categories.
@@ -1418,7 +1418,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier)
* Amlogic FBC Memory Saving mode
*
* Indicates the storage is packed when pixel size is multiple of word
* boudaries, i.e. 8bit should be stored in this mode to save allocation
* boundaries, i.e. 8bit should be stored in this mode to save allocation
* memory.
*
* This mode reduces body layout to 3072 bytes per 64x32 superblock with

@@ -1266,6 +1266,8 @@ struct ethtool_rxfh_indir {
* hardware hash key.
* @hfunc: Defines the current RSS hash function used by HW (or to be set to).
* Valid values are one of the %ETH_RSS_HASH_*.
* @input_xfrm: Defines how the input data is transformed. Valid values are one
* of %RXH_XFRM_*.
* @rsvd8: Reserved for future use; see the note on reserved space.
* @rsvd32: Reserved for future use; see the note on reserved space.
* @rss_config: RX ring/queue index for each hash value i.e., indirection table
@@ -1285,7 +1287,8 @@ struct ethtool_rxfh {
uint32_t indir_size;
uint32_t key_size;
uint8_t hfunc;
uint8_t rsvd8[3];
uint8_t input_xfrm;
uint8_t rsvd8[2];
uint32_t rsvd32;
uint32_t rss_config[];
};
@@ -1992,6 +1995,15 @@ static inline int ethtool_validate_duplex(uint8_t duplex)
#define WOL_MODE_COUNT 8
/* RSS hash function data
* XOR the corresponding source and destination fields of each specified
* protocol. Both copies of the XOR'ed fields are fed into the RSS and RXHASH
* calculation. Note that this XORing reduces the input set entropy and could
* be exploited to reduce the RSS queue spread.
*/
#define RXH_XFRM_SYM_XOR (1 << 0)
#define RXH_XFRM_NO_CHANGE 0xff
/* L2-L4 network traffic flow types */
#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */
#define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */
@@ -2128,18 +2140,6 @@ enum ethtool_reset_flags {
* refused. For drivers: ignore this field (use kernel's
* __ETHTOOL_LINK_MODE_MASK_NBITS instead), any change to it will
* be overwritten by kernel.
* @supported: Bitmap with each bit meaning given by
* %ethtool_link_mode_bit_indices for the link modes, physical
* connectors and other link features for which the interface
* supports autonegotiation or auto-detection. Read-only.
* @advertising: Bitmap with each bit meaning given by
* %ethtool_link_mode_bit_indices for the link modes, physical
* connectors and other link features that are advertised through
* autonegotiation or enabled for auto-detection.
* @lp_advertising: Bitmap with each bit meaning given by
* %ethtool_link_mode_bit_indices for the link modes, and other
* link features that the link partner advertised through
* autonegotiation; 0 if unknown or not applicable. Read-only.
* @transceiver: Used to distinguish different possible PHY types,
* reported consistently by PHYLIB. Read-only.
* @master_slave_cfg: Master/slave port mode.
@@ -2181,6 +2181,21 @@ enum ethtool_reset_flags {
* %set_link_ksettings() should validate all fields other than @cmd
* and @link_mode_masks_nwords that are not described as read-only or
* deprecated, and must ignore all fields described as read-only.
*
* @link_mode_masks is divided into three bitfields, each of length
* @link_mode_masks_nwords:
* - supported: Bitmap with each bit meaning given by
* %ethtool_link_mode_bit_indices for the link modes, physical
* connectors and other link features for which the interface
* supports autonegotiation or auto-detection. Read-only.
* - advertising: Bitmap with each bit meaning given by
* %ethtool_link_mode_bit_indices for the link modes, physical
* connectors and other link features that are advertised through
* autonegotiation or enabled for auto-detection.
* - lp_advertising: Bitmap with each bit meaning given by
* %ethtool_link_mode_bit_indices for the link modes, and other
* link features that the link partner advertised through
* autonegotiation; 0 if unknown or not applicable. Read-only.
*/
struct ethtool_link_settings {
uint32_t cmd;

@@ -52,7 +52,7 @@
* rest are per-device feature bits.
*/
#define VIRTIO_TRANSPORT_F_START 28
#define VIRTIO_TRANSPORT_F_END 41
#define VIRTIO_TRANSPORT_F_END 42
#ifndef VIRTIO_CONFIG_NO_LEGACY
/* Do we get callbacks when the ring is completely used, even if we've
@@ -112,4 +112,10 @@
* This feature indicates that the driver can reset a queue individually.
*/
#define VIRTIO_F_RING_RESET 40
/*
* This feature indicates that the device support administration virtqueues.
*/
#define VIRTIO_F_ADMIN_VQ 41
#endif /* _LINUX_VIRTIO_CONFIG_H */

@@ -175,6 +175,9 @@ struct virtio_pci_modern_common_cfg {
uint16_t queue_notify_data; /* read-write */
uint16_t queue_reset; /* read-write */
uint16_t admin_queue_index; /* read-only */
uint16_t admin_queue_num; /* read-only */
};
/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
@@ -215,7 +218,72 @@ struct virtio_pci_cfg_cap {
#define VIRTIO_PCI_COMMON_Q_USEDHI 52
#define VIRTIO_PCI_COMMON_Q_NDATA 56
#define VIRTIO_PCI_COMMON_Q_RESET 58
#define VIRTIO_PCI_COMMON_ADM_Q_IDX 60
#define VIRTIO_PCI_COMMON_ADM_Q_NUM 62
#endif /* VIRTIO_PCI_NO_MODERN */
/* Admin command status. */
#define VIRTIO_ADMIN_STATUS_OK 0
/* Admin command opcode. */
#define VIRTIO_ADMIN_CMD_LIST_QUERY 0x0
#define VIRTIO_ADMIN_CMD_LIST_USE 0x1
/* Admin command group type. */
#define VIRTIO_ADMIN_GROUP_TYPE_SRIOV 0x1
/* Transitional device admin command. */
#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE 0x2
#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ 0x3
#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE 0x4
#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ 0x5
#define VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO 0x6
struct QEMU_PACKED virtio_admin_cmd_hdr {
uint16_t opcode;
/*
* 1 - SR-IOV
* 2-65535 - reserved
*/
uint16_t group_type;
/* Unused, reserved for future extensions. */
uint8_t reserved1[12];
uint64_t group_member_id;
};
struct QEMU_PACKED virtio_admin_cmd_status {
uint16_t status;
uint16_t status_qualifier;
/* Unused, reserved for future extensions. */
uint8_t reserved2[4];
};
struct QEMU_PACKED virtio_admin_cmd_legacy_wr_data {
uint8_t offset; /* Starting offset of the register(s) to write. */
uint8_t reserved[7];
uint8_t registers[];
};
struct QEMU_PACKED virtio_admin_cmd_legacy_rd_data {
uint8_t offset; /* Starting offset of the register(s) to read. */
};
#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_END 0
#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_DEV 0x1
#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_MEM 0x2
#define VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO 4
struct QEMU_PACKED virtio_admin_cmd_notify_info_data {
uint8_t flags; /* 0 = end of list, 1 = owner device, 2 = member device */
uint8_t bar; /* BAR of the member or the owner device */
uint8_t padding[6];
uint64_t offset; /* Offset within bar. */
};
struct virtio_admin_cmd_notify_info_result {
struct virtio_admin_cmd_notify_info_data entries[VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO];
};
#endif

@@ -14,6 +14,13 @@
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_config.h"
/* Feature bits */
/* guest physical address range will be indicated as shared memory region 0 */
#define VIRTIO_PMEM_F_SHMEM_REGION 0
/* shmid of the shared memory region corresponding to the pmem */
#define VIRTIO_PMEM_SHMEM_REGION_ID 0
struct virtio_pmem_config {
uint64_t start;
uint64_t size;

@@ -829,8 +829,21 @@ __SYSCALL(__NR_futex_wait, sys_futex_wait)
#define __NR_futex_requeue 456
__SYSCALL(__NR_futex_requeue, sys_futex_requeue)
#define __NR_statmount 457
__SYSCALL(__NR_statmount, sys_statmount)
#define __NR_listmount 458
__SYSCALL(__NR_listmount, sys_listmount)
#define __NR_lsm_get_self_attr 459
__SYSCALL(__NR_lsm_get_self_attr, sys_lsm_get_self_attr)
#define __NR_lsm_set_self_attr 460
__SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr)
#define __NR_lsm_list_modules 461
__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
#undef __NR_syscalls
#define __NR_syscalls 457
#define __NR_syscalls 462
/*
* 32 bit systems traditionally used different

@@ -88,7 +88,7 @@
#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */
#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */
#define MADV_DONTDUMP 16 /* Explicity exclude from the core dump,
#define MADV_DONTDUMP 16 /* Explicitly exclude from core dump,
overrides the coredump filter bits */
#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */

@@ -385,5 +385,10 @@
#define __NR_futex_wake (__NR_Linux + 454)
#define __NR_futex_wait (__NR_Linux + 455)
#define __NR_futex_requeue (__NR_Linux + 456)
#define __NR_statmount (__NR_Linux + 457)
#define __NR_listmount (__NR_Linux + 458)
#define __NR_lsm_get_self_attr (__NR_Linux + 459)
#define __NR_lsm_set_self_attr (__NR_Linux + 460)
#define __NR_lsm_list_modules (__NR_Linux + 461)
#endif /* _ASM_UNISTD_N32_H */

@@ -361,5 +361,10 @@
#define __NR_futex_wake (__NR_Linux + 454)
#define __NR_futex_wait (__NR_Linux + 455)
#define __NR_futex_requeue (__NR_Linux + 456)
#define __NR_statmount (__NR_Linux + 457)
#define __NR_listmount (__NR_Linux + 458)
#define __NR_lsm_get_self_attr (__NR_Linux + 459)
#define __NR_lsm_set_self_attr (__NR_Linux + 460)
#define __NR_lsm_list_modules (__NR_Linux + 461)
#endif /* _ASM_UNISTD_N64_H */

@@ -431,5 +431,10 @@
#define __NR_futex_wake (__NR_Linux + 454)
#define __NR_futex_wait (__NR_Linux + 455)
#define __NR_futex_requeue (__NR_Linux + 456)
#define __NR_statmount (__NR_Linux + 457)
#define __NR_listmount (__NR_Linux + 458)
#define __NR_lsm_get_self_attr (__NR_Linux + 459)
#define __NR_lsm_set_self_attr (__NR_Linux + 460)
#define __NR_lsm_list_modules (__NR_Linux + 461)
#endif /* _ASM_UNISTD_O32_H */

@@ -438,6 +438,11 @@
#define __NR_futex_wake 454
#define __NR_futex_wait 455
#define __NR_futex_requeue 456
#define __NR_statmount 457
#define __NR_listmount 458
#define __NR_lsm_get_self_attr 459
#define __NR_lsm_set_self_attr 460
#define __NR_lsm_list_modules 461
#endif /* _ASM_UNISTD_32_H */

@@ -410,6 +410,11 @@
#define __NR_futex_wake 454
#define __NR_futex_wait 455
#define __NR_futex_requeue 456
#define __NR_statmount 457
#define __NR_listmount 458
#define __NR_lsm_get_self_attr 459
#define __NR_lsm_set_self_attr 460
#define __NR_lsm_list_modules 461
#endif /* _ASM_UNISTD_64_H */

@@ -139,6 +139,33 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_ZIHPM,
KVM_RISCV_ISA_EXT_SMSTATEEN,
KVM_RISCV_ISA_EXT_ZICOND,
KVM_RISCV_ISA_EXT_ZBC,
KVM_RISCV_ISA_EXT_ZBKB,
KVM_RISCV_ISA_EXT_ZBKC,
KVM_RISCV_ISA_EXT_ZBKX,
KVM_RISCV_ISA_EXT_ZKND,
KVM_RISCV_ISA_EXT_ZKNE,
KVM_RISCV_ISA_EXT_ZKNH,
KVM_RISCV_ISA_EXT_ZKR,
KVM_RISCV_ISA_EXT_ZKSED,
KVM_RISCV_ISA_EXT_ZKSH,
KVM_RISCV_ISA_EXT_ZKT,
KVM_RISCV_ISA_EXT_ZVBB,
KVM_RISCV_ISA_EXT_ZVBC,
KVM_RISCV_ISA_EXT_ZVKB,
KVM_RISCV_ISA_EXT_ZVKG,
KVM_RISCV_ISA_EXT_ZVKNED,
KVM_RISCV_ISA_EXT_ZVKNHA,
KVM_RISCV_ISA_EXT_ZVKNHB,
KVM_RISCV_ISA_EXT_ZVKSED,
KVM_RISCV_ISA_EXT_ZVKSH,
KVM_RISCV_ISA_EXT_ZVKT,
KVM_RISCV_ISA_EXT_ZFH,
KVM_RISCV_ISA_EXT_ZFHMIN,
KVM_RISCV_ISA_EXT_ZIHINTNTL,
KVM_RISCV_ISA_EXT_ZVFH,
KVM_RISCV_ISA_EXT_ZVFHMIN,
KVM_RISCV_ISA_EXT_ZFA,
KVM_RISCV_ISA_EXT_MAX,
};
@@ -157,9 +184,16 @@ enum KVM_RISCV_SBI_EXT_ID {
KVM_RISCV_SBI_EXT_EXPERIMENTAL,
KVM_RISCV_SBI_EXT_VENDOR,
KVM_RISCV_SBI_EXT_DBCN,
KVM_RISCV_SBI_EXT_STA,
KVM_RISCV_SBI_EXT_MAX,
};
/* SBI STA extension registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
struct kvm_riscv_sbi_sta {
unsigned long shmem_lo;
unsigned long shmem_hi;
};
/* Possible states for kvm_riscv_timer */
#define KVM_RISCV_TIMER_STATE_OFF 0
#define KVM_RISCV_TIMER_STATE_ON 1
@@ -241,6 +275,12 @@ enum KVM_RISCV_SBI_EXT_ID {
#define KVM_REG_RISCV_VECTOR_REG(n) \
((n) + sizeof(struct __riscv_v_ext_state) / sizeof(unsigned long))
/* Registers for specific SBI extensions are mapped as type 10 */
#define KVM_REG_RISCV_SBI_STATE (0x0a << KVM_REG_RISCV_TYPE_SHIFT)
#define KVM_REG_RISCV_SBI_STA (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT)
#define KVM_REG_RISCV_SBI_STA_REG(name) \
(offsetof(struct kvm_riscv_sbi_sta, name) / sizeof(unsigned long))
/* Device Control API: RISC-V AIA */
#define KVM_DEV_RISCV_APLIC_ALIGN 0x1000
#define KVM_DEV_RISCV_APLIC_SIZE 0x4000
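
With the new SBI state type, userspace addresses the STA shared-memory
registers by OR-ing the RISC-V ONE_REG fields together. A hedged sketch for
rv64 guests, assuming Linux 6.8 UAPI headers; the exact composition below is
an assumption built from the macros in this hunk:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read kvm_riscv_sbi_sta.shmem_lo for one vCPU (rv64: KVM_REG_SIZE_U64). */
int get_sta_shmem_lo(int vcpu_fd, uint64_t *val)
{
    struct kvm_one_reg reg = {
        .id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
              KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA |
              KVM_REG_RISCV_SBI_STA_REG(shmem_lo),
        .addr = (uintptr_t)val,
    };
    return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}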

@@ -429,5 +429,10 @@
#define __NR_futex_wake 454
#define __NR_futex_wait 455
#define __NR_futex_requeue 456
#define __NR_statmount 457
#define __NR_listmount 458
#define __NR_lsm_get_self_attr 459
#define __NR_lsm_set_self_attr 460
#define __NR_lsm_list_modules 461
#endif /* _ASM_S390_UNISTD_32_H */

@@ -377,5 +377,10 @@
#define __NR_futex_wake 454
#define __NR_futex_wait 455
#define __NR_futex_requeue 456
#define __NR_statmount 457
#define __NR_listmount 458
#define __NR_lsm_get_self_attr 459
#define __NR_lsm_set_self_attr 460
#define __NR_lsm_list_modules 461
#endif /* _ASM_S390_UNISTD_64_H */

@@ -560,4 +560,7 @@ struct kvm_pmu_event_filter {
/* x86-specific KVM_EXIT_HYPERCALL flags. */
#define KVM_EXIT_HYPERCALL_LONG_MODE BIT(0)
#define KVM_X86_DEFAULT_VM 0
#define KVM_X86_SW_PROTECTED_VM 1
#endif /* _ASM_X86_KVM_H */

@@ -447,6 +447,11 @@
#define __NR_futex_wake 454
#define __NR_futex_wait 455
#define __NR_futex_requeue 456
#define __NR_statmount 457
#define __NR_listmount 458
#define __NR_lsm_get_self_attr 459
#define __NR_lsm_set_self_attr 460
#define __NR_lsm_list_modules 461
#endif /* _ASM_UNISTD_32_H */

@@ -369,6 +369,11 @@
#define __NR_futex_wake 454
#define __NR_futex_wait 455
#define __NR_futex_requeue 456
#define __NR_statmount 457
#define __NR_listmount 458
#define __NR_lsm_get_self_attr 459
#define __NR_lsm_set_self_attr 460
#define __NR_lsm_list_modules 461
#endif /* _ASM_UNISTD_64_H */

@@ -321,6 +321,11 @@
#define __NR_futex_wake (__X32_SYSCALL_BIT + 454)
#define __NR_futex_wait (__X32_SYSCALL_BIT + 455)
#define __NR_futex_requeue (__X32_SYSCALL_BIT + 456)
#define __NR_statmount (__X32_SYSCALL_BIT + 457)
#define __NR_listmount (__X32_SYSCALL_BIT + 458)
#define __NR_lsm_get_self_attr (__X32_SYSCALL_BIT + 459)
#define __NR_lsm_set_self_attr (__X32_SYSCALL_BIT + 460)
#define __NR_lsm_list_modules (__X32_SYSCALL_BIT + 461)
#define __NR_rt_sigaction (__X32_SYSCALL_BIT + 512)
#define __NR_rt_sigreturn (__X32_SYSCALL_BIT + 513)
#define __NR_ioctl (__X32_SYSCALL_BIT + 514)

@@ -49,6 +49,7 @@ enum {
IOMMUFD_CMD_GET_HW_INFO,
IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING,
IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP,
IOMMUFD_CMD_HWPT_INVALIDATE,
};
/**
@@ -613,4 +614,82 @@ struct iommu_hwpt_get_dirty_bitmap {
#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \
IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP)
/**
* enum iommu_hwpt_invalidate_data_type - IOMMU HWPT Cache Invalidation
* Data Type
* @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1
*/
enum iommu_hwpt_invalidate_data_type {
IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
};
/**
* enum iommu_hwpt_vtd_s1_invalidate_flags - Flags for Intel VT-d
* stage-1 cache invalidation
* @IOMMU_VTD_INV_FLAGS_LEAF: Indicates whether the invalidation applies
* to all-levels page structure cache or just
* the leaf PTE cache.
*/
enum iommu_hwpt_vtd_s1_invalidate_flags {
IOMMU_VTD_INV_FLAGS_LEAF = 1 << 0,
};
/**
* struct iommu_hwpt_vtd_s1_invalidate - Intel VT-d cache invalidation
* (IOMMU_HWPT_INVALIDATE_DATA_VTD_S1)
* @addr: The start address of the range to be invalidated. It needs to
* be 4KB aligned.
* @npages: Number of contiguous 4K pages to be invalidated.
* @flags: Combination of enum iommu_hwpt_vtd_s1_invalidate_flags
* @__reserved: Must be 0
*
* The Intel VT-d specific invalidation data for user-managed stage-1 cache
* invalidation in nested translation. Userspace uses this structure to
* tell the impacted cache scope after modifying the stage-1 page table.
*
* Invalidating all the caches related to the page table by setting @addr
* to be 0 and @npages to be U64_MAX.
*
* The device TLB will be invalidated automatically if ATS is enabled.
*/
struct iommu_hwpt_vtd_s1_invalidate {
__aligned_u64 addr;
__aligned_u64 npages;
__u32 flags;
__u32 __reserved;
};
/**
* struct iommu_hwpt_invalidate - ioctl(IOMMU_HWPT_INVALIDATE)
* @size: sizeof(struct iommu_hwpt_invalidate)
* @hwpt_id: ID of a nested HWPT for cache invalidation
* @data_uptr: User pointer to an array of driver-specific cache invalidation
* data.
* @data_type: One of enum iommu_hwpt_invalidate_data_type, defining the data
* type of all the entries in the invalidation request array. It
* should be a type supported by the hwpt pointed by @hwpt_id.
* @entry_len: Length (in bytes) of a request entry in the request array
* @entry_num: Input the number of cache invalidation requests in the array.
* Output the number of requests successfully handled by kernel.
* @__reserved: Must be 0.
*
* Invalidate the iommu cache for user-managed page table. Modifications on a
* user-managed page table should be followed by this operation to sync cache.
* Each ioctl can support one or more cache invalidation requests in the array
* that has a total size of @entry_len * @entry_num.
*
* An empty invalidation request array by setting @entry_num==0 is allowed, and
* @entry_len and @data_uptr would be ignored in this case. This can be used to
* check if the given @data_type is supported or not by kernel.
*/
struct iommu_hwpt_invalidate {
__u32 size;
__u32 hwpt_id;
__aligned_u64 data_uptr;
__u32 data_type;
__u32 entry_len;
__u32 entry_num;
__u32 __reserved;
};
#define IOMMU_HWPT_INVALIDATE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_INVALIDATE)
#endif
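
Following the calling convention documented above, a hedged sketch of a single
whole-table VT-d stage-1 invalidation; iommufd_fd and hwpt_id are assumed to
come from earlier IOMMUFD setup:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Invalidate all stage-1 caches for one nested HWPT: per the comment
 * above, addr == 0 with npages == U64_MAX covers the whole page table. */
int invalidate_all(int iommufd_fd, uint32_t hwpt_id)
{
    struct iommu_hwpt_vtd_s1_invalidate inv = {
        .addr = 0,
        .npages = UINT64_MAX,
        .flags = 0,
    };
    struct iommu_hwpt_invalidate cmd = {
        .size = sizeof(cmd),
        .hwpt_id = hwpt_id,
        .data_uptr = (uintptr_t)&inv,
        .data_type = IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
        .entry_len = sizeof(inv),
        .entry_num = 1,
    };
    return ioctl(iommufd_fd, IOMMU_HWPT_INVALIDATE, &cmd);
}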

@@ -16,76 +16,6 @@
#define KVM_API_VERSION 12
/* *** Deprecated interfaces *** */
#define KVM_TRC_SHIFT 16
#define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT)
#define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1))
#define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01)
#define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02)
#define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01)
#define KVM_TRC_HEAD_SIZE 12
#define KVM_TRC_CYCLE_SIZE 8
#define KVM_TRC_EXTRA_MAX 7
#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04)
#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05)
#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06)
#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07)
#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08)
#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09)
#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A)
#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B)
#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C)
#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D)
#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E)
#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F)
#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10)
#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11)
#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15)
#define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16)
#define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17)
#define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18)
#define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19)
struct kvm_user_trace_setup {
__u32 buf_size;
__u32 buf_nr;
};
#define __KVM_DEPRECATED_MAIN_W_0x06 \
_IOW(KVMIO, 0x06, struct kvm_user_trace_setup)
#define __KVM_DEPRECATED_MAIN_0x07 _IO(KVMIO, 0x07)
#define __KVM_DEPRECATED_MAIN_0x08 _IO(KVMIO, 0x08)
#define __KVM_DEPRECATED_VM_R_0x70 _IOR(KVMIO, 0x70, struct kvm_assigned_irq)
struct kvm_breakpoint {
__u32 enabled;
__u32 padding;
__u64 address;
};
struct kvm_debug_guest {
__u32 enabled;
__u32 pad;
struct kvm_breakpoint breakpoints[4];
__u32 singlestep;
};
#define __KVM_DEPRECATED_VCPU_W_0x87 _IOW(KVMIO, 0x87, struct kvm_debug_guest)
/* *** End of deprecated interfaces *** */
/* for KVM_SET_USER_MEMORY_REGION */
struct kvm_userspace_memory_region {
__u32 slot;
@@ -95,6 +25,19 @@ struct kvm_userspace_memory_region {
__u64 userspace_addr; /* start of the userspace allocated memory */
};
/* for KVM_SET_USER_MEMORY_REGION2 */
struct kvm_userspace_memory_region2 {
__u32 slot;
__u32 flags;
__u64 guest_phys_addr;
__u64 memory_size;
__u64 userspace_addr;
__u64 guest_memfd_offset;
__u32 guest_memfd;
__u32 pad1;
__u64 pad2[14];
};
/*
* The bit 0 ~ bit 15 of kvm_userspace_memory_region::flags are visible for
* userspace, other bits are reserved for kvm internal use which are defined
@@ -102,6 +45,7 @@ struct kvm_userspace_memory_region {
*/
#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
#define KVM_MEM_READONLY (1UL << 1)
#define KVM_MEM_GUEST_MEMFD (1UL << 2)
/* for KVM_IRQ_LINE */
struct kvm_irq_level {
@@ -265,6 +209,7 @@ struct kvm_xen_exit {
#define KVM_EXIT_RISCV_CSR 36
#define KVM_EXIT_NOTIFY 37
#define KVM_EXIT_LOONGARCH_IOCSR 38
#define KVM_EXIT_MEMORY_FAULT 39
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -514,6 +459,13 @@ struct kvm_run {
#define KVM_NOTIFY_CONTEXT_INVALID (1 << 0)
__u32 flags;
} notify;
/* KVM_EXIT_MEMORY_FAULT */
struct {
#define KVM_MEMORY_EXIT_FLAG_PRIVATE (1ULL << 3)
__u64 flags;
__u64 gpa;
__u64 size;
} memory_fault;
/* Fix the size of the union. */
char padding[256];
};
@@ -941,9 +893,6 @@ struct kvm_ppc_resize_hpt {
*/
#define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */
#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
#define KVM_TRACE_ENABLE __KVM_DEPRECATED_MAIN_W_0x06
#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07
#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08
#define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
#define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list)
@@ -1197,6 +1146,11 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
#define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230
#define KVM_CAP_USER_MEMORY2 231
#define KVM_CAP_MEMORY_FAULT_INFO 232
#define KVM_CAP_MEMORY_ATTRIBUTES 233
#define KVM_CAP_GUEST_MEMFD 234
#define KVM_CAP_VM_TYPES 235
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1287,6 +1241,7 @@ struct kvm_x86_mce {
#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5)
#define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6)
#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7)
struct kvm_xen_hvm_config {
__u32 flags;
@@ -1479,6 +1434,8 @@ struct kvm_vfio_spapr_tce {
struct kvm_userspace_memory_region)
#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
#define KVM_SET_USER_MEMORY_REGION2 _IOW(KVMIO, 0x49, \
struct kvm_userspace_memory_region2)
/* enable ucontrol for s390 */
struct kvm_s390_ucas_mapping {
@@ -1503,20 +1460,8 @@ struct kvm_s390_ucas_mapping {
_IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone)
#define KVM_UNREGISTER_COALESCED_MMIO \
_IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone)
#define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \
struct kvm_assigned_pci_dev)
#define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing)
/* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */
#define KVM_ASSIGN_IRQ __KVM_DEPRECATED_VM_R_0x70
#define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq)
#define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71)
#define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \
struct kvm_assigned_pci_dev)
#define KVM_ASSIGN_SET_MSIX_NR _IOW(KVMIO, 0x73, \
struct kvm_assigned_msix_nr)
#define KVM_ASSIGN_SET_MSIX_ENTRY _IOW(KVMIO, 0x74, \
struct kvm_assigned_msix_entry)
#define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
#define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd)
#define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
@@ -1533,9 +1478,6 @@ struct kvm_s390_ucas_mapping {
* KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */
#define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2)
#define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3)
/* Available with KVM_CAP_PCI_2_3 */
#define KVM_ASSIGN_SET_INTX_MASK _IOW(KVMIO, 0xa4, \
struct kvm_assigned_pci_dev)
/* Available with KVM_CAP_SIGNAL_MSI */
#define KVM_SIGNAL_MSI _IOW(KVMIO, 0xa5, struct kvm_msi)
/* Available with KVM_CAP_PPC_GET_SMMU_INFO */
@@ -1588,8 +1530,6 @@ struct kvm_s390_ucas_mapping {
#define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs)
#define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation)
#define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt)
/* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */
#define KVM_DEBUG_GUEST __KVM_DEPRECATED_VCPU_W_0x87
#define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs)
#define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs)
#define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid)
@@ -2263,4 +2203,24 @@ struct kvm_s390_zpci_op {
/* flags for kvm_s390_zpci_op->u.reg_aen.flags */
#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0)
/* Available with KVM_CAP_MEMORY_ATTRIBUTES */
#define KVM_SET_MEMORY_ATTRIBUTES _IOW(KVMIO, 0xd2, struct kvm_memory_attributes)
struct kvm_memory_attributes {
__u64 address;
__u64 size;
__u64 attributes;
__u64 flags;
};
#define KVM_MEMORY_ATTRIBUTE_PRIVATE (1ULL << 3)
#define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO, 0xd4, struct kvm_create_guest_memfd)
struct kvm_create_guest_memfd {
__u64 size;
__u64 flags;
__u64 reserved[6];
};
#endif /* __LINUX_KVM_H */
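
The new structures compose as the comments suggest: create a guest memfd on
the VM fd, then attach it to a slot with KVM_SET_USER_MEMORY_REGION2 and the
KVM_MEM_GUEST_MEMFD flag. A hedged sketch; the slot number and addresses are
illustrative:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Back guest physical memory at gpa with a guest memfd; uaddr is a host
 * mapping used for the shared view of the slot. */
int map_guest_memfd_slot(int vm_fd, uint64_t gpa, uint64_t size,
                         uint64_t uaddr)
{
    struct kvm_create_guest_memfd gmem = { .size = size };
    int gmem_fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);

    if (gmem_fd < 0) {
        return gmem_fd;
    }

    struct kvm_userspace_memory_region2 region = {
        .slot = 0,                      /* illustrative slot */
        .flags = KVM_MEM_GUEST_MEMFD,
        .guest_phys_addr = gpa,
        .memory_size = size,
        .userspace_addr = uaddr,
        .guest_memfd = (uint32_t)gmem_fd,
        .guest_memfd_offset = 0,
    };
    return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
}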

@@ -41,7 +41,8 @@
UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \
UFFD_FEATURE_WP_UNPOPULATED | \
UFFD_FEATURE_POISON | \
UFFD_FEATURE_WP_ASYNC)
UFFD_FEATURE_WP_ASYNC | \
UFFD_FEATURE_MOVE)
#define UFFD_API_IOCTLS \
((__u64)1 << _UFFDIO_REGISTER | \
(__u64)1 << _UFFDIO_UNREGISTER | \
@@ -50,6 +51,7 @@
((__u64)1 << _UFFDIO_WAKE | \
(__u64)1 << _UFFDIO_COPY | \
(__u64)1 << _UFFDIO_ZEROPAGE | \
(__u64)1 << _UFFDIO_MOVE | \
(__u64)1 << _UFFDIO_WRITEPROTECT | \
(__u64)1 << _UFFDIO_CONTINUE | \
(__u64)1 << _UFFDIO_POISON)
@@ -73,6 +75,7 @@
#define _UFFDIO_WAKE (0x02)
#define _UFFDIO_COPY (0x03)
#define _UFFDIO_ZEROPAGE (0x04)
#define _UFFDIO_MOVE (0x05)
#define _UFFDIO_WRITEPROTECT (0x06)
#define _UFFDIO_CONTINUE (0x07)
#define _UFFDIO_POISON (0x08)
@@ -92,6 +95,8 @@
struct uffdio_copy)
#define UFFDIO_ZEROPAGE _IOWR(UFFDIO, _UFFDIO_ZEROPAGE, \
struct uffdio_zeropage)
#define UFFDIO_MOVE _IOWR(UFFDIO, _UFFDIO_MOVE, \
struct uffdio_move)
#define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \
struct uffdio_writeprotect)
#define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \
@@ -222,6 +227,9 @@ struct uffdio_api {
* asynchronous mode is supported in which the write fault is
* automatically resolved and write-protection is un-set.
* It implies UFFD_FEATURE_WP_UNPOPULATED.
*
* UFFD_FEATURE_MOVE indicates that the kernel supports moving an
* existing page contents from userspace.
*/
#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
#define UFFD_FEATURE_EVENT_FORK (1<<1)
@@ -239,6 +247,7 @@
#define UFFD_FEATURE_WP_UNPOPULATED (1<<13)
#define UFFD_FEATURE_POISON (1<<14)
#define UFFD_FEATURE_WP_ASYNC (1<<15)
#define UFFD_FEATURE_MOVE (1<<16)
__u64 features;
__u64 ioctls;
@@ -347,6 +356,24 @@ struct uffdio_poison {
__s64 updated;
};
struct uffdio_move {
__u64 dst;
__u64 src;
__u64 len;
/*
* Especially if used to atomically remove memory from the
* address space the wake on the dst range is not needed.
*/
#define UFFDIO_MOVE_MODE_DONTWAKE ((__u64)1<<0)
#define UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES ((__u64)1<<1)
__u64 mode;
/*
* "move" is written by the ioctl and must be at the end: the
* copy_from_user will not read the last 8 bytes.
*/
__s64 move;
};
/*
* Flags for the userfaultfd(2) system call itself.
*/
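
UFFDIO_MOVE follows the same calling pattern as UFFDIO_COPY: fill in
dst/src/len/mode, issue the ioctl, and check the kernel-written move field
for progress. A hedged sketch, assuming uffd was created with
UFFD_FEATURE_MOVE negotiated via UFFDIO_API:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/* Move len bytes of existing page contents from src to dst inside a
 * registered range; mv.move reports how much was actually moved. */
int uffd_move(int uffd, uint64_t dst, uint64_t src, uint64_t len)
{
    struct uffdio_move mv = {
        .dst = dst,
        .src = src,
        .len = len,
        .mode = 0,
    };

    if (ioctl(uffd, UFFDIO_MOVE, &mv) == -1) {
        return -1;
    }
    return mv.move == (int64_t)len ? 0 : -1;
}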

@@ -1219,6 +1219,7 @@ enum vfio_device_mig_state {
VFIO_DEVICE_STATE_RUNNING_P2P = 5,
VFIO_DEVICE_STATE_PRE_COPY = 6,
VFIO_DEVICE_STATE_PRE_COPY_P2P = 7,
VFIO_DEVICE_STATE_NR,
};
/**

@@ -8808,13 +8808,43 @@ static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
#define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
#define RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
#define RISCV_HWPROBE_KEY_IMA_EXT_0 4
#define RISCV_HWPROBE_IMA_FD (1 << 0)
#define RISCV_HWPROBE_IMA_C (1 << 1)
#define RISCV_HWPROBE_IMA_V (1 << 2)
#define RISCV_HWPROBE_EXT_ZBA (1 << 3)
#define RISCV_HWPROBE_EXT_ZBB (1 << 4)
#define RISCV_HWPROBE_EXT_ZBS (1 << 5)
#define RISCV_HWPROBE_KEY_IMA_EXT_0 4
#define RISCV_HWPROBE_IMA_FD (1 << 0)
#define RISCV_HWPROBE_IMA_C (1 << 1)
#define RISCV_HWPROBE_IMA_V (1 << 2)
#define RISCV_HWPROBE_EXT_ZBA (1 << 3)
#define RISCV_HWPROBE_EXT_ZBB (1 << 4)
#define RISCV_HWPROBE_EXT_ZBS (1 << 5)
#define RISCV_HWPROBE_EXT_ZICBOZ (1 << 6)
#define RISCV_HWPROBE_EXT_ZBC (1 << 7)
#define RISCV_HWPROBE_EXT_ZBKB (1 << 8)
#define RISCV_HWPROBE_EXT_ZBKC (1 << 9)
#define RISCV_HWPROBE_EXT_ZBKX (1 << 10)
#define RISCV_HWPROBE_EXT_ZKND (1 << 11)
#define RISCV_HWPROBE_EXT_ZKNE (1 << 12)
#define RISCV_HWPROBE_EXT_ZKNH (1 << 13)
#define RISCV_HWPROBE_EXT_ZKSED (1 << 14)
#define RISCV_HWPROBE_EXT_ZKSH (1 << 15)
#define RISCV_HWPROBE_EXT_ZKT (1 << 16)
#define RISCV_HWPROBE_EXT_ZVBB (1 << 17)
#define RISCV_HWPROBE_EXT_ZVBC (1 << 18)
#define RISCV_HWPROBE_EXT_ZVKB (1 << 19)
#define RISCV_HWPROBE_EXT_ZVKG (1 << 20)
#define RISCV_HWPROBE_EXT_ZVKNED (1 << 21)
#define RISCV_HWPROBE_EXT_ZVKNHA (1 << 22)
#define RISCV_HWPROBE_EXT_ZVKNHB (1 << 23)
#define RISCV_HWPROBE_EXT_ZVKSED (1 << 24)
#define RISCV_HWPROBE_EXT_ZVKSH (1 << 25)
#define RISCV_HWPROBE_EXT_ZVKT (1 << 26)
#define RISCV_HWPROBE_EXT_ZFH (1 << 27)
#define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28)
#define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29)
#define RISCV_HWPROBE_EXT_ZVFH (1 << 30)
#define RISCV_HWPROBE_EXT_ZVFHMIN (1 << 31)
#define RISCV_HWPROBE_EXT_ZFA (1ULL << 32)
#define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33)
#define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34)
#define RISCV_HWPROBE_EXT_ZICOND (1ULL << 35)
#define RISCV_HWPROBE_KEY_CPUPERF_0 5
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
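These keys mirror the Linux riscv_hwprobe(2) interface, so a guest binary can check what QEMU now reports roughly like this (a sketch assuming a Linux 6.8-era asm/hwprobe.h providing the macros above):

    #include <asm/hwprobe.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
        struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };

        /* One key, applying to all CPUs (NULL cpu set, zero flags). */
        if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) != 0) {
            return 1;
        }
        printf("zba:  %s\n", (pair.value & RISCV_HWPROBE_EXT_ZBA) ? "yes" : "no");
        printf("ztso: %s\n", (pair.value & RISCV_HWPROBE_EXT_ZTSO) ? "yes" : "no");
        return 0;
    }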
@ -8873,6 +8903,66 @@ static void risc_hwprobe_fill_pairs(CPURISCVState *env,
RISCV_HWPROBE_EXT_ZBB : 0;
value |= cfg->ext_zbs ?
RISCV_HWPROBE_EXT_ZBS : 0;
value |= cfg->ext_zicboz ?
RISCV_HWPROBE_EXT_ZICBOZ : 0;
value |= cfg->ext_zbc ?
RISCV_HWPROBE_EXT_ZBC : 0;
value |= cfg->ext_zbkb ?
RISCV_HWPROBE_EXT_ZBKB : 0;
value |= cfg->ext_zbkc ?
RISCV_HWPROBE_EXT_ZBKC : 0;
value |= cfg->ext_zbkx ?
RISCV_HWPROBE_EXT_ZBKX : 0;
value |= cfg->ext_zknd ?
RISCV_HWPROBE_EXT_ZKND : 0;
value |= cfg->ext_zkne ?
RISCV_HWPROBE_EXT_ZKNE : 0;
value |= cfg->ext_zknh ?
RISCV_HWPROBE_EXT_ZKNH : 0;
value |= cfg->ext_zksed ?
RISCV_HWPROBE_EXT_ZKSED : 0;
value |= cfg->ext_zksh ?
RISCV_HWPROBE_EXT_ZKSH : 0;
value |= cfg->ext_zkt ?
RISCV_HWPROBE_EXT_ZKT : 0;
value |= cfg->ext_zvbb ?
RISCV_HWPROBE_EXT_ZVBB : 0;
value |= cfg->ext_zvbc ?
RISCV_HWPROBE_EXT_ZVBC : 0;
value |= cfg->ext_zvkb ?
RISCV_HWPROBE_EXT_ZVKB : 0;
value |= cfg->ext_zvkg ?
RISCV_HWPROBE_EXT_ZVKG : 0;
value |= cfg->ext_zvkned ?
RISCV_HWPROBE_EXT_ZVKNED : 0;
value |= cfg->ext_zvknha ?
RISCV_HWPROBE_EXT_ZVKNHA : 0;
value |= cfg->ext_zvknhb ?
RISCV_HWPROBE_EXT_ZVKNHB : 0;
value |= cfg->ext_zvksed ?
RISCV_HWPROBE_EXT_ZVKSED : 0;
value |= cfg->ext_zvksh ?
RISCV_HWPROBE_EXT_ZVKSH : 0;
value |= cfg->ext_zvkt ?
RISCV_HWPROBE_EXT_ZVKT : 0;
value |= cfg->ext_zfh ?
RISCV_HWPROBE_EXT_ZFH : 0;
value |= cfg->ext_zfhmin ?
RISCV_HWPROBE_EXT_ZFHMIN : 0;
value |= cfg->ext_zihintntl ?
RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
value |= cfg->ext_zvfh ?
RISCV_HWPROBE_EXT_ZVFH : 0;
value |= cfg->ext_zvfhmin ?
RISCV_HWPROBE_EXT_ZVFHMIN : 0;
value |= cfg->ext_zfa ?
RISCV_HWPROBE_EXT_ZFA : 0;
value |= cfg->ext_ztso ?
RISCV_HWPROBE_EXT_ZTSO : 0;
value |= cfg->ext_zacas ?
RISCV_HWPROBE_EXT_ZACAS : 0;
value |= cfg->ext_zicond ?
RISCV_HWPROBE_EXT_ZICOND : 0;
__put_user(value, &pair->value);
break;
case RISCV_HWPROBE_KEY_CPUPERF_0:


@ -98,9 +98,14 @@ bool riscv_cpu_option_set(const char *optname)
* instead.
*/
const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, ext_always_enabled),
ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, ext_always_enabled),
ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, ext_always_enabled),
ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_always_enabled),
ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
@ -109,6 +114,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, ext_always_enabled),
ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
@ -143,6 +149,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
@ -172,8 +179,13 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, ext_always_enabled),
ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, ext_always_enabled),
ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, ext_always_enabled),
ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, ext_always_enabled),
ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
@ -949,9 +961,9 @@ static void riscv_cpu_reset_hold(Object *obj)
env->two_stage_lookup = false;
env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
(!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
MENVCFG_ADUE : 0);
env->henvcfg = 0;
/* Initialized default priorities of local interrupts. */
for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
@ -1452,17 +1464,27 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
@ -1488,6 +1510,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),
MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
@ -1549,25 +1572,40 @@ const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),
MULTI_EXT_CFG_BOOL("x-zaamo", ext_zaamo, false),
MULTI_EXT_CFG_BOOL("x-zalrsc", ext_zalrsc, false),
MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false),
MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false),
MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false),
MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),
DEFINE_PROP_END_OF_LIST(),
};
#define ALWAYS_ENABLED_FEATURE(_name) \
{.name = _name, \
.offset = CPU_CFG_OFFSET(ext_always_enabled), \
.enabled = true}
/*
* 'Named features' is the name we give to extensions that we
* don't want to expose to users. They are either immutable
* (always enabled/disabled) or they'll vary depending on
* the resulting CPU state. They have riscv,isa strings
* and priv_ver like regular extensions.
*/
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
MULTI_EXT_CFG_BOOL("svade", svade, true),
MULTI_EXT_CFG_BOOL("zic64b", zic64b, true),
MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
/*
* cache-related extensions that are always enabled
* in TCG since QEMU RISC-V does not have a cache
* model.
*/
ALWAYS_ENABLED_FEATURE("za64rs"),
ALWAYS_ENABLED_FEATURE("ziccif"),
ALWAYS_ENABLED_FEATURE("ziccrse"),
ALWAYS_ENABLED_FEATURE("ziccamoa"),
ALWAYS_ENABLED_FEATURE("zicclsm"),
ALWAYS_ENABLED_FEATURE("ssccptr"),
/* Other named features that TCG always implements */
ALWAYS_ENABLED_FEATURE("sstvecd"),
ALWAYS_ENABLED_FEATURE("sstvala"),
ALWAYS_ENABLED_FEATURE("sscounterenw"),
DEFINE_PROP_END_OF_LIST(),
};
@ -2162,13 +2200,10 @@ static const PropertyInfo prop_marchid = {
};
/*
* RVA22U64 defines some 'named features' that are cache
* related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
* and Zicclsm. They are always implemented in TCG and
* don't need to be manually enabled by the profile.
*/
static RISCVCPUProfile RVA22U64 = {
.parent = NULL,
@ -2185,7 +2220,7 @@ static RISCVCPUProfile RVA22U64 = {
CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
/* mandatory named features for this profile */
CPU_CFG_OFFSET(ext_zic64b),
RISCV_PROFILE_EXT_LIST_END
}
@ -2200,8 +2235,6 @@ static RISCVCPUProfile RVA22U64 = {
* Other named features that we already implement: Sstvecd, Sstvala,
* Sscounterenw
*
* The remaining features/extensions come from RVA22U64.
*/
static RISCVCPUProfile RVA22S64 = {
@ -2213,10 +2246,7 @@ static RISCVCPUProfile RVA22S64 = {
.ext_offsets = {
/* rva22s64 exts */
CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),
RISCV_PROFILE_EXT_LIST_END
}


@ -271,7 +271,7 @@ struct CPUArchState {
target_ulong hstatus;
target_ulong hedeleg;
uint64_t hideleg;
uint32_t hcounteren;
target_ulong htval;
target_ulong htinst;
target_ulong hgatp;
@ -334,10 +334,10 @@ struct CPUArchState {
*/
bool two_stage_indirect_lookup;
uint32_t scounteren;
uint32_t mcounteren;
uint32_t mcountinhibit;
/* PMU counter state */
PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];


@ -71,6 +71,7 @@ struct RISCVCPUConfig {
bool ext_zihintntl;
bool ext_zihintpause;
bool ext_zihpm;
bool ext_ztso;
bool ext_smstateen;
bool ext_sstc;
bool ext_svadu;
@ -119,13 +120,21 @@ struct RISCVCPUConfig {
bool ext_smepmp;
bool rvv_ta_all_1s;
bool rvv_ma_all_1s;
uint32_t mvendorid;
uint64_t marchid;
uint64_t mimpid;
/* Named features */
bool ext_svade;
bool ext_zic64b;
/*
* Always-'true' boolean for named features that TCG
* always implements and that can't be disabled.
*/
bool ext_always_enabled;
/* Vendor-specific custom extensions */
bool ext_xtheadba;
bool ext_xtheadbb;


@ -907,7 +907,9 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
}
bool pbmte = env->menvcfg & MENVCFG_PBMTE;
bool svade = riscv_cpu_cfg(env)->ext_svade;
bool svadu = riscv_cpu_cfg(env)->ext_svadu;
bool adue = svadu ? env->menvcfg & MENVCFG_ADUE : !svade;
if (first_stage && two_stage && env->virt_enabled) {
pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE);
@ -1082,9 +1084,18 @@ restart:
return TRANSLATE_FAIL;
}
target_ulong updated_pte = pte;
/*
* If ADUE is enabled, set accessed and dirty bits.
* Otherwise raise an exception if necessary.
*/
if (adue) {
updated_pte |= PTE_A | (access_type == MMU_DATA_STORE ? PTE_D : 0);
} else if (!(pte & PTE_A) ||
(access_type == MMU_DATA_STORE && !(pte & PTE_D))) {
return TRANSLATE_FAIL;
}
/* Page table updates need to be atomic with MTTCG enabled */
if (updated_pte != pte && !is_debug) {
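The A/D-bit policy introduced above fits in one predicate; this is an illustrative model rather than QEMU code (names local to the sketch):

    /* With Svadu present, hardware A/D updating follows menvcfg.ADUE;
     * without it, Svade alone selects the trap-on-access behavior. */
    static bool hw_ad_update(bool svade, bool svadu, bool menvcfg_adue)
    {
        return svadu ? menvcfg_adue : !svade;
    }

When the predicate is false, the walker above returns TRANSLATE_FAIL instead of setting PTE_A/PTE_D, which is the behavior a Svade-only configuration requires.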
@ -1212,7 +1223,7 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
if (env->virt_enabled) {
if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
0, MMUIdx_U, false, true, true)) {
return -1;
}
}


@ -1295,8 +1295,34 @@ static RISCVException read_mstatus(CPURISCVState *env, int csrno,
static bool validate_vm(CPURISCVState *env, target_ulong vm)
{
uint64_t mode_supported = riscv_cpu_cfg(env)->satp_mode.map;
return get_field(mode_supported, (1 << vm));
}
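The bitmap test also handles sparse mode maps, which the old 'vm <= max' comparison could not; a quick illustration with hypothetical map contents (mode numbers per the privileged spec):

    /* Suppose the map advertises Sv39 (mode 8) and Sv57 (mode 10) but
     * not Sv48 (mode 9). The old check accepted Sv48 because 9 <= 10;
     * the bit test correctly rejects it. */
    uint64_t map = (1 << 8) | (1 << 10);

    bool sv39_ok = map & (1 << 8);   /* true  */
    bool sv48_ok = map & (1 << 9);   /* false */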
static target_ulong legalize_xatp(CPURISCVState *env, target_ulong old_xatp,
target_ulong val)
{
target_ulong mask;
bool vm;
if (riscv_cpu_mxl(env) == MXL_RV32) {
vm = validate_vm(env, get_field(val, SATP32_MODE));
mask = (val ^ old_xatp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
} else {
vm = validate_vm(env, get_field(val, SATP64_MODE));
mask = (val ^ old_xatp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
}
if (vm && mask) {
/*
* The ISA defines SATP.MODE=Bare as "no translation", but we still
* pass these through QEMU's TLB emulation as it improves
* performance. Flushing the TLB on SATP writes with paging
* enabled avoids leaking those invalid cached mappings.
*/
tlb_flush(env_cpu(env));
return val;
}
return old_xatp;
}
static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp,
@ -2133,7 +2159,7 @@ static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
/*
* henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0
* henvcfg.stce is read_only 0 when menvcfg.stce = 0
* henvcfg.adue is read_only 0 when menvcfg.adue = 0
*/
*val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
env->menvcfg);
@ -3021,31 +3047,11 @@ static RISCVException read_satp(CPURISCVState *env, int csrno,
static RISCVException write_satp(CPURISCVState *env, int csrno,
target_ulong val)
{
if (!riscv_cpu_cfg(env)->mmu) {
return RISCV_EXCP_NONE;
}
env->satp = legalize_xatp(env, env->satp, val);
return RISCV_EXCP_NONE;
}
@ -3532,7 +3538,7 @@ static RISCVException read_hgatp(CPURISCVState *env, int csrno,
static RISCVException write_hgatp(CPURISCVState *env, int csrno,
target_ulong val)
{
env->hgatp = legalize_xatp(env, env->hgatp, val);
return RISCV_EXCP_NONE;
}
@ -3809,7 +3815,7 @@ static RISCVException read_vsatp(CPURISCVState *env, int csrno,
static RISCVException write_vsatp(CPURISCVState *env, int csrno,
target_ulong val)
{
env->vsatp = legalize_xatp(env, env->vsatp, val);
return RISCV_EXCP_NONE;
}


@ -40,7 +40,11 @@ static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
}
tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop);
/*
* TSO defines AMOs as acquire+release-RCsc, but does not define LR/SC as
* AMOs. Instead treat them like loads.
*/
if (a->aq || ctx->ztso) {
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
}
@ -76,9 +80,10 @@ static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
gen_set_label(l1);
/*
* Address comparison failure. However, we still need to
* provide the memory barrier implied by AQ/RL/TSO.
*/
TCGBar bar_strl = (ctx->ztso || a->rl) ? TCG_BAR_STRL : 0;
tcg_gen_mb(TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + bar_strl);
gen_set_gpr(ctx, a->rd, tcg_constant_tl(1));
gen_set_label(l2);


@ -266,12 +266,20 @@ static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
{
bool out;
decode_save_opc(ctx);
if (get_xl(ctx) == MXL_RV128) {
out = gen_load_i128(ctx, a, memop);
} else {
out = gen_load_tl(ctx, a, memop);
}
if (ctx->ztso) {
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
}
return out;
}
static bool trans_lb(DisasContext *ctx, arg_lb *a)
@ -328,6 +336,10 @@ static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
TCGv addr = get_address(ctx, a->rs1, a->imm);
TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
if (ctx->ztso) {
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
}
tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
return true;
}
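The barrier placement encodes the usual TSO mapping: every load acts as an acquire and every store as a release. A classic message-passing snippet (illustrative, not from the patch) shows what guest code may now rely on:

    /* Under RVTSO the writer's two stores stay ordered and the reader's
     * two loads stay ordered, so no explicit fences are needed; under
     * plain RVWMO both sides would need them. */
    volatile int data, flag;

    void writer(void)
    {
        data = 42;          /* ordered before the flag store */
        flag = 1;
    }

    int reader(void)
    {
        while (!flag) { }   /* acquire-like load under Ztso */
        return data;        /* observes 42 */
    }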


@ -636,10 +636,28 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
/*
* According to the specification
*
* Additionally, if the Ztso extension is implemented, then vector memory
* instructions in the V extension and Zve family of extensions follow
* RVTSO at the instruction level. The Ztso extension does not
* strengthen the ordering of intra-instruction element accesses.
*
* as a result neither ordered nor unordered accesses from the V
* instructions need ordering within the loop but we do still need barriers
* around the loop.
*/
if (is_store && s->ztso) {
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
}
mark_vs_dirty(s);
fn(dest, mask, base, tcg_env, desc);
if (!is_store && s->ztso) {
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
}
gen_set_label(over);
@ -778,7 +796,7 @@ typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
uint32_t data, gen_helper_ldst_stride *fn,
DisasContext *s, bool is_store)
DisasContext *s)
{
TCGv_ptr dest, mask;
TCGv base, stride;
@ -797,11 +815,9 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
mark_vs_dirty(s);
fn(dest, mask, base, stride, tcg_env, desc);
gen_set_label(over);
return true;
@ -827,7 +843,7 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
data = FIELD_DP32(data, VDATA, NF, a->nf);
data = FIELD_DP32(data, VDATA, VTA, s->vta);
data = FIELD_DP32(data, VDATA, VMA, s->vma);
return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}
static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@ -861,7 +877,7 @@ static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
return false;
}
return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}
static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@ -884,7 +900,7 @@ typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv,
static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
uint32_t data, gen_helper_ldst_index *fn,
DisasContext *s, bool is_store)
DisasContext *s)
{
TCGv_ptr dest, mask, index;
TCGv base;
@ -904,11 +920,9 @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
tcg_gen_addi_ptr(index, tcg_env, vreg_ofs(s, vs2));
tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
mark_vs_dirty(s);
fn(dest, mask, base, index, tcg_env, desc);
gen_set_label(over);
return true;
@ -953,7 +967,7 @@ static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
data = FIELD_DP32(data, VDATA, NF, a->nf);
data = FIELD_DP32(data, VDATA, VTA, s->vta);
data = FIELD_DP32(data, VDATA, VMA, s->vma);
return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}
static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@ -1005,7 +1019,7 @@ static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
data = FIELD_DP32(data, VDATA, VM, a->vm);
data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}
static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@ -1084,7 +1098,7 @@ typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32);
static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
uint32_t width, gen_helper_ldst_whole *fn,
DisasContext *s, bool is_store)
DisasContext *s)
{
uint32_t evl = s->cfg_ptr->vlenb * nf / width;
TCGLabel *over = gen_new_label();
@ -1102,11 +1116,10 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
base = get_gpr(s, rs1, EXT_NONE);
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
mark_vs_dirty(s);
fn(dest, base, tcg_env, desc);
gen_set_label(over);
return true;
@ -1116,42 +1129,42 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
* load and store whole register instructions ignore vtype and vl setting.
* Thus, we don't need to check vill bit. (Section 7.9)
*/
#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF, WIDTH, IS_STORE) \
#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF, WIDTH) \
static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
{ \
if (require_rvv(s) && \
QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \
return ldst_whole_trans(a->rd, a->rs1, ARG_NF, WIDTH, \
gen_helper_##NAME, s, IS_STORE); \
gen_helper_##NAME, s); \
} \
return false; \
}
GEN_LDST_WHOLE_TRANS(vl1re8_v, 1, 1, false)
GEN_LDST_WHOLE_TRANS(vl1re16_v, 1, 2, false)
GEN_LDST_WHOLE_TRANS(vl1re32_v, 1, 4, false)
GEN_LDST_WHOLE_TRANS(vl1re64_v, 1, 8, false)
GEN_LDST_WHOLE_TRANS(vl2re8_v, 2, 1, false)
GEN_LDST_WHOLE_TRANS(vl2re16_v, 2, 2, false)
GEN_LDST_WHOLE_TRANS(vl2re32_v, 2, 4, false)
GEN_LDST_WHOLE_TRANS(vl2re64_v, 2, 8, false)
GEN_LDST_WHOLE_TRANS(vl4re8_v, 4, 1, false)
GEN_LDST_WHOLE_TRANS(vl4re16_v, 4, 2, false)
GEN_LDST_WHOLE_TRANS(vl4re32_v, 4, 4, false)
GEN_LDST_WHOLE_TRANS(vl4re64_v, 4, 8, false)
GEN_LDST_WHOLE_TRANS(vl8re8_v, 8, 1, false)
GEN_LDST_WHOLE_TRANS(vl8re16_v, 8, 2, false)
GEN_LDST_WHOLE_TRANS(vl8re32_v, 8, 4, false)
GEN_LDST_WHOLE_TRANS(vl8re64_v, 8, 8, false)
GEN_LDST_WHOLE_TRANS(vl1re8_v, 1, 1)
GEN_LDST_WHOLE_TRANS(vl1re16_v, 1, 2)
GEN_LDST_WHOLE_TRANS(vl1re32_v, 1, 4)
GEN_LDST_WHOLE_TRANS(vl1re64_v, 1, 8)
GEN_LDST_WHOLE_TRANS(vl2re8_v, 2, 1)
GEN_LDST_WHOLE_TRANS(vl2re16_v, 2, 2)
GEN_LDST_WHOLE_TRANS(vl2re32_v, 2, 4)
GEN_LDST_WHOLE_TRANS(vl2re64_v, 2, 8)
GEN_LDST_WHOLE_TRANS(vl4re8_v, 4, 1)
GEN_LDST_WHOLE_TRANS(vl4re16_v, 4, 2)
GEN_LDST_WHOLE_TRANS(vl4re32_v, 4, 4)
GEN_LDST_WHOLE_TRANS(vl4re64_v, 4, 8)
GEN_LDST_WHOLE_TRANS(vl8re8_v, 8, 1)
GEN_LDST_WHOLE_TRANS(vl8re16_v, 8, 2)
GEN_LDST_WHOLE_TRANS(vl8re32_v, 8, 4)
GEN_LDST_WHOLE_TRANS(vl8re64_v, 8, 8)
/*
* The vector whole register store instructions are encoded similar to
* unmasked unit-stride store of elements with EEW=8.
*/
GEN_LDST_WHOLE_TRANS(vs1r_v, 1, 1, true)
GEN_LDST_WHOLE_TRANS(vs2r_v, 2, 1, true)
GEN_LDST_WHOLE_TRANS(vs4r_v, 4, 1, true)
GEN_LDST_WHOLE_TRANS(vs8r_v, 8, 1, true)
GEN_LDST_WHOLE_TRANS(vs1r_v, 1, 1)
GEN_LDST_WHOLE_TRANS(vs2r_v, 2, 1)
GEN_LDST_WHOLE_TRANS(vs4r_v, 4, 1)
GEN_LDST_WHOLE_TRANS(vs8r_v, 8, 1)
/*
*** Vector Integer Arithmetic Instructions


@ -293,12 +293,14 @@ static bool trans_cm_jalt(DisasContext *ctx, arg_cm_jalt *a)
{
REQUIRE_ZCMT(ctx);
TCGv addr = tcg_temp_new();
/*
* Update pc to current for the non-unwinding exception
* that might come from cpu_ld*_code() in the helper.
*/
gen_update_pc(ctx, 0);
gen_helper_cm_jalt(addr, tcg_env, tcg_constant_i32(a->index));
/* c.jt vs c.jalt depends on the index. */
if (a->index >= 32) {
@ -307,6 +309,8 @@ static bool trans_cm_jalt(DisasContext *ctx, arg_cm_jalt *a)
gen_set_gpr(ctx, xRA, succ_pc);
}
tcg_gen_mov_tl(cpu_pc, addr);
tcg_gen_lookup_and_goto_ptr();
ctx->base.is_jmp = DISAS_NORETURN;
return true;
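A small C model of the cm.jalt ordering may help (illustrative only; names are not QEMU API): the jump-vector fetch can fault, so the guest pc must already be current, and the link register takes the fall-through pc before control transfers, which is why the helper result now lands in a temporary instead of directly in cpu_pc:

    /* Zcmt table-jump semantics; cm.jalt is the index >= 32 case. */
    static unsigned long cm_jalt_model(unsigned long pc, unsigned long *ra,
                                       const unsigned long *jvt,
                                       unsigned int index)
    {
        unsigned long target = jvt[index];  /* may fault, like the helper */

        if (index >= 32) {
            *ra = pc + 2;   /* link to the 16-bit instruction's successor */
        }
        return target;      /* becomes the new pc */
    }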


@ -275,13 +275,42 @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = {
KVM_EXT_CFG("zicbom", ext_zicbom, KVM_RISCV_ISA_EXT_ZICBOM),
KVM_EXT_CFG("zicboz", ext_zicboz, KVM_RISCV_ISA_EXT_ZICBOZ),
KVM_EXT_CFG("zicntr", ext_zicntr, KVM_RISCV_ISA_EXT_ZICNTR),
KVM_EXT_CFG("zicond", ext_zicond, KVM_RISCV_ISA_EXT_ZICOND),
KVM_EXT_CFG("zicsr", ext_zicsr, KVM_RISCV_ISA_EXT_ZICSR),
KVM_EXT_CFG("zifencei", ext_zifencei, KVM_RISCV_ISA_EXT_ZIFENCEI),
KVM_EXT_CFG("zihintntl", ext_zihintntl, KVM_RISCV_ISA_EXT_ZIHINTNTL),
KVM_EXT_CFG("zihintpause", ext_zihintpause, KVM_RISCV_ISA_EXT_ZIHINTPAUSE),
KVM_EXT_CFG("zihpm", ext_zihpm, KVM_RISCV_ISA_EXT_ZIHPM),
KVM_EXT_CFG("zfa", ext_zfa, KVM_RISCV_ISA_EXT_ZFA),
KVM_EXT_CFG("zfh", ext_zfh, KVM_RISCV_ISA_EXT_ZFH),
KVM_EXT_CFG("zfhmin", ext_zfhmin, KVM_RISCV_ISA_EXT_ZFHMIN),
KVM_EXT_CFG("zba", ext_zba, KVM_RISCV_ISA_EXT_ZBA),
KVM_EXT_CFG("zbb", ext_zbb, KVM_RISCV_ISA_EXT_ZBB),
KVM_EXT_CFG("zbc", ext_zbc, KVM_RISCV_ISA_EXT_ZBC),
KVM_EXT_CFG("zbkb", ext_zbkb, KVM_RISCV_ISA_EXT_ZBKB),
KVM_EXT_CFG("zbkc", ext_zbkc, KVM_RISCV_ISA_EXT_ZBKC),
KVM_EXT_CFG("zbkx", ext_zbkx, KVM_RISCV_ISA_EXT_ZBKX),
KVM_EXT_CFG("zbs", ext_zbs, KVM_RISCV_ISA_EXT_ZBS),
KVM_EXT_CFG("zknd", ext_zknd, KVM_RISCV_ISA_EXT_ZKND),
KVM_EXT_CFG("zkne", ext_zkne, KVM_RISCV_ISA_EXT_ZKNE),
KVM_EXT_CFG("zknh", ext_zknh, KVM_RISCV_ISA_EXT_ZKNH),
KVM_EXT_CFG("zkr", ext_zkr, KVM_RISCV_ISA_EXT_ZKR),
KVM_EXT_CFG("zksed", ext_zksed, KVM_RISCV_ISA_EXT_ZKSED),
KVM_EXT_CFG("zksh", ext_zksh, KVM_RISCV_ISA_EXT_ZKSH),
KVM_EXT_CFG("zkt", ext_zkt, KVM_RISCV_ISA_EXT_ZKT),
KVM_EXT_CFG("zvbb", ext_zvbb, KVM_RISCV_ISA_EXT_ZVBB),
KVM_EXT_CFG("zvbc", ext_zvbc, KVM_RISCV_ISA_EXT_ZVBC),
KVM_EXT_CFG("zvfh", ext_zvfh, KVM_RISCV_ISA_EXT_ZVFH),
KVM_EXT_CFG("zvfhmin", ext_zvfhmin, KVM_RISCV_ISA_EXT_ZVFHMIN),
KVM_EXT_CFG("zvkb", ext_zvkb, KVM_RISCV_ISA_EXT_ZVKB),
KVM_EXT_CFG("zvkg", ext_zvkg, KVM_RISCV_ISA_EXT_ZVKG),
KVM_EXT_CFG("zvkned", ext_zvkned, KVM_RISCV_ISA_EXT_ZVKNED),
KVM_EXT_CFG("zvknha", ext_zvknha, KVM_RISCV_ISA_EXT_ZVKNHA),
KVM_EXT_CFG("zvknhb", ext_zvknhb, KVM_RISCV_ISA_EXT_ZVKNHB),
KVM_EXT_CFG("zvksed", ext_zvksed, KVM_RISCV_ISA_EXT_ZVKSED),
KVM_EXT_CFG("zvksh", ext_zvksh, KVM_RISCV_ISA_EXT_ZVKSH),
KVM_EXT_CFG("zvkt", ext_zvkt, KVM_RISCV_ISA_EXT_ZVKT),
KVM_EXT_CFG("smstateen", ext_smstateen, KVM_RISCV_ISA_EXT_SMSTATEEN),
KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA),
KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC),
KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL),


@ -79,14 +79,14 @@ static bool hyper_needed(void *opaque)
static const VMStateDescription vmstate_hyper = {
.name = "cpu/hyper",
.version_id = 4,
.minimum_version_id = 4,
.needed = hyper_needed,
.fields = (const VMStateField[]) {
VMSTATE_UINTTL(env.hstatus, RISCVCPU),
VMSTATE_UINTTL(env.hedeleg, RISCVCPU),
VMSTATE_UINT64(env.hideleg, RISCVCPU),
VMSTATE_UINT32(env.hcounteren, RISCVCPU),
VMSTATE_UINTTL(env.htval, RISCVCPU),
VMSTATE_UINTTL(env.htinst, RISCVCPU),
VMSTATE_UINTTL(env.hgatp, RISCVCPU),
@ -353,8 +353,8 @@ static const VMStateDescription vmstate_jvt = {
const VMStateDescription vmstate_riscv_cpu = {
.name = "cpu",
.version_id = 10,
.minimum_version_id = 10,
.post_load = riscv_cpu_post_load,
.fields = (const VMStateField[]) {
VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32),
@ -397,9 +397,9 @@ const VMStateDescription vmstate_riscv_cpu = {
VMSTATE_UINTTL(env.mtval, RISCVCPU),
VMSTATE_UINTTL(env.miselect, RISCVCPU),
VMSTATE_UINTTL(env.siselect, RISCVCPU),
VMSTATE_UINT32(env.scounteren, RISCVCPU),
VMSTATE_UINT32(env.mcounteren, RISCVCPU),
VMSTATE_UINT32(env.mcountinhibit, RISCVCPU),
VMSTATE_STRUCT_ARRAY(env.pmu_ctrs, RISCVCPU, RV_MAX_MHPMCOUNTERS, 0,
vmstate_pmu_ctr_state, PMUCTRState),
VMSTATE_UINTTL_ARRAY(env.mhpmevent_val, RISCVCPU, RV_MAX_MHPMEVENTS),


@ -16,6 +16,9 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef RISCV_PMU_H
#define RISCV_PMU_H
#include "cpu.h"
#include "qapi/error.h"
@ -31,3 +34,5 @@ int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx);
void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name);
int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value,
uint32_t ctr_idx);
#endif /* RISCV_PMU_H */


@ -196,17 +196,14 @@ static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
{
/*
* All other named features are already enabled
* in riscv_tcg_cpu_instance_init().
*/
if (feat_offset == CPU_CFG_OFFSET(ext_zic64b)) {
cpu->cfg.cbom_blocksize = 64;
cpu->cfg.cbop_blocksize = 64;
cpu->cfg.cboz_blocksize = 64;
}
}
@ -219,10 +216,6 @@ static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
return;
}
ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);
if (env->priv_ver < ext_priv_ver) {
@ -322,11 +315,9 @@ static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
static void riscv_cpu_update_named_features(RISCVCPU *cpu)
{
cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
cpu->cfg.cbop_blocksize == 64 &&
cpu->cfg.cboz_blocksize == 64;
}
static void riscv_cpu_validate_g(RISCVCPU *cpu)
@ -1075,6 +1066,7 @@ static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
#ifndef CONFIG_USER_ONLY
if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
object_property_set_bool(obj, "mmu", true, NULL);
const char *satp_prop = satp_mode_str(profile->satp_mode,
riscv_cpu_is_32bit(cpu));
object_property_set_bool(obj, satp_prop, profile->enabled, NULL);
@ -1290,6 +1282,12 @@ static void riscv_init_max_cpu_extensions(Object *obj)
isa_ext_update_enabled(cpu, prop->offset, true);
}
/*
* Some extensions can't be added without backward compatibility concerns.
* Disable those; the user can still opt in to them on the command line.
*/
cpu->cfg.ext_svade = false;
/* set vector version */
env->vext_ver = VEXT_VERSION_1_00_0;
@ -1318,6 +1316,8 @@ static void riscv_tcg_cpu_instance_init(CPUState *cs)
RISCVCPU *cpu = RISCV_CPU(cs);
Object *obj = OBJECT(cpu);
cpu->cfg.ext_always_enabled = true;
misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
riscv_cpu_add_user_properties(obj);


@ -109,6 +109,8 @@ typedef struct DisasContext {
/* PointerMasking extension */
bool pm_mask_enabled;
bool pm_base_enabled;
/* Ztso */
bool ztso;
/* Use icount trigger for native debug */
bool itrigger;
/* FRM is known to contain a valid value. */
@ -1196,6 +1198,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->cs = cs;
ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED);
ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
ctx->ztso = cpu->cfg.ext_ztso;
ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER);
ctx->zero = tcg_constant_tl(0);
ctx->virt_inst_excp = false;


@ -44,6 +44,7 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
target_ulong reserved = s2 &
MAKE_64BIT_MASK(R_VTYPE_RESERVED_SHIFT,
xlen - 1 - R_VTYPE_RESERVED_SHIFT);
uint16_t vlen = cpu->cfg.vlenb << 3;
int8_t lmul;
if (vlmul & 4) {
@ -53,10 +54,8 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
* VLEN * LMUL >= SEW
* VLEN >> (8 - lmul) >= sew
* (vlenb << 3) >> (8 - lmul) >= sew
*/
if (vlmul == 4 || (vlen >> (8 - vlmul)) < sew) {
vill = true;
}
}
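A worked instance of the overflow being fixed (numbers illustrative): with a fractional LMUL encoding the old expression could shift by a negative count, which is undefined behavior in C.

    uint16_t vlen = 16 << 3;   /* vlenb = 16 bytes -> VLEN = 128 bits */
    int vlmul = 7;             /* fractional encoding for LMUL = 1/2  */

    /* old: cpu->cfg.vlenb >> (8 - 3 - vlmul)  ->  16 >> -2, undefined */
    /* new: vlen >> (8 - vlmul)                ->  128 >> 1 = 64, so   */
    /*      any SEW <= 64 is legal at LMUL = 1/2, as intended          */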


@ -60,6 +60,7 @@ libqos_srcs = files(
'arm-xilinx-zynq-a9-machine.c',
'ppc64_pseries-machine.c',
'x86_64_pc-machine.c',
'riscv-virt-machine.c',
)
if have_virtfs


@ -0,0 +1,137 @@
/*
* libqos driver framework for RISC-V
*
* Initial version based on arm-virt-machine.c
*
* Copyright (c) 2024 Ventana Micro
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>
*/
#include "qemu/osdep.h"
#include "../libqtest.h"
#include "qemu/module.h"
#include "libqos-malloc.h"
#include "qgraph.h"
#include "virtio-mmio.h"
#include "generic-pcihost.h"
#include "hw/pci/pci_regs.h"
#define RISCV_PAGE_SIZE 4096
/* VIRT_DRAM */
#define RISCV_VIRT_RAM_ADDR 0x80000000
#define RISCV_VIRT_RAM_SIZE 0x20000000
/*
* VIRT_VIRTIO. BASE_ADDR points to the last
* virtio_mmio device.
*/
#define VIRTIO_MMIO_BASE_ADDR 0x10008000
#define VIRTIO_MMIO_SIZE 0x00001000
/* VIRT_PCIE_PIO */
#define RISCV_GPEX_PIO_BASE 0x3000000
#define RISCV_BUS_PIO_LIMIT 0x10000
/* VIRT_PCIE_MMIO */
#define RISCV_BUS_MMIO_ALLOC_PTR 0x40000000
#define RISCV_BUS_MMIO_LIMIT 0x80000000
/* VIRT_PCIE_ECAM */
#define RISCV_ECAM_ALLOC_PTR 0x30000000
typedef struct QVirtMachine QVirtMachine;
struct QVirtMachine {
QOSGraphObject obj;
QGuestAllocator alloc;
QVirtioMMIODevice virtio_mmio;
QGenericPCIHost bridge;
};
static void virt_destructor(QOSGraphObject *obj)
{
QVirtMachine *machine = (QVirtMachine *) obj;
alloc_destroy(&machine->alloc);
}
static void *virt_get_driver(void *object, const char *interface)
{
QVirtMachine *machine = object;
if (!g_strcmp0(interface, "memory")) {
return &machine->alloc;
}
fprintf(stderr, "%s not present in riscv/virtio\n", interface);
g_assert_not_reached();
}
static QOSGraphObject *virt_get_device(void *obj, const char *device)
{
QVirtMachine *machine = obj;
if (!g_strcmp0(device, "generic-pcihost")) {
return &machine->bridge.obj;
} else if (!g_strcmp0(device, "virtio-mmio")) {
return &machine->virtio_mmio.obj;
}
fprintf(stderr, "%s not present in riscv/virt\n", device);
g_assert_not_reached();
}
static void riscv_config_qpci_bus(QGenericPCIBus *qpci)
{
qpci->gpex_pio_base = RISCV_GPEX_PIO_BASE;
qpci->bus.pio_limit = RISCV_BUS_PIO_LIMIT;
qpci->bus.mmio_alloc_ptr = RISCV_BUS_MMIO_ALLOC_PTR;
qpci->bus.mmio_limit = RISCV_BUS_MMIO_LIMIT;
qpci->ecam_alloc_ptr = RISCV_ECAM_ALLOC_PTR;
}
static void *qos_create_machine_riscv_virt(QTestState *qts)
{
QVirtMachine *machine = g_new0(QVirtMachine, 1);
alloc_init(&machine->alloc, 0,
RISCV_VIRT_RAM_ADDR,
RISCV_VIRT_RAM_ADDR + RISCV_VIRT_RAM_SIZE,
RISCV_PAGE_SIZE);
qvirtio_mmio_init_device(&machine->virtio_mmio, qts, VIRTIO_MMIO_BASE_ADDR,
VIRTIO_MMIO_SIZE);
qos_create_generic_pcihost(&machine->bridge, qts, &machine->alloc);
riscv_config_qpci_bus(&machine->bridge.pci);
machine->obj.get_device = virt_get_device;
machine->obj.get_driver = virt_get_driver;
machine->obj.destructor = virt_destructor;
return machine;
}
static void virt_machine_register_nodes(void)
{
qos_node_create_machine_args("riscv32/virt", qos_create_machine_riscv_virt,
"aclint=on,aia=aplic-imsic");
qos_node_contains("riscv32/virt", "virtio-mmio", NULL);
qos_node_contains("riscv32/virt", "generic-pcihost", NULL);
qos_node_create_machine_args("riscv64/virt", qos_create_machine_riscv_virt,
"aclint=on,aia=aplic-imsic");
qos_node_contains("riscv64/virt", "virtio-mmio", NULL);
qos_node_contains("riscv64/virt", "generic-pcihost", NULL);
}
libqos_init(virt_machine_register_nodes);
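With these nodes registered, a test never spells out the machine by hand: it declares the node it consumes and the qos graph instantiates riscv64/virt (or riscv32/virt) with the arguments above. A hypothetical consumer, with invented test and function names, might look like:

    static void test_ping(void *obj, void *data, QGuestAllocator *alloc)
    {
        /* 'alloc' hands out guest RAM from the VIRT_DRAM range that
         * qos_create_machine_riscv_virt() configured. */
        uint64_t buf = guest_alloc(alloc, RISCV_PAGE_SIZE);

        /* ... exercise the virtio device behind 'obj' ... */

        guest_free(alloc, buf);
    }

    static void register_ping_test(void)
    {
        qos_add_test("ping", "virtio-mmio", test_ping, NULL);
    }
    libqos_init(register_ping_test);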


@ -17,4 +17,4 @@ run-test-aes: QEMU_OPTS += -cpu rv64,zk=on
TESTS += test-fcvtmod
test-fcvtmod: CFLAGS += -march=rv64imafdc
test-fcvtmod: LDFLAGS += -static
run-test-fcvtmod: QEMU_OPTS += -cpu rv64,d=true,zfa=true