Merge tag 'pull-riscv-to-apply-20240110' of https://github.com/alistair23/qemu into staging

RISC-V PR for 9.0

* Make vector whole-register move (vmv) depend on vtype register
* Fix th.dcache.cval1 privilege check
* Don't allow writing mstatus_vs without RVV
* Use hwaddr instead of target_ulong for RV32
* Fix machine IDs QOM getters
* Fix KVM reg id sizes
* ACPI: Enable AIA, PLIC and update RHCT
* Fix the interrupts-extended property format of PLIC
* Add support for Zacas extension
* Add amocas.[w,d,q] instructions (see the compare-and-swap sketch below)
* Document acpi parameter of virt machine
* RVA22 profiles support
* Remove group setting of KVM AIA if the machine only has 1 socket
* Add RVV CSRs to KVM
* sifive_u: Update S-mode U-Boot image build instructions
* Upgrade OpenSBI from v1.3.1 to v1.4
* pmp: Ignore writes when RW=01 and MML=0
* Assert that the CSR numbers will be correct
* Don't adjust vscause for exceptions
* Ensure mideleg is set correctly on reset

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEaukCtqfKh31tZZKWr3yVEwxTgBMFAmWeW8kACgkQr3yVEwxT
# gBMB3BAAtpb7dC/NqDOjo/LjGf81wYUnF0KcfJUIbuHEM9S03mKJEvngV/sUhg+A
# fzsoJazijQZk2+Y02WLT/o+ppRDegb4P6n54Nn13xr024Dn2jf45+EKDLI+vtU5y
# lhwp/LH3SEo2MM/Qr0njl8+jJ7W9adhZeK6x+NFaLaQJ291xupbcwEnScdv2bPAo
# gvbM6yrfUoZ25MsQKIDGssozdGRwOD/keAT0q8C0gKDamqXBDrI80BOVhRms+uLm
# R33DXsAegPKluJTa9gfaWFI0eK34WHXRvSIjE36nZlGNNgqLAVdM2/QozMVz4cKA
# Ymz1nzqB9HeSn1pM4KCK/Y3LH89qLGWtyHYgldiDXA/wSyKajwkbXSWFOT9gPDqV
# i+5BRDvU0zIeMIt+ROqNKgx1Hry6U2aycMNsdHTmygJbGEpiTaXuES5tt+LKsyHe
# w/7a6wPd/kh9LQhXYQ4qbn7L534tWvn8zWyvKLZLxmYPcOn6SdjFbKWmk5ARky2W
# sx9ojn9ANlYaLfzQ3TMRcIhWD6n8Si3KFNiQ3353E8xkRkyfu0WHyXAy8/kIc5UT
# nScO2YD68XkdkcLF6uLUKuGiVZXFWXRY1Ttz9tvEmBckVsg6TIkoMONHeUWNP7ly
# A0bJwN5qEOk6XIYKHWwX5UzvkcfUpOb5VmuLuv3gRoNX0A7/+fc=
# =5K9J
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 10 Jan 2024 08:56:41 GMT
# gpg:                using RSA key 6AE902B6A7CA877D6D659296AF7C95130C538013
# gpg: Good signature from "Alistair Francis <alistair@alistair23.me>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 6AE9 02B6 A7CA 877D 6D65  9296 AF7C 9513 0C53 8013

* tag 'pull-riscv-to-apply-20240110' of https://github.com/alistair23/qemu: (65 commits)
  target/riscv: Ensure mideleg is set correctly on reset
  target/riscv: Don't adjust vscause for exceptions
  target/riscv: Assert that the CSR numbers will be correct
  target/riscv: pmp: Ignore writes when RW=01 and MML=0
  roms/opensbi: Upgrade from v1.3.1 to v1.4
  docs/system/riscv: sifive_u: Update S-mode U-Boot image build instructions
  target/riscv/kvm: add RVV and Vector CSR regs
  target/riscv/kvm: do PR_RISCV_V_SET_CONTROL during realize()
  linux-headers: riscv: add ptrace.h
  linux-headers: Update to Linux v6.7-rc5
  target/riscv/kvm.c: remove group setting of KVM AIA if the machine only has 1 socket
  target/riscv: add rva22s64 cpu
  target/riscv: add RVA22S64 profile
  target/riscv: add 'parent' in profile description
  target/riscv: add satp_mode profile support
  target/riscv/cpu.c: add riscv_cpu_is_32bit()
  target/riscv/cpu.c: finalize satp_mode earlier
  target/riscv: add priv ver restriction to profiles
  target/riscv: implement svade
  target/riscv: add 'rva22u64' CPU
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2024-01-10 11:41:56 +00:00
commit 34eac35f89
68 changed files with 2248 additions and 360 deletions
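
For context on the Zacas entries above: amocas.{w,d,q} is an atomic compare-and-swap. A minimal C model of the amocas.w semantics, written for this note (the helper name is made up, and this is not QEMU's implementation):

#include <stdint.h>

/* amocas.w rd, rs2, (rs1): atomically load the word at rs1, compare it
 * with the old value of rd, store rs2 on a match, and write the loaded
 * value back to rd. The load/compare/store below is a single atomic
 * step in hardware. */
static uint32_t amocas_w_model(uint32_t *addr, uint32_t rd_old, uint32_t rs2)
{
    uint32_t loaded = *addr;
    if (loaded == rd_old) {
        *addr = rs2;
    }
    return loaded; /* becomes the new value of rd */
}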


@@ -903,6 +903,9 @@ typedef enum {
rv_op_vwsll_vv = 872,
rv_op_vwsll_vx = 873,
rv_op_vwsll_vi = 874,
rv_op_amocas_w = 875,
rv_op_amocas_d = 876,
rv_op_amocas_q = 877,
} rv_op;
/* register names */
@@ -2090,6 +2093,9 @@ const rv_opcode_data rvi_opcode_data[] = {
{ "vwsll.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, 0, 0, 0 },
{ "vwsll.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, 0, 0, 0 },
{ "vwsll.vi", rv_codec_v_i, rv_fmt_vd_vs2_uimm_vm, NULL, 0, 0, 0 },
{ "amocas.w", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amocas.d", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amocas.q", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
};
/* CSR names */
@@ -2841,6 +2847,9 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
case 34: op = rv_op_amoxor_w; break;
case 35: op = rv_op_amoxor_d; break;
case 36: op = rv_op_amoxor_q; break;
case 42: op = rv_op_amocas_w; break;
case 43: op = rv_op_amocas_d; break;
case 44: op = rv_op_amocas_q; break;
case 66: op = rv_op_amoor_w; break;
case 67: op = rv_op_amoor_d; break;
case 68: op = rv_op_amoor_q; break;
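
The new case values fit the existing numbering: the switch index appears to be funct5 << 3 | funct3, and Zacas assigns AMOCAS funct5 = 0b00101, so (5 << 3) | 2 = 42 selects amocas.w, with 43 and 44 for the d and q widths. That is consistent with amoxor (funct5 = 0b00100, cases 34-36) and amoor (funct5 = 0b01000, cases 66-68) above. A hypothetical reconstruction of the index, not the decoder's exact code:

/* funct5 is inst[31:27], funct3 is inst[14:12] */
uint32_t amo_case = ((inst >> 27) & 0x1f) << 3 | ((inst >> 12) & 0x7);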


@@ -210,7 +210,7 @@ command line options with ``qemu-system-riscv32``.
Running U-Boot
--------------
U-Boot mainline v2021.07 release is tested at the time of writing. To build a
U-Boot mainline v2024.01 release is tested at the time of writing. To build a
U-Boot mainline bootloader that can be booted by the ``sifive_u`` machine, use
the sifive_unleashed_defconfig with similar commands as described above for
Linux:
@@ -325,15 +325,10 @@ configuration of U-Boot:
$ export CROSS_COMPILE=riscv64-linux-
$ make sifive_unleashed_defconfig
$ make menuconfig
then manually select the following configuration:
* Device Tree Control ---> Provider of DTB for DT Control ---> Prior Stage bootloader DTB
and unselect the following configuration:
* Library routines ---> Allow access to binman information in the device tree
$ ./scripts/config --enable OF_BOARD
$ ./scripts/config --disable BINMAN_FDT
$ ./scripts/config --disable SPL
$ make olddefconfig
This changes U-Boot to use the QEMU generated device tree blob, and bypass
running the U-Boot SPL stage.
@@ -352,17 +347,13 @@ It's possible to create a 32-bit U-Boot S-mode image as well.
$ export CROSS_COMPILE=riscv64-linux-
$ make sifive_unleashed_defconfig
$ make menuconfig
then manually update the following configuration in U-Boot:
* Device Tree Control ---> Provider of DTB for DT Control ---> Prior Stage bootloader DTB
* RISC-V architecture ---> Base ISA ---> RV32I
* Boot options ---> Boot images ---> Text Base ---> 0x80400000
and unselect the following configuration:
* Library routines ---> Allow access to binman information in the device tree
$ ./scripts/config --disable ARCH_RV64I
$ ./scripts/config --enable ARCH_RV32I
$ ./scripts/config --set-val TEXT_BASE 0x80400000
$ ./scripts/config --enable OF_BOARD
$ ./scripts/config --disable BINMAN_FDT
$ ./scripts/config --disable SPL
$ make olddefconfig
Use the same command line options to boot the 32-bit U-Boot S-mode image:


@@ -95,6 +95,11 @@ The following machine-specific options are supported:
SiFive CLINT. When not specified, this option is assumed to be "off".
This option is restricted to the TCG accelerator.
- acpi=[on|off|auto]
When this option is "on" (which is the default), ACPI tables are generated and
exposed as firmware tables etc/acpi/rsdp and etc/acpi/tables.
- aia=[none|aplic|aplic-imsic]
This option allows selecting interrupt controller defined by the AIA


@@ -35,7 +35,7 @@
#include "target/arm/cpu.h"
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/acpi.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/nvram/fw_cfg_acpi.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/utils.h"
@@ -58,6 +58,7 @@
#include "migration/vmstate.h"
#include "hw/acpi/ghes.h"
#include "hw/acpi/viot.h"
#include "hw/virtio/virtio-acpi.h"
#define ARM_SPI_BASE 32
@@ -94,21 +95,6 @@ static void acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap,
aml_append(scope, dev);
}
static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap)
{
Aml *dev = aml_device("FWCF");
aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));
/* device present, functioning, decoding, not shown in UI */
aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
Aml *crs = aml_resource_template();
aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base,
fw_cfg_memmap->size, AML_READ_WRITE));
aml_append(dev, aml_name_decl("_CRS", crs));
aml_append(scope, dev);
}
static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap)
{
Aml *dev, *crs;
@@ -133,32 +119,6 @@ static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap)
aml_append(scope, dev);
}
static void acpi_dsdt_add_virtio(Aml *scope,
const MemMapEntry *virtio_mmio_memmap,
uint32_t mmio_irq, int num)
{
hwaddr base = virtio_mmio_memmap->base;
hwaddr size = virtio_mmio_memmap->size;
int i;
for (i = 0; i < num; i++) {
uint32_t irq = mmio_irq + i;
Aml *dev = aml_device("VR%02u", i);
aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005")));
aml_append(dev, aml_name_decl("_UID", aml_int(i)));
aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
Aml *crs = aml_resource_template();
aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
aml_append(crs,
aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
AML_EXCLUSIVE, &irq, 1));
aml_append(dev, aml_name_decl("_CRS", crs));
aml_append(scope, dev);
base += size;
}
}
static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
uint32_t irq, VirtMachineState *vms)
{
@@ -864,9 +824,10 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
if (vmc->acpi_expose_flash) {
acpi_dsdt_add_flash(scope, &memmap[VIRT_FLASH]);
}
acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]);
acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO],
(irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS);
fw_cfg_acpi_dsdt_add(scope, &memmap[VIRT_FW_CFG]);
virtio_acpi_dsdt_add(scope, memmap[VIRT_MMIO].base, memmap[VIRT_MMIO].size,
(irqmap[VIRT_MMIO] + ARM_SPI_BASE),
0, NUM_VIRTIO_TRANSPORTS);
acpi_dsdt_add_pci(scope, memmap, irqmap[VIRT_PCIE] + ARM_SPI_BASE, vms);
if (vms->acpi_dev) {
build_ged_aml(scope, "\\_SB."GED_DEVICE,


@@ -37,6 +37,7 @@
#include "hw/pci/pci.h"
#include "hw/pci/pcie_host.h"
#include "hw/usb/xhci.h"
#include "hw/virtio/virtio-acpi.h"
#include "hw/virtio/virtio-mmio.h"
#include "hw/input/i8042.h"
@@ -77,19 +78,7 @@ static void acpi_dsdt_add_virtio(Aml *scope,
uint32_t irq = mms->virtio_irq_base + index;
hwaddr base = VIRTIO_MMIO_BASE + index * 512;
hwaddr size = 512;
Aml *dev = aml_device("VR%02u", (unsigned)index);
aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005")));
aml_append(dev, aml_name_decl("_UID", aml_int(index)));
aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
Aml *crs = aml_resource_template();
aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
aml_append(crs,
aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
AML_EXCLUSIVE, &irq, 1));
aml_append(dev, aml_name_decl("_CRS", crs));
aml_append(scope, dev);
virtio_acpi_dsdt_add(scope, base, size, irq, index, 1);
}
}
}

hw/nvram/fw_cfg-acpi.c (new file)

@@ -0,0 +1,23 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Add fw_cfg device in DSDT
*
*/
#include "hw/nvram/fw_cfg_acpi.h"
#include "hw/acpi/aml-build.h"
void fw_cfg_acpi_dsdt_add(Aml *scope, const MemMapEntry *fw_cfg_memmap)
{
Aml *dev = aml_device("FWCF");
aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));
/* device present, functioning, decoding, not shown in UI */
aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
Aml *crs = aml_resource_template();
aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base,
fw_cfg_memmap->size, AML_READ_WRITE));
aml_append(dev, aml_name_decl("_CRS", crs));
aml_append(scope, dev);
}


@@ -17,3 +17,4 @@ system_ss.add(when: 'CONFIG_XLNX_EFUSE_ZYNQMP', if_true: files(
system_ss.add(when: 'CONFIG_XLNX_BBRAM', if_true: files('xlnx-bbram.c'))
specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_nvram.c'))
specific_ss.add(when: 'CONFIG_ACPI', if_true: files('fw_cfg-acpi.c'))


@@ -281,3 +281,16 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg)
crs_range_set_free(&crs_range_set);
}
void acpi_dsdt_add_gpex_host(Aml *scope, uint32_t irq)
{
bool ambig;
Object *obj = object_resolve_path_type("", TYPE_GPEX_HOST, &ambig);
if (!obj || ambig) {
return;
}
GPEX_HOST(obj)->gpex_cfg.irq = irq;
acpi_dsdt_add_gpex(scope, &GPEX_HOST(obj)->gpex_cfg);
}


@@ -154,6 +154,18 @@ static Property gpex_host_properties[] = {
*/
DEFINE_PROP_BOOL("allow-unmapped-accesses", GPEXHost,
allow_unmapped_accesses, true),
DEFINE_PROP_UINT64(PCI_HOST_ECAM_BASE, GPEXHost, gpex_cfg.ecam.base, 0),
DEFINE_PROP_SIZE(PCI_HOST_ECAM_SIZE, GPEXHost, gpex_cfg.ecam.size, 0),
DEFINE_PROP_UINT64(PCI_HOST_PIO_BASE, GPEXHost, gpex_cfg.pio.base, 0),
DEFINE_PROP_SIZE(PCI_HOST_PIO_SIZE, GPEXHost, gpex_cfg.pio.size, 0),
DEFINE_PROP_UINT64(PCI_HOST_BELOW_4G_MMIO_BASE, GPEXHost,
gpex_cfg.mmio32.base, 0),
DEFINE_PROP_SIZE(PCI_HOST_BELOW_4G_MMIO_SIZE, GPEXHost,
gpex_cfg.mmio32.size, 0),
DEFINE_PROP_UINT64(PCI_HOST_ABOVE_4G_MMIO_BASE, GPEXHost,
gpex_cfg.mmio64.base, 0),
DEFINE_PROP_SIZE(PCI_HOST_ABOVE_4G_MMIO_SIZE, GPEXHost,
gpex_cfg.mmio64.size, 0),
DEFINE_PROP_END_OF_LIST(),
};


@@ -45,6 +45,7 @@ config RISCV_VIRT
select FW_CFG_DMA
select PLATFORM_BUS
select ACPI
select ACPI_PCI
config SHAKTI_C
bool


@@ -27,16 +27,21 @@
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/pci.h"
#include "hw/acpi/utils.h"
#include "hw/intc/riscv_aclint.h"
#include "hw/nvram/fw_cfg_acpi.h"
#include "hw/pci-host/gpex.h"
#include "hw/riscv/virt.h"
#include "hw/riscv/numa.h"
#include "hw/virtio/virtio-acpi.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "sysemu/reset.h"
#include "migration/vmstate.h"
#include "hw/riscv/virt.h"
#include "hw/riscv/numa.h"
#include "hw/intc/riscv_aclint.h"
#define ACPI_BUILD_TABLE_SIZE 0x20000
#define ACPI_BUILD_INTC_ID(socket, index) ((socket << 24) | (index))
typedef struct AcpiBuildState {
/* Copy of table in RAM (for patching) */
@@ -58,17 +63,56 @@ static void acpi_align_size(GArray *blob, unsigned align)
static void riscv_acpi_madt_add_rintc(uint32_t uid,
const CPUArchIdList *arch_ids,
GArray *entry)
GArray *entry,
RISCVVirtState *s)
{
uint8_t guest_index_bits = imsic_num_bits(s->aia_guests + 1);
uint64_t hart_id = arch_ids->cpus[uid].arch_id;
uint32_t imsic_size, local_cpu_id, socket_id;
uint64_t imsic_socket_addr, imsic_addr;
MachineState *ms = MACHINE(s);
socket_id = arch_ids->cpus[uid].props.node_id;
local_cpu_id = (arch_ids->cpus[uid].arch_id -
riscv_socket_first_hartid(ms, socket_id)) %
riscv_socket_hart_count(ms, socket_id);
imsic_socket_addr = s->memmap[VIRT_IMSIC_S].base +
(socket_id * VIRT_IMSIC_GROUP_MAX_SIZE);
imsic_size = IMSIC_HART_SIZE(guest_index_bits);
imsic_addr = imsic_socket_addr + local_cpu_id * imsic_size;
build_append_int_noprefix(entry, 0x18, 1); /* Type */
build_append_int_noprefix(entry, 20, 1); /* Length */
build_append_int_noprefix(entry, 36, 1); /* Length */
build_append_int_noprefix(entry, 1, 1); /* Version */
build_append_int_noprefix(entry, 0, 1); /* Reserved */
build_append_int_noprefix(entry, 0x1, 4); /* Flags */
build_append_int_noprefix(entry, hart_id, 8); /* Hart ID */
build_append_int_noprefix(entry, uid, 4); /* ACPI Processor UID */
/* External Interrupt Controller ID */
if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
build_append_int_noprefix(entry,
ACPI_BUILD_INTC_ID(
arch_ids->cpus[uid].props.node_id,
local_cpu_id),
4);
} else if (s->aia_type == VIRT_AIA_TYPE_NONE) {
build_append_int_noprefix(entry,
ACPI_BUILD_INTC_ID(
arch_ids->cpus[uid].props.node_id,
2 * local_cpu_id + 1),
4);
} else {
build_append_int_noprefix(entry, 0, 4);
}
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
/* IMSIC Base address */
build_append_int_noprefix(entry, imsic_addr, 8);
/* IMSIC Size */
build_append_int_noprefix(entry, imsic_size, 4);
} else {
build_append_int_noprefix(entry, 0, 8);
build_append_int_noprefix(entry, 0, 4);
}
}
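
A worked example of the IMSIC address computation above, using illustrative values (the memmap constants here are assumptions for the sketch, not taken from this diff): with one guest file, guest_index_bits = imsic_num_bits(2) = 1, so for socket 1, local hart 2:

/* Illustrative constants standing in for the virt memmap. */
uint64_t imsic_s_base = 0x28000000;          /* assumed VIRT_IMSIC_S base */
uint64_t group_size   = 1ULL << 24;          /* assumed group max size */
uint32_t hart_size    = IMSIC_HART_SIZE(1);  /* per-hart IMSIC window */
uint64_t imsic_addr   = imsic_s_base + 1 * group_size + 2 * hart_size;
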
static void acpi_dsdt_add_cpus(Aml *scope, RISCVVirtState *s)
@@ -87,7 +131,7 @@ static void acpi_dsdt_add_cpus(Aml *scope, RISCVVirtState *s)
aml_int(arch_ids->cpus[i].arch_id)));
/* build _MAT object */
riscv_acpi_madt_add_rintc(i, arch_ids, madt_buf);
riscv_acpi_madt_add_rintc(i, arch_ids, madt_buf, s);
aml_append(dev, aml_name_decl("_MAT",
aml_buffer(madt_buf->len,
(uint8_t *)madt_buf->data)));
@@ -97,19 +141,36 @@ static void acpi_dsdt_add_cpus(Aml *scope, RISCVVirtState *s)
}
}
static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap)
static void
acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap,
uint32_t uart_irq)
{
Aml *dev = aml_device("FWCF");
aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));
/* device present, functioning, decoding, not shown in UI */
aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
Aml *dev = aml_device("COM0");
aml_append(dev, aml_name_decl("_HID", aml_string("PNP0501")));
aml_append(dev, aml_name_decl("_UID", aml_int(0)));
Aml *crs = aml_resource_template();
aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base,
fw_cfg_memmap->size, AML_READ_WRITE));
aml_append(crs, aml_memory32_fixed(uart_memmap->base,
uart_memmap->size, AML_READ_WRITE));
aml_append(crs,
aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
AML_EXCLUSIVE, &uart_irq, 1));
aml_append(dev, aml_name_decl("_CRS", crs));
Aml *pkg = aml_package(2);
aml_append(pkg, aml_string("clock-frequency"));
aml_append(pkg, aml_int(3686400));
Aml *UUID = aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301");
Aml *pkg1 = aml_package(1);
aml_append(pkg1, pkg);
Aml *package = aml_package(2);
aml_append(package, UUID);
aml_append(package, pkg1);
aml_append(dev, aml_name_decl("_DSD", package));
aml_append(scope, dev);
}
@@ -121,6 +182,7 @@ static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap)
* 5.2.36 RISC-V Hart Capabilities Table (RHCT)
* REF: https://github.com/riscv-non-isa/riscv-acpi/issues/16
* https://drive.google.com/file/d/1nP3nFiH4jkPMp6COOxP6123DCZKR-tia/view
* https://drive.google.com/file/d/1sKbOa8m1UZw1JkquZYe3F1zQBN1xXsaf/view
*/
static void build_rhct(GArray *table_data,
BIOSLinker *linker,
@@ -130,8 +192,10 @@ static void build_rhct(GArray *table_data,
MachineState *ms = MACHINE(s);
const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms);
size_t len, aligned_len;
uint32_t isa_offset, num_rhct_nodes;
RISCVCPU *cpu;
uint32_t isa_offset, num_rhct_nodes, cmo_offset = 0;
RISCVCPU *cpu = &s->soc[0].harts[0];
uint32_t mmu_offset = 0;
uint8_t satp_mode_max;
char *isa;
AcpiTable table = { .sig = "RHCT", .rev = 1, .oem_id = s->oem_id,
@@ -147,6 +211,13 @@
/* ISA + N hart info */
num_rhct_nodes = 1 + ms->smp.cpus;
if (cpu->cfg.ext_zicbom || cpu->cfg.ext_zicboz) {
num_rhct_nodes++;
}
if (cpu->cfg.satp_mode.supported != 0) {
num_rhct_nodes++;
}
/* Number of RHCT nodes */
build_append_int_noprefix(table_data, num_rhct_nodes, 4);
@@ -158,7 +229,6 @@
isa_offset = table_data->len - table.table_offset;
build_append_int_noprefix(table_data, 0, 2); /* Type 0 */
cpu = &s->soc[0].harts[0];
isa = riscv_isa_string(cpu);
len = 8 + strlen(isa) + 1;
aligned_len = (len % 2) ? (len + 1) : len;
@@ -174,14 +244,87 @@
build_append_int_noprefix(table_data, 0x0, 1); /* Optional Padding */
}
/* CMO node */
if (cpu->cfg.ext_zicbom || cpu->cfg.ext_zicboz) {
cmo_offset = table_data->len - table.table_offset;
build_append_int_noprefix(table_data, 1, 2); /* Type */
build_append_int_noprefix(table_data, 10, 2); /* Length */
build_append_int_noprefix(table_data, 0x1, 2); /* Revision */
build_append_int_noprefix(table_data, 0, 1); /* Reserved */
/* CBOM block size */
if (cpu->cfg.cbom_blocksize) {
build_append_int_noprefix(table_data,
__builtin_ctz(cpu->cfg.cbom_blocksize),
1);
} else {
build_append_int_noprefix(table_data, 0, 1);
}
/* CBOP block size */
build_append_int_noprefix(table_data, 0, 1);
/* CBOZ block size */
if (cpu->cfg.cboz_blocksize) {
build_append_int_noprefix(table_data,
__builtin_ctz(cpu->cfg.cboz_blocksize),
1);
} else {
build_append_int_noprefix(table_data, 0, 1);
}
}
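
The block sizes above are encoded as log2(bytes) in a single byte; __builtin_ctz is exact here because CBOM/CBOZ block sizes are powers of two, so a 64-byte block is stored as 6. A standalone sketch of that encoding (not taken from QEMU):

#include <assert.h>

static unsigned cmo_encode_blocksize(unsigned blocksize)
{
    assert(blocksize && (blocksize & (blocksize - 1)) == 0); /* power of two */
    return __builtin_ctz(blocksize); /* log2 of the size in bytes */
}
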
/* MMU node structure */
if (cpu->cfg.satp_mode.supported != 0) {
satp_mode_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
mmu_offset = table_data->len - table.table_offset;
build_append_int_noprefix(table_data, 2, 2); /* Type */
build_append_int_noprefix(table_data, 8, 2); /* Length */
build_append_int_noprefix(table_data, 0x1, 2); /* Revision */
build_append_int_noprefix(table_data, 0, 1); /* Reserved */
/* MMU Type */
if (satp_mode_max == VM_1_10_SV57) {
build_append_int_noprefix(table_data, 2, 1); /* Sv57 */
} else if (satp_mode_max == VM_1_10_SV48) {
build_append_int_noprefix(table_data, 1, 1); /* Sv48 */
} else if (satp_mode_max == VM_1_10_SV39) {
build_append_int_noprefix(table_data, 0, 1); /* Sv39 */
} else {
assert(1);
}
}
/* Hart Info Node */
for (int i = 0; i < arch_ids->len; i++) {
len = 16;
int num_offsets = 1;
build_append_int_noprefix(table_data, 0xFFFF, 2); /* Type */
build_append_int_noprefix(table_data, 16, 2); /* Length */
build_append_int_noprefix(table_data, 0x1, 2); /* Revision */
build_append_int_noprefix(table_data, 1, 2); /* Number of offsets */
build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */
build_append_int_noprefix(table_data, isa_offset, 4); /* Offsets[0] */
/* Length */
if (cmo_offset) {
len += 4;
num_offsets++;
}
if (mmu_offset) {
len += 4;
num_offsets++;
}
build_append_int_noprefix(table_data, len, 2);
build_append_int_noprefix(table_data, 0x1, 2); /* Revision */
/* Number of offsets */
build_append_int_noprefix(table_data, num_offsets, 2);
build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */
/* Offsets */
build_append_int_noprefix(table_data, isa_offset, 4);
if (cmo_offset) {
build_append_int_noprefix(table_data, cmo_offset, 4);
}
if (mmu_offset) {
build_append_int_noprefix(table_data, mmu_offset, 4);
}
}
acpi_table_end(linker, &table);
@@ -209,6 +352,8 @@ static void build_dsdt(GArray *table_data,
RISCVVirtState *s)
{
Aml *scope, *dsdt;
MachineState *ms = MACHINE(s);
uint8_t socket_count;
const MemMapEntry *memmap = s->memmap;
AcpiTable table = { .sig = "DSDT", .rev = 2, .oem_id = s->oem_id,
.oem_table_id = s->oem_table_id };
@@ -226,7 +371,30 @@
scope = aml_scope("\\_SB");
acpi_dsdt_add_cpus(scope, s);
acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]);
fw_cfg_acpi_dsdt_add(scope, &memmap[VIRT_FW_CFG]);
socket_count = riscv_socket_count(ms);
acpi_dsdt_add_uart(scope, &memmap[VIRT_UART0], UART0_IRQ);
if (socket_count == 1) {
virtio_acpi_dsdt_add(scope, memmap[VIRT_VIRTIO].base,
memmap[VIRT_VIRTIO].size,
VIRTIO_IRQ, 0, VIRTIO_COUNT);
acpi_dsdt_add_gpex_host(scope, PCIE_IRQ);
} else if (socket_count == 2) {
virtio_acpi_dsdt_add(scope, memmap[VIRT_VIRTIO].base,
memmap[VIRT_VIRTIO].size,
VIRTIO_IRQ + VIRT_IRQCHIP_NUM_SOURCES, 0,
VIRTIO_COUNT);
acpi_dsdt_add_gpex_host(scope, PCIE_IRQ + VIRT_IRQCHIP_NUM_SOURCES);
} else {
virtio_acpi_dsdt_add(scope, memmap[VIRT_VIRTIO].base,
memmap[VIRT_VIRTIO].size,
VIRTIO_IRQ + VIRT_IRQCHIP_NUM_SOURCES, 0,
VIRTIO_COUNT);
acpi_dsdt_add_gpex_host(scope, PCIE_IRQ + VIRT_IRQCHIP_NUM_SOURCES * 2);
}
aml_append(dsdt, scope);
@@ -242,6 +410,7 @@ static void build_dsdt(GArray *table_data,
* 5.2.12 Multiple APIC Description Table (MADT)
* REF: https://github.com/riscv-non-isa/riscv-acpi/issues/15
* https://drive.google.com/file/d/1R6k4MshhN3WTT-hwqAquu5nX6xSEqK2l/view
* https://drive.google.com/file/d/1oMGPyOD58JaPgMl1pKasT-VKsIKia7zR/view
*/
static void build_madt(GArray *table_data,
BIOSLinker *linker,
@@ -250,6 +419,21 @@
MachineClass *mc = MACHINE_GET_CLASS(s);
MachineState *ms = MACHINE(s);
const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms);
uint8_t group_index_bits = imsic_num_bits(riscv_socket_count(ms));
uint8_t guest_index_bits = imsic_num_bits(s->aia_guests + 1);
uint16_t imsic_max_hart_per_socket = 0;
uint8_t hart_index_bits;
uint64_t aplic_addr;
uint32_t gsi_base;
uint8_t socket;
for (socket = 0; socket < riscv_socket_count(ms); socket++) {
if (imsic_max_hart_per_socket < s->soc[socket].num_harts) {
imsic_max_hart_per_socket = s->soc[socket].num_harts;
}
}
hart_index_bits = imsic_num_bits(imsic_max_hart_per_socket);
AcpiTable table = { .sig = "APIC", .rev = 6, .oem_id = s->oem_id,
.oem_table_id = s->oem_table_id };
@@ -261,7 +445,84 @@
/* RISC-V Local INTC structures per HART */
for (int i = 0; i < arch_ids->len; i++) {
riscv_acpi_madt_add_rintc(i, arch_ids, table_data);
riscv_acpi_madt_add_rintc(i, arch_ids, table_data, s);
}
/* IMSIC */
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
/* IMSIC */
build_append_int_noprefix(table_data, 0x19, 1); /* Type */
build_append_int_noprefix(table_data, 16, 1); /* Length */
build_append_int_noprefix(table_data, 1, 1); /* Version */
build_append_int_noprefix(table_data, 0, 1); /* Reserved */
build_append_int_noprefix(table_data, 0, 4); /* Flags */
/* Number of supervisor mode Interrupt Identities */
build_append_int_noprefix(table_data, VIRT_IRQCHIP_NUM_MSIS, 2);
/* Number of guest mode Interrupt Identities */
build_append_int_noprefix(table_data, VIRT_IRQCHIP_NUM_MSIS, 2);
/* Guest Index Bits */
build_append_int_noprefix(table_data, guest_index_bits, 1);
/* Hart Index Bits */
build_append_int_noprefix(table_data, hart_index_bits, 1);
/* Group Index Bits */
build_append_int_noprefix(table_data, group_index_bits, 1);
/* Group Index Shift */
build_append_int_noprefix(table_data, IMSIC_MMIO_GROUP_MIN_SHIFT, 1);
}
if (s->aia_type != VIRT_AIA_TYPE_NONE) {
/* APLICs */
for (socket = 0; socket < riscv_socket_count(ms); socket++) {
aplic_addr = s->memmap[VIRT_APLIC_S].base +
s->memmap[VIRT_APLIC_S].size * socket;
gsi_base = VIRT_IRQCHIP_NUM_SOURCES * socket;
build_append_int_noprefix(table_data, 0x1A, 1); /* Type */
build_append_int_noprefix(table_data, 36, 1); /* Length */
build_append_int_noprefix(table_data, 1, 1); /* Version */
build_append_int_noprefix(table_data, socket, 1); /* APLIC ID */
build_append_int_noprefix(table_data, 0, 4); /* Flags */
build_append_int_noprefix(table_data, 0, 8); /* Hardware ID */
/* Number of IDCs */
if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
build_append_int_noprefix(table_data,
s->soc[socket].num_harts,
2);
} else {
build_append_int_noprefix(table_data, 0, 2);
}
/* Total External Interrupt Sources Supported */
build_append_int_noprefix(table_data, VIRT_IRQCHIP_NUM_SOURCES, 2);
/* Global System Interrupt Base */
build_append_int_noprefix(table_data, gsi_base, 4);
/* APLIC Address */
build_append_int_noprefix(table_data, aplic_addr, 8);
/* APLIC size */
build_append_int_noprefix(table_data,
s->memmap[VIRT_APLIC_S].size, 4);
}
} else {
/* PLICs */
for (socket = 0; socket < riscv_socket_count(ms); socket++) {
aplic_addr = s->memmap[VIRT_PLIC].base +
s->memmap[VIRT_PLIC].size * socket;
gsi_base = VIRT_IRQCHIP_NUM_SOURCES * socket;
build_append_int_noprefix(table_data, 0x1B, 1); /* Type */
build_append_int_noprefix(table_data, 36, 1); /* Length */
build_append_int_noprefix(table_data, 1, 1); /* Version */
build_append_int_noprefix(table_data, socket, 1); /* PLIC ID */
build_append_int_noprefix(table_data, 0, 8); /* Hardware ID */
/* Total External Interrupt Sources Supported */
build_append_int_noprefix(table_data,
VIRT_IRQCHIP_NUM_SOURCES - 1, 2);
build_append_int_noprefix(table_data, 0, 2); /* Max Priority */
build_append_int_noprefix(table_data, 0, 4); /* Flags */
/* PLIC Size */
build_append_int_noprefix(table_data, s->memmap[VIRT_PLIC].size, 4);
/* PLIC Address */
build_append_int_noprefix(table_data, aplic_addr, 8);
/* Global System Interrupt Vector Base */
build_append_int_noprefix(table_data, gsi_base, 4);
}
}
acpi_table_end(linker, &table);
@@ -294,6 +555,16 @@ static void virt_acpi_build(RISCVVirtState *s, AcpiBuildTables *tables)
acpi_add_table(table_offsets, tables_blob);
build_rhct(tables_blob, tables->linker, s);
acpi_add_table(table_offsets, tables_blob);
{
AcpiMcfgInfo mcfg = {
.base = s->memmap[VIRT_PCIE_MMIO].base,
.size = s->memmap[VIRT_PCIE_MMIO].size,
};
build_mcfg(tables_blob, tables->linker, &mcfg, s->oem_id,
s->oem_table_id);
}
/* XSDT is pointed to by RSDP */
xsdt = tables_blob->len;
build_xsdt(tables_blob, tables->linker, table_offsets, s->oem_id,


@@ -38,7 +38,6 @@
#include "kvm/kvm_riscv.h"
#include "hw/intc/riscv_aclint.h"
#include "hw/intc/riscv_aplic.h"
#include "hw/intc/riscv_imsic.h"
#include "hw/intc/sifive_plic.h"
#include "hw/misc/sifive_test.h"
#include "hw/platform-bus.h"
@@ -54,28 +53,6 @@
#include "hw/acpi/aml-build.h"
#include "qapi/qapi-visit-common.h"
/*
* The virt machine physical address space used by some of the devices
* namely ACLINT, PLIC, APLIC, and IMSIC depend on number of Sockets,
* number of CPUs, and number of IMSIC guest files.
*
* Various limits defined by VIRT_SOCKETS_MAX_BITS, VIRT_CPUS_MAX_BITS,
* and VIRT_IRQCHIP_MAX_GUESTS_BITS are tuned for maximum utilization
* of virt machine physical address space.
*/
#define VIRT_IMSIC_GROUP_MAX_SIZE (1U << IMSIC_MMIO_GROUP_MIN_SHIFT)
#if VIRT_IMSIC_GROUP_MAX_SIZE < \
IMSIC_GROUP_SIZE(VIRT_CPUS_MAX_BITS, VIRT_IRQCHIP_MAX_GUESTS_BITS)
#error "Can't accommodate single IMSIC group in address space"
#endif
#define VIRT_IMSIC_MAX_SIZE (VIRT_SOCKETS_MAX * \
VIRT_IMSIC_GROUP_MAX_SIZE)
#if 0x4000000 < VIRT_IMSIC_MAX_SIZE
#error "Can't accommodate all IMSIC groups in address space"
#endif
/* KVM AIA only supports APLIC MSI. APLIC Wired is always emulated by QEMU. */
static bool virt_use_kvm_aia(RISCVVirtState *s)
{
@@ -273,6 +250,11 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket,
cpu_ptr->cfg.cboz_blocksize);
}
if (cpu_ptr->cfg.ext_zicbop) {
qemu_fdt_setprop_cell(ms->fdt, cpu_name, "riscv,cbop-block-size",
cpu_ptr->cfg.cbop_blocksize);
}
qemu_fdt_setprop_string(ms->fdt, cpu_name, "compatible", "riscv");
qemu_fdt_setprop_string(ms->fdt, cpu_name, "status", "okay");
qemu_fdt_setprop_cell(ms->fdt, cpu_name, "reg",
@@ -460,24 +442,6 @@ static void create_fdt_socket_plic(RISCVVirtState *s,
"sifive,plic-1.0.0", "riscv,plic0"
};
if (kvm_enabled()) {
plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);
} else {
plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4);
}
for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
if (kvm_enabled()) {
plic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
plic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT);
} else {
plic_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandles[cpu]);
plic_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_EXT);
plic_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandles[cpu]);
plic_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_S_EXT);
}
}
plic_phandles[socket] = (*phandle)++;
plic_addr = memmap[VIRT_PLIC].base + (memmap[VIRT_PLIC].size * socket);
plic_name = g_strdup_printf("/soc/plic@%lx", plic_addr);
@@ -490,8 +454,33 @@
(char **)&plic_compat,
ARRAY_SIZE(plic_compat));
qemu_fdt_setprop(ms->fdt, plic_name, "interrupt-controller", NULL, 0);
qemu_fdt_setprop(ms->fdt, plic_name, "interrupts-extended",
plic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4);
if (kvm_enabled()) {
plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);
for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
plic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
plic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT);
}
qemu_fdt_setprop(ms->fdt, plic_name, "interrupts-extended",
plic_cells,
s->soc[socket].num_harts * sizeof(uint32_t) * 2);
} else {
plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4);
for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
plic_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandles[cpu]);
plic_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_EXT);
plic_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandles[cpu]);
plic_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_S_EXT);
}
qemu_fdt_setprop(ms->fdt, plic_name, "interrupts-extended",
plic_cells,
s->soc[socket].num_harts * sizeof(uint32_t) * 4);
}
qemu_fdt_setprop_cells(ms->fdt, plic_name, "reg",
0x0, plic_addr, 0x0, memmap[VIRT_PLIC].size);
qemu_fdt_setprop_cell(ms->fdt, plic_name, "riscv,ndev",
@@ -512,7 +501,7 @@ static void create_fdt_socket_plic(RISCVVirtState *s,
g_free(plic_cells);
}
static uint32_t imsic_num_bits(uint32_t count)
uint32_t imsic_num_bits(uint32_t count)
{
uint32_t ret = 0;
@@ -1077,21 +1066,45 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap)
}
static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem,
hwaddr ecam_base, hwaddr ecam_size,
hwaddr mmio_base, hwaddr mmio_size,
hwaddr high_mmio_base,
hwaddr high_mmio_size,
hwaddr pio_base,
DeviceState *irqchip)
DeviceState *irqchip,
RISCVVirtState *s)
{
DeviceState *dev;
MemoryRegion *ecam_alias, *ecam_reg;
MemoryRegion *mmio_alias, *high_mmio_alias, *mmio_reg;
hwaddr ecam_base = s->memmap[VIRT_PCIE_ECAM].base;
hwaddr ecam_size = s->memmap[VIRT_PCIE_ECAM].size;
hwaddr mmio_base = s->memmap[VIRT_PCIE_MMIO].base;
hwaddr mmio_size = s->memmap[VIRT_PCIE_MMIO].size;
hwaddr high_mmio_base = virt_high_pcie_memmap.base;
hwaddr high_mmio_size = virt_high_pcie_memmap.size;
hwaddr pio_base = s->memmap[VIRT_PCIE_PIO].base;
hwaddr pio_size = s->memmap[VIRT_PCIE_PIO].size;
qemu_irq irq;
int i;
dev = qdev_new(TYPE_GPEX_HOST);
/* Set GPEX object properties for the virt machine */
object_property_set_uint(OBJECT(GPEX_HOST(dev)), PCI_HOST_ECAM_BASE,
ecam_base, NULL);
object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_ECAM_SIZE,
ecam_size, NULL);
object_property_set_uint(OBJECT(GPEX_HOST(dev)),
PCI_HOST_BELOW_4G_MMIO_BASE,
mmio_base, NULL);
object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_BELOW_4G_MMIO_SIZE,
mmio_size, NULL);
object_property_set_uint(OBJECT(GPEX_HOST(dev)),
PCI_HOST_ABOVE_4G_MMIO_BASE,
high_mmio_base, NULL);
object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_ABOVE_4G_MMIO_SIZE,
high_mmio_size, NULL);
object_property_set_uint(OBJECT(GPEX_HOST(dev)), PCI_HOST_PIO_BASE,
pio_base, NULL);
object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_PIO_SIZE,
pio_size, NULL);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
ecam_alias = g_new0(MemoryRegion, 1);
@@ -1122,6 +1135,7 @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem,
gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ + i);
}
GPEX_HOST(dev)->gpex_cfg.bus = PCI_HOST_BRIDGE(GPEX_HOST(dev))->bus;
return dev;
}
@@ -1517,15 +1531,7 @@ static void virt_machine_init(MachineState *machine)
qdev_get_gpio_in(virtio_irqchip, VIRTIO_IRQ + i));
}
gpex_pcie_init(system_memory,
memmap[VIRT_PCIE_ECAM].base,
memmap[VIRT_PCIE_ECAM].size,
memmap[VIRT_PCIE_MMIO].base,
memmap[VIRT_PCIE_MMIO].size,
virt_high_pcie_memmap.base,
virt_high_pcie_memmap.size,
memmap[VIRT_PCIE_PIO].base,
pcie_irqchip);
gpex_pcie_init(system_memory, pcie_irqchip, s);
create_platform_bus(s, mmio_irqchip);


@@ -77,3 +77,4 @@ system_ss.add(when: 'CONFIG_VIRTIO', if_false: files('virtio-stub.c'))
system_ss.add(files('virtio-hmp-cmds.c'))
specific_ss.add_all(when: 'CONFIG_VIRTIO', if_true: specific_virtio_ss)
system_ss.add(when: 'CONFIG_ACPI', if_true: files('virtio-acpi.c'))

hw/virtio/virtio-acpi.c (new file)

@@ -0,0 +1,33 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* virtio ACPI Support
*
*/
#include "hw/virtio/virtio-acpi.h"
#include "hw/acpi/aml-build.h"
void virtio_acpi_dsdt_add(Aml *scope, const hwaddr base, const hwaddr size,
uint32_t mmio_irq, long int start_index, int num)
{
hwaddr virtio_base = base;
uint32_t irq = mmio_irq;
long int i;
for (i = start_index; i < start_index + num; i++) {
Aml *dev = aml_device("VR%02u", (unsigned)i);
aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005")));
aml_append(dev, aml_name_decl("_UID", aml_int(i)));
aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
Aml *crs = aml_resource_template();
aml_append(crs, aml_memory32_fixed(virtio_base, size, AML_READ_WRITE));
aml_append(crs,
aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
AML_EXCLUSIVE, &irq, 1));
aml_append(dev, aml_name_decl("_CRS", crs));
aml_append(scope, dev);
virtio_base += size;
irq++;
}
}


@@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* ACPI support for fw_cfg
*
*/
#ifndef FW_CFG_ACPI_H
#define FW_CFG_ACPI_H
#include "qemu/osdep.h"
#include "exec/hwaddr.h"
void fw_cfg_acpi_dsdt_add(Aml *scope, const MemMapEntry *fw_cfg_memmap);
#endif


@@ -40,6 +40,15 @@ struct GPEXRootState {
/*< public >*/
};
struct GPEXConfig {
MemMapEntry ecam;
MemMapEntry mmio32;
MemMapEntry mmio64;
MemMapEntry pio;
int irq;
PCIBus *bus;
};
struct GPEXHost {
/*< private >*/
PCIExpressHost parent_obj;
@@ -55,19 +64,22 @@ struct GPEXHost {
int irq_num[GPEX_NUM_IRQS];
bool allow_unmapped_accesses;
};
struct GPEXConfig {
MemMapEntry ecam;
MemMapEntry mmio32;
MemMapEntry mmio64;
MemMapEntry pio;
int irq;
PCIBus *bus;
struct GPEXConfig gpex_cfg;
};
int gpex_set_irq_num(GPEXHost *s, int index, int gsi);
void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg);
void acpi_dsdt_add_gpex_host(Aml *scope, uint32_t irq);
#define PCI_HOST_PIO_BASE "x-pio-base"
#define PCI_HOST_PIO_SIZE "x-pio-size"
#define PCI_HOST_ECAM_BASE "x-ecam-base"
#define PCI_HOST_ECAM_SIZE "x-ecam-size"
#define PCI_HOST_BELOW_4G_MMIO_BASE "x-below-4g-mmio-base"
#define PCI_HOST_BELOW_4G_MMIO_SIZE "x-below-4g-mmio-size"
#define PCI_HOST_ABOVE_4G_MMIO_BASE "x-above-4g-mmio-base"
#define PCI_HOST_ABOVE_4G_MMIO_SIZE "x-above-4g-mmio-size"
#endif /* HW_GPEX_H */


@@ -23,6 +23,7 @@
#include "hw/riscv/riscv_hart.h"
#include "hw/sysbus.h"
#include "hw/block/flash.h"
#include "hw/intc/riscv_imsic.h"
#define VIRT_CPUS_MAX_BITS 9
#define VIRT_CPUS_MAX (1 << VIRT_CPUS_MAX_BITS)
@@ -60,6 +61,7 @@ struct RISCVVirtState {
char *oem_table_id;
OnOffAuto acpi;
const MemMapEntry *memmap;
struct GPEXHost *gpex_host;
};
enum {
@@ -127,4 +129,28 @@ enum {
bool virt_is_acpi_enabled(RISCVVirtState *s);
void virt_acpi_setup(RISCVVirtState *vms);
uint32_t imsic_num_bits(uint32_t count);
/*
* The virt machine physical address space used by some of the devices
* namely ACLINT, PLIC, APLIC, and IMSIC depend on number of Sockets,
* number of CPUs, and number of IMSIC guest files.
*
* Various limits defined by VIRT_SOCKETS_MAX_BITS, VIRT_CPUS_MAX_BITS,
* and VIRT_IRQCHIP_MAX_GUESTS_BITS are tuned for maximum utilization
* of virt machine physical address space.
*/
#define VIRT_IMSIC_GROUP_MAX_SIZE (1U << IMSIC_MMIO_GROUP_MIN_SHIFT)
#if VIRT_IMSIC_GROUP_MAX_SIZE < \
IMSIC_GROUP_SIZE(VIRT_CPUS_MAX_BITS, VIRT_IRQCHIP_MAX_GUESTS_BITS)
#error "Can't accomodate single IMSIC group in address space"
#endif
#define VIRT_IMSIC_MAX_SIZE (VIRT_SOCKETS_MAX * \
VIRT_IMSIC_GROUP_MAX_SIZE)
#if 0x4000000 < VIRT_IMSIC_MAX_SIZE
#error "Can't accomodate all IMSIC groups in address space"
#endif
#endif


@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* ACPI support for virtio
*/
#ifndef VIRTIO_ACPI_H
#define VIRTIO_ACPI_H
#include "qemu/osdep.h"
#include "exec/hwaddr.h"
void virtio_acpi_dsdt_add(Aml *scope, const hwaddr virtio_mmio_base,
const hwaddr virtio_mmio_size, uint32_t mmio_irq,
long int start_index, int num);
#endif


@@ -322,6 +322,8 @@ extern "C" {
* index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian
*/
#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV30 fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */
/*
* 2 plane YCbCr MSB aligned


@@ -80,6 +80,7 @@
#define PCI_HEADER_TYPE_NORMAL 0
#define PCI_HEADER_TYPE_BRIDGE 1
#define PCI_HEADER_TYPE_CARDBUS 2
#define PCI_HEADER_TYPE_MFD 0x80 /* Multi-Function Device (possible) */
#define PCI_BIST 0x0f /* 8 bits */
#define PCI_BIST_CODE_MASK 0x0f /* Return result */
@@ -637,6 +638,7 @@
#define PCI_EXP_RTCAP 0x1e /* Root Capabilities */
#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
#define PCI_EXP_RTSTA 0x20 /* Root Status */
#define PCI_EXP_RTSTA_PME_RQ_ID 0x0000ffff /* PME Requester ID */
#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */
/*
@@ -930,12 +932,13 @@
/* Process Address Space ID */
#define PCI_PASID_CAP 0x04 /* PASID feature register */
#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */
#define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */
#define PCI_PASID_CAP_EXEC 0x0002 /* Exec permissions Supported */
#define PCI_PASID_CAP_PRIV 0x0004 /* Privilege Mode Supported */
#define PCI_PASID_CAP_WIDTH 0x1f00
#define PCI_PASID_CTRL 0x06 /* PASID control register */
#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */
#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */
#define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */
#define PCI_PASID_CTRL_ENABLE 0x0001 /* Enable bit */
#define PCI_PASID_CTRL_EXEC 0x0002 /* Exec permissions Enable */
#define PCI_PASID_CTRL_PRIV 0x0004 /* Privilege Mode Enable */
#define PCI_EXT_CAP_PASID_SIZEOF 8
/* Single Root I/O Virtualization */
@@ -975,6 +978,8 @@
#define PCI_LTR_VALUE_MASK 0x000003ff
#define PCI_LTR_SCALE_MASK 0x00001c00
#define PCI_LTR_SCALE_SHIFT 10
#define PCI_LTR_NOSNOOP_VALUE 0x03ff0000 /* Max No-Snoop Latency Value */
#define PCI_LTR_NOSNOOP_SCALE 0x1c000000 /* Scale for Max Value */
#define PCI_EXT_CAP_LTR_SIZEOF 8
/* Access Control Service */
@@ -1042,9 +1047,16 @@
#define PCI_EXP_DPC_STATUS 0x08 /* DPC Status */
#define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR 0x0000 /* Uncorrectable error */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE 0x0002 /* Rcvd ERR_NONFATAL */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE 0x0004 /* Rcvd ERR_FATAL */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT 0x0006 /* Reason in Trig Reason Extension field */
#define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */
#define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO 0x0000 /* RP PIO error */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER 0x0020 /* DPC SW Trigger bit */
#define PCI_EXP_DPC_RP_PIO_FEP 0x1f00 /* RP PIO First Err Ptr */
#define PCI_EXP_DPC_SOURCE_ID 0x0A /* DPC Source Identifier */
@@ -1088,6 +1100,8 @@
#define PCI_L1SS_CTL1_LTR_L12_TH_VALUE 0x03ff0000 /* LTR_L1.2_THRESHOLD_Value */
#define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */
#define PCI_L1SS_CTL2 0x0c /* Control 2 Register */
#define PCI_L1SS_CTL2_T_PWR_ON_SCALE 0x00000003 /* T_POWER_ON Scale */
#define PCI_L1SS_CTL2_T_PWR_ON_VALUE 0x000000f8 /* T_POWER_ON Value */
/* Designated Vendor-Specific (DVSEC, PCI_EXT_CAP_ID_DVSEC) */
#define PCI_DVSEC_HEADER1 0x4 /* Designated Vendor-Specific Header1 */


@@ -185,5 +185,12 @@ struct vhost_vdpa_iova_range {
* DRIVER_OK
*/
#define VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK 0x6
/* Device may expose the virtqueue's descriptor area, driver area and
* device area to a different group for ASID binding than where its
* buffers may reside. Requires VHOST_BACKEND_F_IOTLB_ASID.
*/
#define VHOST_BACKEND_F_DESC_ASID 0x7
/* IOTLB don't flush memory mapping across device reset */
#define VHOST_BACKEND_F_IOTLB_PERSIST 0x8
#endif


@@ -103,6 +103,11 @@
*/
#define VIRTIO_F_NOTIFICATION_DATA 38
/* This feature indicates that the driver uses the data provided by the device
* as a virtqueue identifier in available buffer notifications.
*/
#define VIRTIO_F_NOTIF_CONFIG_DATA 39
/*
* This feature indicates that the driver can reset a queue individually.
*/


@@ -166,6 +166,17 @@ struct virtio_pci_common_cfg {
uint32_t queue_used_hi; /* read-write */
};
/*
* Warning: do not use sizeof on this: use offsetofend for
* specific fields you need.
*/
struct virtio_pci_modern_common_cfg {
struct virtio_pci_common_cfg cfg;
uint16_t queue_notify_data; /* read-write */
uint16_t queue_reset; /* read-write */
};
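
The sizeof warning exists because a device may expose only a prefix of this structure, so drivers gate access to the tail fields on the advertised length. A sketch using the kernel-style offsetofend() pattern (cfg_len is a placeholder for however the driver learns the exposed length):

#include <stddef.h>

#define offsetofend(TYPE, MEMBER) \
    (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Only touch queue_reset if the device actually exposes it. */
int have_queue_reset =
    cfg_len >= offsetofend(struct virtio_pci_modern_common_cfg, queue_reset);
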
/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
struct virtio_pci_cfg_cap {
struct virtio_pci_cap cap;


@@ -491,6 +491,38 @@ struct kvm_smccc_filter {
#define KVM_HYPERCALL_EXIT_SMC (1U << 0)
#define KVM_HYPERCALL_EXIT_16BIT (1U << 1)
/*
* Get feature ID registers userspace writable mask.
*
* From DDI0487J.a, D19.2.66 ("ID_AA64MMFR2_EL1, AArch64 Memory Model
* Feature Register 2"):
*
* "The Feature ID space is defined as the System register space in
* AArch64 with op0==3, op1=={0, 1, 3}, CRn==0, CRm=={0-7},
* op2=={0-7}."
*
* This covers all currently known R/O registers that indicate
* anything useful feature wise, including the ID registers.
*
* If we ever need to introduce a new range, it will be described as
* such in the range field.
*/
#define KVM_ARM_FEATURE_ID_RANGE_IDX(op0, op1, crn, crm, op2) \
({ \
__u64 __op1 = (op1) & 3; \
__op1 -= (__op1 == 3); \
(__op1 << 6 | ((crm) & 7) << 3 | (op2)); \
})
#define KVM_ARM_FEATURE_ID_RANGE 0
#define KVM_ARM_FEATURE_ID_RANGE_SIZE (3 * 8 * 8)
struct reg_mask_range {
__u64 addr; /* Pointer to mask array */
__u32 range; /* Requested range */
__u32 reserved[13];
};
#endif
#endif /* __ARM_KVM_H__ */
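
As a worked example of the index macro above: ID_AA64MMFR2_EL1 has op0=3, op1=0, CRn=0, CRm=7, op2=2, so __op1 stays 0 and the macro evaluates to (0 << 6) | (7 << 3) | 2 = 58, an index into the 3 * 8 * 8 = 192-entry mask array requested through struct reg_mask_range:

int idx = KVM_ARM_FEATURE_ID_RANGE_IDX(3, 0, 0, 7, 2); /* == 58 */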


@@ -71,7 +71,7 @@ __SYSCALL(__NR_fremovexattr, sys_fremovexattr)
#define __NR_getcwd 17
__SYSCALL(__NR_getcwd, sys_getcwd)
#define __NR_lookup_dcookie 18
__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie)
__SYSCALL(__NR_lookup_dcookie, sys_ni_syscall)
#define __NR_eventfd2 19
__SYSCALL(__NR_eventfd2, sys_eventfd2)
#define __NR_epoll_create1 20
@@ -816,15 +816,21 @@ __SYSCALL(__NR_process_mrelease, sys_process_mrelease)
__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
#define __NR_set_mempolicy_home_node 450
__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
#define __NR_cachestat 451
__SYSCALL(__NR_cachestat, sys_cachestat)
#define __NR_fchmodat2 452
__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
#define __NR_map_shadow_stack 453
__SYSCALL(__NR_map_shadow_stack, sys_map_shadow_stack)
#define __NR_futex_wake 454
__SYSCALL(__NR_futex_wake, sys_futex_wake)
#define __NR_futex_wait 455
__SYSCALL(__NR_futex_wait, sys_futex_wait)
#define __NR_futex_requeue 456
__SYSCALL(__NR_futex_requeue, sys_futex_requeue)
#undef __NR_syscalls
#define __NR_syscalls 453
#define __NR_syscalls 457
/*
* 32 bit systems traditionally used different


@@ -0,0 +1 @@
#include <asm-generic/bitsperlong.h>


@@ -0,0 +1,108 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2020-2023 Loongson Technology Corporation Limited
*/
#ifndef __UAPI_ASM_LOONGARCH_KVM_H
#define __UAPI_ASM_LOONGARCH_KVM_H
#include <linux/types.h>
/*
* KVM LoongArch specific structures and definitions.
*
* Some parts derived from the x86 version of this file.
*/
#define __KVM_HAVE_READONLY_MEM
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
/*
* for KVM_GET_REGS and KVM_SET_REGS
*/
struct kvm_regs {
/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
__u64 gpr[32];
__u64 pc;
};
/*
* for KVM_GET_FPU and KVM_SET_FPU
*/
struct kvm_fpu {
__u32 fcsr;
__u64 fcc; /* 8x8 */
struct kvm_fpureg {
__u64 val64[4];
} fpr[32];
};
/*
* For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
* registers. The id field is broken down as follows:
*
* bits[63..52] - As per linux/kvm.h
* bits[51..32] - Must be zero.
* bits[31..16] - Register set.
*
* Register set = 0: GP registers from kvm_regs (see definitions below).
*
* Register set = 1: CSR registers.
*
* Register set = 2: KVM specific registers (see definitions below).
*
* Register set = 3: FPU / SIMD registers (see definitions below).
*
* Other sets registers may be added in the future. Each set would
* have its own identifier in bits[31..16].
*/
#define KVM_REG_LOONGARCH_GPR (KVM_REG_LOONGARCH | 0x00000ULL)
#define KVM_REG_LOONGARCH_CSR (KVM_REG_LOONGARCH | 0x10000ULL)
#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL)
#define KVM_REG_LOONGARCH_FPSIMD (KVM_REG_LOONGARCH | 0x30000ULL)
#define KVM_REG_LOONGARCH_CPUCFG (KVM_REG_LOONGARCH | 0x40000ULL)
#define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x70000ULL)
#define KVM_CSR_IDX_MASK 0x7fff
#define KVM_CPUCFG_IDX_MASK 0x7fff
/*
* KVM_REG_LOONGARCH_KVM - KVM specific control registers.
*/
#define KVM_REG_LOONGARCH_COUNTER (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 1)
#define KVM_REG_LOONGARCH_VCPU_RESET (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 2)
#define LOONGARCH_REG_SHIFT 3
#define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
#define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
#define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
struct kvm_debug_exit_arch {
};
/* for KVM_SET_GUEST_DEBUG */
struct kvm_guest_debug_arch {
};
/* definition of registers in kvm_run */
struct kvm_sync_regs {
};
/* dummy definition */
struct kvm_sregs {
};
struct kvm_iocsr_entry {
__u32 addr;
__u32 pad;
__u64 data;
};
#define KVM_NR_IRQCHIPS 1
#define KVM_IRQCHIP_NUM_PINS 64
#define KVM_MAX_CORES 256
#endif /* __UAPI_ASM_LOONGARCH_KVM_H */
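
To show how the id layout above is meant to be used, a hedged sketch of reading one CSR through the generic one-reg ioctl (the CSR index 0 and the fd handling are placeholders, not part of this header):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int loongarch_get_csr0(int vcpu_fd, __u64 *val)
{
    struct kvm_one_reg reg = {
        .id   = KVM_IOC_CSRID(0), /* CSR set, 64-bit, register index 0 */
        .addr = (__u64)(unsigned long)val,
    };
    return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}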


@@ -0,0 +1 @@
#include <asm-generic/mman.h>


@@ -0,0 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_CLONE3
#include <asm-generic/unistd.h>


@@ -381,5 +381,9 @@
#define __NR_set_mempolicy_home_node (__NR_Linux + 450)
#define __NR_cachestat (__NR_Linux + 451)
#define __NR_fchmodat2 (__NR_Linux + 452)
#define __NR_map_shadow_stack (__NR_Linux + 453)
#define __NR_futex_wake (__NR_Linux + 454)
#define __NR_futex_wait (__NR_Linux + 455)
#define __NR_futex_requeue (__NR_Linux + 456)
#endif /* _ASM_UNISTD_N32_H */


@@ -357,5 +357,9 @@
#define __NR_set_mempolicy_home_node (__NR_Linux + 450)
#define __NR_cachestat (__NR_Linux + 451)
#define __NR_fchmodat2 (__NR_Linux + 452)
#define __NR_map_shadow_stack (__NR_Linux + 453)
#define __NR_futex_wake (__NR_Linux + 454)
#define __NR_futex_wait (__NR_Linux + 455)
#define __NR_futex_requeue (__NR_Linux + 456)
#endif /* _ASM_UNISTD_N64_H */


@@ -427,5 +427,9 @@
#define __NR_set_mempolicy_home_node (__NR_Linux + 450)
#define __NR_cachestat (__NR_Linux + 451)
#define __NR_fchmodat2 (__NR_Linux + 452)
#define __NR_map_shadow_stack (__NR_Linux + 453)
#define __NR_futex_wake (__NR_Linux + 454)
#define __NR_futex_wait (__NR_Linux + 455)
#define __NR_futex_requeue (__NR_Linux + 456)
#endif /* _ASM_UNISTD_O32_H */


@@ -434,6 +434,10 @@
#define __NR_set_mempolicy_home_node 450
#define __NR_cachestat 451
#define __NR_fchmodat2 452
#define __NR_map_shadow_stack 453
#define __NR_futex_wake 454
#define __NR_futex_wait 455
#define __NR_futex_requeue 456
#endif /* _ASM_UNISTD_32_H */


@@ -406,6 +406,10 @@
#define __NR_set_mempolicy_home_node 450
#define __NR_cachestat 451
#define __NR_fchmodat2 452
#define __NR_map_shadow_stack 453
#define __NR_futex_wake 454
#define __NR_futex_wait 455
#define __NR_futex_requeue 456
#endif /* _ASM_UNISTD_64_H */


@@ -80,6 +80,7 @@ struct kvm_riscv_csr {
unsigned long sip;
unsigned long satp;
unsigned long scounteren;
unsigned long senvcfg;
};
/* AIA CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
@@ -93,6 +94,11 @@ struct kvm_riscv_aia_csr {
unsigned long iprio2h;
};
/* Smstateen CSR for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
struct kvm_riscv_smstateen_csr {
unsigned long sstateen0;
};
/* TIMER registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
struct kvm_riscv_timer {
__u64 frequency;
@@ -131,6 +137,8 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_ZICSR,
KVM_RISCV_ISA_EXT_ZIFENCEI,
KVM_RISCV_ISA_EXT_ZIHPM,
KVM_RISCV_ISA_EXT_SMSTATEEN,
KVM_RISCV_ISA_EXT_ZICOND,
KVM_RISCV_ISA_EXT_MAX,
};
@@ -148,6 +156,7 @@ enum KVM_RISCV_SBI_EXT_ID {
KVM_RISCV_SBI_EXT_PMU,
KVM_RISCV_SBI_EXT_EXPERIMENTAL,
KVM_RISCV_SBI_EXT_VENDOR,
KVM_RISCV_SBI_EXT_DBCN,
KVM_RISCV_SBI_EXT_MAX,
};
@@ -178,10 +187,13 @@ enum KVM_RISCV_SBI_EXT_ID {
#define KVM_REG_RISCV_CSR (0x03 << KVM_REG_RISCV_TYPE_SHIFT)
#define KVM_REG_RISCV_CSR_GENERAL (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT)
#define KVM_REG_RISCV_CSR_AIA (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT)
#define KVM_REG_RISCV_CSR_SMSTATEEN (0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT)
#define KVM_REG_RISCV_CSR_REG(name) \
(offsetof(struct kvm_riscv_csr, name) / sizeof(unsigned long))
#define KVM_REG_RISCV_CSR_AIA_REG(name) \
(offsetof(struct kvm_riscv_aia_csr, name) / sizeof(unsigned long))
#define KVM_REG_RISCV_CSR_SMSTATEEN_REG(name) \
(offsetof(struct kvm_riscv_smstateen_csr, name) / sizeof(unsigned long))
/* Timer registers are mapped as type 4 */
#define KVM_REG_RISCV_TIMER (0x04 << KVM_REG_RISCV_TYPE_SHIFT)
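
Composing an id for the new sstateen0 register follows the same type/subtype scheme as the other CSR subtypes; a sketch for RV64 (the KVM_REG_SIZE_U64 composition is assumed from convention, not copied from QEMU):

__u64 sstateen0_id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                     KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_SMSTATEEN |
                     KVM_REG_RISCV_CSR_SMSTATEEN_REG(sstateen0);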


@@ -0,0 +1,132 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (C) 2012 Regents of the University of California
*/
#ifndef _ASM_RISCV_PTRACE_H
#define _ASM_RISCV_PTRACE_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
#define PTRACE_GETFDPIC 33
#define PTRACE_GETFDPIC_EXEC 0
#define PTRACE_GETFDPIC_INTERP 1
/*
* User-mode register state for core dumps, ptrace, sigcontext
*
* This decouples struct pt_regs from the userspace ABI.
* struct user_regs_struct must form a prefix of struct pt_regs.
*/
struct user_regs_struct {
unsigned long pc;
unsigned long ra;
unsigned long sp;
unsigned long gp;
unsigned long tp;
unsigned long t0;
unsigned long t1;
unsigned long t2;
unsigned long s0;
unsigned long s1;
unsigned long a0;
unsigned long a1;
unsigned long a2;
unsigned long a3;
unsigned long a4;
unsigned long a5;
unsigned long a6;
unsigned long a7;
unsigned long s2;
unsigned long s3;
unsigned long s4;
unsigned long s5;
unsigned long s6;
unsigned long s7;
unsigned long s8;
unsigned long s9;
unsigned long s10;
unsigned long s11;
unsigned long t3;
unsigned long t4;
unsigned long t5;
unsigned long t6;
};
struct __riscv_f_ext_state {
__u32 f[32];
__u32 fcsr;
};
struct __riscv_d_ext_state {
__u64 f[32];
__u32 fcsr;
};
struct __riscv_q_ext_state {
__u64 f[64] __attribute__((aligned(16)));
__u32 fcsr;
/*
* Reserved for expansion of sigcontext structure. Currently zeroed
* upon signal, and must be zero upon sigreturn.
*/
__u32 reserved[3];
};
struct __riscv_ctx_hdr {
__u32 magic;
__u32 size;
};
struct __riscv_extra_ext_header {
__u32 __padding[129] __attribute__((aligned(16)));
/*
* Reserved for expansion of sigcontext structure. Currently zeroed
* upon signal, and must be zero upon sigreturn.
*/
__u32 reserved;
struct __riscv_ctx_hdr hdr;
};
union __riscv_fp_state {
struct __riscv_f_ext_state f;
struct __riscv_d_ext_state d;
struct __riscv_q_ext_state q;
};
struct __riscv_v_ext_state {
unsigned long vstart;
unsigned long vl;
unsigned long vtype;
unsigned long vcsr;
unsigned long vlenb;
void *datap;
/*
* In signal handler, datap will be set a correct user stack offset
* and vector registers will be copied to the address of datap
* pointer.
*/
};
struct __riscv_v_regset_state {
unsigned long vstart;
unsigned long vl;
unsigned long vtype;
unsigned long vcsr;
unsigned long vlenb;
char vreg[];
};
/*
* According to the spec: VLEN, the number of bits in a single vector
* register, satisfies VLEN >= ELEN, must be a power of 2, and must be
* no greater than 2^16 = 65536 bits = 8192 bytes
*/
#define RISCV_MAX_VLENB (8192)
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_PTRACE_H */
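The flexible vreg[] tail of __riscv_v_regset_state is sized from vlenb at
runtime; a minimal sketch of that arithmetic (the helper name is ours, and the
header path assumes a RISC-V toolchain):

#include <asm/ptrace.h>   /* __riscv_v_regset_state, RISCV_MAX_VLENB */
#include <stddef.h>

/* 32 vector registers of vlenb bytes each follow the fixed header. */
static size_t v_regset_size(unsigned long vlenb)
{
    return sizeof(struct __riscv_v_regset_state) + 32 * vlenb;
}

/* Worst case: 32 * RISCV_MAX_VLENB = 32 * 8192 = 256 KiB of register data. */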


@ -425,5 +425,9 @@
#define __NR_set_mempolicy_home_node 450
#define __NR_cachestat 451
#define __NR_fchmodat2 452
#define __NR_map_shadow_stack 453
#define __NR_futex_wake 454
#define __NR_futex_wait 455
#define __NR_futex_requeue 456
#endif /* _ASM_S390_UNISTD_32_H */


@ -373,5 +373,9 @@
#define __NR_set_mempolicy_home_node 450
#define __NR_cachestat 451
#define __NR_fchmodat2 452
#define __NR_map_shadow_stack 453
#define __NR_futex_wake 454
#define __NR_futex_wait 455
#define __NR_futex_requeue 456
#endif /* _ASM_S390_UNISTD_64_H */


@ -443,6 +443,10 @@
#define __NR_set_mempolicy_home_node 450
#define __NR_cachestat 451
#define __NR_fchmodat2 452
#define __NR_map_shadow_stack 453
#define __NR_futex_wake 454
#define __NR_futex_wait 455
#define __NR_futex_requeue 456
#endif /* _ASM_UNISTD_32_H */


@ -366,6 +366,9 @@
#define __NR_cachestat 451
#define __NR_fchmodat2 452
#define __NR_map_shadow_stack 453
#define __NR_futex_wake 454
#define __NR_futex_wait 455
#define __NR_futex_requeue 456
#endif /* _ASM_UNISTD_64_H */


@ -318,6 +318,9 @@
#define __NR_set_mempolicy_home_node (__X32_SYSCALL_BIT + 450)
#define __NR_cachestat (__X32_SYSCALL_BIT + 451)
#define __NR_fchmodat2 (__X32_SYSCALL_BIT + 452)
#define __NR_futex_wake (__X32_SYSCALL_BIT + 454)
#define __NR_futex_wait (__X32_SYSCALL_BIT + 455)
#define __NR_futex_requeue (__X32_SYSCALL_BIT + 456)
#define __NR_rt_sigaction (__X32_SYSCALL_BIT + 512)
#define __NR_rt_sigreturn (__X32_SYSCALL_BIT + 513)
#define __NR_ioctl (__X32_SYSCALL_BIT + 514)


@ -47,6 +47,8 @@ enum {
IOMMUFD_CMD_VFIO_IOAS,
IOMMUFD_CMD_HWPT_ALLOC,
IOMMUFD_CMD_GET_HW_INFO,
IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING,
IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP,
};
/**
@ -347,20 +349,86 @@ struct iommu_vfio_ioas {
};
#define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS)
/**
* enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation
* @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a HWPT that can serve as
* the parent HWPT in a nesting configuration.
* @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is
* enforced on device attachment
*/
enum iommufd_hwpt_alloc_flags {
IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0,
IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1,
};
/**
* enum iommu_hwpt_vtd_s1_flags - Intel VT-d stage-1 page table
* entry attributes
* @IOMMU_VTD_S1_SRE: Supervisor request
* @IOMMU_VTD_S1_EAFE: Extended access enable
* @IOMMU_VTD_S1_WPE: Write protect enable
*/
enum iommu_hwpt_vtd_s1_flags {
IOMMU_VTD_S1_SRE = 1 << 0,
IOMMU_VTD_S1_EAFE = 1 << 1,
IOMMU_VTD_S1_WPE = 1 << 2,
};
/**
* struct iommu_hwpt_vtd_s1 - Intel VT-d stage-1 page table
* info (IOMMU_HWPT_DATA_VTD_S1)
* @flags: Combination of enum iommu_hwpt_vtd_s1_flags
* @pgtbl_addr: The base address of the stage-1 page table.
* @addr_width: The address width of the stage-1 page table
* @__reserved: Must be 0
*/
struct iommu_hwpt_vtd_s1 {
__aligned_u64 flags;
__aligned_u64 pgtbl_addr;
__u32 addr_width;
__u32 __reserved;
};
/**
* enum iommu_hwpt_data_type - IOMMU HWPT Data Type
* @IOMMU_HWPT_DATA_NONE: no data
* @IOMMU_HWPT_DATA_VTD_S1: Intel VT-d stage-1 page table
*/
enum iommu_hwpt_data_type {
IOMMU_HWPT_DATA_NONE,
IOMMU_HWPT_DATA_VTD_S1,
};
/**
* struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC)
* @size: sizeof(struct iommu_hwpt_alloc)
* @flags: Must be 0
* @flags: Combination of enum iommufd_hwpt_alloc_flags
* @dev_id: The device to allocate this HWPT for
* @pt_id: The IOAS to connect this HWPT to
* @pt_id: The IOAS or HWPT to connect this HWPT to
* @out_hwpt_id: The ID of the new HWPT
* @__reserved: Must be 0
* @data_type: One of enum iommu_hwpt_data_type
* @data_len: Length of the type specific data
* @data_uptr: User pointer to the type specific data
*
* Explicitly allocate a hardware page table object. This is the same object
* type that is returned by iommufd_device_attach() and represents the
* underlying iommu driver's iommu_domain kernel object.
*
* A HWPT will be created with the IOVA mappings from the given IOAS.
* A kernel-managed HWPT will be created with the mappings from the given
* IOAS via the @pt_id. The @data_type for this allocation must be set to
* IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a
* nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags.
*
* A user-managed nested HWPT will be created from a given parent HWPT via
* @pt_id, in which the parent HWPT must be allocated previously via the
* same ioctl from a given IOAS (@pt_id). In this case, the @data_type
* must be set to a pre-defined type corresponding to an I/O page table
* type supported by the underlying IOMMU hardware.
*
* If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and
* @data_uptr should be zero. Otherwise, both @data_len and @data_uptr
* must be given.
*/
struct iommu_hwpt_alloc {
__u32 size;
@ -369,13 +437,26 @@ struct iommu_hwpt_alloc {
__u32 pt_id;
__u32 out_hwpt_id;
__u32 __reserved;
__u32 data_type;
__u32 data_len;
__aligned_u64 data_uptr;
};
#define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC)
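As a usage sketch of the allocation flow documented above (the fd and object
IDs are hypothetical; error handling is trimmed):

#include <linux/iommufd.h>
#include <sys/ioctl.h>

/* Kernel-managed HWPT backed by an IOAS, usable later as a nesting parent. */
static int hwpt_alloc_parent(int iommufd, __u32 dev_id, __u32 ioas_id,
                             __u32 *out_hwpt_id)
{
    struct iommu_hwpt_alloc cmd = {
        .size = sizeof(cmd),
        .flags = IOMMU_HWPT_ALLOC_NEST_PARENT,
        .dev_id = dev_id,
        .pt_id = ioas_id,                  /* an IOAS for kernel-managed */
        .data_type = IOMMU_HWPT_DATA_NONE, /* no vendor-specific data */
    };

    if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd)) {
        return -1;
    }
    *out_hwpt_id = cmd.out_hwpt_id;        /* pass as pt_id for a nested HWPT */
    return 0;
}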
/**
* enum iommu_hw_info_vtd_flags - Flags for VT-d hw_info
* @IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17: If set, disallow read-only mappings
* on a nested_parent domain.
* https://www.intel.com/content/www/us/en/content-details/772415/content-details.html
*/
enum iommu_hw_info_vtd_flags {
IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17 = 1 << 0,
};
/**
* struct iommu_hw_info_vtd - Intel VT-d hardware information
*
* @flags: Must be 0
* @flags: Combination of enum iommu_hw_info_vtd_flags
* @__reserved: Must be 0
*
* @cap_reg: Value of Intel VT-d capability register defined in VT-d spec
@ -404,6 +485,20 @@ enum iommu_hw_info_type {
IOMMU_HW_INFO_TYPE_INTEL_VTD,
};
/**
* enum iommufd_hw_capabilities
* @IOMMU_HW_CAP_DIRTY_TRACKING: IOMMU hardware support for dirty tracking
* If available, it means the following APIs
* are supported:
*
* IOMMU_HWPT_GET_DIRTY_BITMAP
* IOMMU_HWPT_SET_DIRTY_TRACKING
*
*/
enum iommufd_hw_capabilities {
IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0,
};
/**
* struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO)
* @size: sizeof(struct iommu_hw_info)
@ -415,6 +510,8 @@ enum iommu_hw_info_type {
* the iommu type specific hardware information data
* @out_data_type: Output the iommu hardware info type as defined in the enum
* iommu_hw_info_type.
* @out_capabilities: Output the generic iommu capability info type as defined
* in the enum iommu_hw_capabilities.
* @__reserved: Must be 0
*
* Query an iommu type specific hardware information data from an iommu behind
@ -439,6 +536,81 @@ struct iommu_hw_info {
__aligned_u64 data_uptr;
__u32 out_data_type;
__u32 __reserved;
__aligned_u64 out_capabilities;
};
#define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO)
/*
* enum iommufd_hwpt_set_dirty_tracking_flags - Flags for steering dirty
* tracking
* @IOMMU_HWPT_DIRTY_TRACKING_ENABLE: Enable dirty tracking
*/
enum iommufd_hwpt_set_dirty_tracking_flags {
IOMMU_HWPT_DIRTY_TRACKING_ENABLE = 1,
};
/**
* struct iommu_hwpt_set_dirty_tracking - ioctl(IOMMU_HWPT_SET_DIRTY_TRACKING)
* @size: sizeof(struct iommu_hwpt_set_dirty_tracking)
* @flags: Combination of enum iommufd_hwpt_set_dirty_tracking_flags
* @hwpt_id: HW pagetable ID that represents the IOMMU domain
* @__reserved: Must be 0
*
* Toggle dirty tracking on an HW pagetable.
*/
struct iommu_hwpt_set_dirty_tracking {
__u32 size;
__u32 flags;
__u32 hwpt_id;
__u32 __reserved;
};
#define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \
IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING)
/**
* enum iommufd_hwpt_get_dirty_bitmap_flags - Flags for getting dirty bits
* @IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR: Just read the PTEs without clearing
* any dirty bit metadata. This flag
* can be passed when the caller
* expects the next operation to be
* an unmap of the same IOVA range.
*
*/
enum iommufd_hwpt_get_dirty_bitmap_flags {
IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR = 1,
};
/**
* struct iommu_hwpt_get_dirty_bitmap - ioctl(IOMMU_HWPT_GET_DIRTY_BITMAP)
* @size: sizeof(struct iommu_hwpt_get_dirty_bitmap)
* @hwpt_id: HW pagetable ID that represents the IOMMU domain
* @flags: Combination of enum iommufd_hwpt_get_dirty_bitmap_flags
* @__reserved: Must be 0
* @iova: base IOVA of the bitmap first bit
* @length: IOVA range size
* @page_size: page size granularity of each bit in the bitmap
* @data: bitmap in which to set the dirty bits. Each bit in the bitmap
* represents one page_size unit of IOVA, counted from @iova.
*
* Checking a given IOVA is dirty:
*
* data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64))
*
* Walk the IOMMU pagetables for a given IOVA range to return a bitmap
* with the dirty IOVAs. In doing so it will also by default clear any
* dirty bit metadata set in the IOPTE.
*/
struct iommu_hwpt_get_dirty_bitmap {
__u32 size;
__u32 hwpt_id;
__u32 flags;
__u32 __reserved;
__aligned_u64 iova;
__aligned_u64 length;
__aligned_u64 page_size;
__aligned_u64 data;
};
#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \
IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP)
#endif
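Putting the two new ioctls and the documented bit-check formula together, a
hedged sketch (IDs and ranges are hypothetical; the bit index is taken
relative to the queried base IOVA):

#include <linux/iommufd.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int iova_is_dirty(int iommufd, __u32 hwpt_id, uint64_t base,
                         uint64_t length, uint64_t page_size, uint64_t iova)
{
    uint64_t nbits = length / page_size;
    uint64_t bitmap[(nbits + 63) / 64];

    struct iommu_hwpt_set_dirty_tracking on = {
        .size = sizeof(on),
        .flags = IOMMU_HWPT_DIRTY_TRACKING_ENABLE,
        .hwpt_id = hwpt_id,
    };
    struct iommu_hwpt_get_dirty_bitmap get = {
        .size = sizeof(get),
        .hwpt_id = hwpt_id,
        .iova = base,
        .length = length,
        .page_size = page_size,
        .data = (uintptr_t)bitmap,
    };

    if (ioctl(iommufd, IOMMU_HWPT_SET_DIRTY_TRACKING, &on) ||
        ioctl(iommufd, IOMMU_HWPT_GET_DIRTY_BITMAP, &get)) {
        return -1;
    }

    /* The documented check, with the bit index relative to @base */
    uint64_t bit = (iova - base) / page_size;
    return !!(bitmap[bit / 64] & (1ULL << (bit % 64)));
}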


@ -264,6 +264,7 @@ struct kvm_xen_exit {
#define KVM_EXIT_RISCV_SBI 35
#define KVM_EXIT_RISCV_CSR 36
#define KVM_EXIT_NOTIFY 37
#define KVM_EXIT_LOONGARCH_IOCSR 38
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@ -336,6 +337,13 @@ struct kvm_run {
__u32 len;
__u8 is_write;
} mmio;
/* KVM_EXIT_LOONGARCH_IOCSR */
struct {
__u64 phys_addr;
__u8 data[8];
__u32 len;
__u8 is_write;
} iocsr_io;
/* KVM_EXIT_HYPERCALL */
struct {
__u64 nr;
@ -1188,6 +1196,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_COUNTER_OFFSET 227
#define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
#define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230
#ifdef KVM_CAP_IRQ_ROUTING
@ -1358,6 +1367,7 @@ struct kvm_dirty_tlb {
#define KVM_REG_ARM64 0x6000000000000000ULL
#define KVM_REG_MIPS 0x7000000000000000ULL
#define KVM_REG_RISCV 0x8000000000000000ULL
#define KVM_REG_LOONGARCH 0x9000000000000000ULL
#define KVM_REG_SIZE_SHIFT 52
#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
@ -1558,6 +1568,7 @@ struct kvm_s390_ucas_mapping {
#define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags)
/* Available with KVM_CAP_COUNTER_OFFSET */
#define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset)
#define KVM_ARM_GET_REG_WRITABLE_MASKS _IOR(KVMIO, 0xb6, struct reg_mask_range)
/* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)


@ -68,6 +68,7 @@ typedef enum {
SEV_RET_INVALID_PARAM,
SEV_RET_RESOURCE_LIMIT,
SEV_RET_SECURE_DATA_INVALID,
SEV_RET_INVALID_KEY = 0x27,
SEV_RET_MAX,
} sev_ret_code;


@ -27,8 +27,13 @@
union { \
struct { MEMBERS } ATTRS; \
struct TAG { MEMBERS } ATTRS NAME; \
}
} ATTRS
#ifdef __cplusplus
/* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. */
#define __DECLARE_FLEX_ARRAY(T, member) \
T member[0]
#else
/**
* __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
*
@ -49,3 +54,5 @@
#ifndef __counted_by
#define __counted_by(m)
#endif
#endif /* _LINUX_STDDEF_H */
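For context on why the C++ fallback matters, this is the usage pattern
__DECLARE_FLEX_ARRAY exists for (a sketch; the struct is ours): a flexible
array as the sole member of a union, which plain flexible-array syntax does
not allow.

#include <linux/stddef.h>
#include <linux/types.h>

struct message {
    __u32 len;
    union {
        __DECLARE_FLEX_ARRAY(__u8, bytes);   /* same storage, viewed ... */
        __DECLARE_FLEX_ARRAY(__u32, words);  /* ... at two granularities */
    };
};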


@ -40,7 +40,8 @@
UFFD_FEATURE_EXACT_ADDRESS | \
UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \
UFFD_FEATURE_WP_UNPOPULATED | \
UFFD_FEATURE_POISON)
UFFD_FEATURE_POISON | \
UFFD_FEATURE_WP_ASYNC)
#define UFFD_API_IOCTLS \
((__u64)1 << _UFFDIO_REGISTER | \
(__u64)1 << _UFFDIO_UNREGISTER | \
@ -216,6 +217,11 @@ struct uffdio_api {
* (i.e. empty ptes). This will be the default behavior for shmem
* & hugetlbfs, so this flag only affects anonymous memory behavior
* when userfault write-protection mode is registered.
*
* UFFD_FEATURE_WP_ASYNC indicates that userfaultfd write-protection
* asynchronous mode is supported in which the write fault is
* automatically resolved and write-protection is un-set.
* It implies UFFD_FEATURE_WP_UNPOPULATED.
*/
#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
#define UFFD_FEATURE_EVENT_FORK (1<<1)
@ -232,6 +238,7 @@ struct uffdio_api {
#define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12)
#define UFFD_FEATURE_WP_UNPOPULATED (1<<13)
#define UFFD_FEATURE_POISON (1<<14)
#define UFFD_FEATURE_WP_ASYNC (1<<15)
__u64 features;
__u64 ioctls;
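A minimal negotiation sketch for the new feature bit (assumes a kernel new
enough to know UFFD_FEATURE_WP_ASYNC; requesting an unsupported feature makes
UFFDIO_API fail):

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (uffd < 0) {
        perror("userfaultfd");
        return 1;
    }

    struct uffdio_api api = {
        .api = UFFD_API,
        .features = UFFD_FEATURE_WP_ASYNC,  /* implies WP_UNPOPULATED */
    };
    if (ioctl(uffd, UFFDIO_API, &api)) {
        perror("UFFDIO_API");               /* feature not available */
        return 1;
    }
    printf("async write-protect negotiated, features 0x%llx\n",
           (unsigned long long)api.features);
    close(uffd);
    return 0;
}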


@ -277,8 +277,8 @@ struct vfio_region_info {
#define VFIO_REGION_INFO_FLAG_CAPS (1 << 3) /* Info supports caps */
__u32 index; /* Region index */
__u32 cap_offset; /* Offset within info struct of first cap */
__u64 size; /* Region size (bytes) */
__u64 offset; /* Region offset from start of device fd */
__aligned_u64 size; /* Region size (bytes) */
__aligned_u64 offset; /* Region offset from start of device fd */
};
#define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8)
@ -294,8 +294,8 @@ struct vfio_region_info {
#define VFIO_REGION_INFO_CAP_SPARSE_MMAP 1
struct vfio_region_sparse_mmap_area {
__u64 offset; /* Offset of mmap'able area within region */
__u64 size; /* Size of mmap'able area */
__aligned_u64 offset; /* Offset of mmap'able area within region */
__aligned_u64 size; /* Size of mmap'able area */
};
struct vfio_region_info_cap_sparse_mmap {
@ -450,9 +450,9 @@ struct vfio_device_migration_info {
VFIO_DEVICE_STATE_V1_RESUMING)
__u32 reserved;
__u64 pending_bytes;
__u64 data_offset;
__u64 data_size;
__aligned_u64 pending_bytes;
__aligned_u64 data_offset;
__aligned_u64 data_size;
};
/*
@ -476,7 +476,7 @@ struct vfio_device_migration_info {
struct vfio_region_info_cap_nvlink2_ssatgt {
struct vfio_info_cap_header header;
__u64 tgt;
__aligned_u64 tgt;
};
/*
@ -816,7 +816,7 @@ struct vfio_device_gfx_plane_info {
__u32 drm_plane_type; /* type of plane: DRM_PLANE_TYPE_* */
/* out */
__u32 drm_format; /* drm format of plane */
__u64 drm_format_mod; /* tiled mode */
__aligned_u64 drm_format_mod; /* tiled mode */
__u32 width; /* width of plane */
__u32 height; /* height of plane */
__u32 stride; /* stride of plane */
@ -829,6 +829,7 @@ struct vfio_device_gfx_plane_info {
__u32 region_index; /* region index */
__u32 dmabuf_id; /* dma-buf id */
};
__u32 reserved;
};
#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)
@ -863,9 +864,10 @@ struct vfio_device_ioeventfd {
#define VFIO_DEVICE_IOEVENTFD_32 (1 << 2) /* 4-byte write */
#define VFIO_DEVICE_IOEVENTFD_64 (1 << 3) /* 8-byte write */
#define VFIO_DEVICE_IOEVENTFD_SIZE_MASK (0xf)
__u64 offset; /* device fd offset of write */
__u64 data; /* data to be written */
__aligned_u64 offset; /* device fd offset of write */
__aligned_u64 data; /* data to be written */
__s32 fd; /* -1 for de-assignment */
__u32 reserved;
};
#define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16)
@ -1434,6 +1436,27 @@ struct vfio_device_feature_mig_data_size {
#define VFIO_DEVICE_FEATURE_MIG_DATA_SIZE 9
/**
* Upon VFIO_DEVICE_FEATURE_SET, set or clear the BUS mastering for the device
* based on the operation specified in op flag.
*
* The functionality is incorporated for devices that need bus master control,
* but whose in-band device interface lacks the support. Consequently, it is not
* applicable to PCI devices, as bus master control for PCI devices is managed
* in-band through the configuration space. At present, this feature is supported
* only for CDX devices.
* Configuring the device's BUS MASTER setting as CLEAR blocks all incoming DMA
* requests from the device; configuring it as SET (enable) grants the device
* the capability to perform DMA to the host memory.
*/
struct vfio_device_feature_bus_master {
__u32 op;
#define VFIO_DEVICE_FEATURE_CLEAR_MASTER 0 /* Clear Bus Master */
#define VFIO_DEVICE_FEATURE_SET_MASTER 1 /* Set Bus Master */
};
#define VFIO_DEVICE_FEATURE_BUS_MASTER 10
/* -------- API for Type1 VFIO IOMMU -------- */
/**
@ -1449,7 +1472,7 @@ struct vfio_iommu_type1_info {
__u32 flags;
#define VFIO_IOMMU_INFO_PGSIZES (1 << 0) /* supported page sizes info */
#define VFIO_IOMMU_INFO_CAPS (1 << 1) /* Info supports caps */
__u64 iova_pgsizes; /* Bitmap of supported page sizes */
__aligned_u64 iova_pgsizes; /* Bitmap of supported page sizes */
__u32 cap_offset; /* Offset within info struct of first cap */
__u32 pad;
};
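The __u64 to __aligned_u64 swap above is about ABI, not width: on 32-bit x86 a
plain 64-bit integer is only 4-byte aligned, so the same struct can lay out
differently for a 32-bit process than for a 64-bit kernel. A small
illustration (the struct names are ours):

#include <linux/types.h>

struct plain {
    __u32 index;
    __u64 size;          /* i386: offset 4; x86_64: offset 8 */
};

struct fixed {
    __u32 index;
    __aligned_u64 size;  /* offset 8 everywhere */
};

/* sizeof(struct plain) is 12 on i386 but 16 on x86_64;
 * struct fixed is 16 on both, so no compat handling is needed. */
_Static_assert(sizeof(struct fixed) == 16, "stable layout");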


@ -219,4 +219,12 @@
*/
#define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E)
/* Get the group for the descriptor table including driver & device areas
* of a virtqueue: read index, write group in num.
* The virtqueue index is stored in the index field of vhost_vring_state.
* The group ID of the descriptor table for this specific virtqueue
* is returned via num field of vhost_vring_state.
*/
#define VHOST_VDPA_GET_VRING_DESC_GROUP _IOWR(VHOST_VIRTIO, 0x7F, \
struct vhost_vring_state)
#endif
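A usage sketch for the new ioctl, following the comment's in/out convention
(the vDPA device fd is hypothetical):

#include <linux/vhost.h>
#include <sys/ioctl.h>

/* Returns the descriptor-table group ID of virtqueue @vq, or -1. */
static int vring_desc_group(int vdpa_fd, unsigned int vq)
{
    struct vhost_vring_state state = { .index = vq };  /* in: vq index */

    if (ioctl(vdpa_fd, VHOST_VDPA_GET_VRING_DESC_GROUP, &state)) {
        return -1;
    }
    return state.num;                                  /* out: group ID */
}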

@ -1 +1 @@
-Subproject commit 057eb10b6d523540012e6947d5c9f63e95244e94
+Subproject commit a2b255b88918715173942f2c5e1f97ac9e90c877


@ -156,6 +156,9 @@ for arch in $ARCHLIST; do
cp_portable "$tmpdir/bootparam.h" \
"$output/include/standard-headers/asm-$arch"
fi
if [ $arch = riscv ]; then
cp "$tmpdir/include/asm/ptrace.h" "$output/linux-headers/asm-riscv/"
fi
done
rm -rf "$output/linux-headers/linux"


@ -23,6 +23,8 @@
#define TYPE_RISCV_CPU "riscv-cpu"
#define TYPE_RISCV_DYNAMIC_CPU "riscv-dynamic-cpu"
#define TYPE_RISCV_VENDOR_CPU "riscv-vendor-cpu"
#define TYPE_RISCV_BARE_CPU "riscv-bare-cpu"
#define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU
#define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX)
@ -32,6 +34,9 @@
#define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32")
#define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64")
#define TYPE_RISCV_CPU_BASE128 RISCV_CPU_TYPE_NAME("x-rv128")
#define TYPE_RISCV_CPU_RV64I RISCV_CPU_TYPE_NAME("rv64i")
#define TYPE_RISCV_CPU_RVA22U64 RISCV_CPU_TYPE_NAME("rva22u64")
#define TYPE_RISCV_CPU_RVA22S64 RISCV_CPU_TYPE_NAME("rva22s64")
#define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex")
#define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c")
#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31")


@ -53,6 +53,11 @@ const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
#define BYTE(x) (x)
#endif
bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}
#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
{#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
@ -78,6 +83,7 @@ const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
*/
const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
@ -87,6 +93,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
@ -370,6 +377,17 @@ static void set_satp_mode_max_supported(RISCVCPU *cpu,
/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
/*
* Bare CPUs do not default to the max available.
* Users must set a valid satp_mode on the command
* line.
*/
if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
warn_report("No satp mode set. Defaulting to 'bare'");
cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
return;
}
cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif
@ -552,6 +570,28 @@ static void rv128_base_cpu_init(Object *obj)
set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
static void rv64i_bare_cpu_init(Object *obj)
{
CPURISCVState *env = &RISCV_CPU(obj)->env;
riscv_cpu_set_misa(env, MXL_RV64, RVI);
/* Remove the defaults from the parent class */
RISCV_CPU(obj)->cfg.ext_zicntr = false;
RISCV_CPU(obj)->cfg.ext_zihpm = false;
/* Set to QEMU's first supported priv version */
env->priv_ver = PRIV_VERSION_1_10_0;
/*
* Support all available satp_mode settings. The default
* value will be set to MBARE if the user doesn't set
* satp_mode manually (see set_satp_mode_default_map()).
*/
#ifndef CONFIG_USER_ONLY
set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV64);
#endif
}
#else
static void rv32_base_cpu_init(Object *obj)
{
@ -891,6 +931,14 @@ static void riscv_cpu_reset_hold(Object *obj)
/* mmte is supposed to have pm.current hardwired to 1 */
env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
/*
* Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
* extension is enabled.
*/
if (riscv_has_ext(env, RVH)) {
env->mideleg |= HS_MODE_INTERRUPTS;
}
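For reference, a sketch of the mask being ORed in, assuming QEMU's usual MIP
bit definitions from cpu_bits.h:

/* VS-level and guest-external interrupt bits; the OR is 0x1444, i.e.
 * exactly bits 12, 10, 6 and 2 named in the comment above. */
#define MIP_VSSIP (1 << 2)
#define MIP_VSTIP (1 << 6)
#define MIP_VSEIP (1 << 10)
#define MIP_SGEIP (1 << 12)
#define HS_MODE_INTERRUPTS (MIP_SGEIP | MIP_VSEIP | MIP_VSTIP | MIP_VSSIP)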
/*
* Clear mseccfg and unlock all the PMP entries upon reset.
* This is allowed as per the priv and smepmp specifications
@ -943,7 +991,7 @@ static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
bool rv32 = riscv_cpu_is_32bit(cpu);
uint8_t satp_mode_map_max, satp_mode_supported_max;
/* The CPU wants the OS to decide which satp mode to use */
@ -1019,6 +1067,14 @@ void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
Error *local_err = NULL;
#ifndef CONFIG_USER_ONLY
riscv_cpu_satp_mode_finalize(cpu, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
return;
}
#endif
/*
* KVM accel does not have a specialized finalize()
* callback because its extensions are validated
@ -1031,14 +1087,6 @@ void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
return;
}
}
#ifndef CONFIG_USER_ONLY
riscv_cpu_satp_mode_finalize(cpu, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
return;
}
#endif
}
static void riscv_cpu_realize(DeviceState *dev, Error **errp)
@ -1297,6 +1345,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
@ -1340,6 +1389,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
@ -1406,6 +1456,13 @@ const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
DEFINE_PROP_END_OF_LIST(),
};
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
MULTI_EXT_CFG_BOOL("svade", svade, true),
MULTI_EXT_CFG_BOOL("zic64b", zic64b, true),
DEFINE_PROP_END_OF_LIST(),
};
/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
@ -1474,11 +1531,79 @@ Property riscv_cpu_options[] = {
DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
DEFINE_PROP_UINT16("cbop_blocksize", RISCVCPU, cfg.cbop_blocksize, 64),
DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),
DEFINE_PROP_END_OF_LIST(),
};
/*
* RVA22U64 defines some 'named features' or 'synthetic extensions'
* that are cache-related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
* and Zicclsm. We do not implement caching in QEMU, so we'll consider
* all these named features as always enabled.
*
* There's no riscv,isa update for them (nor for zic64b, despite it
* having a cfg offset) at this moment.
*/
static RISCVCPUProfile RVA22U64 = {
.parent = NULL,
.name = "rva22u64",
.misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
.priv_spec = RISCV_PROFILE_ATTR_UNUSED,
.satp_mode = RISCV_PROFILE_ATTR_UNUSED,
.ext_offsets = {
CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
/* mandatory named features for this profile */
CPU_CFG_OFFSET(zic64b),
RISCV_PROFILE_EXT_LIST_END
}
};
/*
* As with RVA22U64, RVA22S64 also defines 'named features'.
*
* Cache-related features that we consider enabled since we don't
* implement caching: Ssccptr
*
* Other named features that we already implement: Sstvecd, Sstvala,
* Sscounterenw
*
* Named features that we need to enable: svade
*
* The remaining features/extensions come from RVA22U64.
*/
static RISCVCPUProfile RVA22S64 = {
.parent = &RVA22U64,
.name = "rva22s64",
.misa_ext = RVS,
.priv_spec = PRIV_VERSION_1_12_0,
.satp_mode = VM_1_10_SV39,
.ext_offsets = {
/* rva22s64 exts */
CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
CPU_CFG_OFFSET(ext_svinval),
/* rva22s64 named features */
CPU_CFG_OFFSET(svade),
RISCV_PROFILE_EXT_LIST_END
}
};
RISCVCPUProfile *riscv_profiles[] = {
&RVA22U64,
&RVA22S64,
NULL,
};
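Each entry in ext_offsets is a byte offset into RISCVCPUConfig (via
CPU_CFG_OFFSET), so applying a profile reduces to a pointer walk. A minimal
sketch of that mechanism, with a helper name of our own; the real enablement
logic lives elsewhere in this series:

static void profile_enable_exts(RISCVCPU *cpu, const RISCVCPUProfile *profile)
{
    /* rva22s64 chains to rva22u64 through ->parent */
    if (profile->parent) {
        profile_enable_exts(cpu, profile->parent);
    }

    for (int i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        bool *ext = (bool *)((char *)&cpu->cfg + profile->ext_offsets[i]);
        *ext = true;   /* flip the bool flag inside RISCVCPUConfig */
    }
}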
static Property riscv_cpu_properties[] = {
DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
@ -1499,6 +1624,22 @@ static Property riscv_cpu_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};
#if defined(TARGET_RISCV64)
static void rva22u64_profile_cpu_init(Object *obj)
{
rv64i_bare_cpu_init(obj);
RVA22U64.enabled = true;
}
static void rva22s64_profile_cpu_init(Object *obj)
{
rv64i_bare_cpu_init(obj);
RVA22S64.enabled = true;
}
#endif
static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
RISCVCPU *cpu = RISCV_CPU(cs);
@ -1570,9 +1711,9 @@ static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
bool value = RISCV_CPU(obj)->cfg.mvendorid;
uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
visit_type_bool(v, name, &value, errp);
visit_type_uint32(v, name, &value, errp);
}
static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
@ -1599,9 +1740,9 @@ static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
bool value = RISCV_CPU(obj)->cfg.mimpid;
uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
visit_type_bool(v, name, &value, errp);
visit_type_uint64(v, name, &value, errp);
}
static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
@ -1649,9 +1790,9 @@ static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
bool value = RISCV_CPU(obj)->cfg.marchid;
uint64_t value = RISCV_CPU(obj)->cfg.marchid;
visit_type_bool(v, name, &value, errp);
visit_type_uint64(v, name, &value, errp);
}
static void riscv_cpu_class_init(ObjectClass *c, void *data)
@ -1746,6 +1887,27 @@ char *riscv_isa_string(RISCVCPU *cpu)
.instance_init = initfn \
}
#define DEFINE_VENDOR_CPU(type_name, initfn) \
{ \
.name = type_name, \
.parent = TYPE_RISCV_VENDOR_CPU, \
.instance_init = initfn \
}
#define DEFINE_BARE_CPU(type_name, initfn) \
{ \
.name = type_name, \
.parent = TYPE_RISCV_BARE_CPU, \
.instance_init = initfn \
}
#define DEFINE_PROFILE_CPU(type_name, initfn) \
{ \
.name = type_name, \
.parent = TYPE_RISCV_BARE_CPU, \
.instance_init = initfn \
}
static const TypeInfo riscv_cpu_type_infos[] = {
{
.name = TYPE_RISCV_CPU,
@ -1763,22 +1925,35 @@ static const TypeInfo riscv_cpu_type_infos[] = {
.parent = TYPE_RISCV_CPU,
.abstract = true,
},
{
.name = TYPE_RISCV_VENDOR_CPU,
.parent = TYPE_RISCV_CPU,
.abstract = true,
},
{
.name = TYPE_RISCV_BARE_CPU,
.parent = TYPE_RISCV_CPU,
.abstract = true,
},
DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, riscv_max_cpu_init),
#if defined(TARGET_RISCV32)
DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init),
DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init),
DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init),
DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init),
DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init),
DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, rv64i_bare_cpu_init),
DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, rva22u64_profile_cpu_init),
DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, rva22s64_profile_cpu_init),
#endif
};


@ -76,6 +76,22 @@ const char *riscv_get_misa_ext_description(uint32_t bit);
#define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
typedef struct riscv_cpu_profile {
struct riscv_cpu_profile *parent;
const char *name;
uint32_t misa_ext;
bool enabled;
bool user_set;
int priv_spec;
int satp_mode;
const int32_t ext_offsets[];
} RISCVCPUProfile;
#define RISCV_PROFILE_EXT_LIST_END -1
#define RISCV_PROFILE_ATTR_UNUSED -1
extern RISCVCPUProfile *riscv_profiles[];
/* Privileged specification version */
enum {
PRIV_VERSION_1_10_0 = 0,
@ -679,6 +695,7 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
uint64_t *cs_base, uint32_t *pflags);
void riscv_cpu_update_mask(CPURISCVState *env);
bool riscv_cpu_is_32bit(RISCVCPU *cpu);
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
target_ulong *ret_value,
@ -765,6 +782,7 @@ typedef struct RISCVCPUMultiExtConfig {
extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[];
extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[];
extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[];
extern Property riscv_cpu_options[];


@ -65,6 +65,7 @@ struct RISCVCPUConfig {
bool ext_zicntr;
bool ext_zicsr;
bool ext_zicbom;
bool ext_zicbop;
bool ext_zicboz;
bool ext_zicond;
bool ext_zihintntl;
@ -77,6 +78,7 @@ struct RISCVCPUConfig {
bool ext_svnapot;
bool ext_svpbmt;
bool ext_zdinx;
bool ext_zacas;
bool ext_zawrs;
bool ext_zfa;
bool ext_zfbfmin;
@ -115,6 +117,8 @@ struct RISCVCPUConfig {
bool ext_smepmp;
bool rvv_ta_all_1s;
bool rvv_ma_all_1s;
bool svade;
bool zic64b;
uint32_t mvendorid;
uint64_t marchid;
@ -142,6 +146,7 @@ struct RISCVCPUConfig {
uint16_t vlen;
uint16_t elen;
uint16_t cbom_blocksize;
uint16_t cbop_blocksize;
uint16_t cboz_blocksize;
bool mmu;
bool pmp;


@ -1749,8 +1749,8 @@ void riscv_cpu_do_interrupt(CPUState *cs)
* See if we need to adjust the cause. Yes if it's a VS-mode interrupt,
* no if the hypervisor has delegated one of HS mode's interrupts
*/
if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
cause == IRQ_VS_EXT) {
if (async && (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
cause == IRQ_VS_EXT)) {
cause = cause - 1;
}
write_gva = false;


@ -195,8 +195,11 @@ static RISCVException mctr(CPURISCVState *env, int csrno)
if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) {
/* Offset for RV32 mhpmcounternh counters */
base_csrno += 0x80;
csrno -= 0x80;
}
g_assert(csrno >= CSR_MHPMCOUNTER3 && csrno <= CSR_MHPMCOUNTER31);
ctr_index = csrno - base_csrno;
if ((BIT(ctr_index) & pmu_avail_ctrs >> 3) == 0) {
/* The PMU is not enabled or counter is out of range */
@ -1328,11 +1331,14 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
MSTATUS_TW | MSTATUS_VS;
MSTATUS_TW;
if (riscv_has_ext(env, RVF)) {
mask |= MSTATUS_FS;
}
if (riscv_has_ext(env, RVV)) {
mask |= MSTATUS_VS;
}
if (xl != MXL_RV32 || env->debugger) {
if (riscv_has_ext(env, RVH)) {


@ -1004,3 +1004,9 @@ vgmul_vv 101000 1 ..... 10001 010 ..... 1110111 @r2_vm_1
vsm4k_vi 100001 1 ..... ..... 010 ..... 1110111 @r_vm_1
vsm4r_vv 101000 1 ..... 10000 010 ..... 1110111 @r2_vm_1
vsm4r_vs 101001 1 ..... 10000 010 ..... 1110111 @r2_vm_1
# *** RV32 Zacas Standard Extension ***
amocas_w 00101 . . ..... ..... 010 ..... 0101111 @atom_st
amocas_d 00101 . . ..... ..... 011 ..... 0101111 @atom_st
# *** RV64 Zacas Standard Extension ***
amocas_q 00101 . . ..... ..... 100 ..... 0101111 @atom_st


@ -3631,19 +3631,19 @@ static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
}
/*
* Whole Vector Register Move Instructions ignore vtype and vl setting.
* Thus, we don't need to check vill bit. (Section 16.6)
* Whole Vector Register Move Instructions depend on the vtype register (vsew).
* Thus, we need to check the vill bit. (Section 16.6)
*/
#define GEN_VMV_WHOLE_TRANS(NAME, LEN) \
static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
{ \
if (require_rvv(s) && \
vext_check_isa_ill(s) && \
QEMU_IS_ALIGNED(a->rd, LEN) && \
QEMU_IS_ALIGNED(a->rs2, LEN)) { \
uint32_t maxsz = (s->cfg_ptr->vlen >> 3) * LEN; \
if (s->vstart_eq_zero) { \
/* EEW = 8 */ \
tcg_gen_gvec_mov(MO_8, vreg_ofs(s, a->rd), \
tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd), \
vreg_ofs(s, a->rs2), maxsz, maxsz); \
mark_vs_dirty(s); \
} else { \


@ -0,0 +1,150 @@
/*
* RISC-V translation routines for the RV64 Zacas Standard Extension.
*
* Copyright (c) 2020-2023 PLCT Lab
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define REQUIRE_ZACAS(ctx) do { \
if (!ctx->cfg_ptr->ext_zacas) { \
return false; \
} \
} while (0)
static bool gen_cmpxchg(DisasContext *ctx, arg_atomic *a, MemOp mop)
{
TCGv dest = get_gpr(ctx, a->rd, EXT_NONE);
TCGv src1 = get_address(ctx, a->rs1, 0);
TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
decode_save_opc(ctx);
tcg_gen_atomic_cmpxchg_tl(dest, src1, dest, src2, ctx->mem_idx, mop);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
static bool trans_amocas_w(DisasContext *ctx, arg_amocas_w *a)
{
REQUIRE_ZACAS(ctx);
return gen_cmpxchg(ctx, a, MO_ALIGN | MO_TESL);
}
static TCGv_i64 get_gpr_pair(DisasContext *ctx, int reg_num)
{
TCGv_i64 t;
assert(get_ol(ctx) == MXL_RV32);
if (reg_num == 0) {
return tcg_constant_i64(0);
}
t = tcg_temp_new_i64();
tcg_gen_concat_tl_i64(t, cpu_gpr[reg_num], cpu_gpr[reg_num + 1]);
return t;
}
static void gen_set_gpr_pair(DisasContext *ctx, int reg_num, TCGv_i64 t)
{
assert(get_ol(ctx) == MXL_RV32);
if (reg_num != 0) {
#ifdef TARGET_RISCV32
tcg_gen_extr_i64_i32(cpu_gpr[reg_num], cpu_gpr[reg_num + 1], t);
#else
tcg_gen_ext32s_i64(cpu_gpr[reg_num], t);
tcg_gen_sari_i64(cpu_gpr[reg_num + 1], t, 32);
#endif
if (get_xl_max(ctx) == MXL_RV128) {
tcg_gen_sari_tl(cpu_gprh[reg_num], cpu_gpr[reg_num], 63);
tcg_gen_sari_tl(cpu_gprh[reg_num + 1], cpu_gpr[reg_num + 1], 63);
}
}
}
static bool gen_cmpxchg64(DisasContext *ctx, arg_atomic *a, MemOp mop)
{
/*
* Encodings with odd numbered registers specified in rs2 and rd are
* reserved.
*/
if ((a->rs2 | a->rd) & 1) {
return false;
}
TCGv_i64 dest = get_gpr_pair(ctx, a->rd);
TCGv src1 = get_address(ctx, a->rs1, 0);
TCGv_i64 src2 = get_gpr_pair(ctx, a->rs2);
decode_save_opc(ctx);
tcg_gen_atomic_cmpxchg_i64(dest, src1, dest, src2, ctx->mem_idx, mop);
gen_set_gpr_pair(ctx, a->rd, dest);
return true;
}
static bool trans_amocas_d(DisasContext *ctx, arg_amocas_d *a)
{
REQUIRE_ZACAS(ctx);
switch (get_ol(ctx)) {
case MXL_RV32:
return gen_cmpxchg64(ctx, a, MO_ALIGN | MO_TEUQ);
case MXL_RV64:
case MXL_RV128:
return gen_cmpxchg(ctx, a, MO_ALIGN | MO_TEUQ);
default:
g_assert_not_reached();
}
}
static bool trans_amocas_q(DisasContext *ctx, arg_amocas_q *a)
{
REQUIRE_ZACAS(ctx);
REQUIRE_64BIT(ctx);
/*
* Encodings with odd numbered registers specified in rs2 and rd are
* reserved.
*/
if ((a->rs2 | a->rd) & 1) {
return false;
}
#ifdef TARGET_RISCV64
TCGv_i128 dest = tcg_temp_new_i128();
TCGv src1 = get_address(ctx, a->rs1, 0);
TCGv_i128 src2 = tcg_temp_new_i128();
TCGv_i64 src2l = get_gpr(ctx, a->rs2, EXT_NONE);
TCGv_i64 src2h = get_gpr(ctx, a->rs2 == 0 ? 0 : a->rs2 + 1, EXT_NONE);
TCGv_i64 destl = get_gpr(ctx, a->rd, EXT_NONE);
TCGv_i64 desth = get_gpr(ctx, a->rd == 0 ? 0 : a->rd + 1, EXT_NONE);
tcg_gen_concat_i64_i128(src2, src2l, src2h);
tcg_gen_concat_i64_i128(dest, destl, desth);
decode_save_opc(ctx);
tcg_gen_atomic_cmpxchg_i128(dest, src1, dest, src2, ctx->mem_idx,
(MO_ALIGN | MO_TEUO));
tcg_gen_extr_i128_i64(destl, desth, dest);
if (a->rd != 0) {
gen_set_gpr(ctx, a->rd, destl);
gen_set_gpr(ctx, a->rd + 1, desth);
}
#endif
return true;
}
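The semantics these translators implement, modeled in plain C for the 32-bit
case (our illustration, not QEMU code): amocas compares rd against memory and,
on a match, stores rs2; the old memory value always ends up in rd.

#include <stdint.h>

static inline uint32_t amocas_w_model(uint32_t *mem, uint32_t rd, uint32_t rs2)
{
    uint32_t expected = rd;

    /* On failure, __atomic_compare_exchange_n rewrites 'expected' with the
     * value actually observed, so 'expected' holds the old memory contents
     * either way, matching tcg_gen_atomic_cmpxchg_tl above. */
    __atomic_compare_exchange_n(mem, &expected, rs2, false,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return expected;
}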


@ -296,7 +296,7 @@ NOP_PRIVCHECK(th_dcache_csw, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_cisw, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_isw, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_cpal1, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_cval1, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_cval1, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MSU)
NOP_PRIVCHECK(th_icache_iall, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_icache_ialls, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)


@ -18,6 +18,7 @@
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/prctl.h>
#include <linux/kvm.h>
@ -47,6 +48,9 @@
#include "sysemu/runstate.h"
#include "hw/riscv/numa.h"
#define PR_RISCV_V_SET_CONTROL 69
#define PR_RISCV_V_VSTATE_CTRL_ON 2
void riscv_kvm_aplic_request(void *opaque, int irq, int level)
{
kvm_set_irq(kvm_state, irq, !!level);
@ -54,7 +58,7 @@ void riscv_kvm_aplic_request(void *opaque, int irq, int level)
static bool cap_has_mp_state;
static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
static uint64_t kvm_riscv_reg_id_ulong(CPURISCVState *env, uint64_t type,
uint64_t idx)
{
uint64_t id = KVM_REG_RISCV | type | idx;
@ -72,18 +76,38 @@ static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
return id;
}
#define RISCV_CORE_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, \
KVM_REG_RISCV_CORE_REG(name))
static uint64_t kvm_riscv_reg_id_u32(uint64_t type, uint64_t idx)
{
return KVM_REG_RISCV | KVM_REG_SIZE_U32 | type | idx;
}
#define RISCV_CSR_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CSR, \
KVM_REG_RISCV_CSR_REG(name))
static uint64_t kvm_riscv_reg_id_u64(uint64_t type, uint64_t idx)
{
return KVM_REG_RISCV | KVM_REG_SIZE_U64 | type | idx;
}
#define RISCV_TIMER_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_TIMER, \
#define RISCV_CORE_REG(env, name) \
kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, \
KVM_REG_RISCV_CORE_REG(name))
#define RISCV_CSR_REG(env, name) \
kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CSR, \
KVM_REG_RISCV_CSR_REG(name))
#define RISCV_CONFIG_REG(env, name) \
kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, \
KVM_REG_RISCV_CONFIG_REG(name))
#define RISCV_TIMER_REG(name) kvm_riscv_reg_id_u64(KVM_REG_RISCV_TIMER, \
KVM_REG_RISCV_TIMER_REG(name))
#define RISCV_FP_F_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_F, idx)
#define RISCV_FP_F_REG(idx) kvm_riscv_reg_id_u32(KVM_REG_RISCV_FP_F, idx)
#define RISCV_FP_D_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_D, idx)
#define RISCV_FP_D_REG(idx) kvm_riscv_reg_id_u64(KVM_REG_RISCV_FP_D, idx)
#define RISCV_VECTOR_CSR_REG(env, name) \
kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_VECTOR, \
KVM_REG_RISCV_VECTOR_CSR_REG(name))
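With the split helpers above, each register class now pins its size field
instead of following the host's unsigned long. A worked expansion as a sketch,
assuming the standard KVM size encoding (log2 of the byte width, shifted by
KVM_REG_SIZE_SHIFT):

/*
 * RISCV_TIMER_REG(time) ORs together:
 *   KVM_REG_RISCV                 0x8000000000000000ULL      (arch field)
 *   KVM_REG_SIZE_U64              3ULL << KVM_REG_SIZE_SHIFT (8-byte reg)
 *   KVM_REG_RISCV_TIMER           0x04 << KVM_REG_RISCV_TYPE_SHIFT
 *   KVM_REG_RISCV_TIMER_REG(time) offsetof(struct kvm_riscv_timer, time)
 *                                     / sizeof(__u64)
 */
uint64_t id = kvm_riscv_reg_id_u64(KVM_REG_RISCV_TIMER,
                                   KVM_REG_RISCV_TIMER_REG(time));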
#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
do { \
@ -101,17 +125,17 @@ static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
} \
} while (0)
#define KVM_RISCV_GET_TIMER(cs, env, name, reg) \
#define KVM_RISCV_GET_TIMER(cs, name, reg) \
do { \
int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(env, name), &reg); \
int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(name), &reg); \
if (ret) { \
abort(); \
} \
} while (0)
#define KVM_RISCV_SET_TIMER(cs, env, name, reg) \
#define KVM_RISCV_SET_TIMER(cs, name, reg) \
do { \
int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(env, name), &reg); \
int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(name), &reg); \
if (ret) { \
abort(); \
} \
@ -138,6 +162,7 @@ static KVMCPUConfig kvm_misa_ext_cfgs[] = {
KVM_MISA_CFG(RVH, KVM_RISCV_ISA_EXT_H),
KVM_MISA_CFG(RVI, KVM_RISCV_ISA_EXT_I),
KVM_MISA_CFG(RVM, KVM_RISCV_ISA_EXT_M),
KVM_MISA_CFG(RVV, KVM_RISCV_ISA_EXT_V),
};
static void kvm_cpu_get_misa_ext_cfg(Object *obj, Visitor *v,
@ -202,8 +227,8 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
/* If we're here we're going to disable the MISA bit */
reg = 0;
id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT,
misa_cfg->kvm_reg_id);
id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
misa_cfg->kvm_reg_id);
ret = kvm_set_one_reg(cs, id, &reg);
if (ret != 0) {
/*
@ -364,8 +389,8 @@ static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
continue;
}
id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT,
multi_ext_cfg->kvm_reg_id);
id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
multi_ext_cfg->kvm_reg_id);
reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
ret = kvm_set_one_reg(cs, id, &reg);
if (ret != 0) {
@ -398,7 +423,7 @@ static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
}
if (value) {
error_setg(errp, "extension %s is not available with KVM",
error_setg(errp, "'%s' is not available with KVM",
propname);
}
}
@ -479,6 +504,11 @@ static void kvm_riscv_add_cpu_user_properties(Object *cpu_obj)
riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_extensions);
riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_vendor_exts);
riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_experimental_exts);
/* We don't have the needed KVM support for profiles */
for (i = 0; riscv_profiles[i] != NULL; i++) {
riscv_cpu_add_kvm_unavail_prop(cpu_obj, riscv_profiles[i]->name);
}
}
static int kvm_riscv_get_regs_core(CPUState *cs)
@ -495,7 +525,7 @@ static int kvm_riscv_get_regs_core(CPUState *cs)
env->pc = reg;
for (i = 1; i < 32; i++) {
uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i);
ret = kvm_get_one_reg(cs, id, &reg);
if (ret) {
return ret;
@ -520,7 +550,7 @@ static int kvm_riscv_put_regs_core(CPUState *cs)
}
for (i = 1; i < 32; i++) {
uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i);
reg = env->gpr[i];
ret = kvm_set_one_reg(cs, id, &reg);
if (ret) {
@ -574,7 +604,7 @@ static int kvm_riscv_get_regs_fp(CPUState *cs)
if (riscv_has_ext(env, RVD)) {
uint64_t reg;
for (i = 0; i < 32; i++) {
ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(i), &reg);
if (ret) {
return ret;
}
@ -586,7 +616,7 @@ static int kvm_riscv_get_regs_fp(CPUState *cs)
if (riscv_has_ext(env, RVF)) {
uint32_t reg;
for (i = 0; i < 32; i++) {
ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(i), &reg);
if (ret) {
return ret;
}
@ -608,7 +638,7 @@ static int kvm_riscv_put_regs_fp(CPUState *cs)
uint64_t reg;
for (i = 0; i < 32; i++) {
reg = env->fpr[i];
ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(i), &reg);
if (ret) {
return ret;
}
@ -620,7 +650,7 @@ static int kvm_riscv_put_regs_fp(CPUState *cs)
uint32_t reg;
for (i = 0; i < 32; i++) {
reg = env->fpr[i];
ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(i), &reg);
if (ret) {
return ret;
}
@ -639,10 +669,10 @@ static void kvm_riscv_get_regs_timer(CPUState *cs)
return;
}
KVM_RISCV_GET_TIMER(cs, env, time, env->kvm_timer_time);
KVM_RISCV_GET_TIMER(cs, env, compare, env->kvm_timer_compare);
KVM_RISCV_GET_TIMER(cs, env, state, env->kvm_timer_state);
KVM_RISCV_GET_TIMER(cs, env, frequency, env->kvm_timer_frequency);
KVM_RISCV_GET_TIMER(cs, time, env->kvm_timer_time);
KVM_RISCV_GET_TIMER(cs, compare, env->kvm_timer_compare);
KVM_RISCV_GET_TIMER(cs, state, env->kvm_timer_state);
KVM_RISCV_GET_TIMER(cs, frequency, env->kvm_timer_frequency);
env->kvm_timer_dirty = true;
}
@ -656,8 +686,8 @@ static void kvm_riscv_put_regs_timer(CPUState *cs)
return;
}
KVM_RISCV_SET_TIMER(cs, env, time, env->kvm_timer_time);
KVM_RISCV_SET_TIMER(cs, env, compare, env->kvm_timer_compare);
KVM_RISCV_SET_TIMER(cs, time, env->kvm_timer_time);
KVM_RISCV_SET_TIMER(cs, compare, env->kvm_timer_compare);
/*
* Setting the RISCV_TIMER_REG(state) register will trigger an error from KVM
@ -666,7 +696,7 @@ static void kvm_riscv_put_regs_timer(CPUState *cs)
* TODO If KVM changes, adapt here.
*/
if (env->kvm_timer_state) {
KVM_RISCV_SET_TIMER(cs, env, state, env->kvm_timer_state);
KVM_RISCV_SET_TIMER(cs, state, env->kvm_timer_state);
}
/*
@ -675,7 +705,7 @@ static void kvm_riscv_put_regs_timer(CPUState *cs)
* during the migration.
*/
if (migration_is_running(migrate_get_current()->state)) {
KVM_RISCV_GET_TIMER(cs, env, frequency, reg);
KVM_RISCV_GET_TIMER(cs, frequency, reg);
if (reg != env->kvm_timer_frequency) {
error_report("Dst Hosts timer frequency != Src Hosts");
}
@ -684,6 +714,65 @@ static void kvm_riscv_put_regs_timer(CPUState *cs)
env->kvm_timer_dirty = false;
}
static int kvm_riscv_get_regs_vector(CPUState *cs)
{
CPURISCVState *env = &RISCV_CPU(cs)->env;
target_ulong reg;
int ret = 0;
if (!riscv_has_ext(env, RVV)) {
return 0;
}
ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), &reg);
if (ret) {
return ret;
}
env->vstart = reg;
ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), &reg);
if (ret) {
return ret;
}
env->vl = reg;
ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), &reg);
if (ret) {
return ret;
}
env->vtype = reg;
return 0;
}
static int kvm_riscv_put_regs_vector(CPUState *cs)
{
CPURISCVState *env = &RISCV_CPU(cs)->env;
target_ulong reg;
int ret = 0;
if (!riscv_has_ext(env, RVV)) {
return 0;
}
reg = env->vstart;
ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), &reg);
if (ret) {
return ret;
}
reg = env->vl;
ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), &reg);
if (ret) {
return ret;
}
reg = env->vtype;
ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), &reg);
return ret;
}
typedef struct KVMScratchCPU {
int kvmfd;
int vmfd;
@ -746,24 +835,21 @@ static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
struct kvm_one_reg reg;
int ret;
reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
KVM_REG_RISCV_CONFIG_REG(mvendorid));
reg.id = RISCV_CONFIG_REG(env, mvendorid);
reg.addr = (uint64_t)&cpu->cfg.mvendorid;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
error_report("Unable to retrieve mvendorid from host, error %d", ret);
}
reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
KVM_REG_RISCV_CONFIG_REG(marchid));
reg.id = RISCV_CONFIG_REG(env, marchid);
reg.addr = (uint64_t)&cpu->cfg.marchid;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
error_report("Unable to retrieve marchid from host, error %d", ret);
}
reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
KVM_REG_RISCV_CONFIG_REG(mimpid));
reg.id = RISCV_CONFIG_REG(env, mimpid);
reg.addr = (uint64_t)&cpu->cfg.mimpid;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
@ -778,8 +864,7 @@ static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu,
struct kvm_one_reg reg;
int ret;
reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
KVM_REG_RISCV_CONFIG_REG(isa));
reg.id = RISCV_CONFIG_REG(env, isa);
reg.addr = (uint64_t)&env->misa_ext_mask;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
@ -800,8 +885,8 @@ static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
struct kvm_one_reg reg;
int ret;
reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
cbomz_cfg->kvm_reg_id);
reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG,
cbomz_cfg->kvm_reg_id);
reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg);
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
@ -822,8 +907,8 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
struct kvm_one_reg reg;
reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT,
multi_ext_cfg->kvm_reg_id);
reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
multi_ext_cfg->kvm_reg_id);
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
@ -914,8 +999,8 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
multi_ext_cfg = &kvm_multi_ext_cfgs[i];
reg_id = kvm_riscv_reg_id(&cpu->env, KVM_REG_RISCV_ISA_EXT,
multi_ext_cfg->kvm_reg_id);
reg_id = kvm_riscv_reg_id_ulong(&cpu->env, KVM_REG_RISCV_ISA_EXT,
multi_ext_cfg->kvm_reg_id);
reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
sizeof(uint64_t), uint64_cmp);
if (!reg_search) {
@ -983,6 +1068,11 @@ int kvm_arch_get_registers(CPUState *cs)
return ret;
}
ret = kvm_riscv_get_regs_vector(cs);
if (ret) {
return ret;
}
return ret;
}
@ -1023,6 +1113,11 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
ret = kvm_riscv_put_regs_vector(cs);
if (ret) {
return ret;
}
if (KVM_PUT_RESET_STATE == level) {
RISCVCPU *cpu = RISCV_CPU(cs);
if (cs->cpu_index == 0) {
@ -1082,8 +1177,7 @@ static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
uint64_t id;
int ret;
id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
KVM_REG_RISCV_CONFIG_REG(mvendorid));
id = RISCV_CONFIG_REG(env, mvendorid);
/*
* cfg.mvendorid is a uint32 but a target_ulong will
* be written. Assign it to a target_ulong var to avoid
@ -1095,15 +1189,13 @@ static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
return ret;
}
id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
KVM_REG_RISCV_CONFIG_REG(marchid));
id = RISCV_CONFIG_REG(env, marchid);
ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid);
if (ret != 0) {
return ret;
}
id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
KVM_REG_RISCV_CONFIG_REG(mimpid));
id = RISCV_CONFIG_REG(env, mimpid);
ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid);
return ret;
@ -1376,21 +1468,24 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
exit(1);
}
socket_bits = find_last_bit(&socket_count, BITS_PER_LONG) + 1;
ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS,
&socket_bits, true, NULL);
if (ret < 0) {
error_report("KVM AIA: failed to set group_bits");
exit(1);
}
ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT,
&group_shift, true, NULL);
if (ret < 0) {
error_report("KVM AIA: failed to set group_shift");
exit(1);
if (socket_count > 1) {
socket_bits = find_last_bit(&socket_count, BITS_PER_LONG) + 1;
ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS,
&socket_bits, true, NULL);
if (ret < 0) {
error_report("KVM AIA: failed to set group_bits");
exit(1);
}
ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT,
&group_shift, true, NULL);
if (ret < 0) {
error_report("KVM AIA: failed to set group_shift");
exit(1);
}
}
guest_bits = guest_num == 0 ? 0 :
@ -1479,11 +1574,36 @@ static void kvm_cpu_instance_init(CPUState *cs)
}
}
/*
* We'll get here via the following path:
*
* riscv_cpu_realize()
* -> cpu_exec_realizefn()
* -> kvm_cpu_realize() (via accel_cpu_common_realize())
*/
static bool kvm_cpu_realize(CPUState *cs, Error **errp)
{
RISCVCPU *cpu = RISCV_CPU(cs);
int ret;
if (riscv_has_ext(&cpu->env, RVV)) {
ret = prctl(PR_RISCV_V_SET_CONTROL, PR_RISCV_V_VSTATE_CTRL_ON);
if (ret) {
error_setg(errp, "Error in prctl PR_RISCV_V_SET_CONTROL, code: %s",
strerrorname_np(errno));
return false;
}
}
return true;
}
static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data)
{
AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
acc->cpu_instance_init = kvm_cpu_instance_init;
acc->cpu_target_realize = kvm_cpu_realize;
}
static const TypeInfo kvm_cpu_accel_type_info = {


@ -126,7 +126,7 @@ static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
/* If !mseccfg.MML then ignore writes with encoding RW=01 */
if ((val & PMP_WRITE) && !(val & PMP_READ) &&
!MSECCFG_MML_ISSET(env)) {
val &= ~(PMP_WRITE | PMP_READ);
return false;
}
env->pmp_state.pmp[pmp_index].cfg_reg = val;
pmp_update_rule_addr(env, pmp_index);
@ -150,8 +150,7 @@ void pmp_unlock_entries(CPURISCVState *env)
}
}
static void pmp_decode_napot(target_ulong a, target_ulong *sa,
target_ulong *ea)
static void pmp_decode_napot(hwaddr a, hwaddr *sa, hwaddr *ea)
{
/*
* aaaa...aaa0 8-byte NAPOT range
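A worked sketch of the NAPOT decode the comment begins to describe, mirroring
pmp_decode_napot's logic (the all-ones, full-address-space encoding would need
special handling):

#include <stdint.h>

static void napot_decode(uint64_t a, uint64_t *sa, uint64_t *ea)
{
    int t1 = __builtin_ctzll(~a);              /* count trailing 1 bits */
    uint64_t base = (a & ~((1ULL << t1) - 1)) << 2;

    *sa = base;
    *ea = base + (1ULL << (t1 + 3)) - 1;       /* 2^(t1+3) byte range */
}
/* a = yyyy...yyy0: t1 = 0, an 8-byte range, as the comment above says;
 * each extra trailing 1 bit doubles the range. */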
@ -173,8 +172,8 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
target_ulong prev_addr = 0u;
target_ulong sa = 0u;
target_ulong ea = 0u;
hwaddr sa = 0u;
hwaddr ea = 0u;
if (pmp_index >= 1u) {
prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
@ -227,8 +226,7 @@ void pmp_update_rule_nums(CPURISCVState *env)
}
}
static int pmp_is_in_range(CPURISCVState *env, int pmp_index,
target_ulong addr)
static int pmp_is_in_range(CPURISCVState *env, int pmp_index, hwaddr addr)
{
int result = 0;
@ -305,14 +303,14 @@ static bool pmp_hart_has_privs_default(CPURISCVState *env, pmp_priv_t privs,
* Return true if a pmp rule match or default match
* Return false if no match
*/
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
target_ulong size, pmp_priv_t privs,
pmp_priv_t *allowed_privs, target_ulong mode)
{
int i = 0;
int pmp_size = 0;
target_ulong s = 0;
target_ulong e = 0;
hwaddr s = 0;
hwaddr e = 0;
/* Short cut if no rules */
if (0 == pmp_get_num_rules(env)) {
@ -624,12 +622,12 @@ target_ulong mseccfg_csr_read(CPURISCVState *env)
* To avoid this we return a size of 1 (which means no caching) if the PMP
* region only covers partial of the TLB page.
*/
target_ulong pmp_get_tlb_size(CPURISCVState *env, target_ulong addr)
target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
{
target_ulong pmp_sa;
target_ulong pmp_ea;
target_ulong tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
hwaddr pmp_sa;
hwaddr pmp_ea;
hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
int i;
/*


@ -53,8 +53,8 @@ typedef struct {
} pmp_entry_t;
typedef struct {
target_ulong sa;
target_ulong ea;
hwaddr sa;
hwaddr ea;
} pmp_addr_t;
typedef struct {
@ -73,11 +73,11 @@ target_ulong mseccfg_csr_read(CPURISCVState *env);
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
target_ulong val);
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
target_ulong size, pmp_priv_t privs,
pmp_priv_t *allowed_privs,
target_ulong mode);
-target_ulong pmp_get_tlb_size(CPURISCVState *env, target_ulong addr);
+target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr);
void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index);
void pmp_update_rule_nums(CPURISCVState *env);
uint32_t pmp_get_num_rules(CPURISCVState *env);
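
An aside on the conversion above: moving to hwaddr leaves the NAPOT decode trick untouched. pmpaddr holds the physical address shifted right by two, and the trailing 01...1 run encodes the power-of-two size; re-expanding the value and adding one clears or extends that run in one step. A minimal standalone sketch of the trick (illustrative only; decode_napot is a local stand-in for pmp_decode_napot):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Decode a NAPOT-encoded pmpaddr into start/end physical addresses. */
static void decode_napot(uint64_t pmpaddr, uint64_t *sa, uint64_t *ea)
{
    uint64_t a = (pmpaddr << 2) | 0x3;  /* undo the >>2 encoding, refill the low ones */

    *sa = a & (a + 1);                  /* clear the trailing run of ones -> base */
    *ea = a | (a + 1);                  /* extend the run by one bit -> last byte */
}

int main(void)
{
    uint64_t sa, ea;

    /* pmpaddr 0x2001 matches the yyyy...y01 pattern: 16 bytes at 0x8000 */
    decode_napot(0x2001, &sa, &ea);
    printf("sa=0x%" PRIx64 " ea=0x%" PRIx64 "\n", sa, ea);  /* 0x8000..0x800f */
    return 0;
}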

[...]

@@ -26,6 +26,7 @@
#include "qapi/error.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/qmp/qbool.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qobject-input-visitor.h"
@@ -98,6 +99,35 @@ static void riscv_obj_add_multiext_props(Object *obj, QDict *qdict_out,
}
}
static void riscv_obj_add_named_feats_qdict(Object *obj, QDict *qdict_out)
{
const RISCVCPUMultiExtConfig *named_cfg;
RISCVCPU *cpu = RISCV_CPU(obj);
QObject *value;
bool flag_val;
for (int i = 0; riscv_cpu_named_features[i].name != NULL; i++) {
named_cfg = &riscv_cpu_named_features[i];
flag_val = isa_ext_is_enabled(cpu, named_cfg->offset);
value = QOBJECT(qbool_from_bool(flag_val));
qdict_put_obj(qdict_out, named_cfg->name, value);
}
}
static void riscv_obj_add_profiles_qdict(Object *obj, QDict *qdict_out)
{
RISCVCPUProfile *profile;
QObject *value;
for (int i = 0; riscv_profiles[i] != NULL; i++) {
profile = riscv_profiles[i];
value = QOBJECT(qbool_from_bool(profile->enabled));
qdict_put_obj(qdict_out, profile->name, value);
}
}
static void riscv_cpuobj_validate_qdict_in(Object *obj, QObject *props,
const QDict *qdict_in,
Error **errp)
@@ -128,11 +158,6 @@ static void riscv_cpuobj_validate_qdict_in(Object *obj, QObject *props,
goto err;
}
-    riscv_cpu_finalize_features(RISCV_CPU(obj), &local_err);
-    if (local_err) {
-        goto err;
-    }
visit_end_struct(visitor, NULL);
err:
@@ -190,6 +215,13 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type,
}
}
+    riscv_cpu_finalize_features(RISCV_CPU(obj), &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        object_unref(obj);
+        return NULL;
+    }
expansion_info = g_new0(CpuModelExpansionInfo, 1);
expansion_info->model = g_malloc0(sizeof(*expansion_info->model));
expansion_info->model->name = g_strdup(model->name);
@@ -199,6 +231,8 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type,
riscv_obj_add_multiext_props(obj, qdict_out, riscv_cpu_extensions);
riscv_obj_add_multiext_props(obj, qdict_out, riscv_cpu_experimental_exts);
riscv_obj_add_multiext_props(obj, qdict_out, riscv_cpu_vendor_exts);
+    riscv_obj_add_named_feats_qdict(obj, qdict_out);
+    riscv_obj_add_profiles_qdict(obj, qdict_out);
/* Add our CPU boolean options too */
riscv_obj_add_qdict_prop(obj, qdict_out, "mmu");
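
With the two helpers above wired into qmp_query_cpu_model_expansion(), a full expansion — e.g. {"execute": "query-cpu-model-expansion", "arguments": {"type": "full", "model": {"name": "rv64"}}} over QMP — should now report named features and profiles next to the extension booleans, i.e. extra props keys such as "zic64b" and "rva22u64" (names as registered in riscv_cpu_named_features and riscv_profiles; the exact key set depends on the build, so treat this as an illustration).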

[...]

@@ -34,6 +34,7 @@
/* Hash that stores user set extensions */
static GHashTable *multi_ext_user_opts;
+static GHashTable *misa_ext_user_opts;
static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
@@ -41,6 +42,52 @@ static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
GUINT_TO_POINTER(ext_offset));
}
static bool cpu_misa_ext_is_user_set(uint32_t misa_bit)
{
return g_hash_table_contains(misa_ext_user_opts,
GUINT_TO_POINTER(misa_bit));
}
static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value)
{
g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset),
(gpointer)value);
}
static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value)
{
g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit),
(gpointer)value);
}
static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit,
bool enabled)
{
CPURISCVState *env = &cpu->env;
if (enabled) {
env->misa_ext |= bit;
env->misa_ext_mask |= bit;
} else {
env->misa_ext &= ~bit;
env->misa_ext_mask &= ~bit;
}
}
static const char *cpu_priv_ver_to_str(int priv_ver)
{
switch (priv_ver) {
case PRIV_VERSION_1_10_0:
return "v1.10.0";
case PRIV_VERSION_1_11_0:
return "v1.11.0";
case PRIV_VERSION_1_12_0:
return "v1.12.0";
}
g_assert_not_reached();
}
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -114,6 +161,79 @@ static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
g_assert_not_reached();
}
static const char *cpu_cfg_ext_get_name(uint32_t ext_offset)
{
const RISCVCPUMultiExtConfig *feat;
const RISCVIsaExtData *edata;
for (edata = isa_edata_arr; edata->name != NULL; edata++) {
if (edata->ext_enable_offset == ext_offset) {
return edata->name;
}
}
for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
if (feat->offset == ext_offset) {
return feat->name;
}
}
g_assert_not_reached();
}
static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
{
const RISCVCPUMultiExtConfig *feat;
for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
if (feat->offset == ext_offset) {
return true;
}
}
return false;
}
static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
{
switch (feat_offset) {
case CPU_CFG_OFFSET(zic64b):
cpu->cfg.cbom_blocksize = 64;
cpu->cfg.cbop_blocksize = 64;
cpu->cfg.cboz_blocksize = 64;
break;
case CPU_CFG_OFFSET(svade):
cpu->cfg.ext_svadu = false;
break;
default:
g_assert_not_reached();
}
}
static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
uint32_t ext_offset)
{
int ext_priv_ver;
if (env->priv_ver == PRIV_VERSION_LATEST) {
return;
}
if (cpu_cfg_offset_is_named_feat(ext_offset)) {
return;
}
ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);
if (env->priv_ver < ext_priv_ver) {
/*
* Note: the 'priv_spec' command line option, if present,
* will take precedence over this priv_ver bump.
*/
env->priv_ver = ext_priv_ver;
}
}
static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
bool value)
{
@@ -273,6 +393,55 @@ static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
}
}
static void riscv_cpu_update_named_features(RISCVCPU *cpu)
{
cpu->cfg.zic64b = cpu->cfg.cbom_blocksize == 64 &&
cpu->cfg.cbop_blocksize == 64 &&
cpu->cfg.cboz_blocksize == 64;
cpu->cfg.svade = !cpu->cfg.ext_svadu;
}
static void riscv_cpu_validate_g(RISCVCPU *cpu)
{
const char *warn_msg = "RVG mandates disabled extension %s";
uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD};
bool send_warn = cpu_misa_ext_is_user_set(RVG);
for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) {
uint32_t bit = g_misa_bits[i];
if (riscv_has_ext(&cpu->env, bit)) {
continue;
}
if (!cpu_misa_ext_is_user_set(bit)) {
riscv_cpu_write_misa_bit(cpu, bit, true);
continue;
}
if (send_warn) {
warn_report(warn_msg, riscv_get_misa_ext_name(bit));
}
}
if (!cpu->cfg.ext_zicsr) {
if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) {
cpu->cfg.ext_zicsr = true;
} else if (send_warn) {
warn_report(warn_msg, "zicsr");
}
}
if (!cpu->cfg.ext_zifencei) {
if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) {
cpu->cfg.ext_zifencei = true;
} else if (send_warn) {
warn_report(warn_msg, "zifencei");
}
}
}
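
Net effect: RVG handling moves out of riscv_cpu_validate_set_extensions() (see the removed block below) and into riscv_cpu_validate_g() above. Mandated pieces the user never touched are still enabled implicitly, but an explicit opt-out — say -cpu rv64,g=true,f=false — now keeps the user's choice and emits the "RVG mandates disabled extension" warning, where the old code either errored out (Zicsr/Zifencei) or silently overrode the MISA bits.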
/*
* Check consistency between chosen extensions while setting
* cpu->cfg accordingly.
@@ -282,31 +451,8 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
CPURISCVState *env = &cpu->env;
Error *local_err = NULL;
-    /* Do some ISA extension error checking */
-    if (riscv_has_ext(env, RVG) &&
-        !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) &&
-          riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) &&
-          riscv_has_ext(env, RVD) &&
-          cpu->cfg.ext_zicsr && cpu->cfg.ext_zifencei)) {
-
-        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr)) &&
-            !cpu->cfg.ext_zicsr) {
-            error_setg(errp, "RVG requires Zicsr but user set Zicsr to false");
-            return;
-        }
-
-        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei)) &&
-            !cpu->cfg.ext_zifencei) {
-            error_setg(errp, "RVG requires Zifencei but user set "
-                       "Zifencei to false");
-            return;
-        }
-
-        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zicsr), true);
-        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zifencei), true);
-
-        env->misa_ext |= RVI | RVM | RVA | RVF | RVD;
-        env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD;
-    }
+    if (riscv_has_ext(env, RVG)) {
+        riscv_cpu_validate_g(cpu);
+    }
if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
@@ -343,6 +489,11 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
return;
}
+    if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) {
+        error_setg(errp, "Zacas extension requires A extension");
+        return;
+    }
if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
error_setg(errp, "Zawrs extension requires A extension");
return;
@@ -620,6 +771,106 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
riscv_cpu_disable_priv_spec_isa_exts(cpu);
}
#ifndef CONFIG_USER_ONLY
static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
RISCVCPUProfile *profile,
bool send_warn)
{
int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
if (profile->satp_mode > satp_max) {
if (send_warn) {
bool is_32bit = riscv_cpu_is_32bit(cpu);
const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit);
const char *cur_satp = satp_mode_str(satp_max, is_32bit);
warn_report("Profile %s requires satp mode %s, "
"but satp mode %s was set", profile->name,
req_satp, cur_satp);
}
return false;
}
return true;
}
#endif
static void riscv_cpu_validate_profile(RISCVCPU *cpu,
RISCVCPUProfile *profile)
{
CPURISCVState *env = &cpu->env;
const char *warn_msg = "Profile %s mandates disabled extension %s";
bool send_warn = profile->user_set && profile->enabled;
bool parent_enabled, profile_impl = true;
int i;
#ifndef CONFIG_USER_ONLY
if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
profile_impl = riscv_cpu_validate_profile_satp(cpu, profile,
send_warn);
}
#endif
if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
profile->priv_spec != env->priv_ver) {
profile_impl = false;
if (send_warn) {
warn_report("Profile %s requires priv spec %s, "
"but priv ver %s was set", profile->name,
cpu_priv_ver_to_str(profile->priv_spec),
cpu_priv_ver_to_str(env->priv_ver));
}
}
for (i = 0; misa_bits[i] != 0; i++) {
uint32_t bit = misa_bits[i];
if (!(profile->misa_ext & bit)) {
continue;
}
if (!riscv_has_ext(&cpu->env, bit)) {
profile_impl = false;
if (send_warn) {
warn_report(warn_msg, profile->name,
riscv_get_misa_ext_name(bit));
}
}
}
for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
int ext_offset = profile->ext_offsets[i];
if (!isa_ext_is_enabled(cpu, ext_offset)) {
profile_impl = false;
if (send_warn) {
warn_report(warn_msg, profile->name,
cpu_cfg_ext_get_name(ext_offset));
}
}
}
profile->enabled = profile_impl;
if (profile->parent != NULL) {
parent_enabled = object_property_get_bool(OBJECT(cpu),
profile->parent->name,
NULL);
profile->enabled = profile->enabled && parent_enabled;
}
}
static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
{
for (int i = 0; riscv_profiles[i] != NULL; i++) {
riscv_cpu_validate_profile(cpu, riscv_profiles[i]);
}
}
void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
CPURISCVState *env = &cpu->env;
@@ -637,6 +888,9 @@ void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
return;
}
+    riscv_cpu_update_named_features(cpu);
+    riscv_cpu_validate_profiles(cpu);
if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
/*
* Enhanced PMP should only be available
@@ -663,6 +917,11 @@ static bool riscv_cpu_is_generic(Object *cpu_obj)
return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}
static bool riscv_cpu_is_vendor(Object *cpu_obj)
{
return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}
/*
* We'll get here via the following path:
*
@@ -731,13 +990,15 @@ static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
target_ulong misa_bit = misa_ext_cfg->misa_bit;
RISCVCPU *cpu = RISCV_CPU(obj);
CPURISCVState *env = &cpu->env;
-    bool generic_cpu = riscv_cpu_is_generic(obj);
+    bool vendor_cpu = riscv_cpu_is_vendor(obj);
bool prev_val, value;
if (!visit_type_bool(v, name, &value, errp)) {
return;
}
+    cpu_misa_ext_add_user_opt(misa_bit, value);
prev_val = env->misa_ext & misa_bit;
if (value == prev_val) {
@@ -745,19 +1006,23 @@ static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
}
if (value) {
-        if (!generic_cpu) {
+        if (vendor_cpu) {
g_autofree char *cpuname = riscv_cpu_get_name(cpu);
error_setg(errp, "'%s' CPU does not allow enabling extensions",
cpuname);
return;
}
-        env->misa_ext |= misa_bit;
-        env->misa_ext_mask |= misa_bit;
-    } else {
-        env->misa_ext &= ~misa_bit;
-        env->misa_ext_mask &= ~misa_bit;
+        if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) {
+            /*
+             * Note: the 'priv_spec' command line option, if present,
+             * will take precedence over this priv_ver bump.
+             */
+            env->priv_ver = PRIV_VERSION_1_12_0;
+        }
    }
+
+    riscv_cpu_write_misa_bit(cpu, misa_bit, value);
}
static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
@@ -821,7 +1086,116 @@ static void riscv_cpu_add_misa_properties(Object *cpu_obj)
NULL, (void *)misa_cfg);
object_property_set_description(cpu_obj, name, desc);
if (use_def_vals) {
-            object_property_set_bool(cpu_obj, name, misa_cfg->enabled, NULL);
+            riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit,
+                                     misa_cfg->enabled);
}
}
}
static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
RISCVCPUProfile *profile = opaque;
RISCVCPU *cpu = RISCV_CPU(obj);
bool value;
int i, ext_offset;
if (riscv_cpu_is_vendor(obj)) {
error_setg(errp, "Profile %s is not available for vendor CPUs",
profile->name);
return;
}
if (cpu->env.misa_mxl != MXL_RV64) {
error_setg(errp, "Profile %s only available for 64 bit CPUs",
profile->name);
return;
}
if (!visit_type_bool(v, name, &value, errp)) {
return;
}
profile->user_set = true;
profile->enabled = value;
if (profile->parent != NULL) {
object_property_set_bool(obj, profile->parent->name,
profile->enabled, NULL);
}
if (profile->enabled) {
cpu->env.priv_ver = profile->priv_spec;
}
#ifndef CONFIG_USER_ONLY
if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
const char *satp_prop = satp_mode_str(profile->satp_mode,
riscv_cpu_is_32bit(cpu));
object_property_set_bool(obj, satp_prop, profile->enabled, NULL);
}
#endif
for (i = 0; misa_bits[i] != 0; i++) {
uint32_t bit = misa_bits[i];
if (!(profile->misa_ext & bit)) {
continue;
}
if (bit == RVI && !profile->enabled) {
/*
* Disabling profiles will not disable the base
* ISA RV64I.
*/
continue;
}
cpu_misa_ext_add_user_opt(bit, profile->enabled);
riscv_cpu_write_misa_bit(cpu, bit, profile->enabled);
}
for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
ext_offset = profile->ext_offsets[i];
if (profile->enabled) {
if (cpu_cfg_offset_is_named_feat(ext_offset)) {
riscv_cpu_enable_named_feat(cpu, ext_offset);
}
cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset);
}
cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled);
isa_ext_update_enabled(cpu, ext_offset, profile->enabled);
}
}
static void cpu_get_profile(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
RISCVCPUProfile *profile = opaque;
bool value = profile->enabled;
visit_type_bool(v, name, &value, errp);
}
static void riscv_cpu_add_profiles(Object *cpu_obj)
{
for (int i = 0; riscv_profiles[i] != NULL; i++) {
const RISCVCPUProfile *profile = riscv_profiles[i];
object_property_add(cpu_obj, profile->name, "bool",
cpu_get_profile, cpu_set_profile,
NULL, (void *)profile);
/*
* CPUs might enable a profile right from the start.
* Enable its mandatory extensions right away in this
* case.
*/
if (profile->enabled) {
object_property_set_bool(cpu_obj, profile->name, true, NULL);
}
}
}
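
Taken together, a profile acts like any other -cpu flag: an invocation along the lines of -cpu rv64,rva22u64=true walks the profile's MISA bits and extension offsets, records each one as a user option, and enables it; enabling a child profile (such as rva22s64, where built in) also switches its parent on through the object_property_set_bool() call on profile->parent. Disabling a profile deliberately leaves RVI alone, per the comment in cpu_set_profile().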
@@ -850,7 +1224,7 @@ static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
{
const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
RISCVCPU *cpu = RISCV_CPU(obj);
-    bool generic_cpu = riscv_cpu_is_generic(obj);
+    bool vendor_cpu = riscv_cpu_is_vendor(obj);
bool prev_val, value;
if (!visit_type_bool(v, name, &value, errp)) {
@@ -864,9 +1238,7 @@ static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
multi_ext_cfg->name, lower);
}
-    g_hash_table_insert(multi_ext_user_opts,
-                        GUINT_TO_POINTER(multi_ext_cfg->offset),
-                        (gpointer)value);
+    cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value);
prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset);
@@ -874,13 +1246,17 @@ static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
return;
}
-    if (value && !generic_cpu) {
+    if (value && vendor_cpu) {
g_autofree char *cpuname = riscv_cpu_get_name(cpu);
error_setg(errp, "'%s' CPU does not allow enabling extensions",
cpuname);
return;
}
+    if (value) {
+        cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset);
+    }
isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value);
}
@@ -949,6 +1325,8 @@ static void riscv_cpu_add_user_properties(Object *obj)
riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts);
+    riscv_cpu_add_profiles(obj);
for (Property *prop = riscv_cpu_options; prop && prop->name; prop++) {
qdev_property_add_static(DEVICE(obj), prop);
}
@@ -999,6 +1377,7 @@ static void tcg_cpu_instance_init(CPUState *cs)
RISCVCPU *cpu = RISCV_CPU(cs);
Object *obj = OBJECT(cpu);
+    misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
riscv_cpu_add_user_properties(obj);

[...]

@@ -1089,6 +1089,7 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
#include "insn_trans/trans_rvv.c.inc"
#include "insn_trans/trans_rvb.c.inc"
#include "insn_trans/trans_rvzicond.c.inc"
#include "insn_trans/trans_rvzacas.c.inc"
#include "insn_trans/trans_rvzawrs.c.inc"
#include "insn_trans/trans_rvzicbo.c.inc"
#include "insn_trans/trans_rvzfa.c.inc"