KVM/riscv changes for 6.9

 - Exception and interrupt handling for selftests
 - Sstc (aka arch_timer) selftest
 - Forward seed CSR access to KVM userspace
 - Ztso extension support for Guest/VM
 - Zacas extension support for Guest/VM
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEZdn75s5e6LHDQ+f/rUjsVaLHLAcFAmXpXfMACgkQrUjsVaLH
 LAfZoRAAgIJqgL9jMQ2dliTq8sk24dQQUPo1V2yP7yEYLeUyp/9EAR31ZhBDP1EE
 yTT0yvEbyn21fxhdHagVnqJXOepHz1MPmGGxEFx96RVib/m80zDhyDKogW6IgP4c
 e9yXW1Wo6m0R+7aQhn8YA9+47xbmq/cNDCwjlkIp0oL/SyktJdTcPjZUTr524jde
 dYTGLqSyoQyZMm+wBrJYTiME6nFK3RhKf7V9Mn77VTTFnhIk4J2upl+1kE2pLTAQ
 Zp3EwXCK1B2D2J1bdOuqclCApglw3H0CM4c81knDaEyB4w/l/OwpuCA+u58tSU9g
 z6DTO+vYvwlROmfeFjjmHKx1pl9uSktYlFlVqinelW7IG7Y3qDD1zPnbT27OpkLP
 rFIsF4Dm42MnmcC0sTxaMgKQtMvb56lgpaoa9XHL/DD76pAUvgKoWUnWay+32j1e
 8Hhx/PEp16ALGvDfm+9Wo8AgbvrFGl37epe2LXFFr6+zOzmGN+6vATW2EqgJ3Ueo
 P5TNkcFSyKg61r0moEr/ZSKZNvv8MuVUKSe0EmPykIccqg6oYHAoAJ66FL5yaX1k
 n/aIVNnIavzwlN6DeaQyjAxZxfSg39Z7wOH5PyKmateYKg9yM8P3RY/DndiO6F3c
 me9Q0eshu+C7h4YBRXPVstdWkkhCSVa+TN1b/6hnlSP0HLtyHik=
 =cWpl
 -----END PGP SIGNATURE-----

Merge tag 'kvm-riscv-6.9-1' of https://github.com/kvm-riscv/linux into HEAD

KVM/riscv changes for 6.9

- Exception and interrupt handling for selftests
- Sstc (aka arch_timer) selftest
- Forward seed CSR access to KVM userspace
- Ztso extension support for Guest/VM
- Zacas extension support for Guest/VM
Paolo Bonzini 2024-03-11 10:10:48 -04:00
commit f074158a0d
18 changed files with 1380 additions and 307 deletions


@@ -165,6 +165,8 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_ZVFH,
KVM_RISCV_ISA_EXT_ZVFHMIN,
KVM_RISCV_ISA_EXT_ZFA,
KVM_RISCV_ISA_EXT_ZTSO,
KVM_RISCV_ISA_EXT_ZACAS,
KVM_RISCV_ISA_EXT_MAX,
};


@@ -7,6 +7,8 @@
#include <linux/bitops.h>
#include <linux/kvm_host.h>
#include <asm/cpufeature.h>
#define INSN_OPCODE_MASK 0x007c
#define INSN_OPCODE_SHIFT 2
#define INSN_OPCODE_SYSTEM 28
@@ -213,9 +215,20 @@ struct csr_func {
unsigned long wr_mask);
};
static int seed_csr_rmw(struct kvm_vcpu *vcpu, unsigned int csr_num,
unsigned long *val, unsigned long new_val,
unsigned long wr_mask)
{
if (!riscv_isa_extension_available(vcpu->arch.isa, ZKR))
return KVM_INSN_ILLEGAL_TRAP;
return KVM_INSN_EXIT_TO_USER_SPACE;
}
static const struct csr_func csr_funcs[] = {
KVM_RISCV_VCPU_AIA_CSR_FUNCS
KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS
{ .base = CSR_SEED, .count = 1, .func = seed_csr_rmw },
};
/**

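The forwarded access surfaces in the VMM as a KVM_EXIT_RISCV_CSR exit, with the operation described by kvm_run's riscv_csr fields (csr_num, new_value, write_mask, ret_value). Below is a minimal userspace sketch of servicing that exit; the helper name, the hard-coded CSR number 0x015 (the seed CSR), and the placeholder entropy value are illustrative assumptions, not part of this commit:

#include <linux/kvm.h>

/* Sketch: service a forwarded seed CSR access in the VMM's run loop. */
static void handle_riscv_csr_exit(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_RISCV_CSR)
		return;

	if (run->riscv_csr.csr_num == 0x015) {	/* seed CSR (Zkr) */
		/*
		 * Report OPST = ES16 (bits 31:30 = 0b10) plus 16 bits of
		 * entropy; a real VMM would pull these from getrandom().
		 */
		run->riscv_csr.ret_value = (2UL << 30) | 0x1234;
	}
	/* KVM consumes ret_value on the next KVM_RUN ioctl. */
}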

@@ -40,6 +40,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(SVINVAL),
KVM_ISA_EXT_ARR(SVNAPOT),
KVM_ISA_EXT_ARR(SVPBMT),
KVM_ISA_EXT_ARR(ZACAS),
KVM_ISA_EXT_ARR(ZBA),
KVM_ISA_EXT_ARR(ZBB),
KVM_ISA_EXT_ARR(ZBC),
@@ -66,6 +67,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(ZKSED),
KVM_ISA_EXT_ARR(ZKSH),
KVM_ISA_EXT_ARR(ZKT),
KVM_ISA_EXT_ARR(ZTSO),
KVM_ISA_EXT_ARR(ZVBB),
KVM_ISA_EXT_ARR(ZVBC),
KVM_ISA_EXT_ARR(ZVFH),
@@ -117,6 +119,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_SSTC:
case KVM_RISCV_ISA_EXT_SVINVAL:
case KVM_RISCV_ISA_EXT_SVNAPOT:
case KVM_RISCV_ISA_EXT_ZACAS:
case KVM_RISCV_ISA_EXT_ZBA:
case KVM_RISCV_ISA_EXT_ZBB:
case KVM_RISCV_ISA_EXT_ZBC:
@@ -141,6 +144,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_ZKSED:
case KVM_RISCV_ISA_EXT_ZKSH:
case KVM_RISCV_ISA_EXT_ZKT:
case KVM_RISCV_ISA_EXT_ZTSO:
case KVM_RISCV_ISA_EXT_ZVBB:
case KVM_RISCV_ISA_EXT_ZVBC:
case KVM_RISCV_ISA_EXT_ZVFH:


@@ -0,0 +1,541 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2015 Regents of the University of California
*/
#ifndef _ASM_RISCV_CSR_H
#define _ASM_RISCV_CSR_H
#include <linux/bits.h>
/* Status register flags */
#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
#define SR_MIE _AC(0x00000008, UL) /* Machine Interrupt Enable */
#define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */
#define SR_MPIE _AC(0x00000080, UL) /* Previous Machine IE */
#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */
#define SR_MPP _AC(0x00001800, UL) /* Previously Machine */
#define SR_SUM _AC(0x00040000, UL) /* Supervisor User Memory Access */
#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */
#define SR_FS_OFF _AC(0x00000000, UL)
#define SR_FS_INITIAL _AC(0x00002000, UL)
#define SR_FS_CLEAN _AC(0x00004000, UL)
#define SR_FS_DIRTY _AC(0x00006000, UL)
#define SR_VS _AC(0x00000600, UL) /* Vector Status */
#define SR_VS_OFF _AC(0x00000000, UL)
#define SR_VS_INITIAL _AC(0x00000200, UL)
#define SR_VS_CLEAN _AC(0x00000400, UL)
#define SR_VS_DIRTY _AC(0x00000600, UL)
#define SR_XS _AC(0x00018000, UL) /* Extension Status */
#define SR_XS_OFF _AC(0x00000000, UL)
#define SR_XS_INITIAL _AC(0x00008000, UL)
#define SR_XS_CLEAN _AC(0x00010000, UL)
#define SR_XS_DIRTY _AC(0x00018000, UL)
#define SR_FS_VS (SR_FS | SR_VS) /* Vector and Floating-Point Unit */
#ifndef CONFIG_64BIT
#define SR_SD _AC(0x80000000, UL) /* FS/VS/XS dirty */
#else
#define SR_SD _AC(0x8000000000000000, UL) /* FS/VS/XS dirty */
#endif
#ifdef CONFIG_64BIT
#define SR_UXL _AC(0x300000000, UL) /* XLEN mask for U-mode */
#define SR_UXL_32 _AC(0x100000000, UL) /* XLEN = 32 for U-mode */
#define SR_UXL_64 _AC(0x200000000, UL) /* XLEN = 64 for U-mode */
#endif
/* SATP flags */
#ifndef CONFIG_64BIT
#define SATP_PPN _AC(0x003FFFFF, UL)
#define SATP_MODE_32 _AC(0x80000000, UL)
#define SATP_MODE_SHIFT 31
#define SATP_ASID_BITS 9
#define SATP_ASID_SHIFT 22
#define SATP_ASID_MASK _AC(0x1FF, UL)
#else
#define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL)
#define SATP_MODE_39 _AC(0x8000000000000000, UL)
#define SATP_MODE_48 _AC(0x9000000000000000, UL)
#define SATP_MODE_57 _AC(0xa000000000000000, UL)
#define SATP_MODE_SHIFT 60
#define SATP_ASID_BITS 16
#define SATP_ASID_SHIFT 44
#define SATP_ASID_MASK _AC(0xFFFF, UL)
#endif
/* Exception cause high bit - is an interrupt if set */
#define CAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1))
/* Interrupt causes (minus the high bit) */
#define IRQ_S_SOFT 1
#define IRQ_VS_SOFT 2
#define IRQ_M_SOFT 3
#define IRQ_S_TIMER 5
#define IRQ_VS_TIMER 6
#define IRQ_M_TIMER 7
#define IRQ_S_EXT 9
#define IRQ_VS_EXT 10
#define IRQ_M_EXT 11
#define IRQ_S_GEXT 12
#define IRQ_PMU_OVF 13
#define IRQ_LOCAL_MAX (IRQ_PMU_OVF + 1)
#define IRQ_LOCAL_MASK GENMASK((IRQ_LOCAL_MAX - 1), 0)
/* Exception causes */
#define EXC_INST_MISALIGNED 0
#define EXC_INST_ACCESS 1
#define EXC_INST_ILLEGAL 2
#define EXC_BREAKPOINT 3
#define EXC_LOAD_MISALIGNED 4
#define EXC_LOAD_ACCESS 5
#define EXC_STORE_MISALIGNED 6
#define EXC_STORE_ACCESS 7
#define EXC_SYSCALL 8
#define EXC_HYPERVISOR_SYSCALL 9
#define EXC_SUPERVISOR_SYSCALL 10
#define EXC_INST_PAGE_FAULT 12
#define EXC_LOAD_PAGE_FAULT 13
#define EXC_STORE_PAGE_FAULT 15
#define EXC_INST_GUEST_PAGE_FAULT 20
#define EXC_LOAD_GUEST_PAGE_FAULT 21
#define EXC_VIRTUAL_INST_FAULT 22
#define EXC_STORE_GUEST_PAGE_FAULT 23
/* PMP configuration */
#define PMP_R 0x01
#define PMP_W 0x02
#define PMP_X 0x04
#define PMP_A 0x18
#define PMP_A_TOR 0x08
#define PMP_A_NA4 0x10
#define PMP_A_NAPOT 0x18
#define PMP_L 0x80
/* HSTATUS flags */
#ifdef CONFIG_64BIT
#define HSTATUS_VSXL _AC(0x300000000, UL)
#define HSTATUS_VSXL_SHIFT 32
#endif
#define HSTATUS_VTSR _AC(0x00400000, UL)
#define HSTATUS_VTW _AC(0x00200000, UL)
#define HSTATUS_VTVM _AC(0x00100000, UL)
#define HSTATUS_VGEIN _AC(0x0003f000, UL)
#define HSTATUS_VGEIN_SHIFT 12
#define HSTATUS_HU _AC(0x00000200, UL)
#define HSTATUS_SPVP _AC(0x00000100, UL)
#define HSTATUS_SPV _AC(0x00000080, UL)
#define HSTATUS_GVA _AC(0x00000040, UL)
#define HSTATUS_VSBE _AC(0x00000020, UL)
/* HGATP flags */
#define HGATP_MODE_OFF _AC(0, UL)
#define HGATP_MODE_SV32X4 _AC(1, UL)
#define HGATP_MODE_SV39X4 _AC(8, UL)
#define HGATP_MODE_SV48X4 _AC(9, UL)
#define HGATP_MODE_SV57X4 _AC(10, UL)
#define HGATP32_MODE_SHIFT 31
#define HGATP32_VMID_SHIFT 22
#define HGATP32_VMID GENMASK(28, 22)
#define HGATP32_PPN GENMASK(21, 0)
#define HGATP64_MODE_SHIFT 60
#define HGATP64_VMID_SHIFT 44
#define HGATP64_VMID GENMASK(57, 44)
#define HGATP64_PPN GENMASK(43, 0)
#define HGATP_PAGE_SHIFT 12
#ifdef CONFIG_64BIT
#define HGATP_PPN HGATP64_PPN
#define HGATP_VMID_SHIFT HGATP64_VMID_SHIFT
#define HGATP_VMID HGATP64_VMID
#define HGATP_MODE_SHIFT HGATP64_MODE_SHIFT
#else
#define HGATP_PPN HGATP32_PPN
#define HGATP_VMID_SHIFT HGATP32_VMID_SHIFT
#define HGATP_VMID HGATP32_VMID
#define HGATP_MODE_SHIFT HGATP32_MODE_SHIFT
#endif
/* VSIP & HVIP relation */
#define VSIP_TO_HVIP_SHIFT (IRQ_VS_SOFT - IRQ_S_SOFT)
#define VSIP_VALID_MASK ((_AC(1, UL) << IRQ_S_SOFT) | \
(_AC(1, UL) << IRQ_S_TIMER) | \
(_AC(1, UL) << IRQ_S_EXT))
/* AIA CSR bits */
#define TOPI_IID_SHIFT 16
#define TOPI_IID_MASK GENMASK(11, 0)
#define TOPI_IPRIO_MASK GENMASK(7, 0)
#define TOPI_IPRIO_BITS 8
#define TOPEI_ID_SHIFT 16
#define TOPEI_ID_MASK GENMASK(10, 0)
#define TOPEI_PRIO_MASK GENMASK(10, 0)
#define ISELECT_IPRIO0 0x30
#define ISELECT_IPRIO15 0x3f
#define ISELECT_MASK GENMASK(8, 0)
#define HVICTL_VTI BIT(30)
#define HVICTL_IID GENMASK(27, 16)
#define HVICTL_IID_SHIFT 16
#define HVICTL_DPR BIT(9)
#define HVICTL_IPRIOM BIT(8)
#define HVICTL_IPRIO GENMASK(7, 0)
/* xENVCFG flags */
#define ENVCFG_STCE (_AC(1, ULL) << 63)
#define ENVCFG_PBMTE (_AC(1, ULL) << 62)
#define ENVCFG_CBZE (_AC(1, UL) << 7)
#define ENVCFG_CBCFE (_AC(1, UL) << 6)
#define ENVCFG_CBIE_SHIFT 4
#define ENVCFG_CBIE (_AC(0x3, UL) << ENVCFG_CBIE_SHIFT)
#define ENVCFG_CBIE_ILL _AC(0x0, UL)
#define ENVCFG_CBIE_FLUSH _AC(0x1, UL)
#define ENVCFG_CBIE_INV _AC(0x3, UL)
#define ENVCFG_FIOM _AC(0x1, UL)
/* Smstateen bits */
#define SMSTATEEN0_AIA_IMSIC_SHIFT 58
#define SMSTATEEN0_AIA_IMSIC (_ULL(1) << SMSTATEEN0_AIA_IMSIC_SHIFT)
#define SMSTATEEN0_AIA_SHIFT 59
#define SMSTATEEN0_AIA (_ULL(1) << SMSTATEEN0_AIA_SHIFT)
#define SMSTATEEN0_AIA_ISEL_SHIFT 60
#define SMSTATEEN0_AIA_ISEL (_ULL(1) << SMSTATEEN0_AIA_ISEL_SHIFT)
#define SMSTATEEN0_HSENVCFG_SHIFT 62
#define SMSTATEEN0_HSENVCFG (_ULL(1) << SMSTATEEN0_HSENVCFG_SHIFT)
#define SMSTATEEN0_SSTATEEN0_SHIFT 63
#define SMSTATEEN0_SSTATEEN0 (_ULL(1) << SMSTATEEN0_SSTATEEN0_SHIFT)
/* symbolic CSR names: */
#define CSR_CYCLE 0xc00
#define CSR_TIME 0xc01
#define CSR_INSTRET 0xc02
#define CSR_HPMCOUNTER3 0xc03
#define CSR_HPMCOUNTER4 0xc04
#define CSR_HPMCOUNTER5 0xc05
#define CSR_HPMCOUNTER6 0xc06
#define CSR_HPMCOUNTER7 0xc07
#define CSR_HPMCOUNTER8 0xc08
#define CSR_HPMCOUNTER9 0xc09
#define CSR_HPMCOUNTER10 0xc0a
#define CSR_HPMCOUNTER11 0xc0b
#define CSR_HPMCOUNTER12 0xc0c
#define CSR_HPMCOUNTER13 0xc0d
#define CSR_HPMCOUNTER14 0xc0e
#define CSR_HPMCOUNTER15 0xc0f
#define CSR_HPMCOUNTER16 0xc10
#define CSR_HPMCOUNTER17 0xc11
#define CSR_HPMCOUNTER18 0xc12
#define CSR_HPMCOUNTER19 0xc13
#define CSR_HPMCOUNTER20 0xc14
#define CSR_HPMCOUNTER21 0xc15
#define CSR_HPMCOUNTER22 0xc16
#define CSR_HPMCOUNTER23 0xc17
#define CSR_HPMCOUNTER24 0xc18
#define CSR_HPMCOUNTER25 0xc19
#define CSR_HPMCOUNTER26 0xc1a
#define CSR_HPMCOUNTER27 0xc1b
#define CSR_HPMCOUNTER28 0xc1c
#define CSR_HPMCOUNTER29 0xc1d
#define CSR_HPMCOUNTER30 0xc1e
#define CSR_HPMCOUNTER31 0xc1f
#define CSR_CYCLEH 0xc80
#define CSR_TIMEH 0xc81
#define CSR_INSTRETH 0xc82
#define CSR_HPMCOUNTER3H 0xc83
#define CSR_HPMCOUNTER4H 0xc84
#define CSR_HPMCOUNTER5H 0xc85
#define CSR_HPMCOUNTER6H 0xc86
#define CSR_HPMCOUNTER7H 0xc87
#define CSR_HPMCOUNTER8H 0xc88
#define CSR_HPMCOUNTER9H 0xc89
#define CSR_HPMCOUNTER10H 0xc8a
#define CSR_HPMCOUNTER11H 0xc8b
#define CSR_HPMCOUNTER12H 0xc8c
#define CSR_HPMCOUNTER13H 0xc8d
#define CSR_HPMCOUNTER14H 0xc8e
#define CSR_HPMCOUNTER15H 0xc8f
#define CSR_HPMCOUNTER16H 0xc90
#define CSR_HPMCOUNTER17H 0xc91
#define CSR_HPMCOUNTER18H 0xc92
#define CSR_HPMCOUNTER19H 0xc93
#define CSR_HPMCOUNTER20H 0xc94
#define CSR_HPMCOUNTER21H 0xc95
#define CSR_HPMCOUNTER22H 0xc96
#define CSR_HPMCOUNTER23H 0xc97
#define CSR_HPMCOUNTER24H 0xc98
#define CSR_HPMCOUNTER25H 0xc99
#define CSR_HPMCOUNTER26H 0xc9a
#define CSR_HPMCOUNTER27H 0xc9b
#define CSR_HPMCOUNTER28H 0xc9c
#define CSR_HPMCOUNTER29H 0xc9d
#define CSR_HPMCOUNTER30H 0xc9e
#define CSR_HPMCOUNTER31H 0xc9f
#define CSR_SSCOUNTOVF 0xda0
#define CSR_SSTATUS 0x100
#define CSR_SIE 0x104
#define CSR_STVEC 0x105
#define CSR_SCOUNTEREN 0x106
#define CSR_SENVCFG 0x10a
#define CSR_SSTATEEN0 0x10c
#define CSR_SSCRATCH 0x140
#define CSR_SEPC 0x141
#define CSR_SCAUSE 0x142
#define CSR_STVAL 0x143
#define CSR_SIP 0x144
#define CSR_SATP 0x180
#define CSR_STIMECMP 0x14D
#define CSR_STIMECMPH 0x15D
/* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
#define CSR_SISELECT 0x150
#define CSR_SIREG 0x151
/* Supervisor-Level Interrupts (AIA) */
#define CSR_STOPEI 0x15c
#define CSR_STOPI 0xdb0
/* Supervisor-Level High-Half CSRs (AIA) */
#define CSR_SIEH 0x114
#define CSR_SIPH 0x154
#define CSR_VSSTATUS 0x200
#define CSR_VSIE 0x204
#define CSR_VSTVEC 0x205
#define CSR_VSSCRATCH 0x240
#define CSR_VSEPC 0x241
#define CSR_VSCAUSE 0x242
#define CSR_VSTVAL 0x243
#define CSR_VSIP 0x244
#define CSR_VSATP 0x280
#define CSR_VSTIMECMP 0x24D
#define CSR_VSTIMECMPH 0x25D
#define CSR_HSTATUS 0x600
#define CSR_HEDELEG 0x602
#define CSR_HIDELEG 0x603
#define CSR_HIE 0x604
#define CSR_HTIMEDELTA 0x605
#define CSR_HCOUNTEREN 0x606
#define CSR_HGEIE 0x607
#define CSR_HENVCFG 0x60a
#define CSR_HTIMEDELTAH 0x615
#define CSR_HENVCFGH 0x61a
#define CSR_HTVAL 0x643
#define CSR_HIP 0x644
#define CSR_HVIP 0x645
#define CSR_HTINST 0x64a
#define CSR_HGATP 0x680
#define CSR_HGEIP 0xe12
/* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
#define CSR_HVIEN 0x608
#define CSR_HVICTL 0x609
#define CSR_HVIPRIO1 0x646
#define CSR_HVIPRIO2 0x647
/* VS-Level Window to Indirectly Accessed Registers (H-extension with AIA) */
#define CSR_VSISELECT 0x250
#define CSR_VSIREG 0x251
/* VS-Level Interrupts (H-extension with AIA) */
#define CSR_VSTOPEI 0x25c
#define CSR_VSTOPI 0xeb0
/* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
#define CSR_HIDELEGH 0x613
#define CSR_HVIENH 0x618
#define CSR_HVIPH 0x655
#define CSR_HVIPRIO1H 0x656
#define CSR_HVIPRIO2H 0x657
#define CSR_VSIEH 0x214
#define CSR_VSIPH 0x254
/* Hypervisor stateen CSRs */
#define CSR_HSTATEEN0 0x60c
#define CSR_HSTATEEN0H 0x61c
#define CSR_MSTATUS 0x300
#define CSR_MISA 0x301
#define CSR_MIDELEG 0x303
#define CSR_MIE 0x304
#define CSR_MTVEC 0x305
#define CSR_MENVCFG 0x30a
#define CSR_MENVCFGH 0x31a
#define CSR_MSCRATCH 0x340
#define CSR_MEPC 0x341
#define CSR_MCAUSE 0x342
#define CSR_MTVAL 0x343
#define CSR_MIP 0x344
#define CSR_PMPCFG0 0x3a0
#define CSR_PMPADDR0 0x3b0
#define CSR_MVENDORID 0xf11
#define CSR_MARCHID 0xf12
#define CSR_MIMPID 0xf13
#define CSR_MHARTID 0xf14
/* Machine-Level Window to Indirectly Accessed Registers (AIA) */
#define CSR_MISELECT 0x350
#define CSR_MIREG 0x351
/* Machine-Level Interrupts (AIA) */
#define CSR_MTOPEI 0x35c
#define CSR_MTOPI 0xfb0
/* Virtual Interrupts for Supervisor Level (AIA) */
#define CSR_MVIEN 0x308
#define CSR_MVIP 0x309
/* Machine-Level High-Half CSRs (AIA) */
#define CSR_MIDELEGH 0x313
#define CSR_MIEH 0x314
#define CSR_MVIENH 0x318
#define CSR_MVIPH 0x319
#define CSR_MIPH 0x354
#define CSR_VSTART 0x8
#define CSR_VCSR 0xf
#define CSR_VL 0xc20
#define CSR_VTYPE 0xc21
#define CSR_VLENB 0xc22
#ifdef CONFIG_RISCV_M_MODE
# define CSR_STATUS CSR_MSTATUS
# define CSR_IE CSR_MIE
# define CSR_TVEC CSR_MTVEC
# define CSR_SCRATCH CSR_MSCRATCH
# define CSR_EPC CSR_MEPC
# define CSR_CAUSE CSR_MCAUSE
# define CSR_TVAL CSR_MTVAL
# define CSR_IP CSR_MIP
# define CSR_IEH CSR_MIEH
# define CSR_ISELECT CSR_MISELECT
# define CSR_IREG CSR_MIREG
# define CSR_IPH CSR_MIPH
# define CSR_TOPEI CSR_MTOPEI
# define CSR_TOPI CSR_MTOPI
# define SR_IE SR_MIE
# define SR_PIE SR_MPIE
# define SR_PP SR_MPP
# define RV_IRQ_SOFT IRQ_M_SOFT
# define RV_IRQ_TIMER IRQ_M_TIMER
# define RV_IRQ_EXT IRQ_M_EXT
#else /* CONFIG_RISCV_M_MODE */
# define CSR_STATUS CSR_SSTATUS
# define CSR_IE CSR_SIE
# define CSR_TVEC CSR_STVEC
# define CSR_SCRATCH CSR_SSCRATCH
# define CSR_EPC CSR_SEPC
# define CSR_CAUSE CSR_SCAUSE
# define CSR_TVAL CSR_STVAL
# define CSR_IP CSR_SIP
# define CSR_IEH CSR_SIEH
# define CSR_ISELECT CSR_SISELECT
# define CSR_IREG CSR_SIREG
# define CSR_IPH CSR_SIPH
# define CSR_TOPEI CSR_STOPEI
# define CSR_TOPI CSR_STOPI
# define SR_IE SR_SIE
# define SR_PIE SR_SPIE
# define SR_PP SR_SPP
# define RV_IRQ_SOFT IRQ_S_SOFT
# define RV_IRQ_TIMER IRQ_S_TIMER
# define RV_IRQ_EXT IRQ_S_EXT
# define RV_IRQ_PMU IRQ_PMU_OVF
# define SIP_LCOFIP (_AC(0x1, UL) << IRQ_PMU_OVF)
#endif /* !CONFIG_RISCV_M_MODE */
/* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */
#define IE_SIE (_AC(0x1, UL) << RV_IRQ_SOFT)
#define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER)
#define IE_EIE (_AC(0x1, UL) << RV_IRQ_EXT)
#ifdef __ASSEMBLY__
#define __ASM_STR(x) x
#else
#define __ASM_STR(x) #x
#endif
#ifndef __ASSEMBLY__
#define csr_swap(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
__asm__ __volatile__ ("csrrw %0, " __ASM_STR(csr) ", %1"\
: "=r" (__v) : "rK" (__v) \
: "memory"); \
__v; \
})
#define csr_read(csr) \
({ \
register unsigned long __v; \
__asm__ __volatile__ ("csrr %0, " __ASM_STR(csr) \
: "=r" (__v) : \
: "memory"); \
__v; \
})
#define csr_write(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
__asm__ __volatile__ ("csrw " __ASM_STR(csr) ", %0" \
: : "rK" (__v) \
: "memory"); \
})
#define csr_read_set(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
__asm__ __volatile__ ("csrrs %0, " __ASM_STR(csr) ", %1"\
: "=r" (__v) : "rK" (__v) \
: "memory"); \
__v; \
})
#define csr_set(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
__asm__ __volatile__ ("csrs " __ASM_STR(csr) ", %0" \
: : "rK" (__v) \
: "memory"); \
})
#define csr_read_clear(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
__asm__ __volatile__ ("csrrc %0, " __ASM_STR(csr) ", %1"\
: "=r" (__v) : "rK" (__v) \
: "memory"); \
__v; \
})
#define csr_clear(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
__asm__ __volatile__ ("csrc " __ASM_STR(csr) ", %0" \
: : "rK" (__v) \
: "memory"); \
})
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_CSR_H */

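These accessors are GNU C statement expressions, so a read yields a value directly, and the csr argument must be one of the symbolic names above (it is pasted into the instruction string). A small guest-side usage sketch, assuming S-mode with the Sstc extension available; the helper name and the one-million-cycle delta are illustrative:

/* Sketch: arm the S-mode timer one million counter cycles from now. */
static inline void timer_arm_example(void)
{
	unsigned long now = csr_read(CSR_TIME);	/* free-running counter */

	csr_write(CSR_STIMECMP, now + 1000000);	/* Sstc compare value */
	csr_set(CSR_SIE, IE_TIE);		/* unmask the S-timer IRQ */
}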

@@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_VDSO_PROCESSOR_H
#define __ASM_VDSO_PROCESSOR_H
#ifndef __ASSEMBLY__
#include <asm-generic/barrier.h>
static inline void cpu_relax(void)
{
#ifdef __riscv_muldiv
int dummy;
/* In lieu of a halt instruction, induce a long-latency stall. */
__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
#endif
#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
/*
* Reduce instruction retirement.
* This assumes the PC changes.
*/
__asm__ __volatile__ ("pause");
#else
/* Encoding of the pause instruction */
__asm__ __volatile__ (".4byte 0x100000F");
#endif
barrier();
}
#endif /* __ASSEMBLY__ */
#endif /* __ASM_VDSO_PROCESSOR_H */


@@ -53,6 +53,7 @@ LIBKVM_s390x += lib/s390x/diag318_test_handler.c
LIBKVM_s390x += lib/s390x/processor.c
LIBKVM_s390x += lib/s390x/ucall.c
LIBKVM_riscv += lib/riscv/handlers.S
LIBKVM_riscv += lib/riscv/processor.c
LIBKVM_riscv += lib/riscv/ucall.c
@@ -143,7 +144,6 @@ TEST_GEN_PROGS_x86_64 += system_counter_offset_test
TEST_GEN_PROGS_EXTENDED_x86_64 += x86_64/nx_huge_pages_test
TEST_GEN_PROGS_aarch64 += aarch64/aarch32_id_regs
TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
@@ -155,6 +155,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
TEST_GEN_PROGS_aarch64 += aarch64/vpmu_counter_access
TEST_GEN_PROGS_aarch64 += access_tracking_perf_test
TEST_GEN_PROGS_aarch64 += arch_timer
TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
@@ -184,6 +185,7 @@ TEST_GEN_PROGS_s390x += rseq_test
TEST_GEN_PROGS_s390x += set_memory_region_test
TEST_GEN_PROGS_s390x += kvm_binary_stats_test
TEST_GEN_PROGS_riscv += arch_timer
TEST_GEN_PROGS_riscv += demand_paging_test
TEST_GEN_PROGS_riscv += dirty_log_test
TEST_GEN_PROGS_riscv += get-reg-list
@@ -194,6 +196,7 @@ TEST_GEN_PROGS_riscv += kvm_page_table_test
TEST_GEN_PROGS_riscv += set_memory_region_test
TEST_GEN_PROGS_riscv += steal_time
SPLIT_TESTS += arch_timer
SPLIT_TESTS += get-reg-list
TEST_PROGS += $(TEST_PROGS_$(ARCH_DIR))
@@ -217,7 +220,7 @@ else
LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
endif
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
-Wno-gnu-variable-sized-type-not-at-end -MD -MP \
-Wno-gnu-variable-sized-type-not-at-end -MD -MP -DCONFIG_64BIT \
-fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \
-fno-builtin-strnlen \
-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
@@ -260,32 +263,36 @@ LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C))
LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
LIBKVM_STRING_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_STRING))
LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(LIBKVM_STRING_OBJ)
SPLIT_TESTS_TARGETS := $(patsubst %, $(OUTPUT)/%, $(SPLIT_TESTS))
SPLIT_TESTS_OBJS := $(patsubst %, $(ARCH_DIR)/%.o, $(SPLIT_TESTS))
SPLIT_TEST_GEN_PROGS := $(patsubst %, $(OUTPUT)/%, $(SPLIT_TESTS))
SPLIT_TEST_GEN_OBJ := $(patsubst %, $(OUTPUT)/$(ARCH_DIR)/%.o, $(SPLIT_TESTS))
TEST_GEN_OBJ = $(patsubst %, %.o, $(TEST_GEN_PROGS))
TEST_GEN_OBJ += $(patsubst %, %.o, $(TEST_GEN_PROGS_EXTENDED))
TEST_DEP_FILES = $(patsubst %.o, %.d, $(TEST_GEN_OBJ))
TEST_DEP_FILES += $(patsubst %.o, %.d, $(LIBKVM_OBJS))
TEST_DEP_FILES += $(patsubst %.o, %.d, $(SPLIT_TESTS_OBJS))
TEST_DEP_FILES += $(patsubst %.o, %.d, $(SPLIT_TEST_GEN_OBJ))
-include $(TEST_DEP_FILES)
$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): %: %.o
x := $(shell mkdir -p $(sort $(OUTPUT)/$(ARCH_DIR) $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ))))
$(filter-out $(SPLIT_TEST_GEN_PROGS), $(TEST_GEN_PROGS)) \
$(TEST_GEN_PROGS_EXTENDED): %: %.o
$(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $< $(LIBKVM_OBJS) $(LDLIBS) -o $@
$(TEST_GEN_OBJ): $(OUTPUT)/%.o: %.c
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
$(SPLIT_TESTS_TARGETS): %: %.o $(SPLIT_TESTS_OBJS)
$(SPLIT_TEST_GEN_PROGS): $(OUTPUT)/%: $(OUTPUT)/%.o $(OUTPUT)/$(ARCH_DIR)/%.o
$(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $^ $(LDLIBS) -o $@
$(SPLIT_TEST_GEN_OBJ): $(OUTPUT)/$(ARCH_DIR)/%.o: $(ARCH_DIR)/%.c
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
EXTRA_CLEAN += $(GEN_HDRS) \
$(LIBKVM_OBJS) \
$(SPLIT_TESTS_OBJS) \
$(SPLIT_TEST_GEN_OBJ) \
$(TEST_DEP_FILES) \
$(TEST_GEN_OBJ) \
cscope.*
x := $(shell mkdir -p $(sort $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ))))
$(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c $(GEN_HDRS)
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
@@ -299,7 +306,7 @@ $(LIBKVM_STRING_OBJ): $(OUTPUT)/%.o: %.c
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c -ffreestanding $< -o $@
x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS))))
$(SPLIT_TESTS_OBJS): $(GEN_HDRS)
$(SPLIT_TEST_GEN_OBJ): $(GEN_HDRS)
$(TEST_GEN_PROGS): $(LIBKVM_OBJS)
$(TEST_GEN_PROGS_EXTENDED): $(LIBKVM_OBJS)
$(TEST_GEN_OBJ): $(GEN_HDRS)


@@ -1,64 +1,19 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* arch_timer.c - Tests the aarch64 timer IRQ functionality
*
* The test validates both the virtual and physical timer IRQs using
* CVAL and TVAL registers. This constitutes the four stages in the test.
* The guest's main thread configures the timer interrupt for a stage
* and waits for it to fire, with a timeout equal to the timer period.
* It asserts that the timeout doesn't exceed the timer period.
*
* On the other hand, upon receipt of an interrupt, the guest's interrupt
* handler validates the interrupt by checking if the architectural state
* is in compliance with the specifications.
*
* The test provides command-line options to configure the timer's
* period (-p), number of vCPUs (-n), and iterations per stage (-i).
* To stress-test the timer stack even more, an option to migrate the
* vCPUs across pCPUs (-m), at a particular rate, is also provided.
* CVAL and TVAL registers.
*
* Copyright (c) 2021, Google LLC.
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <pthread.h>
#include <linux/kvm.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <sys/sysinfo.h>
#include "kvm_util.h"
#include "processor.h"
#include "delay.h"
#include "arch_timer.h"
#include "delay.h"
#include "gic.h"
#include "processor.h"
#include "timer_test.h"
#include "vgic.h"
#define NR_VCPUS_DEF 4
#define NR_TEST_ITERS_DEF 5
#define TIMER_TEST_PERIOD_MS_DEF 10
#define TIMER_TEST_ERR_MARGIN_US 100
#define TIMER_TEST_MIGRATION_FREQ_MS 2
struct test_args {
int nr_vcpus;
int nr_iter;
int timer_period_ms;
int migration_freq_ms;
struct kvm_arm_counter_offset offset;
};
static struct test_args test_args = {
.nr_vcpus = NR_VCPUS_DEF,
.nr_iter = NR_TEST_ITERS_DEF,
.timer_period_ms = TIMER_TEST_PERIOD_MS_DEF,
.migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS,
.offset = { .reserved = 1 },
};
#define msecs_to_usecs(msec) ((msec) * 1000LL)
#define GICD_BASE_GPA 0x8000000ULL
#define GICR_BASE_GPA 0x80A0000ULL
@@ -70,22 +25,8 @@ enum guest_stage {
GUEST_STAGE_MAX,
};
/* Shared variables between host and guest */
struct test_vcpu_shared_data {
int nr_iter;
enum guest_stage guest_stage;
uint64_t xcnt;
};
static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
static pthread_t pt_vcpu_run[KVM_MAX_VCPUS];
static struct test_vcpu_shared_data vcpu_shared_data[KVM_MAX_VCPUS];
static int vtimer_irq, ptimer_irq;
static unsigned long *vcpu_done_map;
static pthread_mutex_t vcpu_done_map_lock;
static void
guest_configure_timer_action(struct test_vcpu_shared_data *shared_data)
{
@@ -190,10 +131,14 @@ static void guest_run_stage(struct test_vcpu_shared_data *shared_data,
/* Setup a timeout for the interrupt to arrive */
udelay(msecs_to_usecs(test_args.timer_period_ms) +
TIMER_TEST_ERR_MARGIN_US);
test_args.timer_err_margin_us);
irq_iter = READ_ONCE(shared_data->nr_iter);
GUEST_ASSERT_EQ(config_iter + 1, irq_iter);
__GUEST_ASSERT(config_iter + 1 == irq_iter,
"config_iter + 1 = 0x%lx, irq_iter = 0x%lx.\n"
" Guest timer interrupt was not trigged within the specified\n"
" interval, try to increase the error margin by [-e] option.\n",
config_iter + 1, irq_iter);
}
}
@@ -222,137 +167,6 @@ static void guest_code(void)
GUEST_DONE();
}
static void *test_vcpu_run(void *arg)
{
unsigned int vcpu_idx = (unsigned long)arg;
struct ucall uc;
struct kvm_vcpu *vcpu = vcpus[vcpu_idx];
struct kvm_vm *vm = vcpu->vm;
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpu_idx];
vcpu_run(vcpu);
/* Currently, any exit from guest is an indication of completion */
pthread_mutex_lock(&vcpu_done_map_lock);
__set_bit(vcpu_idx, vcpu_done_map);
pthread_mutex_unlock(&vcpu_done_map_lock);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
case UCALL_DONE:
break;
case UCALL_ABORT:
sync_global_from_guest(vm, *shared_data);
fprintf(stderr, "Guest assert failed, vcpu %u; stage: %u; iter: %u\n",
vcpu_idx, shared_data->guest_stage, shared_data->nr_iter);
REPORT_GUEST_ASSERT(uc);
break;
default:
TEST_FAIL("Unexpected guest exit");
}
return NULL;
}
static uint32_t test_get_pcpu(void)
{
uint32_t pcpu;
unsigned int nproc_conf;
cpu_set_t online_cpuset;
nproc_conf = get_nprocs_conf();
sched_getaffinity(0, sizeof(cpu_set_t), &online_cpuset);
/* Randomly find an available pCPU to place a vCPU on */
do {
pcpu = rand() % nproc_conf;
} while (!CPU_ISSET(pcpu, &online_cpuset));
return pcpu;
}
static int test_migrate_vcpu(unsigned int vcpu_idx)
{
int ret;
cpu_set_t cpuset;
uint32_t new_pcpu = test_get_pcpu();
CPU_ZERO(&cpuset);
CPU_SET(new_pcpu, &cpuset);
pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu);
ret = pthread_setaffinity_np(pt_vcpu_run[vcpu_idx],
sizeof(cpuset), &cpuset);
/* Allow the error where the vCPU thread is already finished */
TEST_ASSERT(ret == 0 || ret == ESRCH,
"Failed to migrate the vCPU:%u to pCPU: %u; ret: %d",
vcpu_idx, new_pcpu, ret);
return ret;
}
static void *test_vcpu_migration(void *arg)
{
unsigned int i, n_done;
bool vcpu_done;
do {
usleep(msecs_to_usecs(test_args.migration_freq_ms));
for (n_done = 0, i = 0; i < test_args.nr_vcpus; i++) {
pthread_mutex_lock(&vcpu_done_map_lock);
vcpu_done = test_bit(i, vcpu_done_map);
pthread_mutex_unlock(&vcpu_done_map_lock);
if (vcpu_done) {
n_done++;
continue;
}
test_migrate_vcpu(i);
}
} while (test_args.nr_vcpus != n_done);
return NULL;
}
static void test_run(struct kvm_vm *vm)
{
pthread_t pt_vcpu_migration;
unsigned int i;
int ret;
pthread_mutex_init(&vcpu_done_map_lock, NULL);
vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus);
TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap");
for (i = 0; i < (unsigned long)test_args.nr_vcpus; i++) {
ret = pthread_create(&pt_vcpu_run[i], NULL, test_vcpu_run,
(void *)(unsigned long)i);
TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread", i);
}
/* Spawn a thread to control the vCPU migrations */
if (test_args.migration_freq_ms) {
srand(time(NULL));
ret = pthread_create(&pt_vcpu_migration, NULL,
test_vcpu_migration, NULL);
TEST_ASSERT(!ret, "Failed to create the migration pthread");
}
for (i = 0; i < test_args.nr_vcpus; i++)
pthread_join(pt_vcpu_run[i], NULL);
if (test_args.migration_freq_ms)
pthread_join(pt_vcpu_migration, NULL);
bitmap_free(vcpu_done_map);
}
static void test_init_timer_irq(struct kvm_vm *vm)
{
/* Timer intid should be the same for all the vCPUs, so query only vCPU-0 */
@@ -369,7 +183,7 @@ static void test_init_timer_irq(struct kvm_vm *vm)
static int gic_fd;
static struct kvm_vm *test_vm_create(void)
struct kvm_vm *test_vm_create(void)
{
struct kvm_vm *vm;
unsigned int i;
@@ -380,10 +194,14 @@ static struct kvm_vm *test_vm_create(void)
vm_init_descriptor_tables(vm);
vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler);
if (!test_args.offset.reserved) {
if (kvm_has_cap(KVM_CAP_COUNTER_OFFSET))
vm_ioctl(vm, KVM_ARM_SET_COUNTER_OFFSET, &test_args.offset);
else
if (!test_args.reserved) {
if (kvm_has_cap(KVM_CAP_COUNTER_OFFSET)) {
struct kvm_arm_counter_offset offset = {
.counter_offset = test_args.counter_offset,
.reserved = 0,
};
vm_ioctl(vm, KVM_ARM_SET_COUNTER_OFFSET, &offset);
} else
TEST_FAIL("no support for global offset");
}
@@ -400,81 +218,8 @@ static struct kvm_vm *test_vm_create(void)
return vm;
}
static void test_vm_cleanup(struct kvm_vm *vm)
void test_vm_cleanup(struct kvm_vm *vm)
{
close(gic_fd);
kvm_vm_free(vm);
}
static void test_print_help(char *name)
{
pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n",
name);
pr_info("\t-n: Number of vCPUs to configure (default: %u; max: %u)\n",
NR_VCPUS_DEF, KVM_MAX_VCPUS);
pr_info("\t-i: Number of iterations per stage (default: %u)\n",
NR_TEST_ITERS_DEF);
pr_info("\t-p: Periodicity (in ms) of the guest timer (default: %u)\n",
TIMER_TEST_PERIOD_MS_DEF);
pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 0 to turn off (default: %u)\n",
TIMER_TEST_MIGRATION_FREQ_MS);
pr_info("\t-o: Counter offset (in counter cycles, default: 0)\n");
pr_info("\t-h: print this help screen\n");
}
static bool parse_args(int argc, char *argv[])
{
int opt;
while ((opt = getopt(argc, argv, "hn:i:p:m:o:")) != -1) {
switch (opt) {
case 'n':
test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg);
if (test_args.nr_vcpus > KVM_MAX_VCPUS) {
pr_info("Max allowed vCPUs: %u\n",
KVM_MAX_VCPUS);
goto err;
}
break;
case 'i':
test_args.nr_iter = atoi_positive("Number of iterations", optarg);
break;
case 'p':
test_args.timer_period_ms = atoi_positive("Periodicity", optarg);
break;
case 'm':
test_args.migration_freq_ms = atoi_non_negative("Frequency", optarg);
break;
case 'o':
test_args.offset.counter_offset = strtol(optarg, NULL, 0);
test_args.offset.reserved = 0;
break;
case 'h':
default:
goto err;
}
}
return true;
err:
test_print_help(argv[0]);
return false;
}
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
if (!parse_args(argc, argv))
exit(KSFT_SKIP);
__TEST_REQUIRE(!test_args.migration_freq_ms || get_nprocs() >= 2,
"At least two physical CPUs needed for vCPU migration");
vm = test_vm_create();
test_run(vm);
test_vm_cleanup(vm);
return 0;
}


@@ -0,0 +1,259 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* arch_timer.c - Tests the arch timer IRQ functionality
*
* The guest's main thread configures the timer interrupt and waits
* for it to fire, with a timeout equal to the timer period.
* It asserts that the timeout doesn't exceed the timer period plus
* a user-configurable error margin (default: 100us).
*
* On the other hand, upon receipt of an interrupt, the guest's interrupt
* handler validates the interrupt by checking if the architectural state
* is in compliance with the specifications.
*
* The test provides command-line options to configure the timer's
* period (-p), number of vCPUs (-n), iterations per stage (-i) and timer
* interrupt arrival error margin (-e). To stress-test the timer stack
* even more, an option to migrate the vCPUs across pCPUs (-m), at a
* particular rate, is also provided.
*
* Copyright (c) 2021, Google LLC.
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <pthread.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <sys/sysinfo.h>
#include "timer_test.h"
struct test_args test_args = {
.nr_vcpus = NR_VCPUS_DEF,
.nr_iter = NR_TEST_ITERS_DEF,
.timer_period_ms = TIMER_TEST_PERIOD_MS_DEF,
.migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS,
.timer_err_margin_us = TIMER_TEST_ERR_MARGIN_US,
.reserved = 1,
};
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
struct test_vcpu_shared_data vcpu_shared_data[KVM_MAX_VCPUS];
static pthread_t pt_vcpu_run[KVM_MAX_VCPUS];
static unsigned long *vcpu_done_map;
static pthread_mutex_t vcpu_done_map_lock;
static void *test_vcpu_run(void *arg)
{
unsigned int vcpu_idx = (unsigned long)arg;
struct ucall uc;
struct kvm_vcpu *vcpu = vcpus[vcpu_idx];
struct kvm_vm *vm = vcpu->vm;
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpu_idx];
vcpu_run(vcpu);
/* Currently, any exit from guest is an indication of completion */
pthread_mutex_lock(&vcpu_done_map_lock);
__set_bit(vcpu_idx, vcpu_done_map);
pthread_mutex_unlock(&vcpu_done_map_lock);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
case UCALL_DONE:
break;
case UCALL_ABORT:
sync_global_from_guest(vm, *shared_data);
fprintf(stderr, "Guest assert failed, vcpu %u; stage: %u; iter: %u\n",
vcpu_idx, shared_data->guest_stage, shared_data->nr_iter);
REPORT_GUEST_ASSERT(uc);
break;
default:
TEST_FAIL("Unexpected guest exit");
}
pr_info("PASS(vCPU-%d).\n", vcpu_idx);
return NULL;
}
static uint32_t test_get_pcpu(void)
{
uint32_t pcpu;
unsigned int nproc_conf;
cpu_set_t online_cpuset;
nproc_conf = get_nprocs_conf();
sched_getaffinity(0, sizeof(cpu_set_t), &online_cpuset);
/* Randomly find an available pCPU to place a vCPU on */
do {
pcpu = rand() % nproc_conf;
} while (!CPU_ISSET(pcpu, &online_cpuset));
return pcpu;
}
static int test_migrate_vcpu(unsigned int vcpu_idx)
{
int ret;
cpu_set_t cpuset;
uint32_t new_pcpu = test_get_pcpu();
CPU_ZERO(&cpuset);
CPU_SET(new_pcpu, &cpuset);
pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu);
ret = pthread_setaffinity_np(pt_vcpu_run[vcpu_idx],
sizeof(cpuset), &cpuset);
/* Allow the error where the vCPU thread is already finished */
TEST_ASSERT(ret == 0 || ret == ESRCH,
"Failed to migrate the vCPU:%u to pCPU: %u; ret: %d",
vcpu_idx, new_pcpu, ret);
return ret;
}
static void *test_vcpu_migration(void *arg)
{
unsigned int i, n_done;
bool vcpu_done;
do {
usleep(msecs_to_usecs(test_args.migration_freq_ms));
for (n_done = 0, i = 0; i < test_args.nr_vcpus; i++) {
pthread_mutex_lock(&vcpu_done_map_lock);
vcpu_done = test_bit(i, vcpu_done_map);
pthread_mutex_unlock(&vcpu_done_map_lock);
if (vcpu_done) {
n_done++;
continue;
}
test_migrate_vcpu(i);
}
} while (test_args.nr_vcpus != n_done);
return NULL;
}
static void test_run(struct kvm_vm *vm)
{
pthread_t pt_vcpu_migration;
unsigned int i;
int ret;
pthread_mutex_init(&vcpu_done_map_lock, NULL);
vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus);
TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap");
for (i = 0; i < (unsigned long)test_args.nr_vcpus; i++) {
ret = pthread_create(&pt_vcpu_run[i], NULL, test_vcpu_run,
(void *)(unsigned long)i);
TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread", i);
}
/* Spawn a thread to control the vCPU migrations */
if (test_args.migration_freq_ms) {
srand(time(NULL));
ret = pthread_create(&pt_vcpu_migration, NULL,
test_vcpu_migration, NULL);
TEST_ASSERT(!ret, "Failed to create the migration pthread");
}
for (i = 0; i < test_args.nr_vcpus; i++)
pthread_join(pt_vcpu_run[i], NULL);
if (test_args.migration_freq_ms)
pthread_join(pt_vcpu_migration, NULL);
bitmap_free(vcpu_done_map);
}
static void test_print_help(char *name)
{
pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n"
"\t\t [-m migration_freq_ms] [-o counter_offset]\n"
"\t\t [-e timer_err_margin_us]\n", name);
pr_info("\t-n: Number of vCPUs to configure (default: %u; max: %u)\n",
NR_VCPUS_DEF, KVM_MAX_VCPUS);
pr_info("\t-i: Number of iterations per stage (default: %u)\n",
NR_TEST_ITERS_DEF);
pr_info("\t-p: Periodicity (in ms) of the guest timer (default: %u)\n",
TIMER_TEST_PERIOD_MS_DEF);
pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 0 to turn off (default: %u)\n",
TIMER_TEST_MIGRATION_FREQ_MS);
pr_info("\t-o: Counter offset (in counter cycles, default: 0) [aarch64-only]\n");
pr_info("\t-e: Interrupt arrival error margin (in us) of the guest timer (default: %u)\n",
TIMER_TEST_ERR_MARGIN_US);
pr_info("\t-h: print this help screen\n");
}
static bool parse_args(int argc, char *argv[])
{
int opt;
while ((opt = getopt(argc, argv, "hn:i:p:m:o:e:")) != -1) {
switch (opt) {
case 'n':
test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg);
if (test_args.nr_vcpus > KVM_MAX_VCPUS) {
pr_info("Max allowed vCPUs: %u\n",
KVM_MAX_VCPUS);
goto err;
}
break;
case 'i':
test_args.nr_iter = atoi_positive("Number of iterations", optarg);
break;
case 'p':
test_args.timer_period_ms = atoi_positive("Periodicity", optarg);
break;
case 'm':
test_args.migration_freq_ms = atoi_non_negative("Frequency", optarg);
break;
case 'e':
test_args.timer_err_margin_us = atoi_non_negative("Error Margin", optarg);
break;
case 'o':
test_args.counter_offset = strtol(optarg, NULL, 0);
test_args.reserved = 0;
break;
case 'h':
default:
goto err;
}
}
return true;
err:
test_print_help(argv[0]);
return false;
}
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
if (!parse_args(argc, argv))
exit(KSFT_SKIP);
__TEST_REQUIRE(!test_args.migration_freq_ms || get_nprocs() >= 2,
"At least two physical CPUs needed for vCPU migration");
vm = test_vm_create();
test_run(vm);
test_vm_cleanup(vm);
return 0;
}


@@ -226,8 +226,4 @@ void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
uint64_t arg6, struct arm_smccc_res *res);
uint32_t guest_get_vcpuid(void);
#endif /* SELFTEST_KVM_PROCESSOR_H */


@@ -1081,4 +1081,6 @@ void kvm_selftest_arch_init(void);
void kvm_arch_vm_post_create(struct kvm_vm *vm);
uint32_t guest_get_vcpuid(void);
#endif /* SELFTEST_KVM_UTIL_BASE_H */


@@ -0,0 +1,71 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* RISC-V Arch Timer (sstc) specific interface
*
* Copyright (c) 2024 Intel Corporation
*/
#ifndef SELFTEST_KVM_ARCH_TIMER_H
#define SELFTEST_KVM_ARCH_TIMER_H
#include <asm/csr.h>
#include <asm/vdso/processor.h>
static unsigned long timer_freq;
#define msec_to_cycles(msec) \
((timer_freq) * (uint64_t)(msec) / 1000)
#define usec_to_cycles(usec) \
((timer_freq) * (uint64_t)(usec) / 1000000)
#define cycles_to_usec(cycles) \
((uint64_t)(cycles) * 1000000 / (timer_freq))
static inline uint64_t timer_get_cycles(void)
{
return csr_read(CSR_TIME);
}
static inline void timer_set_cmp(uint64_t cval)
{
csr_write(CSR_STIMECMP, cval);
}
static inline uint64_t timer_get_cmp(void)
{
return csr_read(CSR_STIMECMP);
}
static inline void timer_irq_enable(void)
{
csr_set(CSR_SIE, IE_TIE);
}
static inline void timer_irq_disable(void)
{
csr_clear(CSR_SIE, IE_TIE);
}
static inline void timer_set_next_cmp_ms(uint32_t msec)
{
uint64_t now_ct = timer_get_cycles();
uint64_t next_ct = now_ct + msec_to_cycles(msec);
timer_set_cmp(next_ct);
}
static inline void __delay(uint64_t cycles)
{
uint64_t start = timer_get_cycles();
while ((timer_get_cycles() - start) < cycles)
cpu_relax();
}
static inline void udelay(unsigned long usec)
{
__delay(usec_to_cycles(usec));
}
#endif /* SELFTEST_KVM_ARCH_TIMER_H */

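Chained together, a guest programs a deadline, unmasks the interrupt, and spins past it while the interrupt handler (registered separately) records the tick. A sketch of one such round; the helper name and the 10 ms period / 100 us margin values are illustrative:

/* Sketch: one wait-for-tick round built from the helpers above. */
static void guest_wait_one_tick(void)
{
	timer_set_next_cmp_ms(10);	/* stimecmp = now + 10 ms */
	timer_irq_enable();		/* set STIE in the sie CSR */
	udelay(10 * 1000 + 100);	/* spin past deadline + margin */
	timer_irq_disable();		/* mask the timer IRQ again */
}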

@@ -7,8 +7,9 @@
#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H
#include "kvm_util.h"
#include <linux/stringify.h>
#include <asm/csr.h>
#include "kvm_util.h"
static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype,
uint64_t idx, uint64_t size)
@@ -47,6 +48,58 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype,
KVM_REG_RISCV_SBI_SINGLE, \
idx, KVM_REG_SIZE_ULONG)
bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext);
struct ex_regs {
unsigned long ra;
unsigned long sp;
unsigned long gp;
unsigned long tp;
unsigned long t0;
unsigned long t1;
unsigned long t2;
unsigned long s0;
unsigned long s1;
unsigned long a0;
unsigned long a1;
unsigned long a2;
unsigned long a3;
unsigned long a4;
unsigned long a5;
unsigned long a6;
unsigned long a7;
unsigned long s2;
unsigned long s3;
unsigned long s4;
unsigned long s5;
unsigned long s6;
unsigned long s7;
unsigned long s8;
unsigned long s9;
unsigned long s10;
unsigned long s11;
unsigned long t3;
unsigned long t4;
unsigned long t5;
unsigned long t6;
unsigned long epc;
unsigned long status;
unsigned long cause;
};
#define NR_VECTORS 2
#define NR_EXCEPTIONS 32
#define EC_MASK (NR_EXCEPTIONS - 1)
typedef void(*exception_handler_fn)(struct ex_regs *);
void vm_init_vector_tables(struct kvm_vm *vm);
void vcpu_init_vector_tables(struct kvm_vcpu *vcpu);
void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler);
void vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handler);
/* L3 index Bit[47:39] */
#define PGTBL_L3_INDEX_MASK 0x0000FF8000000000ULL
#define PGTBL_L3_INDEX_SHIFT 39
@@ -101,13 +154,6 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype,
#define PGTBL_PAGE_SIZE PGTBL_L0_BLOCK_SIZE
#define PGTBL_PAGE_SIZE_SHIFT PGTBL_L0_BLOCK_SHIFT
#define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL)
#define SATP_MODE_39 _AC(0x8000000000000000, UL)
#define SATP_MODE_48 _AC(0x9000000000000000, UL)
#define SATP_ASID_BITS 16
#define SATP_ASID_SHIFT 44
#define SATP_ASID_MASK _AC(0xFFFF, UL)
/* SBI return error codes */
#define SBI_SUCCESS 0
#define SBI_ERR_FAILURE -1
@@ -147,4 +193,14 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
bool guest_sbi_probe_extension(int extid, long *out_val);
static inline void local_irq_enable(void)
{
csr_set(CSR_SSTATUS, SR_SIE);
}
static inline void local_irq_disable(void)
{
csr_clear(CSR_SSTATUS, SR_SIE);
}
#endif /* SELFTEST_KVM_PROCESSOR_H */


@@ -20,6 +20,8 @@
#include <sys/mman.h>
#include "kselftest.h"
#define msecs_to_usecs(msec) ((msec) * 1000ULL)
static inline int _no_printf(const char *format, ...) { return 0; }
#ifdef DEBUG


@@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* timer test specific header
*
* Copyright (C) 2018, Google LLC
*/
#ifndef SELFTEST_KVM_TIMER_TEST_H
#define SELFTEST_KVM_TIMER_TEST_H
#include "kvm_util.h"
#define NR_VCPUS_DEF 4
#define NR_TEST_ITERS_DEF 5
#define TIMER_TEST_PERIOD_MS_DEF 10
#define TIMER_TEST_ERR_MARGIN_US 100
#define TIMER_TEST_MIGRATION_FREQ_MS 2
/* Timer test cmdline parameters */
struct test_args {
uint32_t nr_vcpus;
uint32_t nr_iter;
uint32_t timer_period_ms;
uint32_t migration_freq_ms;
uint32_t timer_err_margin_us;
/* Members of struct kvm_arm_counter_offset */
uint64_t counter_offset;
uint64_t reserved;
};
/* Shared variables between host and guest */
struct test_vcpu_shared_data {
uint32_t nr_iter;
int guest_stage;
uint64_t xcnt;
};
extern struct test_args test_args;
extern struct kvm_vcpu *vcpus[];
extern struct test_vcpu_shared_data vcpu_shared_data[];
struct kvm_vm *test_vm_create(void);
void test_vm_cleanup(struct kvm_vm *vm);
#endif /* SELFTEST_KVM_TIMER_TEST_H */


@@ -0,0 +1,101 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2023 Intel Corporation
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include <asm/csr.h>
.macro save_context
addi sp, sp, (-8*34)
sd x1, 0(sp)
sd x2, 8(sp)
sd x3, 16(sp)
sd x4, 24(sp)
sd x5, 32(sp)
sd x6, 40(sp)
sd x7, 48(sp)
sd x8, 56(sp)
sd x9, 64(sp)
sd x10, 72(sp)
sd x11, 80(sp)
sd x12, 88(sp)
sd x13, 96(sp)
sd x14, 104(sp)
sd x15, 112(sp)
sd x16, 120(sp)
sd x17, 128(sp)
sd x18, 136(sp)
sd x19, 144(sp)
sd x20, 152(sp)
sd x21, 160(sp)
sd x22, 168(sp)
sd x23, 176(sp)
sd x24, 184(sp)
sd x25, 192(sp)
sd x26, 200(sp)
sd x27, 208(sp)
sd x28, 216(sp)
sd x29, 224(sp)
sd x30, 232(sp)
sd x31, 240(sp)
csrr s0, CSR_SEPC
csrr s1, CSR_SSTATUS
csrr s2, CSR_SCAUSE
sd s0, 248(sp)
sd s1, 256(sp)
sd s2, 264(sp)
.endm
.macro restore_context
ld s2, 264(sp)
ld s1, 256(sp)
ld s0, 248(sp)
csrw CSR_SCAUSE, s2
csrw CSR_SSTATUS, s1
csrw CSR_SEPC, s0
ld x31, 240(sp)
ld x30, 232(sp)
ld x29, 224(sp)
ld x28, 216(sp)
ld x27, 208(sp)
ld x26, 200(sp)
ld x25, 192(sp)
ld x24, 184(sp)
ld x23, 176(sp)
ld x22, 168(sp)
ld x21, 160(sp)
ld x20, 152(sp)
ld x19, 144(sp)
ld x18, 136(sp)
ld x17, 128(sp)
ld x16, 120(sp)
ld x15, 112(sp)
ld x14, 104(sp)
ld x13, 96(sp)
ld x12, 88(sp)
ld x11, 80(sp)
ld x10, 72(sp)
ld x9, 64(sp)
ld x8, 56(sp)
ld x7, 48(sp)
ld x6, 40(sp)
ld x5, 32(sp)
ld x4, 24(sp)
ld x3, 16(sp)
ld x2, 8(sp)
ld x1, 0(sp)
addi sp, sp, (8*34)
.endm
.balign 4
.global exception_vectors
exception_vectors:
save_context
move a0, sp
call route_exception
restore_context
sret


@@ -13,6 +13,18 @@
#define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN 0xac0000
static vm_vaddr_t exception_handlers;
bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext)
{
unsigned long value = 0;
int ret;
ret = __vcpu_get_reg(vcpu, ext, &value);
return !ret && !!value;
}
static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
return (v + vm->page_size) & ~(vm->page_size - 1);
@@ -314,6 +326,9 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size);
vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
/* Setup sscratch for guest_get_vcpuid() */
vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(sscratch), vcpu_id);
/* Setup default exception vector of guest */
vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(stvec), (unsigned long)guest_unexp_trap);
@@ -364,8 +379,80 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
va_end(ap);
}
void kvm_exit_unexpected_exception(int vector, int ec)
{
ucall(UCALL_UNHANDLED, 2, vector, ec);
}
void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
struct ucall uc;
if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED) {
TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)",
uc.args[0], uc.args[1]);
}
}
struct handlers {
exception_handler_fn exception_handlers[NR_VECTORS][NR_EXCEPTIONS];
};
void route_exception(struct ex_regs *regs)
{
struct handlers *handlers = (struct handlers *)exception_handlers;
int vector = 0, ec;
ec = regs->cause & ~CAUSE_IRQ_FLAG;
if (ec >= NR_EXCEPTIONS)
goto unexpected_exception;
/* Use the same handler for all the interrupts */
if (regs->cause & CAUSE_IRQ_FLAG) {
vector = 1;
ec = 0;
}
if (handlers && handlers->exception_handlers[vector][ec])
return handlers->exception_handlers[vector][ec](regs);
unexpected_exception:
return kvm_exit_unexpected_exception(vector, ec);
}
void vcpu_init_vector_tables(struct kvm_vcpu *vcpu)
{
extern char exception_vectors;
vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(stvec), (unsigned long)&exception_vectors);
}
void vm_init_vector_tables(struct kvm_vm *vm)
{
vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
vm->page_size, MEM_REGION_DATA);
*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}
void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler)
{
struct handlers *handlers = addr_gva2hva(vm, vm->handlers);
assert(vector < NR_EXCEPTIONS);
handlers->exception_handlers[0][vector] = handler;
}
void vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handler)
{
struct handlers *handlers = addr_gva2hva(vm, vm->handlers);
handlers->exception_handlers[1][0] = handler;
}
uint32_t guest_get_vcpuid(void)
{
return csr_read(CSR_SSCRATCH);
}
struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,

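The handler table has two vectors: index 0 is indexed by synchronous exception code, index 1 holds the single interrupt handler. A hedged sketch of hooking a synchronous exception with this API; the guest handler body and the choice of EXC_INST_ILLEGAL are illustrative, not taken from this commit:

/* Sketch: guest-side handler that skips over illegal instructions. */
static void illegal_insn_handler(struct ex_regs *regs)
{
	regs->epc += 4;	/* step past the 4-byte faulting instruction */
}

/* Host-side setup, after creating the VM and vCPU: */
vm_init_vector_tables(vm);
vm_install_exception_handler(vm, EXC_INST_ILLEGAL, illegal_insn_handler);
vcpu_init_vector_tables(vcpu);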

@@ -0,0 +1,111 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* arch_timer.c - Tests the riscv64 sstc timer IRQ functionality
*
* The test validates the sstc timer IRQs using vstimecmp registers.
* It's ported from the aarch64 arch_timer test.
*
* Copyright (c) 2024, Intel Corporation.
*/
#define _GNU_SOURCE
#include "arch_timer.h"
#include "kvm_util.h"
#include "processor.h"
#include "timer_test.h"
static int timer_irq = IRQ_S_TIMER;
static void guest_irq_handler(struct ex_regs *regs)
{
uint64_t xcnt, xcnt_diff_us, cmp;
unsigned int intid = regs->cause & ~CAUSE_IRQ_FLAG;
uint32_t cpu = guest_get_vcpuid();
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
timer_irq_disable();
xcnt = timer_get_cycles();
cmp = timer_get_cmp();
xcnt_diff_us = cycles_to_usec(xcnt - shared_data->xcnt);
/* Make sure we are dealing with the correct timer IRQ */
GUEST_ASSERT_EQ(intid, timer_irq);
__GUEST_ASSERT(xcnt >= cmp,
"xcnt = 0x%"PRIx64", cmp = 0x%"PRIx64", xcnt_diff_us = 0x%" PRIx64,
xcnt, cmp, xcnt_diff_us);
WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1);
}
static void guest_run(struct test_vcpu_shared_data *shared_data)
{
uint32_t irq_iter, config_iter;
shared_data->nr_iter = 0;
shared_data->guest_stage = 0;
for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
/* Setup the next interrupt */
timer_set_next_cmp_ms(test_args.timer_period_ms);
shared_data->xcnt = timer_get_cycles();
timer_irq_enable();
/* Setup a timeout for the interrupt to arrive */
udelay(msecs_to_usecs(test_args.timer_period_ms) +
test_args.timer_err_margin_us);
irq_iter = READ_ONCE(shared_data->nr_iter);
__GUEST_ASSERT(config_iter + 1 == irq_iter,
"config_iter + 1 = 0x%x, irq_iter = 0x%x.\n"
" Guest timer interrupt was not trigged within the specified\n"
" interval, try to increase the error margin by [-e] option.\n",
config_iter + 1, irq_iter);
}
}
static void guest_code(void)
{
uint32_t cpu = guest_get_vcpuid();
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
timer_irq_disable();
local_irq_enable();
guest_run(shared_data);
GUEST_DONE();
}
struct kvm_vm *test_vm_create(void)
{
struct kvm_vm *vm;
int nr_vcpus = test_args.nr_vcpus;
vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
__TEST_REQUIRE(__vcpu_has_ext(vcpus[0], RISCV_ISA_EXT_REG(KVM_RISCV_ISA_EXT_SSTC)),
"SSTC not available, skipping test\n");
vm_init_vector_tables(vm);
vm_install_interrupt_handler(vm, guest_irq_handler);
for (int i = 0; i < nr_vcpus; i++)
vcpu_init_vector_tables(vcpus[i]);
/* Initialize guest timer frequency. */
vcpu_get_reg(vcpus[0], RISCV_TIMER_REG(frequency), &timer_freq);
sync_global_to_guest(vm, timer_freq);
pr_debug("timer_freq: %lu\n", timer_freq);
/* Make all the test's cmdline args visible to the guest */
sync_global_to_guest(vm, test_args);
return vm;
}
void test_vm_cleanup(struct kvm_vm *vm)
{
kvm_vm_free(vm);
}


@@ -47,6 +47,7 @@ bool filter_reg(__u64 reg)
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVINVAL:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVNAPOT:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVPBMT:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZACAS:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBA:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBB:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBC:
@@ -73,6 +74,7 @@ bool filter_reg(__u64 reg)
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKSED:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKSH:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKT:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZTSO:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVBB:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVBC:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFH:
@@ -123,15 +125,6 @@ bool check_reject_set(int err)
return err == EINVAL;
}
static bool vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext_id)
{
int ret;
unsigned long value;
ret = __vcpu_get_reg(vcpu, ext_id, &value);
return (ret) ? false : !!value;
}
void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 };
@@ -176,7 +169,7 @@ void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
__vcpu_set_reg(vcpu, feature, 1);
/* Double check whether the desired extension was enabled */
__TEST_REQUIRE(vcpu_has_ext(vcpu, feature),
__TEST_REQUIRE(__vcpu_has_ext(vcpu, feature),
"%s not available, skipping tests", s->name);
}
}
@@ -419,6 +412,7 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off)
KVM_ISA_EXT_ARR(SVINVAL),
KVM_ISA_EXT_ARR(SVNAPOT),
KVM_ISA_EXT_ARR(SVPBMT),
KVM_ISA_EXT_ARR(ZACAS),
KVM_ISA_EXT_ARR(ZBA),
KVM_ISA_EXT_ARR(ZBB),
KVM_ISA_EXT_ARR(ZBC),
@@ -445,6 +439,7 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off)
KVM_ISA_EXT_ARR(ZKSED),
KVM_ISA_EXT_ARR(ZKSH),
KVM_ISA_EXT_ARR(ZKT),
KVM_ISA_EXT_ARR(ZTSO),
KVM_ISA_EXT_ARR(ZVBB),
KVM_ISA_EXT_ARR(ZVBC),
KVM_ISA_EXT_ARR(ZVFH),
@@ -940,6 +935,7 @@ KVM_ISA_EXT_SIMPLE_CONFIG(sstc, SSTC);
KVM_ISA_EXT_SIMPLE_CONFIG(svinval, SVINVAL);
KVM_ISA_EXT_SIMPLE_CONFIG(svnapot, SVNAPOT);
KVM_ISA_EXT_SIMPLE_CONFIG(svpbmt, SVPBMT);
KVM_ISA_EXT_SIMPLE_CONFIG(zacas, ZACAS);
KVM_ISA_EXT_SIMPLE_CONFIG(zba, ZBA);
KVM_ISA_EXT_SIMPLE_CONFIG(zbb, ZBB);
KVM_ISA_EXT_SIMPLE_CONFIG(zbc, ZBC);
@@ -966,6 +962,7 @@ KVM_ISA_EXT_SIMPLE_CONFIG(zkr, ZKR);
KVM_ISA_EXT_SIMPLE_CONFIG(zksed, ZKSED);
KVM_ISA_EXT_SIMPLE_CONFIG(zksh, ZKSH);
KVM_ISA_EXT_SIMPLE_CONFIG(zkt, ZKT);
KVM_ISA_EXT_SIMPLE_CONFIG(ztso, ZTSO);
KVM_ISA_EXT_SIMPLE_CONFIG(zvbb, ZVBB);
KVM_ISA_EXT_SIMPLE_CONFIG(zvbc, ZVBC);
KVM_ISA_EXT_SIMPLE_CONFIG(zvfh, ZVFH);
@@ -993,6 +990,7 @@ struct vcpu_reg_list *vcpu_configs[] = {
&config_svinval,
&config_svnapot,
&config_svpbmt,
&config_zacas,
&config_zba,
&config_zbb,
&config_zbc,
@@ -1019,6 +1017,7 @@ struct vcpu_reg_list *vcpu_configs[] = {
&config_zksed,
&config_zksh,
&config_zkt,
&config_ztso,
&config_zvbb,
&config_zvbc,
&config_zvfh,