This patch adds initial support for nested virtualization with only
placeholder (or stub) functions for emulating hfences, hlv/hsv
instructions, and the shadow G-stage page table. These stub functions
will be filled in by subsequent patches.
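
For reviewers, all of the CSR emulation added here follows a single
read-modify-write pattern: the handler picks a backing field from the
nested context, restricts the caller's write mask by a per-CSR
writeable mask (shifting where the hip/vsip style bit layouts differ),
and then merges the new value. Below is a minimal standalone sketch of
that merge step; the mask values are made up purely for illustration
(the real masks come from riscv_csr.h):

	#include <stdio.h>

	/* Illustrative stand-in only; not the real VSIE_WRITEABLE value */
	#define DEMO_VSIE_WRITEABLE	0x222UL	/* SSIE|STIE|SEIE as seen by VS */

	/* Same merge step used by cpu_vcpu_nested_*_csr_rmw() in this patch */
	static unsigned long csr_rmw(unsigned long csr, unsigned long writeable_mask,
				     unsigned long new_val, unsigned long wr_mask)
	{
		wr_mask &= writeable_mask;
		return (csr & ~wr_mask) | (new_val & wr_mask);
	}

	int main(void)
	{
		unsigned long hideleg = 0x444UL;	/* guest delegated VS interrupts */
		unsigned long vsie = 0;

		/* Guest 'csrrs sie' with rs1 = ~0: only bits delegated via hideleg stick */
		vsie = csr_rmw(vsie, DEMO_VSIE_WRITEABLE & (hideleg >> 1), -1UL, -1UL);
		printf("vsie = 0x%lx\n", vsie);	/* -> 0x222 */
		return 0;
	}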
arch/riscv/cpu/generic/cpu_exception.c | 38 +
arch/riscv/cpu/generic/cpu_vcpu_csr.c | 54 -
arch/riscv/cpu/generic/cpu_vcpu_fp.c | 56 +-
arch/riscv/cpu/generic/cpu_vcpu_helper.c | 27 +-
arch/riscv/cpu/generic/cpu_vcpu_nested.c | 587 ++++++++++
arch/riscv/cpu/generic/cpu_vcpu_sbi.c | 11 +
arch/riscv/cpu/generic/cpu_vcpu_trap.c | 998 ++++++++++++++++--
arch/riscv/cpu/generic/include/cpu_vcpu_csr.h | 37 -
.../cpu/generic/include/cpu_vcpu_nested.h | 105 ++
.../riscv/cpu/generic/include/cpu_vcpu_trap.h | 26 +-
arch/riscv/cpu/generic/objects.mk | 2 +-
11 files changed, 1758 insertions(+), 183 deletions(-)
delete mode 100644 arch/riscv/cpu/generic/cpu_vcpu_csr.c
create mode 100644 arch/riscv/cpu/generic/cpu_vcpu_nested.c
delete mode 100644 arch/riscv/cpu/generic/include/cpu_vcpu_csr.h
create mode 100644 arch/riscv/cpu/generic/include/cpu_vcpu_nested.h
diff --git a/arch/riscv/cpu/generic/cpu_exception.c b/arch/riscv/cpu/generic/cpu_exception.c
index 76fded9d..b08ee243 100644
--- a/arch/riscv/cpu/generic/cpu_exception.c
+++ b/arch/riscv/cpu/generic/cpu_exception.c
@@ -58,6 +58,14 @@ void do_handle_irq(arch_regs_t *regs, unsigned long cause)
vmm_scheduler_irq_enter(regs, FALSE);
+ if (cause == IRQ_VS_SOFT ||
+ cause == IRQ_VS_TIMER ||
+ cause == IRQ_VS_EXT) {
+ rc = cpu_vcpu_redirect_vsirq(vmm_scheduler_current_vcpu(),
+ regs, cause);
+ goto done;
+ }
+
/* NOTE: Only exec <= 0xFFFFFFFFUL will be handled */
if (cause <= 0xFFFFFFFFUL) {
rc = vmm_host_active_irq_exec(cause);
@@ -65,10 +73,13 @@ void do_handle_irq(arch_regs_t *regs, unsigned long cause)
rc = VMM_EINVALID;
}
+done:
if (rc) {
do_error(vmm_scheduler_current_vcpu(), regs,
cause | SCAUSE_INTERRUPT_MASK,
"interrupt handling failed", rc, TRUE);
+ } else {
+ cpu_vcpu_take_vsirq(vmm_scheduler_current_vcpu(), regs);
}
vmm_scheduler_irq_exit(regs);
@@ -100,6 +111,31 @@ void do_handle_trap(arch_regs_t *regs, unsigned long cause)
}
switch (cause) {
+ case CAUSE_MISALIGNED_FETCH:
+ case CAUSE_FETCH_ACCESS:
+ case CAUSE_ILLEGAL_INSTRUCTION:
+ case CAUSE_BREAKPOINT:
+ case CAUSE_MISALIGNED_LOAD:
+ case CAUSE_LOAD_ACCESS:
+ case CAUSE_MISALIGNED_STORE:
+ case CAUSE_STORE_ACCESS:
+ case CAUSE_USER_ECALL:
+ case CAUSE_FETCH_PAGE_FAULT:
+ case CAUSE_LOAD_PAGE_FAULT:
+ case CAUSE_STORE_PAGE_FAULT:
+ msg = "general fault failed";
+ if (regs->hstatus & HSTATUS_SPV) {
+ trap.sepc = regs->sepc;
+ trap.scause = cause;
+ trap.stval = csr_read(CSR_STVAL);
+ trap.htval = csr_read(CSR_HTVAL);
+ trap.htinst = csr_read(CSR_HTINST);
+ rc = cpu_vcpu_general_fault(vcpu, regs, &trap);
+ panic = FALSE;
+ } else {
+ rc = VMM_EINVALID;
+ }
+ break;
case CAUSE_FETCH_GUEST_PAGE_FAULT:
case CAUSE_LOAD_GUEST_PAGE_FAULT:
case CAUSE_STORE_GUEST_PAGE_FAULT:
@@ -147,6 +183,8 @@ void do_handle_trap(arch_regs_t *regs, unsigned long cause)
done:
if (rc) {
do_error(vcpu, regs, cause, msg, rc, panic);
+ } else {
+ cpu_vcpu_take_vsirq(vcpu, regs);
}
vmm_scheduler_irq_exit(regs);
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_csr.c b/arch/riscv/cpu/generic/cpu_vcpu_csr.c
deleted file mode 100644
index 76856ca7..00000000
--- a/arch/riscv/cpu/generic/cpu_vcpu_csr.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Copyright (c) 2019 Anup Patel.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * @file cpu_vcpu_csr.c
- * @author Anup Patel (an...@brainfault.org)
- * @brief source for VCPU CSR read/write handling
- */
-
-#include <vmm_error.h>
-#include <vmm_stdio.h>
-#include <cpu_vcpu_csr.h>
-
-int cpu_vcpu_csr_read(struct vmm_vcpu *vcpu,
- unsigned long csr_num,
- unsigned long *csr_val)
-{
- /*
- * We don't have any CSRs to emulate because runtime
- * M-mode firmware (i.e. OpenSBI) takes care of it
- */
- vmm_printf("%s: vcpu=%s invalid csr_num=0x%lx\n",
- __func__, (vcpu) ? vcpu->name : "(null)", csr_num);
-
- return VMM_ENOTSUPP;
-}
-
-int cpu_vcpu_csr_write(struct vmm_vcpu *vcpu,
- unsigned long csr_num,
- unsigned long csr_val)
-{
- /*
- * We don't have any CSRs to emulate because runtime
- * M-mode firmware (i.e. OpenSBI) takes care of it
- */
- vmm_printf("%s: vcpu=%s invalid csr_num=0x%lx\n",
- __func__, (vcpu) ? vcpu->name : "(null)", csr_num);
-
- return VMM_ENOTSUPP;
-}
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_fp.c b/arch/riscv/cpu/generic/cpu_vcpu_fp.c
index e4ccebaa..7a1f907a 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_fp.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_fp.c
@@ -45,31 +45,51 @@ static inline void cpu_vcpu_fp_clean(arch_regs_t *regs)
regs->sstatus |= SSTATUS_FS_CLEAN;
}
-void cpu_vcpu_fp_save(struct vmm_vcpu *vcpu, arch_regs_t *regs)
+static inline void cpu_vcpu_fp_force_save(struct vmm_vcpu *vcpu)
+{
+ unsigned long *isa = riscv_priv(vcpu)->isa;
+
+ if (riscv_isa_extension_available(isa, d))
+ __cpu_vcpu_fp_d_save(&riscv_priv(vcpu)->fp.d);
+ else if (riscv_isa_extension_available(isa, f))
+ __cpu_vcpu_fp_f_save(&riscv_priv(vcpu)->fp.f);
+}
+
+static inline void cpu_vcpu_fp_force_restore(struct vmm_vcpu *vcpu)
{
- unsigned long *isa;
+ unsigned long *isa = riscv_priv(vcpu)->isa;
- if ((regs->sstatus & SSTATUS_FS) == SSTATUS_FS_DIRTY) {
- isa = riscv_priv(vcpu)->isa;
- if (riscv_isa_extension_available(isa, d))
- __cpu_vcpu_fp_d_save(&riscv_priv(vcpu)->fp.d);
- else if (riscv_isa_extension_available(isa, f))
- __cpu_vcpu_fp_f_save(&riscv_priv(vcpu)->fp.f);
- cpu_vcpu_fp_clean(regs);
+ if (riscv_isa_extension_available(isa, d))
+ __cpu_vcpu_fp_d_restore(&riscv_priv(vcpu)->fp.d);
+ else if (riscv_isa_extension_available(isa, f))
+ __cpu_vcpu_fp_f_restore(&riscv_priv(vcpu)->fp.f);
+}
+
+void cpu_vcpu_fp_save(struct vmm_vcpu *vcpu, arch_regs_t *regs)
+{
+ if (riscv_nested_virt(vcpu)) {
+ /* Always save FP state when nested virtualization is ON */
+ cpu_vcpu_fp_force_save(vcpu);
+ } else {
+ /* Lazy save FP state when nested virtualization is OFF */
+ if ((regs->sstatus & SSTATUS_FS) == SSTATUS_FS_DIRTY) {
+ cpu_vcpu_fp_force_save(vcpu);
+ cpu_vcpu_fp_clean(regs);
+ }
}
}
void cpu_vcpu_fp_restore(struct vmm_vcpu *vcpu, arch_regs_t *regs)
{
- unsigned long *isa;
-
- if ((regs->sstatus & SSTATUS_FS) != SSTATUS_FS_OFF) {
- isa = riscv_priv(vcpu)->isa;
- if (riscv_isa_extension_available(isa, d))
- __cpu_vcpu_fp_d_restore(&riscv_priv(vcpu)->fp.d);
- else if (riscv_isa_extension_available(isa, f))
- __cpu_vcpu_fp_f_restore(&riscv_priv(vcpu)->fp.f);
- cpu_vcpu_fp_clean(regs);
+ if (riscv_nested_virt(vcpu)) {
+ /* Always restore FP state when nested virtualization is ON */
+ cpu_vcpu_fp_force_restore(vcpu);
+ } else {
+ /* Lazy restore FP state when nested virtualization is OFF */
+ if ((regs->sstatus & SSTATUS_FS) != SSTATUS_FS_OFF) {
+ cpu_vcpu_fp_force_restore(vcpu);
+ cpu_vcpu_fp_clean(regs);
+ }
}
}
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_helper.c b/arch/riscv/cpu/generic/cpu_vcpu_helper.c
index 4ff619a7..e33bce24 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_helper.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_helper.c
@@ -39,6 +39,8 @@
#include <cpu_tlb.h>
#include <cpu_sbi.h>
#include <cpu_vcpu_fp.h>
+#include <cpu_vcpu_trap.h>
+#include <cpu_vcpu_nested.h>
#include <cpu_vcpu_helper.h>
#include <cpu_vcpu_timer.h>
#include <cpu_guest_serial.h>
@@ -52,8 +54,7 @@
riscv_isa_extension_mask(f) | \
riscv_isa_extension_mask(i) | \
riscv_isa_extension_mask(m) | \
- riscv_isa_extension_mask(s) | \
- riscv_isa_extension_mask(u))
+ riscv_isa_extension_mask(h))
static char *guest_fdt_find_serial_node(char *guest_name)
{
@@ -277,6 +278,18 @@ int arch_vcpu_init(struct vmm_vcpu *vcpu)
goto fail_free_isa;
}
riscv_priv(vcpu)->isa[0] &= RISCV_ISA_ALLOWED;
+
+ /* H-extension only available when AIA CSRs are available */
+ if (!riscv_aia_available) {
+ riscv_priv(vcpu)->isa[0] &=
+ ~riscv_isa_extension_mask(h);
+ }
+
+ /* Initialize nested state */
+ rc = cpu_vcpu_nested_init(vcpu);
+ if (rc) {
+ goto fail_free_isa;
+ }
}
/* Set a0 to VCPU sub-id (i.e. virtual HARTID) */
@@ -306,6 +319,9 @@ int arch_vcpu_init(struct vmm_vcpu *vcpu)
/* By default, make CY, TM, and IR counters accessible in VU mode */
riscv_priv(vcpu)->scounteren = 7;
+ /* Reset nested state */
+ cpu_vcpu_nested_reset(vcpu);
+
/* Initialize FP state */
cpu_vcpu_fp_init(vcpu);
@@ -345,10 +361,14 @@ int arch_vcpu_deinit(struct vmm_vcpu *vcpu)
return VMM_OK;
}
+ /* Cleanup timer */
rc = riscv_timer_event_deinit(vcpu, &riscv_timer_priv(vcpu));
if (rc)
return rc;
+ /* Cleanup nested state */
+ cpu_vcpu_nested_deinit(vcpu);
+
/* Free ISA bitmap */
vmm_free(riscv_priv(vcpu)->isa);
riscv_priv(vcpu)->isa = NULL;
@@ -403,6 +423,7 @@ void arch_vcpu_switch(struct vmm_vcpu *tvcpu,
cpu_vcpu_time_delta_update(vcpu, riscv_nested_virt(vcpu));
cpu_vcpu_gstage_update(vcpu, riscv_nested_virt(vcpu));
cpu_vcpu_irq_deleg_update(vcpu, riscv_nested_virt(vcpu));
+ cpu_vcpu_take_vsirq(vcpu, regs);
}
}
@@ -568,6 +589,8 @@ void cpu_vcpu_dump_private_regs(struct vmm_chardev *cdev,
vmm_cprintf(cdev, " %s=0x%"PRIADDR"\n",
" scounteren", priv->scounteren);
+ cpu_vcpu_nested_dump_regs(cdev, vcpu);
+
cpu_vcpu_fp_dump_regs(cdev, vcpu);
}
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_nested.c b/arch/riscv/cpu/generic/cpu_vcpu_nested.c
new file mode 100644
index 00000000..5f8774de
--- /dev/null
+++ b/arch/riscv/cpu/generic/cpu_vcpu_nested.c
@@ -0,0 +1,587 @@
+/**
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file cpu_vcpu_nested.c
+ * @brief source of VCPU nested functions
+ */
+
+#include <vmm_error.h>
+#include <vmm_stdio.h>
+#include <generic_mmu.h>
+
+#include <cpu_hwcap.h>
+#include <cpu_vcpu_helper.h>
+#include <cpu_vcpu_nested.h>
+#include <cpu_vcpu_trap.h>
+#include <riscv_csr.h>
+
+int cpu_vcpu_nested_init(struct vmm_vcpu *vcpu)
+{
+ struct riscv_priv_nested *npriv = riscv_nested_priv(vcpu);
+
+ npriv->pgtbl = mmu_pgtbl_alloc(MMU_STAGE2, -1);
+ if (!npriv->pgtbl) {
+ return VMM_ENOMEM;
+ }
+
+ return VMM_OK;
+}
+
+void cpu_vcpu_nested_reset(struct vmm_vcpu *vcpu)
+{
+ struct riscv_priv_nested *npriv = riscv_nested_priv(vcpu);
+
+ npriv->virt = FALSE;
+#ifdef CONFIG_64BIT
+ npriv->hstatus = HSTATUS_VSXL_RV64 << HSTATUS_VSXL_SHIFT;
+#else
+ npriv->hstatus = 0;
+#endif
+ npriv->hedeleg = 0;
+ npriv->hideleg = 0;
+ npriv->hvip = 0;
+ npriv->hcounteren = 0;
+ npriv->htimedelta = 0;
+ npriv->htimedeltah = 0;
+ npriv->htval = 0;
+ npriv->htinst = 0;
+ npriv->hgatp = 0;
+ npriv->vsstatus = 0;
+ npriv->vsie = 0;
+ npriv->vstvec = 0;
+ npriv->vsscratch = 0;
+ npriv->vsepc = 0;
+ npriv->vscause = 0;
+ npriv->vstval = 0;
+ npriv->vsatp = 0;
+
+ npriv->hvictl = 0;
+}
+
+void cpu_vcpu_nested_deinit(struct vmm_vcpu *vcpu)
+{
+ mmu_pgtbl_free(riscv_nested_priv(vcpu)->pgtbl);
+}
+
+void cpu_vcpu_nested_dump_regs(struct vmm_chardev *cdev,
+ struct vmm_vcpu *vcpu)
+{
+ struct riscv_priv_nested *npriv = riscv_nested_priv(vcpu);
+
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa, h))
+ return;
+
+ vmm_cprintf(cdev, "\n");
+ vmm_cprintf(cdev, " %s=%s\n",
+ " virt", (npriv->virt) ? "on" : "off");
+ vmm_cprintf(cdev, "\n");
+#ifdef CONFIG_64BIT
+ vmm_cprintf(cdev, "(V) %s=0x%"PRIADDR"\n",
+ " htimedelta", npriv->htimedelta);
+#else
+ vmm_cprintf(cdev, "(V) %s=0x%"PRIADDR" %s=0x%"PRIADDR"\n",
+ " htimedelta", npriv->htimedelta,
+ "htimedeltah", npriv->htimedeltah);
+#endif
+ vmm_cprintf(cdev, "(V) %s=0x%"PRIADDR" %s=0x%"PRIADDR"\n",
+ " hstatus", npriv->hstatus, " hgatp", npriv->hgatp);
+ vmm_cprintf(cdev, "(V) %s=0x%"PRIADDR" %s=0x%"PRIADDR"\n",
+ " hedeleg", npriv->hedeleg, " hideleg", npriv->hideleg);
+ vmm_cprintf(cdev, "(V) %s=0x%"PRIADDR" %s=0x%"PRIADDR"\n",
+ " hvip", npriv->hvip, " hcounteren", npriv->hcounteren);
+ vmm_cprintf(cdev, "(V) %s=0x%"PRIADDR" %s=0x%"PRIADDR"\n",
+ " htval", npriv->htval, " htinst", npriv->htinst);
+ vmm_cprintf(cdev, "(V) %s=0x%"PRIADDR" %s=0x%"PRIADDR"\n",
+ " vsstatus", npriv->vsstatus, " vsie", npriv->vsie);
+ vmm_cprintf(cdev, "(V) %s=0x%"PRIADDR" %s=0x%"PRIADDR"\n",
+ " vsatp", npriv->vsatp, " vstvec", npriv->vstvec);
+ vmm_cprintf(cdev, "(V) %s=0x%"PRIADDR" %s=0x%"PRIADDR"\n",
+ " vsscratch", npriv->vsscratch, " vsepc", npriv->vsepc);
+ vmm_cprintf(cdev, "(V) %s=0x%"PRIADDR" %s=0x%"PRIADDR"\n",
+ " vscause", npriv->vscause, " vstval", npriv->vstval);
+
+ vmm_cprintf(cdev, "(V) %s=0x%"PRIADDR"\n",
+ " hvictl", npriv->hvictl);
+}
+
+int cpu_vcpu_nested_smode_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
+ unsigned int csr_num, unsigned long *val,
+ unsigned long new_val, unsigned long wr_mask)
+{
+ int csr_shift = 0;
+ bool read_only = FALSE;
+ unsigned long *csr, zero = 0, writeable_mask = 0;
+ struct riscv_priv_nested *npriv = riscv_nested_priv(vcpu);
+
+ /*
+ * These CSRs should never trap for virtual-HS/U modes because
+ * we only emulate these CSRs for virtual-VS/VU modes.
+ */
+ if (!riscv_nested_virt(vcpu)) {
+ return VMM_EINVALID;
+ }
+
+ /*
+ * Access of these CSRs from virtual-VU mode should be forwarded
+ * as illegal instruction trap to virtual-HS mode.
+ */
+ if (!(regs->hstatus & HSTATUS_SPVP)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+
+ switch (csr_num) {
+ case CSR_SIE:
+ if (npriv->hvictl & HVICTL_VTI) {
+ return TRAP_RETURN_VIRTUAL_INSN;
+ }
+ csr = &npriv->vsie;
+ writeable_mask = VSIE_WRITEABLE & (npriv->hideleg >> 1);
+ break;
+ case CSR_SIEH:
+ if (npriv->hvictl & HVICTL_VTI) {
+ return TRAP_RETURN_VIRTUAL_INSN;
+ }
+ csr = &zero;
+ break;
+ case CSR_SIP:
+ if (npriv->hvictl & HVICTL_VTI) {
+ return TRAP_RETURN_VIRTUAL_INSN;
+ }
+ csr = &npriv->hvip;
+ csr_shift = 1;
+ writeable_mask = HVIP_VSSIP & npriv->hideleg;
+ break;
+ case CSR_SIPH:
+ if (npriv->hvictl & HVICTL_VTI) {
+ return TRAP_RETURN_VIRTUAL_INSN;
+ }
+ csr = &zero;
+ break;
+ default:
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+
+ if (val) {
+ *val = (csr_shift < 0) ?
+ (*csr) << -csr_shift : (*csr) >> csr_shift;
+ }
+
+ if (read_only) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ } else {
+ writeable_mask = (csr_shift < 0) ?
+ writeable_mask >> -csr_shift :
+ writeable_mask << csr_shift;
+ wr_mask = (csr_shift < 0) ?
+ wr_mask >> -csr_shift : wr_mask << csr_shift;
+ new_val = (csr_shift < 0) ?
+ new_val >> -csr_shift : new_val << csr_shift;
+ wr_mask &= writeable_mask;
+ *csr = (*csr & ~wr_mask) | (new_val & wr_mask);
+ }
+
+ return VMM_OK;
+}
+
+int cpu_vcpu_nested_hext_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
+ unsigned int csr_num, unsigned long *val,
+ unsigned long new_val, unsigned long wr_mask)
+{
+ int csr_shift = 0;
+ bool read_only = FALSE;
+ unsigned int csr_priv = (csr_num >> 8) & 0x3;
+ unsigned long *csr, mode, zero = 0, writeable_mask = 0;
+ struct riscv_priv_nested *npriv = riscv_nested_priv(vcpu);
+
+ /*
+ * Trap from virtual-VS and virtual-VU modes should be forwarded
+ * to virtual-HS mode as a virtual instruction trap.
+ */
+ if (riscv_nested_virt(vcpu)) {
+ return (csr_priv == (PRV_S + 1)) ?
+ TRAP_RETURN_VIRTUAL_INSN : TRAP_RETURN_ILLEGAL_INSN;
+ }
+
+ /*
+ * If H-extension is not available for VCPU then forward trap
+ * as illegal instruction trap to virtual-HS mode.
+ */
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa, h)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+
+ /*
+ * H-extension CSRs not allowed in virtual-U mode so forward trap
+ * as illegal instruction trap to virtual-HS mode.
+ */
+ if (!(regs->hstatus & HSTATUS_SPVP)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+
+ switch (csr_num) {
+ case CSR_HSTATUS:
+ csr = &npriv->hstatus;
+ writeable_mask = HSTATUS_VTSR | HSTATUS_VTW | HSTATUS_VTVM |
+ HSTATUS_HU | HSTATUS_SPVP | HSTATUS_SPV |
+ HSTATUS_GVA;
+ if (wr_mask & HSTATUS_SPV) {
+ /*
+ * Enable (or Disable) host SRET trapping for
+ * virtual-HS mode. This will be auto-disabled
+ * by cpu_vcpu_nested_set_virt() upon SRET trap
+ * from virtual-HS mode.
+ */
+ regs->hstatus &= ~HSTATUS_VTSR;
+ regs->hstatus |= (new_val & HSTATUS_SPV) ?
+ HSTATUS_VTSR : 0;
+ }
+ break;
+ case CSR_HEDELEG:
+ csr = &npriv->hedeleg;
+ writeable_mask = HEDELEG_WRITEABLE;
+ break;
+ case CSR_HIDELEG:
+ csr = &npriv->hideleg;
+ writeable_mask = HIDELEG_WRITEABLE;
+ break;
+ case CSR_HVIP:
+ csr = &npriv->hvip;
+ writeable_mask = HVIP_WRITEABLE;
+ break;
+ case CSR_HIE:
+ csr = &npriv->vsie;
+ csr_shift = -1;
+ writeable_mask = HVIP_WRITEABLE;
+ break;
+ case CSR_HIP:
+ csr = &npriv->hvip;
+ writeable_mask = HVIP_VSSIP;
+ break;
+ case CSR_HGEIP:
+ csr = &zero;
+ read_only = TRUE;
+ break;
+ case CSR_HGEIE:
+ csr = &zero;
+ break;
+ case CSR_HCOUNTEREN:
+ csr = &npriv->hcounteren;
+ writeable_mask = HCOUNTEREN_WRITEABLE;
+ break;
+ case CSR_HTIMEDELTA:
+ csr = &npriv->htimedelta;
+ writeable_mask = -1UL;
+ break;
+#ifndef CONFIG_64BIT
+ case CSR_HTIMEDELTAH:
+ csr = &npriv->htimedeltah;
+ writeable_mask = -1UL;
+ break;
+#endif
+ case CSR_HTVAL:
+ csr = &npriv->htval;
+ writeable_mask = -1UL;
+ break;
+ case CSR_HTINST:
+ csr = &npriv->htinst;
+ writeable_mask = -1UL;
+ break;
+ case CSR_HGATP:
+ csr = &npriv->hgatp;
+ writeable_mask = HGATP_MODE | HGATP_VMID | HGATP_PPN;
+ if (wr_mask & HGATP_MODE) {
+ mode = (new_val & HGATP_MODE) >> HGATP_MODE_SHIFT;
+ switch (mode) {
+ /*
+ * We (intentionally) support only Sv39x4 on RV64
+ * and Sv32x4 on RV32 for guest G-stage so that
+ * software page table walks on guest G-stage are
+ * faster.
+ */
+#ifdef CONFIG_64BIT
+ case HGATP_MODE_SV39X4:
+ if (riscv_stage2_mode != HGATP_MODE_SV48X4 &&
+ riscv_stage2_mode != HGATP_MODE_SV39X4) {
+ mode = HGATP_MODE_OFF;
+ }
+ break;
+#else
+ case HGATP_MODE_SV32X4:
+ if (riscv_stage2_mode != HGATP_MODE_SV32X4) {
+ mode = HGATP_MODE_OFF;
+ }
+ break;
+#endif
+ default:
+ mode = HGATP_MODE_OFF;
+ break;
+ }
+ new_val &= ~HGATP_MODE;
+ new_val |= (mode << HGATP_MODE_SHIFT) & HGATP_MODE;
+ }
+ break;
+ case CSR_VSSTATUS:
+ csr = &npriv->vsstatus;
+ writeable_mask = SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_UBE |
+ SSTATUS_SPP | SSTATUS_SUM | SSTATUS_MXR |
+ SSTATUS_FS | SSTATUS_UXL;
+ break;
+ case CSR_VSIP:
+ csr = &npriv->hvip;
+ csr_shift = 1;
+ writeable_mask = HVIP_VSSIP & npriv->hideleg;
+ break;
+ case CSR_VSIE:
+ csr = &npriv->vsie;
+ writeable_mask = VSIE_WRITEABLE & (npriv->hideleg >> 1);
+ break;
+ case CSR_VSTVEC:
+ csr = &npriv->vstvec;
+ writeable_mask = -1UL;
+ break;
+ case CSR_VSSCRATCH:
+ csr = &npriv->vsscratch;
+ writeable_mask = -1UL;
+ break;
+ case CSR_VSEPC:
+ csr = &npriv->vsepc;
+ writeable_mask = -1UL;
+ break;
+ case CSR_VSCAUSE:
+ csr = &npriv->vscause;
+ writeable_mask = 0x1fUL;
+ break;
+ case CSR_VSTVAL:
+ csr = &npriv->vstval;
+ writeable_mask = -1UL;
+ break;
+ case CSR_VSATP:
+ csr = &npriv->vsatp;
+ writeable_mask = SATP_MODE | SATP_ASID | SATP_PPN;
+ if (wr_mask & SATP_MODE) {
+ mode = (new_val & SATP_MODE) >> SATP_MODE_SHIFT;
+ switch (mode) {
+#ifdef CONFIG_64BIT
+ case SATP_MODE_SV48:
+ if (riscv_stage1_mode != SATP_MODE_SV48) {
+ mode = SATP_MODE_OFF;
+ }
+ break;
+ case SATP_MODE_SV39:
+ if (riscv_stage1_mode != SATP_MODE_SV48 &&
+ riscv_stage1_mode != SATP_MODE_SV39) {
+ mode = SATP_MODE_OFF;
+ }
+ break;
+#else
+ case SATP_MODE_SV32:
+ if (riscv_stage1_mode != SATP_MODE_SV32) {
+ mode = SATP_MODE_OFF;
+ }
+ break;
+#endif
+ default:
+ mode = SATP_MODE_OFF;
+ break;
+ }
+ new_val &= ~SATP_MODE;
+ new_val |= (mode << SATP_MODE_SHIFT) & SATP_MODE;
+ }
+ break;
+ case CSR_HVICTL:
+ csr = &npriv->hvictl;
+ writeable_mask = HVICTL_VTI | HVICTL_IID |
+ HVICTL_IPRIOM | HVICTL_IPRIO;
+ break;
+ default:
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+
+ if (val) {
+ *val = (csr_shift < 0) ?
+ (*csr) << -csr_shift : (*csr) >> csr_shift;
+ }
+
+ if (read_only) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ } else {
+ writeable_mask = (csr_shift < 0) ?
+ writeable_mask >> -csr_shift :
+ writeable_mask << csr_shift;
+ wr_mask = (csr_shift < 0) ?
+ wr_mask >> -csr_shift : wr_mask << csr_shift;
+ new_val = (csr_shift < 0) ?
+ new_val >> -csr_shift : new_val << csr_shift;
+ wr_mask &= writeable_mask;
+ *csr = (*csr & ~wr_mask) | (new_val & wr_mask);
+ }
+
+ return VMM_OK;
+}
+
+int cpu_vcpu_nested_page_fault(struct vmm_vcpu *vcpu,
+ bool trap_from_smode,
+ const struct cpu_vcpu_trap *trap,
+ struct cpu_vcpu_trap *out_trap)
+{
+ /* TODO: */
+ return VMM_OK;
+}
+
+void cpu_vcpu_nested_hfence_vvma(struct vmm_vcpu *vcpu,
+ unsigned long *vaddr, unsigned int *asid)
+{
+ /* TODO: */
+}
+
+void cpu_vcpu_nested_hfence_gvma(struct vmm_vcpu *vcpu,
+ physical_addr_t *gaddr, unsigned int *vmid)
+{
+ /* TODO: */
+}
+
+int cpu_vcpu_nested_hlv(struct vmm_vcpu *vcpu, unsigned long vaddr,
+ bool hlvx, void *data, unsigned long len,
+ unsigned long *out_scause,
+ unsigned long *out_stval,
+ unsigned long *out_htval)
+{
+ /* TODO: */
+ return VMM_OK;
+}
+
+int cpu_vcpu_nested_hsv(struct vmm_vcpu *vcpu, unsigned long vaddr,
+ const void *data, unsigned long len,
+ unsigned long *out_scause,
+ unsigned long *out_stval,
+ unsigned long *out_htval)
+{
+ /* TODO: */
+ return VMM_OK;
+}
+
+void cpu_vcpu_nested_set_virt(struct vmm_vcpu *vcpu, struct arch_regs *regs,
+ enum nested_set_virt_event event, bool virt,
+ bool spvp, bool gva)
+{
+ unsigned long tmp;
+ struct riscv_priv_nested *npriv = riscv_nested_priv(vcpu);
+
+ /* If H-extension is not available for VCPU then do nothing */
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa, h)) {
+ return;
+ }
+
+ /* Skip hardware CSR update if no change in virt state */
+ if (virt == npriv->virt)
+ goto skip_csr_update;
+
+ /* Swap hcounteren and hedeleg CSRs */
+ npriv->hcounteren = csr_swap(CSR_HCOUNTEREN, npriv->hcounteren);
+ npriv->hedeleg = csr_swap(CSR_HEDELEG, npriv->hedeleg);
+
+ /* Update interrupt delegation */
+ cpu_vcpu_irq_deleg_update(vcpu, virt);
+
+ /* Update time delta */
+ cpu_vcpu_time_delta_update(vcpu, virt);
+
+ /* Update G-stage page table */
+ cpu_vcpu_gstage_update(vcpu, virt);
+
+ /* Swap hardware vs<xyz> CSRs except vsie and vsstatus */
+ npriv->vstvec = csr_swap(CSR_VSTVEC, npriv->vstvec);
+ npriv->vsscratch = csr_swap(CSR_VSSCRATCH, npriv->vsscratch);
+ npriv->vsepc = csr_swap(CSR_VSEPC, npriv->vsepc);
+ npriv->vscause = csr_swap(CSR_VSCAUSE, npriv->vscause);
+ npriv->vstval = csr_swap(CSR_VSTVAL, npriv->vstval);
+ npriv->vsatp = csr_swap(CSR_VSATP, npriv->vsatp);
+
+ /* Update vsstatus CSR */
+ if (virt) {
+ /* Nested virtualization state changing from OFF to ON */
+
+ /*
+ * Update vsstatus in the following manner:
+ * 1) Swap hardware vsstatus (i.e. virtual-HS mode sstatus)
+ * with vsstatus in nested virtualization context (i.e.
+ * virtual-VS mode sstatus)
+ * 2) Swap host sstatus.FS (i.e. HS mode sstatus.FS) with
+ * the vsstatus.FS saved in nested virtualization context
+ * (i.e. virtual-HS mode sstatus.FS)
+ */
+ npriv->vsstatus = csr_swap(CSR_VSSTATUS, npriv->vsstatus);
+ tmp = regs->sstatus & SSTATUS_FS;
+ regs->sstatus &= ~SSTATUS_FS;
+ regs->sstatus |= (npriv->vsstatus & SSTATUS_FS);
+ npriv->vsstatus &= ~SSTATUS_FS;
+ npriv->vsstatus |= tmp;
+ } else {
+ /* Nested virtualization state changing from ON to OFF */
+
+ /*
+ * Update vsstatus in the following manner:
+ * 1) Swap host sstatus.FS (i.e. virtual-HS mode sstatus.FS)
+ * with vsstatus.FS saved in the nested virtualization
+ * context (i.e. HS mode sstatus.FS)
+ * 2) Swap hardware vsstatus (i.e. virtual-VS mode sstatus)
+ * with vsstatus in nested virtualization context (i.e.
+ * virtual-HS mode sstatus)
+ */
+ tmp = regs->sstatus & SSTATUS_FS;
+ regs->sstatus &= ~SSTATUS_FS;
+ regs->sstatus |= (npriv->vsstatus & SSTATUS_FS);
+ npriv->vsstatus &= ~SSTATUS_FS;
+ npriv->vsstatus |= tmp;
+ npriv->vsstatus = csr_swap(CSR_VSSTATUS, npriv->vsstatus);
+ }
+
+skip_csr_update:
+ if (event != NESTED_SET_VIRT_EVENT_SRET) {
+ /* Update Guest hstatus.SPV bit */
+ npriv->hstatus &= ~HSTATUS_SPV;
+ npriv->hstatus |= (npriv->virt) ? HSTATUS_SPV : 0;
+
+ /* Update Guest hstatus.SPVP bit */
+ if (npriv->virt) {
+ npriv->hstatus &= ~HSTATUS_SPVP;
+ if (spvp)
+ npriv->hstatus |= HSTATUS_SPVP;
+ }
+
+ /* Update Guest hstatus.GVA bit */
+ if (event == NESTED_SET_VIRT_EVENT_TRAP) {
+ npriv->hstatus &= ~HSTATUS_GVA;
+ npriv->hstatus |= (gva) ? HSTATUS_GVA : 0;
+ }
+ }
+
+ /* Update host SRET and VM trapping */
+ regs->hstatus &= ~HSTATUS_VTSR;
+ if (virt && (npriv->hstatus & HSTATUS_VTSR)) {
+ regs->hstatus |= HSTATUS_VTSR;
+ }
+ regs->hstatus &= ~HSTATUS_VTVM;
+ if (virt && (npriv->hstatus & HSTATUS_VTVM)) {
+ regs->hstatus |= HSTATUS_VTVM;
+ }
+
+ /* Update virt flag */
+ npriv->virt = virt;
+}
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_sbi.c b/arch/riscv/cpu/generic/cpu_vcpu_sbi.c
index fbd6a8f6..9788150e 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_sbi.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_sbi.c
@@ -74,6 +74,17 @@ int cpu_vcpu_sbi_ecall(struct vmm_vcpu *vcpu, ulong cause,
bool is_0_1_spec = FALSE;
unsigned long args[6];
+ /* Forward SBI calls from virtual-VS mode to virtual-HS mode */
+ if (riscv_nested_virt(vcpu)) {
+ trap.sepc = regs->sepc;
+ trap.scause = CAUSE_VIRTUAL_SUPERVISOR_ECALL;
+ trap.stval = 0;
+ trap.htval = 0;
+ trap.htinst = 0;
+ cpu_vcpu_redirect_trap(vcpu, regs, &trap);
+ return VMM_OK;
+ }
+
args[0] = regs->a0;
args[1] = regs->a1;
args[2] = regs->a2;
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_trap.c b/arch/riscv/cpu/generic/cpu_vcpu_trap.c
index 953e6e4b..e990ed4a 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_trap.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_trap.c
@@ -30,46 +30,86 @@
#include <libs/stringlib.h>
#include <generic_mmu.h>
-#include <cpu_vcpu_csr.h>
+#include <cpu_hwcap.h>
+#include <cpu_vcpu_nested.h>
#include <cpu_vcpu_trap.h>
#include <cpu_vcpu_unpriv.h>
-int cpu_vcpu_redirect_trap(struct vmm_vcpu *vcpu,
- arch_regs_t *regs,
- struct cpu_vcpu_trap *trap)
+void cpu_vcpu_redirect_smode_trap(arch_regs_t *regs,
+ struct cpu_vcpu_trap *trap, bool prev_spp)
{
+ /* Read Guest sstatus */
unsigned long vsstatus = csr_read(CSR_VSSTATUS);
- /* Change Guest SSTATUS.SPP bit */
+ /* Change Guest sstatus.SPP bit */
vsstatus &= ~SSTATUS_SPP;
- if (regs->sstatus & SSTATUS_SPP)
+ if (prev_spp)
vsstatus |= SSTATUS_SPP;
- /* Change Guest SSTATUS.SPIE bit */
+ /* Change Guest sstatus.SPIE bit */
vsstatus &= ~SSTATUS_SPIE;
if (vsstatus & SSTATUS_SIE)
vsstatus |= SSTATUS_SPIE;
- /* Clear Guest SSTATUS.SIE bit */
+ /* Clear Guest sstatus.SIE bit */
vsstatus &= ~SSTATUS_SIE;
- /* Update Guest SSTATUS */
+ /* Update Guest sstatus */
csr_write(CSR_VSSTATUS, vsstatus);
- /* Update Guest SCAUSE, STVAL, and SEPC */
+ /* Update Guest scause, stval, and sepc */
csr_write(CSR_VSCAUSE, trap->scause);
csr_write(CSR_VSTVAL, trap->stval);
csr_write(CSR_VSEPC, trap->sepc);
- /* Set Guest PC to Guest exception vector */
+ /* Set next PC to exception vector */
regs->sepc = csr_read(CSR_VSTVEC);
- return 0;
+ /* Set next privilege mode to supervisor */
+ regs->sstatus |= SSTATUS_SPP;
+}
+
+void cpu_vcpu_redirect_trap(struct vmm_vcpu *vcpu, arch_regs_t *regs,
+ struct cpu_vcpu_trap *trap)
+{
+ struct riscv_priv_nested *npriv = riscv_nested_priv(vcpu);
+ bool prev_spp = (regs->sstatus & SSTATUS_SPP) ? TRUE : FALSE;
+ bool gva = FALSE;
+
+ /* Determine GVA bit state */
+ switch (trap->scause) {
+ case CAUSE_MISALIGNED_FETCH:
+ case CAUSE_FETCH_ACCESS:
+ case CAUSE_MISALIGNED_LOAD:
+ case CAUSE_LOAD_ACCESS:
+ case CAUSE_MISALIGNED_STORE:
+ case CAUSE_STORE_ACCESS:
+ case CAUSE_FETCH_PAGE_FAULT:
+ case CAUSE_LOAD_PAGE_FAULT:
+ case CAUSE_STORE_PAGE_FAULT:
+ case CAUSE_FETCH_GUEST_PAGE_FAULT:
+ case CAUSE_LOAD_GUEST_PAGE_FAULT:
+ case CAUSE_STORE_GUEST_PAGE_FAULT:
+ gva = TRUE;
+ break;
+ default:
+ break;
+ }
+
+ /* Turn-off nested virtualization for virtual-HS mode */
+ cpu_vcpu_nested_set_virt(vcpu, regs, NESTED_SET_VIRT_EVENT_TRAP,
+ FALSE, prev_spp, gva);
+
+ /* Update Guest HTVAL and HTINST */
+ npriv->htval = trap->htval;
+ npriv->htinst = trap->htinst;
+
+ /* Update Guest supervisor state */
+ cpu_vcpu_redirect_smode_trap(regs, trap, prev_spp);
}
static int cpu_vcpu_stage2_map(struct vmm_vcpu *vcpu,
- arch_regs_t *regs,
- physical_addr_t fault_addr)
+ physical_addr_t fault_addr)
{
int rc, rc1;
u32 reg_flags = 0x0, pg_reg_flags = 0x0;
@@ -160,9 +200,9 @@ static int cpu_vcpu_emulate_load(struct vmm_vcpu *vcpu,
u16 data16;
u32 data32;
u64 data64;
+ struct cpu_vcpu_trap trap;
unsigned long insn, insn_len;
int rc = VMM_OK, shift = 0, len = 0;
- struct cpu_vcpu_trap trap = { 0 };
if (htinst & 0x1) {
/*
@@ -176,12 +216,18 @@ static int cpu_vcpu_emulate_load(struct vmm_vcpu *vcpu,
* Bit[0] == 0 implies trapped instruction value is
* zero or special value.
*/
+ trap.sepc = 0;
+ trap.scause = 0;
+ trap.stval = 0;
+ trap.htval = 0;
+ trap.htinst = 0;
insn = __cpu_vcpu_unpriv_read_insn(regs->sepc, &trap);
if (trap.scause) {
if (trap.scause == CAUSE_LOAD_PAGE_FAULT)
trap.scause = CAUSE_FETCH_PAGE_FAULT;
trap.sepc = trap.stval = regs->sepc;
- return cpu_vcpu_redirect_trap(vcpu, regs, &trap);
+ cpu_vcpu_redirect_trap(vcpu, regs, &trap);
+ return VMM_OK;
}
insn_len = INSN_LEN(insn);
}
@@ -287,8 +333,8 @@ static int cpu_vcpu_emulate_store(struct vmm_vcpu *vcpu,
u32 data32;
u64 data64;
int rc = VMM_OK, len = 0;
+ struct cpu_vcpu_trap trap;
unsigned long data, insn, insn_len;
- struct cpu_vcpu_trap trap = { 0 };
if (htinst & 0x1) {
/*
@@ -298,16 +344,22 @@ static int cpu_vcpu_emulate_store(struct vmm_vcpu *vcpu,
insn = htinst | INSN_16BIT_MASK;
insn_len = (htinst & 0x2) ? INSN_LEN(insn) : 2;
} else {
- /*
+ /*
* Bit[0] == 0 implies trapped instruction value is
* zero or special value.
*/
+ trap.sepc = 0;
+ trap.scause = 0;
+ trap.stval = 0;
+ trap.htval = 0;
+ trap.htinst = 0;
insn = __cpu_vcpu_unpriv_read_insn(regs->sepc, &trap);
if (trap.scause) {
if (trap.scause == CAUSE_LOAD_PAGE_FAULT)
trap.scause = CAUSE_FETCH_PAGE_FAULT;
trap.sepc = trap.stval = regs->sepc;
- return cpu_vcpu_redirect_trap(vcpu, regs, &trap);
+ cpu_vcpu_redirect_trap(vcpu, regs, &trap);
+ return VMM_OK;
}
insn_len = INSN_LEN(insn);
}
@@ -384,9 +436,31 @@ int cpu_vcpu_page_fault(struct vmm_vcpu *vcpu,
arch_regs_t *regs,
struct cpu_vcpu_trap *trap)
{
+ int rc;
struct vmm_region *reg;
+ struct cpu_vcpu_trap otrap;
physical_addr_t fault_addr;
+ if (riscv_nested_virt(vcpu)) {
+ otrap.sepc = 0;
+ otrap.scause = 0;
+ otrap.stval = 0;
+ otrap.htval = 0;
+ otrap.htinst = 0;
+ rc = cpu_vcpu_nested_page_fault(vcpu,
+ (regs->hstatus & HSTATUS_SPVP) ? TRUE : FALSE,
+ trap, &otrap);
+ if (rc) {
+ return rc;
+ }
+
+ if (otrap.scause) {
+ cpu_vcpu_redirect_trap(vcpu, regs, &otrap);
+ }
+
+ return VMM_OK;
+ }
+
fault_addr = ((physical_addr_t)trap->htval << 2);
fault_addr |= ((physical_addr_t)trap->stval & 0x3);
@@ -397,18 +471,18 @@ int cpu_vcpu_page_fault(struct vmm_vcpu *vcpu,
/* Emulate load/store instructions for virtual device */
switch (trap->scause) {
case CAUSE_LOAD_GUEST_PAGE_FAULT:
- return cpu_vcpu_emulate_load(vcpu, regs,
- fault_addr, trap->htinst);
+ return cpu_vcpu_emulate_load(vcpu, regs, fault_addr,
+ trap->htinst);
case CAUSE_STORE_GUEST_PAGE_FAULT:
- return cpu_vcpu_emulate_store(vcpu, regs,
- fault_addr, trap->htinst);
+ return cpu_vcpu_emulate_store(vcpu, regs, fault_addr,
+ trap->htinst);
default:
return VMM_ENOTSUPP;
};
}
/* Mapping does not exist hence create one */
- return cpu_vcpu_stage2_map(vcpu, regs, fault_addr);
+ return cpu_vcpu_stage2_map(vcpu, fault_addr);
}
static int truly_illegal_insn(struct vmm_vcpu *vcpu,
@@ -421,75 +495,788 @@ static int truly_illegal_insn(struct vmm_vcpu *vcpu,
trap.sepc = regs->sepc;
trap.scause = CAUSE_ILLEGAL_INSTRUCTION;
trap.stval = insn;
- return cpu_vcpu_redirect_trap(vcpu, regs, &trap);
+ trap.htval = 0;
+ trap.htinst = 0;
+ cpu_vcpu_redirect_trap(vcpu, regs, &trap);
+
+ return VMM_OK;
}
-static int system_opcode_insn(struct vmm_vcpu *vcpu,
+static int truly_virtual_insn(struct vmm_vcpu *vcpu,
arch_regs_t *regs,
ulong insn)
{
- int rc = VMM_OK, do_write, rs1_num;
- ulong rs1_val, csr_num, csr_val, new_csr_val;
+ struct cpu_vcpu_trap trap;
- if ((insn & INSN_MASK_WFI) == INSN_MATCH_WFI) {
- /* Wait for irq with default timeout */
- vmm_vcpu_irq_wait_timeout(vcpu, 0);
- goto done;
- }
+ /* Redirect trap to Guest VCPU */
+ trap.sepc = regs->sepc;
+ trap.scause = CAUSE_VIRTUAL_INST_FAULT;
+ trap.stval = insn;
+ trap.htval = 0;
+ trap.htinst = 0;
+ cpu_vcpu_redirect_trap(vcpu, regs, &trap);
- rs1_num = (insn >> 15) & 0x1f;
- rs1_val = GET_RS1(insn, regs);
- csr_num = insn >> 20;
+ return VMM_OK;
+}
- rc = cpu_vcpu_csr_read(vcpu, csr_num, &csr_val);
- if (rc == VMM_EINVALID) {
- return truly_illegal_insn(vcpu, regs, insn);
- }
- if (rc) {
- return rc;
- }
+struct system_opcode_func {
+ ulong mask;
+ ulong match;
+ /*
+ * Possible return values are as follows:
+ * 1) Returns < 0 for error case
+ * 2) Return == 0 to increment PC and continue
+ * 3) Return == 1 to inject illegal instruction trap and continue
+ * 4) Return == 2 to inject virtual instruction trap and continue
+ * 5) Return == 3 to do nothing and continue
+ */
+ int (*func)(struct vmm_vcpu *vcpu, arch_regs_t *regs, ulong insn);
+};
- do_write = rs1_num;
+struct csr_func {
+ unsigned int csr_num;
+ /*
+ * Possible return values are as follows:
+ * 1) Returns < 0 for error case
+ * 2) Return == 0 to increment PC and continue
+ * 3) Return == 1 to inject illegal instruction trap and continue
+ * 4) Return == 2 to inject virtual instruction trap and continue
+ * 5) Return == 3 to do nothing and continue
+ */
+ int (*rmw_func)(struct vmm_vcpu *vcpu, arch_regs_t *regs,
+ unsigned int csr_num, unsigned long *val,
+ unsigned long new_val, unsigned long wr_mask);
+};
+
+static const struct csr_func csr_funcs[] = {
+ {
+ .csr_num = CSR_SIE,
+ .rmw_func = cpu_vcpu_nested_smode_csr_rmw,
+ },
+ {
+ .csr_num = CSR_SIEH,
+ .rmw_func = cpu_vcpu_nested_smode_csr_rmw,
+ },
+ {
+ .csr_num = CSR_SIP,
+ .rmw_func = cpu_vcpu_nested_smode_csr_rmw,
+ },
+ {
+ .csr_num = CSR_SIPH,
+ .rmw_func = cpu_vcpu_nested_smode_csr_rmw,
+ },
+ {
+ .csr_num = CSR_HSTATUS,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_HEDELEG,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_HIDELEG,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_HVIP,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_HIE,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_HIP,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_HGEIP,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_HGEIE,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_HCOUNTEREN,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_HTIMEDELTA,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_HTIMEDELTAH,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_HTVAL,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_HTINST,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_HGATP,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_VSSTATUS,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_VSIP,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_VSIE,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_VSTVEC,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_VSSCRATCH,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_VSEPC,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_VSCAUSE,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_VSTVAL,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_VSATP,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+};
+
+static int csr_insn(struct vmm_vcpu *vcpu, arch_regs_t *regs, ulong insn)
+{
+ int i, rc = TRAP_RETURN_ILLEGAL_INSN;
+ unsigned int csr_num = insn >> SH_RS2;
+ unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
+ ulong rs1_val = GET_RS1(insn, regs);
+ const struct csr_func *tcfn, *cfn = NULL;
+ ulong val = 0, wr_mask = 0, new_val = 0;
+
+ /* Decode the CSR instruction */
switch (GET_RM(insn)) {
case 1:
- new_csr_val = rs1_val;
- do_write = 1;
+ wr_mask = -1UL;
+ new_val = rs1_val;
break;
case 2:
- new_csr_val = csr_val | rs1_val;
+ wr_mask = rs1_val;
+ new_val = -1UL;
break;
- case 3: new_csr_val = csr_val & ~rs1_val;
+ case 3:
+ wr_mask = rs1_val;
+ new_val = 0;
break;
case 5:
- new_csr_val = rs1_num;
- do_write = 1;
+ wr_mask = -1UL;
+ new_val = rs1_num;
break;
case 6:
- new_csr_val = csr_val | rs1_num;
+ wr_mask = rs1_num;
+ new_val = -1UL;
break;
case 7:
- new_csr_val = csr_val & ~rs1_num;
+ wr_mask = rs1_num;
+ new_val = 0;
break;
default:
- return truly_illegal_insn(vcpu, regs, insn);
- };
+ return rc;
+ }
- if (do_write) {
- rc = cpu_vcpu_csr_write(vcpu, csr_num, new_csr_val);
- if (rc == VMM_EINVALID) {
- return truly_illegal_insn(vcpu, regs, insn);
+ /* Find CSR function */
+ for (i = 0; i < array_size(csr_funcs); i++) {
+ tcfn = &csr_funcs[i];
+ if (tcfn->csr_num == csr_num) {
+ cfn = tcfn;
+ break;
}
- if (rc) {
- return rc;
+ }
+ if (!cfn || !cfn->rmw_func) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+
+ /* Do CSR emulation */
+ rc = cfn->rmw_func(vcpu, regs, csr_num, &val, new_val, wr_mask);
+ if (rc) {
+ return rc;
+ }
+
+ /* Update destination register for CSR reads */
+ if ((insn >> SH_RD) & MASK_RX) {
+ SET_RD(insn, regs, val);
+ }
+
+ return VMM_OK;
+}
+
+static int sret_insn(struct vmm_vcpu *vcpu, arch_regs_t *regs, ulong insn)
+{
+ bool next_virt;
+ unsigned long vsstatus, next_sepc, next_spp;
+
+ /*
+ * Trap from virtual-VS and virtual-VU modes should be forwarded to
+ * virtual-HS mode as a virtual instruction trap.
+ */
+ if (riscv_nested_virt(vcpu)) {
+ return TRAP_RETURN_VIRTUAL_INSN;
+ }
+
+ /*
+ * Trap from virtual-U mode should be forwarded to virtual-HS mode
+ * as illegal instruction trap.
+ */
+ if (!(regs->hstatus & HSTATUS_SPVP)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+ vsstatus = csr_read(CSR_VSSTATUS);
+
+ /*
+ * Find next nested virtualization mode, next privilege mode,
+ * and next sepc
+ */
+ next_virt = (riscv_nested_priv(vcpu)->hstatus & HSTATUS_SPV) ?
+ TRUE : FALSE;
+ next_sepc = csr_read(CSR_VSEPC);
+ next_spp = vsstatus & SSTATUS_SPP;
+
+ /* Update Guest sstatus.sie */
+ vsstatus &= ~SSTATUS_SIE;
+ vsstatus |= (vsstatus & SSTATUS_SPIE) ? SSTATUS_SIE : 0;
+ csr_write(CSR_VSSTATUS, vsstatus);
+
+ /* Update return address and return privilege mode */
+ regs->sepc = next_sepc;
+ regs->sstatus &= ~SSTATUS_SPP;
+ regs->sstatus |= next_spp;
+
+ /* Set nested virtualization state based on guest hstatus.SPV */
+ cpu_vcpu_nested_set_virt(vcpu, regs, NESTED_SET_VIRT_EVENT_SRET,
+ next_virt, FALSE, FALSE);
+
+ return TRAP_RETURN_CONTINUE;
+}
+
+static int wfi_insn(struct vmm_vcpu *vcpu, arch_regs_t *regs, ulong insn)
+{
+ /*
+ * Trap from virtual-VS and virtual-VU modes should be forwarded to
+ * virtual-HS mode as a virtual instruction trap.
+ */
+ if (riscv_nested_virt(vcpu)) {
+ return TRAP_RETURN_VIRTUAL_INSN;
+ }
+
+ /*
+ * Trap from virtual-U mode should be forwarded to virtual-HS mode
+ * as illegal instruction trap.
+ */
+ if (!(regs->hstatus & HSTATUS_SPVP)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+
+ /* Wait for irq with default timeout */
+ vmm_vcpu_irq_wait_timeout(vcpu, 0);
+ return VMM_OK;
+}
+
+static int hfence_vvma_insn(struct vmm_vcpu *vcpu,
+ arch_regs_t *regs, ulong insn)
+{
+ unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
+ unsigned int rs2_num = (insn >> SH_RS2) & MASK_RX;
+ unsigned long vaddr = GET_RS1(insn, regs);
+ unsigned int asid = GET_RS2(insn, regs);
+
+ /*
+ * If H-extension is not available for VCPU then forward trap
+ * as illegal instruction trap to virtual-HS mode.
+ */
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa, h)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+
+ /*
+ * Trap from virtual-VS and virtual-VU modes should be forwarded to
+ * virtual-HS mode as a virtual instruction trap.
+ */
+ if (riscv_nested_virt(vcpu)) {
+ return TRAP_RETURN_VIRTUAL_INSN;
+ }
+
+ /*
+ * Trap from virtual-U mode should be forwarded to virtual-HS mode
+ * as illegal instruction trap.
+ */
+ if (!(regs->hstatus & HSTATUS_SPVP)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+
+ if (!rs1_num && !rs2_num) {
+ cpu_vcpu_nested_hfence_vvma(vcpu, NULL, NULL);
+ } else if (!rs1_num && rs2_num) {
+ cpu_vcpu_nested_hfence_vvma(vcpu, NULL, &asid);
+ } else if (rs1_num && !rs2_num) {
+ cpu_vcpu_nested_hfence_vvma(vcpu, &vaddr, NULL);
+ } else {
+ cpu_vcpu_nested_hfence_vvma(vcpu, &vaddr, &asid);
+ }
+
+ return VMM_OK;
+}
+
+static int hfence_gvma_insn(struct vmm_vcpu *vcpu,
+ arch_regs_t *regs, ulong insn)
+{
+ unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
+ unsigned int rs2_num = (insn >> SH_RS2) & MASK_RX;
+ physical_addr_t gaddr = GET_RS1(insn, regs) << 2;
+ unsigned int vmid = GET_RS2(insn, regs);
+
+ /*
+ * If H-extension is not available for VCPU then forward trap
+ * as illegal instruction trap to virtual-HS mode.
+ */
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa, h)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+
+ /*
+ * Trap from virtual-VS and virtual-VU modes should be forwarded to
+ * virtual-HS mode as a virtual instruction trap.
+ */
+ if (riscv_nested_virt(vcpu)) {
+ return TRAP_RETURN_VIRTUAL_INSN;
+ }
+
+ /*
+ * Trap from virtual-U mode should be forwarded to virtual-HS mode
+ * as illegal instruction trap.
+ */
+ if (!(regs->hstatus & HSTATUS_SPVP)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+
+ if (!rs1_num && !rs2_num) {
+ cpu_vcpu_nested_hfence_gvma(vcpu, NULL, NULL);
+ } else if (!rs1_num && rs2_num) {
+ cpu_vcpu_nested_hfence_gvma(vcpu, NULL, &vmid);
+ } else if (rs1_num && !rs2_num) {
+ cpu_vcpu_nested_hfence_gvma(vcpu, &gaddr, NULL);
+ } else {
+ cpu_vcpu_nested_hfence_gvma(vcpu, &gaddr, &vmid);
+ }
+
+ return VMM_OK;
+}
+
+union hxv_reg_data {
+ ulong data_ulong;
+ u64 data_u64;
+ u32 data_u32;
+ u16 data_u16;
+ u8 data_u8;
+};
+
+static int hlv_insn(struct vmm_vcpu *vcpu, arch_regs_t *regs, ulong insn)
+{
+ int rc;
+ void *data;
+ bool hlvx = FALSE;
+ union hxv_reg_data v;
+ struct cpu_vcpu_trap trap;
+ unsigned long shift = 0, len, vaddr;
+
+ /*
+ * If H-extension is not available for VCPU then forward trap
+ * as illegal instruction trap to virtual-HS mode.
+ */
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa, h)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+
+ /*
+ * Trap from virtual-VS and virtual-VU modes should be forwarded to
+ * virtual-HS mode as a virtual instruction trap.
+ */
+ if (riscv_nested_virt(vcpu)) {
+ return TRAP_RETURN_VIRTUAL_INSN;
+ }
+
+ /*
+ * Trap from virtual-U mode should be forwarded to virtual-HS mode
+ * as illegal instruction trap when guest hstatus.HU == 0.
+ */
+ if (!(regs->hstatus & HSTATUS_SPVP) &&
+ !(riscv_nested_priv(vcpu)->hstatus & HSTATUS_HU)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+
+ vaddr = GET_RS1(insn, regs);
+ v.data_u64 = 0;
+
+ if ((insn & INSN_MASK_HLV_B) == INSN_MATCH_HLV_B) {
+ data = &v.data_u8;
+ len = 1;
+ shift = (sizeof(long) - 1) * 8;
+ } else if ((insn & INSN_MASK_HLV_BU) == INSN_MATCH_HLV_BU) {
+ data = &v.data_u8;
+ len = 1;
+ } else if ((insn & INSN_MASK_HLV_H) == INSN_MATCH_HLV_H) {
+ data = &v.data_u16;
+ len = 2;
+ shift = (sizeof(long) - 2) * 8;
+ } else if ((insn & INSN_MASK_HLV_HU) == INSN_MATCH_HLV_HU) {
+ data = &v.data_u16;
+ len = 2;
+ } else if ((insn & INSN_MASK_HLVX_HU) == INSN_MATCH_HLVX_HU) {
+ data = &v.data_u16;
+ len = 2;
+ hlvx = TRUE;
+ } else if ((insn & INSN_MASK_HLV_W) == INSN_MATCH_HLV_W) {
+ data = &v.data_u32;
+ len = 4;
+ shift = (sizeof(long) - 4) * 8;
+ } else if ((insn & INSN_MASK_HLV_WU) == INSN_MATCH_HLV_WU) {
+ data = &v.data_u32;
+ len = 4;
+ } else if ((insn & INSN_MASK_HLVX_WU) == INSN_MATCH_HLVX_WU) {
+ data = &v.data_u32;
+ len = 4;
+ hlvx = TRUE;
+ } else if ((insn & INSN_MASK_HLV_D) == INSN_MATCH_HLV_D) {
+ data = &v.data_u64;
+ len = 8;
+ } else {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+
+ trap.sepc = regs->sepc;
+ trap.scause = 0;
+ trap.stval = 0;
+ trap.htval = 0;
+ trap.htinst = insn;
+
+ rc = cpu_vcpu_nested_hlv(vcpu, vaddr, hlvx, data, len,
+ &trap.scause, &trap.stval, &trap.htval);
+ if (rc) {
+ return rc;
+ }
+
+ if (trap.scause) {
+ cpu_vcpu_redirect_trap(vcpu, regs, &trap);
+ return TRAP_RETURN_CONTINUE;
+ } else {
+ SET_RD(insn, regs, ((long)(v.data_ulong << shift)) >> shift);
+ }
+
+ return VMM_OK;
+}
+
+static int hsv_insn(struct vmm_vcpu *vcpu, arch_regs_t *regs, ulong insn)
+{
+ int rc;
+ void *data;
+ union hxv_reg_data v;
+ unsigned long len, vaddr;
+ struct cpu_vcpu_trap trap;
+
+ /*
+ * If H-extension is not available for VCPU then forward trap
+ * as illegal instruction trap to virtual-HS mode.
+ */
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa, h)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+
+ /*
+ * Trap from virtual-VS and virtual-VU modes should be forwarded to
+ * virtual-HS mode as a virtual instruction trap.
+ */
+ if (riscv_nested_virt(vcpu)) {
+ return TRAP_RETURN_VIRTUAL_INSN;
+ }
+
+ /*
+ * Trap from virtual-U mode should be forwarded to virtual-HS mode
+ * as illegal instruction trap when guest hstatus.HU == 0.
+ */
+ if (!(regs->hstatus & HSTATUS_SPVP) &&
+ !(riscv_nested_priv(vcpu)->hstatus & HSTATUS_HU)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+
+ vaddr = GET_RS1(insn, regs);
+ v.data_ulong = GET_RS2(insn, regs);
+
+ if ((insn & INSN_MASK_HSV_B) == INSN_MATCH_HSV_B) {
+ data = &v.data_u8;
+ len = 1;
+ } else if ((insn & INSN_MASK_HSV_H) == INSN_MATCH_HSV_H) {
+ data = &v.data_u16;
+ len = 2;
+ } else if ((insn & INSN_MASK_HSV_W) == INSN_MATCH_HSV_W) {
+ data = &v.data_u32;
+ len = 4;
+ } else if ((insn & INSN_MASK_HSV_D) == INSN_MATCH_HSV_D) {
+ data = &v.data_u64;
+ len = 8;
+ } else {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+
+ trap.sepc = regs->sepc;
+ trap.scause = 0;
+ trap.stval = 0;
+ trap.htval = 0;
+ trap.htinst = insn;
+
+ rc = cpu_vcpu_nested_hsv(vcpu, vaddr, data, len,
+ &trap.scause, &trap.stval, &trap.htval);
+ if (rc) {
+ return rc;
+ }
+
+ if (trap.scause) {
+ cpu_vcpu_redirect_trap(vcpu, regs, &trap);
+ return TRAP_RETURN_CONTINUE;
+ }
+
+ return VMM_OK;
+}
+
+static const struct system_opcode_func system_opcode_funcs[] = {
+ {
+ .mask = INSN_MASK_CSRRW,
+ .match = INSN_MATCH_CSRRW,
+ .func = csr_insn,
+ },
+ {
+ .mask = INSN_MASK_CSRRS,
+ .match = INSN_MATCH_CSRRS,
+ .func = csr_insn,
+ },
+ {
+ .mask = INSN_MASK_CSRRC,
+ .match = INSN_MATCH_CSRRC,
+ .func = csr_insn,
+ },
+ {
+ .mask = INSN_MASK_CSRRWI,
+ .match = INSN_MATCH_CSRRWI,
+ .func = csr_insn,
+ },
+ {
+ .mask = INSN_MASK_CSRRSI,
+ .match = INSN_MATCH_CSRRSI,
+ .func = csr_insn,
+ },
+ {
+ .mask = INSN_MASK_CSRRCI,
+ .match = INSN_MATCH_CSRRCI,
+ .func = csr_insn,
+ },
+ {
+ .mask = INSN_MASK_SRET,
+ .match = INSN_MATCH_SRET,
+ .func = sret_insn,
+ },
+ {
+ .mask = INSN_MASK_WFI,
+ .match = INSN_MATCH_WFI,
+ .func = wfi_insn,
+ },
+ {
+ .mask = INSN_MASK_HFENCE_VVMA,
+ .match = INSN_MATCH_HFENCE_VVMA,
+ .func = hfence_vvma_insn,
+ },
+ {
+ .mask = INSN_MASK_HFENCE_GVMA,
+ .match = INSN_MATCH_HFENCE_GVMA,
+ .func = hfence_gvma_insn,
+ },
+ {
+ .mask = INSN_MASK_HLV_B,
+ .match = INSN_MATCH_HLV_B,
+ .func = hlv_insn,
+ },
+ {
+ .mask = INSN_MASK_HLV_BU,
+ .match = INSN_MATCH_HLV_BU,
+ .func = hlv_insn,
+ },
+ {
+ .mask = INSN_MASK_HLV_H,
+ .match = INSN_MATCH_HLV_H,
+ .func = hlv_insn,
+ },
+ {
+ .mask = INSN_MASK_HLV_HU,
+ .match = INSN_MATCH_HLV_HU,
+ .func = hlv_insn,
+ },
+ {
+ .mask = INSN_MASK_HLVX_HU,
+ .match = INSN_MATCH_HLVX_HU,
+ .func = hlv_insn,
+ },
+ {
+ .mask = INSN_MASK_HLV_W,
+ .match = INSN_MATCH_HLV_W,
+ .func = hlv_insn,
+ },
+ {
+ .mask = INSN_MASK_HLV_WU,
+ .match = INSN_MATCH_HLV_WU,
+ .func = hlv_insn,
+ },
+ {
+ .mask = INSN_MASK_HLVX_WU,
+ .match = INSN_MATCH_HLVX_WU,
+ .func = hlv_insn,
+ },
+ {
+ .mask = INSN_MASK_HLV_D,
+ .match = INSN_MATCH_HLV_D,
+ .func = hlv_insn,
+ },
+ {
+ .mask = INSN_MASK_HSV_B,
+ .match = INSN_MATCH_HSV_B,
+ .func = hsv_insn,
+ },
+ {
+ .mask = INSN_MASK_HSV_H,
+ .match = INSN_MATCH_HSV_H,
+ .func = hsv_insn,
+ },
+ {
+ .mask = INSN_MASK_HSV_W,
+ .match = INSN_MATCH_HSV_W,
+ .func = hsv_insn,
+ },
+ {
+ .mask = INSN_MASK_HSV_D,
+ .match = INSN_MATCH_HSV_D,
+ .func = hsv_insn,
+ },
+};
+
+static int system_opcode_insn(struct vmm_vcpu *vcpu,
+ arch_regs_t *regs,
+ ulong insn)
+{
+ int i, rc = TRAP_RETURN_ILLEGAL_INSN;
+ const struct system_opcode_func *ifn;
+
+ for (i = 0; i < array_size(system_opcode_funcs); i++) {
+ ifn = &system_opcode_funcs[i];
+ if ((insn & ifn->mask) == ifn->match) {
+ rc = ifn->func(vcpu, regs, insn);
+ break;
}
}
- SET_RD(insn, regs, csr_val);
+ if (rc == TRAP_RETURN_ILLEGAL_INSN)
+ return truly_illegal_insn(vcpu, regs, insn);
+ else if (rc == TRAP_RETURN_VIRTUAL_INSN)
+ return truly_virtual_insn(vcpu, regs, insn);
-done:
- regs->sepc += 4;
+ if (!rc) {
+ regs->sepc += INSN_LEN(insn);
+ }
- return rc;
+ return (rc < 0) ? rc : VMM_OK;
+}
+
+int cpu_vcpu_general_fault(struct vmm_vcpu *vcpu,
+ arch_regs_t *regs, struct cpu_vcpu_trap *trap)
+{
+ struct cpu_vcpu_trap itrap = { 0 };
+
+ /*
+ * Trap from virtual-VS and virtual-VU modes should be forwarded
+ * to virtual-HS mode.
+ */
+
+ if (!riscv_nested_virt(vcpu)) {
+ return VMM_EINVALID;
+ }
+
+ /*
+ * Blindly forward all general faults to virtual-HS mode
+ * except illegal instruction fault
+ */
+ if (trap->scause != CAUSE_ILLEGAL_INSTRUCTION) {
+ cpu_vcpu_redirect_trap(vcpu, regs, trap);
+ return VMM_OK;
+ }
+
+ /* Update trap->stval for illegal instruction fault */
+ if (unlikely((trap->stval & INSN_16BIT_MASK) != INSN_16BIT_MASK)) {
+ if (trap->stval == 0) {
+ trap->stval = __cpu_vcpu_unpriv_read_insn(regs->sepc,
+ &itrap);
+ if (itrap.scause) {
+ if (itrap.scause == CAUSE_LOAD_PAGE_FAULT)
+ itrap.scause = CAUSE_FETCH_PAGE_FAULT;
+ itrap.stval = itrap.sepc = regs->sepc;
+ cpu_vcpu_redirect_trap(vcpu, regs, &itrap);
+ return VMM_OK;
+ }
+ }
+ }
+
+ /* Forward illegal instruction fault */
+ return truly_illegal_insn(vcpu, regs, trap->stval);
+}
+
+int cpu_vcpu_illegal_insn_fault(struct vmm_vcpu *vcpu,
+ arch_regs_t *regs,
+ unsigned long stval)
+{
+ unsigned long insn = stval;
+ struct cpu_vcpu_trap trap = { 0 };
+
+ /*
+ * Trap from virtual-VS and virtual-VU modes should be forwarded to
+ * virtual-HS mode as an illegal instruction trap.
+ */
+
+ if (!riscv_nested_virt(vcpu)) {
+ return VMM_EINVALID;
+ }
+
+ if (unlikely((insn & INSN_16BIT_MASK) != INSN_16BIT_MASK)) {
+ if (insn == 0) {
+ insn = __cpu_vcpu_unpriv_read_insn(regs->sepc, &trap);
+ if (trap.scause) {
+ if (trap.scause == CAUSE_LOAD_PAGE_FAULT)
+ trap.scause = CAUSE_FETCH_PAGE_FAULT;
+ trap.stval = trap.sepc = regs->sepc;
+ cpu_vcpu_redirect_trap(vcpu, regs, &trap);
+ return VMM_OK;
+ }
+ }
+ }
+
+ return truly_illegal_insn(vcpu, regs, insn);
}
int cpu_vcpu_virtual_insn_fault(struct vmm_vcpu *vcpu,
@@ -499,18 +1286,18 @@ int cpu_vcpu_virtual_insn_fault(struct vmm_vcpu *vcpu,
unsigned long insn = stval;
struct cpu_vcpu_trap trap = { 0 };
- if (unlikely((insn & 3) != 3)) {
+ if (unlikely((insn & INSN_16BIT_MASK) != INSN_16BIT_MASK)) {
if (insn == 0) {
insn = __cpu_vcpu_unpriv_read_insn(regs->sepc, &trap);
if (trap.scause) {
if (trap.scause == CAUSE_LOAD_PAGE_FAULT)
trap.scause = CAUSE_FETCH_PAGE_FAULT;
trap.stval = trap.sepc = regs->sepc;
- return cpu_vcpu_redirect_trap(vcpu, regs,
- &trap);
+ cpu_vcpu_redirect_trap(vcpu, regs, &trap);
+ return VMM_OK;
}
}
- if ((insn & 3) != 3)
+ if ((insn & INSN_16BIT_MASK) != INSN_16BIT_MASK)
return truly_illegal_insn(vcpu, regs, insn);
}
@@ -521,3 +1308,78 @@ int cpu_vcpu_virtual_insn_fault(struct vmm_vcpu *vcpu,
return truly_illegal_insn(vcpu, regs, insn);
};
}
+
+void cpu_vcpu_take_vsirq(struct vmm_vcpu *vcpu, struct arch_regs *regs)
+{
+ int vsirq;
+ bool next_spp;
+ unsigned long irqs;
+ struct cpu_vcpu_trap trap;
+ struct riscv_priv_nested *npriv;
+
+ /* Do nothing for Orphan VCPUs */
+ if (!vcpu->is_normal) {
+ return;
+ }
+
+ /* Do nothing if virt state is OFF */
+ npriv = riscv_nested_priv(vcpu);
+ if (!npriv->virt) {
+ return;
+ }
+
+ /*
+ * Determine whether we are resuming in virtual-VS mode
+ * or virtual-VU mode
+ */
+ next_spp = (regs->sstatus & SSTATUS_SPP) ? TRUE : FALSE;
+
+ /*
+ * Do nothing if we are going to virtual-VS mode and
+ * interrupts are disabled
+ */
+ if (next_spp && !(csr_read(CSR_VSSTATUS) & SSTATUS_SIE)) {
+ return;
+ }
+
+ /* Determine virtual-VS mode interrupt number */
+ vsirq = 0;
+ irqs = npriv->hvip;
+ irqs &= npriv->vsie << 1;
+ irqs &= npriv->hideleg;
+ if (irqs & MIP_VSEIP) {
+ vsirq = IRQ_S_EXT;
+ } else if (irqs & MIP_VSTIP) {
+ vsirq = IRQ_S_TIMER;
+ } else if (irqs & MIP_VSSIP) {
+ vsirq = IRQ_S_SOFT;
+ }
+
+ /* Take virtual-VS mode interrupt */
+ if (vsirq > 0) {
+ trap.scause = SCAUSE_INTERRUPT_MASK | vsirq;
+ trap.sepc = regs->sepc;
+ trap.stval = 0;
+ trap.htval = 0;
+ trap.htinst = 0;
+ cpu_vcpu_redirect_smode_trap(regs, &trap, next_spp);
+ }
+
+ return;
+}
+
+int cpu_vcpu_redirect_vsirq(struct vmm_vcpu *vcpu, arch_regs_t *regs,
+ unsigned long irq)
+{
+ struct cpu_vcpu_trap trap = { 0 };
+
+ if (!vcpu || !vcpu->is_normal || !riscv_nested_virt(vcpu)) {
+ return VMM_EFAIL;
+ }
+
+ trap.sepc = regs->sepc;
+ trap.scause = SCAUSE_INTERRUPT_MASK | (irq - 1);
+ cpu_vcpu_redirect_trap(vcpu, regs, &trap);
+
+ return VMM_OK;
+}
diff --git a/arch/riscv/cpu/generic/include/cpu_vcpu_csr.h b/arch/riscv/cpu/generic/include/cpu_vcpu_csr.h
deleted file mode 100644
index cc1bb580..00000000
--- a/arch/riscv/cpu/generic/include/cpu_vcpu_csr.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Copyright (c) 2019 Anup Patel.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * @file cpu_vcpu_csr.h
- * @author Anup Patel (an...@brainfault.org)
- * @brief header for VCPU CSR read/write handling
- */
-#ifndef _CPU_VCPU_CSR_H__
-#define _CPU_VCPU_CSR_H__
-
-#include <vmm_types.h>
-#include <vmm_manager.h>
-
-int cpu_vcpu_csr_read(struct vmm_vcpu *vcpu,
- unsigned long csr_num,
- unsigned long *csr_val);
-
-int cpu_vcpu_csr_write(struct vmm_vcpu *vcpu,
- unsigned long csr_num,
- unsigned long csr_val);
-
-#endif
diff --git a/arch/riscv/cpu/generic/include/cpu_vcpu_nested.h b/arch/riscv/cpu/generic/include/cpu_vcpu_nested.h
new file mode 100644
index 00000000..669690d3
--- /dev/null
+++ b/arch/riscv/cpu/generic/include/cpu_vcpu_nested.h
@@ -0,0 +1,105 @@
+/**
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file cpu_vcpu_nested.h
+ * @brief header of VCPU nested functions
+ */
+
+#ifndef _CPU_VCPU_NESTED_H__
+#define _CPU_VCPU_NESTED_H__
+
+#include <vmm_types.h>
+
+struct vmm_vcpu;
+struct cpu_vcpu_trap;
+struct arch_regs;
+
+/** Function to init nested state */
+int cpu_vcpu_nested_init(struct vmm_vcpu *vcpu);
+
+/** Function to reset nested state */
+void cpu_vcpu_nested_reset(struct vmm_vcpu *vcpu);
+
+/** Function to deinit nested state */
+void cpu_vcpu_nested_deinit(struct vmm_vcpu *vcpu);
+
+/** Function to dump nested registers */
+void cpu_vcpu_nested_dump_regs(struct vmm_chardev *cdev,
+ struct vmm_vcpu *vcpu);
+
+/** Function to access nested non-virt CSRs */
+int cpu_vcpu_nested_smode_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
+ unsigned int csr_num, unsigned long *val,
+ unsigned long new_val, unsigned long wr_mask);
+
+/** Function to access nested virt CSRs */
+int cpu_vcpu_nested_hext_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
+ unsigned int csr_num, unsigned long *val,
+ unsigned long new_val, unsigned long wr_mask);
+
+/** Function to handle nested page fault */
+int cpu_vcpu_nested_page_fault(struct vmm_vcpu *vcpu,
+ bool trap_from_smode,
+ const struct cpu_vcpu_trap *trap,
+ struct cpu_vcpu_trap *out_trap);
+
+/** Function to handle nested hfence.vvma instruction */
+void cpu_vcpu_nested_hfence_vvma(struct vmm_vcpu *vcpu,
+ unsigned long *vaddr, unsigned int *asid);
+
+/** Function to handle nested hfence.gvma instruction */
+void cpu_vcpu_nested_hfence_gvma(struct vmm_vcpu *vcpu,
+ physical_addr_t *gaddr, unsigned int *vmid);
+
+/**
+ * Function to handle nested hlv instruction
+ * @returns (< 0) error code upon failure and (>= 0) trap return value
+ * upon success
+ */
+int cpu_vcpu_nested_hlv(struct vmm_vcpu *vcpu, unsigned long vaddr,
+ bool hlvx, void *data, unsigned long len,
+ unsigned long *out_scause,
+ unsigned long *out_stval,
+ unsigned long *out_htval);
+
+/**
+ * Function to handle nested hsv instruction
+ * @returns (< 0) error code upon failure and (>= 0) trap return value
+ * upon success
+ */
+int cpu_vcpu_nested_hsv(struct vmm_vcpu *vcpu, unsigned long vaddr,
+ const void *data, unsigned long len,
+ unsigned long *out_scause,
+ unsigned long *out_stval,
+ unsigned long *out_htval);
+
+enum nested_set_virt_event {
+ NESTED_SET_VIRT_EVENT_TRAP = 0,
+ NESTED_SET_VIRT_EVENT_SRET,
+};
+
+/**
+ * Function to change nested virtualization state
+ * NOTE: This can also update Guest hstatus.SPV and hstatus.SPVP bits
+ */
+void cpu_vcpu_nested_set_virt(struct vmm_vcpu *vcpu, struct arch_regs *regs,
+ enum nested_set_virt_event event, bool virt,
+ bool spvp, bool gva);
+
+#endif
diff --git a/arch/riscv/cpu/generic/include/cpu_vcpu_trap.h b/arch/riscv/cpu/generic/include/cpu_vcpu_trap.h
index cb0ffbb6..d77bba87 100644
--- a/arch/riscv/cpu/generic/include/cpu_vcpu_trap.h
+++ b/arch/riscv/cpu/generic/include/cpu_vcpu_trap.h
@@ -44,18 +44,38 @@ struct cpu_vcpu_trap {
unsigned long htinst;
};
-int cpu_vcpu_redirect_trap(struct vmm_vcpu *vcpu,
- arch_regs_t *regs,
- struct cpu_vcpu_trap *trap);
+enum trap_return {
+ TRAP_RETURN_OK = 0,
+ TRAP_RETURN_ILLEGAL_INSN,
+ TRAP_RETURN_VIRTUAL_INSN,
+ TRAP_RETURN_CONTINUE
+};
+
+void cpu_vcpu_update_trap(struct vmm_vcpu *vcpu, arch_regs_t *regs);
+
+void cpu_vcpu_redirect_smode_trap(arch_regs_t *regs,
+ struct cpu_vcpu_trap *trap, bool prev_spp);
+
+void cpu_vcpu_redirect_trap(struct vmm_vcpu *vcpu, arch_regs_t *regs,
+ struct cpu_vcpu_trap *trap);
int cpu_vcpu_page_fault(struct vmm_vcpu *vcpu,
arch_regs_t *regs,
struct cpu_vcpu_trap *trap);
+int cpu_vcpu_general_fault(struct vmm_vcpu *vcpu,
+ arch_regs_t *regs,
+ struct cpu_vcpu_trap *trap);
+
int cpu_vcpu_virtual_insn_fault(struct vmm_vcpu *vcpu,
arch_regs_t *regs,
unsigned long stval);
+void cpu_vcpu_take_vsirq(struct vmm_vcpu *vcpu, arch_regs_t *regs);
+
+int cpu_vcpu_redirect_vsirq(struct vmm_vcpu *vcpu, arch_regs_t *regs,
+ unsigned long irq);
+
#endif
#endif
diff --git a/arch/riscv/cpu/generic/objects.mk b/arch/riscv/cpu/generic/objects.mk
index bbb9a6b5..69daf98a 100644
--- a/arch/riscv/cpu/generic/objects.mk
+++ b/arch/riscv/cpu/generic/objects.mk
@@ -79,7 +79,7 @@ cpu-objs-$(CONFIG_SMP)+=cpu_smp_ops_default.o
cpu-objs-$(CONFIG_SMP)+=cpu_smp_ops_sbi.o
cpu-objs-$(CONFIG_SMP)+=cpu_sbi_ipi.o
cpu-objs-y+= cpu_vcpu_helper.o
-cpu-objs-y+= cpu_vcpu_csr.o
+cpu-objs-y+= cpu_vcpu_nested.o
cpu-objs-y+= cpu_vcpu_fp.o
cpu-objs-y+= cpu_vcpu_irq.o
cpu-objs-y+= cpu_vcpu_sbi.o
--
2.25.1