A guest hypervisor can allow its nested guest to use the Sstc extension
whenever Sstc is available to the guest hypervisor, so let us virtualize
the Sstc extension for nested guests.
---
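Note for reviewers: the Sstc behaviour virtualized here reduces to a
level comparison against the virtualized time counter. Below is a
minimal C model of the two rules this patch relies on; it is an
illustrative sketch only, and the function names and parameters are
hypothetical, not Xvisor APIs.

    /* Minimal model of the Sstc rules virtualized by this patch.
     * Illustrative only: these names are hypothetical and not part
     * of the Xvisor source tree.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* For a nested guest, the time offset presented to virtual-VS
     * mode is the host's delta for the guest hypervisor plus the
     * htimedelta programmed by the guest hypervisor itself (see
     * cpu_vcpu_timer_delta() with nested_virt == TRUE).
     */
    static uint64_t nested_time_delta(uint64_t host_time_delta,
                                      uint64_t guest_htimedelta)
    {
            return host_time_delta + guest_htimedelta;
    }

    /* Per the Sstc specification, VSTIP is asserted as long as
     *   (time + delta) >= vstimecmp
     * i.e. it is a level condition, not a latched bit.
     */
    static bool sstc_vstip_pending(uint64_t host_time, uint64_t delta,
                                   uint64_t vstimecmp)
    {
            return (host_time + delta) >= vstimecmp;
    }

    int main(void)
    {
            /* Example: vstimecmp = 1000, host time 900, delta 150,
             * so virtual time is 1050 and VSTIP is pending.
             */
            uint64_t delta = nested_time_delta(100, 50);
            printf("%d\n", sstc_vstip_pending(900, delta, 1000));
            return 0;
    }

This is why the patch folds HVIP_VSTIP into reads of hip/vsip/hvip via
the csr_rdor override instead of latching a bit in npriv->hvip: VSTIP
must track the comparison and clear by itself once stimecmp/vstimecmp
is written with a future value.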
arch/riscv/cpu/generic/cpu_vcpu_nested.c | 151 ++++++++++++++--
arch/riscv/cpu/generic/cpu_vcpu_timer.c | 164 +++++++++++++++---
arch/riscv/cpu/generic/cpu_vcpu_trap.c | 16 ++
arch/riscv/cpu/generic/include/arch_regs.h | 2 +
.../cpu/generic/include/cpu_vcpu_timer.h | 5 +
.../cpu/generic/include/riscv_encoding.h | 3 +
6 files changed, 308 insertions(+), 33 deletions(-)
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_nested.c b/arch/riscv/cpu/generic/cpu_vcpu_nested.c
index b75cdec4..8c897902 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_nested.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_nested.c
@@ -856,6 +856,8 @@ void cpu_vcpu_nested_reset(struct vmm_vcpu *vcpu)
npriv->htimedeltah = 0;
npriv->htval = 0;
npriv->htinst = 0;
+ npriv->henvcfg = 0;
+ npriv->henvcfgh = 0;
npriv->hgatp = 0;
npriv->vsstatus = 0;
npriv->vsie = 0;
@@ -923,9 +925,11 @@ int cpu_vcpu_nested_smode_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
unsigned int csr_num, unsigned long *val,
unsigned long new_val, unsigned long wr_mask)
{
+ u64 tmp64;
int csr_shift = 0;
bool read_only = FALSE;
- unsigned long *csr, zero = 0, writeable_mask = 0;
+ unsigned long *csr, tmpcsr = 0, csr_rdor = 0;
+ unsigned long zero = 0, writeable_mask = 0;
struct riscv_priv_nested *npriv = riscv_nested_priv(vcpu);
riscv_stats_priv(vcpu)->nested_smode_csr_rmw++;
@@ -965,6 +969,7 @@ int cpu_vcpu_nested_smode_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
return TRAP_RETURN_VIRTUAL_INSN;
}
csr = &npriv->hvip;
+ csr_rdor = cpu_vcpu_timer_vs_irq(vcpu) ? HVIP_VSTIP : 0;
csr_shift = 1;
writeable_mask = HVIP_VSSIP & npriv->hideleg;
break;
@@ -974,18 +979,48 @@ int cpu_vcpu_nested_smode_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
}
csr = &zero;
break;
+ case CSR_STIMECMP:
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa,
+ SSTC)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+#ifdef CONFIG_32BIT
+ if (!(npriv->henvcfgh & ENVCFGH_STCE)) {
+#else
+ if (!(npriv->henvcfg & ENVCFG_STCE)) {
+#endif
+ return TRAP_RETURN_VIRTUAL_INSN;
+ }
+ tmpcsr = cpu_vcpu_timer_vs_cycle(vcpu);
+ csr = &tmpcsr;
+ writeable_mask = -1UL;
+ break;
+#ifdef CONFIG_32BIT
+ case CSR_STIMECMPH:
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa,
+ SSTC)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+ if (!(npriv->henvcfgh & ENVCFGH_STCE)) {
+ return TRAP_RETURN_VIRTUAL_INSN;
+ }
+ tmpcsr = cpu_vcpu_timer_vs_cycle(vcpu) >> 32;
+ csr = &tmpcsr;
+ writeable_mask = -1UL;
+ break;
+#endif
default:
return TRAP_RETURN_ILLEGAL_INSN;
}
if (val) {
- *val = (csr_shift < 0) ?
- (*csr) << -csr_shift : (*csr) >> csr_shift;
+ *val = (csr_shift < 0) ? (*csr | csr_rdor) << -csr_shift :
+ (*csr | csr_rdor) >> csr_shift;
}
if (read_only) {
return TRAP_RETURN_ILLEGAL_INSN;
- } else {
+ } else if (wr_mask) {
writeable_mask = (csr_shift < 0) ?
writeable_mask >> -csr_shift :
writeable_mask << csr_shift;
@@ -995,6 +1030,29 @@ int cpu_vcpu_nested_smode_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
new_val >> -csr_shift : new_val << csr_shift;
wr_mask &= writeable_mask;
*csr = (*csr & ~wr_mask) | (new_val & wr_mask);
+
+ switch (csr_num) {
+ case CSR_STIMECMP:
+#ifdef CONFIG_32BIT
+ tmp64 = cpu_vcpu_timer_vs_cycle(vcpu);
+ tmp64 &= ~0xffffffffULL;
+ tmp64 |= tmpcsr;
+#else
+ tmp64 = tmpcsr;
+#endif
+ cpu_vcpu_timer_vs_start(vcpu, tmp64);
+ break;
+#ifdef CONFIG_32BIT
+ case CSR_STIMECMPH:
+ tmp64 = cpu_vcpu_timer_vs_cycle(vcpu);
+ tmp64 &= ~0xffffffff00000000ULL;
+ tmp64 |= ((u64)tmpcsr) << 32;
+ cpu_vcpu_timer_vs_start(vcpu, tmp64);
+ break;
+#endif
+ default:
+ break;
+ }
}
return VMM_OK;
@@ -1004,10 +1062,12 @@ int cpu_vcpu_nested_hext_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
unsigned int csr_num, unsigned long *val,
unsigned long new_val, unsigned long wr_mask)
{
+ u64 tmp64;
int csr_shift = 0;
bool read_only = FALSE, nuke_swtlb = FALSE;
unsigned int csr_priv = (csr_num >> 8) & 0x3;
- unsigned long *csr, mode, zero = 0, writeable_mask = 0;
+ unsigned long *csr, tmpcsr = 0, csr_rdor = 0;
+ unsigned long mode, zero = 0, writeable_mask = 0;
struct riscv_priv_nested *npriv = riscv_nested_priv(vcpu);
riscv_stats_priv(vcpu)->nested_hext_csr_rmw++;
@@ -1082,6 +1142,7 @@ int cpu_vcpu_nested_hext_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
break;
case CSR_HIP:
csr = &npriv->hvip;
+ csr_rdor = cpu_vcpu_timer_vs_irq(vcpu) ? HVIP_VSTIP : 0;
writeable_mask = HVIP_VSSIP;
break;
case CSR_HGEIP:
@@ -1157,11 +1218,19 @@ int cpu_vcpu_nested_hext_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
}
break;
case CSR_HENVCFG:
-#ifndef CONFIG_64BIT
- case CSR_HENVCFGH:
+ csr = &npriv->henvcfg;
+#ifdef CONFIG_32BIT
+ writeable_mask = 0;
+#else
+ writeable_mask = ENVCFG_STCE;
#endif
- csr = &zero;
break;
+#ifdef CONFIG_32BIT
+ case CSR_HENVCFGH:
+ csr = &npriv->henvcfgh;
+ writeable_mask = ENVCFGH_STCE;
+ break;
+#endif
case CSR_VSSTATUS:
csr = &npriv->vsstatus;
writeable_mask = SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_UBE |
@@ -1170,6 +1239,7 @@ int cpu_vcpu_nested_hext_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
break;
case CSR_VSIP:
csr = &npriv->hvip;
+ csr_rdor = cpu_vcpu_timer_vs_irq(vcpu) ? HVIP_VSTIP : 0;
csr_shift = 1;
writeable_mask = HVIP_VSSIP & npriv->hideleg;
break;
@@ -1237,6 +1307,26 @@ int cpu_vcpu_nested_hext_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
new_val |= (mode << SATP_MODE_SHIFT) & SATP_MODE;
}
break;
+ case CSR_VSTIMECMP:
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa,
+ SSTC)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+ tmpcsr = cpu_vcpu_timer_vs_cycle(vcpu);
+ csr = &tmpcsr;
+ writeable_mask = -1UL;
+ break;
+#ifdef CONFIG_32BIT
+ case CSR_VSTIMECMPH:
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa,
+ SSTC)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+ tmpcsr = cpu_vcpu_timer_vs_cycle(vcpu) >> 32;
+ csr = &tmpcsr;
+ writeable_mask = -1UL;
+ break;
+#endif
case CSR_HVICTL:
csr = &npriv->hvictl;
writeable_mask = HVICTL_VTI | HVICTL_IID |
@@ -1247,13 +1337,13 @@ int cpu_vcpu_nested_hext_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
}
if (val) {
- *val = (csr_shift < 0) ?
- (*csr) << -csr_shift : (*csr) >> csr_shift;
+ *val = (csr_shift < 0) ? (*csr | csr_rdor) << -csr_shift :
+ (*csr | csr_rdor) >> csr_shift;
}
if (read_only) {
return TRAP_RETURN_ILLEGAL_INSN;
- } else {
+ } else if (wr_mask) {
writeable_mask = (csr_shift < 0) ?
writeable_mask >> -csr_shift :
writeable_mask << csr_shift;
@@ -1263,6 +1353,43 @@ int cpu_vcpu_nested_hext_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
new_val >> -csr_shift : new_val << csr_shift;
wr_mask &= writeable_mask;
*csr = (*csr & ~wr_mask) | (new_val & wr_mask);
+
+ switch (csr_num) {
+ case CSR_VSTIMECMP:
+#ifdef CONFIG_32BIT
+ tmp64 = cpu_vcpu_timer_vs_cycle(vcpu);
+ tmp64 &= ~0xffffffffULL;
+ tmp64 |= tmpcsr;
+#else
+ tmp64 = tmpcsr;
+#endif
+ cpu_vcpu_timer_vs_start(vcpu, tmp64);
+ break;
+#ifdef CONFIG_32BIT
+ case CSR_VSTIMECMPH:
+ tmp64 = cpu_vcpu_timer_vs_cycle(vcpu);
+ tmp64 &= ~0xffffffff00000000ULL;
+ tmp64 |= ((u64)tmpcsr) << 32;
+ cpu_vcpu_timer_vs_start(vcpu, tmp64);
+ break;
+#endif
+ case CSR_HTIMEDELTA:
+ if (riscv_isa_extension_available(riscv_priv(vcpu)->isa,
+ SSTC)) {
+ cpu_vcpu_timer_vs_restart(vcpu);
+ }
+ break;
+#ifdef CONFIG_32BIT
+ case CSR_HTIMEDELTAH:
+ if (riscv_isa_extension_available(riscv_priv(vcpu)->isa,
+ SSTC)) {
+ cpu_vcpu_timer_vs_restart(vcpu);
+ }
+ break;
+#endif
+ default:
+ break;
+ }
}
if (nuke_swtlb) {
@@ -1626,7 +1753,7 @@ void cpu_vcpu_nested_take_vsirq(struct vmm_vcpu *vcpu,
/* Determine virtual-VS mode interrupt number */
vsirq = 0;
- irqs = npriv->hvip;
+ irqs = npriv->hvip | (cpu_vcpu_timer_vs_irq(vcpu) ? HVIP_VSTIP : 0);
irqs &= npriv->vsie << 1;
irqs &= npriv->hideleg;
if (irqs & MIP_VSEIP) {
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_timer.c b/arch/riscv/cpu/generic/cpu_vcpu_timer.c
index f4add40c..cb19aeed 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_timer.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_timer.c
@@ -25,15 +25,22 @@
#include <vmm_heap.h>
#include <vmm_limits.h>
#include <vmm_stdio.h>
+#include <vmm_scheduler.h>
#include <vmm_timer.h>
#include <vmm_vcpu_irq.h>
#include <cpu_hwcap.h>
#include <cpu_vcpu_timer.h>
+#include <cpu_vcpu_trap.h>
#include <riscv_encoding.h>
struct cpu_vcpu_timer {
+ /* virtual-VS mode state */
+ u64 vs_next_cycle;
+ struct vmm_timer_event vs_time_ev;
+ /* S mode state */
u64 next_cycle;
+ struct vmm_timer_event time_nested_ev;
struct vmm_timer_event time_ev;
};
@@ -52,6 +59,81 @@ static inline u64 cpu_vcpu_timer_delta(struct vmm_vcpu *vcpu,
return riscv_guest_priv(vcpu->guest)->time_delta + ndelta;
}
+bool cpu_vcpu_timer_vs_irq(struct vmm_vcpu *vcpu)
+{
+ struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);
+
+ return t->vs_next_cycle <=
+ (csr_read(CSR_TIME) + cpu_vcpu_timer_delta(vcpu, TRUE));
+}
+
+u64 cpu_vcpu_timer_vs_cycle(struct vmm_vcpu *vcpu)
+{
+ struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);
+
+ return t->vs_next_cycle;
+}
+
+static void cpu_vcpu_timer_vs_expired(struct vmm_timer_event *ev)
+{
+ struct vmm_vcpu *vcpu = ev->priv;
+
+ if (cpu_vcpu_timer_vs_irq(vcpu)) {
+ vmm_vcpu_irq_wait_resume(vcpu);
+ } else {
+ cpu_vcpu_timer_vs_restart(vcpu);
+ }
+}
+
+void cpu_vcpu_timer_vs_restart(struct vmm_vcpu *vcpu)
+{
+ u64 vs_delta_ns, vs_next_cycle;
+ struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);
+
+ /* Stop the VS timer when next timer tick equals U64_MAX */
+ if (t->vs_next_cycle == U64_MAX) {
+ vmm_timer_event_stop(&t->vs_time_ev);
+ return;
+ }
+
+ /* Do nothing if a Virtual-VS mode IRQ is pending */
+ if (cpu_vcpu_timer_vs_irq(vcpu)) {
+ vmm_timer_event_stop(&t->vs_time_ev);
+ return;
+ }
+
+ /* Start the VS timer event */
+ vs_next_cycle = t->vs_next_cycle - cpu_vcpu_timer_delta(vcpu, TRUE);
+ vs_delta_ns = vmm_timer_delta_cycles_to_ns(vs_next_cycle);
+ vmm_timer_event_start(&t->vs_time_ev, vs_delta_ns);
+}
+
+void cpu_vcpu_timer_vs_start(struct vmm_vcpu *vcpu, u64 vs_next_cycle)
+{
+ struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);
+
+ /* Save the next VS timer tick value */
+ t->vs_next_cycle = vs_next_cycle;
+
+ /* Restart VS timer */
+ cpu_vcpu_timer_vs_restart(vcpu);
+}
+
+static void cpu_vcpu_timer_nested_expired(struct vmm_timer_event *ev)
+{
+ int rc;
+ struct vmm_vcpu *vcpu = ev->priv;
+
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa, SSTC)) {
+ return;
+ }
+
+ /* Redirect trap to invoke nested world switch */
+ rc = cpu_vcpu_redirect_vsirq(vcpu, vmm_scheduler_irq_regs(),
+ IRQ_VS_TIMER);
+ BUG_ON(rc);
+}
+
static void cpu_vcpu_timer_expired(struct vmm_timer_event *ev)
{
struct vmm_vcpu *vcpu = ev->priv;
@@ -71,6 +153,9 @@ void cpu_vcpu_timer_start(struct vmm_vcpu *vcpu, u64 next_cycle)
u64 delta_ns;
struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);
+ /* This function should only be called when nested virt is OFF */
+ BUG_ON(riscv_nested_virt(vcpu));
+
/* Save the next timer tick value */
t->next_cycle = next_cycle;
@@ -99,19 +184,17 @@ void cpu_vcpu_timer_start(struct vmm_vcpu *vcpu, u64 next_cycle)
vmm_vcpu_irq_clear(vcpu, IRQ_VS_TIMER);
/* Start the timer event */
- next_cycle -= cpu_vcpu_timer_delta(vcpu, riscv_nested_virt(vcpu));
+ next_cycle -= cpu_vcpu_timer_delta(vcpu, FALSE);
delta_ns = vmm_timer_delta_cycles_to_ns(next_cycle);
vmm_timer_event_start(&t->time_ev, delta_ns);
}
void cpu_vcpu_timer_delta_update(struct vmm_vcpu *vcpu, bool nested_virt)
{
- u64 current_delta, new_delta = 0;
+ u64 delta_ns, new_delta;
struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);
- current_delta = cpu_vcpu_timer_delta(vcpu, riscv_nested_virt(vcpu));
new_delta = cpu_vcpu_timer_delta(vcpu, nested_virt);
-
#ifdef CONFIG_32BIT
csr_write(CSR_HTIMEDELTA, (u32)new_delta);
csr_write(CSR_HTIMEDELTAH, (u32)(new_delta >> 32));
@@ -123,13 +206,30 @@ void cpu_vcpu_timer_delta_update(struct vmm_vcpu *vcpu, bool nested_virt)
return;
}
- t->next_cycle += new_delta - current_delta;
+ if (nested_virt) {
#ifdef CONFIG_32BIT
- csr_write(CSR_VSTIMECMP, (u32)t->next_cycle);
- csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycle >> 32));
+ t->next_cycle = csr_swap(CSR_VSTIMECMP, -1UL);
+ t->next_cycle |= (u64)csr_swap(CSR_VSTIMECMPH, -1UL) << 32;
#else
- csr_write(CSR_VSTIMECMP, t->next_cycle);
+ t->next_cycle = csr_swap(CSR_VSTIMECMP, -1UL);
#endif
+
+ if (t->next_cycle != U64_MAX) {
+ delta_ns = t->next_cycle -
+ cpu_vcpu_timer_delta(vcpu, FALSE);
+ delta_ns = vmm_timer_delta_cycles_to_ns(delta_ns);
+ vmm_timer_event_start(&t->time_nested_ev, delta_ns);
+ }
+ } else {
+ vmm_timer_event_stop(&t->time_nested_ev);
+
+#ifdef CONFIG_32BIT
+ csr_write(CSR_VSTIMECMP, (u32)t->next_cycle);
+ csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycle >> 32));
+#else
+ csr_write(CSR_VSTIMECMP, t->next_cycle);
+#endif
+ }
}
void cpu_vcpu_timer_save(struct vmm_vcpu *vcpu)
@@ -143,27 +243,30 @@ void cpu_vcpu_timer_save(struct vmm_vcpu *vcpu)
t = riscv_timer_priv(vcpu);
+ if (riscv_nested_virt(vcpu)) {
+ vmm_timer_event_stop(&t->time_nested_ev);
+ } else {
#ifdef CONFIG_32BIT
- t->next_cycle = csr_swap(CSR_VSTIMECMP, -1UL);
- t->next_cycle |= (u64)csr_swap(CSR_VSTIMECMPH, -1UL) << 32;
+ t->next_cycle = csr_swap(CSR_VSTIMECMP, -1UL);
+ t->next_cycle |= (u64)csr_swap(CSR_VSTIMECMPH, -1UL) << 32;
#else
- t->next_cycle = csr_swap(CSR_VSTIMECMP, -1UL);
+ t->next_cycle = csr_swap(CSR_VSTIMECMP, -1UL);
#endif
- if (t->next_cycle == U64_MAX) {
- return;
}
- delta_ns = t->next_cycle -
- cpu_vcpu_timer_delta(vcpu, riscv_nested_virt(vcpu));
- delta_ns = vmm_timer_delta_cycles_to_ns(delta_ns);
- vmm_timer_event_start(&t->time_ev, delta_ns);
+ if (t->next_cycle != U64_MAX) {
+ delta_ns = t->next_cycle - cpu_vcpu_timer_delta(vcpu, FALSE);
+ delta_ns = vmm_timer_delta_cycles_to_ns(delta_ns);
+ vmm_timer_event_start(&t->time_ev, delta_ns);
+ }
}
void cpu_vcpu_timer_restore(struct vmm_vcpu *vcpu)
{
- u64 time_delta = cpu_vcpu_timer_delta(vcpu, riscv_nested_virt(vcpu));
+ u64 delta_ns, time_delta;
struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);
+ time_delta = cpu_vcpu_timer_delta(vcpu, riscv_nested_virt(vcpu));
#ifdef CONFIG_32BIT
csr_write(CSR_HTIMEDELTA, (u32)time_delta);
csr_write(CSR_HTIMEDELTAH, (u32)(time_delta >> 32));
@@ -177,12 +280,21 @@ void cpu_vcpu_timer_restore(struct vmm_vcpu *vcpu)
vmm_timer_event_stop(&t->time_ev);
+ if (riscv_nested_virt(vcpu)) {
+ if (t->next_cycle != U64_MAX) {
+ delta_ns = t->next_cycle -
+ cpu_vcpu_timer_delta(vcpu, FALSE);
+ delta_ns = vmm_timer_delta_cycles_to_ns(delta_ns);
+ vmm_timer_event_start(&t->time_nested_ev, delta_ns);
+ }
+ } else {
#ifdef CONFIG_32BIT
- csr_write(CSR_VSTIMECMP, (u32)t->next_cycle);
- csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycle >> 32));
+ csr_write(CSR_VSTIMECMP, (u32)t->next_cycle);
+ csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycle >> 32));
#else
- csr_write(CSR_VSTIMECMP, t->next_cycle);
+ csr_write(CSR_VSTIMECMP, t->next_cycle);
#endif
+ }
}
int cpu_vcpu_timer_init(struct vmm_vcpu *vcpu, void **timer)
@@ -197,12 +309,20 @@ int cpu_vcpu_timer_init(struct vmm_vcpu *vcpu, void **timer)
if (!(*timer))
return VMM_ENOMEM;
t = *timer;
+ INIT_TIMER_EVENT(&t->vs_time_ev,
+ cpu_vcpu_timer_vs_expired, vcpu);
+ INIT_TIMER_EVENT(&t->time_nested_ev,
+ cpu_vcpu_timer_nested_expired, vcpu);
INIT_TIMER_EVENT(&t->time_ev, cpu_vcpu_timer_expired, vcpu);
} else {
t = *timer;
}
+ t->vs_next_cycle = U64_MAX;
+ vmm_timer_event_stop(&t->vs_time_ev);
+
t->next_cycle = U64_MAX;
+ vmm_timer_event_stop(&t->time_nested_ev);
vmm_timer_event_stop(&t->time_ev);
if (riscv_isa_extension_available(riscv_priv(vcpu)->isa, SSTC)) {
@@ -220,6 +340,8 @@ int cpu_vcpu_timer_deinit(struct vmm_vcpu *vcpu, void **timer)
return VMM_EINVALID;
t = *timer;
+ vmm_timer_event_stop(&t->vs_time_ev);
+ vmm_timer_event_stop(&t->time_nested_ev);
vmm_timer_event_stop(&t->time_ev);
vmm_free(t);
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_trap.c b/arch/riscv/cpu/generic/cpu_vcpu_trap.c
index 32bcc6dd..4ebce159 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_trap.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_trap.c
@@ -565,6 +565,14 @@ static const struct csr_func csr_funcs[] = {
.csr_num = CSR_SIPH,
.rmw_func = cpu_vcpu_nested_smode_csr_rmw,
},
+ {
+ .csr_num = CSR_STIMECMP,
+ .rmw_func = cpu_vcpu_nested_smode_csr_rmw,
+ },
+ {
+ .csr_num = CSR_STIMECMPH,
+ .rmw_func = cpu_vcpu_nested_smode_csr_rmw,
+ },
{
.csr_num = CSR_HSTATUS,
.rmw_func = cpu_vcpu_nested_hext_csr_rmw,
@@ -665,6 +673,14 @@ static const struct csr_func csr_funcs[] = {
.csr_num = CSR_VSATP,
.rmw_func = cpu_vcpu_nested_hext_csr_rmw,
},
+ {
+ .csr_num = CSR_VSTIMECMP,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_VSTIMECMPH,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
};
static int csr_insn(struct vmm_vcpu *vcpu, arch_regs_t *regs, ulong insn)
diff --git a/arch/riscv/cpu/generic/include/arch_regs.h b/arch/riscv/cpu/generic/include/arch_regs.h
index 23372b8e..3afa9faf 100644
--- a/arch/riscv/cpu/generic/include/arch_regs.h
+++ b/arch/riscv/cpu/generic/include/arch_regs.h
@@ -173,6 +173,8 @@ struct riscv_priv_nested {
unsigned long htimedeltah;
unsigned long htval;
unsigned long htinst;
+ unsigned long henvcfg;
+ unsigned long henvcfgh;
unsigned long hgatp;
unsigned long vsstatus;
unsigned long vsie;
diff --git a/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h b/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h
index caa3eea5..84d00707 100644
--- a/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h
+++ b/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h
@@ -28,6 +28,11 @@
struct vmm_vcpu;
+bool cpu_vcpu_timer_vs_irq(struct vmm_vcpu *vcpu);
+u64 cpu_vcpu_timer_vs_cycle(struct vmm_vcpu *vcpu);
+void cpu_vcpu_timer_vs_restart(struct vmm_vcpu *vcpu);
+void cpu_vcpu_timer_vs_start(struct vmm_vcpu *vcpu, u64 next_vs_cycle);
+
void cpu_vcpu_timer_start(struct vmm_vcpu *vcpu, u64 next_cycle);
void cpu_vcpu_timer_delta_update(struct vmm_vcpu *vcpu, bool nested_virt);
void cpu_vcpu_timer_save(struct vmm_vcpu *vcpu);
diff --git a/arch/riscv/cpu/generic/include/riscv_encoding.h b/arch/riscv/cpu/generic/include/riscv_encoding.h
index 1514df11..b3753e19 100644
--- a/arch/riscv/cpu/generic/include/riscv_encoding.h
+++ b/arch/riscv/cpu/generic/include/riscv_encoding.h
@@ -371,6 +371,9 @@
#define ENVCFG_CBIE_INV _AC(0x3, UL)
#define ENVCFG_FIOM _AC(0x1, UL)
+#define ENVCFGH_STCE (_AC(1, UL) << 31)
+#define ENVCFGH_PBMTE (_AC(1, UL) << 30)
+
/* ===== User-level CSRs ===== */
/* User Trap Setup (N-extension) */
--
2.34.1