[PATCH 0/8] Xvisor Sstc virtualization support

13 views
Skip to first unread message

Anup Patel

unread,
Oct 26, 2022, 7:16:49 AM10/26/22
to xvisor...@googlegroups.com, Anup Patel
This series adds Sstc virtualization (and nested virtualization)
support in Xvisor RISC-V

These patches can also be found in riscv_sstc_virt_v1 branch at:
https://github.com/avpatel/xvisor-next

Anup Patel (8):
RISC-V: Rename VCPU timer handling functions for consistency
RISC-V: Move time delta update function to cpu_vcpu_timer.c
RISC-V: Introduce VCPU timer save/restore functions
RISC-V: VCPU ISA bitmap should only have extensions available on Host
RISC-V: Use Sstc virtualization in VCPU timer implementation
TESTS: riscv: Add sstc to ISA string whenever Xvisor supports it
RISC-V: Take nested interrupts after vmm_scheduler_irq_exit()
RISC-V: Add nested virtualization support for Sstc extension

arch/riscv/cpu/generic/cpu_exception.c | 7 +-
arch/riscv/cpu/generic/cpu_init.c | 6 +-
arch/riscv/cpu/generic/cpu_vcpu_helper.c | 53 ++-
arch/riscv/cpu/generic/cpu_vcpu_nested.c | 157 ++++++++-
arch/riscv/cpu/generic/cpu_vcpu_sbi_legacy.c | 4 +-
arch/riscv/cpu/generic/cpu_vcpu_sbi_replace.c | 4 +-
arch/riscv/cpu/generic/cpu_vcpu_timer.c | 306 ++++++++++++++++--
arch/riscv/cpu/generic/cpu_vcpu_trap.c | 16 +
arch/riscv/cpu/generic/include/arch_regs.h | 3 +
arch/riscv/cpu/generic/include/cpu_hwcap.h | 9 +-
.../cpu/generic/include/cpu_vcpu_helper.h | 6 +-
.../cpu/generic/include/cpu_vcpu_timer.h | 20 +-
.../cpu/generic/include/riscv_encoding.h | 3 +
tests/riscv/common/basic/arch_sbi.c | 39 ++-
tests/riscv/virt32/virt32-guest.dts | 2 +-
tests/riscv/virt64/virt64-guest.dts | 2 +-
16 files changed, 534 insertions(+), 103 deletions(-)

--
2.34.1

Anup Patel

unread,
Oct 26, 2022, 7:16:52 AM10/26/22
to xvisor...@googlegroups.com, Anup Patel
All VCPU handling functions have "cpu_vcpu_" prefix so let us rename
VCPU timer handling functions to align with this convention.

Signed-off-by: Anup Patel <apa...@ventanamicro.com>
---
arch/riscv/cpu/generic/cpu_vcpu_helper.c | 5 +-
arch/riscv/cpu/generic/cpu_vcpu_sbi_legacy.c | 4 +-
arch/riscv/cpu/generic/cpu_vcpu_sbi_replace.c | 4 +-
arch/riscv/cpu/generic/cpu_vcpu_timer.c | 55 ++++++++++---------
.../cpu/generic/include/cpu_vcpu_timer.h | 12 ++--
5 files changed, 39 insertions(+), 41 deletions(-)

diff --git a/arch/riscv/cpu/generic/cpu_vcpu_helper.c b/arch/riscv/cpu/generic/cpu_vcpu_helper.c
index f5bca399..c019b924 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_helper.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_helper.c
@@ -336,7 +336,8 @@ int arch_vcpu_init(struct vmm_vcpu *vcpu)
/* Initialize FP state */
cpu_vcpu_fp_init(vcpu);

- riscv_timer_event_init(vcpu, &riscv_timer_priv(vcpu));
+ /* Initialize timer */
+ cpu_vcpu_timer_init(vcpu, &riscv_timer_priv(vcpu));

return VMM_OK;

@@ -373,7 +374,7 @@ int arch_vcpu_deinit(struct vmm_vcpu *vcpu)
}

/* Cleanup timer */
- rc = riscv_timer_event_deinit(vcpu, &riscv_timer_priv(vcpu));
+ rc = cpu_vcpu_timer_deinit(vcpu, &riscv_timer_priv(vcpu));
if (rc)
return rc;

diff --git a/arch/riscv/cpu/generic/cpu_vcpu_sbi_legacy.c b/arch/riscv/cpu/generic/cpu_vcpu_sbi_legacy.c
index 57720c6d..42279718 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_sbi_legacy.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_sbi_legacy.c
@@ -51,10 +51,10 @@ static int vcpu_sbi_legacy_ecall(struct vmm_vcpu *vcpu, unsigned long ext_id,
switch (ext_id) {
case SBI_EXT_0_1_SET_TIMER:
if (riscv_priv(vcpu)->xlen == 32)
- riscv_timer_event_start(vcpu,
+ cpu_vcpu_timer_start(vcpu,
((u64)args[1] << 32) | (u64)args[0]);
else
- riscv_timer_event_start(vcpu, (u64)args[0]);
+ cpu_vcpu_timer_start(vcpu, (u64)args[0]);
break;
case SBI_EXT_0_1_CONSOLE_PUTCHAR:
send = (u8)args[0];
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_sbi_replace.c b/arch/riscv/cpu/generic/cpu_vcpu_sbi_replace.c
index 7502c0b2..6bdcdb22 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_sbi_replace.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_sbi_replace.c
@@ -41,10 +41,10 @@ static int vcpu_sbi_time_ecall(struct vmm_vcpu *vcpu, unsigned long ext_id,
return SBI_ERR_NOT_SUPPORTED;

if (riscv_priv(vcpu)->xlen == 32)
- riscv_timer_event_start(vcpu,
+ cpu_vcpu_timer_start(vcpu,
((u64)args[1] << 32) | (u64)args[0]);
else
- riscv_timer_event_start(vcpu, (u64)args[0]);
+ cpu_vcpu_timer_start(vcpu, (u64)args[0]);

return 0;
}
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_timer.c b/arch/riscv/cpu/generic/cpu_vcpu_timer.c
index cc15ef9b..c1e046e6 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_timer.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_timer.c
@@ -25,27 +25,32 @@
#include <vmm_heap.h>
#include <vmm_limits.h>
#include <vmm_stdio.h>
+#include <vmm_timer.h>
#include <vmm_vcpu_irq.h>
#include <cpu_vcpu_timer.h>

#include <riscv_encoding.h>

-static void riscv_timer_event_expired(struct vmm_timer_event *ev)
+struct cpu_vcpu_timer {
+ struct vmm_timer_event time_ev;
+};
+
+static void cpu_vcpu_timer_expired(struct vmm_timer_event *ev)
{
struct vmm_vcpu *vcpu = ev->priv;
- struct riscv_timer_event *tevent = riscv_timer_priv(vcpu);
+ struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);

- BUG_ON(!tevent);
+ BUG_ON(!t);
vmm_vcpu_irq_assert(vcpu, IRQ_VS_TIMER, 0x0);
}

-void riscv_timer_event_start(struct vmm_vcpu *vcpu, u64 next_cycle)
+void cpu_vcpu_timer_start(struct vmm_vcpu *vcpu, u64 next_cycle)
{
u64 delta_ns;
- struct riscv_timer_event *tevent = riscv_timer_priv(vcpu);
+ struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);

if (next_cycle == U64_MAX) {
- vmm_timer_event_stop(&tevent->time_ev);
+ vmm_timer_event_stop(&t->time_ev);
vmm_vcpu_irq_clear(vcpu, IRQ_VS_TIMER);
return;
}
@@ -59,45 +64,41 @@ void riscv_timer_event_start(struct vmm_vcpu *vcpu, u64 next_cycle)
/* Start the timer event */
next_cycle -= riscv_guest_priv(vcpu->guest)->time_delta;
delta_ns = vmm_timer_delta_cycles_to_ns(next_cycle);
- vmm_timer_event_start(&tevent->time_ev, delta_ns);
+ vmm_timer_event_start(&t->time_ev, delta_ns);
}

-int riscv_timer_event_init(struct vmm_vcpu *vcpu, void **timer_event)
+int cpu_vcpu_timer_init(struct vmm_vcpu *vcpu, void **timer)
{
- struct riscv_timer_event *tevent;
+ struct cpu_vcpu_timer *t;

- if (!vcpu || !timer_event)
+ if (!vcpu || !timer)
return VMM_EINVALID;

- if (!(*timer_event)) {
- *timer_event = vmm_zalloc(sizeof(struct riscv_timer_event));
- if (!(*timer_event))
+ if (!(*timer)) {
+ *timer = vmm_zalloc(sizeof(struct cpu_vcpu_timer));
+ if (!(*timer))
return VMM_ENOMEM;
- tevent = *timer_event;
- INIT_TIMER_EVENT(&tevent->time_ev, riscv_timer_event_expired,
- vcpu);
+ t = *timer;
+ INIT_TIMER_EVENT(&t->time_ev, cpu_vcpu_timer_expired, vcpu);
} else {
- tevent = *timer_event;
+ t = *timer;
}

- vmm_timer_event_stop(&tevent->time_ev);
+ vmm_timer_event_stop(&t->time_ev);

return VMM_OK;
}

-int riscv_timer_event_deinit(struct vmm_vcpu *vcpu, void **timer_event)
+int cpu_vcpu_timer_deinit(struct vmm_vcpu *vcpu, void **timer)
{
- struct riscv_timer_event *tevent;
-
- if (!vcpu || !timer_event)
- return VMM_EINVALID;
+ struct cpu_vcpu_timer *t;

- if (!(*timer_event))
+ if (!vcpu || !timer || !(*timer))
return VMM_EINVALID;
- tevent = *timer_event;
+ t = *timer;

- vmm_timer_event_stop(&tevent->time_ev);
- vmm_free(tevent);
+ vmm_timer_event_stop(&t->time_ev);
+ vmm_free(t);

return VMM_OK;
}
diff --git a/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h b/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h
index 617eb8df..71df1662 100644
--- a/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h
+++ b/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h
@@ -25,15 +25,11 @@
#define _CPU_VCPU_TIMER_H__

#include <vmm_types.h>
-#include <vmm_manager.h>
-#include <vmm_timer.h>

-struct riscv_timer_event {
- struct vmm_timer_event time_ev;
-};
+struct vmm_vcpu;

-void riscv_timer_event_start(struct vmm_vcpu *vcpu, u64 next_cycle);
-int riscv_timer_event_init(struct vmm_vcpu *vcpu, void **timer_event);
-int riscv_timer_event_deinit(struct vmm_vcpu *vcpu, void **timer_event);
+void cpu_vcpu_timer_start(struct vmm_vcpu *vcpu, u64 next_cycle);
+int cpu_vcpu_timer_init(struct vmm_vcpu *vcpu, void **timer);
+int cpu_vcpu_timer_deinit(struct vmm_vcpu *vcpu, void **timer);

#endif
--
2.34.1

Anup Patel

unread,
Oct 26, 2022, 7:16:57 AM10/26/22
to xvisor...@googlegroups.com, Anup Patel
We add VCPU timer save/restore functions which currently only updates
time delta in the VCPU restore path but in subsequent patches these
functions will save/restore vstimecmp CSRs.

Signed-off-by: Anup Patel <apa...@ventanamicro.com>
---
arch/riscv/cpu/generic/cpu_vcpu_helper.c | 3 ++-
arch/riscv/cpu/generic/cpu_vcpu_timer.c | 14 ++++++++++++++
arch/riscv/cpu/generic/include/cpu_vcpu_timer.h | 2 ++
3 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/arch/riscv/cpu/generic/cpu_vcpu_helper.c b/arch/riscv/cpu/generic/cpu_vcpu_helper.c
index b41c363c..9cd8046c 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_helper.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_helper.c
@@ -414,6 +414,7 @@ void arch_vcpu_switch(struct vmm_vcpu *tvcpu,
priv->vsatp = csr_read(CSR_VSATP);
priv->scounteren = csr_read(CSR_SCOUNTEREN);
cpu_vcpu_fp_save(tvcpu, regs);
+ cpu_vcpu_timer_save(tvcpu);
}
clrx();
}
@@ -431,8 +432,8 @@ void arch_vcpu_switch(struct vmm_vcpu *tvcpu,
csr_write(CSR_VSTVAL, priv->vstval);
csr_write(CSR_VSATP, priv->vsatp);
csr_write(CSR_SCOUNTEREN, priv->scounteren);
+ cpu_vcpu_timer_restore(vcpu);
cpu_vcpu_fp_restore(vcpu, regs);
- cpu_vcpu_timer_delta_update(vcpu, riscv_nested_virt(vcpu));
cpu_vcpu_gstage_update(vcpu, riscv_nested_virt(vcpu));
cpu_vcpu_irq_deleg_update(vcpu, riscv_nested_virt(vcpu));
} else {
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_timer.c b/arch/riscv/cpu/generic/cpu_vcpu_timer.c
index 891f00b2..e73cc211 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_timer.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_timer.c
@@ -32,6 +32,7 @@
#include <riscv_encoding.h>

struct cpu_vcpu_timer {
+ u64 next_cycle;
struct vmm_timer_event time_ev;
};

@@ -49,6 +50,10 @@ void cpu_vcpu_timer_start(struct vmm_vcpu *vcpu, u64 next_cycle)
u64 delta_ns;
struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);

+ /* Save the next timer tick value */
+ t->next_cycle = next_cycle;
+
+ /* Stop the timer when next timer tick equals U64_MAX */
if (next_cycle == U64_MAX) {
vmm_timer_event_stop(&t->time_ev);
vmm_vcpu_irq_clear(vcpu, IRQ_VS_TIMER);
@@ -87,6 +92,15 @@ void cpu_vcpu_timer_delta_update(struct vmm_vcpu *vcpu, bool nested_virt)
#endif
}

+void cpu_vcpu_timer_save(struct vmm_vcpu *vcpu)
+{
+}
+
+void cpu_vcpu_timer_restore(struct vmm_vcpu *vcpu)
+{
+ cpu_vcpu_timer_delta_update(vcpu, riscv_nested_virt(vcpu));
+}
+
int cpu_vcpu_timer_init(struct vmm_vcpu *vcpu, void **timer)
{
struct cpu_vcpu_timer *t;
diff --git a/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h b/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h
index 30079c92..caa3eea5 100644
--- a/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h
+++ b/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h
@@ -30,6 +30,8 @@ struct vmm_vcpu;

void cpu_vcpu_timer_start(struct vmm_vcpu *vcpu, u64 next_cycle);
void cpu_vcpu_timer_delta_update(struct vmm_vcpu *vcpu, bool nested_virt);
+void cpu_vcpu_timer_save(struct vmm_vcpu *vcpu);
+void cpu_vcpu_timer_restore(struct vmm_vcpu *vcpu);
int cpu_vcpu_timer_init(struct vmm_vcpu *vcpu, void **timer);
int cpu_vcpu_timer_deinit(struct vmm_vcpu *vcpu, void **timer);

--
2.34.1

Anup Patel

unread,
Oct 26, 2022, 7:16:59 AM10/26/22
to xvisor...@googlegroups.com, Anup Patel
The VCPU ISA bitmap is parsed from Xvisor Guest DT which allows
users to specify desired ISA features via Xvisor Guest DT.

For correctness, the VCPU ISA bitmap should only have extensions
which are available on Host so we AND the VCPU ISA bitmap with
Host ISA bitmap.

Signed-off-by: Anup Patel <apa...@ventanamicro.com>
---
arch/riscv/cpu/generic/cpu_init.c | 6 ++----
arch/riscv/cpu/generic/cpu_vcpu_helper.c | 4 ++++
arch/riscv/cpu/generic/include/cpu_hwcap.h | 9 +++------
3 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/arch/riscv/cpu/generic/cpu_init.c b/arch/riscv/cpu/generic/cpu_init.c
index 823d1f7f..deeb610c 100644
--- a/arch/riscv/cpu/generic/cpu_init.c
+++ b/arch/riscv/cpu/generic/cpu_init.c
@@ -206,11 +206,9 @@ int riscv_isa_parse_string(const char *isa,
return VMM_OK;
}

-unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap)
+const unsigned long *riscv_isa_extension_host(void)
{
- if (!isa_bitmap)
- return riscv_isa[0];
- return isa_bitmap[0];
+ return riscv_isa;
}

bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit)
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_helper.c b/arch/riscv/cpu/generic/cpu_vcpu_helper.c
index 9cd8046c..e5ccd03d 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_helper.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_helper.c
@@ -287,6 +287,10 @@ int arch_vcpu_init(struct vmm_vcpu *vcpu)
}
riscv_priv(vcpu)->isa[0] &= RISCV_ISA_ALLOWED;

+ /* VCPU ISA bitmap should be ANDed with Host ISA bitmap */
+ bitmap_and(riscv_priv(vcpu)->isa, riscv_priv(vcpu)->isa,
+ riscv_isa_extension_host(), RISCV_ISA_EXT_MAX);
+
/* H-extension only available when AIA CSRs are available */
if (!riscv_isa_extension_available(NULL, SxAIA)) {
riscv_priv(vcpu)->isa[0] &=
diff --git a/arch/riscv/cpu/generic/include/cpu_hwcap.h b/arch/riscv/cpu/generic/include/cpu_hwcap.h
index c47dffd0..ab13f674 100644
--- a/arch/riscv/cpu/generic/include/cpu_hwcap.h
+++ b/arch/riscv/cpu/generic/include/cpu_hwcap.h
@@ -75,14 +75,11 @@ struct vmm_devtree_node;
int riscv_node_to_hartid(struct vmm_devtree_node *node, u32 *hart_id);

/**
- * Get base extension word
+ * Get host ISA extension bitmap
*
- * @isa_bitmap ISA bitmap to use
- * @returns base extension word as unsigned long value
- *
- * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
+ * @returns const pointer to host ISA extension bitmap
*/
-unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
+const unsigned long *riscv_isa_extension_host(void);

#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext)

--
2.34.1

Anup Patel

unread,
Oct 26, 2022, 7:17:01 AM10/26/22
to xvisor...@googlegroups.com, Anup Patel
We should use Sstc virtualization for VCPU timer implementation whenever
underlying Guest desires it and Host supports it.

In addition to above, whenever the time delta is updated for nested
world-switch we should also adjust vstimecmp accordingly.

Signed-off-by: Anup Patel <apa...@ventanamicro.com>
---
arch/riscv/cpu/generic/cpu_vcpu_helper.c | 17 ++-
arch/riscv/cpu/generic/cpu_vcpu_nested.c | 3 +
arch/riscv/cpu/generic/cpu_vcpu_timer.c | 117 +++++++++++++++---
arch/riscv/cpu/generic/include/arch_regs.h | 1 +
.../cpu/generic/include/cpu_vcpu_helper.h | 3 +
tests/riscv/virt32/virt32-guest.dts | 2 +-
tests/riscv/virt64/virt64-guest.dts | 2 +-
7 files changed, 128 insertions(+), 17 deletions(-)

diff --git a/arch/riscv/cpu/generic/cpu_vcpu_helper.c b/arch/riscv/cpu/generic/cpu_vcpu_helper.c
index e5ccd03d..dc40fa5d 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_helper.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_helper.c
@@ -54,7 +54,8 @@
riscv_isa_extension_mask(f) | \
riscv_isa_extension_mask(i) | \
riscv_isa_extension_mask(m) | \
- riscv_isa_extension_mask(h))
+ riscv_isa_extension_mask(h) | \
+ riscv_isa_extension_mask(SSTC))

static char *guest_fdt_find_serial_node(char *guest_name)
{
@@ -323,6 +324,7 @@ int arch_vcpu_init(struct vmm_vcpu *vcpu)
riscv_priv(vcpu)->hie = 0;
riscv_priv(vcpu)->hip = 0;
riscv_priv(vcpu)->hvip = 0;
+ riscv_priv(vcpu)->henvcfg = 0;
riscv_priv(vcpu)->vsstatus = 0;
riscv_priv(vcpu)->vstvec = 0;
riscv_priv(vcpu)->vsscratch = 0;
@@ -436,6 +438,7 @@ void arch_vcpu_switch(struct vmm_vcpu *tvcpu,
csr_write(CSR_VSTVAL, priv->vstval);
csr_write(CSR_VSATP, priv->vsatp);
csr_write(CSR_SCOUNTEREN, priv->scounteren);
+ cpu_vcpu_envcfg_update(vcpu, riscv_nested_virt(vcpu));
cpu_vcpu_timer_restore(vcpu);
cpu_vcpu_fp_restore(vcpu, regs);
cpu_vcpu_gstage_update(vcpu, riscv_nested_virt(vcpu));
@@ -453,6 +456,18 @@ void arch_vcpu_post_switch(struct vmm_vcpu *vcpu,
}
}

+void cpu_vcpu_envcfg_update(struct vmm_vcpu *vcpu, bool nested_virt)
+{
+ u64 henvcfg = (nested_virt) ? 0 : riscv_priv(vcpu)->henvcfg;
+
+#ifdef CONFIG_32BIT
+ csr_write(CSR_HENVCFG, (u32)henvcfg);
+ csr_write(CSR_HENVCFGH, (u32)(henvcfg >> 32));
+#else
+ csr_write(CSR_HENVCFG, henvcfg);
+#endif
+}
+
void cpu_vcpu_irq_deleg_update(struct vmm_vcpu *vcpu, bool nested_virt)
{
if (vcpu->is_normal && nested_virt) {
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_nested.c b/arch/riscv/cpu/generic/cpu_vcpu_nested.c
index 08f196c9..b75cdec4 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_nested.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_nested.c
@@ -1501,6 +1501,9 @@ void cpu_vcpu_nested_set_virt(struct vmm_vcpu *vcpu, struct arch_regs *regs,
npriv->hcounteren = csr_swap(CSR_HCOUNTEREN, npriv->hcounteren);
npriv->hedeleg = csr_swap(CSR_HEDELEG, npriv->hedeleg);

+ /* Update environment configuration */
+ cpu_vcpu_envcfg_update(vcpu, virt);
+
/* Update interrupt delegation */
cpu_vcpu_irq_deleg_update(vcpu, virt);

diff --git a/arch/riscv/cpu/generic/cpu_vcpu_timer.c b/arch/riscv/cpu/generic/cpu_vcpu_timer.c
index e73cc211..f4add40c 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_timer.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_timer.c
@@ -27,8 +27,9 @@
#include <vmm_stdio.h>
#include <vmm_timer.h>
#include <vmm_vcpu_irq.h>
-#include <cpu_vcpu_timer.h>

+#include <cpu_hwcap.h>
+#include <cpu_vcpu_timer.h>
#include <riscv_encoding.h>

struct cpu_vcpu_timer {
@@ -36,13 +37,33 @@ struct cpu_vcpu_timer {
struct vmm_timer_event time_ev;
};

+static inline u64 cpu_vcpu_timer_delta(struct vmm_vcpu *vcpu,
+ bool nested_virt)
+{
+ u64 ndelta = 0;
+
+ if (nested_virt) {
+ ndelta = riscv_nested_priv(vcpu)->htimedelta;
+#ifdef CONFIG_32BIT
+ ndelta |= ((u64)riscv_nested_priv(vcpu)->htimedeltah) << 32;
+#endif
+ }
+
+ return riscv_guest_priv(vcpu->guest)->time_delta + ndelta;
+}
+
static void cpu_vcpu_timer_expired(struct vmm_timer_event *ev)
{
struct vmm_vcpu *vcpu = ev->priv;
struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);

BUG_ON(!t);
- vmm_vcpu_irq_assert(vcpu, IRQ_VS_TIMER, 0x0);
+
+ if (riscv_isa_extension_available(riscv_priv(vcpu)->isa, SSTC)) {
+ vmm_vcpu_irq_wait_resume(vcpu);
+ } else {
+ vmm_vcpu_irq_assert(vcpu, IRQ_VS_TIMER, 0x0);
+ }
}

void cpu_vcpu_timer_start(struct vmm_vcpu *vcpu, u64 next_cycle)
@@ -53,6 +74,17 @@ void cpu_vcpu_timer_start(struct vmm_vcpu *vcpu, u64 next_cycle)
/* Save the next timer tick value */
t->next_cycle = next_cycle;

+ /* If Sstc available then simply update vstimecmp CSRs */
+ if (riscv_isa_extension_available(riscv_priv(vcpu)->isa, SSTC)) {
+#ifdef CONFIG_32BIT
+ csr_write(CSR_VSTIMECMP, (u32)t->next_cycle);
+ csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycle >> 32));
+#else
+ csr_write(CSR_VSTIMECMP, t->next_cycle);
+#endif
+ return;
+ }
+
/* Stop the timer when next timer tick equals U64_MAX */
if (next_cycle == U64_MAX) {
vmm_timer_event_stop(&t->time_ev);
@@ -67,38 +99,90 @@ void cpu_vcpu_timer_start(struct vmm_vcpu *vcpu, u64 next_cycle)
vmm_vcpu_irq_clear(vcpu, IRQ_VS_TIMER);

/* Start the timer event */
- next_cycle -= riscv_guest_priv(vcpu->guest)->time_delta;
+ next_cycle -= cpu_vcpu_timer_delta(vcpu, riscv_nested_virt(vcpu));
delta_ns = vmm_timer_delta_cycles_to_ns(next_cycle);
vmm_timer_event_start(&t->time_ev, delta_ns);
}

void cpu_vcpu_timer_delta_update(struct vmm_vcpu *vcpu, bool nested_virt)
{
- u64 vtdelta, tdelta = riscv_guest_priv(vcpu->guest)->time_delta;
+ u64 current_delta, new_delta = 0;
+ struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);

- if (nested_virt) {
- vtdelta = riscv_nested_priv(vcpu)->htimedelta;
-#ifndef CONFIG_64BIT
- vtdelta |= ((u64)riscv_nested_priv(vcpu)->htimedeltah) << 32;
+ current_delta = cpu_vcpu_timer_delta(vcpu, riscv_nested_virt(vcpu));
+ new_delta = cpu_vcpu_timer_delta(vcpu, nested_virt);
+
+#ifdef CONFIG_32BIT
+ csr_write(CSR_HTIMEDELTA, (u32)new_delta);
+ csr_write(CSR_HTIMEDELTAH, (u32)(new_delta >> 32));
+#else
+ csr_write(CSR_HTIMEDELTA, new_delta);
#endif
- tdelta += vtdelta;
+
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa, SSTC)) {
+ return;
}

-#ifdef CONFIG_64BIT
- csr_write(CSR_HTIMEDELTA, tdelta);
+ t->next_cycle += new_delta - current_delta;
+#ifdef CONFIG_32BIT
+ csr_write(CSR_VSTIMECMP, (u32)t->next_cycle);
+ csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycle >> 32));
#else
- csr_write(CSR_HTIMEDELTA, (u32)tdelta);
- csr_write(CSR_HTIMEDELTAH, (u32)(tdelta >> 32));
+ csr_write(CSR_VSTIMECMP, t->next_cycle);
#endif
}

void cpu_vcpu_timer_save(struct vmm_vcpu *vcpu)
{
+ u64 delta_ns;
+ struct cpu_vcpu_timer *t;
+
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa, SSTC)) {
+ return;
+ }
+
+ t = riscv_timer_priv(vcpu);
+
+#ifdef CONFIG_32BIT
+ t->next_cycle = csr_swap(CSR_VSTIMECMP, -1UL);
+ t->next_cycle |= (u64)csr_swap(CSR_VSTIMECMPH, -1UL) << 32;
+#else
+ t->next_cycle = csr_swap(CSR_VSTIMECMP, -1UL);
+#endif
+ if (t->next_cycle == U64_MAX) {
+ return;
+ }
+
+ delta_ns = t->next_cycle -
+ cpu_vcpu_timer_delta(vcpu, riscv_nested_virt(vcpu));
+ delta_ns = vmm_timer_delta_cycles_to_ns(delta_ns);
+ vmm_timer_event_start(&t->time_ev, delta_ns);
}

void cpu_vcpu_timer_restore(struct vmm_vcpu *vcpu)
{
- cpu_vcpu_timer_delta_update(vcpu, riscv_nested_virt(vcpu));
+ u64 time_delta = cpu_vcpu_timer_delta(vcpu, riscv_nested_virt(vcpu));
+ struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);
+
+#ifdef CONFIG_32BIT
+ csr_write(CSR_HTIMEDELTA, (u32)time_delta);
+ csr_write(CSR_HTIMEDELTAH, (u32)(time_delta >> 32));
+#else
+ csr_write(CSR_HTIMEDELTA, time_delta);
+#endif
+
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa, SSTC)) {
+ return;
+ }
+
+ vmm_timer_event_stop(&t->time_ev);
+
+#ifdef CONFIG_32BIT
+ csr_write(CSR_VSTIMECMP, (u32)t->next_cycle);
+ csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycle >> 32));
+#else
+ csr_write(CSR_VSTIMECMP, t->next_cycle);
+#endif
}

int cpu_vcpu_timer_init(struct vmm_vcpu *vcpu, void **timer)
@@ -118,8 +202,13 @@ int cpu_vcpu_timer_init(struct vmm_vcpu *vcpu, void **timer)
t = *timer;
}

+ t->next_cycle = U64_MAX;
vmm_timer_event_stop(&t->time_ev);

+ if (riscv_isa_extension_available(riscv_priv(vcpu)->isa, SSTC)) {
+ riscv_priv(vcpu)->henvcfg |= ENVCFG_STCE;
+ }
+
return VMM_OK;
}

diff --git a/arch/riscv/cpu/generic/include/arch_regs.h b/arch/riscv/cpu/generic/include/arch_regs.h
index fc251b24..23372b8e 100644
--- a/arch/riscv/cpu/generic/include/arch_regs.h
+++ b/arch/riscv/cpu/generic/include/arch_regs.h
@@ -215,6 +215,7 @@ struct riscv_priv {
unsigned long hie;
unsigned long hip;
unsigned long hvip;
+ u64 henvcfg;
unsigned long vsstatus;
unsigned long vstvec;
unsigned long vsscratch;
diff --git a/arch/riscv/cpu/generic/include/cpu_vcpu_helper.h b/arch/riscv/cpu/generic/include/cpu_vcpu_helper.h
index 355d515f..1c2f49d1 100644
--- a/arch/riscv/cpu/generic/include/cpu_vcpu_helper.h
+++ b/arch/riscv/cpu/generic/include/cpu_vcpu_helper.h
@@ -26,6 +26,9 @@
#include <vmm_types.h>
#include <vmm_manager.h>

+/** Function to update environment configuration */
+void cpu_vcpu_envcfg_update(struct vmm_vcpu *vcpu, bool nested_virt);
+
/** Function to update interrupt delegation */
void cpu_vcpu_irq_deleg_update(struct vmm_vcpu *vcpu, bool nested_virt);

diff --git a/tests/riscv/virt32/virt32-guest.dts b/tests/riscv/virt32/virt32-guest.dts
index 7f418fe2..4261c5d4 100644
--- a/tests/riscv/virt32/virt32-guest.dts
+++ b/tests/riscv/virt32/virt32-guest.dts
@@ -16,7 +16,7 @@
vcpu_template {
device_type = "vcpu";
compatible = "riscv,generic";
- riscv,isa = "rv32imafdch";
+ riscv,isa = "rv32imafdch_sstc";
start_pc = <0x00000000>;
poweroff;
};
diff --git a/tests/riscv/virt64/virt64-guest.dts b/tests/riscv/virt64/virt64-guest.dts
index 66f4dd59..5ffe044e 100644
--- a/tests/riscv/virt64/virt64-guest.dts
+++ b/tests/riscv/virt64/virt64-guest.dts
@@ -16,7 +16,7 @@
vcpu_template {
device_type = "vcpu";
compatible = "riscv,generic";
- riscv,isa = "rv64imafdch";
+ riscv,isa = "rv64imafdch_sstc";
start_pc = <0x00000000>;
poweroff;
};
--
2.34.1

Anup Patel

unread,
Oct 26, 2022, 7:17:03 AM10/26/22
to xvisor...@googlegroups.com, Anup Patel
The basic firmware should detect and append "sstc" to the ISA string
whenever Xvisor virtualizes it for the Guest/VM.

Signed-off-by: Anup Patel <apa...@ventanamicro.com>
---
tests/riscv/common/basic/arch_sbi.c | 39 ++++++++++++++++++++++++++++-
1 file changed, 38 insertions(+), 1 deletion(-)

diff --git a/tests/riscv/common/basic/arch_sbi.c b/tests/riscv/common/basic/arch_sbi.c
index eebcdaef..cfd55e76 100644
--- a/tests/riscv/common/basic/arch_sbi.c
+++ b/tests/riscv/common/basic/arch_sbi.c
@@ -181,6 +181,29 @@ void sbi_reset(void)
#define SBI_EXT_XVISOR (SBI_EXT_FIRMWARE_START + 0x2)
#define SBI_EXT_XVISOR_ISA_EXT 0x0

+/*
+ * Increase this to a higher value as the kernel supports more ISA extensions.
+ */
+#define RISCV_ISA_EXT_MAX 64
+
+/* The base ID for multi-letter ISA extensions */
+#define RISCV_ISA_EXT_BASE 26
+
+/*
+ * This enum represents the logical ID for each multi-letter
+ * RISC-V ISA extension. The logical ID should start from
+ * RISCV_ISA_EXT_BASE and must not exceed RISCV_ISA_EXT_MAX.
+ * 0-25 range is reserved for single letter extensions while
+ * all the multi-letter extensions should define the next
+ * available logical extension id.
+ */
+enum riscv_isa_ext_id {
+ RISCV_ISA_EXT_SSAIA = RISCV_ISA_EXT_BASE,
+ RISCV_ISA_EXT_SMAIA,
+ RISCV_ISA_EXT_SSTC,
+ RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX,
+};
+
unsigned long sbi_xvisor_isa_string(char *out_isa, unsigned long max_len)
{
struct sbiret ret;
@@ -191,6 +214,8 @@ unsigned long sbi_xvisor_isa_string(char *out_isa, unsigned long max_len)
if (!out_isa || (max_len - pos) < 5)
return pos;

+ basic_memset(out_isa, 0, max_len);
+
#if __riscv_xlen == 64
basic_strcpy(&out_isa[pos], "rv64");
#elif __riscv_xlen == 32
@@ -218,7 +243,19 @@ unsigned long sbi_xvisor_isa_string(char *out_isa, unsigned long max_len)

out_isa[pos++] = valid_isa_order[i];
}
- out_isa[pos++] = '\0';
+
+#define SET_ISA_EXT_MAP(__name, __bit) \
+ do { \
+ ret = sbi_ecall(SBI_EXT_XVISOR, SBI_EXT_XVISOR_ISA_EXT, \
+ __bit, 0, 0, 0, 0, 0); \
+ if (!ret.error && ret.value) { \
+ basic_strcat(&out_isa[pos], "_" __name); \
+ pos += basic_strlen("_" __name); \
+ } \
+ } while (0) \
+
+ SET_ISA_EXT_MAP("sstc", RISCV_ISA_EXT_SSTC);
+#undef SET_ISA_EXT_MAP

return pos;
}
--
2.34.1

Anup Patel

unread,
Oct 26, 2022, 7:17:04 AM10/26/22
to xvisor...@googlegroups.com, Anup Patel
The correct location of time delta update function is cpu_vcpu_timer.c
where other bits of VCPU timer virtualization code resides.

Signed-off-by: Anup Patel <apa...@ventanamicro.com>
---
arch/riscv/cpu/generic/cpu_vcpu_helper.c | 22 +------------------
arch/riscv/cpu/generic/cpu_vcpu_nested.c | 3 ++-
arch/riscv/cpu/generic/cpu_vcpu_timer.c | 20 +++++++++++++++++
.../cpu/generic/include/cpu_vcpu_helper.h | 3 ---
.../cpu/generic/include/cpu_vcpu_timer.h | 1 +
5 files changed, 24 insertions(+), 25 deletions(-)

diff --git a/arch/riscv/cpu/generic/cpu_vcpu_helper.c b/arch/riscv/cpu/generic/cpu_vcpu_helper.c
index c019b924..b41c363c 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_helper.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_helper.c
@@ -432,7 +432,7 @@ void arch_vcpu_switch(struct vmm_vcpu *tvcpu,
csr_write(CSR_VSATP, priv->vsatp);
csr_write(CSR_SCOUNTEREN, priv->scounteren);
cpu_vcpu_fp_restore(vcpu, regs);
- cpu_vcpu_time_delta_update(vcpu, riscv_nested_virt(vcpu));
+ cpu_vcpu_timer_delta_update(vcpu, riscv_nested_virt(vcpu));
cpu_vcpu_gstage_update(vcpu, riscv_nested_virt(vcpu));
cpu_vcpu_irq_deleg_update(vcpu, riscv_nested_virt(vcpu));
} else {
@@ -469,26 +469,6 @@ void cpu_vcpu_irq_deleg_update(struct vmm_vcpu *vcpu, bool nested_virt)
}
}

-void cpu_vcpu_time_delta_update(struct vmm_vcpu *vcpu, bool nested_virt)
-{
- u64 vtdelta, tdelta = riscv_guest_priv(vcpu->guest)->time_delta;
-
- if (nested_virt) {
- vtdelta = riscv_nested_priv(vcpu)->htimedelta;
-#ifndef CONFIG_64BIT
- vtdelta |= ((u64)riscv_nested_priv(vcpu)->htimedeltah) << 32;
-#endif
- tdelta += vtdelta;
- }
-
-#ifdef CONFIG_64BIT
- csr_write(CSR_HTIMEDELTA, tdelta);
-#else
- csr_write(CSR_HTIMEDELTA, (u32)tdelta);
- csr_write(CSR_HTIMEDELTAH, (u32)(tdelta >> 32));
-#endif
-}
-
void cpu_vcpu_gstage_update(struct vmm_vcpu *vcpu, bool nested_virt)
{
struct mmu_pgtbl *pgtbl = (nested_virt) ?
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_nested.c b/arch/riscv/cpu/generic/cpu_vcpu_nested.c
index 1c7871a8..08f196c9 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_nested.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_nested.c
@@ -34,6 +34,7 @@
#include <cpu_tlb.h>
#include <cpu_vcpu_helper.h>
#include <cpu_vcpu_nested.h>
+#include <cpu_vcpu_timer.h>
#include <cpu_vcpu_trap.h>
#include <riscv_csr.h>

@@ -1504,7 +1505,7 @@ void cpu_vcpu_nested_set_virt(struct vmm_vcpu *vcpu, struct arch_regs *regs,
cpu_vcpu_irq_deleg_update(vcpu, virt);

/* Update time delta */
- cpu_vcpu_time_delta_update(vcpu, virt);
+ cpu_vcpu_timer_delta_update(vcpu, virt);

/* Update G-stage page table */
cpu_vcpu_gstage_update(vcpu, virt);
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_timer.c b/arch/riscv/cpu/generic/cpu_vcpu_timer.c
index c1e046e6..891f00b2 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_timer.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_timer.c
@@ -67,6 +67,26 @@ void cpu_vcpu_timer_start(struct vmm_vcpu *vcpu, u64 next_cycle)
vmm_timer_event_start(&t->time_ev, delta_ns);
}

+void cpu_vcpu_timer_delta_update(struct vmm_vcpu *vcpu, bool nested_virt)
+{
+ u64 vtdelta, tdelta = riscv_guest_priv(vcpu->guest)->time_delta;
+
+ if (nested_virt) {
+ vtdelta = riscv_nested_priv(vcpu)->htimedelta;
+#ifndef CONFIG_64BIT
+ vtdelta |= ((u64)riscv_nested_priv(vcpu)->htimedeltah) << 32;
+#endif
+ tdelta += vtdelta;
+ }
+
+#ifdef CONFIG_64BIT
+ csr_write(CSR_HTIMEDELTA, tdelta);
+#else
+ csr_write(CSR_HTIMEDELTA, (u32)tdelta);
+ csr_write(CSR_HTIMEDELTAH, (u32)(tdelta >> 32));
+#endif
+}
+
int cpu_vcpu_timer_init(struct vmm_vcpu *vcpu, void **timer)
{
struct cpu_vcpu_timer *t;
diff --git a/arch/riscv/cpu/generic/include/cpu_vcpu_helper.h b/arch/riscv/cpu/generic/include/cpu_vcpu_helper.h
index fc587323..355d515f 100644
--- a/arch/riscv/cpu/generic/include/cpu_vcpu_helper.h
+++ b/arch/riscv/cpu/generic/include/cpu_vcpu_helper.h
@@ -29,9 +29,6 @@
/** Function to update interrupt delegation */
void cpu_vcpu_irq_deleg_update(struct vmm_vcpu *vcpu, bool nested_virt);

-/** Function to update time delta */
-void cpu_vcpu_time_delta_update(struct vmm_vcpu *vcpu, bool nested_virt);
-
/** Function to update G-stage page table */
void cpu_vcpu_gstage_update(struct vmm_vcpu *vcpu, bool nested_virt);

diff --git a/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h b/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h
index 71df1662..30079c92 100644
--- a/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h
+++ b/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h
@@ -29,6 +29,7 @@
struct vmm_vcpu;

void cpu_vcpu_timer_start(struct vmm_vcpu *vcpu, u64 next_cycle);
+void cpu_vcpu_timer_delta_update(struct vmm_vcpu *vcpu, bool nested_virt);

Anup Patel

unread,
Oct 26, 2022, 7:17:06 AM10/26/22
to xvisor...@googlegroups.com, Anup Patel
We should take nested interrupts after vmm_scheduler_irq_exit() because
vmm_scheduler_irq_exit() might schedule-out the current VCPU. This also
helps us simplify arch_vcpu_post_switch() because we don't need to take
nested interrupts over there as well.

Signed-off-by: Anup Patel <apa...@ventanamicro.com>
---
arch/riscv/cpu/generic/cpu_exception.c | 7 ++-----
arch/riscv/cpu/generic/cpu_vcpu_helper.c | 4 +---
2 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/arch/riscv/cpu/generic/cpu_exception.c b/arch/riscv/cpu/generic/cpu_exception.c
index d4a08dd2..72993bc2 100644
--- a/arch/riscv/cpu/generic/cpu_exception.c
+++ b/arch/riscv/cpu/generic/cpu_exception.c
@@ -79,9 +79,6 @@ done:
do_error(vmm_scheduler_current_vcpu(), regs,
cause | SCAUSE_INTERRUPT_MASK,
"interrupt handling failed", rc, TRUE);
- } else {
- cpu_vcpu_nested_take_vsirq(vmm_scheduler_current_vcpu(),
- regs);
}

vmm_scheduler_irq_exit(regs);
@@ -187,8 +184,6 @@ void do_handle_trap(arch_regs_t *regs, unsigned long cause)
done:
if (rc) {
do_error(vcpu, regs, cause, msg, rc, panic);
- } else {
- cpu_vcpu_nested_take_vsirq(vcpu, regs);
}

vmm_scheduler_irq_exit(regs);
@@ -203,6 +198,8 @@ void do_handle_exception(arch_regs_t *regs)
} else {
do_handle_trap(regs, scause & ~SCAUSE_INTERRUPT_MASK);
}
+
+ cpu_vcpu_nested_take_vsirq(vmm_scheduler_current_vcpu(), regs);
}

int __cpuinit arch_cpu_irq_setup(void)
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_helper.c b/arch/riscv/cpu/generic/cpu_vcpu_helper.c
index dc40fa5d..b4f8d256 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_helper.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_helper.c
@@ -451,9 +451,7 @@ void arch_vcpu_switch(struct vmm_vcpu *tvcpu,
void arch_vcpu_post_switch(struct vmm_vcpu *vcpu,
arch_regs_t *regs)
{
- if (vcpu->is_normal) {
- cpu_vcpu_nested_take_vsirq(vcpu, regs);
- }
+ /* For now nothing to do here. */
}

void cpu_vcpu_envcfg_update(struct vmm_vcpu *vcpu, bool nested_virt)
--
2.34.1

Anup Patel

unread,
Oct 26, 2022, 7:17:09 AM10/26/22
to xvisor...@googlegroups.com, Anup Patel
The guest hypervisor can allow nested guest to use Sstc extension
whenever Sstc is available to guest hypervisor so let us virtualize
Sstc extension for nested guest.

Signed-off-by: Anup Patel <apa...@ventanamicro.com>
---
arch/riscv/cpu/generic/cpu_vcpu_nested.c | 151 ++++++++++++++--
arch/riscv/cpu/generic/cpu_vcpu_timer.c | 164 +++++++++++++++---
arch/riscv/cpu/generic/cpu_vcpu_trap.c | 16 ++
arch/riscv/cpu/generic/include/arch_regs.h | 2 +
.../cpu/generic/include/cpu_vcpu_timer.h | 5 +
.../cpu/generic/include/riscv_encoding.h | 3 +
6 files changed, 308 insertions(+), 33 deletions(-)

diff --git a/arch/riscv/cpu/generic/cpu_vcpu_nested.c b/arch/riscv/cpu/generic/cpu_vcpu_nested.c
index b75cdec4..8c897902 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_nested.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_nested.c
@@ -856,6 +856,8 @@ void cpu_vcpu_nested_reset(struct vmm_vcpu *vcpu)
npriv->htimedeltah = 0;
npriv->htval = 0;
npriv->htinst = 0;
+ npriv->henvcfg = 0;
+ npriv->henvcfgh = 0;
npriv->hgatp = 0;
npriv->vsstatus = 0;
npriv->vsie = 0;
@@ -923,9 +925,11 @@ int cpu_vcpu_nested_smode_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
unsigned int csr_num, unsigned long *val,
unsigned long new_val, unsigned long wr_mask)
{
+ u64 tmp64;
int csr_shift = 0;
bool read_only = FALSE;
- unsigned long *csr, zero = 0, writeable_mask = 0;
+ unsigned long *csr, tmpcsr = 0, csr_rdor = 0;
+ unsigned long zero = 0, writeable_mask = 0;
struct riscv_priv_nested *npriv = riscv_nested_priv(vcpu);

riscv_stats_priv(vcpu)->nested_smode_csr_rmw++;
@@ -965,6 +969,7 @@ int cpu_vcpu_nested_smode_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
return TRAP_RETURN_VIRTUAL_INSN;
}
csr = &npriv->hvip;
+ csr_rdor = cpu_vcpu_timer_vs_irq(vcpu) ? HVIP_VSTIP : 0;
csr_shift = 1;
writeable_mask = HVIP_VSSIP & npriv->hideleg;
break;
@@ -974,18 +979,48 @@ int cpu_vcpu_nested_smode_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
}
csr = &zero;
break;
+ case CSR_STIMECMP:
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa,
+ SSTC)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+#ifdef CONFIG_32BIT
+ if (!(npriv->henvcfgh & ENVCFGH_STCE)) {
+#else
+ if (!(npriv->henvcfg & ENVCFG_STCE)) {
+#endif
+ return TRAP_RETURN_VIRTUAL_INSN;
+ }
+ tmpcsr = cpu_vcpu_timer_vs_cycle(vcpu);
+ csr = &tmpcsr;
+ writeable_mask = -1UL;
+ break;
+#ifdef CONFIG_32BIT
+ case CSR_STIMECMPH:
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa,
+ SSTC)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+ if (!(npriv->henvcfgh & ENVCFGH_STCE)) {
+ return TRAP_RETURN_VIRTUAL_INSN;
+ }
+ tmpcsr = cpu_vcpu_timer_vs_cycle(vcpu) >> 32;
+ csr = &tmpcsr;
+ writeable_mask = -1UL;
+ break;
+#endif
default:
return TRAP_RETURN_ILLEGAL_INSN;
}

if (val) {
- *val = (csr_shift < 0) ?
- (*csr) << -csr_shift : (*csr) >> csr_shift;
+ *val = (csr_shift < 0) ? (*csr | csr_rdor) << -csr_shift :
+ (*csr | csr_rdor) >> csr_shift;
}

if (read_only) {
return TRAP_RETURN_ILLEGAL_INSN;
- } else {
+ } else if (wr_mask) {
writeable_mask = (csr_shift < 0) ?
writeable_mask >> -csr_shift :
writeable_mask << csr_shift;
@@ -995,6 +1030,29 @@ int cpu_vcpu_nested_smode_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
new_val >> -csr_shift : new_val << csr_shift;
wr_mask &= writeable_mask;
*csr = (*csr & ~wr_mask) | (new_val & wr_mask);
+
+ switch (csr_num) {
+ case CSR_STIMECMP:
+#ifdef CONFIG_32BIT
+ tmp64 = cpu_vcpu_timer_vs_cycle(vcpu);
+			tmp64 &= ~0xffffffffULL;
+ tmp64 |= tmpcsr;
+#else
+ tmp64 = tmpcsr;
+#endif
+ cpu_vcpu_timer_vs_start(vcpu, tmp64);
+ break;
+#ifdef CONFIG_32BIT
+ case CSR_STIMECMPH:
+ tmp64 = cpu_vcpu_timer_vs_cycle(vcpu);
+			tmp64 &= ~0xffffffff00000000ULL;
+ tmp64 |= ((u64)tmpcsr) << 32;
+ cpu_vcpu_timer_vs_start(vcpu, tmp64);
+ break;
+#endif
+ default:
+ break;
+ }
}

return VMM_OK;
@@ -1004,10 +1062,12 @@ int cpu_vcpu_nested_hext_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
unsigned int csr_num, unsigned long *val,
unsigned long new_val, unsigned long wr_mask)
{
+ u64 tmp64;
int csr_shift = 0;
bool read_only = FALSE, nuke_swtlb = FALSE;
unsigned int csr_priv = (csr_num >> 8) & 0x3;
- unsigned long *csr, mode, zero = 0, writeable_mask = 0;
+ unsigned long *csr, tmpcsr = 0, csr_rdor = 0;
+ unsigned long mode, zero = 0, writeable_mask = 0;
struct riscv_priv_nested *npriv = riscv_nested_priv(vcpu);

riscv_stats_priv(vcpu)->nested_hext_csr_rmw++;
@@ -1082,6 +1142,7 @@ int cpu_vcpu_nested_hext_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
break;
case CSR_HIP:
csr = &npriv->hvip;
+ csr_rdor = cpu_vcpu_timer_vs_irq(vcpu) ? HVIP_VSTIP : 0;
writeable_mask = HVIP_VSSIP;
break;
case CSR_HGEIP:
@@ -1157,11 +1218,19 @@ int cpu_vcpu_nested_hext_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
}
break;
case CSR_HENVCFG:
-#ifndef CONFIG_64BIT
- case CSR_HENVCFGH:
+ csr = &npriv->henvcfg;
+#ifdef CONFIG_32BIT
+ writeable_mask = 0;
+#else
+ writeable_mask = ENVCFG_STCE;
#endif
- csr = &zero;
break;
+#ifdef CONFIG_32BIT
+ case CSR_HENVCFGH:
+ csr = &npriv->henvcfgh;
+ writeable_mask = ENVCFGH_STCE;
+ break;
+#endif
case CSR_VSSTATUS:
csr = &npriv->vsstatus;
writeable_mask = SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_UBE |
@@ -1170,6 +1239,7 @@ int cpu_vcpu_nested_hext_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
break;
case CSR_VSIP:
csr = &npriv->hvip;
+ csr_rdor = cpu_vcpu_timer_vs_irq(vcpu) ? HVIP_VSTIP : 0;
csr_shift = 1;
writeable_mask = HVIP_VSSIP & npriv->hideleg;
break;
@@ -1237,6 +1307,26 @@ int cpu_vcpu_nested_hext_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
new_val |= (mode << SATP_MODE_SHIFT) & SATP_MODE;
}
break;
+ case CSR_VSTIMECMP:
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa,
+ SSTC)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+ tmpcsr = cpu_vcpu_timer_vs_cycle(vcpu);
+ csr = &tmpcsr;
+ writeable_mask = -1UL;
+ break;
+#ifdef CONFIG_32BIT
+ case CSR_VSTIMECMPH:
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa,
+ SSTC)) {
+ return TRAP_RETURN_ILLEGAL_INSN;
+ }
+ tmpcsr = cpu_vcpu_timer_vs_cycle(vcpu) >> 32;
+ csr = &tmpcsr;
+ writeable_mask = -1UL;
+ break;
+#endif
case CSR_HVICTL:
csr = &npriv->hvictl;
writeable_mask = HVICTL_VTI | HVICTL_IID |
@@ -1247,13 +1337,13 @@ int cpu_vcpu_nested_hext_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
}

if (val) {
- *val = (csr_shift < 0) ?
- (*csr) << -csr_shift : (*csr) >> csr_shift;
+ *val = (csr_shift < 0) ? (*csr | csr_rdor) << -csr_shift :
+ (*csr | csr_rdor) >> csr_shift;
}

if (read_only) {
return TRAP_RETURN_ILLEGAL_INSN;
- } else {
+ } else if (wr_mask) {
writeable_mask = (csr_shift < 0) ?
writeable_mask >> -csr_shift :
writeable_mask << csr_shift;
@@ -1263,6 +1353,43 @@ int cpu_vcpu_nested_hext_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
new_val >> -csr_shift : new_val << csr_shift;
wr_mask &= writeable_mask;
*csr = (*csr & ~wr_mask) | (new_val & wr_mask);
+
+ switch (csr_num) {
+ case CSR_VSTIMECMP:
+#ifdef CONFIG_32BIT
+ tmp64 = cpu_vcpu_timer_vs_cycle(vcpu);
+			tmp64 &= ~0xffffffffULL;
+ tmp64 |= tmpcsr;
+#else
+ tmp64 = tmpcsr;
+#endif
+ cpu_vcpu_timer_vs_start(vcpu, tmp64);
+ break;
+#ifdef CONFIG_32BIT
+ case CSR_VSTIMECMPH:
+ tmp64 = cpu_vcpu_timer_vs_cycle(vcpu);
+			tmp64 &= ~0xffffffff00000000ULL;
+ tmp64 |= ((u64)tmpcsr) << 32;
+ cpu_vcpu_timer_vs_start(vcpu, tmp64);
+ break;
+#endif
+ case CSR_HTIMEDELTA:
+ if (riscv_isa_extension_available(riscv_priv(vcpu)->isa,
+ SSTC)) {
+ cpu_vcpu_timer_vs_restart(vcpu);
+ }
+ break;
+#ifdef CONFIG_32BIT
+ case CSR_HTIMEDELTAH:
+ if (riscv_isa_extension_available(riscv_priv(vcpu)->isa,
+ SSTC)) {
+ cpu_vcpu_timer_vs_restart(vcpu);
+ }
+ break;
+#endif
+ default:
+ break;
+ }
}

if (nuke_swtlb) {
@@ -1626,7 +1753,7 @@ void cpu_vcpu_nested_take_vsirq(struct vmm_vcpu *vcpu,

/* Determine virtual-VS mode interrupt number */
vsirq = 0;
- irqs = npriv->hvip;
+ irqs = npriv->hvip | (cpu_vcpu_timer_vs_irq(vcpu) ? HVIP_VSTIP : 0);
irqs &= npriv->vsie << 1;
irqs &= npriv->hideleg;
if (irqs & MIP_VSEIP) {
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_timer.c b/arch/riscv/cpu/generic/cpu_vcpu_timer.c
index f4add40c..cb19aeed 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_timer.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_timer.c
@@ -25,15 +25,22 @@
#include <vmm_heap.h>
#include <vmm_limits.h>
#include <vmm_stdio.h>
+#include <vmm_scheduler.h>
#include <vmm_timer.h>
#include <vmm_vcpu_irq.h>

#include <cpu_hwcap.h>
#include <cpu_vcpu_timer.h>
+#include <cpu_vcpu_trap.h>
#include <riscv_encoding.h>

struct cpu_vcpu_timer {
+ /* virtual-VS mode state */
+ u64 vs_next_cycle;
+ struct vmm_timer_event vs_time_ev;
+ /* S mode state */
u64 next_cycle;
+ struct vmm_timer_event time_nested_ev;
struct vmm_timer_event time_ev;
};

@@ -52,6 +59,81 @@ static inline u64 cpu_vcpu_timer_delta(struct vmm_vcpu *vcpu,
return riscv_guest_priv(vcpu->guest)->time_delta + ndelta;
}

+bool cpu_vcpu_timer_vs_irq(struct vmm_vcpu *vcpu)
+{
+ struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);
+
+ return t->vs_next_cycle <=
+ (csr_read(CSR_TIME) + cpu_vcpu_timer_delta(vcpu, TRUE));
+}
+
+u64 cpu_vcpu_timer_vs_cycle(struct vmm_vcpu *vcpu)
+{
+ struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);
+
+ return t->vs_next_cycle;
+}
+
+static void cpu_vcpu_timer_vs_expired(struct vmm_timer_event *ev)
+{
+ struct vmm_vcpu *vcpu = ev->priv;
+
+ if (cpu_vcpu_timer_vs_irq(vcpu)) {
+ vmm_vcpu_irq_wait_resume(vcpu);
+ } else {
+ cpu_vcpu_timer_vs_restart(vcpu);
+ }
+}
+
+void cpu_vcpu_timer_vs_restart(struct vmm_vcpu *vcpu)
+{
+ u64 vs_delta_ns, vs_next_cycle;
+ struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);
+
+ /* Stop the VS timer when next timer tick equals U64_MAX */
+ if (t->vs_next_cycle == U64_MAX) {
+ vmm_timer_event_stop(&t->vs_time_ev);
+ return;
+ }
+
+	/* Do nothing if Virtual-VS mode IRQ is pending */
+ if (cpu_vcpu_timer_vs_irq(vcpu)) {
+ vmm_timer_event_stop(&t->vs_time_ev);
+ return;
+ }
+
+ /* Start the VS timer event */
+ vs_next_cycle = t->vs_next_cycle - cpu_vcpu_timer_delta(vcpu, TRUE);
+ vs_delta_ns = vmm_timer_delta_cycles_to_ns(vs_next_cycle);
+ vmm_timer_event_start(&t->vs_time_ev, vs_delta_ns);
+}
+
+void cpu_vcpu_timer_vs_start(struct vmm_vcpu *vcpu, u64 vs_next_cycle)
+{
+ struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);
+
+ /* Save the next VS timer tick value */
+ t->vs_next_cycle = vs_next_cycle;
+
+ /* Restart VS timer */
+ cpu_vcpu_timer_vs_restart(vcpu);
+}
+
+static void cpu_vcpu_timer_nested_expired(struct vmm_timer_event *ev)
+{
+ int rc;
+ struct vmm_vcpu *vcpu = ev->priv;
+
+ if (!riscv_isa_extension_available(riscv_priv(vcpu)->isa, SSTC)) {
+ return;
+ }
+
+ /* Redirect trap to invoke nested world switch */
+ rc = cpu_vcpu_redirect_vsirq(vcpu, vmm_scheduler_irq_regs(),
+ IRQ_VS_TIMER);
+ BUG_ON(rc);
+}
+
static void cpu_vcpu_timer_expired(struct vmm_timer_event *ev)
{
struct vmm_vcpu *vcpu = ev->priv;
@@ -71,6 +153,9 @@ void cpu_vcpu_timer_start(struct vmm_vcpu *vcpu, u64 next_cycle)
u64 delta_ns;
struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);

+ /* This function should only be called when nested virt is OFF */
+ BUG_ON(riscv_nested_virt(vcpu));
+
/* Save the next timer tick value */
t->next_cycle = next_cycle;

@@ -99,19 +184,17 @@ void cpu_vcpu_timer_start(struct vmm_vcpu *vcpu, u64 next_cycle)
vmm_vcpu_irq_clear(vcpu, IRQ_VS_TIMER);

/* Start the timer event */
- next_cycle -= cpu_vcpu_timer_delta(vcpu, riscv_nested_virt(vcpu));
+ next_cycle -= cpu_vcpu_timer_delta(vcpu, FALSE);
delta_ns = vmm_timer_delta_cycles_to_ns(next_cycle);
vmm_timer_event_start(&t->time_ev, delta_ns);
}

void cpu_vcpu_timer_delta_update(struct vmm_vcpu *vcpu, bool nested_virt)
{
- u64 current_delta, new_delta = 0;
+ u64 delta_ns, new_delta;
struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);

- current_delta = cpu_vcpu_timer_delta(vcpu, riscv_nested_virt(vcpu));
new_delta = cpu_vcpu_timer_delta(vcpu, nested_virt);
-
#ifdef CONFIG_32BIT
csr_write(CSR_HTIMEDELTA, (u32)new_delta);
csr_write(CSR_HTIMEDELTAH, (u32)(new_delta >> 32));
@@ -123,13 +206,30 @@ void cpu_vcpu_timer_delta_update(struct vmm_vcpu *vcpu, bool nested_virt)
return;
}

- t->next_cycle += new_delta - current_delta;
+ if (nested_virt) {
#ifdef CONFIG_32BIT
- csr_write(CSR_VSTIMECMP, (u32)t->next_cycle);
- csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycle >> 32));
+ t->next_cycle = csr_swap(CSR_VSTIMECMP, -1UL);
+ t->next_cycle |= (u64)csr_swap(CSR_VSTIMECMPH, -1UL) << 32;
#else
- csr_write(CSR_VSTIMECMP, t->next_cycle);
+ t->next_cycle = csr_swap(CSR_VSTIMECMP, -1UL);
#endif
+
+ if (t->next_cycle != U64_MAX) {
+ delta_ns = t->next_cycle -
+ cpu_vcpu_timer_delta(vcpu, FALSE);
+ delta_ns = vmm_timer_delta_cycles_to_ns(delta_ns);
+ vmm_timer_event_start(&t->time_nested_ev, delta_ns);
+ }
+ } else {
+ vmm_timer_event_stop(&t->time_nested_ev);
+
+#ifdef CONFIG_32BIT
+ csr_write(CSR_VSTIMECMP, (u32)t->next_cycle);
+ csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycle >> 32));
+#else
+ csr_write(CSR_VSTIMECMP, t->next_cycle);
+#endif
+ }
}

void cpu_vcpu_timer_save(struct vmm_vcpu *vcpu)
@@ -143,27 +243,30 @@ void cpu_vcpu_timer_save(struct vmm_vcpu *vcpu)

t = riscv_timer_priv(vcpu);

+ if (riscv_nested_virt(vcpu)) {
+ vmm_timer_event_stop(&t->time_nested_ev);
+ } else {
#ifdef CONFIG_32BIT
- t->next_cycle = csr_swap(CSR_VSTIMECMP, -1UL);
- t->next_cycle |= (u64)csr_swap(CSR_VSTIMECMPH, -1UL) << 32;
+ t->next_cycle = csr_swap(CSR_VSTIMECMP, -1UL);
+ t->next_cycle |= (u64)csr_swap(CSR_VSTIMECMPH, -1UL) << 32;
#else
- t->next_cycle = csr_swap(CSR_VSTIMECMP, -1UL);
+ t->next_cycle = csr_swap(CSR_VSTIMECMP, -1UL);
#endif
- if (t->next_cycle == U64_MAX) {
- return;
}

- delta_ns = t->next_cycle -
- cpu_vcpu_timer_delta(vcpu, riscv_nested_virt(vcpu));
- delta_ns = vmm_timer_delta_cycles_to_ns(delta_ns);
- vmm_timer_event_start(&t->time_ev, delta_ns);
+ if (t->next_cycle != U64_MAX) {
+ delta_ns = t->next_cycle - cpu_vcpu_timer_delta(vcpu, FALSE);
+ delta_ns = vmm_timer_delta_cycles_to_ns(delta_ns);
+ vmm_timer_event_start(&t->time_ev, delta_ns);
+ }
}

void cpu_vcpu_timer_restore(struct vmm_vcpu *vcpu)
{
- u64 time_delta = cpu_vcpu_timer_delta(vcpu, riscv_nested_virt(vcpu));
+ u64 delta_ns, time_delta;
struct cpu_vcpu_timer *t = riscv_timer_priv(vcpu);

+ time_delta = cpu_vcpu_timer_delta(vcpu, riscv_nested_virt(vcpu));
#ifdef CONFIG_32BIT
csr_write(CSR_HTIMEDELTA, (u32)time_delta);
csr_write(CSR_HTIMEDELTAH, (u32)(time_delta >> 32));
@@ -177,12 +280,21 @@ void cpu_vcpu_timer_restore(struct vmm_vcpu *vcpu)

vmm_timer_event_stop(&t->time_ev);

+ if (riscv_nested_virt(vcpu)) {
+ if (t->next_cycle != U64_MAX) {
+ delta_ns = t->next_cycle -
+ cpu_vcpu_timer_delta(vcpu, FALSE);
+ delta_ns = vmm_timer_delta_cycles_to_ns(delta_ns);
+ vmm_timer_event_start(&t->time_nested_ev, delta_ns);
+ }
+ } else {
#ifdef CONFIG_32BIT
- csr_write(CSR_VSTIMECMP, (u32)t->next_cycle);
- csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycle >> 32));
+ csr_write(CSR_VSTIMECMP, (u32)t->next_cycle);
+ csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycle >> 32));
#else
- csr_write(CSR_VSTIMECMP, t->next_cycle);
+ csr_write(CSR_VSTIMECMP, t->next_cycle);
#endif
+ }
}

int cpu_vcpu_timer_init(struct vmm_vcpu *vcpu, void **timer)
@@ -197,12 +309,20 @@ int cpu_vcpu_timer_init(struct vmm_vcpu *vcpu, void **timer)
if (!(*timer))
return VMM_ENOMEM;
t = *timer;
+ INIT_TIMER_EVENT(&t->vs_time_ev,
+ cpu_vcpu_timer_vs_expired, vcpu);
+ INIT_TIMER_EVENT(&t->time_nested_ev,
+ cpu_vcpu_timer_nested_expired, vcpu);
INIT_TIMER_EVENT(&t->time_ev, cpu_vcpu_timer_expired, vcpu);
} else {
t = *timer;
}

+ t->vs_next_cycle = U64_MAX;
+ vmm_timer_event_stop(&t->vs_time_ev);
+
t->next_cycle = U64_MAX;
+ vmm_timer_event_stop(&t->time_nested_ev);
vmm_timer_event_stop(&t->time_ev);

if (riscv_isa_extension_available(riscv_priv(vcpu)->isa, SSTC)) {
@@ -220,6 +340,8 @@ int cpu_vcpu_timer_deinit(struct vmm_vcpu *vcpu, void **timer)
return VMM_EINVALID;
t = *timer;

+ vmm_timer_event_stop(&t->vs_time_ev);
+ vmm_timer_event_stop(&t->time_nested_ev);
vmm_timer_event_stop(&t->time_ev);
vmm_free(t);

diff --git a/arch/riscv/cpu/generic/cpu_vcpu_trap.c b/arch/riscv/cpu/generic/cpu_vcpu_trap.c
index 32bcc6dd..4ebce159 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_trap.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_trap.c
@@ -565,6 +565,14 @@ static const struct csr_func csr_funcs[] = {
.csr_num = CSR_SIPH,
.rmw_func = cpu_vcpu_nested_smode_csr_rmw,
},
+ {
+ .csr_num = CSR_STIMECMP,
+ .rmw_func = cpu_vcpu_nested_smode_csr_rmw,
+ },
+ {
+ .csr_num = CSR_STIMECMPH,
+ .rmw_func = cpu_vcpu_nested_smode_csr_rmw,
+ },
{
.csr_num = CSR_HSTATUS,
.rmw_func = cpu_vcpu_nested_hext_csr_rmw,
@@ -665,6 +673,14 @@ static const struct csr_func csr_funcs[] = {
.csr_num = CSR_VSATP,
.rmw_func = cpu_vcpu_nested_hext_csr_rmw,
},
+ {
+ .csr_num = CSR_VSTIMECMP,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
+ {
+ .csr_num = CSR_VSTIMECMPH,
+ .rmw_func = cpu_vcpu_nested_hext_csr_rmw,
+ },
};

static int csr_insn(struct vmm_vcpu *vcpu, arch_regs_t *regs, ulong insn)
diff --git a/arch/riscv/cpu/generic/include/arch_regs.h b/arch/riscv/cpu/generic/include/arch_regs.h
index 23372b8e..3afa9faf 100644
--- a/arch/riscv/cpu/generic/include/arch_regs.h
+++ b/arch/riscv/cpu/generic/include/arch_regs.h
@@ -173,6 +173,8 @@ struct riscv_priv_nested {
unsigned long htimedeltah;
unsigned long htval;
unsigned long htinst;
+ unsigned long henvcfg;
+ unsigned long henvcfgh;
unsigned long hgatp;
unsigned long vsstatus;
unsigned long vsie;
diff --git a/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h b/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h
index caa3eea5..84d00707 100644
--- a/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h
+++ b/arch/riscv/cpu/generic/include/cpu_vcpu_timer.h
@@ -28,6 +28,11 @@

struct vmm_vcpu;

+bool cpu_vcpu_timer_vs_irq(struct vmm_vcpu *vcpu);
+u64 cpu_vcpu_timer_vs_cycle(struct vmm_vcpu *vcpu);
+void cpu_vcpu_timer_vs_restart(struct vmm_vcpu *vcpu);
+void cpu_vcpu_timer_vs_start(struct vmm_vcpu *vcpu, u64 next_vs_cycle);
+
void cpu_vcpu_timer_start(struct vmm_vcpu *vcpu, u64 next_cycle);
void cpu_vcpu_timer_delta_update(struct vmm_vcpu *vcpu, bool nested_virt);
void cpu_vcpu_timer_save(struct vmm_vcpu *vcpu);
diff --git a/arch/riscv/cpu/generic/include/riscv_encoding.h b/arch/riscv/cpu/generic/include/riscv_encoding.h
index 1514df11..b3753e19 100644
--- a/arch/riscv/cpu/generic/include/riscv_encoding.h
+++ b/arch/riscv/cpu/generic/include/riscv_encoding.h
@@ -371,6 +371,9 @@
#define ENVCFG_CBIE_INV _AC(0x3, UL)
#define ENVCFG_FIOM _AC(0x1, UL)

+#define ENVCFGH_STCE (_AC(1, UL) << 31)
+#define ENVCFGH_PBMTE (_AC(1, UL) << 30)
+
/* ===== User-level CSRs ===== */

/* User Trap Setup (N-extension) */
--
2.34.1

Anup Patel

unread,
Oct 28, 2022, 6:55:01 AM10/28/22
to xvisor...@googlegroups.com, Anup Patel
On Wed, Oct 26, 2022 at 4:46 PM Anup Patel <apa...@ventanamicro.com> wrote:
>
> This series adds Sstc virtualization (and nested virtualization)
> support in Xvisor RISC-V
>
> These patches can also be found in riscv_sstc_virt_v1 branch at:
> https://github.com/avpatel/xvisor-next
>
> Anup Patel (8):
> RISC-V: Rename VCPU timer handling functions for consistency
> RISC-V: Move time delta update function to cpu_vcpu_timer.c
> RISC-V: Introduce VCPU timer save/restore functions
> RISC-V: VCPU ISA bitmap should only have extensions available on Host
> RISC-V: Use Sstc virtualization in VCPU timer implement
> TESTS: riscv: Add sstc to ISA string whenever Xvisor support it
> RISC-V: Take nested interrupts after vmm_scheduler_irq_exit()
> RISC-V: Add nested virtualization support for Sstc extension

Applied this series to the xvisor-next repo.

Thanks,
Anup

>
> arch/riscv/cpu/generic/cpu_exception.c | 7 +-
> arch/riscv/cpu/generic/cpu_init.c | 6 +-
> arch/riscv/cpu/generic/cpu_vcpu_helper.c | 53 ++-
> arch/riscv/cpu/generic/cpu_vcpu_nested.c | 157 ++++++++-
> arch/riscv/cpu/generic/cpu_vcpu_sbi_legacy.c | 4 +-
> arch/riscv/cpu/generic/cpu_vcpu_sbi_replace.c | 4 +-
> arch/riscv/cpu/generic/cpu_vcpu_timer.c | 306 ++++++++++++++++--
> arch/riscv/cpu/generic/cpu_vcpu_trap.c | 16 +
> arch/riscv/cpu/generic/include/arch_regs.h | 3 +
> arch/riscv/cpu/generic/include/cpu_hwcap.h | 9 +-
> .../cpu/generic/include/cpu_vcpu_helper.h | 6 +-
> .../cpu/generic/include/cpu_vcpu_timer.h | 20 +-
> .../cpu/generic/include/riscv_encoding.h | 3 +
> tests/riscv/common/basic/arch_sbi.c | 39 ++-
> tests/riscv/virt32/virt32-guest.dts | 2 +-
> tests/riscv/virt64/virt64-guest.dts | 2 +-
> 16 files changed, 534 insertions(+), 103 deletions(-)
>
> --
> 2.34.1
>
> --
> You received this message because you are subscribed to the Google Groups "Xvisor Development" group.
> To unsubscribe from this group and stop receiving emails from it, send an email to xvisor-devel...@googlegroups.com.
> To view this discussion on the web visit https://groups.google.com/d/msgid/xvisor-devel/20221026111628.2898-1-apatel%40ventanamicro.com.
Reply all
Reply to author
Forward
0 new messages