We count the number of traps taken by a normal VCPU, both for the
various exception causes and for nested virtualization events, and
print these counters in the arch_vcpu_stat_dump() function.
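
For example, with this patch arch_vcpu_stat_dump() produces output
along the following lines (counter values here are illustrative, not
from a real run):

  Illegal Instruction Fault       : 0x12
  Supervisor Ecall                : 0x1f5
  Load Guest Page Fault           : 0x8c2

  Nested Enter                    : 0x3
  Nested Exit                     : 0x3

and similarly for the remaining nested counters.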
Signed-off-by: Anup Patel <apa...@ventanamicro.com>
---
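Note for reviewers: the counting pattern used here is a fixed-size
array of u64 counters indexed by trap cause, plus one u64 per nested
event, dumped with a "%-32s: 0x%"PRIx64 style format. The standalone
C sketch below (plain C99, compilable on its own; it is not Xvisor
code, and all names and values in it are illustrative) shows the same
pattern in isolation:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define MAX_TRAP_CAUSE 0x18

/* Only a couple of illustrative cause names; unset slots stay empty */
static const char trap_names[MAX_TRAP_CAUSE][32] = {
	[2] = "Illegal Instruction Fault",
	[9] = "Supervisor Ecall",
};

static uint64_t trap_count[MAX_TRAP_CAUSE];

static void count_trap(unsigned long cause)
{
	/* Bump the per-cause counter, staying within the array bounds */
	if (cause < MAX_TRAP_CAUSE)
		trap_count[cause]++;
}

int main(void)
{
	int i;

	count_trap(2);
	count_trap(9);
	count_trap(9);

	/* Print only the non-zero counters, as the dump function does */
	for (i = 0; i < MAX_TRAP_CAUSE; i++) {
		if (!trap_count[i])
			continue;
		printf("%-32s: 0x%"PRIx64"\n", trap_names[i],
		       trap_count[i]);
	}
	return 0;
}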
arch/riscv/cpu/generic/cpu_exception.c | 2 +
arch/riscv/cpu/generic/cpu_vcpu_helper.c | 81 +++++++++++++++++++++-
arch/riscv/cpu/generic/cpu_vcpu_nested.c | 19 +++++
arch/riscv/cpu/generic/cpu_vcpu_sbi.c | 1 +
arch/riscv/cpu/generic/include/arch_regs.h | 21 ++++++
5 files changed, 123 insertions(+), 1 deletion(-)

diff --git a/arch/riscv/cpu/generic/cpu_exception.c b/arch/riscv/cpu/generic/cpu_exception.c
index 26f84bb4..d4a08dd2 100644
--- a/arch/riscv/cpu/generic/cpu_exception.c
+++ b/arch/riscv/cpu/generic/cpu_exception.c
@@ -180,6 +180,8 @@ void do_handle_trap(arch_regs_t *regs, unsigned long cause)
if (rc) {
vmm_manager_vcpu_halt(vcpu);
+ } else {
+ riscv_stats_priv(vcpu)->trap[cause]++;
}
done:
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_helper.c b/arch/riscv/cpu/generic/cpu_vcpu_helper.c
index da8f9ecd..f5bca399 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_helper.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_helper.c
@@ -312,6 +312,9 @@ int arch_vcpu_init(struct vmm_vcpu *vcpu)
/* TODO: Update HSTATUS.VSBE for big-endian Guest */
+ /* Reset stats gathering */
+ memset(riscv_stats_priv(vcpu), 0, sizeof(struct riscv_priv_stats));
+
/* Update VCPU CSRs */
riscv_priv(vcpu)->hie = 0;
riscv_priv(vcpu)->hip = 0;
@@ -612,7 +615,83 @@ void arch_vcpu_regs_dump(struct vmm_chardev *cdev, struct vmm_vcpu *vcpu)
}
}
+static const char trap_names[][32] = {
+ [CAUSE_MISALIGNED_FETCH] = "Misaligned Fetch Fault",
+ [CAUSE_FETCH_ACCESS] = "Fetch Access Fault",
+ [CAUSE_ILLEGAL_INSTRUCTION] = "Illegal Instruction Fault",
+ [CAUSE_BREAKPOINT] = "Breakpoint Fault",
+ [CAUSE_MISALIGNED_LOAD] = "Misaligned Load Fault",
+ [CAUSE_LOAD_ACCESS] = "Load Access Fault",
+ [CAUSE_MISALIGNED_STORE] = "Misaligned Store Fault",
+ [CAUSE_STORE_ACCESS] = "Store Access Fault",
+ [CAUSE_USER_ECALL] = "User Ecall",
+ [CAUSE_SUPERVISOR_ECALL] = "Supervisor Ecall",
+ [CAUSE_VIRTUAL_SUPERVISOR_ECALL] = "Virtual Supervisor Ecall",
+ [CAUSE_MACHINE_ECALL] = "Machine Ecall",
+ [CAUSE_FETCH_PAGE_FAULT] = "Fetch Page Fault",
+ [CAUSE_LOAD_PAGE_FAULT] = "Load Page Fault",
+ [CAUSE_STORE_PAGE_FAULT] = "Store Page Fault",
+ [CAUSE_FETCH_GUEST_PAGE_FAULT] = "Fetch Guest Page Fault",
+ [CAUSE_LOAD_GUEST_PAGE_FAULT] = "Load Guest Page Fault",
+ [CAUSE_VIRTUAL_INST_FAULT] = "Virtual Instruction Fault",
+ [CAUSE_STORE_GUEST_PAGE_FAULT] = "Store Guest Page Fault",
+};
+
void arch_vcpu_stat_dump(struct vmm_chardev *cdev, struct vmm_vcpu *vcpu)
{
- /* For now no arch specific stats */
+ int i;
+ bool have_traps = FALSE;
+
+ for (i = 0; i < RISCV_PRIV_MAX_TRAP_CAUSE; i++) {
+ if (!riscv_stats_priv(vcpu)->trap[i]) {
+ continue;
+ }
+ vmm_cprintf(cdev, "%-32s: 0x%"PRIx64"\n", trap_names[i],
+ riscv_stats_priv(vcpu)->trap[i]);
+ have_traps = TRUE;
+ }
+
+ if (have_traps) {
+ vmm_cprintf(cdev, "\n");
+ }
+
+ vmm_cprintf(cdev, "%-32s: 0x%"PRIx64"\n",
+ "Nested Enter",
+ riscv_stats_priv(vcpu)->nested_enter);
+ vmm_cprintf(cdev, "%-32s: 0x%"PRIx64"\n",
+ "Nested Exit",
+ riscv_stats_priv(vcpu)->nested_exit);
+ vmm_cprintf(cdev, "%-32s: 0x%"PRIx64"\n",
+ "Nested Virtual Interrupt",
+ riscv_stats_priv(vcpu)->nested_vsirq);
+ vmm_cprintf(cdev, "%-32s: 0x%"PRIx64"\n",
+ "Nested S-mode CSR Access",
+ riscv_stats_priv(vcpu)->nested_smode_csr_rmw);
+ vmm_cprintf(cdev, "%-32s: 0x%"PRIx64"\n",
+ "Nested HS-mode CSR Access",
+ riscv_stats_priv(vcpu)->nested_hext_csr_rmw);
+ vmm_cprintf(cdev, "%-32s: 0x%"PRIx64"\n",
+ "Nested Load Guest Page Fault",
+ riscv_stats_priv(vcpu)->nested_load_guest_page_fault);
+ vmm_cprintf(cdev, "%-32s: 0x%"PRIx64"\n",
+ "Nested Store Guest Page Fault",
+ riscv_stats_priv(vcpu)->nested_store_guest_page_fault);
+ vmm_cprintf(cdev, "%-32s: 0x%"PRIx64"\n",
+ "Nested Fetch Guest Page Fault",
+ riscv_stats_priv(vcpu)->nested_fetch_guest_page_fault);
+ vmm_cprintf(cdev, "%-32s: 0x%"PRIx64"\n",
+ "Nested HFENCE.VVMA Instruction",
+ riscv_stats_priv(vcpu)->nested_hfence_vvma);
+ vmm_cprintf(cdev, "%-32s: 0x%"PRIx64"\n",
+ "Nested HFENCE.GVMA Instruction",
+ riscv_stats_priv(vcpu)->nested_hfence_gvma);
+ vmm_cprintf(cdev, "%-32s: 0x%"PRIx64"\n",
+ "Nested HLV Instruction",
+ riscv_stats_priv(vcpu)->nested_hlv);
+ vmm_cprintf(cdev, "%-32s: 0x%"PRIx64"\n",
+ "Nested HSV Instruction",
+ riscv_stats_priv(vcpu)->nested_hsv);
+ vmm_cprintf(cdev, "%-32s: 0x%"PRIx64"\n",
+ "Nested SBI Ecall",
+ riscv_stats_priv(vcpu)->nested_sbi);
}
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_nested.c b/arch/riscv/cpu/generic/cpu_vcpu_nested.c
index 9a96d9cd..b8284724 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_nested.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_nested.c
@@ -927,6 +927,8 @@ int cpu_vcpu_nested_smode_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
unsigned long *csr, zero = 0, writeable_mask = 0;
struct riscv_priv_nested *npriv = riscv_nested_priv(vcpu);
+ riscv_stats_priv(vcpu)->nested_smode_csr_rmw++;
+
/*
* These CSRs should never trap for virtual-HS/U modes because
* we only emulate these CSRs for virtual-VS/VU modes.
@@ -1007,6 +1009,8 @@ int cpu_vcpu_nested_hext_csr_rmw(struct vmm_vcpu *vcpu, arch_regs_t *regs,
unsigned long *csr, mode, zero = 0, writeable_mask = 0;
struct riscv_priv_nested *npriv = riscv_nested_priv(vcpu);
+ riscv_stats_priv(vcpu)->nested_hext_csr_rmw++;
+
/*
* Trap from virtual-VS and virtual-VU modes should be forwarded
* to virtual-HS mode as a virtual instruction trap.
@@ -1282,12 +1286,15 @@ int cpu_vcpu_nested_page_fault(struct vmm_vcpu *vcpu,
guest_gpa |= ((physical_addr_t)trap->stval & 0x3);
switch (trap->scause) {
case CAUSE_LOAD_GUEST_PAGE_FAULT:
+ riscv_stats_priv(vcpu)->nested_load_guest_page_fault++;
guest_access = NESTED_XLATE_LOAD;
break;
case CAUSE_STORE_GUEST_PAGE_FAULT:
+ riscv_stats_priv(vcpu)->nested_store_guest_page_fault++;
guest_access = NESTED_XLATE_STORE;
break;
case CAUSE_FETCH_GUEST_PAGE_FAULT:
+ riscv_stats_priv(vcpu)->nested_fetch_guest_page_fault++;
guest_access = NESTED_XLATE_FETCH;
break;
default:
@@ -1316,6 +1323,8 @@ void cpu_vcpu_nested_hfence_vvma(struct vmm_vcpu *vcpu,
unsigned long hgatp;
struct riscv_priv_nested *npriv = riscv_nested_priv(vcpu);
+ riscv_stats_priv(vcpu)->nested_hfence_vvma++;
+
/*
* The HFENCE.VVMA instructions help virtual-HS mode flush
* VS-stage TLB entries for virtual-VS/VU modes.
@@ -1352,6 +1361,8 @@ void cpu_vcpu_nested_hfence_gvma(struct vmm_vcpu *vcpu,
unsigned long current_vmid =
(npriv->hgatp & HGATP_VMID) >> HGATP_VMID_SHIFT;
+ riscv_stats_priv(vcpu)->nested_hfence_gvma++;
+
/*
* The HFENCE.GVMA instructions help virtual-HS mode flush
* G-stage TLB entries for virtual-VS/VU modes.
@@ -1386,6 +1397,8 @@ int cpu_vcpu_nested_hlv(struct vmm_vcpu *vcpu, unsigned long vaddr,
struct nested_xlate_context xc;
struct riscv_priv_nested *npriv = riscv_nested_priv(vcpu);
+ riscv_stats_priv(vcpu)->nested_hlv++;
+
/* Don't handle misaligned HLV */
if (vaddr & (len - 1)) {
*out_scause = CAUSE_MISALIGNED_LOAD;
@@ -1427,6 +1440,8 @@ int cpu_vcpu_nested_hsv(struct vmm_vcpu *vcpu, unsigned long vaddr,
struct nested_xlate_context xc;
struct riscv_priv_nested *npriv = riscv_nested_priv(vcpu);
+ riscv_stats_priv(vcpu)->nested_hsv++;
+
/* Don't handle misaligned HSV */
if (vaddr & (len - 1)) {
*out_scause = CAUSE_MISALIGNED_STORE;
@@ -1497,6 +1512,7 @@ void cpu_vcpu_nested_set_virt(struct vmm_vcpu *vcpu, struct arch_regs *regs,
/* Update vsstatus CSR */
if (virt) {
/* Nested virtualization state changing from OFF to ON */
+ riscv_stats_priv(vcpu)->nested_enter++;
/*
* Update vsstatus in following manner:
@@ -1515,6 +1531,7 @@ void cpu_vcpu_nested_set_virt(struct vmm_vcpu *vcpu, struct arch_regs *regs,
npriv->vsstatus |= tmp;
} else {
/* Nested virtualization state changing from ON to OFF */
+ riscv_stats_priv(vcpu)->nested_exit++;
/*
* Update vsstatus in following manner:
@@ -1621,6 +1638,8 @@ void cpu_vcpu_nested_take_vsirq(struct vmm_vcpu *vcpu,
}
vmm_timer_event_stop(npriv->timer_event);
+ riscv_stats_priv(vcpu)->nested_vsirq++;
+
/* Take virtual-VS mode interrupt */
trap.scause = SCAUSE_INTERRUPT_MASK | vsirq;
trap.sepc = regs->sepc;
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_sbi.c b/arch/riscv/cpu/generic/cpu_vcpu_sbi.c
index 9788150e..23a26b09 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_sbi.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_sbi.c
@@ -76,6 +76,7 @@ int cpu_vcpu_sbi_ecall(struct vmm_vcpu *vcpu, ulong cause,
/* Forward SBI calls from virtual-VS mode to virtual-HS mode */
if (riscv_nested_virt(vcpu)) {
+ riscv_stats_priv(vcpu)->nested_sbi++;
trap.sepc = regs->sepc;
trap.scause = CAUSE_VIRTUAL_SUPERVISOR_ECALL;
trap.stval = 0;
diff --git a/arch/riscv/cpu/generic/include/arch_regs.h b/arch/riscv/cpu/generic/include/arch_regs.h
index 39b1bf7b..fc251b24 100644
--- a/arch/riscv/cpu/generic/include/arch_regs.h
+++ b/arch/riscv/cpu/generic/include/arch_regs.h
@@ -186,11 +186,31 @@ struct riscv_priv_nested {
unsigned long hvictl;
};
+#define RISCV_PRIV_MAX_TRAP_CAUSE 0x18
+struct riscv_priv_stats {
+ u64 trap[RISCV_PRIV_MAX_TRAP_CAUSE];
+ u64 nested_enter;
+ u64 nested_exit;
+ u64 nested_vsirq;
+ u64 nested_smode_csr_rmw;
+ u64 nested_hext_csr_rmw;
+ u64 nested_load_guest_page_fault;
+ u64 nested_store_guest_page_fault;
+ u64 nested_fetch_guest_page_fault;
+ u64 nested_hfence_vvma;
+ u64 nested_hfence_gvma;
+ u64 nested_hlv;
+ u64 nested_hsv;
+ u64 nested_sbi;
+};
+
struct riscv_priv {
/* Register width */
unsigned long xlen;
/* ISA feature bitmap */
unsigned long *isa;
+ /* Statistic data */
+ struct riscv_priv_stats stats;
/* CSR state */
unsigned long hie;
unsigned long hip;
@@ -222,6 +242,7 @@ struct riscv_guest_priv {
#define riscv_regs(vcpu) (&((vcpu)->regs))
#define riscv_priv(vcpu) ((struct riscv_priv *)((vcpu)->arch_priv))
+#define riscv_stats_priv(vcpu) (&riscv_priv(vcpu)->stats)
#define riscv_nested_priv(vcpu) (&riscv_priv(vcpu)->nested)
#define riscv_nested_virt(vcpu) (riscv_nested_priv(vcpu)->virt)
#define riscv_fp_priv(vcpu) (&riscv_priv(vcpu)->fp)
--
2.34.1