[PATCH 0/6] Misc Xvisor improvements and fixes

17 views
Skip to first unread message

Anup Patel

unread,
May 9, 2022, 7:16:10 AM5/9/22
to xvisor...@googlegroups.com, Anup Patel
This series adds a few assorted improvements and fixes for Xvisor which were
discovered while testing nested virtualization support of Xvisor RISC-V.

These patches can also be found in riscv_misc_v1 branch at:
https://github.com/avpatel/xvisor-next.git

Anup Patel (6):
COMMANDS: host: Add sub-command to poke a host CPU
RISC-V: Fix compile error for latest binutils 2.38
ARCH: generic_mmu: Check child pointer before use in
mmu_pgtbl_get_child()
RISC-V: Print shadow page input address when panic
ARCH: generic_mmu: Fix typo in mmu_pgtbl_nonpool_alloc()
COMMANDS: memory: Add iodump8, iodump16, and iodump32 sub-commands

arch/common/generic_mmu.c | 26 +++-
arch/riscv/cpu/generic/cpu_vcpu_nested.c | 20 +--
arch/riscv/cpu/generic/objects.mk | 13 +-
commands/cmd_host.c | 57 ++++++++-
commands/cmd_memory.c | 152 +++++++++++++++++++----
5 files changed, 228 insertions(+), 40 deletions(-)

--
2.34.1

Anup Patel

unread,
May 9, 2022, 7:16:12 AM5/9/22
to xvisor...@googlegroups.com, Anup Patel
We add "host cpu poke [<hcpu>]" command which helps users poke
all online CPUs or a particular online CPU. Using this new command,
users can check if the target host CPU is dead or hung or alive.
It can also be used to force a context switch on a particular host
CPU because we use async IPIs for poking and the async IPI worker has
the highest priority.

Signed-off-by: Anup Patel <apa...@ventanamicro.com>
---
commands/cmd_host.c | 57 ++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 56 insertions(+), 1 deletion(-)

diff --git a/commands/cmd_host.c b/commands/cmd_host.c
index 9de09ec5..dcb5fc8a 100644
--- a/commands/cmd_host.c
+++ b/commands/cmd_host.c
@@ -28,6 +28,8 @@
#include <vmm_resource.h>
#include <vmm_devtree.h>
#include <vmm_devdrv.h>
+#include <vmm_timer.h>
+#include <vmm_heap.h>
#include <vmm_host_irq.h>
#include <vmm_host_irqext.h>
#include <vmm_host_ram.h>
@@ -57,6 +59,7 @@ static void cmd_host_usage(struct vmm_chardev *cdev)
vmm_cprintf(cdev, " host help\n");
vmm_cprintf(cdev, " host info\n");
vmm_cprintf(cdev, " host cpu info\n");
+ vmm_cprintf(cdev, " host cpu poke [<hcpu>]\n");
vmm_cprintf(cdev, " host cpu stats\n");
vmm_cprintf(cdev, " host irq stats\n");
vmm_cprintf(cdev, " host irq set_affinity <hirq> <hcpu>\n");
@@ -152,6 +155,50 @@ static int cmd_host_cpu_info(struct vmm_chardev *cdev)
return VMM_OK;
}

+static void host_cpu_poke_func(void *arg0, void *arg1, void *arg2)
+{
+ *((bool *)arg0) = TRUE;
+}
+
+static int cmd_host_cpu_poke(struct vmm_chardev *cdev,
+ const struct vmm_cpumask *cmask)
+{
+ u32 c;
+ u64 tstamp;
+ bool *poke;
+ bool free_poke = TRUE;
+
+ poke = vmm_zalloc(sizeof(*poke));
+ if (!poke) {
+ return VMM_ENOMEM;
+ }
+
+ for_each_cpu(c, cmask) {
+ vmm_cprintf(cdev, "CPU%d: Poke using async IPI ... ", c);
+
+ *poke = FALSE;
+ tstamp = vmm_timer_timestamp() + 1000000000ULL;
+ vmm_smp_ipi_async_call(vmm_cpumask_of(c), host_cpu_poke_func,
+ poke, NULL, NULL);
+ while (!(*poke)) {
+ if (tstamp < vmm_timer_timestamp()) {
+ free_poke = FALSE;
+ break;
+ }
+
+ vmm_scheduler_yield();
+ }
+
+ vmm_cprintf(cdev, "%s\n", (*poke) ? "Done" : "Timeout");
+ }
+
+ if (free_poke) {
+ vmm_free(poke);
+ }
+
+ return VMM_OK;
+}
+
static int cmd_host_cpu_stats(struct vmm_chardev *cdev)
{
int rc;
@@ -663,8 +710,8 @@ static int cmd_host_class_device_list(struct vmm_chardev *cdev,

static int cmd_host_exec(struct vmm_chardev *cdev, int argc, char **argv)
{
+ const struct vmm_cpumask *cmask;
int hirq, hcpu, colcnt, size;
-
physical_addr_t physaddr;

if (argc <= 1) {
@@ -679,6 +726,14 @@ static int cmd_host_exec(struct vmm_chardev *cdev, int argc, char **argv)
} else if ((strcmp(argv[1], "cpu") == 0) && (2 < argc)) {
if (strcmp(argv[2], "info") == 0) {
return cmd_host_cpu_info(cdev);
+ } else if (strcmp(argv[2], "poke") == 0) {
+ hcpu = (3 < argc) ? atoi(argv[3]) : -1;
+ if (hcpu >= 0 && vmm_cpu_online(hcpu)) {
+ cmask = vmm_cpumask_of(hcpu);
+ } else {
+ cmask = cpu_online_mask;
+ }
+ return cmd_host_cpu_poke(cdev, cmask);
} else if (strcmp(argv[2], "stats") == 0) {
return cmd_host_cpu_stats(cdev);
}
--
2.34.1

Anup Patel

unread,
May 9, 2022, 7:16:14 AM5/9/22
to xvisor...@googlegroups.com, Anup Patel
Latest binutils 2.38 expects build systems to specify "_zicsr_zifencei"
in the "-march" parameter if the software is going to access CSRs and use
the "fence.i" instruction. We update RISC-V specific compiler flags to
detect and specify "_zicsr_zifencei" for latest binutils 2.38.

Signed-off-by: Anup Patel <apa...@ventanamicro.com>
---
arch/riscv/cpu/generic/objects.mk | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/arch/riscv/cpu/generic/objects.mk b/arch/riscv/cpu/generic/objects.mk
index 69daf98a..230da2a1 100644
--- a/arch/riscv/cpu/generic/objects.mk
+++ b/arch/riscv/cpu/generic/objects.mk
@@ -54,11 +54,18 @@ ifeq ($(CONFIG_CMODEL_MEDANY),y)
arch-cflags-y += -mcmodel=medany
endif

+# Check whether the assembler and the compiler support the Zicsr and Zifencei extensions
+have_zicsr_zifenci := $(shell $(CC) -nostdlib -march=$(march-y)$(arch-a-y)$(arch-c-y)_zicsr_zifencei -x c /dev/null -o /dev/null 2>&1 | grep "zicsr\|zifencei" > /dev/null && echo n || echo y)
+march-zicsr-zifenci-$(have_zicsr_zifenci) = _zicsr_zifencei
+
+march-nonld-isa-y = $(march-y)$(arch-a-y)fd$(arch-c-y)$(march-zicsr-zifenci-y)
+march-ld-isa-y = $(march-y)$(arch-a-y)$(arch-c-y)
+
cpu-cppflags+=-DTEXT_START=0x10000000
-cpu-cflags += $(arch-cflags-y) -march=$(march-y)$(arch-a-y)$(arch-c-y)
+cpu-cflags += $(arch-cflags-y) -march=$(march-nonld-isa-y)
cpu-cflags += -fno-strict-aliasing -O2
-cpu-asflags += $(arch-cflags-y) -march=$(march-y)$(arch-a-y)fd$(arch-c-y)
-cpu-ldflags += $(arch-ldflags-y) -march=$(march-y)$(arch-a-y)$(arch-c-y)
+cpu-asflags += $(arch-cflags-y) -march=$(march-nonld-isa-y)
+cpu-ldflags += $(arch-ldflags-y) -march=$(march-ld-isa-y)

cpu-objs-y+= cpu_entry.o
cpu-objs-y+= cpu_proc.o
--
2.34.1

Anup Patel

unread,
May 9, 2022, 7:16:16 AM5/9/22
to xvisor...@googlegroups.com, Anup Patel
The mmu_pgtbl_find() called by mmu_pgtbl_get_child() can fail as well so
we should check child pointer before use in mmu_pgtbl_get_child().

Signed-off-by: Anup Patel <apa...@ventanamicro.com>
---
arch/common/generic_mmu.c | 24 +++++++++++++++++++++---
1 file changed, 21 insertions(+), 3 deletions(-)

diff --git a/arch/common/generic_mmu.c b/arch/common/generic_mmu.c
index d617a5cb..8bfc5506 100644
--- a/arch/common/generic_mmu.c
+++ b/arch/common/generic_mmu.c
@@ -490,17 +490,24 @@ struct mmu_pgtbl *mmu_pgtbl_get_child(struct mmu_pgtbl *parent,
vmm_spin_unlock_irqrestore_lite(&parent->tbl_lock, flags);

if (arch_mmu_pte_is_valid(&pte_val, parent->stage, parent->level)) {
+ child = NULL;
if ((parent->level > 0) &&
arch_mmu_pte_is_table(&pte_val, parent->stage,
parent->level)) {
tbl_pa = arch_mmu_pte_table_addr(&pte_val,
parent->stage, parent->level);
child = mmu_pgtbl_find(parent->stage, tbl_pa);
- if (child->parent == parent) {
- return child;
+ if (!child || child->parent != parent) {
+ vmm_printf("%s: invalid child for address "
+ "0x%"PRIPADDR" in page table at "
+ "0x%"PRIPADDR" stage=%d level=%d\n"
+ , __func__, map_ia, parent->tbl_pa,
+ parent->stage, parent->level);
+ child = NULL;
}
}
- return NULL;
+
+ return child;
}

if (!create) {
@@ -510,11 +517,22 @@ struct mmu_pgtbl *mmu_pgtbl_get_child(struct mmu_pgtbl *parent,
child = mmu_pgtbl_alloc(parent->stage, parent->level - 1,
parent->attr, parent->hw_tag);
if (!child) {
+ vmm_printf("%s: failed to alloc child for address "
+ "0x%"PRIPADDR" in page table at "
+ "0x%"PRIPADDR" stage=%d level=%d\n",
+ __func__, map_ia, parent->tbl_pa,
+ parent->stage, parent->level);
return NULL;
}

if ((rc = mmu_pgtbl_attach(parent, map_ia, child))) {
+ vmm_printf("%s: failed to attach child for address "
+ "0x%"PRIPADDR" in page table at "
+ "0x%"PRIPADDR" stage=%d level=%d\n",
+ __func__, map_ia, parent->tbl_pa,
+ parent->stage, parent->level);
mmu_pgtbl_free(child);
+ child = NULL;
}

return child;
--
2.34.1

Anup Patel

unread,
May 9, 2022, 7:16:18 AM5/9/22
to xvisor...@googlegroups.com, Anup Patel
As additional debug information, we should print the shadow page
input address when map/unmap in shadow page table fails.

Signed-off-by: Anup Patel <apa...@ventanamicro.com>
---
arch/riscv/cpu/generic/cpu_vcpu_nested.c | 20 ++++++++++++--------
1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/arch/riscv/cpu/generic/cpu_vcpu_nested.c b/arch/riscv/cpu/generic/cpu_vcpu_nested.c
index 1ba76d9d..4d3d4f08 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_nested.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_nested.c
@@ -118,8 +118,9 @@ static void nested_swtlb_update(struct vmm_vcpu *vcpu, bool itlb,
struct nested_swtlb_entry, head);
rc = mmu_unmap_page(npriv->pgtbl, &swte->shadow_page);
if (rc) {
- vmm_panic("%s: shadow page unmap failed (error %d)\n",
- __func__, rc);
+ vmm_panic("%s: shadow page unmap @ 0x%"PRIPADDR
+ " failed (error %d)\n", __func__,
+ swte->shadow_page.ia, rc);
}
} else {
BUG_ON(1);
@@ -131,8 +132,9 @@ static void nested_swtlb_update(struct vmm_vcpu *vcpu, bool itlb,

rc = mmu_map_page(npriv->pgtbl, &swte->shadow_page);
if (rc) {
- vmm_panic("%s: shadow page map failed (error %d)\n",
- __func__, rc);
+ vmm_panic("%s: shadow page map @ 0x%"PRIPADDR
+ " failed (error %d)\n", __func__,
+ swte->shadow_page.ia, rc);
}

list_add(&swte->head, &xtlb->active_list);
@@ -166,8 +168,9 @@ void cpu_vcpu_nested_swtlb_flush(struct vmm_vcpu *vcpu,

rc = mmu_unmap_page(npriv->pgtbl, &swte->shadow_page);
if (rc) {
- vmm_panic("%s: shadow page unmap failed (error %d)\n",
- __func__, rc);
+ vmm_panic("%s: shadow page unmap @ 0x%"PRIPADDR
+ " failed (error %d)\n", __func__,
+ swte->shadow_page.ia, rc);
}

list_add_tail(&swte->head, &swtlb->itlb.free_list);
@@ -183,8 +186,9 @@ void cpu_vcpu_nested_swtlb_flush(struct vmm_vcpu *vcpu,

rc = mmu_unmap_page(npriv->pgtbl, &swte->shadow_page);
if (rc) {
- vmm_panic("%s: shadow page unmap failed (error %d)\n",
- __func__, rc);
+ vmm_panic("%s: shadow page unmap @ 0x%"PRIPADDR
+ " failed (error %d)\n", __func__,
+ swte->shadow_page.ia, rc);
}

list_add_tail(&swte->head, &swtlb->dtlb.free_list);
--
2.34.1

Anup Patel

unread,
May 9, 2022, 7:16:20 AM5/9/22
to xvisor...@googlegroups.com, Anup Patel
We should be freeing npgtbl upon failure instead of pgtbl.

Signed-off-by: Anup Patel <apa...@ventanamicro.com>
---
arch/common/generic_mmu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/common/generic_mmu.c b/arch/common/generic_mmu.c
index 8bfc5506..46bf73a7 100644
--- a/arch/common/generic_mmu.c
+++ b/arch/common/generic_mmu.c
@@ -217,7 +217,7 @@ static struct mmu_pgtbl *mmu_pgtbl_nonpool_alloc(int stage, int level)
flags);
vmm_host_free_pages(pgtbl->tbl_va,
VMM_SIZE_TO_PAGE(pgtbl->tbl_sz));
- vmm_free(pgtbl);
+ vmm_free(npgtbl);
return NULL;
}

--
2.34.1

Anup Patel

unread,
May 9, 2022, 7:16:24 AM5/9/22
to xvisor...@googlegroups.com, Anup Patel
IO device and regular RAM are treated differently and have different
mem_flags so let's add separate memory sub-commands to dump IO device
registers. Also, it is very useful to do memory/io dump from a given
host CPU so we add additional parameter to specify host CPU as well.

Signed-off-by: Anup Patel <apa...@ventanamicro.com>
---
commands/cmd_memory.c | 152 +++++++++++++++++++++++++++++++++++-------
1 file changed, 128 insertions(+), 24 deletions(-)

diff --git a/commands/cmd_memory.c b/commands/cmd_memory.c
index c7797d6e..c8e9eb34 100644
--- a/commands/cmd_memory.c
+++ b/commands/cmd_memory.c
@@ -24,6 +24,9 @@

#include <vmm_error.h>
#include <vmm_stdio.h>
+#include <vmm_smp.h>
+#include <vmm_timer.h>
+#include <vmm_heap.h>
#include <vmm_host_aspace.h>
#include <vmm_modules.h>
#include <vmm_cmdmgr.h>
@@ -94,9 +97,12 @@ static void cmd_memory_usage(struct vmm_chardev *cdev)
{
vmm_cprintf(cdev, "Usage: ");
vmm_cprintf(cdev, " memory help\n");
- vmm_cprintf(cdev, " memory dump8 <phys_addr> <count>\n");
- vmm_cprintf(cdev, " memory dump16 <phys_addr> <count>\n");
- vmm_cprintf(cdev, " memory dump32 <phys_addr> <count>\n");
+ vmm_cprintf(cdev, " memory dump8 <phys_addr> <count> [<hcpu>]\n");
+ vmm_cprintf(cdev, " memory dump16 <phys_addr> <count> [<hcpu>]\n");
+ vmm_cprintf(cdev, " memory dump32 <phys_addr> <count> [<hcpu>]\n");
+ vmm_cprintf(cdev, " memory iodump8 <phys_addr> <count> [<hcpu>]\n");
+ vmm_cprintf(cdev, " memory iodump16 <phys_addr> <count> [<hcpu>]\n");
+ vmm_cprintf(cdev, " memory iodump32 <phys_addr> <count> [<hcpu>]\n");
vmm_cprintf(cdev, " memory crc32 <phys_addr> <count>\n");
#if CONFIG_CRYPTO_HASH_MD5
vmm_cprintf(cdev, " memory md5 <phys_addr> <count>\n");
@@ -114,36 +120,66 @@ static void cmd_memory_usage(struct vmm_chardev *cdev)
"<byte_count>\n");
}

-static int cmd_memory_dump(struct vmm_chardev *cdev,
- physical_addr_t addr,
- u32 wsz, u32 wcnt)
+struct memory_dump_request {
+ struct vmm_chardev *cdev;
+ physical_addr_t addr;
+ u32 wsz;
+ u32 wcnt;
+ bool io;
+ bool done;
+};
+
+static void memory_dump_func(void *arg0, void *arg1, void *arg2)
{
- int rc;
- u32 w;
- bool page_mapped;
+ struct memory_dump_request *dreq = arg0;
virtual_addr_t page_va, addr_offset;
+ struct vmm_chardev *cdev = dreq->cdev;
+ physical_addr_t addr = dreq->addr;
physical_addr_t page_pa;
+ u32 wsz = dreq->wsz;
+ u32 wcnt = dreq->wcnt;
+ bool io = dreq->io;
+ bool page_mapped;
+ int rc;
+ u32 w;
+
addr = addr - (addr & (wsz - 1));
vmm_cprintf(cdev, "Host physical memory "
- "0x%"PRIPADDR" - 0x%"PRIPADDR":",
- addr, (addr + wsz*wcnt));
+ "0x%"PRIPADDR" - 0x%"PRIPADDR" seen by CPU%d:",
+ addr, (addr + wsz*wcnt), vmm_smp_processor_id());
w = 0;
page_pa = addr - (addr & VMM_PAGE_MASK);
- page_va = vmm_host_iomap(page_pa, VMM_PAGE_SIZE);
+ if (io) {
+ page_va = vmm_host_iomap(page_pa, VMM_PAGE_SIZE);
+ } else {
+ page_va = vmm_host_memmap(page_pa, VMM_PAGE_SIZE,
+ VMM_MEMORY_FLAGS_NORMAL);
+ }
page_mapped = TRUE;
while (w < wcnt) {
if (page_pa != (addr - (addr & VMM_PAGE_MASK))) {
if (page_mapped) {
- rc = vmm_host_iounmap(page_va);
+ if (io) {
+ rc = vmm_host_iounmap(page_va);
+ } else {
+ rc = vmm_host_memunmap(page_va);
+ }
+ page_mapped = FALSE;
if (rc) {
vmm_cprintf(cdev,
- "Error: Failed to unmap memory.\n");
- return rc;
+ "Error: Failed to unmap memory "
+ "(error %d).\n", rc);
+ goto done;
}
- page_mapped = FALSE;
}
page_pa = addr - (addr & VMM_PAGE_MASK);
- page_va = vmm_host_iomap(page_pa, VMM_PAGE_SIZE);
+ if (io) {
+ page_va = vmm_host_iomap(page_pa,
+ VMM_PAGE_SIZE);
+ } else {
+ page_va = vmm_host_memmap(page_pa,
+ VMM_PAGE_SIZE, VMM_MEMORY_FLAGS_NORMAL);
+ }
page_mapped = TRUE;
}
if (!(w * wsz & 0x0000000F)) {
@@ -170,14 +206,66 @@ static int cmd_memory_dump(struct vmm_chardev *cdev,
w++;
}
vmm_cprintf(cdev, "\n");
+
+done:
if (page_mapped) {
- rc = vmm_host_iounmap(page_va);
- if (rc) {
- vmm_cprintf(cdev, "Error: Failed to unmap memory.\n");
- return rc;
+ if (io) {
+ rc = vmm_host_iounmap(page_va);
+ } else {
+ rc = vmm_host_memunmap(page_va);
}
page_mapped = FALSE;
+ if (rc) {
+ vmm_cprintf(cdev, "Error: Failed to unmap memory"
+ " (error %d).\n", rc);
+ }
+ }
+
+ dreq->done = TRUE;
+}
+
+static int cmd_memory_dump(struct vmm_chardev *cdev,
+ physical_addr_t addr,
+ u32 wsz, u32 wcnt, bool io, int hcpu)
+{
+ struct memory_dump_request *dreq;
+ const struct vmm_cpumask *cmask;
+ u64 tstamp;
+
+ if (hcpu >= 0 && vmm_cpu_online(hcpu)) {
+ cmask = vmm_cpumask_of(hcpu);
+ } else {
+ cmask = vmm_cpumask_of(vmm_smp_processor_id());
}
+
+ dreq = vmm_zalloc(sizeof(*dreq));
+ if (!dreq) {
+ return VMM_ENOMEM;
+ }
+
+ dreq->cdev = cdev;
+ dreq->addr = addr;
+ dreq->wsz = wsz;
+ dreq->wcnt = wcnt;
+ dreq->io = io;
+ dreq->done = FALSE;
+
+ vmm_smp_ipi_async_call(cmask, memory_dump_func, dreq, NULL, NULL);
+ tstamp = vmm_timer_timestamp() + 1000000000ULL;
+ while (!dreq->done) {
+ if (tstamp < vmm_timer_timestamp()) {
+ break;
+ }
+
+ vmm_scheduler_yield();
+ }
+
+ if (dreq->done) {
+ vmm_free(dreq);
+ } else {
+ return VMM_ETIMEDOUT;
+ }
+
return VMM_OK;
}

@@ -447,6 +535,7 @@ static int cmd_memory_copy(struct vmm_chardev *cdev,
static int cmd_memory_exec(struct vmm_chardev *cdev, int argc, char **argv)
{
u32 tmp;
+ int hcpu;
physical_addr_t addr, src_addr;
if (argc < 2) {
cmd_memory_usage(cdev);
@@ -468,13 +557,28 @@ static int cmd_memory_exec(struct vmm_chardev *cdev, int argc, char **argv)
addr = (physical_addr_t)strtoull(argv[2], NULL, 0);
if (strcmp(argv[1], "dump8") == 0) {
tmp = strtoull(argv[3], NULL, 0);
- return cmd_memory_dump(cdev, addr, 1, (u32)tmp);
+ hcpu = (4 < argc) ? atoi(argv[4]) : -1;
+ return cmd_memory_dump(cdev, addr, 1, (u32)tmp, FALSE, hcpu);
} else if (strcmp(argv[1], "dump16") == 0) {
tmp = strtoull(argv[3], NULL, 0);
- return cmd_memory_dump(cdev, addr, 2, (u32)tmp);
+ hcpu = (4 < argc) ? atoi(argv[4]) : -1;
+ return cmd_memory_dump(cdev, addr, 2, (u32)tmp, FALSE, hcpu);
} else if (strcmp(argv[1], "dump32") == 0) {
tmp = strtoull(argv[3], NULL, 0);
- return cmd_memory_dump(cdev, addr, 4, (u32)tmp);
+ hcpu = (4 < argc) ? atoi(argv[4]) : -1;
+ return cmd_memory_dump(cdev, addr, 4, (u32)tmp, FALSE, hcpu);
+ } else if (strcmp(argv[1], "iodump8") == 0) {
+ tmp = strtoull(argv[3], NULL, 0);
+ hcpu = (4 < argc) ? atoi(argv[4]) : -1;
+ return cmd_memory_dump(cdev, addr, 1, (u32)tmp, TRUE, hcpu);
+ } else if (strcmp(argv[1], "iodump16") == 0) {
+ tmp = strtoull(argv[3], NULL, 0);
+ hcpu = (4 < argc) ? atoi(argv[4]) : -1;
+ return cmd_memory_dump(cdev, addr, 2, (u32)tmp, TRUE, hcpu);
+ } else if (strcmp(argv[1], "iodump32") == 0) {
+ tmp = strtoull(argv[3], NULL, 0);
+ hcpu = (4 < argc) ? atoi(argv[4]) : -1;
+ return cmd_memory_dump(cdev, addr, 4, (u32)tmp, TRUE, hcpu);
} else if (strcmp(argv[1], "crc32") == 0) {
tmp = strtoull(argv[3], NULL, 0);
return cmd_memory_crc32(cdev, addr, (u32)tmp);
--
2.34.1

Anup Patel

unread,
May 13, 2022, 11:38:59 AM5/13/22
to Xvisor Devel, Anup Patel
On Mon, May 9, 2022 at 4:46 PM Anup Patel <apa...@ventanamicro.com> wrote:
>
> This series adds few assorted improvements and fixes for Xvisor which were
> discovered while testing nested virtualization support of Xvisor RISC-V.
>
> These patches can also be found in riscv_misc_v1 branch at:
> https://github.com/avpatel/xvisor-next.git
>
> Anup Patel (6):
> COMMANDS: host: Add sub-command to poke a host CPU
> RISC-V: Fix compile error for latest binutils 2.38
> ARCH: generic_mmu: Check child pointer before use in
> mmu_pgtbl_get_child()
> RISC-V: Print shadow page input address when panic
> ARCH: generic_mmu: Fix typo in mmu_pgtbl_nonpool_alloc()
> COMMANDS: memory: Add iodump8, iodump16, and iodump32 sub-commands

Applied this series to the xvisor-next repo

Regards,
Anup

>
> arch/common/generic_mmu.c | 26 +++-
> arch/riscv/cpu/generic/cpu_vcpu_nested.c | 20 +--
> arch/riscv/cpu/generic/objects.mk | 13 +-
> commands/cmd_host.c | 57 ++++++++-
> commands/cmd_memory.c | 152 +++++++++++++++++++----
> 5 files changed, 228 insertions(+), 40 deletions(-)
>
> --
> 2.34.1
>
> --
> You received this message because you are subscribed to the Google Groups "Xvisor Development" group.
> To unsubscribe from this group and stop receiving emails from it, send an email to xvisor-devel...@googlegroups.com.
> To view this discussion on the web visit https://groups.google.com/d/msgid/xvisor-devel/20220509111533.134783-1-apatel%40ventanamicro.com.
Reply all
Reply to author
Forward
0 new messages