[PATCH 0/6] Nested MMU Test Suite

45 views
Skip to first unread message

Anup Patel

unread,
Jun 9, 2020, 9:39:37 AM6/9/20
to xvisor...@googlegroups.com, Anup Patel
This series adds generic Nested MMU Test Suite using Xvisor white-box
testing framework.

The series adds a total of 12 test-cases which are architecture-independent
and tested on RISC-V. In the future, we will have more test-cases, and these
test-cases will also work on the ARM32ve and ARM64 ports.

Anup Patel (6):
ARM: arm32ve: Remove redundant defines from arch_mmu.h
ARCH: arm32ve/arm64/riscv: Add arch_mmu_pgflags_set() function
ARCH: generic_mmu: Add mechanism to test nested page tables
RISC-V: Implement arch_mmu_test_nested_pgtbl() for testing nested MMU
CORE: vmm_host_ram: Add APIs to get start and end of all RAM banks
LIBS: wboxtest: Add nested MMU tests

arch/arm/cpu/arm32ve/cpu_vcpu_excep.c | 21 +-
arch/arm/cpu/arm32ve/include/arch_mmu.h | 3 -
arch/arm/cpu/arm64/cpu_vcpu_excep.c | 29 +-
arch/arm/cpu/common/include/mmu_lpae.h | 8 +-
arch/arm/cpu/common/mmu_lpae.c | 96 +++--
arch/common/generic_mmu.c | 317 +++++++++++++++-
arch/common/include/generic_mmu.h | 71 +++-
arch/riscv/cpu/generic/cpu_mmu.c | 353 +++++++++++++++++-
arch/riscv/cpu/generic/cpu_vcpu_trap.c | 16 +-
arch/riscv/cpu/generic/cpu_vcpu_unpriv.S | 27 +-
arch/riscv/cpu/generic/include/arch_mmu.h | 8 +-
.../cpu/generic/include/cpu_vcpu_unpriv.h | 7 +
core/include/vmm_host_ram.h | 6 +
core/vmm_host_ram.c | 32 ++
libs/wboxtest/nested_mmu/nested_mmu_test.h | 221 +++++++++++
libs/wboxtest/nested_mmu/objects.mk | 35 ++
libs/wboxtest/{ => nested_mmu}/openconf.cfg | 24 +-
.../s1_hugepage_s2_hugepage_nordwr.c | 293 +++++++++++++++
.../s1_hugepage_s2_hugepage_rdonly.c | 293 +++++++++++++++
.../nested_mmu/s1_hugepage_s2_hugepage_rdwr.c | 279 ++++++++++++++
.../nested_mmu/s1_page_s2_page_nordwr.c | 292 +++++++++++++++
.../nested_mmu/s1_page_s2_page_rdonly.c | 292 +++++++++++++++
.../nested_mmu/s1_page_s2_page_rdwr.c | 278 ++++++++++++++
libs/wboxtest/nested_mmu/s2_hugepage_nordwr.c | 145 +++++++
libs/wboxtest/nested_mmu/s2_hugepage_rdonly.c | 153 ++++++++
libs/wboxtest/nested_mmu/s2_hugepage_rdwr.c | 208 +++++++++++
libs/wboxtest/nested_mmu/s2_page_nordwr.c | 145 +++++++
libs/wboxtest/nested_mmu/s2_page_rdonly.c | 152 ++++++++
libs/wboxtest/nested_mmu/s2_page_rdwr.c | 207 ++++++++++
libs/wboxtest/openconf.cfg | 1 +
30 files changed, 3865 insertions(+), 147 deletions(-)
create mode 100755 libs/wboxtest/nested_mmu/nested_mmu_test.h
create mode 100644 libs/wboxtest/nested_mmu/objects.mk
copy libs/wboxtest/{ => nested_mmu}/openconf.cfg (70%)
create mode 100755 libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_nordwr.c
create mode 100755 libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdonly.c
create mode 100755 libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdwr.c
create mode 100755 libs/wboxtest/nested_mmu/s1_page_s2_page_nordwr.c
create mode 100755 libs/wboxtest/nested_mmu/s1_page_s2_page_rdonly.c
create mode 100755 libs/wboxtest/nested_mmu/s1_page_s2_page_rdwr.c
create mode 100755 libs/wboxtest/nested_mmu/s2_hugepage_nordwr.c
create mode 100755 libs/wboxtest/nested_mmu/s2_hugepage_rdonly.c
create mode 100755 libs/wboxtest/nested_mmu/s2_hugepage_rdwr.c
create mode 100755 libs/wboxtest/nested_mmu/s2_page_nordwr.c
create mode 100755 libs/wboxtest/nested_mmu/s2_page_rdonly.c
create mode 100755 libs/wboxtest/nested_mmu/s2_page_rdwr.c

--
2.25.1

Anup Patel

unread,
Jun 9, 2020, 9:39:38 AM6/9/20
to xvisor...@googlegroups.com, Anup Patel
We replace arch_mmu_stage1_pgflags_set() with arch_mmu_pgflags_set(),
which works for both Stage1 and Stage2 page tables.

Signed-off-by: Anup Patel <an...@brainfault.org>
---
arch/arm/cpu/arm32ve/cpu_vcpu_excep.c | 21 +-----
arch/arm/cpu/arm64/cpu_vcpu_excep.c | 29 +-------
arch/arm/cpu/common/include/mmu_lpae.h | 2 +-
arch/arm/cpu/common/mmu_lpae.c | 84 ++++++++++++++++-------
arch/common/generic_mmu.c | 16 ++---
arch/riscv/cpu/generic/cpu_mmu.c | 57 ++++++++++-----
arch/riscv/cpu/generic/cpu_vcpu_trap.c | 16 +----
arch/riscv/cpu/generic/include/arch_mmu.h | 2 +-
8 files changed, 109 insertions(+), 118 deletions(-)

diff --git a/arch/arm/cpu/arm32ve/cpu_vcpu_excep.c b/arch/arm/cpu/arm32ve/cpu_vcpu_excep.c
index 03214ebd..c5aa2cc8 100644
--- a/arch/arm/cpu/arm32ve/cpu_vcpu_excep.c
+++ b/arch/arm/cpu/arm32ve/cpu_vcpu_excep.c
@@ -88,26 +88,7 @@ static int cpu_vcpu_stage2_map(struct vmm_vcpu *vcpu,
}
}

- if (pg_reg_flags & VMM_REGION_VIRTUAL) {
- pg.flags.af = 0;
- pg.flags.ap = TTBL_HAP_NOACCESS;
- } else if (pg_reg_flags & VMM_REGION_READONLY) {
- pg.flags.af = 1;
- pg.flags.ap = TTBL_HAP_READONLY;
- } else {
- pg.flags.af = 1;
- pg.flags.ap = TTBL_HAP_READWRITE;
- }
-
- if (pg_reg_flags & VMM_REGION_CACHEABLE) {
- if (pg_reg_flags & VMM_REGION_BUFFERABLE) {
- pg.flags.memattr = 0xF;
- } else {
- pg.flags.memattr = 0xA;
- }
- } else {
- pg.flags.memattr = 0x0;
- }
+ arch_mmu_pgflags_set(&pg.flags, MMU_STAGE2, pg_reg_flags);

/* Try to map the page in Stage2 */
rc = mmu_map_page(arm_guest_priv(vcpu->guest)->ttbl, &pg);
diff --git a/arch/arm/cpu/arm64/cpu_vcpu_excep.c b/arch/arm/cpu/arm64/cpu_vcpu_excep.c
index bc1b1b39..74169f96 100644
--- a/arch/arm/cpu/arm64/cpu_vcpu_excep.c
+++ b/arch/arm/cpu/arm64/cpu_vcpu_excep.c
@@ -49,7 +49,6 @@ static int cpu_vcpu_stage2_map(struct vmm_vcpu *vcpu,

inaddr = fipa & TTBL_L3_MAP_MASK;
size = TTBL_L3_BLOCK_SIZE;
- pg.flags.sh = 3U;

rc = vmm_guest_physical_map(vcpu->guest, inaddr, size,
&outaddr, &availsz, &reg_flags);
@@ -93,33 +92,7 @@ static int cpu_vcpu_stage2_map(struct vmm_vcpu *vcpu,
}
}

- if (pg_reg_flags & VMM_REGION_VIRTUAL) {
- pg.flags.af = 0;
- pg.flags.ap = TTBL_HAP_NOACCESS;
- } else if (pg_reg_flags & VMM_REGION_READONLY) {
- pg.flags.af = 1;
- pg.flags.ap = TTBL_HAP_READONLY;
- } else {
- pg.flags.af = 1;
- pg.flags.ap = TTBL_HAP_READWRITE;
- }
-
- /* memattr in stage 2
- * ------------------
- * 0x0 - strongly ordered
- * 0x5 - normal-memory NC
- * 0xA - normal-memory WT
- * 0xF - normal-memory WB
- */
- if (pg_reg_flags & VMM_REGION_CACHEABLE) {
- if (pg_reg_flags & VMM_REGION_BUFFERABLE) {
- pg.flags.memattr = 0xF;
- } else {
- pg.flags.memattr = 0xA;
- }
- } else {
- pg.flags.memattr = 0x0;
- }
+ arch_mmu_pgflags_set(&pg.flags, MMU_STAGE2, pg_reg_flags);

/* Try to map the page in Stage2 */
rc = mmu_map_page(arm_guest_priv(vcpu->guest)->ttbl, &pg);
diff --git a/arch/arm/cpu/common/include/mmu_lpae.h b/arch/arm/cpu/common/include/mmu_lpae.h
index 45ef78d4..a7a65737 100644
--- a/arch/arm/cpu/common/include/mmu_lpae.h
+++ b/arch/arm/cpu/common/include/mmu_lpae.h
@@ -192,7 +192,7 @@ int arch_mmu_level_index(physical_addr_t ia, int stage, int level);

int arch_mmu_level_index_shift(int stage, int level);

-void arch_mmu_stage1_pgflags_set(arch_pgflags_t *flags, u32 mem_flags);
+void arch_mmu_pgflags_set(arch_pgflags_t *flags, int stage, u32 mflags);

void arch_mmu_pte_sync(arch_pte_t *pte, int stage, int level);

diff --git a/arch/arm/cpu/common/mmu_lpae.c b/arch/arm/cpu/common/mmu_lpae.c
index a17c7dbb..82e026d4 100644
--- a/arch/arm/cpu/common/mmu_lpae.c
+++ b/arch/arm/cpu/common/mmu_lpae.c
@@ -25,6 +25,7 @@
#include <vmm_error.h>
#include <vmm_types.h>
#include <vmm_smp.h>
+#include <vmm_guest_aspace.h>
#include <vmm_host_aspace.h>
#include <generic_mmu.h>

@@ -156,35 +157,66 @@ int arch_mmu_level_index_shift(int stage, int level)
return TTBL_L3_INDEX_SHIFT;
}

-void arch_mmu_stage1_pgflags_set(arch_pgflags_t *flags, u32 mem_flags)
+void arch_mmu_pgflags_set(arch_pgflags_t *flags, int stage, u32 mflags)
{
- flags->af = 1;
- if (mem_flags & VMM_MEMORY_WRITEABLE) {
- flags->ap = TTBL_AP_SRW_U;
- } else if (mem_flags & VMM_MEMORY_READABLE) {
- flags->ap = TTBL_AP_SR_U;
- } else {
- flags->ap = TTBL_AP_SR_U;
- }
- flags->xn = (mem_flags & VMM_MEMORY_EXECUTABLE) ? 0 : 1;
- flags->ns = 1;
- flags->sh = TTBL_SH_INNER_SHAREABLE;
+ if (stage == MMU_STAGE2) {
+ flags->sh = 3U;
+ if (mflags & VMM_REGION_VIRTUAL) {
+ flags->af = 0;
+ flags->ap = TTBL_HAP_NOACCESS;
+ } else if (mflags & VMM_REGION_READONLY) {
+ flags->af = 1;
+ flags->ap = TTBL_HAP_READONLY;
+ } else {
+ flags->af = 1;
+ flags->ap = TTBL_HAP_READWRITE;
+ }

- if ((mem_flags & VMM_MEMORY_CACHEABLE) &&
- (mem_flags & VMM_MEMORY_BUFFERABLE)) {
- flags->aindex = AINDEX_NORMAL_WB;
- } else if (mem_flags & VMM_MEMORY_CACHEABLE) {
- flags->aindex = AINDEX_NORMAL_WT;
- } else if (mem_flags & VMM_MEMORY_BUFFERABLE) {
- flags->aindex = AINDEX_NORMAL_WB;
- } else if (mem_flags & VMM_MEMORY_IO_DEVICE) {
- flags->aindex = AINDEX_DEVICE_nGnRE;
- } else if (mem_flags & VMM_MEMORY_DMA_COHERENT) {
- flags->aindex = AINDEX_NORMAL_WB;
- } else if (mem_flags & VMM_MEMORY_DMA_NONCOHERENT) {
- flags->aindex = AINDEX_NORMAL_NC;
+ /* memattr in stage 2
+ * ------------------
+ * 0x0 - strongly ordered
+ * 0x5 - normal-memory NC
+ * 0xA - normal-memory WT
+ * 0xF - normal-memory WB
+ */
+ if (mflags & VMM_REGION_CACHEABLE) {
+ if (mflags & VMM_REGION_BUFFERABLE) {
+ flags->memattr = 0xF;
+ } else {
+ flags->memattr = 0xA;
+ }
+ } else {
+ flags->memattr = 0x0;
+ }
} else {
- flags->aindex = AINDEX_NORMAL_NC;
+ flags->af = 1;
+ if (mflags & VMM_MEMORY_WRITEABLE) {
+ flags->ap = TTBL_AP_SRW_U;
+ } else if (mflags & VMM_MEMORY_READABLE) {
+ flags->ap = TTBL_AP_SR_U;
+ } else {
+ flags->ap = TTBL_AP_SR_U;
+ }
+ flags->xn = (mflags & VMM_MEMORY_EXECUTABLE) ? 0 : 1;
+ flags->ns = 1;
+ flags->sh = TTBL_SH_INNER_SHAREABLE;
+
+ if ((mflags & VMM_MEMORY_CACHEABLE) &&
+ (mflags & VMM_MEMORY_BUFFERABLE)) {
+ flags->aindex = AINDEX_NORMAL_WB;
+ } else if (mflags & VMM_MEMORY_CACHEABLE) {
+ flags->aindex = AINDEX_NORMAL_WT;
+ } else if (mflags & VMM_MEMORY_BUFFERABLE) {
+ flags->aindex = AINDEX_NORMAL_WB;
+ } else if (mflags & VMM_MEMORY_IO_DEVICE) {
+ flags->aindex = AINDEX_DEVICE_nGnRE;
+ } else if (mflags & VMM_MEMORY_DMA_COHERENT) {
+ flags->aindex = AINDEX_NORMAL_WB;
+ } else if (mflags & VMM_MEMORY_DMA_NONCOHERENT) {
+ flags->aindex = AINDEX_NORMAL_NC;
+ } else {
+ flags->aindex = AINDEX_NORMAL_NC;
+ }
}
}

diff --git a/arch/common/generic_mmu.c b/arch/common/generic_mmu.c
index 07b9e6e7..d39eb7c9 100644
--- a/arch/common/generic_mmu.c
+++ b/arch/common/generic_mmu.c
@@ -863,7 +863,7 @@ int __cpuinit arch_cpu_aspace_memory_rwinit(virtual_addr_t tmp_va)
p.ia = tmp_va;
p.oa = 0x0;
p.sz = VMM_PAGE_SIZE;
- arch_mmu_stage1_pgflags_set(&p.flags, VMM_MEMORY_FLAGS_NORMAL);
+ arch_mmu_pgflags_set(&p.flags, MMU_STAGE1, VMM_MEMORY_FLAGS_NORMAL);

rc = mmu_map_hypervisor_page(&p);
if (rc) {
@@ -879,10 +879,10 @@ int __cpuinit arch_cpu_aspace_memory_rwinit(virtual_addr_t tmp_va)
return rc;
}

- arch_mmu_stage1_pgflags_set(&mem_rw_pgflags_cache[cpu],
- VMM_MEMORY_FLAGS_NORMAL);
- arch_mmu_stage1_pgflags_set(&mem_rw_pgflags_nocache[cpu],
- VMM_MEMORY_FLAGS_NORMAL_NOCACHE);
+ arch_mmu_pgflags_set(&mem_rw_pgflags_cache[cpu],
+ MMU_STAGE1, VMM_MEMORY_FLAGS_NORMAL);
+ arch_mmu_pgflags_set(&mem_rw_pgflags_nocache[cpu],
+ MMU_STAGE1, VMM_MEMORY_FLAGS_NORMAL_NOCACHE);

return VMM_OK;
}
@@ -940,7 +940,7 @@ int arch_cpu_aspace_map(virtual_addr_t page_va,
p.ia = page_va;
p.oa = page_pa;
p.sz = page_sz;
- arch_mmu_stage1_pgflags_set(&p.flags, mem_flags);
+ arch_mmu_pgflags_set(&p.flags, MMU_STAGE1, mem_flags);

return mmu_map_hypervisor_page(&p);
}
@@ -1182,8 +1182,8 @@ int __init arch_cpu_aspace_primary_init(physical_addr_t *core_resv_pa,
hyppg.oa = pa;
hyppg.ia = va;
hyppg.sz = l0_size;
- arch_mmu_stage1_pgflags_set(&hyppg.flags,
- VMM_MEMORY_FLAGS_NORMAL);
+ arch_mmu_pgflags_set(&hyppg.flags, MMU_STAGE1,
+ VMM_MEMORY_FLAGS_NORMAL);
if ((rc = mmu_map_hypervisor_page(&hyppg))) {
goto mmu_init_error;
}
diff --git a/arch/riscv/cpu/generic/cpu_mmu.c b/arch/riscv/cpu/generic/cpu_mmu.c
index 83277169..fd693bbf 100644
--- a/arch/riscv/cpu/generic/cpu_mmu.c
+++ b/arch/riscv/cpu/generic/cpu_mmu.c
@@ -24,6 +24,7 @@
#include <vmm_error.h>
#include <vmm_types.h>
#include <vmm_smp.h>
+#include <vmm_guest_aspace.h>
#include <vmm_host_aspace.h>
#include <generic_mmu.h>
#include <arch_barrier.h>
@@ -221,26 +222,44 @@ int arch_mmu_level_index_shift(int stage, int level)
return PGTBL_L0_INDEX_SHIFT;
}

-void arch_mmu_stage1_pgflags_set(arch_pgflags_t *flags, u32 mem_flags)
+void arch_mmu_pgflags_set(arch_pgflags_t *flags, int stage, u32 mflags)
{
- flags->rsw = 0;
- flags->accessed = 0;
- flags->dirty = 0;
- flags->global = 1;
- flags->user = 0;
- flags->execute = (mem_flags & VMM_MEMORY_EXECUTABLE) ? 0 : 1;
- flags->write = (mem_flags & VMM_MEMORY_WRITEABLE) ? 1 : 0;
- flags->read = (mem_flags & VMM_MEMORY_READABLE) ? 1 : 0;
- flags->valid = 1;
-
- /*
- * We ignore following flags:
- * VMM_MEMORY_CACHEABLE
- * VMM_MEMORY_BUFFERABLE
- * VMM_MEMORY_IO_DEVICE
- * VMM_MEMORY_DMA_COHERENT
- * VMM_MEMORY_DMA_NONCOHERENT
- */
+ if (stage == MMU_STAGE2) {
+ flags->user = 1;
+ if (mflags & VMM_REGION_VIRTUAL) {
+ flags->read = 0;
+ flags->write = 0;
+ flags->execute = 1;
+ } else if (mflags & VMM_REGION_READONLY) {
+ flags->read = 1;
+ flags->write = 0;
+ flags->execute = 1;
+ } else {
+ flags->read = 1;
+ flags->write = 1;
+ flags->execute = 1;
+ }
+ flags->valid = 1;
+ } else {
+ flags->rsw = 0;
+ flags->accessed = 0;
+ flags->dirty = 0;
+ flags->global = 1;
+ flags->user = 0;
+ flags->execute = (mflags & VMM_MEMORY_EXECUTABLE) ? 1 : 0;
+ flags->write = (mflags & VMM_MEMORY_WRITEABLE) ? 1 : 0;
+ flags->read = (mflags & VMM_MEMORY_READABLE) ? 1 : 0;
+ flags->valid = 1;
+
+ /*
+ * We ignore following flags:
+ * VMM_MEMORY_CACHEABLE
+ * VMM_MEMORY_BUFFERABLE
+ * VMM_MEMORY_IO_DEVICE
+ * VMM_MEMORY_DMA_COHERENT
+ * VMM_MEMORY_DMA_NONCOHERENT
+ */
+ }
}

void arch_mmu_pte_sync(arch_pte_t *pte, int stage, int level)
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_trap.c b/arch/riscv/cpu/generic/cpu_vcpu_trap.c
index ae691d2b..08d9dea2 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_trap.c
+++ b/arch/riscv/cpu/generic/cpu_vcpu_trap.c
@@ -127,21 +127,7 @@ static int cpu_vcpu_stage2_map(struct vmm_vcpu *vcpu,
#endif
}

- pg.flags.user = 1;
- if (pg_reg_flags & VMM_REGION_VIRTUAL) {
- pg.flags.read = 0;
- pg.flags.write = 0;
- pg.flags.execute = 1;
- } else if (pg_reg_flags & VMM_REGION_READONLY) {
- pg.flags.read = 1;
- pg.flags.write = 0;
- pg.flags.execute = 1;
- } else {
- pg.flags.read = 1;
- pg.flags.write = 1;
- pg.flags.execute = 1;
- }
- pg.flags.valid = 1;
+ arch_mmu_pgflags_set(&pg.flags, MMU_STAGE2, pg_reg_flags);

/* Try to map the page in Stage2 */
rc = mmu_map_page(riscv_guest_priv(vcpu->guest)->pgtbl, &pg);
diff --git a/arch/riscv/cpu/generic/include/arch_mmu.h b/arch/riscv/cpu/generic/include/arch_mmu.h
index 5e8b95d3..b3f9a2a4 100644
--- a/arch/riscv/cpu/generic/include/arch_mmu.h
+++ b/arch/riscv/cpu/generic/include/arch_mmu.h
@@ -153,7 +153,7 @@ int arch_mmu_level_index(physical_addr_t ia, int stage, int level);

int arch_mmu_level_index_shift(int stage, int level);

-void arch_mmu_stage1_pgflags_set(arch_pgflags_t *flags, u32 mem_flags);
+void arch_mmu_pgflags_set(arch_pgflags_t *flags, int stage, u32 mflags);

void arch_mmu_pte_sync(arch_pte_t *pte, int stage, int level);

--
2.25.1

Anup Patel

unread,
Jun 9, 2020, 9:39:40 AM6/9/20
to xvisor...@googlegroups.com, Anup Patel
We extend the generic MMU support to test nested page tables. To
achieve this, we add the required APIs in the generic MMU and the
related arch functions.

Signed-off-by: Anup Patel <an...@brainfault.org>
---
arch/arm/cpu/common/include/mmu_lpae.h | 6 +
arch/arm/cpu/common/mmu_lpae.c | 10 +
arch/common/generic_mmu.c | 301 ++++++++++++++++++++++
arch/common/include/generic_mmu.h | 71 ++++-
arch/riscv/cpu/generic/cpu_mmu.c | 10 +
arch/riscv/cpu/generic/include/arch_mmu.h | 6 +
6 files changed, 403 insertions(+), 1 deletion(-)

diff --git a/arch/arm/cpu/common/include/mmu_lpae.h b/arch/arm/cpu/common/include/mmu_lpae.h
index a7a65737..6d86d350 100644
--- a/arch/arm/cpu/common/include/mmu_lpae.h
+++ b/arch/arm/cpu/common/include/mmu_lpae.h
@@ -215,6 +215,12 @@ physical_addr_t arch_mmu_pte_table_addr(arch_pte_t *pte, int stage, int level);
void arch_mmu_pte_set_table(arch_pte_t *pte, int stage, int level,
physical_addr_t tbl_pa);

+int arch_mmu_test_nested_pgtbl(physical_addr_t s2_tbl_pa,
+ bool s1_avail, physical_addr_t s1_tbl_pa,
+ u32 flags, virtual_addr_t addr,
+ physical_addr_t *out_addr,
+ u32 *out_fault_flags);
+
physical_addr_t arch_mmu_stage2_current_pgtbl_addr(void);

u32 arch_mmu_stage2_current_vmid(void);
diff --git a/arch/arm/cpu/common/mmu_lpae.c b/arch/arm/cpu/common/mmu_lpae.c
index 82e026d4..147200ed 100644
--- a/arch/arm/cpu/common/mmu_lpae.c
+++ b/arch/arm/cpu/common/mmu_lpae.c
@@ -356,6 +356,16 @@ void arch_mmu_pte_set_table(arch_pte_t *pte, int stage, int level,
*pte |= (TTBL_TABLE_MASK | TTBL_VALID_MASK);
}

+int arch_mmu_test_nested_pgtbl(physical_addr_t s2_tbl_pa,
+ bool s1_avail, physical_addr_t s1_tbl_pa,
+ u32 flags, virtual_addr_t addr,
+ physical_addr_t *out_addr,
+ u32 *out_fault_flags)
+{
+ /* To be implemented later. */
+ return VMM_ENOTAVAIL;
+}
+
physical_addr_t arch_mmu_stage2_current_pgtbl_addr(void)
{
return cpu_stage2_ttbl_pa();
diff --git a/arch/common/generic_mmu.c b/arch/common/generic_mmu.c
index d39eb7c9..5bc5cf4d 100644
--- a/arch/common/generic_mmu.c
+++ b/arch/common/generic_mmu.c
@@ -701,12 +701,21 @@ int mmu_find_pte(struct mmu_pgtbl *pgtbl, physical_addr_t ia,
int index;
arch_pte_t *pte;
irq_flags_t flags;
+ physical_size_t map_last;
struct mmu_pgtbl *child;

if (!pgtbl || !ptep || !pgtblp) {
return VMM_EFAIL;
}

+ map_last = arch_mmu_level_block_size(pgtbl->stage, pgtbl->level);
+ map_last *= (pgtbl->tbl_sz / sizeof(arch_pte_t));
+ map_last -= 1;
+ if ((ia < pgtbl->map_ia) ||
+ ((pgtbl->map_ia + map_last) < ia)) {
+ return VMM_EFAIL;
+ }
+
index = arch_mmu_level_index(ia, pgtbl->stage, pgtbl->level);
pte = (arch_pte_t *)pgtbl->tbl_va;

@@ -741,6 +750,298 @@ int mmu_find_pte(struct mmu_pgtbl *pgtbl, physical_addr_t ia,
return VMM_OK;
}

+void mmu_walk_address(struct mmu_pgtbl *pgtbl, physical_addr_t ia,
+ void (*fn)(struct mmu_pgtbl *, arch_pte_t *, void *),
+ void *opaque)
+{
+ int index;
+ arch_pte_t *pte;
+ irq_flags_t flags;
+ physical_size_t map_last;
+ struct mmu_pgtbl *child;
+
+ if (!pgtbl || !fn) {
+ return;
+ }
+
+ map_last = arch_mmu_level_block_size(pgtbl->stage, pgtbl->level);
+ map_last *= (pgtbl->tbl_sz / sizeof(arch_pte_t));
+ map_last -= 1;
+ if ((ia < pgtbl->map_ia) ||
+ ((pgtbl->map_ia + map_last) < ia)) {
+ return;
+ }
+
+ index = arch_mmu_level_index(ia, pgtbl->stage, pgtbl->level);
+ pte = (arch_pte_t *)pgtbl->tbl_va;
+
+ fn(pgtbl, &pte[index], opaque);
+
+ vmm_spin_lock_irqsave_lite(&pgtbl->tbl_lock, flags);
+
+ if (!arch_mmu_pte_is_valid(&pte[index], pgtbl->stage, pgtbl->level)) {
+ vmm_spin_unlock_irqrestore_lite(&pgtbl->tbl_lock, flags);
+ return;
+ }
+
+ if ((pgtbl->level == 0) &&
+ arch_mmu_pte_is_table(&pte[index], pgtbl->stage, pgtbl->level)) {
+ vmm_spin_unlock_irqrestore_lite(&pgtbl->tbl_lock, flags);
+ return;
+ }
+
+ if ((pgtbl->level > 0) &&
+ arch_mmu_pte_is_table(&pte[index], pgtbl->stage, pgtbl->level)) {
+ vmm_spin_unlock_irqrestore_lite(&pgtbl->tbl_lock, flags);
+ child = mmu_pgtbl_get_child(pgtbl, ia, FALSE);
+ if (!child) {
+ return;
+ }
+ mmu_walk_address(child, ia, fn, opaque);
+ return;
+ }
+
+ vmm_spin_unlock_irqrestore_lite(&pgtbl->tbl_lock, flags);
+}
+
+void mmu_walk_tables(struct mmu_pgtbl *pgtbl,
+ void (*fn)(struct mmu_pgtbl *pgtbl, void *),
+ void *opaque)
+{
+ irq_flags_t flags;
+ struct mmu_pgtbl *child;
+
+ if (!pgtbl || !fn) {
+ return;
+ }
+
+ fn(pgtbl, opaque);
+
+ vmm_spin_lock_irqsave_lite(&pgtbl->tbl_lock, flags);
+
+ list_for_each_entry(child, &pgtbl->child_list, head) {
+ vmm_spin_unlock_irqrestore_lite(&pgtbl->tbl_lock, flags);
+ mmu_walk_tables(child, fn, opaque);
+ vmm_spin_lock_irqsave_lite(&pgtbl->tbl_lock, flags);
+ }
+
+ vmm_spin_unlock_irqrestore_lite(&pgtbl->tbl_lock, flags);
+}
+
+struct free_address_walk {
+ bool found;
+ int level;
+ physical_addr_t min_addr;
+ physical_addr_t *addr;
+};
+
+static void free_address_walk(struct mmu_pgtbl *pgtbl, void *opaque)
+{
+ arch_pte_t *pte;
+ irq_flags_t flags;
+ physical_addr_t ia;
+ int index, pte_count;
+ struct free_address_walk *w = opaque;
+
+ if (w->found || pgtbl->level != w->level) {
+ return;
+ }
+
+ pte = (arch_pte_t *)pgtbl->tbl_va;
+ pte_count = pgtbl->tbl_sz / sizeof(arch_pte_t);
+
+ vmm_spin_lock_irqsave_lite(&pgtbl->tbl_lock, flags);
+
+ for (index = 0; index < pte_count; index++) {
+ if (arch_mmu_pte_is_valid(&pte[index],
+ pgtbl->stage, pgtbl->level)) {
+ continue;
+ }
+ ia = pgtbl->map_ia +
+ index * arch_mmu_level_block_size(pgtbl->stage, pgtbl->level);
+ if (ia < w->min_addr) {
+ continue;
+ }
+ vmm_spin_unlock_irqrestore_lite(&pgtbl->tbl_lock, flags);
+ w->found = TRUE;
+ *w->addr = ia;
+ return;
+ }
+
+ vmm_spin_unlock_irqrestore_lite(&pgtbl->tbl_lock, flags);
+
+ return;
+}
+
+int mmu_find_free_address(struct mmu_pgtbl *pgtbl, physical_addr_t min_addr,
+ int page_order, physical_addr_t *addr)
+{
+ int level;
+ struct free_address_walk w;
+
+ if (!pgtbl || !addr) {
+ return VMM_EINVALID;
+ }
+
+ for (level = 0; level <= pgtbl->level; level++) {
+ if (arch_mmu_level_block_shift(pgtbl->stage, level) >=
+ page_order) {
+ break;
+ }
+ }
+ if (pgtbl->level < level) {
+ return VMM_EINVALID;
+ }
+
+ while (level <= pgtbl->level) {
+ w.found = FALSE;
+ w.level = level;
+ w.min_addr = min_addr;
+ w.addr = addr;
+
+ mmu_walk_tables(pgtbl, free_address_walk, &w);
+ if (w.found) {
+ return VMM_OK;
+ }
+
+ level++;
+ }
+
+ return VMM_ENOTAVAIL;
+}
+
+struct idmap_nested_pgtbl_walk {
+ struct mmu_pgtbl *s2_pgtbl;
+ int map_level;
+ physical_size_t map_size;
+ u32 reg_flags;
+ int error;
+};
+
+static void idmap_nested_pgtbl_walk(struct mmu_pgtbl *pgtbl, void *opaque)
+{
+ int rc;
+ physical_addr_t ta;
+ struct mmu_page pg = { 0 }, tpg;
+
+ struct idmap_nested_pgtbl_walk *iw = opaque;
+
+ if (iw->error) {
+ return;
+ }
+
+ arch_mmu_pgflags_set(&pg.flags, MMU_STAGE2, iw->reg_flags);
+ for (ta = 0; ta < pgtbl->tbl_sz; ta += iw->map_size) {
+ pg.ia = pgtbl->tbl_pa + ta;
+ pg.ia &= arch_mmu_level_map_mask(MMU_STAGE2, iw->map_level);
+ pg.oa = pgtbl->tbl_pa + ta;
+ pg.oa &= arch_mmu_level_map_mask(MMU_STAGE2, iw->map_level);
+ pg.sz = iw->map_size;
+
+ if (mmu_get_page(iw->s2_pgtbl, pg.ia, &tpg)) {
+ rc = mmu_map_page(iw->s2_pgtbl, &pg);
+ if (rc) {
+ iw->error = rc;
+ return;
+ }
+ } else {
+ if (pg.ia != tpg.ia ||
+ pg.oa != tpg.oa ||
+ pg.sz != tpg.sz) {
+ iw->error = VMM_EFAIL;
+ return;
+ }
+ }
+ }
+}
+
+int mmu_idmap_nested_pgtbl(struct mmu_pgtbl *s2_pgtbl,
+ struct mmu_pgtbl *s1_pgtbl,
+ physical_size_t map_size, u32 reg_flags)
+{
+ int level;
+ struct idmap_nested_pgtbl_walk iw;
+
+ if (!s2_pgtbl || (s2_pgtbl->stage != MMU_STAGE2)) {
+ return VMM_EINVALID;
+ }
+ if (!s1_pgtbl || (s1_pgtbl->stage != MMU_STAGE1)) {
+ return VMM_EINVALID;
+ }
+
+ for (level = 0; level <= s2_pgtbl->level; level++) {
+ if (arch_mmu_level_block_size(s2_pgtbl->stage, level) ==
+ map_size) {
+ break;
+ }
+ }
+ if (s2_pgtbl->level < level) {
+ return VMM_EINVALID;
+ }
+
+ iw.s2_pgtbl = s2_pgtbl;
+ iw.map_level = level;
+ iw.map_size = map_size;
+ iw.reg_flags = reg_flags;
+ iw.error = VMM_OK;
+
+ mmu_walk_tables(s1_pgtbl, idmap_nested_pgtbl_walk, &iw);
+
+ return iw.error;
+}
+
+int mmu_test_nested_pgtbl(struct mmu_pgtbl *s2_pgtbl,
+ struct mmu_pgtbl *s1_pgtbl,
+ u32 flags, virtual_addr_t addr,
+ physical_addr_t expected_output_addr,
+ u32 expected_fault_flags)
+{
+ int rc;
+ physical_addr_t oaddr = 0;
+ u32 offlags = 0;
+
+ if (!s2_pgtbl || (s2_pgtbl->stage != MMU_STAGE2)) {
+ return VMM_EINVALID;
+ }
+ if (s1_pgtbl && (s1_pgtbl->stage != MMU_STAGE1)) {
+ return VMM_EINVALID;
+ }
+ if (flags & ~MMU_TEST_VALID_MASK) {
+ return VMM_EINVALID;
+ }
+ if ((flags & MMU_TEST_WIDTH_16BIT) && (addr & 0x1)) {
+ return VMM_EINVALID;
+ }
+ if ((flags & MMU_TEST_WIDTH_32BIT) && (addr & 0x3)) {
+ return VMM_EINVALID;
+ }
+
+ rc = arch_mmu_test_nested_pgtbl(s2_pgtbl->tbl_pa,
+ (s1_pgtbl) ? TRUE : FALSE,
+ (s1_pgtbl) ? s1_pgtbl->tbl_pa : 0,
+ flags, addr, &oaddr, &offlags);
+ if (rc) {
+ return rc;
+ }
+
+ /* All expected fault bits should be set */
+ if ((offlags & expected_fault_flags) ^ expected_fault_flags) {
+ return VMM_EFAIL;
+ }
+
+ /* No unexpected fault bit should be set */
+ if (expected_fault_flags && (offlags & ~expected_fault_flags)) {
+ return VMM_EFAIL;
+ }
+
+ /* Output address should match */
+ if (oaddr != expected_output_addr) {
+ return VMM_EFAIL;
+ }
+
+ return VMM_OK;
+}
+
int mmu_get_hypervisor_page(virtual_addr_t va, struct mmu_page *pg)
{
return mmu_get_page(&mmuctrl.hyp_pgtbl, va, pg);
diff --git a/arch/common/include/generic_mmu.h b/arch/common/include/generic_mmu.h
index a3bb58de..22d49e14 100644
--- a/arch/common/include/generic_mmu.h
+++ b/arch/common/include/generic_mmu.h
@@ -53,7 +53,7 @@ struct mmu_pgtbl {
physical_addr_t map_ia;
physical_addr_t tbl_pa;
vmm_spinlock_t tbl_lock; /*< Lock to protect table contents,
- tte_cnt, child_cnt, and child_list
+ pte_cnt, child_cnt, and child_list
*/
virtual_addr_t tbl_va;
virtual_size_t tbl_sz;
@@ -78,6 +78,42 @@ struct mmu_pgtbl *mmu_pgtbl_alloc(int stage, int level);

int mmu_pgtbl_free(struct mmu_pgtbl *pgtbl);

+static inline enum mmu_stage mmu_pgtbl_stage(struct mmu_pgtbl *pgtbl)
+{
+ return (pgtbl) ? pgtbl->stage : MMU_STAGE_UNKNOWN;
+}
+
+static inline int mmu_pgtbl_level(struct mmu_pgtbl *pgtbl)
+{
+ return (pgtbl) ? pgtbl->level : -1;
+}
+
+static inline physical_addr_t mmu_pgtbl_map_addr(struct mmu_pgtbl *pgtbl)
+{
+ return (pgtbl) ? pgtbl->map_ia : 0;
+}
+
+static inline physical_addr_t mmu_pgtbl_map_addr_end(struct mmu_pgtbl *pgtbl)
+{
+ if (!pgtbl) {
+ return 0;
+ }
+
+ return (pgtbl->map_ia +
+ ((pgtbl->tbl_sz / sizeof(arch_pte_t)) *
+ arch_mmu_level_block_size(pgtbl->stage, pgtbl->level))) - 1;
+}
+
+static inline physical_addr_t mmu_pgtbl_physical_addr(struct mmu_pgtbl *pgtbl)
+{
+ return (pgtbl) ? pgtbl->tbl_pa : 0;
+}
+
+static inline virtual_size_t mmu_pgtbl_size(struct mmu_pgtbl *pgtbl)
+{
+ return (pgtbl) ? pgtbl->tbl_sz : 0;
+}
+
struct mmu_pgtbl *mmu_pgtbl_get_child(struct mmu_pgtbl *parent,
physical_addr_t map_ia,
bool create);
@@ -92,6 +128,39 @@ int mmu_map_page(struct mmu_pgtbl *pgtbl, struct mmu_page *pg);
int mmu_find_pte(struct mmu_pgtbl *pgtbl, physical_addr_t ia,
arch_pte_t **ptep, struct mmu_pgtbl **pgtblp);

+void mmu_walk_address(struct mmu_pgtbl *pgtbl, physical_addr_t ia,
+ void (*fn)(struct mmu_pgtbl *, arch_pte_t *, void *),
+ void *opaque);
+
+void mmu_walk_tables(struct mmu_pgtbl *pgtbl,
+ void (*fn)(struct mmu_pgtbl *pgtbl, void *),
+ void *opaque);
+
+int mmu_find_free_address(struct mmu_pgtbl *pgtbl, physical_addr_t min_addr,
+ int page_order, physical_addr_t *addr);
+
+int mmu_idmap_nested_pgtbl(struct mmu_pgtbl *s2_pgtbl,
+ struct mmu_pgtbl *s1_pgtbl,
+ physical_size_t map_size, u32 reg_flags);
+
+#define MMU_TEST_WIDTH_8BIT (1UL << 0)
+#define MMU_TEST_WIDTH_16BIT (1UL << 1)
+#define MMU_TEST_WIDTH_32BIT (1UL << 2)
+#define MMU_TEST_WRITE (1UL << 3)
+#define MMU_TEST_VALID_MASK 0xfUL
+
+#define MMU_TEST_FAULT_S1 (1UL << 0)
+#define MMU_TEST_FAULT_NOMAP (1UL << 1)
+#define MMU_TEST_FAULT_READ (1UL << 2)
+#define MMU_TEST_FAULT_WRITE (1UL << 3)
+#define MMU_TEST_FAULT_UNKNOWN (1UL << 4)
+
+int mmu_test_nested_pgtbl(struct mmu_pgtbl *s2_pgtbl,
+ struct mmu_pgtbl *s1_pgtbl,
+ u32 flags, virtual_addr_t addr,
+ physical_addr_t expected_output_addr,
+ u32 expected_fault_flags);
+
int mmu_get_hypervisor_page(virtual_addr_t va, struct mmu_page *pg);

int mmu_unmap_hypervisor_page(struct mmu_page *pg);
diff --git a/arch/riscv/cpu/generic/cpu_mmu.c b/arch/riscv/cpu/generic/cpu_mmu.c
index fd693bbf..bd7172b9 100644
--- a/arch/riscv/cpu/generic/cpu_mmu.c
+++ b/arch/riscv/cpu/generic/cpu_mmu.c
@@ -351,6 +351,16 @@ void arch_mmu_pte_set_table(arch_pte_t *pte, int stage, int level,
*pte |= PGTBL_PTE_VALID_MASK;
}

+int arch_mmu_test_nested_pgtbl(physical_addr_t s2_tbl_pa,
+ bool s1_avail, physical_addr_t s1_tbl_pa,
+ u32 flags, virtual_addr_t addr,
+ physical_addr_t *out_addr,
+ u32 *out_fault_flags)
+{
+ /* To be implemented later. */
+ return VMM_ENOTAVAIL;
+}
+
physical_addr_t arch_mmu_stage2_current_pgtbl_addr(void)
{
unsigned long pgtbl_ppn = csr_read(CSR_HGATP) & HGATP_PPN;
diff --git a/arch/riscv/cpu/generic/include/arch_mmu.h b/arch/riscv/cpu/generic/include/arch_mmu.h
index b3f9a2a4..e67b1176 100644
--- a/arch/riscv/cpu/generic/include/arch_mmu.h
+++ b/arch/riscv/cpu/generic/include/arch_mmu.h
@@ -176,6 +176,12 @@ physical_addr_t arch_mmu_pte_table_addr(arch_pte_t *pte, int stage, int level);
void arch_mmu_pte_set_table(arch_pte_t *pte, int stage, int level,
physical_addr_t tbl_pa);

+int arch_mmu_test_nested_pgtbl(physical_addr_t s2_tbl_pa,
+ bool s1_avail, physical_addr_t s1_tbl_pa,
+ u32 flags, virtual_addr_t addr,
+ physical_addr_t *out_addr,
+ u32 *out_fault_flags);
+
physical_addr_t arch_mmu_stage2_current_pgtbl_addr(void);

u32 arch_mmu_stage2_current_vmid(void);
--
2.25.1

Anup Patel

unread,
Jun 9, 2020, 9:39:42 AM6/9/20
to xvisor...@googlegroups.com, Anup Patel
We implement arch_mmu_test_nested_pgtbl() for RISC-V using the HSV/HLV
instructions. This will allow us to test the RISC-V nested MMU without
creating any Guest/VM.

Signed-off-by: Anup Patel <an...@brainfault.org>
---
arch/riscv/cpu/generic/cpu_mmu.c | 290 +++++++++++++++++-
arch/riscv/cpu/generic/cpu_vcpu_unpriv.S | 27 +-
.../cpu/generic/include/cpu_vcpu_unpriv.h | 7 +
3 files changed, 313 insertions(+), 11 deletions(-)

diff --git a/arch/riscv/cpu/generic/cpu_mmu.c b/arch/riscv/cpu/generic/cpu_mmu.c
index bd7172b9..efc66c17 100644
--- a/arch/riscv/cpu/generic/cpu_mmu.c
+++ b/arch/riscv/cpu/generic/cpu_mmu.c
@@ -28,10 +28,13 @@
#include <vmm_host_aspace.h>
#include <generic_mmu.h>
#include <arch_barrier.h>
+#include <arch_cpu_irq.h>

#include <cpu_hwcap.h>
#include <cpu_tlb.h>
#include <cpu_sbi.h>
+#include <cpu_vcpu_trap.h>
+#include <cpu_vcpu_unpriv.h>

#ifdef CONFIG_64BIT
/* Assume Sv39 */
@@ -357,8 +360,291 @@ int arch_mmu_test_nested_pgtbl(physical_addr_t s2_tbl_pa,
physical_addr_t *out_addr,
u32 *out_fault_flags)
{
- /* To be implemented later. */
- return VMM_ENOTAVAIL;
+ int rc = VMM_OK;
+ irq_flags_t f;
+ struct mmu_page pg;
+ struct mmu_pgtbl *s1_pgtbl, *s2_pgtbl;
+ struct cpu_vcpu_trap trap = { 0 };
+ struct cpu_vcpu_trap *tinfo = &trap;
+ physical_addr_t trap_gpa;
+ unsigned long tmp = -1UL, trap_gva;
+ unsigned long hstatus, stvec, vsatp, hgatp;
+
+ hgatp = riscv_stage2_mode << HGATP_MODE_SHIFT;
+ hgatp |= (s2_tbl_pa >> PGTBL_PAGE_SIZE_SHIFT) & HGATP_PPN;
+ if (s1_avail) {
+ vsatp = riscv_stage1_mode << SATP_MODE_SHIFT;
+ vsatp |= (s1_tbl_pa >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN;
+ } else {
+ vsatp = 0;
+ }
+ stvec = (unsigned long)&__cpu_vcpu_unpriv_trap_handler;
+
+ arch_cpu_irq_save(f);
+
+ hstatus = csr_read(CSR_HSTATUS);
+ csr_clear(CSR_HSTATUS, HSTATUS_GVA);
+
+ stvec = csr_swap(CSR_STVEC, stvec);
+ vsatp = csr_swap(CSR_VSATP, vsatp);
+ hgatp = csr_swap(CSR_HGATP, hgatp);
+
+ if (flags & MMU_TEST_WRITE) {
+ /*
+ * t0 is register 5
+ * t1 is register 6
+ * t2 is register 7
+ */
+ if (flags & MMU_TEST_WIDTH_8BIT) {
+ /*
+ * HSV.B rs2, (rs1) instruction
+ * 0110001 rs2 rs1 100 00000 1110011
+ */
+ asm volatile("\n"
+ ".option push\n"
+ ".option norvc\n"
+ "add t0, %[tmp], zero\n"
+ "add t1, %[tinfo], zero\n"
+ "add t2, %[addr], zero\n"
+ /*
+ * HSV.B t0, (t2)
+ * 0110001 00101 00111 100 00000 1110011
+ */
+ ".word 0x6253c073\n"
+ ".option pop"
+ : [tinfo] "+&r"(tinfo)
+ : [tmp] "r"(tmp), [addr] "r"(addr)
+ : "t0", "t1", "t2", "memory");
+ } else if (flags & MMU_TEST_WIDTH_16BIT) {
+ /*
+ * HSV.H rs2, (rs1) instruction
+ * 0110011 rs2 rs1 100 00000 1110011
+ */
+ asm volatile ("\n"
+ ".option push\n"
+ ".option norvc\n"
+ "add t0, %[tmp], zero\n"
+ "add t1, %[tinfo], zero\n"
+ "add t2, %[addr], zero\n"
+ /*
+ * HSV.H t0, (t2)
+ * 0110011 00101 00111 100 00000 1110011
+ */
+ ".word 0x6653c073\n"
+ ".option pop"
+ : [tinfo] "+&r"(tinfo)
+ : [tmp] "r"(tmp), [addr] "r"(addr)
+ : "t0", "t1", "t2", "memory");
+ } else if (flags & MMU_TEST_WIDTH_32BIT) {
+ /*
+ * HSV.W rs2, (rs1) instruction
+ * 0110101 rs2 rs1 100 00000 1110011
+ */
+ asm volatile ("\n"
+ ".option push\n"
+ ".option norvc\n"
+ "add t0, %[tmp], zero\n"
+ "add t1, %[tinfo], zero\n"
+ "add t2, %[addr], zero\n"
+ /*
+ * HSV.W t0, (t2)
+ * 0110101 00101 00111 100 00000 1110011
+ */
+ ".word 0x6a53c073\n"
+ ".option pop"
+ : [tinfo] "+&r"(tinfo)
+ : [tmp] "r"(tmp), [addr] "r"(addr)
+ : "t0", "t1", "t2", "memory");
+ } else {
+ rc = VMM_EINVALID;
+ }
+ } else {
+ if (flags & MMU_TEST_WIDTH_8BIT) {
+ /*
+ * HLV.BU rd, (rs1) instruction
+ * 0110000 00001 rs1 100 rd 1110011
+ */
+ asm volatile ("\n"
+ ".option push\n"
+ ".option norvc\n"
+ "add t1, %[tinfo], zero\n"
+ "add t2, %[addr], zero\n"
+ /*
+ * HLV.BU t0, (t2)
+ * 0110000 00001 00111 100 00101 1110011
+ */
+ ".word 0x6013c2f3\n"
+ "add %[tmp], t0, zero\n"
+ ".option pop"
+ : [tinfo] "+&r"(tinfo), [tmp] "=&r" (tmp)
+ : [addr] "r"(addr)
+ : "t0", "t1", "t2", "memory");
+ } else if (flags & MMU_TEST_WIDTH_16BIT) {
+ /*
+ * HLV.HU rd, (rs1) instruction
+ * 0110010 00001 rs1 100 rd 1110011
+ */
+ asm volatile ("\n"
+ ".option push\n"
+ ".option norvc\n"
+ "add t1, %[tinfo], zero\n"
+ "add t2, %[addr], zero\n"
+ /*
+ * HLV.HU t0, (t2)
+ * 0110010 00001 00111 100 00101 1110011
+ */
+ ".word 0x6413c2f3\n"
+ "add %[tmp], t0, zero\n"
+ ".option pop"
+ : [tinfo] "+&r"(tinfo), [tmp] "=&r" (tmp)
+ : [addr] "r"(addr)
+ : "t0", "t1", "t2", "memory");
+ } else if (flags & MMU_TEST_WIDTH_32BIT) {
+ /*
+ * HLV.WU rd, (rs1) instruction
+ * 0110100 00001 rs1 100 rd 1110011
+ *
+ * HLV.W rd, (rs1) instruction
+ * 0110100 00000 rs1 100 rd 1110011
+ */
+ asm volatile ("\n"
+ ".option push\n"
+ ".option norvc\n"
+ "add t1, %[tinfo], zero\n"
+ "add t2, %[addr], zero\n"
+#ifdef CONFIG_64BIT
+ /*
+ * HLV.WU t0, (t2)
+ * 0110100 00001 00111 100 00101 1110011
+ */
+ ".word 0x6813c2f3\n"
+#else
+ /*
+ * HLV.W t0, (t2)
+ * 0110100 00000 00111 100 00101 1110011
+ */
+ ".word 0x6803c2f3\n"
+#endif
+ "add %[tmp], t0, zero\n"
+ ".option pop"
+ : [tinfo] "+&r"(tinfo), [tmp] "=&r" (tmp)
+ : [addr] "r"(addr)
+ : "t0", "t1", "t2", "memory");
+ } else {
+ rc = VMM_EINVALID;
+ }
+ }
+
+ csr_write(CSR_HGATP, hgatp);
+ csr_write(CSR_VSATP, vsatp);
+ csr_write(CSR_STVEC, stvec);
+ hstatus = csr_swap(CSR_HSTATUS, hstatus);
+
+ arch_cpu_irq_restore(f);
+
+ /*
+ * We just polluted the TLB by running HSV/HLV instructions so let's
+ * clean up by invalidating all Guest and Host TLB entries.
+ */
+ __hfence_gvma_all();
+ __sfence_vma_all();
+
+ if (rc) {
+ return rc;
+ }
+
+ *out_fault_flags = 0;
+ *out_addr = 0;
+
+ if (trap.scause) {
+ switch (trap.scause) {
+ case CAUSE_LOAD_PAGE_FAULT:
+ *out_fault_flags |= MMU_TEST_FAULT_S1;
+ *out_fault_flags |= MMU_TEST_FAULT_READ;
+ break;
+ case CAUSE_STORE_PAGE_FAULT:
+ *out_fault_flags |= MMU_TEST_FAULT_S1;
+ *out_fault_flags |= MMU_TEST_FAULT_WRITE;
+ break;
+ case CAUSE_LOAD_GUEST_PAGE_FAULT:
+ *out_fault_flags |= MMU_TEST_FAULT_READ;
+ break;
+ case CAUSE_STORE_GUEST_PAGE_FAULT:
+ *out_fault_flags |= MMU_TEST_FAULT_WRITE;
+ break;
+ default:
+ *out_fault_flags |= MMU_TEST_FAULT_UNKNOWN;
+ break;
+ };
+
+ if (!(*out_fault_flags & MMU_TEST_FAULT_UNKNOWN)) {
+ if (!(hstatus & HSTATUS_GVA)) {
+ return VMM_EFAIL;
+ }
+ }
+
+ trap_gva = trap.stval;
+ trap_gpa = ((physical_addr_t)trap.htval << 2);
+ trap_gpa |= ((physical_addr_t)trap.stval & 0x3);
+
+ if (*out_fault_flags & MMU_TEST_FAULT_S1) {
+ if (!s1_avail) {
+ return VMM_EFAIL;
+ }
+
+ s1_pgtbl = mmu_pgtbl_find(MMU_STAGE1, s1_tbl_pa);
+ if (!s1_pgtbl) {
+ return VMM_EFAIL;
+ }
+
+ if (mmu_get_page(s1_pgtbl, trap_gva, &pg)) {
+ *out_fault_flags |= MMU_TEST_FAULT_NOMAP;
+ }
+
+ *out_addr = trap_gva;
+ } else {
+ s2_pgtbl = mmu_pgtbl_find(MMU_STAGE2, s2_tbl_pa);
+ if (!s2_pgtbl) {
+ return VMM_EFAIL;
+ }
+
+ if (mmu_get_page(s2_pgtbl, trap_gpa, &pg)) {
+ *out_fault_flags |= MMU_TEST_FAULT_NOMAP;
+ }
+
+ *out_addr = trap_gpa;
+ }
+ } else {
+ if (s1_avail) {
+ s1_pgtbl = mmu_pgtbl_find(MMU_STAGE1, s1_tbl_pa);
+ if (!s1_pgtbl) {
+ return VMM_EFAIL;
+ }
+
+ rc = mmu_get_page(s1_pgtbl, addr, &pg);
+ if (rc) {
+ return rc;
+ }
+
+ *out_addr = pg.oa | (addr & (pg.sz - 1));
+ } else {
+ *out_addr = addr;
+ }
+
+ s2_pgtbl = mmu_pgtbl_find(MMU_STAGE2, s2_tbl_pa);
+ if (!s2_pgtbl) {
+ return VMM_EFAIL;
+ }
+
+ rc = mmu_get_page(s2_pgtbl, *out_addr, &pg);
+ if (rc) {
+ return rc;
+ }
+
+ *out_addr = pg.oa | (*out_addr & (pg.sz - 1));
+ }
+
+ return rc;
}

physical_addr_t arch_mmu_stage2_current_pgtbl_addr(void)
diff --git a/arch/riscv/cpu/generic/cpu_vcpu_unpriv.S b/arch/riscv/cpu/generic/cpu_vcpu_unpriv.S
index 42de3993..8a7751d4 100644
--- a/arch/riscv/cpu/generic/cpu_vcpu_unpriv.S
+++ b/arch/riscv/cpu/generic/cpu_vcpu_unpriv.S
@@ -25,15 +25,7 @@
#include <riscv_asm.h>
#include <riscv_encoding.h>

-.macro SETUP_TRAP __insn_len, __stvec, __ttmp, __taddr
- /* Change to Temporary exception handler */
- la \__stvec, 998f
- csrrw \__stvec, CSR_STVEC, \__stvec
- j 999f
-
- /* Temporary exception handler */
- .align 2
-998:
+.macro TRAP_HANDLER __insn_len __ttmp, __taddr
csrr \__ttmp, CSR_SEPC
REG_S \__ttmp, RISCV_VCPU_TRAP_SEPC(\__taddr)
addi \__ttmp, \__ttmp, \__insn_len
@@ -47,6 +39,18 @@
csrr \__ttmp, CSR_HTINST
REG_S \__ttmp, RISCV_VCPU_TRAP_HTINST(\__taddr)
sret
+.endm
+
+.macro SETUP_TRAP __insn_len, __stvec, __ttmp, __taddr
+ /* Change to Temporary exception handler */
+ la \__stvec, 998f
+ csrrw \__stvec, CSR_STVEC, \__stvec
+ j 999f
+
+ /* Temporary exception handler */
+ .align 2
+998:
+ TRAP_HANDLER \__insn_len, \__ttmp, \__taddr
999:
.endm

@@ -65,6 +69,11 @@
RESTORE_TRAP \__stvec
.endm

+ .align 3
+ .global __cpu_vcpu_unpriv_trap_handler
+__cpu_vcpu_unpriv_trap_handler:
+ TRAP_HANDLER 4, t0, t1
+
.align 3
.global __cpu_vcpu_unpriv_read_insn
__cpu_vcpu_unpriv_read_insn:
diff --git a/arch/riscv/cpu/generic/include/cpu_vcpu_unpriv.h b/arch/riscv/cpu/generic/include/cpu_vcpu_unpriv.h
index 83aee5e2..5dd70a0b 100644
--- a/arch/riscv/cpu/generic/include/cpu_vcpu_unpriv.h
+++ b/arch/riscv/cpu/generic/include/cpu_vcpu_unpriv.h
@@ -28,6 +28,13 @@

struct cpu_vcpu_trap;

+/* Low-level unpriv trap handler
+ * Note: This trap handler clobbers the T0 and T1 registers
+ * Note: This trap handler uses T0 as a temporary register
+ * Note: This trap handler expects T1 to point to struct cpu_vcpu_trap
+ */
+void __cpu_vcpu_unpriv_trap_handler(void);
+
/* Read instruction from Guest memory
* Note: This function should only be called from normal context
*/
--
2.25.1

Anup Patel

unread,
Jun 9, 2020, 9:39:44 AM6/9/20
to xvisor...@googlegroups.com, Anup Patel
We add generic APIs in vmm_host_ram to get the start and end of
all host RAM banks. These APIs will help in nested MMU testing.

Signed-off-by: Anup Patel <an...@brainfault.org>
---
core/include/vmm_host_ram.h | 6 ++++++
core/vmm_host_ram.c | 32 ++++++++++++++++++++++++++++++++
2 files changed, 38 insertions(+)

diff --git a/core/include/vmm_host_ram.h b/core/include/vmm_host_ram.h
index b4623b52..a6337a56 100644
--- a/core/include/vmm_host_ram.h
+++ b/core/include/vmm_host_ram.h
@@ -72,6 +72,12 @@ u32 vmm_host_ram_total_free_frames(void);
/** Total frame count of all RAM banks */
u32 vmm_host_ram_total_frame_count(void);

+/** Start address of all RAM Banks */
+physical_addr_t vmm_host_ram_start(void);
+
+/** Last/end address of all RAM Banks */
+physical_addr_t vmm_host_ram_end(void);
+
/** Total size of all RAM Banks */
physical_size_t vmm_host_ram_total_size(void);

diff --git a/core/vmm_host_ram.c b/core/vmm_host_ram.c
index 29caba70..fea333b8 100644
--- a/core/vmm_host_ram.c
+++ b/core/vmm_host_ram.c
@@ -347,6 +347,38 @@ u32 vmm_host_ram_total_frame_count(void)
return ret;
}

+physical_addr_t vmm_host_ram_start(void)
+{
+ u32 bn;
+ physical_addr_t start, ret = 0;
+
+ ret -= 1;
+ for (bn = 0; bn < rctrl.bank_count; bn++) {
+ start = rctrl.banks[bn].start;
+ if (start <= ret) {
+ ret = start;
+ }
+ }
+
+ return ret;
+}
+
+physical_addr_t vmm_host_ram_end(void)
+{
+ u32 bn;
+ physical_addr_t end, ret = 0;
+
+ for (bn = 0; bn < rctrl.bank_count; bn++) {
+ end = rctrl.banks[bn].start + rctrl.banks[bn].size;
+ end -= 1;
+ if (ret <= end) {
+ ret = end;
+ }
+ }
+
+ return ret;
+}
+
physical_size_t vmm_host_ram_total_size(void)
{
u32 bn;
--
2.25.1

Anup Patel

unread,
Jun 9, 2020, 9:39:50 AM6/9/20
to xvisor...@googlegroups.com, Anup Patel
We add a nested MMU test suite which helps us verify the nested
MMU functionality of the underlying host using the generic MMU.

Signed-off-by: Anup Patel <an...@brainfault.org>
---
libs/wboxtest/nested_mmu/nested_mmu_test.h | 221 +++++++++++++
libs/wboxtest/nested_mmu/objects.mk | 35 +++
libs/wboxtest/{ => nested_mmu}/openconf.cfg | 24 +-
.../s1_hugepage_s2_hugepage_nordwr.c | 293 ++++++++++++++++++
.../s1_hugepage_s2_hugepage_rdonly.c | 293 ++++++++++++++++++
.../nested_mmu/s1_hugepage_s2_hugepage_rdwr.c | 279 +++++++++++++++++
.../nested_mmu/s1_page_s2_page_nordwr.c | 292 +++++++++++++++++
.../nested_mmu/s1_page_s2_page_rdonly.c | 292 +++++++++++++++++
.../nested_mmu/s1_page_s2_page_rdwr.c | 278 +++++++++++++++++
libs/wboxtest/nested_mmu/s2_hugepage_nordwr.c | 145 +++++++++
libs/wboxtest/nested_mmu/s2_hugepage_rdonly.c | 153 +++++++++
libs/wboxtest/nested_mmu/s2_hugepage_rdwr.c | 208 +++++++++++++
libs/wboxtest/nested_mmu/s2_page_nordwr.c | 145 +++++++++
libs/wboxtest/nested_mmu/s2_page_rdonly.c | 152 +++++++++
libs/wboxtest/nested_mmu/s2_page_rdwr.c | 207 +++++++++++++
libs/wboxtest/openconf.cfg | 1 +
16 files changed, 3003 insertions(+), 15 deletions(-)
create mode 100755 libs/wboxtest/nested_mmu/nested_mmu_test.h
create mode 100644 libs/wboxtest/nested_mmu/objects.mk
copy libs/wboxtest/{ => nested_mmu}/openconf.cfg (70%)
create mode 100755 libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_nordwr.c
create mode 100755 libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdonly.c
create mode 100755 libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdwr.c
create mode 100755 libs/wboxtest/nested_mmu/s1_page_s2_page_nordwr.c
create mode 100755 libs/wboxtest/nested_mmu/s1_page_s2_page_rdonly.c
create mode 100755 libs/wboxtest/nested_mmu/s1_page_s2_page_rdwr.c
create mode 100755 libs/wboxtest/nested_mmu/s2_hugepage_nordwr.c
create mode 100755 libs/wboxtest/nested_mmu/s2_hugepage_rdonly.c
create mode 100755 libs/wboxtest/nested_mmu/s2_hugepage_rdwr.c
create mode 100755 libs/wboxtest/nested_mmu/s2_page_nordwr.c
create mode 100755 libs/wboxtest/nested_mmu/s2_page_rdonly.c
create mode 100755 libs/wboxtest/nested_mmu/s2_page_rdwr.c

diff --git a/libs/wboxtest/nested_mmu/nested_mmu_test.h b/libs/wboxtest/nested_mmu/nested_mmu_test.h
new file mode 100755
index 00000000..07b73822
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/nested_mmu_test.h
@@ -0,0 +1,221 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file nested_mmu_test.h
+ * @author Anup Patel (an...@brainfault.org)
+ * @brief Nested MMU test helper routines and macros
+ */
+
+#ifndef __NESTED_MMU_TEST__
+#define __NESTED_MMU_TEST__
+
+#include <vmm_error.h>
+#include <vmm_stdio.h>
+#include <vmm_limits.h>
+#include <vmm_guest_aspace.h>
+#include <vmm_host_aspace.h>
+#include <vmm_host_ram.h>
+#include <libs/wboxtest.h>
+#include <generic_mmu.h>
+
+#ifdef DEBUG
+#define DPRINTF(__cdev, __msg...) vmm_cprintf(__cdev, __msg)
+#else
+#define DPRINTF(__cdev, __msg...)
+#endif
+
+#define NESTED_MMU_TEST_RDWR_MEM_FLAGS VMM_MEMORY_FLAGS_NORMAL
+
+#define NESTED_MMU_TEST_RDONLY_MEM_FLAGS (VMM_MEMORY_FLAGS_NORMAL_WT & \
+ ~VMM_MEMORY_WRITEABLE)
+
+#define NESTED_MMU_TEST_NORDWR_MEM_FLAGS (VMM_MEMORY_FLAGS_IO & \
+ ~(VMM_MEMORY_READABLE | \
+ VMM_MEMORY_WRITEABLE))
+
+#define NESTED_MMU_TEST_RDWR_REG_FLAGS (VMM_REGION_REAL | \
+ VMM_REGION_MEMORY | \
+ VMM_REGION_CACHEABLE | \
+ VMM_REGION_BUFFERABLE | \
+ VMM_REGION_ISRAM)
+
+#define NESTED_MMU_TEST_RDONLY_REG_FLAGS (VMM_REGION_REAL | \
+ VMM_REGION_MEMORY | \
+ VMM_REGION_CACHEABLE | \
+ VMM_REGION_READONLY | \
+ VMM_REGION_ISROM)
+
+#define NESTED_MMU_TEST_NORDWR_REG_FLAGS (VMM_REGION_VIRTUAL | \
+ VMM_REGION_MEMORY | \
+ VMM_REGION_ISDEVICE)
+
+#define nested_mmu_test_best_min_addr(__pgtbl) \
+ ((mmu_pgtbl_stage(__pgtbl) == MMU_STAGE2) ? \
+ vmm_host_ram_end() : (mmu_pgtbl_map_addr_end(__pgtbl) / 4))
+
+#define nested_mmu_test_alloc_pages(__cdev, __test, __rc, __fail_label, \
+ __page_count, __mem_flags, \
+ __output_va_ptr, __output_pa_ptr) \
+do { \
+ DPRINTF((__cdev), "%s: Allocating %d Host pages ", \
+ (__test)->name, (__page_count)); \
+ *(__output_va_ptr) = vmm_host_alloc_pages((__page_count), \
+ (__mem_flags)); \
+ (__rc) = vmm_host_va2pa(*(__output_va_ptr), (__output_pa_ptr)); \
+ DPRINTF((__cdev), "(error %d)%s", (__rc), (__rc) ? "\n" : " "); \
+ if (__rc) { \
+ goto __fail_label; \
+ } \
+ DPRINTF((__cdev), "(0x%"PRIPADDR")\n", *(__output_pa_ptr)); \
+} while (0) \
+
+#define nested_mmu_test_free_pages(__cdev, __test, \
+ __va_ptr, __pa_ptr, __page_count) \
+do { \
+ DPRINTF((__cdev), "%s: Freeing %d Host pages (0x%"PRIPADDR")\n", \
+ (__test)->name, (__page_count), *(__pa_ptr)); \
+ vmm_host_free_pages(*(__va_ptr), (__page_count)); \
+} while (0)
+
+#define nested_mmu_test_alloc_hugepages(__cdev, __test, __rc, __fail_label, \
+ __page_count, __mem_flags, \
+ __output_va_ptr, __output_pa_ptr) \
+do { \
+ DPRINTF((__cdev), "%s: Allocating %d Host hugepages ", \
+ (__test)->name, (__page_count)); \
+ *(__output_va_ptr) = vmm_host_alloc_hugepages((__page_count), \
+ (__mem_flags)); \
+ (__rc) = vmm_host_va2pa(*(__output_va_ptr), (__output_pa_ptr)); \
+ DPRINTF((__cdev), "(error %d)%s", (__rc), (__rc) ? "\n" : " "); \
+ if (__rc) { \
+ goto __fail_label; \
+ } \
+ DPRINTF((__cdev), "(0x%"PRIPADDR")\n", *(__output_pa_ptr)); \
+} while (0) \
+
+#define nested_mmu_test_free_hugepages(__cdev, __test, \
+ __va_ptr, __pa_ptr, __page_count) \
+do { \
+ DPRINTF((__cdev), "%s: Freeing %d Host hugepages (0x%"PRIPADDR")\n", \
+ (__test)->name, (__page_count), *(__pa_ptr)); \
+ vmm_host_free_hugepages(*(__va_ptr), (__page_count)); \
+} while (0)
+
+#define nested_mmu_test_alloc_pgtbl(__cdev, __test, __rc, __fail_label, \
+ __stage, __output_pgtbl_double_ptr) \
+do { \
+ DPRINTF((__cdev), "%s: Allocating Stage%s page table ", \
+ (__test)->name, \
+ ((__stage) == MMU_STAGE2) ? "2" : "1"); \
+ *(__output_pgtbl_double_ptr) = mmu_pgtbl_alloc((__stage), -1); \
+ DPRINTF((__cdev), "%s", \
+ (!*(__output_pgtbl_double_ptr)) ? "(failed)\n" : ""); \
+ if (!*(__output_pgtbl_double_ptr)) { \
+ (__rc) = VMM_ENOMEM; \
+ goto __fail_label; \
+ } \
+ DPRINTF((__cdev), "(0x%"PRIPADDR")\n", \
+ mmu_pgtbl_physical_addr(*(__output_pgtbl_double_ptr))); \
+} while (0)
+
+#define nested_mmu_test_free_pgtbl(__cdev, __test, __pgtbl) \
+do { \
+ DPRINTF((__cdev), "%s: Freeing Stage%s page table (0x%"PRIPADDR")\n", \
+ (__test)->name, \
+ (mmu_pgtbl_stage(__pgtbl) == MMU_STAGE2) ? "2" : "1", \
+ mmu_pgtbl_physical_addr(__pgtbl)); \
+ mmu_pgtbl_free(__pgtbl); \
+} while (0)
+
+#define nested_mmu_test_find_free_addr(__cdev, __test, __rc, __fail_label, \
+ __pgtbl, __min_addr, __page_order, \
+ __output_addr_ptr) \
+do { \
+ DPRINTF((__cdev), "%s: Finding free Guest %s ", \
+ (__test)->name, \
+ (mmu_pgtbl_stage(__pgtbl) == MMU_STAGE2) ? "Phys" : "Virt"); \
+ (__rc) = mmu_find_free_address((__pgtbl), (__min_addr), \
+ (__page_order), (__output_addr_ptr)); \
+ DPRINTF((__cdev), "(error %d)%s", (__rc), (__rc) ? "\n" : " "); \
+ if (__rc) { \
+ goto __fail_label; \
+ } \
+ DPRINTF((__cdev), "(0x%"PRIPADDR")\n", *(__output_addr_ptr)); \
+} while (0)
+
+#define nested_mmu_test_map_pgtbl(__cdev, __test, __rc, __fail_label, \
+ __pgtbl, __guest_phys, __host_phys, \
+ __guest_size, __mem_or_reg_flags) \
+do { \
+ struct mmu_page __pg; \
+ __pg.ia = (__guest_phys); \
+ __pg.oa = (__host_phys); \
+ __pg.sz = (__guest_size); \
+ arch_mmu_pgflags_set(&__pg.flags, \
+ mmu_pgtbl_stage(__pgtbl), (__mem_or_reg_flags)); \
+ DPRINTF(cdev, "%s: Mapping Stage%s Guest %s 0x%"PRIPADDR" => " \
+ "%s Phys 0x%"PRIPADDR" (%ld KB)\n", (__test)->name, \
+ (mmu_pgtbl_stage(__pgtbl) == MMU_STAGE2) ? "2" : "1", \
+ (mmu_pgtbl_stage(__pgtbl) == MMU_STAGE2) ? "Phys" : "Virt", \
+ __pg.ia, \
+ (mmu_pgtbl_stage(__pgtbl) == MMU_STAGE2) ? "Host" : "Guest", \
+ __pg.oa, \
+ __pg.sz / SZ_1K); \
+ (__rc) = mmu_map_page((__pgtbl), &__pg); \
+ if (__rc) { \
+ goto __fail_label; \
+ } \
+} while (0)
+
+#define nested_mmu_test_idmap_stage1(__cdev, __test, __rc, __fail_label, \
+ __s2_pgtbl, __s1_pgtbl, __map_size, \
+ __reg_flags) \
+do { \
+ DPRINTF(cdev, "%s: Identity map Stage1 page table (0x%"PRIPADDR") " \
+ "in Stage2 page table (0x%"PRIPADDR") ", \
+ (__test)->name, \
+ mmu_pgtbl_physical_addr(__s1_pgtbl), \
+ mmu_pgtbl_physical_addr(__s2_pgtbl)); \
+ (__rc) = mmu_idmap_nested_pgtbl((__s2_pgtbl), (__s1_pgtbl), \
+ (__map_size), (__reg_flags)); \
+ DPRINTF((__cdev), "(error %d)\n", (__rc)); \
+ if (__rc) { \
+ goto __fail_label; \
+ } \
+} while (0)
+
+#define nested_mmu_test_execute(__cdev, __test, __rc, __fail_label, \
+ __s2_pgtbl, __s1_pgtbl, \
+ __va, __flags, __exp_addr, __exp_fault) \
+do { \
+ DPRINTF((__cdev), "%s: Checking %s%s%s%s at Guest Virt 0x%lx ", \
+ (__test)->name, \
+ ((__flags) & MMU_TEST_WRITE) ? "write" : "read", \
+ ((__flags) & MMU_TEST_WIDTH_8BIT) ? "8" : "", \
+ ((__flags) & MMU_TEST_WIDTH_16BIT) ? "16" : "", \
+ ((__flags) & MMU_TEST_WIDTH_32BIT) ? "32" : "", \
+ __va); \
+ (__rc) = mmu_test_nested_pgtbl((__s2_pgtbl), (__s1_pgtbl), (__flags), \
+ (unsigned long)(__va), (__exp_addr), (__exp_fault)); \
+ DPRINTF((__cdev), "(error %d)\n", (__rc)); \
+ if (__rc) { \
+ goto __fail_label; \
+ } \
+} while (0)
+
+#endif
diff --git a/libs/wboxtest/nested_mmu/objects.mk b/libs/wboxtest/nested_mmu/objects.mk
new file mode 100644
index 00000000..9073bef9
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/objects.mk
@@ -0,0 +1,35 @@
+#/**
+# Copyright (c) 2020 Anup Patel.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+#
+# @file objects.mk
+# @author Anup Patel (an...@brainfault.org)
+# @brief list of nested MMU test objects to be built
+# */
+
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s2_page_rdwr.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s2_page_rdonly.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s2_page_nordwr.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s2_hugepage_rdwr.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s2_hugepage_rdonly.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s2_hugepage_nordwr.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s1_page_s2_page_rdwr.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s1_page_s2_page_rdonly.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s1_page_s2_page_nordwr.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdwr.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdonly.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s1_hugepage_s2_hugepage_nordwr.o
diff --git a/libs/wboxtest/openconf.cfg b/libs/wboxtest/nested_mmu/openconf.cfg
similarity index 70%
copy from libs/wboxtest/openconf.cfg
copy to libs/wboxtest/nested_mmu/openconf.cfg
index cb310927..8a062525 100644
--- a/libs/wboxtest/openconf.cfg
+++ b/libs/wboxtest/nested_mmu/openconf.cfg
@@ -1,35 +1,29 @@
#/**
-# Copyright (c) 2016 Anup Patel.
+# Copyright (c) 2020 Anup Patel.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
-#
+#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
-#
+#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# @file openconf.cfg
# @author Anup Patel (an...@brainfault.org)
-# @brief config file for white-box testing library
+# @brief config file for nested MMU test
# */

-menuconfig CONFIG_WBOXTEST
- tristate "White-box testing library"
- default n
+config CONFIG_WBOXTEST_NESTED_MMU
+ tristate "Nested MMU Group"
+ depends on CONFIG_ARCH_GENERIC_MMU
+ default y
help
- Enable/Disable white-box testing library.
-
-if CONFIG_WBOXTEST
-
-source libs/wboxtest/threads/openconf.cfg
-source libs/wboxtest/stdio/openconf.cfg
-
-endif
+ Enable/Disable nested MMU test group.
diff --git a/libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_nordwr.c b/libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_nordwr.c
new file mode 100755
index 00000000..419a1f18
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_nordwr.c
@@ -0,0 +1,293 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s1_hugepage_s2_hugepage_nordwr.c
+ * @author Anup Patel (an...@brainfault.org)
+ * @brief s1_hugepage_s2_hugepage_nordwr test implementation
+ *
+ * This tests the handling of no-read-write hugepages in stage1 and
+ * stage2 page tables.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s1_hugepage_s2_hugepage_nordwr test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s1_hugepage_s2_hugepage_nordwr_init
+#define MODULE_EXIT s1_hugepage_s2_hugepage_nordwr_exit
+
+static int s1_hugepage_s2_hugepage_nordwr_run(struct wboxtest *test,
+ struct vmm_chardev *cdev,
+ u32 test_hcpu)
+{
+ int rc = VMM_OK;
+ struct mmu_pgtbl *s1_pgtbl;
+ struct mmu_pgtbl *s2_pgtbl;
+ virtual_addr_t map_host_va;
+ physical_addr_t map_host_pa;
+ physical_addr_t map_rdwr_s1_host_pa;
+ physical_addr_t map_rdwr_s2_host_pa;
+ physical_addr_t map_guest_va;
+ physical_addr_t map_guest_pa;
+ physical_addr_t map_rdwr_s1_guest_va;
+ physical_addr_t map_rdwr_s1_guest_pa;
+ physical_addr_t map_rdwr_s2_guest_va;
+ physical_addr_t map_rdwr_s2_guest_pa;
+
+ nested_mmu_test_alloc_hugepages(cdev, test, rc, fail,
+ 3, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+ map_rdwr_s1_host_pa = map_host_pa + vmm_host_hugepage_size();
+ map_rdwr_s2_host_pa = map_host_pa + (2 * vmm_host_hugepage_size());
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_hugepage,
+ MMU_STAGE1, &s1_pgtbl);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_s1_pgtbl,
+ MMU_STAGE2, &s2_pgtbl);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, nested_mmu_test_best_min_addr(s1_pgtbl),
+ vmm_host_hugepage_shift(), &map_guest_va);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+ vmm_host_hugepage_shift(), &map_guest_pa);
+
+ map_rdwr_s1_guest_va = map_guest_va + vmm_host_hugepage_size();
+ map_rdwr_s1_guest_pa = map_guest_pa + vmm_host_hugepage_size();
+ map_rdwr_s2_guest_va = map_guest_va + (2 * vmm_host_hugepage_size());
+ map_rdwr_s2_guest_pa = map_guest_pa + (2* vmm_host_hugepage_size());
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_guest_va, map_guest_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_NORDWR_MEM_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_rdwr_s1_guest_va, map_rdwr_s1_guest_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDWR_MEM_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_rdwr_s2_guest_va, map_rdwr_s2_guest_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_NORDWR_MEM_FLAGS);
+
+ nested_mmu_test_idmap_stage1(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl, vmm_host_hugepage_size(),
+ NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_guest_pa, map_host_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_NORDWR_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_rdwr_s1_guest_pa, map_rdwr_s1_host_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_NORDWR_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_rdwr_s2_guest_pa, map_rdwr_s2_host_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+#define chunk_start 0
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u8),
+ MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u8),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+#define chunk_start (1 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u16),
+ MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u16),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+#define chunk_start (2 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u32),
+ MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u32),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+fail_free_s2_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_s1_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s1_pgtbl);
+fail_free_host_hugepage:
+ nested_mmu_test_free_hugepages(cdev, test,
+ &map_host_va, &map_host_pa, 3);
+fail:
+ return rc;
+}
+
+static struct wboxtest s1_hugepage_s2_hugepage_nordwr = {
+ .name = "s1_hugepage_s2_hugepage_nordwr",
+ .run = s1_hugepage_s2_hugepage_nordwr_run,
+};
+
+static int __init s1_hugepage_s2_hugepage_nordwr_init(void)
+{
+ return wboxtest_register("nested_mmu", &s1_hugepage_s2_hugepage_nordwr);
+}
+
+static void __exit s1_hugepage_s2_hugepage_nordwr_exit(void)
+{
+ wboxtest_unregister(&s1_hugepage_s2_hugepage_nordwr);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+ MODULE_AUTHOR,
+ MODULE_LICENSE,
+ MODULE_IPRIORITY,
+ MODULE_INIT,
+ MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdonly.c b/libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdonly.c
new file mode 100755
index 00000000..957545e5
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdonly.c
@@ -0,0 +1,293 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s1_hugepage_s2_hugepage_rdonly.c
+ * @author Anup Patel (an...@brainfault.org)
+ * @brief s1_hugepage_s2_hugepage_rdonly test implementation
+ *
+ * This tests the handling of read-only hugepages in stage1 and
+ * stage2 page tables.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s1_hugepage_s2_hugepage_rdonly test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s1_hugepage_s2_hugepage_rdonly_init
+#define MODULE_EXIT s1_hugepage_s2_hugepage_rdonly_exit
+
+static int s1_hugepage_s2_hugepage_rdonly_run(struct wboxtest *test,
+ struct vmm_chardev *cdev,
+ u32 test_hcpu)
+{
+ int rc = VMM_OK;
+ struct mmu_pgtbl *s1_pgtbl;
+ struct mmu_pgtbl *s2_pgtbl;
+ virtual_addr_t map_host_va;
+ physical_addr_t map_host_pa;
+ physical_addr_t map_rdwr_s1_host_pa;
+ physical_addr_t map_rdwr_s2_host_pa;
+ physical_addr_t map_guest_va;
+ physical_addr_t map_guest_pa;
+ physical_addr_t map_rdwr_s1_guest_va;
+ physical_addr_t map_rdwr_s1_guest_pa;
+ physical_addr_t map_rdwr_s2_guest_va;
+ physical_addr_t map_rdwr_s2_guest_pa;
+
+ nested_mmu_test_alloc_hugepages(cdev, test, rc, fail,
+ 3, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+ map_rdwr_s1_host_pa = map_host_pa + vmm_host_hugepage_size();
+ map_rdwr_s2_host_pa = map_host_pa + (2 * vmm_host_hugepage_size());
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_hugepage,
+ MMU_STAGE1, &s1_pgtbl);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_s1_pgtbl,
+ MMU_STAGE2, &s2_pgtbl);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, nested_mmu_test_best_min_addr(s1_pgtbl),
+ vmm_host_hugepage_shift(), &map_guest_va);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+ vmm_host_hugepage_shift(), &map_guest_pa);
+
+ map_rdwr_s1_guest_va = map_guest_va + vmm_host_hugepage_size();
+ map_rdwr_s1_guest_pa = map_guest_pa + vmm_host_hugepage_size();
+ map_rdwr_s2_guest_va = map_guest_va + (2 * vmm_host_hugepage_size());
+ map_rdwr_s2_guest_pa = map_guest_pa + (2* vmm_host_hugepage_size());
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_guest_va, map_guest_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDONLY_MEM_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_rdwr_s1_guest_va, map_rdwr_s1_guest_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDWR_MEM_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_rdwr_s2_guest_va, map_rdwr_s2_guest_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDONLY_MEM_FLAGS);
+
+ nested_mmu_test_idmap_stage1(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl, vmm_host_hugepage_size(),
+ NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_guest_pa, map_host_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDONLY_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_rdwr_s1_guest_pa, map_rdwr_s1_host_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDONLY_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_rdwr_s2_guest_pa, map_rdwr_s2_host_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+#define chunk_start 0
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_host_pa + chunk_start + sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_rdwr_s1_host_pa + chunk_mid + sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u8),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_rdwr_s2_host_pa + chunk_end - sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+#define chunk_start (1 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_host_pa + chunk_start + sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_rdwr_s1_host_pa + chunk_mid + sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u16),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_rdwr_s2_host_pa + chunk_end - sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+#define chunk_start (2 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_host_pa + chunk_start + sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_rdwr_s1_host_pa + chunk_mid + sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u32),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_rdwr_s2_host_pa + chunk_end - sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+fail_free_s2_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_s1_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s1_pgtbl);
+fail_free_host_hugepage:
+ nested_mmu_test_free_hugepages(cdev, test,
+ &map_host_va, &map_host_pa, 3);
+fail:
+ return rc;
+}
+
+static struct wboxtest s1_hugepage_s2_hugepage_rdonly = {
+ .name = "s1_hugepage_s2_hugepage_rdonly",
+ .run = s1_hugepage_s2_hugepage_rdonly_run,
+};
+
+static int __init s1_hugepage_s2_hugepage_rdonly_init(void)
+{
+ return wboxtest_register("nested_mmu", &s1_hugepage_s2_hugepage_rdonly);
+}
+
+static void __exit s1_hugepage_s2_hugepage_rdonly_exit(void)
+{
+ wboxtest_unregister(&s1_hugepage_s2_hugepage_rdonly);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+ MODULE_AUTHOR,
+ MODULE_LICENSE,
+ MODULE_IPRIORITY,
+ MODULE_INIT,
+ MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdwr.c b/libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdwr.c
new file mode 100644
index 00000000..9fe087af
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdwr.c
@@ -0,0 +1,279 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s1_hugepage_s2_hugepage_rdwr.c
+ * @author Anup Patel (an...@brainfault.org)
+ * @brief s1_hugepage_s2_hugepage_rdwr test implementation
+ *
+ * This tests the handling of read-write hugepages in stage1 and
+ * stage2 page tables.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s1_hugepage_s2_hugepage_rdwr test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s1_hugepage_s2_hugepage_rdwr_init
+#define MODULE_EXIT s1_hugepage_s2_hugepage_rdwr_exit
+
+static int s1_hugepage_s2_hugepage_rdwr_run(struct wboxtest *test,
+ struct vmm_chardev *cdev,
+ u32 test_hcpu)
+{
+ int rc = VMM_OK;
+ struct mmu_pgtbl *s1_pgtbl;
+ struct mmu_pgtbl *s2_pgtbl;
+ virtual_addr_t map_host_va;
+ physical_addr_t map_host_pa;
+ physical_addr_t map_guest_va;
+ physical_addr_t map_guest_pa;
+ physical_addr_t map_nomap_s2_guest_va;
+ physical_addr_t map_nomap_s2_guest_pa;
+ physical_addr_t nomap_guest_va;
+
+ nested_mmu_test_alloc_hugepages(cdev, test, rc, fail,
+ 1, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_hugepage,
+ MMU_STAGE1, &s1_pgtbl);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_s1_pgtbl,
+ MMU_STAGE2, &s2_pgtbl);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, nested_mmu_test_best_min_addr(s1_pgtbl),
+ vmm_host_hugepage_shift(), &map_guest_va);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+ vmm_host_hugepage_shift(), &map_guest_pa);
+
+ map_nomap_s2_guest_va = map_guest_va + vmm_host_hugepage_size();
+ map_nomap_s2_guest_pa = map_guest_pa + vmm_host_hugepage_size();
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_guest_va, map_guest_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDWR_MEM_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_nomap_s2_guest_va, map_nomap_s2_guest_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDWR_MEM_FLAGS);
+
+ nested_mmu_test_idmap_stage1(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl, vmm_host_hugepage_size(),
+ NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_guest_pa, map_host_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_nomap_s2_guest_va + vmm_host_hugepage_size(),
+ vmm_host_hugepage_shift(), &nomap_guest_va);
+
+#define chunk_start 0
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_host_pa + chunk_start + sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_nomap_s2_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_nomap_s2_guest_pa + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ nomap_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ nomap_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_host_pa + chunk_end - sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_nomap_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_nomap_s2_guest_pa + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ nomap_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ nomap_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, nomap_guest_va + vmm_host_hugepage_size(),
+ vmm_host_hugepage_shift(), &nomap_guest_va);
+
+#define chunk_start (1 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_host_pa + chunk_start + sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_nomap_s2_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_nomap_s2_guest_pa + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ nomap_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ nomap_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_host_pa + chunk_end - sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_nomap_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_nomap_s2_guest_pa + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ nomap_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ nomap_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, nomap_guest_va + vmm_host_hugepage_size(),
+ vmm_host_hugepage_shift(), &nomap_guest_va);
+
+#define chunk_start (2 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_host_pa + chunk_start + sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_nomap_s2_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_nomap_s2_guest_pa + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ nomap_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ nomap_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_host_pa + chunk_end - sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_nomap_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_nomap_s2_guest_pa + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ nomap_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ nomap_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+fail_free_s2_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_s1_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s1_pgtbl);
+fail_free_host_hugepage:
+ nested_mmu_test_free_hugepages(cdev, test,
+ &map_host_va, &map_host_pa, 1);
+fail:
+ return rc;
+}
+
+static struct wboxtest s1_hugepage_s2_hugepage_rdwr = {
+ .name = "s1_hugepage_s2_hugepage_rdwr",
+ .run = s1_hugepage_s2_hugepage_rdwr_run,
+};
+
+static int __init s1_hugepage_s2_hugepage_rdwr_init(void)
+{
+ return wboxtest_register("nested_mmu", &s1_hugepage_s2_hugepage_rdwr);
+}
+
+static void __exit s1_hugepage_s2_hugepage_rdwr_exit(void)
+{
+ wboxtest_unregister(&s1_hugepage_s2_hugepage_rdwr);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+ MODULE_AUTHOR,
+ MODULE_LICENSE,
+ MODULE_IPRIORITY,
+ MODULE_INIT,
+ MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s1_page_s2_page_nordwr.c b/libs/wboxtest/nested_mmu/s1_page_s2_page_nordwr.c
new file mode 100644
index 00000000..796fef43
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s1_page_s2_page_nordwr.c
@@ -0,0 +1,292 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s1_page_s2_page_nordwr.c
+ * @author Anup Patel (an...@brainfault.org)
+ * @brief s1_page_s2_page_nordwr test implementation
+ *
+ * This tests the handling of no-read-write pages in stage1 and
+ * stage2 page tables.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s1_page_s2_page_nordwr test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s1_page_s2_page_nordwr_init
+#define MODULE_EXIT s1_page_s2_page_nordwr_exit
+
+static int s1_page_s2_page_nordwr_run(struct wboxtest *test,
+ struct vmm_chardev *cdev,
+ u32 test_hcpu)
+{
+ int rc = VMM_OK;
+ struct mmu_pgtbl *s1_pgtbl;
+ struct mmu_pgtbl *s2_pgtbl;
+ virtual_addr_t map_host_va;
+ physical_addr_t map_host_pa;
+ physical_addr_t map_rdwr_s1_host_pa;
+ physical_addr_t map_rdwr_s2_host_pa;
+ physical_addr_t map_guest_va;
+ physical_addr_t map_guest_pa;
+ physical_addr_t map_rdwr_s1_guest_va;
+ physical_addr_t map_rdwr_s1_guest_pa;
+ physical_addr_t map_rdwr_s2_guest_va;
+ physical_addr_t map_rdwr_s2_guest_pa;
+
+ nested_mmu_test_alloc_pages(cdev, test, rc, fail,
+ 3, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+ map_rdwr_s1_host_pa = map_host_pa + VMM_PAGE_SIZE;
+ map_rdwr_s2_host_pa = map_host_pa + (2 * VMM_PAGE_SIZE);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_page,
+ MMU_STAGE1, &s1_pgtbl);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_s1_pgtbl,
+ MMU_STAGE2, &s2_pgtbl);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, nested_mmu_test_best_min_addr(s1_pgtbl),
+ VMM_PAGE_SHIFT, &map_guest_va);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+ VMM_PAGE_SHIFT, &map_guest_pa);
+
+ map_rdwr_s1_guest_va = map_guest_va + VMM_PAGE_SIZE;
+ map_rdwr_s1_guest_pa = map_guest_pa + VMM_PAGE_SIZE;
+ map_rdwr_s2_guest_va = map_guest_va + (2 * VMM_PAGE_SIZE);
+ map_rdwr_s2_guest_pa = map_guest_pa + (2* VMM_PAGE_SIZE);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_guest_va, map_guest_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_NORDWR_MEM_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_rdwr_s1_guest_va, map_rdwr_s1_guest_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_RDWR_MEM_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_rdwr_s2_guest_va, map_rdwr_s2_guest_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_NORDWR_MEM_FLAGS);
+
+ nested_mmu_test_idmap_stage1(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl, VMM_PAGE_SIZE,
+ NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_guest_pa, map_host_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_NORDWR_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_rdwr_s1_guest_pa, map_rdwr_s1_host_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_NORDWR_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_rdwr_s2_guest_pa, map_rdwr_s2_host_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+#define chunk_start 0
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u8),
+ MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u8),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+#define chunk_start (1 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u16),
+ MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u16),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+#define chunk_start (2 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u32),
+ MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u32),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+fail_free_s2_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_s1_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s1_pgtbl);
+fail_free_host_page:
+ nested_mmu_test_free_pages(cdev, test, &map_host_va, &map_host_pa, 3);
+fail:
+ return rc;
+}
+
+static struct wboxtest s1_page_s2_page_nordwr = {
+ .name = "s1_page_s2_page_nordwr",
+ .run = s1_page_s2_page_nordwr_run,
+};
+
+static int __init s1_page_s2_page_nordwr_init(void)
+{
+ return wboxtest_register("nested_mmu", &s1_page_s2_page_nordwr);
+}
+
+static void __exit s1_page_s2_page_nordwr_exit(void)
+{
+ wboxtest_unregister(&s1_page_s2_page_nordwr);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+ MODULE_AUTHOR,
+ MODULE_LICENSE,
+ MODULE_IPRIORITY,
+ MODULE_INIT,
+ MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s1_page_s2_page_rdonly.c b/libs/wboxtest/nested_mmu/s1_page_s2_page_rdonly.c
new file mode 100644
index 00000000..18f5c378
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s1_page_s2_page_rdonly.c
@@ -0,0 +1,292 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s1_page_s2_page_rdonly.c
+ * @author Anup Patel (an...@brainfault.org)
+ * @brief s1_page_s2_page_rdonly test implementation
+ *
+ * This tests the handling of read-only pages in stage1 and
+ * stage2 page tables.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s1_page_s2_page_rdonly test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s1_page_s2_page_rdonly_init
+#define MODULE_EXIT s1_page_s2_page_rdonly_exit
+
+/*
+ * Page-granule layout used by this test (one page per region):
+ *
+ *   region           stage1 flags   stage2 flags
+ *   map_*            RDONLY         RDONLY
+ *   map_rdwr_s1_*    RDWR           RDONLY
+ *   map_rdwr_s2_*    RDONLY         RDWR
+ *
+ * Reads are expected to succeed in all three regions; writes are
+ * expected to fault at stage2 for map_rdwr_s1_* (no MMU_TEST_FAULT_S1
+ * in the expected fault flags) and at stage1 for the other two
+ * regions (MMU_TEST_FAULT_S1 set).
+ *
+ * NOTE(review): the nested_mmu_test_*() helpers are macros from
+ * nested_mmu_test.h that take "rc" and a goto label — presumably they
+ * branch to the label on failure; verify against the header.
+ */
+static int s1_page_s2_page_rdonly_run(struct wboxtest *test,
+				      struct vmm_chardev *cdev,
+				      u32 test_hcpu)
+{
+	int rc = VMM_OK;
+	struct mmu_pgtbl *s1_pgtbl;
+	struct mmu_pgtbl *s2_pgtbl;
+	virtual_addr_t map_host_va;
+	physical_addr_t map_host_pa;
+	physical_addr_t map_rdwr_s1_host_pa;
+	physical_addr_t map_rdwr_s2_host_pa;
+	physical_addr_t map_guest_va;
+	physical_addr_t map_guest_pa;
+	physical_addr_t map_rdwr_s1_guest_va;
+	physical_addr_t map_rdwr_s1_guest_pa;
+	physical_addr_t map_rdwr_s2_guest_va;
+	physical_addr_t map_rdwr_s2_guest_pa;
+
+	/* Three consecutive host pages back the three guest regions */
+	nested_mmu_test_alloc_pages(cdev, test, rc, fail,
+		3, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+	map_rdwr_s1_host_pa = map_host_pa + VMM_PAGE_SIZE;
+	map_rdwr_s2_host_pa = map_host_pa + (2 * VMM_PAGE_SIZE);
+
+	nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_page,
+				MMU_STAGE1, &s1_pgtbl);
+
+	nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_s1_pgtbl,
+				MMU_STAGE2, &s2_pgtbl);
+
+	/* Pick free page-aligned guest VA (stage1) and guest PA (stage2) */
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s1_pgtbl, nested_mmu_test_best_min_addr(s1_pgtbl),
+		VMM_PAGE_SHIFT, &map_guest_va);
+
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+		VMM_PAGE_SHIFT, &map_guest_pa);
+
+	map_rdwr_s1_guest_va = map_guest_va + VMM_PAGE_SIZE;
+	map_rdwr_s1_guest_pa = map_guest_pa + VMM_PAGE_SIZE;
+	map_rdwr_s2_guest_va = map_guest_va + (2 * VMM_PAGE_SIZE);
+	map_rdwr_s2_guest_pa = map_guest_pa + (2 * VMM_PAGE_SIZE);
+
+	/* Stage1 mappings: RDONLY, RDWR, RDONLY for the three regions */
+	nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+		s1_pgtbl, map_guest_va, map_guest_pa,
+		VMM_PAGE_SIZE, NESTED_MMU_TEST_RDONLY_MEM_FLAGS);
+
+	nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+		s1_pgtbl, map_rdwr_s1_guest_va, map_rdwr_s1_guest_pa,
+		VMM_PAGE_SIZE, NESTED_MMU_TEST_RDWR_MEM_FLAGS);
+
+	nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+		s1_pgtbl, map_rdwr_s2_guest_va, map_rdwr_s2_guest_pa,
+		VMM_PAGE_SIZE, NESTED_MMU_TEST_RDONLY_MEM_FLAGS);
+
+	/* Identity-map the stage1 page table itself at stage2 */
+	nested_mmu_test_idmap_stage1(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl, VMM_PAGE_SIZE,
+				NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+	/* Stage2 mappings: RDONLY, RDONLY, RDWR for the three regions */
+	nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, map_guest_pa, map_host_pa,
+		VMM_PAGE_SIZE, NESTED_MMU_TEST_RDONLY_REG_FLAGS);
+
+	nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, map_rdwr_s1_guest_pa, map_rdwr_s1_host_pa,
+		VMM_PAGE_SIZE, NESTED_MMU_TEST_RDONLY_REG_FLAGS);
+
+	nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, map_rdwr_s2_guest_pa, map_rdwr_s2_host_pa,
+		VMM_PAGE_SIZE, NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+	/* 8-bit accesses in the first quarter of each page */
+#define chunk_start	0
+#define chunk_end	(chunk_start + (VMM_PAGE_SIZE / 4))
+#define chunk_mid	(chunk_start + ((chunk_end - chunk_start) / 2))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_guest_va + chunk_start + sizeof(u8),
+				MMU_TEST_WIDTH_8BIT,
+				map_host_pa + chunk_start + sizeof(u8),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_guest_va + chunk_start + sizeof(u8),
+				MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+				map_guest_va + chunk_start + sizeof(u8),
+				MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_rdwr_s1_guest_va + chunk_mid + sizeof(u8),
+				MMU_TEST_WIDTH_8BIT,
+				map_rdwr_s1_host_pa + chunk_mid + sizeof(u8),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_rdwr_s1_guest_va + chunk_mid + sizeof(u8),
+				MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+				map_rdwr_s1_guest_pa + chunk_mid + sizeof(u8),
+				MMU_TEST_FAULT_WRITE);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+				MMU_TEST_WIDTH_8BIT,
+				map_rdwr_s2_host_pa + chunk_end - sizeof(u8),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+				MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+				map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+				MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+	/* 16-bit accesses in the second quarter of each page */
+#define chunk_start	(1 * (VMM_PAGE_SIZE / 4))
+#define chunk_end	(chunk_start + (VMM_PAGE_SIZE / 4))
+#define chunk_mid	(chunk_start + ((chunk_end - chunk_start) / 2))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_guest_va + chunk_start + sizeof(u16),
+				MMU_TEST_WIDTH_16BIT,
+				map_host_pa + chunk_start + sizeof(u16),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_guest_va + chunk_start + sizeof(u16),
+				MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+				map_guest_va + chunk_start + sizeof(u16),
+				MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_rdwr_s1_guest_va + chunk_mid + sizeof(u16),
+				MMU_TEST_WIDTH_16BIT,
+				map_rdwr_s1_host_pa + chunk_mid + sizeof(u16),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_rdwr_s1_guest_va + chunk_mid + sizeof(u16),
+				MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+				map_rdwr_s1_guest_pa + chunk_mid + sizeof(u16),
+				MMU_TEST_FAULT_WRITE);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+				MMU_TEST_WIDTH_16BIT,
+				map_rdwr_s2_host_pa + chunk_end - sizeof(u16),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+				MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+				map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+				MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+	/* 32-bit accesses in the third quarter of each page */
+#define chunk_start	(2 * (VMM_PAGE_SIZE / 4))
+#define chunk_end	(chunk_start + (VMM_PAGE_SIZE / 4))
+#define chunk_mid	(chunk_start + ((chunk_end - chunk_start) / 2))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_guest_va + chunk_start + sizeof(u32),
+				MMU_TEST_WIDTH_32BIT,
+				map_host_pa + chunk_start + sizeof(u32),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_guest_va + chunk_start + sizeof(u32),
+				MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+				map_guest_va + chunk_start + sizeof(u32),
+				MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_rdwr_s1_guest_va + chunk_mid + sizeof(u32),
+				MMU_TEST_WIDTH_32BIT,
+				map_rdwr_s1_host_pa + chunk_mid + sizeof(u32),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_rdwr_s1_guest_va + chunk_mid + sizeof(u32),
+				MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+				map_rdwr_s1_guest_pa + chunk_mid + sizeof(u32),
+				MMU_TEST_FAULT_WRITE);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+				MMU_TEST_WIDTH_32BIT,
+				map_rdwr_s2_host_pa + chunk_end - sizeof(u32),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+				MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+				map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+				MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+	/* Cleanup in reverse order of acquisition (goto-chain pattern) */
+fail_free_s2_pgtbl:
+	nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_s1_pgtbl:
+	nested_mmu_test_free_pgtbl(cdev, test, s1_pgtbl);
+fail_free_host_page:
+	nested_mmu_test_free_pages(cdev, test, &map_host_va, &map_host_pa, 3);
+fail:
+	return rc;
+}
+
+/* wboxtest descriptor: test name and run handler for the framework */
+static struct wboxtest s1_page_s2_page_rdonly = {
+	.name = "s1_page_s2_page_rdonly",
+	.run = s1_page_s2_page_rdonly_run,
+};
+
+/* Module init: register this test under the "nested_mmu" test group */
+static int __init s1_page_s2_page_rdonly_init(void)
+{
+	return wboxtest_register("nested_mmu", &s1_page_s2_page_rdonly);
+}
+
+/* Module exit: unregister the test from the wboxtest framework */
+static void __exit s1_page_s2_page_rdonly_exit(void)
+{
+	wboxtest_unregister(&s1_page_s2_page_rdonly);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+			MODULE_AUTHOR,
+			MODULE_LICENSE,
+			MODULE_IPRIORITY,
+			MODULE_INIT,
+			MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s1_page_s2_page_rdwr.c b/libs/wboxtest/nested_mmu/s1_page_s2_page_rdwr.c
new file mode 100755
index 00000000..45b3c1a7
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s1_page_s2_page_rdwr.c
@@ -0,0 +1,278 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s1_page_s2_page_rdwr.c
+ * @author Anup Patel (an...@brainfault.org)
+ * @brief s1_page_s2_page_rdwr test implementation
+ *
+ * This tests the handling of read-write pages in stage1 and
+ * stage2 page tables.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s1_page_s2_page_rdwr test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s1_page_s2_page_rdwr_init
+#define MODULE_EXIT s1_page_s2_page_rdwr_exit
+
+/*
+ * Page-granule layout used by this test:
+ *
+ *   region             stage1 mapping   stage2 mapping
+ *   map_*              RDWR             RDWR
+ *   map_nomap_s2_*     RDWR             (none)
+ *   nomap_guest_va     (none)           (none)
+ *
+ * Accesses to map_* must succeed; accesses to map_nomap_s2_* must
+ * fault with MMU_TEST_FAULT_NOMAP (stage2 unmapped); accesses to
+ * nomap_guest_va must fault with MMU_TEST_FAULT_S1 | FAULT_NOMAP
+ * (stage1 unmapped).
+ *
+ * NOTE(review): the nested_mmu_test_*() helpers are macros from
+ * nested_mmu_test.h taking "rc" and a goto label — presumably they
+ * branch to the label on failure; verify against the header.
+ */
+static int s1_page_s2_page_rdwr_run(struct wboxtest *test,
+				    struct vmm_chardev *cdev,
+				    u32 test_hcpu)
+{
+	int rc = VMM_OK;
+	struct mmu_pgtbl *s1_pgtbl;
+	struct mmu_pgtbl *s2_pgtbl;
+	virtual_addr_t map_host_va;
+	physical_addr_t map_host_pa;
+	physical_addr_t map_guest_va;
+	physical_addr_t map_guest_pa;
+	physical_addr_t map_nomap_s2_guest_va;
+	physical_addr_t map_nomap_s2_guest_pa;
+	physical_addr_t nomap_guest_va;
+
+	/* One host page backs the fully-mapped region */
+	nested_mmu_test_alloc_pages(cdev, test, rc, fail,
+		1, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+	nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_page,
+				MMU_STAGE1, &s1_pgtbl);
+
+	nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_s1_pgtbl,
+				MMU_STAGE2, &s2_pgtbl);
+
+	/* Pick free page-aligned guest VA (stage1) and guest PA (stage2) */
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s1_pgtbl, nested_mmu_test_best_min_addr(s1_pgtbl),
+		VMM_PAGE_SHIFT, &map_guest_va);
+
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+		VMM_PAGE_SHIFT, &map_guest_pa);
+
+	map_nomap_s2_guest_va = map_guest_va + VMM_PAGE_SIZE;
+	map_nomap_s2_guest_pa = map_guest_pa + VMM_PAGE_SIZE;
+
+	/* Stage1 maps both regions read-write */
+	nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+		s1_pgtbl, map_guest_va, map_guest_pa,
+		VMM_PAGE_SIZE, NESTED_MMU_TEST_RDWR_MEM_FLAGS);
+
+	nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+		s1_pgtbl, map_nomap_s2_guest_va, map_nomap_s2_guest_pa,
+		VMM_PAGE_SIZE, NESTED_MMU_TEST_RDWR_MEM_FLAGS);
+
+	/* Identity-map the stage1 page table itself at stage2 */
+	nested_mmu_test_idmap_stage1(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl, VMM_PAGE_SIZE,
+				NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+	/* Stage2 maps only map_guest_pa; map_nomap_s2_* stays unmapped */
+	nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, map_guest_pa, map_host_pa,
+		VMM_PAGE_SIZE, NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+	/* Find a VA with no stage1 mapping at all */
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s1_pgtbl, map_nomap_s2_guest_va + VMM_PAGE_SIZE,
+		VMM_PAGE_SHIFT, &nomap_guest_va);
+
+	/* 8-bit accesses in the first quarter of the page */
+#define chunk_start	0
+#define chunk_end	(chunk_start + (VMM_PAGE_SIZE / 4))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_guest_va + chunk_start + sizeof(u8),
+				MMU_TEST_WIDTH_8BIT,
+				map_host_pa + chunk_start + sizeof(u8),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_nomap_s2_guest_va + chunk_start + sizeof(u8),
+				MMU_TEST_WIDTH_8BIT,
+				map_nomap_s2_guest_pa + chunk_start + sizeof(u8),
+				MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				nomap_guest_va + chunk_start + sizeof(u8),
+				MMU_TEST_WIDTH_8BIT,
+				nomap_guest_va + chunk_start + sizeof(u8),
+				MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_guest_va + chunk_end - sizeof(u8),
+				MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+				map_host_pa + chunk_end - sizeof(u8),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_nomap_s2_guest_va + chunk_end - sizeof(u8),
+				MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+				map_nomap_s2_guest_pa + chunk_end - sizeof(u8),
+				MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				nomap_guest_va + chunk_end - sizeof(u8),
+				MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+				nomap_guest_va + chunk_end - sizeof(u8),
+				MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+	/* Refresh nomap VA in case the previous one got populated */
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s1_pgtbl, nomap_guest_va + VMM_PAGE_SIZE,
+		VMM_PAGE_SHIFT, &nomap_guest_va);
+
+	/* 16-bit accesses in the second quarter of the page */
+#define chunk_start	(1 * (VMM_PAGE_SIZE / 4))
+#define chunk_end	(chunk_start + (VMM_PAGE_SIZE / 4))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_guest_va + chunk_start + sizeof(u16),
+				MMU_TEST_WIDTH_16BIT,
+				map_host_pa + chunk_start + sizeof(u16),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_nomap_s2_guest_va + chunk_start + sizeof(u16),
+				MMU_TEST_WIDTH_16BIT,
+				map_nomap_s2_guest_pa + chunk_start + sizeof(u16),
+				MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				nomap_guest_va + chunk_start + sizeof(u16),
+				MMU_TEST_WIDTH_16BIT,
+				nomap_guest_va + chunk_start + sizeof(u16),
+				MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_guest_va + chunk_end - sizeof(u16),
+				MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+				map_host_pa + chunk_end - sizeof(u16),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_nomap_s2_guest_va + chunk_end - sizeof(u16),
+				MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+				map_nomap_s2_guest_pa + chunk_end - sizeof(u16),
+				MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				nomap_guest_va + chunk_end - sizeof(u16),
+				MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+				nomap_guest_va + chunk_end - sizeof(u16),
+				MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+	/* Refresh nomap VA in case the previous one got populated */
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s1_pgtbl, nomap_guest_va + VMM_PAGE_SIZE,
+		VMM_PAGE_SHIFT, &nomap_guest_va);
+
+	/* 32-bit accesses in the third quarter of the page */
+#define chunk_start	(2 * (VMM_PAGE_SIZE / 4))
+#define chunk_end	(chunk_start + (VMM_PAGE_SIZE / 4))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_guest_va + chunk_start + sizeof(u32),
+				MMU_TEST_WIDTH_32BIT,
+				map_host_pa + chunk_start + sizeof(u32),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_nomap_s2_guest_va + chunk_start + sizeof(u32),
+				MMU_TEST_WIDTH_32BIT,
+				map_nomap_s2_guest_pa + chunk_start + sizeof(u32),
+				MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				nomap_guest_va + chunk_start + sizeof(u32),
+				MMU_TEST_WIDTH_32BIT,
+				nomap_guest_va + chunk_start + sizeof(u32),
+				MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_guest_va + chunk_end - sizeof(u32),
+				MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+				map_host_pa + chunk_end - sizeof(u32),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				map_nomap_s2_guest_va + chunk_end - sizeof(u32),
+				MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+				map_nomap_s2_guest_pa + chunk_end - sizeof(u32),
+				MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, s1_pgtbl,
+				nomap_guest_va + chunk_end - sizeof(u32),
+				MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+				nomap_guest_va + chunk_end - sizeof(u32),
+				MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+	/* Cleanup in reverse order of acquisition (goto-chain pattern) */
+fail_free_s2_pgtbl:
+	nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_s1_pgtbl:
+	nested_mmu_test_free_pgtbl(cdev, test, s1_pgtbl);
+fail_free_host_page:
+	nested_mmu_test_free_pages(cdev, test, &map_host_va, &map_host_pa, 1);
+fail:
+	return rc;
+}
+
+/* wboxtest descriptor: test name and run handler for the framework */
+static struct wboxtest s1_page_s2_page_rdwr = {
+	.name = "s1_page_s2_page_rdwr",
+	.run = s1_page_s2_page_rdwr_run,
+};
+
+/* Module init: register this test under the "nested_mmu" test group */
+static int __init s1_page_s2_page_rdwr_init(void)
+{
+	return wboxtest_register("nested_mmu", &s1_page_s2_page_rdwr);
+}
+
+/* Module exit: unregister the test from the wboxtest framework */
+static void __exit s1_page_s2_page_rdwr_exit(void)
+{
+	wboxtest_unregister(&s1_page_s2_page_rdwr);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+			MODULE_AUTHOR,
+			MODULE_LICENSE,
+			MODULE_IPRIORITY,
+			MODULE_INIT,
+			MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s2_hugepage_nordwr.c b/libs/wboxtest/nested_mmu/s2_hugepage_nordwr.c
new file mode 100755
index 00000000..c7b6d5a5
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s2_hugepage_nordwr.c
@@ -0,0 +1,145 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s2_hugepage_nordwr.c
+ * @author Anup Patel (an...@brainfault.org)
+ * @brief s2_hugepage_nordwr test implementation
+ *
+ * This tests the handling of no-read-write hugepages in stage2 page table.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s2_hugepage_nordwr test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s2_hugepage_nordwr_init
+#define MODULE_EXIT s2_hugepage_nordwr_exit
+
+/*
+ * Stage2-only test (s1_pgtbl argument of nested_mmu_test_execute()
+ * is NULL): one hugepage is mapped at stage2 with NORDWR flags, so
+ * both reads and writes must fault at stage2 (no MMU_TEST_FAULT_S1
+ * in the expected flags). The mapping is backed by host PA 0 —
+ * presumably fine since the region is never actually accessible;
+ * verify against the mapping helper.
+ */
+static int s2_hugepage_nordwr_run(struct wboxtest *test,
+				  struct vmm_chardev *cdev,
+				  u32 test_hcpu)
+{
+	int rc = VMM_OK;
+	struct mmu_pgtbl *s2_pgtbl;
+	physical_addr_t map_guest_pa;
+
+	nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail,
+				MMU_STAGE2, &s2_pgtbl);
+
+	/* Pick a free hugepage-aligned guest PA in the stage2 table */
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+		vmm_host_hugepage_shift(), &map_guest_pa);
+
+	nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, map_guest_pa, 0,
+		vmm_host_hugepage_size(), NESTED_MMU_TEST_NORDWR_REG_FLAGS);
+
+	/* 8-bit read and write: both must fault at stage2 */
+#define chunk_start	0
+#define chunk_end	(chunk_start + (vmm_host_hugepage_size() / 4))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_start + sizeof(u8),
+				MMU_TEST_WIDTH_8BIT,
+				map_guest_pa + chunk_start + sizeof(u8),
+				MMU_TEST_FAULT_READ);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_end - sizeof(u8),
+				MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+				map_guest_pa + chunk_end - sizeof(u8),
+				MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+	/* 16-bit read and write: both must fault at stage2 */
+#define chunk_start	(1 * (vmm_host_hugepage_size() / 4))
+#define chunk_end	(chunk_start + (vmm_host_hugepage_size() / 4))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_start + sizeof(u16),
+				MMU_TEST_WIDTH_16BIT,
+				map_guest_pa + chunk_start + sizeof(u16),
+				MMU_TEST_FAULT_READ);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_end - sizeof(u16),
+				MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+				map_guest_pa + chunk_end - sizeof(u16),
+				MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+	/* 32-bit read and write: both must fault at stage2 */
+#define chunk_start	(2 * (vmm_host_hugepage_size() / 4))
+#define chunk_end	(chunk_start + (vmm_host_hugepage_size() / 4))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_start + sizeof(u32),
+				MMU_TEST_WIDTH_32BIT,
+				map_guest_pa + chunk_start + sizeof(u32),
+				MMU_TEST_FAULT_READ);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_end - sizeof(u32),
+				MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+				map_guest_pa + chunk_end - sizeof(u32),
+				MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+	/* Cleanup in reverse order of acquisition (goto-chain pattern) */
+fail_free_s2_pgtbl:
+	nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail:
+	return rc;
+}
+
+/* wboxtest descriptor: test name and run handler for the framework */
+static struct wboxtest s2_hugepage_nordwr = {
+	.name = "s2_hugepage_nordwr",
+	.run = s2_hugepage_nordwr_run,
+};
+
+/* Module init: register this test under the "nested_mmu" test group */
+static int __init s2_hugepage_nordwr_init(void)
+{
+	return wboxtest_register("nested_mmu", &s2_hugepage_nordwr);
+}
+
+/* Module exit: unregister the test from the wboxtest framework */
+static void __exit s2_hugepage_nordwr_exit(void)
+{
+	wboxtest_unregister(&s2_hugepage_nordwr);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+			MODULE_AUTHOR,
+			MODULE_LICENSE,
+			MODULE_IPRIORITY,
+			MODULE_INIT,
+			MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s2_hugepage_rdonly.c b/libs/wboxtest/nested_mmu/s2_hugepage_rdonly.c
new file mode 100755
index 00000000..633d9930
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s2_hugepage_rdonly.c
@@ -0,0 +1,153 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s2_hugepage_rdonly.c
+ * @author Anup Patel (an...@brainfault.org)
+ * @brief s2_hugepage_rdonly test implementation
+ *
+ * This tests the handling of read-only hugepages in stage2 page table.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s2_hugepage_rdonly test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s2_hugepage_rdonly_init
+#define MODULE_EXIT s2_hugepage_rdonly_exit
+
+/*
+ * Stage2-only test (s1_pgtbl argument of nested_mmu_test_execute()
+ * is NULL): one host hugepage is mapped read-only at stage2, so
+ * reads must succeed (expected value is the backing host PA) and
+ * writes must fault at stage2 with MMU_TEST_FAULT_WRITE.
+ */
+static int s2_hugepage_rdonly_run(struct wboxtest *test,
+				  struct vmm_chardev *cdev,
+				  u32 test_hcpu)
+{
+	int rc = VMM_OK;
+	struct mmu_pgtbl *s2_pgtbl;
+	virtual_addr_t map_host_va;
+	physical_addr_t map_host_pa;
+	physical_addr_t map_guest_pa;
+
+	/* One host hugepage backs the read-only stage2 mapping */
+	nested_mmu_test_alloc_hugepages(cdev, test, rc, fail,
+		1, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+	nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_hugepage,
+				MMU_STAGE2, &s2_pgtbl);
+
+	/* Pick a free hugepage-aligned guest PA in the stage2 table */
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+		vmm_host_hugepage_shift(), &map_guest_pa);
+
+	nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, map_guest_pa, map_host_pa,
+		vmm_host_hugepage_size(), NESTED_MMU_TEST_RDONLY_REG_FLAGS);
+
+	/* 8-bit: read succeeds, write faults */
+#define chunk_start	0
+#define chunk_end	(chunk_start + (vmm_host_hugepage_size() / 4))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_start + sizeof(u8),
+				MMU_TEST_WIDTH_8BIT,
+				map_host_pa + chunk_start + sizeof(u8),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_end - sizeof(u8),
+				MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+				map_guest_pa + chunk_end - sizeof(u8),
+				MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+	/* 16-bit: read succeeds, write faults */
+#define chunk_start	(1 * (vmm_host_hugepage_size() / 4))
+#define chunk_end	(chunk_start + (vmm_host_hugepage_size() / 4))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_start + sizeof(u16),
+				MMU_TEST_WIDTH_16BIT,
+				map_host_pa + chunk_start + sizeof(u16),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_end - sizeof(u16),
+				MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+				map_guest_pa + chunk_end - sizeof(u16),
+				MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+	/* 32-bit: read succeeds, write faults */
+#define chunk_start	(2 * (vmm_host_hugepage_size() / 4))
+#define chunk_end	(chunk_start + (vmm_host_hugepage_size() / 4))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_start + sizeof(u32),
+				MMU_TEST_WIDTH_32BIT,
+				map_host_pa + chunk_start + sizeof(u32),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_end - sizeof(u32),
+				MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+				map_guest_pa + chunk_end - sizeof(u32),
+				MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+	/* Cleanup in reverse order of acquisition (goto-chain pattern) */
+fail_free_s2_pgtbl:
+	nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_host_hugepage:
+	nested_mmu_test_free_hugepages(cdev, test,
+				       &map_host_va, &map_host_pa, 1);
+fail:
+	return rc;
+}
+
+/* wboxtest descriptor: test name and run handler for the framework */
+static struct wboxtest s2_hugepage_rdonly = {
+	.name = "s2_hugepage_rdonly",
+	.run = s2_hugepage_rdonly_run,
+};
+
+/* Module init: register this test under the "nested_mmu" test group */
+static int __init s2_hugepage_rdonly_init(void)
+{
+	return wboxtest_register("nested_mmu", &s2_hugepage_rdonly);
+}
+
+/* Module exit: unregister the test from the wboxtest framework */
+static void __exit s2_hugepage_rdonly_exit(void)
+{
+	wboxtest_unregister(&s2_hugepage_rdonly);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+			MODULE_AUTHOR,
+			MODULE_LICENSE,
+			MODULE_IPRIORITY,
+			MODULE_INIT,
+			MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s2_hugepage_rdwr.c b/libs/wboxtest/nested_mmu/s2_hugepage_rdwr.c
new file mode 100755
index 00000000..a19b4dcc
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s2_hugepage_rdwr.c
@@ -0,0 +1,208 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s2_hugepage_rdwr.c
+ * @author Anup Patel (an...@brainfault.org)
+ * @brief s2_hugepage_rdwr test implementation
+ *
+ * This tests the handling of read-write hugepages in stage2 page table.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s2_hugepage_rdwr test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s2_hugepage_rdwr_init
+#define MODULE_EXIT s2_hugepage_rdwr_exit
+
+/*
+ * Test body: map a single read-write hugepage into a fresh stage2 page
+ * table, then exercise 8/16/32-bit reads and writes through both the
+ * mapped guest range (expected to succeed and resolve to the backing
+ * host hugepage) and unmapped guest ranges (expected to raise NOMAP
+ * read/write faults).
+ *
+ * NOTE(review): all nested_mmu_test_*() helpers are macros that take
+ * rc and a failure label; presumably they set rc and branch to the
+ * label on error — confirm against nested_mmu_test.h.
+ */
+static int s2_hugepage_rdwr_run(struct wboxtest *test,
+				struct vmm_chardev *cdev,
+				u32 test_hcpu)
+{
+	int rc = VMM_OK;
+	struct mmu_pgtbl *s2_pgtbl;
+	virtual_addr_t map_host_va;	/* host VA of the backing hugepage */
+	physical_addr_t map_host_pa;	/* host PA of the backing hugepage */
+	physical_addr_t map_guest_pa;	/* guest PA mapped in stage2 */
+	physical_addr_t nomap_guest_pa;	/* guest PA deliberately left unmapped */
+
+	/* Allocate one read-write host hugepage to back the mapping. */
+	nested_mmu_test_alloc_hugepages(cdev, test, rc, fail,
+		1, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+	/* Create an empty stage2 page table for this test. */
+	nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_hugepage,
+				    MMU_STAGE2, &s2_pgtbl);
+
+	/* Find a free hugepage-aligned guest PA to place the mapping at. */
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+		vmm_host_hugepage_shift(), &map_guest_pa);
+
+	/* Map guest PA -> host hugepage with read-write region flags. */
+	nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, map_guest_pa, map_host_pa,
+		vmm_host_hugepage_size(), NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+	/* Also find a guest PA that stays unmapped, to provoke faults. */
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, map_guest_pa + vmm_host_hugepage_size(),
+		vmm_host_hugepage_shift(), &nomap_guest_pa);
+
+/* First quarter of the hugepage: 8-bit accesses. */
+#define chunk_start 0
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+
+	/* Read via mapped guest PA: expect success at the host PA. */
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_start + sizeof(u8),
+				MMU_TEST_WIDTH_8BIT,
+				map_host_pa + chunk_start + sizeof(u8),
+				0);
+
+	/* Read via unmapped guest PA: expect a NOMAP read fault. */
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				nomap_guest_pa + chunk_start + sizeof(u8),
+				MMU_TEST_WIDTH_8BIT,
+				nomap_guest_pa + chunk_start + sizeof(u8),
+				MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+	/* Write via mapped guest PA: expect success at the host PA. */
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_end - sizeof(u8),
+				MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+				map_host_pa + chunk_end - sizeof(u8),
+				0);
+
+	/* Write via unmapped guest PA: expect a NOMAP write fault. */
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				nomap_guest_pa + chunk_end - sizeof(u8),
+				MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+				nomap_guest_pa + chunk_end - sizeof(u8),
+				MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+	/* Move to a fresh unmapped guest PA for the next round. */
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, nomap_guest_pa + vmm_host_hugepage_size(),
+		vmm_host_hugepage_shift(), &nomap_guest_pa);
+
+/* Second quarter of the hugepage: 16-bit accesses. */
+#define chunk_start (1 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_start + sizeof(u16),
+				MMU_TEST_WIDTH_16BIT,
+				map_host_pa + chunk_start + sizeof(u16),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				nomap_guest_pa + chunk_start + sizeof(u16),
+				MMU_TEST_WIDTH_16BIT,
+				nomap_guest_pa + chunk_start + sizeof(u16),
+				MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_end - sizeof(u16),
+				MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+				map_host_pa + chunk_end - sizeof(u16),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				nomap_guest_pa + chunk_end - sizeof(u16),
+				MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+				nomap_guest_pa + chunk_end - sizeof(u16),
+				MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+	/* Move to a fresh unmapped guest PA for the next round. */
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, nomap_guest_pa + vmm_host_hugepage_size(),
+		vmm_host_hugepage_shift(), &nomap_guest_pa);
+
+/* Third quarter of the hugepage: 32-bit accesses. */
+#define chunk_start (2 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_start + sizeof(u32),
+				MMU_TEST_WIDTH_32BIT,
+				map_host_pa + chunk_start + sizeof(u32),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				nomap_guest_pa + chunk_start + sizeof(u32),
+				MMU_TEST_WIDTH_32BIT,
+				nomap_guest_pa + chunk_start + sizeof(u32),
+				MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_end - sizeof(u32),
+				MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+				map_host_pa + chunk_end - sizeof(u32),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				nomap_guest_pa + chunk_end - sizeof(u32),
+				MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+				nomap_guest_pa + chunk_end - sizeof(u32),
+				MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+/* Cleanup in reverse order of acquisition; the success path also
+ * falls through these labels, so resources are always released. */
+fail_free_s2_pgtbl:
+	nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_host_hugepage:
+	nested_mmu_test_free_hugepages(cdev, test,
+				       &map_host_va, &map_host_pa, 1);
+fail:
+	return rc;
+}
+
+/* wboxtest descriptor: registers this test under the "nested_mmu" group. */
+static struct wboxtest s2_hugepage_rdwr = {
+	.name = "s2_hugepage_rdwr",
+	.run = s2_hugepage_rdwr_run,
+};
+
+/* Module init: register the test case with the wboxtest framework. */
+static int __init s2_hugepage_rdwr_init(void)
+{
+	return wboxtest_register("nested_mmu", &s2_hugepage_rdwr);
+}
+
+/* Module exit: remove the test case from the wboxtest framework. */
+static void __exit s2_hugepage_rdwr_exit(void)
+{
+	wboxtest_unregister(&s2_hugepage_rdwr);
+}
+
+/* Standard Xvisor module declaration boilerplate. */
+VMM_DECLARE_MODULE(MODULE_DESC,
+			MODULE_AUTHOR,
+			MODULE_LICENSE,
+			MODULE_IPRIORITY,
+			MODULE_INIT,
+			MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s2_page_nordwr.c b/libs/wboxtest/nested_mmu/s2_page_nordwr.c
new file mode 100755
index 00000000..acbc4c2b
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s2_page_nordwr.c
@@ -0,0 +1,145 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s2_page_nordwr.c
+ * @author Anup Patel (an...@brainfault.org)
+ * @brief s2_page_nordwr test implementation
+ *
+ * This tests the handling of no-read-write pages in the stage2 page table.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s2_page_nordwr test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s2_page_nordwr_init
+#define MODULE_EXIT s2_page_nordwr_exit
+
+/*
+ * Test body: map a single guest page in a fresh stage2 page table with
+ * no-read-write region flags and verify that 8/16/32-bit reads and
+ * writes all fault.  The host PA passed to the mapping is 0 —
+ * presumably acceptable because the region can never be accessed;
+ * confirm against nested_mmu_test.h.
+ *
+ * NOTE(review): the nested_mmu_test_*() helpers are macros that take
+ * rc and a failure label; presumably they set rc and branch to the
+ * label on error.
+ */
+static int s2_page_nordwr_run(struct wboxtest *test,
+			      struct vmm_chardev *cdev,
+			      u32 test_hcpu)
+{
+	int rc = VMM_OK;
+	struct mmu_pgtbl *s2_pgtbl;
+	physical_addr_t map_guest_pa;	/* guest PA mapped as inaccessible */
+
+	/* Create an empty stage2 page table for this test. */
+	nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail,
+				    MMU_STAGE2, &s2_pgtbl);
+
+	/* Find a free page-aligned guest PA to place the mapping at. */
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+		VMM_PAGE_SHIFT, &map_guest_pa);
+
+	/* Map the guest page with no-read-write region flags. */
+	nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, map_guest_pa, 0,
+		VMM_PAGE_SIZE, NESTED_MMU_TEST_NORDWR_REG_FLAGS);
+
+/* First quarter of the page: 8-bit accesses, both must fault. */
+#define chunk_start 0
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+	/* Read: expect a read fault. */
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_start + sizeof(u8),
+				MMU_TEST_WIDTH_8BIT,
+				map_guest_pa + chunk_start + sizeof(u8),
+				MMU_TEST_FAULT_READ);
+
+	/* Write: expect a write fault. */
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_end - sizeof(u8),
+				MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+				map_guest_pa + chunk_end - sizeof(u8),
+				MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+/* Second quarter of the page: 16-bit accesses, both must fault. */
+#define chunk_start (1 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_start + sizeof(u16),
+				MMU_TEST_WIDTH_16BIT,
+				map_guest_pa + chunk_start + sizeof(u16),
+				MMU_TEST_FAULT_READ);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_end - sizeof(u16),
+				MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+				map_guest_pa + chunk_end - sizeof(u16),
+				MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+/* Third quarter of the page: 32-bit accesses, both must fault. */
+#define chunk_start (2 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_start + sizeof(u32),
+				MMU_TEST_WIDTH_32BIT,
+				map_guest_pa + chunk_start + sizeof(u32),
+				MMU_TEST_FAULT_READ);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_end - sizeof(u32),
+				MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+				map_guest_pa + chunk_end - sizeof(u32),
+				MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+/* Cleanup; the success path also falls through these labels. */
+fail_free_s2_pgtbl:
+	nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail:
+	return rc;
+}
+
+/* wboxtest descriptor: registers this test under the "nested_mmu" group. */
+static struct wboxtest s2_page_nordwr = {
+	.name = "s2_page_nordwr",
+	.run = s2_page_nordwr_run,
+};
+
+/* Module init: register the test case with the wboxtest framework. */
+static int __init s2_page_nordwr_init(void)
+{
+	return wboxtest_register("nested_mmu", &s2_page_nordwr);
+}
+
+/* Module exit: remove the test case from the wboxtest framework. */
+static void __exit s2_page_nordwr_exit(void)
+{
+	wboxtest_unregister(&s2_page_nordwr);
+}
+
+/* Standard Xvisor module declaration boilerplate. */
+VMM_DECLARE_MODULE(MODULE_DESC,
+			MODULE_AUTHOR,
+			MODULE_LICENSE,
+			MODULE_IPRIORITY,
+			MODULE_INIT,
+			MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s2_page_rdonly.c b/libs/wboxtest/nested_mmu/s2_page_rdonly.c
new file mode 100755
index 00000000..56c6402b
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s2_page_rdonly.c
@@ -0,0 +1,152 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s2_page_rdonly.c
+ * @author Anup Patel (an...@brainfault.org)
+ * @brief s2_page_rdonly test implementation
+ *
+ * This tests the handling of read-only pages in the stage2 page table.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s2_page_rdonly test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s2_page_rdonly_init
+#define MODULE_EXIT s2_page_rdonly_exit
+
+/*
+ * Test body: map a single read-write host page into a fresh stage2
+ * page table with read-only region flags, then verify that 8/16/32-bit
+ * reads succeed (resolving to the backing host page) while writes
+ * fault.
+ *
+ * NOTE(review): the nested_mmu_test_*() helpers are macros that take
+ * rc and a failure label; presumably they set rc and branch to the
+ * label on error.
+ */
+static int s2_page_rdonly_run(struct wboxtest *test,
+			      struct vmm_chardev *cdev,
+			      u32 test_hcpu)
+{
+	int rc = VMM_OK;
+	struct mmu_pgtbl *s2_pgtbl;
+	virtual_addr_t map_host_va;	/* host VA of the backing page */
+	physical_addr_t map_host_pa;	/* host PA of the backing page */
+	physical_addr_t map_guest_pa;	/* guest PA mapped read-only */
+
+	/* Allocate one read-write host page to back the mapping. */
+	nested_mmu_test_alloc_pages(cdev, test, rc, fail,
+		1, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+	/* Create an empty stage2 page table for this test. */
+	nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_page,
+				    MMU_STAGE2, &s2_pgtbl);
+
+	/* Find a free page-aligned guest PA to place the mapping at. */
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+		VMM_PAGE_SHIFT, &map_guest_pa);
+
+	/* Map guest PA -> host page with read-only region flags. */
+	nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, map_guest_pa, map_host_pa,
+		VMM_PAGE_SIZE, NESTED_MMU_TEST_RDONLY_REG_FLAGS);
+
+/* First quarter of the page: 8-bit accesses. */
+#define chunk_start 0
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+	/* Read: expect success at the host PA. */
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_start + sizeof(u8),
+				MMU_TEST_WIDTH_8BIT,
+				map_host_pa + chunk_start + sizeof(u8),
+				0);
+
+	/* Write: expect a write fault on the read-only mapping. */
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_end - sizeof(u8),
+				MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+				map_guest_pa + chunk_end - sizeof(u8),
+				MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+/* Second quarter of the page: 16-bit accesses. */
+#define chunk_start (1 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_start + sizeof(u16),
+				MMU_TEST_WIDTH_16BIT,
+				map_host_pa + chunk_start + sizeof(u16),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_end - sizeof(u16),
+				MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+				map_guest_pa + chunk_end - sizeof(u16),
+				MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+/* Third quarter of the page: 32-bit accesses. */
+#define chunk_start (2 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_start + sizeof(u32),
+				MMU_TEST_WIDTH_32BIT,
+				map_host_pa + chunk_start + sizeof(u32),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_end - sizeof(u32),
+				MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+				map_guest_pa + chunk_end - sizeof(u32),
+				MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+/* Cleanup in reverse order of acquisition; the success path also
+ * falls through these labels. */
+fail_free_s2_pgtbl:
+	nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_host_page:
+	nested_mmu_test_free_pages(cdev, test, &map_host_va, &map_host_pa, 1);
+fail:
+	return rc;
+}
+
+/* wboxtest descriptor: registers this test under the "nested_mmu" group. */
+static struct wboxtest s2_page_rdonly = {
+	.name = "s2_page_rdonly",
+	.run = s2_page_rdonly_run,
+};
+
+/* Module init: register the test case with the wboxtest framework. */
+static int __init s2_page_rdonly_init(void)
+{
+	return wboxtest_register("nested_mmu", &s2_page_rdonly);
+}
+
+/* Module exit: remove the test case from the wboxtest framework. */
+static void __exit s2_page_rdonly_exit(void)
+{
+	wboxtest_unregister(&s2_page_rdonly);
+}
+
+/* Standard Xvisor module declaration boilerplate. */
+VMM_DECLARE_MODULE(MODULE_DESC,
+			MODULE_AUTHOR,
+			MODULE_LICENSE,
+			MODULE_IPRIORITY,
+			MODULE_INIT,
+			MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s2_page_rdwr.c b/libs/wboxtest/nested_mmu/s2_page_rdwr.c
new file mode 100755
index 00000000..4201d122
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s2_page_rdwr.c
@@ -0,0 +1,207 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s2_page_rdwr.c
+ * @author Anup Patel (an...@brainfault.org)
+ * @brief s2_page_rdwr test implementation
+ *
+ * This tests the handling of read-write pages in stage2 page table.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s2_page_rdwr test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s2_page_rdwr_init
+#define MODULE_EXIT s2_page_rdwr_exit
+
+/*
+ * Test body: map a single read-write host page into a fresh stage2
+ * page table, then exercise 8/16/32-bit reads and writes through both
+ * the mapped guest page (expected to succeed and resolve to the
+ * backing host page) and unmapped guest pages (expected to raise
+ * NOMAP read/write faults).
+ *
+ * NOTE(review): the nested_mmu_test_*() helpers are macros that take
+ * rc and a failure label; presumably they set rc and branch to the
+ * label on error.
+ */
+static int s2_page_rdwr_run(struct wboxtest *test,
+			    struct vmm_chardev *cdev,
+			    u32 test_hcpu)
+{
+	int rc = VMM_OK;
+	struct mmu_pgtbl *s2_pgtbl;
+	virtual_addr_t map_host_va;	/* host VA of the backing page */
+	physical_addr_t map_host_pa;	/* host PA of the backing page */
+	physical_addr_t map_guest_pa;	/* guest PA mapped in stage2 */
+	physical_addr_t nomap_guest_pa;	/* guest PA deliberately left unmapped */
+
+	/* Allocate one read-write host page to back the mapping. */
+	nested_mmu_test_alloc_pages(cdev, test, rc, fail,
+		1, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+	/* Create an empty stage2 page table for this test. */
+	nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_page,
+				    MMU_STAGE2, &s2_pgtbl);
+
+	/* Find a free page-aligned guest PA to place the mapping at. */
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+		VMM_PAGE_SHIFT, &map_guest_pa);
+
+	/* Map guest PA -> host page with read-write region flags. */
+	nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, map_guest_pa, map_host_pa,
+		VMM_PAGE_SIZE, NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+	/* Also find a guest PA that stays unmapped, to provoke faults. */
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, map_guest_pa + VMM_PAGE_SIZE,
+		VMM_PAGE_SHIFT, &nomap_guest_pa);
+
+/* First quarter of the page: 8-bit accesses. */
+#define chunk_start 0
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+	/* Read via mapped guest PA: expect success at the host PA. */
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_start + sizeof(u8),
+				MMU_TEST_WIDTH_8BIT,
+				map_host_pa + chunk_start + sizeof(u8),
+				0);
+
+	/* Read via unmapped guest PA: expect a NOMAP read fault. */
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				nomap_guest_pa + chunk_start + sizeof(u8),
+				MMU_TEST_WIDTH_8BIT,
+				nomap_guest_pa + chunk_start + sizeof(u8),
+				MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+	/* Write via mapped guest PA: expect success at the host PA. */
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_end - sizeof(u8),
+				MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+				map_host_pa + chunk_end - sizeof(u8),
+				0);
+
+	/* Write via unmapped guest PA: expect a NOMAP write fault. */
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				nomap_guest_pa + chunk_end - sizeof(u8),
+				MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+				nomap_guest_pa + chunk_end - sizeof(u8),
+				MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+	/* Move to a fresh unmapped guest PA for the next round. */
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, nomap_guest_pa + VMM_PAGE_SIZE,
+		VMM_PAGE_SHIFT, &nomap_guest_pa);
+
+/* Second quarter of the page: 16-bit accesses. */
+#define chunk_start (1 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_start + sizeof(u16),
+				MMU_TEST_WIDTH_16BIT,
+				map_host_pa + chunk_start + sizeof(u16),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				nomap_guest_pa + chunk_start + sizeof(u16),
+				MMU_TEST_WIDTH_16BIT,
+				nomap_guest_pa + chunk_start + sizeof(u16),
+				MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_end - sizeof(u16),
+				MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+				map_host_pa + chunk_end - sizeof(u16),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				nomap_guest_pa + chunk_end - sizeof(u16),
+				MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+				nomap_guest_pa + chunk_end - sizeof(u16),
+				MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+	/* Move to a fresh unmapped guest PA for the next round. */
+	nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+		s2_pgtbl, nomap_guest_pa + VMM_PAGE_SIZE,
+		VMM_PAGE_SHIFT, &nomap_guest_pa);
+
+/* Third quarter of the page: 32-bit accesses. */
+#define chunk_start (2 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_start + sizeof(u32),
+				MMU_TEST_WIDTH_32BIT,
+				map_host_pa + chunk_start + sizeof(u32),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				nomap_guest_pa + chunk_start + sizeof(u32),
+				MMU_TEST_WIDTH_32BIT,
+				nomap_guest_pa + chunk_start + sizeof(u32),
+				MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				map_guest_pa + chunk_end - sizeof(u32),
+				MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+				map_host_pa + chunk_end - sizeof(u32),
+				0);
+
+	nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+				s2_pgtbl, NULL,
+				nomap_guest_pa + chunk_end - sizeof(u32),
+				MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+				nomap_guest_pa + chunk_end - sizeof(u32),
+				MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+/* Cleanup in reverse order of acquisition; the success path also
+ * falls through these labels. */
+fail_free_s2_pgtbl:
+	nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_host_page:
+	nested_mmu_test_free_pages(cdev, test, &map_host_va, &map_host_pa, 1);
+fail:
+	return rc;
+}
+
+/* wboxtest descriptor: registers this test under the "nested_mmu" group. */
+static struct wboxtest s2_page_rdwr = {
+	.name = "s2_page_rdwr",
+	.run = s2_page_rdwr_run,
+};
+
+/* Module init: register the test case with the wboxtest framework. */
+static int __init s2_page_rdwr_init(void)
+{
+	return wboxtest_register("nested_mmu", &s2_page_rdwr);
+}
+
+/* Module exit: remove the test case from the wboxtest framework. */
+static void __exit s2_page_rdwr_exit(void)
+{
+	wboxtest_unregister(&s2_page_rdwr);
+}
+
+/* Standard Xvisor module declaration boilerplate. */
+VMM_DECLARE_MODULE(MODULE_DESC,
+			MODULE_AUTHOR,
+			MODULE_LICENSE,
+			MODULE_IPRIORITY,
+			MODULE_INIT,
+			MODULE_EXIT);
diff --git a/libs/wboxtest/openconf.cfg b/libs/wboxtest/openconf.cfg
index cb310927..1f769e41 100644
--- a/libs/wboxtest/openconf.cfg
+++ b/libs/wboxtest/openconf.cfg
@@ -29,6 +29,7 @@ menuconfig CONFIG_WBOXTEST

if CONFIG_WBOXTEST

+source libs/wboxtest/nested_mmu/openconf.cfg
source libs/wboxtest/threads/openconf.cfg
source libs/wboxtest/stdio/openconf.cfg

--
2.25.1

Anup Patel

unread,
Jun 11, 2020, 12:40:36 AM6/11/20
to Xvisor Devel
Applied this patch to the xvisor-next tree.

Regards,
Anup

Anup Patel

unread,
Jun 11, 2020, 12:40:51 AM6/11/20
to Xvisor Devel

Anup Patel

unread,
Jun 11, 2020, 12:41:05 AM6/11/20
to Xvisor Devel

Anup Patel

unread,
Jun 11, 2020, 12:41:26 AM6/11/20
to Xvisor Devel

Anup Patel

unread,
Jun 11, 2020, 12:41:47 AM6/11/20
to Xvisor Devel
Reply all
Reply to author
Forward
0 new messages