We add nested MMU test-suite which helps us verify the nested
MMU functionality of the underlying host using generic MMU.
libs/wboxtest/nested_mmu/nested_mmu_test.h | 221 +++++++++++++
libs/wboxtest/nested_mmu/objects.mk | 35 +++
libs/wboxtest/{ => nested_mmu}/openconf.cfg | 24 +-
.../s1_hugepage_s2_hugepage_nordwr.c | 293 ++++++++++++++++++
.../s1_hugepage_s2_hugepage_rdonly.c | 293 ++++++++++++++++++
.../nested_mmu/s1_hugepage_s2_hugepage_rdwr.c | 279 +++++++++++++++++
.../nested_mmu/s1_page_s2_page_nordwr.c | 292 +++++++++++++++++
.../nested_mmu/s1_page_s2_page_rdonly.c | 292 +++++++++++++++++
.../nested_mmu/s1_page_s2_page_rdwr.c | 278 +++++++++++++++++
libs/wboxtest/nested_mmu/s2_hugepage_nordwr.c | 145 +++++++++
libs/wboxtest/nested_mmu/s2_hugepage_rdonly.c | 153 +++++++++
libs/wboxtest/nested_mmu/s2_hugepage_rdwr.c | 208 +++++++++++++
libs/wboxtest/nested_mmu/s2_page_nordwr.c | 145 +++++++++
libs/wboxtest/nested_mmu/s2_page_rdonly.c | 152 +++++++++
libs/wboxtest/nested_mmu/s2_page_rdwr.c | 207 +++++++++++++
libs/wboxtest/openconf.cfg | 1 +
16 files changed, 3003 insertions(+), 15 deletions(-)
create mode 100755 libs/wboxtest/nested_mmu/nested_mmu_test.h
create mode 100644 libs/wboxtest/nested_mmu/objects.mk
copy libs/wboxtest/{ => nested_mmu}/openconf.cfg (70%)
create mode 100755 libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_nordwr.c
create mode 100755 libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdonly.c
create mode 100755 libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdwr.c
create mode 100755 libs/wboxtest/nested_mmu/s1_page_s2_page_nordwr.c
create mode 100755 libs/wboxtest/nested_mmu/s1_page_s2_page_rdonly.c
create mode 100755 libs/wboxtest/nested_mmu/s1_page_s2_page_rdwr.c
create mode 100755 libs/wboxtest/nested_mmu/s2_hugepage_nordwr.c
create mode 100755 libs/wboxtest/nested_mmu/s2_hugepage_rdonly.c
create mode 100755 libs/wboxtest/nested_mmu/s2_hugepage_rdwr.c
create mode 100755 libs/wboxtest/nested_mmu/s2_page_nordwr.c
create mode 100755 libs/wboxtest/nested_mmu/s2_page_rdonly.c
create mode 100755 libs/wboxtest/nested_mmu/s2_page_rdwr.c
diff --git a/libs/wboxtest/nested_mmu/nested_mmu_test.h b/libs/wboxtest/nested_mmu/nested_mmu_test.h
new file mode 100755
index 00000000..07b73822
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/nested_mmu_test.h
@@ -0,0 +1,221 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file nested_mmu_test.h
+ * @author Anup Patel (anup@brainfault.org)
+ * @brief Nested MMU test helper routines and macros
+ */
+
+#ifndef __NESTED_MMU_TEST__
+#define __NESTED_MMU_TEST__
+
+#include <vmm_error.h>
+#include <vmm_stdio.h>
+#include <vmm_limits.h>
+#include <vmm_guest_aspace.h>
+#include <vmm_host_aspace.h>
+#include <vmm_host_ram.h>
+#include <libs/wboxtest.h>
+#include <generic_mmu.h>
+
+#ifdef DEBUG
+#define DPRINTF(__cdev, __msg...) vmm_cprintf(__cdev, __msg)
+#else
+#define DPRINTF(__cdev, __msg...)
+#endif
+
+#define NESTED_MMU_TEST_RDWR_MEM_FLAGS VMM_MEMORY_FLAGS_NORMAL
+
+#define NESTED_MMU_TEST_RDONLY_MEM_FLAGS (VMM_MEMORY_FLAGS_NORMAL_WT & \
+ ~VMM_MEMORY_WRITEABLE)
+
+#define NESTED_MMU_TEST_NORDWR_MEM_FLAGS (VMM_MEMORY_FLAGS_IO & \
+ ~(VMM_MEMORY_READABLE | \
+ VMM_MEMORY_WRITEABLE))
+
+#define NESTED_MMU_TEST_RDWR_REG_FLAGS (VMM_REGION_REAL | \
+ VMM_REGION_MEMORY | \
+ VMM_REGION_CACHEABLE | \
+ VMM_REGION_BUFFERABLE | \
+ VMM_REGION_ISRAM)
+
+#define NESTED_MMU_TEST_RDONLY_REG_FLAGS (VMM_REGION_REAL | \
+ VMM_REGION_MEMORY | \
+ VMM_REGION_CACHEABLE | \
+ VMM_REGION_READONLY | \
+ VMM_REGION_ISROM)
+
+#define NESTED_MMU_TEST_NORDWR_REG_FLAGS (VMM_REGION_VIRTUAL | \
+ VMM_REGION_MEMORY | \
+ VMM_REGION_ISDEVICE)
+
+#define nested_mmu_test_best_min_addr(__pgtbl) \
+ ((mmu_pgtbl_stage(__pgtbl) == MMU_STAGE2) ? \
+ vmm_host_ram_end() : (mmu_pgtbl_map_addr_end(__pgtbl) / 4))
+
+#define nested_mmu_test_alloc_pages(__cdev, __test, __rc, __fail_label, \
+ __page_count, __mem_flags, \
+ __output_va_ptr, __output_pa_ptr) \
+do { \
+ DPRINTF((__cdev), "%s: Allocating %d Host pages ", \
+ (__test)->name, (__page_count)); \
+ *(__output_va_ptr) = vmm_host_alloc_pages((__page_count), \
+ (__mem_flags)); \
+ (__rc) = vmm_host_va2pa(*(__output_va_ptr), (__output_pa_ptr)); \
+ DPRINTF((__cdev), "(error %d)%s", (__rc), (__rc) ? "\n" : " "); \
+ if (__rc) { \
+ goto __fail_label; \
+ } \
+ DPRINTF((__cdev), "(0x%"PRIPADDR")\n", *(__output_pa_ptr)); \
+} while (0) \
+
+#define nested_mmu_test_free_pages(__cdev, __test, \
+ __va_ptr, __pa_ptr, __page_count) \
+do { \
+ DPRINTF((__cdev), "%s: Freeing %d Host pages (0x%"PRIPADDR")\n", \
+ (__test)->name, (__page_count), *(__pa_ptr)); \
+ vmm_host_free_pages(*(__va_ptr), (__page_count)); \
+} while (0)
+
+#define nested_mmu_test_alloc_hugepages(__cdev, __test, __rc, __fail_label, \
+ __page_count, __mem_flags, \
+ __output_va_ptr, __output_pa_ptr) \
+do { \
+ DPRINTF((__cdev), "%s: Allocating %d Host hugepages ", \
+ (__test)->name, (__page_count)); \
+ *(__output_va_ptr) = vmm_host_alloc_hugepages((__page_count), \
+ (__mem_flags)); \
+ (__rc) = vmm_host_va2pa(*(__output_va_ptr), (__output_pa_ptr)); \
+ DPRINTF((__cdev), "(error %d)%s", (__rc), (__rc) ? "\n" : " "); \
+ if (__rc) { \
+ goto __fail_label; \
+ } \
+ DPRINTF((__cdev), "(0x%"PRIPADDR")\n", *(__output_pa_ptr)); \
+} while (0) \
+
+#define nested_mmu_test_free_hugepages(__cdev, __test, \
+ __va_ptr, __pa_ptr, __page_count) \
+do { \
+ DPRINTF((__cdev), "%s: Freeing %d Host hugepages (0x%"PRIPADDR")\n", \
+ (__test)->name, (__page_count), *(__pa_ptr)); \
+ vmm_host_free_hugepages(*(__va_ptr), (__page_count)); \
+} while (0)
+
+#define nested_mmu_test_alloc_pgtbl(__cdev, __test, __rc, __fail_label, \
+ __stage, __output_pgtbl_double_ptr) \
+do { \
+ DPRINTF((__cdev), "%s: Allocating Stage%s page table ", \
+ (__test)->name, \
+ ((__stage) == MMU_STAGE2) ? "2" : "1"); \
+ *(__output_pgtbl_double_ptr) = mmu_pgtbl_alloc((__stage), -1); \
+ DPRINTF((__cdev), "%s", \
+ (!*(__output_pgtbl_double_ptr)) ? "(failed)\n" : ""); \
+ if (!*(__output_pgtbl_double_ptr)) { \
+ (__rc) = VMM_ENOMEM; \
+ goto __fail_label; \
+ } \
+ DPRINTF((__cdev), "(0x%"PRIPADDR")\n", \
+ mmu_pgtbl_physical_addr(*(__output_pgtbl_double_ptr))); \
+} while (0)
+
+#define nested_mmu_test_free_pgtbl(__cdev, __test, __pgtbl) \
+do { \
+ DPRINTF((__cdev), "%s: Freeing Stage%s page table (0x%"PRIPADDR")\n", \
+ (__test)->name, \
+ (mmu_pgtbl_stage(__pgtbl) == MMU_STAGE2) ? "2" : "1", \
+ mmu_pgtbl_physical_addr(__pgtbl)); \
+ mmu_pgtbl_free(__pgtbl); \
+} while (0)
+
+#define nested_mmu_test_find_free_addr(__cdev, __test, __rc, __fail_label, \
+ __pgtbl, __min_addr, __page_order, \
+ __output_addr_ptr) \
+do { \
+ DPRINTF((__cdev), "%s: Finding free Guest %s ", \
+ (__test)->name, \
+ (mmu_pgtbl_stage(__pgtbl) == MMU_STAGE2) ? "Phys" : "Virt"); \
+ (__rc) = mmu_find_free_address((__pgtbl), (__min_addr), \
+ (__page_order), (__output_addr_ptr)); \
+ DPRINTF((__cdev), "(error %d)%s", (__rc), (__rc) ? "\n" : " "); \
+ if (__rc) { \
+ goto __fail_label; \
+ } \
+ DPRINTF((__cdev), "(0x%"PRIPADDR")\n", *(__output_addr_ptr)); \
+} while (0)
+
+#define nested_mmu_test_map_pgtbl(__cdev, __test, __rc, __fail_label, \
+ __pgtbl, __guest_phys, __host_phys, \
+ __guest_size, __mem_or_reg_flags) \
+do { \
+ struct mmu_page __pg; \
+ __pg.ia = (__guest_phys); \
+ __pg.oa = (__host_phys); \
+	__pg.sz = (__guest_size); \
+ arch_mmu_pgflags_set(&__pg.flags, \
+ mmu_pgtbl_stage(__pgtbl), (__mem_or_reg_flags)); \
+ DPRINTF(cdev, "%s: Mapping Stage%s Guest %s 0x%"PRIPADDR" => " \
+ "%s Phys 0x%"PRIPADDR" (%ld KB)\n", (__test)->name, \
+ (mmu_pgtbl_stage(__pgtbl) == MMU_STAGE2) ? "2" : "1", \
+ (mmu_pgtbl_stage(__pgtbl) == MMU_STAGE2) ? "Phys" : "Virt", \
+ __pg.ia, \
+ (mmu_pgtbl_stage(__pgtbl) == MMU_STAGE2) ? "Host" : "Guest", \
+ __pg.oa, \
+	__pg.sz / SZ_1K); \
+ (__rc) = mmu_map_page((__pgtbl), &__pg); \
+ if (__rc) { \
+ goto __fail_label; \
+ } \
+} while (0)
+
+#define nested_mmu_test_idmap_stage1(__cdev, __test, __rc, __fail_label, \
+ __s2_pgtbl, __s1_pgtbl, __map_size, \
+ __reg_flags) \
+do { \
+ DPRINTF(cdev, "%s: Identity map Stage1 page table (0x%"PRIPADDR") " \
+ "in Stage2 page table (0x%"PRIPADDR") ", \
+ (__test)->name, \
+ mmu_pgtbl_physical_addr(__s1_pgtbl), \
+ mmu_pgtbl_physical_addr(__s2_pgtbl)); \
+ (__rc) = mmu_idmap_nested_pgtbl((__s2_pgtbl), (__s1_pgtbl), \
+ (__map_size), (__reg_flags)); \
+ DPRINTF((__cdev), "(error %d)\n", (__rc)); \
+ if (__rc) { \
+ goto __fail_label; \
+ } \
+} while (0)
+
+#define nested_mmu_test_execute(__cdev, __test, __rc, __fail_label, \
+ __s2_pgtbl, __s1_pgtbl, \
+ __va, __flags, __exp_addr, __exp_fault) \
+do { \
+ DPRINTF((__cdev), "%s: Checking %s%s%s%s at Guest Virt 0x%lx ", \
+ (__test)->name, \
+ ((__flags) & MMU_TEST_WRITE) ? "write" : "read", \
+ ((__flags) & MMU_TEST_WIDTH_8BIT) ? "8" : "", \
+ ((__flags) & MMU_TEST_WIDTH_16BIT) ? "16" : "", \
+ ((__flags) & MMU_TEST_WIDTH_32BIT) ? "32" : "", \
+ __va); \
+ (__rc) = mmu_test_nested_pgtbl((__s2_pgtbl), (__s1_pgtbl), (__flags), \
+ (unsigned long)(__va), (__exp_addr), (__exp_fault)); \
+ DPRINTF((__cdev), "(error %d)\n", (__rc)); \
+ if (__rc) { \
+ goto __fail_label; \
+ } \
+} while (0)
+
+#endif
diff --git a/libs/wboxtest/nested_mmu/objects.mk b/libs/wboxtest/nested_mmu/objects.mk
new file mode 100644
index 00000000..9073bef9
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/objects.mk
@@ -0,0 +1,35 @@
+#/**
+# Copyright (c) 2020 Anup Patel.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+#
+# @file objects.mk
+# @author Anup Patel (anup@brainfault.org)
+# @brief list of nested MMU test objects to be build
+# */
+
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s2_page_rdwr.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s2_page_rdonly.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s2_page_nordwr.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s2_hugepage_rdwr.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s2_hugepage_rdonly.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s2_hugepage_nordwr.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s1_page_s2_page_rdwr.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s1_page_s2_page_rdonly.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s1_page_s2_page_nordwr.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdwr.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdonly.o
+libs-objs-$(CONFIG_WBOXTEST_NESTED_MMU) += wboxtest/nested_mmu/s1_hugepage_s2_hugepage_nordwr.o
diff --git a/libs/wboxtest/openconf.cfg b/libs/wboxtest/nested_mmu/openconf.cfg
similarity index 70%
copy from libs/wboxtest/openconf.cfg
copy to libs/wboxtest/nested_mmu/openconf.cfg
index cb310927..8a062525 100644
--- a/libs/wboxtest/openconf.cfg
+++ b/libs/wboxtest/nested_mmu/openconf.cfg
@@ -1,35 +1,29 @@
#/**
-# Copyright (c) 2016 Anup Patel.
+# Copyright (c) 2020 Anup Patel.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
-#
+#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
-#
+#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# @file openconf.cfg
# @author Anup Patel (anup@brainfault.org)
-# @brief config file for white-box testing library
+# @brief config file for nested MMU test
# */
-menuconfig CONFIG_WBOXTEST
- tristate "White-box testing library"
- default n
+config CONFIG_WBOXTEST_NESTED_MMU
+ tristate "Nested MMU Group"
+ depends on CONFIG_ARCH_GENERIC_MMU
+ default y
help
- Enable/Disable white-box testing library.
-
-if CONFIG_WBOXTEST
-
-source libs/wboxtest/threads/openconf.cfg
-source libs/wboxtest/stdio/openconf.cfg
-
-endif
+ Enable/Disable nested MMU test group.
diff --git a/libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_nordwr.c b/libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_nordwr.c
new file mode 100755
index 00000000..419a1f18
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_nordwr.c
@@ -0,0 +1,293 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s1_hugepage_s2_hugepage_nordwr.c
+ * @author Anup Patel (anup@brainfault.org)
+ * @brief s1_hugepage_s2_hugepage_nordwr test implementation
+ *
+ * This tests the handling of no-read-write hugepages in stage1 and
+ * stage2 page tables.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s1_hugepage_s2_hugepage_nordwr test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s1_hugepage_s2_hugepage_nordwr_init
+#define MODULE_EXIT s1_hugepage_s2_hugepage_nordwr_exit
+
+static int s1_hugepage_s2_hugepage_nordwr_run(struct wboxtest *test,
+ struct vmm_chardev *cdev,
+ u32 test_hcpu)
+{
+ int rc = VMM_OK;
+ struct mmu_pgtbl *s1_pgtbl;
+ struct mmu_pgtbl *s2_pgtbl;
+ virtual_addr_t map_host_va;
+ physical_addr_t map_host_pa;
+ physical_addr_t map_rdwr_s1_host_pa;
+ physical_addr_t map_rdwr_s2_host_pa;
+ physical_addr_t map_guest_va;
+ physical_addr_t map_guest_pa;
+ physical_addr_t map_rdwr_s1_guest_va;
+ physical_addr_t map_rdwr_s1_guest_pa;
+ physical_addr_t map_rdwr_s2_guest_va;
+ physical_addr_t map_rdwr_s2_guest_pa;
+
+ nested_mmu_test_alloc_hugepages(cdev, test, rc, fail,
+ 3, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+ map_rdwr_s1_host_pa = map_host_pa + vmm_host_hugepage_size();
+ map_rdwr_s2_host_pa = map_host_pa + (2 * vmm_host_hugepage_size());
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_hugepage,
+ MMU_STAGE1, &s1_pgtbl);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_s1_pgtbl,
+ MMU_STAGE2, &s2_pgtbl);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, nested_mmu_test_best_min_addr(s1_pgtbl),
+ vmm_host_hugepage_shift(), &map_guest_va);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+ vmm_host_hugepage_shift(), &map_guest_pa);
+
+ map_rdwr_s1_guest_va = map_guest_va + vmm_host_hugepage_size();
+ map_rdwr_s1_guest_pa = map_guest_pa + vmm_host_hugepage_size();
+ map_rdwr_s2_guest_va = map_guest_va + (2 * vmm_host_hugepage_size());
+ map_rdwr_s2_guest_pa = map_guest_pa + (2* vmm_host_hugepage_size());
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_guest_va, map_guest_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_NORDWR_MEM_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_rdwr_s1_guest_va, map_rdwr_s1_guest_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDWR_MEM_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_rdwr_s2_guest_va, map_rdwr_s2_guest_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_NORDWR_MEM_FLAGS);
+
+ nested_mmu_test_idmap_stage1(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl, vmm_host_hugepage_size(),
+ NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_guest_pa, map_host_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_NORDWR_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_rdwr_s1_guest_pa, map_rdwr_s1_host_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_NORDWR_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_rdwr_s2_guest_pa, map_rdwr_s2_host_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+#define chunk_start 0
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u8),
+ MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u8),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+#define chunk_start (1 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u16),
+ MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u16),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+#define chunk_start (2 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u32),
+ MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u32),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+fail_free_s2_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_s1_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s1_pgtbl);
+fail_free_host_hugepage:
+ nested_mmu_test_free_hugepages(cdev, test,
+ &map_host_va, &map_host_pa, 3);
+fail:
+ return rc;
+}
+
+static struct wboxtest s1_hugepage_s2_hugepage_nordwr = {
+ .name = "s1_hugepage_s2_hugepage_nordwr",
+ .run = s1_hugepage_s2_hugepage_nordwr_run,
+};
+
+static int __init s1_hugepage_s2_hugepage_nordwr_init(void)
+{
+ return wboxtest_register("nested_mmu", &s1_hugepage_s2_hugepage_nordwr);
+}
+
+static void __exit s1_hugepage_s2_hugepage_nordwr_exit(void)
+{
+ wboxtest_unregister(&s1_hugepage_s2_hugepage_nordwr);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+ MODULE_AUTHOR,
+ MODULE_LICENSE,
+ MODULE_IPRIORITY,
+ MODULE_INIT,
+ MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdonly.c b/libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdonly.c
new file mode 100755
index 00000000..957545e5
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdonly.c
@@ -0,0 +1,293 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s1_hugepage_s2_hugepage_rdonly.c
+ * @author Anup Patel (anup@brainfault.org)
+ * @brief s1_hugepage_s2_hugepage_rdonly test implementation
+ *
+ * This tests the handling of read-only hugepages in stage1 and
+ * stage2 page tables.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s1_hugepage_s2_hugepage_rdonly test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s1_hugepage_s2_hugepage_rdonly_init
+#define MODULE_EXIT s1_hugepage_s2_hugepage_rdonly_exit
+
+static int s1_hugepage_s2_hugepage_rdonly_run(struct wboxtest *test,
+ struct vmm_chardev *cdev,
+ u32 test_hcpu)
+{
+ int rc = VMM_OK;
+ struct mmu_pgtbl *s1_pgtbl;
+ struct mmu_pgtbl *s2_pgtbl;
+ virtual_addr_t map_host_va;
+ physical_addr_t map_host_pa;
+ physical_addr_t map_rdwr_s1_host_pa;
+ physical_addr_t map_rdwr_s2_host_pa;
+ physical_addr_t map_guest_va;
+ physical_addr_t map_guest_pa;
+ physical_addr_t map_rdwr_s1_guest_va;
+ physical_addr_t map_rdwr_s1_guest_pa;
+ physical_addr_t map_rdwr_s2_guest_va;
+ physical_addr_t map_rdwr_s2_guest_pa;
+
+ nested_mmu_test_alloc_hugepages(cdev, test, rc, fail,
+ 3, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+ map_rdwr_s1_host_pa = map_host_pa + vmm_host_hugepage_size();
+ map_rdwr_s2_host_pa = map_host_pa + (2 * vmm_host_hugepage_size());
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_hugepage,
+ MMU_STAGE1, &s1_pgtbl);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_s1_pgtbl,
+ MMU_STAGE2, &s2_pgtbl);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, nested_mmu_test_best_min_addr(s1_pgtbl),
+ vmm_host_hugepage_shift(), &map_guest_va);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+ vmm_host_hugepage_shift(), &map_guest_pa);
+
+ map_rdwr_s1_guest_va = map_guest_va + vmm_host_hugepage_size();
+ map_rdwr_s1_guest_pa = map_guest_pa + vmm_host_hugepage_size();
+ map_rdwr_s2_guest_va = map_guest_va + (2 * vmm_host_hugepage_size());
+ map_rdwr_s2_guest_pa = map_guest_pa + (2* vmm_host_hugepage_size());
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_guest_va, map_guest_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDONLY_MEM_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_rdwr_s1_guest_va, map_rdwr_s1_guest_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDWR_MEM_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_rdwr_s2_guest_va, map_rdwr_s2_guest_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDONLY_MEM_FLAGS);
+
+ nested_mmu_test_idmap_stage1(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl, vmm_host_hugepage_size(),
+ NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_guest_pa, map_host_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDONLY_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_rdwr_s1_guest_pa, map_rdwr_s1_host_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDONLY_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_rdwr_s2_guest_pa, map_rdwr_s2_host_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+#define chunk_start 0
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_host_pa + chunk_start + sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_rdwr_s1_host_pa + chunk_mid + sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u8),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_rdwr_s2_host_pa + chunk_end - sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+#define chunk_start (1 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_host_pa + chunk_start + sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_rdwr_s1_host_pa + chunk_mid + sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u16),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_rdwr_s2_host_pa + chunk_end - sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+#define chunk_start (2 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_host_pa + chunk_start + sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_rdwr_s1_host_pa + chunk_mid + sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u32),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_rdwr_s2_host_pa + chunk_end - sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+fail_free_s2_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_s1_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s1_pgtbl);
+fail_free_host_hugepage:
+ nested_mmu_test_free_hugepages(cdev, test,
+ &map_host_va, &map_host_pa, 3);
+fail:
+ return rc;
+}
+
+static struct wboxtest s1_hugepage_s2_hugepage_rdonly = {
+ .name = "s1_hugepage_s2_hugepage_rdonly",
+ .run = s1_hugepage_s2_hugepage_rdonly_run,
+};
+
+static int __init s1_hugepage_s2_hugepage_rdonly_init(void)
+{
+ return wboxtest_register("nested_mmu", &s1_hugepage_s2_hugepage_rdonly);
+}
+
+static void __exit s1_hugepage_s2_hugepage_rdonly_exit(void)
+{
+ wboxtest_unregister(&s1_hugepage_s2_hugepage_rdonly);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+ MODULE_AUTHOR,
+ MODULE_LICENSE,
+ MODULE_IPRIORITY,
+ MODULE_INIT,
+ MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdwr.c b/libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdwr.c
new file mode 100755
index 00000000..9fe087af
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s1_hugepage_s2_hugepage_rdwr.c
@@ -0,0 +1,279 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s1_hugepage_s2_hugepage_rdwr.c
+ * @author Anup Patel (anup@brainfault.org)
+ * @brief s1_hugepage_s2_hugepage_rdwr test implementation
+ *
+ * This tests the handling of read-write hugepages in stage1 and
+ * stage2 page tables.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s1_hugepage_s2_hugepage_rdwr test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s1_hugepage_s2_hugepage_rdwr_init
+#define MODULE_EXIT s1_hugepage_s2_hugepage_rdwr_exit
+
+static int s1_hugepage_s2_hugepage_rdwr_run(struct wboxtest *test,
+ struct vmm_chardev *cdev,
+ u32 test_hcpu)
+{
+ int rc = VMM_OK;
+ struct mmu_pgtbl *s1_pgtbl;
+ struct mmu_pgtbl *s2_pgtbl;
+ virtual_addr_t map_host_va;
+ physical_addr_t map_host_pa;
+ physical_addr_t map_guest_va;
+ physical_addr_t map_guest_pa;
+ physical_addr_t map_nomap_s2_guest_va;
+ physical_addr_t map_nomap_s2_guest_pa;
+ physical_addr_t nomap_guest_va;
+
+ nested_mmu_test_alloc_hugepages(cdev, test, rc, fail,
+ 1, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_hugepage,
+ MMU_STAGE1, &s1_pgtbl);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_s1_pgtbl,
+ MMU_STAGE2, &s2_pgtbl);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, nested_mmu_test_best_min_addr(s1_pgtbl),
+ vmm_host_hugepage_shift(), &map_guest_va);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+ vmm_host_hugepage_shift(), &map_guest_pa);
+
+ map_nomap_s2_guest_va = map_guest_va + vmm_host_hugepage_size();
+ map_nomap_s2_guest_pa = map_guest_pa + vmm_host_hugepage_size();
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_guest_va, map_guest_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDWR_MEM_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_nomap_s2_guest_va, map_nomap_s2_guest_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDWR_MEM_FLAGS);
+
+ nested_mmu_test_idmap_stage1(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl, vmm_host_hugepage_size(),
+ NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_guest_pa, map_host_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_nomap_s2_guest_va + vmm_host_hugepage_size(),
+ vmm_host_hugepage_shift(), &nomap_guest_va);
+
+#define chunk_start 0
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_host_pa + chunk_start + sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_nomap_s2_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_nomap_s2_guest_pa + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ nomap_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ nomap_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_host_pa + chunk_end - sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_nomap_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_nomap_s2_guest_pa + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ nomap_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ nomap_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, nomap_guest_va + vmm_host_hugepage_size(),
+ vmm_host_hugepage_shift(), &nomap_guest_va);
+
+#define chunk_start (1 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_host_pa + chunk_start + sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_nomap_s2_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_nomap_s2_guest_pa + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ nomap_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ nomap_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_host_pa + chunk_end - sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_nomap_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_nomap_s2_guest_pa + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ nomap_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ nomap_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, nomap_guest_va + vmm_host_hugepage_size(),
+ vmm_host_hugepage_shift(), &nomap_guest_va);
+
+#define chunk_start (2 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_host_pa + chunk_start + sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_nomap_s2_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_nomap_s2_guest_pa + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ nomap_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ nomap_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_host_pa + chunk_end - sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_nomap_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_nomap_s2_guest_pa + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ nomap_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ nomap_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+fail_free_s2_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_s1_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s1_pgtbl);
+fail_free_host_hugepage:
+ nested_mmu_test_free_hugepages(cdev, test,
+ &map_host_va, &map_host_pa, 1);
+fail:
+ return rc;
+}
+
+static struct wboxtest s1_hugepage_s2_hugepage_rdwr = {
+ .name = "s1_hugepage_s2_hugepage_rdwr",
+ .run = s1_hugepage_s2_hugepage_rdwr_run,
+};
+
+static int __init s1_hugepage_s2_hugepage_rdwr_init(void)
+{
+ return wboxtest_register("nested_mmu", &s1_hugepage_s2_hugepage_rdwr);
+}
+
+static void __exit s1_hugepage_s2_hugepage_rdwr_exit(void)
+{
+ wboxtest_unregister(&s1_hugepage_s2_hugepage_rdwr);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+ MODULE_AUTHOR,
+ MODULE_LICENSE,
+ MODULE_IPRIORITY,
+ MODULE_INIT,
+ MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s1_page_s2_page_nordwr.c b/libs/wboxtest/nested_mmu/s1_page_s2_page_nordwr.c
new file mode 100755
index 00000000..796fef43
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s1_page_s2_page_nordwr.c
@@ -0,0 +1,292 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s1_page_s2_page_nordwr.c
+ * @author Anup Patel (anup@brainfault.org)
+ * @brief s1_page_s2_page_nordwr test implementation
+ *
+ * This tests the handling of no-read-write pages in stage1 and
+ * stage2 page tables.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s1_page_s2_page_nordwr test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s1_page_s2_page_nordwr_init
+#define MODULE_EXIT s1_page_s2_page_nordwr_exit
+
+static int s1_page_s2_page_nordwr_run(struct wboxtest *test,
+ struct vmm_chardev *cdev,
+ u32 test_hcpu)
+{
+ int rc = VMM_OK;
+ struct mmu_pgtbl *s1_pgtbl;
+ struct mmu_pgtbl *s2_pgtbl;
+ virtual_addr_t map_host_va;
+ physical_addr_t map_host_pa;
+ physical_addr_t map_rdwr_s1_host_pa;
+ physical_addr_t map_rdwr_s2_host_pa;
+ physical_addr_t map_guest_va;
+ physical_addr_t map_guest_pa;
+ physical_addr_t map_rdwr_s1_guest_va;
+ physical_addr_t map_rdwr_s1_guest_pa;
+ physical_addr_t map_rdwr_s2_guest_va;
+ physical_addr_t map_rdwr_s2_guest_pa;
+
+ nested_mmu_test_alloc_pages(cdev, test, rc, fail,
+ 3, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+ map_rdwr_s1_host_pa = map_host_pa + VMM_PAGE_SIZE;
+ map_rdwr_s2_host_pa = map_host_pa + (2 * VMM_PAGE_SIZE);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_page,
+ MMU_STAGE1, &s1_pgtbl);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_s1_pgtbl,
+ MMU_STAGE2, &s2_pgtbl);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, nested_mmu_test_best_min_addr(s1_pgtbl),
+ VMM_PAGE_SHIFT, &map_guest_va);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+ VMM_PAGE_SHIFT, &map_guest_pa);
+
+ map_rdwr_s1_guest_va = map_guest_va + VMM_PAGE_SIZE;
+ map_rdwr_s1_guest_pa = map_guest_pa + VMM_PAGE_SIZE;
+ map_rdwr_s2_guest_va = map_guest_va + (2 * VMM_PAGE_SIZE);
+ map_rdwr_s2_guest_pa = map_guest_pa + (2 * VMM_PAGE_SIZE);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_guest_va, map_guest_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_NORDWR_MEM_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_rdwr_s1_guest_va, map_rdwr_s1_guest_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_RDWR_MEM_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_rdwr_s2_guest_va, map_rdwr_s2_guest_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_NORDWR_MEM_FLAGS);
+
+ nested_mmu_test_idmap_stage1(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl, VMM_PAGE_SIZE,
+ NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_guest_pa, map_host_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_NORDWR_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_rdwr_s1_guest_pa, map_rdwr_s1_host_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_NORDWR_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_rdwr_s2_guest_pa, map_rdwr_s2_host_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+#define chunk_start 0
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u8),
+ MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u8),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+#define chunk_start (1 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u16),
+ MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u16),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+#define chunk_start (2 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u32),
+ MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u32),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+fail_free_s2_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_s1_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s1_pgtbl);
+fail_free_host_page:
+ nested_mmu_test_free_pages(cdev, test, &map_host_va, &map_host_pa, 3);
+fail:
+ return rc;
+}
+
+static struct wboxtest s1_page_s2_page_nordwr = {
+ .name = "s1_page_s2_page_nordwr",
+ .run = s1_page_s2_page_nordwr_run,
+};
+
+static int __init s1_page_s2_page_nordwr_init(void)
+{
+ return wboxtest_register("nested_mmu", &s1_page_s2_page_nordwr);
+}
+
+static void __exit s1_page_s2_page_nordwr_exit(void)
+{
+ wboxtest_unregister(&s1_page_s2_page_nordwr);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+ MODULE_AUTHOR,
+ MODULE_LICENSE,
+ MODULE_IPRIORITY,
+ MODULE_INIT,
+ MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s1_page_s2_page_rdonly.c b/libs/wboxtest/nested_mmu/s1_page_s2_page_rdonly.c
new file mode 100755
index 00000000..18f5c378
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s1_page_s2_page_rdonly.c
@@ -0,0 +1,292 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s1_page_s2_page_rdonly.c
+ * @author Anup Patel (anup@brainfault.org)
+ * @brief s1_page_s2_page_rdonly test implementation
+ *
+ * This tests the handling of read-only pages in stage1 and
+ * stage2 page tables.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s1_page_s2_page_rdonly test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s1_page_s2_page_rdonly_init
+#define MODULE_EXIT s1_page_s2_page_rdonly_exit
+
+static int s1_page_s2_page_rdonly_run(struct wboxtest *test,
+ struct vmm_chardev *cdev,
+ u32 test_hcpu)
+{
+ int rc = VMM_OK;
+ struct mmu_pgtbl *s1_pgtbl;
+ struct mmu_pgtbl *s2_pgtbl;
+ virtual_addr_t map_host_va;
+ physical_addr_t map_host_pa;
+ physical_addr_t map_rdwr_s1_host_pa;
+ physical_addr_t map_rdwr_s2_host_pa;
+ physical_addr_t map_guest_va;
+ physical_addr_t map_guest_pa;
+ physical_addr_t map_rdwr_s1_guest_va;
+ physical_addr_t map_rdwr_s1_guest_pa;
+ physical_addr_t map_rdwr_s2_guest_va;
+ physical_addr_t map_rdwr_s2_guest_pa;
+
+ nested_mmu_test_alloc_pages(cdev, test, rc, fail,
+ 3, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+ map_rdwr_s1_host_pa = map_host_pa + VMM_PAGE_SIZE;
+ map_rdwr_s2_host_pa = map_host_pa + (2 * VMM_PAGE_SIZE);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_page,
+ MMU_STAGE1, &s1_pgtbl);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_s1_pgtbl,
+ MMU_STAGE2, &s2_pgtbl);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, nested_mmu_test_best_min_addr(s1_pgtbl),
+ VMM_PAGE_SHIFT, &map_guest_va);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+ VMM_PAGE_SHIFT, &map_guest_pa);
+
+ map_rdwr_s1_guest_va = map_guest_va + VMM_PAGE_SIZE;
+ map_rdwr_s1_guest_pa = map_guest_pa + VMM_PAGE_SIZE;
+ map_rdwr_s2_guest_va = map_guest_va + (2 * VMM_PAGE_SIZE);
+ map_rdwr_s2_guest_pa = map_guest_pa + (2 * VMM_PAGE_SIZE);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_guest_va, map_guest_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_RDONLY_MEM_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_rdwr_s1_guest_va, map_rdwr_s1_guest_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_RDWR_MEM_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_rdwr_s2_guest_va, map_rdwr_s2_guest_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_RDONLY_MEM_FLAGS);
+
+ nested_mmu_test_idmap_stage1(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl, VMM_PAGE_SIZE,
+ NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_guest_pa, map_host_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_RDONLY_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_rdwr_s1_guest_pa, map_rdwr_s1_host_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_RDONLY_REG_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_rdwr_s2_guest_pa, map_rdwr_s2_host_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+#define chunk_start 0
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_host_pa + chunk_start + sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_rdwr_s1_host_pa + chunk_mid + sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u8),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_rdwr_s2_host_pa + chunk_end - sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+#define chunk_start (1 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_host_pa + chunk_start + sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_rdwr_s1_host_pa + chunk_mid + sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u16),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_rdwr_s2_host_pa + chunk_end - sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+#define chunk_start (2 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+#define chunk_mid (chunk_start + ((chunk_end - chunk_start) / 2))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_host_pa + chunk_start + sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_rdwr_s1_host_pa + chunk_mid + sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s1_guest_va + chunk_mid + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_rdwr_s1_guest_pa + chunk_mid + sizeof(u32),
+ MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_rdwr_s2_host_pa + chunk_end - sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_rdwr_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+#undef chunk_mid
+
+fail_free_s2_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_s1_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s1_pgtbl);
+fail_free_host_page:
+ nested_mmu_test_free_pages(cdev, test, &map_host_va, &map_host_pa, 3);
+fail:
+ return rc;
+}
+
+static struct wboxtest s1_page_s2_page_rdonly = {
+ .name = "s1_page_s2_page_rdonly",
+ .run = s1_page_s2_page_rdonly_run,
+};
+
+static int __init s1_page_s2_page_rdonly_init(void)
+{
+ return wboxtest_register("nested_mmu", &s1_page_s2_page_rdonly);
+}
+
+static void __exit s1_page_s2_page_rdonly_exit(void)
+{
+ wboxtest_unregister(&s1_page_s2_page_rdonly);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+ MODULE_AUTHOR,
+ MODULE_LICENSE,
+ MODULE_IPRIORITY,
+ MODULE_INIT,
+ MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s1_page_s2_page_rdwr.c b/libs/wboxtest/nested_mmu/s1_page_s2_page_rdwr.c
new file mode 100755
index 00000000..45b3c1a7
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s1_page_s2_page_rdwr.c
@@ -0,0 +1,278 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s1_page_s2_page_rdwr.c
+ * @author Anup Patel (anup@brainfault.org)
+ * @brief s1_page_s2_page_rdwr test implementation
+ *
+ * This tests the handling of read-write pages in stage1 and
+ * stage2 page tables.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s1_page_s2_page_rdwr test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s1_page_s2_page_rdwr_init
+#define MODULE_EXIT s1_page_s2_page_rdwr_exit
+
+/*
+ * Run s1_page_s2_page_rdwr: exercise read-write page mappings with
+ * both a stage1 and a stage2 page table (nested MMU) active.
+ *
+ * Address layout used below:
+ *  - map_guest_va -> map_guest_pa -> map_host_pa: one page mapped
+ *    read-write in both stages, so accesses must succeed.
+ *  - map_nomap_s2_guest_va/_pa: the following page, mapped in stage1
+ *    only, so accesses must take a stage2 NOMAP fault.
+ *  - nomap_guest_va: mapped in neither stage, so accesses must take a
+ *    stage1 NOMAP fault.
+ * Each quarter of the page is probed with 8/16/32-bit loads and stores.
+ *
+ * NOTE(review): the nested_mmu_test_*() helpers are macros which set
+ * "rc" and jump to the given label on failure -- see nested_mmu_test.h.
+ */
+static int s1_page_s2_page_rdwr_run(struct wboxtest *test,
+ struct vmm_chardev *cdev,
+ u32 test_hcpu)
+{
+ int rc = VMM_OK;
+ struct mmu_pgtbl *s1_pgtbl;
+ struct mmu_pgtbl *s2_pgtbl;
+ virtual_addr_t map_host_va;
+ physical_addr_t map_host_pa;
+ physical_addr_t map_guest_va;
+ physical_addr_t map_guest_pa;
+ physical_addr_t map_nomap_s2_guest_va;
+ physical_addr_t map_nomap_s2_guest_pa;
+ physical_addr_t nomap_guest_va;
+
+ /* One read-write host page backs the fully mapped test page. */
+ nested_mmu_test_alloc_pages(cdev, test, rc, fail,
+ 1, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+ /* Fresh stage1 and stage2 page tables for this test. */
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_page,
+ MMU_STAGE1, &s1_pgtbl);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_s1_pgtbl,
+ MMU_STAGE2, &s2_pgtbl);
+
+ /* Pick a free page-aligned guest VA (stage1) and guest PA (stage2). */
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, nested_mmu_test_best_min_addr(s1_pgtbl),
+ VMM_PAGE_SHIFT, &map_guest_va);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+ VMM_PAGE_SHIFT, &map_guest_pa);
+
+ /* The next page gets a stage1 mapping but no stage2 mapping. */
+ map_nomap_s2_guest_va = map_guest_va + VMM_PAGE_SIZE;
+ map_nomap_s2_guest_pa = map_guest_pa + VMM_PAGE_SIZE;
+
+ /* Stage1: guest VA -> guest PA, read-write. */
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_guest_va, map_guest_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_RDWR_MEM_FLAGS);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_nomap_s2_guest_va, map_nomap_s2_guest_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_RDWR_MEM_FLAGS);
+
+ /*
+ * Identity-map stage1 page-table pages into stage2 -- presumably so
+ * nested page walks can read them; TODO confirm in nested_mmu_test.h.
+ */
+ nested_mmu_test_idmap_stage1(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl, VMM_PAGE_SIZE,
+ NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+ /* Stage2: guest PA -> host PA, read-write. */
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_guest_pa, map_host_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+ /* A VA mapped in neither stage, to provoke stage1 NOMAP faults. */
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, map_nomap_s2_guest_va + VMM_PAGE_SIZE,
+ VMM_PAGE_SHIFT, &nomap_guest_va);
+
+ /* 8-bit probes in the first quarter of the page. */
+#define chunk_start 0
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_host_pa + chunk_start + sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_nomap_s2_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_nomap_s2_guest_pa + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ nomap_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ nomap_guest_va + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_host_pa + chunk_end - sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_nomap_s2_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_nomap_s2_guest_pa + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ nomap_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ nomap_guest_va + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ /* Pick a fresh never-mapped VA for the 16-bit probes. */
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, nomap_guest_va + VMM_PAGE_SIZE,
+ VMM_PAGE_SHIFT, &nomap_guest_va);
+
+ /* 16-bit probes in the second quarter of the page. */
+#define chunk_start (1 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_host_pa + chunk_start + sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_nomap_s2_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_nomap_s2_guest_pa + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ nomap_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ nomap_guest_va + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_host_pa + chunk_end - sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_nomap_s2_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_nomap_s2_guest_pa + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ nomap_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ nomap_guest_va + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ /* Pick a fresh never-mapped VA for the 32-bit probes. */
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s1_pgtbl, nomap_guest_va + VMM_PAGE_SIZE,
+ VMM_PAGE_SHIFT, &nomap_guest_va);
+
+ /* 32-bit probes in the third quarter of the page. */
+#define chunk_start (2 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_host_pa + chunk_start + sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_nomap_s2_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_nomap_s2_guest_pa + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ nomap_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ nomap_guest_va + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_host_pa + chunk_end - sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ map_nomap_s2_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_nomap_s2_guest_pa + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, s1_pgtbl,
+ nomap_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ nomap_guest_va + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_S1 | MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ /* Cleanup (both error and success paths): free in reverse order. */
+fail_free_s2_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_s1_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s1_pgtbl);
+fail_free_host_page:
+ nested_mmu_test_free_pages(cdev, test, &map_host_va, &map_host_pa, 1);
+fail:
+ return rc;
+}
+
+/* wboxtest descriptor for the s1_page_s2_page_rdwr test case. */
+static struct wboxtest s1_page_s2_page_rdwr = {
+ .name = "s1_page_s2_page_rdwr",
+ .run = s1_page_s2_page_rdwr_run,
+};
+
+/* Module init: register this test under the "nested_mmu" group. */
+static int __init s1_page_s2_page_rdwr_init(void)
+{
+ return wboxtest_register("nested_mmu", &s1_page_s2_page_rdwr);
+}
+
+/* Module exit: remove this test from the wboxtest framework. */
+static void __exit s1_page_s2_page_rdwr_exit(void)
+{
+ wboxtest_unregister(&s1_page_s2_page_rdwr);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+ MODULE_AUTHOR,
+ MODULE_LICENSE,
+ MODULE_IPRIORITY,
+ MODULE_INIT,
+ MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s2_hugepage_nordwr.c b/libs/wboxtest/nested_mmu/s2_hugepage_nordwr.c
new file mode 100755
index 00000000..c7b6d5a5
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s2_hugepage_nordwr.c
@@ -0,0 +1,145 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s2_hugepage_nordwr.c
+ * @author Anup Patel (anup@brainfault.org)
+ * @brief s2_hugepage_nordwr test implementation
+ *
+ * This tests the handling of no-read-write hugepages in the stage2 page table.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s2_hugepage_nordwr test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s2_hugepage_nordwr_init
+#define MODULE_EXIT s2_hugepage_nordwr_exit
+
+/*
+ * Run s2_hugepage_nordwr: map one stage2 hugepage with neither read
+ * nor write permission and check that 8/16/32-bit loads and stores in
+ * three quarters of the hugepage all take the expected permission
+ * fault.  Stage1 is unused (NULL s1_pgtbl), so this is stage2-only.
+ *
+ * NOTE(review): the mapping targets host PA 0; this appears safe only
+ * because every access is expected to fault -- confirm.
+ */
+static int s2_hugepage_nordwr_run(struct wboxtest *test,
+ struct vmm_chardev *cdev,
+ u32 test_hcpu)
+{
+ int rc = VMM_OK;
+ struct mmu_pgtbl *s2_pgtbl;
+ physical_addr_t map_guest_pa;
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail,
+ MMU_STAGE2, &s2_pgtbl);
+
+ /* Find a free hugepage-aligned guest PA in the stage2 table. */
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+ vmm_host_hugepage_shift(), &map_guest_pa);
+
+ /* Map the hugepage with no read/write permission. */
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_guest_pa, 0,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_NORDWR_REG_FLAGS);
+
+ /* 8-bit probes in the first quarter: both read and write fault. */
+#define chunk_start 0
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_guest_pa + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_guest_pa + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ /* 16-bit probes in the second quarter. */
+#define chunk_start (1 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_guest_pa + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_guest_pa + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ /* 32-bit probes in the third quarter. */
+#define chunk_start (2 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_guest_pa + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_guest_pa + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ /* Cleanup (both error and success paths). */
+fail_free_s2_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail:
+ return rc;
+}
+
+/* wboxtest descriptor for the s2_hugepage_nordwr test case. */
+static struct wboxtest s2_hugepage_nordwr = {
+ .name = "s2_hugepage_nordwr",
+ .run = s2_hugepage_nordwr_run,
+};
+
+/* Module init: register this test under the "nested_mmu" group. */
+static int __init s2_hugepage_nordwr_init(void)
+{
+ return wboxtest_register("nested_mmu", &s2_hugepage_nordwr);
+}
+
+/* Module exit: remove this test from the wboxtest framework. */
+static void __exit s2_hugepage_nordwr_exit(void)
+{
+ wboxtest_unregister(&s2_hugepage_nordwr);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+ MODULE_AUTHOR,
+ MODULE_LICENSE,
+ MODULE_IPRIORITY,
+ MODULE_INIT,
+ MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s2_hugepage_rdonly.c b/libs/wboxtest/nested_mmu/s2_hugepage_rdonly.c
new file mode 100755
index 00000000..633d9930
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s2_hugepage_rdonly.c
@@ -0,0 +1,153 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s2_hugepage_rdonly.c
+ * @author Anup Patel (anup@brainfault.org)
+ * @brief s2_hugepage_rdonly test implementation
+ *
+ * This tests the handling of read-only hugepages in the stage2 page table.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s2_hugepage_rdonly test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s2_hugepage_rdonly_init
+#define MODULE_EXIT s2_hugepage_rdonly_exit
+
+/*
+ * Run s2_hugepage_rdonly: back a stage2 hugepage with a writable host
+ * hugepage but map it read-only in stage2.  8/16/32-bit loads must
+ * succeed and translate to the backing host PA; stores must take a
+ * write permission fault.  Stage1 is unused (NULL s1_pgtbl).
+ */
+static int s2_hugepage_rdonly_run(struct wboxtest *test,
+ struct vmm_chardev *cdev,
+ u32 test_hcpu)
+{
+ int rc = VMM_OK;
+ struct mmu_pgtbl *s2_pgtbl;
+ virtual_addr_t map_host_va;
+ physical_addr_t map_host_pa;
+ physical_addr_t map_guest_pa;
+
+ /* One read-write host hugepage backs the read-only mapping. */
+ nested_mmu_test_alloc_hugepages(cdev, test, rc, fail,
+ 1, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_hugepage,
+ MMU_STAGE2, &s2_pgtbl);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+ vmm_host_hugepage_shift(), &map_guest_pa);
+
+ /* Stage2: guest PA -> host PA, read-only. */
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_guest_pa, map_host_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDONLY_REG_FLAGS);
+
+ /* 8-bit probes in the first quarter: read succeeds, write faults. */
+#define chunk_start 0
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_host_pa + chunk_start + sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_guest_pa + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ /* 16-bit probes in the second quarter. */
+#define chunk_start (1 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_host_pa + chunk_start + sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_guest_pa + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ /* 32-bit probes in the third quarter. */
+#define chunk_start (2 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_host_pa + chunk_start + sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_guest_pa + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ /* Cleanup (both error and success paths). */
+fail_free_s2_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_host_hugepage:
+ nested_mmu_test_free_hugepages(cdev, test,
+ &map_host_va, &map_host_pa, 1);
+fail:
+ return rc;
+}
+
+/* wboxtest descriptor for the s2_hugepage_rdonly test case. */
+static struct wboxtest s2_hugepage_rdonly = {
+ .name = "s2_hugepage_rdonly",
+ .run = s2_hugepage_rdonly_run,
+};
+
+/* Module init: register this test under the "nested_mmu" group. */
+static int __init s2_hugepage_rdonly_init(void)
+{
+ return wboxtest_register("nested_mmu", &s2_hugepage_rdonly);
+}
+
+/* Module exit: remove this test from the wboxtest framework. */
+static void __exit s2_hugepage_rdonly_exit(void)
+{
+ wboxtest_unregister(&s2_hugepage_rdonly);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+ MODULE_AUTHOR,
+ MODULE_LICENSE,
+ MODULE_IPRIORITY,
+ MODULE_INIT,
+ MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s2_hugepage_rdwr.c b/libs/wboxtest/nested_mmu/s2_hugepage_rdwr.c
new file mode 100755
index 00000000..a19b4dcc
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s2_hugepage_rdwr.c
@@ -0,0 +1,208 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s2_hugepage_rdwr.c
+ * @author Anup Patel (anup@brainfault.org)
+ * @brief s2_hugepage_rdwr test implementation
+ *
+ * This tests the handling of read-write hugepages in stage2 page table.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s2_hugepage_rdwr test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s2_hugepage_rdwr_init
+#define MODULE_EXIT s2_hugepage_rdwr_exit
+
+/*
+ * Run s2_hugepage_rdwr: map one stage2 hugepage read-write onto a host
+ * hugepage and verify that 8/16/32-bit loads and stores succeed and
+ * translate to the backing host PA, while accesses to a neighbouring
+ * unmapped hugepage (nomap_guest_pa) take stage2 NOMAP faults.
+ * Stage1 is unused (NULL s1_pgtbl).
+ */
+static int s2_hugepage_rdwr_run(struct wboxtest *test,
+ struct vmm_chardev *cdev,
+ u32 test_hcpu)
+{
+ int rc = VMM_OK;
+ struct mmu_pgtbl *s2_pgtbl;
+ virtual_addr_t map_host_va;
+ physical_addr_t map_host_pa;
+ physical_addr_t map_guest_pa;
+ physical_addr_t nomap_guest_pa;
+
+ /* One read-write host hugepage backs the mapped test hugepage. */
+ nested_mmu_test_alloc_hugepages(cdev, test, rc, fail,
+ 1, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_hugepage,
+ MMU_STAGE2, &s2_pgtbl);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+ vmm_host_hugepage_shift(), &map_guest_pa);
+
+ /* Stage2: guest PA -> host PA, read-write. */
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_guest_pa, map_host_pa,
+ vmm_host_hugepage_size(), NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+ /* A hugepage-aligned guest PA that has no stage2 mapping. */
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_guest_pa + vmm_host_hugepage_size(),
+ vmm_host_hugepage_shift(), &nomap_guest_pa);
+
+ /* 8-bit probes in the first quarter of the hugepage. */
+#define chunk_start 0
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_host_pa + chunk_start + sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ nomap_guest_pa + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ nomap_guest_pa + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_host_pa + chunk_end - sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ nomap_guest_pa + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ nomap_guest_pa + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ /* Pick a fresh unmapped guest PA for the 16-bit probes. */
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nomap_guest_pa + vmm_host_hugepage_size(),
+ vmm_host_hugepage_shift(), &nomap_guest_pa);
+
+ /* 16-bit probes in the second quarter of the hugepage. */
+#define chunk_start (1 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_host_pa + chunk_start + sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ nomap_guest_pa + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ nomap_guest_pa + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_host_pa + chunk_end - sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ nomap_guest_pa + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ nomap_guest_pa + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ /* Pick a fresh unmapped guest PA for the 32-bit probes. */
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nomap_guest_pa + vmm_host_hugepage_size(),
+ vmm_host_hugepage_shift(), &nomap_guest_pa);
+
+ /* 32-bit probes in the third quarter of the hugepage. */
+#define chunk_start (2 * (vmm_host_hugepage_size() / 4))
+#define chunk_end (chunk_start + (vmm_host_hugepage_size() / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_host_pa + chunk_start + sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ nomap_guest_pa + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ nomap_guest_pa + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_host_pa + chunk_end - sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ nomap_guest_pa + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ nomap_guest_pa + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ /* Cleanup (both error and success paths). */
+fail_free_s2_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_host_hugepage:
+ nested_mmu_test_free_hugepages(cdev, test,
+ &map_host_va, &map_host_pa, 1);
+fail:
+ return rc;
+}
+
+/* wboxtest descriptor for the s2_hugepage_rdwr test case. */
+static struct wboxtest s2_hugepage_rdwr = {
+ .name = "s2_hugepage_rdwr",
+ .run = s2_hugepage_rdwr_run,
+};
+
+/* Module init: register this test under the "nested_mmu" group. */
+static int __init s2_hugepage_rdwr_init(void)
+{
+ return wboxtest_register("nested_mmu", &s2_hugepage_rdwr);
+}
+
+/* Module exit: remove this test from the wboxtest framework. */
+static void __exit s2_hugepage_rdwr_exit(void)
+{
+ wboxtest_unregister(&s2_hugepage_rdwr);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+ MODULE_AUTHOR,
+ MODULE_LICENSE,
+ MODULE_IPRIORITY,
+ MODULE_INIT,
+ MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s2_page_nordwr.c b/libs/wboxtest/nested_mmu/s2_page_nordwr.c
new file mode 100755
index 00000000..acbc4c2b
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s2_page_nordwr.c
@@ -0,0 +1,145 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s2_page_nordwr.c
+ * @author Anup Patel (anup@brainfault.org)
+ * @brief s2_page_nordwr test implementation
+ *
+ * This tests the handling of no-read-write pages in the stage2 page table.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s2_page_nordwr test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s2_page_nordwr_init
+#define MODULE_EXIT s2_page_nordwr_exit
+
+/*
+ * Run s2_page_nordwr: map one stage2 page with neither read nor write
+ * permission and check that 8/16/32-bit loads and stores in three
+ * quarters of the page all take the expected permission fault.
+ * Stage1 is unused (NULL s1_pgtbl), so this is stage2-only.
+ *
+ * NOTE(review): the mapping targets host PA 0; this appears safe only
+ * because every access is expected to fault -- confirm.
+ */
+static int s2_page_nordwr_run(struct wboxtest *test,
+ struct vmm_chardev *cdev,
+ u32 test_hcpu)
+{
+ int rc = VMM_OK;
+ struct mmu_pgtbl *s2_pgtbl;
+ physical_addr_t map_guest_pa;
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail,
+ MMU_STAGE2, &s2_pgtbl);
+
+ /* Find a free page-aligned guest PA in the stage2 table. */
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+ VMM_PAGE_SHIFT, &map_guest_pa);
+
+ /* Map the page with no read/write permission. */
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_guest_pa, 0,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_NORDWR_REG_FLAGS);
+
+ /* 8-bit probes in the first quarter: both read and write fault. */
+#define chunk_start 0
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_guest_pa + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_guest_pa + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ /* 16-bit probes in the second quarter. */
+#define chunk_start (1 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_guest_pa + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_guest_pa + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ /* 32-bit probes in the third quarter. */
+#define chunk_start (2 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_guest_pa + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_guest_pa + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ /* Cleanup (both error and success paths). */
+fail_free_s2_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail:
+ return rc;
+}
+
+/* wboxtest descriptor for the s2_page_nordwr test case. */
+static struct wboxtest s2_page_nordwr = {
+ .name = "s2_page_nordwr",
+ .run = s2_page_nordwr_run,
+};
+
+/* Module init: register this test under the "nested_mmu" group. */
+static int __init s2_page_nordwr_init(void)
+{
+ return wboxtest_register("nested_mmu", &s2_page_nordwr);
+}
+
+/* Module exit: remove this test from the wboxtest framework. */
+static void __exit s2_page_nordwr_exit(void)
+{
+ wboxtest_unregister(&s2_page_nordwr);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+ MODULE_AUTHOR,
+ MODULE_LICENSE,
+ MODULE_IPRIORITY,
+ MODULE_INIT,
+ MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s2_page_rdonly.c b/libs/wboxtest/nested_mmu/s2_page_rdonly.c
new file mode 100755
index 00000000..56c6402b
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s2_page_rdonly.c
@@ -0,0 +1,152 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s2_page_rdonly.c
+ * @author Anup Patel (anup@brainfault.org)
+ * @brief s2_page_rdonly test implementation
+ *
+ * This tests the handling of read-only pages in stage2 page table.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s2_page_rdonly test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s2_page_rdonly_init
+#define MODULE_EXIT s2_page_rdonly_exit
+
+static int s2_page_rdonly_run(struct wboxtest *test,
+ struct vmm_chardev *cdev,
+ u32 test_hcpu)
+{
+ int rc = VMM_OK;
+ struct mmu_pgtbl *s2_pgtbl;
+ virtual_addr_t map_host_va;
+ physical_addr_t map_host_pa;
+ physical_addr_t map_guest_pa;
+
+ nested_mmu_test_alloc_pages(cdev, test, rc, fail,
+ 1, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_page,
+ MMU_STAGE2, &s2_pgtbl);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+ VMM_PAGE_SHIFT, &map_guest_pa);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_guest_pa, map_host_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_RDONLY_REG_FLAGS);
+
+#define chunk_start 0
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_host_pa + chunk_start + sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_guest_pa + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+#define chunk_start (1 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_host_pa + chunk_start + sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_guest_pa + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+#define chunk_start (2 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_host_pa + chunk_start + sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_guest_pa + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+fail_free_s2_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_host_page:
+ nested_mmu_test_free_pages(cdev, test, &map_host_va, &map_host_pa, 1);
+fail:
+ return rc;
+}
+
+static struct wboxtest s2_page_rdonly = {
+ .name = "s2_page_rdonly",
+ .run = s2_page_rdonly_run,
+};
+
+static int __init s2_page_rdonly_init(void)
+{
+ return wboxtest_register("nested_mmu", &s2_page_rdonly);
+}
+
+static void __exit s2_page_rdonly_exit(void)
+{
+ wboxtest_unregister(&s2_page_rdonly);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+ MODULE_AUTHOR,
+ MODULE_LICENSE,
+ MODULE_IPRIORITY,
+ MODULE_INIT,
+ MODULE_EXIT);
diff --git a/libs/wboxtest/nested_mmu/s2_page_rdwr.c b/libs/wboxtest/nested_mmu/s2_page_rdwr.c
new file mode 100755
index 00000000..4201d122
--- /dev/null
+++ b/libs/wboxtest/nested_mmu/s2_page_rdwr.c
@@ -0,0 +1,207 @@
+/**
+ * Copyright (c) 2020 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file s2_page_rdwr.c
+ * @author Anup Patel (anup@brainfault.org)
+ * @brief s2_page_rdwr test implementation
+ *
+ * This tests the handling of read-write pages in stage2 page table.
+ */
+
+#include <vmm_error.h>
+#include <vmm_modules.h>
+
+#undef DEBUG
+
+#include "nested_mmu_test.h"
+
+#define MODULE_DESC "s2_page_rdwr test"
+#define MODULE_AUTHOR "Anup Patel"
+#define MODULE_LICENSE "GPL"
+#define MODULE_IPRIORITY (WBOXTEST_IPRIORITY+1)
+#define MODULE_INIT s2_page_rdwr_init
+#define MODULE_EXIT s2_page_rdwr_exit
+
+static int s2_page_rdwr_run(struct wboxtest *test,
+ struct vmm_chardev *cdev,
+ u32 test_hcpu)
+{
+ int rc = VMM_OK;
+ struct mmu_pgtbl *s2_pgtbl;
+ virtual_addr_t map_host_va;
+ physical_addr_t map_host_pa;
+ physical_addr_t map_guest_pa;
+ physical_addr_t nomap_guest_pa;
+
+ nested_mmu_test_alloc_pages(cdev, test, rc, fail,
+ 1, NESTED_MMU_TEST_RDWR_MEM_FLAGS, &map_host_va, &map_host_pa);
+
+ nested_mmu_test_alloc_pgtbl(cdev, test, rc, fail_free_host_page,
+ MMU_STAGE2, &s2_pgtbl);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nested_mmu_test_best_min_addr(s2_pgtbl),
+ VMM_PAGE_SHIFT, &map_guest_pa);
+
+ nested_mmu_test_map_pgtbl(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_guest_pa, map_host_pa,
+ VMM_PAGE_SIZE, NESTED_MMU_TEST_RDWR_REG_FLAGS);
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, map_guest_pa + VMM_PAGE_SIZE,
+ VMM_PAGE_SHIFT, &nomap_guest_pa);
+
+#define chunk_start 0
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ map_host_pa + chunk_start + sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ nomap_guest_pa + chunk_start + sizeof(u8),
+ MMU_TEST_WIDTH_8BIT,
+ nomap_guest_pa + chunk_start + sizeof(u8),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ map_host_pa + chunk_end - sizeof(u8),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ nomap_guest_pa + chunk_end - sizeof(u8),
+ MMU_TEST_WIDTH_8BIT | MMU_TEST_WRITE,
+ nomap_guest_pa + chunk_end - sizeof(u8),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nomap_guest_pa + VMM_PAGE_SIZE,
+ VMM_PAGE_SHIFT, &nomap_guest_pa);
+
+#define chunk_start (1 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ map_host_pa + chunk_start + sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ nomap_guest_pa + chunk_start + sizeof(u16),
+ MMU_TEST_WIDTH_16BIT,
+ nomap_guest_pa + chunk_start + sizeof(u16),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ map_host_pa + chunk_end - sizeof(u16),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ nomap_guest_pa + chunk_end - sizeof(u16),
+ MMU_TEST_WIDTH_16BIT | MMU_TEST_WRITE,
+ nomap_guest_pa + chunk_end - sizeof(u16),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+ nested_mmu_test_find_free_addr(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, nomap_guest_pa + VMM_PAGE_SIZE,
+ VMM_PAGE_SHIFT, &nomap_guest_pa);
+
+#define chunk_start (2 * (VMM_PAGE_SIZE / 4))
+#define chunk_end (chunk_start + (VMM_PAGE_SIZE / 4))
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ map_host_pa + chunk_start + sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ nomap_guest_pa + chunk_start + sizeof(u32),
+ MMU_TEST_WIDTH_32BIT,
+ nomap_guest_pa + chunk_start + sizeof(u32),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_READ);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ map_guest_pa + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ map_host_pa + chunk_end - sizeof(u32),
+ 0);
+
+ nested_mmu_test_execute(cdev, test, rc, fail_free_s2_pgtbl,
+ s2_pgtbl, NULL,
+ nomap_guest_pa + chunk_end - sizeof(u32),
+ MMU_TEST_WIDTH_32BIT | MMU_TEST_WRITE,
+ nomap_guest_pa + chunk_end - sizeof(u32),
+ MMU_TEST_FAULT_NOMAP | MMU_TEST_FAULT_WRITE);
+
+#undef chunk_start
+#undef chunk_end
+
+fail_free_s2_pgtbl:
+ nested_mmu_test_free_pgtbl(cdev, test, s2_pgtbl);
+fail_free_host_page:
+ nested_mmu_test_free_pages(cdev, test, &map_host_va, &map_host_pa, 1);
+fail:
+ return rc;
+}
+
+static struct wboxtest s2_page_rdwr = {
+ .name = "s2_page_rdwr",
+ .run = s2_page_rdwr_run,
+};
+
+static int __init s2_page_rdwr_init(void)
+{
+ return wboxtest_register("nested_mmu", &s2_page_rdwr);
+}
+
+static void __exit s2_page_rdwr_exit(void)
+{
+ wboxtest_unregister(&s2_page_rdwr);
+}
+
+VMM_DECLARE_MODULE(MODULE_DESC,
+ MODULE_AUTHOR,
+ MODULE_LICENSE,
+ MODULE_IPRIORITY,
+ MODULE_INIT,
+ MODULE_EXIT);
diff --git a/libs/wboxtest/openconf.cfg b/libs/wboxtest/openconf.cfg
index cb310927..1f769e41 100644
--- a/libs/wboxtest/openconf.cfg
+++ b/libs/wboxtest/openconf.cfg
@@ -29,6 +29,7 @@ menuconfig CONFIG_WBOXTEST
if CONFIG_WBOXTEST
+source libs/wboxtest/nested_mmu/openconf.cfg
source libs/wboxtest/threads/openconf.cfg
source libs/wboxtest/stdio/openconf.cfg
--
2.25.1