[PATCH 3/4] [x86] Change the following

5 views
Skip to first unread message

Himanshu Chauhan

unread,
Mar 5, 2021, 7:34:52 AM3/5/21
to xvisor...@googlegroups.com, Himanshu Chauhan
o Fix the CS base for guest
o Disable paging bits for guest
o Remove CLI/STI from __vmcs_run

Signed-off-by: Himanshu Chauhan <hcha...@xvisor-x86.org>
---
arch/x86/cpu/common/vm/vtx/vmcs.c | 20 +++++++++-----------
arch/x86/cpu/common/vm/vtx/vmx.c | 14 +++++++-------
2 files changed, 16 insertions(+), 18 deletions(-)

diff --git a/arch/x86/cpu/common/vm/vtx/vmcs.c b/arch/x86/cpu/common/vm/vtx/vmcs.c
index 5f5b2a5d..64653f4d 100644
--- a/arch/x86/cpu/common/vm/vtx/vmcs.c
+++ b/arch/x86/cpu/common/vm/vtx/vmcs.c
@@ -35,6 +35,7 @@
#include <cpu_interrupts.h>
#include <cpu_features.h>
#include <control_reg_access.h>
+#include <vmm_guest_aspace.h>
#include <vm/vmcs.h>
#include <vm/vmx.h>
#include <vm/ept.h>
@@ -201,7 +202,8 @@ void vmx_detect_capability(void)
& (SECONDARY_EXEC_ENABLE_EPT
| SECONDARY_EXEC_ENABLE_VPID)) {
vmx_ept_vpid_cap = cpu_read_msr(MSR_IA32_VMX_EPT_VPID_CAP);
- }
+ } else
+ vmx_ept_vpid_cap = 0;
}

if (!vmx_pin_based_exec_control) {
@@ -381,10 +383,6 @@ void set_pin_based_exec_controls(void)
break;

default:
- /* we don't want to enable them by default so
- * consider the default settings. */
- if (vmx_pin_based_exec_default1 & pin_controls[i])
- vmx_pin_based_control |= pin_controls[i];
break;
}
}
@@ -726,7 +724,7 @@ typedef union {
void vmx_set_vm_to_powerup_state(struct vcpu_hw_context *context)
{
/* Control registers */
- __vmwrite(GUEST_CR0, GUEST_CRx_FILTER(0, (X86_CR0_ET | X86_CR0_CD | X86_CR0_NW)));
+ __vmwrite(GUEST_CR0, (GUEST_CRx_FILTER(0, (X86_CR0_ET | X86_CR0_CD | X86_CR0_NW)) & ~(X86_CR0_PE | X86_CR0_PG)));
__vmwrite(GUEST_CR3, 0);
__vmwrite(GUEST_CR4, GUEST_CRx_FILTER(4, 0));

@@ -764,10 +762,10 @@ void vmx_set_vm_to_powerup_state(struct vcpu_hw_context *context)
* so that 0xc0c0000 + 0x3fff0 becomes 0xc0ffff0 => The host physical
* for reset vector. Everything else then just falls in place.
*/
- __vmwrite(GUEST_CS_BASE, 0xF0000);
+ __vmwrite(GUEST_CS_BASE, 0);
__vmwrite(GUEST_CS_LIMIT, 0xFFFF);
- __vmwrite(GUEST_CS_AR_BYTES, 0x9b);
- __vmwrite(GUEST_CS_SELECTOR, 0xF000);
+ __vmwrite(GUEST_CS_AR_BYTES, 0x93);
+ __vmwrite(GUEST_CS_SELECTOR, 0);

/* Initial state */
__vmwrite(GUEST_RSP, 0x0);
@@ -786,11 +784,11 @@ void vmx_set_vm_to_powerup_state(struct vcpu_hw_context *context)
__vmwrite(GUEST_LDTR_AR_BYTES, 0x82); /* LDT */
__vmwrite(GUEST_LDTR_SELECTOR, 0);
__vmwrite(GUEST_LDTR_BASE, 0);
- __vmwrite(GUEST_LDTR_LIMIT, 0);
+ __vmwrite(GUEST_LDTR_LIMIT, 0xFFFF);

/* Guest TSS. */
__vmwrite(GUEST_TR_SELECTOR, 0);
- __vmwrite(GUEST_TR_AR_BYTES, 0x008b); /* 32-bit TSS (busy) */
+ __vmwrite(GUEST_TR_AR_BYTES, 0x8b); /* 32-bit TSS (busy) */
__vmwrite(GUEST_TR_BASE, 0);
__vmwrite(GUEST_TR_LIMIT, 0xFFFF);

diff --git a/arch/x86/cpu/common/vm/vtx/vmx.c b/arch/x86/cpu/common/vm/vtx/vmx.c
index 26a0cea4..45513e45 100644
--- a/arch/x86/cpu/common/vm/vtx/vmx.c
+++ b/arch/x86/cpu/common/vm/vtx/vmx.c
@@ -223,8 +223,9 @@ static int __vmcs_run(struct vcpu_hw_context *context, bool resume)
int rc = 0;
u64 ins_err = 0;

- __asm__ __volatile__("cli\n\t"
- "pushfq\n\t" /* Save flags */
+ VM_LOG(LVL_INFO, "Starting to %s guest...\n", (resume ? "resume" : "launch"));
+
+ __asm__ __volatile__("pushfq\n\t" /* Save flags */
"movq $vmx_return, %%rax\n\t"
"vmwrite %%rax, %%rbx\n\t"
"pushq %%rbp\n\t"
@@ -365,7 +366,7 @@ static int __vmcs_run(struct vcpu_hw_context *context, bool resume)
"popq %%rbp\n\t"
"popfq\n\t"
"sub $2, %0\n\t" /* -2 invalid failure */
- "7:sti\n\t"
+ "7:nop\n\t"
:"=q"(rc)
:[resume]"m"(resume), "d"((unsigned long)HOST_RSP),
[context]"c"(context), "b"((unsigned long)HOST_RIP),
@@ -393,11 +394,10 @@ static int __vmcs_run(struct vcpu_hw_context *context, bool resume)
if (rc == -1) {
if ((rc = __vmread(VM_INSTRUCTION_ERROR, &ins_err)) == VMM_OK) {
vmm_printf("Instruction Error: (%s:%ld)\n", ins_err_str[ins_err], ins_err);
- //vmcs_dump(context);
- } else
+ } else {
vmm_printf("Failed to read instruction error (%d)\n", rc);
- while(1);
- //BUG();
+ BUG();
+ }
} else if (rc == -2) {
/* Invalid error: which probably means there is not current VMCS: Problem! */
if (context->vcpu_emergency_shutdown)
--
2.27.0

Himanshu Chauhan

unread,
Mar 5, 2021, 7:34:52 AM3/5/21
to xvisor...@googlegroups.com, Himanshu Chauhan
Signed-off-by: Himanshu Chauhan <hcha...@xvisor-x86.org>
---
arch/x86/cpu/common/include/control_reg_access.h | 12 ++++++++++++
1 file changed, 12 insertions(+)

diff --git a/arch/x86/cpu/common/include/control_reg_access.h b/arch/x86/cpu/common/include/control_reg_access.h
index 7a9989e9..35808d2c 100644
--- a/arch/x86/cpu/common/include/control_reg_access.h
+++ b/arch/x86/cpu/common/include/control_reg_access.h
@@ -121,4 +121,16 @@ static inline void write_msr(unsigned int msr, unsigned long val)
: "c" (msr), "a" (val), "d" (val >> 32)
: "memory");
}
+
+static inline u32 read_rflags(void)
+{
+ u32 rflags;
+
+ asm volatile("pushf\n\t"
+ "popq %%rax\n\t"
+ :"=a"(rflags):: "memory");
+
+ return rflags;
+}
+
#endif /* __CONTROL_REG_ACCESS_H */
--
2.27.0

Himanshu Chauhan

unread,
Mar 5, 2021, 7:34:52 AM3/5/21
to xvisor...@googlegroups.com, Himanshu Chauhan
o Add reset vector empty entry for guest to fault when it
starts running.
o Add support for EPT invalidation functions

Signed-off-by: Himanshu Chauhan <hcha...@xvisor-x86.org>
---
arch/x86/cpu/common/include/cpu_vm.h | 1 +
arch/x86/cpu/common/include/vm/ept.h | 61 +++++++++++----
arch/x86/cpu/common/include/vm/vmcs.h | 2 +
arch/x86/cpu/common/include/vm/vmx.h | 12 +++
arch/x86/cpu/common/vm/vtx/ept.c | 105 ++++++++++++++++++--------
5 files changed, 136 insertions(+), 45 deletions(-)

diff --git a/arch/x86/cpu/common/include/cpu_vm.h b/arch/x86/cpu/common/include/cpu_vm.h
index 0067fb95..e0bd9076 100644
--- a/arch/x86/cpu/common/include/cpu_vm.h
+++ b/arch/x86/cpu/common/include/cpu_vm.h
@@ -3,6 +3,7 @@

#include <multiboot.h>
#include <vm/vmcb.h>
+#include <vm/vmx.h>
#include <processor_flags.h>
#include <cpu_features.h>
#include <vmm_types.h>
diff --git a/arch/x86/cpu/common/include/vm/ept.h b/arch/x86/cpu/common/include/vm/ept.h
index 7863da8a..99d87481 100644
--- a/arch/x86/cpu/common/include/vm/ept.h
+++ b/arch/x86/cpu/common/include/vm/ept.h
@@ -41,6 +41,18 @@
extern struct cpuinfo_x86 cpu_info;

#define PHYS_ADDR_BIT_MASK ((0x1ul << cpu_info.phys_bits) - 1)
+#define EPT_PAGE_MASK_2M (PHYS_ADDR_BIT_MASK >> 21)
+#define EPT_PAGE_MASK_4K (PHYS_ADDR_BIT_MASK >> 12)
+#define EPT_PAGE_MASK_1G (PHYS_ADDR_BIT_MASK >> 30)
+
+#define EPT_PHYS_FILTER(_p) (_p & PHYS_ADDR_BIT_MASK)
+#define EPT_PHYS_2MB_PFN(_p) (EPT_PHYS_FILTER(_p) >> 21)
+#define EPT_PHYS_1GB_PFN(_p) (EPT_PHYS_FILTER(_p) >> 30)
+#define EPT_PHYS_4KB_PFN(_p) (EPT_PHYS_FILTER(_p) >> 12)
+
+#define EPT_PHYS_2MB_PAGE(_p) ((_p & EPT_PAGE_MASK_2M) << 21)
+#define EPT_PHYS_1GB_PAGE(_p) ((_p & EPT_PAGE_MASK_1G) << 30)
+#define EPT_PHYS_4KB_PAGE(_p) ((_p & EPT_PAGE_MASK_4K) << 12)

typedef union {
u64 val;
@@ -49,7 +61,9 @@ typedef union {
u64 mt:3; /* Memory type: 0 Uncacheable 6 Writeback */
u64 pgwl:3; /* Pagewalk length */
u64 en_ad:1; /* Enable accessed/dirty flags for EPT structures */
- u64 res:5; /* reserved */
+ u64 en_ssr:1; /* Setting this control to 1 enables enforcement of
+ access rights for supervisor shadow-stack pages */
+ u64 res:4; /* reserved */
u64 pml4:52; /* pml4 physical base, only bits N-1:12 are valid
* where N is the physical address width of the
* logical processor */
@@ -65,10 +79,12 @@ typedef union {
u64 x:1; /* Execute access */
u64 res:5; /* Reserved */
u64 accessed:1; /* Depends on Bit 6 in EPTP. Currently not set */
- u64 ign:3; /* Ignored */
+ u64 ign:1; /* Ignored */
+ u64 mbe:1; /* Mode based execution */
+ u64 ign1:1;
u64 pdpt_base:40; /* Physical address of 4-KByte aligned EPT
* page-directory-pointer table referenced by this entry */
- u64 ign1:12; /* Ignored */
+ u64 ign2:12; /* Ignored */
} bits;
} ept_pml4e_t;

@@ -84,9 +100,14 @@ typedef union {
u64 is_page:1; /* Ignore */
u64 accessed:1; /* Accessed (If bit 6 set in EPTP) */
u64 dirty:1; /* Dirty (If bit 6 set in EPTP) */
- u64 ign1:2; /* Ignored */
+ u64 mbe:1;
+ u64 ign1:1; /* Ignored */
u64 res:18; /* Must be zero */
- u64 phys:22; /* Physical address of the 1 GiB page */
+ u64 phys:22; /* physical address of PD */
+ u64 ign2:8;
+ u64 superv_ss:1; /* supervisor shadow stack */
+ u64 ign3:2; /* ignored */
+ u64 sup_ve:1; /* suppress #VE exception */
} pe;

struct {
@@ -95,10 +116,11 @@ typedef union {
u64 x:1; /* Execute */
u64 res:5; /* Reservd */
u64 accessed:1; /* Accessed by software (if Bit 6 in EPTP is set) */
- u64 ign:3; /* Ignored */
+ u64 ign:1; /* Ignored */
+ u64 mbe:1; /* mode based exec */
+ u64 ign1:1;
u64 pd_base:40; /* Page directory base */
- u64 ign1:11; /* Ignored */
- u64 sup_ve:1; /* Supress #VE */
+ u64 ign2:12; /* Ignored */
} te;
} ept_pdpte_t;

@@ -114,10 +136,13 @@ typedef union {
u64 is_page:1; /* Must be set to 1 */
u64 accessed:1; /* Region was accessed by software */
u64 dirty:1; /* Region was written to by software */
- u64 ign:2; /* Ignored */
- u64 res:18; /* Must be zero */
- u64 phys:22; /* Physical address of 2MiB page */
- u64 ign1:11; /* Ignored */
+ u64 mbe:1;
+ u64 ign:1; /* Ignored */
+ u64 res:9; /* Must be zero */
+ u64 phys:31; /* Physical address of 2MiB page */
+ u64 ign1:8; /* Ignored */
+ u64 superv_ss:1;
+ u64 ign2:2;
u64 sup_ve:1; /* Suppress #VE */
} pe;

@@ -128,7 +153,9 @@ typedef union {
u64 res:4; /* Reserved */
u64 is_page:1; /* Must be zero */
u64 accessed:1; /* Accessed by software (if bit 6 is set in EPTP) */
- u64 ign:3; /* Ignore */
+ u64 ign:1; /* Ignore */
+ u64 mbe:1;
+ u64 ign1:1;
u64 pt_base:40; /* Physical address of the page table */
u64 res1:12; /* Reserved */
} te;
@@ -146,9 +173,13 @@ typedef union {
u64 ign:1; /* Ignored */
u64 accessed:1; /* Accessed by software (if bit 6 in eptp set) */
u64 dirty:1; /* Written by software (if bit 6 in eptp set) */
- u64 ign1:2; /* Ignored */
+ u64 mbe:1;
+ u64 ign1:1; /* Ignored */
u64 phys:40; /* Physical address of 4 KiB page mapped */
- u64 ign2:11; /* Ignored */
+ u64 ign2:8; /* Ignored */
+ u64 superv_ss:1;
+ u64 subpage_w:1;
+ u64 ign3:1;
u64 sup_ve:1; /* Suppress #VE */
} pe;
} ept_pte_t;
diff --git a/arch/x86/cpu/common/include/vm/vmcs.h b/arch/x86/cpu/common/include/vm/vmcs.h
index c0026f47..801b1e9c 100644
--- a/arch/x86/cpu/common/include/vm/vmcs.h
+++ b/arch/x86/cpu/common/include/vm/vmcs.h
@@ -329,6 +329,8 @@ enum vmcs_field {
#define GUEST_ACTIVITY_ACTIVE 0
#define GUEST_ACTIVITY_HLT 1

+struct vcpu_hw_context;
+
extern void vmx_detect_capability(void);
extern struct vmcs* create_vmcs(void);
extern struct vmcs *current_vmcs(physical_addr_t *phys);
diff --git a/arch/x86/cpu/common/include/vm/vmx.h b/arch/x86/cpu/common/include/vm/vmx.h
index a47450df..2f71d1fb 100644
--- a/arch/x86/cpu/common/include/vm/vmx.h
+++ b/arch/x86/cpu/common/include/vm/vmx.h
@@ -156,12 +156,21 @@ extern u64 vmx_ept_vpid_cap;
(vmx_ept_vpid_cap & VMX_EPT_MEMORY_TYPE_WB)
#define cpu_has_vmx_ept_2MB \
(vmx_ept_vpid_cap & VMX_EPT_SUPERPAGE_2MB)
+#define cpu_has_vmx_invept \
+ (vmx_ept_vpid_cap & VMX_EPT_INVEPT_INSTRUCTION)
#define cpu_has_vmx_ept_invept_single_context \
(vmx_ept_vpid_cap & VMX_EPT_INVEPT_SINGLE_CONTEXT)
+#define cpu_has_vmx_ept_invept_all_context \
+ (vmx_ept_vpid_cap & VMX_EPT_INVEPT_ALL_CONTEXT)

#define INVEPT_SINGLE_CONTEXT 1
#define INVEPT_ALL_CONTEXT 2

+struct invept_desc {
+ u64 eptp;
+ u64 reserved;
+} __attribute__ ((packed));
+
#define cpu_has_vmx_vpid_invvpid_individual_addr \
(vmx_ept_vpid_cap & VMX_VPID_INVVPID_INDIVIDUAL_ADDR)
#define cpu_has_vmx_vpid_invvpid_single_context \
@@ -403,6 +412,9 @@ static inline int __vmxon(u64 addr)

#define EPT_PAGETABLE_ENTRIES 512

+struct vcpu_hw_context;
+struct cpuinfo_x86;
+
extern int __init intel_init(struct cpuinfo_x86 *cpuinfo);
extern int intel_setup_vm_control(struct vcpu_hw_context *context);

diff --git a/arch/x86/cpu/common/vm/vtx/ept.c b/arch/x86/cpu/common/vm/vtx/ept.c
index 66e64911..7531a0a0 100644
--- a/arch/x86/cpu/common/vm/vtx/ept.c
+++ b/arch/x86/cpu/common/vm/vtx/ept.c
@@ -38,33 +38,25 @@

static inline u32 ept_pml4_index(physical_addr_t gphys)
{
- if (gphys & (PHYS_ADDR_BIT_MASK))
- return ((u32)-1);
-
+ gphys &= PHYS_ADDR_BIT_MASK;
return ((gphys >> 39) & 0x1fful);
}

static inline u32 ept_pdpt_index(physical_addr_t gphys)
{
- if (gphys & (PHYS_ADDR_BIT_MASK))
- return ((u32)-1);
-
+ gphys &= PHYS_ADDR_BIT_MASK;
return ((gphys >> 30) & 0x1fful);
}

static inline u32 ept_pd_index(physical_addr_t gphys)
{
- if (gphys & (PHYS_ADDR_BIT_MASK))
- return ((u32)-1);
-
+ gphys &= PHYS_ADDR_BIT_MASK;
return ((gphys >> 21) & 0x1fful);
}

static inline u32 ept_pt_index(physical_addr_t gphys)
{
- if (gphys & (PHYS_ADDR_BIT_MASK))
- return ((u32)-1);
-
+ gphys &= PHYS_ADDR_BIT_MASK;
return ((gphys >> 12) & 0x1fful);
}

@@ -84,86 +76,139 @@ int ept_create_pte(struct vcpu_hw_context *context,
physical_addr_t phys;
virtual_addr_t virt;

- if (pml4_index == -1 || pdpt_index == -1
- || pd_index == -1 || pt_index == -1) {
- VM_LOG(LVL_ERR,
- "Page table index calculation failed. (gphys: 0x%lx)\n",
- gphys);
- return VMM_EFAIL;
- }
+ VM_LOG(LVL_DEBUG, "pml4: 0x%x pdpt: 0x%x pd: 0x%x pt: 0x%x\n",
+ pml4_index, pdpt_index, pd_index, pt_index);

pml4e = (ept_pml4e_t *)(&pml4[pml4_index]);
+ pml4e->val = 0;
pml4e->val &= EPT_PROT_MASK;
- pml4e->val |= pg_prot;
+ pml4e->val |= 0x3;
virt = get_free_page_for_pagemap(context, &phys);
if (!virt) {
VM_LOG(LVL_ERR, "System is out of guest page table memory\n");
return VMM_ENOMEM;
}
- pml4e->bits.pdpt_base = phys;
+ memset((void *)virt, 0, PAGE_SIZE);
+ pml4e->bits.pdpt_base = EPT_PHYS_4KB_PFN(phys);
+ VM_LOG(LVL_DEBUG, "%s: PML4E: 0x%016lx\n", __func__, pml4e->val);

+ phys = 0;
pdpte = (ept_pdpte_t *)(&((u64 *)virt)[pdpt_index]);
+ pdpte->val = 0;
pdpte->val &= EPT_PROT_MASK;
- pdpte->val |= pg_prot;
+ pdpte->val |= 0x3;
virt = get_free_page_for_pagemap(context, &phys);
if (!virt) {
VM_LOG(LVL_ERR, "System is out of guest page table memory\n");
return VMM_ENOMEM;
}
if (pg_size == EPT_PAGE_SIZE_1G) {
- pdpte->pe.phys = hphys;
+ pdpte->pe.phys = EPT_PHYS_1GB_PFN(hphys);
pdpte->pe.mt = 6; /* write-back memory type */
pdpte->pe.ign_pat = 1; /* ignore PAT type */
pdpte->pe.is_page = 1;
goto _done;
} else {
- pdpte->te.pd_base = phys;
+ pdpte->te.pd_base = EPT_PHYS_4KB_PFN(phys);
}
+ VM_LOG(LVL_DEBUG, "%s: PDPTE: 0x%016lx\n", __func__, pdpte->val);

+ phys = 0;
pde = (ept_pde_t *)(&((u64 *)virt)[pd_index]);
+ pde->val = 0;
pde->val &= EPT_PROT_MASK;
- pde->val |= pg_prot;
+ pde->val |= 0x3;
virt = get_free_page_for_pagemap(context, &phys);
if (!virt) {
VM_LOG(LVL_ERR, "System is out of guest page table memory\n");
return VMM_ENOMEM;
}
if (pg_size == EPT_PAGE_SIZE_2M) {
- pde->pe.phys = hphys;
+ pde->pe.phys = EPT_PHYS_2MB_PFN(hphys);
pde->pe.mt = 6;
pde->pe.ign_pat = 1;
pde->pe.is_page = 1;
goto _done;
} else {
- pde->te.pt_base = phys;
+ pde->te.pt_base = EPT_PHYS_4KB_PFN(phys);
}
+ VM_LOG(LVL_DEBUG, "%s: PDE: 0x%016lx\n", __func__, pde->val);

pte = (ept_pte_t *)(&((u64 *)virt)[pt_index]);
+ pte->val = 0;
pte->val &= EPT_PROT_MASK;
pte->val |= pg_prot;
- pte->pe.phys = hphys;
+ pte->pe.mt = 6;
+ pte->pe.phys = EPT_PHYS_4KB_PFN(hphys);
+ VM_LOG(LVL_DEBUG, "%s: PTE: 0x%016lx\n", __func__, pte->val);

_done:
return VMM_OK;
}

+static inline void
+invalidate_ept (int type, struct invept_desc *desc)
+{
+ /* Specifically not using exception table here.
+ * if the feature is not present, it will unnecessarily
+ * cause a context switch, which is more expensive. */
+ if (likely(cpu_has_vmx_invept)) {
+ /* most modern CPUs will have this */
+ if (unlikely(type == INVEPT_ALL_CONTEXT
+ && !cpu_has_vmx_ept_invept_all_context)) {
+ VM_LOG(LVL_INFO, "EPT all context flush not supported\n");
+ return;
+ }
+ if (unlikely(type == INVEPT_SINGLE_CONTEXT
+ && !cpu_has_vmx_ept_invept_single_context)) {
+ VM_LOG(LVL_INFO, "EPT single context flush not supported\n");
+ return;
+ }
+ asm volatile("invept (%0), %1\n\t"
+ ::"D"(type), "S"(desc)
+ :"memory", "cc");
+ } else {
+ VM_LOG(LVL_INFO, "INVEPT instruction is not supported by CPU\n");
+ }
+}
+
int setup_ept(struct vcpu_hw_context *context)
{
+ struct invept_desc id;
physical_addr_t pml4_phys;
eptp_t *eptp = (eptp_t *)&context->eptp;
virtual_addr_t pml4 = get_free_page_for_pagemap(context, &pml4_phys);

+ VM_LOG(LVL_INFO, "%s: PML4 vaddr: 0x%016lx paddr: 0x%016lx\n",
+ __func__, pml4, pml4_phys);
+
if (!pml4) {
VM_LOG(LVL_ERR, "%s: Failed to allocate EPT page\n", __func__);
return VMM_ENOMEM;
+
}

- eptp->bits.mt = 6; /* Write back */
+ /* most of the reserved bits want zeros */
+ memset((void *)pml4, 0, PAGE_SIZE);
+
+ eptp->val = 0;
+ eptp->bits.mt = (vmx_ept_vpid_cap & (0x01UL << 8) ? 0 /* UC */
+ : (vmx_ept_vpid_cap & (0x1UL << 14)) ? 6 /* WB */
+ : 6);
+
eptp->bits.pgwl = 3; /* 4 page levels */
eptp->bits.en_ad = 0;
- eptp->bits.pml4 = pml4_phys;
+ eptp->bits.pml4 = EPT_PHYS_4KB_PFN(pml4_phys);
+
+ VM_LOG(LVL_DEBUG, "%s: EPTP: 0x%16lx (0x%16lx)\n", __func__, eptp->val, context->eptp);

context->n_cr3 = pml4;
+ ept_create_pte(context, 0xFFF0ULL, 0, 4096, 0);
+
+ VM_LOG(LVL_DEBUG, "Invalidating EPT\n");
+
+ id.eptp = eptp->val;
+ invalidate_ept(INVEPT_SINGLE_CONTEXT, &id);

return VMM_OK;
}
--
2.27.0

Himanshu Chauhan

unread,
Mar 5, 2021, 7:34:52 AM3/5/21
to xvisor...@googlegroups.com, Himanshu Chauhan
Signed-off-by: Himanshu Chauhan <hcha...@xvisor-x86.org>
---
docs/x86/x86_64_generic.txt | 3 +++
1 file changed, 3 insertions(+)

diff --git a/docs/x86/x86_64_generic.txt b/docs/x86/x86_64_generic.txt
index 4b53a3fb..0a6856ed 100644
--- a/docs/x86/x86_64_generic.txt
+++ b/docs/x86/x86_64_generic.txt
@@ -74,6 +74,9 @@ C. Preparing to Boot Guest:
c. Launch Qemu with this new harddrive:
# qemu-system-x86_64 -cpu qemu64,+svm,vendor=AuthenticAMD -cdrom bootable.iso -hda xvisor-hd.disk -m 1024M -boot d -s -serial stdio

+ OR ON INTEL
+ #qemu-system-x86_64 -cpu SandyBridge,hv_relaxed,hv_spinlocks=0x1fff,hv_vapic,hv_time,+vmx -enable-kvm -cdrom bootable.iso -hda xvisor-hd.disk -m 1024m -boot d -serial stdio -vnc :2
+
d. Load guest binary:
When Xvisor boots, we will need to load guest binary in memory:
i. First check if the correct block device and its partition are seen by Xvisor. You should see something like this:
--
2.27.0

Anup Patel

unread,
Mar 6, 2021, 11:13:28 PM3/6/21
to Xvisor Devel, Himanshu Chauhan
On Fri, Mar 5, 2021 at 6:04 PM Himanshu Chauhan <hcha...@xvisor-x86.org> wrote:
>
> Signed-off-by: Himanshu Chauhan <hcha...@xvisor-x86.org>

Please add "x86: " prefix to patch subject.

Otherwise looks good to me.

Reviewed-by: Anup Patel <an...@brainfault.org>

Regards,
Anup
> --
> You received this message because you are subscribed to the Google Groups "Xvisor Development" group.
> To unsubscribe from this group and stop receiving emails from it, send an email to xvisor-devel...@googlegroups.com.
> To view this discussion on the web visit https://groups.google.com/d/msgid/xvisor-devel/20210305120938.1564420-1-hchauhan%40xvisor-x86.org.

Anup Patel

unread,
Mar 6, 2021, 11:13:58 PM3/6/21
to Xvisor Devel, Himanshu Chauhan
On Fri, Mar 5, 2021 at 6:04 PM Himanshu Chauhan <hcha...@xvisor-x86.org> wrote:
>
> o Add reset vector empty entry for guest to fault when it
> starts running.
> o Add support for EPT invalidation functions
>
> Signed-off-by: Himanshu Chauhan <hcha...@xvisor-x86.org>

Please add "x86: " prefix to patch subject.

Otherwise looks good to me.

Reviewed-by: Anup Patel <an...@brainfault.org>

Regards,
Anup

> --
> You received this message because you are subscribed to the Google Groups "Xvisor Development" group.
> To unsubscribe from this group and stop receiving emails from it, send an email to xvisor-devel...@googlegroups.com.
> To view this discussion on the web visit https://groups.google.com/d/msgid/xvisor-devel/20210305120938.1564420-2-hchauhan%40xvisor-x86.org.

Anup Patel

unread,
Mar 6, 2021, 11:14:31 PM3/6/21
to Xvisor Devel, Himanshu Chauhan
On Fri, Mar 5, 2021 at 6:04 PM Himanshu Chauhan <hcha...@xvisor-x86.org> wrote:
>
> o Fix the CS base for guest
> o Disable paging bits for guest
> o Remove CLI/STI from __vmcs_run
>
> Signed-off-by: Himanshu Chauhan <hcha...@xvisor-x86.org>

Please add "x86: " prefix to patch subject.

Otherwise looks good to me.

Reviewed-by: Anup Patel <an...@brainfault.org>

Regards,
Anup

> --
> You received this message because you are subscribed to the Google Groups "Xvisor Development" group.
> To unsubscribe from this group and stop receiving emails from it, send an email to xvisor-devel...@googlegroups.com.
> To view this discussion on the web visit https://groups.google.com/d/msgid/xvisor-devel/20210305120938.1564420-3-hchauhan%40xvisor-x86.org.

Anup Patel

unread,
Mar 6, 2021, 11:14:54 PM3/6/21
to Xvisor Devel, Himanshu Chauhan
On Fri, Mar 5, 2021 at 6:04 PM Himanshu Chauhan <hcha...@xvisor-x86.org> wrote:
>
> Signed-off-by: Himanshu Chauhan <hcha...@xvisor-x86.org>

Please add "x86: " prefix to patch subject.

Otherwise looks good to me.

Reviewed-by: Anup Patel <an...@brainfault.org>

Regards,
Anup

> --
> You received this message because you are subscribed to the Google Groups "Xvisor Development" group.
> To unsubscribe from this group and stop receiving emails from it, send an email to xvisor-devel...@googlegroups.com.
> To view this discussion on the web visit https://groups.google.com/d/msgid/xvisor-devel/20210305120938.1564420-4-hchauhan%40xvisor-x86.org.
Reply all
Reply to author
Forward
0 new messages