From: Himanshu Chauhan <
hcha...@xvisor-x86.org>
We store HOST RSP/RIP when we are about to launch or resume the guest.
This is handled in the inline assembly of the __vmcs_run function. This
patch adds support to catch the errors and restore the CPU state
in case the instruction fails.
arch/x86/cpu/common/include/cpu_vm.h | 3 +-
arch/x86/cpu/common/vm/vtx/intercept.c | 7 ++--
arch/x86/cpu/common/vm/vtx/vmx.c | 55 ++++++++++++++++++++------
arch/x86/cpu/x86_64/cpu_vcpu_helper.c | 1 +
4 files changed, 49 insertions(+), 17 deletions(-)
diff --git a/arch/x86/cpu/common/include/cpu_vm.h b/arch/x86/cpu/common/include/cpu_vm.h
index 13024357..2cc1b2fb 100644
--- a/arch/x86/cpu/common/include/cpu_vm.h
+++ b/arch/x86/cpu/common/include/cpu_vm.h
@@ -111,6 +111,8 @@ struct vcpu_intercept_table {
};
struct vcpu_hw_context {
+ u32 instruction_error; /* !!NOTE!!: This has to be first variable */
+ u32 dummy; /* 8-byte align */
struct cpuinfo_x86 *cpuinfo;
struct vmcb *vmcb;
struct vmcs *vmcs;
@@ -125,7 +127,6 @@ struct vcpu_hw_context {
u64 g_cr8;
u64 g_rip;
u64 vmx_last_exit_qualification;
- s32 instruction_error;
unsigned int asid;
u64 eptp;
diff --git a/arch/x86/cpu/common/vm/vtx/intercept.c b/arch/x86/cpu/common/vm/vtx/intercept.c
index 281033ad..54f4c429 100644
--- a/arch/x86/cpu/common/vm/vtx/intercept.c
+++ b/arch/x86/cpu/common/vm/vtx/intercept.c
@@ -244,7 +244,7 @@ void vmx_handle_cpuid(struct vcpu_hw_context *context)
return;
_fail:
- if (context->vcpu_emergency_shutdown){
+ if (context->vcpu_emergency_shutdown) {
context->vcpu_emergency_shutdown(context);
}
}
@@ -352,6 +352,7 @@ int vmx_handle_crx_exit(struct vcpu_hw_context *context)
VM_LOG(LVL_ERR, "LMSW not supported yet\n");
goto guest_bad_fault;
}
+
__vmwrite(GUEST_RIP, VMX_GUEST_NEXT_RIP(context));
return VMM_OK;
@@ -400,7 +401,7 @@ int vmx_handle_vmexit(struct vcpu_hw_context *context, u32 exit_reason)
return VMM_OK;
default:
- VM_LOG(LVL_INFO, "Unhandled VM Exit reason: %d\n", exit_reason);
+ VM_LOG(LVL_DEBUG, "Unhandled VM Exit reason: %d\n", exit_reason);
goto guest_bad_fault;
}
@@ -457,7 +458,7 @@ void vmx_vcpu_exit(struct vcpu_hw_context *context)
VM_LOG(LVL_DEBUG, "Guest RIP: 0x%"PRIx64"\n", VMX_GUEST_RIP(context));
if (vmx_handle_vmexit(context, _exit_reason.bits.reason) != VMM_OK) {
- VM_LOG(LVL_ERR, "Error handling VMExit (Reason: %d)\n", _exit_reason.bits.reason);
+ VM_LOG(LVL_DEBUG, "Error handling VMExit (Reason: %d)\n", _exit_reason.bits.reason);
goto unhandled_vm_exit;
}
diff --git a/arch/x86/cpu/common/vm/vtx/vmx.c b/arch/x86/cpu/common/vm/vtx/vmx.c
index b7f76bcb..22c83745 100644
--- a/arch/x86/cpu/common/vm/vtx/vmx.c
+++ b/arch/x86/cpu/common/vm/vtx/vmx.c
@@ -190,13 +190,18 @@ static int enable_vmx (struct cpuinfo_x86 *cpuinfo)
static int __vmcs_run(struct vcpu_hw_context *context, bool resume)
{
- int rc = 0;
-
context->instruction_error = 0;
+ if (context->sign != 0xdeadbeef) {
+ vmm_printf("Context: 0x%p Sign: 0x%x\n", context, context->sign);
+ BUG();
+ }
__asm__ __volatile__("pushfq\n\t" /* Save flags */
+ /* save return address in host space area */
"movq $vmx_return, %%rax\n\t"
"vmwrite %%rax, %%rbx\n\t"
+ "jz 8f\n\t"
+ "jc 8f\n\t"
"pushq %%rbp\n\t"
"pushq %%rdi\n\t"
"pushq %%rsi\n\t"
@@ -208,9 +213,13 @@ static int __vmcs_run(struct vcpu_hw_context *context, bool resume)
"pushq %%r13\n\t"
"pushq %%r14\n\t"
"pushq %%r15\n\t"
+ /* save the hardware context pointer */
"pushq %%rcx\n\t"
+ /* save host RSP in host state area */
"movq %%rsp, %%rax\n\t"
"vmwrite %%rax, %%rdx\n\t"
+ "jz 9f\n\t"
+ "jc 9f\n\t"
/*
* Check if vmlaunch or vmresume is needed, set the condition code
* appropriately for use below.
@@ -245,7 +254,7 @@ static int __vmcs_run(struct vcpu_hw_context *context, bool resume)
*/
"ud2\n\t"
".section .fixup,\"ax\"\n"
- "2:sub $3, %0 ; jmp 7f\n" /* Return -3 if #UD or #GF */
+ "2:movq $3, (%[context]) ; jmp 7f\n" /* Return -3 if #UD or #GF */
".previous\n"
".section __ex_table,\"a\"\n"
" "__FIXUP_ALIGN"\n"
@@ -260,13 +269,14 @@ static int __vmcs_run(struct vcpu_hw_context *context, bool resume)
*/
"ud2\n\t"
".section .fixup,\"ax\"\n"
- "4:sub $4, %0 ; jmp 7f\n" /* Return -4 if #UD or #GF */
+ "4:movq $4, (%[context]) ; jmp 7f\n" /* Return -4 if #UD or #GF */
".previous\n"
".section __ex_table,\"a\"\n"
" "__FIXUP_ALIGN"\n"
" "__FIXUP_WORD" 3b,4b\n"
".previous\n"
-
+ "8: movq $8, (%[context]); jmp 10f\n" /* vmwrite failure */
+ "9: movq $9, (%[context]); jmp 11f\n" /* rsp vmwrite fail */
/* We shall come here only on successful VMEXIT */
"vmx_return: \n\t"
/*
@@ -319,7 +329,7 @@ static int __vmcs_run(struct vcpu_hw_context *context, bool resume)
"popq %%rdi\n\t"
"popq %%rbp\n\t"
"popfq\n\t"
- "sub $1, %0\n\t" /* -1 valid failure */
+ "movq $1, (%[context])\n\t" /* -1 valid failure */
"jmp 7f\n\t"
"6:popq %%rcx\n\t"
"popq %%r15\n\t"
@@ -334,9 +344,24 @@ static int __vmcs_run(struct vcpu_hw_context *context, bool resume)
"popq %%rdi\n\t"
"popq %%rbp\n\t"
"popfq\n\t"
- "sub $2, %0\n\t" /* -2 invalid failure */
+ "movq $2, (%[context])\n\t" /* -2 invalid failure */
+ "jmp 7f\n\t"
+ "11:popq %%rcx\n\t"
+ "popq %%r15\n\t"
+ "popq %%r14\n\t"
+ "popq %%r13\n\t"
+ "popq %%r12\n\t"
+ "popq %%r11\n\t"
+ "popq %%r10\n\t"
+ "popq %%r9\n\t"
+ "popq %%r8\n\t"
+ "popq %%rsi\n\t"
+ "popq %%rdi\n\t"
+ "popq %%rbp\n\t"
+ "10:"
+ "popfq\n\t"
"7:sti\n\t"
- :"=q"(rc)
+ :
:[resume]"m"(resume), "d"((unsigned long)HOST_RSP),
[context]"c"(context), "b"((unsigned long)HOST_RIP),
[rax]"i"(offsetof(struct vcpu_hw_context, g_regs[GUEST_REGS_RAX])),
@@ -360,11 +385,15 @@ static int __vmcs_run(struct vcpu_hw_context *context, bool resume)
/* TR is not reloaded back the cpu after VM exit. */
reload_host_tss();
- if (rc < 0) {
- vmm_printf("VM Entry failed: Error: %d\n", rc);
- context->instruction_error = rc;
- } else
- context->instruction_error = 0;
+ if (context->sign != 0xdeadbeef) {
+ vmm_printf("Context: 0x%p Sign: 0x%x\n", context, context->sign);
+ BUG();
+ }
+
+ if (context->instruction_error != 0) {
+ vmm_printf("VM Entry failed: Error: %d\n", context->instruction_error);
+ BUG();
+ }
arch_guest_handle_vm_exit(context);
diff --git a/arch/x86/cpu/x86_64/cpu_vcpu_helper.c b/arch/x86/cpu/x86_64/cpu_vcpu_helper.c
index e5b55b5d..5656c599 100644
--- a/arch/x86/cpu/x86_64/cpu_vcpu_helper.c
+++ b/arch/x86/cpu/x86_64/cpu_vcpu_helper.c
@@ -188,6 +188,7 @@ int arch_vcpu_init(struct vmm_vcpu *vcpu)
x86_vcpu_priv(vcpu)->hw_context = vmm_zalloc(sizeof(struct vcpu_hw_context));
x86_vcpu_priv(vcpu)->hw_context->assoc_vcpu = vcpu;
+ x86_vcpu_priv(vcpu)->hw_context->sign = 0xdeadbeef;
x86_vcpu_priv(vcpu)->hw_context->vcpu_emergency_shutdown = arch_vcpu_emergency_shutdown;
cpu_init_vcpu_hw_context(&cpu_info, x86_vcpu_priv(vcpu)->hw_context);
--
2.25.1