[PATCH 6/6] x86: Fixed all the warnings in the code.


hcha...@xvisor-x86.org

Mar 18, 2021, 6:52:01 AM
to xvisor...@googlegroups.com, Himanshu Chauhan
From: Himanshu Chauhan <hcha...@xvisor-x86.org>

As compilers get stricter, new warnings creep in. Fixed all of
them, including the warnings that crept in during fresh
development.

Signed-off-by: Himanshu Chauhan <hcha...@xvisor-x86.org>
---
arch/x86/board/common/devices/video/fb_console.c | 8 --------
arch/x86/cpu/common/acpi.c | 4 ++--
arch/x86/cpu/common/vm/vtx/intercept.c | 4 ++--
arch/x86/cpu/x86_64/include/cpu_interrupts.h | 2 +-
arch/x86/cpu/x86_64/start.S | 2 +-
5 files changed, 6 insertions(+), 14 deletions(-)

diff --git a/arch/x86/board/common/devices/video/fb_console.c b/arch/x86/board/common/devices/video/fb_console.c
index 30063ee6..84df9b2b 100644
--- a/arch/x86/board/common/devices/video/fb_console.c
+++ b/arch/x86/board/common/devices/video/fb_console.c
@@ -166,7 +166,6 @@ extern struct multiboot_info boot_info;
int fb_defterm_init(void)
{
vmm_printf("%s: init\n", __func__);
- svga_mode_info_t *svga_mode_info;

fb_fifo = fifo_alloc(sizeof(u8), 128);
if (!fb_fifo) {
@@ -178,12 +177,6 @@ int fb_defterm_init(void)
fb_key_flags = 0;
fb_key_handler_registered = FALSE;

- svga_mode_info = svga_mode_get_info(SVGA_DEFAULT_MODE);
- //vmm_printf("%s: svga mode: 0x%lx\n", __func__, (unsigned long)svga_mode_info);
- //bytesPerLine = svga_mode_info->pitch;
- //width = svga_mode_info->screen_width;
- //height = svga_mode_info->screen_height;
- //depth = svga_mode_info->bpp / 8;
bytesPerLine = boot_info.framebuffer_pitch;
width = boot_info.framebuffer_width;
height = boot_info.framebuffer_height;
@@ -191,7 +184,6 @@ int fb_defterm_init(void)
vmm_printf("%s: BPL: %d width: %d height: %d depth: %d\n", __func__,
bytesPerLine, width, height, depth);

- //video_base = (void *)svga_map_fb(svga_mode_info->physbase, svga_mode_info->pitch * svga_mode_info->screen_height);
video_base = (void*)svga_map_fb(boot_info.framebuffer_addr, bytesPerLine*height);
vmm_printf("%s: Video base: %p\n", __func__, video_base);
fb_console_set_font(&ter_i16n_raw, &ter_i16b_raw);
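
For reference, the svga_mode_info removal above addresses GCC's
-Wunused-but-set-variable: the pointer was still assigned, but with
the SVGA reads commented out it was never used. A minimal sketch of
the warning class (hypothetical code, not from this tree):

  /* gcc -Wall -Wextra -c demo.c */
  unsigned int demo_pitch(unsigned int pitch)
  {
          unsigned int bytes_per_line;    /* set but never read */

          bytes_per_line = pitch;         /* triggers the warning */
          return pitch;                   /* fix: drop the variable */
  }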
diff --git a/arch/x86/cpu/common/acpi.c b/arch/x86/cpu/common/acpi.c
index 35bceb24..2403ce78 100644
--- a/arch/x86/cpu/common/acpi.c
+++ b/arch/x86/cpu/common/acpi.c
@@ -252,7 +252,7 @@ static int __init acpi_populate_lapic_devtree(struct acpi_madt_hdr *madt_hdr,
return ret;
}

-static int __init process_acpi_sdt_table(char *tab_sign, u32 *tab_data)
+static int __init process_acpi_sdt_table(char *tab_sign, void *tab_data)
{
struct vmm_devtree_node *node = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING
VMM_DEVTREE_MOTHERBOARD_NODE_NAME);
@@ -364,7 +364,7 @@ int __init acpi_init(void)
memcpy(sign, hdr->signature, SDT_SIGN_LEN);
sign[SDT_SIGN_LEN] = 0;

- if (process_acpi_sdt_table((char *)sign, (u32 *)hdr) != VMM_OK) {
+ if (process_acpi_sdt_table((char *)sign, (void *)hdr) != VMM_OK) {
vmm_host_iounmap((virtual_addr_t)hdr);
goto sdt_fail;
}
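
The prototype change above is the usual cure for
-Wincompatible-pointer-types: SDT tables arrive as differently typed
headers, and void * accepts them all without a cast that merely
launders the pointer. A sketch of the pattern (hypothetical names):

  #include <string.h>

  struct sdt_hdr { char signature[4]; };

  /* void * converts implicitly from any object pointer type */
  static int process_table(const char *sign, void *tab_data)
  {
          struct sdt_hdr *hdr = tab_data;  /* no cast, no warning */
          return memcmp(hdr->signature, sign, 4);
  }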
diff --git a/arch/x86/cpu/common/vm/vtx/intercept.c b/arch/x86/cpu/common/vm/vtx/intercept.c
index 8eb25e1e..281033ad 100644
--- a/arch/x86/cpu/common/vm/vtx/intercept.c
+++ b/arch/x86/cpu/common/vm/vtx/intercept.c
@@ -193,7 +193,7 @@ void vmx_handle_cpuid(struct vcpu_hw_context *context)
context->g_regs[GUEST_REGS_RBX] = func->resp_ebx;
context->g_regs[GUEST_REGS_RCX] = func->resp_ecx;
context->g_regs[GUEST_REGS_RDX] = func->resp_edx;
- VM_LOG(LVL_DEBUG, "CPUID: 0x%"PRIx32" RAX: 0x%"PRIx32" RBX: 0x%"PRIx32" RCX: 0x%"PRIx32" RDX: 0x%"PRIx32"\n",
+ VM_LOG(LVL_DEBUG, "CPUID: 0x%"PRIx64" RAX: 0x%"PRIx32" RBX: 0x%"PRIx32" RCX: 0x%"PRIx32" RDX: 0x%"PRIx32"\n",
context->g_regs[GUEST_REGS_RAX], func->resp_eax, func->resp_ebx, func->resp_ecx, func->resp_edx);

break;
@@ -206,7 +206,7 @@ void vmx_handle_cpuid(struct vcpu_hw_context *context)
case CPUID_EXTENDED_ADDR_BITS:
func = &priv->extended_funcs[context->g_regs[GUEST_REGS_RAX]
- CPUID_EXTENDED_LFUNCEXTD];
- VM_LOG(LVL_DEBUG, "CPUID: 0x%"PRIx64": EAX: 0x%"PRIx64" EBX: 0x%"PRIx64" ECX: 0x%"PRIx64" EDX: 0x%"PRIx64"\n",
+ VM_LOG(LVL_DEBUG, "CPUID: 0x%"PRIx64": EAX: 0x%"PRIx32" EBX: 0x%"PRIx32" ECX: 0x%"PRIx32" EDX: 0x%"PRIx32"\n",
context->g_regs[GUEST_REGS_RAX], func->resp_eax, func->resp_ebx, func->resp_ecx, func->resp_edx);
context->g_regs[GUEST_REGS_RAX] = func->resp_eax;
context->g_regs[GUEST_REGS_RBX] = func->resp_ebx;
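
Both format fixes follow from -Wformat: the PRIx macro must match
the width of the actual argument, and here the g_regs entries are
u64 while the resp_* fields are u32. In miniature, assuming stdint
types:

  #include <inttypes.h>
  #include <stdio.h>

  int main(void)
  {
          uint64_t rax = 0x80000008;  /* like g_regs[GUEST_REGS_RAX] */
          uint32_t eax = 0x3028;      /* like func->resp_eax */

          /* PRIx32 for a u64 (or PRIx64 for a u32) trips -Wformat */
          printf("CPUID: 0x%"PRIx64" EAX: 0x%"PRIx32"\n", rax, eax);
          return 0;
  }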
diff --git a/arch/x86/cpu/x86_64/include/cpu_interrupts.h b/arch/x86/cpu/x86_64/include/cpu_interrupts.h
index 095bb309..aa8496e7 100644
--- a/arch/x86/cpu/x86_64/include/cpu_interrupts.h
+++ b/arch/x86/cpu/x86_64/include/cpu_interrupts.h
@@ -162,7 +162,7 @@ struct tss_64 {
u32 resvd_3;
u32 resvd_4;
u32 map_base;
-} __packed;
+};

union tss_desc_base_limit {
u32 val;
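
Removing __packed silences warnings such as GCC's
-Waddress-of-packed-member, and is only safe when the struct layout
has no implicit padding to begin with, since otherwise member
offsets would change. A compile-time guard one might add (sketch;
the stand-in struct and its size are assumptions):

  #include <stdint.h>

  struct tss_demo {            /* stand-in for the tail of tss_64 */
          uint32_t resvd_3;
          uint32_t resvd_4;
          uint32_t map_base;
  };

  /* if the layout is naturally packed, the attribute was a no-op */
  _Static_assert(sizeof(struct tss_demo) == 3 * sizeof(uint32_t),
                 "tss_demo must stay naturally packed");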
diff --git a/arch/x86/cpu/x86_64/start.S b/arch/x86/cpu/x86_64/start.S
index 8a0cf7e5..9a120467 100644
--- a/arch/x86/cpu/x86_64/start.S
+++ b/arch/x86/cpu/x86_64/start.S
@@ -182,7 +182,7 @@ __fill_IMP_table:
addl $4096, -4(%ebp)
addl $4096, -8(%ebp)
subl $1, -12(%ebp)
- cmp $0, -12(%ebp)
+ cmpl $0, -12(%ebp)
ja __fill_IMP_table
leave
ret
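
The cmpl fix addresses an assembler ambiguity: with only an
immediate and a memory operand there is no register to infer the
operand size from, so newer GNU as warns about the missing mnemonic
suffix. The same pattern from C inline assembly (illustrative
sketch):

  static inline void dec_to_zero(unsigned int *counter)
  {
          __asm__ __volatile__(
                  "subl $1, %0\n\t"
                  "cmpl $0, %0\n\t"  /* plain 'cmp' here is ambiguous */
                  : "+m" (*counter)
                  :
                  : "cc");
  }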
--
2.27.0

hcha...@xvisor-x86.org

Mar 19, 2021, 11:16:03 PM
to xvisor...@googlegroups.com, Himanshu Chauhan
From: Himanshu Chauhan <hcha...@xvisor-x86.org>

We store the host RSP/RIP when we are about to launch or resume the
guest; this is handled in the inline assembly of the __vmcs_run
function. This patch adds support to catch the errors and restore
the CPU state in case the instruction fails.

Signed-off-by: Himanshu Chauhan <hcha...@xvisor-x86.org>
---
arch/x86/cpu/common/include/cpu_vm.h | 3 +-
arch/x86/cpu/common/vm/vtx/intercept.c | 7 ++--
arch/x86/cpu/common/vm/vtx/vmx.c | 55 ++++++++++++++++++++------
arch/x86/cpu/x86_64/cpu_vcpu_helper.c | 1 +
4 files changed, 49 insertions(+), 17 deletions(-)
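
For context on the new checks: VMX instructions report failure
through RFLAGS rather than a return value. ZF set means
VMfailInvalid (no current VMCS); CF set means VMfailValid (an error
code sits in the VM-instruction-error field). That is what the
jz/jc branches after each vmwrite below test. A checked vmwrite in
miniature (hypothetical helper, not the patch's code):

  #include <stdint.h>

  /* 0 on success, 1 on VMfailInvalid (ZF), 2 on VMfailValid (CF) */
  static inline int vmwrite_checked(uint64_t field, uint64_t value)
  {
          int err = 0;

          __asm__ __volatile__(
                  "vmwrite %2, %1\n\t"
                  "jnc 1f\n\t"
                  "movl $2, %0\n\t"       /* CF: VMfailValid */
                  "jmp 2f\n"
                  "1:     jnz 2f\n\t"
                  "movl $1, %0\n"         /* ZF: VMfailInvalid */
                  "2:"
                  : "+r" (err)
                  : "r" (field), "r" (value)
                  : "cc", "memory");
          return err;
  }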

diff --git a/arch/x86/cpu/common/include/cpu_vm.h b/arch/x86/cpu/common/include/cpu_vm.h
index 13024357..2cc1b2fb 100644
--- a/arch/x86/cpu/common/include/cpu_vm.h
+++ b/arch/x86/cpu/common/include/cpu_vm.h
@@ -111,6 +111,8 @@ struct vcpu_intercept_table {
};

struct vcpu_hw_context {
+ u32 instruction_error; /* !!NOTE!!: this must be the first member */
+ u32 dummy; /* 8-byte align */
struct cpuinfo_x86 *cpuinfo;
struct vmcb *vmcb;
struct vmcs *vmcs;
@@ -125,7 +127,6 @@ struct vcpu_hw_context {
u64 g_cr8;
u64 g_rip;
u64 vmx_last_exit_qualification;
- s32 instruction_error;

unsigned int asid;
u64 eptp;
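
The move to the top of the struct is load-bearing: the fixup paths
in __vmcs_run store the error code through the raw context pointer
(movq $N, (%[context])), so instruction_error must live at offset
zero, with the dummy member keeping the following u64 fields 8-byte
aligned. A guard for that invariant might look like (sketch,
assuming cpu_vm.h is in scope):

  #include <stddef.h>

  _Static_assert(offsetof(struct vcpu_hw_context, instruction_error) == 0,
                 "__vmcs_run fixup code writes at (%[context])");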
diff --git a/arch/x86/cpu/common/vm/vtx/intercept.c b/arch/x86/cpu/common/vm/vtx/intercept.c
index 281033ad..54f4c429 100644
--- a/arch/x86/cpu/common/vm/vtx/intercept.c
+++ b/arch/x86/cpu/common/vm/vtx/intercept.c
@@ -244,7 +244,7 @@ void vmx_handle_cpuid(struct vcpu_hw_context *context)
return;

_fail:
- if (context->vcpu_emergency_shutdown){
+ if (context->vcpu_emergency_shutdown) {
context->vcpu_emergency_shutdown(context);
}
}
@@ -352,6 +352,7 @@ int vmx_handle_crx_exit(struct vcpu_hw_context *context)
VM_LOG(LVL_ERR, "LMSW not supported yet\n");
goto guest_bad_fault;
}
+
__vmwrite(GUEST_RIP, VMX_GUEST_NEXT_RIP(context));

return VMM_OK;
@@ -400,7 +401,7 @@ int vmx_handle_vmexit(struct vcpu_hw_context *context, u32 exit_reason)
return VMM_OK;

default:
- VM_LOG(LVL_INFO, "Unhandled VM Exit reason: %d\n", exit_reason);
+ VM_LOG(LVL_DEBUG, "Unhandled VM Exit reason: %d\n", exit_reason);
goto guest_bad_fault;
}

@@ -457,7 +458,7 @@ void vmx_vcpu_exit(struct vcpu_hw_context *context)
VM_LOG(LVL_DEBUG, "Guest RIP: 0x%"PRIx64"\n", VMX_GUEST_RIP(context));

if (vmx_handle_vmexit(context, _exit_reason.bits.reason) != VMM_OK) {
- VM_LOG(LVL_ERR, "Error handling VMExit (Reason: %d)\n", _exit_reason.bits.reason);
+ VM_LOG(LVL_DEBUG, "Error handling VMExit (Reason: %d)\n", _exit_reason.bits.reason);
goto unhandled_vm_exit;
}

diff --git a/arch/x86/cpu/common/vm/vtx/vmx.c b/arch/x86/cpu/common/vm/vtx/vmx.c
index b7f76bcb..22c83745 100644
--- a/arch/x86/cpu/common/vm/vtx/vmx.c
+++ b/arch/x86/cpu/common/vm/vtx/vmx.c
@@ -190,13 +190,18 @@ static int enable_vmx (struct cpuinfo_x86 *cpuinfo)

static int __vmcs_run(struct vcpu_hw_context *context, bool resume)
{
- int rc = 0;
-
context->instruction_error = 0;
+ if (context->sign != 0xdeadbeef) {
+ vmm_printf("Context: 0x%p Sign: 0x%x\n", context, context->sign);
+ BUG();
+ }

__asm__ __volatile__("pushfq\n\t" /* Save flags */
+ /* save return address in host space area */
"movq $vmx_return, %%rax\n\t"
"vmwrite %%rax, %%rbx\n\t"
+ "jz 8f\n\t"
+ "jc 8f\n\t"
"pushq %%rbp\n\t"
"pushq %%rdi\n\t"
"pushq %%rsi\n\t"
@@ -208,9 +213,13 @@ static int __vmcs_run(struct vcpu_hw_context *context, bool resume)
"pushq %%r13\n\t"
"pushq %%r14\n\t"
"pushq %%r15\n\t"
+ /* save the hardware context pointer */
"pushq %%rcx\n\t"
+ /* save host RSP in host state area */
"movq %%rsp, %%rax\n\t"
"vmwrite %%rax, %%rdx\n\t"
+ "jz 9f\n\t"
+ "jc 9f\n\t"
/*
* Check if vmlaunch or vmresume is needed, set the condition code
* appropriately for use below.
@@ -245,7 +254,7 @@ static int __vmcs_run(struct vcpu_hw_context *context, bool resume)
*/
"ud2\n\t"
".section .fixup,\"ax\"\n"
- "2:sub $3, %0 ; jmp 7f\n" /* Return -3 if #UD or #GF */
+ "2:movq $3, (%[context]) ; jmp 7f\n" /* Return -3 if #UD or #GF */
".previous\n"
".section __ex_table,\"a\"\n"
" "__FIXUP_ALIGN"\n"
@@ -260,13 +269,14 @@ static int __vmcs_run(struct vcpu_hw_context *context, bool resume)
*/
"ud2\n\t"
".section .fixup,\"ax\"\n"
- "4:sub $4, %0 ; jmp 7f\n" /* Return -4 if #UD or #GF */
+ "4:movq $4, (%[context]) ; jmp 7f\n" /* Return -4 if #UD or #GF */
".previous\n"
".section __ex_table,\"a\"\n"
" "__FIXUP_ALIGN"\n"
" "__FIXUP_WORD" 3b,4b\n"
".previous\n"
-
+ "8: movq $8, (%[context]); jmp 10f\n" /* vmwrite failure */
+ "9: movq $9, (%[context]); jmp 11f\n" /* rsp vmwrite fail */
/* We shall come here only on successful VMEXIT */
"vmx_return: \n\t"
/*
@@ -319,7 +329,7 @@ static int __vmcs_run(struct vcpu_hw_context *context, bool resume)
"popq %%rdi\n\t"
"popq %%rbp\n\t"
"popfq\n\t"
- "sub $1, %0\n\t" /* -1 valid failure */
+ "movq $1, (%[context])\n\t" /* -1 valid failure */
"jmp 7f\n\t"
"6:popq %%rcx\n\t"
"popq %%r15\n\t"
@@ -334,9 +344,24 @@ static int __vmcs_run(struct vcpu_hw_context *context, bool resume)
"popq %%rdi\n\t"
"popq %%rbp\n\t"
"popfq\n\t"
- "sub $2, %0\n\t" /* -2 invalid failure */
+ "movq $2, (%[context])\n\t" /* -2 invalid failure */
+ "jmp 7f\n\t"
+ "11:popq %%rcx\n\t"
+ "popq %%r15\n\t"
+ "popq %%r14\n\t"
+ "popq %%r13\n\t"
+ "popq %%r12\n\t"
+ "popq %%r11\n\t"
+ "popq %%r10\n\t"
+ "popq %%r9\n\t"
+ "popq %%r8\n\t"
+ "popq %%rsi\n\t"
+ "popq %%rdi\n\t"
+ "popq %%rbp\n\t"
+ "10:"
+ "popfq\n\t"
"7:sti\n\t"
- :"=q"(rc)
+ :
:[resume]"m"(resume), "d"((unsigned long)HOST_RSP),
[context]"c"(context), "b"((unsigned long)HOST_RIP),
[rax]"i"(offsetof(struct vcpu_hw_context, g_regs[GUEST_REGS_RAX])),
@@ -360,11 +385,15 @@ static int __vmcs_run(struct vcpu_hw_context *context, bool resume)
/* TR is not reloaded back the cpu after VM exit. */
reload_host_tss();

- if (rc < 0) {
- vmm_printf("VM Entry failed: Error: %d\n", rc);
- context->instruction_error = rc;
- } else
- context->instruction_error = 0;
+ if (context->sign != 0xdeadbeef) {
+ vmm_printf("Context: 0x%p Sign: 0x%x\n", context, context->sign);
+ BUG();
+ }
+
+ if (context->instruction_error != 0) {
+ vmm_printf("VM Entry failed: Error: %d\n", context->instruction_error);
+ BUG();
+ }

arch_guest_handle_vm_exit(context);
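
With this change the error is reported as a positive code in
context->instruction_error instead of a negative return value. As
the fixup comments indicate, the mapping works out to roughly
(sketch; enum names hypothetical):

  enum vmcs_run_error {
          VMCS_RUN_OK            = 0,
          VMCS_RUN_FAIL_VALID    = 1,  /* VMfailValid exit path */
          VMCS_RUN_FAIL_INVALID  = 2,  /* VMfailInvalid exit path */
          VMCS_RUN_ENTRY_FAULT_A = 3,  /* #UD/#GP on one entry insn */
          VMCS_RUN_ENTRY_FAULT_B = 4,  /* #UD/#GP on the other */
          VMCS_RUN_VMWRITE_RIP   = 8,  /* host RIP vmwrite failed */
          VMCS_RUN_VMWRITE_RSP   = 9,  /* host RSP vmwrite failed */
  };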

diff --git a/arch/x86/cpu/x86_64/cpu_vcpu_helper.c b/arch/x86/cpu/x86_64/cpu_vcpu_helper.c
index e5b55b5d..5656c599 100644
--- a/arch/x86/cpu/x86_64/cpu_vcpu_helper.c
+++ b/arch/x86/cpu/x86_64/cpu_vcpu_helper.c
@@ -188,6 +188,7 @@ int arch_vcpu_init(struct vmm_vcpu *vcpu)

x86_vcpu_priv(vcpu)->hw_context = vmm_zalloc(sizeof(struct vcpu_hw_context));
x86_vcpu_priv(vcpu)->hw_context->assoc_vcpu = vcpu;
+ x86_vcpu_priv(vcpu)->hw_context->sign = 0xdeadbeef;

x86_vcpu_priv(vcpu)->hw_context->vcpu_emergency_shutdown = arch_vcpu_emergency_shutdown;
cpu_init_vcpu_hw_context(&cpu_info, x86_vcpu_priv(vcpu)->hw_context);
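
The 0xdeadbeef sign works as a canary: written once at allocation
and checked on both sides of the VM-entry assembly, so a clobbered
context pointer or an overrun of the structure trips BUG() rather
than corrupting state silently. The pattern in miniature
(hypothetical names; __builtin_trap stands in for Xvisor's BUG()):

  #include <stdint.h>

  #define CTX_CANARY 0xdeadbeefU
  #define BUG() __builtin_trap()   /* stand-in for the real BUG() */

  struct demo_ctx {
          uint32_t error;
          uint32_t sign;           /* set once at init, never changed */
  };

  static void demo_check(const struct demo_ctx *ctx)
  {
          if (ctx->sign != CTX_CANARY)
                  BUG();           /* corruption caught early */
  }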
--
2.25.1

Anup Patel

Mar 20, 2021, 11:52:15 AM
to Xvisor Devel, Himanshu Chauhan
On Thu, Mar 18, 2021 at 4:22 PM <hcha...@xvisor-x86.org> wrote:
>
> From: Himanshu Chauhan <hcha...@xvisor-x86.org>
>
> As compilers get stricter, new warnings creep in. Fixed all of
> them, including the warnings that crept in during fresh
> development.
>
> Signed-off-by: Himanshu Chauhan <hcha...@xvisor-x86.org>

Looks good to me.

Reviewed-by: Anup Patel <an...@brainfault.org>

Regards,
Anup

Anup Patel

Mar 20, 2021, 11:52:35 AM
to Xvisor Devel, Himanshu Chauhan
On Sat, Mar 20, 2021 at 8:46 AM <hcha...@xvisor-x86.org> wrote:
>
> From: Himanshu Chauhan <hcha...@xvisor-x86.org>
>
> We store the host RSP/RIP when we are about to launch or resume the
> guest; this is handled in the inline assembly of the __vmcs_run
> function. This patch adds support to catch the errors and restore
> the CPU state in case the instruction fails.
>
> Signed-off-by: Himanshu Chauhan <hcha...@xvisor-x86.org>

Looks good to me.

Reviewed-by: Anup Patel <an...@brainfault.org>

Regards,
Anup
