From: Himanshu Chauhan <hcha...@xvisor-x86.org>
This patch moves all logs in x86 code to sub-system-level logging.
arch/x86/cpu/common/include/cpu_vm.h | 22 ----
arch/x86/cpu/common/include/vm/ept.h | 30 ------
arch/x86/cpu/common/vm/arch_guest_helper.c | 7 +-
arch/x86/cpu/common/vm/svm/intercept.c | 115 +++++++++++----------
arch/x86/cpu/common/vm/svm/svm.c | 21 ++--
arch/x86/cpu/common/vm/vm.c | 37 +++----
arch/x86/cpu/common/vm/vtx/ept.c | 113 ++++++++++----------
arch/x86/cpu/common/vm/vtx/intercept.c | 95 ++++++++---------
arch/x86/cpu/common/vm/vtx/vmcs.c | 21 ++--
arch/x86/cpu/common/vm/vtx/vmx.c | 31 +++---
arch/x86/cpu/x86_64/cpu_vcpu_helper.c | 15 +--
11 files changed, 239 insertions(+), 268 deletions(-)
diff --git a/arch/x86/cpu/common/include/cpu_vm.h b/arch/x86/cpu/common/include/cpu_vm.h
index 0e2f8a62..4f9561fe 100644
--- a/arch/x86/cpu/common/include/cpu_vm.h
+++ b/arch/x86/cpu/common/include/cpu_vm.h
@@ -10,28 +10,6 @@
#include <cpu_pgtbl_helper.h>
#include <libs/bitmap.h>
-enum {
- VM_LOG_LVL_ERR,
- VM_LOG_LVL_INFO,
- VM_LOG_LVL_DEBUG,
- VM_LOG_LVL_VERBOSE
-};
-extern int vm_default_log_lvl;
-#define VM_LOG(lvl, fmt, args...) \
- do { \
- if (VM_LOG_##lvl <= vm_default_log_lvl) { \
- vmm_printf(fmt, ##args); \
- } \
- }while(0);
-
-#define VM_LOG_FD(lvl, fmt, args...) \
- do { \
- if (VM_LOG_##lvl <= vm_default_log_lvl) { \
- vmm_printf("(%s:%d) " fmt, __func__, \
- __LINE__, ##args); \
- } \
- }while(0);
-
#define MOV_CRn_INST_SZ 3
enum guest_regs {
diff --git a/arch/x86/cpu/common/include/vm/ept.h b/arch/x86/cpu/common/include/vm/ept.h
index b54a65fe..0d20fbc6 100644
--- a/arch/x86/cpu/common/include/vm/ept.h
+++ b/arch/x86/cpu/common/include/vm/ept.h
@@ -189,34 +189,4 @@ int ept_create_pte_map(struct vcpu_hw_context *context,
physical_addr_t gphys, physical_addr_t hphys,
size_t pg_size, u32 pg_prot);
-#define ENABLE_EPT_LOG 0
-
-enum {
- EPT_LOG_LVL_ERR,
- EPT_LOG_LVL_INFO,
- EPT_LOG_LVL_DEBUG,
- EPT_LOG_LVL_VERBOSE
-};
-extern int ept_default_log_lvl;
-
-#if ENABLE_EPT_LOG
-#define EPT_LOG(lvl, fmt, args...) \
- do { \
- if (EPT_LOG_##lvl <= ept_default_log_lvl) { \
- vmm_printf(fmt, ##args); \
- } \
- }while(0);
-
-#define EPT_LOG_FD(lvl, fmt, args...) \
- do { \
- if (EPT_LOG_##lvl <= ept_default_log_lvl) { \
- vmm_printf("(%s:%d) " fmt, __func__, \
- __LINE__, ##args); \
- } \
- }while(0);
-#else
-#define EPT_LOG(lvl, fmt, args...)
-#define EPT_LOG_FD(lvl, fmt, args...)
-#endif
-
#endif /* __EPT_H */
diff --git a/arch/x86/cpu/common/vm/arch_guest_helper.c b/arch/x86/cpu/common/vm/arch_guest_helper.c
index 57d38a35..7e4b6b18 100644
--- a/arch/x86/cpu/common/vm/arch_guest_helper.c
+++ b/arch/x86/cpu/common/vm/arch_guest_helper.c
@@ -28,6 +28,7 @@
#include <vmm_guest_aspace.h>
#include <vmm_host_aspace.h>
#include <vmm_macros.h>
+#include <x86_debug_log.h>
#include <cpu_mmu.h>
#include <cpu_features.h>
#include <cpu_vm.h>
@@ -35,18 +36,20 @@
#include <libs/stringlib.h>
#include <arch_guest_helper.h>
+DEFINE_X86_DEBUG_LOG_SUBSYS_LEVEL(arch_guest_helper, X86_DEBUG_LOG_LVL_INFO);
+
int arch_guest_init(struct vmm_guest * guest)
{
struct x86_guest_priv *priv = vmm_zalloc(sizeof(struct x86_guest_priv));
if (priv == NULL) {
- VM_LOG(LVL_ERR, "ERROR: Failed to create guest private data.\n");
+ X86_DEBUG_LOG(arch_guest_helper, LVL_ERR, "ERROR: Failed to create guest private data.\n");
return VMM_EFAIL;
}
guest->arch_priv = (void *)priv;
- VM_LOG(LVL_VERBOSE, "Guest init successful!\n");
+ X86_DEBUG_LOG(arch_guest_helper, LVL_VERBOSE, "Guest init successful!\n");
return VMM_OK;
}
diff --git a/arch/x86/cpu/common/vm/svm/intercept.c b/arch/x86/cpu/common/vm/svm/intercept.c
index daa6e11b..d907fc17 100644
--- a/arch/x86/cpu/common/vm/svm/intercept.c
+++ b/arch/x86/cpu/common/vm/svm/intercept.c
@@ -36,6 +36,9 @@
#include <vmm_devemu.h>
#include <vmm_manager.h>
#include <vmm_main.h>
+#include <x86_debug_log.h>
+
+DEFINE_X86_DEBUG_LOG_SUBSYS_LEVEL(svm_intercept, X86_DEBUG_LOG_LVL_INFO);
static char *exception_names[] = {
"#DivError", /* 0 */
@@ -83,7 +86,7 @@ static int guest_read_gva(struct vcpu_hw_context *context, u32 vaddr, u8 *where
physical_addr_t gphys;
if (gva_to_gpa(context, vaddr, &gphys)) {
- VM_LOG(LVL_ERR, "Failed to convert guest virtual 0x%x to guest "
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Failed to convert guest virtual 0x%x to guest "
"physical.\n", vaddr);
return VMM_EFAIL;
}
@@ -91,7 +94,7 @@ static int guest_read_gva(struct vcpu_hw_context *context, u32 vaddr, u8 *where
/* FIXME: Should we always do cacheable memory access here ?? */
if (vmm_guest_memory_read(context->assoc_vcpu->guest, gphys,
where, size, TRUE) < size) {
- VM_LOG(LVL_ERR, "Failed to read guest pa 0x%lx\n", gphys);
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Failed to read guest pa 0x%lx\n", gphys);
return VMM_EFAIL;
}
@@ -105,7 +108,7 @@ static int guest_read_fault_inst(struct vcpu_hw_context *context,
struct vmm_guest *guest = context->assoc_vcpu->guest;
if (gva_to_gpa(context, context->vmcb->rip, &rip_phys)) {
- VM_LOG(LVL_ERR, "Failed to convert guest virtual 0x%"PRIADDR
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Failed to convert guest virtual 0x%"PRIADDR
" to guest physical.\n", context->vmcb->rip);
return VMM_EFAIL;
}
@@ -113,7 +116,7 @@ static int guest_read_fault_inst(struct vcpu_hw_context *context,
/* FIXME: Should we always do cacheable memory access here ?? */
if (vmm_guest_memory_read(guest, rip_phys, g_ins, sizeof(x86_inst),
TRUE) < sizeof(x86_inst)) {
- VM_LOG(LVL_ERR, "Failed to read instruction at intercepted "
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Failed to read instruction at intercepted "
"instruction pointer. (0x%"PRIPADDR")\n", rip_phys);
return VMM_EFAIL;
}
@@ -127,7 +130,7 @@ static inline void dump_guest_exception_insts(struct vcpu_hw_context *context)
int i;
if (guest_read_fault_inst(context, &ins)) {
- VM_LOG(LVL_ERR, "Failed to read faulting guest instruction.\n");
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Failed to read faulting guest instruction.\n");
return;
}
vmm_printf("\n");
@@ -167,14 +170,14 @@ void handle_guest_resident_page_fault(struct vcpu_hw_context *context)
union page32 pte, pte1, pde, pde1;
u32 prot, prot1, pdprot, pdprot1;
- VM_LOG(LVL_DEBUG, "Resident page fault exit info 1: 0x%lx "
+ X86_DEBUG_LOG(svm_intercept, LVL_DEBUG, "Resident page fault exit info 1: 0x%lx "
"2: 0x%lx rip: 0x%lx\n", context->vmcb->exitinfo1,
context->vmcb->exitinfo2, context->vmcb->rip);
if (lookup_guest_pagetable(context, fault_gphys,
&lookedup_gphys, &pde, &pte)) {
/* Lazy TLB flush by guest? */
- VM_LOG(LVL_ERR, "ERROR: No entry in guest page table in "
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "ERROR: No entry in guest page table in "
"protection fault! Arrrgh! (Guest virtual: 0x%lx)\n",
fault_gphys);
goto guest_bad_fault;
@@ -182,7 +185,7 @@ void handle_guest_resident_page_fault(struct vcpu_hw_context *context)
if (lookup_shadow_pagetable(context, fault_gphys,
&lookedup_gphys, &pde1, &pte1)) {
- VM_LOG(LVL_ERR, "ERROR: No entry in shadow page table? "
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "ERROR: No entry in shadow page table? "
"Arrrgh! (Guest virtual: 0x%lx)\n",
fault_gphys);
goto guest_bad_fault;
@@ -203,7 +206,7 @@ void handle_guest_resident_page_fault(struct vcpu_hw_context *context)
if (update_guest_shadow_pgprot(context, fault_gphys,
GUEST_PG_LVL_1, pdprot)
!= VMM_OK) {
- VM_LOG(LVL_ERR, "ERROR: Could not update level 2 "
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "ERROR: Could not update level 2 "
"pgprot in shadow table(Guest virtual: 0x%lx)\n",
fault_gphys);
goto guest_bad_fault;
@@ -214,7 +217,7 @@ void handle_guest_resident_page_fault(struct vcpu_hw_context *context)
if (update_guest_shadow_pgprot(context, fault_gphys,
GUEST_PG_LVL_2, prot)
!= VMM_OK) {
- VM_LOG(LVL_ERR, "ERROR: Could not update level 1 "
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "ERROR: Could not update level 1 "
"pgprot in shadow (Guest virtual: 0x%lx)\n",
fault_gphys);
goto guest_bad_fault;
@@ -238,7 +241,7 @@ void handle_guest_realmode_page_fault(struct vcpu_hw_context *context,
{
if (create_guest_shadow_map(context, fault_gphys, hphys_addr,
PAGE_SIZE, 0x3, 0x3) != VMM_OK) {
- VM_LOG(LVL_ERR, "ERROR: Failed to create map in"
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "ERROR: Failed to create map in"
"guest's shadow page table.\n"
"Fault Gphys: 0x%lx "
"Host Phys: %lx\n",
@@ -265,7 +268,7 @@ void emulate_guest_mmio_io(struct vcpu_hw_context *context,
dinst->inst.gen_mov.src_addr,
(physical_addr_t *)
&dinst->inst.gen_mov.src_addr) != VMM_OK) {
- VM_LOG(LVL_ERR, "Failed to map guest va 0x%"PRIADDR" to pa\n",
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Failed to map guest va 0x%"PRIADDR" to pa\n",
dinst->inst.gen_mov.src_addr);
goto guest_bad_fault;
}
@@ -287,7 +290,7 @@ void emulate_guest_mmio_io(struct vcpu_hw_context *context,
if (dinst->inst.gen_mov.dst_addr == RM_REG_AX)
context->vmcb->rax = guestrd;
} else {
- VM_LOG(LVL_ERR, "Memory to memory move instruction not "
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Memory to memory move instruction not "
"supported.\n");
goto guest_bad_fault;
}
@@ -302,7 +305,7 @@ void emulate_guest_mmio_io(struct vcpu_hw_context *context,
else
guestrd = context->g_regs[index];
} else {
- VM_LOG(LVL_ERR, "Memory to memory move instruction not "
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Memory to memory move instruction not "
"supported.\n");
goto guest_bad_fault;
}
@@ -330,17 +333,17 @@ void handle_guest_mmio_fault(struct vcpu_hw_context *context)
x86_decoded_inst_t dinst;
if (guest_read_fault_inst(context, &ins)) {
- VM_LOG(LVL_ERR, "Failed to read faulting guest instruction.\n");
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Failed to read faulting guest instruction.\n");
goto guest_bad_fault;
}
if (x86_decode_inst(context, ins, &dinst) != VMM_OK) {
- VM_LOG(LVL_ERR, "Failed to decode guest instruction.\n");
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Failed to decode guest instruction.\n");
goto guest_bad_fault;
}
if (unlikely(dinst.inst_type != INST_TYPE_MOV)) {
- VM_LOG(LVL_ERR,
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR,
"IO Fault in guest without a move instruction!\n");
goto guest_bad_fault;
}
@@ -388,11 +391,11 @@ void handle_guest_protected_mem_rw(struct vcpu_hw_context *context)
if (lookup_guest_pagetable(context, fault_gphys,
&lookedup_gphys, &pde, &pte)
!= VMM_OK) {
- VM_LOG(LVL_DEBUG, "ERROR: No page table entry "
+ X86_DEBUG_LOG(svm_intercept, LVL_DEBUG, "ERROR: No page table entry "
"created by guest for fault address "
"0x%lx (rIP: 0x%lx)\n",
fault_gphys, context->vmcb->rip);
- VM_LOG(LVL_DEBUG, "EXITINFO1: 0x%lx\n",
+ X86_DEBUG_LOG(svm_intercept, LVL_DEBUG, "EXITINFO1: 0x%lx\n",
context->vmcb->exitinfo1);
inject_guest_exception(context,
VM_EXCEPTION_PAGE_FAULT);
@@ -411,7 +414,7 @@ void handle_guest_protected_mem_rw(struct vcpu_hw_context *context)
if ((PagePresent(&pte) && PageReadOnly(&pte))
||(PagePresent(&pde) && PageReadOnly(&pde))) {
if (!(context->vmcb->cr0 & (0x1UL << 16))) {
- VM_LOG(LVL_ERR, "Page fault in guest "
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Page fault in guest "
"on valid page and WP unset.\n");
goto guest_bad_fault;
}
@@ -433,7 +436,7 @@ void handle_guest_protected_mem_rw(struct vcpu_hw_context *context)
if (create_guest_shadow_map(context, lookedup_gphys,
hphys_addr, PAGE_SIZE, pdprot,
prot) != VMM_OK) {
- VM_LOG(LVL_ERR, "ERROR: Failed to create map in"
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "ERROR: Failed to create map in"
"guest's shadow page table.\n"
"Fault Gphys: 0x%lx "
"Lookup Gphys: 0x%lx "
@@ -470,14 +473,14 @@ void __handle_vm_gdt_write(struct vcpu_hw_context *context)
void __handle_vm_npf (struct vcpu_hw_context *context)
{
- VM_LOG(LVL_INFO, "Unhandled Intercept: nested page fault.\n");
+ X86_DEBUG_LOG(svm_intercept, LVL_INFO, "Unhandled Intercept: nested page fault.\n");
if (context->vcpu_emergency_shutdown)
context->vcpu_emergency_shutdown(context);
}
void __handle_vm_swint (struct vcpu_hw_context *context)
{
- VM_LOG(LVL_INFO, "Unhandled Intercept: software interrupt.\n");
+ X86_DEBUG_LOG(svm_intercept, LVL_INFO, "Unhandled Intercept: software interrupt.\n");
if (context->vcpu_emergency_shutdown)
context->vcpu_emergency_shutdown(context);
}
@@ -489,7 +492,7 @@ void __handle_vm_exception (struct vcpu_hw_context *context)
switch (context->vmcb->exitcode)
{
case VMEXIT_EXCEPTION_PF:
- VM_LOG(LVL_DEBUG, "Guest fault: 0x%"PRIx64" (rIP: %"PRIADDR")\n",
+ X86_DEBUG_LOG(svm_intercept, LVL_DEBUG, "Guest fault: 0x%"PRIx64" (rIP: %"PRIADDR")\n",
context->vmcb->exitinfo2, context->vmcb->rip);
int rc;
@@ -507,7 +510,7 @@ void __handle_vm_exception (struct vcpu_hw_context *context)
rc = vmm_guest_physical_map(guest, fault_gphys, PAGE_SIZE,
&hphys_addr, &availsz, &flags);
if (rc) {
- VM_LOG(LVL_ERR, "ERROR: No region mapped to "
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "ERROR: No region mapped to "
"guest physical: 0x%lx\n", fault_gphys);
goto guest_bad_fault;
}
@@ -528,7 +531,7 @@ void __handle_vm_exception (struct vcpu_hw_context *context)
break;
default:
- VM_LOG(LVL_ERR, "Unhandled exception %s (rIP: 0x%"PRIADDR")\n",
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Unhandled exception %s (rIP: 0x%"PRIADDR")\n",
exception_names[context->vmcb->exitcode - 0x40],
context->vmcb->rip);
goto guest_bad_fault;
@@ -543,28 +546,28 @@ void __handle_vm_exception (struct vcpu_hw_context *context)
void __handle_vm_wrmsr (struct vcpu_hw_context *context)
{
- VM_LOG(LVL_INFO, "Unhandled Intercept: msr write.\n");
+ X86_DEBUG_LOG(svm_intercept, LVL_INFO, "Unhandled Intercept: msr write.\n");
if (context->vcpu_emergency_shutdown)
context->vcpu_emergency_shutdown(context);
}
void __handle_popf(struct vcpu_hw_context *context)
{
- VM_LOG(LVL_INFO, "Unhandled Intercept: popf.\n");
+ X86_DEBUG_LOG(svm_intercept, LVL_INFO, "Unhandled Intercept: popf.\n");
if (context->vcpu_emergency_shutdown)
context->vcpu_emergency_shutdown(context);
}
void __handle_vm_vmmcall (struct vcpu_hw_context *context)
{
- VM_LOG(LVL_INFO, "Unhandled Intercept: vmmcall.\n");
+ X86_DEBUG_LOG(svm_intercept, LVL_INFO, "Unhandled Intercept: vmmcall.\n");
if (context->vcpu_emergency_shutdown)
context->vcpu_emergency_shutdown(context);
}
void __handle_vm_iret(struct vcpu_hw_context *context)
{
- VM_LOG(LVL_INFO, "Unhandled Intercept: iret.\n");
+ X86_DEBUG_LOG(svm_intercept, LVL_INFO, "Unhandled Intercept: iret.\n");
return;
}
@@ -576,7 +579,7 @@ void __handle_crN_read(struct vcpu_hw_context *context)
if (context->cpuinfo->decode_assist) {
if (context->vmcb->exitinfo1 & VALID_CRN_TRAP) {
cr_gpr = (context->vmcb->exitinfo1 & 0xf);
- VM_LOG(LVL_DEBUG, "Guest writing 0x%lx to Cr0 from reg"
+ X86_DEBUG_LOG(svm_intercept, LVL_DEBUG, "Guest writing 0x%lx to Cr0 from reg"
"%d.\n",
context->g_regs[cr_gpr], cr_gpr);
}
@@ -586,13 +589,13 @@ void __handle_crN_read(struct vcpu_hw_context *context)
u64 rvalue;
if (guest_read_fault_inst(context, &ins64)) {
- VM_LOG(LVL_ERR, "Failed to read faulting guest "
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Failed to read faulting guest "
"instruction.\n");
goto guest_bad_fault;
}
if (x86_decode_inst(context, ins64, &dinst) != VMM_OK) {
- VM_LOG(LVL_ERR, "Failed to decode instruction.\n");
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Failed to decode instruction.\n");
goto guest_bad_fault;
}
@@ -619,7 +622,7 @@ void __handle_crN_read(struct vcpu_hw_context *context)
break;
default:
- VM_LOG(LVL_ERR,
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR,
"Unknown CR 0x%"PRIx64" read by guest\n",
dinst.inst.crn_mov.src_reg);
goto guest_bad_fault;
@@ -630,10 +633,10 @@ void __handle_crN_read(struct vcpu_hw_context *context)
context->g_regs[dinst.inst.crn_mov.dst_reg] = rvalue;
context->vmcb->rip += dinst.inst_size;
- VM_LOG(LVL_VERBOSE, "GR: CR0= 0x%8lx HCR0= 0x%8lx\n",
+ X86_DEBUG_LOG(svm_intercept, LVL_VERBOSE, "GR: CR0= 0x%8lx HCR0= 0x%8lx\n",
context->g_cr0, context->vmcb->cr0);
} else {
- VM_LOG(LVL_ERR, "Unknown fault inst: 0x%p\n",
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Unknown fault inst: 0x%p\n",
ins64);
goto guest_bad_fault;
}
@@ -659,7 +662,7 @@ void __handle_crN_write(struct vcpu_hw_context *context)
if (context->cpuinfo->decode_assist) {
if (context->vmcb->exitinfo1 & VALID_CRN_TRAP) {
cr_gpr = (context->vmcb->exitinfo1 & 0xf);
- VM_LOG(LVL_DEBUG,
+ X86_DEBUG_LOG(svm_intercept, LVL_DEBUG,
"Guest writing 0x%lx to Cr0 from reg "
"%d.\n",
context->g_regs[cr_gpr], cr_gpr);
@@ -670,12 +673,12 @@ void __handle_crN_write(struct vcpu_hw_context *context)
u32 sreg;
if (guest_read_fault_inst(context, &ins64)) {
- VM_LOG(LVL_ERR, "Failed to read guest instruction.\n");
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Failed to read guest instruction.\n");
goto guest_bad_fault;
}
if (x86_decode_inst(context, ins64, &dinst) != VMM_OK) {
- VM_LOG(LVL_ERR, "Failed to code instruction.\n");
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Failed to code instruction.\n");
goto guest_bad_fault;
}
@@ -705,7 +708,7 @@ void __handle_crN_write(struct vcpu_hw_context *context)
if (bits_set & X86_CR0_PG) {
context->vmcb->cr0 |= X86_CR0_PG;
- VM_LOG(LVL_DEBUG,
+ X86_DEBUG_LOG(svm_intercept, LVL_DEBUG,
"Purging guest shadow page "
"table.\n");
purge_guest_shadow_pagetable(context);
@@ -778,7 +781,7 @@ void __handle_crN_write(struct vcpu_hw_context *context)
flush the shadow pagetable */
if (likely(context->g_cr0
& X86_CR0_PG)) {
- VM_LOG(LVL_DEBUG,
+ X86_DEBUG_LOG(svm_intercept, LVL_DEBUG,
"Purging guest shadow "
"page table.\n");
purge_guest_shadow_pagetable(context);
@@ -793,12 +796,12 @@ void __handle_crN_write(struct vcpu_hw_context *context)
sreg = dinst.inst.crn_mov.src_reg;
context->g_cr4 = context->g_regs[sreg];
}
- VM_LOG(LVL_DEBUG, "Guest wrote 0x%lx to CR4\n",
+ X86_DEBUG_LOG(svm_intercept, LVL_DEBUG, "Guest wrote 0x%lx to CR4\n",
context->g_cr4);
break;
default:
- VM_LOG(LVL_ERR,
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR,
"Write to CR%d not supported.\n",
(int)(dinst.inst.crn_mov.dst_reg - RM_REG_CR0));
goto guest_bad_fault;
@@ -812,7 +815,7 @@ void __handle_crN_write(struct vcpu_hw_context *context)
break;
}
} else {
- VM_LOG(LVL_ERR, "Unknown fault instruction\n");
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Unknown fault instruction\n");
goto guest_bad_fault;
}
@@ -820,10 +823,10 @@ void __handle_crN_write(struct vcpu_hw_context *context)
asm volatile("str %0\n"
:"=r"(htr));
- VM_LOG(LVL_DEBUG,
+ X86_DEBUG_LOG(svm_intercept, LVL_DEBUG,
"GW: CR0= 0x%"PRIx64" HCR0: 0x%"PRIx64"\n",
context->g_cr0, context->vmcb->cr0);
- VM_LOG(LVL_DEBUG,
+ X86_DEBUG_LOG(svm_intercept, LVL_DEBUG,
"TR: 0x%"PRIx64" HTR: 0x%"PRIx64"\n",
*((u64 *)&context->vmcb->tr), htr);
}
@@ -849,9 +852,9 @@ void __handle_ioio(struct vcpu_hw_context *context)
u32 guest_rd = 0;
u32 wval;
- VM_LOG(LVL_VERBOSE, "RIP: 0x%"PRIx64" exitinfo1: 0x%"PRIx64"\n",
+ X86_DEBUG_LOG(svm_intercept, LVL_VERBOSE, "RIP: 0x%"PRIx64" exitinfo1: 0x%"PRIx64"\n",
context->vmcb->rip, context->vmcb->exitinfo1);
- VM_LOG(LVL_VERBOSE,
+ X86_DEBUG_LOG(svm_intercept, LVL_VERBOSE,
"IOPort: 0x%x is accssed for %sput. Size is %d. Segment: %d "
"String operation? %s Repeated access? %s\n",
io_port, (in_inst ? "in" : "out"), op_size,
@@ -871,7 +874,7 @@ void __handle_ioio(struct vcpu_hw_context *context)
context->vmcb->rax = guest_rd;
} else {
if (io_port == 0x80) {
- VM_LOG(LVL_DEBUG, "(0x%"PRIx64") CBDW: 0x%"PRIx64"\n",
+ X86_DEBUG_LOG(svm_intercept, LVL_DEBUG, "(0x%"PRIx64") CBDW: 0x%"PRIx64"\n",
context->vmcb->rip, context->vmcb->rax);
} else {
wval = (u32)context->vmcb->rax;
@@ -945,7 +948,7 @@ void __handle_cpuid(struct vcpu_hw_context *context)
break;
default:
- VM_LOG(LVL_ERR, "GCPUID/R: Func: 0x%"PRIx64"\n",
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "GCPUID/R: Func: 0x%"PRIx64"\n",
context->vmcb->rax);
goto _fail;
}
@@ -973,7 +976,7 @@ void __handle_cpuid(struct vcpu_hw_context *context)
*/
void __handle_triple_fault(struct vcpu_hw_context *context)
{
- VM_LOG(LVL_ERR, "Triple fault in guest: %s!!\n",
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Triple fault in guest: %s!!\n",
context->assoc_vcpu->guest->name);
if (context->vcpu_emergency_shutdown)
@@ -984,7 +987,7 @@ void __handle_triple_fault(struct vcpu_hw_context *context)
void __handle_halt(struct vcpu_hw_context *context)
{
- VM_LOG(LVL_INFO, "\n%s issued a halt instruction. Halting it.\n",
+ X86_DEBUG_LOG(svm_intercept, LVL_INFO, "\n%s issued a halt instruction. Halting it.\n",
context->assoc_vcpu->guest->name);
if (context->vcpu_emergency_shutdown)
@@ -998,12 +1001,12 @@ void __handle_invalpg(struct vcpu_hw_context *context)
x86_decoded_inst_t dinst;
if (guest_read_fault_inst(context, &ins64)) {
- VM_LOG(LVL_ERR, "Failed to read guest instruction.\n");
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Failed to read guest instruction.\n");
goto guest_bad_fault;
}
if (x86_decode_inst(context, ins64, &dinst) != VMM_OK) {
- VM_LOG(LVL_ERR, "Failed to code instruction.\n");
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "Failed to code instruction.\n");
goto guest_bad_fault;
}
@@ -1026,7 +1029,7 @@ void __handle_invalpg(struct vcpu_hw_context *context)
void handle_vcpuexit(struct vcpu_hw_context *context)
{
- VM_LOG(LVL_VERBOSE, "**** #VMEXIT - exit code: %x\n",
+ X86_DEBUG_LOG(svm_intercept, LVL_VERBOSE, "**** #VMEXIT - exit code: %x\n",
(u32) context->vmcb->exitcode);
switch (context->vmcb->exitcode) {
@@ -1099,7 +1102,7 @@ void handle_vcpuexit(struct vcpu_hw_context *context)
break;
default:
- VM_LOG(LVL_ERR, "#VMEXIT: Unhandled exit code: (0x%x:%d)\n",
+ X86_DEBUG_LOG(svm_intercept, LVL_ERR, "#VMEXIT: Unhandled exit code: (0x%x:%d)\n",
(u32)context->vmcb->exitcode,
(u32)context->vmcb->exitcode);
if (context->vcpu_emergency_shutdown)
diff --git a/arch/x86/cpu/common/vm/svm/svm.c b/arch/x86/cpu/common/vm/svm/svm.c
index cb4315ea..b0014d2b 100644
--- a/arch/x86/cpu/common/vm/svm/svm.c
+++ b/arch/x86/cpu/common/vm/svm/svm.c
@@ -35,6 +35,9 @@
#include <vm/svm_intercept.h>
#include <emu/i8259.h>
#include <arch_guest_helper.h>
+#include <x86_debug_log.h>
+
+DEFINE_X86_DEBUG_LOG_SUBSYS_LEVEL(svm, X86_DEBUG_LOG_LVL_INFO);
enum svm_init_mode {
SVM_MODE_REAL,
@@ -396,12 +399,12 @@ static int enable_svm (struct cpuinfo_x86 *c)
u64 phys_hsa;
if (!c->hw_virt_available) {
- VM_LOG(LVL_ERR, "ERROR: Hardware virtualization is not support but Xvisor needs it.\n");
+ X86_DEBUG_LOG(svm, LVL_ERR, "ERROR: Hardware virtualization is not support but Xvisor needs it.\n");
return VMM_EFAIL;
}
if (!c->hw_nested_paging)
- VM_LOG(LVL_INFO, "Nested pagetables are not supported.\n"
+ X86_DEBUG_LOG(svm, LVL_INFO, "Nested pagetables are not supported.\n"
"Enabling software walking of page tables.\n");
/*
@@ -409,19 +412,19 @@ static int enable_svm (struct cpuinfo_x86 *c)
*/
enable_svme();
- VM_LOG(LVL_VERBOSE, "Allocating host save area.\n");
+ X86_DEBUG_LOG(svm, LVL_VERBOSE, "Allocating host save area.\n");
/* Initialize the Host Save Area */
host_save_area = alloc_host_save_area();
if (vmm_host_va2pa(host_save_area, (physical_addr_t *)&phys_hsa) != VMM_OK) {
- VM_LOG(LVL_ERR, "Host va2pa for host save area failed.\n");
+ X86_DEBUG_LOG(svm, LVL_ERR, "Host va2pa for host save area failed.\n");
return VMM_EFAIL;
}
- VM_LOG(LVL_VERBOSE, "Write HSAVE PA.\n");
+ X86_DEBUG_LOG(svm, LVL_VERBOSE, "Write HSAVE PA.\n");
cpu_write_msr(MSR_K8_VM_HSAVE_PA, phys_hsa);
- VM_LOG(LVL_VERBOSE, "All fine.\n");
+ X86_DEBUG_LOG(svm, LVL_VERBOSE, "All fine.\n");
return VMM_OK;
}
@@ -440,7 +443,7 @@ int amd_setup_vm_control(struct vcpu_hw_context *context)
if (context->icept_table.io_table_phys)
context->vmcb->iopm_base_pa = context->icept_table.io_table_phys;
- VM_LOG(LVL_INFO, "IOPM Base physical address: 0x%lx\n", context->vmcb->iopm_base_pa);
+ X86_DEBUG_LOG(svm, LVL_INFO, "IOPM Base physical address: 0x%lx\n", context->vmcb->iopm_base_pa);
/*
* FIXME: VM: What state to load should come from VMCB.
@@ -463,11 +466,11 @@ int amd_init(struct cpuinfo_x86 *cpuinfo)
{
/* FIXME: SMP: This should be done by all CPUs? */
if (enable_svm (cpuinfo) != VMM_OK) {
- VM_LOG(LVL_ERR, "ERROR: Failed to enable virtual machine.\n");
+ X86_DEBUG_LOG(svm, LVL_ERR, "ERROR: Failed to enable virtual machine.\n");
return VMM_EFAIL;
}
- VM_LOG(LVL_VERBOSE, "AMD SVM enable success!\n");
+ X86_DEBUG_LOG(svm, LVL_VERBOSE, "AMD SVM enable success!\n");
return VMM_OK;
}
diff --git a/arch/x86/cpu/common/vm/vm.c b/arch/x86/cpu/common/vm/vm.c
index 7ba12c75..7f0c4a8f 100644
--- a/arch/x86/cpu/common/vm/vm.c
+++ b/arch/x86/cpu/common/vm/vm.c
@@ -35,8 +35,9 @@
#include <vm/svm_intercept.h>
#include <vm/vmx.h>
#include <vm/vmx_intercept.h>
+#include <x86_debug_log.h>
-int vm_default_log_lvl = VM_LOG_LVL_INFO;
+DEFINE_X86_DEBUG_LOG_SUBSYS_LEVEL(x86_vm_helper, X86_DEBUG_LOG_LVL_INFO);
physical_addr_t cpu_create_vcpu_intercept_table(size_t size, virtual_addr_t *tbl_vaddr)
{
@@ -64,28 +65,28 @@ void cpu_disable_vcpu_intercept(struct vcpu_hw_context *context, int flags)
{
/* disable taskswitch interception */
if (flags & USER_ITC_TASKSWITCH) {
- VM_LOG(LVL_INFO, "Disable taskswitch interception\n");
+ X86_DEBUG_LOG(x86_vm_helper, LVL_INFO, "Disable taskswitch interception\n");
context->vmcb->cr_intercepts &= ~INTRCPT_WRITE_CR3;
}
if (flags & USER_ITC_SWINT) {
- VM_LOG(LVL_INFO, "Disable software interrupt interception\n");
+ X86_DEBUG_LOG(x86_vm_helper, LVL_INFO, "Disable software interrupt interception\n");
context->vmcb->general1_intercepts &= ~INTRCPT_INTN;
}
if (flags & USER_ITC_IRET) {
- VM_LOG(LVL_INFO, "Enable software interrupt interception\n");
+ X86_DEBUG_LOG(x86_vm_helper, LVL_INFO, "Enable software interrupt interception\n");
context->vmcb->general1_intercepts &= ~INTRCPT_IRET;
}
if (flags & USER_ITC_SYSCALL) {
- VM_LOG(LVL_INFO, "Disable syscall interception\n");
+ X86_DEBUG_LOG(x86_vm_helper, LVL_INFO, "Disable syscall interception\n");
context->vmcb->general1_intercepts &= ~INTRCPT_INTN;
}
/* disable single stepping */
if (flags & USER_SINGLE_STEPPING) {
- VM_LOG(LVL_INFO, "Disable single stepping\n");
+ X86_DEBUG_LOG(x86_vm_helper, LVL_INFO, "Disable single stepping\n");
context->vmcb->rflags &= ~X86_EFLAGS_TF;
context->vmcb->exception_intercepts &= ~INTRCPT_EXC_DB;
}
@@ -95,18 +96,18 @@ void cpu_enable_vcpu_intercept(struct vcpu_hw_context *context, int flags)
{
/* enable taskswitch interception */
if (flags & USER_ITC_TASKSWITCH) {
- VM_LOG(LVL_INFO, "Enable taskswitch interception\n");
+ X86_DEBUG_LOG(x86_vm_helper, LVL_INFO, "Enable taskswitch interception\n");
context->vmcb->cr_intercepts |= INTRCPT_WRITE_CR3;
}
/* enable software interrupt interception */
if (flags & USER_ITC_SWINT) {
- VM_LOG(LVL_INFO, "Enable software interrupt interception\n");
+ X86_DEBUG_LOG(x86_vm_helper, LVL_INFO, "Enable software interrupt interception\n");
context->vmcb->general1_intercepts |= INTRCPT_INTN;
}
if (flags & USER_ITC_IRET) {
- VM_LOG(LVL_INFO, "Enable software interrupt interception\n");
+ X86_DEBUG_LOG(x86_vm_helper, LVL_INFO, "Enable software interrupt interception\n");
context->vmcb->general1_intercepts |= INTRCPT_IRET;
}
}
@@ -139,7 +140,7 @@ int cpu_init_vcpu_hw_context(struct cpuinfo_x86 *cpuinfo,
context->shadow_pgt = mmu_pgtbl_alloc(&host_pgtbl_ctl, PGTBL_STAGE_2);
if (!context->shadow_pgt) {
- VM_LOG(LVL_DEBUG, "ERROR: Failed to allocate shadow page table for vcpu.\n");
+ X86_DEBUG_LOG(x86_vm_helper, LVL_DEBUG, "ERROR: Failed to allocate shadow page table for vcpu.\n");
goto _error;
}
@@ -147,7 +148,7 @@ int cpu_init_vcpu_hw_context(struct cpuinfo_x86 *cpuinfo,
VMM_MEMORY_FLAGS_NORMAL);
if (!context->shadow32_pg_list) {
- VM_LOG(LVL_ERR, "ERROR: Failed to allocated 32bit/paged real mode shadow table.\n");
+ X86_DEBUG_LOG(x86_vm_helper, LVL_ERR, "ERROR: Failed to allocated 32bit/paged real mode shadow table.\n");
goto _error;
}
@@ -164,7 +165,7 @@ int cpu_init_vcpu_hw_context(struct cpuinfo_x86 *cpuinfo,
cpu_create_vcpu_intercept_table(IO_INTCPT_TBL_SZ,
&context->icept_table.io_table_virt);
if (!context->icept_table.io_table_phys) {
- VM_LOG(LVL_ERR, "ERROR: Failed to create I/O intercept table\n");
+ X86_DEBUG_LOG(x86_vm_helper, LVL_ERR, "ERROR: Failed to create I/O intercept table\n");
goto _error;
}
@@ -172,27 +173,27 @@ int cpu_init_vcpu_hw_context(struct cpuinfo_x86 *cpuinfo,
cpu_create_vcpu_intercept_table(MSR_INTCPT_TBL_SZ,
&context->icept_table.msr_table_virt);
if (!context->icept_table.msr_table_phys) {
- VM_LOG(LVL_ERR, "ERROR: Failed to create MSR intercept table for vcpu.\n");
+ X86_DEBUG_LOG(x86_vm_helper, LVL_ERR, "ERROR: Failed to create MSR intercept table for vcpu.\n");
goto _error;
}
switch (cpuinfo->vendor) {
case x86_VENDOR_AMD:
if((ret = amd_setup_vm_control(context)) != VMM_OK) {
- VM_LOG(LVL_ERR, "ERROR: Failed to setup VM control.\n");
+ X86_DEBUG_LOG(x86_vm_helper, LVL_ERR, "ERROR: Failed to setup VM control.\n");
goto _error;
}
break;
case x86_VENDOR_INTEL:
if ((ret = intel_setup_vm_control(context)) != VMM_OK) {
- VM_LOG(LVL_ERR, "ERROR: Failed to setup vm control.\n");
+ X86_DEBUG_LOG(x86_vm_helper, LVL_ERR, "ERROR: Failed to setup vm control.\n");
goto _error;
}
break;
default:
- VM_LOG(LVL_ERR, "ERROR: Invalid vendor %d\n", cpuinfo->vendor);
+ X86_DEBUG_LOG(x86_vm_helper, LVL_ERR, "ERROR: Invalid vendor %d\n", cpuinfo->vendor);
goto _error;
break;
}
@@ -224,12 +225,12 @@ int cpu_enable_vm_extensions(struct cpuinfo_x86 *cpuinfo)
switch (cpuinfo->vendor) {
case x86_VENDOR_AMD:
- VM_LOG(LVL_VERBOSE, "Initializing SVM on AMD.\n");
+ X86_DEBUG_LOG(x86_vm_helper, LVL_VERBOSE, "Initializing SVM on AMD.\n");
ret = amd_init(cpuinfo);
break;
case x86_VENDOR_INTEL:
- VM_LOG(LVL_VERBOSE, "Initializing VMX on Intel.\n");
+ X86_DEBUG_LOG(x86_vm_helper, LVL_VERBOSE, "Initializing VMX on Intel.\n");
ret = intel_init(cpuinfo);
break;
diff --git a/arch/x86/cpu/common/vm/vtx/ept.c b/arch/x86/cpu/common/vm/vtx/ept.c
index dc412177..5c142f88 100644
--- a/arch/x86/cpu/common/vm/vtx/ept.c
+++ b/arch/x86/cpu/common/vm/vtx/ept.c
@@ -34,9 +34,10 @@
#include <vmm_devemu.h>
#include <vmm_manager.h>
#include <vmm_main.h>
+#include <x86_debug_log.h>
#include <vm/ept.h>
-int ept_default_log_lvl = EPT_LOG_LVL_DEBUG;
+DEFINE_X86_DEBUG_LOG_SUBSYS_LEVEL(ept, X86_DEBUG_LOG_LVL_INFO);
#define TRACE_EPT 1
@@ -126,20 +127,20 @@ invalidate_ept (int type, struct invept_desc *desc)
if (likely(cpu_has_vmx_invept)) {
/* most modern CPUs will have this */
if (unlikely(type == INVEPT_ALL_CONTEXT
- && !cpu_has_vmx_ept_invept_all_context)) {
- EPT_LOG(LVL_DEBUG, "EPT all context flush not supported\n");
- return;
- }
+ && !cpu_has_vmx_ept_invept_all_context)) {
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "EPT all context flush not supported\n");
+ return;
+ }
if (unlikely(type == INVEPT_SINGLE_CONTEXT
&& !cpu_has_vmx_ept_invept_single_context)) {
- EPT_LOG(LVL_DEBUG, "EPT single context flush not supported\n");
- return;
- }
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "EPT single context flush not supported\n");
+ return;
+ }
asm volatile("invept (%0), %1\n\t"
- ::"D"(type), "S"(desc)
- :"memory", "cc");
+ ::"D"(type), "S"(desc)
+ :"memory", "cc");
} else {
- EPT_LOG(LVL_DEBUG, "INVEPT instruction is not supported by CPU\n");
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "INVEPT instruction is not supported by CPU\n");
}
}
@@ -151,7 +152,7 @@ enum ept_level {
};
static void decode_ept_entry(enum ept_level level, void *entry,
- physical_addr_t *paddr, u32 *pg_prot)
+ physical_addr_t *paddr, u32 *pg_prot)
{
ept_pml4e_t *pml4e;
ept_pdpte_t *pdpte;
@@ -205,7 +206,7 @@ int ept_create_pte_map(struct vcpu_hw_context *context,
int rc = 0;
struct invept_desc id;
- EPT_LOG(LVL_DEBUG, "pml4: 0x%"PRIx32" pdpt: 0x%"PRIx32" pd: 0x%"PRIx32" pt: 0x%"PRIx32"\n",
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "pml4: 0x%"PRIx32" pdpt: 0x%"PRIx32" pd: 0x%"PRIx32" pt: 0x%"PRIx32"\n",
pml4_index, pdpt_index, pd_index, pt_index);
add_ept_trace_point(gphys, hphys, pml4_index, pdpt_index, pd_index, pt_index, pg_size, pg_prot);
@@ -218,29 +219,29 @@ int ept_create_pte_map(struct vcpu_hw_context *context,
pml4e->val |= pg_prot;
virt = get_free_page_for_pagemap(context, &phys);
if (!virt) {
- EPT_LOG(LVL_ERR, "System is out of guest page table memory\n");
+ X86_DEBUG_LOG(ept, LVL_ERR, "System is out of guest page table memory\n");
rc = VMM_ENOMEM;
goto _done;
}
- EPT_LOG(LVL_DEBUG, "New PDPT Page at 0x%"PRIx64" (Phys: 0x%"PRIx64") for PML4 Index %d.\n",
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "New PDPT Page at 0x%"PRIx64" (Phys: 0x%"PRIx64") for PML4 Index %d.\n",
virt, phys, pml4_index);
memset((void *)virt, 0, PAGE_SIZE);
pml4e->bits.pdpt_base = EPT_PHYS_4KB_PFN(phys);
} else {
if (vmm_host_pa2va(e_phys, &virt) != VMM_OK) {
- EPT_LOG(LVL_ERR, "Couldn't map PDPTE physical 0x%"PRIx64" to virtual\n",
+ X86_DEBUG_LOG(ept, LVL_ERR, "Couldn't map PDPTE physical 0x%"PRIx64" to virtual\n",
e_phys);
rc = VMM_ENOENT;
goto _done;
}
- EPT_LOG(LVL_DEBUG, "Found PDPT Page at 0x%"PRIx64" (phys: 0x%"PRIx64") for PML4 Index: %d\n",
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "Found PDPT Page at 0x%"PRIx64" (phys: 0x%"PRIx64") for PML4 Index: %d\n",
virt, e_phys, pml4_index);
}
- EPT_LOG(LVL_DEBUG, "%s: PML4E: 0x%"PRIx64"\n", __func__, pml4e->val);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "%s: PML4E: 0x%"PRIx64"\n", __func__, pml4e->val);
phys = e_phys = e_pg_prot = 0;
pdpte = (ept_pdpte_t *)(&((u64 *)virt)[pdpt_index]);
- EPT_LOG(LVL_DEBUG, "%s: PDPTE: 0x%"PRIx64" (PDPT Index: %d)\n", __func__, pdpte->val, pdpt_index);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "%s: PDPTE: 0x%"PRIx64" (PDPT Index: %d)\n", __func__, pdpte->val, pdpt_index);
decode_ept_entry(EPT_LEVEL_PDPTE, (void *)pdpte, &e_phys, &e_pg_prot);
/*
@@ -249,12 +250,12 @@ int ept_create_pte_map(struct vcpu_hw_context *context,
* up the other one.
*/
if (pdpte->pe.is_page) {
- EPT_LOG(LVL_DEBUG, "PDPTE is page\n");
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "PDPTE is page\n");
/* this is marked as 1GB page and new mapping wants otherwise
* then its a problem. Caller didn't free this mapping prior
* to calling this function */
if (pg_size != EPT_PAGE_SIZE_1G) {
- EPT_LOG(LVL_DEBUG, "New page size is not 1G (0x%"PRIx64"). Delete existing entry first.\n", pg_size);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "New page size is not 1G (0x%"PRIx64"). Delete existing entry first.\n", pg_size);
rc = VMM_EBUSY;
goto _done;
}
@@ -279,7 +280,7 @@ int ept_create_pte_map(struct vcpu_hw_context *context,
}
if (pg_size == EPT_PAGE_SIZE_1G) {
- EPT_LOG(LVL_DEBUG, "Creating map of 1G page at pdpt index: %d\n", pdpt_index);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "Creating map of 1G page at pdpt index: %d\n", pdpt_index);
pdpte->val = 0;
pdpte->val &= EPT_PROT_MASK;
pdpte->val |= pg_prot;
@@ -287,18 +288,18 @@ int ept_create_pte_map(struct vcpu_hw_context *context,
pdpte->pe.mt = 6; /* write-back memory type */
pdpte->pe.ign_pat = 1; /* ignore PAT type */
pdpte->pe.is_page = 1;
- EPT_LOG(LVL_DEBUG, "New PDPT Entry: 0x%"PRIx64"\n", pdpte->val);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "New PDPT Entry: 0x%"PRIx64"\n", pdpte->val);
rc = VMM_OK;
/* new entry. Invalidate EPT */
goto _invalidate_ept;
} else { /* not a 1G page */
- EPT_LOG(LVL_DEBUG, "PDPTE doesn't point to 1G page. Looking for PDE\n");
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "PDPTE doesn't point to 1G page. Looking for PDE\n");
if (!e_pg_prot) { /* if the page is not currently set */
- EPT_LOG(LVL_DEBUG, "PDE page protection not set. Creating new one\n");
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "PDE page protection not set. Creating new one\n");
virt = get_free_page_for_pagemap(context, &phys);
/* allocate a new PDPTE page */
if (!virt) {
- EPT_LOG(LVL_ERR, "System is out of guest page table memory\n");
+ X86_DEBUG_LOG(ept, LVL_ERR, "System is out of guest page table memory\n");
rc = VMM_ENOMEM;
goto _done;
}
@@ -307,53 +308,53 @@ int ept_create_pte_map(struct vcpu_hw_context *context,
pdpte->te.pd_base = EPT_PHYS_4KB_PFN(phys);
pdpte->val &= EPT_PROT_MASK;
pdpte->val |= pg_prot;
- EPT_LOG(LVL_DEBUG, "New PD Page at 0x%"PRIx64" (Phys: 0x%"PRIx64")\n", virt, phys);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "New PD Page at 0x%"PRIx64" (Phys: 0x%"PRIx64")\n", virt, phys);
} else { /* page is already allocated, a mapping in locality exists */
if (vmm_host_pa2va(e_phys, &virt) != VMM_OK) {
- EPT_LOG(LVL_ERR, "Couldn't map PDE physical 0x%"PRIx64" to virtual\n",
+ X86_DEBUG_LOG(ept, LVL_ERR, "Couldn't map PDE physical 0x%"PRIx64" to virtual\n",
e_phys);
rc = VMM_ENOENT;
goto _done;
}
- EPT_LOG(LVL_DEBUG, "Found PDE at virtual address 0x%"PRIx64"\n", virt);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "Found PDE at virtual address 0x%"PRIx64"\n", virt);
}
}
- EPT_LOG(LVL_DEBUG, "%s: PDPTE: 0x%"PRIx64"\n", __func__, pdpte->val);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "%s: PDPTE: 0x%"PRIx64"\n", __func__, pdpte->val);
phys = e_phys = e_pg_prot = 0;
pde = (ept_pde_t *)(&((u64 *)virt)[pd_index]);
- EPT_LOG(LVL_DEBUG, "PDPTE Entry at index %d = 0x%"PRIx64"\n", pd_index, pde->val);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "PDPTE Entry at index %d = 0x%"PRIx64"\n", pd_index, pde->val);
decode_ept_entry(EPT_LEVEL_PDE, (void *)pde, &e_phys, &e_pg_prot);
if (pde->pe.is_page) {
- EPT_LOG(LVL_DEBUG, "PDE is a 2MB Page!\n");
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "PDE is a 2MB Page!\n");
/* this is marked as 1GB page and new mapping wants otherwise
* then its a problem. Caller didn't free this mapping prior
* to calling this function */
if (pg_size != EPT_PAGE_SIZE_2M) {
- EPT_LOG(LVL_DEBUG, "New page is not 2M. Delete previous entry first.\n");
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "New page is not 2M. Delete previous entry first.\n");
rc = VMM_EBUSY;
goto _done;
}
/* caller is trying to create same mapping? */
if (e_phys == hphys) {
- EPT_LOG(LVL_DEBUG, "Found same physical addres at pd index: %d\n", pd_index);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "Found same physical address at pd index: %d\n", pd_index);
if (pg_prot != e_pg_prot) {
- EPT_LOG(LVL_DEBUG, "PG prot are not same. Old: 0x%"PRIx32" New: 0x%"PRIx32"\n",
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "PG prot are not same. Old: 0x%"PRIx32" New: 0x%"PRIx32"\n",
e_pg_prot, pg_prot);
pde->val |= pg_prot;
rc = VMM_OK;
/* pgprot changed, invalidate ept */
goto _invalidate_ept;
} else {
- EPT_LOG(LVL_DEBUG, "No change in page table entry.\n");
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "No change in page table entry.\n");
/* no change, same as existing mapping */
rc = VMM_OK;
goto _done;
}
} else {
- EPT_LOG(LVL_DEBUG, "pd index %d is busy. Val: 0x%"PRIx64"\n", pd_index, pde->val);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "pd index %d is busy. Val: 0x%"PRIx64"\n", pd_index, pde->val);
/* existing physical is not same as new one. flag as error.
* caller should have unmapped this mapping first */
rc = VMM_EBUSY;
@@ -363,7 +364,7 @@ int ept_create_pte_map(struct vcpu_hw_context *context,
/* not a 2MB page, is caller trying to create a 2MB page? */
if (pg_size == EPT_PAGE_SIZE_2M) {
- EPT_LOG(LVL_DEBUG, "Ask is to create 2MB page\n");
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "Ask is to create 2MB page\n");
pdpte->val = 0;
pdpte->val &= EPT_PROT_MASK;
pdpte->val |= pg_prot;
@@ -371,17 +372,17 @@ int ept_create_pte_map(struct vcpu_hw_context *context,
pdpte->pe.mt = 6; /* write-back memory type */
pdpte->pe.ign_pat = 1; /* ignore PAT type */
pdpte->pe.is_page = 1;
- EPT_LOG(LVL_DEBUG, "New 2MB page. PDE Value: 0x%"PRIx64" at index: %d.\n", pdpte->val, pd_index);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "New 2MB page. PDE Value: 0x%"PRIx64" at index: %d.\n", pdpte->val, pd_index);
rc = VMM_OK;
goto _invalidate_ept;
} else {
/* Ok. So this is PDE. Lets find PTE now. */
if (!e_pg_prot) { /* page for PTE is not currently set */
- EPT_LOG(LVL_DEBUG, "Page protection bits not set in PTE page. Creating new one.\n");
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "Page protection bits not set in PTE page. Creating new one.\n");
virt = get_free_page_for_pagemap(context, &phys);
/* allocate a new PTE page */
if (!virt) {
- EPT_LOG(LVL_ERR, "System is out of guest page table memory\n");
+ X86_DEBUG_LOG(ept, LVL_ERR, "System is out of guest page table memory\n");
rc = VMM_ENOMEM;
goto _done;
}
@@ -390,30 +391,30 @@ int ept_create_pte_map(struct vcpu_hw_context *context,
pde->te.pt_base = EPT_PHYS_4KB_PFN(phys);
pde->val &= EPT_PROT_MASK;
pde->val |= pg_prot;
- EPT_LOG(LVL_DEBUG, "New PT page at 0x%"PRIx64" (Phys: 0x%"PRIx64")\n",
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "New PT page at 0x%"PRIx64" (Phys: 0x%"PRIx64")\n",
virt, phys);
} else { /* page is already allocated, a mapping in locality exists */
if (vmm_host_pa2va(e_phys, &virt) != VMM_OK) {
- EPT_LOG(LVL_ERR, "Couldn't map PDE physical 0x%"PRIx64" to virtual\n",
+ X86_DEBUG_LOG(ept, LVL_ERR, "Couldn't map PDE physical 0x%"PRIx64" to virtual\n",
e_phys);
rc = VMM_ENOENT;
goto _done;
}
- EPT_LOG(LVL_DEBUG, "Found PT at virt 0x%"PRIx64"\n", virt);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "Found PT at virt 0x%"PRIx64"\n", virt);
}
}
- EPT_LOG(LVL_DEBUG, "%s: PDE: 0x%"PRIx64"\n", __func__, pde->val);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "%s: PDE: 0x%"PRIx64"\n", __func__, pde->val);
e_phys = e_pg_prot = 0;
pte = (ept_pte_t *)(&((u64 *)virt)[pt_index]);
- EPT_LOG(LVL_DEBUG, "PT Entry 0x%"PRIx64" at index: %d\n", pte->val, pt_index);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "PT Entry 0x%"PRIx64" at index: %d\n", pte->val, pt_index);
decode_ept_entry(EPT_LEVEL_PTE, (void *)pte, &e_phys, &e_pg_prot);
if (e_pg_prot) { /* mapping exists */
- EPT_LOG(LVL_DEBUG, "Page mapping exists: current pgprot: 0x%"PRIx32"\n", e_pg_prot);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "Page mapping exists: current pgprot: 0x%"PRIx32"\n", e_pg_prot);
if (e_phys == hphys) {
- EPT_LOG(LVL_DEBUG, "Existing physical and asked are same. (e_phys: 0x%"PRIx64" h_phys: 0x%"PRIx64")\n", e_phys, hphys);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "Existing physical and asked are same. (e_phys: 0x%"PRIx64" h_phys: 0x%"PRIx64")\n", e_phys, hphys);
if (e_pg_prot == pg_prot) { /* same mapping */
- EPT_LOG(LVL_DEBUG, "Same PG prot: old: 0x%"PRIx32" new: 0x%"PRIx32"\n", e_pg_prot, pg_prot);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "Same PG prot: old: 0x%"PRIx32" new: 0x%"PRIx32"\n", e_pg_prot, pg_prot);
rc = VMM_OK;
goto _done; /* no change */
}
@@ -422,25 +423,25 @@ int ept_create_pte_map(struct vcpu_hw_context *context,
pte->val |= pg_prot;
goto _invalidate_ept;
} else {
- EPT_LOG(LVL_DEBUG, "Existing PTE entry found at index: %d but with phys: 0x%"PRIx64" (new: 0x%"PRIx64")\n",
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "Existing PTE entry found at index: %d but with phys: 0x%"PRIx64" (new: 0x%"PRIx64")\n",
pt_index, e_phys, hphys);
rc = VMM_EBUSY;
goto _done;
}
} else {
- EPT_LOG(LVL_DEBUG, "No page protection bits set in PTE. Creating new one\n");
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "No page protection bits set in PTE. Creating new one\n");
pte->val = 0;
pte->val &= EPT_PROT_MASK;
pte->val |= pg_prot;
pte->pe.mt = 6;
pte->pe.phys = EPT_PHYS_4KB_PFN(hphys);
rc = VMM_OK;
- EPT_LOG(LVL_DEBUG, "%s: PTE: 0x%"PRIx64" at index %d\n", __func__, pte->val, pt_index);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "%s: PTE: 0x%"PRIx64" at index %d\n", __func__, pte->val, pt_index);
goto _invalidate_ept;
}
_invalidate_ept:
- EPT_LOG(LVL_DEBUG, "Invalidating EPT\n");
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "Invalidating EPT\n");
id.eptp = context->eptp;
invalidate_ept(INVEPT_SINGLE_CONTEXT, &id);
@@ -454,11 +455,11 @@ int setup_ept(struct vcpu_hw_context *context)
eptp_t *eptp = (eptp_t *)&context->eptp;
virtual_addr_t pml4 = get_free_page_for_pagemap(context, &pml4_phys);
- EPT_LOG(LVL_DEBUG, "%s: PML4 vaddr: 0x%016lx paddr: 0x%016lx\n",
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "%s: PML4 vaddr: 0x%016lx paddr: 0x%016lx\n",
__func__, pml4, pml4_phys);
if (!pml4) {
- EPT_LOG(LVL_ERR, "%s: Failed to allocate EPT page\n", __func__);
+ X86_DEBUG_LOG(ept, LVL_ERR, "%s: Failed to allocate EPT page\n", __func__);
return VMM_ENOMEM;
}
@@ -478,7 +479,7 @@ int setup_ept(struct vcpu_hw_context *context)
eptp->bits.en_ad = 0;
eptp->bits.pml4 = EPT_PHYS_4KB_PFN(pml4_phys);
- EPT_LOG(LVL_DEBUG, "%s: EPTP: 0x%16lx (0x%16lx)\n", __func__, eptp->val, context->eptp);
+ X86_DEBUG_LOG(ept, LVL_DEBUG, "%s: EPTP: 0x%16lx (0x%16lx)\n", __func__, eptp->val, context->eptp);
context->n_cr3 = pml4;
diff --git a/arch/x86/cpu/common/vm/vtx/intercept.c b/arch/x86/cpu/common/vm/vtx/intercept.c
index 1bb86b66..88f9b733 100644
--- a/arch/x86/cpu/common/vm/vtx/intercept.c
+++ b/arch/x86/cpu/common/vm/vtx/intercept.c
@@ -38,6 +38,9 @@
#include <vm/vmx.h>
#include <vm/ept.h>
#include <vm/vmx_intercept.h>
+#include <x86_debug_log.h>
+
+DEFINE_X86_DEBUG_LOG_SUBSYS_LEVEL(vtx_intercept, X86_DEBUG_LOG_LVL_INFO);
extern u64 vmx_cr0_fixed0;
extern u64 vmx_cr0_fixed1;
@@ -95,14 +98,14 @@ int vmx_handle_guest_realmode_page_fault(struct vcpu_hw_context *context)
u8 is_reset = 0;
physical_addr_t gla = vmr(GUEST_LINEAR_ADDRESS);
- VM_LOG(LVL_DEBUG, "[Real Mode] Guest Linear Address: 0x%"PRIx64"\n", gla);
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "[Real Mode] Guest Linear Address: 0x%"PRIx64"\n", gla);
if (gla == 0xFFF0) {
is_reset = 1;
/* effective address = segment selector * 16 + offset.
* we will have a region for effective address. */
gla = ((0xF000 << 4) | gla);
}
- VM_LOG(LVL_DEBUG, "[Real Mode] Faulting Address: 0x%"PRIx64"\n", gla);
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "[Real Mode] Faulting Address: 0x%"PRIx64"\n", gla);
/*
* At reset, the offset of execution is 0xfff0.
@@ -110,19 +113,19 @@ int vmx_handle_guest_realmode_page_fault(struct vcpu_hw_context *context)
rc = vmm_guest_physical_map(guest, (gla & PAGE_MASK),
PAGE_SIZE, &hphys_addr, &availsz, &flags);
if (rc) {
- VM_LOG(LVL_ERR, "ERROR: No region mapped to guest physical 0x%"PRIx64"\n", gla);
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "ERROR: No region mapped to guest physical 0x%"PRIx64"\n", gla);
goto guest_bad_fault;
}
if (availsz < PAGE_SIZE) {
- VM_LOG(LVL_ERR, "ERROR: Size of the available mapping less "
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "ERROR: Size of the available mapping less "
"than page size (%lu)\n", availsz);
rc = VMM_EFAIL;
goto guest_bad_fault;
}
if (flags & (VMM_REGION_REAL | VMM_REGION_ALIAS)) {
- VM_LOG(LVL_DEBUG, "GP: 0x%"PRIx64" HP: 0x%"PRIx64" Size: %lu\n",
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "GP: 0x%"PRIx64" HP: 0x%"PRIx64" Size: %lu\n",
gla, hphys_addr, availsz);
gla &= PAGE_MASK;
@@ -131,12 +134,12 @@ int vmx_handle_guest_realmode_page_fault(struct vcpu_hw_context *context)
gla &= 0xFFFFUL;
hphys_addr &= PAGE_MASK;
- VM_LOG(LVL_DEBUG, "Handle Page Fault: gphys: 0x%"PRIx64" hphys: 0x%"PRIx64"\n",
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "Handle Page Fault: gphys: 0x%"PRIx64" hphys: 0x%"PRIx64"\n",
gla, hphys_addr);
rc = ept_create_pte_map(context, gla, hphys_addr, PAGE_SIZE,
(EPT_PROT_READ | EPT_PROT_WRITE | EPT_PROT_EXEC_S));
- VM_LOG(LVL_DEBUG, "ept_create_pte_map returned with %d\n", rc);
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "ept_create_pte_map returned with %d\n", rc);
} else
rc = VMM_EFAIL;
@@ -155,25 +158,25 @@ int vmx_handle_guest_protected_mode_page_fault(struct vcpu_hw_context *context)
fault_gphys = vmr(GUEST_LINEAR_ADDRESS);
- VM_LOG(LVL_DEBUG, "(Protected Mode) Looking for map from guest address: 0x%08lx\n",
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "(Protected Mode) Looking for map from guest address: 0x%08lx\n",
(fault_gphys & PAGE_MASK));
rc = vmm_guest_physical_map(guest, (fault_gphys & PAGE_MASK),
PAGE_SIZE, &hphys_addr, &availsz, &flags);
if (rc) {
- VM_LOG(LVL_ERR, "ERROR: No region mapped to guest physical 0x%"PRIx64"\n", fault_gphys);
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "ERROR: No region mapped to guest physical 0x%"PRIx64"\n", fault_gphys);
return VMM_EFAIL;
}
if (availsz < PAGE_SIZE) {
- VM_LOG(LVL_ERR, "ERROR: Size of the available mapping less than page size (%lu)\n", availsz);
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "ERROR: Size of the available mapping less than page size (%lu)\n", availsz);
return VMM_EFAIL;
}
fault_gphys &= PAGE_MASK;
hphys_addr &= PAGE_MASK;
- VM_LOG(LVL_DEBUG, "GP: 0x%"PRIx64" HP: 0x%"PRIx64" Size: %lu\n", fault_gphys, hphys_addr, availsz);
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "GP: 0x%"PRIx64" HP: 0x%"PRIx64" Size: %lu\n", fault_gphys, hphys_addr, availsz);
return ept_create_pte_map(context, fault_gphys, hphys_addr, PAGE_SIZE,
(EPT_PROT_READ | EPT_PROT_WRITE | EPT_PROT_EXEC_S));
@@ -204,7 +207,7 @@ void vmx_handle_cpuid(struct vcpu_hw_context *context)
context->g_regs[GUEST_REGS_RBX] = func->resp_ebx;
context->g_regs[GUEST_REGS_RCX] = func->resp_ecx;
context->g_regs[GUEST_REGS_RDX] = func->resp_edx;
- VM_LOG(LVL_DEBUG, "RAX: 0x%"PRIx32" RBX: 0x%"PRIx32" RCX: 0x%"PRIx32" RDX: 0x%"PRIx32"\n",
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "RAX: 0x%"PRIx32" RBX: 0x%"PRIx32" RCX: 0x%"PRIx32" RDX: 0x%"PRIx32"\n",
func->resp_eax, func->resp_ebx, func->resp_ecx, func->resp_edx);
break;
@@ -214,7 +217,7 @@ void vmx_handle_cpuid(struct vcpu_hw_context *context)
context->g_regs[GUEST_REGS_RBX] = func->resp_ebx;
context->g_regs[GUEST_REGS_RCX] = func->resp_ecx;
context->g_regs[GUEST_REGS_RDX] = func->resp_edx;
- VM_LOG(LVL_DEBUG, "CPUID: 0x%"PRIx64" RAX: 0x%"PRIx32" RBX: 0x%"PRIx32" RCX: 0x%"PRIx32" RDX: 0x%"PRIx32"\n",
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "CPUID: 0x%"PRIx64" RAX: 0x%"PRIx32" RBX: 0x%"PRIx32" RCX: 0x%"PRIx32" RDX: 0x%"PRIx32"\n",
context->g_regs[GUEST_REGS_RAX], func->resp_eax, func->resp_ebx, func->resp_ecx, func->resp_edx);
break;
@@ -227,7 +230,7 @@ void vmx_handle_cpuid(struct vcpu_hw_context *context)
case CPUID_EXTENDED_ADDR_BITS:
func = &priv->extended_funcs[context->g_regs[GUEST_REGS_RAX]
- CPUID_EXTENDED_LFUNCEXTD];
- VM_LOG(LVL_DEBUG, "CPUID: 0x%"PRIx64": EAX: 0x%"PRIx32" EBX: 0x%"PRIx32" ECX: 0x%"PRIx32" EDX: 0x%"PRIx32"\n",
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "CPUID: 0x%"PRIx64": EAX: 0x%"PRIx32" EBX: 0x%"PRIx32" ECX: 0x%"PRIx32" EDX: 0x%"PRIx32"\n",
context->g_regs[GUEST_REGS_RAX], func->resp_eax, func->resp_ebx, func->resp_ecx, func->resp_edx);
context->g_regs[GUEST_REGS_RAX] = func->resp_eax;
context->g_regs[GUEST_REGS_RBX] = func->resp_ebx;
@@ -246,7 +249,7 @@ void vmx_handle_cpuid(struct vcpu_hw_context *context)
/* Reserved for VM */
case CPUID_VM_CPUID_BASE ... CPUID_VM_CPUID_MAX:
- VM_LOG(LVL_DEBUG, "CPUID: 0x%"PRIx64" will read zeros\n",
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "CPUID: 0x%"PRIx64" will read zeros\n",
context->g_regs[GUEST_REGS_RAX]);
context->g_regs[GUEST_REGS_RAX] = 0;
context->g_regs[GUEST_REGS_RBX] = 0;
@@ -255,7 +258,7 @@ void vmx_handle_cpuid(struct vcpu_hw_context *context)
break;
default:
- VM_LOG(LVL_ERR, "GCPUID/R: Func: 0x%"PRIx64"\n",
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "GCPUID/R: Func: 0x%"PRIx64"\n",
context->g_regs[GUEST_REGS_RAX]);
goto _fail;
}
@@ -281,24 +284,24 @@ int vmx_handle_io_instruction_exit(struct vcpu_hw_context *context)
if (ioe.bits.direction == 0) {
if (ioe.bits.port == 0x80) {
- VM_LOG(LVL_DEBUG, "(0x%"PRIx64") CBDW: 0x%"PRIx64"\n",
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "(0x%"PRIx64") CBDW: 0x%"PRIx64"\n",
VMX_GUEST_RIP(context), context->g_regs[GUEST_REGS_RAX]);
} else {
- VM_LOG(LVL_DEBUG, "Write on IO Port: 0x%04x\n", ioe.bits.port);
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "Write on IO Port: 0x%04x\n", ioe.bits.port);
wval = (u32)context->g_regs[GUEST_REGS_RAX];
if (vmm_devemu_emulate_iowrite(context->assoc_vcpu, ioe.bits.port,
&wval, io_sz, VMM_DEVEMU_NATIVE_ENDIAN) != VMM_OK) {
- VM_LOG(LVL_ERR, "Failed to emulate OUT instruction in"
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "Failed to emulate OUT instruction in"
" guest.\n");
goto guest_bad_fault;
}
}
} else {
- VM_LOG(LVL_DEBUG, "Read on IO Port: 0x%04x\n", ioe.bits.port);
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "Read on IO Port: 0x%04x\n", ioe.bits.port);
if (vmm_devemu_emulate_ioread(context->assoc_vcpu, ioe.bits.port, &wval, io_sz,
VMM_DEVEMU_NATIVE_ENDIAN) != VMM_OK) {
- VM_LOG(LVL_ERR, "Failed to emulate IO instruction in "
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "Failed to emulate IO instruction in "
"guest.\n");
goto guest_bad_fault;
}
@@ -323,7 +326,7 @@ int vmx_handle_crx_exit(struct vcpu_hw_context *context)
crx_eq.val = VMX_GUEST_EQ(context);
if (crx_eq.bits.reg > GUEST_REGS_R15) {
- VM_LOG(LVL_ERR, "Guest Move to CR0 with invalid reg %d\n", crx_eq.bits.reg);
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "Guest Move to CR0 with invalid reg %d\n", crx_eq.bits.reg);
goto guest_bad_fault;
}
@@ -339,21 +342,21 @@ int vmx_handle_crx_exit(struct vcpu_hw_context *context)
VMX_GUEST_CR0(context) = (X86_CR0_ET | X86_CR0_CD | X86_CR0_NW
| context->g_regs[crx_eq.bits.reg]);
//__vmwrite(GUEST_CR0, (VMX_GUEST_CR0(context) | context->g_regs[crx_eq.bits.reg]));
- VM_LOG(LVL_DEBUG, "Moving %d register (value: 0x%"PRIx64") to CR0\n",
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "Moving %d register (value: 0x%"PRIx64") to CR0\n",
crx_eq.bits.reg, gcr0);
break;
case 3:
__vmwrite(GUEST_CR3, context->g_regs[crx_eq.bits.reg]);
- VM_LOG(LVL_DEBUG, "Moving %d register (value: 0x%"PRIx64") to CR3\n",
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "Moving %d register (value: 0x%"PRIx64") to CR3\n",
crx_eq.bits.reg, context->g_regs[crx_eq.bits.reg]);
break;
case 4:
__vmwrite(GUEST_CR4, context->g_regs[crx_eq.bits.reg]);
- VM_LOG(LVL_DEBUG, "Moving %d register (value: 0x%"PRIx64") to CR4\n",
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "Moving %d register (value: 0x%"PRIx64") to CR4\n",
crx_eq.bits.reg, context->g_regs[crx_eq.bits.reg]);
break;
default:
- VM_LOG(LVL_ERR, "Guest trying to write to reserved CR%d\n", crx_eq.bits.cr_num);
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "Guest trying to write to reserved CR%d\n", crx_eq.bits.cr_num);
goto guest_bad_fault;
}
} else if (crx_eq.bits.type == 1) { /* Move from CRx */
@@ -361,25 +364,25 @@ int vmx_handle_crx_exit(struct vcpu_hw_context *context)
case 0:
//context->g_regs[crx_eq.bits.reg] = vmr(GUEST_CR0);
context->g_regs[crx_eq.bits.reg] = VMX_GUEST_CR0(context);
- VM_LOG(LVL_DEBUG, "Moving CR3 to register %d\n",
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "Moving CR0 to register %d\n",
crx_eq.bits.reg);
break;
case 3:
context->g_regs[crx_eq.bits.reg] = vmr(GUEST_CR3);
- VM_LOG(LVL_DEBUG, "Moving CR3 to register %d\n",
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "Moving CR3 to register %d\n",
crx_eq.bits.reg);
break;
case 4:
context->g_regs[crx_eq.bits.reg] = vmr(GUEST_CR4);
- VM_LOG(LVL_DEBUG, "Moving CR4 to register %d\n",
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "Moving CR4 to register %d\n",
crx_eq.bits.reg);
break;
default:
- VM_LOG(LVL_ERR, "Guest trying to write to reserved CR%d\n", crx_eq.bits.cr_num);
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "Guest trying to read from reserved CR%d\n", crx_eq.bits.cr_num);
goto guest_bad_fault;
}
} else {
- VM_LOG(LVL_ERR, "LMSW not supported yet\n");
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "LMSW not supported yet\n");
goto guest_bad_fault;
}
@@ -403,7 +406,7 @@ int vmx_handle_vmexit(struct vcpu_hw_context *context, u32 exit_reason)
if (is_guest_linear_address_valid(VMX_GUEST_EQ(context))) {
return vmx_handle_guest_realmode_page_fault(context);
} else {
- VM_LOG(LVL_ERR, "(Realmode pagefault) VMX reported invalid linear address.\n");
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "(Realmode pagefault) VMX reported invalid linear address.\n");
return VMM_EFAIL;
}
} else { /* Protected mode */
@@ -415,11 +418,11 @@ int vmx_handle_vmexit(struct vcpu_hw_context *context, u32 exit_reason)
return vmx_handle_io_instruction_exit(context);
case EXIT_REASON_CR_ACCESS:
- VM_LOG(LVL_DEBUG, "CRx Access\n");
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "CRx Access\n");
return vmx_handle_crx_exit(context);
case EXIT_REASON_CPUID:
- VM_LOG(LVL_DEBUG, "Guest CPUID Request: 0x%"PRIx64"\n", context->g_regs[GUEST_REGS_RAX]);
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "Guest CPUID Request: 0x%"PRIx64"\n", context->g_regs[GUEST_REGS_RAX]);
vmx_handle_cpuid(context);
return VMM_OK;
@@ -432,7 +435,7 @@ int vmx_handle_vmexit(struct vcpu_hw_context *context, u32 exit_reason)
return VMM_OK;
default:
- VM_LOG(LVL_DEBUG, "Unhandled VM Exit reason: %d\n", exit_reason);
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "Unhandled VM Exit reason: %d\n", exit_reason);
goto guest_bad_fault;
}
@@ -448,18 +451,18 @@ void vmx_vcpu_exit(struct vcpu_hw_context *context)
if (unlikely(context->instruction_error)) {
if ((rc = __vmread(VM_INSTRUCTION_ERROR, &ins_err)) == VMM_OK) {
- VM_LOG(LVL_ERR, "Instruction Error: (%ld:%s)\n", ins_err, ins_err_str[ins_err]);
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "Instruction Error: (%ld:%s)\n", ins_err, ins_err_str[ins_err]);
}
if (context->instruction_error == -1) {
if ((rc = __vmread(VM_INSTRUCTION_ERROR, &ins_err)) == VMM_OK) {
- VM_LOG(LVL_ERR, "Instruction Error: (%ld:%s)\n", ins_err, ins_err_str[ins_err]);
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "Instruction Error: (%ld:%s)\n", ins_err, ins_err_str[ins_err]);
}
} else if (context->instruction_error == -2) {
- VM_LOG(LVL_ERR, "vmlaunch/resume without an active VMCS!\n");
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "vmlaunch/resume without an active VMCS!\n");
}
- VM_LOG(LVL_DEBUG, "VM Entry Failure with Error: %d\n", context->instruction_error);
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "VM Entry Failure with Error: %d\n", context->instruction_error);
goto unhandled_vm_exit;
}
@@ -470,26 +473,26 @@ void vmx_vcpu_exit(struct vcpu_hw_context *context)
if (unlikely(_exit_reason.bits.vm_entry_failure)) {
switch(_exit_reason.bits.reason) {
case 33:
- VM_LOG(LVL_ERR, "VM Entry failed due to invalid guest state.\n");
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "VM Entry failed due to invalid guest state.\n");
break;
case 34:
- VM_LOG(LVL_ERR, "VM Entry failed due to MSR loading.\n");
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "VM Entry failed due to MSR loading.\n");
break;
case 41:
- VM_LOG(LVL_ERR, "VM Entry failed due to machine-check event.\n");
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "VM Entry failed due to machine-check event.\n");
break;
default:
- VM_LOG(LVL_ERR, "VM Entry failed due to unknown reason %d.\n", _exit_reason.bits.reason);
+ X86_DEBUG_LOG(vtx_intercept, LVL_ERR, "VM Entry failed due to unknown reason %d.\n", _exit_reason.bits.reason);
break;
}
} else {
VMX_GUEST_SAVE_EQ(context);
VMX_GUEST_SAVE_CR0(context);
VMX_GUEST_SAVE_RIP(context);
- VM_LOG(LVL_DEBUG, "Guest RIP: 0x%"PRIx64"\n", VMX_GUEST_RIP(context));
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "Guest RIP: 0x%"PRIx64"\n", VMX_GUEST_RIP(context));
if (vmx_handle_vmexit(context, _exit_reason.bits.reason) != VMM_OK) {
- VM_LOG(LVL_DEBUG, "Error handling VMExit (Reason: %d)\n", _exit_reason.bits.reason);
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "Error handling VMExit (Reason: %d)\n", _exit_reason.bits.reason);
goto unhandled_vm_exit;
}
@@ -497,6 +500,6 @@ void vmx_vcpu_exit(struct vcpu_hw_context *context)
}
unhandled_vm_exit:
- VM_LOG(LVL_DEBUG, "Unhandled VM Exit\n");
+ X86_DEBUG_LOG(vtx_intercept, LVL_DEBUG, "Unhandled VM Exit\n");
context->vcpu_emergency_shutdown(context);
}
diff --git a/arch/x86/cpu/common/vm/vtx/vmcs.c b/arch/x86/cpu/common/vm/vtx/vmcs.c
index 2a4feadc..8621d65d 100644
--- a/arch/x86/cpu/common/vm/vtx/vmcs.c
+++ b/arch/x86/cpu/common/vm/vtx/vmcs.c
@@ -39,6 +39,9 @@
#include <vm/vmcs.h>
#include <vm/vmx.h>
#include <vm/ept.h>
+#include <x86_debug_log.h>
+
+DEFINE_X86_DEBUG_LOG_SUBSYS_LEVEL(vmcs, X86_DEBUG_LOG_LVL_INFO);
#define BYTES_PER_LONG (BITS_PER_LONG/8)
@@ -131,12 +134,12 @@ void vmx_detect_capability(void)
/* save the revision_id */
vmcs_revision_id = vmx_basic_msr_low;
- VM_LOG(LVL_VERBOSE, "%s: Basic MSR: 0x%lx\n", __func__, cpu_read_msr(MSR_IA32_VMX_BASIC));
- VM_LOG(LVL_VERBOSE, "%s: Basic low: 0x%x\n", __func__, vmx_basic_msr_low);
+ X86_DEBUG_LOG(vmcs, LVL_VERBOSE, "%s: Basic MSR: 0x%lx\n", __func__, cpu_read_msr(MSR_IA32_VMX_BASIC));
+ X86_DEBUG_LOG(vmcs, LVL_VERBOSE, "%s: Basic low: 0x%x\n", __func__, vmx_basic_msr_low);
vmxon_region_size = VMM_ROUNDUP2_PAGE_SIZE(vmx_basic_msr_high
& 0x1ffful);
- VM_LOG(LVL_VERBOSE, "%s: VMXON Region Size: 0x%x\n", __func__, vmxon_region_size);
+ X86_DEBUG_LOG(vmcs, LVL_VERBOSE, "%s: VMXON Region Size: 0x%x\n", __func__, vmxon_region_size);
vmxon_region_nr_pages = VMM_SIZE_TO_PAGE(vmxon_region_size);
@@ -257,13 +260,13 @@ struct vmcs *current_vmcs(physical_addr_t *phys)
/* There is not current VMCS */
if (!vmcs_phys || vmcs_phys == 0xFFFFFFFFFFFFFFFFULL) {
- VM_LOG(LVL_ERR, "%s: There is not active(current) VMCS on this "
+ X86_DEBUG_LOG(vmcs, LVL_ERR, "%s: There is not active(current) VMCS on this "
"logical processor.\n", __func__);
return NULL;
}
if (vmm_host_pa2va(vmcs_phys, &vmcs_virt) != VMM_OK) {
- VM_LOG(LVL_ERR, "%s: Could not find virtual address for current VMCS\n", __func__);
+ X86_DEBUG_LOG(vmcs, LVL_ERR, "%s: Could not find virtual address for current VMCS\n", __func__);
return NULL;
}
@@ -279,19 +282,19 @@ struct vmcs* create_vmcs(void)
/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
if ((vmx_basic_msr_high & 0x1fff) > PAGE_SIZE) {
- VM_LOG(LVL_ERR, "VMCS size larger than 4K\n");
+ X86_DEBUG_LOG(vmcs, LVL_ERR, "VMCS size larger than 4K\n");
return NULL;
}
/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
if (vmx_basic_msr_high & (1u<<16)) {
- VM_LOG(LVL_ERR, "VMX_BASIC_MSR[48] = 1\n");
+ X86_DEBUG_LOG(vmcs, LVL_ERR, "VMX_BASIC_MSR[48] = 1\n");
return NULL;
}
/* Require Write-Back (WB) memory type for VMCS accesses. */
if (((vmx_basic_msr_high >> 18) & 15) != 6) {
- VM_LOG(LVL_ERR, "Write-back memory required for VMCS\n");
+ X86_DEBUG_LOG(vmcs, LVL_ERR, "Write-back memory required for VMCS\n");
return NULL;
}
@@ -642,7 +645,7 @@ int vmx_set_control_params(struct vcpu_hw_context *context)
/* Set up the VCPU's guest extended page tables */
if ((rc = setup_ept(context)) != VMM_OK) {
- VM_LOG(LVL_ERR, "EPT Setup failed with error: %d\n", rc);
+ X86_DEBUG_LOG(vmcs, LVL_ERR, "EPT Setup failed with error: %d\n", rc);
return rc;
}
diff --git a/arch/x86/cpu/common/vm/vtx/vmx.c b/arch/x86/cpu/common/vm/vtx/vmx.c
index 22c83745..e3c95f85 100644
--- a/arch/x86/cpu/common/vm/vtx/vmx.c
+++ b/arch/x86/cpu/common/vm/vtx/vmx.c
@@ -34,6 +34,9 @@
#include <vm/vmx.h>
#include <vm/vmx_intercept.h>
#include <vm/vmcs_auditor.h>
+#include <x86_debug_log.h>
+
+DEFINE_X86_DEBUG_LOG_SUBSYS_LEVEL(vmx, X86_DEBUG_LOG_LVL_INFO);
extern void vmx_vcpu_exit(struct vcpu_hw_context *context);
@@ -63,7 +66,7 @@ static int enable_vmx (struct cpuinfo_x86 *cpuinfo)
/* FIXME: Detect VMX support */
if (!cpuinfo->hw_virt_available) {
- VM_LOG(LVL_ERR, "No VMX feature!\n");
+ X86_DEBUG_LOG(vmx, LVL_ERR, "No VMX feature!\n");
return VMM_EFAIL;
}
@@ -72,12 +75,12 @@ static int enable_vmx (struct cpuinfo_x86 *cpuinfo)
/* EPT and VPID support is required */
if (!cpu_has_vmx_ept) {
- VM_LOG(LVL_ERR, "No EPT support!\n");
+ X86_DEBUG_LOG(vmx, LVL_ERR, "No EPT support!\n");
return VMM_EFAIL;
}
if (!cpu_has_vmx_vpid) {
- VM_LOG(LVL_ERR, "No VPID support!\n");
+ X86_DEBUG_LOG(vmx, LVL_ERR, "No VPID support!\n");
return VMM_EFAIL;
}
@@ -116,17 +119,17 @@ static int enable_vmx (struct cpuinfo_x86 *cpuinfo)
cr0 = read_cr0();
cr4 = read_cr4();
- VM_LOG(LVL_VERBOSE, "CR0: 0x%lx CR4: 0x%lx\n", cr0, cr4);
+ X86_DEBUG_LOG(vmx, LVL_VERBOSE, "CR0: 0x%lx CR4: 0x%lx\n", cr0, cr4);
if ((~cr0 & vmx_cr0_fixed0) || (cr0 & ~vmx_cr0_fixed1)) {
- VM_LOG(LVL_ERR, "Some settings of host CR0 are not allowed in VMX"
+ X86_DEBUG_LOG(vmx, LVL_ERR, "Some settings of host CR0 are not allowed in VMX"
" operation. (Host CR0: 0x%lx CR0 Fixed0: 0x%lx CR0 Fixed1: 0x%lx)\n",
cr0, vmx_cr0_fixed0, vmx_cr0_fixed1);
return VMM_EFAIL;
}
if ((~cr4 & vmx_cr4_fixed0) || (cr4 & ~vmx_cr4_fixed1)) {
- VM_LOG(LVL_ERR, "Some settings of host CR4 are not allowed in VMX"
+ X86_DEBUG_LOG(vmx, LVL_ERR, "Some settings of host CR4 are not allowed in VMX"
" operation. (Host CR4: 0x%lx CR4 Fixed0: 0x%lx CR4 Fixed1: 0x%lx)\n",
cr4, vmx_cr4_fixed0, vmx_cr4_fixed1);
return VMM_EFAIL;
@@ -141,43 +144,43 @@ static int enable_vmx (struct cpuinfo_x86 *cpuinfo)
bios_locked = !!(eax & IA32_FEATURE_CONTROL_MSR_LOCK);
if (bios_locked) {
if (!(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX) ) {
- VM_LOG(LVL_ERR, "VMX disabled by BIOS.\n");
+ X86_DEBUG_LOG(vmx, LVL_ERR, "VMX disabled by BIOS.\n");
return VMM_EFAIL;
}
}
vmx_on_region = alloc_vmx_on_region();
if (vmx_on_region == NULL) {
- VM_LOG(LVL_ERR, "Failed to create vmx on region.\n");
+ X86_DEBUG_LOG(vmx, LVL_ERR, "Failed to create vmx on region.\n");
ret = VMM_ENOMEM;
goto _fail;
}
if (vmm_host_va2pa((virtual_addr_t)vmx_on_region,
&vmx_on_region_pa) != VMM_OK) {
- VM_LOG(LVL_ERR, "Critical conversion of vmx on regsion VA=>PA failed!\n");
+ X86_DEBUG_LOG(vmx, LVL_ERR, "Critical conversion of vmx on region VA=>PA failed!\n");
ret = VMM_EINVALID;
goto _fail;
}
- VM_LOG(LVL_VERBOSE, "%s: VMCS Revision Identifier: 0x%x\n",
+ X86_DEBUG_LOG(vmx, LVL_VERBOSE, "%s: VMCS Revision Identifier: 0x%x\n",
__func__, vmcs_revision_id);
vmxon_rev = (u32 *)vmx_on_region;
*vmxon_rev = vmcs_revision_id;
*vmxon_rev &= ~(0x1UL << 31);
- VM_LOG(LVL_VERBOSE, "%s: VMXON PTR: 0x%lx\n", __func__,
+ X86_DEBUG_LOG(vmx, LVL_VERBOSE, "%s: VMXON PTR: 0x%lx\n", __func__,
(unsigned long)vmx_on_region_pa);
/* get in VMX ON state */
if ((ret = __vmxon(vmx_on_region_pa)) != VMM_OK) {
- VM_LOG(LVL_ERR, "VMXON returned with error: %d\n", ret);
+ X86_DEBUG_LOG(vmx, LVL_ERR, "VMXON returned with error: %d\n", ret);
ret = VMM_EACCESS;
goto _fail;
}
- VM_LOG(LVL_INFO, "%s: Entered VMX operations successfully!\n", __func__);
+ X86_DEBUG_LOG(vmx, LVL_INFO, "%s: Entered VMX operations successfully!\n", __func__);
this_cpu(vmxon_region) = (virtual_addr_t)vmx_on_region;
this_cpu(vmxon_region_pa) = vmx_on_region_pa;
@@ -495,7 +498,7 @@ int __init intel_init(struct cpuinfo_x86 *cpuinfo)
{
/* Enable VMX */
if (enable_vmx(cpuinfo) != VMM_OK) {
- VM_LOG(LVL_ERR, "ERROR: Failed to enable virtual machine.\n");
+ X86_DEBUG_LOG(vmx, LVL_ERR, "ERROR: Failed to enable virtual machine.\n");
return VMM_EFAIL;
}
diff --git a/arch/x86/cpu/x86_64/cpu_vcpu_helper.c b/arch/x86/cpu/x86_64/cpu_vcpu_helper.c
index 5656c599..1f247105 100644
--- a/arch/x86/cpu/x86_64/cpu_vcpu_helper.c
+++ b/arch/x86/cpu/x86_64/cpu_vcpu_helper.c
@@ -35,6 +35,9 @@
#include <libs/stringlib.h>
#include <libs/bitops.h>
#include <arch_guest_helper.h>
+#include <x86_debug_log.h>
+
+DEFINE_X86_DEBUG_LOG_SUBSYS_LEVEL(x86_vcpu, X86_DEBUG_LOG_LVL_INFO);
void arch_vcpu_emergency_shutdown(struct vcpu_hw_context *context);
@@ -61,7 +64,7 @@ static void init_vcpu_capabilities(struct vmm_vcpu *vcpu)
/* TODO: CPUID 7 as more features. Limiting to 4 right now. */
func_response->resp_eax = CPUID_BASE_CACHE_CONF;
- VM_LOG(LVL_INFO, "Guest base CPUID Limited to 0x%"PRIx32"\n", func_response->resp_eax);
+ X86_DEBUG_LOG(x86_vcpu, LVL_INFO, "Guest base CPUID Limited to 0x%"PRIx32"\n", func_response->resp_eax);
break;
case CPUID_BASE_FEATURES:
@@ -95,7 +98,7 @@ static void init_vcpu_capabilities(struct vmm_vcpu *vcpu)
break;
default:
- VM_LOG(LVL_INFO, "CPUID: 0x%"PRIx32" defaulting to CPU reported values\n", funcs);
+ X86_DEBUG_LOG(x86_vcpu, LVL_INFO, "CPUID: 0x%"PRIx32" defaulting to CPU reported values\n", funcs);
func_response->resp_eax = 0;
func_response->resp_ebx = 0;
func_response->resp_ecx = 0;
@@ -114,7 +117,7 @@ static void init_vcpu_capabilities(struct vmm_vcpu *vcpu)
&func_response->resp_edx);
func_response->resp_eax = CPUID_EXTENDED_ADDR_BITS;
- VM_LOG(LVL_INFO, "Guest extended CPUID Limited to 0x%"PRIx32"\n", func_response->resp_eax);
+ X86_DEBUG_LOG(x86_vcpu, LVL_INFO, "Guest extended CPUID Limited to 0x%"PRIx32"\n", func_response->resp_eax);
break;
case CPUID_EXTENDED_FEATURES: /* replica of base features */
@@ -143,7 +146,7 @@ static void init_vcpu_capabilities(struct vmm_vcpu *vcpu)
break;
default:
- VM_LOG(LVL_INFO, "CPUID: 0x%"PRIx32" defaulting to CPU reported values\n", funcs);
+ X86_DEBUG_LOG(x86_vcpu, LVL_INFO, "CPUID: 0x%"PRIx32" defaulting to CPU reported values\n", funcs);
cpuid(funcs, &func_response->resp_eax,
&func_response->resp_ebx,
&func_response->resp_ecx,
@@ -155,9 +158,9 @@ static void init_vcpu_capabilities(struct vmm_vcpu *vcpu)
static void arch_guest_vcpu_trampoline(struct vmm_vcpu *vcpu)
{
- VM_LOG(LVL_DEBUG, "Running VCPU %s\n", vcpu->name);
+ X86_DEBUG_LOG(x86_vcpu, LVL_DEBUG, "Running VCPU %s\n", vcpu->name);
cpu_boot_vcpu(x86_vcpu_priv(vcpu)->hw_context);
- VM_LOG(LVL_ERR, "ERROR: Guest VCPU exited from run loop!\n");
+ X86_DEBUG_LOG(x86_vcpu, LVL_ERR, "ERROR: Guest VCPU exited from run loop!\n");
while(1); /* Should never come here! */
}
--
2.34.1