
[PATCH 0/11] Another set of nested svm fixes and optimizations


Joerg Roedel
Feb 24, 2010, 1:10:03 PM
Hi,

here is another set of patches that I collected over the last few days
while trying to get Hyper-V running with nested svm. I have not been
successful yet, but the patches in this set make sense anyway :-)

The patches fix coding style issues which have annoyed me for some time
because my vim highlights all c-space-errors in red. Other patches in
this set make nested svm more complete (NMI and selective CR0 handling).
The vm_cr and hwcr.ignne patches came out of the Hyper-V tests; this
hypervisor expects to be able to write to those MSRs.

The most interesting one is patch 11, which reduces the amount of memory
touched in the vmrun emulation when merging the msrpms from 24kb down to
48 bytes. Please review and comment on (or apply, if perfect ;-) ) these
patches.
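
For the curious, here is the idea behind patch 11 in simplified form.
This is an illustrative sketch with hypothetical names, not the patch
itself: instead of ORing the full host, nested and merged bitmaps on
every vmrun, the word offsets of the MSRs KVM actually intercepts are
computed once, and only those words get merged. With about a dozen
offsets the merge then reads only around 48 bytes of the nested bitmap.

#include <linux/types.h>	/* u32 */

#define MAX_MSRPM_OFFSETS	16
#define MSRPM_OFFSET_END	0xffffffffU	/* hypothetical sentinel */

/*
 * Word offsets into the msrpm for the MSRs KVM intercepts directly;
 * filled once at init time and terminated with MSRPM_OFFSET_END.
 */
static u32 msrpm_offsets[MAX_MSRPM_OFFSETS];

static void merge_msrpm(u32 *merged, const u32 *host, const u32 *nested)
{
	int i;

	for (i = 0; i < MAX_MSRPM_OFFSETS; i++) {
		u32 offset = msrpm_offsets[i];

		if (offset == MSRPM_OFFSET_END)
			break;

		/*
		 * A nested guest must not be more powerful than the
		 * guest, so all permission bits are ORed.
		 */
		merged[offset] = host[offset] | nested[offset];
	}
}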

Thanks,

Joerg

diffstat:

arch/x86/include/asm/svm.h |    4 +
arch/x86/kvm/svm.c         |  282 +++++++++++++++++++++++++++++++-------------
arch/x86/kvm/trace.h       |   22 ++++
arch/x86/kvm/x86.c         |    3 +-
4 files changed, 226 insertions(+), 85 deletions(-)

shortlog:

Joerg Roedel (11):
      KVM: SVM: Coding style cleanup
      KVM: SVM: Reset MMU on nested_svm_vmrun for NPT too
      KVM: SVM: Check for nested intercepts on NMI injection
      KVM: SVM: Restore tracing of nested vmcb address
      KVM: SVM: Add kvm_nested_intercepts tracepoint
      KVM: SVM: Implement emulation of vm_cr msr
      KVM: SVM: Ignore write of hwcr.ignne
      KVM: x86: Don't set arch.cr0 in kvm_set_cr0
      KVM: SVM: Handle nested selective_cr0 intercept correctly
      KVM: SVM: Clear exit_info for injected INTR exits
      KVM: SVM: Optimize nested svm msrpm merging


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majo...@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/

Joerg Roedel
Feb 24, 2010, 1:10:02 PM
This patch removes whitespace errors, fixes comment formatting, and
addresses most of the checkpatch warnings. Now vim no longer shows
c-space-errors.

Signed-off-by: Joerg Roedel <joerg....@amd.com>
---
arch/x86/kvm/svm.c | 148 ++++++++++++++++++++++++++++-----------------------
1 files changed, 81 insertions(+), 67 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d11ff46..217b8b0 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -119,7 +119,7 @@ struct vcpu_svm {
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
-static bool npt_enabled = false;
+static bool npt_enabled;
#endif
static int npt = 1;

@@ -167,8 +167,8 @@ static unsigned long iopm_base;
struct kvm_ldttss_desc {
u16 limit0;
u16 base0;
- unsigned base1 : 8, type : 5, dpl : 2, p : 1;
- unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
+ unsigned base1:8, type:5, dpl:2, p:1;
+ unsigned limit1:4, zero0:3, g:1, base2:8;
u32 base3;
u32 zero1;
} __attribute__((packed));
@@ -217,7 +217,7 @@ static inline void stgi(void)

static inline void invlpga(unsigned long addr, u32 asid)
{
- asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
+ asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static inline void force_new_asid(struct kvm_vcpu *vcpu)
@@ -289,8 +289,10 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
{
struct vcpu_svm *svm = to_svm(vcpu);

- /* If we are within a nested VM we'd better #VMEXIT and let the
- guest handle the exception */
+ /*
+ * If we are within a nested VM we'd better #VMEXIT and let the guest
+ * handle the exception
+ */
if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
return;

@@ -543,7 +545,7 @@ static void init_seg(struct vmcb_seg *seg)
{
seg->selector = 0;
seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
- SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
+ SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
seg->limit = 0xffff;
seg->base = 0;
}
@@ -563,16 +565,16 @@ static void init_vmcb(struct vcpu_svm *svm)

svm->vcpu.fpu_active = 1;

- control->intercept_cr_read = INTERCEPT_CR0_MASK |
+ control->intercept_cr_read = INTERCEPT_CR0_MASK |
INTERCEPT_CR3_MASK |
INTERCEPT_CR4_MASK;

- control->intercept_cr_write = INTERCEPT_CR0_MASK |
+ control->intercept_cr_write = INTERCEPT_CR0_MASK |
INTERCEPT_CR3_MASK |
INTERCEPT_CR4_MASK |
INTERCEPT_CR8_MASK;

- control->intercept_dr_read = INTERCEPT_DR0_MASK |
+ control->intercept_dr_read = INTERCEPT_DR0_MASK |
INTERCEPT_DR1_MASK |
INTERCEPT_DR2_MASK |
INTERCEPT_DR3_MASK |
@@ -581,7 +583,7 @@ static void init_vmcb(struct vcpu_svm *svm)
INTERCEPT_DR6_MASK |
INTERCEPT_DR7_MASK;

- control->intercept_dr_write = INTERCEPT_DR0_MASK |
+ control->intercept_dr_write = INTERCEPT_DR0_MASK |
INTERCEPT_DR1_MASK |
INTERCEPT_DR2_MASK |
INTERCEPT_DR3_MASK |
@@ -595,7 +597,7 @@ static void init_vmcb(struct vcpu_svm *svm)
(1 << MC_VECTOR);


- control->intercept = (1ULL << INTERCEPT_INTR) |
+ control->intercept = (1ULL << INTERCEPT_INTR) |
(1ULL << INTERCEPT_NMI) |
(1ULL << INTERCEPT_SMI) |
(1ULL << INTERCEPT_SELECTIVE_CR0) |
@@ -656,7 +658,8 @@ static void init_vmcb(struct vcpu_svm *svm)
save->rip = 0x0000fff0;
svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

- /* This is the guest-visible cr0 value.
+ /*
+ * This is the guest-visible cr0 value.
* svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
*/
svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
@@ -897,7 +900,8 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;

- /* AMD's VMCB does not have an explicit unusable field, so emulate it
+ /*
+ * AMD's VMCB does not have an explicit unusable field, so emulate it
* for cross vendor migration purposes by "not present"
*/
var->unusable = !var->present || (var->type == 0);
@@ -933,7 +937,8 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
var->type |= 0x1;
break;
case VCPU_SREG_SS:
- /* On AMD CPUs sometimes the DB bit in the segment
+ /*
+ * On AMD CPUs sometimes the DB bit in the segment
* descriptor is left as 1, although the whole segment has
* been made unusable. Clear it here to pass an Intel VMX
* entry check when cross vendor migrating.
@@ -1264,7 +1269,7 @@ static int db_interception(struct vcpu_svm *svm)
}

if (svm->vcpu.guest_debug &
- (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)){
+ (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
kvm_run->exit_reason = KVM_EXIT_DEBUG;
kvm_run->debug.arch.pc =
svm->vmcb->save.cs.base + svm->vmcb->save.rip;
@@ -1309,7 +1314,7 @@ static void svm_fpu_activate(struct kvm_vcpu *vcpu)
excp = h_excp | n_excp;
} else {
excp = svm->vmcb->control.intercept_exceptions;
- excp &= ~(1 << NM_VECTOR);
+ excp &= ~(1 << NM_VECTOR);
}

svm->vmcb->control.intercept_exceptions = excp;
@@ -1548,13 +1553,13 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
case SVM_EXIT_INTR:
case SVM_EXIT_NMI:
return NESTED_EXIT_HOST;
- /* For now we are always handling NPFs when using them */
case SVM_EXIT_NPF:
+ /* For now we are always handling NPFs when using them */
if (npt_enabled)
return NESTED_EXIT_HOST;
break;
- /* When we're shadowing, trap PFs */
case SVM_EXIT_EXCP_BASE + PF_VECTOR:
+ /* When we're shadowing, trap PFs */
if (!npt_enabled)
return NESTED_EXIT_HOST;
break;
@@ -1789,7 +1794,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
if (!nested_msrpm)
return false;

- for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
+ for (i = 0; i < PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
svm->nested.msrpm[i] = svm->msrpm[i] | nested_msrpm[i];

svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
@@ -1823,8 +1828,10 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
kvm_clear_exception_queue(&svm->vcpu);
kvm_clear_interrupt_queue(&svm->vcpu);

- /* Save the old vmcb, so we don't need to pick what we save, but
- can restore everything when a VMEXIT occurs */
+ /*
+ * Save the old vmcb, so we don't need to pick what we save, but can
+ * restore everything when a VMEXIT occurs
+ */
hsave->save.es = vmcb->save.es;
hsave->save.cs = vmcb->save.cs;
hsave->save.ss = vmcb->save.ss;
@@ -1872,6 +1879,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
+
/* In case we don't even reach vcpu_run, the fields are not updated */
svm->vmcb->save.rax = nested_vmcb->save.rax;
svm->vmcb->save.rsp = nested_vmcb->save.rsp;
@@ -1903,8 +1911,10 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
}

- /* We don't want a nested guest to be more powerful than the guest,
- so all intercepts are ORed */
+ /*
+ * We don't want a nested guest to be more powerful than the guest, so
+ * all intercepts are ORed
+ */
svm->vmcb->control.intercept_cr_read |=
nested_vmcb->control.intercept_cr_read;
svm->vmcb->control.intercept_cr_write |=
@@ -2218,9 +2228,11 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
case MSR_IA32_SYSENTER_ESP:
*data = svm->sysenter_esp;
break;
- /* Nobody will change the following 5 values in the VMCB so
- we can safely return them on rdmsr. They will always be 0
- until LBRV is implemented. */
+ /*
+ * Nobody will change the following 5 values in the VMCB so we can
+ * safely return them on rdmsr. They will always be 0 until LBRV is
+ * implemented.
+ */
case MSR_IA32_DEBUGCTLMSR:
*data = svm->vmcb->save.dbgctl;
break;
@@ -2399,16 +2411,16 @@ static int pause_interception(struct vcpu_svm *svm)
}

static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
- [SVM_EXIT_READ_CR0] = emulate_on_interception,
- [SVM_EXIT_READ_CR3] = emulate_on_interception,
- [SVM_EXIT_READ_CR4] = emulate_on_interception,
- [SVM_EXIT_READ_CR8] = emulate_on_interception,
+ [SVM_EXIT_READ_CR0] = emulate_on_interception,
+ [SVM_EXIT_READ_CR3] = emulate_on_interception,
+ [SVM_EXIT_READ_CR4] = emulate_on_interception,
+ [SVM_EXIT_READ_CR8] = emulate_on_interception,
[SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
- [SVM_EXIT_WRITE_CR0] = emulate_on_interception,
- [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
- [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
- [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
- [SVM_EXIT_READ_DR0] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR0] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
+ [SVM_EXIT_READ_DR0] = emulate_on_interception,
[SVM_EXIT_READ_DR1] = emulate_on_interception,
[SVM_EXIT_READ_DR2] = emulate_on_interception,
[SVM_EXIT_READ_DR3] = emulate_on_interception,
@@ -2427,15 +2439,14 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
[SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
[SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
- [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
- [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
- [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
- [SVM_EXIT_INTR] = intr_interception,
+ [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
+ [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
+ [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
+ [SVM_EXIT_INTR] = intr_interception,
[SVM_EXIT_NMI] = nmi_interception,
[SVM_EXIT_SMI] = nop_on_interception,
[SVM_EXIT_INIT] = nop_on_interception,
[SVM_EXIT_VINTR] = interrupt_window_interception,
- /* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */
[SVM_EXIT_CPUID] = cpuid_interception,
[SVM_EXIT_IRET] = iret_interception,
[SVM_EXIT_INVD] = emulate_on_interception,
@@ -2443,7 +2454,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_HLT] = halt_interception,
[SVM_EXIT_INVLPG] = invlpg_interception,
[SVM_EXIT_INVLPGA] = invlpga_interception,
- [SVM_EXIT_IOIO] = io_interception,
+ [SVM_EXIT_IOIO] = io_interception,
[SVM_EXIT_MSR] = msr_interception,
[SVM_EXIT_TASK_SWITCH] = task_switch_interception,
[SVM_EXIT_SHUTDOWN] = shutdown_interception,
@@ -2644,10 +2655,12 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);

- /* In case GIF=0 we can't rely on the CPU to tell us when
- * GIF becomes 1, because that's a separate STGI/VMRUN intercept.
- * The next time we get that intercept, this function will be
- * called again though and we'll get the vintr intercept. */
+ /*
+ * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
+ * 1, because that's a separate STGI/VMRUN intercept. The next time we
+ * get that intercept, this function will be called again though and
+ * we'll get the vintr intercept.
+ */
if (gif_set(svm) && nested_svm_intr(svm)) {
svm_set_vintr(svm);
svm_inject_irq(svm, 0x0);
@@ -2662,9 +2675,10 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
== HF_NMI_MASK)
return; /* IRET will cause a vm exit */

- /* Something prevents NMI from been injected. Single step over
- possible problem (IRET or exception injection or interrupt
- shadow) */
+ /*
+ * Something prevents NMI from been injected. Single step over possible
+ * problem (IRET or exception injection or interrupt shadow)
+ */
svm->nmi_singlestep = true;
svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
update_db_intercept(vcpu);
@@ -2972,24 +2986,24 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
}

static const struct trace_print_flags svm_exit_reasons_str[] = {
- { SVM_EXIT_READ_CR0, "read_cr0" },
- { SVM_EXIT_READ_CR3, "read_cr3" },
- { SVM_EXIT_READ_CR4, "read_cr4" },
- { SVM_EXIT_READ_CR8, "read_cr8" },
- { SVM_EXIT_WRITE_CR0, "write_cr0" },
- { SVM_EXIT_WRITE_CR3, "write_cr3" },
- { SVM_EXIT_WRITE_CR4, "write_cr4" },
- { SVM_EXIT_WRITE_CR8, "write_cr8" },
- { SVM_EXIT_READ_DR0, "read_dr0" },
- { SVM_EXIT_READ_DR1, "read_dr1" },
- { SVM_EXIT_READ_DR2, "read_dr2" },
- { SVM_EXIT_READ_DR3, "read_dr3" },
- { SVM_EXIT_WRITE_DR0, "write_dr0" },
- { SVM_EXIT_WRITE_DR1, "write_dr1" },
- { SVM_EXIT_WRITE_DR2, "write_dr2" },
- { SVM_EXIT_WRITE_DR3, "write_dr3" },
- { SVM_EXIT_WRITE_DR5, "write_dr5" },
- { SVM_EXIT_WRITE_DR7, "write_dr7" },
+ { SVM_EXIT_READ_CR0, "read_cr0" },
+ { SVM_EXIT_READ_CR3, "read_cr3" },
+ { SVM_EXIT_READ_CR4, "read_cr4" },
+ { SVM_EXIT_READ_CR8, "read_cr8" },
+ { SVM_EXIT_WRITE_CR0, "write_cr0" },
+ { SVM_EXIT_WRITE_CR3, "write_cr3" },
+ { SVM_EXIT_WRITE_CR4, "write_cr4" },
+ { SVM_EXIT_WRITE_CR8, "write_cr8" },
+ { SVM_EXIT_READ_DR0, "read_dr0" },
+ { SVM_EXIT_READ_DR1, "read_dr1" },
+ { SVM_EXIT_READ_DR2, "read_dr2" },
+ { SVM_EXIT_READ_DR3, "read_dr3" },
+ { SVM_EXIT_WRITE_DR0, "write_dr0" },
+ { SVM_EXIT_WRITE_DR1, "write_dr1" },
+ { SVM_EXIT_WRITE_DR2, "write_dr2" },
+ { SVM_EXIT_WRITE_DR3, "write_dr3" },
+ { SVM_EXIT_WRITE_DR5, "write_dr5" },
+ { SVM_EXIT_WRITE_DR7, "write_dr7" },
{ SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" },
{ SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" },
{ SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" },
--
1.7.0

Joerg Roedel
Feb 24, 2010, 1:10:03 PM
This patch implements the emulation of the vm_cr msr for
nested svm.
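
For reference, the intended write semantics as a standalone sketch
(plain C with hypothetical names, not the kernel code itself): writes
to reserved bits fail, and once SVM_DIS is latched the SVM_LOCK and
SVM_DIS bits become read-only; disabling SVM while EFER.SVME is set
fails as well.

#include <stdint.h>

#define VM_CR_VALID_MASK	0x001fULL
#define VM_CR_SVM_LOCK_MASK	0x0008ULL
#define VM_CR_SVM_DIS_MASK	0x0010ULL

static uint64_t vm_cr;	/* the emulated MSR value */

/* Returns 1 if the write should fault with #GP, 0 on success. */
static int vm_cr_write(uint64_t data, int efer_svme)
{
	uint64_t chg_mask = VM_CR_VALID_MASK;

	/* writes to reserved bits fault */
	if (data & ~VM_CR_VALID_MASK)
		return 1;

	/* once SVM_DIS is set, the lock and disable bits are frozen */
	if (vm_cr & VM_CR_SVM_DIS_MASK)
		chg_mask &= ~(VM_CR_SVM_LOCK_MASK | VM_CR_SVM_DIS_MASK);

	vm_cr = (vm_cr & ~chg_mask) | (data & chg_mask);

	/* disabling SVM while EFER.SVME is set is not allowed */
	if ((vm_cr & VM_CR_SVM_DIS_MASK) && efer_svme)
		return 1;

	return 0;
}

/*
 * Example: after vm_cr_write(VM_CR_SVM_DIS_MASK, 0), a later
 * vm_cr_write(0, 0) succeeds but leaves SVM_DIS set, because
 * the bit has been latched.
 */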

Signed-off-by: Joerg Roedel <joerg....@amd.com>
---

arch/x86/include/asm/svm.h |    4 ++++
arch/x86/kvm/svm.c         |   29 ++++++++++++++++++++++++++++-
2 files changed, 32 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 38638cd..b26a38d 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -115,6 +115,10 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)

+#define SVM_VM_CR_VALID_MASK 0x001fULL
+#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
+#define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL
+
struct __attribute__ ((__packed__)) vmcb_seg {
u16 selector;
u16 attrib;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6b7590a..2450a7c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -70,6 +70,7 @@ struct kvm_vcpu;
struct nested_state {
struct vmcb *hsave;
u64 hsave_msr;
+ u64 vm_cr_msr;
u64 vmcb;

/* These are the merged vectors */
@@ -2273,7 +2274,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
*data = svm->nested.hsave_msr;
break;
case MSR_VM_CR:
- *data = 0;
+ *data = svm->nested.vm_cr_msr;
break;
case MSR_IA32_UCODE_REV:
*data = 0x01000065;
@@ -2303,6 +2304,31 @@ static int rdmsr_interception(struct vcpu_svm *svm)
return 1;
}

+static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ int svm_dis, chg_mask;
+
+ if (data & ~SVM_VM_CR_VALID_MASK)
+ return 1;
+
+ chg_mask = SVM_VM_CR_VALID_MASK;
+
+ if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
+ chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
+
+ svm->nested.vm_cr_msr &= ~chg_mask;
+ svm->nested.vm_cr_msr |= (data & chg_mask);
+
+ svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
+
+ /* check for svm_disable while efer.svme is set */
+ if (svm_dis && (vcpu->arch.efer & EFER_SVME))
+ return 1;
+
+ return 0;
+}
+
static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
struct vcpu_svm *svm = to_svm(vcpu);

@@ -2369,6 +2395,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
svm->nested.hsave_msr = data;
break;
case MSR_VM_CR:
+ return svm_set_vm_cr(vcpu, data);
case MSR_VM_IGNNE:
pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
break;
--
1.7.0

Joerg Roedel
Feb 24, 2010, 1:10:02 PM
The vcpu->arch.cr0 variable is already set in the architecture-specific
set_cr0 callbacks, so there is no need to set it in the common code.

This allows the architecture code to keep the old arch.cr0 value if it
wants to. That is required for nested svm to decide whether a
selective_cr0 exit needs to be injected.
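
As an illustration (a hypothetical helper, not code from this series):
the selective CR0 intercept triggers only when a write changes bits
other than CR0.TS and CR0.MP, so the decision needs the old value next
to the new one:

#include <linux/types.h>		/* bool */
#include <asm/processor-flags.h>	/* X86_CR0_TS, X86_CR0_MP */

static bool selective_cr0_write_intercepted(unsigned long old_cr0,
					    unsigned long new_cr0)
{
	unsigned long changed = old_cr0 ^ new_cr0;

	/*
	 * Per the AMD manual, writes that only toggle CR0.TS or
	 * CR0.MP do not trigger the selective CR0 intercept.
	 */
	return (changed & ~(X86_CR0_TS | X86_CR0_MP)) != 0;
}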

Signed-off-by: Joerg Roedel <joerg....@amd.com>
---

arch/x86/kvm/x86.c | 1 -
1 files changed, 0 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 31d44c1..a81046b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -485,7 +485,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
}

kvm_x86_ops->set_cr0(vcpu, cr0);
- vcpu->arch.cr0 = cr0;

kvm_mmu_reset_context(vcpu);
return;
--
1.7.0

Avi Kivity
Feb 25, 2010, 5:30:01 AM
On 02/24/2010 07:59 PM, Joerg Roedel wrote:
> This patch implements the emulation of the vm_cr msr for
> nested svm.
>

This needs to be exposed to userspace for save/restore.
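
For example (a hypothetical userspace snippet, not existing VMM code):
once the MSR shows up in the list returned by KVM_GET_MSR_INDEX_LIST,
migration code can move it with KVM_GET_MSRS/KVM_SET_MSRS:

#include <err.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_VM_CR 0xc0010114	/* from msr-index.h */

/* the vcpu fds are assumed to be open KVM vcpu descriptors */
static void migrate_vm_cr(int src_vcpu_fd, int dst_vcpu_fd)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} m = {
		.hdr.nmsrs	= 1,
		.entry.index	= MSR_VM_CR,
	};

	/* both ioctls return the number of MSRs processed */
	if (ioctl(src_vcpu_fd, KVM_GET_MSRS, &m.hdr) != 1)
		err(1, "KVM_GET_MSRS");

	if (ioctl(dst_vcpu_fd, KVM_SET_MSRS, &m.hdr) != 1)
		err(1, "KVM_SET_MSRS");
}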

--
error compiling committee.c: too many arguments to function

Avi Kivity
Feb 25, 2010, 5:40:02 AM
On 02/24/2010 07:59 PM, Joerg Roedel wrote:
> Hi,
>
> here is another set of patches that I collected over the last few days
> while trying to get Hyper-V running with nested svm. I have not been
> successful yet, but the patches in this set make sense anyway :-)
>
> The patches fix coding style issues which have annoyed me for some time
> because my vim highlights all c-space-errors in red. Other patches in
> this set make nested svm more complete (NMI and selective CR0 handling).
> The vm_cr and hwcr.ignne patches came out of the Hyper-V tests; this
> hypervisor expects to be able to write to those MSRs.
>
> The most interesting one is patch 11, which reduces the amount of memory
> touched in the vmrun emulation when merging the msrpms from 24kb down to
> 48 bytes. Please review and comment on (or apply, if perfect ;-) ) these
> patches.
>

1-10 applied. 11 had about 32 suggestions for improvement; please pick
one and resubmit.

--
error compiling committee.c: too many arguments to function
