[Xen-changelog] [xen-unstable] svm: a few cleanups

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] svm: a few cleanups
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 29 Sep 2009 03:35:13 -0700
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1254220113 -3600
# Node ID bd376919f03ada900baffb62feea745f57e0a760
# Parent  ad35f39e5fdccab3b158b38e139ca3498c347cb5
svm: a few cleanups

Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
---
 xen/arch/x86/hvm/svm/svm.c |  921 ++++++++++++++++++++++-----------------------
 1 files changed, 455 insertions(+), 466 deletions(-)
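
The bulk of the diff implements one pattern: instead of forward-declaring each
svm_* handler near the top of svm.c, it forward-declares svm_function_table
itself, moves the handler definitions above the table's initialized definition,
and converts internal callers from the svm-local helpers to the generic hvm_*
wrappers. A minimal standalone sketch of that pattern in plain C (hypothetical
names, not the Xen API):

    #include <stdio.h>

    struct ops {
        void (*inject)(int vec);
    };

    /* One tentative definition of the table replaces a block of per-function
     * forward declarations; C allows the initialized definition to appear
     * later in the file. */
    static struct ops my_ops;

    static void my_inject(int vec)
    {
        printf("inject vector %d\n", vec);
    }

    /* Generic wrapper: callers name this instead of my_inject() directly. */
    static void generic_inject(int vec)
    {
        my_ops.inject(vec);
    }

    static struct ops my_ops = {
        .inject = my_inject,
    };

    int main(void)
    {
        generic_inject(14);
        return 0;
    }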

diff -r ad35f39e5fdc -r bd376919f03a xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Tue Sep 29 11:27:53 2009 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Tue Sep 29 11:28:33 2009 +0100
@@ -57,22 +57,11 @@ u32 svm_feature_flags;
 #define set_segment_register(name, value)  \
     asm volatile ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
 
+static struct hvm_function_table svm_function_table;
+
 enum handler_return { HNDL_done, HNDL_unhandled, HNDL_exception_raised };
 
 asmlinkage void do_IRQ(struct cpu_user_regs *);
-
-static void svm_update_guest_cr(struct vcpu *v, unsigned int cr);
-static void svm_update_guest_efer(struct vcpu *v);
-static void svm_inject_exception(
-    unsigned int trapnr, int errcode, unsigned long cr2);
-static void svm_cpuid_intercept(
-    unsigned int *eax, unsigned int *ebx,
-    unsigned int *ecx, unsigned int *edx);
-static void svm_wbinvd_intercept(void);
-static void svm_fpu_dirty_intercept(void);
-static int svm_msr_read_intercept(struct cpu_user_regs *regs);
-static int svm_msr_write_intercept(struct cpu_user_regs *regs);
-static void svm_invlpg_intercept(unsigned long vaddr);
 
 /* va of hardware host save area     */
 static void *hsa[NR_CPUS] __read_mostly;
@@ -103,7 +92,7 @@ static void inline __update_guest_eip(
     curr->arch.hvm_svm.vmcb->interrupt_shadow = 0;
 
     if ( regs->eflags & X86_EFLAGS_TF )
-        svm_inject_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE, 0);
 }
 
 static void svm_cpu_down(void)
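
The hunk above sits in __update_guest_eip(), which finishes off an emulated
instruction: it advances the guest RIP by the decoded length, clears the
interrupt shadow, and reflects single-stepping (EFLAGS.TF) as a debug trap,
now via the generic hvm_inject_exception(). A standalone sketch of that
sequence (simplified; the shadow clear appears only as a comment):

    #include <stdio.h>

    #define X86_EFLAGS_TF 0x00000100u    /* trap flag, EFLAGS bit 8 */

    struct guest_regs {
        unsigned long rip;
        unsigned int eflags;
    };

    static void update_guest_eip(struct guest_regs *regs, unsigned int inst_len)
    {
        regs->rip += inst_len;           /* skip the emulated instruction */
        /* the real code also zeroes vmcb->interrupt_shadow here */
        if (regs->eflags & X86_EFLAGS_TF)
            printf("TF set: inject #DB into the guest\n");
    }

    int main(void)
    {
        struct guest_regs regs = { .rip = 0x1000, .eflags = 0x102 };
        update_guest_eip(&regs, 2);
        printf("rip=%#lx\n", regs.rip);
        return 0;
    }
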
@@ -255,9 +244,9 @@ static int svm_vmcb_restore(struct vcpu 
     v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
     v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
     v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
-    svm_update_guest_cr(v, 0);
-    svm_update_guest_cr(v, 2);
-    svm_update_guest_cr(v, 4);
+    hvm_update_guest_cr(v, 0);
+    hvm_update_guest_cr(v, 2);
+    hvm_update_guest_cr(v, 4);
 
     v->arch.hvm_svm.guest_sysenter_cs = c->sysenter_cs;
     v->arch.hvm_svm.guest_sysenter_esp = c->sysenter_esp;
@@ -314,7 +303,7 @@ static void svm_load_cpu_state(struct vc
     vmcb->cstar      = data->msr_cstar;
     vmcb->sfmask     = data->msr_syscall_mask;
     v->arch.hvm_vcpu.guest_efer = data->msr_efer;
-    svm_update_guest_efer(v);
+    hvm_update_guest_efer(v);
 
     hvm_set_guest_tsc(v, data->tsc);
 }
@@ -823,6 +812,452 @@ static int svm_do_pmu_interrupt(struct c
 static int svm_do_pmu_interrupt(struct cpu_user_regs *regs)
 {
     return 0;
+}
+
+static int svm_cpu_up(struct cpuinfo_x86 *c)
+{
+    u32 eax, edx, phys_hsa_lo, phys_hsa_hi;   
+    u64 phys_hsa;
+    int cpu = smp_processor_id();
+ 
+    /* Check whether SVM feature is disabled in BIOS */
+    rdmsr(MSR_K8_VM_CR, eax, edx);
+    if ( eax & K8_VMCR_SVME_DISABLE )
+    {
+        printk("CPU%d: AMD SVM Extension is disabled in BIOS.\n", cpu);
+        return 0;
+    }
+
+    if ( ((hsa[cpu] == NULL) &&
+          ((hsa[cpu] = alloc_host_save_area()) == NULL)) ||
+         ((root_vmcb[cpu] == NULL) &&
+          ((root_vmcb[cpu] = alloc_vmcb()) == NULL)) )
+        return 0;
+
+    write_efer(read_efer() | EFER_SVME);
+
+    /* Initialize the HSA for this core. */
+    phys_hsa = (u64)virt_to_maddr(hsa[cpu]);
+    phys_hsa_lo = (u32)phys_hsa;
+    phys_hsa_hi = (u32)(phys_hsa >> 32);    
+    wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
+
+    /* Initialize core's ASID handling. */
+    svm_asid_init(c);
+
+    return 1;
+}
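
wrmsr() takes a 64-bit MSR value as separate low and high 32-bit halves, per
the x86 EDX:EAX convention, which is why svm_cpu_up() splits the host save
area address before programming MSR_K8_VM_HSAVE_PA. A self-contained sketch
of the split, with a round-trip check:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t phys_hsa = 0x0000000123456000ULL;   /* example address */
        uint32_t lo = (uint32_t)phys_hsa;            /* bits 31:0  -> EAX */
        uint32_t hi = (uint32_t)(phys_hsa >> 32);    /* bits 63:32 -> EDX */

        printf("hi=%08x lo=%08x\n", (unsigned)hi, (unsigned)lo);
        return (((uint64_t)hi << 32) | lo) == phys_hsa ? 0 : 1;
    }
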
+
+void start_svm(struct cpuinfo_x86 *c)
+{
+    static bool_t bootstrapped;
+
+    if ( test_and_set_bool(bootstrapped) )
+    {
+        if ( hvm_enabled && !svm_cpu_up(c) )
+        {
+            printk("SVM: FATAL: failed to initialise CPU%d!\n",
+                   smp_processor_id());
+            BUG();
+        }
+        return;
+    }
+
+    /* Xen does not fill x86_capability words except 0. */
+    boot_cpu_data.x86_capability[5] = cpuid_ecx(0x80000001);
+
+    if ( !test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability) )
+        return;
+
+    if ( !svm_cpu_up(c) )
+    {
+        printk("SVM: failed to initialise.\n");
+        return;
+    }
+
+    setup_vmcb_dump();
+
+    svm_feature_flags = ((cpuid_eax(0x80000000) >= 0x8000000A) ?
+                         cpuid_edx(0x8000000A) : 0);
+
+    svm_function_table.hap_supported = cpu_has_svm_npt;
+
+    hvm_enable(&svm_function_table);
+}
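
start_svm() relies on an atomic test-and-set of a static flag so that the
boot CPU performs the one-time global setup while every later CPU takes only
the per-CPU bring-up path. A rough analogue using C11 atomics (Xen's
test_and_set_bool() is likewise an atomic exchange):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool bootstrapped;

    static void bring_up_cpu(int cpu)
    {
        if (atomic_exchange(&bootstrapped, true)) {
            printf("CPU%d: per-CPU init only\n", cpu);
            return;
        }
        printf("CPU%d: one-time global init, then per-CPU init\n", cpu);
    }

    int main(void)
    {
        bring_up_cpu(0);
        bring_up_cpu(1);    /* takes the early-return path */
        return 0;
    }
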
+
+static void svm_do_nested_pgfault(paddr_t gpa, struct cpu_user_regs *regs)
+{
+    p2m_type_t p2mt;
+    mfn_t mfn;
+    unsigned long gfn = gpa >> PAGE_SHIFT;
+
+    /*
+     * If this GFN is emulated MMIO or marked as read-only, pass the fault
+     * to the mmio handler.
+     */
+    mfn = gfn_to_mfn_type_current(gfn, &p2mt, p2m_guest);
+    if ( (p2mt == p2m_mmio_dm) || (p2mt == p2m_ram_ro) )
+    {
+        if ( !handle_mmio() )
+            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        return;
+    }
+
+    /* Log-dirty: mark the page dirty and let the guest write it again */
+    if ( p2mt == p2m_ram_logdirty )
+    {
+        paging_mark_dirty(current->domain, mfn_x(mfn));
+        p2m_change_type(current->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
+        return;
+    }
+
+    /* Okay, this shouldn't happen.  Maybe the guest was writing to a
+       read-only grant mapping? */
+    if ( p2mt == p2m_grant_map_ro )
+    {
+        /* Naughty... */
+        gdprintk(XENLOG_WARNING,
+                 "trying to write to read-only grant mapping\n");
+        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        return;
+    }
+
+    /* Something bad has happened; either Xen or the hardware have
+       screwed up. */
+    gdprintk(XENLOG_WARNING, "unexpected SVM nested page fault\n");
+}
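
svm_do_nested_pgfault() is a dispatch on the p2m type of the faulting frame:
emulated MMIO and read-only RAM go to the MMIO emulator, log-dirty pages are
marked dirty and re-typed writable so the guest can retry, read-only grant
mappings earn a #GP, and anything else is logged as unexpected. A compact
sketch of that dispatch (the enum values are stand-ins for Xen's p2m_type_t):

    #include <stdio.h>

    typedef enum {
        P2M_RAM_RW, P2M_MMIO_DM, P2M_RAM_RO, P2M_RAM_LOGDIRTY, P2M_GRANT_RO
    } p2m_type;

    static void handle_nested_fault(p2m_type t)
    {
        switch (t) {
        case P2M_MMIO_DM:
        case P2M_RAM_RO:
            puts("forward to the MMIO emulator, #GP on failure");
            break;
        case P2M_RAM_LOGDIRTY:
            puts("mark dirty, re-type to RAM_RW, let the guest retry");
            break;
        case P2M_GRANT_RO:
            puts("write to read-only grant mapping: inject #GP");
            break;
        default:
            puts("unexpected nested page fault");
            break;
        }
    }

    int main(void)
    {
        handle_nested_fault(P2M_RAM_LOGDIRTY);
        return 0;
    }
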
+
+static void svm_fpu_dirty_intercept(void)
+{
+    struct vcpu *curr = current;
+    struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
+
+    svm_fpu_enter(curr);
+
+    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+        vmcb->cr0 &= ~X86_CR0_TS;
+}
+
+#define bitmaskof(idx)  (1U << ((idx) & 31))
+static void svm_cpuid_intercept(
+    unsigned int *eax, unsigned int *ebx,
+    unsigned int *ecx, unsigned int *edx)
+{
+    unsigned int input = *eax;
+    struct vcpu *v = current;
+
+    hvm_cpuid(input, eax, ebx, ecx, edx);
+
+    if ( input == 0x80000001 )
+    {
+        /* Fix up VLAPIC details. */
+        if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
+            __clear_bit(X86_FEATURE_APIC & 31, edx);
+    }
+
+    HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx);
+}
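
Xen keeps CPUID feature flags in an array of 32-bit capability words, so a
feature constant encodes both a word index and a bit position; masking with
31 recovers the position within the word being edited, which is what the
X86_FEATURE_APIC & 31 expression above does. A standalone sketch of the trick
(assuming the APIC flag is bit 9 of its word, per the CPUID layout):

    #include <stdio.h>

    #define X86_FEATURE_APIC (0 * 32 + 9)    /* word 0, bit 9 */

    int main(void)
    {
        unsigned int edx = 0xffffffffu;      /* pretend all features set */

        /* Clear the APIC flag within its 32-bit word. */
        edx &= ~(1u << (X86_FEATURE_APIC & 31));

        printf("edx=%08x\n", edx);           /* prints edx=fffffdff */
        return 0;
    }
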
+
+static void svm_vmexit_do_cpuid(struct cpu_user_regs *regs)
+{
+    unsigned int eax, ebx, ecx, edx, inst_len;
+
+    if ( (inst_len = __get_instruction_length(current, INSTR_CPUID)) == 0 )
+        return;
+
+    eax = regs->eax;
+    ebx = regs->ebx;
+    ecx = regs->ecx;
+    edx = regs->edx;
+
+    svm_cpuid_intercept(&eax, &ebx, &ecx, &edx);
+
+    regs->eax = eax;
+    regs->ebx = ebx;
+    regs->ecx = ecx;
+    regs->edx = edx;
+
+    __update_guest_eip(regs, inst_len);
+}
+
+static void svm_dr_access(struct vcpu *v, struct cpu_user_regs *regs)
+{
+    HVMTRACE_0D(DR_WRITE);
+    __restore_debug_registers(v);
+}
+
+static int svm_msr_read_intercept(struct cpu_user_regs *regs)
+{
+    u64 msr_content = 0;
+    u32 ecx = regs->ecx, eax, edx;
+    struct vcpu *v = current;
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    switch ( ecx )
+    {
+    case MSR_EFER:
+        msr_content = v->arch.hvm_vcpu.guest_efer;
+        break;
+
+    case MSR_IA32_SYSENTER_CS:
+        msr_content = v->arch.hvm_svm.guest_sysenter_cs;
+        break;
+    case MSR_IA32_SYSENTER_ESP:
+        msr_content = v->arch.hvm_svm.guest_sysenter_esp;
+        break;
+    case MSR_IA32_SYSENTER_EIP:
+        msr_content = v->arch.hvm_svm.guest_sysenter_eip;
+        break;
+
+    case MSR_IA32_MC4_MISC: /* Threshold register */
+    case MSR_F10_MC4_MISC1 ... MSR_F10_MC4_MISC3:
+        /*
+         * MCA/MCE: We report that the threshold register is unavailable
+         * for OS use (locked by the BIOS).
+         */
+        msr_content = 1ULL << 61; /* MC4_MISC.Locked */
+        break;
+
+    case MSR_IA32_EBC_FREQUENCY_ID:
+        /*
+         * This Intel-only register may be accessed if this HVM guest
+         * has been migrated from an Intel host. The value zero is not
+         * particularly meaningful, but at least avoids the guest crashing!
+         */
+        msr_content = 0;
+        break;
+
+    case MSR_K8_VM_HSAVE_PA:
+        goto gpf;
+
+    case MSR_IA32_DEBUGCTLMSR:
+        msr_content = vmcb->debugctlmsr;
+        break;
+
+    case MSR_IA32_LASTBRANCHFROMIP:
+        msr_content = vmcb->lastbranchfromip;
+        break;
+
+    case MSR_IA32_LASTBRANCHTOIP:
+        msr_content = vmcb->lastbranchtoip;
+        break;
+
+    case MSR_IA32_LASTINTFROMIP:
+        msr_content = vmcb->lastintfromip;
+        break;
+
+    case MSR_IA32_LASTINTTOIP:
+        msr_content = vmcb->lastinttoip;
+        break;
+
+    default:
+
+        if ( rdmsr_viridian_regs(ecx, &msr_content) ||
+             rdmsr_hypervisor_regs(ecx, &msr_content) )
+            break;
+
+        if ( rdmsr_safe(ecx, eax, edx) == 0 )
+        {
+            msr_content = ((uint64_t)edx << 32) | eax;
+            break;
+        }
+
+        goto gpf;
+    }
+
+    regs->eax = (uint32_t)msr_content;
+    regs->edx = (uint32_t)(msr_content >> 32);
+
+    HVMTRACE_3D (MSR_READ, ecx, regs->eax, regs->edx);
+    HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
+                ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
+    return X86EMUL_OKAY;
+
+ gpf:
+    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    return X86EMUL_EXCEPTION;
+}
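
The default case above tries readers in priority order (rdmsr_viridian_regs(),
then rdmsr_hypervisor_regs(), then a fault-safe host rdmsr_safe()) and only
injects #GP once all of them decline. A generic chain-of-responsibility sketch
of that shape (the handler names here are hypothetical stand-ins):

    #include <stdint.h>
    #include <stdio.h>

    typedef int (*msr_reader)(uint32_t msr, uint64_t *val);

    static int viridian_read(uint32_t msr, uint64_t *val)
    { (void)msr; (void)val; return 0; }          /* declines */

    static int hypervisor_read(uint32_t msr, uint64_t *val)
    { (void)msr; (void)val; return 0; }          /* declines */

    static int host_read(uint32_t msr, uint64_t *val)
    { (void)msr; *val = 42; return 1; }          /* handles */

    static int read_msr(uint32_t msr, uint64_t *val)
    {
        msr_reader chain[] = { viridian_read, hypervisor_read, host_read };
        for (unsigned int i = 0; i < sizeof(chain) / sizeof(chain[0]); i++)
            if (chain[i](msr, val))
                return 1;                        /* handled */
        return 0;                                /* caller injects #GP */
    }

    int main(void)
    {
        uint64_t val = 0;
        if (read_msr(0x10, &val))
            printf("val=%llu\n", (unsigned long long)val);
        else
            puts("inject #GP");
        return 0;
    }
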
+
+static int svm_msr_write_intercept(struct cpu_user_regs *regs)
+{
+    u64 msr_content = 0;
+    u32 ecx = regs->ecx;
+    struct vcpu *v = current;
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
+
+    HVMTRACE_3D (MSR_WRITE, ecx, regs->eax, regs->edx);
+
+    switch ( ecx )
+    {
+    case MSR_K8_VM_HSAVE_PA:
+        goto gpf;
+
+    case MSR_IA32_SYSENTER_CS:
+        v->arch.hvm_svm.guest_sysenter_cs = msr_content;
+        break;
+    case MSR_IA32_SYSENTER_ESP:
+        v->arch.hvm_svm.guest_sysenter_esp = msr_content;
+        break;
+    case MSR_IA32_SYSENTER_EIP:
+        v->arch.hvm_svm.guest_sysenter_eip = msr_content;
+        break;
+
+    case MSR_IA32_DEBUGCTLMSR:
+        vmcb->debugctlmsr = msr_content;
+        if ( !msr_content || !cpu_has_svm_lbrv )
+            break;
+        vmcb->lbr_control.fields.enable = 1;
+        svm_disable_intercept_for_msr(v, MSR_IA32_DEBUGCTLMSR);
+        svm_disable_intercept_for_msr(v, MSR_IA32_LASTBRANCHFROMIP);
+        svm_disable_intercept_for_msr(v, MSR_IA32_LASTBRANCHTOIP);
+        svm_disable_intercept_for_msr(v, MSR_IA32_LASTINTFROMIP);
+        svm_disable_intercept_for_msr(v, MSR_IA32_LASTINTTOIP);
+        break;
+
+    case MSR_IA32_LASTBRANCHFROMIP:
+        vmcb->lastbranchfromip = msr_content;
+        break;
+
+    case MSR_IA32_LASTBRANCHTOIP:
+        vmcb->lastbranchtoip = msr_content;
+        break;
+
+    case MSR_IA32_LASTINTFROMIP:
+        vmcb->lastintfromip = msr_content;
+        break;
+
+    case MSR_IA32_LASTINTTOIP:
+        vmcb->lastinttoip = msr_content;
+        break;
+
+    default:
+        if ( wrmsr_viridian_regs(ecx, msr_content) )
+            break;
+
+        switch ( long_mode_do_msr_write(regs) )
+        {
+        case HNDL_unhandled:
+            wrmsr_hypervisor_regs(ecx, msr_content);
+            break;
+        case HNDL_exception_raised:
+            return X86EMUL_EXCEPTION;
+        case HNDL_done:
+            break;
+        }
+        break;
+    }
+
+    return X86EMUL_OKAY;
+
+ gpf:
+    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    return X86EMUL_EXCEPTION;
+}
+
+static void svm_do_msr_access(struct cpu_user_regs *regs)
+{
+    int rc, inst_len;
+    struct vcpu *v = current;
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    if ( vmcb->exitinfo1 == 0 )
+    {
+        if ( (inst_len = __get_instruction_length(v, INSTR_RDMSR)) == 0 )
+            return;
+        rc = hvm_msr_read_intercept(regs);
+    }
+    else
+    {
+        if ( (inst_len = __get_instruction_length(v, INSTR_WRMSR)) == 0 )
+            return;
+        rc = hvm_msr_write_intercept(regs);
+    }
+
+    if ( rc == X86EMUL_OKAY )
+        __update_guest_eip(regs, inst_len);
+}
+
+static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
+                              struct cpu_user_regs *regs)
+{
+    unsigned int inst_len;
+
+    if ( (inst_len = __get_instruction_length(current, INSTR_HLT)) == 0 )
+        return;
+    __update_guest_eip(regs, inst_len);
+
+    hvm_hlt(regs->eflags);
+}
+
+static void svm_vmexit_do_rdtsc(struct cpu_user_regs *regs)
+{
+    unsigned int inst_len;
+
+    if ( (inst_len = __get_instruction_length(current, INSTR_RDTSC)) == 0 )
+        return;
+    __update_guest_eip(regs, inst_len);
+
+    hvm_rdtsc_intercept(regs);
+}
+
+static void svm_vmexit_ud_intercept(struct cpu_user_regs *regs)
+{
+    struct hvm_emulate_ctxt ctxt;
+    int rc;
+
+    hvm_emulate_prepare(&ctxt, regs);
+
+    rc = hvm_emulate_one(&ctxt);
+
+    switch ( rc )
+    {
+    case X86EMUL_UNHANDLEABLE:
+        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        break;
+    case X86EMUL_EXCEPTION:
+        if ( ctxt.exn_pending )
+            hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
+        /* fall through */
+    default:
+        hvm_emulate_writeback(&ctxt);
+        break;
+    }
+}
+
+static void wbinvd_ipi(void *info)
+{
+    wbinvd();
+}
+
+static void svm_wbinvd_intercept(void)
+{
+    if ( has_arch_pdevs(current->domain) )
+        on_each_cpu(wbinvd_ipi, NULL, 1);
+}
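
svm_wbinvd_intercept() only broadcasts the flush when the domain has physical
devices assigned, since that is when stale cache lines matter for device DMA;
on_each_cpu() runs the callback on every online CPU via IPI. A hedged sketch
of the broadcast shape, with a plain loop standing in for the IPI machinery:

    #include <stdio.h>

    #define NR_CPUS 4

    static void wbinvd_ipi(void *info)
    {
        (void)info;
        puts("wbinvd");          /* stand-in for the WBINVD instruction */
    }

    static void on_each_cpu_sketch(void (*fn)(void *), void *info)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            fn(info);            /* the real kernel sends IPIs and waits */
    }

    int main(void)
    {
        int has_arch_pdevs = 1;  /* pretend a device is passed through */
        if (has_arch_pdevs)
            on_each_cpu_sketch(wbinvd_ipi, NULL);
        return 0;
    }
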
+
+static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs)
+{
+    enum instruction_index list[] = { INSTR_INVD, INSTR_WBINVD };
+    int inst_len;
+
+    inst_len = __get_instruction_length_from_list(
+        current, list, ARRAY_SIZE(list));
+    if ( inst_len == 0 )
+        return;
+
+    svm_wbinvd_intercept();
+
+    __update_guest_eip(regs, inst_len);
+}
+
+static void svm_invlpg_intercept(unsigned long vaddr)
+{
+    struct vcpu *curr = current;
+    HVMTRACE_LONG_2D(INVLPG, 0, TRC_PAR_LONG(vaddr));
+    paging_invlpg(curr, vaddr);
+    svm_asid_g_invlpg(curr, vaddr);
 }
 
 static struct hvm_function_table svm_function_table = {
@@ -857,452 +1292,6 @@ static struct hvm_function_table svm_fun
     .set_rdtsc_exiting    = svm_set_rdtsc_exiting
 };
 
-static int svm_cpu_up(struct cpuinfo_x86 *c)
-{
-    u32 eax, edx, phys_hsa_lo, phys_hsa_hi;   
-    u64 phys_hsa;
-    int cpu = smp_processor_id();
- 
-    /* Check whether SVM feature is disabled in BIOS */
-    rdmsr(MSR_K8_VM_CR, eax, edx);
-    if ( eax & K8_VMCR_SVME_DISABLE )
-    {
-        printk("CPU%d: AMD SVM Extension is disabled in BIOS.\n", cpu);
-        return 0;
-    }
-
-    if ( ((hsa[cpu] == NULL) &&
-          ((hsa[cpu] = alloc_host_save_area()) == NULL)) ||
-         ((root_vmcb[cpu] == NULL) &&
-          ((root_vmcb[cpu] = alloc_vmcb()) == NULL)) )
-        return 0;
-
-    write_efer(read_efer() | EFER_SVME);
-
-    /* Initialize the HSA for this core. */
-    phys_hsa = (u64)virt_to_maddr(hsa[cpu]);
-    phys_hsa_lo = (u32)phys_hsa;
-    phys_hsa_hi = (u32)(phys_hsa >> 32);    
-    wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
-
-    /* Initialize core's ASID handling. */
-    svm_asid_init(c);
-
-    return 1;
-}
-
-void start_svm(struct cpuinfo_x86 *c)
-{
-    static bool_t bootstrapped;
-
-    if ( test_and_set_bool(bootstrapped) )
-    {
-        if ( hvm_enabled && !svm_cpu_up(c) )
-        {
-            printk("SVM: FATAL: failed to initialise CPU%d!\n",
-                   smp_processor_id());
-            BUG();
-        }
-        return;
-    }
-
-    /* Xen does not fill x86_capability words except 0. */
-    boot_cpu_data.x86_capability[5] = cpuid_ecx(0x80000001);
-
-    if ( !test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability) )
-        return;
-
-    if ( !svm_cpu_up(c) )
-    {
-        printk("SVM: failed to initialise.\n");
-        return;
-    }
-
-    setup_vmcb_dump();
-
-    svm_feature_flags = ((cpuid_eax(0x80000000) >= 0x8000000A) ?
-                         cpuid_edx(0x8000000A) : 0);
-
-    svm_function_table.hap_supported = cpu_has_svm_npt;
-
-    hvm_enable(&svm_function_table);
-}
-
-static void svm_do_nested_pgfault(paddr_t gpa, struct cpu_user_regs *regs)
-{
-    p2m_type_t p2mt;
-    mfn_t mfn;
-    unsigned long gfn = gpa >> PAGE_SHIFT;
-
-    /*
-     * If this GFN is emulated MMIO or marked as read-only, pass the fault
-     * to the mmio handler.
-     */
-    mfn = gfn_to_mfn_type_current(gfn, &p2mt, p2m_guest);
-    if ( (p2mt == p2m_mmio_dm) || (p2mt == p2m_ram_ro) )
-    {
-        if ( !handle_mmio() )
-            hvm_inject_exception(TRAP_gp_fault, 0, 0);
-        return;
-    }
-
-    /* Log-dirty: mark the page dirty and let the guest write it again */
-    if ( p2mt == p2m_ram_logdirty )
-    {
-        paging_mark_dirty(current->domain, mfn_x(mfn));
-        p2m_change_type(current->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
-        return;
-    }
-
-    /* Okay, this shouldn't happen.  Maybe the guest was writing to a
-       read-only grant mapping? */
-    if ( p2mt == p2m_grant_map_ro )
-    {
-        /* Naughty... */
-        gdprintk(XENLOG_WARNING,
-                 "trying to write to read-only grant mapping\n");
-        hvm_inject_exception(TRAP_gp_fault, 0, 0);
-        return;
-    }
-
-    /* Something bad has happened; either Xen or the hardware have
-       screwed up. */
-    gdprintk(XENLOG_WARNING, "unexpected SVM nested page fault\n");
-}
-
-static void svm_fpu_dirty_intercept(void)
-{
-    struct vcpu *curr = current;
-    struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
-
-    svm_fpu_enter(curr);
-
-    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
-        vmcb->cr0 &= ~X86_CR0_TS;
-}
-
-#define bitmaskof(idx)  (1U << ((idx) & 31))
-static void svm_cpuid_intercept(
-    unsigned int *eax, unsigned int *ebx,
-    unsigned int *ecx, unsigned int *edx)
-{
-    unsigned int input = *eax;
-    struct vcpu *v = current;
-
-    hvm_cpuid(input, eax, ebx, ecx, edx);
-
-    if ( input == 0x80000001 )
-    {
-        /* Fix up VLAPIC details. */
-        if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
-            __clear_bit(X86_FEATURE_APIC & 31, edx);
-    }
-
-    HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx);
-}
-
-static void svm_vmexit_do_cpuid(struct cpu_user_regs *regs)
-{
-    unsigned int eax, ebx, ecx, edx, inst_len;
-
-    if ( (inst_len = __get_instruction_length(current, INSTR_CPUID)) == 0 )
-        return;
-
-    eax = regs->eax;
-    ebx = regs->ebx;
-    ecx = regs->ecx;
-    edx = regs->edx;
-
-    svm_cpuid_intercept(&eax, &ebx, &ecx, &edx);
-
-    regs->eax = eax;
-    regs->ebx = ebx;
-    regs->ecx = ecx;
-    regs->edx = edx;
-
-    __update_guest_eip(regs, inst_len);
-}
-
-static void svm_dr_access(struct vcpu *v, struct cpu_user_regs *regs)
-{
-    HVMTRACE_0D(DR_WRITE);
-    __restore_debug_registers(v);
-}
-
-static int svm_msr_read_intercept(struct cpu_user_regs *regs)
-{
-    u64 msr_content = 0;
-    u32 ecx = regs->ecx, eax, edx;
-    struct vcpu *v = current;
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
-    switch ( ecx )
-    {
-    case MSR_EFER:
-        msr_content = v->arch.hvm_vcpu.guest_efer;
-        break;
-
-    case MSR_IA32_SYSENTER_CS:
-        msr_content = v->arch.hvm_svm.guest_sysenter_cs;
-        break;
-    case MSR_IA32_SYSENTER_ESP:
-        msr_content = v->arch.hvm_svm.guest_sysenter_esp;
-        break;
-    case MSR_IA32_SYSENTER_EIP:
-        msr_content = v->arch.hvm_svm.guest_sysenter_eip;
-        break;
-
-    case MSR_IA32_MC4_MISC: /* Threshold register */
-    case MSR_F10_MC4_MISC1 ... MSR_F10_MC4_MISC3:
-        /*
-         * MCA/MCE: We report that the threshold register is unavailable
-         * for OS use (locked by the BIOS).
-         */
-        msr_content = 1ULL << 61; /* MC4_MISC.Locked */
-        break;
-
-    case MSR_IA32_EBC_FREQUENCY_ID:
-        /*
-         * This Intel-only register may be accessed if this HVM guest
-         * has been migrated from an Intel host. The value zero is not
-         * particularly meaningful, but at least avoids the guest crashing!
-         */
-        msr_content = 0;
-        break;
-
-    case MSR_K8_VM_HSAVE_PA:
-        goto gpf;
-
-    case MSR_IA32_DEBUGCTLMSR:
-        msr_content = vmcb->debugctlmsr;
-        break;
-
-    case MSR_IA32_LASTBRANCHFROMIP:
-        msr_content = vmcb->lastbranchfromip;
-        break;
-
-    case MSR_IA32_LASTBRANCHTOIP:
-        msr_content = vmcb->lastbranchtoip;
-        break;
-
-    case MSR_IA32_LASTINTFROMIP:
-        msr_content = vmcb->lastintfromip;
-        break;
-
-    case MSR_IA32_LASTINTTOIP:
-        msr_content = vmcb->lastinttoip;
-        break;
-
-    default:
-
-        if ( rdmsr_viridian_regs(ecx, &msr_content) ||
-             rdmsr_hypervisor_regs(ecx, &msr_content) )
-            break;
-
-        if ( rdmsr_safe(ecx, eax, edx) == 0 )
-        {
-            msr_content = ((uint64_t)edx << 32) | eax;
-            break;
-        }
-
-        goto gpf;
-    }
-
-    regs->eax = (uint32_t)msr_content;
-    regs->edx = (uint32_t)(msr_content >> 32);
-
-    HVMTRACE_3D (MSR_READ, ecx, regs->eax, regs->edx);
-    HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
-                ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
-    return X86EMUL_OKAY;
-
- gpf:
-    svm_inject_exception(TRAP_gp_fault, 0, 0);
-    return X86EMUL_EXCEPTION;
-}
-
-static int svm_msr_write_intercept(struct cpu_user_regs *regs)
-{
-    u64 msr_content = 0;
-    u32 ecx = regs->ecx;
-    struct vcpu *v = current;
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
-    msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
-
-    HVMTRACE_3D (MSR_WRITE, ecx, regs->eax, regs->edx);
-
-    switch ( ecx )
-    {
-    case MSR_K8_VM_HSAVE_PA:
-        goto gpf;
-
-    case MSR_IA32_SYSENTER_CS:
-        v->arch.hvm_svm.guest_sysenter_cs = msr_content;
-        break;
-    case MSR_IA32_SYSENTER_ESP:
-        v->arch.hvm_svm.guest_sysenter_esp = msr_content;
-        break;
-    case MSR_IA32_SYSENTER_EIP:
-        v->arch.hvm_svm.guest_sysenter_eip = msr_content;
-        break;
-
-    case MSR_IA32_DEBUGCTLMSR:
-        vmcb->debugctlmsr = msr_content;
-        if ( !msr_content || !cpu_has_svm_lbrv )
-            break;
-        vmcb->lbr_control.fields.enable = 1;
-        svm_disable_intercept_for_msr(v, MSR_IA32_DEBUGCTLMSR);
-        svm_disable_intercept_for_msr(v, MSR_IA32_LASTBRANCHFROMIP);
-        svm_disable_intercept_for_msr(v, MSR_IA32_LASTBRANCHTOIP);
-        svm_disable_intercept_for_msr(v, MSR_IA32_LASTINTFROMIP);
-        svm_disable_intercept_for_msr(v, MSR_IA32_LASTINTTOIP);
-        break;
-
-    case MSR_IA32_LASTBRANCHFROMIP:
-        vmcb->lastbranchfromip = msr_content;
-        break;
-
-    case MSR_IA32_LASTBRANCHTOIP:
-        vmcb->lastbranchtoip = msr_content;
-        break;
-
-    case MSR_IA32_LASTINTFROMIP:
-        vmcb->lastintfromip = msr_content;
-        break;
-
-    case MSR_IA32_LASTINTTOIP:
-        vmcb->lastinttoip = msr_content;
-        break;
-
-    default:
-        if ( wrmsr_viridian_regs(ecx, msr_content) )
-            break;
-
-        switch ( long_mode_do_msr_write(regs) )
-        {
-        case HNDL_unhandled:
-            wrmsr_hypervisor_regs(ecx, msr_content);
-            break;
-        case HNDL_exception_raised:
-            return X86EMUL_EXCEPTION;
-        case HNDL_done:
-            break;
-        }
-        break;
-    }
-
-    return X86EMUL_OKAY;
-
- gpf:
-    svm_inject_exception(TRAP_gp_fault, 0, 0);
-    return X86EMUL_EXCEPTION;
-}
-
-static void svm_do_msr_access(struct cpu_user_regs *regs)
-{
-    int rc, inst_len;
-    struct vcpu *v = current;
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
-    if ( vmcb->exitinfo1 == 0 )
-    {
-        if ( (inst_len = __get_instruction_length(v, INSTR_RDMSR)) == 0 )
-            return;
-        rc = hvm_msr_read_intercept(regs);
-    }
-    else
-    {
-        if ( (inst_len = __get_instruction_length(v, INSTR_WRMSR)) == 0 )
-            return;
-        rc = hvm_msr_write_intercept(regs);
-    }
-
-    if ( rc == X86EMUL_OKAY )
-        __update_guest_eip(regs, inst_len);
-}
-
-static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
-                              struct cpu_user_regs *regs)
-{
-    unsigned int inst_len;
-
-    if ( (inst_len = __get_instruction_length(current, INSTR_HLT)) == 0 )
-        return;
-    __update_guest_eip(regs, inst_len);
-
-    hvm_hlt(regs->eflags);
-}
-
-static void svm_vmexit_do_rdtsc(struct cpu_user_regs *regs)
-{
-    unsigned int inst_len;
-
-    if ( (inst_len = __get_instruction_length(current, INSTR_RDTSC)) == 0 )
-        return;
-    __update_guest_eip(regs, inst_len);
-
-    hvm_rdtsc_intercept(regs);
-}
-
-static void svm_vmexit_ud_intercept(struct cpu_user_regs *regs)
-{
-    struct hvm_emulate_ctxt ctxt;
-    int rc;
-
-    hvm_emulate_prepare(&ctxt, regs);
-
-    rc = hvm_emulate_one(&ctxt);
-
-    switch ( rc )
-    {
-    case X86EMUL_UNHANDLEABLE:
-        svm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
-        break;
-    case X86EMUL_EXCEPTION:
-        if ( ctxt.exn_pending )
-            hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
-        /* fall through */
-    default:
-        hvm_emulate_writeback(&ctxt);
-        break;
-    }
-}
-
-static void wbinvd_ipi(void *info)
-{
-    wbinvd();
-}
-
-static void svm_wbinvd_intercept(void)
-{
-    if ( has_arch_pdevs(current->domain) )
-        on_each_cpu(wbinvd_ipi, NULL, 1);
-}
-
-static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs)
-{
-    enum instruction_index list[] = { INSTR_INVD, INSTR_WBINVD };
-    int inst_len;
-
-    inst_len = __get_instruction_length_from_list(
-        current, list, ARRAY_SIZE(list));
-    if ( inst_len == 0 )
-        return;
-
-    svm_wbinvd_intercept();
-
-    __update_guest_eip(regs, inst_len);
-}
-
-static void svm_invlpg_intercept(unsigned long vaddr)
-{
-    struct vcpu *curr = current;
-    HVMTRACE_LONG_2D(INVLPG, 0, TRC_PAR_LONG(vaddr));
-    paging_invlpg(curr, vaddr);
-    svm_asid_g_invlpg(curr, vaddr);
-}
-
 asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
 {
     unsigned int exit_reason;
@@ -1411,7 +1400,7 @@ asmlinkage void svm_vmexit_handler(struc
             break;
         }
 
-        svm_inject_exception(TRAP_page_fault, regs->error_code, va);
+        hvm_inject_exception(TRAP_page_fault, regs->error_code, va);
         break;
     }
 
@@ -1514,7 +1503,7 @@ asmlinkage void svm_vmexit_handler(struc
     case VMEXIT_STGI:
     case VMEXIT_CLGI:
     case VMEXIT_SKINIT:
-        svm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
         break;
 
     case VMEXIT_NPF:

