[Xen-changelog] [xen-unstable] [HVM][SVM] Reintroduce ASIDs.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [HVM][SVM] Reintroduce ASIDs.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 15 May 2007 11:30:08 -0700
Delivery-date: Tue, 15 May 2007 11:29:03 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1179238442 -3600
# Node ID d4a0706d6747cd54613328fe630c3bcb7cafbac5
# Parent  5f09e34f56d7c3b77fc87dcb9310e05946b82670
[HVM][SVM] Reintroduce ASIDs.

ASIDs partition the physical TLB for SVM.  In the current implementation
ASIDs are used to reduce the number of TLB flushes.  Each time the
guest's virtual address space changes (e.g. due to an INVLPG,
MOV-TO-{CR3, CR4} operation), instead of flushing the TLB, a new ASID is
assigned.  This reduces the number of TLB flushes to at most 1/#ASIDs
of the previous rate (currently 1/64).  The biggest advantage is that hot
parts of the hypervisor's code and data remain in the TLB.
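
To illustrate the scheme, a minimal sketch (simplified, made-up names such
as assign_asid() and flush_physical_tlb(); the real implementation is in
xen/arch/x86/hvm/svm/asid.c below):

    struct asid_core { uint64_t generation; uint32_t next_asid, max_asid; };
    struct my_vcpu   { uint64_t asid_generation; uint32_t asid; };

    void flush_physical_tlb(void);     /* stand-in for the VMCB tlb_control
                                          mechanism used by the patch */

    /* Called on each VMRUN, per physical CPU. */
    static void assign_asid(struct my_vcpu *v, struct asid_core *c)
    {
        if ( v->asid_generation == c->generation )
            return;                    /* ASID still valid: nothing to do. */

        if ( c->next_asid > c->max_asid )
        {
            /* Out of ASIDs: invalidate all of them at once by starting a
               new generation, and flush the physical TLB exactly once. */
            c->generation++;
            c->next_asid = 1;
            flush_physical_tlb();
        }

        v->asid = c->next_asid++;      /* Round-robin assignment. */
        v->asid_generation = c->generation;
    }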

From: Sebastian Biemueller <Sebastian.Biemueller@xxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/cpu/amd.c              |    4 
 xen/arch/x86/hvm/svm/Makefile       |    1 
 xen/arch/x86/hvm/svm/asid.c         |  232 ++++++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/svm/svm.c          |   36 ++++-
 xen/arch/x86/hvm/svm/vmcb.c         |   11 -
 xen/arch/x86/hvm/svm/x86_32/exits.S |    5 
 xen/arch/x86/hvm/svm/x86_64/exits.S |    5 
 xen/include/asm-x86/hvm/svm/asid.h  |   72 +++++++++++
 xen/include/asm-x86/hvm/svm/vmcb.h  |    5 
 9 files changed, 356 insertions(+), 15 deletions(-)
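
For orientation, the interface introduced by this patch (collected from
asid.c and asid.h below; see those files for the authoritative definitions):

    void svm_asid_init(struct cpuinfo_x86 *c);   /* per-CPU setup, called
                                                    from start_svm()       */
    void svm_asid_init_vcpu(struct vcpu *v);     /* force a VCPU to fetch
                                                    a new ASID             */
    void svm_asid_inv_asid(struct vcpu *v);      /* invalidate the VCPU's
                                                    current ASID           */
    asmlinkage void svm_asid_handle_vmrun(void); /* fast path, called from
                                                    exits.S before VMRUN   */

    /* Guest-triggered events (inline wrappers around svm_asid_inv_asid): */
    void svm_asid_g_update_paging(struct vcpu *v);
    void svm_asid_g_mov_to_cr3(struct vcpu *v);
    void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr);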

diff -r 5f09e34f56d7 -r d4a0706d6747 xen/arch/x86/cpu/amd.c
--- a/xen/arch/x86/cpu/amd.c    Tue May 15 10:50:09 2007 +0100
+++ b/xen/arch/x86/cpu/amd.c    Tue May 15 15:14:02 2007 +0100
@@ -9,6 +9,8 @@
 #include <asm/hvm/support.h>
 
 #include "cpu.h"
+
+int start_svm(struct cpuinfo_x86 *c);
 
 /*
  * amd_flush_filter={on,off}. Forcibly Enable or disable the TLB flush
@@ -335,7 +337,7 @@ static void __init init_amd(struct cpuin
        if ((smp_processor_id() == 1) && c1_ramping_may_cause_clock_drift(c))
                disable_c1_ramping();
 
-       start_svm();
+       start_svm(c);
 }
 
 static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
diff -r 5f09e34f56d7 -r d4a0706d6747 xen/arch/x86/hvm/svm/Makefile
--- a/xen/arch/x86/hvm/svm/Makefile     Tue May 15 10:50:09 2007 +0100
+++ b/xen/arch/x86/hvm/svm/Makefile     Tue May 15 15:14:02 2007 +0100
@@ -1,6 +1,7 @@ subdir-$(x86_32) += x86_32
 subdir-$(x86_32) += x86_32
 subdir-$(x86_64) += x86_64
 
+obj-y += asid.o
 obj-y += emulate.o
 obj-y += intr.o
 obj-y += svm.o
diff -r 5f09e34f56d7 -r d4a0706d6747 xen/arch/x86/hvm/svm/asid.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/hvm/svm/asid.c       Tue May 15 15:14:02 2007 +0100
@@ -0,0 +1,232 @@
+/*
+ * asid.c: handling ASIDs in SVM.
+ * Copyright (c) 2007, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/perfc.h>
+#include <asm/hvm/svm/asid.h>
+
+/*
+ * This is the interface to SVM's ASID management.  ASIDs partition the
+ * physical TLB for SVM.  In the current implementation ASIDs are introduced
+ * to reduce the number of TLB flushes.  Each time the guest's virtual
+ * address space changes (e.g. due to an INVLPG, MOV-TO-{CR3, CR4} operation),
+ * instead of flushing the TLB, a new ASID is assigned.  This reduces the
+ * number of TLB flushes to at most 1/#ASIDs of the previous rate (currently
+ * 1/64).  The biggest advantage is that hot parts of the hypervisor's code
+ * and data remain in the TLB.
+ *
+ * Sketch of the Implementation:
+ *
+ * ASIDs are a CPU-local resource.  As preemption of ASIDs is not possible,
+ * ASIDs are assigned in a round-robin scheme.  To minimize the overhead of
+ * ASID invalidation at the time of a TLB flush, ASIDs are tagged with a
+ * 64-bit generation.  Only on a generation overflow does the code need to
+ * invalidate all ASID information stored in the VCPUs that run on the
+ * specific physical processor.  This overflow appears after about 2^80
+ * host processor cycles, so we do not optimize this case, but simply disable
+ * ASID usage to retain correctness.
+ */
+
+/* Usable guest ASIDs are [ 1 .. get_max_asid() ). */
+#define SVM_ASID_FIRST_GUEST_ASID       1
+
+#define SVM_ASID_FIRST_GENERATION       0
+
+/* triggers the flush of all generations on all VCPUs */
+#define SVM_ASID_LAST_GENERATION        (0xfffffffffffffffd)
+
+/* triggers assignment of new ASID to a VCPU */
+#define SVM_ASID_INVALID_GENERATION     (SVM_ASID_LAST_GENERATION + 1)
+
+/* Per-CPU ASID management. */
+struct svm_asid_data {
+   u64 core_asid_generation;
+   u32 next_asid;
+   u32 max_asid;
+   u32 erratum170;
+};
+
+static DEFINE_PER_CPU(struct svm_asid_data, svm_asid_data);
+
+/*
+ * Get handle to CPU-local ASID management data.
+ */
+static struct svm_asid_data *svm_asid_core_data(void)
+{
+    return &get_cpu_var(svm_asid_data);
+}
+
+/*
+ * Init ASID management for the current physical CPU.
+ */
+void svm_asid_init(struct cpuinfo_x86 *c)
+{
+    int nasids;
+    struct svm_asid_data *data = svm_asid_core_data();
+
+    /* Find the number of ASIDs (CPUID 0x8000000A, EBX). */
+    nasids = cpuid_ebx(0x8000000A);
+    data->max_asid = nasids - 1;
+
+    /* Check if we can use ASIDs. */
+    data->erratum170 =
+        !((c->x86 == 0x10) ||
+          ((c->x86 == 0xf) && (c->x86_model >= 0x68) && (c->x86_mask >= 1)));
+
+    printk("AMD SVM: ASIDs %s\n",
+           (data->erratum170 ? "disabled." : "enabled."));
+
+    /* Initialize ASID assignment. */
+    if ( data->erratum170 )
+    {
+        /* On erratum #170, VCPUs and physical processors should have the
+           same generation.  We set both to invalid. */
+        data->core_asid_generation = SVM_ASID_INVALID_GENERATION;
+    }
+    else
+    {
+        data->core_asid_generation = SVM_ASID_FIRST_GENERATION;
+    }
+
+    /* ASIDs are assigned round-robin.  Start with the first. */
+    data->next_asid = SVM_ASID_FIRST_GUEST_ASID;
+}
+
+/*
+ * Force VCPU to fetch a new ASID.
+ */
+void svm_asid_init_vcpu(struct vcpu *v)
+{
+    struct svm_asid_data *data = svm_asid_core_data();
+
+    /* Trigger assignment of a new ASID. */
+    v->arch.hvm_svm.asid_generation = SVM_ASID_INVALID_GENERATION;
+
+    /*
+     * The erratum is a property of the physical processor.  The processor
+     * itself never changes the tlb_control field, so we need only set
+     * tlb_control at VMCB creation time and on migration.
+     */
+    if ( data->erratum170 )
+    {
+        /* Flush TLB every VMRUN to handle Errata #170. */
+        v->arch.hvm_svm.vmcb->tlb_control = 1;
+        /* All guests use same ASID. */
+        v->arch.hvm_svm.vmcb->guest_asid  = 1;
+    }
+    else
+    {
+        /* These fields are handled on VMRUN. */
+        v->arch.hvm_svm.vmcb->tlb_control = 0;
+        v->arch.hvm_svm.vmcb->guest_asid  = 0;
+    }
+}
+
+/*
+ * Increment the generation to free up ASIDs: flush the physical TLB and
+ * assign the VCPU a fresh ASID.
+ */
+static void svm_asid_handle_inc_generation(struct vcpu *v)
+{
+    struct svm_asid_data *data = svm_asid_core_data();
+
+    if ( likely(data->core_asid_generation < SVM_ASID_LAST_GENERATION) )
+    {
+        /* Handle ASID overflow. */
+        data->core_asid_generation++;
+        data->next_asid = SVM_ASID_FIRST_GUEST_ASID + 1;
+
+        /* Handle VCPU. */
+        v->arch.hvm_svm.vmcb->guest_asid = SVM_ASID_FIRST_GUEST_ASID;
+        v->arch.hvm_svm.asid_generation  = data->core_asid_generation;
+
+        /* Trigger flush of physical TLB. */
+        v->arch.hvm_svm.vmcb->tlb_control = 1;
+        return;
+    }
+
+    /*
+     * ASID generations are 64 bit, so in practice a generation overflow
+     * should never happen.  For safety, we simply disable ASIDs and switch
+     * to erratum #170 mode on this core (always flushing the TLB).
+     * Correctness is preserved; the core just runs a bit slower.
+     */
+    printk("AMD SVM: ASID generation overrun. Disabling ASIDs.\n");
+    data->erratum170 = 1;
+    data->core_asid_generation = SVM_ASID_INVALID_GENERATION;
+
+    svm_asid_init_vcpu(v);
+}
+
+/*
+ * Called directly before VMRUN.  Checks whether the VCPU needs a new ASID,
+ * assigns one, and issues any required TLB flushes.
+ */
+asmlinkage void svm_asid_handle_vmrun(void)
+{
+    struct vcpu *v = current;
+    struct svm_asid_data *data = svm_asid_core_data();
+
+    /* On erratum #170 systems we must flush the TLB.
+     * Generation overruns are handled here, too. */
+    if ( data->erratum170 )
+    {
+        v->arch.hvm_svm.vmcb->guest_asid  = 1;
+        v->arch.hvm_svm.vmcb->tlb_control = 1;
+        return;
+    }
+
+    /* Test if VCPU has valid ASID. */
+    if ( likely(v->arch.hvm_svm.asid_generation ==
+                data->core_asid_generation) )
+    {
+        /* May revert previous TLB-flush command. */
+        v->arch.hvm_svm.vmcb->tlb_control = 0;
+        return;
+    }
+
+    /* Different ASID generations trigger fetching of a fresh ASID. */
+    if ( likely(data->next_asid <= data->max_asid) )
+    {
+        /* There is a free ASID. */
+        v->arch.hvm_svm.vmcb->guest_asid = data->next_asid++;
+        v->arch.hvm_svm.asid_generation  = data->core_asid_generation;
+        v->arch.hvm_svm.vmcb->tlb_control = 0;
+        return;
+    }
+
+    /* Slow path, may cause TLB flush. */
+    svm_asid_handle_inc_generation(v);
+}
+
+void svm_asid_inv_asid(struct vcpu *v)
+{
+    v->arch.hvm_svm.asid_generation = SVM_ASID_INVALID_GENERATION;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r 5f09e34f56d7 -r d4a0706d6747 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Tue May 15 10:50:09 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Tue May 15 15:14:02 2007 +0100
@@ -1,7 +1,7 @@
 /*
  * svm.c: handling SVM architecture-related VM exits
  * Copyright (c) 2004, Intel Corporation.
- * Copyright (c) 2005, AMD Corporation.
+ * Copyright (c) 2005-2007, Advanced Micro Devices, Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -39,6 +39,7 @@
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/support.h>
 #include <asm/hvm/io.h>
+#include <asm/hvm/svm/asid.h>
 #include <asm/hvm/svm/svm.h>
 #include <asm/hvm/svm/vmcb.h>
 #include <asm/hvm/svm/emulate.h>
@@ -490,6 +491,9 @@ int svm_vmcb_restore(struct vcpu *v, str
     }
 
     paging_update_paging_modes(v);
+    /* Signal the paging update to the ASID handler. */
+    svm_asid_g_update_paging(v);
+
     return 0;
  
  bad_cr3:
@@ -855,6 +859,9 @@ static void svm_do_resume(struct vcpu *v
     {
         v->arch.hvm_svm.launch_core = smp_processor_id();
         hvm_migrate_timers(v);
+
+        /* Migrating to another ASID domain.  Request a new ASID. */
+        svm_asid_init_vcpu(v);
     }
 
     hvm_do_resume(v);
@@ -945,7 +952,7 @@ void svm_npt_detect(void)
     }
 }
 
-int start_svm(void)
+int start_svm(struct cpuinfo_x86 *c)
 {
     u32 eax, ecx, edx;
     u32 phys_hsa_lo, phys_hsa_hi;   
@@ -959,7 +966,7 @@ int start_svm(void)
     if ( !(test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability)) )
         return 0;
 
-    /* check whether SVM feature is disabled in BIOS */
+    /* Check whether SVM feature is disabled in BIOS */
     rdmsr(MSR_K8_VM_CR, eax, edx);
     if ( eax & K8_VMCR_SVME_DISABLE )
     {
@@ -975,11 +982,14 @@ int start_svm(void)
 
     svm_npt_detect();
 
-    /* Initialize the HSA for this core */
+    /* Initialize the HSA for this core. */
     phys_hsa = (u64) virt_to_maddr(hsa[cpu]);
     phys_hsa_lo = (u32) phys_hsa;
     phys_hsa_hi = (u32) (phys_hsa >> 32);    
     wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
+
+    /* Initialize core's ASID handling. */
+    svm_asid_init(c);
 
     if ( cpu != 0 )
         return 1;
@@ -1669,7 +1679,11 @@ static int svm_set_cr0(unsigned long val
         vmcb->cr0 |= X86_CR0_PG | X86_CR0_WP;
 
     if ( (value ^ old_value) & X86_CR0_PG )
+    {
         paging_update_paging_modes(v);
+        /* Signal the paging update to the ASID handler. */
+        svm_asid_g_update_paging(v);
+    }
 
     return 1;
 }
@@ -1764,6 +1778,8 @@ static int mov_to_cr(int gpreg, int cr, 
             if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
                 goto bad_cr3;
             paging_update_cr3(v);
+            /* Signal the CR3 write to the ASID handler. */
+            svm_asid_g_mov_to_cr3(v);
         }
         else 
         {
@@ -1785,6 +1801,8 @@ static int mov_to_cr(int gpreg, int cr, 
             v->arch.hvm_svm.cpu_cr3 = value;
             update_cr3(v);
             HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
+            /* Signal the CR3 write to the ASID handler. */
+            svm_asid_g_mov_to_cr3(v);
         }
         break;
 
@@ -1793,6 +1811,8 @@ static int mov_to_cr(int gpreg, int cr, 
         {
             vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4 = value;
             paging_update_paging_modes(v);
+            /* Signal the paging update to the ASID handler. */
+            svm_asid_g_update_paging(v);
             break;
         }
 
@@ -1818,6 +1838,8 @@ static int mov_to_cr(int gpreg, int cr, 
                 if ( old_base_mfn )
                     put_page(mfn_to_page(old_base_mfn));
                 paging_update_paging_modes(v);
+                /* Signal the paging update to the ASID handler. */
+                svm_asid_g_update_paging(v);
 
                 HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
                             (unsigned long) (mfn << PAGE_SHIFT));
@@ -1844,7 +1866,11 @@ static int mov_to_cr(int gpreg, int cr, 
          * all TLB entries except global entries.
          */
         if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE))
+        {
             paging_update_paging_modes(v);
+            /* Signal the paging update to the ASID handler. */
+            svm_asid_g_update_paging(v);
+        }
         break;
 
     case 8:
@@ -2174,6 +2200,8 @@ void svm_handle_invlpg(const short invlp
     HVMTRACE_3D(INVLPG, v, (invlpga?1:0), g_vaddr, (invlpga?regs->ecx:0));
 
     paging_invlpg(v, g_vaddr);
+    /* Signal the INVLPG to the ASID handler. */
+    svm_asid_g_invlpg(v, g_vaddr);
 }
 
 
diff -r 5f09e34f56d7 -r d4a0706d6747 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Tue May 15 10:50:09 2007 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Tue May 15 15:14:02 2007 +0100
@@ -1,6 +1,6 @@
 /*
  * vmcb.c: VMCB management
- * Copyright (c) 2005, AMD Corporation.
+ * Copyright (c) 2005-2007, Advanced Micro Devices, Inc.
  * Copyright (c) 2004, Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
 #include <asm/hvm/support.h>
 #include <asm/hvm/svm/svm.h>
 #include <asm/hvm/svm/intr.h>
+#include <asm/hvm/svm/asid.h>
 #include <xen/event.h>
 #include <xen/kernel.h>
 #include <xen/domain_page.h>
@@ -109,11 +110,9 @@ static int construct_vmcb(struct vcpu *v
     struct vmcb_struct *vmcb = arch_svm->vmcb;
     svm_segment_attributes_t attrib;
 
-    /* Always flush the TLB on VMRUN. All guests share a single ASID (1). */
-    vmcb->tlb_control = 1;
-    vmcb->guest_asid  = 1;
-
-    /* SVM intercepts. */
+    /* TLB control and ASID assignment. */
+    svm_asid_init_vcpu(v);
+
     vmcb->general1_intercepts = 
         GENERAL1_INTERCEPT_INTR         | GENERAL1_INTERCEPT_NMI         |
         GENERAL1_INTERCEPT_SMI          | GENERAL1_INTERCEPT_INIT        |
diff -r 5f09e34f56d7 -r d4a0706d6747 xen/arch/x86/hvm/svm/x86_32/exits.S
--- a/xen/arch/x86/hvm/svm/x86_32/exits.S       Tue May 15 10:50:09 2007 +0100
+++ b/xen/arch/x86/hvm/svm/x86_32/exits.S       Tue May 15 15:14:02 2007 +0100
@@ -1,7 +1,7 @@
 /*
  * exits.S: SVM architecture-specific exit handling.
+ * Copyright (c) 2005-2007, Advanced Micro Devices, Inc.
  * Copyright (c) 2004, Intel Corporation.
- * Copyright (c) 2005, AMD Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -38,6 +38,9 @@ ENTRY(svm_asm_do_resume)
 ENTRY(svm_asm_do_resume)
         GET_CURRENT(%ebx)
         CLGI
+        /* Assign the VCPU a valid ASID (may request a TLB flush). */
+        call svm_asid_handle_vmrun
+
         movl VCPU_processor(%ebx),%eax
         shl  $IRQSTAT_shift,%eax
         testl $~0,irq_stat(%eax,1)
diff -r 5f09e34f56d7 -r d4a0706d6747 xen/arch/x86/hvm/svm/x86_64/exits.S
--- a/xen/arch/x86/hvm/svm/x86_64/exits.S       Tue May 15 10:50:09 2007 +0100
+++ b/xen/arch/x86/hvm/svm/x86_64/exits.S       Tue May 15 15:14:02 2007 +0100
@@ -1,7 +1,7 @@
 /*
  * exits.S: AMD-V architecture-specific exit handling.
+ * Copyright (c) 2005-2007, Advanced Micro Devices, Inc.
  * Copyright (c) 2004, Intel Corporation.
- * Copyright (c) 2005, AMD Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -38,6 +38,9 @@ ENTRY(svm_asm_do_resume)
 ENTRY(svm_asm_do_resume)
         GET_CURRENT(%rbx)
         CLGI
+        /* Assign the VCPU a valid ASID (may request a TLB flush). */
+        call svm_asid_handle_vmrun
+
         movl VCPU_processor(%rbx),%eax
         shl  $IRQSTAT_shift,%rax
         leaq irq_stat(%rip),%rdx
diff -r 5f09e34f56d7 -r d4a0706d6747 xen/include/asm-x86/hvm/svm/asid.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/hvm/svm/asid.h        Tue May 15 15:14:02 2007 +0100
@@ -0,0 +1,72 @@
+/*
+ * asid.h: handling ASIDs in SVM.
+ * Copyright (c) 2007, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef __ASM_X86_HVM_SVM_ASID_H__
+#define __ASM_X86_HVM_SVM_ASID_H__
+
+#include <xen/config.h>
+#include <asm/types.h>
+#include <asm/hvm/hvm.h>
+#include <asm/hvm/support.h>
+#include <asm/hvm/svm/svm.h>
+#include <asm/hvm/svm/vmcb.h>
+#include <asm/percpu.h>
+
+void svm_asid_init(struct cpuinfo_x86 *c);
+void svm_asid_init_vcpu(struct vcpu *v);
+void svm_asid_inv_asid(struct vcpu *v);
+
+/*
+ * ASID related, guest triggered events.
+ */
+
+static inline void svm_asid_g_update_paging(struct vcpu *v)
+{
+    svm_asid_inv_asid(v);
+}
+
+static inline void svm_asid_g_mov_to_cr3(struct vcpu *v)
+{
+    svm_asid_inv_asid(v);
+}
+
+static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
+{
+#if 0
+    /* Possible optimization: INVLPGA invalidates only the TLB entry for
+       g_vaddr in the given ASID, avoiding a full ASID invalidation. */
+    asm volatile ( ".byte 0x0F,0x01,0xDF" /* INVLPGA %eax, %ecx */
+                   : /* no output */
+                   : "a" (g_vaddr), "c" (v->arch.hvm_svm.vmcb->guest_asid) );
+#endif
+
+    /* Safe fallback. Take a new ASID. */
+    svm_asid_inv_asid(v);
+}
+
+#endif /* __ASM_X86_HVM_SVM_ASID_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r 5f09e34f56d7 -r d4a0706d6747 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Tue May 15 10:50:09 2007 +0100
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Tue May 15 15:14:02 2007 +0100
@@ -1,6 +1,6 @@
 /*
  * vmcb.h: VMCB related definitions
- * Copyright (c) 2005, AMD Corporation.
+ * Copyright (c) 2005-2007, Advanced Micro Devices, Inc.
  * Copyright (c) 2004, Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -23,7 +23,6 @@
 #include <asm/config.h>
 #include <asm/hvm/hvm.h>
 
-int start_svm(void);
 
 /* general 1 intercepts */
 enum GenericIntercept1bits
@@ -444,6 +443,8 @@ struct arch_svm_struct {
 struct arch_svm_struct {
     struct vmcb_struct *vmcb;
     u64                 vmcb_pa;
+    u64                 asid_generation; /* ASID tracking, placed here to
+                                            avoid cacheline misses. */
     u32                *msrpm;
     int                 launch_core;
     bool_t              vmcb_in_sync;     /* VMCB sync'ed with VMSAVE? */
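
An illustrative trace of a guest CR3 write under this patch (function names
as in the diff above):

    /* Guest writes CR3 -- VMEXIT into Xen's SVM handler. */
    mov_to_cr(gpreg, 3, regs);
        paging_update_cr3(v);
        svm_asid_g_mov_to_cr3(v);     /* marks the VCPU's ASID generation
                                         invalid via svm_asid_inv_asid()  */

    /* ...on the way back into the guest, from svm_asm_do_resume... */
    svm_asid_handle_vmrun();          /* sees the generation mismatch and
                                         assigns data->next_asid++ with
                                         tlb_control = 0: no TLB flush    */

    /* VMRUN resumes the guest under a fresh ASID; TLB entries tagged with
       the old ASID can no longer be hit by the guest. */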
