# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1169475206 0
# Node ID baa9b76ea3e1de27dbe46ba9b3fb117e09637518
# Parent 1c0ca58e8c16ded161e9431df5a55fede19ffa02
[SVM] Remove ASID logic. Errata prevent this feature being used
reliably in current SVM processor implementations.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
xen/arch/x86/hvm/svm/svm.c | 161 ------------------------------------
xen/arch/x86/hvm/svm/vmcb.c | 16 ---
xen/arch/x86/hvm/svm/x86_32/exits.S | 1
xen/arch/x86/hvm/svm/x86_64/exits.S | 1
xen/include/asm-x86/hvm/svm/svm.h | 1
xen/include/asm-x86/hvm/svm/vmcb.h | 12 --
6 files changed, 6 insertions(+), 186 deletions(-)
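The net effect is easiest to see in one place: after this patch every guest shares ASID 1 (ASID 0 stays reserved for the host) and every VMRUN requests a full TLB flush, so correctness no longer depends on any ASID bookkeeping. A minimal C sketch of that invariant, reusing the guest_asid/tlb_control field names from the VMCB; prepare_vmrun() and the cut-down struct are illustrative only, not code from this tree:

    #include <stdint.h>

    /* Cut-down stand-in for struct vmcb_struct: only the two
     * fields this patch touches. */
    struct vmcb_sketch {
        uint32_t guest_asid;   /* ASID tagged onto guest TLB entries */
        uint8_t  tlb_control;  /* 1 => flush the entire TLB on VMRUN */
    };

    static void prepare_vmrun(struct vmcb_sketch *vmcb)
    {
        vmcb->guest_asid  = 1;  /* all guests share ASID 1; 0 is the host's */
        vmcb->tlb_control = 1;  /* flush on every VMRUN, unconditionally */
    }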
diff -r 1c0ca58e8c16 -r baa9b76ea3e1 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Mon Jan 22 13:38:04 2007 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c Mon Jan 22 14:13:26 2007 +0000
@@ -74,108 +74,6 @@ static void *root_vmcb[NR_CPUS] __read_m
/* physical address of above for host VMSAVE/VMLOAD */
u64 root_vmcb_pa[NR_CPUS] __read_mostly;
-
-/* ASID API */
-enum {
- ASID_AVAILABLE = 0,
- ASID_INUSE,
- ASID_RETIRED
-};
-#define INITIAL_ASID 0
-#define ASID_MAX 64
-
-struct asid_pool {
- spinlock_t asid_lock;
- u32 asid[ASID_MAX];
-};
-
-static DEFINE_PER_CPU(struct asid_pool, asid_pool);
-
-
-/*
- * Initializes the pool of ASIDs used by guests, one pool per core.
- */
-void asidpool_init(int core)
-{
- int i;
-
- spin_lock_init(&per_cpu(asid_pool,core).asid_lock);
-
- /* Host ASID is always in use */
- per_cpu(asid_pool,core).asid[INITIAL_ASID] = ASID_INUSE;
- for ( i = 1; i < ASID_MAX; i++ )
- per_cpu(asid_pool,core).asid[i] = ASID_AVAILABLE;
-}
-
-
-/* internal function to get the next available ASID */
-static int asidpool_fetch_next(struct vmcb_struct *vmcb, int core)
-{
- int i;
- for ( i = 1; i < ASID_MAX; i++ )
- {
- if ( per_cpu(asid_pool,core).asid[i] == ASID_AVAILABLE )
- {
- vmcb->guest_asid = i;
- per_cpu(asid_pool,core).asid[i] = ASID_INUSE;
- return i;
- }
- }
- return -1;
-}
-
-
-/*
- * This function assigns the next available ASID number to the passed
- * VMCB. If none are available, the TLB flush flag is set and all
- * retired ASIDs are made available again.
- *
- * Returns: 1 -- success;
- *          0 -- failure -- no more ASID numbers available.
- */
-int asidpool_assign_next( struct vmcb_struct *vmcb, int retire_current,
- int oldcore, int newcore )
-{
- int i;
- int res = 1;
- static unsigned long cnt=0;
-
- spin_lock(&per_cpu(asid_pool,oldcore).asid_lock);
- if( retire_current && vmcb->guest_asid ) {
- per_cpu(asid_pool,oldcore).asid[vmcb->guest_asid & (ASID_MAX-1)] =
- ASID_RETIRED;
- }
- spin_unlock(&per_cpu(asid_pool,oldcore).asid_lock);
- spin_lock(&per_cpu(asid_pool,newcore).asid_lock);
- if( asidpool_fetch_next( vmcb, newcore ) < 0 ) {
- if (svm_dbg_on)
- printk( "SVM: tlb(%ld)\n", cnt++ );
- /* Flush the TLB and make all retired slots available again */
- vmcb->tlb_control = 1;
- for( i = 1; i < ASID_MAX; i++ ) {
- if( per_cpu(asid_pool,newcore).asid[i] == ASID_RETIRED ) {
- per_cpu(asid_pool,newcore).asid[i] = ASID_AVAILABLE;
- }
- }
- /* Get the first available slot */
- res = asidpool_fetch_next( vmcb, newcore ) > 0;
- }
- spin_unlock(&per_cpu(asid_pool,newcore).asid_lock);
- return res;
-}
-
-void asidpool_retire( struct vmcb_struct *vmcb, int core )
-{
- spin_lock(&per_cpu(asid_pool,core).asid_lock);
- if( vmcb->guest_asid ) {
- per_cpu(asid_pool,core).asid[vmcb->guest_asid & (ASID_MAX-1)] =
- ASID_RETIRED;
- }
- spin_unlock(&per_cpu(asid_pool,core).asid_lock);
-}
-
static inline void svm_inject_exception(struct vcpu *v, int trap,
int ev, int error_code)
{
@@ -851,7 +749,6 @@ int start_svm(void)
rdmsr(MSR_EFER, eax, edx);
eax |= EFER_SVME;
wrmsr(MSR_EFER, eax, edx);
- asidpool_init( cpu );
printk("AMD SVM Extension is enabled for cpu %d.\n", cpu );
/* Initialize the HSA for this core */
@@ -920,28 +817,11 @@ void arch_svm_do_resume(struct vcpu *v)
static int svm_do_page_fault(unsigned long va, struct cpu_user_regs *regs)
{
- struct vcpu *v = current;
- unsigned long eip;
- int result;
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
- ASSERT(vmcb);
-
-//#if HVM_DEBUG
- eip = vmcb->rip;
HVM_DBG_LOG(DBG_LEVEL_VMMU,
"svm_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
- va, eip, (unsigned long)regs->error_code);
-//#endif
-
- result = shadow_fault(va, regs);
-
- if( result ) {
- /* Let's make sure that the Guest TLB is flushed */
- set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
- }
-
- return result;
+ va, (unsigned long)current->arch.hvm_svm.vmcb->rip,
+ (unsigned long)regs->error_code);
+ return shadow_fault(va, regs);
}
@@ -1578,8 +1458,6 @@ static int svm_set_cr0(unsigned long val
HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
(unsigned long) (mfn << PAGE_SHIFT));
-
- set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
}
if ( !((value & X86_CR0_PE) && (value & X86_CR0_PG)) && paging_enabled )
@@ -1600,7 +1478,6 @@ static int svm_set_cr0(unsigned long val
return 0;
}
shadow_update_paging_modes(v);
- set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
}
else if ( (value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PE )
{
@@ -1611,7 +1488,6 @@ static int svm_set_cr0(unsigned long val
}
/* we should take care of this kind of situation */
shadow_update_paging_modes(v);
- set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
}
return 1;
@@ -1702,7 +1578,6 @@ static int mov_to_cr(int gpreg, int cr,
v->arch.hvm_svm.cpu_cr3 = value;
break;
}
- set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
/* We make a new one if the shadow does not exist. */
if (value == v->arch.hvm_svm.cpu_cr3)
@@ -1795,10 +1670,7 @@ static int mov_to_cr(int gpreg, int cr,
* all TLB entries except global entries.
*/
if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE))
- {
- set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
shadow_update_paging_modes(v);
- }
break;
case 8:
@@ -2140,8 +2012,6 @@ void svm_handle_invlpg(const short invlp
__update_guest_eip (vmcb, inst_len);
}
- /* Overkill, we may not need this */
- set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
shadow_invlpg(v, g_vaddr);
}
@@ -2892,31 +2762,6 @@ asmlinkage void svm_load_cr2(void)
local_irq_disable();
asm volatile("mov %0,%%cr2": :"r" (v->arch.hvm_svm.cpu_cr2));
}
-
-asmlinkage void svm_asid(void)
-{
- struct vcpu *v = current;
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
- /*
- * If we need to assign a new ASID, or if we are switching cores, retire
- * the ASID for the old core and assign a new ASID to the current core.
- */
- if ( test_bit( ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags ) ||
- ( v->arch.hvm_svm.asid_core != v->arch.hvm_svm.launch_core )) {
- /* recycle asid */
- if ( !asidpool_assign_next(vmcb, 1,
- v->arch.hvm_svm.asid_core,
- v->arch.hvm_svm.launch_core) )
- {
- /* If we get here, we have a major problem */
- domain_crash_synchronous();
- }
-
- v->arch.hvm_svm.asid_core = v->arch.hvm_svm.launch_core;
- clear_bit( ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags );
- }
-}
/*
* Local variables:
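For contrast, the pool logic removed above was a small per-core state machine: each slot is AVAILABLE, INUSE or RETIRED; a guest's ASID is retired (not freed) whenever its address space may have changed, and retired slots are only recycled, together with one full TLB flush, once the pool runs dry. A condensed single-core sketch of that lifecycle, with the original's per-core locking omitted for brevity; asid_alloc() is a hypothetical stand-in for asidpool_fetch_next()/asidpool_assign_next():

    #include <stdint.h>

    enum { ASID_AVAILABLE = 0, ASID_INUSE, ASID_RETIRED };
    #define ASID_MAX 64

    /* Slot 0 is the host ASID and is permanently in use. */
    static uint32_t asid[ASID_MAX] = { [0] = ASID_INUSE };

    /* Returns an ASID in [1, ASID_MAX), setting *flush when exhaustion
     * forced a recycle; returns -1 only if every slot is INUSE. */
    static int asid_alloc(int *flush)
    {
        int i, pass;

        *flush = 0;
        for (pass = 0; pass < 2; pass++) {
            for (i = 1; i < ASID_MAX; i++) {
                if (asid[i] == ASID_AVAILABLE) {
                    asid[i] = ASID_INUSE;
                    return i;
                }
            }
            /* Pool dry: flush the whole TLB, then reopen retired slots. */
            *flush = 1;
            for (i = 1; i < ASID_MAX; i++) {
                if (asid[i] == ASID_RETIRED)
                    asid[i] = ASID_AVAILABLE;
            }
        }
        return -1;
    }

The trade made by this patch is then plain: flushing on every VMRUN costs TLB warmth, but it removes all of the retire/recycle bookkeeping that the errata make unreliable on current SVM processor implementations.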
diff -r 1c0ca58e8c16 -r baa9b76ea3e1 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c Mon Jan 22 13:38:04 2007 +0000
+++ b/xen/arch/x86/hvm/svm/vmcb.c Mon Jan 22 14:13:26 2007 +0000
@@ -38,8 +38,6 @@
#include <xen/keyhandler.h>
extern int svm_dbg_on;
-extern int asidpool_assign_next(
- struct vmcb_struct *vmcb, int retire_current, int oldcore, int newcore);
#define GUEST_SEGMENT_LIMIT 0xffffffff
@@ -92,8 +90,9 @@ static int construct_vmcb(struct vcpu *v
struct vmcb_struct *vmcb = arch_svm->vmcb;
svm_segment_attributes_t attrib;
- /* Always flush the TLB on VMRUN. */
+ /* Always flush the TLB on VMRUN. All guests share a single ASID (1). */
vmcb->tlb_control = 1;
+ vmcb->guest_asid = 1;
/* SVM intercepts. */
vmcb->general1_intercepts =
@@ -240,10 +239,7 @@ void svm_destroy_vmcb(struct vcpu *v)
struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
if ( arch_svm->vmcb != NULL )
- {
- asidpool_retire(arch_svm->vmcb, arch_svm->asid_core);
free_vmcb(arch_svm->vmcb);
- }
if ( arch_svm->iopm != NULL )
{
@@ -264,16 +260,10 @@ void svm_destroy_vmcb(struct vcpu *v)
void svm_do_launch(struct vcpu *v)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- int core = smp_processor_id();
-
hvm_stts(v);
/* current core is the one we intend to perform the VMRUN on */
- v->arch.hvm_svm.launch_core = v->arch.hvm_svm.asid_core = core;
- clear_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
- if ( !asidpool_assign_next(vmcb, 0, core, core) )
- BUG();
+ v->arch.hvm_svm.launch_core = smp_processor_id();
v->arch.schedule_tail = arch_svm_do_resume;
}
diff -r 1c0ca58e8c16 -r baa9b76ea3e1 xen/arch/x86/hvm/svm/x86_32/exits.S
--- a/xen/arch/x86/hvm/svm/x86_32/exits.S Mon Jan 22 13:38:04 2007 +0000
+++ b/xen/arch/x86/hvm/svm/x86_32/exits.S Mon Jan 22 14:13:26 2007 +0000
@@ -150,7 +150,6 @@ svm_test_all_events:
jnz svm_process_softirqs
svm_restore_all_guest:
call svm_intr_assist
- call svm_asid
call svm_load_cr2
/*
* Check if we are going back to AMD-V based VM
diff -r 1c0ca58e8c16 -r baa9b76ea3e1 xen/arch/x86/hvm/svm/x86_64/exits.S
--- a/xen/arch/x86/hvm/svm/x86_64/exits.S Mon Jan 22 13:38:04 2007 +0000
+++ b/xen/arch/x86/hvm/svm/x86_64/exits.S Mon Jan 22 14:13:26 2007 +0000
@@ -163,7 +163,6 @@ svm_test_all_events:
jnz svm_process_softirqs
svm_restore_all_guest:
call svm_intr_assist
- call svm_asid
call svm_load_cr2
/*
* Check if we are going back to AMD-V based VM
diff -r 1c0ca58e8c16 -r baa9b76ea3e1 xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h Mon Jan 22 13:38:04 2007 +0000
+++ b/xen/include/asm-x86/hvm/svm/svm.h Mon Jan 22 14:13:26 2007 +0000
@@ -28,7 +28,6 @@
#include <asm/hvm/svm/vmcb.h>
#include <asm/i387.h>
-extern void asidpool_retire(struct vmcb_struct *vmcb, int core);
extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
extern void svm_do_launch(struct vcpu *v);
extern void arch_svm_do_resume(struct vcpu *v);
diff -r 1c0ca58e8c16 -r baa9b76ea3e1 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h Mon Jan 22 13:38:04 2007 +0000
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h Mon Jan 22 14:13:26 2007 +0000
@@ -457,7 +457,6 @@ struct arch_svm_struct {
u64 vmexit_tsc; /* tsc read at #VMEXIT. for TSC_OFFSET */
int saved_irq_vector;
u32 launch_core;
- u32 asid_core;
unsigned long flags; /* VMCB flags */
unsigned long cpu_shadow_cr0; /* Guest value for CR0 */
@@ -476,17 +475,6 @@ void svm_destroy_vmcb(struct vcpu *v);
void svm_destroy_vmcb(struct vcpu *v);
void setup_vmcb_dump(void);
-
-#define VMCB_USE_HOST_ENV 1
-#define VMCB_USE_SEPARATE_ENV 0
-
-enum {
- ARCH_SVM_VMCB_LOADED = 0,
- ARCH_SVM_VMCB_ASSIGN_ASID
-};
-
-#define VMCB_EFLAGS_RESERVED_0 0xffc08028 /* bitmap for 0 */
-#define VMCB_EFLAGS_RESERVED_1 0x00000002 /* bitmap for 1 */
/* These bits in the CR4 are owned by the host */
#if CONFIG_PAGING_LEVELS >= 3