# HG changeset patch
# User shand@xxxxxxxxxxxxxxxxxxxxxxxx
# Node ID d20e1835c24b8a5051b6e8996d1387ee25a88ffd
# Parent 5d42f6f0a187684ca9ab6deb30becadec9855b19
Various HVM clean-ups: move the SVM/VMX-common stts, guest-time, resume and
interrupt post-injection logic into generic HVM code (hvm_stts,
hvm_set_guest_time, hvm_do_resume, hvm_interrupt_post); replace the per-core
svm_globals array with per-CPU data and a per-CPU root VMCB used for host
VMSAVE/VMLOAD; add a 'v' keyhandler to dump AMD-V VMCBs; turn the
read_cr0/write_cr0/read_cr4/write_cr4/clts/stts macros into inline functions.
Signed-off-by: Steven Hand <steven@xxxxxxxxxxxxx>
---
xen/arch/x86/hvm/hvm.c | 52 ++++
xen/arch/x86/hvm/i8254.c | 2
xen/arch/x86/hvm/i8259.c | 9
xen/arch/x86/hvm/io.c | 31 ++
xen/arch/x86/hvm/svm/emulate.c | 4
xen/arch/x86/hvm/svm/intr.c | 46 ---
xen/arch/x86/hvm/svm/svm.c | 439 +++++++++++++++++++-----------------
xen/arch/x86/hvm/svm/vmcb.c | 203 +++++++---------
xen/arch/x86/hvm/svm/x86_32/exits.S | 8
xen/arch/x86/hvm/svm/x86_64/exits.S | 17 -
xen/arch/x86/hvm/vlapic.c | 2
xen/arch/x86/hvm/vmx/io.c | 88 -------
xen/arch/x86/hvm/vmx/vmcs.c | 14 -
xen/arch/x86/hvm/vmx/vmx.c | 51 +++-
xen/arch/x86/hvm/vmx/x86_32/exits.S | 2
xen/arch/x86/hvm/vmx/x86_64/exits.S | 2
xen/arch/x86/x86_32/asm-offsets.c | 1
xen/arch/x86/x86_64/asm-offsets.c | 1
xen/include/asm-x86/hvm/hvm.h | 12
xen/include/asm-x86/hvm/io.h | 1
xen/include/asm-x86/hvm/svm/svm.h | 52 ----
xen/include/asm-x86/hvm/svm/vmcb.h | 17 -
xen/include/asm-x86/hvm/vmx/vmcs.h | 1
xen/include/asm-x86/hvm/vmx/vmx.h | 25 --
xen/include/asm-x86/processor.h | 58 +++-
xen/include/asm-x86/system.h | 4
26 files changed, 563 insertions(+), 579 deletions(-)
diff -r 5d42f6f0a187 -r d20e1835c24b xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/arch/x86/hvm/hvm.c Tue Aug 15 18:20:03 2006 +0100
@@ -199,6 +199,55 @@ void hvm_create_event_channels(struct vc
}
}
+
+void hvm_stts(struct vcpu *v)
+{
+ /* FPU state already dirty? Then no need to setup_fpu() lazily. */
+ if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
+ return;
+
+ hvm_funcs.stts(v);
+}
+
+void hvm_set_guest_time(struct vcpu *v, u64 gtime)
+{
+ u64 host_tsc;
+
+ rdtscll(host_tsc);
+
+ v->arch.hvm_vcpu.cache_tsc_offset = gtime - host_tsc;
+ hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
+}
+
+void hvm_do_resume(struct vcpu *v)
+{
+ ioreq_t *p;
+ struct periodic_time *pt =
+ &v->domain->arch.hvm_domain.pl_time.periodic_tm;
+
+ hvm_stts(v);
+
+ /* pick up the elapsed PIT ticks and re-enable pit_timer */
+ if ( pt->enabled && pt->first_injected ) {
+ if ( v->arch.hvm_vcpu.guest_time ) {
+ hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
+ v->arch.hvm_vcpu.guest_time = 0;
+ }
+ pickup_deactive_ticks(pt);
+ }
+
+ p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
+ wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
+ p->state != STATE_IOREQ_READY &&
+ p->state != STATE_IOREQ_INPROCESS);
+ if ( p->state == STATE_IORESP_READY )
+ hvm_io_assist(v);
+ if ( p->state != STATE_INVALID ) {
+ printf("Weird HVM iorequest state %d.\n", p->state);
+ domain_crash(v->domain);
+ }
+}
+
void hvm_release_assist_channel(struct vcpu *v)
{
free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);
@@ -299,8 +348,7 @@ int cpu_get_interrupt(struct vcpu *v, in
/*
* Copy from/to guest virtual.
*/
-int
-hvm_copy(void *buf, unsigned long vaddr, int size, int dir)
+int hvm_copy(void *buf, unsigned long vaddr, int size, int dir)
{
unsigned long mfn;
char *addr;
diff -r 5d42f6f0a187 -r d20e1835c24b xen/arch/x86/hvm/i8254.c
--- a/xen/arch/x86/hvm/i8254.c Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/arch/x86/hvm/i8254.c Tue Aug 15 18:20:03 2006 +0100
@@ -389,7 +389,7 @@ void pit_init(struct vcpu *v, unsigned l
register_portio_handler(PIT_BASE, 4, handle_pit_io);
/* register the speaker port */
register_portio_handler(0x61, 1, handle_speaker_io);
- ticks_per_sec(v) = cpu_khz * (int64_t)1000;
+ ticks_per_sec(v) = cpu_khz * (int64_t)1000;
#ifdef DEBUG_PIT
printk("HVM_PIT: guest frequency =%lld\n", (long long)ticks_per_sec(v));
#endif
diff -r 5d42f6f0a187 -r d20e1835c24b xen/arch/x86/hvm/i8259.c
--- a/xen/arch/x86/hvm/i8259.c Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/arch/x86/hvm/i8259.c Tue Aug 15 18:20:03 2006 +0100
@@ -480,7 +480,6 @@ void pic_init(struct hvm_virpic *s, void
s->pics[1].elcr_mask = 0xde;
s->irq_request = irq_request;
s->irq_request_opaque = irq_request_opaque;
- return;
}
void pic_set_alt_irq_func(struct hvm_virpic *s,
@@ -568,10 +567,10 @@ static int intercept_elcr_io(ioreq_t *p)
}
void register_pic_io_hook (void)
{
- register_portio_handler(0x20, 2, intercept_pic_io);
- register_portio_handler(0x4d0, 1, intercept_elcr_io);
- register_portio_handler(0xa0, 2, intercept_pic_io);
- register_portio_handler(0x4d1, 1, intercept_elcr_io);
+ register_portio_handler(0x20, 2, intercept_pic_io);
+ register_portio_handler(0x4d0, 1, intercept_elcr_io);
+ register_portio_handler(0xa0, 2, intercept_pic_io);
+ register_portio_handler(0x4d1, 1, intercept_elcr_io);
}
diff -r 5d42f6f0a187 -r d20e1835c24b xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/arch/x86/hvm/io.c Tue Aug 15 18:20:03 2006 +0100
@@ -668,6 +668,37 @@ static void hvm_mmio_assist(struct cpu_u
}
}
+void hvm_interrupt_post(struct vcpu *v, int vector, int type)
+{
+ struct periodic_time *pt =
+ &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
+
+ if ( is_pit_irq(v, vector, type) ) {
+ if ( !pt->first_injected ) {
+ pt->pending_intr_nr = 0;
+ pt->last_plt_gtime = hvm_get_guest_time(v);
+ pt->scheduled = NOW() + pt->period;
+ set_timer(&pt->timer, pt->scheduled);
+ pt->first_injected = 1;
+ } else {
+ pt->pending_intr_nr--;
+ pt->last_plt_gtime += pt->period_cycles;
+ hvm_set_guest_time(v, pt->last_plt_gtime);
+ pit_time_fired(v, pt->priv);
+ }
+ }
+
+ switch(type) {
+ case APIC_DM_EXTINT:
+ break;
+
+ default:
+ vlapic_post_injection(v, vector, type);
+ break;
+ }
+}
+
+
void hvm_io_assist(struct vcpu *v)
{
vcpu_iodata_t *vio;
diff -r 5d42f6f0a187 -r d20e1835c24b xen/arch/x86/hvm/svm/emulate.c
--- a/xen/arch/x86/hvm/svm/emulate.c Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/arch/x86/hvm/svm/emulate.c Tue Aug 15 18:20:03 2006 +0100
@@ -78,7 +78,7 @@ static inline unsigned long DECODE_GPR_V
case 0x4:
value = (unsigned long)vmcb->rsp;
case 0x5:
- value = regs->ebp;
+ value = regs->ebp;
break;
case 0x6:
value = regs->esi;
@@ -429,7 +429,7 @@ int __get_instruction_length_from_list(s
enum instruction_index *list, unsigned int list_count,
u8 *guest_eip_buf, enum instruction_index *match)
{
- unsigned int inst_len = 0;
+ unsigned int inst_len = 0;
unsigned int i;
unsigned int j;
int found = 0;
diff -r 5d42f6f0a187 -r d20e1835c24b xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/arch/x86/hvm/svm/intr.c Tue Aug 15 18:20:03 2006 +0100
@@ -42,48 +42,6 @@
* Most of this code is copied from vmx_io.c and modified
* to be suitable for SVM.
*/
-#define BSP_CPU(v) (!(v->vcpu_id))
-
-void svm_set_guest_time(struct vcpu *v, u64 gtime)
-{
- u64 host_tsc;
-
- rdtscll(host_tsc);
-
- v->arch.hvm_vcpu.cache_tsc_offset = gtime - host_tsc;
- v->arch.hvm_svm.vmcb->tsc_offset = v->arch.hvm_vcpu.cache_tsc_offset;
-}
-
-static inline void
-interrupt_post_injection(struct vcpu * v, int vector, int type)
-{
- struct periodic_time *pt = &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
-
- if ( is_pit_irq(v, vector, type) ) {
- if ( !pt->first_injected ) {
- pt->pending_intr_nr = 0;
- pt->last_plt_gtime = hvm_get_guest_time(v);
- pt->scheduled = NOW() + pt->period;
- set_timer(&pt->timer, pt->scheduled);
- pt->first_injected = 1;
- } else {
- pt->pending_intr_nr--;
- pt->last_plt_gtime += pt->period_cycles;
- svm_set_guest_time(v, pt->last_plt_gtime);
- pit_time_fired(v, pt->priv);
- }
- }
-
- switch(type)
- {
- case APIC_DM_EXTINT:
- break;
-
- default:
- vlapic_post_injection(v, vector, type);
- break;
- }
-}
static inline int svm_inject_extint(struct vcpu *v, int trap, int error_code)
{
@@ -109,7 +67,7 @@ asmlinkage void svm_intr_assist(void)
{
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- struct hvm_domain *plat=&v->domain->arch.hvm_domain;
+ struct hvm_domain *plat=&v->domain->arch.hvm_domain;
struct periodic_time *pt = &plat->pl_time.periodic_tm;
struct hvm_virpic *pic= &plat->vpic;
int callback_irq;
@@ -194,7 +152,7 @@ asmlinkage void svm_intr_assist(void)
/* let's inject this interrupt */
TRACE_3D(TRC_VMX_INT, v->domain->domain_id, intr_vector, 0);
svm_inject_extint(v, intr_vector, VMX_DELIVER_NO_ERROR_CODE);
- interrupt_post_injection(v, intr_vector, intr_type);
+ hvm_interrupt_post(v, intr_vector, intr_type);
break;
case APIC_DM_SMI:
case APIC_DM_NMI:
diff -r 5d42f6f0a187 -r d20e1835c24b xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Tue Aug 15 18:20:03 2006 +0100
@@ -54,8 +54,7 @@
#define set_segment_register(name, value) \
__asm__ __volatile__ ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
-/*
- * External functions, etc. We should move these to some suitable header file(s) */
+/* External functions. We should move these to some suitable header file(s) */
extern void do_nmi(struct cpu_user_regs *, unsigned long);
extern int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip,
@@ -72,12 +71,32 @@ static int svm_do_vmmcall_reset_to_realm
static int svm_do_vmmcall_reset_to_realmode(struct vcpu *v,
struct cpu_user_regs *regs);
-
-
-extern void set_hsa_to_guest( struct arch_svm_struct *arch_svm );
-
-/* Host save area and ASID glogal data */
-struct svm_percore_globals svm_globals[NR_CPUS];
+/* va of hardware host save area */
+static void *hsa[NR_CPUS] __read_mostly;
+
+/* vmcb used for extended host state */
+static void *root_vmcb[NR_CPUS] __read_mostly;
+
+/* physical address of above for host VMSAVE/VMLOAD */
+u64 root_vmcb_pa[NR_CPUS] __read_mostly;
+
+
+/* ASID API */
+enum {
+ ASID_AVAILABLE = 0,
+ ASID_INUSE,
+ ASID_RETIRED
+};
+#define INITIAL_ASID 0
+#define ASID_MAX 64
+
+struct asid_pool {
+ spinlock_t asid_lock;
+ u32 asid[ASID_MAX];
+};
+
+static DEFINE_PER_CPU(struct asid_pool, asid_pool);
+
/*
* Initializes the POOL of ASID used by the guests per core.
@@ -86,25 +105,25 @@ void asidpool_init(int core)
{
int i;
- spin_lock_init(&svm_globals[core].ASIDpool.asid_lock);
+ spin_lock_init(&per_cpu(asid_pool,core).asid_lock);
/* Host ASID is always in use */
- svm_globals[core].ASIDpool.asid[INITIAL_ASID] = ASID_INUSE;
+ per_cpu(asid_pool,core).asid[INITIAL_ASID] = ASID_INUSE;
for ( i = 1; i < ASID_MAX; i++ )
- svm_globals[core].ASIDpool.asid[i] = ASID_AVAILABLE;
+ per_cpu(asid_pool,core).asid[i] = ASID_AVAILABLE;
}
/* internal function to get the next available ASID */
static int asidpool_fetch_next(struct vmcb_struct *vmcb, int core)
{
- int i;
+ int i;
for ( i = 1; i < ASID_MAX; i++ )
{
- if ( svm_globals[core].ASIDpool.asid[i] == ASID_AVAILABLE )
+ if ( per_cpu(asid_pool,core).asid[i] == ASID_AVAILABLE )
{
vmcb->guest_asid = i;
- svm_globals[core].ASIDpool.asid[i] = ASID_INUSE;
+ per_cpu(asid_pool,core).asid[i] = ASID_INUSE;
return i;
}
}
@@ -125,43 +144,46 @@ int asidpool_assign_next( struct vmcb_st
int asidpool_assign_next( struct vmcb_struct *vmcb, int retire_current,
int oldcore, int newcore )
{
- int i;
+ int i;
int res = 1;
static unsigned long cnt=0;
- spin_lock(&svm_globals[oldcore].ASIDpool.asid_lock);
+ spin_lock(&per_cpu(asid_pool,oldcore).asid_lock);
if( retire_current && vmcb->guest_asid ) {
- svm_globals[oldcore].ASIDpool.asid[ vmcb->guest_asid & (ASID_MAX-1) ] = ASID_RETIRED;
- }
- spin_unlock(&svm_globals[oldcore].ASIDpool.asid_lock);
- spin_lock(&svm_globals[newcore].ASIDpool.asid_lock);
+ per_cpu(asid_pool,oldcore).asid[vmcb->guest_asid & (ASID_MAX-1)] =
+ ASID_RETIRED;
+ }
+ spin_unlock(&per_cpu(asid_pool,oldcore).asid_lock);
+ spin_lock(&per_cpu(asid_pool,newcore).asid_lock);
if( asidpool_fetch_next( vmcb, newcore ) < 0 ) {
if (svm_dbg_on)
printk( "SVM: tlb(%ld)\n", cnt++ );
/* FLUSH the TLB and all retired slots are made available */
vmcb->tlb_control = 1;
for( i = 1; i < ASID_MAX; i++ ) {
- if( svm_globals[newcore].ASIDpool.asid[i] == ASID_RETIRED ) {
- svm_globals[newcore].ASIDpool.asid[i] = ASID_AVAILABLE;
+ if( per_cpu(asid_pool,newcore).asid[i] == ASID_RETIRED ) {
+ per_cpu(asid_pool,newcore).asid[i] = ASID_AVAILABLE;
}
}
/* Get the First slot available */
res = asidpool_fetch_next( vmcb, newcore ) > 0;
}
- spin_unlock(&svm_globals[newcore].ASIDpool.asid_lock);
+ spin_unlock(&per_cpu(asid_pool,newcore).asid_lock);
return res;
}
void asidpool_retire( struct vmcb_struct *vmcb, int core )
{
- spin_lock(&svm_globals[core].ASIDpool.asid_lock);
+ spin_lock(&per_cpu(asid_pool,core).asid_lock);
if( vmcb->guest_asid ) {
- svm_globals[core].ASIDpool.asid[ vmcb->guest_asid & (ASID_MAX-1) ] = ASID_RETIRED;
+ per_cpu(asid_pool,core).asid[vmcb->guest_asid & (ASID_MAX-1)] =
+ ASID_RETIRED;
}
- spin_unlock(&svm_globals[core].ASIDpool.asid_lock);
-}
-
-static inline void svm_inject_exception(struct vcpu *v, int trap, int ev, int error_code)
+ spin_unlock(&per_cpu(asid_pool,core).asid_lock);
+}
+
+static inline void svm_inject_exception(struct vcpu *v, int trap,
+ int ev, int error_code)
{
eventinj_t event;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -178,7 +200,7 @@ static inline void svm_inject_exception(
vmcb->eventinj = event;
}
-void stop_svm(void)
+static void stop_svm(void)
{
u32 eax, edx;
int cpu = smp_processor_id();
@@ -189,22 +211,18 @@ void stop_svm(void)
wrmsr(MSR_EFER, eax, edx);
/* release the HSA */
- free_host_save_area( svm_globals[cpu].hsa );
- free_host_save_area( svm_globals[cpu].scratch_hsa );
- svm_globals[cpu].hsa = NULL;
- svm_globals[cpu].hsa_pa = 0;
- svm_globals[cpu].scratch_hsa = NULL;
- svm_globals[cpu].scratch_hsa_pa = 0;
+ free_host_save_area(hsa[cpu]);
+ hsa[cpu] = NULL;
wrmsr(MSR_K8_VM_HSAVE_PA, 0, 0 );
+ /* free up the root vmcb */
+ free_vmcb(root_vmcb[cpu]);
+ root_vmcb[cpu] = NULL;
+ root_vmcb_pa[cpu] = 0;
+
printk("AMD SVM Extension is disabled.\n");
}
-int svm_initialize_guest_resources(struct vcpu *v)
-{
- svm_final_setup_guest(v);
- return 1;
-}
static void svm_store_cpu_guest_regs(
struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
@@ -233,11 +251,15 @@ static void svm_store_cpu_guest_regs(
}
}
-static void svm_load_cpu_guest_regs(
- struct vcpu *v, struct cpu_user_regs *regs)
-{
- svm_load_cpu_user_regs(v, regs);
-}
+static int svm_paging_enabled(struct vcpu *v)
+{
+ unsigned long cr0;
+
+ cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
+
+ return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
+}
+
#define IS_CANO_ADDRESS(add) 1
@@ -281,7 +303,6 @@ static inline int long_mode_do_msr_read(
case MSR_SYSCALL_MASK:
msr_content = vmcb->sfmask;
break;
-
default:
return 0;
}
@@ -296,7 +317,7 @@ static inline int long_mode_do_msr_read(
static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
{
- u64 msr_content = regs->eax | ((u64)regs->edx << 32);
+ u64 msr_content = regs->eax | ((u64)regs->edx << 32);
struct vcpu *vc = current;
struct vmcb_struct *vmcb = vc->arch.hvm_svm.vmcb;
@@ -318,7 +339,7 @@ static inline int long_mode_do_msr_write
/* LME: 0 -> 1 */
if ( msr_content & EFER_LME &&
- !test_bit(SVM_CPU_STATE_LME_ENABLED, &vc->arch.hvm_svm.cpu_state) )
+ !test_bit(SVM_CPU_STATE_LME_ENABLED, &vc->arch.hvm_svm.cpu_state))
{
if ( svm_paging_enabled(vc) ||
!test_bit(SVM_CPU_STATE_PAE_ENABLED,
@@ -385,7 +406,7 @@ static inline int long_mode_do_msr_write
return 1;
}
-int svm_realmode(struct vcpu *v)
+static int svm_realmode(struct vcpu *v)
{
unsigned long cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
unsigned long eflags = v->arch.hvm_svm.vmcb->rflags;
@@ -393,7 +414,7 @@ int svm_realmode(struct vcpu *v)
return (eflags & X86_EFLAGS_VM) || !(cr0 & X86_CR0_PE);
}
-int svm_instruction_length(struct vcpu *v)
+static int svm_instruction_length(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
unsigned long cr0 = vmcb->cr0, eflags = vmcb->rflags, mode;
@@ -405,7 +426,7 @@ int svm_instruction_length(struct vcpu *
return svm_instrlen(guest_cpu_user_regs(), mode);
}
-unsigned long svm_get_ctrl_reg(struct vcpu *v, unsigned int num)
+static unsigned long svm_get_ctrl_reg(struct vcpu *v, unsigned int num)
{
switch ( num )
{
@@ -422,9 +443,34 @@ unsigned long svm_get_ctrl_reg(struct vc
}
+/* Make sure that xen intercepts any FP accesses from current */
+static void svm_stts(struct vcpu *v)
+{
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+ /*
+ * If the guest does not have TS enabled then we must cause and handle an
+ * exception on first use of the FPU. If the guest *does* have TS enabled
+ * then this is not necessary: no FPU activity can occur until the guest
+ * clears CR0.TS, and we will initialise the FPU when that happens.
+ */
+ if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
+ {
+ v->arch.hvm_svm.vmcb->exception_intercepts |= EXCEPTION_BITMAP_NM;
+ vmcb->cr0 |= X86_CR0_TS;
+ }
+}
+
+
+static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
+{
+ v->arch.hvm_svm.vmcb->tsc_offset = offset;
+}
+
+
/* SVM-specific intitialization code for VCPU application processors */
-void svm_init_ap_context(struct vcpu_guest_context *ctxt,
- int vcpuid, int trampoline_vector)
+static void svm_init_ap_context(struct vcpu_guest_context *ctxt,
+ int vcpuid, int trampoline_vector)
{
int i;
struct vcpu *v, *bsp = current;
@@ -453,7 +499,7 @@ void svm_init_ap_context(struct vcpu_gue
* the code. We will execute this code in real mode.
*/
ctxt->user_regs.eip = 0x0;
- ctxt->user_regs.cs = (trampoline_vector << 8);
+ ctxt->user_regs.cs = (trampoline_vector << 8);
ctxt->flags = VGCF_HVM_GUEST;
}
@@ -479,60 +525,8 @@ static void svm_init_hypercall_page(stru
*(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
}
-int start_svm(void)
-{
- u32 eax, ecx, edx;
- u32 phys_hsa_lo, phys_hsa_hi;
- u64 phys_hsa;
- int cpu = smp_processor_id();
-
- /* Xen does not fill x86_capability words except 0. */
- ecx = cpuid_ecx(0x80000001);
- boot_cpu_data.x86_capability[5] = ecx;
-
- if (!(test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability)))
- return 0;
- svm_globals[cpu].hsa = alloc_host_save_area();
- if (! svm_globals[cpu].hsa)
- return 0;
-
- rdmsr(MSR_EFER, eax, edx);
- eax |= EFER_SVME;
- wrmsr(MSR_EFER, eax, edx);
- asidpool_init( cpu );
- printk("AMD SVM Extension is enabled for cpu %d.\n", cpu );
-
- /* Initialize the HSA for this core */
- phys_hsa = (u64) virt_to_maddr( svm_globals[cpu].hsa );
- phys_hsa_lo = (u32) phys_hsa;
- phys_hsa_hi = (u32) (phys_hsa >> 32);
- wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
- svm_globals[cpu].hsa_pa = phys_hsa;
-
- svm_globals[cpu].scratch_hsa = alloc_host_save_area();
- svm_globals[cpu].scratch_hsa_pa = (u64)virt_to_maddr( svm_globals[cpu].scratch_hsa );
-
- /* Setup HVM interfaces */
- hvm_funcs.disable = stop_svm;
-
- hvm_funcs.initialize_guest_resources = svm_initialize_guest_resources;
- hvm_funcs.relinquish_guest_resources = svm_relinquish_guest_resources;
-
- hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs;
- hvm_funcs.load_cpu_guest_regs = svm_load_cpu_guest_regs;
-
- hvm_funcs.realmode = svm_realmode;
- hvm_funcs.paging_enabled = svm_paging_enabled;
- hvm_funcs.instruction_length = svm_instruction_length;
- hvm_funcs.get_guest_ctrl_reg = svm_get_ctrl_reg;
- hvm_funcs.init_ap_context = svm_init_ap_context;
-
- hvm_funcs.init_hypercall_page = svm_init_hypercall_page;
-
- hvm_enabled = 1;
-
- return 1;
-}
+
+
int svm_dbg_on = 0;
@@ -596,7 +590,7 @@ static inline int svm_do_debugout(unsign
return 1;
}
-void save_svm_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *ctxt)
+static void save_svm_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *ctxt)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -615,7 +609,7 @@ void save_svm_cpu_user_regs(struct vcpu
ctxt->ds = vmcb->ds.sel;
}
-void svm_store_cpu_user_regs(struct cpu_user_regs *regs, struct vcpu *v)
+static void svm_store_cpu_user_regs(struct cpu_user_regs *regs, struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -629,7 +623,7 @@ void svm_store_cpu_user_regs(struct cpu_
}
/* XXX Use svm_load_cpu_guest_regs instead */
-void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs)
+static void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
u32 *intercepts = &v->arch.hvm_svm.vmcb->exception_intercepts;
@@ -647,37 +641,13 @@ void svm_load_cpu_user_regs(struct vcpu
*intercepts &= ~EXCEPTION_BITMAP_DB;
}
-int svm_paging_enabled(struct vcpu *v)
-{
- unsigned long cr0;
-
- cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
-
- return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
-}
-
-
-/* Make sure that xen intercepts any FP accesses from current */
-void svm_stts(struct vcpu *v)
-{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
- /* FPU state already dirty? Then no need to setup_fpu() lazily. */
- if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
- return;
-
- /*
- * If the guest does not have TS enabled then we must cause and handle an
- * exception on first use of the FPU. If the guest *does* have TS enabled
- * then this is not necessary: no FPU activity can occur until the guest
- * clears CR0.TS, and we will initialise the FPU when that happens.
- */
- if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
- {
- v->arch.hvm_svm.vmcb->exception_intercepts |= EXCEPTION_BITMAP_NM;
- vmcb->cr0 |= X86_CR0_TS;
- }
-}
+static void svm_load_cpu_guest_regs(
+ struct vcpu *v, struct cpu_user_regs *regs)
+{
+ svm_load_cpu_user_regs(v, regs);
+}
+
+
static void arch_svm_do_launch(struct vcpu *v)
{
@@ -708,9 +678,9 @@ static void arch_svm_do_launch(struct vc
{
u16 cs_sel = regs->cs;
/*
- * This is the launch of an AP; set state so that we begin executing
+ * This is the launch of an AP; set state so that we begin executing
* the trampoline code in real-mode.
- */
+ */
svm_do_vmmcall_reset_to_realmode(v, regs);
/* Adjust the state to execute the trampoline code.*/
v->arch.hvm_svm.vmcb->rip = 0;
@@ -731,6 +701,7 @@ static void svm_freeze_time(struct vcpu
}
}
+
static void svm_ctxt_switch_from(struct vcpu *v)
{
svm_freeze_time(v);
@@ -738,7 +709,7 @@ static void svm_ctxt_switch_from(struct
static void svm_ctxt_switch_to(struct vcpu *v)
{
-#if __x86_64__
+#ifdef __x86_64__
/*
* This is required, because VMRUN does consistency check
* and some of the DOM0 selectors are pointing to
@@ -751,7 +722,8 @@ static void svm_ctxt_switch_to(struct vc
#endif
}
-void svm_final_setup_guest(struct vcpu *v)
+
+static void svm_final_setup_guest(struct vcpu *v)
{
struct domain *d = v->domain;
struct vcpu *vc;
@@ -778,15 +750,82 @@ void svm_final_setup_guest(struct vcpu *
* Put the domain in shadow mode even though we're going to be using
* the shared 1:1 page table initially. It shouldn't hurt
*/
- shadow_mode_enable(d,
- SHM_enable|SHM_refcounts|
+ shadow_mode_enable(d, SHM_enable|SHM_refcounts|
SHM_translate|SHM_external|SHM_wr_pt_pte);
}
+static int svm_initialize_guest_resources(struct vcpu *v)
+{
+ svm_final_setup_guest(v);
+ return 1;
+}
+
+
+int start_svm(void)
+{
+ u32 eax, ecx, edx;
+ u32 phys_hsa_lo, phys_hsa_hi;
+ u64 phys_hsa;
+ int cpu = smp_processor_id();
+
+ /* Xen does not fill x86_capability words except 0. */
+ ecx = cpuid_ecx(0x80000001);
+ boot_cpu_data.x86_capability[5] = ecx;
+
+ if (!(test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability)))
+ return 0;
+
+ if (!(hsa[cpu] = alloc_host_save_area()))
+ return 0;
+
+ rdmsr(MSR_EFER, eax, edx);
+ eax |= EFER_SVME;
+ wrmsr(MSR_EFER, eax, edx);
+ asidpool_init( cpu );
+ printk("AMD SVM Extension is enabled for cpu %d.\n", cpu );
+
+ /* Initialize the HSA for this core */
+ phys_hsa = (u64) virt_to_maddr(hsa[cpu]);
+ phys_hsa_lo = (u32) phys_hsa;
+ phys_hsa_hi = (u32) (phys_hsa >> 32);
+ wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
+
+ if (!(root_vmcb[cpu] = alloc_vmcb()))
+ return 0;
+ root_vmcb_pa[cpu] = virt_to_maddr(root_vmcb[cpu]);
+
+ if (cpu == 0)
+ setup_vmcb_dump();
+
+ /* Setup HVM interfaces */
+ hvm_funcs.disable = stop_svm;
+
+ hvm_funcs.initialize_guest_resources = svm_initialize_guest_resources;
+ hvm_funcs.relinquish_guest_resources = svm_relinquish_guest_resources;
+
+ hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs;
+ hvm_funcs.load_cpu_guest_regs = svm_load_cpu_guest_regs;
+
+ hvm_funcs.realmode = svm_realmode;
+ hvm_funcs.paging_enabled = svm_paging_enabled;
+ hvm_funcs.instruction_length = svm_instruction_length;
+ hvm_funcs.get_guest_ctrl_reg = svm_get_ctrl_reg;
+
+ hvm_funcs.stts = svm_stts;
+ hvm_funcs.set_tsc_offset = svm_set_tsc_offset;
+
+ hvm_funcs.init_ap_context = svm_init_ap_context;
+ hvm_funcs.init_hypercall_page = svm_init_hypercall_page;
+
+ hvm_enabled = 1;
+
+ return 1;
+}
+
+
static void svm_relinquish_guest_resources(struct domain *d)
{
- extern void destroy_vmcb(struct arch_svm_struct *); /* XXX */
struct vcpu *v;
for_each_vcpu ( d, v )
@@ -817,11 +856,25 @@ static void svm_relinquish_guest_resourc
}
+static void svm_migrate_timers(struct vcpu *v)
+{
+ struct periodic_time *pt =
+ &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
+
+ if ( pt->enabled ) {
+ migrate_timer( &pt->timer, v->processor );
+ migrate_timer( &v->arch.hvm_svm.hlt_timer, v->processor );
+ }
+ if ( hvm_apic_support(v->domain) && VLAPIC( v ))
+ migrate_timer( &(VLAPIC(v)->vlapic_timer ), v->processor );
+}
+
+
void arch_svm_do_resume(struct vcpu *v)
{
/* pinning VCPU to a different core? */
if ( v->arch.hvm_svm.launch_core == smp_processor_id()) {
- svm_do_resume( v );
+ hvm_do_resume( v );
reset_stack_and_jump( svm_asm_do_resume );
}
else {
@@ -830,23 +883,11 @@ void arch_svm_do_resume(struct vcpu *v)
v->arch.hvm_svm.launch_core, smp_processor_id() );
v->arch.hvm_svm.launch_core = smp_processor_id();
svm_migrate_timers( v );
- svm_do_resume( v );
+ hvm_do_resume( v );
reset_stack_and_jump( svm_asm_do_resume );
}
}
-
-void svm_migrate_timers(struct vcpu *v)
-{
- struct periodic_time *pt = &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
-
- if ( pt->enabled ) {
- migrate_timer( &pt->timer, v->processor );
- migrate_timer( &v->arch.hvm_svm.hlt_timer, v->processor );
- }
- if ( hvm_apic_support(v->domain) && VLAPIC( v ))
- migrate_timer( &(VLAPIC(v)->vlapic_timer ), v->processor );
-}
static int svm_do_page_fault(unsigned long va, struct cpu_user_regs *regs)
@@ -888,7 +929,7 @@ static int svm_do_page_fault(unsigned lo
inst_len = svm_instruction_length(v);
if (inst_len == -1)
{
- printf("%s: INST_LEN - Unable to decode properly.\n", __func__);
+ printf("%s: INST_LEN - Unable to decode properly\n", __func__);
domain_crash_synchronous();
}
@@ -1137,7 +1178,7 @@ static inline unsigned long *get_reg_p(u
case SVM_REG_ESP:
reg_p = (unsigned long *)&vmcb->rsp;
break;
-#if __x86_64__
+#ifdef __x86_64__
case SVM_REG_R8:
reg_p = (unsigned long *)&regs->r8;
break;
@@ -1195,7 +1236,7 @@ static void svm_dr_access (struct vcpu *
unsigned long *reg_p = 0;
unsigned int gpreg = 0;
unsigned long eip;
- int inst_len;
+ int inst_len;
int index;
struct vmcb_struct *vmcb;
u8 buffer[MAX_INST_LEN];
@@ -1264,7 +1305,7 @@ static void svm_get_prefix_info(
case 0xf2: /* REPNZ */
case 0xf0: /* LOCK */
case 0x66: /* data32 */
-#if __x86_64__
+#ifdef __x86_64__
/* REX prefixes */
case 0x40:
case 0x41:
@@ -1330,7 +1371,7 @@ static inline int svm_get_io_address(
info.bytes = vmcb->exitinfo1;
- /* If we're in long mode, we shouldn't check the segment presence and limit */
+ /* If we're in long mode, we shouldn't check the segment presence & limit */
long_mode = vmcb->cs.attributes.fields.l && vmcb->efer & EFER_LMA;
/* d field of cs.attributes is 1 for 32-bit, 0 for 16 or 64 bit.
@@ -1832,7 +1873,8 @@ static int mov_to_cr(int gpreg, int cr,
* arch->shadow_table should hold the next CR3 for shadow
*/
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
+ HVM_DBG_LOG(DBG_LEVEL_VMMU,
+ "Update CR3 value = %lx, mfn = %lx",
v->arch.hvm_svm.cpu_cr3, mfn);
#endif
}
@@ -1847,7 +1889,7 @@ static int mov_to_cr(int gpreg, int cr,
* it must enable PG after that, and it is a 32-bit PAE
* guest */
- if ( !shadow_set_guest_paging_levels(v->domain, PAGING_L3) )
+ if ( !shadow_set_guest_paging_levels(v->domain, PAGING_L3))
{
printk("Unsupported guest paging levels\n");
domain_crash_synchronous();
@@ -1855,8 +1897,7 @@ static int mov_to_cr(int gpreg, int cr,
}
else
{
- if ( !shadow_set_guest_paging_levels(v->domain,
- PAGING_L4) )
+ if ( !shadow_set_guest_paging_levels(v->domain, PAGING_L4))
{
printk("Unsupported guest paging levels\n");
domain_crash_synchronous();
@@ -1920,9 +1961,9 @@ static int svm_cr_access(struct vcpu *v,
ASSERT(vmcb);
inst_copy_from_guest(buffer, svm_rip2pointer(vmcb), sizeof(buffer));
- /* get index to first actual instruction byte - as we will need to know where the
- * prefix lives later on
- */
+
+ /* get index to first actual instruction byte - as we will need to know
+ where the prefix lives later on */
index = skip_prefix_bytes(buffer, sizeof(buffer));
if (type == TYPE_MOV_TO_CR)
@@ -2071,7 +2112,7 @@ static inline void svm_do_msr_access(
switch (regs->ecx)
{
case MSR_IA32_TIME_STAMP_COUNTER:
- svm_set_guest_time(v, msr_content);
+ hvm_set_guest_time(v, msr_content);
break;
case MSR_IA32_SYSENTER_CS:
vmcb->sysenter_cs = msr_content;
@@ -2116,7 +2157,7 @@ static inline void svm_vmexit_do_hlt(str
/* check for interrupt not handled or new interrupt */
if ( vmcb->vintr.fields.irq || cpu_has_pending_irq(v) )
- return;
+ return;
if ( !v->vcpu_id )
next_pit = get_scheduled(v, pt->irq, pt);
@@ -2138,8 +2179,8 @@ static void svm_vmexit_do_invd(struct vm
* have cache-snooping that solves it anyways. -- Mats P.
*/
- /* Tell the user that we did this - just in case someone runs some really weird
- * operating system and wants to know why it's not working as it should...
+ /* Tell the user that we did this - just in case someone runs some really
+ * weird operating system and wants to know why it's not working...
*/
printk("INVD instruction intercepted - ignored\n");
@@ -2198,7 +2239,8 @@ void svm_handle_invlpg(const short invlp
*/
if (inst_copy_from_guest(opcode, svm_rip2pointer(vmcb), length) < length)
{
- printk("svm_handle_invlpg (): Error reading memory %d bytes\n", length);
+ printk("svm_handle_invlpg (): Error reading memory %d bytes\n",
+ length);
__hvm_bug(regs);
}
@@ -2463,7 +2505,7 @@ void svm_dump_host_regs(const char *from
__asm__ __volatile__ ("\tmov %%cr0,%0\n"
"\tmov %%cr3,%1\n"
- : "=r" (cr0), "=r"(cr3));
+ : "=r" (cr0), "=r"(cr3));
printf("%s: pt = %lx, cr3 = %lx, cr0 = %lx\n", __func__, pt, cr3, cr0);
}
@@ -2626,17 +2668,21 @@ void walk_shadow_and_guest_pt(unsigned l
spte = l1e_empty();
- /* This is actually overkill - we only need to make sure the hl2 is in-sync. */
+ /* This is actually overkill - we only need to ensure the hl2 is in-sync.*/
shadow_sync_va(v, gva);
gpte.l1 = 0;
- __copy_from_user(&gpte, &linear_pg_table[ l1_linear_offset(gva) ], sizeof(gpte) );
+ __copy_from_user(&gpte, &linear_pg_table[ l1_linear_offset(gva) ],
+ sizeof(gpte) );
printk( "G-PTE = %x, flags=%x\n", gpte.l1, l1e_get_flags(gpte) );
- __copy_from_user( &spte, &phys_to_machine_mapping[ l1e_get_pfn( gpte ) ],
+ __copy_from_user( &spte, &phys_to_machine_mapping[ l1e_get_pfn( gpte ) ],
sizeof(spte) );
printk( "S-PTE = %x, flags=%x\n", spte.l1, l1e_get_flags(spte));
}
#endif /* SVM_WALK_GUEST_PAGES */
+
+
+
asmlinkage void svm_vmexit_handler(struct cpu_user_regs regs)
{
@@ -2654,6 +2700,13 @@ asmlinkage void svm_vmexit_handler(struc
vmcb->tlb_control = 1;
+
+ if (exit_reason == VMEXIT_INVALID)
+ {
+ svm_dump_vmcb(__func__, vmcb);
+ domain_crash_synchronous();
+ }
+
#ifdef SVM_EXTRA_DEBUG
{
#if defined(__i386__)
@@ -2666,8 +2719,8 @@ asmlinkage void svm_vmexit_handler(struc
{
if (svm_paging_enabled(v) && !mmio_space(gva_to_gpa(vmcb->exitinfo2)))
{
- printk("I%08ld,ExC=%s(%d),IP=%x:%llx,I1=%llx,I2=%llx,INT=%llx, gpa=%llx\n",
- intercepts_counter,
+ printk("I%08ld,ExC=%s(%d),IP=%x:%llx,I1=%llx,I2=%llx,INT=%llx, "
+ "gpa=%llx\n", intercepts_counter,
exit_reasons[exit_reason], exit_reason, regs.cs,
(unsigned long long) regs.rip,
(unsigned long long) vmcb->exitinfo1,
@@ -2750,13 +2803,6 @@ asmlinkage void svm_vmexit_handler(struc
}
#endif /* SVM_EXTRA_DEBUG */
- if (exit_reason == -1)
- {
- svm_dump_vmcb(__func__, vmcb);
- printk("%s: exit_reason == -1 - Did someone clobber the VMCB\n",
- __func__);
- domain_crash_synchronous();
- }
perfc_incra(svmexits, exit_reason);
eip = vmcb->rip;
@@ -3011,7 +3057,7 @@ asmlinkage void svm_vmexit_handler(struc
#ifdef SVM_EXTRA_DEBUG
if (do_debug)
{
- printk("%s: Done switch on vmexit_code\n", __func__);
+ printk("%s: Done switch on vmexit_code\n", __func__);
svm_dump_regs(__func__, &regs);
}
@@ -3058,9 +3104,6 @@ asmlinkage void svm_asid(void)
v->arch.hvm_svm.asid_core = v->arch.hvm_svm.launch_core;
clear_bit( ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags );
}
-
- /* make sure the HSA is set for the current core */
- set_hsa_to_guest( &v->arch.hvm_svm );
}
/*
diff -r 5d42f6f0a187 -r d20e1835c24b xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c Tue Aug 15 18:20:03 2006 +0100
@@ -35,72 +35,61 @@
#include <xen/event.h>
#include <xen/kernel.h>
#include <xen/domain_page.h>
-
-extern struct svm_percore_globals svm_globals[];
+#include <xen/keyhandler.h>
+
extern int svm_dbg_on;
extern int asidpool_assign_next( struct vmcb_struct *vmcb, int retire_current,
int oldcore, int newcore);
-extern void set_hsa_to_guest( struct arch_svm_struct *arch_svm );
-
-#define round_pgdown(_p) ((_p)&PAGE_MASK) /* coped from domain.c */
#define GUEST_SEGMENT_LIMIT 0xffffffff
#define IOPM_SIZE (12 * 1024)
#define MSRPM_SIZE (8 * 1024)
+/* VMCBs and HSAs are architecturally defined to be a 4K page each */
+#define VMCB_ORDER 0
+#define HSA_ORDER 0
+
+
struct vmcb_struct *alloc_vmcb(void)
{
- struct vmcb_struct *vmcb = NULL;
- unsigned int order;
- order = get_order_from_bytes(sizeof(struct vmcb_struct));
- ASSERT(order >= 0);
- vmcb = alloc_xenheap_pages(order);
+ struct vmcb_struct *vmcb = alloc_xenheap_pages(VMCB_ORDER);
+
+ if (!vmcb) {
+ printk("Warning: failed to allocate vmcb.\n");
+ return NULL;
+ }
+
+ memset(vmcb, 0, (PAGE_SIZE << VMCB_ORDER));
+ return vmcb;
+}
+
+
+void free_vmcb(struct vmcb_struct *vmcb)
+{
ASSERT(vmcb);
-
- if (vmcb)
- memset(vmcb, 0, sizeof(struct vmcb_struct));
-
- return vmcb;
-}
-
-
-void free_vmcb(struct vmcb_struct *vmcb)
-{
- unsigned int order;
-
- order = get_order_from_bytes(sizeof(struct vmcb_struct));
- ASSERT(vmcb);
-
- if (vmcb)
- free_xenheap_pages(vmcb, order);
+ free_xenheap_pages(vmcb, VMCB_ORDER);
}
struct host_save_area *alloc_host_save_area(void)
{
- unsigned int order = 0;
- struct host_save_area *hsa = NULL;
-
- hsa = alloc_xenheap_pages(order);
+ struct host_save_area *hsa = alloc_xenheap_pages(HSA_ORDER);
+
+ if (!hsa) {
+ printk("Warning: failed to allocate host save area.\n");
+ return NULL;
+ }
+
+ memset(hsa, 0, (PAGE_SIZE << HSA_ORDER));
+ return hsa;
+}
+
+
+void free_host_save_area(struct host_save_area *hsa)
+{
ASSERT(hsa);
-
- if (hsa)
- memset(hsa, 0, PAGE_SIZE);
-
- return hsa;
-}
-
-
-void free_host_save_area(struct host_save_area *hsa)
-{
- unsigned int order;
-
- order = get_order_from_bytes(PAGE_SIZE);
- ASSERT(hsa);
-
- if (hsa)
- free_xenheap_pages(hsa, order);
+ free_xenheap_pages(hsa, HSA_ORDER);
}
@@ -187,7 +176,7 @@ static int construct_init_vmcb_guest(str
vmcb->cs.sel = regs->cs;
vmcb->es.sel = regs->es;
vmcb->ss.sel = regs->ss;
- vmcb->ds.sel = regs->ds;
+ vmcb->ds.sel = regs->ds;
vmcb->fs.sel = regs->fs;
vmcb->gs.sel = regs->gs;
@@ -221,7 +210,7 @@ static int construct_init_vmcb_guest(str
attrib.fields.g = 1; /* 4K pages in limit */
/* Data selectors */
- vmcb->es.attributes = attrib;
+ vmcb->es.attributes = attrib;
vmcb->ss.attributes = attrib;
vmcb->ds.attributes = attrib;
vmcb->fs.attributes = attrib;
@@ -257,7 +246,7 @@ static int construct_init_vmcb_guest(str
/* CR3 is set in svm_final_setup_guest */
- __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (crn) :);
+ __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (crn) :);
crn &= ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE);
arch_svm->cpu_shadow_cr4 = crn;
vmcb->cr4 = crn | SVM_CR4_HOST_MASK;
@@ -306,7 +295,8 @@ void destroy_vmcb(struct arch_svm_struct
* construct the vmcb.
*/
-int construct_vmcb(struct arch_svm_struct *arch_svm, struct cpu_user_regs *regs)
+int construct_vmcb(struct arch_svm_struct *arch_svm,
+ struct cpu_user_regs *regs)
{
int error;
long rc=0;
@@ -320,7 +310,9 @@ int construct_vmcb(struct arch_svm_struc
}
/* update the HSA for the current Core */
+#if 0
set_hsa_to_guest( arch_svm );
+#endif
arch_svm->vmcb_pa = (u64) virt_to_maddr(arch_svm->vmcb);
if ((error = construct_vmcb_controls(arch_svm)))
@@ -359,7 +351,7 @@ void svm_do_launch(struct vcpu *v)
ASSERT(vmcb);
/* Update CR3, GDT, LDT, TR */
- svm_stts(v);
+ hvm_stts(v);
/* current core is the one we intend to perform the VMRUN on */
v->arch.hvm_svm.launch_core = v->arch.hvm_svm.asid_core = core;
@@ -393,10 +385,8 @@ void svm_do_launch(struct vcpu *v)
printk("%s: phys_table = %lx\n", __func__, pt);
}
- if ( svm_paging_enabled(v) )
- vmcb->cr3 = pagetable_get_paddr(v->arch.guest_table);
- else
- vmcb->cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
+ /* At launch we always use the phys_table */
+ vmcb->cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
if (svm_dbg_on)
{
@@ -410,7 +400,7 @@ void svm_do_launch(struct vcpu *v)
v->arch.hvm_svm.saved_irq_vector = -1;
- svm_set_guest_time(v, 0);
+ hvm_set_guest_time(v, 0);
if (svm_dbg_on)
svm_dump_vmcb(__func__, vmcb);
@@ -419,61 +409,12 @@ void svm_do_launch(struct vcpu *v)
}
-void set_hsa_to_guest( struct arch_svm_struct *arch_svm )
-{
- arch_svm->host_save_pa = svm_globals[ smp_processor_id() ].scratch_hsa_pa;
-}
-
-/*
- * Resume the guest.
- */
-/* XXX svm_do_resume and vmx_do_resume are remarkably similar; could
- they be unified? */
-void svm_do_resume(struct vcpu *v)
-{
- struct periodic_time *pt = &v->domain->arch.hvm_domain.pl_time.periodic_tm;
- ioreq_t *p;
-
- svm_stts(v);
-
- /* pick up the elapsed PIT ticks and re-enable pit_timer */
- if ( pt->enabled && pt->first_injected ) {
- if ( v->arch.hvm_vcpu.guest_time ) {
- svm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
- v->arch.hvm_vcpu.guest_time = 0;
- }
- pickup_deactive_ticks(pt);
- }
-
- p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
- wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
- p->state != STATE_IOREQ_READY &&
- p->state != STATE_IOREQ_INPROCESS);
- if ( p->state == STATE_IORESP_READY )
- hvm_io_assist(v);
- if ( p->state != STATE_INVALID ) {
- printf("Weird HVM iorequest state %d.\n", p->state);
- domain_crash(v->domain);
- }
-}
-
-void svm_launch_fail(unsigned long eflags)
-{
- BUG();
-}
-
-
-void svm_resume_fail(unsigned long eflags)
-{
- BUG();
-}
-
-
-void svm_dump_sel(char *name, segment_selector_t *s)
+
+static void svm_dump_sel(char *name, segment_selector_t *s)
{
printf("%s: sel=0x%04x, attr=0x%04x, limit=0x%08x, base=0x%016llx\n",
name, s->sel, s->attributes.bytes, s->limit,
- (unsigned long long)s->base);
+ (unsigned long long)s->base);
}
@@ -483,9 +424,10 @@ void svm_dump_vmcb(const char *from, str
printf("Size of VMCB = %d, address = %p\n",
(int) sizeof(struct vmcb_struct), vmcb);
- printf("cr_intercepts = 0x%08x dr_intercepts = 0x%08x exception_intercepts "
- "= 0x%08x\n", vmcb->cr_intercepts, vmcb->dr_intercepts,
- vmcb->exception_intercepts);
+ printf("cr_intercepts = 0x%08x dr_intercepts = 0x%08x "
+ "exception_intercepts = 0x%08x\n",
+ vmcb->cr_intercepts, vmcb->dr_intercepts,
+ vmcb->exception_intercepts);
printf("general1_intercepts = 0x%08x general2_intercepts = 0x%08x\n",
vmcb->general1_intercepts, vmcb->general2_intercepts);
printf("iopm_base_pa = %016llx msrpm_base_pa = 0x%016llx tsc_offset = "
@@ -519,7 +461,8 @@ void svm_dump_vmcb(const char *from, str
printf("DR6 = 0x%016llx, DR7 = 0x%016llx\n",
(unsigned long long) vmcb->dr6, (unsigned long long) vmcb->dr7);
printf("CSTAR = 0x%016llx SFMask = 0x%016llx\n",
- (unsigned long long) vmcb->cstar, (unsigned long long) vmcb->sfmask);
+ (unsigned long long) vmcb->cstar,
+ (unsigned long long) vmcb->sfmask);
printf("KernGSBase = 0x%016llx PAT = 0x%016llx \n",
(unsigned long long) vmcb->kerngsbase,
(unsigned long long) vmcb->g_pat);
@@ -537,6 +480,38 @@ void svm_dump_vmcb(const char *from, str
svm_dump_sel("TR", &vmcb->tr);
}
+static void vmcb_dump(unsigned char ch)
+{
+ struct domain *d;
+ struct vcpu *v;
+
+ printk("*********** VMCB Areas **************\n");
+ for_each_domain(d) {
+ printk("\n>>> Domain %d <<<\n", d->domain_id);
+ for_each_vcpu(d, v) {
+
+ /*
+ * Presumably, if a domain is not an HVM guest,
+ * the very first CPU will not pass this test
+ */
+ if (!hvm_guest(v)) {
+ printk("\t\tNot HVM guest\n");
+ break;
+ }
+ printk("\tVCPU %d\n", v->vcpu_id);
+
+ svm_dump_vmcb("key_handler", v->arch.hvm_svm.vmcb);
+ }
+ }
+
+ printk("**************************************\n");
+}
+
+void setup_vmcb_dump(void)
+{
+ register_keyhandler('v', vmcb_dump, "dump AMD-V VMCBs");
+}
+
/*
* Local variables:
* mode: C
diff -r 5d42f6f0a187 -r d20e1835c24b xen/arch/x86/hvm/svm/x86_32/exits.S
--- a/xen/arch/x86/hvm/svm/x86_32/exits.S Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/arch/x86/hvm/svm/x86_32/exits.S Tue Aug 15 18:20:03 2006 +0100
@@ -95,7 +95,8 @@ ENTRY(svm_asm_do_launch)
movl VCPU_svm_vmcb(%ebx), %ecx
movl 24(%esp), %eax
movl %eax, VMCB_rax(%ecx)
- movl VCPU_svm_hsa_pa(%ebx), %eax
+ movl VCPU_processor(%ebx), %eax
+ movl root_vmcb_pa(,%eax,8), %eax
VMSAVE
movl VCPU_svm_vmcb_pa(%ebx), %eax
@@ -119,7 +120,8 @@ ENTRY(svm_asm_do_launch)
GET_CURRENT(%eax)
- movl VCPU_svm_hsa_pa(%eax), %eax
+ movl VCPU_processor(%eax), %eax
+ movl root_vmcb_pa(,%eax,8), %eax
VMLOAD
HVM_SAVE_ALL_NOSEGREGS
@@ -133,7 +135,7 @@ svm_test_all_events:
svm_test_all_events:
GET_CURRENT(%ebx)
pushl %ebx
- call svm_do_resume
+ call hvm_do_resume
addl $4, %esp
/*test_all_events:*/
xorl %ecx,%ecx
diff -r 5d42f6f0a187 -r d20e1835c24b xen/arch/x86/hvm/svm/x86_64/exits.S
--- a/xen/arch/x86/hvm/svm/x86_64/exits.S Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/arch/x86/hvm/svm/x86_64/exits.S Tue Aug 15 18:20:03 2006 +0100
@@ -105,7 +105,10 @@ ENTRY(svm_asm_do_launch)
movq VCPU_svm_vmcb(%rbx), %rcx
movq UREGS_rax(%rsp), %rax
movq %rax, VMCB_rax(%rcx)
- movq VCPU_svm_hsa_pa(%rbx), %rax
+ leaq root_vmcb_pa(%rip), %rax
+ movl VCPU_processor(%rbx), %ecx
+ shll $3, %ecx
+ addq %rcx, %rax
VMSAVE
movq VCPU_svm_vmcb_pa(%rbx), %rax
@@ -133,13 +136,15 @@ ENTRY(svm_asm_do_launch)
VMLOAD
VMRUN
VMSAVE
- /* rax is the only register we're allowed to touch here... */
+ HVM_SAVE_ALL_NOSEGREGS
- GET_CURRENT(%rax)
- movq VCPU_svm_hsa_pa(%rax), %rax
+ GET_CURRENT(%rbx)
+ movl VCPU_processor(%rbx), %ecx
+ leaq root_vmcb_pa(%rip), %rax
+ shll $3, %ecx
+ addq %rcx, %rax
VMLOAD
- HVM_SAVE_ALL_NOSEGREGS
STGI
call svm_vmexit_handler
jmp svm_asm_do_resume
@@ -148,7 +153,7 @@ svm_test_all_events:
svm_test_all_events:
GET_CURRENT(%rbx)
movq %rbx, %rdi
- call svm_do_resume
+ call hvm_do_resume
/*test_all_events:*/
cli # tests must not race interrupts
/*test_softirqs:*/
diff -r 5d42f6f0a187 -r d20e1835c24b xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/arch/x86/hvm/vlapic.c Tue Aug 15 18:20:03 2006 +0100
@@ -493,7 +493,7 @@ static void vlapic_read_aligned(struct v
case APIC_ESR:
vlapic->err_write_count = 0;
- *result = vlapic_get_reg(vlapic, offset);
+ *result = vlapic_get_reg(vlapic, offset);
break;
default:
diff -r 5d42f6f0a187 -r d20e1835c24b xen/arch/x86/hvm/vmx/io.c
--- a/xen/arch/x86/hvm/vmx/io.c Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/io.c Tue Aug 15 18:20:03 2006 +0100
@@ -38,57 +38,6 @@
#include <asm/hvm/vlapic.h>
#include <public/hvm/ioreq.h>
-#define BSP_CPU(v) (!(v->vcpu_id))
-
-static inline
-void __set_tsc_offset(u64 offset)
-{
- __vmwrite(TSC_OFFSET, offset);
-#if defined (__i386__)
- __vmwrite(TSC_OFFSET_HIGH, offset >> 32);
-#endif
-}
-
-void set_guest_time(struct vcpu *v, u64 gtime)
-{
- u64 host_tsc;
-
- rdtscll(host_tsc);
-
- v->arch.hvm_vcpu.cache_tsc_offset = gtime - host_tsc;
- __set_tsc_offset(v->arch.hvm_vcpu.cache_tsc_offset);
-}
-
-static inline void
-interrupt_post_injection(struct vcpu * v, int vector, int type)
-{
- struct periodic_time *pt = &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
-
- if ( is_pit_irq(v, vector, type) ) {
- if ( !pt->first_injected ) {
- pt->pending_intr_nr = 0;
- pt->last_plt_gtime = hvm_get_guest_time(v);
- pt->scheduled = NOW() + pt->period;
- set_timer(&pt->timer, pt->scheduled);
- pt->first_injected = 1;
- } else {
- pt->pending_intr_nr--;
- pt->last_plt_gtime += pt->period_cycles;
- set_guest_time(v, pt->last_plt_gtime);
- pit_time_fired(v, pt->priv);
- }
- }
-
- switch(type)
- {
- case APIC_DM_EXTINT:
- break;
-
- default:
- vlapic_post_injection(v, vector, type);
- break;
- }
-}
static inline void
enable_irq_window(struct vcpu *v)
@@ -194,7 +143,8 @@ asmlinkage void vmx_intr_assist(void)
if (likely(!has_ext_irq)) return;
- if (unlikely(is_interruptibility_state())) { /* pre-cleared for emulated instruction */
+ if (unlikely(is_interruptibility_state())) {
+ /* pre-cleared for emulated instruction */
enable_irq_window(v);
HVM_DBG_LOG(DBG_LEVEL_1, "interruptibility");
return;
@@ -206,7 +156,7 @@ asmlinkage void vmx_intr_assist(void)
return;
}
- highest_vector = cpu_get_interrupt(v, &intr_type);
+ highest_vector = cpu_get_interrupt(v, &intr_type);
switch (intr_type) {
case APIC_DM_EXTINT:
case APIC_DM_FIXED:
@@ -224,37 +174,9 @@ asmlinkage void vmx_intr_assist(void)
BUG();
break;
}
-
- interrupt_post_injection(v, highest_vector, intr_type);
+
+ hvm_interrupt_post(v, highest_vector, intr_type);
return;
-}
-
-void vmx_do_resume(struct vcpu *v)
-{
- ioreq_t *p;
- struct periodic_time *pt = &v->domain->arch.hvm_domain.pl_time.periodic_tm;
-
- vmx_stts();
-
- /* pick up the elapsed PIT ticks and re-enable pit_timer */
- if ( pt->enabled && pt->first_injected ) {
- if ( v->arch.hvm_vcpu.guest_time ) {
- set_guest_time(v, v->arch.hvm_vcpu.guest_time);
- v->arch.hvm_vcpu.guest_time = 0;
- }
- pickup_deactive_ticks(pt);
- }
-
- p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
- wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
- p->state != STATE_IOREQ_READY &&
- p->state != STATE_IOREQ_INPROCESS);
- if ( p->state == STATE_IORESP_READY )
- hvm_io_assist(v);
- if ( p->state != STATE_INVALID ) {
- printf("Weird HVM iorequest state %d.\n", p->state);
- domain_crash(v->domain);
- }
}
/*
diff -r 5d42f6f0a187 -r d20e1835c24b xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c Tue Aug 15 18:20:03 2006 +0100
@@ -261,7 +261,7 @@ static void vmx_do_launch(struct vcpu *v
error |= __vmwrite(CR4_READ_SHADOW, cr4);
- vmx_stts();
+ hvm_stts(v);
if(hvm_apic_support(v->domain))
vlapic_init(v);
@@ -282,7 +282,7 @@ static void vmx_do_launch(struct vcpu *v
v->arch.schedule_tail = arch_vmx_do_resume;
/* init guest tsc to start from 0 */
- set_guest_time(v, 0);
+ hvm_set_guest_time(v, 0);
}
/*
@@ -539,7 +539,7 @@ void arch_vmx_do_resume(struct vcpu *v)
vmx_set_host_env(v);
}
- vmx_do_resume(v);
+ hvm_do_resume(v);
reset_stack_and_jump(vmx_asm_do_vmentry);
}
@@ -642,13 +642,11 @@ static void vmcs_dump(unsigned char ch)
printk("**************************************\n");
}
-static int __init setup_vmcs_dump(void)
+void setup_vmcs_dump(void)
{
register_keyhandler('v', vmcs_dump, "dump Intel's VMCS");
- return 0;
-}
-
-__initcall(setup_vmcs_dump);
+}
+
/*
* Local variables:
diff -r 5d42f6f0a187 -r d20e1835c24b xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Tue Aug 15 18:20:03 2006 +0100
@@ -628,6 +628,45 @@ static unsigned long vmx_get_ctrl_reg(st
return 0; /* dummy */
}
+
+
+/* Make sure that xen intercepts any FP accesses from current */
+static void vmx_stts(struct vcpu *v)
+{
+ unsigned long cr0;
+
+ /* VMX depends on operating on the current vcpu */
+ ASSERT(v == current);
+
+ /*
+ * If the guest does not have TS enabled then we must cause and handle an
+ * exception on first use of the FPU. If the guest *does* have TS enabled
+ * then this is not necessary: no FPU activity can occur until the guest
+ * clears CR0.TS, and we will initialise the FPU when that happens.
+ */
+ __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
+ if ( !(cr0 & X86_CR0_TS) )
+ {
+ __vmread_vcpu(v, GUEST_CR0, &cr0);
+ __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);
+ __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
+ }
+}
+
+
+static void vmx_set_tsc_offset(struct vcpu *v, u64 offset)
+{
+ /* VMX depends on operating on the current vcpu */
+ ASSERT(v == current);
+
+ __vmwrite(TSC_OFFSET, offset);
+#if defined (__i386__)
+ __vmwrite(TSC_OFFSET_HIGH, offset >> 32);
+#endif
+}
+
+
+
/* SMP VMX guest support */
static void vmx_init_ap_context(struct vcpu_guest_context *ctxt,
int vcpuid, int trampoline_vector)
@@ -716,6 +755,9 @@ static void vmx_setup_hvm_funcs(void)
hvm_funcs.paging_enabled = vmx_paging_enabled;
hvm_funcs.instruction_length = vmx_instruction_length;
hvm_funcs.get_guest_ctrl_reg = vmx_get_ctrl_reg;
+
+ hvm_funcs.stts = vmx_stts;
+ hvm_funcs.set_tsc_offset = vmx_set_tsc_offset;
hvm_funcs.init_ap_context = vmx_init_ap_context;
@@ -768,6 +810,8 @@ int start_vmx(void)
set_in_cr4(X86_CR4_VMXE);
vmx_init_vmcs_config();
+
+ setup_vmcs_dump();
if ( (vmcs = vmx_alloc_host_vmcs()) == NULL )
{
@@ -916,7 +960,7 @@ static void vmx_vmexit_do_cpuid(struct c
if ( input == CPUID_LEAF_0x1 )
{
/* mask off reserved bits */
- ecx &= ~VMX_VCPU_CPUID_L1_ECX_RESERVED;
+ ecx &= ~VMX_VCPU_CPUID_L1_ECX_RESERVED;
if ( !hvm_apic_support(v->domain) ||
!vlapic_global_enabled((VLAPIC(v))) )
@@ -930,7 +974,7 @@ static void vmx_vmexit_do_cpuid(struct c
#if CONFIG_PAGING_LEVELS < 3
edx &= ~(bitmaskof(X86_FEATURE_PAE) |
bitmaskof(X86_FEATURE_PSE) |
- bitmaskof(X86_FEATURE_PSE36));
+ bitmaskof(X86_FEATURE_PSE36));
#else
if ( v->domain->arch.ops->guest_paging_levels == PAGING_L2 )
{
@@ -1043,6 +1087,7 @@ static void vmx_vmexit_do_invlpg(unsigne
*/
shadow_invlpg(v, va);
}
+
static int check_for_null_selector(unsigned long eip)
{
@@ -1977,7 +2022,7 @@ static inline void vmx_do_msr_write(stru
switch (regs->ecx) {
case MSR_IA32_TIME_STAMP_COUNTER:
- set_guest_time(v, msr_content);
+ hvm_set_guest_time(v, msr_content);
break;
case MSR_IA32_SYSENTER_CS:
__vmwrite(GUEST_SYSENTER_CS, msr_content);
diff -r 5d42f6f0a187 -r d20e1835c24b xen/arch/x86/hvm/vmx/x86_32/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_32/exits.S Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S Tue Aug 15 18:20:03 2006 +0100
@@ -95,7 +95,7 @@ ENTRY(vmx_asm_do_vmentry)
ENTRY(vmx_asm_do_vmentry)
GET_CURRENT(%ebx)
pushl %ebx
- call vmx_do_resume
+ call hvm_do_resume
addl $4, %esp
cli # tests must not race interrupts
diff -r 5d42f6f0a187 -r d20e1835c24b xen/arch/x86/hvm/vmx/x86_64/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_64/exits.S Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S Tue Aug 15 18:20:03 2006 +0100
@@ -106,7 +106,7 @@ ENTRY(vmx_asm_do_vmentry)
ENTRY(vmx_asm_do_vmentry)
GET_CURRENT(%rbx)
movq %rbx, %rdi
- call vmx_do_resume
+ call hvm_do_resume
cli # tests must not race interrupts
movl VCPU_processor(%rbx),%eax
diff -r 5d42f6f0a187 -r d20e1835c24b xen/arch/x86/x86_32/asm-offsets.c
--- a/xen/arch/x86/x86_32/asm-offsets.c Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/arch/x86/x86_32/asm-offsets.c Tue Aug 15 18:20:03 2006 +0100
@@ -81,7 +81,6 @@ void __dummy__(void)
BLANK();
OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
- OFFSET(VCPU_svm_hsa_pa, struct vcpu, arch.hvm_svm.host_save_pa);
OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
OFFSET(VCPU_svm_vmexit_tsc, struct vcpu, arch.hvm_svm.vmexit_tsc);
BLANK();
diff -r 5d42f6f0a187 -r d20e1835c24b xen/arch/x86/x86_64/asm-offsets.c
--- a/xen/arch/x86/x86_64/asm-offsets.c Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/arch/x86/x86_64/asm-offsets.c Tue Aug 15 18:20:03 2006 +0100
@@ -75,7 +75,6 @@ void __dummy__(void)
BLANK();
OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
- OFFSET(VCPU_svm_hsa_pa, struct vcpu, arch.hvm_svm.host_save_pa);
OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
OFFSET(VCPU_svm_vmexit_tsc, struct vcpu, arch.hvm_svm.vmexit_tsc);
BLANK();
diff -r 5d42f6f0a187 -r d20e1835c24b xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h Tue Aug 15 18:20:03 2006 +0100
@@ -58,6 +58,14 @@ struct hvm_function_table {
int (*paging_enabled)(struct vcpu *v);
int (*instruction_length)(struct vcpu *v);
unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
+
+ /*
+ * Update specifics of the guest state:
+ * 1) TS bit in guest cr0
+ * 2) TSC offset in guest
+ */
+ void (*stts)(struct vcpu *v);
+ void (*set_tsc_offset)(struct vcpu *v, u64 offset);
void (*init_ap_context)(struct vcpu_guest_context *ctxt,
int vcpuid, int trampoline_vector);
@@ -142,6 +150,10 @@ hvm_get_guest_ctrl_reg(struct vcpu *v, u
return 0; /* force to fail */
}
+extern void hvm_stts(struct vcpu *v);
+extern void hvm_set_guest_time(struct vcpu *v, u64 gtime);
+extern void hvm_do_resume(struct vcpu *v);
+
static inline void
hvm_init_ap_context(struct vcpu_guest_context *ctxt,
int vcpuid, int trampoline_vector)
diff -r 5d42f6f0a187 -r d20e1835c24b xen/include/asm-x86/hvm/io.h
--- a/xen/include/asm-x86/hvm/io.h Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/include/asm-x86/hvm/io.h Tue Aug 15 18:20:03 2006 +0100
@@ -150,6 +150,7 @@ static inline int irq_masked(unsigned lo
#endif
extern void handle_mmio(unsigned long, unsigned long);
+extern void hvm_interrupt_post(struct vcpu *v, int vector, int type);
extern void hvm_io_assist(struct vcpu *v);
extern void pic_irq_request(void *data, int level);
extern void hvm_pic_assist(struct vcpu *v);
diff -r 5d42f6f0a187 -r d20e1835c24b xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/include/asm-x86/hvm/svm/svm.h Tue Aug 15 18:20:03 2006 +0100
@@ -28,54 +28,12 @@
#include <asm/hvm/svm/vmcb.h>
#include <asm/i387.h>
-extern void asidpool_retire( struct vmcb_struct *vmcb, int core );
+extern void asidpool_retire(struct vmcb_struct *vmcb, int core);
+extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
+extern void svm_do_launch(struct vcpu *v);
+extern void arch_svm_do_resume(struct vcpu *v);
-extern void svm_asm_vmexit_handler(struct cpu_user_regs);
-extern void svm_setup_function_table(struct vcpu *v);
-
-extern int vmcb_size;
-extern unsigned int cpu_rev;
-
-extern void svm_stop(void);
-extern void svm_save_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs);
-extern void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs);
-extern void svm_vmread(struct vcpu *v, int index, unsigned long *value);
-extern void svm_vmwrite(struct vcpu *v, int index, unsigned long value);
-extern void svm_final_setup_guest(struct vcpu *v);
-extern int svm_paging_enabled(struct vcpu *v);
-extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
-extern void svm_stts(struct vcpu *v);
-extern void svm_do_launch(struct vcpu *v);
-extern void svm_do_resume(struct vcpu *v);
-extern void svm_set_guest_time(struct vcpu *v, u64 gtime);
-extern void arch_svm_do_resume(struct vcpu *v);
-extern int load_vmcb(struct arch_svm_struct *arch_svm, u64 phys_hsa);
-/* For debugging. Remove when no longer needed. */
-extern void svm_dump_host_regs(const char *from);
-
-extern void svm_migrate_timers(struct vcpu *v);
-
-/* ASID API */
-enum {
- ASID_AVAILABLE = 0,
- ASID_INUSE,
- ASID_RETIRED
-};
-#define INITIAL_ASID 0
-#define ASID_MAX 64
-
-struct asid_pool {
- spinlock_t asid_lock;
- u32 asid[ASID_MAX];
-};
-
-struct svm_percore_globals {
- void *hsa;
- u64 hsa_pa;
- void *scratch_hsa;
- u64 scratch_hsa_pa;
- struct asid_pool ASIDpool;
-};
+extern u64 root_vmcb_pa[NR_CPUS];
#define SVM_REG_EAX (0)
#define SVM_REG_ECX (1)
diff -r 5d42f6f0a187 -r d20e1835c24b xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h Tue Aug 15 18:20:03 2006 +0100
@@ -434,8 +434,6 @@ struct vmcb_struct {
struct arch_svm_struct {
struct vmcb_struct *vmcb;
- void *host_save_area;
- u64 host_save_pa;
u64 vmcb_pa;
u32 *iopm;
u32 *msrpm;
@@ -453,12 +451,15 @@ struct arch_svm_struct {
struct timer hlt_timer; /* hlt ins emulation wakeup timer */
};
-struct vmcb_struct *alloc_vmcb(void);
-struct host_save_area *alloc_host_save_area(void);
-void free_vmcb(struct vmcb_struct *vmcb);
-void free_host_save_area(struct host_save_area *hsa);
-void dump_vmcb(void);
-int construct_vmcb(struct arch_svm_struct *, struct cpu_user_regs *);
+extern struct vmcb_struct *alloc_vmcb(void);
+extern struct host_save_area *alloc_host_save_area(void);
+extern void free_vmcb(struct vmcb_struct *vmcb);
+extern void free_host_save_area(struct host_save_area *hsa);
+
+extern int construct_vmcb(struct arch_svm_struct *, struct cpu_user_regs *);
+extern void destroy_vmcb(struct arch_svm_struct *);
+
+extern void setup_vmcb_dump(void);
#define VMCB_USE_HOST_ENV 1
#define VMCB_USE_SEPARATE_ENV 0
diff -r 5d42f6f0a187 -r d20e1835c24b xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h Tue Aug 15 18:20:03 2006 +0100
@@ -27,6 +27,7 @@ extern int start_vmx(void);
extern int start_vmx(void);
extern void vmcs_dump_vcpu(void);
extern void vmx_init_vmcs_config(void);
+extern void setup_vmcs_dump(void);
enum {
VMX_CPU_STATE_PAE_ENABLED=0,
diff -r 5d42f6f0a187 -r d20e1835c24b xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Tue Aug 15 18:20:03 2006 +0100
@@ -395,31 +395,6 @@ static inline int __vmxon (u64 addr)
return rc;
}
-/* Make sure that xen intercepts any FP accesses from current */
-static inline void vmx_stts(void)
-{
- unsigned long cr0;
- struct vcpu *v = current;
-
- /* FPU state already dirty? Then no need to setup_fpu() lazily. */
- if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
- return;
-
- /*
- * If the guest does not have TS enabled then we must cause and handle an
- * exception on first use of the FPU. If the guest *does* have TS enabled
- * then this is not necessary: no FPU activity can occur until the guest
- * clears CR0.TS, and we will initialise the FPU when that happens.
- */
- __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
- if ( !(cr0 & X86_CR0_TS) )
- {
- __vmread_vcpu(v, GUEST_CR0, &cr0);
- __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);
- __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
- }
-}
-
/* Works only for vcpu == current */
static inline int vmx_paging_enabled(struct vcpu *v)
{
diff -r 5d42f6f0a187 -r d20e1835c24b xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/include/asm-x86/processor.h Tue Aug 15 18:20:03 2006 +0100
@@ -277,27 +277,43 @@ static always_inline unsigned int cpuid_
}
-#define read_cr0() ({ \
- unsigned long __dummy; \
- __asm__( \
- "mov %%cr0,%0\n\t" \
- :"=r" (__dummy)); \
- __dummy; \
-})
-
-#define write_cr0(x) \
- __asm__("mov %0,%%cr0": :"r" ((unsigned long)x));
-
-#define read_cr4() ({ \
- unsigned long __dummy; \
- __asm__( \
- "mov %%cr4,%0\n\t" \
- :"=r" (__dummy)); \
- __dummy; \
-})
-
-#define write_cr4(x) \
- __asm__("mov %0,%%cr4": :"r" ((unsigned long)x));
+
+static inline unsigned long read_cr0(void)
+{
+ unsigned long __cr0;
+ __asm__("mov %%cr0,%0\n\t" :"=r" (__cr0));
+ return __cr0;
+}
+
+static inline void write_cr0(unsigned long val)
+{
+ __asm__("mov %0,%%cr0": :"r" ((unsigned long)val));
+}
+
+static inline unsigned long read_cr4(void)
+{
+ unsigned long __cr4;
+ __asm__("mov %%cr4,%0\n\t" :"=r" (__cr4));
+ return __cr4;
+}
+
+static inline void write_cr4(unsigned long val)
+{
+ __asm__("mov %0,%%cr4": :"r" ((unsigned long)val));
+}
+
+
+/* Clear and set 'TS' bit respectively */
+static inline void clts(void)
+{
+ __asm__ __volatile__ ("clts");
+}
+
+static inline void stts(void)
+{
+ write_cr0(X86_CR0_TS|read_cr0());
+}
+
/*
* Save the cr4 feature set we're using (ie
diff -r 5d42f6f0a187 -r d20e1835c24b xen/include/asm-x86/system.h
--- a/xen/include/asm-x86/system.h Tue Aug 15 17:03:06 2006 +0100
+++ b/xen/include/asm-x86/system.h Tue Aug 15 18:20:03 2006 +0100
@@ -10,10 +10,6 @@
__asm__ __volatile__ ( "movw %%" STR(name) ",%0" : "=r" (__sel) ); \
__sel; \
})
-
-/* Clear and set 'TS' bit respectively */
-#define clts() __asm__ __volatile__ ("clts")
-#define stts() write_cr0(X86_CR0_TS|read_cr0())
#define wbinvd() \
__asm__ __volatile__ ("wbinvd": : :"memory");