# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID e2e7f4c17b772c67fe56621f1779051328b8c926
# Parent ebd289e3d2052799c4355f569a46c83d0e4d6635
[HVM] Provide common support function for HLT emulation: hvm_hlt().
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
xen/arch/x86/hvm/hvm.c | 30 ++++++++++++++++++++++++
xen/arch/x86/hvm/svm/svm.c | 43 ++++-------------------------------
xen/arch/x86/hvm/svm/vmcb.c | 3 --
xen/arch/x86/hvm/vmx/vmcs.c | 2 -
xen/arch/x86/hvm/vmx/vmx.c | 45 ++++---------------------------------
xen/include/asm-x86/hvm/support.h | 2 +
xen/include/asm-x86/hvm/svm/vmcb.h | 1
xen/include/asm-x86/hvm/vcpu.h | 7 ++++-
xen/include/asm-x86/hvm/vmx/vmcs.h | 1
9 files changed, 50 insertions(+), 84 deletions(-)
diff -r ebd289e3d205 -r e2e7f4c17b77 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Wed Aug 16 14:26:59 2006 +0100
+++ b/xen/arch/x86/hvm/hvm.c Wed Aug 16 14:27:30 2006 +0100
@@ -345,6 +345,36 @@ int cpu_get_interrupt(struct vcpu *v, in
return -1;
}
+void hvm_hlt(unsigned long rflags)
+{
+ struct vcpu *v = current;
+ struct periodic_time *pt = &v->domain->arch.hvm_domain.pl_time.periodic_tm;
+ s_time_t next_pit = -1, next_wakeup;
+
+ /*
+ * Detect machine shutdown. Only do this for vcpu 0, to avoid potentially
+ * shutting down the domain early. If we halt with interrupts disabled,
+ * that's a pretty sure sign that we want to shut down. In a real
+ * processor, NMIs are the only way to break out of this.
+ */
+ if ( (v->vcpu_id == 0) && !(rflags & X86_EFLAGS_IF) )
+ {
+ printk("D%d: HLT with interrupts disabled -- shutting down.\n",
+ current->domain->domain_id);
+ domain_shutdown(current->domain, SHUTDOWN_poweroff);
+ return;
+ }
+
+ if ( !v->vcpu_id )
+ next_pit = get_scheduled(v, pt->irq, pt);
+ next_wakeup = get_apictime_scheduled(v);
+ if ( (next_pit != -1 && next_pit < next_wakeup) || next_wakeup == -1 )
+ next_wakeup = next_pit;
+ if ( next_wakeup != - 1 )
+ set_timer(&current->arch.hvm_vcpu.hlt_timer, next_wakeup);
+ do_sched_op_compat(SCHEDOP_block, 0);
+}
+
/*
* Copy from/to guest virtual.
*/
diff -r ebd289e3d205 -r e2e7f4c17b77 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Wed Aug 16 14:26:59 2006 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Wed Aug 16 14:27:30 2006 +0100
@@ -835,7 +835,7 @@ static void svm_relinquish_guest_resourc
destroy_vmcb(&v->arch.hvm_svm);
free_monitor_pagetable(v);
- kill_timer(&v->arch.hvm_svm.hlt_timer);
+ kill_timer(&v->arch.hvm_vcpu.hlt_timer);
if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
{
kill_timer( &(VLAPIC(v)->vlapic_timer) );
@@ -863,7 +863,7 @@ static void svm_migrate_timers(struct vc
if ( pt->enabled ) {
migrate_timer( &pt->timer, v->processor );
- migrate_timer( &v->arch.hvm_svm.hlt_timer, v->processor );
+ migrate_timer( &v->arch.hvm_vcpu.hlt_timer, v->processor );
}
if ( hvm_apic_support(v->domain) && VLAPIC( v ))
migrate_timer( &(VLAPIC(v)->vlapic_timer ), v->processor );
@@ -2144,47 +2144,16 @@ done:
}
-/*
- * Need to use this exit to reschedule
- */
static inline void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
{
- struct vcpu *v = current;
- struct periodic_time *pt =
- &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
- s_time_t next_pit = -1, next_wakeup;
-
__update_guest_eip(vmcb, 1);
- /* check for interrupt not handled or new interrupt */
- if ( vmcb->vintr.fields.irq || cpu_has_pending_irq(v) )
+ /* Check for interrupt not handled or new interrupt. */
+ if ( (vmcb->rflags & X86_EFLAGS_IF) &&
+ (vmcb->vintr.fields.irq || cpu_has_pending_irq(current)) )
return;
- /* Detect machine shutdown. Only do this for vcpu 0, to avoid
- potentially shutting down the domain early. */
- if (v->vcpu_id == 0) {
- unsigned long rflags = vmcb->rflags;
- /* If we halt with interrupts disabled, that's a pretty sure
- sign that we want to shut down. In a real processor, NMIs
- are the only way to break out of this. Our SVM code won't
- deliver interrupts, but will wake it up whenever one is
- pending... */
- if(!(rflags & X86_EFLAGS_IF)) {
- printk("D%d: HLT with interrupts enabled @0x%lx Shutting down.\n",
- current->domain->domain_id, (unsigned long)vmcb->rip);
- domain_shutdown(current->domain, SHUTDOWN_poweroff);
- return;
- }
- }
-
- if ( !v->vcpu_id )
- next_pit = get_scheduled(v, pt->irq, pt);
- next_wakeup = get_apictime_scheduled(v);
- if ( (next_pit != -1 && next_pit < next_wakeup) || next_wakeup == -1 )
- next_wakeup = next_pit;
- if ( next_wakeup != - 1 )
- set_timer(&current->arch.hvm_svm.hlt_timer, next_wakeup);
- do_sched_op_compat(SCHEDOP_block, 0);
+ hvm_hlt(vmcb->rflags);
}
diff -r ebd289e3d205 -r e2e7f4c17b77 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c Wed Aug 16 14:26:59 2006 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c Wed Aug 16 14:27:30 2006 +0100
@@ -360,8 +360,7 @@ void svm_do_launch(struct vcpu *v)
if (hvm_apic_support(v->domain))
vlapic_init(v);
- init_timer(&v->arch.hvm_svm.hlt_timer,
- hlt_timer_fn, v, v->processor);
+ init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
vmcb->ldtr.sel = 0;
vmcb->ldtr.base = 0;
diff -r ebd289e3d205 -r e2e7f4c17b77 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c Wed Aug 16 14:26:59 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c Wed Aug 16 14:27:30 2006 +0100
@@ -267,7 +267,7 @@ static void vmx_do_launch(struct vcpu *v
vlapic_init(v);
vmx_set_host_env(v);
- init_timer(&v->arch.hvm_vmx.hlt_timer, hlt_timer_fn, v, v->processor);
+ init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
error |= __vmwrite(GUEST_LDTR_BASE, 0);
diff -r ebd289e3d205 -r e2e7f4c17b77 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Wed Aug 16 14:26:59 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Wed Aug 16 14:27:30 2006 +0100
@@ -134,7 +134,7 @@ static void vmx_relinquish_guest_resourc
if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
continue;
free_monitor_pagetable(v);
- kill_timer(&v->arch.hvm_vmx.hlt_timer);
+ kill_timer(&v->arch.hvm_vcpu.hlt_timer);
if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
{
kill_timer(&VLAPIC(v)->vlapic_timer);
@@ -496,7 +496,7 @@ void vmx_migrate_timers(struct vcpu *v)
if ( pt->enabled ) {
migrate_timer(&pt->timer, v->processor);
- migrate_timer(&v->arch.hvm_vmx.hlt_timer, v->processor);
+ migrate_timer(&v->arch.hvm_vcpu.hlt_timer, v->processor);
}
if ( hvm_apic_support(v->domain) && VLAPIC(v))
migrate_timer(&(VLAPIC(v)->vlapic_timer), v->processor);
@@ -2049,46 +2049,11 @@ static inline void vmx_do_msr_write(stru
(unsigned long)regs->edx);
}
-/*
- * Need to use this exit to reschedule
- */
void vmx_vmexit_do_hlt(void)
{
- struct vcpu *v = current;
- struct periodic_time *pt =
- &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
- s_time_t next_pit = -1, next_wakeup;
-
-
- /* Detect machine shutdown. Only do this for vcpu 0, to avoid
- potentially shutting down the domain early. */
- if (v->vcpu_id == 0) {
- unsigned long rflags;
-
- __vmread(GUEST_RFLAGS, &rflags);
- /* If we halt with interrupts disabled, that's a pretty sure
- sign that we want to shut down. In a real processor, NMIs
- are the only way to break out of this. Our VMX code won't
- deliver interrupts, but will wake it up whenever one is
- pending... */
- if(!(rflags & X86_EFLAGS_IF)) {
- unsigned long rip;
- __vmread(GUEST_RIP, &rip);
- printk("D%d: HLT with interrupts enabled @0x%lx Shutting down.\n",
- current->domain->domain_id, rip);
- domain_shutdown(current->domain, SHUTDOWN_poweroff);
- return;
- }
- }
-
- if ( !v->vcpu_id )
- next_pit = get_scheduled(v, pt->irq, pt);
- next_wakeup = get_apictime_scheduled(v);
- if ( (next_pit != -1 && next_pit < next_wakeup) || next_wakeup == -1 )
- next_wakeup = next_pit;
- if ( next_wakeup != - 1 )
- set_timer(&current->arch.hvm_vmx.hlt_timer, next_wakeup);
- do_sched_op_compat(SCHEDOP_block, 0);
+ unsigned long rflags;
+ __vmread(GUEST_RFLAGS, &rflags);
+ hvm_hlt(rflags);
}
static inline void vmx_vmexit_do_extint(struct cpu_user_regs *regs)
diff -r ebd289e3d205 -r e2e7f4c17b77 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Wed Aug 16 14:26:59 2006 +0100
+++ b/xen/include/asm-x86/hvm/support.h Wed Aug 16 14:27:30 2006 +0100
@@ -148,4 +148,6 @@ void hvm_do_hypercall(struct cpu_user_re
void hvm_prod_vcpu(struct vcpu *v);
+void hvm_hlt(unsigned long rflags);
+
#endif /* __ASM_X86_HVM_SUPPORT_H__ */
diff -r ebd289e3d205 -r e2e7f4c17b77 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h Wed Aug 16 14:26:59 2006 +0100
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h Wed Aug 16 14:27:30 2006 +0100
@@ -448,7 +448,6 @@ struct arch_svm_struct {
unsigned long cpu_cr2;
unsigned long cpu_cr3;
unsigned long cpu_state;
- struct timer hlt_timer; /* hlt ins emulation wakeup timer */
};
extern struct vmcb_struct *alloc_vmcb(void);
diff -r ebd289e3d205 -r e2e7f4c17b77 xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h Wed Aug 16 14:26:59 2006 +0100
+++ b/xen/include/asm-x86/hvm/vcpu.h Wed Aug 16 14:27:30 2006 +0100
@@ -43,15 +43,18 @@ struct hvm_vcpu {
/* Flags */
int flag_dr_dirty;
+ /* hlt ins emulation wakeup timer */
+ struct timer hlt_timer;
+
union {
struct arch_vmx_struct vmx;
struct arch_svm_struct svm;
} u;
};
-#define ARCH_HVM_IO_WAIT 1 /* Waiting for I/O completion */
+#define ARCH_HVM_IO_WAIT 1 /* Waiting for I/O completion */
-#define HVM_CONTEXT_STACK_BYTES (offsetof(struct cpu_user_regs,
error_code))
+#define HVM_CONTEXT_STACK_BYTES (offsetof(struct cpu_user_regs, error_code))
#endif /* __ASM_X86_HVM_VCPU_H__ */
diff -r ebd289e3d205 -r e2e7f4c17b77 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h Wed Aug 16 14:26:59 2006 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h Wed Aug 16 14:27:30 2006 +0100
@@ -93,7 +93,6 @@ struct arch_vmx_struct {
unsigned long cpu_based_exec_control;
struct vmx_msr_state msr_content;
void *io_bitmap_a, *io_bitmap_b;
- struct timer hlt_timer; /* hlt ins emulation wakeup timer */
};
#define vmx_schedule_tail(next) \
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|