ChangeSet 1.1336, 2005/04/20 11:25:15+01:00, arun.sharma@xxxxxxxxx
[PATCH] x86-64-enable-vmx.patch
Enable CONFIG_VMX for x86_64.
- Provide vmexit/entry handling code based on the x86_32 code
- Fix find_highest_vector for 64-bit (Benjamin Liu)
Signed-off-by: Arun Sharma <arun.sharma@xxxxxxxxx>
arch/x86/Makefile | 4 -
arch/x86/vmx.c | 5 +
arch/x86/vmx_intercept.c | 2
arch/x86/vmx_io.c | 65 +++++++++++++++++++---
arch/x86/vmx_vmcs.c | 13 ++++
arch/x86/x86_64/entry.S | 133 +++++++++++++++++++++++++++++++++++++++++++++++
include/asm-x86/config.h | 2
7 files changed, 206 insertions(+), 18 deletions(-)
diff -Nru a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
--- a/xen/arch/x86/Makefile 2005-04-20 07:03:28 -04:00
+++ b/xen/arch/x86/Makefile 2005-04-20 07:03:28 -04:00
@@ -7,10 +7,6 @@
OBJS := $(subst $(TARGET_SUBARCH)/asm-offsets.o,,$(OBJS))
-ifneq ($(TARGET_SUBARCH),x86_32)
-OBJS := $(patsubst vmx%.o,,$(OBJS))
-endif
-
ifneq ($(crash_debug),y)
OBJS := $(patsubst cdb%.o,,$(OBJS))
endif
diff -Nru a/xen/arch/x86/vmx.c b/xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c 2005-04-20 07:03:28 -04:00
+++ b/xen/arch/x86/vmx.c 2005-04-20 07:03:28 -04:00
@@ -959,7 +959,12 @@
struct exec_domain *d = current;
local_irq_disable();
+#ifdef __i386__
asm volatile("movl %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2));
+#else
+ asm volatile("movq %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2));
+#endif
+
}
#endif /* CONFIG_VMX */
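[The #ifdef pair above (like the dr7/cr0/cr4 reads in vmx_vmcs.c further down)
picks the mov suffix by hand. Moves to and from control registers take their
operand size from the general-purpose register operand, so an unsuffixed
mnemonic assembles correctly on both subarchitectures when the operand is an
unsigned long. A minimal sketch with a hypothetical write_cr2() helper, not
part of the patch:

    /* Sketch only: the assembler infers the operand width from the
     * register GCC picks for the "r" constraint, so one unsuffixed
     * "mov" covers both i386 and x86_64. */
    static inline void write_cr2(unsigned long val)
    {
        asm volatile ("mov %0, %%cr2" : : "r" (val));
    }
]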
diff -Nru a/xen/arch/x86/vmx_intercept.c b/xen/arch/x86/vmx_intercept.c
--- a/xen/arch/x86/vmx_intercept.c 2005-04-20 07:03:28 -04:00
+++ b/xen/arch/x86/vmx_intercept.c 2005-04-20 07:03:28 -04:00
@@ -37,7 +37,7 @@
struct exec_domain *d = current;
struct vmx_handler_t *handler =
&(d->arch.arch_vmx.vmx_platform.vmx_handler);
int i;
- unsigned addr, offset;
+ unsigned long addr, offset;
for (i = 0; i < handler->num_slot; i++) {
addr = handler->hdl_list[i].addr;
offset = handler->hdl_list[i].offset;
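[Widening addr and offset matters on x86_64, where unsigned is still 32 bits:
presumably the handler list can hold addresses that do not fit in 32 bits, and
a plain unsigned would truncate them before the range check. A hypothetical
sketch of the failure mode:

    /* Sketch only: 0x100000000 (4GB) does not fit in 32 bits. */
    unsigned      addr32 = 0x100000000UL;  /* truncated to 0x0 */
    unsigned long addr64 = 0x100000000UL;  /* value preserved  */
]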
diff -Nru a/xen/arch/x86/vmx_io.c b/xen/arch/x86/vmx_io.c
--- a/xen/arch/x86/vmx_io.c 2005-04-20 07:03:28 -04:00
+++ b/xen/arch/x86/vmx_io.c 2005-04-20 07:03:28 -04:00
@@ -169,6 +169,17 @@
break;
}
}
+#else
+static void load_xen_regs(struct xen_regs *regs)
+{
+ /* XXX: TBD */
+ return;
+}
+static void set_reg_value (int size, int index, int seg, struct xen_regs *regs, long value)
+{
+ /* XXX: TBD */
+ return;
+}
#endif
void vmx_io_assist(struct exec_domain *ed)
@@ -271,7 +282,8 @@
}
}
-static inline int __fls(unsigned long word)
+#ifdef __i386__
+static inline int __fls(u32 word)
{
int bit;
@@ -280,26 +292,57 @@
:"rm" (word));
return word ? bit : -1;
}
+#else
+#define __fls(x) generic_fls(x)
+static __inline__ int generic_fls(u32 x)
+{
+ int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+#endif
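[generic_fls() finds the highest set bit by binary narrowing: each test asks
whether the upper half of the remaining window is empty and, if so, shifts
the word up and lowers r accordingly. A worked trace for
generic_fls(0x00010000), i.e. bit 16 set:

    /* x = 0x00010000, r = 32                                  */
    /* x & 0xffff0000 != 0  -> keep x,         r stays 32      */
    /* x & 0xff000000 == 0  -> x = 0x01000000, r = 24          */
    /* x & 0xf0000000 == 0  -> x = 0x10000000, r = 20          */
    /* x & 0xc0000000 == 0  -> x = 0x40000000, r = 18          */
    /* x & 0x80000000 == 0  -> x = 0x80000000, r = 17          */
    /* return 17: the 1-based position of bit 16               */
]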
/* Simple minded Local APIC priority implementation. Fix later */
-static __inline__ int find_highest_irq(unsigned long *pintr)
+static __inline__ int find_highest_irq(u32 *pintr)
{
if (pintr[7])
- return __fls(pintr[7]) + (256-32*1);
+ return __fls(pintr[7]) + (255-32*1);
if (pintr[6])
- return __fls(pintr[6]) + (256-32*2);
+ return __fls(pintr[6]) + (255-32*2);
if (pintr[5])
- return __fls(pintr[5]) + (256-32*3);
+ return __fls(pintr[5]) + (255-32*3);
if (pintr[4])
- return __fls(pintr[4]) + (256-32*4);
+ return __fls(pintr[4]) + (255-32*4);
if (pintr[3])
- return __fls(pintr[3]) + (256-32*5);
+ return __fls(pintr[3]) + (255-32*5);
if (pintr[2])
- return __fls(pintr[2]) + (256-32*6);
+ return __fls(pintr[2]) + (255-32*6);
if (pintr[1])
- return __fls(pintr[1]) + (256-32*7);
- return __fls(pintr[0]);
+ return __fls(pintr[1]) + (255-32*7);
+ return (__fls(pintr[0])-1);
}
/*
@@ -317,7 +360,7 @@
domain_crash_synchronous();
}
- return find_highest_irq(&vio->vp_intr[0]);
+ return find_highest_irq((unsigned int *)&vio->vp_intr[0]);
}
static inline void clear_highest_bit(struct exec_domain *d, int vector)
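[The offsets change from (256-32*n) to (255-32*n), and the pintr[0] case gains
a -1, because generic_fls() is 1-based: generic_fls(1u<<31) returns 32, where
the bsr instruction behind the i386 __fls() returns the 0-based index 31.
With the 1-based convention, each 32-bit word of the 256-bit pending-interrupt
bitmap maps back to its vector range as in this sketch (assuming vectors
224..255 live in pintr[7]):

    /* Sketch: pintr[7] holds vectors 224..255; with generic_fls()
     * returning 1..32, the +223 bias recovers the vector number. */
    int vector = generic_fls(pintr[7]) + (255 - 32*1);  /* 224..255 */

Note that the i386 bsr path keeps its 0-based convention, so whether the
shared constants still line up for x86_32 is worth double-checking.]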
diff -Nru a/xen/arch/x86/vmx_vmcs.c b/xen/arch/x86/vmx_vmcs.c
--- a/xen/arch/x86/vmx_vmcs.c 2005-04-20 07:03:28 -04:00
+++ b/xen/arch/x86/vmx_vmcs.c 2005-04-20 07:03:28 -04:00
@@ -327,7 +327,11 @@
error |= __vmwrite(GUEST_EFLAGS, eflags);
error |= __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
+#ifdef __i386__
__asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (dr7));
+#else
+ __asm__ __volatile__ ("movq %%dr7, %0\n" : "=r" (dr7));
+#endif
error |= __vmwrite(GUEST_DR7, dr7);
error |= __vmwrite(GUEST_VMCS0, 0xffffffff);
error |= __vmwrite(GUEST_VMCS1, 0xffffffff);
@@ -363,12 +367,21 @@
host_env->idtr_base = desc.address;
error |= __vmwrite(HOST_IDTR_BASE, host_env->idtr_base);
+#ifdef __i386__
__asm__ __volatile__ ("movl %%cr0,%0" : "=r" (crn) : );
+#else
+ __asm__ __volatile__ ("movq %%cr0,%0" : "=r" (crn) : );
+#endif
+
host_env->cr0 = crn;
error |= __vmwrite(HOST_CR0, crn); /* same CR0 */
/* CR3 is set in vmx_final_setup_hostos */
+#ifdef __i386__
__asm__ __volatile__ ("movl %%cr4,%0" : "=r" (crn) : );
+#else
+ __asm__ __volatile__ ("movq %%cr4,%0" : "=r" (crn) : );
+#endif
host_env->cr4 = crn;
error |= __vmwrite(HOST_CR4, crn);
error |= __vmwrite(HOST_EIP, (unsigned long) vmx_asm_vmexit_handler);
diff -Nru a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S 2005-04-20 07:03:28 -04:00
+++ b/xen/arch/x86/x86_64/entry.S 2005-04-20 07:03:28 -04:00
@@ -151,6 +151,139 @@
movb $1,VCPUINFO_upcall_mask(%rax) # Upcalls masked during delivery
jmp test_all_events
+#ifdef CONFIG_VMX
+/*
+ * At VMExit time the processor saves the guest selectors, rsp, rip,
+ * and rflags. Therefore we don't save them, but simply decrement
+ * the kernel stack pointer to make it consistent with the stack frame
+ * left by a normal interruption. The host's rflags is not saved by
+ * VMX, so we set it to a fixed value.
+ *
+ * We also need the room, especially because the orig_eax field is used
+ * by do_IRQ(). Compared to xen_regs, we skip pushing the following:
+ * (13) u64 gs_base_user;
+ * (12) u64 gs_base_kernel;
+ * (11) u64 fs_base;
+ * (10) u64 gs;
+ * (9) u64 fs;
+ * (8) u64 ds;
+ * (7) u64 es;
+ * <- get_stack_bottom() (= HOST_ESP)
+ * (6) u64 ss;
+ * (5) u64 rsp;
+ * (4) u64 rflags;
+ * (3) u64 cs;
+ * (2) u64 rip;
+ * (2/1) u32 entry_vector;
+ * (1/1) u32 error_code;
+ * However, get_stack_bottom() actually returns 64 bytes before the real
+ * bottom of the stack, to leave space for the domain pointer, DS, ES,
+ * FS, GS. Therefore, we effectively skip 6 register-sized slots.
+ */
+#define VMX_MONITOR_RFLAGS 0x202 /* IF on */
+#define NR_SKIPPED_REGS 6 /* See the above explanation */
+#define VMX_SAVE_ALL_NOSEGREGS \
+ pushq $VMX_MONITOR_RFLAGS; \
+ popfq; \
+ subq $(NR_SKIPPED_REGS*8), %rsp; \
+ pushq %rdi; \
+ pushq %rsi; \
+ pushq %rdx; \
+ pushq %rcx; \
+ pushq %rax; \
+ pushq %r8; \
+ pushq %r9; \
+ pushq %r10; \
+ pushq %r11; \
+ pushq %rbx; \
+ pushq %rbp; \
+ pushq %r12; \
+ pushq %r13; \
+ pushq %r14; \
+ pushq %r15; \
+
+ENTRY(vmx_asm_vmexit_handler)
+ /* selectors are restored/saved by VMX */
+ VMX_SAVE_ALL_NOSEGREGS
+ call SYMBOL_NAME(vmx_vmexit_handler)
+ jmp vmx_asm_do_resume
+
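[The pushes in VMX_SAVE_ALL_NOSEGREGS run in the reverse of the field order
that the comment implies for xen_regs, so %rsp ends up pointing at the base
of the frame, and the six skipped slots are ss, rsp, rflags, cs, rip, and the
error_code/entry_vector pair. The sketch below reconstructs the assumed
layout from the comment; it is not quoted from the Xen headers, so check the
real struct xen_regs before relying on it:

    typedef unsigned long u64;
    typedef unsigned int  u32;

    struct xen_regs_sketch {            /* lowest address first    */
        u64 r15, r14, r13, r12, rbp, rbx;
        u64 r11, r10, r9, r8, rax, rcx;
        u64 rdx, rsi, rdi;              /* rdi pushed first        */
        u32 error_code, entry_vector;   /* skipped slot 1 of 6     */
        u64 rip, cs, rflags, rsp, ss;   /* skipped 5, saved by VMX */
        u64 es, ds, fs, gs;             /* above HOST_ESP          */
        u64 fs_base, gs_base_kernel, gs_base_user;
    };
]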
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog