Here is the main hypervisor/hvmloader patch.
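For reference, a 32-bit HVM guest can invoke one of these hypercalls directly. The sketch below is illustrative only (not part of the patch); it follows the register convention hvm_hypercall() expects for 32-bit guests (hypercall index in eax, arguments in ebx/ecx/edx/esi/edi, return code back in eax) and reuses the VMMCALL opcode bytes already emitted by hvmloader's vmmcall(). On VMX the VMCALL encoding would be used instead. The wrapper name and two-argument form are hypothetical.

/*
 * Illustrative sketch only -- not part of this patch.
 * Issue a two-argument hypercall from a 32-bit HVM guest using the
 * register layout read by hvm_hypercall() (eax = index, ebx/ecx = args,
 * result returned in eax) and the VMMCALL bytes used by hvmloader.
 */
static inline unsigned long
hvm_hypercall2(unsigned long index, unsigned long a1, unsigned long a2)
{
    unsigned long rc;

    __asm__ __volatile__(
        ".byte 0x0F,0x01,0xD9"      /* VMMCALL */
        : "=a" (rc)
        : "a" (index), "b" (a1), "c" (a2)
        : "memory");

    return rc;
}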
Signed-off-by: Steve Ofsthun <sofsthun@xxxxxxxxxxxxxxx>
Steve
--
Steve Ofsthun - Virtual Iron Software, Inc.
--- ./tools/firmware/hvmloader/hvmloader.c 2006-04-07 14:39:43.000000000 -0400
+++ ../xen-unstable-test64/./tools/firmware/hvmloader/hvmloader.c 2006-04-12 10:28:18.000000000 -0400
@@ -23,15 +23,13 @@
*/
#include "roms.h"
#include "../acpi/acpi2_0.h" /* for ACPI_PHYSICAL_ADDRESS */
+#include <asm/hvm/svm/vmmcall.h>
/* memory map */
#define VGABIOS_PHYSICAL_ADDRESS 0x000C0000
#define VMXASSIST_PHYSICAL_ADDRESS 0x000D0000
#define ROMBIOS_PHYSICAL_ADDRESS 0x000F0000
-/* invoke SVM's paged realmode support */
-#define SVM_VMMCALL_RESET_TO_REALMODE 0x00000001
-
/*
* C runtime start off
*/
@@ -138,7 +136,7 @@
__asm__ __volatile__(
".byte 0x0F,0x01,0xD9"
: "=a" (eax)
- : "a"(0x58454E00), /* XEN\0 key */
+ : "a"(VMMCALL_MAGIC), /* XEN\0 key */
"b"(ebx), "c"(ecx), "d"(edx), "D"(edi), "S"(esi)
);
return eax;
@@ -192,7 +190,7 @@
if (check_amd()) {
/* AMD implies this is SVM */
puts("SVM go ...\n");
- vmmcall(SVM_VMMCALL_RESET_TO_REALMODE, 0, 0, 0, 0);
+ vmmcall(VMMCALL_RESET_TO_REALMODE, 0, 0, 0, 0);
} else {
puts("Loading VMXAssist ...\n");
memcpy((void *)VMXASSIST_PHYSICAL_ADDRESS,
--- ./tools/firmware/hvmloader/Makefile 2006-04-12 09:07:37.000000000 -0400
+++ ../xen-unstable-test64/./tools/firmware/hvmloader/Makefile 2006-04-12 10:26:39.000000000 -0400
@@ -29,7 +29,7 @@
LOADADDR = 0x100000
DEFINES =-DDEBUG
-XENINC =-I$(XEN_ROOT)/tools/libxc
+XENINC =-I$(XEN_ROOT)/tools/libxc -I$(XEN_ROOT)/xen/include
OBJECTS = hvmloader.o acpi_madt.o
--- ./xen/arch/x86/hvm/hvm.c 2006-04-07 14:39:47.000000000 -0400
+++ ../xen-unstable-test64/./xen/arch/x86/hvm/hvm.c 2006-04-12 11:16:38.000000000 -0400
@@ -357,6 +357,94 @@
}
/*
+ * Process HVM guest hypercalls.
+ */
+void hvm_hypercall(struct cpu_user_regs *pregs, unsigned long rip)
+{
+ unsigned long rc;
+ unsigned long cmd, a1, a2, a3, a4, a5;
+
+ /*
+ * Retrieve the arguments from the appropriate registers.
+ */
+#if BITS_PER_LONG == 64
+ /*
+ * 64-bit hypervisor must handle 32-bit and 64-bit guests.
+ */
+ if (current->domain->arch.ops->guest_paging_levels != PAGING_L4) {
+ /*
+ * 32-bit guest.
+ */
+ cmd = pregs->eax;
+ a1 = pregs->ebx;
+ a2 = pregs->ecx;
+ a3 = pregs->edx;
+ a4 = pregs->esi;
+ a5 = pregs->edi;
+ } else {
+ /*
+ * 64-bit guest.
+ */
+ cmd = pregs->rax;
+ a1 = pregs->rdi;
+ a2 = pregs->rsi;
+ a3 = pregs->rdx;
+ a4 = pregs->r10;
+ a5 = pregs->r8;
+ }
+#else /* BITS_PER_LONG != 64 */
+ /*
+ * 32-bit hypervisor must handle 32-bit guests.
+ */
+ cmd = pregs->eax;
+ a1 = pregs->ebx;
+ a2 = pregs->ecx;
+ a3 = pregs->edx;
+ a4 = pregs->esi;
+ a5 = pregs->edi;
+#endif /* BITS_PER_LONG != 64 */
+ /*
+ * Don't allow user-mode hypercalls.
+ */
+ if (ring_3(pregs)) {
+ rc = -EPERM;
+ goto out;
+ }
+ /*
+ * Check the hypercall index.
+ */
+ if (cmd >= NR_hypercalls) {
+ rc = -ENOSYS;
+ goto out;
+ }
+#ifdef HYPERCALL_DEBUG
+ printf("VMCALL[%ld] @ %08lx (%08lx,%08lx,%08lx,%08lx,%08lx)\n",
+ cmd, rip, a1, a2, a3, a4, a5);
+#endif
+
+ /*
+ * Execute the hypercall.
+ */
+ rc = hypercall_table[cmd](a1, a2, a3, a4, a5);
+
+out:
+#ifdef HYPERCALL_DEBUG
+ if (rc)
+ printf("VMCALL[%ld] @ %08lx (%08lx,%08lx,%08lx,%08lx,%08lx)"
+ " returns %ld\n", cmd, rip, a1, a2, a3, a4, a5, rc);
+#endif
+ /*
+ * Save the result in the proper return value register,
+ */
+#if BITS_PER_LONG == 64
+ pregs->rax = rc;
+#else
+ pregs->eax = rc;
+#endif
+ return;
+}
+
+/*
* Local variables:
* mode: C
* c-set-style: "BSD"
--- ./xen/arch/x86/hvm/svm/svm.c 2006-04-07 14:39:47.000000000 -0400
+++ ../xen-unstable-test64/./xen/arch/x86/hvm/svm/svm.c 2006-04-11 17:49:56.000000000 -0400
@@ -2003,6 +2003,7 @@
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
int inst_len;
+ unsigned long eip;
ASSERT(vmcb);
ASSERT(regs);
@@ -2010,33 +2011,37 @@
inst_len = __get_instruction_length(vmcb, INSTR_VMCALL, NULL);
ASSERT(inst_len > 0);
- /* VMMCALL sanity check */
- if (vmcb->cpl > get_vmmcall_cpl(regs->edi))
- {
- printf("VMMCALL CPL check failed\n");
- return -1;
- }
+ eip = vmcb->rip;
- /* handle the request */
- switch (regs->edi)
+ /* VMMCALL sanity check */
+ if (regs->eax == VMMCALL_MAGIC)
{
- case VMMCALL_RESET_TO_REALMODE:
- if (svm_do_vmmcall_reset_to_realmode(v, regs))
- {
- printf("svm_do_vmmcall_reset_to_realmode() failed\n");
- return -1;
- }
-
- /* since we just reset the VMCB, return without adjusting the eip */
- return 0;
- case VMMCALL_DEBUG:
- printf("DEBUG features not implemented yet\n");
- break;
- default:
- break;
+ if (vmcb->cpl > get_vmmcall_cpl(regs->edi))
+ {
+ /* Don't print anything on user mode errors */
+ return -1;
+ }
+
+ /* handle the request */
+ switch (regs->edi)
+ {
+ case VMMCALL_RESET_TO_REALMODE:
+ if (svm_do_vmmcall_reset_to_realmode(v, regs))
+ {
+ printf("svm_do_vmmcall_reset_to_realmode() failed\n");
+ return -1;
+ }
+
+ /* since we just reset the VMCB, return without adjusting the eip */
+ return 0;
+ case VMMCALL_DEBUG:
+ break;
+ default:
+ break;
+ }
}
- hvm_print_line(v, regs->eax); /* provides the current domain */
+ hvm_hypercall(regs, eip);
__update_guest_eip(vmcb, inst_len);
return 0;
--- ./xen/arch/x86/hvm/vmx/vmx.c 2006-04-12 09:07:38.000000000 -0400
+++ ../xen-unstable-test64/./xen/arch/x86/hvm/vmx/vmx.c 2006-04-12 09:08:12.000000000 -0400
@@ -2207,16 +2207,15 @@
__update_guest_eip(inst_len);
break;
}
-#if 0 /* keep this for debugging */
case EXIT_REASON_VMCALL:
__get_instruction_length(inst_len);
__vmread(GUEST_RIP, &eip);
__vmread(EXIT_QUALIFICATION, &exit_qualification);
- hvm_print_line(v, regs.eax); /* provides the current domain */
+ hvm_hypercall(&regs, eip);
+
__update_guest_eip(inst_len);
break;
-#endif
case EXIT_REASON_CR_ACCESS:
{
__vmread(GUEST_RIP, &eip);
@@ -2257,7 +2256,6 @@
case EXIT_REASON_MWAIT_INSTRUCTION:
__hvm_bug(&regs);
break;
- case EXIT_REASON_VMCALL:
case EXIT_REASON_VMCLEAR:
case EXIT_REASON_VMLAUNCH:
case EXIT_REASON_VMPTRLD:
--- ./xen/arch/x86/usercopy.c 2006-04-07 14:39:47.000000000 -0400
+++ ../xen-unstable-test64/./xen/arch/x86/usercopy.c 2006-04-11 15:52:49.000000000 -0400
@@ -91,6 +91,8 @@
return (unsigned)__n;
}
+#include <asm/hvm/support.h>
+
/**
* copy_to_user: - Copy a block of data into user space.
* @to: Destination address, in user space.
@@ -107,6 +109,11 @@
unsigned long
copy_to_user(void __user *to, const void *from, unsigned n)
{
+ if (hvm_guest(current) && !hypervisor_va(to)) {
+ if (hvm_copy((void *)from, (unsigned long)to, n, HVM_COPY_OUT))
+ return 0;
+ return n;
+ }
if (access_ok(to, n))
n = __copy_to_user(to, from, n);
return n;
@@ -131,6 +138,11 @@
unsigned long
copy_from_user(void *to, const void __user *from, unsigned n)
{
+ if (hvm_guest(current) && !hypervisor_va(from)) {
+ if (hvm_copy((void *)to, (unsigned long)from, n, HVM_COPY_IN))
+ return 0;
+ return n;
+ }
if (access_ok(from, n))
n = __copy_from_user(to, from, n);
else
--- ./xen/include/asm-x86/hvm/support.h 2006-04-07 14:39:49.000000000 -0400
+++ ../xen-unstable-test64/./xen/include/asm-x86/hvm/support.h 2006-04-11 08:46:07.000000000 -0400
@@ -149,4 +149,8 @@
extern void hvm_print_line(struct vcpu *v, const char c);
extern void hlt_timer_fn(void *data);
+typedef unsigned long (*hypercall_ptr_t)(unsigned long, ...);
+extern hypercall_ptr_t hypercall_table[];
+extern void hvm_hypercall(struct cpu_user_regs *pregs, unsigned long rip);
+
#endif /* __ASM_X86_HVM_SUPPORT_H__ */
--- ./xen/include/asm-x86/hvm/svm/vmmcall.h 2006-04-07 14:39:49.000000000 -0400
+++ ../xen-unstable-test64/./xen/include/asm-x86/hvm/svm/vmmcall.h 2006-04-11 17:32:36.000000000 -0400
@@ -22,6 +22,9 @@
#ifndef __ASM_X86_HVM_SVM_VMMCALL_H__
#define __ASM_X86_HVM_SVM_VMMCALL_H__
+/* VMMCALL signature field */
+#define VMMCALL_MAGIC 0x58454E00 /* XEN\0 key */
+
/* VMMCALL command fields */
#define VMMCALL_CODE_CPL_MASK 0xC0000000
#define VMMCALL_CODE_MBZ_MASK 0x3FFF0000
--- ./xen/include/public/arch-x86_32.h 2006-04-07 14:39:49.000000000 -0400
+++ ../xen-unstable-test64/./xen/include/public/arch-x86_32.h 2006-04-11 15:52:49.000000000 -0400
@@ -80,6 +80,8 @@
#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
#endif
+#define hypervisor_va(va) (__HYPERVISOR_VIRT_START <= (unsigned long)(va))
+
#ifndef machine_to_phys_mapping
#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
#endif
--- ./xen/include/public/arch-x86_64.h 2006-04-07 14:39:49.000000000 -0400
+++ ../xen-unstable-test64/./xen/include/public/arch-x86_64.h 2006-04-11 15:52:49.000000000 -0400
@@ -89,6 +89,9 @@
#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END)
#endif
+#define hypervisor_va(va) (__HYPERVISOR_VIRT_START <= (unsigned long)(va) && \
+ (unsigned long)(va) < __HYPERVISOR_VIRT_END)
+
/* Maximum number of virtual CPUs in multi-processor guests. */
#define MAX_VIRT_CPUS 32