Here is the basic plumbing for hypercalls from HVM guests. The changes cover
the following:
o Modify VMX VMCALL exit handler to call the generic hvm_hypercall()
o Modify SVM VMMCALL exit handler to safely handle VMMCALL for hvmloader
else call the generic hvm_hypercall()
o Modify copy_[to/from]_guest to use hvm_copy() for hvm guests. Without
this the hvm domain gets hung and eventually hangs dom0 as well.
o Modify hvmloader to use VMMCALL symbols from vmmcall.h.
With these changes you can make raw hypercalls from HVM guests.
Testing included 32/64 bit guests on both VMX hardware and SVM hardware.
Signed-off-by: Steve Ofsthun <sofsthun@xxxxxxxxxxxxxxx>
Steve
--
Steve Ofsthun - Virtual Iron Software, Inc.
--- ./tools/firmware/hvmloader/hvmloader.c	2006-04-07 14:39:43.000000000 -0400
+++ ../xen-unstable-test64/./tools/firmware/hvmloader/hvmloader.c	2006-04-12 10:28:18.000000000 -0400
@@ -23,15 +23,13 @@
*/
#include "roms.h"
#include "../acpi/acpi2_0.h" /* for ACPI_PHYSICAL_ADDRESS */
+#include <asm/hvm/svm/vmmcall.h>
/* memory map */
#define VGABIOS_PHYSICAL_ADDRESS 0x000C0000
#define VMXASSIST_PHYSICAL_ADDRESS 0x000D0000
#define ROMBIOS_PHYSICAL_ADDRESS 0x000F0000
-/* invoke SVM's paged realmode support */
-#define SVM_VMMCALL_RESET_TO_REALMODE 0x00000001
-
/*
* C runtime start off
*/
@@ -138,7 +136,7 @@
__asm__ __volatile__(
".byte 0x0F,0x01,0xD9"
: "=a" (eax)
- : "a"(0x58454E00), /* XEN\0 key */
+ : "a"(VMMCALL_MAGIC), /* XEN\0 key */
"b"(ebx), "c"(ecx), "d"(edx), "D"(edi), "S"(esi)
);
return eax;
@@ -192,7 +190,7 @@
if (check_amd()) {
/* AMD implies this is SVM */
puts("SVM go ...\n");
- vmmcall(SVM_VMMCALL_RESET_TO_REALMODE, 0, 0, 0, 0);
+ vmmcall(VMMCALL_RESET_TO_REALMODE, 0, 0, 0, 0);
} else {
puts("Loading VMXAssist ...\n");
memcpy((void *)VMXASSIST_PHYSICAL_ADDRESS,
--- ./tools/firmware/hvmloader/Makefile	2006-04-12 09:07:37.000000000 -0400
+++ ../xen-unstable-test64/./tools/firmware/hvmloader/Makefile	2006-04-12 10:26:39.000000000 -0400
@@ -29,7 +29,7 @@
LOADADDR = 0x100000
DEFINES =-DDEBUG
-XENINC =-I$(XEN_ROOT)/tools/libxc
+XENINC =-I$(XEN_ROOT)/tools/libxc -I$(XEN_ROOT)/xen/include
OBJECTS = hvmloader.o acpi_madt.o
--- ./xen/arch/x86/hvm/hvm.c	2006-04-07 14:39:47.000000000 -0400
+++ ../xen-unstable-test64/./xen/arch/x86/hvm/hvm.c	2006-04-12 16:37:56.000000000 -0400
@@ -357,6 +357,94 @@
}
/*
+ * Process HVM guest hypercalls.
+ */
+void hvm_hypercall(struct cpu_user_regs *pregs, unsigned long rip)
+{
+ unsigned long rc;
+ unsigned long cmd, a1, a2, a3, a4, a5;
+
+ /*
+ * Retrieve the arguments from the appropriate registers.
+ */
+#if BITS_PER_LONG == 64
+ /*
+ * 64-bit hypervisor must handle 32-bit and 64-bit guests.
+ */
+ if (current->domain->arch.ops->guest_paging_levels != PAGING_L4) {
+ /*
+ * 32-bit guest.
+ */
+ cmd = pregs->eax;
+ a1 = pregs->ebx;
+ a2 = pregs->ecx;
+ a3 = pregs->edx;
+ a4 = pregs->esi;
+ a5 = pregs->edi;
+ } else {
+ /*
+ * 64-bit guest.
+ */
+ cmd = pregs->rax;
+ a1 = pregs->rdi;
+ a2 = pregs->rsi;
+ a3 = pregs->rdx;
+ a4 = pregs->r10;
+ a5 = pregs->r8;
+ }
+#else /* BITS_PER_LONG != 64 */
+ /*
+ * 32-bit hypervisor must handle 32-bit guests.
+ */
+ cmd = pregs->eax;
+ a1 = pregs->ebx;
+ a2 = pregs->ecx;
+ a3 = pregs->edx;
+ a4 = pregs->esi;
+ a5 = pregs->edi;
+#endif /* BITS_PER_LONG != 64 */
+ /*
+ * Don't allow user-mode hypercalls.
+ */
+ if (ring_3(pregs)) {
+ rc = -EPERM;
+ goto out;
+ }
+ /*
+ * Check the hypercall index.
+ */
+ if (cmd >= NR_hypercalls) {
+ rc = -ENOSYS;
+ goto out;
+ }
+#ifdef HYPERCALL_DEBUG
+ printf("VMCALL[%ld] @ %08lx (%08lx,%08lx,%08lx,%08lx,%08lx)\n",
+ cmd, rip, a1, a2, a3, a4, a5);
+#endif
+
+ /*
+ * Execute the hypercall.
+ */
+ rc = hypercall_table[cmd](a1, a2, a3, a4, a5);
+
+out:
+#ifdef HYPERCALL_DEBUG
+ if (rc)
+ printf("VMCALL[%ld] @ %08lx (%08lx,%08lx,%08lx,%08lx,%08lx)"
+ " returns %ld\n", cmd, rip, a1, a2, a3, a4, a5, rc);
+#endif
+ /*
+ * Save the result in the proper return value register,
+ */
+#if BITS_PER_LONG == 64
+ pregs->rax = rc;
+#else
+ pregs->eax = rc;
+#endif
+ return;
+}
+
+/*
* Local variables:
* mode: C
* c-set-style: "BSD"
--- ./xen/arch/x86/hvm/svm/svm.c	2006-04-07 14:39:47.000000000 -0400
+++ ../xen-unstable-test64/./xen/arch/x86/hvm/svm/svm.c	2006-04-12 16:37:02.000000000 -0400
@@ -2003,6 +2003,7 @@
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
int inst_len;
+ unsigned long eip;
ASSERT(vmcb);
ASSERT(regs);
@@ -2010,33 +2011,37 @@
inst_len = __get_instruction_length(vmcb, INSTR_VMCALL, NULL);
ASSERT(inst_len > 0);
- /* VMMCALL sanity check */
- if (vmcb->cpl > get_vmmcall_cpl(regs->edi))
- {
- printf("VMMCALL CPL check failed\n");
- return -1;
- }
+ eip = vmcb->rip;
- /* handle the request */
- switch (regs->edi)
+ /* VMMCALL sanity check */
+ if (regs->eax == VMMCALL_MAGIC)
{
- case VMMCALL_RESET_TO_REALMODE:
- if (svm_do_vmmcall_reset_to_realmode(v, regs))
- {
- printf("svm_do_vmmcall_reset_to_realmode() failed\n");
- return -1;
- }
-
- /* since we just reset the VMCB, return without adjusting the eip */
- return 0;
- case VMMCALL_DEBUG:
- printf("DEBUG features not implemented yet\n");
- break;
- default:
- break;
+ if (vmcb->cpl > get_vmmcall_cpl(regs->edi))
+ {
+ /* Don't print anything on user mode errors */
+ return -1;
+ }
+
+ /* handle the request */
+ switch (regs->edi)
+ {
+ case VMMCALL_RESET_TO_REALMODE:
+ if (svm_do_vmmcall_reset_to_realmode(v, regs))
+ {
+ printf("svm_do_vmmcall_reset_to_realmode() failed\n");
+ return -1;
+ }
+
+ /* since we just reset the VMCB, return without adjusting the eip */
+ return 0;
+ case VMMCALL_DEBUG:
+ break;
+ default:
+ break;
+ }
}
- hvm_print_line(v, regs->eax); /* provides the current domain */
+ hvm_hypercall(regs, eip);
__update_guest_eip(vmcb, inst_len);
return 0;
--- ./xen/arch/x86/hvm/vmx/vmx.c	2006-04-12 09:07:38.000000000 -0400
+++ ../xen-unstable-test64/./xen/arch/x86/hvm/vmx/vmx.c	2006-04-12 09:08:12.000000000 -0400
@@ -2207,16 +2207,15 @@
__update_guest_eip(inst_len);
break;
}
-#if 0 /* keep this for debugging */
case EXIT_REASON_VMCALL:
__get_instruction_length(inst_len);
__vmread(GUEST_RIP, &eip);
__vmread(EXIT_QUALIFICATION, &exit_qualification);
- hvm_print_line(v, regs.eax); /* provides the current domain */
+ hvm_hypercall(&regs, eip);
+
__update_guest_eip(inst_len);
break;
-#endif
case EXIT_REASON_CR_ACCESS:
{
__vmread(GUEST_RIP, &eip);
@@ -2257,7 +2256,6 @@
case EXIT_REASON_MWAIT_INSTRUCTION:
__hvm_bug(&regs);
break;
- case EXIT_REASON_VMCALL:
case EXIT_REASON_VMCLEAR:
case EXIT_REASON_VMLAUNCH:
case EXIT_REASON_VMPTRLD:
--- ./xen/include/asm-x86/guest_access.h	2006-04-07 14:39:49.000000000 -0400
+++ ../xen-unstable-test64/./xen/include/asm-x86/guest_access.h	2006-04-13 10:55:21.000000000 -0400
@@ -21,6 +21,53 @@
(GUEST_HANDLE(type)) { _x }; \
})
+/* Determine which guest copy function is required */
+
+#include <asm/hvm/support.h>
+/*
+ * Copy to any guest safely.
+ */
+static inline unsigned long
+copy_to_any_guest(void __user *to, const void *from, unsigned n)
+{
+ if (!hvm_guest(current))
+ return copy_to_user(to, from, n);
+ return (hvm_copy((void *)from, (unsigned long)to, n, HVM_COPY_OUT) ? 0 : n);
+}
+
+/*
+ * Copy from any guest safely.
+ */
+static inline unsigned long
+copy_from_any_guest(void *to, const void __user *from, unsigned n)
+{
+ if (!hvm_guest(current))
+ return copy_from_user(to, from, n);
+ return (hvm_copy((void *)to, (unsigned long)from, n, HVM_COPY_IN) ? 0 : n);
+}
+
+/*
+ * Copy to any guest fast.
+ */
+static inline unsigned long
+__copy_to_any_guest(void *to, const void *from, unsigned n)
+{
+ if (!hvm_guest(current))
+ return __copy_to_user(to, from, n);
+ return (hvm_copy((void *)from, (unsigned long)to, n, HVM_COPY_OUT) ? 0 : n);
+}
+
+/*
+ * Copy from any guest fast.
+ */
+static inline unsigned long
+__copy_from_any_guest(void *to, const void __user *from, unsigned n)
+{
+ if (!hvm_guest(current))
+ return __copy_from_user(to, from, n);
+ return (hvm_copy((void *)to, (unsigned long)from, n, HVM_COPY_IN) ? 0 : n);
+}
+
/*
* Copy an array of objects to guest context via a guest handle,
* specifying an offset into the guest array.
@@ -28,7 +75,7 @@
#define copy_to_guest_offset(hnd, off, ptr, nr) ({ \
const typeof(ptr) _x = (hnd).p; \
const typeof(ptr) _y = (ptr); \
- copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \
+ copy_to_any_guest(_x+(off), _y, sizeof(*_x)*(nr)); \
})
/*
@@ -38,21 +85,21 @@
#define copy_from_guest_offset(ptr, hnd, off, nr) ({ \
const typeof(ptr) _x = (hnd).p; \
const typeof(ptr) _y = (ptr); \
- copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \
+ copy_from_any_guest(_y, _x+(off), sizeof(*_x)*(nr));\
})
/* Copy sub-field of a structure to guest context via a guest handle. */
#define copy_field_to_guest(hnd, ptr, field) ({ \
const typeof(&(ptr)->field) _x = &(hnd).p->field; \
const typeof(&(ptr)->field) _y = &(ptr)->field; \
- copy_to_user(_x, _y, sizeof(*_x)); \
+ copy_to_any_guest(_x, _y, sizeof(*_x)); \
})
/* Copy sub-field of a structure from guest context via a guest handle. */
#define copy_field_from_guest(ptr, hnd, field) ({ \
const typeof(&(ptr)->field) _x = &(hnd).p->field; \
const typeof(&(ptr)->field) _y = &(ptr)->field; \
- copy_from_user(_y, _x, sizeof(*_x)); \
+ copy_from_any_guest(_y, _x, sizeof(*_x)); \
})
/*
@@ -65,25 +112,25 @@
#define __copy_to_guest_offset(hnd, off, ptr, nr) ({ \
const typeof(ptr) _x = (hnd).p; \
const typeof(ptr) _y = (ptr); \
- __copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \
+ __copy_to_any_guest(_x+(off), _y, sizeof(*_x)*(nr));\
})
#define __copy_from_guest_offset(ptr, hnd, off, nr) ({ \
const typeof(ptr) _x = (hnd).p; \
const typeof(ptr) _y = (ptr); \
- __copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \
+ __copy_from_any_guest(_y, _x+(off), sizeof(*_x)*(nr));\
})
#define __copy_field_to_guest(hnd, ptr, field) ({ \
const typeof(&(ptr)->field) _x = &(hnd).p->field; \
const typeof(&(ptr)->field) _y = &(ptr)->field; \
- __copy_to_user(_x, _y, sizeof(*_x)); \
+ __copy_to_any_guest(_x, _y, sizeof(*_x)); \
})
#define __copy_field_from_guest(ptr, hnd, field) ({ \
const typeof(&(ptr)->field) _x = &(hnd).p->field; \
const typeof(&(ptr)->field) _y = &(ptr)->field; \
- __copy_from_user(_y, _x, sizeof(*_x)); \
+ __copy_from_any_guest(_y, _x, sizeof(*_x)); \
})
#endif /* __ASM_X86_GUEST_ACCESS_H__ */
--- ./xen/include/asm-x86/hvm/support.h	2006-04-07 14:39:49.000000000 -0400
+++ ../xen-unstable-test64/./xen/include/asm-x86/hvm/support.h	2006-04-11 08:46:07.000000000 -0400
@@ -149,4 +149,8 @@
extern void hvm_print_line(struct vcpu *v, const char c);
extern void hlt_timer_fn(void *data);
+typedef unsigned long (*hypercall_ptr_t)(unsigned long, ...);
+extern hypercall_ptr_t hypercall_table[];
+extern void hvm_hypercall(struct cpu_user_regs *pregs, unsigned long rip);
+
#endif /* __ASM_X86_HVM_SUPPORT_H__ */
--- ./xen/include/asm-x86/hvm/svm/vmmcall.h	2006-04-07 14:39:49.000000000 -0400
+++ ../xen-unstable-test64/./xen/include/asm-x86/hvm/svm/vmmcall.h	2006-04-11 17:32:36.000000000 -0400
@@ -22,6 +22,9 @@
#ifndef __ASM_X86_HVM_SVM_VMMCALL_H__
#define __ASM_X86_HVM_SVM_VMMCALL_H__
+/* VMMCALL signature field */
+#define VMMCALL_MAGIC 0x58454E00 /* XEN\0 key */
+
/* VMMCALL command fields */
#define VMMCALL_CODE_CPL_MASK 0xC0000000
#define VMMCALL_CODE_MBZ_MASK 0x3FFF0000
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|