WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] x86/hvm: hypercall adjustments

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86/hvm: hypercall adjustments
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 15 May 2007 08:21:38 -0700
Delivery-date: Tue, 15 May 2007 08:24:22 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1179221308 -3600
# Node ID f4390e34ad120afd4f7d65789d2394b7c6dfd7a5
# Parent  eb027b704dc55bb7f6e01a4b068c1ac407331ec1
x86/hvm: hypercall adjustments

- share more code between 32- and 64-bit variants
- properly handle continuations for 32-bit guests on 64-bit hypervisor
- properly handle preemption (this must *not* rely on regs->eip, as
  other code may overwrite the value there by calling
  hvm_store_cpu_guest_regs())
- deny hypercall access when called from guest in vm86 mode, which
  requires that {svm,vmx}_guest_x86_mode() make real and vm86 modes
  distinguishable

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/arch/x86/domain.c           |    9 ++
 xen/arch/x86/hvm/hvm.c          |  121 +++++++++++++++++-----------------------
 xen/arch/x86/hvm/platform.c     |    3 
 xen/arch/x86/hvm/svm/svm.c      |   24 ++-----
 xen/arch/x86/hvm/vmx/vmx.c      |   29 ++-------
 xen/include/asm-x86/hypercall.h |    9 ++
 6 files changed, 90 insertions(+), 105 deletions(-)

diff -r eb027b704dc5 -r f4390e34ad12 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Tue May 15 10:13:11 2007 +0100
+++ b/xen/arch/x86/domain.c     Tue May 15 10:28:28 2007 +0100
@@ -38,6 +38,7 @@
 #include <asm/mpspec.h>
 #include <asm/ldt.h>
 #include <asm/paging.h>
+#include <asm/hypercall.h>
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/support.h>
 #include <asm/msr.h>
@@ -1231,6 +1232,8 @@ void sync_vcpu_execstate(struct vcpu *v)
     __arg;                                                                  \
 })
 
+DEFINE_PER_CPU(char, hc_preempted);
+
 unsigned long hypercall_create_continuation(
     unsigned int op, const char *format, ...)
 {
@@ -1262,7 +1265,9 @@ unsigned long hypercall_create_continuat
         regs->eip -= 2;  /* re-execute 'syscall' / 'int 0x82' */
 
 #ifdef __x86_64__
-        if ( !is_pv_32on64_domain(current->domain) )
+        if ( !is_hvm_vcpu(current) ?
+             !is_pv_32on64_vcpu(current) :
+             (hvm_guest_x86_mode(current) == 8) )
         {
             for ( i = 0; *p != '\0'; i++ )
             {
@@ -1298,6 +1303,8 @@ unsigned long hypercall_create_continuat
                 }
             }
         }
+
+        this_cpu(hc_preempted) = 1;
     }
 
     va_end(args);
diff -r eb027b704dc5 -r f4390e34ad12 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Tue May 15 10:13:11 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Tue May 15 10:28:28 2007 +0100
@@ -663,7 +663,7 @@ typedef unsigned long hvm_hypercall_t(
 
 #if defined(__i386__)
 
-static hvm_hypercall_t *hvm_hypercall_table[NR_hypercalls] = {
+static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
     HYPERCALL(memory_op),
     HYPERCALL(multicall),
     HYPERCALL(xen_version),
@@ -672,21 +672,6 @@ static hvm_hypercall_t *hvm_hypercall_ta
     HYPERCALL(hvm_op)
 };
 
-static void __hvm_do_hypercall(struct cpu_user_regs *pregs)
-{
-    if ( (pregs->eax >= NR_hypercalls) || !hvm_hypercall_table[pregs->eax] )
-    {
-        if ( pregs->eax != __HYPERVISOR_grant_table_op )
-            gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d bad hypercall %d.\n",
-                     current->domain->domain_id, current->vcpu_id, pregs->eax);
-        pregs->eax = -ENOSYS;
-        return;
-    }
-
-    pregs->eax = hvm_hypercall_table[pregs->eax](
-        pregs->ebx, pregs->ecx, pregs->edx, pregs->esi, pregs->edi);
-}
-
 #else /* defined(__x86_64__) */
 
 static long do_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg)
@@ -746,49 +731,38 @@ static hvm_hypercall_t *hvm_hypercall32_
     HYPERCALL(hvm_op)
 };
 
-static void __hvm_do_hypercall(struct cpu_user_regs *pregs)
-{
-    pregs->rax = (uint32_t)pregs->eax; /* mask in case compat32 caller */
-    if ( (pregs->rax >= NR_hypercalls) || !hvm_hypercall64_table[pregs->rax] )
-    {
-        if ( pregs->rax != __HYPERVISOR_grant_table_op )
-            gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d bad hypercall %ld.\n",
-                     current->domain->domain_id, current->vcpu_id, pregs->rax);
-        pregs->rax = -ENOSYS;
-        return;
-    }
-
-    if ( current->arch.paging.mode->guest_levels == 4 )
-    {
-        pregs->rax = hvm_hypercall64_table[pregs->rax](pregs->rdi,
-                                                       pregs->rsi,
-                                                       pregs->rdx,
-                                                       pregs->r10,
-                                                       pregs->r8);
-    }
-    else
-    {
-        pregs->eax = hvm_hypercall32_table[pregs->eax]((uint32_t)pregs->ebx,
-                                                       (uint32_t)pregs->ecx,
-                                                       (uint32_t)pregs->edx,
-                                                       (uint32_t)pregs->esi,
-                                                       (uint32_t)pregs->edi);
-    }
-}
-
 #endif /* defined(__x86_64__) */
 
 int hvm_do_hypercall(struct cpu_user_regs *regs)
 {
-    int flush, preempted;
-    unsigned long old_eip;
-
-    hvm_store_cpu_guest_regs(current, regs, NULL);
-
-    if ( unlikely(ring_3(regs)) )
-    {
-        regs->eax = -EPERM;
-        return 0;
+    int flush, mode = hvm_guest_x86_mode(current);
+    uint32_t eax = regs->eax;
+
+    switch ( mode )
+    {
+#ifdef __x86_64__
+    case 8:
+#endif
+    case 4:
+    case 2:
+        hvm_store_cpu_guest_regs(current, regs, NULL);
+        if ( unlikely(ring_3(regs)) )
+        {
+    default:
+            regs->eax = -EPERM;
+            return HVM_HCALL_completed;
+        }
+    case 0:
+        break;
+    }
+
+    if ( (eax >= NR_hypercalls) || !hvm_hypercall32_table[eax] )
+    {
+        if ( eax != __HYPERVISOR_grant_table_op )
+            gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d bad hypercall %u.\n",
+                     current->domain->domain_id, current->vcpu_id, eax);
+        regs->eax = -ENOSYS;
+        return HVM_HCALL_completed;
     }
 
     /*
@@ -796,20 +770,29 @@ int hvm_do_hypercall(struct cpu_user_reg
      * For now we also need to flush when pages are added, as qemu-dm is not
      * yet capable of faulting pages into an existing valid mapcache bucket.
      */
-    flush = ((uint32_t)regs->eax == __HYPERVISOR_memory_op);
-
-    /* Check for preemption: RIP will be modified from this dummy value. */
-    old_eip = regs->eip;
-    regs->eip = 0xF0F0F0FF;
-
-    __hvm_do_hypercall(regs);
-
-    preempted = (regs->eip != 0xF0F0F0FF);
-    regs->eip = old_eip;
-
-    hvm_load_cpu_guest_regs(current, regs);
-
-    return (preempted ? HVM_HCALL_preempted :
+    flush = (eax == __HYPERVISOR_memory_op);
+    this_cpu(hc_preempted) = 0;
+
+#ifdef __x86_64__
+    if ( mode == 8 )
+    {
+        regs->rax = hvm_hypercall64_table[eax](regs->rdi,
+                                               regs->rsi,
+                                               regs->rdx,
+                                               regs->r10,
+                                               regs->r8);
+    }
+    else
+#endif
+    {
+        regs->eax = hvm_hypercall32_table[eax]((uint32_t)regs->ebx,
+                                               (uint32_t)regs->ecx,
+                                               (uint32_t)regs->edx,
+                                               (uint32_t)regs->esi,
+                                               (uint32_t)regs->edi);
+    }
+
+    return (this_cpu(hc_preempted) ? HVM_HCALL_preempted :
             flush ? HVM_HCALL_invalidate : HVM_HCALL_completed);
 }
 
diff -r eb027b704dc5 -r f4390e34ad12 xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c       Tue May 15 10:13:11 2007 +0100
+++ b/xen/arch/x86/hvm/platform.c       Tue May 15 10:28:28 2007 +0100
@@ -1037,6 +1037,9 @@ void handle_mmio(unsigned long gpa)
     df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;
 
     address_bytes = hvm_guest_x86_mode(v);
+    if (address_bytes < 2)
+        /* real or vm86 modes */
+        address_bytes = 2;
     inst_addr = hvm_get_segment_base(v, x86_seg_cs) + regs->eip;
     inst_len = hvm_instruction_length(inst_addr, address_bytes);
     if ( inst_len <= 0 )
diff -r eb027b704dc5 -r f4390e34ad12 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Tue May 15 10:13:11 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Tue May 15 10:28:28 2007 +0100
@@ -554,14 +554,6 @@ static inline void svm_restore_dr(struct
         __restore_debug_registers(v);
 }
 
-static int svm_realmode(struct vcpu *v)
-{
-    unsigned long cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
-    unsigned long eflags = v->arch.hvm_svm.vmcb->rflags;
-
-    return (eflags & X86_EFLAGS_VM) || !(cr0 & X86_CR0_PE);
-}
-
 static int svm_interrupts_enabled(struct vcpu *v)
 {
     unsigned long eflags = v->arch.hvm_svm.vmcb->rflags;
@@ -572,13 +564,13 @@ static int svm_guest_x86_mode(struct vcp
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
-    if ( svm_long_mode_enabled(v) && vmcb->cs.attr.fields.l )
+    if ( unlikely(!(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PE)) )
+        return 0;
+    if ( unlikely(vmcb->rflags & X86_EFLAGS_VM) )
+        return 1;
+    if ( svm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
         return 8;
-
-    if ( svm_realmode(v) )
-        return 2;
-
-    return (vmcb->cs.attr.fields.db ? 4 : 2);
+    return (likely(vmcb->cs.attr.fields.db) ? 4 : 2);
 }
 
 static void svm_update_host_cr3(struct vcpu *v)
@@ -1950,7 +1942,9 @@ static int svm_cr_access(struct vcpu *v,
     case INSTR_SMSW:
         value = v->arch.hvm_svm.cpu_shadow_cr0 & 0xFFFF;
         modrm = buffer[index+2];
-        addr_size = svm_guest_x86_mode( v );
+        addr_size = svm_guest_x86_mode(v);
+        if ( addr_size < 2 )
+            addr_size = 2;
         if ( likely((modrm & 0xC0) >> 6 == 3) )
         {
             gpreg = decode_src_reg(prefix, modrm);
diff -r eb027b704dc5 -r f4390e34ad12 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Tue May 15 10:13:11 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Tue May 15 10:28:28 2007 +0100
@@ -994,31 +994,20 @@ static void vmx_init_hypercall_page(stru
     *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
 }
 
-static int vmx_realmode(struct vcpu *v)
-{
-    unsigned long rflags;
+static int vmx_guest_x86_mode(struct vcpu *v)
+{
+    unsigned int cs_ar_bytes;
 
     ASSERT(v == current);
 
-    rflags = __vmread(GUEST_RFLAGS);
-    return rflags & X86_EFLAGS_VM;
-}
-
-static int vmx_guest_x86_mode(struct vcpu *v)
-{
-    unsigned long cs_ar_bytes;
-
-    ASSERT(v == current);
-
+    if ( unlikely(!(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_PE)) )
+        return 0;
+    if ( unlikely(__vmread(GUEST_RFLAGS) & X86_EFLAGS_VM) )
+        return 1;
     cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
-
-    if ( vmx_long_mode_enabled(v) && (cs_ar_bytes & (1u<<13)) )
+    if ( vmx_long_mode_enabled(v) && likely(cs_ar_bytes & (1u<<13)) )
         return 8;
-
-    if ( vmx_realmode(v) )
-        return 2;
-
-    return ((cs_ar_bytes & (1u<<14)) ? 4 : 2);
+    return (likely(cs_ar_bytes & (1u<<14)) ? 4 : 2);
 }
 
 static int vmx_pae_enabled(struct vcpu *v)
diff -r eb027b704dc5 -r f4390e34ad12 xen/include/asm-x86/hypercall.h
--- a/xen/include/asm-x86/hypercall.h   Tue May 15 10:13:11 2007 +0100
+++ b/xen/include/asm-x86/hypercall.h   Tue May 15 10:28:28 2007 +0100
@@ -14,6 +14,15 @@
  * invocation of do_mmu_update() is resuming a previously preempted call.
  */
 #define MMU_UPDATE_PREEMPTED          (~(~0U>>1))
+
+/*
+ * This gets set to a non-zero value whenever hypercall_create_continuation()
+ * is used (outside of multicall context; in multicall context the second call
+ * from do_multicall() itself will have this effect). Internal callers of
+ * hypercall handlers interested in this condition must clear the flag prior
+ * to invoking the respective handler(s).
+ */
+DECLARE_PER_CPU(char, hc_preempted);
 
 extern long
 do_event_channel_op_compat(

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] [xen-unstable] x86/hvm: hypercall adjustments, Xen patchbot-unstable <=