To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 5 of 7] REDO2: mem_access & mem_access 2: added INT3/CRx capture
From: Joe Epstein <jepstein98@xxxxxxxxx>
Date: Wed, 5 Jan 2011 19:53:42 -0800

* Allows a memory event listener to register for events on changes to
  CR0, CR3, and CR4, as well as on INT3 instructions, as part of the
  mem_access mechanism.  These events can be delivered either
  synchronously or asynchronously (see the usage sketch below).

* For INT3, the logic works independently of a debugger, so both can
  be supported.

* The presence and type of listener are stored and accessed through
  HVM params.

* Changed the event-mask handling to ensure that the right events are
  captured based on the registered listeners.

* Added the ability to inject HW/SW traps into a VCPU when it next
  resumes (rather than trying to modify the existing IRQ injection
  code paths).  Only one trap to inject can be outstanding at a time.
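
As a usage sketch (not part of this patch): a dom0 tool could arm
synchronous CR3 reporting through the libxc wrapper xc_set_hvm_param()
and the new params/flags defined below.  The params must be set from a
foreign domain (the hypervisor returns -EPERM if a domain sets them on
itself), the mem_access listener's ring setup is assumed to be in place
already, and error handling is omitted; arm_cr3_events() is an
illustrative name, not part of this series.

/* Minimal sketch: arm synchronous, change-only CR3 events from dom0. */
#include <xenctrl.h>
#include <xen/hvm/params.h>

static int arm_cr3_events(xc_interface *xch, domid_t domid)
{
    /* Pause the vcpu on each CR3 load and report only real changes. */
    return xc_set_hvm_param(xch, domid, HVM_PARAM_MEMORY_EVENT_CR3,
                            HVMPME_mode_sync | HVMPME_onchangeonly);
}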

Signed-off-by: Joe Epstein <jepstein98@xxxxxxxxx>


diff -r 4b6de511642e -r cda142e7b912 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed Jan 05 19:30:26 2011 -0800
+++ b/xen/arch/x86/hvm/hvm.c    Wed Jan 05 19:32:32 2011 -0800
@@ -309,6 +309,15 @@ void hvm_do_resume(struct vcpu *v)
             return; /* bail */
         }
     }
+
+    /* Inject pending hw/sw trap */
+    if ( v->arch.hvm_vcpu.inject_trap != -1 )
+    {
+        hvm_inject_exception(v->arch.hvm_vcpu.inject_trap,
+                             v->arch.hvm_vcpu.inject_error_code,
+                             v->arch.hvm_vcpu.inject_cr2);
+        v->arch.hvm_vcpu.inject_trap = -1;
+    }
 }
 
 static void hvm_init_ioreq_page(
@@ -949,6 +958,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
     spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
     INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
 
+    v->arch.hvm_vcpu.inject_trap = -1;
+
 #ifdef CONFIG_COMPAT
     rc = setup_compat_arg_xlat(v);
     if ( rc != 0 )
@@ -3238,10 +3249,45 @@ long do_hvm_op(unsigned long op, XEN_GUE
             case HVM_PARAM_ACPI_IOPORTS_LOCATION:
                 rc = pmtimer_change_ioport(d, a.value);
                 break;
+            case HVM_PARAM_MEMORY_EVENT_CR0:
+            case HVM_PARAM_MEMORY_EVENT_CR3:
+            case HVM_PARAM_MEMORY_EVENT_CR4:
+                if ( d->domain_id == current->domain->domain_id )
+                    rc = -EPERM;
+                break;
+            case HVM_PARAM_MEMORY_EVENT_INT3:
+                if ( d->domain_id == current->domain->domain_id )
+                {
+                    rc = -EPERM;
+                    break;
+                }
+                if ( a.value & HVMPME_onchangeonly )
+                    rc = -EINVAL;
+                break;
             }
 
-            if ( rc == 0 )
+            if ( rc == 0 )
+            {
                 d->arch.hvm_domain.params[a.index] = a.value;
+
+                switch ( a.index )
+                {
+                case HVM_PARAM_MEMORY_EVENT_INT3:
+                {
+                    domain_pause(d);
+                    domain_unpause(d); /* Causes guest to latch new status */
+                    break;
+                }
+                case HVM_PARAM_MEMORY_EVENT_CR3:
+                {
+                    for_each_vcpu ( d, v )
+                        hvm_funcs.update_guest_cr(v, 0); /* Latches new CR3 mask through CR0 code */
+                    break;
+                }
+                }
+
+            }
+
         }
         else
         {
@@ -3659,6 +3705,44 @@ long do_hvm_op(unsigned long op, XEN_GUE
         break;
     }
 
+    case HVMOP_inject_trap:
+    {
+        xen_hvm_inject_trap_t tr;
+        struct domain *d;
+        struct vcpu *v;
+
+        if ( copy_from_guest(&tr, arg, 1) )
+            return -EFAULT;
+
+        if ( current->domain->domain_id == tr.domid )
+            return -EPERM;
+
+        rc = rcu_lock_target_domain_by_id(tr.domid, &d);
+        if ( rc != 0 )
+            return rc;
+
+        rc = -EINVAL;
+        if ( !is_hvm_domain(d) )
+            goto param_fail8;
+
+        rc = -ENOENT;
+        if ( tr.vcpuid >= d->max_vcpus || (v = d->vcpu[tr.vcpuid]) == NULL )
+            goto param_fail8;
+
+        if ( v->arch.hvm_vcpu.inject_trap != -1 )
+            rc = -EBUSY;
+        else
+        {
+            v->arch.hvm_vcpu.inject_trap       = tr.trap;
+            v->arch.hvm_vcpu.inject_error_code = tr.error_code;
+            v->arch.hvm_vcpu.inject_cr2        = tr.cr2;
+        }
+
+    param_fail8:
+        rcu_unlock_domain(d);
+        break;
+    }
+
     default:
     {
         gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
@@ -3699,6 +3783,84 @@ int hvm_debug_op(struct vcpu *v, int32_t
     return rc;
 }
 
+static int hvm_memory_event_traps(long p, uint32_t reason,
+                                  unsigned long value, unsigned long old,
+                                  bool_t gla_valid, unsigned long gla)
+{
+    struct vcpu* v = current;
+    struct domain *d = v->domain;
+    mem_event_request_t req;
+    int rc;
+
+    if ( !(p & HVMPME_MODE_MASK) )
+        return 0;
+
+    if ( (p & HVMPME_onchangeonly) && (value == old) )
+        return 1;
+
+    rc = mem_event_check_ring(d);
+    if ( rc )
+        return rc;
+
+    memset(&req, 0, sizeof(req));
+    req.type = MEM_EVENT_TYPE_ACCESS;
+    req.reason = reason;
+
+    if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
+    {
+        req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+        vcpu_pause_nosync(v);
+    }
+
+    req.gfn = value;
+    req.vcpu_id = v->vcpu_id;
+    if ( gla_valid )
+    {
+        req.offset = gla & ((1 << PAGE_SHIFT) - 1);
+        req.gla = gla;
+        req.gla_valid = 1;
+    }
+
+    mem_event_put_request(d, &req);
+
+    return 1;
+}
+
+void hvm_memory_event_cr0(unsigned long value, unsigned long old)
+{
+    hvm_memory_event_traps(current->domain->arch.hvm_domain
+                             .params[HVM_PARAM_MEMORY_EVENT_CR0],
+                           MEM_EVENT_REASON_CR0,
+                           value, old, 0, 0);
+}
+
+void hvm_memory_event_cr3(unsigned long value, unsigned long old)
+{
+    hvm_memory_event_traps(current->domain->arch.hvm_domain
+                             .params[HVM_PARAM_MEMORY_EVENT_CR3],
+                           MEM_EVENT_REASON_CR3,
+                           value, old, 0, 0);
+}
+
+void hvm_memory_event_cr4(unsigned long value, unsigned long old)
+{
+    hvm_memory_event_traps(current->domain->arch.hvm_domain
+                             .params[HVM_PARAM_MEMORY_EVENT_CR4],
+                           MEM_EVENT_REASON_CR4,
+                           value, old, 0, 0);
+}
+
+int hvm_memory_event_int3(unsigned long gla)
+{
+    uint32_t pfec = PFEC_page_present;
+    unsigned long gfn;
+    gfn = paging_gva_to_gfn(current, gla, &pfec);
+
+    return hvm_memory_event_traps(current->domain->arch.hvm_domain
+                                    .params[HVM_PARAM_MEMORY_EVENT_INT3],
+                                  MEM_EVENT_REASON_INT3,
+                                  gfn, 0, 1, gla);
+}
 
 /*
  * Local variables:
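
A note on the synchronous path above: hvm_memory_event_traps() pauses
the vcpu with vcpu_pause_nosync() and queues a request, and the vcpu
stays paused until the listener pushes a response with the
MEM_EVENT_FLAG_VCPU_PAUSED flag echoed back.  A rough sketch of the
consumer loop follows, assuming the ring page has already been mapped
and initialised with BACK_RING_INIT() (as the xenpaging daemon does)
and that mem_event.h provides the standard ring types; the
event-channel notification is omitted.

/* Sketch of a listener draining the mem_event ring.  Only fields this
 * patch demonstrably sets (gfn, vcpu_id, flags, reason) are used. */
#include <stdio.h>
#include <string.h>
#include <xen/io/ring.h>
#include <xen/mem_event.h>

static void drain_events(mem_event_back_ring_t *ring)
{
    mem_event_request_t req;
    mem_event_response_t rsp;

    while ( RING_HAS_UNCONSUMED_REQUESTS(ring) )
    {
        memcpy(&req, RING_GET_REQUEST(ring, ring->req_cons), sizeof(req));
        ring->req_cons++;

        if ( req.reason == MEM_EVENT_REASON_CR3 )
            printf("vcpu %u loaded CR3 %#lx\n",
                   req.vcpu_id, (unsigned long)req.gfn);

        memset(&rsp, 0, sizeof(rsp));
        rsp.vcpu_id = req.vcpu_id;
        rsp.flags   = req.flags;  /* echo VCPU_PAUSED so Xen unpauses */

        memcpy(RING_GET_RESPONSE(ring, ring->rsp_prod_pvt), &rsp,
               sizeof(rsp));
        ring->rsp_prod_pvt++;
        RING_PUSH_RESPONSES(ring);
    }
}
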
diff -r 4b6de511642e -r cda142e7b912 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c    Wed Jan 05 19:30:26 2011 -0800
+++ b/xen/arch/x86/hvm/vmx/vmcs.c    Wed Jan 05 19:32:32 2011 -0800
@@ -1082,7 +1082,9 @@ void vmx_do_resume(struct vcpu *v)
         hvm_asid_flush_vcpu(v);
     }
 
-    debug_state = v->domain->debugger_attached;
+    debug_state = v->domain->debugger_attached
+                  || v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_INT3];
+
     if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
     {
         v->arch.hvm_vcpu.debug_state_latch = debug_state;
diff -r 4b6de511642e -r cda142e7b912 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c    Wed Jan 05 19:30:26 2011 -0800
+++ b/xen/arch/x86/hvm/vmx/vmx.c    Wed Jan 05 19:32:32 2011 -0800
@@ -1064,12 +1064,16 @@ static void vmx_update_guest_cr(struct v
 
         if ( paging_mode_hap(v->domain) )
         {
-            /* We manage GUEST_CR3 when guest CR0.PE is zero. */
+            /* We manage GUEST_CR3 when guest CR0.PE is zero or CR3 mem events are enabled. */
             uint32_t cr3_ctls = (CPU_BASED_CR3_LOAD_EXITING |
                                  CPU_BASED_CR3_STORE_EXITING);
             v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
             if ( !hvm_paging_enabled(v) )
                 v->arch.hvm_vmx.exec_control |= cr3_ctls;
+
+            if ( v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_CR3] )
+                v->arch.hvm_vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
+
             vmx_update_cpu_exec_control(v);
 
             /* Changing CR0.PE can change some bits in real CR4. */
@@ -1252,9 +1256,12 @@ void vmx_inject_hw_exception(int trap, i
     unsigned long intr_info = __vmread(VM_ENTRY_INTR_INFO);
     struct vcpu *curr = current;
 
+    int type = X86_EVENTTYPE_HW_EXCEPTION;
+
     switch ( trap )
     {
     case TRAP_debug:
+        type = X86_EVENTTYPE_SW_EXCEPTION;
         if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
         {
             __restore_debug_registers(curr);
@@ -1269,6 +1276,9 @@ void vmx_inject_hw_exception(int trap, i
             domain_pause_for_debugger();
             return;
         }
+
+        type = X86_EVENTTYPE_SW_EXCEPTION;
+        __vmwrite(VM_ENTRY_INSTRUCTION_LEN, 1); /* int3 */
     }
 
     if ( unlikely(intr_info & INTR_INFO_VALID_MASK) &&
@@ -1279,7 +1289,7 @@ void vmx_inject_hw_exception(int trap, i
             error_code = 0;
     }
 
-    __vmx_inject_exception(trap, X86_EVENTTYPE_HW_EXCEPTION, error_code);
+    __vmx_inject_exception(trap, type, error_code);
 
     if ( trap == TRAP_page_fault )
         HVMTRACE_LONG_2D(PF_INJECT, error_code,
@@ -1565,6 +1575,8 @@ static int mov_to_cr(int gp, int cr, str
     unsigned long value;
     struct vcpu *v = current;
     struct vlapic *vlapic = vcpu_vlapic(v);
+    int rc = 0;
+    unsigned long old;
 
     switch ( gp )
     {
@@ -1589,13 +1601,25 @@ static int mov_to_cr(int gp, int cr, str
     switch ( cr )
     {
     case 0:
-        return !hvm_set_cr0(value);
+        old = v->arch.hvm_vcpu.guest_cr[0];
+        rc = !hvm_set_cr0(value);
+        if ( rc )
+            hvm_memory_event_cr0(value, old);
+        return rc;
 
     case 3:
-        return !hvm_set_cr3(value);
+        old = v->arch.hvm_vcpu.guest_cr[3];
+        rc = !hvm_set_cr3(value);
+        if ( rc )
+            hvm_memory_event_cr3(value, old);
+        return rc;
 
     case 4:
-        return !hvm_set_cr4(value);
+        old = v->arch.hvm_vcpu.guest_cr[4];
+        rc = !hvm_set_cr4(value);
+        if ( rc )
+            hvm_memory_event_cr4(value, old);
+        return rc;
 
     case 8:
         vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
@@ -1676,11 +1700,17 @@ static int vmx_cr_access(unsigned long e
         cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
         mov_from_cr(cr, gp, regs);
         break;
-    case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
+    case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
+    {
+        unsigned long old = v->arch.hvm_vcpu.guest_cr[0];
         v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
         vmx_update_guest_cr(v, 0);
+
+        hvm_memory_event_cr0(v->arch.hvm_vcpu.guest_cr[0], old);
+
         HVMTRACE_0D(CLTS);
         break;
+    }
     case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
         value = v->arch.hvm_vcpu.guest_cr[0];
         /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
@@ -2351,13 +2381,29 @@ asmlinkage void vmx_vmexit_handler(struc
                 goto exit_and_crash;
             domain_pause_for_debugger();
             break;
-        case TRAP_int3:
-            if ( !v->domain->debugger_attached )
-                goto exit_and_crash;
-            update_guest_eip(); /* Safe: INT3 */
-            current->arch.gdbsx_vcpu_event = TRAP_int3;
-            domain_pause_for_debugger();
-            break;
+        case TRAP_int3:
+        {
+            if ( v->domain->debugger_attached )
+            {
+                update_guest_eip(); /* Safe: INT3 */
+                current->arch.gdbsx_vcpu_event = TRAP_int3;
+                domain_pause_for_debugger();
+                break;
+            }
+            else {
+                int handled = hvm_memory_event_int3(regs->eip);
+
+                if ( handled < 0 )
+                {
+                    vmx_inject_exception(TRAP_int3, HVM_DELIVER_NO_ERROR_CODE, 0);
+                    break;
+                }
+                else if ( handled )
+                    break;
+            }
+
+            goto exit_and_crash;
+        }
         case TRAP_no_device:
             vmx_fpu_dirty_intercept();
             break;
diff -r 4b6de511642e -r cda142e7b912 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h    Wed Jan 05 19:30:26 2011 -0800
+++ b/xen/include/asm-x86/hvm/hvm.h    Wed Jan 05 19:32:32 2011 -0800
@@ -372,4 +372,12 @@ bool_t hvm_hap_nested_page_fault(unsigne
 int hvm_x2apic_msr_read(struct vcpu *v, unsigned int msr, uint64_t *msr_content);
 int hvm_x2apic_msr_write(struct vcpu *v, unsigned int msr, uint64_t msr_content);
 
+/* Called for the current VCPU on CRx changes by the guest */
+void hvm_memory_event_cr0(unsigned long value, unsigned long old);
+void hvm_memory_event_cr3(unsigned long value, unsigned long old);
+void hvm_memory_event_cr4(unsigned long value, unsigned long old);
+
+/* Called for current VCPU on int3: returns -1 if no listener */
+int hvm_memory_event_int3(unsigned long gla);
+
 #endif /* __ASM_X86_HVM_HVM_H__ */
diff -r 4b6de511642e -r cda142e7b912 xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h    Wed Jan 05 19:30:26 2011 -0800
+++ b/xen/include/asm-x86/hvm/vcpu.h    Wed Jan 05 19:32:32 2011 -0800
@@ -114,6 +114,11 @@ struct hvm_vcpu {
     /* We may write up to m128 as a number of device-model transactions. */
     paddr_t mmio_large_write_pa;
     unsigned int mmio_large_write_bytes;
+
+    /* Pending hw/sw trap */
+    int           inject_trap;       /* -1 for nothing to inject */
+    int           inject_error_code;
+    unsigned long inject_cr2;
 };
 
 #endif /* __ASM_X86_HVM_VCPU_H__ */
diff -r 4b6de511642e -r cda142e7b912 xen/include/public/hvm/hvm_op.h
--- a/xen/include/public/hvm/hvm_op.h    Wed Jan 05 19:30:26 2011 -0800
+++ b/xen/include/public/hvm/hvm_op.h    Wed Jan 05 19:32:32 2011 -0800
@@ -200,4 +200,26 @@ struct xen_hvm_get_mem_access {
 };
 typedef struct xen_hvm_get_mem_access xen_hvm_get_mem_access_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_access_t);
+
+#define HVMOP_inject_trap            14
+/* Inject a trap into a VCPU, to be delivered the next time the VCPU
+ * is scheduled.  The caller should understand the VCPU's current
+ * state well enough to predict what effect the injected trap will
+ * have; only one trap may be outstanding per VCPU at a time.
+ */
+struct xen_hvm_inject_trap {
+    /* Domain to inject the trap into. */
+    domid_t domid;
+    /* VCPU */
+    uint32_t vcpuid;
+    /* Trap number */
+    uint32_t trap;
+    /* Error code, or -1 to skip */
+    uint32_t error_code;
+    /* CR2 for page faults */
+    uint64_t cr2;
+};
+typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
+
 #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
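
To exercise HVMOP_inject_trap, the caller fills in xen_hvm_inject_trap_t
as described by the comments above.  The sketch below is hedged: the
do_hvmop() helper is hypothetical and stands in for whatever HVMOP
hypercall wrapper the toolstack provides (libxc plumbing is outside
this patch).  The hypervisor returns -EBUSY if a previously injected
trap is still pending on the vcpu, and -EPERM if a domain targets
itself.

/* Sketch: re-inject an INT3 (vector 3) after a listener has consumed
 * the corresponding memory event. */
#include <xen/hvm/hvm_op.h>

int do_hvmop(unsigned long op, void *arg);  /* hypothetical wrapper */

static int reinject_int3(domid_t domid, uint32_t vcpuid)
{
    xen_hvm_inject_trap_t tr = {
        .domid      = domid,
        .vcpuid     = vcpuid,
        .trap       = 3,             /* #BP */
        .error_code = (uint32_t)-1,  /* #BP carries no error code */
        .cr2        = 0,             /* only meaningful for #PF */
    };

    return do_hvmop(HVMOP_inject_trap, &tr);
}
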
diff -r 4b6de511642e -r cda142e7b912 xen/include/public/hvm/params.h
--- a/xen/include/public/hvm/params.h    Wed Jan 05 19:30:26 2011 -0800
+++ b/xen/include/public/hvm/params.h    Wed Jan 05 19:32:32 2011 -0800
@@ -124,6 +124,19 @@
  */
 #define HVM_PARAM_ACPI_IOPORTS_LOCATION 19
 
-#define HVM_NR_PARAMS          20
+/* Enable memory event delivery: async, or sync (the vcpu pauses until
+ * the listener responds); onchangeonly reports only changes of value. */
+#define HVM_PARAM_MEMORY_EVENT_CR0   20
+#define HVM_PARAM_MEMORY_EVENT_CR3   21
+#define HVM_PARAM_MEMORY_EVENT_CR4   22
+#define HVM_PARAM_MEMORY_EVENT_INT3  23
+
+#define HVMPME_MODE_MASK       (3 << 0)
+#define HVMPME_mode_disabled   0
+#define HVMPME_mode_async      1
+#define HVMPME_mode_sync       2
+#define HVMPME_onchangeonly    (1 << 2)
+
+#define HVM_NR_PARAMS          24
 
 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
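
Putting the flag definitions together: the delivery mode occupies bits
0-1 and onchangeonly is bit 2, so the useful encodings for the
HVM_PARAM_MEMORY_EVENT_* values are:

/* Example encodings (values follow from the definitions above):
 *   HVMPME_mode_disabled                    -> 0  (no events)
 *   HVMPME_mode_async                       -> 1  (vcpu keeps running)
 *   HVMPME_mode_sync                        -> 2  (vcpu paused until reply)
 *   HVMPME_mode_sync | HVMPME_onchangeonly  -> 6  (sync, changes only)
 * The INT3 param rejects HVMPME_onchangeonly with -EINVAL, as an INT3
 * has no old/new value to compare. */
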
diff -r 4b6de511642e -r cda142e7b912 xen/include/public/mem_event.h
--- a/xen/include/public/mem_event.h    Wed Jan 05 19:30:26 2011 -0800
+++ b/xen/include/public/mem_event.h    Wed Jan 05 19:32:32 2011 -0800
@@ -37,6 +37,10 @@
 /* Reasons for the memory event request */
 #define MEM_EVENT_REASON_UNKNOWN     0    /* typical reason */
 #define MEM_EVENT_REASON_VIOLATION   1    /* access violation, GFN is address */
+#define MEM_EVENT_REASON_CR0         2    /* CR0 was hit: gfn is CR0 value */
+#define MEM_EVENT_REASON_CR3         3    /* CR3 was hit: gfn is CR3 value */
+#define MEM_EVENT_REASON_CR4         4    /* CR4 was hit: gfn is CR4 value */
+#define MEM_EVENT_REASON_INT3        5    /* int3 was hit: gla/gfn are RIP */
 
 typedef struct mem_event_shared_page {
     uint32_t port;
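
Since the gfn field is overloaded for the new reasons (register value
for the CRx events, RIP for INT3, per the comments above), a listener
should dispatch on the reason code before interpreting it.  A minimal
sketch:

/* Dispatch sketch; field meanings follow the comments in this hunk. */
#include <xen/mem_event.h>

static void dispatch(const mem_event_request_t *req)
{
    switch ( req->reason )
    {
    case MEM_EVENT_REASON_CR0:
    case MEM_EVENT_REASON_CR3:
    case MEM_EVENT_REASON_CR4:
        /* req->gfn carries the new register value, not a frame number. */
        break;
    case MEM_EVENT_REASON_INT3:
        /* req->gla is the guest RIP; req->gfn is the gfn it maps to. */
        break;
    default:
        /* e.g. MEM_EVENT_REASON_VIOLATION: gfn is a real frame number. */
        break;
    }
}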

Attachment: 5.patch
Description: Text Data
