To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 1 of 3] mem_access changes: trap injection, INT3 and CR0, 3, 4 mem events
From: Joe Epstein <jepstein98@xxxxxxxxx>
Date: Sun, 2 Jan 2011 14:13:48 -0800

* Added the ability to inject software traps and faults on the next resume.
  I was uncomfortable modifying the IRQ injection path to accommodate this,
  so the approach taken here may not be the best one; comments would be
  most appreciated. (A usage sketch follows after this list.)

* Added CR0, CR3, and CR4 write capture for memory events, delivered
  synchronously or asynchronously as selected by HVM parameters (see the
  sketch after this list).

* Added an INT3 memory event capture that works independently of the debugger.
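
For illustration, here is a minimal sketch of how a toolstack client might
enable these captures. It assumes the HVM_PARAM_MEMORY_EVENT_* parameters
and HVMPME_* flags that the header changes in this series introduce; the
names below are taken from the hypervisor code in this patch.

  #include <xenctrl.h>
  #include <xen/hvm/params.h>

  static int enable_capture(xc_interface *xch, domid_t dom)
  {
      int rc;

      /* Synchronous CR3 capture: the vCPU pauses until the monitor
       * responds; report only writes that change the register. */
      rc = xc_set_hvm_param(xch, dom, HVM_PARAM_MEMORY_EVENT_CR3,
                            HVMPME_mode_sync | HVMPME_onchangeonly);
      if ( rc != 0 )
          return rc;

      /* INT3 capture; note that HVMPME_onchangeonly is rejected with
       * -EINVAL for this parameter by the hvm.c change below. */
      return xc_set_hvm_param(xch, dom, HVM_PARAM_MEMORY_EVENT_INT3,
                              HVMPME_mode_sync);
  }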

NOTE: the included patches are based on my previous patch series from
Dec 28th, purely for readability. To apply the patches to the repository,
see the attached patch on the PATCH 0 of 3 email.
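
As a usage sketch for the trap injection, here is how a monitor might
re-inject an INT3 it swallowed, via the new HVMOP_inject_trap. The
xc_hvm_inject_trap() wrapper is hypothetical (whatever libxc plumbing
accompanies this series would take its place); the argument fields mirror
the handler in hvm.c below.

  #include <xenctrl.h>

  /* Hypothetical libxc wrapper around HVMOP_inject_trap. */
  int xc_hvm_inject_trap(xc_interface *xch, domid_t dom, int vcpu,
                         uint32_t trap, uint32_t error_code, uint64_t cr2);

  static int reinject_int3(xc_interface *xch, domid_t dom, int vcpu)
  {
      /* Vector 3 (TRAP_int3) takes no error code and does not use CR2.
       * The hypervisor returns -EBUSY if a trap is already pending on
       * this vCPU, and injects on the next resume otherwise. */
      return xc_hvm_inject_trap(xch, dom, vcpu, 3 /* TRAP_int3 */,
                                ~0U /* no error code */, 0);
  }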

Signed-off-by: Joe Epstein <jepstein98@xxxxxxxxx>

diff -r 1535fee95f47 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Sun Jan 02 13:19:51 2011 -0800
+++ b/xen/arch/x86/hvm/hvm.c    Sun Jan 02 13:21:16 2011 -0800
@@ -61,6 +61,8 @@
 #include <public/hvm/ioreq.h>
 #include <public/version.h>
 #include <public/memory.h>
+#include <asm/mem_event.h>
+#include <public/mem_event.h>

 bool_t __read_mostly hvm_enabled;

@@ -307,6 +309,15 @@
             return; /* bail */
         }
     }
+
+    /* Inject pending hw/sw trap */
+    if (v->arch.hvm_vcpu.inject_trap != -1)
+    {
+        hvm_inject_exception(v->arch.hvm_vcpu.inject_trap,
+                             v->arch.hvm_vcpu.inject_error_code,
+                             v->arch.hvm_vcpu.inject_cr2);
+        v->arch.hvm_vcpu.inject_trap = -1;
+    }
 }

 static void hvm_init_ioreq_page(
@@ -947,6 +958,8 @@
     spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
     INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);

+    v->arch.hvm_vcpu.inject_trap = -1;
+
 #ifdef CONFIG_COMPAT
     rc = setup_compat_arg_xlat(v);
     if ( rc != 0 )
@@ -3214,10 +3227,34 @@
             case HVM_PARAM_ACPI_IOPORTS_LOCATION:
                 rc = pmtimer_change_ioport(d, a.value);
                 break;
+            case HVM_PARAM_MEMORY_EVENT_INT3:
+                if ( a.value & HVMPME_onchangeonly )
+                    rc = -EINVAL;
+                break;
             }

-            if ( rc == 0 )
+            if ( rc == 0 )
+            {
                 d->arch.hvm_domain.params[a.index] = a.value;
+
+                switch( a.index )
+                {
+                case HVM_PARAM_MEMORY_EVENT_INT3:
+                {
+                    domain_pause(d);
+                    domain_unpause(d); /* Causes guest to latch new status */
+                    break;
+                }
+                case HVM_PARAM_MEMORY_EVENT_CR3:
+                {
+                    for_each_vcpu ( d, v )
+                        /* Latches new CR3 mask through CR0 code */
+                        hvm_funcs.update_guest_cr(v, 0);
+                    break;
+                }
+                }
+
+            }
+
         }
         else
         {
@@ -3629,6 +3666,41 @@
         break;
     }

+    case HVMOP_inject_trap:
+    {
+        xen_hvm_inject_trap_t tr;
+        struct domain *d;
+        struct vcpu *v;
+
+        if ( copy_from_guest(&tr, arg, 1 ) )
+            return -EFAULT;
+
+        rc = rcu_lock_target_domain_by_id(tr.domid, &d);
+        if ( rc != 0 )
+            return rc;
+
+        rc = -EINVAL;
+        if ( !is_hvm_domain(d) )
+            goto param_fail8;
+
+        rc = -ENOENT;
+        if ( tr.vcpuid >= d->max_vcpus || (v = d->vcpu[tr.vcpuid]) == NULL )
+            goto param_fail8;
+
+        if ( v->arch.hvm_vcpu.inject_trap != -1 )
+            rc = -EBUSY;
+        else
+        {
+            v->arch.hvm_vcpu.inject_trap       = tr.trap;
+            v->arch.hvm_vcpu.inject_error_code = tr.error_code;
+            v->arch.hvm_vcpu.inject_cr2        = tr.cr2;
+        }
+
+    param_fail8:
+        rcu_unlock_domain(d);
+        break;
+    }
+
     default:
     {
         gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
@@ -3669,6 +3741,161 @@
     return rc;
 }

+int hvm_memory_event_cr0(unsigned long value, unsigned long old) {
+    struct vcpu* v = current;
+    struct domain *d = v->domain;
+    mem_event_request_t req;
+
+    long p = d->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_CR0];
+
+    if ( p & HVMPME_MODE_MASK )
+    {
+        int rc;
+
+        if ( (p & HVMPME_onchangeonly) && value == old )
+            return 1;
+
+        rc = mem_event_check_ring(d);
+        if ( rc )
+            return rc;
+
+        memset(&req, 0, sizeof(req));
+        req.type = MEM_EVENT_TYPE_ACCESS;
+        req.reason = MEM_EVENT_REASON_CR0;
+
+        if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
+        {
+            req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+            vcpu_pause_nosync(v);
+        }
+
+        req.gfn = value;
+        req.vcpu_id = v->vcpu_id;
+
+        mem_event_put_request(d, &req);
+
+        return 1;
+    }
+    return 0;
+}
+
+int hvm_memory_event_cr3(unsigned long value, unsigned long old) {
+    struct vcpu* v = current;
+    struct domain *d = v->domain;
+    mem_event_request_t req;
+
+    long p = d->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_CR3];
+
+    if ( p & HVMPME_MODE_MASK )
+    {
+        int rc;
+
+        if ( (p & HVMPME_onchangeonly) && value == old )
+            return 1;
+
+        rc = mem_event_check_ring(d);
+        if ( rc )
+            return rc;
+
+        memset(&req, 0, sizeof(req));
+        req.type = MEM_EVENT_TYPE_ACCESS;
+        req.reason = MEM_EVENT_REASON_CR3;
+
+        if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
+        {
+            req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+            vcpu_pause_nosync(v);
+        }
+
+        req.gfn = value;
+        req.vcpu_id = v->vcpu_id;
+
+        mem_event_put_request(d, &req);
+
+        return 1;
+    }
+    return 0;
+}
+
+int hvm_memory_event_cr4(unsigned long value, unsigned long old) {
+    struct vcpu* v = current;
+    struct domain *d = v->domain;
+    mem_event_request_t req;
+
+    long p = d->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_CR4];
+
+    if ( p & HVMPME_MODE_MASK )
+    {
+        int rc;
+
+        if ( (p & HVMPME_onchangeonly) && value == old )
+            return 1;
+
+        rc = mem_event_check_ring(d);
+        if ( rc )
+            return rc;
+
+        memset(&req, 0, sizeof(req));
+        req.type = MEM_EVENT_TYPE_ACCESS;
+        req.reason = MEM_EVENT_REASON_CR4;
+
+        if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
+        {
+            req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+            vcpu_pause_nosync(v);
+        }
+
+        req.gfn = value;
+        req.vcpu_id = v->vcpu_id;
+
+        mem_event_put_request(d, &req);
+
+        return 1;
+    }
+    return 0;
+}
+
+int hvm_memory_event_int3(unsigned long gla) {
+    struct vcpu* v = current;
+    struct domain *d = v->domain;
+    mem_event_request_t req;
+
+    long p = d->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_INT3];
+
+    if ( p & HVMPME_MODE_MASK )
+    {
+        uint32_t pfec = PFEC_page_present;
+        unsigned long gfn;
+        int rc;
+
+        rc = mem_event_check_ring(d);
+        if ( rc )
+            return rc;
+
+        gfn = paging_gva_to_gfn(current, gla, &pfec);
+
+        memset(&req, 0, sizeof(req));
+        req.type = MEM_EVENT_TYPE_ACCESS;
+        req.reason = MEM_EVENT_REASON_INT3;
+
+        if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
+        {
+            req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+            vcpu_pause_nosync(v);
+        }
+
+        req.gfn = gfn;
+        req.offset = gla & ((1 << PAGE_SHIFT) - 1);
+        req.gla = gla;
+        req.gla_valid = 1;
+        req.vcpu_id = v->vcpu_id;
+
+        mem_event_put_request(d, &req);
+
+        return 1;
+    }
+    return 0;
+}

 /*
  * Local variables:
diff -r 1535fee95f47 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Sun Jan 02 13:19:51 2011 -0800
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Sun Jan 02 13:21:16 2011 -0800
@@ -1082,7 +1082,9 @@
         hvm_asid_flush_vcpu(v);
     }

-    debug_state = v->domain->debugger_attached;
+    debug_state = v->domain->debugger_attached
+                  || v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_INT3];
+
     if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
     {
         v->arch.hvm_vcpu.debug_state_latch = debug_state;
diff -r 1535fee95f47 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Sun Jan 02 13:19:51 2011 -0800
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Sun Jan 02 13:21:16 2011 -0800
@@ -1064,12 +1064,16 @@

         if ( paging_mode_hap(v->domain) )
         {
-            /* We manage GUEST_CR3 when guest CR0.PE is zero. */
+            /* We manage GUEST_CR3 when guest CR0.PE is zero or when
+             * CR3 mem events are on */
             uint32_t cr3_ctls = (CPU_BASED_CR3_LOAD_EXITING |
                                  CPU_BASED_CR3_STORE_EXITING);
             v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
             if ( !hvm_paging_enabled(v) )
                 v->arch.hvm_vmx.exec_control |= cr3_ctls;
+
+            if ( v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_CR3] )
+                v->arch.hvm_vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
+
             vmx_update_cpu_exec_control(v);

             /* Changing CR0.PE can change some bits in real CR4. */
@@ -1252,9 +1256,12 @@
     unsigned long intr_info = __vmread(VM_ENTRY_INTR_INFO);
     struct vcpu *curr = current;

+    int type = X86_EVENTTYPE_HW_EXCEPTION;
+
     switch ( trap )
     {
     case TRAP_debug:
+        type = X86_EVENTTYPE_SW_EXCEPTION;
         if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
         {
             __restore_debug_registers(curr);
@@ -1269,6 +1276,9 @@
             domain_pause_for_debugger();
             return;
         }
+
+        type = X86_EVENTTYPE_SW_EXCEPTION;
+        __vmwrite(VM_ENTRY_INSTRUCTION_LEN, 1); /* int3 */
     }

     if ( unlikely(intr_info & INTR_INFO_VALID_MASK) &&
@@ -1279,7 +1289,7 @@
             error_code = 0;
     }

-    __vmx_inject_exception(trap, X86_EVENTTYPE_HW_EXCEPTION, error_code);
+    __vmx_inject_exception(trap, type, error_code);

     if ( trap == TRAP_page_fault )
         HVMTRACE_LONG_2D(PF_INJECT, error_code,
@@ -1565,6 +1575,8 @@
     unsigned long value;
     struct vcpu *v = current;
     struct vlapic *vlapic = vcpu_vlapic(v);
+    int rc = 0;
+    unsigned long old;

     switch ( gp )
     {
@@ -1589,13 +1601,25 @@
     switch ( cr )
     {
     case 0:
-        return !hvm_set_cr0(value);
+        old = v->arch.hvm_vcpu.guest_cr[0];
+        rc = !hvm_set_cr0(value);
+        if (rc)
+            hvm_memory_event_cr0(value, old);
+        return rc;

     case 3:
-        return !hvm_set_cr3(value);
+        old = v->arch.hvm_vcpu.guest_cr[3];
+        rc = !hvm_set_cr3(value);
+        if (rc)
+            hvm_memory_event_cr3(value, old);
+        return rc;

     case 4:
-        return !hvm_set_cr4(value);
+        old = v->arch.hvm_vcpu.guest_cr[4];
+        rc = !hvm_set_cr4(value);
+        if (rc)
+            hvm_memory_event_cr4(value, old);
+        return rc;

     case 8:
         vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
@@ -1676,11 +1700,17 @@
         cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
         mov_from_cr(cr, gp, regs);
         break;
-    case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
+    case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
+    {
+        unsigned long old = v->arch.hvm_vcpu.guest_cr[0];
         v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
         vmx_update_guest_cr(v, 0);
+
+        hvm_memory_event_cr0(v->arch.hvm_vcpu.guest_cr[0], old);
+
         HVMTRACE_0D(CLTS);
         break;
+    }
     case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
         value = v->arch.hvm_vcpu.guest_cr[0];
         /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
@@ -2350,13 +2380,29 @@
                 goto exit_and_crash;
             domain_pause_for_debugger();
             break;
-        case TRAP_int3:
-            if ( !v->domain->debugger_attached )
-                goto exit_and_crash;
-            update_guest_eip(); /* Safe: INT3 */
-            current->arch.gdbsx_vcpu_event = TRAP_int3;
-            domain_pause_for_debugger();
-            break;
+        case TRAP_int3:
+        {
+            if ( v->domain->debugger_attached )
+            {
+                update_guest_eip(); /* Safe: INT3 */
+                current->arch.gdbsx_vcpu_event = TRAP_int3;
+                domain_pause_for_debugger();
+                break;
+            }
+            else {
+                int handled = hvm_memory_event_int3(regs->eip);
+
+                if ( handled < 0 )
+                {
+                    vmx_inject_exception(TRAP_int3,
+                                         HVM_DELIVER_NO_ERROR_CODE, 0);
+                    break;
+                }
+                else if ( handled )
+                    break;
+            }
+
+            goto exit_and_crash;
+        }
         case TRAP_no_device:
             vmx_fpu_dirty_intercept();
             break;
diff -r 1535fee95f47 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Sun Jan 02 13:19:51 2011 -0800
+++ b/xen/arch/x86/mm/p2m.c     Sun Jan 02 13:21:16 2011 -0800
@@ -2910,6 +2910,7 @@

     memset(&req, 0, sizeof(req));
     req.type = MEM_EVENT_TYPE_ACCESS;
+    req.reason = MEM_EVENT_REASON_VIOLATION;

     /* Pause the current VCPU unconditionally */
     vcpu_pause_nosync(v);
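
For reviewers experimenting with the series, a rough sketch of the consumer
side follows: dispatching the new event reasons and releasing a
synchronously paused vCPU. The get_request()/put_response() helpers are
hypothetical stand-ins for the shared mem_event ring plumbing; only the
reason codes, flags, and gfn/gla fields come from this series.

  #include <stdio.h>
  #include <string.h>
  #include <xenctrl.h>
  #include <xen/mem_event.h>

  /* Hypothetical helpers for the shared mem_event ring. */
  mem_event_request_t *get_request(void);
  void put_response(mem_event_response_t *rsp);

  static void handle_event(void)
  {
      mem_event_request_t *req = get_request();
      mem_event_response_t rsp;

      switch ( req->reason )
      {
      case MEM_EVENT_REASON_CR3:
          /* For CR events this patch reports the new value in req->gfn. */
          printf("vcpu %u wrote CR3 <- %lx\n", req->vcpu_id,
                 (unsigned long)req->gfn);
          break;
      case MEM_EVENT_REASON_INT3:
          /* gla is the guest EIP; gfn/offset locate the breakpoint. */
          printf("vcpu %u hit INT3 at %lx\n", req->vcpu_id,
                 (unsigned long)req->gla);
          break;
      }

      memset(&rsp, 0, sizeof(rsp));
      rsp.vcpu_id = req->vcpu_id;
      /* Echo the flags: the hypervisor's resume path unpauses a vCPU
       * whose response carries MEM_EVENT_FLAG_VCPU_PAUSED. */
      rsp.flags = req->flags;
      put_response(&rsp);
  }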

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
