xen-devel

[Xen-devel] [PATCH] reenabling ptrace for paravirtualized guests

To: xen-devel list <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] reenabling ptrace for paravirtualized guests
From: Simon Kagstrom <simon.kagstrom@xxxxxx>
Date: Thu, 11 May 2006 17:02:59 +0200

Debugging with the gdbserver has been broken for paravirtualized
guests for a while, since parts of xc_ptrace.c assume HVM guests. The
following patch fixes this, along with single-stepping in attached
domains, and tries to separate out the HVM-specific address
handling. Changes:

- Functions va_to_ma32 and va_to_ma64 have been added. These look up
  an entry in a given page directory/table and translate it to a
  machine address, going through page_array for HVM guests (whose
  entries hold pseudo-physical addresses). They replace the
  open-coded page_array references; see the toy sketch after this
  list.

- paging_enabled now checks cr0 only for HVM guests and always
  returns 1 for PV guests, since Xen does not currently keep cr0 up
  to date for PV guests.

- Reversed the logic in PTRACE_SINGLESTEP so that stepping is
  allowed in attached domains and rejected for core files.
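
To make the first change concrete, here is a toy, self-contained
sketch of the translation the new helpers perform (not part of the
patch; all values are made up): PV table entries already hold machine
addresses, while HVM entries hold pseudo-physical ones that need an
extra hop through page_array.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* pfn -> mfn map, as xc_get_pfn_list() would fill it in */
    static uint32_t page_array[] = { 0x100, 0x2a5, 0x077 };

    static unsigned long toy_va_to_ma32(uint32_t entry, int is_hvm)
    {
        if (entry == 0)
            return 0;
        if (is_hvm) /* pseudo-physical: translate the frame number */
            return (unsigned long)page_array[entry >> PAGE_SHIFT] << PAGE_SHIFT;
        return entry; /* PV: already a machine address */
    }

    int main(void)
    {
        uint32_t pde = (2 << PAGE_SHIFT) | 0x067; /* frame 2 plus flag bits */
        printf("PV:  %#lx\n", toy_va_to_ma32(pde, 0));
        printf("HVM: %#lx\n", toy_va_to_ma32(pde, 1));
        return 0;
    }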

NOTE: I only have access to "regular" 32-bit hardware, so I have only
been able to test map_domain_va_32 for PV guests. It would be nice if
other people could test it on HVM, PAE and 64-bit guests.
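
For testing, something along these lines should exercise the PV path
(I'm assuming the gdbserver built under tools/debugger/gdb here; the
binary name, port and domid are just examples):

    # attach to domain 3, giving a domid where gdbserver takes a pid
    ./gdbserver-xen --attach 127.0.0.1:9999 3

    # then, from another shell
    gdb ./vmlinux-of-the-guest
    (gdb) target remote 127.0.0.1:9999
    (gdb) stepi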

// Simon

Signed-off-by: Simon Kagstrom <simon.kagstrom@xxxxxx>

===File /tmp/xc_ptrace.patch================================
diff -r fc9ec6fd3400 tools/libxc/xc_ptrace.c
--- a/tools/libxc/xc_ptrace.c   Mon May 08 14:56:18 2006 +0100
+++ b/tools/libxc/xc_ptrace.c   Thu May 11 16:49:59 2006 +0200
@@ -100,8 +100,15 @@ static inline int
 static inline int 
 paging_enabled(vcpu_guest_context_t *v)
 {
-    unsigned long cr0 = v->ctrlreg[0];
-    return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
+    /* This check can be removed when Xen places the correct values in
+     * cr0 for paravirtualized guests.
+     */
+    if ( v->flags & VGCF_HVM_GUEST ) {
+        unsigned long cr0 = v->ctrlreg[0];
+
+        return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
+    }
+    return 1;
 }
 
 /*
@@ -157,6 +164,43 @@ static long                     nr_pages
 static long                     nr_pages = 0;
 static unsigned long           *page_array = NULL;
 
+
+static unsigned long
+va_to_ma32(int cpu,
+         uint32_t *table,
+         unsigned long idx)
+{
+    unsigned long out;
+
+    /* Paravirtualized domains store machine addresses in tables while
+     * HVM domains keep pseudo-physical addresses. HVM domains
+     * therefore need one extra translation.
+     */
+    if ( (out = table[idx]) == 0 )
+        return 0;
+    if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
+        out = page_array[out >> PAGE_SHIFT] << PAGE_SHIFT;
+    return out;
+}
+
+static unsigned long
+va_to_ma64(int cpu,
+         uint64_t *table,
+         unsigned long idx)
+{
+    unsigned long out;
+
+    /* Paravirtualized domains store machine addresses in tables while
+     * HVM domains keep pseudo-physical addresses. HVM domains
+     * therefore need one extra translation.
+     */
+    if ( (out = table[idx]) == 0 )
+        return 0;
+    if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
+        out = page_array[out >> PAGE_SHIFT] << PAGE_SHIFT;
+    return out;
+}
+
 static void *
 map_domain_va_32(
     int xc_handle,
@@ -188,10 +232,8 @@ map_domain_va_32(
         if ( cr3_virt[cpu] == NULL )
             return NULL;
     }
-    if ( (pde = cr3_virt[cpu][vtopdi(va)]) == 0 )
-        return NULL;
-    if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
-        pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
+    if ( (pde = va_to_ma32(cpu, cr3_virt[cpu], vtopdi(va))) == 0 )
+        return NULL;
     if ( pde != pde_phys[cpu] )
     {
         pde_phys[cpu] = pde;
@@ -203,10 +245,9 @@ map_domain_va_32(
         if ( pde_virt[cpu] == NULL )
             return NULL;
     }
-    if ( (page = pde_virt[cpu][vtopti(va)]) == 0 )
-        return NULL;
-    if (ctxt[cpu].flags & VGCF_HVM_GUEST)
-        page = page_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
+    if ( (page = va_to_ma32(cpu, pde_virt[cpu], vtopti(va))) == 0 )
+        return NULL;
+
     if ( (page != page_phys[cpu]) || (perm != prev_perm[cpu]) )
     {
         page_phys[cpu] = page;
@@ -243,25 +284,25 @@ map_domain_va_pae(
     if ( l3 == NULL )
         return NULL;
 
-    l2p = l3[l3_table_offset_pae(va)] >> PAGE_SHIFT;
-    l2p = page_array[l2p];
-    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p);
+    if ( ( l2p = va_to_ma64(cpu, l3, l3_table_offset_pae(va)) ) == 0 )
+        return NULL;
+    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p >> PAGE_SHIFT);
     munmap(l3, PAGE_SIZE);
     if ( l2 == NULL )
         return NULL;
 
-    l1p = l2[l2_table_offset_pae(va)] >> PAGE_SHIFT;
-    l1p = page_array[l1p];
-    l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
+    if ( ( l1p = va_to_ma64(cpu, l2, l2_table_offset_pae(va)) ) == 0 )
+        return NULL;
+    l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p >> PAGE_SHIFT);
     munmap(l2, PAGE_SIZE);
     if ( l1 == NULL )
         return NULL;
 
-    p = l1[l1_table_offset_pae(va)] >> PAGE_SHIFT;
-    p = page_array[p];
+    if ( ( p = va_to_ma64(cpu, l1, l1_table_offset_pae(va)) ) == 0 )
+        return NULL;
     if ( v != NULL )
         munmap(v, PAGE_SIZE);
-    v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p);
+    v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
     munmap(l1, PAGE_SIZE);
     if ( v == NULL )
         return NULL;
@@ -289,38 +330,41 @@ map_domain_va_64(
     if ( l4 == NULL )
         return NULL;
 
-    l3p = l4[l4_table_offset(va)] >> PAGE_SHIFT;
-    l3p = page_array[l3p];
-    l3 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l3p);
+    if ( ( l3p = va_to_ma64(cpu, l4, l4_table_offset(va)) ) == 0 )
+        return NULL;
+    l3 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l3p >> PAGE_SHIFT);
     munmap(l4, PAGE_SIZE);
     if ( l3 == NULL )
         return NULL;
 
-    l2p = l3[l3_table_offset(va)] >> PAGE_SHIFT;
-    l2p = page_array[l2p];
-    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p);
+    if ( ( l2p = va_to_ma64(cpu, l3, l3_table_offset(va)) ) == 0 )
+        return NULL;
+    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p >> PAGE_SHIFT);
     munmap(l3, PAGE_SIZE);
     if ( l2 == NULL )
         return NULL;
 
     l1 = NULL;
-    l1e = l2[l2_table_offset(va)];
+    if ( ( l1e = va_to_ma64(cpu, l2, l2_table_offset(va)) ) == 0 )
+        return NULL;
     l1p = l1e >> PAGE_SHIFT;
     if (l1e & 0x80)  { /* 2M pages */
         p = (l1p + l1_table_offset(va));
+        if ( (ctxt[cpu].flags & VGCF_HVM_GUEST))
+            p = page_array[p] << PAGE_SHIFT;
     } else { /* 4K pages */
-        l1p = page_array[l1p];
-        l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, 
l1p);
+        if ( l1p == 0 )  /* l1e was already translated above */
+            return NULL;
+        l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
         munmap(l2, PAGE_SIZE);
         if ( l1 == NULL )
             return NULL;
 
-        p = l1[l1_table_offset(va)] >> PAGE_SHIFT;
-    }
-    p = page_array[p];
+        p = va_to_ma64(cpu, l1, l1_table_offset(va));
+    }
     if ( v != NULL )
         munmap(v, PAGE_SIZE);
-    v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p);
+    v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
     if (l1)
         munmap(l1, PAGE_SIZE);
     if ( v == NULL )
@@ -381,7 +425,7 @@ map_domain_va(
         if ( v != NULL )
             munmap(v, PAGE_SIZE);
 
-        page = page_array[va >> PAGE_SHIFT] << PAGE_SHIFT;
+        page = va_to_ma32(cpu, (uint32_t*)page_array, va >> PAGE_SHIFT) << PAGE_SHIFT;
 
         v = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE, 
                 perm, page >> PAGE_SHIFT);
@@ -526,7 +570,7 @@ xc_ptrace(
         break;
 
     case PTRACE_SINGLESTEP:
-        if (!current_isfile)
+        if (current_isfile)
               goto out_unspported; /* XXX not yet supported */
         /*  XXX we can still have problems if the user switches threads
          *  during single-stepping - but that just seems retarded
============================================================
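
(To try it: save the attachment and apply from the top of the tree
with something like "patch -p1 < /tmp/xc_ptrace.patch", then rebuild
tools/libxc and the debugger bits that link against it.)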
