Hi Simon, Keir,
With this patch I am seeing that gdbserver is broken for HVM guests. I
am a bit busy right now; I will look into it once I get time.
Thanks & Regards,
Nitin
-----------------------------------------------------------------------------------
Open Source Technology Center, Intel Corp
>-----Original Message-----
>From: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx [mailto:xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx] On Behalf Of Xen patchbot-unstable
>Sent: Friday, May 19, 2006 8:40 AM
>To: xen-changelog@xxxxxxxxxxxxxxxxxxx
>Subject: [Xen-changelog] Fix a crash in gdbserver-xen where it references unmapped memory if
>
># HG changeset patch
># User kaf24@xxxxxxxxxxxxxxxxxxxx
># Node ID 1855124935e2810ebb1e8c577dadacc079f6487e
># Parent aa17b7173325ca8a86c14f4111383d041290cd03
>Fix a crash in gdbserver-xen where it references unmapped memory if
>the page directory / page table pages are not present (for example
>through dereferencing NULL in the gdb session). Changes:
>
>- map_domain_va_32 has been rewritten to match map_domain_va_pae and
> map_domain_va_64.
>
>- All three functions have been extended to handle multiple vCPUs
>  (untested, but should work), and to check for _PAGE_PRESENT (see
>  the sketch below).
>
>- Keir's workaround for the compile error in map_domain_va_64 has been
>  removed, and the error itself hopefully fixed.
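>
>The pattern at each level of the walk is the same: map the table page,
>read the entry, unmap it, and only descend if the entry has
>_PAGE_PRESENT set. A condensed one-level sketch taken from the new
>map_domain_va_32 below (error paths abbreviated):
>
>    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE,
>                              PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
>    if ( l2 == NULL )
>        return NULL;
>    l1p = to_ma(cpu, l2[l2_table_offset(va)]);  /* read the PDE */
>    munmap(l2, PAGE_SIZE);
>    if ( !(l1p & _PAGE_PRESENT) )  /* new: bail out instead of mapping */
>        return NULL;               /* a bogus frame                    */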
>
>Signed-Off-By: Simon Kagstrom <simon.kagstrom@xxxxxx>
>---
> tools/libxc/xc_ptrace.c |  140 +++++++++++++++++++++---------------------------
> 1 files changed, 64 insertions(+), 76 deletions(-)
>
>diff -r aa17b7173325 -r 1855124935e2 tools/libxc/xc_ptrace.c
>--- a/tools/libxc/xc_ptrace.c Thu May 18 21:41:56 2006 +0100
>+++ b/tools/libxc/xc_ptrace.c Fri May 19 15:22:11 2006 +0100
>@@ -185,61 +185,34 @@ map_domain_va_32(
> void *guest_va,
> int perm)
> {
>- unsigned long pde, page;
>- unsigned long va = (unsigned long)guest_va;
>-
>- static unsigned long cr3_phys[MAX_VIRT_CPUS];
>- static uint32_t *cr3_virt[MAX_VIRT_CPUS];
>- static unsigned long pde_phys[MAX_VIRT_CPUS];
>- static uint32_t *pde_virt[MAX_VIRT_CPUS];
>- static unsigned long page_phys[MAX_VIRT_CPUS];
>- static uint32_t *page_virt[MAX_VIRT_CPUS];
>- static int prev_perm[MAX_VIRT_CPUS];
>-
>- if (ctxt[cpu].ctrlreg[3] == 0)
>- return NULL;
>- if ( ctxt[cpu].ctrlreg[3] != cr3_phys[cpu] )
>- {
>- cr3_phys[cpu] = ctxt[cpu].ctrlreg[3];
>- if ( cr3_virt[cpu] )
>- munmap(cr3_virt[cpu], PAGE_SIZE);
>- cr3_virt[cpu] = xc_map_foreign_range(
>- xc_handle, current_domid, PAGE_SIZE, PROT_READ,
>- cr3_phys[cpu] >> PAGE_SHIFT);
>- if ( cr3_virt[cpu] == NULL )
>- return NULL;
>- }
>- pde = to_ma(cpu, cr3_virt[cpu][vtopdi(va)]);
>- if ( pde != pde_phys[cpu] )
>- {
>- pde_phys[cpu] = pde;
>- if ( pde_virt[cpu] )
>- munmap(pde_virt[cpu], PAGE_SIZE);
>- pde_virt[cpu] = xc_map_foreign_range(
>- xc_handle, current_domid, PAGE_SIZE, PROT_READ,
>- pde_phys[cpu] >> PAGE_SHIFT);
>- if ( pde_virt[cpu] == NULL )
>- return NULL;
>- }
>- page = to_ma(cpu, pde_virt[cpu][vtopti(va)]);
>-
>- if ( (page != page_phys[cpu]) || (perm != prev_perm[cpu]) )
>- {
>- page_phys[cpu] = page;
>- if ( page_virt[cpu] )
>- munmap(page_virt[cpu], PAGE_SIZE);
>- page_virt[cpu] = xc_map_foreign_range(
>- xc_handle, current_domid, PAGE_SIZE, perm,
>- page_phys[cpu] >> PAGE_SHIFT);
>- if ( page_virt[cpu] == NULL )
>- {
>- page_phys[cpu] = 0;
>- return NULL;
>- }
>- prev_perm[cpu] = perm;
>- }
>-
>- return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
>+ unsigned long l1p, p, va = (unsigned long)guest_va;
>+ uint32_t *l2, *l1;
>+ static void *v[MAX_VIRT_CPUS];
>+
>+ l2 = xc_map_foreign_range(
>+ xc_handle, current_domid, PAGE_SIZE, PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
>+ if ( l2 == NULL )
>+ return NULL;
>+
>+ l1p = to_ma(cpu, l2[l2_table_offset(va)]);
>+ munmap(l2, PAGE_SIZE);
>+ if ( !(l1p & _PAGE_PRESENT) )
>+ return NULL;
>+ l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l1p >> PAGE_SHIFT);
>+ if ( l1 == NULL )
>+ return NULL;
>+
>+ p = to_ma(cpu, l1[l1_table_offset(va)]);
>+ munmap(l1, PAGE_SIZE);
>+ if ( !(p & _PAGE_PRESENT) )
>+ return NULL;
>+ if ( v[cpu] != NULL )
>+ munmap(v[cpu], PAGE_SIZE);
>+ v[cpu] = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
>+ if ( v[cpu] == NULL )
>+ return NULL;
>+
>+ return (void *)((unsigned long)v[cpu] | (va & (PAGE_SIZE - 1)));
> }
>
>
>@@ -252,7 +225,7 @@ map_domain_va_pae(
> {
> unsigned long l2p, l1p, p, va = (unsigned long)guest_va;
> uint64_t *l3, *l2, *l1;
>- static void *v;
>+ static void *v[MAX_VIRT_CPUS];
>
> l3 = xc_map_foreign_range(
> xc_handle, current_domid, PAGE_SIZE, PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
>@@ -260,26 +233,32 @@ map_domain_va_pae(
> return NULL;
>
> l2p = to_ma(cpu, l3[l3_table_offset_pae(va)]);
>+ munmap(l3, PAGE_SIZE);
>+ if ( !(l2p & _PAGE_PRESENT) )
>+ return NULL;
> l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p >> PAGE_SHIFT);
>- munmap(l3, PAGE_SIZE);
> if ( l2 == NULL )
> return NULL;
>
> l1p = to_ma(cpu, l2[l2_table_offset_pae(va)]);
>+ munmap(l2, PAGE_SIZE);
>+ if ( !(l1p & _PAGE_PRESENT) )
>+ return NULL;
> l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p >> PAGE_SHIFT);
>- munmap(l2, PAGE_SIZE);
> if ( l1 == NULL )
> return NULL;
>
> p = to_ma(cpu, l1[l1_table_offset_pae(va)]);
>- if ( v != NULL )
>- munmap(v, PAGE_SIZE);
>- v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
> munmap(l1, PAGE_SIZE);
>- if ( v == NULL )
>- return NULL;
>-
>- return (void *)((unsigned long)v | (va & (PAGE_SIZE - 1)));
>+ if ( !(p & _PAGE_PRESENT) )
>+ return NULL;
>+ if ( v[cpu] != NULL )
>+ munmap(v[cpu], PAGE_SIZE);
>+ v[cpu] = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
>+ if ( v[cpu] == NULL )
>+ return NULL;
>+
>+ return (void *)((unsigned long)v[cpu] | (va & (PAGE_SIZE - 1)));
> }
>
> #ifdef __x86_64__
>@@ -292,7 +271,7 @@ map_domain_va_64(
> {
> unsigned long l3p, l2p, l1p, l1e, p, va = (unsigned long)guest_va;
> uint64_t *l4, *l3, *l2, *l1;
>- static void *v;
>+ static void *v[MAX_VIRT_CPUS];
>
> if ((ctxt[cpu].ctrlreg[4] & 0x20) == 0 ) /* legacy ia32 mode */
> return map_domain_va_32(xc_handle, cpu, guest_va, perm);
>@@ -303,24 +282,33 @@ map_domain_va_64(
> return NULL;
>
> l3p = to_ma(cpu, l4[l4_table_offset(va)]);
>+ munmap(l4, PAGE_SIZE);
>+ if ( !(l3p & _PAGE_PRESENT) )
>+ return NULL;
> l3 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l3p >> PAGE_SHIFT);
>- munmap(l4, PAGE_SIZE);
> if ( l3 == NULL )
> return NULL;
>
> l2p = to_ma(cpu, l3[l3_table_offset(va)]);
>+ munmap(l3, PAGE_SIZE);
>+ if ( !(l2p & _PAGE_PRESENT) )
>+ return NULL;
> l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p >> PAGE_SHIFT);
>- munmap(l3, PAGE_SIZE);
> if ( l2 == NULL )
> return NULL;
>
> l1 = NULL;
> l1e = to_ma(cpu, l2[l2_table_offset(va)]);
>+ if ( !(l1e & _PAGE_PRESENT) )
>+ {
>+ munmap(l2, PAGE_SIZE);
>+ return NULL;
>+ }
> l1p = l1e >> PAGE_SHIFT;
> if (l1e & 0x80) { /* 2M pages */
> p = to_ma(cpu, (l1p + l1_table_offset(va)) << PAGE_SHIFT);
> } else { /* 4K pages */
>- //l1p = to_ma(cpu, l1e[l1_table_offset(va)]);
>+ l1p = to_ma(cpu, l1p);
> l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p >> PAGE_SHIFT);
> munmap(l2, PAGE_SIZE);
> if ( l1 == NULL )
>@@ -328,15 +316,15 @@ map_domain_va_64(
>
> p = to_ma(cpu, l1[l1_table_offset(va)]);
> }
>- if ( v != NULL )
>- munmap(v, PAGE_SIZE);
>- v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
>+ if ( v[cpu] != NULL )
>+ munmap(v[cpu], PAGE_SIZE);
>+ v[cpu] = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
> if (l1)
> munmap(l1, PAGE_SIZE);
>- if ( v == NULL )
>- return NULL;
>-
>- return (void *)((unsigned long)v | (va & (PAGE_SIZE - 1)));
>+ if ( v[cpu] == NULL )
>+ return NULL;
>+
>+ return (void *)((unsigned long)v[cpu] | (va & (PAGE_SIZE - 1)));
> }
> #endif
>
>
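>For callers the visible change is that a lookup through a not-present
>page directory or page table entry now returns NULL instead of
>faulting, so NULL must be handled; and since v[cpu] is static, the
>returned pointer is only valid until the next call for that vCPU
>unmaps it. A minimal hypothetical usage sketch (peek_word is not part
>of the patch; the map_domain_va_32 signature is as above):
>
>    /* Hypothetical: copy one word out of the guest before a later
>       call invalidates the mapping. */
>    static long peek_word(int xc_handle, int cpu, void *guest_va)
>    {
>        void *p = map_domain_va_32(xc_handle, cpu, guest_va, PROT_READ);
>        if ( p == NULL )
>            return -1;          /* VA not mapped in the guest */
>        return *(long *)p;      /* copy now; do not cache p   */
>    }
>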
>_______________________________________________
>Xen-changelog mailing list
>Xen-changelog@xxxxxxxxxxxxxxxxxxx
>http://lists.xensource.com/xen-changelog
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel