To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 18/18] xenpaging: random debug statements and partial fixes
From: Olaf Hering <olaf@xxxxxxxxx>
Date: Fri, 15 Oct 2010 16:12:20 +0200
References: <20101015141202.309585877@xxxxxxxxx>
User-agent: quilt/0.48-4.4
Random debug statements, hacks, and partial fixes.
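
For reference, the timing instrumentation added to tools/xenpaging/xenpaging.c
below boils down to the following pattern. This is a minimal standalone
sketch: time_diff() is taken verbatim from the hunk, while main() and the
final printf are illustrative only. clock_gettime() is why the Makefile hunk
adds -lrt.

    #include <stdio.h>
    #include <time.h>

    /* Subtract two CLOCK_MONOTONIC samples, borrowing one second
     * when the nanosecond difference goes negative. */
    static void time_diff(struct timespec *start, struct timespec *end,
                          struct timespec *diff)
    {
        if ((end->tv_nsec - start->tv_nsec) < 0) {
            diff->tv_sec = end->tv_sec - start->tv_sec - 1;
            diff->tv_nsec = 1000000000 + end->tv_nsec - start->tv_nsec;
        } else {
            diff->tv_sec = end->tv_sec - start->tv_sec;
            diff->tv_nsec = end->tv_nsec - start->tv_nsec;
        }
    }

    int main(void)
    {
        struct timespec ce, cl, cd;

        if (clock_gettime(CLOCK_MONOTONIC, &ce))
            perror("clock_gettime");
        /* ... work to be measured, e.g. the eviction loop ... */
        if (clock_gettime(CLOCK_MONOTONIC, &cl))
            perror("clock_gettime");
        time_diff(&ce, &cl, &cd);
        printf("elapsed %d.%09d\n", (int)cd.tv_sec, (int)cd.tv_nsec);
        return 0;
    }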

Presented-by: Olaf Hering <olaf@xxxxxxxxx>

---
 tools/firmware/hvmloader/util.c  |    5 ++++-
 tools/libxc/xc_dom_x86.c         |    2 ++
 tools/libxc/xc_domain.c          |    3 +++
 tools/libxc/xc_offline_page.c    |    1 +
 tools/xenpaging/Makefile         |    2 +-
 tools/xenpaging/xenpaging.c      |   26 +++++++++++++++++++++++++-
 tools/xentrace/formats           |    5 +++++
 xen/arch/x86/debug.c             |    2 ++
 xen/arch/x86/hvm/svm/svm.c       |    2 ++
 xen/arch/x86/hvm/vmx/vmx.c       |    4 ++++
 xen/arch/x86/mm.c                |   26 +++++++++++++++++++++++++-
 xen/arch/x86/mm/hap/guest_walk.c |    2 ++
 xen/arch/x86/mm/mem_event.c      |    9 +++++++++
 xen/arch/x86/mm/mem_sharing.c    |    4 ++++
 xen/arch/x86/mm/p2m.c            |    7 +++++++
 xen/arch/x86/mm/shadow/common.c  |    4 ++++
 xen/arch/x86/mm/shadow/multi.c   |    2 ++
 xen/common/grant_table.c         |   27 ++++++++++++++++++++++-----
 xen/common/memory.c              |    3 +++
 xen/include/asm-x86/p2m.h        |    4 ++++
 20 files changed, 131 insertions(+), 9 deletions(-)
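
Most of the hypervisor hunks below repeat one convention when a gfn lookup
hits a paged-out page: log the p2m type, ask the pager to bring the page
back, and fail with -ENOENT so the caller retries. A hypothetical helper
showing that shape (not standalone code: gfn_to_mfn, p2m_is_paging,
p2m_is_paged and p2m_mem_paging_populate are Xen-internal, used here exactly
as in the hunks; the helper name is made up):

    /* Hypothetical sketch of the recurring pattern, not a real Xen function. */
    static int lookup_gfn_or_populate(struct domain *d, unsigned long gfn,
                                      mfn_t *mfn)
    {
        p2m_type_t p2mt;

        *mfn = gfn_to_mfn(d, gfn, &p2mt);
        if ( unlikely(p2m_is_paging(p2mt)) )
        {
            printk("%s: gfn %lx p2m %x\n", __func__, gfn, p2mt);
            if ( p2m_is_paged(p2mt) )
                p2m_mem_paging_populate(d, gfn); /* wake the pager */
            return -ENOENT;                      /* caller retries later */
        }
        return 0;
    }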

--- xen-4.0.1-testing.orig/tools/firmware/hvmloader/util.c
+++ xen-4.0.1-testing/tools/firmware/hvmloader/util.c
@@ -329,7 +329,7 @@ void *mem_alloc(uint32_t size, uint32_t
         mfn = reserve >> PAGE_SHIFT;
 
         /* Try to allocate a brand new page in the reserved area. */
-        if ( !over_allocated )
+        if ( 0 && !over_allocated )
         {
             uint8_t delay = 0;
             xmr.domid = DOMID_SELF;
@@ -339,6 +339,7 @@ void *mem_alloc(uint32_t size, uint32_t
             set_xen_guest_handle(xmr.extent_start, &mfn);
             do {
                 rc = hypercall_memory_op(XENMEM_populate_physmap, &xmr);
+                printf("%s(%u): %x %x\n", __func__,__LINE__,rc, delay);
                 if ( rc == 0 )
                     cpu_relax();
             } while ( rc == 0 && ++delay );
@@ -364,6 +365,7 @@ void *mem_alloc(uint32_t size, uint32_t
         xatp.gpfn  = mfn;
         do {
             rc = hypercall_memory_op(XENMEM_add_to_physmap, &xatp);
+            printf("%s(%u): %x\n", __func__,__LINE__,rc);
             if ( rc == -ENOENT )
                 cpu_relax();
         } while ( rc == -ENOENT );
@@ -622,6 +624,7 @@ uint16_t get_cpu_mhz(void)
     xatp.gpfn  = (unsigned long)shared_info >> 12;
     do {
         rc = hypercall_memory_op(XENMEM_add_to_physmap, &xatp);
+        printf("%s(%u): %x\n", __func__,__LINE__,rc);
         if ( rc == -ENOENT )
             cpu_relax();
     } while ( rc == -ENOENT );
--- xen-4.0.1-testing.orig/tools/libxc/xc_dom_x86.c
+++ xen-4.0.1-testing/tools/libxc/xc_dom_x86.c
@@ -812,6 +812,7 @@ int arch_setup_bootlate(struct xc_dom_im
         rc = xc_memory_op(dom->guest_xc, XENMEM_add_to_physmap, &xatp);
         if ( rc != 0 )
         {
+            fprintf(stderr, "%s(%u) rc %x errno %x\n", 
__func__,__LINE__,rc,errno);
             xc_dom_panic(XC_INTERNAL_ERROR, "%s: mapping shared_info failed "
                          "(pfn=0x%" PRIpfn ", rc=%d)\n",
                          __FUNCTION__, xatp.gpfn, rc);
@@ -828,6 +829,7 @@ int arch_setup_bootlate(struct xc_dom_im
             rc = xc_memory_op(dom->guest_xc, XENMEM_add_to_physmap, &xatp);
             if ( rc != 0 )
             {
+                fprintf(stderr, "%s(%u) rc %x errno %x\n", __func__,__LINE__,rc,errno);
                 if ( (i > 0) && (errno == EINVAL) )
                 {
                     xc_dom_printf("%s: %d grant tables mapped\n", __FUNCTION__,
--- xen-4.0.1-testing.orig/tools/libxc/xc_domain.c
+++ xen-4.0.1-testing/tools/libxc/xc_domain.c
@@ -564,7 +564,10 @@ static int do_xenmem_op_retry(int xc_han
         }
 
         if ( err )
+        {
+            fprintf(stderr, "%s: delay reset: %d err %x count %lx start %lx 
delay %lu/%lu\n",__func__,cmd,err,count,start,delay,delay/666);
             delay = 0;
+        }
 
         start += err;
         count -= err;
--- xen-4.0.1-testing.orig/tools/libxc/xc_offline_page.c
+++ xen-4.0.1-testing/tools/libxc/xc_offline_page.c
@@ -517,6 +517,7 @@ static int exchange_page(int xc_handle,
     set_xen_guest_handle(exchange.out.extent_start, &out_mfn);
 
     rc = xc_memory_op(xc_handle, XENMEM_exchange, &exchange);
+    fprintf(stderr, "%s(%u) rc %x errno %x\n", __func__,__LINE__,rc,errno);
 
     if (!rc)
         *new_mfn = out_mfn;
--- xen-4.0.1-testing.orig/tools/xenpaging/Makefile
+++ xen-4.0.1-testing/tools/xenpaging/Makefile
@@ -4,7 +4,7 @@ include $(XEN_ROOT)/tools/Rules.mk
 CFLAGS   += -I $(XEN_XC)
 CFLAGS   += -I ./
 CFLAGS   += $(CFLAGS_libxenctrl) $(CFLAGS_libxenstore)
-LDFLAGS  += $(LDFLAGS_libxenctrl) $(LDFLAGS_libxenstore)
+LDFLAGS  += $(LDFLAGS_libxenctrl) $(LDFLAGS_libxenstore) -lrt
 
 POLICY    = default
 
--- xen-4.0.1-testing.orig/tools/xenpaging/xenpaging.c
+++ xen-4.0.1-testing/tools/xenpaging/xenpaging.c
@@ -21,6 +21,7 @@
 
 
 #include <inttypes.h>
+#include <time.h>
 #include <stdlib.h>
 #include <signal.h>
 #include <xc_private.h>
@@ -249,6 +250,7 @@ int xenpaging_teardown(xenpaging_t *pagi
     rc = xc_mem_event_disable(paging->xc_handle, paging->mem_event.domain_id);
     if ( rc != 0 )
     {
+        fprintf(stderr, "%s: rc %x errno %x\n", __func__, rc, errno);
         ERROR("Error tearing down domain paging in xen");
     }
 
@@ -370,6 +372,7 @@ int xenpaging_evict_page(xenpaging_t *pa
         goto out;
     }
 
+    fprintf(stderr, "%s(%u) > gfn %lx pageslot %d\n", __func__, __LINE__, 
victim->gfn, i);
     /* Notify policy of page being paged out */
     policy_notify_paged_out(paging->mem_event.domain_id, victim->gfn);
 
@@ -406,6 +409,8 @@ static int xenpaging_populate_page(
     void *page;
     int ret;
 
+    _gfn = *gfn;
+    fprintf(stderr, "%s(%u) < gfn %lx pageslot %d\n", __func__, __LINE__, 
_gfn, i);
     /* Tell Xen to allocate a page for the domain */
     ret = xc_mem_paging_prep(paging->xc_handle, paging->mem_event.domain_id,
                              *gfn);
@@ -441,6 +446,17 @@ static int xenpaging_populate_page(
     return ret;
 }
 
+static void time_diff(struct timespec *start, struct timespec *end, struct timespec *diff)
+{
+    if ((end->tv_nsec - start->tv_nsec) < 0) {
+        diff->tv_sec = end->tv_sec - start->tv_sec - 1;
+        diff->tv_nsec = 1000000000 + end->tv_nsec - start->tv_nsec;
+    } else {
+        diff->tv_sec = end->tv_sec - start->tv_sec;
+        diff->tv_nsec = end->tv_nsec - start->tv_nsec;
+    }
+}
+
 static int evict_victim(xenpaging_t *paging, domid_t domain_id,
                         xenpaging_victim_t *victim, int fd, int i)
 {
@@ -501,6 +517,7 @@ int main(int argc, char *argv[])
     mode_t open_mode = S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR | S_IWGRP | S_IWOTH;
     char filename[80];
     int fd;
+    struct timespec ce, cl, cd;
 
     if ( argc != 3 )
     {
@@ -544,6 +561,8 @@ int main(int argc, char *argv[])
 
     /* Evict pages */
     memset(victims, 0, sizeof(xenpaging_victim_t) * num_pages);
+    if (clock_gettime(CLOCK_MONOTONIC, &ce))
+        perror("clock_gettime");
     for ( i = 0; i < num_pages; i++ )
     {
         rc = evict_victim(paging, domain_id, &victims[i], fd, i);
@@ -554,7 +573,10 @@ int main(int argc, char *argv[])
         if ( i % 100 == 0 )
             DPRINTF("%d pages evicted\n", i);
     }
-
+    if (clock_gettime(CLOCK_MONOTONIC, &cl))
+        perror("clock_gettime");
+    time_diff(&ce, &cl, &cd);
+    DPRINTF("%s: c %d.%09d\n", __func__, (int)cd.tv_sec, (int)cd.tv_nsec);
     DPRINTF("pages evicted\n");
 
     /* Swap pages in and out */
@@ -625,8 +647,10 @@ int main(int argc, char *argv[])
             else
             {
                 DPRINTF("page already populated (domain = %d; vcpu = %d;"
+                        " p2mt = %x;"
                         " gfn = %"PRIx64"; paused = %"PRId64")\n",
                         paging->mem_event.domain_id, req.vcpu_id,
+                        req.p2mt,
                         req.gfn, req.flags & MEM_EVENT_FLAG_VCPU_PAUSED);
 
                 /* Tell Xen to resume the vcpu */
--- xen-4.0.1-testing.orig/tools/xentrace/formats
+++ xen-4.0.1-testing/tools/xentrace/formats
@@ -5,6 +5,7 @@
 0x0001f003  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  cpu_change        0x%(1)08x
 0x0001f004  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  trace_irq    [ vector = %(1)d, count = %(2)d, tot_cycles = 0x%(3)08x, max_cycles = 0x%(4)08x ]
 
+0x00021002  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  continue_running    [ dom:vcpu = 0x%(1)08x ]
 0x00021011  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  running_to_runnable [ dom:vcpu = 0x%(1)08x ]
 0x00021021  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  running_to_blocked  [ dom:vcpu = 0x%(1)08x ]
 0x00021031  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  running_to_offline  [ dom:vcpu = 0x%(1)08x ]
@@ -70,10 +71,14 @@
 0x00082018  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  CLTS
 0x00082019  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  LMSW        [ value = 0x%(1)08x ]
 0x00082119  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  LMSW        [ value = 0x%(2)08x%(1)08x ]
+0x00082020  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  INTR_WINDOW [ value = 0x%(1)08x ]
+0x00082021  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  NPF         [ gpa = 0x%(2)08x%(1)08x mfn = 0x%(4)08x%(3)08x qual = 0x%(5)04x p2mt = 0x%(6)04x ]
 
 0x0010f001  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  page_grant_map      [ domid = %(1)d ]
 0x0010f002  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  page_grant_unmap    [ domid = %(1)d ]
 0x0010f003  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  page_grant_transfer [ domid = %(1)d ]
+0x0010f004  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  p2m_set_entry [ gfn 0x%(2)08x%(1)08x mfn 0x%(4)08x%(3)08x pmt 0x%(5)08x domain:order 0x%(6)08x ]
+0x0010f005  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  decrease_reservation [ gfn 0x%(2)08x%(1)08x domain:order 0x%(3)08x ]
 
 0x0020f001  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  hypercall  [ eip = 0x%(1)08x, eax = 0x%(2)08x ]
 0x0020f101  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  hypercall  [ rip = 0x%(2)08x%(1)08x, eax = 0x%(3)08x ]
--- xen-4.0.1-testing.orig/xen/arch/x86/debug.c
+++ xen-4.0.1-testing/xen/arch/x86/debug.c
@@ -62,6 +62,8 @@ dbg_hvm_va2mfn(dbgva_t vaddr, struct dom
     }
 
     mfn = mfn_x(gfn_to_mfn(dp, gfn, &gfntype)); 
+if ( p2m_is_paging(gfntype) )
+printk("%s: gfn %lx p2m %x\n",__func__,gfn,gfntype);
     if ( p2m_is_readonly(gfntype) && toaddr )
     {
         DBGP2("kdb:p2m_is_readonly: gfntype:%x\n", gfntype);
--- xen-4.0.1-testing.orig/xen/arch/x86/hvm/svm/svm.c
+++ xen-4.0.1-testing/xen/arch/x86/hvm/svm/svm.c
@@ -232,6 +232,8 @@ static int svm_vmcb_restore(struct vcpu
         if ( c->cr0 & X86_CR0_PG )
         {
             mfn = mfn_x(gfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT, &p2mt));
+if ( p2m_is_paging(p2mt) )
+printk("%s: gfn %"PRIx64" p2m %x\n",__func__,c->cr3,p2mt);
             if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) )
             {
                 gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n",
--- xen-4.0.1-testing.orig/xen/arch/x86/hvm/vmx/vmx.c
+++ xen-4.0.1-testing/xen/arch/x86/hvm/vmx/vmx.c
@@ -496,6 +496,8 @@ static int vmx_restore_cr0_cr3(
         if ( cr0 & X86_CR0_PG )
         {
             mfn = mfn_x(gfn_to_mfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
+if ( p2m_is_paging(p2mt) )
+printk("%s: gfn %lx p2m %x\n",__func__,cr3,p2mt);
             if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) )
             {
                 gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%lx\n", cr3);
@@ -1012,6 +1014,8 @@ static void vmx_load_pdptrs(struct vcpu
         goto crash;
 
     mfn = mfn_x(gfn_to_mfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
+if ( p2m_is_paging(p2mt) )
+printk("%s: gfn %lx p2m %x\n",__func__,cr3,p2mt);
     if ( !p2m_is_ram(p2mt) )
         goto crash;
 
--- xen-4.0.1-testing.orig/xen/arch/x86/mm.c
+++ xen-4.0.1-testing/xen/arch/x86/mm.c
@@ -3148,7 +3148,8 @@ int do_mmu_update(
 
                 rc = -ENOENT;
                 break;
-            }
+            } else if ( p2m_is_paging(p2mt) )
+                MEM_LOG("gfn %lx p2m %x", gmfn, p2mt);
 
             if ( unlikely(!get_page_from_pagenr(mfn, pt_owner)) )
             {
@@ -3224,6 +3225,8 @@ int do_mmu_update(
                         rc = -ENOENT;
                         break;
                     }
+                    else if ( p2m_is_paging(l2e_p2mt) )
+                        MEM_LOG("gfn %lx p2m %x", l2e_get_pfn(l2e), l2e_p2mt);
                     else if ( p2m_ram_shared == l2e_p2mt )
                     {
                         MEM_LOG("Unexpected attempt to map shared page.\n");
@@ -3254,6 +3257,8 @@ int do_mmu_update(
                         rc = -ENOENT;
                         break;
                     }
+                    else if ( p2m_is_paging(l3e_p2mt) )
+                        MEM_LOG("gfn %lx p2m %x", l3e_get_pfn(l3e), l3e_p2mt);
                     else if ( p2m_ram_shared == l3e_p2mt )
                     {
                         MEM_LOG("Unexpected attempt to map shared page.\n");
@@ -3285,6 +3290,8 @@ int do_mmu_update(
                         rc = -ENOENT;
                         break;
                     }
+                    else if ( p2m_is_paging(l4e_p2mt) )
+                        MEM_LOG("gfn %lx p2m %x", l4e_get_pfn(l4e), l4e_p2mt);
                     else if ( p2m_ram_shared == l4e_p2mt )
                     {
                         MEM_LOG("Unexpected attempt to map shared page.\n");
@@ -4322,6 +4329,7 @@ long arch_memory_op(int op, XEN_GUEST_HA
             tmp_mfn = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt, 0));
             if ( unlikely(p2m_is_paging(p2mt)) )
             {
+printk("%s(%u) gfn %lx p2m %x\n",__func__,__LINE__,xatp.idx, p2mt);
                 if ( p2m_is_paged(p2mt) )
                     p2m_mem_paging_populate(d, xatp.idx);
                 rcu_unlock_domain(d);
@@ -4337,6 +4345,21 @@ long arch_memory_op(int op, XEN_GUEST_HA
                 break;
             mfn = tmp_mfn;
             page = mfn_to_page(mfn);
+
+            gpfn = get_gpfn_from_mfn(tmp_mfn);
+            ASSERT( gpfn != SHARED_M2P_ENTRY );
+            gfn_to_mfn(d, gpfn, &p2mt);
+            if ( unlikely(p2m_is_paging(p2mt)) )
+            {
+printk("%s(%u) gfn %lx p2m %x\n",__func__,__LINE__, gpfn, p2mt);
+                if ( page )
+                    put_page(page);
+                if ( p2m_is_paged(p2mt) )
+                    p2m_mem_paging_populate(d, gpfn);
+                rcu_unlock_domain(d);
+                return -ENOENT;
+            }
+
             break;
         }
         default:
@@ -4369,6 +4392,7 @@ long arch_memory_op(int op, XEN_GUEST_HA
                 rc = guest_remove_page(d, xatp.gpfn);
                 if ( rc == -ENOENT )
                 {
+                    MEM_LOG("gfn %lx", xatp.gpfn);
                     domain_unlock(d);
                     rcu_unlock_domain(d);
                     return rc;
--- xen-4.0.1-testing.orig/xen/arch/x86/mm/hap/guest_walk.c
+++ xen-4.0.1-testing/xen/arch/x86/mm/hap/guest_walk.c
@@ -49,6 +49,7 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
     top_mfn = gfn_to_mfn_unshare(v->domain, cr3 >> PAGE_SHIFT, &p2mt, 0);
     if ( p2m_is_paging(p2mt) )
     {
+printk("%s: gfn %lx p2m %x\n",__func__,cr3 >> PAGE_SHIFT,p2mt);
         if ( p2m_is_paged(p2mt) )
             p2m_mem_paging_populate(v->domain, cr3 >> PAGE_SHIFT);
 
@@ -82,6 +83,7 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
         gfn_to_mfn_unshare(v->domain, gfn_x(gfn), &p2mt, 0);
         if ( p2m_is_paging(p2mt) )
         {
+printk("%s: gfn %lx p2m %x\n",__func__,gfn_x(gfn),p2mt);
             if ( p2m_is_paged(p2mt) )
                 p2m_mem_paging_populate(v->domain, gfn_x(gfn));
 
--- xen-4.0.1-testing.orig/xen/arch/x86/mm/mem_event.c
+++ xen-4.0.1-testing/xen/arch/x86/mm/mem_event.c
@@ -68,6 +68,7 @@ int mem_event_enable(struct domain *d, m
     d->mem_event.paused = 0;
     d->mem_event.enabled = 1;
 
+printk("%s %u\n",__func__, d->domain_id);
     return 0;
 
  err_shared:
@@ -82,6 +83,7 @@ int mem_event_enable(struct domain *d, m
 
 int mem_event_disable(struct domain *d)
 {
+printk("%s %u\n",__func__, d->domain_id);
     d->mem_event.enabled = 0;
     d->mem_event.paused = 0;
 
@@ -168,6 +170,9 @@ int mem_event_check_ring(struct domain *
     mem_event_ring_lock(d);
 
     free_requests = RING_FREE_REQUESTS(&d->mem_event.front_ring);
+    if ( free_requests < 3 )
+        gdprintk(XENLOG_INFO, "frq %d\n", free_requests);
+    WARN_ON(free_requests == 0);
     ring_full = free_requests < MEM_EVENT_RING_THRESHOLD;
 
     if ( (current->domain->domain_id == d->domain_id) && ring_full )
@@ -243,6 +248,8 @@ int mem_event_domctl(struct domain *d, x
             guest_get_eff_l1e(v, ring_addr, &l1e);
             gfn = l1e_get_pfn(l1e);
             ring_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt);
+if ( p2m_is_paging(p2mt) )
+printk("%s: gfn %lx p2m %x\n",__func__,gfn,p2mt);
 
             rc = -EINVAL;
             if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) )
@@ -252,6 +259,8 @@ int mem_event_domctl(struct domain *d, x
             guest_get_eff_l1e(v, shared_addr, &l1e);
             gfn = l1e_get_pfn(l1e);
             shared_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt);
+if ( p2m_is_paging(p2mt) )
+printk("%s: gfn %lx p2m %x\n",__func__,gfn,p2mt);
 
             rc = -EINVAL;
             if ( unlikely(!mfn_valid(mfn_x(shared_mfn))) )
--- xen-4.0.1-testing.orig/xen/arch/x86/mm/mem_sharing.c
+++ xen-4.0.1-testing/xen/arch/x86/mm/mem_sharing.c
@@ -381,6 +381,8 @@ int mem_sharing_debug_gfn(struct domain
     struct page_info *page;
 
     mfn = gfn_to_mfn(d, gfn, &p2mt);
+if ( p2m_is_paging(p2mt) )
+printk("%s: gfn %lx p2m %x\n",__func__,gfn,p2mt);
     page = mfn_to_page(mfn);
 
     printk("Debug for domain=%d, gfn=%lx, ", 
@@ -636,6 +638,8 @@ int mem_sharing_unshare_page(struct doma
     struct list_head *le;
 
     mfn = gfn_to_mfn(d, gfn, &p2mt);
+if ( p2m_is_paging(p2mt) )
+printk("%s: gfn %lx p2m %x\n",__func__,gfn,p2mt);
 
     page = mfn_to_page(mfn);
     handle = page->shr_handle;
--- xen-4.0.1-testing.orig/xen/arch/x86/mm/p2m.c
+++ xen-4.0.1-testing/xen/arch/x86/mm/p2m.c
@@ -2005,6 +2005,8 @@ p2m_remove_page(struct domain *d, unsign
         mfn_return = d->arch.p2m->get_entry(d, gfn + i, &t, p2m_query);
         if ( !p2m_is_grant(t) )
             set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
+        if (!( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) ))
+        printk("%s(%u) i %lu t %x gfn %lx mfn %lx ret %lx %lu 
%d\n",__func__,__LINE__,i,t,gfn,mfn,mfn_x(mfn_return), p2m_is_valid(t), mfn + i 
== mfn_x(mfn_return));
         ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
     }
     set_p2m_entry(d, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid);
@@ -2115,6 +2117,8 @@ guest_physmap_add_entry(struct domain *d
     int pod_count = 0;
     int rc = 0;
 
+    if ( p2m_is_paging(t) )
+        gdprintk(XENLOG_ERR, "d %u gfn %lx mfn %lx o %x p2m %x\n", 
d->domain_id, gfn, mfn, page_order, t);
     if ( !paging_mode_translate(d) )
     {
         if ( need_iommu(d) && t == p2m_ram_rw )
@@ -2147,6 +2151,8 @@ guest_physmap_add_entry(struct domain *d
     for ( i = 0; i < (1UL << page_order); i++ )
     {
         omfn = gfn_to_mfn_query(d, gfn + i, &ot);
+        if ( p2m_is_paging(ot) )
+            gdprintk(XENLOG_ERR, "d %u gfn %lx omfn %lx o %x i %lx p2m %x\n", d->domain_id, gfn, mfn_x(omfn), page_order, i, ot);
         if ( p2m_is_grant(ot) )
         {
             /* Really shouldn't be unmapping grant maps this way */
@@ -2188,6 +2194,7 @@ guest_physmap_add_entry(struct domain *d
             omfn = gfn_to_mfn_query(d, ogfn, &ot);
             if ( unlikely(p2m_is_paging(ot)) )
             {
+                gdprintk(XENLOG_ERR, "d %u gfn %lx omfn %lx o %x i %lx p2m %x\n", d->domain_id, ogfn, mfn_x(omfn), page_order, i, ot);
                 p2m_unlock(d->arch.p2m);
                 if ( p2m_is_paged(ot) )
                     p2m_mem_paging_populate(d, ogfn);
--- xen-4.0.1-testing.orig/xen/arch/x86/mm/shadow/common.c
+++ xen-4.0.1-testing/xen/arch/x86/mm/shadow/common.c
@@ -3720,6 +3720,8 @@ int shadow_track_dirty_vram(struct domai
             int dirty = 0;
             paddr_t sl1ma = dirty_vram->sl1ma[i];
 
+if ( p2m_is_paging(t) )
+printk("%s: gfn %lx p2m %x\n",__func__,begin_pfn +i,t);
             if (mfn_x(mfn) == INVALID_MFN)
             {
                 dirty = 1;
@@ -3801,6 +3803,8 @@ int shadow_track_dirty_vram(struct domai
                  * write access */
                 for ( i = begin_pfn; i < end_pfn; i++ ) {
                     mfn_t mfn = gfn_to_mfn(d, i, &t);
+if ( p2m_is_paging(t) )
+printk("%s: gfn %lx p2m %x\n",__func__,i,t);
                     if (mfn_x(mfn) != INVALID_MFN)
                         flush_tlb |= sh_remove_write_access(d->vcpu[0], mfn, 1, 0);
                 }
--- xen-4.0.1-testing.orig/xen/arch/x86/mm/shadow/multi.c
+++ xen-4.0.1-testing/xen/arch/x86/mm/shadow/multi.c
@@ -4795,6 +4795,8 @@ static mfn_t emulate_gva_to_mfn(struct v
         mfn = gfn_to_mfn_query(v->domain, _gfn(gfn), &p2mt);
     else
         mfn = gfn_to_mfn(v->domain, _gfn(gfn), &p2mt);
+if ( p2m_is_paging(p2mt) )
+printk("%s: gfn %lx p2m %x\n",__func__, gfn,p2mt);
         
     if ( p2m_is_readonly(p2mt) )
         return _mfn(READONLY_GFN);
--- xen-4.0.1-testing.orig/xen/common/grant_table.c
+++ xen-4.0.1-testing/xen/common/grant_table.c
@@ -1447,6 +1447,7 @@ gnttab_transfer(
     struct domain *d = current->domain;
     struct domain *e;
     struct page_info *page;
+    int rc;
     int i;
     struct gnttab_transfer gop;
     unsigned long mfn;
@@ -1465,7 +1466,12 @@ gnttab_transfer(
             return -EFAULT;
         }
 
-        mfn = gfn_to_mfn_private(d, gop.mfn);
+        rc = __get_paged_frame(gop.mfn, &mfn, 0, d);
+        if ( rc == GNTST_eagain )
+        {
+            gop.status = GNTST_eagain;
+            goto copyback;
+        }
 
         /* Check the passed page frame for basic validity. */
         if ( unlikely(!mfn_valid(mfn)) )
@@ -1580,28 +1586,39 @@ gnttab_transfer(
         /* Tell the guest about its new page frame. */
         spin_lock(&e->grant_table->lock);
 
+        gop.status = GNTST_okay;
+
         if ( e->grant_table->gt_version == 1 )
         {
             grant_entry_v1_t *sha = &shared_entry_v1(e->grant_table, gop.ref);
-            guest_physmap_add_page(e, sha->frame, mfn, 0);
+            rc = guest_physmap_add_page(e, sha->frame, mfn, 0);
+            if ( rc == -ENOENT )
+            {
+                gop.status = GNTST_eagain;
+                goto unlock_granttable_and_copyback;
+            }
             sha->frame = mfn;
         }
         else
         {
             grant_entry_v2_t *sha = &shared_entry_v2(e->grant_table, gop.ref);
-            guest_physmap_add_page(e, sha->full_page.frame, mfn, 0);
+            rc = guest_physmap_add_page(e, sha->full_page.frame, mfn, 0);
+            if ( rc == -ENOENT )
+            {
+                gop.status = GNTST_eagain;
+                goto unlock_granttable_and_copyback;
+            }
             sha->full_page.frame = mfn;
         }
         wmb();
         shared_entry_header(e->grant_table, gop.ref)->flags |=
             GTF_transfer_completed;
 
+    unlock_granttable_and_copyback:
         spin_unlock(&e->grant_table->lock);
 
         rcu_unlock_domain(e);
 
-        gop.status = GNTST_okay;
-
     copyback:
         if ( unlikely(__copy_to_guest_offset(uop, i, &gop, 1)) )
         {
--- xen-4.0.1-testing.orig/xen/common/memory.c
+++ xen-4.0.1-testing/xen/common/memory.c
@@ -138,6 +138,7 @@ static void populate_physmap(struct memo
             rc = guest_physmap_add_page(d, gpfn, mfn, a->extent_order);
             if ( rc != 0 )
             {
+                gdprintk(XENLOG_INFO, "%s: rc %x\n", __func__, rc);
                 free_domheap_pages(page, a->extent_order);
                 goto out;
             }
@@ -170,6 +171,7 @@ int guest_remove_page(struct domain *d,
     mfn = mfn_x(gfn_to_mfn(d, gmfn, &p2mt)); 
     if ( unlikely(p2m_is_paging(p2mt)) )
     {
+        gdprintk(XENLOG_INFO, "d %u gfn %lx mfn %lx p2m %x\n", d->domain_id, 
gmfn, mfn, p2mt);
         if ( p2m_is_paged(p2mt) )
             p2m_mem_paging_populate(d, gmfn);
         return -ENOENT;
@@ -374,6 +376,7 @@ static long memory_exchange(XEN_GUEST_HA
                 mfn = mfn_x(gfn_to_mfn_unshare(d, gmfn + k, &p2mt, 0));
                 if ( p2m_is_paging(p2mt) )
                 {
+                    printk("%s: gfn %lx p2m %x\n",__func__,gmfn+k,p2mt);
                     if ( p2m_is_paged(p2mt) )
                         p2m_mem_paging_populate(d, gmfn);
                     rc = -ENOENT;
--- xen-4.0.1-testing.orig/xen/include/asm-x86/p2m.h
+++ xen-4.0.1-testing/xen/include/asm-x86/p2m.h
@@ -329,6 +329,8 @@ static inline mfn_t gfn_to_mfn_unshare(s
             return mfn;
         }
         mfn = gfn_to_mfn(d, gfn, p2mt);
+if ( p2m_is_paging(*p2mt) )
+printk("%s: gfn %lx p2m %x\n",__func__, gfn,*p2mt);
     }
 #endif
 
@@ -342,6 +344,8 @@ static inline unsigned long gmfn_to_mfn(
     mfn_t mfn;
     p2m_type_t t;
     mfn = gfn_to_mfn(d, gpfn, &t);
+if ( p2m_is_paging(t) )
+printk("%s: gfn %lx p2m %x\n",__func__, gpfn,t);
     if ( p2m_is_valid(t) )
         return mfn_x(mfn);
     return INVALID_MFN;


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

<Prev in Thread] Current Thread [Next in Thread>