[XenPPC] [xenppc-unstable] [POWERPC][XEN] Track pages correctly

To: xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
Subject: [XenPPC] [xenppc-unstable] [POWERPC][XEN] Track pages correctly
From: Xen patchbot-xenppc-unstable <patchbot-xenppc-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 13 Sep 2006 22:40:51 +0000
Delivery-date: Wed, 13 Sep 2006 15:43:10 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-ppc-devel-request@lists.xensource.com?subject=help>
List-id: Xen PPC development <xen-ppc-devel.lists.xensource.com>
List-post: <mailto:xen-ppc-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ppc-devel>, <mailto:xen-ppc-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ppc-devel>, <mailto:xen-ppc-devel-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-ppc-devel-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
# Node ID 626a8f102700be3c89a6fb407381d9aa77d15dfd
# Parent  48840bbe607de4472121226de782bdd81db714e4
[POWERPC][XEN] Track pages correctly

The following patch tracks and frees pages correctly.  It fixes the problem
where a foreign mapping would leave a domain as a zombie because its page
reference counts were never dropped.  This involved:
  - implementing relinquish_memory() for PowerPC
  - removing free_rma(), since all pages are now "relinquished"
  - getting and putting foreign pages correctly (a toy sketch of this
    pairing follows below)
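
As a rough illustration of that pairing, here is a small standalone toy model.
It is not Xen code: every type and helper below is a simplified stand-in for
the real page_info/get_page/put_page machinery, with invented toy signatures.
It only models the lifecycle the patch enforces: the mapping path takes a
reference when a foreign mapping is created, the unmap path drops it again,
and domain teardown drops the allocation reference.  A missing put leaves the
count non-zero, so the page is never freed and the owner lingers as a zombie.

    /* Toy model of the reference-count pairing; names are illustrative only. */
    #include <stdio.h>

    struct page_info { unsigned long count_info; };
    struct domain    { const char *name; unsigned long pages_left; };

    /* Take a reference on a page owned by domain d. */
    static void toy_get_page(struct page_info *pg, struct domain *d)
    {
        (void)d;
        pg->count_info++;
    }

    /* Drop a reference; when the count reaches zero the page is "freed"
     * and the owner's count of outstanding pages shrinks. */
    static void toy_put_page(struct page_info *pg, struct domain *d)
    {
        if (--pg->count_info == 0)
            d->pages_left--;
    }

    int main(void)
    {
        struct domain owner = { "domA", 1 };
        struct page_info pg = { 0 };

        /* Allocation holds one reference (think _PGC_allocated). */
        toy_get_page(&pg, &owner);

        /* Foreign domain maps the page: the mapping path takes a reference. */
        toy_get_page(&pg, &owner);

        /* Foreign domain unmaps: the unmap path must drop that reference.
         * Comment this line out and the domain stays a zombie. */
        toy_put_page(&pg, &owner);

        /* Domain teardown drops the allocation reference. */
        toy_put_page(&pg, &owner);

        printf("pages still owned by %s: %lu (%s)\n", owner.name,
               owner.pages_left,
               owner.pages_left ? "zombie" : "fully destroyed");
        return 0;
    }

With both puts in place the program prints a count of 0; dropping either one
models the leaked reference that used to keep the domain from being destroyed.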

Signed-off-by: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
---
 xen/arch/powerpc/domain.c     |   36 +++++++++++++++++++++++++++++++++++-
 xen/arch/powerpc/mm.c         |    7 -------
 xen/arch/powerpc/papr/xlate.c |    5 ++++-
 xen/include/asm-powerpc/mm.h  |    2 --
 4 files changed, 39 insertions(+), 11 deletions(-)

diff -r 48840bbe607d -r 626a8f102700 xen/arch/powerpc/domain.c
--- a/xen/arch/powerpc/domain.c Tue Sep 12 14:28:16 2006 -0500
+++ b/xen/arch/powerpc/domain.c Wed Sep 13 18:41:11 2006 -0400
@@ -242,10 +242,44 @@ void sync_vcpu_execstate(struct vcpu *v)
     return;
 }
 
+static void relinquish_memory(struct domain *d, struct list_head *list)
+{
+    struct list_head *ent;
+    struct page_info  *page;
+
+    /* Use a recursive lock, as we may enter 'free_domheap_page'. */
+    spin_lock_recursive(&d->page_alloc_lock);
+
+    ent = list->next;
+    while ( ent != list )
+    {
+        page = list_entry(ent, struct page_info, list);
+
+        /* Grab a reference to the page so it won't disappear from under us. */
+        if ( unlikely(!get_page(page, d)) )
+        {
+            /* Couldn't get a reference -- someone is freeing this page. */
+            ent = ent->next;
+            continue;
+        }
+        if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
+            put_page_and_type(page);
+
+        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+            put_page(page);
+
+        /* Follow the list chain and /then/ potentially free the page. */
+        ent = ent->next;
+        put_page(page);
+    }
+    spin_unlock_recursive(&d->page_alloc_lock);
+}
+
 void domain_relinquish_resources(struct domain *d)
 {
-    free_rma(d);
+    relinquish_memory(d, &d->page_list);
     free_extents(d);
+    return;
 }
 
 void arch_dump_domain_info(struct domain *d)
diff -r 48840bbe607d -r 626a8f102700 xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c     Tue Sep 12 14:28:16 2006 -0500
+++ b/xen/arch/powerpc/mm.c     Wed Sep 13 18:41:11 2006 -0400
@@ -329,13 +329,6 @@ int allocate_rma(struct domain *d, unsig
     return 0;
 }
 
-void free_rma(struct domain *d)
-{
-    if (d->arch.rma_page) {
-        free_domheap_pages(d->arch.rma_page, d->arch.rma_order);
-    }
-}
-
 ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
 {
     ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
diff -r 48840bbe607d -r 626a8f102700 xen/arch/powerpc/papr/xlate.c
--- a/xen/arch/powerpc/papr/xlate.c     Tue Sep 12 14:28:16 2006 -0500
+++ b/xen/arch/powerpc/papr/xlate.c     Wed Sep 13 18:41:11 2006 -0400
@@ -263,6 +263,7 @@ static void h_enter(struct cpu_user_regs
 
                 BUG_ON(f == d);
                 get_domain(f);
+                get_page(pg, f);
             }
                 break;
             case PFN_TYPE_RMA:
@@ -510,8 +511,10 @@ static void h_remove(struct cpu_user_reg
             struct page_info *pg = mfn_to_page(mfn);
             struct domain *f = page_get_owner(pg);
 
-            if (f != d)
+            if (f != d) {
                 put_domain(f);
+                put_page(pg);
+            }
         }
     }
 
diff -r 48840bbe607d -r 626a8f102700 xen/include/asm-powerpc/mm.h
--- a/xen/include/asm-powerpc/mm.h      Tue Sep 12 14:28:16 2006 -0500
+++ b/xen/include/asm-powerpc/mm.h      Wed Sep 13 18:41:11 2006 -0400
@@ -154,7 +154,6 @@ static inline void put_page(struct page_
     while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
 
     if ( unlikely((nx & PGC_count_mask) == 0) ) {
-        panic("about to free page: 0x%lx\n", page_to_mfn(page));
         free_domheap_page(page);
     }
 }
@@ -238,7 +237,6 @@ long arch_memory_op(int op, XEN_GUEST_HA
 long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);
 
 extern int allocate_rma(struct domain *d, unsigned int order_pages);
-extern void free_rma(struct domain *d);
 extern uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages);
 extern void free_extents(struct domain *d);
 

_______________________________________________
Xen-ppc-devel mailing list
Xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ppc-devel
