[Xen-changelog] [xen-unstable] page_alloc: Clean up free_heap_pages and init_heap_pages interfaces.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] page_alloc: Clean up free_heap_pages and init_heap_pages interfaces.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 20 Jan 2009 20:30:13 -0800
Delivery-date: Tue, 20 Jan 2009 20:30:28 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1232461842 0
# Node ID 108b45539cda94e3350cf892b713e773025b48d8
# Parent  681af1946724258bf7758301c46425cd988a4ce2
page_alloc: Clean up free_heap_pages and init_heap_pages interfaces.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/common/page_alloc.c |   61 ++++++++++++++++--------------------------------
 1 files changed, 21 insertions(+), 40 deletions(-)

diff -r 681af1946724 -r 108b45539cda xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Tue Jan 20 13:57:20 2009 +0000
+++ b/xen/common/page_alloc.c   Tue Jan 20 14:30:42 2009 +0000
@@ -70,8 +70,6 @@ integer_param("dma_bits", dma_bitsize);
 #define scrub_page(p) clear_page(p)
 #endif
 
-#define bits_to_zone(b) (((b) < (PAGE_SHIFT + 1)) ? 0 : ((b) - PAGE_SHIFT - 1))
-
 static DEFINE_SPINLOCK(page_scrub_lock);
 LIST_HEAD(page_scrub_list);
 static unsigned long scrub_pages;
@@ -262,7 +260,9 @@ unsigned long __init alloc_boot_pages(
 #define MEMZONE_XEN 0
 #define NR_ZONES    (PADDR_BITS - PAGE_SHIFT)
 
-#define pfn_dom_zone_type(_pfn) (fls(_pfn) - 1)
+#define bits_to_zone(b) (((b) < (PAGE_SHIFT + 1)) ? 0 : ((b) - PAGE_SHIFT - 1))
+#define page_to_zone(pg) (is_xen_heap_page(pg) ? MEMZONE_XEN :  \
+                          (fls(page_to_mfn(pg)) - 1))
 
 typedef struct list_head heap_by_zone_and_order_t[NR_ZONES][MAX_ORDER+1];
 static heap_by_zone_and_order_t *_heap[MAX_NUMNODES];
@@ -399,13 +399,13 @@ static struct page_info *alloc_heap_page
 
 /* Free 2^@order set of pages. */
 static void free_heap_pages(
-    unsigned int zone, struct page_info *pg, unsigned int order)
+    struct page_info *pg, unsigned int order)
 {
     unsigned long mask;
     unsigned int i, node = phys_to_nid(page_to_maddr(pg));
+    unsigned int zone = page_to_zone(pg);
     struct domain *d;
 
-    ASSERT(zone < NR_ZONES);
     ASSERT(order <= MAX_ORDER);
     ASSERT(node >= 0);
     ASSERT(node < num_online_nodes());
@@ -484,17 +484,12 @@ static void free_heap_pages(
  */
 #define MAX_ORDER_ALIGNED (1UL << (MAX_ORDER))
 static void init_heap_pages(
-    unsigned int zone, struct page_info *pg, unsigned long nr_pages)
+    struct page_info *pg, unsigned long nr_pages)
 {
     unsigned int nid_curr, nid_prev;
     unsigned long i;
 
-    ASSERT(zone < NR_ZONES);
-
-    if ( likely(page_to_mfn(pg) != 0) )
-        nid_prev = phys_to_nid(page_to_maddr(pg-1));
-    else
-        nid_prev = phys_to_nid(page_to_maddr(pg));
+    nid_prev = phys_to_nid(page_to_maddr(pg-1));
 
     for ( i = 0; i < nr_pages; i++ )
     {
@@ -509,7 +504,7 @@ static void init_heap_pages(
          */
          if ( (nid_curr == nid_prev) || (page_to_maddr(pg+i) &
                                          MAX_ORDER_ALIGNED) )
-             free_heap_pages(zone, pg+i, 0);
+             free_heap_pages(pg+i, 0);
          else
              printk("Reserving non-aligned node boundary @ mfn %lu\n",
                     page_to_mfn(pg+i));
@@ -555,7 +550,7 @@ void __init end_boot_allocator(void)
         if ( next_free )
             map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
         if ( curr_free )
-            init_heap_pages(pfn_dom_zone_type(i), mfn_to_page(i), 1);
+            init_heap_pages(mfn_to_page(i), 1);
     }
 
     if ( !dma_bitsize && (num_online_nodes() > 1) )
@@ -656,7 +651,7 @@ void init_xenheap_pages(paddr_t ps, padd
     if ( !is_xen_heap_mfn(paddr_to_pfn(pe)) )
         pe -= PAGE_SIZE;
 
-    init_heap_pages(MEMZONE_XEN, maddr_to_page(ps), (pe - ps) >> PAGE_SHIFT);
+    init_heap_pages(maddr_to_page(ps), (pe - ps) >> PAGE_SHIFT);
 }
 
 
@@ -690,7 +685,7 @@ void free_xenheap_pages(void *v, unsigne
 
     memguard_guard_range(v, 1 << (order + PAGE_SHIFT));
 
-    free_heap_pages(MEMZONE_XEN, virt_to_page(v), order);
+    free_heap_pages(virt_to_page(v), order);
 }
 
 #else
@@ -738,7 +733,7 @@ void free_xenheap_pages(void *v, unsigne
     for ( i = 0; i < (1u << order); i++ )
         pg[i].count_info &= ~PGC_xen_heap;
 
-    free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
+    free_heap_pages(pg, order);
 }
 
 #endif
@@ -751,28 +746,14 @@ void free_xenheap_pages(void *v, unsigne
 
 void init_domheap_pages(paddr_t ps, paddr_t pe)
 {
-    unsigned long s_tot, e_tot;
-    unsigned int zone;
+    unsigned long smfn, emfn;
 
     ASSERT(!in_irq());
 
-    s_tot = round_pgup(ps) >> PAGE_SHIFT;
-    e_tot = round_pgdown(pe) >> PAGE_SHIFT;
-
-    zone = fls(s_tot) - 1;
-    BUG_ON(zone <= MEMZONE_XEN);
-
-    while ( s_tot < e_tot )
-    {
-        unsigned long end = e_tot;
-
-        BUILD_BUG_ON(NR_ZONES > BITS_PER_LONG);
-        if ( zone < BITS_PER_LONG - 1 && end > 1UL << (zone + 1) )
-            end = 1UL << (zone + 1);
-        init_heap_pages(zone, mfn_to_page(s_tot), end - s_tot);
-        s_tot = end;
-        zone++;
-    }
+    smfn = round_pgup(ps) >> PAGE_SHIFT;
+    emfn = round_pgdown(pe) >> PAGE_SHIFT;
+
+    init_heap_pages(mfn_to_page(smfn), emfn - smfn);
 }
 
 
@@ -853,7 +834,7 @@ struct page_info *alloc_domheap_pages(
 
     if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
     {
-        free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
+        free_heap_pages(pg, order);
         return NULL;
     }
     
@@ -898,7 +879,7 @@ void free_domheap_pages(struct page_info
 
         if ( likely(!d->is_dying) )
         {
-            free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
+            free_heap_pages(pg, order);
         }
         else
         {
@@ -920,7 +901,7 @@ void free_domheap_pages(struct page_info
     else
     {
         /* Freeing anonymous domain-heap pages. */
-        free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
+        free_heap_pages(pg, order);
         drop_dom_ref = 0;
     }
 
@@ -1041,7 +1022,7 @@ static void page_scrub_softirq(void)
             p = map_domain_page(page_to_mfn(pg));
             scrub_page(p);
             unmap_domain_page(p);
-            free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, 0);
+            free_heap_pages(pg, 0);
         }
     } while ( (NOW() - start) < MILLISECS(1) );
 

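For reference, the cleanup works because the zone can be recovered from the page itself: page_to_zone() takes the highest set bit of the MFN (fls(mfn) - 1), which matches what bits_to_zone() computes from the width of a physical address, so callers no longer need to pass a zone to free_heap_pages() or init_heap_pages(). The standalone sketch below illustrates that mapping; it is not part of the patch, assumes PAGE_SHIFT = 12 (4 KiB pages), and supplies its own portable fls() helper.

#include <stdio.h>

#define PAGE_SHIFT 12  /* assumed here: 4 KiB pages */

/* Portable stand-in for fls(): 1-based index of the highest set bit
 * (fls(1) == 1); returns 0 for an input of 0. */
static int fls_ul(unsigned long x)
{
    int bit = 0;
    while ( x != 0 )
    {
        x >>= 1;
        bit++;
    }
    return bit;
}

/* Same expression as bits_to_zone() in the patch: width of a physical
 * address in bits -> heap zone index. */
static unsigned int bits_to_zone(unsigned int b)
{
    return (b < (PAGE_SHIFT + 1)) ? 0 : (b - PAGE_SHIFT - 1);
}

/* The non-Xen-heap arm of page_to_zone(): MFN -> heap zone index. */
static unsigned int mfn_to_zone(unsigned long mfn)
{
    return fls_ul(mfn) - 1;
}

int main(void)
{
    unsigned long mfns[] = { 1, 0x100, 0x10000, 0x100000 };
    unsigned int i;

    for ( i = 0; i < sizeof(mfns) / sizeof(mfns[0]); i++ )
    {
        unsigned long mfn = mfns[i];
        unsigned int zone = mfn_to_zone(mfn);
        unsigned int addr_bits = fls_ul(mfn) + PAGE_SHIFT;

        printf("mfn 0x%lx -> zone %u, bits_to_zone(%u) = %u\n",
               mfn, zone, addr_bits, bits_to_zone(addr_bits));
    }
    return 0;
}

Running it shows, for example, that MFN 0x100 lands in zone 8, the same zone bits_to_zone() returns for a 21-bit physical address, so computing the zone inside free_heap_pages() gives the same result the removed zone parameter used to carry.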
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
