xen-changelog

[Xen-changelog] [xen-unstable] x86: fix Dom0 booting time regression

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86: fix Dom0 booting time regression
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 04 May 2010 13:20:53 -0700
Delivery-date: Tue, 04 May 2010 13:27:25 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1272973341 -3600
# Node ID b07edd50661e7f768088c08215dabb9becb5c5b6
# Parent  6c7b905b03ff1cf171187bafe7129e3e213e5787
x86: fix Dom0 booting time regression

Unfortunately the changes in c/s 21035 caused boot time to go up
significantly on certain large systems. To rectify this without going
back to the old behavior, introduce a new memory allocation flag so
that Dom0 allocations can exhaust non-DMA memory before starting to
consume DMA memory. For the latter, the behavior introduced in the
aforementioned c/s is retained, while for the former we can now try
even larger chunks first.
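
As a rough illustration only (MOCK_MEMF_no_dma, the stub zone allocators
and main() below are invented for this sketch and are not Xen APIs; the
real change is the MEMF_no_dma test added to alloc_domheap_pages() in the
diff further down), the flag simply gates the fallback into DMA-capable
memory:

#include <stdbool.h>
#include <stdio.h>

#define MOCK_MEMF_no_dma (1U << 3)

static bool high_zone_alloc(void) { return false; } /* pretend >4GB memory is gone */
static bool dma_zone_alloc(void)  { return true;  } /* DMA pool still has pages    */

static bool alloc_page(unsigned int memflags)
{
    if ( high_zone_alloc() )
        return true;
    /* With the flag set, the caller forbids dipping into DMA-capable memory. */
    if ( memflags & MOCK_MEMF_no_dma )
        return false;
    return dma_zone_alloc();
}

int main(void)
{
    printf("restricted:   %d\n", alloc_page(MOCK_MEMF_no_dma)); /* 0: refused  */
    printf("unrestricted: %d\n", alloc_page(0));                /* 1: DMA used */
    return 0;
}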

This builds on the fact that alloc_chunk() gets called with non-
increasing 'max_pages' arguments, and hence it can store locally the
allocation order last used (as larger-order allocations cannot succeed
during subsequent invocations once they have failed).
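
The caching argument can also be seen in a self-contained sketch (the mock
heap, MOCK_MAX_ORDER, order_from_pages() and the main() driver below are
invented for the demo; the real logic is the last_order handling added to
alloc_chunk() in the diff below). Because max_pages never grows between
calls, an order that failed once is skipped on all later calls:

#include <stdbool.h>
#include <stdio.h>

#define MOCK_MAX_ORDER 20

static unsigned long free_pages = 2800; /* mock heap, smaller than the request */

static bool heap_alloc(unsigned int order)
{
    unsigned long n = 1UL << order;
    if ( free_pages < n )
        return false;
    free_pages -= n;
    return true;
}

static unsigned int order_from_pages(unsigned long pages)
{
    unsigned int order = 0;
    while ( (1UL << order) < pages && order < MOCK_MAX_ORDER )
        ++order;
    return order;
}

/* Returns the order allocated, or -1 once the heap is exhausted. */
static int alloc_chunk_sketch(unsigned long max_pages)
{
    static unsigned int last_order = MOCK_MAX_ORDER;
    unsigned int order = order_from_pages(max_pages);

    if ( order > last_order )
        order = last_order;            /* larger orders already failed before */
    else if ( max_pages & (max_pages - 1) )
        --order;                       /* round down when not a power of two  */

    while ( !heap_alloc(order) )
        if ( order-- == 0 )
            return -1;

    last_order = order;                /* cache for the next invocation */
    return (int)order;
}

int main(void)
{
    unsigned long remaining = 5000;    /* pages "Dom0" still wants */

    while ( remaining > 0 )
    {
        int order = alloc_chunk_sketch(remaining);
        if ( order < 0 )
            break;
        remaining -= 1UL << order;
        printf("order %d chunk, %lu pages still wanted\n", order, remaining);
    }
    return 0;
}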

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/arch/x86/domain_build.c |   36 +++++++++++++++++++++++-------------
 xen/common/page_alloc.c     |    5 +++--
 xen/include/xen/mm.h        |    2 ++
 3 files changed, 28 insertions(+), 15 deletions(-)

diff -r 6c7b905b03ff -r b07edd50661e xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Tue May 04 12:41:11 2010 +0100
+++ b/xen/arch/x86/domain_build.c       Tue May 04 12:42:21 2010 +0100
@@ -126,26 +126,36 @@ static struct page_info * __init alloc_c
 static struct page_info * __init alloc_chunk(
     struct domain *d, unsigned long max_pages)
 {
+    static unsigned int __initdata last_order = MAX_ORDER;
+    static unsigned int __initdata memflags = MEMF_no_dma;
     struct page_info *page;
-    unsigned int order, free_order;
-
-    /*
-     * Allocate up to 2MB at a time: It prevents allocating very large chunks
-     * from DMA pools before the >4GB pool is fully depleted.
-     */
-    if ( max_pages > (2UL << (20 - PAGE_SHIFT)) )
-        max_pages = 2UL << (20 - PAGE_SHIFT);
-    order = get_order_from_pages(max_pages);
-    if ( (max_pages & (max_pages-1)) != 0 )
-        order--;
-    while ( (page = alloc_domheap_pages(d, order, 0)) == NULL )
+    unsigned int order = get_order_from_pages(max_pages), free_order;
+
+    if ( order > last_order )
+        order = last_order;
+    else if ( max_pages & (max_pages - 1) )
+        --order;
+    while ( (page = alloc_domheap_pages(d, order, memflags)) == NULL )
         if ( order-- == 0 )
             break;
+    if ( page )
+        last_order = order;
+    else if ( memflags )
+    {
+        /*
+         * Allocate up to 2MB at a time: It prevents allocating very large
+         * chunks from DMA pools before the >4GB pool is fully depleted.
+         */
+        last_order = 21 - PAGE_SHIFT;
+        memflags = 0;
+        return alloc_chunk(d, max_pages);
+    }
+
     /*
      * Make a reasonable attempt at finding a smaller chunk at a higher
      * address, to avoid allocating from low memory as much as possible.
      */
-    for ( free_order = order; page && order--; )
+    for ( free_order = order; !memflags && page && order--; )
     {
         struct page_info *pg2;
 
diff -r 6c7b905b03ff -r b07edd50661e xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Tue May 04 12:41:11 2010 +0100
+++ b/xen/common/page_alloc.c   Tue May 04 12:42:21 2010 +0100
@@ -1157,8 +1157,9 @@ struct page_info *alloc_domheap_pages(
         pg = alloc_heap_pages(dma_zone + 1, zone_hi, node, order, memflags);
 
     if ( (pg == NULL) &&
-         ((pg = alloc_heap_pages(MEMZONE_XEN + 1, zone_hi,
-                                 node, order, memflags)) == NULL) )
+         ((memflags & MEMF_no_dma) ||
+          ((pg = alloc_heap_pages(MEMZONE_XEN + 1, zone_hi,
+                                  node, order, memflags)) == NULL)) )
          return NULL;
 
     if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
diff -r 6c7b905b03ff -r b07edd50661e xen/include/xen/mm.h
--- a/xen/include/xen/mm.h      Tue May 04 12:41:11 2010 +0100
+++ b/xen/include/xen/mm.h      Tue May 04 12:42:21 2010 +0100
@@ -80,6 +80,8 @@ int assign_pages(
 #define  MEMF_populate_on_demand (1U<<_MEMF_populate_on_demand)
 #define _MEMF_tmem        2
 #define  MEMF_tmem        (1U<<_MEMF_tmem)
+#define _MEMF_no_dma      3
+#define  MEMF_no_dma      (1U<<_MEMF_no_dma)
 #define _MEMF_node        8
 #define  MEMF_node(n)     ((((n)+1)&0xff)<<_MEMF_node)
 #define _MEMF_bits        24

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
