[Xen-changelog] [xen-unstable] When tmem is enabled, reserve a fraction of memory

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] When tmem is enabled, reserve a fraction of memory
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 16 Feb 2010 04:00:22 -0800
Delivery-date: Tue, 16 Feb 2010 04:07:57 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1266256444 0
# Node ID 3a0bd7ca6b1146e2165e245cb0d4c2872771de17
# Parent  cbb147631e8cda24fda36d8ab627e0d9f21c4547
When tmem is enabled, reserve a fraction of memory
for allocations of 0<order<9 to avoid fragmentation
issues.

Signed-off-by: Dan Magenheimer <dan.magenheimer@xxxxxxxxxx>
---
 xen/common/page_alloc.c |   19 ++++++++++++++++++-
 1 files changed, 18 insertions(+), 1 deletion(-)
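
[Editor's note, not part of the patch] For orientation before reading the diff: the
heuristic keeps midsize_alloc_zone_pages as a high-water mark of 1/128 of free memory,
and once free memory has fallen to that reserve it refuses order-0 and order>=9
allocations so that mid-size (0<order<9) requests can still be satisfied. Below is a
minimal standalone C sketch of that logic under the same names used in the diff
(opt_tmem, total_avail_pages, midsize_alloc_zone_pages, MIDSIZE_ALLOC_FRAC); the
helpers allocation_allowed(), account_free(), max_long() and the main() driver are
illustrative only and do not exist in xen/common/page_alloc.c.

    /* Standalone sketch of the tmem mid-size reservation heuristic. */
    #include <stdbool.h>
    #include <stdio.h>

    #define MIDSIZE_ALLOC_FRAC 128

    static bool opt_tmem = true;          /* tmem enabled                     */
    static long total_avail_pages;        /* pages currently free             */
    static long midsize_alloc_zone_pages; /* high-water reserve for mid-size  */

    static long max_long(long a, long b) { return a > b ? a : b; }

    /* Allocation path: refuse order-0 and order>=9 requests once free memory
     * has fallen to the reserve, so mid-size allocations can still succeed. */
    static bool allocation_allowed(unsigned int order)
    {
        if ( opt_tmem && ((order == 0) || (order >= 9)) &&
             (total_avail_pages <= midsize_alloc_zone_pages) )
            return false;
        return true;
    }

    /* Free path: the reserve tracks a 1/128 high-water mark of free memory. */
    static void account_free(unsigned int order)
    {
        total_avail_pages += 1L << order;
        if ( opt_tmem )
            midsize_alloc_zone_pages = max_long(midsize_alloc_zone_pages,
                                total_avail_pages / MIDSIZE_ALLOC_FRAC);
    }

    int main(void)
    {
        account_free(20);                      /* 2^20 pages (~4 GiB) freed  */
        printf("reserve = %ld pages\n", midsize_alloc_zone_pages);   /* 8192 */
        total_avail_pages = midsize_alloc_zone_pages;  /* memory now scarce  */
        printf("order 0: %d, order 5: %d, order 9: %d\n",
               allocation_allowed(0), allocation_allowed(5),
               allocation_allowed(9));         /* 0, 1, 0                    */
        return 0;
    }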

diff -r cbb147631e8c -r 3a0bd7ca6b11 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Mon Feb 15 17:49:14 2010 +0000
+++ b/xen/common/page_alloc.c   Mon Feb 15 17:54:04 2010 +0000
@@ -224,6 +224,10 @@ static unsigned long *avail[MAX_NUMNODES
 static unsigned long *avail[MAX_NUMNODES];
 static long total_avail_pages;
 
+/* TMEM: Reserve a fraction of memory for mid-size (0<order<9) allocations.*/
+static long midsize_alloc_zone_pages;
+#define MIDSIZE_ALLOC_FRAC 128
+
 static DEFINE_SPINLOCK(heap_lock);
 
 static unsigned long init_node_heap(int node, unsigned long mfn,
@@ -304,6 +308,14 @@ static struct page_info *alloc_heap_page
     spin_lock(&heap_lock);
 
     /*
+     * TMEM: When available memory is scarce, allow only mid-size allocations
+     * to avoid worst of fragmentation issues.
+     */
+    if ( opt_tmem && ((order == 0) || (order >= 9)) &&
+         (total_avail_pages <= midsize_alloc_zone_pages) )
+        goto fail;
+
+    /*
      * Start with requested node, but exhaust all node memory in requested 
      * zone before failing, only calc new node value if we fail to find memory 
      * in target node, this avoids needless computation on fast-path.
@@ -336,6 +348,7 @@ static struct page_info *alloc_heap_page
         return pg;
     }
 
+ fail:
     /* No suitable memory blocks. Fail the request. */
     spin_unlock(&heap_lock);
     return NULL;
@@ -504,6 +517,10 @@ static void free_heap_pages(
     avail[node][zone] += 1 << order;
     total_avail_pages += 1 << order;
 
+    if ( opt_tmem )
+        midsize_alloc_zone_pages = max(
+            midsize_alloc_zone_pages, total_avail_pages / MIDSIZE_ALLOC_FRAC);
+
     /* Merge chunks as far as possible. */
     while ( order < MAX_ORDER )
     {
@@ -842,7 +859,7 @@ static unsigned long avail_heap_pages(
 
 unsigned long total_free_pages(void)
 {
-    return total_avail_pages;
+    return total_avail_pages - midsize_alloc_zone_pages;
 }
 
 void __init end_boot_allocator(void)
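
[Editor's note, not part of the patch] As a rough worked example of the last hunk,
assuming the usual 4 KiB x86 page size: a host whose free memory has reached about
4 GiB (2^20 pages) carries a reserve of 2^20 / 128 = 8192 pages, i.e. 32 MiB. Because
the reserve is updated with max() on the free path, it only ever grows, and
total_free_pages() now reports free memory with that reserve subtracted.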

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
