WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] xen memory allocator: remove bit width restrictions
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Mon, 26 Feb 2007 03:50:14 -0800
Delivery-date: Mon, 26 Feb 2007 04:42:01 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1172250178 0
# Node ID ee4850bc895b0784850bd32dfd0e97ccb693fcb6
# Parent  70098102f84d40712c43649a4d830d6f2c7ae79b
xen memory allocator: remove bit width restrictions

Hide the (default or user-specified) DMA width from everything outside
the heap allocator. I/O-capable guests can now request any address
width for the memory they want exchanged/added.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
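Note: the patch replaces the single MEMF_dma flag with an address width
packed into the top byte of the memflags word. A minimal stand-alone
sketch of the encode/decode arithmetic (constants copied from
xen/include/xen/mm.h below; the demo main() is illustrative, not Xen
code):

    #include <stdio.h>

    #define _MEMF_no_refcount 0
    #define  MEMF_no_refcount (1U<<_MEMF_no_refcount)
    #define _MEMF_bits        24
    #define  MEMF_bits(n)     ((n)<<_MEMF_bits)

    int main(void)
    {
        /* Request memory addressable with 32 bits, refcounting off. */
        unsigned int memflags = MEMF_no_refcount | MEMF_bits(32);

        /* The allocator recovers the width with a plain shift;
         * zero means "no width restriction". */
        unsigned int bits = memflags >> _MEMF_bits;

        printf("requested width: %u bits\n", bits); /* prints 32 */
        return 0;
    }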
 xen/arch/x86/domain_build.c   |   13 ++++++++-----
 xen/common/memory.c           |    8 ++++----
 xen/common/page_alloc.c       |   24 +++++++++++++++++-------
 xen/include/asm-ia64/config.h |    2 +-
 xen/include/asm-x86/config.h  |    2 +-
 xen/include/xen/mm.h          |   10 +++-------
 6 files changed, 34 insertions(+), 25 deletions(-)

diff -r 70098102f84d -r ee4850bc895b xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Fri Feb 23 17:01:38 2007 +0000
+++ b/xen/arch/x86/domain_build.c       Fri Feb 23 17:02:58 2007 +0000
@@ -429,11 +429,14 @@ int construct_dom0(struct domain *d,
     if ( (1UL << order) > nr_pages )
         panic("Domain 0 allocation is too small for kernel image.\n");
 
-    /*
-     * Allocate from DMA pool: on i386 this ensures that our low-memory 1:1
-     * mapping covers the allocation.
-     */
-    if ( (page = alloc_domheap_pages(d, order, MEMF_dma)) == NULL )
+#ifdef __i386__
+    /* Ensure that our low-memory 1:1 mapping covers the allocation. */
+    page = alloc_domheap_pages(d, order,
+                               MEMF_bits(30 + (v_start >> 31)));
+#else
+    page = alloc_domheap_pages(d, order, 0);
+#endif
+    if ( page == NULL )
         panic("Not enough RAM for domain 0 allocation.\n");
     alloc_spfn = page_to_mfn(page);
     alloc_epfn = alloc_spfn + d->tot_pages;
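On i386 the requested width depends on bit 31 of v_start: the
expression 30 + (v_start >> 31) yields 30 (1GB addressable) for images
starting below 2GB and 31 (2GB) otherwise, keeping the low-memory 1:1
mapping over the allocation. A small worked example (the v_start
values are made up for illustration):

    #include <assert.h>

    /* Mirrors MEMF_bits(30 + (v_start >> 31)) from the hunk above. */
    static unsigned int dom0_alloc_width(unsigned long v_start)
    {
        return 30 + (v_start >> 31); /* 30 below 2GB, 31 at or above */
    }

    int main(void)
    {
        assert(dom0_alloc_width(0x40000000UL) == 30); /* 1GB start */
        assert(dom0_alloc_width(0xC0000000UL) == 31); /* 3GB start */
        return 0;
    }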
diff -r 70098102f84d -r ee4850bc895b xen/common/memory.c
--- a/xen/common/memory.c       Fri Feb 23 17:01:38 2007 +0000
+++ b/xen/common/memory.c       Fri Feb 23 17:02:58 2007 +0000
@@ -324,12 +324,12 @@ static long memory_exchange(XEN_GUEST_HA
          (exch.out.address_bits <
           (get_order_from_pages(max_page) + PAGE_SHIFT)) )
     {
-        if ( exch.out.address_bits < dma_bitsize )
+        if ( exch.out.address_bits <= PAGE_SHIFT )
         {
             rc = -ENOMEM;
             goto fail_early;
         }
-        memflags = MEMF_dma;
+        memflags = MEMF_bits(exch.out.address_bits);
     }
 
     if ( exch.in.extent_order <= exch.out.extent_order )
@@ -537,9 +537,9 @@ long do_memory_op(unsigned long cmd, XEN
              (reservation.address_bits <
               (get_order_from_pages(max_page) + PAGE_SHIFT)) )
         {
-            if ( reservation.address_bits < dma_bitsize )
+            if ( reservation.address_bits <= PAGE_SHIFT )
                 return start_extent;
-            args.memflags = MEMF_dma;
+            args.memflags = MEMF_bits(reservation.address_bits);
         }
 
         if ( likely(reservation.domid == DOMID_SELF) )
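Both hypercall paths previously refused any address_bits below
dma_bitsize; now they only reject widths too small to cover a single
page and forward the exact request via MEMF_bits(). A sketch of the
relaxed check, with PAGE_SHIFT hard-coded to the x86 value of 12 for
illustration:

    #include <assert.h>

    #define PAGE_SHIFT 12 /* x86 value, for illustration */

    /* Mirrors the relaxed checks above: any width that can cover at
     * least one page may be forwarded to the allocator. */
    static int address_bits_ok(unsigned int address_bits)
    {
        return address_bits > PAGE_SHIFT;
    }

    int main(void)
    {
        assert(address_bits_ok(24));  /* accepted now, was < dma_bitsize */
        assert(!address_bits_ok(12)); /* cannot hold a single page */
        return 0;
    }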
diff -r 70098102f84d -r ee4850bc895b xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Fri Feb 23 17:01:38 2007 +0000
+++ b/xen/common/page_alloc.c   Fri Feb 23 17:02:58 2007 +0000
@@ -48,8 +48,8 @@ string_param("badpage", opt_badpage);
 /*
  * Bit width of the DMA heap.
  */
-unsigned int  dma_bitsize = CONFIG_DMA_BITSIZE;
-unsigned long max_dma_mfn = (1UL << (CONFIG_DMA_BITSIZE - PAGE_SHIFT)) - 1;
+static unsigned int  dma_bitsize = CONFIG_DMA_BITSIZE;
+static unsigned long max_dma_mfn = (1UL << (CONFIG_DMA_BITSIZE - PAGE_SHIFT)) - 1;
 static void parse_dma_bits(char *s)
 {
     unsigned int v = simple_strtol(s, NULL, 0);
@@ -58,7 +58,7 @@ static void parse_dma_bits(char *s)
         dma_bitsize = BITS_PER_LONG + PAGE_SHIFT;
         max_dma_mfn = ~0UL;
     }
-    else if ( v > PAGE_SHIFT )
+    else if ( v > PAGE_SHIFT + 1 )
     {
         dma_bitsize = v;
         max_dma_mfn = (1UL << (dma_bitsize - PAGE_SHIFT)) - 1;
@@ -741,12 +741,22 @@ struct page_info *__alloc_domheap_pages(
     struct page_info *pg = NULL;
     cpumask_t mask;
     unsigned long i;
+    unsigned int bits = memflags >> _MEMF_bits, zone_hi;
 
     ASSERT(!in_irq());
 
-    if ( !(memflags & MEMF_dma) )
-    {
-    pg = alloc_heap_pages(dma_bitsize - PAGE_SHIFT, NR_ZONES - 1, cpu, order);
+    if ( bits && bits <= PAGE_SHIFT + 1 )
+        return NULL;
+
+    zone_hi = bits - PAGE_SHIFT - 1;
+    if ( zone_hi >= NR_ZONES )
+        zone_hi = NR_ZONES - 1;
+
+    if ( NR_ZONES + PAGE_SHIFT > dma_bitsize &&
+         (!bits || bits > dma_bitsize) )
+    {
+        pg = alloc_heap_pages(dma_bitsize - PAGE_SHIFT, zone_hi, cpu, order);
+
         /* Failure? Then check if we can fall back to the DMA pool. */
         if ( unlikely(pg == NULL) &&
              ((order > MAX_ORDER) ||
@@ -759,7 +769,7 @@ struct page_info *__alloc_domheap_pages(
 
     if ( pg == NULL )
         if ( (pg = alloc_heap_pages(MEMZONE_XEN + 1,
-                                    dma_bitsize - PAGE_SHIFT - 1,
+                                    zone_hi,
                                     cpu, order)) == NULL )
             return NULL;
 
diff -r 70098102f84d -r ee4850bc895b xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h     Fri Feb 23 17:01:38 2007 +0000
+++ b/xen/include/asm-ia64/config.h     Fri Feb 23 17:02:58 2007 +0000
@@ -42,7 +42,7 @@
 #define CONFIG_IOSAPIC
 #define supervisor_mode_kernel (0)
 
-#define CONFIG_DMA_BITSIZE 30
+#define CONFIG_DMA_BITSIZE 32
 
 /* If PERFC is used, include privop maps.  */
 #ifdef PERF_COUNTERS
diff -r 70098102f84d -r ee4850bc895b xen/include/asm-x86/config.h
--- a/xen/include/asm-x86/config.h      Fri Feb 23 17:01:38 2007 +0000
+++ b/xen/include/asm-x86/config.h      Fri Feb 23 17:02:58 2007 +0000
@@ -82,7 +82,7 @@
 /* Debug stack is restricted to 8kB by guard pages. */
 #define DEBUG_STACK_SIZE 8192
 
-#define CONFIG_DMA_BITSIZE 30
+#define CONFIG_DMA_BITSIZE 32
 
 #if defined(__x86_64__)
 
diff -r 70098102f84d -r ee4850bc895b xen/include/xen/mm.h
--- a/xen/include/xen/mm.h      Fri Feb 23 17:01:38 2007 +0000
+++ b/xen/include/xen/mm.h      Fri Feb 23 17:02:58 2007 +0000
@@ -74,20 +74,16 @@ int assign_pages(
     unsigned int memflags);
 
 /* memflags: */
-#define _MEMF_dma         0
-#define  MEMF_dma         (1U<<_MEMF_dma)
-#define _MEMF_no_refcount 1
+#define _MEMF_no_refcount 0
 #define  MEMF_no_refcount (1U<<_MEMF_no_refcount)
+#define _MEMF_bits        24
+#define  MEMF_bits(n)     ((n)<<_MEMF_bits)
 
 #ifdef CONFIG_PAGEALLOC_MAX_ORDER
 #define MAX_ORDER CONFIG_PAGEALLOC_MAX_ORDER
 #else
 #define MAX_ORDER 20 /* 2^20 contiguous pages */
 #endif
-
-/* DMA heap parameters. */
-extern unsigned int  dma_bitsize;
-extern unsigned long max_dma_mfn;
 
 /* Automatic page scrubbing for dead domains. */
 extern struct list_head page_scrub_list;
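With MEMF_dma gone, boolean flags occupy the low bits of memflags and
the width the top byte, so existing flag users are unaffected. The
allocator-side acceptance test can be sketched as follows (PAGE_SHIFT
again hard-coded for illustration):

    #include <assert.h>

    #define PAGE_SHIFT   12 /* x86 value, for illustration */
    #define _MEMF_bits   24
    #define MEMF_bits(n) ((n)<<_MEMF_bits)

    /* Mirrors the early-out in __alloc_domheap_pages: zero means
     * "unrestricted", a nonzero width must exceed PAGE_SHIFT + 1. */
    static int width_request_valid(unsigned int memflags)
    {
        unsigned int bits = memflags >> _MEMF_bits;
        return bits == 0 || bits > PAGE_SHIFT + 1;
    }

    int main(void)
    {
        assert(width_request_valid(0));              /* unrestricted */
        assert(width_request_valid(MEMF_bits(32)));  /* 4GB limit */
        assert(!width_request_valid(MEMF_bits(13))); /* too narrow */
        return 0;
    }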

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
