To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 3/4] domain heap allocator changes - remove bit width restrictions
From: "Jan Beulich" <jbeulich@xxxxxxxxxx>
Date: Wed, 07 Feb 2007 17:11:26 +0000
Hide the (default or user-specified) DMA width from anything outside
the heap allocator. I/O-capable guests can now request any address
width for the memory they want exchanged or added.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
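
For illustration, a minimal sketch of the new interface from a caller's
point of view (the caller below is hypothetical; only the MEMF_*
definitions and alloc_domheap_pages() are taken from this patch):

    /* memflags layout after this patch: bit 0 is MEMF_no_refcount;
     * bits 24 and up carry the requested address width, with 0
     * meaning "no restriction". */
    #define _MEMF_no_refcount 0
    #define  MEMF_no_refcount (1U<<_MEMF_no_refcount)
    #define _MEMF_bits        24
    #define  MEMF_bits(n)     ((n)<<_MEMF_bits)

    /* Hypothetical caller: order-4 allocation whose machine addresses
     * must all fit below 4GB, e.g. for a 32-bit DMA-capable device. */
    page = alloc_domheap_pages(d, 4, MEMF_bits(32));
    if ( page == NULL )
        return -ENOMEM;

The allocator recovers the width as memflags >> _MEMF_bits and derives a
zone limit from it; a zero width keeps the old unrestricted behaviour.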

Index: 2007-02-07/xen/arch/x86/domain_build.c
===================================================================
--- 2007-02-07.orig/xen/arch/x86/domain_build.c 2007-02-01 17:41:08.000000000 +0100
+++ 2007-02-07/xen/arch/x86/domain_build.c      2007-02-07 17:12:45.000000000 +0100
@@ -429,11 +429,14 @@ int construct_dom0(struct domain *d,
     if ( (1UL << order) > nr_pages )
         panic("Domain 0 allocation is too small for kernel image.\n");
 
-    /*
-     * Allocate from DMA pool: on i386 this ensures that our low-memory 1:1
-     * mapping covers the allocation.
-     */
-    if ( (page = alloc_domheap_pages(d, order, MEMF_dma)) == NULL )
+#ifdef __i386__
+    /* Ensure that our low-memory 1:1 mapping covers the allocation. */
+    page = alloc_domheap_pages(d, order,
+                               MEMF_bits(30 + (v_start >> 31)));
+#else
+    page = alloc_domheap_pages(d, order, 0);
+#endif
+    if ( page == NULL )
         panic("Not enough RAM for domain 0 allocation.\n");
     alloc_spfn = page_to_mfn(page);
     alloc_epfn = alloc_spfn + d->tot_pages;
Index: 2007-02-07/xen/common/memory.c
===================================================================
--- 2007-02-07.orig/xen/common/memory.c 2007-01-30 10:23:30.000000000 +0100
+++ 2007-02-07/xen/common/memory.c      2007-02-07 16:26:50.000000000 +0100
@@ -322,12 +322,12 @@ static long memory_exchange(XEN_GUEST_HA
          (exch.out.address_bits <
           (get_order_from_pages(max_page) + PAGE_SHIFT)) )
     {
-        if ( exch.out.address_bits < dma_bitsize )
+        if ( exch.out.address_bits <= PAGE_SHIFT )
         {
             rc = -ENOMEM;
             goto fail_early;
         }
-        memflags = MEMF_dma;
+        memflags = MEMF_bits(exch.out.address_bits);
     }
 
     if ( exch.in.extent_order <= exch.out.extent_order )
@@ -535,9 +535,9 @@ long do_memory_op(unsigned long cmd, XEN
              (reservation.address_bits <
               (get_order_from_pages(max_page) + PAGE_SHIFT)) )
         {
-            if ( reservation.address_bits < dma_bitsize )
+            if ( reservation.address_bits <= PAGE_SHIFT )
                 return start_extent;
-            args.memflags = MEMF_dma;
+            args.memflags = MEMF_bits(reservation.address_bits);
         }
 
         if ( likely(reservation.domid == DOMID_SELF) )
Index: 2007-02-07/xen/common/page_alloc.c
===================================================================
--- 2007-02-07.orig/xen/common/page_alloc.c     2007-02-07 16:26:45.000000000 +0100
+++ 2007-02-07/xen/common/page_alloc.c  2007-02-07 17:12:00.000000000 +0100
@@ -48,8 +48,8 @@ string_param("badpage", opt_badpage);
 /*
  * Bit width of the DMA heap.
  */
-unsigned int  dma_bitsize = CONFIG_DMA_BITSIZE;
-unsigned long max_dma_mfn = (1UL << (CONFIG_DMA_BITSIZE - PAGE_SHIFT)) - 1;
+static unsigned int  dma_bitsize = CONFIG_DMA_BITSIZE;
+static unsigned long max_dma_mfn = (1UL << (CONFIG_DMA_BITSIZE - PAGE_SHIFT)) - 1;
 static void parse_dma_bits(char *s)
 {
     unsigned int v = simple_strtol(s, NULL, 0);
@@ -58,7 +58,7 @@ static void parse_dma_bits(char *s)
         dma_bitsize = BITS_PER_LONG + PAGE_SHIFT;
         max_dma_mfn = ~0UL;
     }
-    else if ( v > PAGE_SHIFT )
+    else if ( v > PAGE_SHIFT + 1 )
     {
         dma_bitsize = v;
         max_dma_mfn = (1UL << (dma_bitsize - PAGE_SHIFT)) - 1;
@@ -711,12 +711,22 @@ struct page_info *__alloc_domheap_pages(
     struct page_info *pg = NULL;
     cpumask_t mask;
     unsigned long i;
+    unsigned int bits = memflags >> _MEMF_bits, zone_hi;
 
     ASSERT(!in_irq());
 
-    if ( !(memflags & MEMF_dma) )
+    if ( bits && bits <= PAGE_SHIFT + 1 )
+        return NULL;
+
+    zone_hi = bits - PAGE_SHIFT - 1;
+    if ( zone_hi >= NR_ZONES )
+        zone_hi = NR_ZONES - 1;
+
+    if ( NR_ZONES + PAGE_SHIFT > dma_bitsize &&
+         (!bits || bits > dma_bitsize) )
     {
-        pg = alloc_heap_pages(dma_bitsize - PAGE_SHIFT, NR_ZONES - 1, cpu, order);
+        pg = alloc_heap_pages(dma_bitsize - PAGE_SHIFT, zone_hi, cpu, order);
+
         /* Failure? Then check if we can fall back to the DMA pool. */
         if ( unlikely(pg == NULL) &&
              ((order > MAX_ORDER) ||
@@ -729,7 +739,7 @@ struct page_info *__alloc_domheap_pages(
 
     if ( pg == NULL )
         if ( (pg = alloc_heap_pages(MEMZONE_XEN + 1,
-                                    dma_bitsize - PAGE_SHIFT - 1,
+                                    zone_hi,
                                     cpu, order)) == NULL )
             return NULL;
 
Index: 2007-02-07/xen/include/asm-ia64/config.h
===================================================================
--- 2007-02-07.orig/xen/include/asm-ia64/config.h       2007-01-22 12:26:37.000000000 +0100
+++ 2007-02-07/xen/include/asm-ia64/config.h    2007-02-07 16:26:50.000000000 +0100
@@ -42,7 +42,7 @@
 #define CONFIG_IOSAPIC
 #define supervisor_mode_kernel (0)
 
-#define CONFIG_DMA_BITSIZE 30
+#define CONFIG_DMA_BITSIZE 32
 
 /* If PERFC is used, include privop maps.  */
 #ifdef PERF_COUNTERS
Index: 2007-02-07/xen/include/asm-x86/config.h
===================================================================
--- 2007-02-07.orig/xen/include/asm-x86/config.h        2007-01-08 14:15:31.000000000 +0100
+++ 2007-02-07/xen/include/asm-x86/config.h     2007-02-07 16:26:50.000000000 +0100
@@ -82,7 +82,7 @@
 /* Debug stack is restricted to 8kB by guard pages. */
 #define DEBUG_STACK_SIZE 8192
 
-#define CONFIG_DMA_BITSIZE 30
+#define CONFIG_DMA_BITSIZE 32
 
 #if defined(__x86_64__)
 
Index: 2007-02-07/xen/include/xen/mm.h
===================================================================
--- 2007-02-07.orig/xen/include/xen/mm.h        2007-02-07 16:25:40.000000000 +0100
+++ 2007-02-07/xen/include/xen/mm.h     2007-02-07 16:26:50.000000000 +0100
@@ -71,10 +71,10 @@ int assign_pages(
     unsigned int memflags);
 
 /* memflags: */
-#define _MEMF_dma         0
-#define  MEMF_dma         (1U<<_MEMF_dma)
-#define _MEMF_no_refcount 1
+#define _MEMF_no_refcount 0
 #define  MEMF_no_refcount (1U<<_MEMF_no_refcount)
+#define _MEMF_bits        24
+#define  MEMF_bits(n)     ((n)<<_MEMF_bits)
 
 #ifdef CONFIG_PAGEALLOC_MAX_ORDER
 #define MAX_ORDER CONFIG_PAGEALLOC_MAX_ORDER
@@ -82,10 +82,6 @@ int assign_pages(
 #define MAX_ORDER 20 /* 2^20 contiguous pages */
 #endif
 
-/* DMA heap parameters. */
-extern unsigned int  dma_bitsize;
-extern unsigned long max_dma_mfn;
-
 /* Automatic page scrubbing for dead domains. */
 extern struct list_head page_scrub_list;
 #define page_scrub_schedule_work()              \


