xen-ppc-devel

[XenPPC] [xenppc-unstable] [POWERPC][XEN] Allocate more memory than RMA

To: xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
Subject: [XenPPC] [xenppc-unstable] [POWERPC][XEN] Allocate more memory than RMA for Dom0
From: Xen patchbot-xenppc-unstable <patchbot-xenppc-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Sun, 27 Aug 2006 20:20:31 +0000
Delivery-date: Sun, 27 Aug 2006 13:47:35 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-ppc-devel-request@lists.xensource.com?subject=help>
List-id: Xen PPC development <xen-ppc-devel.lists.xensource.com>
List-post: <mailto:xen-ppc-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ppc-devel>, <mailto:xen-ppc-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ppc-devel>, <mailto:xen-ppc-devel-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-ppc-devel-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
# Node ID f8233165fb80982c36cc8a124c6e017821805843
# Parent  228d1441ee7207d56400aa455e57ca68ad6c5729
[POWERPC][XEN] Allocate more memory than RMA for Dom0

The following patch only affects Dom0 at the moment and allows it
to be created with more memory than the RMA.  It works by allocating
extents (AKA chunks) of memory of an order specified by the processor.
The extent size should be a value capable of mapping a "large page",
so we use the 970's large page size.  These extents are tracked by an
extent list that simply records each page allocation and its mapping
(see the sketch after the list below).  Other sub-systems affected by this:
  - Dom0 OFD devtree /memory cleanup
  - log_large_page_sizes is now an array
  - Config for minimum Dom0 size
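
For orientation, the chunk rounding described above can be seen in a
standalone sketch.  This is illustrative only: the Xen types and calls
(alloc_domheap_pages(), cpu_extent_order(), struct page_info) are
replaced by plain counters, and the 16MiB extent / 4KiB page sizes are
assumptions matching the 970 defaults; the real logic is
allocate_extents() in xen/arch/powerpc/mm.c in the diff below.

/* Standalone sketch (not Xen code) of the extent rounding. */
#include <stdio.h>

#define PAGE_SHIFT 12                      /* 4KiB pages */
#define EXT_ORDER  (24 - PAGE_SHIFT)       /* 16MiB extents */

static unsigned int sketch_allocate(unsigned int nrpages,
                                    unsigned int rma_nrpages)
{
    unsigned int ext_nrpages = 1u << EXT_ORDER;
    unsigned int total = rma_nrpages;      /* the RMA is already allocated */

    while (total < nrpages)
        total += ext_nrpages;              /* one whole extent at a time */

    return total;                          /* rounded up to whole extents */
}

int main(void)
{
    unsigned int rma  = 64u  << (20 - PAGE_SHIFT);   /* 64MiB RMA */
    unsigned int want = 100u << (20 - PAGE_SHIFT);   /* 100MiB request */

    printf("asked for %u pages, got %u\n", want, sketch_allocate(want, rma));
    return 0;
}

A 100MiB request on a 64MiB RMA thus comes back as 112MiB: the RMA plus
three whole 16MiB extents.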

Caveats:
 - There is a hack around HTAB allocation because we currently
   allocate one at domain creation time; this will be corrected by a
   follow-up patch.
 - Dom0, and other domains capable of mapping MFNs, currently have
   their extents set up PFN=MFN (see the sketch below).
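
To make that last caveat concrete, here is a minimal sketch of how a PFN
above the RMA is resolved through the extent list, in the spirit of the
pfn2mfn() change below.  The flat array and names are hypothetical (the
real code walks a per-domain linked list), and because add_extent()
records pe->pfn from page_to_mfn(), extents are PFN=MFN for now, so the
lookup returns the PFN unchanged.

/* Minimal sketch (not Xen code) of the extent lookup added to pfn2mfn(). */
#include <stdio.h>

struct sketch_extent {
    unsigned long pfn;    /* first guest PFN covered by the extent */
    unsigned long mfn;    /* machine frame backing that first PFN  */
    unsigned int  order;  /* extent covers 1 << order pages        */
};

static long sketch_pfn2mfn(const struct sketch_extent *ext, int n,
                           unsigned long rma_mfn, unsigned long rma_nrpages,
                           unsigned long pfn)
{
    int i;

    if (pfn < rma_nrpages)                 /* inside the RMA */
        return rma_mfn + pfn;

    for (i = 0; i < n; i++) {              /* otherwise walk the extents */
        unsigned long end = ext[i].pfn + (1UL << ext[i].order);
        if (pfn >= ext[i].pfn && pfn < end)
            return ext[i].mfn + (pfn - ext[i].pfn);
    }
    return -1;                             /* no mapping */
}

int main(void)
{
    /* one identity-mapped (PFN == MFN) 16MiB extent starting at 0x4000 */
    struct sketch_extent ext[] = { { 0x4000, 0x4000, 12 } };

    printf("pfn 0x4100 -> mfn 0x%lx\n",
           (unsigned long)sketch_pfn2mfn(ext, 1, 0x10000, 0x4000, 0x4100));
    return 0;
}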

Signed-off-by: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
---
 xen/arch/powerpc/Makefile           |    1 
 xen/arch/powerpc/domain.c           |    3 +
 xen/arch/powerpc/domain_build.c     |   33 ++++++++++-
 xen/arch/powerpc/memory.c           |    3 -
 xen/arch/powerpc/mm.c               |   75 +++++++++++++++++++++++++
 xen/arch/powerpc/ofd_fixup.c        |   93 +------------------------------
 xen/arch/powerpc/ofd_fixup_memory.c |  107 ++++++++++++++++++++++++++++++++++++
 xen/arch/powerpc/oftree.h           |    4 -
 xen/arch/powerpc/powerpc64/ppc970.c |   22 +++++--
 xen/include/asm-powerpc/config.h    |    2 
 xen/include/asm-powerpc/domain.h    |    3 +
 xen/include/asm-powerpc/mm.h        |    2 
 xen/include/asm-powerpc/processor.h |    1 
 13 files changed, 247 insertions(+), 102 deletions(-)

diff -r 228d1441ee72 -r f8233165fb80 xen/arch/powerpc/Makefile
--- a/xen/arch/powerpc/Makefile Fri Aug 25 16:41:14 2006 -0500
+++ b/xen/arch/powerpc/Makefile Sun Aug 27 16:12:00 2006 -0400
@@ -31,6 +31,7 @@ obj-y += of-devtree.o
 obj-y += of-devtree.o
 obj-y += of-devwalk.o
 obj-y += ofd_fixup.o
+obj-y += ofd_fixup_memory.o
 obj-y += physdev.o
 obj-y += rtas.o
 obj-y += setup.o
diff -r 228d1441ee72 -r f8233165fb80 xen/arch/powerpc/domain.c
--- a/xen/arch/powerpc/domain.c Fri Aug 25 16:41:14 2006 -0500
+++ b/xen/arch/powerpc/domain.c Sun Aug 27 16:12:00 2006 -0400
@@ -114,6 +114,8 @@ int arch_domain_create(struct domain *d)
     }
     htab_alloc(d, htab_order_pages);
 
+    INIT_LIST_HEAD(&d->arch.extent_list);
+
     return 0;
 }
 
@@ -262,6 +264,7 @@ void domain_relinquish_resources(struct 
 void domain_relinquish_resources(struct domain *d)
 {
     free_domheap_pages(d->arch.rma_page, d->arch.rma_order);
+    free_extents(d);
 }
 
 void arch_dump_domain_info(struct domain *d)
diff -r 228d1441ee72 -r f8233165fb80 xen/arch/powerpc/domain_build.c
--- a/xen/arch/powerpc/domain_build.c   Fri Aug 25 16:41:14 2006 -0500
+++ b/xen/arch/powerpc/domain_build.c   Sun Aug 27 16:12:00 2006 -0400
@@ -30,6 +30,8 @@
 #include <asm/papr.h>
 #include "oftree.h"
 
+#define log2(x) ffz(~(x))
+
 extern int parseelfimage_32(struct domain_setup_info *dsi);
 extern int loadelfimage_32(struct domain_setup_info *dsi);
 
@@ -109,8 +111,10 @@ int construct_dom0(struct domain *d,
     struct domain_setup_info dsi;
     ulong dst;
     u64 *ofh_tree;
+    uint rma_nrpages = 1 << d->arch.rma_order;
     ulong rma_sz = rma_size(d->arch.rma_order);
     ulong rma = page_to_maddr(d->arch.rma_page);
+    uint htab_order;
     start_info_t *si;
     ulong eomem;
     int am64 = 1;
@@ -155,13 +159,36 @@ int construct_dom0(struct domain *d,
     /* By default DOM0 is allocated all available memory. */
     d->max_pages = ~0U;
 
+    /* default is the max(1/16th of memory, CONFIG_MIN_DOM0_PAGES) */
     if (dom0_nrpages == 0) {
-        dom0_nrpages = 1UL << d->arch.rma_order;
-    }
+        dom0_nrpages = total_pages >> 4;
+
+        if (dom0_nrpages < CONFIG_MIN_DOM0_PAGES)
+            dom0_nrpages = CONFIG_MIN_DOM0_PAGES;
+    }
+
+    /* make sure we are at least as big as the RMA */
+    if (dom0_nrpages < rma_nrpages)
+        dom0_nrpages = rma_nrpages;
+    else
+        dom0_nrpages = allocate_extents(d, dom0_nrpages, rma_nrpages);
 
     d->tot_pages = dom0_nrpages;
     ASSERT(d->tot_pages > 0);
     
+    htab_order = log2(d->tot_pages) - 6;
+    if (d->arch.htab.order > 0) {
+        /* we incorrectly allocate this too early, so let's adjust if
+         * necessary */
+        printk("WARNING: htab allocated too early\n");
+        if (d->arch.htab.order < htab_order) {
+            printk("WARNING: htab reallocated for more memory: 0x%x\n",
+                htab_order);
+            htab_free(d);
+            htab_alloc(d, htab_order);
+        }
+    }
+
     ASSERT( image_len < rma_sz );
 
     si = (start_info_t *)(rma_addr(&d->arch, RMA_START_INFO) + rma);
@@ -280,7 +307,7 @@ int construct_dom0(struct domain *d,
 
     printk("DOM: pc = 0x%lx, r2 = 0x%lx\n", pc, r2);
 
-    ofd_dom0_fixup(d, *ofh_tree + rma, si, dst - rma);
+    ofd_dom0_fixup(d, *ofh_tree + rma, si);
 
     set_bit(_VCPUF_initialised, &v->vcpu_flags);
 
diff -r 228d1441ee72 -r f8233165fb80 xen/arch/powerpc/memory.c
--- a/xen/arch/powerpc/memory.c Fri Aug 25 16:41:14 2006 -0500
+++ b/xen/arch/powerpc/memory.c Sun Aug 27 16:12:00 2006 -0400
@@ -64,8 +64,7 @@ static void set_max_page(struct membuf *
     }
 }
 
-/* mark all memory from modules onward as unused, skipping hole(s),
- * and returning size of hole(s) */
+/* mark all memory from modules onward as unused */
 static void heap_init(struct membuf *mb, uint entries)
 {
     int i;
diff -r 228d1441ee72 -r f8233165fb80 xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c     Fri Aug 25 16:41:14 2006 -0500
+++ b/xen/arch/powerpc/mm.c     Sun Aug 27 16:12:00 2006 -0400
@@ -239,6 +239,69 @@ static int mfn_in_hole(ulong mfn)
     return 0;
 }
 
+static uint add_extent(struct domain *d, struct page_info *pg, uint order)
+{
+    struct page_extents *pe;
+
+    pe = xmalloc(struct page_extents);
+    if (pe == NULL)
+        return 0;
+
+    pe->pg = pg;
+    pe->order = order;
+    pe->pfn = page_to_mfn(pg);
+
+    list_add_tail(&pe->pe_list, &d->arch.extent_list);
+
+    return pe->pfn;
+}
+
+void free_extents(struct domain *d)
+{
+    /* we just need to free the memory behind list */
+    struct list_head *list;
+    struct list_head *ent;
+    struct list_head *next;
+
+    list = &d->arch.extent_list;
+    ent = list->next;
+
+    while (ent != list) {
+        next = ent->next;
+        xfree(ent);
+        ent = next;
+    }
+}
+
+uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages)
+{
+    uint ext_order;
+    uint ext_nrpages;
+    uint total_nrpages;
+    struct page_info *pg;
+
+    ext_order = cpu_extent_order();
+    ext_nrpages = 1 << ext_order;
+
+    total_nrpages = rma_nrpages;
+
+    /* We only allocate in whole extents (ext_nrpages pages each), so if
+     * the request is not a multiple you get more than you asked for */
+    while (total_nrpages < nrpages) {
+        pg = alloc_domheap_pages(d, ext_order, 0);
+        if (pg == NULL)
+            return total_nrpages;
+
+        if (add_extent(d, pg, ext_order) == 0) {
+            free_domheap_pages(pg, ext_order);
+            return total_nrpages;
+        }
+        total_nrpages += ext_nrpages;
+    }
+
+    return total_nrpages;
+}
+        
 int allocate_rma(struct domain *d, unsigned int order_pages)
 {
     ulong rma_base;
@@ -266,6 +329,7 @@ ulong pfn2mfn(struct domain *d, long pfn
 {
     ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
     ulong rma_size_mfn = 1UL << d->arch.rma_order;
+    struct page_extents *pe;
 
     if (pfn < rma_size_mfn) {
         if (type)
@@ -278,6 +342,17 @@ ulong pfn2mfn(struct domain *d, long pfn
         if (type)
             *type = PFN_TYPE_IO;
         return pfn;
+    }
+
+    /* quick tests first */
+    list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
+        uint end_pfn = pe->pfn + (1 << pe->order);
+
+        if (pfn >= pe->pfn && pfn < end_pfn) {
+            if (type)
+                *type = PFN_TYPE_LOGICAL;
+            return page_to_mfn(pe->pg) + (pfn - pe->pfn);
+        }
     }
 
     /* This hack allows dom0 to map all memory, necessary to
diff -r 228d1441ee72 -r f8233165fb80 xen/arch/powerpc/ofd_fixup.c
--- a/xen/arch/powerpc/ofd_fixup.c      Fri Aug 25 16:41:14 2006 -0500
+++ b/xen/arch/powerpc/ofd_fixup.c      Sun Aug 27 16:12:00 2006 -0400
@@ -13,7 +13,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  *
- * Copyright (C) IBM Corp. 2005
+ * Copyright (C) IBM Corp. 2005, 2006
  *
  * Authors: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
  */
@@ -317,91 +317,6 @@ static ofdn_t ofd_rtas_props(void *m)
 }
 #endif
 
-struct mem_reg {
-    u64 addr;
-    u64 sz;
-};
-
-static ofdn_t ofd_memory_chunk_create(void *m, ofdn_t p,
-        const char *ppath,
-        const char *name,
-        const char *dt,
-        ulong start, ulong size)
-{
-    struct mem_reg reg;
-    char path[128];
-    ulong l;
-    u32 v;
-    ofdn_t n;
-    ulong nl = strlen(name) + 1;
-    ulong dtl = strlen(dt) + 1;
-
-    l = snprintf(path, sizeof (path), "%s/%s@%lx", ppath, name, start);
-    n = ofd_node_add(m, p, path, l + 1);
-    ofd_prop_add(m, n, "name", name, nl);
-
-    v = 1;
-    ofd_prop_add(m, n, "#address-cells", &v, sizeof (v));
-    v = 0;
-    ofd_prop_add(m, n, "#size-cells", &v, sizeof (v));
-
-    ofd_prop_add(m, n, "device_type", dt, dtl);
-
-    /* physical addresses usable without regard to OF */
-    reg.addr = start;
-    reg.sz = size;
-    ofd_prop_add(m, n, "reg", &reg, sizeof (reg));
-
-    return n;
-}
-
-static ofdn_t ofd_memory_props(void *m, struct domain *d, ulong eoload)
-{
-    ofdn_t n = -1;
-    ulong start = 0;
-    static char name[] = "memory";
-    ulong mem_size = rma_size(d->arch.rma_order);
-    ulong chunk_size = rma_size(d->arch.rma_order);
-
-    /* Remove all old memory props */
-    do {
-        ofdn_t old;
-
-        old = ofd_node_find_by_prop(m, OFD_ROOT, "device_type",
-                                    name, sizeof(name));
-        if (old <= 0) break;
-
-        ofd_node_prune(m, old);
-    } while (1);
-
-    while (start < mem_size) {
-        ulong size = (mem_size < chunk_size) ? mem_size : chunk_size;
-
-        n = ofd_memory_chunk_create(m, OFD_ROOT, "", "memory", "memory",
-                start, size);
-
-        if (start == 0) {
-            /* We are processing the first and RMA chunk */
-
-            /* free list of physical addresses available after OF and
-             * client program have been accounted for */
-            struct mem_reg avail[] = {
-                /* 0 til OF @ 32MiB - 16KiB stack */
-                { .addr = 0, .sz = ((32 << 20) - (16 << 10)) },
-                /* end of loaded material to the end the chunk - 1 page */
-                { .addr = eoload, .sz = chunk_size - eoload - PAGE_SIZE },
-                /* the last page is reserved for xen_start_info */
-            };
-            ofd_prop_add(m, n, "available", &avail,
-                    sizeof (avail));
-        }
-
-        start += size;
-        mem_size -= size;
-    }
-    return n;
-}
-
 static ofdn_t ofd_xen_props(void *m, struct domain *d, start_info_t *si)
 {
     ofdn_t n;
@@ -442,7 +357,7 @@ static ofdn_t ofd_xen_props(void *m, str
     return n;
 }
 
-int ofd_dom0_fixup(struct domain *d, ulong mem, start_info_t *si, ulong eoload)
+int ofd_dom0_fixup(struct domain *d, ulong mem, start_info_t *si)
 {
     void *m;
     const ofdn_t n = OFD_ROOT;
@@ -470,8 +385,8 @@ int ofd_dom0_fixup(struct domain *d, ulo
     printk("Add /chosen props\n");
     ofd_chosen_props(m, (char *)si->cmd_line);
 
-    printk("fix /memory@0 props\n");
-    ofd_memory_props(m, d, eoload);
+    printk("fix /memory props\n");
+    ofd_memory_props(m, d);
 
     printk("fix /xen props\n");
     ofd_xen_props(m, d, si);
diff -r 228d1441ee72 -r f8233165fb80 xen/arch/powerpc/oftree.h
--- a/xen/arch/powerpc/oftree.h Fri Aug 25 16:41:14 2006 -0500
+++ b/xen/arch/powerpc/oftree.h Sun Aug 27 16:12:00 2006 -0400
@@ -26,8 +26,8 @@ extern ulong oftree_len;
 extern ulong oftree_len;
 extern ulong oftree_end;
 
-extern int ofd_dom0_fixup(
-    struct domain *d, ulong mem, start_info_t *si, ulong dst);
+extern int ofd_dom0_fixup(struct domain *d, ulong mem, start_info_t *si);
+extern void ofd_memory_props(void *m, struct domain *d);
 
 extern int firmware_image_start[0];
 extern int firmware_image_size[0];
diff -r 228d1441ee72 -r f8233165fb80 xen/arch/powerpc/powerpc64/ppc970.c
--- a/xen/arch/powerpc/powerpc64/ppc970.c       Fri Aug 25 16:41:14 2006 -0500
+++ b/xen/arch/powerpc/powerpc64/ppc970.c       Sun Aug 27 16:12:00 2006 -0400
@@ -49,6 +49,10 @@ static struct rma_settings rma_orders[] 
     { .order = 38, .rmlr0 = 0, .rmlr12 = 0, }, /* 256 GB */
 };
 
+static uint log_large_page_sizes[] = {
+    4 + 20, /* (1 << 4) == 16M */
+};
+
 static struct rma_settings *cpu_find_rma(unsigned int order)
 {
     int i;
@@ -66,14 +70,20 @@ unsigned int cpu_default_rma_order_pages
 
 unsigned int cpu_large_page_orders(uint *sizes, uint max)
 {
-    uint lp_log_size = 4 + 20; /* (1 << 4) == 16M */
-    if (max < 1)
-        return 0;
+    uint i = 0;
 
-    sizes[0] = lp_log_size - PAGE_SHIFT;
+    while (i < max && i < ARRAY_SIZE(log_large_page_sizes)) {
+        sizes[i] = log_large_page_sizes[i] - PAGE_SHIFT;
+        ++i;
+    }
 
-    return 1;
-}    
+    return i;
+}
+
+unsigned int cpu_extent_order(void)
+{
+    return log_large_page_sizes[0] - PAGE_SHIFT;
+}
 
 void cpu_initialize(int cpuid)
 {
diff -r 228d1441ee72 -r f8233165fb80 xen/include/asm-powerpc/config.h
--- a/xen/include/asm-powerpc/config.h  Fri Aug 25 16:41:14 2006 -0500
+++ b/xen/include/asm-powerpc/config.h  Sun Aug 27 16:12:00 2006 -0400
@@ -47,6 +47,8 @@ extern char __bss_start[];
 /* this should be per processor, but for now */
 #define CACHE_LINE_SIZE 128
 
+/* 256M - 64M of Xen space seems like a nice number */
+#define CONFIG_MIN_DOM0_PAGES (192 << (20 - PAGE_SHIFT))
 #define CONFIG_SHADOW 1
 #define CONFIG_GDB 1
 #define CONFIG_SMP 1
diff -r 228d1441ee72 -r f8233165fb80 xen/include/asm-powerpc/domain.h
--- a/xen/include/asm-powerpc/domain.h  Fri Aug 25 16:41:14 2006 -0500
+++ b/xen/include/asm-powerpc/domain.h  Sun Aug 27 16:12:00 2006 -0400
@@ -37,6 +37,9 @@ struct arch_domain {
      * processor is in real mode */
     struct page_info *rma_page;
     uint rma_order;
+
+    /* list of extents beyond RMA */
+    struct list_head extent_list;
 
     /* I/O-port access bitmap mask. */
     u8 *iobmp_mask;       /* Address of IO bitmap mask, or NULL.      */
diff -r 228d1441ee72 -r f8233165fb80 xen/include/asm-powerpc/mm.h
--- a/xen/include/asm-powerpc/mm.h      Fri Aug 25 16:41:14 2006 -0500
+++ b/xen/include/asm-powerpc/mm.h      Sun Aug 27 16:12:00 2006 -0400
@@ -259,6 +259,8 @@ static inline unsigned long gmfn_to_mfn(
 #define mfn_to_gmfn(_d, mfn) (mfn)
 
 extern int allocate_rma(struct domain *d, unsigned int order_pages);
+extern uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages);
+extern void free_extents(struct domain *d);
 
 extern int steal_page(struct domain *d, struct page_info *page,
                         unsigned int memflags);
diff -r 228d1441ee72 -r f8233165fb80 xen/include/asm-powerpc/processor.h
--- a/xen/include/asm-powerpc/processor.h       Fri Aug 25 16:41:14 2006 -0500
+++ b/xen/include/asm-powerpc/processor.h       Sun Aug 27 16:12:00 2006 -0400
@@ -40,6 +40,7 @@ extern void show_registers(struct cpu_us
 extern void show_registers(struct cpu_user_regs *);
 extern void show_execution_state(struct cpu_user_regs *);
 extern void show_backtrace(ulong sp, ulong lr, ulong pc);
+extern unsigned int cpu_extent_order(void);
 extern unsigned int cpu_default_rma_order_pages(void);
 extern uint cpu_large_page_orders(uint *sizes, uint max);
 extern void cpu_initialize(int cpuid);
diff -r 228d1441ee72 -r f8233165fb80 xen/arch/powerpc/ofd_fixup_memory.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/powerpc/ofd_fixup_memory.c       Sun Aug 27 16:12:00 2006 -0400
@@ -0,0 +1,107 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2006
+ *
+ * Authors: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
+ */
+
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/sched.h>
+#include <public/xen.h>
+#include "of-devtree.h"
+#include "oftree.h"
+
+static char memory[] = "memory";
+
+struct mem_reg {
+    u64 addr;
+    u64 sz;
+};
+
+static void ofd_memory_clean(void *m)
+{
+    ofdn_t old;
+
+    /* Remove all old memory props */
+    do {
+        old = ofd_node_find_by_prop(m, OFD_ROOT, "device_type",
+                                    memory, sizeof(memory));
+        if (old <= 0)
+            break;
+
+        ofd_node_prune(m, old);
+    } while (1);
+}
+
+static ofdn_t ofd_memory_node_create(
+    void *m, ofdn_t p, const char *ppath, const char *name,
+    const char *dt, ulong start, ulong size)
+{
+    struct mem_reg reg;
+    char path[128];
+    ulong l;
+    ofdn_t n;
+    ulong nl = strlen(name) + 1;
+    ulong dtl = strlen(dt) + 1;
+
+    l = snprintf(path, sizeof (path), "%s/%s@%lx", ppath, name, start);
+    n = ofd_node_add(m, p, path, l + 1);
+    ofd_prop_add(m, n, "name", name, nl);
+    ofd_prop_add(m, n, "device_type", dt, dtl);
+
+    /* physical addresses usable without regard to OF */
+    reg.addr = start;
+    reg.sz = size;
+    ofd_prop_add(m, n, "reg", &reg, sizeof (reg));
+
+    return n;
+}
+
+static void ofd_memory_rma_node(void *m, struct domain *d)
+{
+    ulong size = rma_size(d->arch.rma_order);
+    ofdn_t n;
+
+    n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory, 0, size);
+    BUG_ON(n <= 0);
+}
+
+static void ofd_memory_extent_nodes(void *m, struct domain *d)
+{
+    ulong start;
+    ulong size;
+    ofdn_t n;
+    struct page_extents *pe;
+
+    list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
+
+        start = pe->pfn << PAGE_SHIFT;
+        size = 1UL << (pe->order + PAGE_SHIFT);
+
+        n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory,
+                                    start, size);
+
+        BUG_ON(n <= 0);
+    }
+}
+
+void ofd_memory_props(void *m, struct domain *d)
+{
+    ofd_memory_clean(m);
+    ofd_memory_rma_node(m, d);
+    ofd_memory_extent_nodes(m,d);
+}

_______________________________________________
Xen-ppc-devel mailing list
Xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ppc-devel
