# HG changeset patch
# User Hollis Blanchard <hollisb@xxxxxxxxxx>
# Node ID 6bd1a39dbfc8e220b3f921878ce607a3f0a25aa9
# Parent 215d5eae720cdf96724f919a04a9f73bea887bfd
[XEN][POWERPC] split out an allocate_rma() function from arch_domain_create()
Signed-off-by: Hollis Blanchard <hollisb@xxxxxxxxxx>
---
xen/arch/powerpc/domain.c | 34 ++++++++++++++--------------------
xen/arch/powerpc/mm.c | 23 +++++++++++++++++++++++
xen/include/asm-powerpc/mm.h | 2 ++
3 files changed, 39 insertions(+), 20 deletions(-)
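In summary: the RMA (real mode area) allocation, alignment check and clearing move out of arch_domain_create() into a new allocate_rma() helper in mm.c, exported through asm-powerpc/mm.h, and the failure path now returns -ENOMEM instead of 1. The snippet below is an illustrative condensation of the hunks that follow, not the literal patch; Xen-internal helpers such as alloc_domheap_pages(), page_to_maddr(), rma_size() and the struct domain fields are assumed to be as declared elsewhere in the tree.

    /* New helper in xen/arch/powerpc/mm.c (condensed from the hunks below) */
    int allocate_rma(struct domain *d, unsigned int order_pages)
    {
        ulong rma_base;
        ulong rma_sz = rma_size(order_pages);

        /* Allocate 2^order_pages contiguous pages from the domain heap. */
        d->arch.rma_page = alloc_domheap_pages(d, order_pages, 0);
        if (d->arch.rma_page == NULL)
            return -ENOMEM;             /* callers now see an errno, not 1 */
        d->arch.rma_order = order_pages;

        rma_base = page_to_maddr(d->arch.rma_page);
        BUG_ON(rma_base & (rma_sz - 1)); /* RMA must be size-aligned */
        memset((void *)rma_base, 0, rma_sz);
        return 0;
    }

    /* Caller side in arch_domain_create(), xen/arch/powerpc/domain.c */
        rc = allocate_rma(d, rma_order_pages);
        if (rc)
            return rc;

Apart from the code motion, the only behavioural change is the error value: arch_domain_create() used to return 1 when the RMA allocation failed and now propagates -ENOMEM from allocate_rma().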
diff -r 215d5eae720c -r 6bd1a39dbfc8 xen/arch/powerpc/domain.c
--- a/xen/arch/powerpc/domain.c Fri Aug 25 14:48:07 2006 -0500
+++ b/xen/arch/powerpc/domain.c Fri Aug 25 15:09:36 2006 -0500
@@ -76,8 +76,9 @@ int arch_domain_create(struct domain *d)
{
unsigned long rma_base;
unsigned long rma_sz;
- uint htab_order;
- uint nr_pages;
+ uint rma_order_pages;
+ uint htab_order_pages;
+ int rc;
if (d->domain_id == IDLE_DOMAIN_ID) {
d->shared_info = (void *)alloc_xenheap_page();
@@ -86,23 +87,16 @@ int arch_domain_create(struct domain *d)
return 0;
}
- d->arch.rma_order = cpu_default_rma_order_pages();
- rma_sz = rma_size(d->arch.rma_order);
-
/* allocate the real mode area */
- nr_pages = 1UL << d->arch.rma_order;
- d->max_pages = nr_pages;
+ rma_order_pages = cpu_default_rma_order_pages();
+ d->max_pages = 1UL << rma_order_pages;
d->tot_pages = 0;
- d->arch.rma_page = alloc_domheap_pages(d, d->arch.rma_order, 0);
- if (NULL == d->arch.rma_page)
- return 1;
-
+
+ rc = allocate_rma(d, rma_order_pages);
+ if (rc)
+ return rc;
rma_base = page_to_maddr(d->arch.rma_page);
-
- BUG_ON(rma_base & (rma_sz - 1)); /* check alignment */
-
- printk("clearing RMO: 0x%lx[0x%lx]\n", rma_base, rma_sz);
- memset((void *)rma_base, 0, rma_sz);
+ rma_sz = rma_size(rma_order_pages);
d->shared_info = (shared_info_t *)
(rma_addr(&d->arch, RMA_SHARED_INFO) + rma_base);
@@ -113,12 +107,12 @@ int arch_domain_create(struct domain *d)
/* FIXME: we need to know the maximum addressable memory for this
* domain to calculate this correctly. It should probably be set
* by the management tools */
- htab_order = d->arch.rma_order - 6; /* (1/64) */
+ htab_order_pages = rma_order_pages - 6; /* (1/64) */
if (test_bit(_DOMF_privileged, &d->domain_flags)) {
/* bump the htab size of privileged domains */
- ++htab_order;
- }
- htab_alloc(d, htab_order);
+ ++htab_order_pages;
+ }
+ htab_alloc(d, htab_order_pages);
return 0;
}
diff -r 215d5eae720c -r 6bd1a39dbfc8 xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c Fri Aug 25 14:48:07 2006 -0500
+++ b/xen/arch/powerpc/mm.c Fri Aug 25 15:09:36 2006 -0500
@@ -239,6 +239,29 @@ static int mfn_in_hole(ulong mfn)
return 0;
}
+int allocate_rma(struct domain *d, unsigned int order_pages)
+{
+ ulong rma_base;
+ ulong rma_sz = rma_size(order_pages);
+
+ d->arch.rma_page = alloc_domheap_pages(d, order_pages, 0);
+ if (d->arch.rma_page == NULL) {
+ DPRINTK("Could not allocate order_pages=%d RMA for domain %u\n",
+ order_pages, d->domain_id);
+ return -ENOMEM;
+ }
+ d->arch.rma_order = order_pages;
+
+ rma_base = page_to_maddr(d->arch.rma_page);
+ BUG_ON(rma_base & (rma_sz - 1)); /* check alignment */
+
+ /* XXX */
+ printk("clearing RMA: 0x%lx[0x%lx]\n", rma_base, rma_sz);
+ memset((void *)rma_base, 0, rma_sz);
+
+ return 0;
+}
+
ulong pfn2mfn(struct domain *d, long pfn, int *type)
{
ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
diff -r 215d5eae720c -r 6bd1a39dbfc8 xen/include/asm-powerpc/mm.h
--- a/xen/include/asm-powerpc/mm.h Fri Aug 25 14:48:07 2006 -0500
+++ b/xen/include/asm-powerpc/mm.h Fri Aug 25 15:09:36 2006 -0500
@@ -258,6 +258,8 @@ static inline unsigned long gmfn_to_mfn(
#define mfn_to_gmfn(_d, mfn) (mfn)
+extern int allocate_rma(struct domain *d, unsigned int order_pages);
+
extern int steal_page(struct domain *d, struct page_info *page,
unsigned int memflags);