# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1233161921 0
# Node ID 696351cde9a42550d713901bdc1908b284435970
# Parent 31983c30c460fb405b4fc6ab8e2ae49ada2cfec5
Allow memflags to be specified to alloc_xenheap_pages().
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
xen/arch/ia64/xen/domain.c | 5 +++--
xen/arch/ia64/xen/mm.c | 2 +-
xen/arch/ia64/xen/xenmem.c | 2 +-
xen/arch/x86/bzimage.c | 2 +-
xen/arch/x86/domain.c | 17 ++++++-----------
xen/arch/x86/hvm/svm/vmcb.c | 2 +-
xen/arch/x86/smpboot.c | 4 ++--
xen/common/page_alloc.c | 19 +++++--------------
xen/common/trace.c | 2 +-
xen/common/xenoprof.c | 2 +-
xen/common/xmalloc_tlsf.c | 8 ++++----
xen/drivers/char/console.c | 2 +-
xen/drivers/char/serial.c | 2 +-
xen/include/asm-x86/hvm/svm/amd-iommu-proto.h | 2 +-
xen/include/xen/mm.h | 4 ++--
15 files changed, 31 insertions(+), 44 deletions(-)
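For reference, a minimal sketch of the interface after this change, based on the
xen/include/xen/mm.h and xen/arch/x86/domain.c hunks below. The new argument takes
the same MEMF_* flags already accepted by alloc_domheap_pages(); existing callers
simply pass 0. Illustrative only, not part of the patch itself.

    /* New prototype: callers now pass allocation flags explicitly. */
    void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);

    /* Typical call sites after this patch: */
    void *stack = alloc_xenheap_pages(STACK_ORDER, 0);     /* no constraint      */
    void *si    = alloc_xenheap_pages(0, MEMF_bits(32));   /* machine addr < 4GB */
    void *page  = alloc_xenheap_page();                    /* == alloc_xenheap_pages(0, 0) */

Note also that on allocation failure the function now simply returns NULL instead
of printing a "Cannot handle page request" message, so callers are expected to
handle NULL themselves (as all the converted call sites below already do).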
diff -r 31983c30c460 -r 696351cde9a4 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Tue Jan 27 16:41:09 2009 +0000
+++ b/xen/arch/ia64/xen/domain.c Wed Jan 28 16:58:41 2009 +0000
@@ -509,7 +509,7 @@ int vcpu_late_initialise(struct vcpu *v)
/* Create privregs page. */
order = get_order_from_shift(XMAPPEDREGS_SHIFT);
- v->arch.privregs = alloc_xenheap_pages(order);
+ v->arch.privregs = alloc_xenheap_pages(order, 0);
if (v->arch.privregs == NULL)
return -ENOMEM;
BUG_ON(v->arch.privregs == NULL);
@@ -578,7 +578,8 @@ int arch_domain_create(struct domain *d,
#endif
if (tlb_track_create(d) < 0)
goto fail_nomem1;
- d->shared_info = alloc_xenheap_pages(get_order_from_shift(XSI_SHIFT));
+ d->shared_info = alloc_xenheap_pages(
+ get_order_from_shift(XSI_SHIFT), 0);
if (d->shared_info == NULL)
goto fail_nomem;
BUG_ON(d->shared_info == NULL);
diff -r 31983c30c460 -r 696351cde9a4 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c Tue Jan 27 16:41:09 2009 +0000
+++ b/xen/arch/ia64/xen/mm.c Wed Jan 28 16:58:41 2009 +0000
@@ -3076,7 +3076,7 @@ void *pgtable_quicklist_alloc(void)
clear_page(p);
return p;
}
- p = alloc_xenheap_pages(0);
+ p = alloc_xenheap_page();
if (p) {
clear_page(p);
/*
diff -r 31983c30c460 -r 696351cde9a4 xen/arch/ia64/xen/xenmem.c
--- a/xen/arch/ia64/xen/xenmem.c Tue Jan 27 16:41:09 2009 +0000
+++ b/xen/arch/ia64/xen/xenmem.c Wed Jan 28 16:58:41 2009 +0000
@@ -64,7 +64,7 @@ paging_init (void)
mpt_table_size = max_page * sizeof(unsigned long);
mpt_order = get_order(mpt_table_size);
ASSERT(mpt_order <= MAX_ORDER);
- if ((mpt_table = alloc_xenheap_pages(mpt_order)) == NULL)
+ if ((mpt_table = alloc_xenheap_pages(mpt_order, 0)) == NULL)
panic("Not enough memory to bootstrap Xen.\n");
printk("machine to physical table: 0x%lx mpt_table_size 0x%lx\n"
diff -r 31983c30c460 -r 696351cde9a4 xen/arch/x86/bzimage.c
--- a/xen/arch/x86/bzimage.c Tue Jan 27 16:41:09 2009 +0000
+++ b/xen/arch/x86/bzimage.c Wed Jan 28 16:58:41 2009 +0000
@@ -110,7 +110,7 @@ static __init int perform_gunzip(char *
window = (unsigned char *)output;
- free_mem_ptr = (unsigned long)alloc_xenheap_pages(HEAPORDER);
+ free_mem_ptr = (unsigned long)alloc_xenheap_pages(HEAPORDER, 0);
free_mem_end_ptr = free_mem_ptr + (PAGE_SIZE << HEAPORDER);
inbuf = (unsigned char *)image;
diff -r 31983c30c460 -r 696351cde9a4 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Tue Jan 27 16:41:09 2009 +0000
+++ b/xen/arch/x86/domain.c Wed Jan 28 16:58:41 2009 +0000
@@ -357,7 +357,7 @@ int arch_domain_create(struct domain *d,
INIT_LIST_HEAD(&d->arch.relmem_list);
pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
- d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order);
+ d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order, 0);
if ( d->arch.mm_perdomain_pt == NULL )
goto fail;
memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE << pdpt_order);
@@ -405,17 +405,12 @@ int arch_domain_create(struct domain *d,
if ( d->arch.ioport_caps == NULL )
goto fail;
-#ifdef __i386__
- if ( (d->shared_info = alloc_xenheap_page()) == NULL )
+ /*
+ * The shared_info machine address must fit in a 32-bit field within a
+ * 32-bit guest's start_info structure. Hence we specify MEMF_bits(32).
+ */
+ if ( (d->shared_info = alloc_xenheap_pages(0, MEMF_bits(32))) == NULL )
goto fail;
-#else
- pg = alloc_domheap_page(
- NULL, MEMF_node(domain_to_node(d)) | MEMF_bits(32));
- if ( pg == NULL )
- goto fail;
- pg->count_info |= PGC_xen_heap;
- d->shared_info = page_to_virt(pg);
-#endif
clear_page(d->shared_info);
share_xen_page_with_guest(
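A short illustration of the constraint documented in the comment in the hunk
above. This is a sketch, not part of the patch; it assumes virt_to_maddr() is
used to obtain the machine address of the allocated page.

    /*
     * A 32-bit guest's start_info holds the shared_info machine address in a
     * 32-bit field, so the page must come from memory below 4GB.
     */
    d->shared_info = alloc_xenheap_pages(0, MEMF_bits(32));
    if ( d->shared_info == NULL )
        goto fail;
    ASSERT((virt_to_maddr(d->shared_info) >> 32) == 0);   /* fits the 32-bit field */

    /* Flags combine as before, e.g. MEMF_node(domain_to_node(d)) | MEMF_bits(32),
     * mirroring the alloc_domheap_page() call this hunk removes. */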
diff -r 31983c30c460 -r 696351cde9a4 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c Tue Jan 27 16:41:09 2009 +0000
+++ b/xen/arch/x86/hvm/svm/vmcb.c Wed Jan 28 16:58:41 2009 +0000
@@ -138,7 +138,7 @@ static int construct_vmcb(struct vcpu *v
CR_INTERCEPT_CR8_WRITE);
/* I/O and MSR permission bitmaps. */
- arch_svm->msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE));
+ arch_svm->msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE), 0);
if ( arch_svm->msrpm == NULL )
return -ENOMEM;
memset(arch_svm->msrpm, 0xff, MSRPM_SIZE);
diff -r 31983c30c460 -r 696351cde9a4 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c Tue Jan 27 16:41:09 2009 +0000
+++ b/xen/arch/x86/smpboot.c Wed Jan 28 16:58:41 2009 +0000
@@ -804,7 +804,7 @@ static void *prepare_idle_stack(unsigned
static void *prepare_idle_stack(unsigned int cpu)
{
if (!stack_base[cpu])
- stack_base[cpu] = alloc_xenheap_pages(STACK_ORDER);
+ stack_base[cpu] = alloc_xenheap_pages(STACK_ORDER, 0);
return stack_base[cpu];
}
@@ -867,7 +867,7 @@ static int __devinit do_boot_cpu(int api
MEMF_node(cpu_to_node(cpu)));
per_cpu(gdt_table, cpu) = gdt = page_to_virt(page);
#else
- per_cpu(gdt_table, cpu) = gdt = alloc_xenheap_pages(order);
+ per_cpu(gdt_table, cpu) = gdt = alloc_xenheap_pages(order, 0);
#endif
memcpy(gdt, boot_cpu_gdt_table,
NR_RESERVED_GDT_PAGES * PAGE_SIZE);
diff -r 31983c30c460 -r 696351cde9a4 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c Tue Jan 27 16:41:09 2009 +0000
+++ b/xen/common/page_alloc.c Wed Jan 28 16:58:41 2009 +0000
@@ -655,7 +655,7 @@ void init_xenheap_pages(paddr_t ps, padd
}
-void *alloc_xenheap_pages(unsigned int order)
+void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
{
struct page_info *pg;
@@ -664,15 +664,11 @@ void *alloc_xenheap_pages(unsigned int o
pg = alloc_heap_pages(
MEMZONE_XEN, MEMZONE_XEN, cpu_to_node(smp_processor_id()), order);
if ( unlikely(pg == NULL) )
- goto no_memory;
+ return NULL;
memguard_unguard_range(page_to_virt(pg), 1 << (order + PAGE_SHIFT));
return page_to_virt(pg);
-
- no_memory:
- printk("Cannot handle page request order %d!\n", order);
- return NULL;
}
@@ -695,26 +691,21 @@ void init_xenheap_pages(paddr_t ps, padd
init_domheap_pages(ps, pe);
}
-void *alloc_xenheap_pages(unsigned int order)
+void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
{
struct page_info *pg;
unsigned int i;
ASSERT(!in_irq());
- pg = alloc_heap_pages(
- MEMZONE_XEN+1, NR_ZONES-1, cpu_to_node(smp_processor_id()), order);
+ pg = alloc_domheap_pages(NULL, order, memflags);
if ( unlikely(pg == NULL) )
- goto no_memory;
+ return NULL;
for ( i = 0; i < (1u << order); i++ )
pg[i].count_info |= PGC_xen_heap;
return page_to_virt(pg);
-
- no_memory:
- printk("Cannot handle page request order %d!\n", order);
- return NULL;
}
void free_xenheap_pages(void *v, unsigned int order)
diff -r 31983c30c460 -r 696351cde9a4 xen/common/trace.c
--- a/xen/common/trace.c Tue Jan 27 16:41:09 2009 +0000
+++ b/xen/common/trace.c Wed Jan 28 16:58:41 2009 +0000
@@ -94,7 +94,7 @@ static int alloc_trace_bufs(void)
order = get_order_from_pages(nr_pages);
data_size = (opt_tbuf_size * PAGE_SIZE - sizeof(struct t_buf));
- if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
+ if ( (rawbuf = alloc_xenheap_pages(order, 0)) == NULL )
{
printk("Xen trace buffers: memory allocation failed\n");
opt_tbuf_size = 0;
diff -r 31983c30c460 -r 696351cde9a4 xen/common/xenoprof.c
--- a/xen/common/xenoprof.c Tue Jan 27 16:41:09 2009 +0000
+++ b/xen/common/xenoprof.c Wed Jan 28 16:58:41 2009 +0000
@@ -225,7 +225,7 @@ static int alloc_xenoprof_struct(
bufsize += (max_samples - 1) * i;
npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;
- d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages));
+ d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages), 0);
if ( d->xenoprof->rawbuf == NULL )
{
xfree(d->xenoprof);
diff -r 31983c30c460 -r 696351cde9a4 xen/common/xmalloc_tlsf.c
--- a/xen/common/xmalloc_tlsf.c Tue Jan 27 16:41:09 2009 +0000
+++ b/xen/common/xmalloc_tlsf.c Wed Jan 28 16:58:41 2009 +0000
@@ -300,7 +300,7 @@ struct xmem_pool *xmem_pool_create(
pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
pool_order = get_order_from_bytes(pool_bytes);
- pool = (void *)alloc_xenheap_pages(pool_order);
+ pool = (void *)alloc_xenheap_pages(pool_order, 0);
if ( pool == NULL )
return NULL;
memset(pool, 0, pool_bytes);
@@ -505,12 +505,12 @@ static void *xmalloc_pool_get(unsigned l
static void *xmalloc_pool_get(unsigned long size)
{
ASSERT(size == PAGE_SIZE);
- return alloc_xenheap_pages(0);
+ return alloc_xenheap_page();
}
static void xmalloc_pool_put(void *p)
{
- free_xenheap_pages(p,0);
+ free_xenheap_page(p);
}
static void *xmalloc_whole_pages(unsigned long size)
@@ -518,7 +518,7 @@ static void *xmalloc_whole_pages(unsigne
struct bhdr *b;
unsigned int pageorder = get_order_from_bytes(size + BHDR_OVERHEAD);
- b = alloc_xenheap_pages(pageorder);
+ b = alloc_xenheap_pages(pageorder, 0);
if ( b == NULL )
return NULL;
diff -r 31983c30c460 -r 696351cde9a4 xen/drivers/char/console.c
--- a/xen/drivers/char/console.c Tue Jan 27 16:41:09 2009 +0000
+++ b/xen/drivers/char/console.c Wed Jan 28 16:58:41 2009 +0000
@@ -885,7 +885,7 @@ static int __init debugtrace_init(void)
return 0;
order = get_order_from_bytes(bytes);
- debugtrace_buf = alloc_xenheap_pages(order);
+ debugtrace_buf = alloc_xenheap_pages(order, 0);
ASSERT(debugtrace_buf != NULL);
memset(debugtrace_buf, '\0', bytes);
diff -r 31983c30c460 -r 696351cde9a4 xen/drivers/char/serial.c
--- a/xen/drivers/char/serial.c Tue Jan 27 16:41:09 2009 +0000
+++ b/xen/drivers/char/serial.c Wed Jan 28 16:58:41 2009 +0000
@@ -495,7 +495,7 @@ void serial_async_transmit(struct serial
BUG_ON(!port->driver->tx_empty);
if ( port->txbuf == NULL )
port->txbuf = alloc_xenheap_pages(
- get_order_from_bytes(serial_txbufsz));
+ get_order_from_bytes(serial_txbufsz), 0);
}
/*
diff -r 31983c30c460 -r 696351cde9a4 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Tue Jan 27 16:41:09 2009 +0000
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Wed Jan 28 16:58:41 2009 +0000
@@ -140,7 +140,7 @@ static inline void* __alloc_amd_iommu_ta
static inline void* __alloc_amd_iommu_tables(int order)
{
void *buf;
- buf = alloc_xenheap_pages(order);
+ buf = alloc_xenheap_pages(order, 0);
return buf;
}
diff -r 31983c30c460 -r 696351cde9a4 xen/include/xen/mm.h
--- a/xen/include/xen/mm.h Tue Jan 27 16:41:09 2009 +0000
+++ b/xen/include/xen/mm.h Wed Jan 28 16:58:41 2009 +0000
@@ -45,9 +45,9 @@ void end_boot_allocator(void);
/* Xen suballocator. These functions are interrupt-safe. */
void init_xenheap_pages(paddr_t ps, paddr_t pe);
-void *alloc_xenheap_pages(unsigned int order);
+void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);
void free_xenheap_pages(void *v, unsigned int order);
-#define alloc_xenheap_page() (alloc_xenheap_pages(0))
+#define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
#define free_xenheap_page(v) (free_xenheap_pages(v,0))
/* Domain suballocator. These functions are *not* interrupt-safe.*/