# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 51094fae410e574ca2194478974c034fb1884d61
# Parent 04d01b8fa2194d10b31d77ea5543c247a005db90
This patch adds a DMA zone to Xen, and modifies xen_contig_memory()
to request DMA pages.
Signed-off-by: srparish@xxxxxxxxxx
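
For context: the hypercall interface has no spare argument, so the patch
overloads the existing extent_order parameter of MEMOP_increase_reservation.
Bits 0-7 still carry the allocation order, and bits 8-15 carry the caller's
required address width, hence the "order | (32<<8)" in xen_contig_memory()
below. A minimal sketch of the packing; these helper names are illustrative
only and are not part of the patch:

  /* Illustrative helpers only -- not part of the patch. */
  static inline unsigned int pack_extent_order(unsigned int order,
                                               unsigned int address_bits)
  {
      return order | (address_bits << 8);   /* bits 8-15: address width */
  }

  static inline unsigned int unpack_order(unsigned int arg)
  {
      return arg & 0xff;                    /* bits 0-7: allocation order */
  }

  static inline unsigned int unpack_address_bits(unsigned int arg)
  {
      return (arg >> 8) & 0xff;
  }

Xen splits the two fields apart again on the receiving side (dom_mem_ops.c
below) and maps an address width of 32 bits or less to the new
ALLOC_DOM_DMA flag.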
diff -r 04d01b8fa219 -r 51094fae410e linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c Fri Jul 29 10:27:12 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c Fri Jul 29 10:31:22 2005
@@ -296,7 +296,7 @@
/* 2. Get a new contiguous memory extent. */
BUG_ON(HYPERVISOR_dom_mem_op(
- MEMOP_increase_reservation, &mfn, 1, order) != 1);
+ MEMOP_increase_reservation, &mfn, 1, order | (32<<8)) != 1);
/* 3. Map the new extent in place of old pages. */
for (i = 0; i < (1<<order); i++) {
diff -r 04d01b8fa219 -r 51094fae410e xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c Fri Jul 29 10:27:12 2005
+++ b/xen/arch/x86/domain_build.c Fri Jul 29 10:31:22 2005
@@ -63,7 +63,7 @@
unsigned int order = get_order(max * PAGE_SIZE);
if ( (max & (max-1)) != 0 )
order--;
- while ( (page = alloc_domheap_pages(d, order)) == NULL )
+ while ( (page = alloc_domheap_pages(d, order, 0)) == NULL )
if ( order-- == 0 )
break;
return page;
diff -r 04d01b8fa219 -r 51094fae410e xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c Fri Jul 29 10:27:12 2005
+++ b/xen/arch/x86/x86_32/mm.c Fri Jul 29 10:31:22 2005
@@ -102,7 +102,7 @@
mpt_size = 4*1024*1024;
for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
{
- if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER)) == NULL )
+ if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL )
panic("Not enough memory to bootstrap Xen.\n");
idle_pg_table_l2[l2_linear_offset(RDWR_MPT_VIRT_START) + i] =
l2e_from_page(pg, PAGE_HYPERVISOR | _PAGE_PSE);
diff -r 04d01b8fa219 -r 51094fae410e xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c Fri Jul 29 10:27:12 2005
+++ b/xen/arch/x86/x86_64/mm.c Fri Jul 29 10:31:22 2005
@@ -100,7 +100,7 @@
*/
for ( i = 0; i < max_page; i += ((1UL << L2_PAGETABLE_SHIFT) / 8) )
{
- pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER);
+ pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0);
if ( pg == NULL )
panic("Not enough memory for m2p table\n");
map_pages_to_xen(
diff -r 04d01b8fa219 -r 51094fae410e xen/common/dom_mem_ops.c
--- a/xen/common/dom_mem_ops.c Fri Jul 29 10:27:12 2005
+++ b/xen/common/dom_mem_ops.c Fri Jul 29 10:31:22 2005
@@ -37,7 +37,8 @@
unsigned long *extent_list,
unsigned long start_extent,
unsigned int nr_extents,
- unsigned int extent_order)
+ unsigned int extent_order,
+ unsigned int flags)
{
struct pfn_info *page;
unsigned long i;
@@ -56,7 +57,8 @@
{
PREEMPT_CHECK(MEMOP_increase_reservation);
- if ( unlikely((page = alloc_domheap_pages(d, extent_order)) == NULL) )
+ if ( unlikely((page = alloc_domheap_pages(d, extent_order,
+ flags)) == NULL) )
{
DPRINTK("Could not allocate a frame\n");
return i;
@@ -131,10 +133,15 @@
{
struct domain *d;
unsigned long rc, start_extent;
+ unsigned int address_bits_order;
/* Extract @start_extent from @op. */
start_extent = op >> START_EXTENT_SHIFT;
op &= (1 << START_EXTENT_SHIFT) - 1;
+
+ /* separate extent_order and address_bits_order */
+ address_bits_order = (extent_order >> 8) & 0xff;
+ extent_order &= 0xff;
if ( unlikely(start_extent > nr_extents) )
return -EINVAL;
@@ -150,7 +157,8 @@
{
case MEMOP_increase_reservation:
rc = alloc_dom_mem(
- d, extent_list, start_extent, nr_extents, extent_order);
+ d, extent_list, start_extent, nr_extents, extent_order,
+ (address_bits_order <= 32) ? ALLOC_DOM_DMA : 0);
break;
case MEMOP_decrease_reservation:
rc = free_dom_mem(
diff -r 04d01b8fa219 -r 51094fae410e xen/common/page_alloc.c
--- a/xen/common/page_alloc.c Fri Jul 29 10:27:12 2005
+++ b/xen/common/page_alloc.c Fri Jul 29 10:31:22 2005
@@ -207,7 +207,13 @@
#define MEMZONE_XEN 0
#define MEMZONE_DOM 1
-#define NR_ZONES 2
+#define MEMZONE_DMADOM 2
+#define NR_ZONES 3
+
+
+#define MAX_DMADOM_PFN 0xFFFFF
+#define pfn_dom_zone_type(_pfn) \
+ (((_pfn) <= MAX_DMADOM_PFN) ? MEMZONE_DMADOM : MEMZONE_DOM)
/* Up to 2^20 pages can be allocated at once. */
#define MAX_ORDER 20
@@ -236,7 +242,7 @@
if ( next_free )
map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
if ( curr_free )
- free_heap_pages(MEMZONE_DOM, pfn_to_page(i), 0);
+ free_heap_pages(pfn_dom_zone_type(i), pfn_to_page(i), 0);
}
}
@@ -474,14 +480,21 @@
{
ASSERT(!in_irq());
- ps = round_pgup(ps);
- pe = round_pgdown(pe);
-
- init_heap_pages(MEMZONE_DOM, phys_to_page(ps), (pe - ps) >> PAGE_SHIFT);
-}
-
-
-struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order)
+ ps = round_pgup(ps) >> PAGE_SHIFT;
+ pe = round_pgdown(pe) >> PAGE_SHIFT;
+
+ if (ps < MAX_DMADOM_PFN && pe > MAX_DMADOM_PFN) {
+ init_heap_pages(MEMZONE_DMADOM, pfn_to_page(ps), MAX_DMADOM_PFN - ps);
+ init_heap_pages(MEMZONE_DOM, pfn_to_page(MAX_DMADOM_PFN),
+ pe - MAX_DMADOM_PFN);
+ }
+ else
+ init_heap_pages(pfn_dom_zone_type(ps), pfn_to_page(ps), pe - ps);
+}
+
+
+struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order,
+ unsigned int flags)
{
struct pfn_info *pg;
cpumask_t mask;
@@ -489,8 +502,13 @@
ASSERT(!in_irq());
- if ( unlikely((pg = alloc_heap_pages(MEMZONE_DOM, order)) == NULL) )
- return NULL;
+ pg = NULL;
+ if (! (flags & ALLOC_DOM_DMA))
+ pg = alloc_heap_pages(MEMZONE_DOM, order);
+ if (pg == NULL) {
+ if ( unlikely((pg = alloc_heap_pages(MEMZONE_DMADOM, order)) == NULL) )
+ return NULL;
+ }
mask = pg->u.free.cpumask;
tlbflush_filter(mask, pg->tlbflush_timestamp);
@@ -531,7 +549,7 @@
DPRINTK("...or the domain is dying (%d)\n",
!!test_bit(_DOMF_dying, &d->domain_flags));
spin_unlock(&d->page_alloc_lock);
- free_heap_pages(MEMZONE_DOM, pg, order);
+ free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order);
return NULL;
}
@@ -596,7 +614,7 @@
if ( likely(!test_bit(_DOMF_dying, &d->domain_flags)) )
{
- free_heap_pages(MEMZONE_DOM, pg, order);
+ free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order);
}
else
{
@@ -616,7 +634,7 @@
else
{
/* Freeing an anonymous domain-heap page. */
- free_heap_pages(MEMZONE_DOM, pg, order);
+ free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order);
drop_dom_ref = 0;
}
@@ -627,7 +645,7 @@
unsigned long avail_domheap_pages(void)
{
- return avail[MEMZONE_DOM];
+ return avail[MEMZONE_DOM] + avail[MEMZONE_DMADOM];
}
@@ -676,7 +694,7 @@
p = map_domain_page(page_to_pfn(pg));
clear_page(p);
unmap_domain_page(p);
- free_heap_pages(MEMZONE_DOM, pg, 0);
+ free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, 0);
}
} while ( (NOW() - start) < MILLISECS(1) );
}
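
The zone boundary used above is MAX_DMADOM_PFN = 0xFFFFF: with 4KB pages,
frame 0xFFFFF is the last one below the 4GB line (0x100000 frames * 4KB =
4GB), matching the 32-bit address width that xen_contig_memory() requests.
A standalone sketch of the classification (assumes 4KB pages; compiles
outside Xen):

  #include <stdio.h>

  #define MEMZONE_DOM     1
  #define MEMZONE_DMADOM  2
  #define MAX_DMADOM_PFN  0xFFFFF   /* last page frame below 4GB */

  static unsigned int pfn_dom_zone_type(unsigned long pfn)
  {
      return (pfn <= MAX_DMADOM_PFN) ? MEMZONE_DMADOM : MEMZONE_DOM;
  }

  int main(void)
  {
      printf("pfn 0xFFFFF  -> zone %u\n", pfn_dom_zone_type(0xFFFFFUL));  /* 2 */
      printf("pfn 0x100000 -> zone %u\n", pfn_dom_zone_type(0x100000UL)); /* 1 */
      return 0;
  }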
diff -r 04d01b8fa219 -r 51094fae410e xen/include/xen/mm.h
--- a/xen/include/xen/mm.h Fri Jul 29 10:27:12 2005
+++ b/xen/include/xen/mm.h Fri Jul 29 10:31:22 2005
@@ -33,11 +33,14 @@
/* Domain suballocator. These functions are *not* interrupt-safe.*/
void init_domheap_pages(physaddr_t ps, physaddr_t pe);
-struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order);
+struct pfn_info *alloc_domheap_pages(
+ struct domain *d, unsigned int order, unsigned int flags);
void free_domheap_pages(struct pfn_info *pg, unsigned int order);
unsigned long avail_domheap_pages(void);
-#define alloc_domheap_page(d) (alloc_domheap_pages(d,0))
+#define alloc_domheap_page(d) (alloc_domheap_pages(d,0,0))
#define free_domheap_page(p) (free_domheap_pages(p,0))
+
+#define ALLOC_DOM_DMA 1
/* Automatic page scrubbing for dead domains. */
extern struct list_head page_scrub_list;
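
With the new flags argument, a caller that needs 32-bit-DMA-able memory
passes ALLOC_DOM_DMA and is served from MEMZONE_DMADOM only; ordinary
callers pass 0, prefer MEMZONE_DOM, and spill into the DMA zone only when
the general zone is exhausted. Hypothetical call sites, for illustration:

  struct pfn_info *pg;

  pg = alloc_domheap_pages(d, 0, 0);             /* any zone, DOM preferred */
  pg = alloc_domheap_pages(d, 0, ALLOC_DOM_DMA); /* DMA zone only */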