# HG changeset patch
# User Christoph Egger <Christoph.Egger@xxxxxxx>
# Date 1314360052 -3600
# Node ID 33d161ba8a044e756357b601cf8153846fa6f1e5
# Parent 227130622561e20136a1ef56201fe65ead5a76e8
x86/mm/p2m: use defines for page sizes
Use defines for page sizes instead of hardcoding the values.
Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---
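
For reference, an order-N p2m entry covers 1 << (N + PAGE_SHIFT) bytes.
A minimal sketch of that relationship, using a hypothetical helper
(PAGE_SHIFT is 12 on x86, so the three new defines introduced below come
out to 4 KiB, 2 MiB and 1 GiB):

    /* Hypothetical helper: bytes covered by an order-N mapping. */
    static inline unsigned long order_to_bytes(unsigned int order)
    {
        return 1UL << (order + PAGE_SHIFT);
    }

    /* order_to_bytes(PAGE_ORDER_4K) == 4096        (4 KiB) */
    /* order_to_bytes(PAGE_ORDER_2M) == 0x200000    (2 MiB) */
    /* order_to_bytes(PAGE_ORDER_1G) == 0x40000000  (1 GiB) */
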
diff -r 227130622561 -r 33d161ba8a04 xen/arch/x86/mm/p2m-pod.c
--- a/xen/arch/x86/mm/p2m-pod.c Thu Aug 25 12:03:14 2011 +0100
+++ b/xen/arch/x86/mm/p2m-pod.c Fri Aug 26 13:00:52 2011 +0100
@@ -112,11 +112,11 @@
/* Then add the first one to the appropriate populate-on-demand list */
switch(order)
{
- case 9:
+ case PAGE_ORDER_2M:
page_list_add_tail(page, &p2m->pod.super); /* lock: page_alloc */
p2m->pod.count += 1 << order;
break;
- case 0:
+ case PAGE_ORDER_4K:
page_list_add_tail(page, &p2m->pod.single); /* lock: page_alloc */
p2m->pod.count += 1;
break;
@@ -143,11 +143,11 @@
struct page_info *p = NULL;
int i;
- if ( order == 9 && page_list_empty(&p2m->pod.super) )
+ if ( order == PAGE_ORDER_2M && page_list_empty(&p2m->pod.super) )
{
return NULL;
}
- else if ( order == 0 && page_list_empty(&p2m->pod.single) )
+ else if ( order == PAGE_ORDER_4K && page_list_empty(&p2m->pod.single) )
{
unsigned long mfn;
struct page_info *q;
@@ -168,12 +168,12 @@
switch ( order )
{
- case 9:
+ case PAGE_ORDER_2M:
BUG_ON( page_list_empty(&p2m->pod.super) );
p = page_list_remove_head(&p2m->pod.super);
p2m->pod.count -= 1 << order; /* Lock: page_alloc */
break;
- case 0:
+ case PAGE_ORDER_4K:
BUG_ON( page_list_empty(&p2m->pod.single) );
p = page_list_remove_head(&p2m->pod.single);
p2m->pod.count -= 1;
@@ -206,17 +206,17 @@
int order;
if ( (pod_target - p2m->pod.count) >= SUPERPAGE_PAGES )
- order = 9;
+ order = PAGE_ORDER_2M;
else
- order = 0;
+ order = PAGE_ORDER_4K;
retry:
page = alloc_domheap_pages(d, order, 0);
if ( unlikely(page == NULL) )
{
- if ( order == 9 )
+ if ( order == PAGE_ORDER_2M )
{
/* If we can't allocate a superpage, try singleton pages */
- order = 0;
+ order = PAGE_ORDER_4K;
goto retry;
}
@@ -249,9 +249,9 @@
if ( (p2m->pod.count - pod_target) > SUPERPAGE_PAGES
&& !page_list_empty(&p2m->pod.super) )
- order = 9;
+ order = PAGE_ORDER_2M;
else
- order = 0;
+ order = PAGE_ORDER_4K;
page = p2m_pod_cache_get(p2m, order);
@@ -468,12 +468,12 @@
free_domheap_page(p);
p = alloc_domheap_page(d, 0);
if ( unlikely(!p) )
return;
p2m_lock(p2m);
- p2m_pod_cache_add(p2m, p, 0);
+ p2m_pod_cache_add(p2m, p, PAGE_ORDER_4K);
p2m_unlock(p2m);
return;
}
@@ -688,7 +688,7 @@
}
/* Try to remove the page, restoring old mapping if it fails. */
- set_p2m_entry(p2m, gfn, _mfn(0), 9,
+ set_p2m_entry(p2m, gfn, _mfn(0), PAGE_ORDER_2M,
p2m_populate_on_demand, p2m->default_access);
/* Make sure none of the MFNs are used elsewhere... for example, mapped
@@ -739,7 +739,7 @@
/* Finally! We've passed all the checks, and can add the mfn superpage
* back on the PoD cache, and account for the new p2m PoD entries */
- p2m_pod_cache_add(p2m, mfn_to_page(mfn0), 9);
+ p2m_pod_cache_add(p2m, mfn_to_page(mfn0), PAGE_ORDER_2M);
p2m->pod.entry_count += SUPERPAGE_PAGES;
out_reset:
@@ -800,7 +800,7 @@
}
/* Try to remove the page, restoring old mapping if it fails. */
- set_p2m_entry(p2m, gfns[i], _mfn(0), 0,
+ set_p2m_entry(p2m, gfns[i], _mfn(0), PAGE_ORDER_4K,
p2m_populate_on_demand, p2m->default_access);
/* See if the page was successfully unmapped. (Allow one refcount
@@ -810,7 +810,8 @@
unmap_domain_page(map[i]);
map[i] = NULL;
- set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i], p2m->default_access);
+ set_p2m_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+ types[i], p2m->default_access);
continue;
}
@@ -832,7 +833,8 @@
* check timing. */
if ( j < PAGE_SIZE/sizeof(*map[i]) )
{
- set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i], p2m->default_access);
+ set_p2m_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+ types[i], p2m->default_access);
}
else
{
@@ -852,7 +854,7 @@
}
/* Add to cache, and account for the new p2m PoD entry */
- p2m_pod_cache_add(p2m, mfn_to_page(mfns[i]), 0);
+ p2m_pod_cache_add(p2m, mfn_to_page(mfns[i]), PAGE_ORDER_4K);
p2m->pod.entry_count++;
}
}
@@ -867,7 +869,7 @@
if ( p2m->pod.reclaim_super == 0 )
{
- p2m->pod.reclaim_super = (p2m->pod.max_guest>>9)<<9;
+ p2m->pod.reclaim_super = (p2m->pod.max_guest>>PAGE_ORDER_2M)<<PAGE_ORDER_2M;
p2m->pod.reclaim_super -= SUPERPAGE_PAGES;
}
@@ -956,7 +958,7 @@
/* Because PoD does not have a cache list for 1GB pages, it has to remap
* the 1GB region in 2MB chunks for a retry. */
- if ( order == 18 )
+ if ( order == PAGE_ORDER_1G )
{
gfn_aligned = (gfn >> order) << order;
/* Note that we are supposed to call set_p2m_entry() 512 times to
@@ -964,7 +966,7 @@
* set_p2m_entry() should automatically shatter the 1GB page into
* 512 2MB pages. The rest of 511 calls are unnecessary.
*/
- set_p2m_entry(p2m, gfn_aligned, _mfn(0), 9,
+ set_p2m_entry(p2m, gfn_aligned, _mfn(0), PAGE_ORDER_2M,
p2m_populate_on_demand, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
@@ -979,12 +981,12 @@
{
/* If we're low, start a sweep */
- if ( order == 9 && page_list_empty(&p2m->pod.super) )
+ if ( order == PAGE_ORDER_2M && page_list_empty(&p2m->pod.super) )
p2m_pod_emergency_sweep_super(p2m);
if ( page_list_empty(&p2m->pod.single) &&
- ( ( order == 0 )
- || (order == 9 && page_list_empty(&p2m->pod.super) ) ) )
+ ( ( order == PAGE_ORDER_4K )
+ || (order == PAGE_ORDER_2M && page_list_empty(&p2m->pod.super) ) ) )
p2m_pod_emergency_sweep(p2m);
}
@@ -1046,13 +1048,13 @@
out_fail:
return -1;
remap_and_retry:
- BUG_ON(order != 9);
+ BUG_ON(order != PAGE_ORDER_2M);
spin_unlock(&d->page_alloc_lock);
/* Remap this 2-meg region in singleton chunks */
gfn_aligned = (gfn>>order)<<order;
for(i=0; i<(1<<order); i++)
- set_p2m_entry(p2m, gfn_aligned+i, _mfn(0), 0,
+ set_p2m_entry(p2m, gfn_aligned+i, _mfn(0), PAGE_ORDER_4K,
p2m_populate_on_demand, p2m->default_access);
if ( tb_init_done )
{
diff -r 227130622561 -r 33d161ba8a04 xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c Thu Aug 25 12:03:14 2011 +0100
+++ b/xen/arch/x86/mm/p2m-pt.c Fri Aug 26 13:00:52 2011 +0100
@@ -121,12 +121,12 @@
p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
{
/* End if the entry is a leaf entry. */
- if ( page_order == 0
+ if ( page_order == PAGE_ORDER_4K
|| !(l1e_get_flags(*p2m_entry) & _PAGE_PRESENT)
|| (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
return;
- if ( page_order > 9 )
+ if ( page_order > PAGE_ORDER_2M )
{
l1_pgentry_t *l3_table = map_domain_page(l1e_get_pfn(*p2m_entry));
for ( int i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
@@ -323,7 +323,7 @@
/*
* Try to allocate 1GB page table if this feature is supported.
*/
- if ( page_order == 18 )
+ if ( page_order == PAGE_ORDER_1G )
{
l1_pgentry_t old_entry = l1e_empty();
p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
@@ -373,7 +373,7 @@
PGT_l2_page_table) )
goto out;
- if ( page_order == 0 )
+ if ( page_order == PAGE_ORDER_4K )
{
if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
L2_PAGETABLE_SHIFT - PAGE_SHIFT,
@@ -399,7 +399,7 @@
p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 1);
/* NB: paging_write_p2m_entry() handles tlb flushes properly */
}
- else if ( page_order == 9 )
+ else if ( page_order == PAGE_ORDER_2M )
{
l1_pgentry_t old_entry = l1e_empty();
p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
@@ -541,7 +541,7 @@
/* The read has succeeded, so we know that mapping exists */
if ( q != p2m_query )
{
- if ( !p2m_pod_demand_populate(p2m, gfn, 18, q) )
+ if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
goto pod_retry_l3;
p2mt = p2m_invalid;
printk("%s: Allocate 1GB failed!\n", __func__);
@@ -735,7 +735,7 @@
{
if ( q != p2m_query )
{
- if ( !p2m_pod_demand_populate(p2m, gfn, 18, q) )
+ if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
goto pod_retry_l3;
}
else
@@ -771,7 +771,7 @@
{
if ( q != p2m_query ) {
if ( !p2m_pod_check_and_populate(p2m, gfn,
- (l1_pgentry_t *)l2e, 9, q) )
+ (l1_pgentry_t *)l2e, PAGE_ORDER_2M, q) )
goto pod_retry_l2;
} else
*t = p2m_populate_on_demand;
@@ -803,7 +803,7 @@
{
if ( q != p2m_query ) {
if ( !p2m_pod_check_and_populate(p2m, gfn,
- (l1_pgentry_t *)l1e, 0, q) )
+ (l1_pgentry_t *)l1e, PAGE_ORDER_4K, q) )
goto pod_retry_l1;
} else
*t = p2m_populate_on_demand;
diff -r 227130622561 -r 33d161ba8a04 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Thu Aug 25 12:03:14 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c Fri Aug 26 13:00:52 2011 +0100
@@ -149,10 +149,10 @@
while ( todo )
{
if ( hap_enabled(d) )
- order = ( (((gfn | mfn_x(mfn) | todo) & ((1ul << 18) - 1)) == 0) &&
- hvm_hap_has_1gb(d) && opt_hap_1gb ) ? 18 :
- ((((gfn | mfn_x(mfn) | todo) & ((1ul << 9) - 1)) == 0) &&
- hvm_hap_has_2mb(d) && opt_hap_2mb) ? 9 : 0;
+ order = ( (((gfn | mfn_x(mfn) | todo) & ((1ul << PAGE_ORDER_1G) - 1)) == 0) &&
+ hvm_hap_has_1gb(d) && opt_hap_1gb ) ? PAGE_ORDER_1G :
+ ((((gfn | mfn_x(mfn) | todo) & ((1ul << PAGE_ORDER_2M) - 1)) == 0) &&
+ hvm_hap_has_2mb(d) && opt_hap_2mb) ? PAGE_ORDER_2M : PAGE_ORDER_4K;
else
order = 0;
diff -r 227130622561 -r 33d161ba8a04 xen/include/asm-x86/page.h
--- a/xen/include/asm-x86/page.h Thu Aug 25 12:03:14 2011 +0100
+++ b/xen/include/asm-x86/page.h Fri Aug 26 13:00:52 2011 +0100
@@ -13,6 +13,10 @@
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PAGE_FLAG_MASK (~0)
+#define PAGE_ORDER_4K 0
+#define PAGE_ORDER_2M 9
+#define PAGE_ORDER_1G 18
+
#ifndef __ASSEMBLY__
# include <asm/types.h>
# include <xen/lib.h>
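
The p2m.c hunk above picks the largest order whose alignment constraint
holds for the gfn, the mfn and the number of pages still to map. A
standalone sketch of that selection, with the HAP feature checks
(hvm_hap_has_1gb()/hvm_hap_has_2mb() and their command-line overrides)
elided, and max_mapping_order() being a hypothetical name:

    /* Hypothetical helper: largest order such that gfn, mfn and the
     * number of pages still to map are all order-aligned. */
    static unsigned int max_mapping_order(unsigned long gfn,
                                          unsigned long mfn,
                                          unsigned long todo)
    {
        if ( ((gfn | mfn | todo) & ((1UL << PAGE_ORDER_1G) - 1)) == 0 )
            return PAGE_ORDER_1G;
        if ( ((gfn | mfn | todo) & ((1UL << PAGE_ORDER_2M) - 1)) == 0 )
            return PAGE_ORDER_2M;
        return PAGE_ORDER_4K;
    }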