Unless more than 16TB are ever going to be supported in Xen, this will
allow reducing the linked list entries in struct page_info from 16 to
8 bytes.
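The saving comes from replacing the two pointers in the list entry with
two 32-bit page indices; 2^32 indexable 4KiB frames correspond to 16TB of
memory. As a rough standalone sketch of the idea (illustrative only; the
real definitions are in the xen/include/asm-x86/mm.h and
xen/include/xen/mm.h hunks below):

    #include <stdint.h>

    /* Illustrative sketch, not the Xen definition: two 32-bit MFN indices
     * replace two 64-bit pointers on x86-64, shrinking the entry from 16
     * to 8 bytes; ~0 serves as the end-of-list marker instead of NULL. */
    struct page_list_entry_sketch {
        uint32_t next, prev;        /* MFNs, converted via mfn_to_page() */
    };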
This doesn't modify struct shadow_page_info yet, so in order to meet
the constraints of that 'mirror' structure the list entry is
artificially forced to 16 bytes in size. That workaround will be
removed in a subsequent patch.
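Until then the constraint simply amounts to keeping the entry at its former
16-byte size so the two structures remain layout-compatible. A hypothetical,
standalone way of expressing that assumption (not part of the patch) would be:

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical size check: the temporary padding keeps the x86-64
     * entry at the 16 bytes struct shadow_page_info still assumes. */
    struct padded_entry_sketch {
        uint32_t next, prev;
        unsigned long _pad_for_sh_;  /* dropped once the shadow code is converted */
    };
    static_assert(sizeof(struct padded_entry_sketch) == 16,
                  "entry must keep mirroring shadow_page_info's layout");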
One question regarding the special list manipulation methods used in
relinquish_memory(): couldn't relinquish_memory() remove the page being
dealt with from the list and re-add it in all cases where it's not
being put on relmem_list? That would eliminate the need for
page_list_move_tail(), page_list_splice_init(), and perhaps also
page_list_is_eol().
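For reference, the conversion pattern applied throughout the hunks below is
plain traversal via page_list_for_each() and pop-and-process via
page_list_remove_head(); roughly (the helpers are the ones this patch
introduces in xen/include/xen/mm.h, while drain_list()/walk_pages() are
made-up wrappers for illustration):

    /* Sketch only: the wrapper functions here are hypothetical. */
    static void drain_list(struct domain *d, struct page_list_head *list)
    {
        struct page_info *pg;

        /* Replaces the old list_for_each_safe() + list_del() idiom. */
        while ( (pg = page_list_remove_head(list)) != NULL )
            page_list_add_tail(pg, &d->page_list);
    }

    static void walk_pages(struct domain *d)
    {
        struct page_info *pg;

        /* Replaces list_for_each_entry(pg, &d->page_list, list). */
        page_list_for_each ( pg, &d->page_list )
            (void)page_to_mfn(pg);
    }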
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
--- 2009-01-30.orig/xen/arch/x86/domain.c 2009-01-30 09:53:28.000000000 +0100
+++ 2009-01-30/xen/arch/x86/domain.c 2009-01-30 10:13:53.000000000 +0100
@@ -141,7 +141,7 @@ void dump_pageframe_info(struct domain *
}
else
{
- list_for_each_entry ( page, &d->page_list, list )
+ page_list_for_each ( page, &d->page_list )
{
printk(" DomPage %p: caf=%08lx, taf=%" PRtype_info "\n",
_p(page_to_mfn(page)),
@@ -154,7 +154,7 @@ void dump_pageframe_info(struct domain *
p2m_pod_dump_data(d);
}
- list_for_each_entry ( page, &d->xenpage_list, list )
+ page_list_for_each ( page, &d->xenpage_list )
{
printk(" XenPage %p: caf=%08lx, taf=%" PRtype_info "\n",
_p(page_to_mfn(page)),
@@ -380,7 +380,7 @@ int arch_domain_create(struct domain *d,
INIT_LIST_HEAD(&d->arch.pdev_list);
d->arch.relmem = RELMEM_not_started;
- INIT_LIST_HEAD(&d->arch.relmem_list);
+ INIT_PAGE_LIST_HEAD(&d->arch.relmem_list);
pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order, 0);
@@ -1655,27 +1655,25 @@ int hypercall_xlat_continuation(unsigned
#endif
static int relinquish_memory(
- struct domain *d, struct list_head *list, unsigned long type)
+ struct domain *d, struct page_list_head *list, unsigned long type)
{
- struct list_head *ent;
- struct page_info *page;
+ struct page_info *page, *cur;
unsigned long x, y;
int ret = 0;
/* Use a recursive lock, as we may enter 'free_domheap_page'. */
spin_lock_recursive(&d->page_alloc_lock);
- ent = list->next;
- while ( ent != list )
+ page = page_list_first(list);
+ while ( !page_list_is_eol(page, list) )
{
- page = list_entry(ent, struct page_info, list);
-
/* Grab a reference to the page so it won't disappear from under us. */
if ( unlikely(!get_page(page, d)) )
{
/* Couldn't get a reference -- someone is freeing this page. */
- ent = ent->next;
- list_move_tail(&page->list, &d->arch.relmem_list);
+ cur = page;
+ page = page_list_next(page, list);
+ page_list_move_tail(cur, list, &d->arch.relmem_list);
continue;
}
@@ -1747,9 +1745,10 @@ static int relinquish_memory(
}
/* Follow the list chain and /then/ potentially free the page. */
- ent = ent->next;
- list_move_tail(&page->list, &d->arch.relmem_list);
- put_page(page);
+ cur = page;
+ page = page_list_next(page, list);
+ page_list_move_tail(cur, list, &d->arch.relmem_list);
+ put_page(cur);
if ( hypercall_preempt_check() )
{
@@ -1758,7 +1757,7 @@ static int relinquish_memory(
}
}
- list_splice_init(&d->arch.relmem_list, list);
+ page_list_splice_init(&d->arch.relmem_list, list);
out:
spin_unlock_recursive(&d->page_alloc_lock);
--- 2009-01-30.orig/xen/arch/x86/domain_build.c 2009-01-28 08:53:49.000000000 +0100
+++ 2009-01-30/xen/arch/x86/domain_build.c 2009-01-30 10:13:53.000000000 +0100
@@ -880,7 +880,7 @@ int __init construct_dom0(
}
si->first_p2m_pfn = pfn;
si->nr_p2m_frames = d->tot_pages - count;
- list_for_each_entry ( page, &d->page_list, list )
+ page_list_for_each ( page, &d->page_list )
{
mfn = page_to_mfn(page);
if ( get_gpfn_from_mfn(mfn) >= count )
--- 2009-01-30.orig/xen/arch/x86/domctl.c 2009-01-30 08:27:02.000000000 +0100
+++ 2009-01-30/xen/arch/x86/domctl.c 2009-01-30 10:13:53.000000000 +0100
@@ -240,7 +240,7 @@ long arch_do_domctl(
struct domain *d = rcu_lock_domain_by_id(domctl->domain);
unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
uint64_t mfn;
- struct list_head *list_ent;
+ struct page_info *page;
ret = -EINVAL;
if ( d != NULL )
@@ -259,19 +259,19 @@ long arch_do_domctl(
goto getmemlist_out;
}
- ret = 0;
- list_ent = d->page_list.next;
- for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
+ ret = i = 0;
+ page_list_for_each(page, &d->page_list)
{
- mfn = page_to_mfn(list_entry(
- list_ent, struct page_info, list));
+ if ( i >= max_pfns )
+ break;
+ mfn = page_to_mfn(page);
if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
i, &mfn, 1) )
{
ret = -EFAULT;
break;
}
- list_ent = mfn_to_page(mfn)->list.next;
+ ++i;
}
spin_unlock(&d->page_alloc_lock);
--- 2009-01-30.orig/xen/arch/x86/e820.c 2009-01-30 09:53:28.000000000 +0100
+++ 2009-01-30/xen/arch/x86/e820.c 2009-01-30 10:13:53.000000000 +0100
@@ -1,10 +1,10 @@
#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
+#include <xen/mm.h>
#include <xen/compat.h>
#include <xen/dmi.h>
#include <asm/e820.h>
-#include <asm/mm.h>
#include <asm/page.h>
/* opt_mem: Limit of physical RAM. Any RAM beyond this point is ignored. */
--- 2009-01-30.orig/xen/arch/x86/mm.c 2009-01-28 08:53:49.000000000 +0100
+++ 2009-01-30/xen/arch/x86/mm.c 2009-01-30 10:13:53.000000000 +0100
@@ -333,7 +333,7 @@ void share_xen_page_with_guest(
page->count_info |= PGC_allocated | 1;
if ( unlikely(d->xenheap_pages++ == 0) )
get_knownalive_domain(d);
- list_add_tail(&page->list, &d->xenpage_list);
+ page_list_add_tail(page, &d->xenpage_list);
}
spin_unlock(&d->page_alloc_lock);
@@ -3508,7 +3508,7 @@ int steal_page(
/* Unlink from original owner. */
if ( !(memflags & MEMF_no_refcount) )
d->tot_pages--;
- list_del(&page->list);
+ page_list_del(page, &d->page_list);
spin_unlock(&d->page_alloc_lock);
return 0;
--- 2009-01-30.orig/xen/arch/x86/mm/hap/hap.c 2009-01-30 08:27:02.000000000 +0100
+++ 2009-01-30/xen/arch/x86/mm/hap/hap.c 2009-01-30 10:13:53.000000000 +0100
@@ -96,11 +96,10 @@ static struct page_info *hap_alloc(struc
ASSERT(hap_locked_by_me(d));
- if ( unlikely(list_empty(&d->arch.paging.hap.freelist)) )
+ pg = page_list_remove_head(&d->arch.paging.hap.freelist);
+ if ( unlikely(!pg) )
return NULL;
- pg = list_entry(d->arch.paging.hap.freelist.next, struct page_info, list);
- list_del(&pg->list);
d->arch.paging.hap.free_pages--;
p = hap_map_domain_page(page_to_mfn(pg));
@@ -118,7 +117,7 @@ static void hap_free(struct domain *d, m
ASSERT(hap_locked_by_me(d));
d->arch.paging.hap.free_pages++;
- list_add_tail(&pg->list, &d->arch.paging.hap.freelist);
+ page_list_add_tail(pg, &d->arch.paging.hap.freelist);
}
static struct page_info *hap_alloc_p2m_page(struct domain *d)
@@ -210,15 +209,13 @@ hap_set_allocation(struct domain *d, uns
}
d->arch.paging.hap.free_pages++;
d->arch.paging.hap.total_pages++;
- list_add_tail(&pg->list, &d->arch.paging.hap.freelist);
+ page_list_add_tail(pg, &d->arch.paging.hap.freelist);
}
else if ( d->arch.paging.hap.total_pages > pages )
{
/* Need to return memory to domheap */
- ASSERT(!list_empty(&d->arch.paging.hap.freelist));
- pg = list_entry(d->arch.paging.hap.freelist.next,
- struct page_info, list);
- list_del(&pg->list);
+ pg = page_list_remove_head(&d->arch.paging.hap.freelist);
+ ASSERT(pg);
d->arch.paging.hap.free_pages--;
d->arch.paging.hap.total_pages--;
pg->count_info = 0;
@@ -393,7 +390,7 @@ static void hap_destroy_monitor_table(st
void hap_domain_init(struct domain *d)
{
hap_lock_init(d);
- INIT_LIST_HEAD(&d->arch.paging.hap.freelist);
+ INIT_PAGE_LIST_HEAD(&d->arch.paging.hap.freelist);
/* This domain will use HAP for log-dirty mode */
paging_log_dirty_init(d, hap_enable_log_dirty, hap_disable_log_dirty,
--- 2009-01-30.orig/xen/arch/x86/mm/hap/p2m-ept.c 2009-01-30 08:27:02.000000000 +0100
+++ 2009-01-30/xen/arch/x86/mm/hap/p2m-ept.c 2009-01-30 10:13:53.000000000 +0100
@@ -63,7 +63,7 @@ static int ept_set_middle_entry(struct d
pg->count_info = 1;
pg->u.inuse.type_info = 1 | PGT_validated;
- list_add_tail(&pg->list, &d->arch.p2m->pages);
+ page_list_add_tail(pg, &d->arch.p2m->pages);
ept_entry->emt = 0;
ept_entry->igmt = 0;
--- 2009-01-30.orig/xen/arch/x86/mm/p2m.c 2009-01-30 09:53:28.000000000 +0100
+++ 2009-01-30/xen/arch/x86/mm/p2m.c 2009-01-30 10:13:53.000000000 +0100
@@ -175,7 +175,7 @@ p2m_next_level(struct domain *d, mfn_t *
struct page_info *pg = d->arch.p2m->alloc_page(d);
if ( pg == NULL )
return 0;
- list_add_tail(&pg->list, &d->arch.p2m->pages);
+ page_list_add_tail(pg, &d->arch.p2m->pages);
pg->u.inuse.type_info = type | 1 | PGT_validated;
pg->count_info = 1;
@@ -214,7 +214,7 @@ p2m_next_level(struct domain *d, mfn_t *
struct page_info *pg = d->arch.p2m->alloc_page(d);
if ( pg == NULL )
return 0;
- list_add_tail(&pg->list, &d->arch.p2m->pages);
+ page_list_add_tail(pg, &d->arch.p2m->pages);
pg->u.inuse.type_info = PGT_l1_page_table | 1 | PGT_validated;
pg->count_info = 1;
@@ -300,18 +300,18 @@ p2m_pod_cache_add(struct domain *d,
for(i=0; i < 1 << order ; i++)
{
p = page + i;
- list_del(&p->list);
+ page_list_del(p, &d->page_list);
}
/* Then add the first one to the appropriate populate-on-demand list */
switch(order)
{
case 9:
- list_add_tail(&page->list, &p2md->pod.super); /* lock: page_alloc */
+ page_list_add_tail(page, &p2md->pod.super); /* lock: page_alloc */
p2md->pod.count += 1 << order;
break;
case 0:
- list_add_tail(&page->list, &p2md->pod.single); /* lock: page_alloc */
+ page_list_add_tail(page, &p2md->pod.single); /* lock: page_alloc */
p2md->pod.count += 1 ;
break;
default:
@@ -334,54 +334,51 @@ static struct page_info * p2m_pod_cache_
struct page_info *p = NULL;
int i;
- if ( order == 9 && list_empty(&p2md->pod.super) )
+ if ( order == 9 && page_list_empty(&p2md->pod.super) )
{
return NULL;
}
- else if ( order == 0 && list_empty(&p2md->pod.single) )
+ else if ( order == 0 && page_list_empty(&p2md->pod.single) )
{
unsigned long mfn;
struct page_info *q;
- BUG_ON( list_empty(&p2md->pod.super) );
+ BUG_ON( page_list_empty(&p2md->pod.super) );
/* Break up a superpage to make single pages. NB count doesn't
* need to be adjusted. */
printk("%s: Breaking up superpage.\n", __func__);
- p = list_entry(p2md->pod.super.next, struct page_info, list);
- list_del(&p->list);
+ p = page_list_remove_head(&p2md->pod.super);
mfn = mfn_x(page_to_mfn(p));
for ( i=0; i<(1<<9); i++ )
{
q = mfn_to_page(_mfn(mfn+i));
- list_add_tail(&q->list, &p2md->pod.single);
+ page_list_add_tail(q, &p2md->pod.single);
}
}
switch ( order )
{
case 9:
- BUG_ON( list_empty(&p2md->pod.super) );
- p = list_entry(p2md->pod.super.next, struct page_info, list);
+ BUG_ON( page_list_empty(&p2md->pod.super) );
+ p = page_list_remove_head(&p2md->pod.super);
p2md->pod.count -= 1 << order; /* Lock: page_alloc */
break;
case 0:
- BUG_ON( list_empty(&p2md->pod.single) );
- p = list_entry(p2md->pod.single.next, struct page_info, list);
+ BUG_ON( page_list_empty(&p2md->pod.single) );
+ p = page_list_remove_head(&p2md->pod.single);
p2md->pod.count -= 1;
break;
default:
BUG();
}
- list_del(&p->list);
-
/* Put the pages back on the domain page_list */
for ( i = 0 ; i < (1 << order) ; i++ )
{
BUG_ON(page_get_owner(p + i) != d);
- list_add_tail(&p[i].list, &d->page_list);
+ page_list_add_tail(p + i, &d->page_list);
}
return p;
@@ -425,7 +422,7 @@ p2m_pod_set_cache_target(struct domain *
spin_lock(&d->page_alloc_lock);
if ( (p2md->pod.count - pod_target) > (1>>9)
- && !list_empty(&p2md->pod.super) )
+ && !page_list_empty(&p2md->pod.super) )
order = 9;
else
order = 0;
@@ -535,38 +532,27 @@ void
p2m_pod_empty_cache(struct domain *d)
{
struct p2m_domain *p2md = d->arch.p2m;
- struct list_head *q, *p;
+ struct page_info *page;
spin_lock(&d->page_alloc_lock);
- list_for_each_safe(p, q, &p2md->pod.super) /* lock: page_alloc */
+ while ( (page = page_list_remove_head(&p2md->pod.super)) )
{
int i;
- struct page_info *page;
- list_del(p);
-
- page = list_entry(p, struct page_info, list);
-
for ( i = 0 ; i < (1 << 9) ; i++ )
{
BUG_ON(page_get_owner(page + i) != d);
- list_add_tail(&page[i].list, &d->page_list);
+ page_list_add_tail(page + i, &d->page_list);
}
p2md->pod.count -= 1<<9;
}
- list_for_each_safe(p, q, &p2md->pod.single)
+ while ( (page = page_list_remove_head(&p2md->pod.single)) )
{
- struct page_info *page;
-
- list_del(p);
-
- page = list_entry(p, struct page_info, list);
-
BUG_ON(page_get_owner(page) != d);
- list_add_tail(&page->list, &d->page_list);
+ page_list_add_tail(page, &d->page_list);
p2md->pod.count -= 1;
}
@@ -952,7 +938,7 @@ p2m_pod_emergency_sweep_super(struct dom
* NB that this is a zero-sum game; we're increasing our cache size
* by increasing our 'debt'. Since we hold the p2m lock,
* (entry_count - count) must remain the same. */
- if ( !list_empty(&p2md->pod.super) && i < limit )
+ if ( !page_list_empty(&p2md->pod.super) && i < limit )
break;
}
@@ -1035,12 +1021,12 @@ p2m_pod_demand_populate(struct domain *d
}
/* If we're low, start a sweep */
- if ( order == 9 && list_empty(&p2md->pod.super) )
+ if ( order == 9 && page_list_empty(&p2md->pod.super) )
p2m_pod_emergency_sweep_super(d);
- if ( list_empty(&p2md->pod.single) &&
+ if ( page_list_empty(&p2md->pod.single) &&
( ( order == 0 )
- || (order == 9 && list_empty(&p2md->pod.super) ) ) )
+ || (order == 9 && page_list_empty(&p2md->pod.super) ) ) )
p2m_pod_emergency_sweep(d);
/* Keep track of the highest gfn demand-populated by a guest fault */
@@ -1477,9 +1463,9 @@ int p2m_init(struct domain *d)
memset(p2m, 0, sizeof(*p2m));
p2m_lock_init(p2m);
- INIT_LIST_HEAD(&p2m->pages);
- INIT_LIST_HEAD(&p2m->pod.super);
- INIT_LIST_HEAD(&p2m->pod.single);
+ INIT_PAGE_LIST_HEAD(&p2m->pages);
+ INIT_PAGE_LIST_HEAD(&p2m->pod.super);
+ INIT_PAGE_LIST_HEAD(&p2m->pod.single);
p2m->set_entry = p2m_set_entry;
p2m->get_entry = p2m_gfn_to_mfn;
@@ -1540,7 +1526,6 @@ int p2m_alloc_table(struct domain *d,
{
mfn_t mfn = _mfn(INVALID_MFN);
- struct list_head *entry;
struct page_info *page, *p2m_top;
unsigned int page_count = 0;
unsigned long gfn = -1UL;
@@ -1566,7 +1551,7 @@ int p2m_alloc_table(struct domain *d,
p2m_unlock(p2m);
return -ENOMEM;
}
- list_add_tail(&p2m_top->list, &p2m->pages);
+ page_list_add_tail(p2m_top, &p2m->pages);
p2m_top->count_info = 1;
p2m_top->u.inuse.type_info =
@@ -1587,11 +1572,8 @@ int p2m_alloc_table(struct domain *d,
goto error;
/* Copy all existing mappings from the page list and m2p */
- for ( entry = d->page_list.next;
- entry != &d->page_list;
- entry = entry->next )
+ page_list_for_each(page, &d->page_list)
{
- page = list_entry(entry, struct page_info, list);
mfn = page_to_mfn(page);
gfn = get_gpfn_from_mfn(mfn_x(mfn));
page_count++;
@@ -1621,19 +1603,14 @@ void p2m_teardown(struct domain *d)
/* Return all the p2m pages to Xen.
* We know we don't have any extra mappings to these pages */
{
- struct list_head *entry, *n;
struct page_info *pg;
struct p2m_domain *p2m = d->arch.p2m;
p2m_lock(p2m);
d->arch.phys_table = pagetable_null();
- list_for_each_safe(entry, n, &p2m->pages)
- {
- pg = list_entry(entry, struct page_info, list);
- list_del(entry);
+ while ( (pg = page_list_remove_head(&p2m->pages)) )
p2m->free_page(d, pg);
- }
p2m_unlock(p2m);
}
--- 2009-01-30.orig/xen/arch/x86/mm/shadow/common.c 2009-01-28 08:53:49.000000000 +0100
+++ 2009-01-30/xen/arch/x86/mm/shadow/common.c 2009-01-30 10:13:53.000000000 +0100
@@ -49,7 +49,7 @@ void shadow_domain_init(struct domain *d
shadow_lock_init(d);
for ( i = 0; i <= SHADOW_MAX_ORDER; i++ )
INIT_LIST_HEAD(&d->arch.paging.shadow.freelists[i]);
- INIT_LIST_HEAD(&d->arch.paging.shadow.p2m_freelist);
+ INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.p2m_freelist);
INIT_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
/* Use shadow pagetables for log-dirty support */
@@ -1672,7 +1672,7 @@ sh_alloc_p2m_pages(struct domain *d)
*/
page_set_owner(&pg[i], d);
pg[i].count_info = 1;
- list_add_tail(&pg[i].list, &d->arch.paging.shadow.p2m_freelist);
+ page_list_add_tail(&pg[i], &d->arch.paging.shadow.p2m_freelist);
}
return 1;
}
@@ -1681,25 +1681,22 @@ sh_alloc_p2m_pages(struct domain *d)
static struct page_info *
shadow_alloc_p2m_page(struct domain *d)
{
- struct list_head *entry;
struct page_info *pg;
mfn_t mfn;
void *p;
shadow_lock(d);
- if ( list_empty(&d->arch.paging.shadow.p2m_freelist) &&
+ if ( page_list_empty(&d->arch.paging.shadow.p2m_freelist) &&
!sh_alloc_p2m_pages(d) )
{
shadow_unlock(d);
return NULL;
}
- entry = d->arch.paging.shadow.p2m_freelist.next;
- list_del(entry);
+ pg = page_list_remove_head(&d->arch.paging.shadow.p2m_freelist);
shadow_unlock(d);
- pg = list_entry(entry, struct page_info, list);
mfn = page_to_mfn(pg);
p = sh_map_domain_page(mfn);
clear_page(p);
@@ -3156,7 +3153,6 @@ void shadow_teardown(struct domain *d)
{
struct vcpu *v;
mfn_t mfn;
- struct list_head *entry, *n;
struct page_info *pg;
ASSERT(d->is_dying);
@@ -3208,12 +3204,8 @@ void shadow_teardown(struct domain *d)
}
#endif /* (SHADOW_OPTIMIZATIONS & (SHOPT_VIRTUAL_TLB|SHOPT_OUT_OF_SYNC)) */
- list_for_each_safe(entry, n, &d->arch.paging.shadow.p2m_freelist)
- {
- list_del(entry);
- pg = list_entry(entry, struct page_info, list);
+ while ( (pg = page_list_remove_head(&d->arch.paging.shadow.p2m_freelist)) )
shadow_free_p2m_page(d, pg);
- }
if ( d->arch.paging.shadow.total_pages != 0 )
{
--- 2009-01-30.orig/xen/arch/x86/numa.c 2009-01-30 08:27:02.000000000 +0100
+++ 2009-01-30/xen/arch/x86/numa.c 2009-01-30 10:13:53.000000000 +0100
@@ -312,7 +312,7 @@ static void dump_numa(unsigned char key)
for_each_online_node(i)
page_num_node[i] = 0;
- list_for_each_entry(page, &d->page_list, list)
+ page_list_for_each(page, &d->page_list)
{
i = phys_to_nid(page_to_mfn(page) << PAGE_SHIFT);
page_num_node[i]++;
--- 2009-01-30.orig/xen/common/domain.c 2009-01-30 09:53:28.000000000 +0100
+++ 2009-01-30/xen/common/domain.c 2009-01-30 10:13:53.000000000 +0100
@@ -233,8 +233,8 @@ struct domain *domain_create(
spin_lock_init(&d->page_alloc_lock);
spin_lock_init(&d->shutdown_lock);
spin_lock_init(&d->hypercall_deadlock_mutex);
- INIT_LIST_HEAD(&d->page_list);
- INIT_LIST_HEAD(&d->xenpage_list);
+ INIT_PAGE_LIST_HEAD(&d->page_list);
+ INIT_PAGE_LIST_HEAD(&d->xenpage_list);
if ( domcr_flags & DOMCRF_hvm )
d->is_hvm = 1;
--- 2009-01-30.orig/xen/common/grant_table.c 2009-01-30 09:53:28.000000000 +0100
+++ 2009-01-30/xen/common/grant_table.c 2009-01-30 10:13:53.000000000 +0100
@@ -1192,7 +1192,7 @@ gnttab_transfer(
/* Okay, add the page to 'e'. */
if ( unlikely(e->tot_pages++ == 0) )
get_knownalive_domain(e);
- list_add_tail(&page->list, &e->page_list);
+ page_list_add_tail(page, &e->page_list);
page_set_owner(page, e);
spin_unlock(&e->page_alloc_lock);
--- 2009-01-30.orig/xen/common/memory.c 2009-01-30 08:27:02.000000000 +0100
+++ 2009-01-30/xen/common/memory.c 2009-01-30 10:13:53.000000000 +0100
@@ -218,8 +218,8 @@ static void decrease_reservation(struct
static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
{
struct xen_memory_exchange exch;
- LIST_HEAD(in_chunk_list);
- LIST_HEAD(out_chunk_list);
+ PAGE_LIST_HEAD(in_chunk_list);
+ PAGE_LIST_HEAD(out_chunk_list);
unsigned long in_chunk_order, out_chunk_order;
xen_pfn_t gpfn, gmfn, mfn;
unsigned long i, j, k;
@@ -325,7 +325,7 @@ static long memory_exchange(XEN_GUEST_HA
goto fail;
}
- list_add(&page->list, &in_chunk_list);
+ page_list_add(page, &in_chunk_list);
}
}
@@ -339,7 +339,7 @@ static long memory_exchange(XEN_GUEST_HA
goto fail;
}
- list_add(&page->list, &out_chunk_list);
+ page_list_add(page, &out_chunk_list);
}
/*
@@ -347,10 +347,8 @@ static long memory_exchange(XEN_GUEST_HA
*/
/* Destroy final reference to each input page. */
- while ( !list_empty(&in_chunk_list) )
+ while ( (page = page_list_remove_head(&in_chunk_list)) )
{
- page = list_entry(in_chunk_list.next, struct page_info, list);
- list_del(&page->list);
if ( !test_and_clear_bit(_PGC_allocated, &page->count_info) )
BUG();
mfn = page_to_mfn(page);
@@ -360,10 +358,8 @@ static long memory_exchange(XEN_GUEST_HA
/* Assign each output page to the domain. */
j = 0;
- while ( !list_empty(&out_chunk_list) )
+ while ( (page = page_list_remove_head(&out_chunk_list)) )
{
- page = list_entry(out_chunk_list.next, struct page_info, list);
- list_del(&page->list);
if ( assign_pages(d, page, exch.out.extent_order,
MEMF_no_refcount) )
BUG();
@@ -399,21 +395,13 @@ static long memory_exchange(XEN_GUEST_HA
*/
fail:
/* Reassign any input pages we managed to steal. */
- while ( !list_empty(&in_chunk_list) )
- {
- page = list_entry(in_chunk_list.next, struct page_info, list);
- list_del(&page->list);
+ while ( (page = page_list_remove_head(&in_chunk_list)) )
if ( assign_pages(d, page, 0, MEMF_no_refcount) )
BUG();
- }
/* Free any output pages we managed to allocate. */
- while ( !list_empty(&out_chunk_list) )
- {
- page = list_entry(out_chunk_list.next, struct page_info, list);
- list_del(&page->list);
+ while ( (page = page_list_remove_head(&out_chunk_list)) )
free_domheap_pages(page, exch.out.extent_order);
- }
exch.nr_exchanged = i << in_chunk_order;
--- 2009-01-30.orig/xen/common/page_alloc.c 2009-01-30 09:53:28.000000000 +0100
+++ 2009-01-30/xen/common/page_alloc.c 2009-01-30 10:13:53.000000000 +0100
@@ -71,7 +71,7 @@ integer_param("dma_bits", dma_bitsize);
#endif
static DEFINE_SPINLOCK(page_scrub_lock);
-LIST_HEAD(page_scrub_list);
+PAGE_LIST_HEAD(page_scrub_list);
static unsigned long scrub_pages;
/*********************
@@ -264,7 +264,7 @@ unsigned long __init alloc_boot_pages(
#define page_to_zone(pg) (is_xen_heap_page(pg) ? MEMZONE_XEN : \
(fls(page_to_mfn(pg)) - 1))
-typedef struct list_head heap_by_zone_and_order_t[NR_ZONES][MAX_ORDER+1];
+typedef struct page_list_head heap_by_zone_and_order_t[NR_ZONES][MAX_ORDER+1];
static heap_by_zone_and_order_t *_heap[MAX_NUMNODES];
#define heap(node, zone, order) ((*_heap[node])[zone][order])
@@ -298,7 +298,7 @@ static void init_node_heap(int node)
for ( i = 0; i < NR_ZONES; i++ )
for ( j = 0; j <= MAX_ORDER; j++ )
- INIT_LIST_HEAD(&(*_heap[node])[i][j]);
+ INIT_PAGE_LIST_HEAD(&(*_heap[node])[i][j]);
}
/* Allocate 2^@order contiguous pages. */
@@ -340,7 +340,7 @@ static struct page_info *alloc_heap_page
/* Find smallest order which can satisfy the request. */
for ( j = order; j <= MAX_ORDER; j++ )
- if ( !list_empty(&heap(node, zone, j)) )
+ if ( (pg = page_list_remove_head(&heap(node, zone, j))) )
goto found;
} while ( zone-- > zone_lo ); /* careful: unsigned zone may wrap */
@@ -354,14 +354,11 @@ static struct page_info *alloc_heap_page
return NULL;
found:
- pg = list_entry(heap(node, zone, j).next, struct page_info, list);
- list_del(&pg->list);
-
/* We may have to halve the chunk a number of times. */
while ( j != order )
{
PFN_ORDER(pg) = --j;
- list_add_tail(&pg->list, &heap(node, zone, j));
+ page_list_add_tail(pg, &heap(node, zone, j));
pg += 1 << j;
}
@@ -452,8 +449,8 @@ static void free_heap_pages(
if ( allocated_in_map(page_to_mfn(pg)-mask) ||
(PFN_ORDER(pg-mask) != order) )
break;
- list_del(&(pg-mask)->list);
pg -= mask;
+ page_list_del(pg, &heap(node, zone, order));
}
else
{
@@ -461,7 +458,7 @@ static void free_heap_pages(
if ( allocated_in_map(page_to_mfn(pg)+mask) ||
(PFN_ORDER(pg+mask) != order) )
break;
- list_del(&(pg+mask)->list);
+ page_list_del(pg + mask, &heap(node, zone, order));
}
order++;
@@ -471,7 +468,7 @@ static void free_heap_pages(
}
PFN_ORDER(pg) = order;
- list_add_tail(&pg->list, &heap(node, zone, order));
+ page_list_add_tail(pg, &heap(node, zone, order));
spin_unlock(&heap_lock);
}
@@ -786,7 +783,7 @@ int assign_pages(
page_set_owner(&pg[i], d);
wmb(); /* Domain pointer must be visible before updating refcnt. */
pg[i].count_info = PGC_allocated | 1;
- list_add_tail(&pg[i].list, &d->page_list);
+ page_list_add_tail(&pg[i], &d->page_list);
}
spin_unlock(&d->page_alloc_lock);
@@ -844,7 +841,7 @@ void free_domheap_pages(struct page_info
spin_lock_recursive(&d->page_alloc_lock);
for ( i = 0; i < (1 << order); i++ )
- list_del(&pg[i].list);
+ page_list_del2(&pg[i], &d->xenpage_list, &d->arch.relmem_list);
d->xenheap_pages -= 1 << order;
drop_dom_ref = (d->xenheap_pages == 0);
@@ -859,7 +856,7 @@ void free_domheap_pages(struct page_info
for ( i = 0; i < (1 << order); i++ )
{
BUG_ON((pg[i].u.inuse.type_info & PGT_count_mask) != 0);
- list_del(&pg[i].list);
+ page_list_del2(&pg[i], &d->page_list, &d->arch.relmem_list);
}
d->tot_pages -= 1 << order;
@@ -882,7 +879,7 @@ void free_domheap_pages(struct page_info
{
page_set_owner(&pg[i], NULL);
spin_lock(&page_scrub_lock);
- list_add(&pg[i].list, &page_scrub_list);
+ page_list_add(&pg[i], &page_scrub_list);
scrub_pages++;
spin_unlock(&page_scrub_lock);
}
@@ -965,7 +962,7 @@ static DEFINE_PER_CPU(struct timer, page
static void page_scrub_softirq(void)
{
- struct list_head *ent;
+ PAGE_LIST_HEAD(list);
struct page_info *pg;
void *p;
int i;
@@ -983,32 +980,26 @@ static void page_scrub_softirq(void)
do {
spin_lock(&page_scrub_lock);
- if ( unlikely((ent = page_scrub_list.next) == &page_scrub_list) )
- {
- spin_unlock(&page_scrub_lock);
- goto out;
- }
-
/* Peel up to 16 pages from the list. */
for ( i = 0; i < 16; i++ )
{
- if ( ent->next == &page_scrub_list )
+ if ( !(pg = page_list_remove_head(&page_scrub_list)) )
break;
- ent = ent->next;
+ page_list_add_tail(pg, &list);
}
- /* Remove peeled pages from the list. */
- ent->next->prev = &page_scrub_list;
- page_scrub_list.next = ent->next;
- scrub_pages -= (i+1);
+ if ( unlikely(i == 0) )
+ {
+ spin_unlock(&page_scrub_lock);
+ goto out;
+ }
+
+ scrub_pages -= i;
spin_unlock(&page_scrub_lock);
- /* Working backwards, scrub each page in turn. */
- while ( ent != &page_scrub_list )
- {
- pg = list_entry(ent, struct page_info, list);
- ent = ent->prev;
+ /* Scrub each page in turn. */
+ while ( (pg = page_list_remove_head(&list)) ) {
p = map_domain_page(page_to_mfn(pg));
scrub_page(p);
unmap_domain_page(p);
--- 2009-01-30.orig/xen/drivers/passthrough/amd/iommu_map.c 2009-01-30 08:27:02.000000000 +0100
+++ 2009-01-30/xen/drivers/passthrough/amd/iommu_map.c 2009-01-30 10:13:53.000000000 +0100
@@ -552,7 +552,6 @@ int amd_iommu_sync_p2m(struct domain *d)
{
unsigned long mfn, gfn, flags;
u64 iommu_l2e;
- struct list_head *entry;
struct page_info *page;
struct hvm_iommu *hd;
int iw = IOMMU_IO_WRITE_ENABLED;
@@ -568,10 +567,8 @@ int amd_iommu_sync_p2m(struct domain *d)
if ( hd->p2m_synchronized )
goto out;
- for ( entry = d->page_list.next; entry != &d->page_list;
- entry = entry->next )
+ page_list_for_each ( page, &d->page_list )
{
- page = list_entry(entry, struct page_info, list);
mfn = page_to_mfn(page);
gfn = get_gpfn_from_mfn(mfn);
--- 2009-01-30.orig/xen/drivers/passthrough/amd/pci_amd_iommu.c 2009-01-30 08:27:02.000000000 +0100
+++ 2009-01-30/xen/drivers/passthrough/amd/pci_amd_iommu.c 2009-01-30 10:13:53.000000000 +0100
@@ -23,7 +23,6 @@
#include <xen/pci_regs.h>
#include <asm/amd-iommu.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
-#include <asm/mm.h>
extern unsigned short ivrs_bdf_entries;
extern struct ivrs_mappings *ivrs_mappings;
--- 2009-01-30.orig/xen/drivers/passthrough/iommu.c 2009-01-30 08:27:02.000000000 +0100
+++ 2009-01-30/xen/drivers/passthrough/iommu.c 2009-01-30 10:13:53.000000000 +0100
@@ -141,7 +141,7 @@ static int iommu_populate_page_table(str
spin_lock(&d->page_alloc_lock);
- list_for_each_entry ( page, &d->page_list, list )
+ page_list_for_each ( page, &d->page_list )
{
if ( (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
{
--- 2009-01-30.orig/xen/include/asm-x86/domain.h 2009-01-30 08:27:02.000000000 +0100
+++ 2009-01-30/xen/include/asm-x86/domain.h 2009-01-30 10:13:53.000000000 +0100
@@ -83,7 +83,7 @@ struct shadow_domain {
/* Memory allocation */
struct list_head freelists[SHADOW_MAX_ORDER + 1];
- struct list_head p2m_freelist;
+ struct page_list_head p2m_freelist;
unsigned int total_pages; /* number of pages allocated */
unsigned int free_pages; /* number of pages on freelists */
unsigned int p2m_pages; /* number of pages allocates to p2m */
@@ -143,7 +143,7 @@ struct hap_domain {
int locker;
const char *locker_function;
- struct list_head freelist;
+ struct page_list_head freelist;
unsigned int total_pages; /* number of pages allocated */
unsigned int free_pages; /* number of pages on freelists */
unsigned int p2m_pages; /* number of pages allocates to p2m */
@@ -265,7 +265,7 @@ struct arch_domain
RELMEM_l2,
RELMEM_done,
} relmem;
- struct list_head relmem_list;
+ struct page_list_head relmem_list;
cpuid_input_t cpuids[MAX_CPUID_INPUT];
} __cacheline_aligned;
--- 2009-01-30.orig/xen/include/asm-x86/mm.h 2009-01-30 09:53:28.000000000 +0100
+++ 2009-01-30/xen/include/asm-x86/mm.h 2009-01-30 10:13:53.000000000 +0100
@@ -12,15 +12,24 @@
* Per-page-frame information.
*
* Every architecture must ensure the following:
- * 1. 'struct page_info' contains a 'struct list_head list'.
+ * 1. 'struct page_info' contains a 'struct page_list_entry list'.
* 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
*/
#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
+#ifndef __i386__
+# undef page_list_entry
+struct page_list_entry
+{
+ unsigned int next, prev;
+ unsigned long _pad_for_sh_; /* until struct shadow_page_info gets updated */
+};
+#endif
+
struct page_info
{
/* Each frame can be threaded onto a doubly-linked list. */
- struct list_head list;
+ struct page_list_entry list;
/* Reference count and various PGC_xxx flags and fields. */
unsigned long count_info;
--- 2009-01-30.orig/xen/include/asm-x86/p2m.h 2009-01-30 08:27:02.000000000 +0100
+++ 2009-01-30/xen/include/asm-x86/p2m.h 2009-01-30 10:13:53.000000000 +0100
@@ -110,7 +110,7 @@ struct p2m_domain {
const char *locker_function; /* Func that took it */
/* Pages used to construct the p2m */
- struct list_head pages;
+ struct page_list_head pages;
/* Functions to call to get or free pages for the p2m */
struct page_info * (*alloc_page )(struct domain *d);
@@ -148,7 +148,7 @@ struct p2m_domain {
* protect moving stuff from the PoD cache to the domain page list.
*/
struct {
- struct list_head super, /* List of superpages */
+ struct page_list_head super, /* List of superpages */
single; /* Non-super lists */
int count, /* # of pages in cache lists */
entry_count; /* # of pages in p2m marked pod */
--- 2009-01-30.orig/xen/include/asm-x86/perfc.h 2009-01-30 08:27:02.000000000 +0100
+++ 2009-01-30/xen/include/asm-x86/perfc.h 2009-01-30 10:13:53.000000000 +0100
@@ -1,6 +1,5 @@
#ifndef __ASM_PERFC_H__
#define __ASM_PERFC_H__
-#include <asm/mm.h>
static inline void arch_perfc_printall(void)
{
--- 2009-01-30.orig/xen/include/xen/mm.h 2009-01-30 09:53:28.000000000 +0100
+++ 2009-01-30/xen/include/xen/mm.h 2009-01-30 10:13:53.000000000 +0100
@@ -85,22 +85,221 @@ int assign_pages(
#define MAX_ORDER 20 /* 2^20 contiguous pages */
#endif
+#define page_list_entry list_head
+
+#include <asm/mm.h>
+
+#ifndef page_list_entry
+struct page_list_head
+{
+ struct page_info *next, *tail;
+};
+/* These must only have instances in struct page_info. */
+# define page_list_entry
+
+# define PAGE_LIST_HEAD_INIT(name) { NULL, NULL }
+# define PAGE_LIST_HEAD(name) \
+ struct page_list_head name = PAGE_LIST_HEAD_INIT(name)
+# define INIT_PAGE_LIST_HEAD(head) ((head)->tail = (head)->next = NULL)
+# define INIT_PAGE_LIST_ENTRY(ent) ((ent)->prev = (ent)->next = ~0)
+
+static inline int
+page_list_empty(const struct page_list_head *head)
+{
+ return !head->next;
+}
+static inline struct page_info *
+page_list_first(const struct page_list_head *head)
+{
+ return head->next;
+}
+static inline struct page_info *
+page_list_next(const struct page_info *page,
+ const struct page_list_head *head)
+{
+ return page != head->tail ? mfn_to_page(page->list.next) : NULL;
+}
+static inline struct page_info *
+page_list_prev(const struct page_info *page,
+ const struct page_list_head *head)
+{
+ return page != head->next ? mfn_to_page(page->list.prev) : NULL;
+}
+static inline int
+page_list_is_eol(const struct page_info *page,
+ const struct page_list_head *head)
+{
+ return !page;
+}
+static inline void
+page_list_add(struct page_info *page, struct page_list_head *head)
+{
+ if ( head->next )
+ {
+ page->list.next = page_to_mfn(head->next);
+ head->next->list.prev = page_to_mfn(page);
+ }
+ else
+ {
+ head->tail = page;
+ page->list.next = ~0;
+ }
+ page->list.prev = ~0;
+ head->next = page;
+}
+static inline void
+page_list_add_tail(struct page_info *page, struct page_list_head *head)
+{
+ page->list.next = ~0;
+ if ( head->next )
+ {
+ page->list.prev = page_to_mfn(head->tail);
+ head->tail->list.next = page_to_mfn(page);
+ }
+ else
+ {
+ page->list.prev = ~0;
+ head->next = page;
+ }
+ head->tail = page;
+}
+static inline bool_t
+__page_list_del_head(struct page_info *page, struct page_list_head *head,
+ struct page_info *next, struct page_info *prev)
+{
+ if ( head->next == page )
+ {
+ if ( head->tail != page )
+ {
+ next->list.prev = ~0;
+ head->next = next;
+ }
+ else
+ head->tail = head->next = NULL;
+ return 1;
+ }
+
+ if ( head->tail == page )
+ {
+ prev->list.next = ~0;
+ head->tail = prev;
+ return 1;
+ }
+
+ return 0;
+}
+static inline void
+page_list_del(struct page_info *page, struct page_list_head *head)
+{
+ struct page_info *next = mfn_to_page(page->list.next);
+ struct page_info *prev = mfn_to_page(page->list.prev);
+
+ if ( !__page_list_del_head(page, head, next, prev) )
+ {
+ next->list.prev = page->list.prev;
+ prev->list.next = page->list.next;
+ }
+}
+static inline void
+page_list_del2(struct page_info *page, struct page_list_head *head1,
+ struct page_list_head *head2)
+{
+ struct page_info *next = mfn_to_page(page->list.next);
+ struct page_info *prev = mfn_to_page(page->list.prev);
+
+ if ( !__page_list_del_head(page, head1, next, prev) &&
+ !__page_list_del_head(page, head2, next, prev) )
+ {
+ next->list.prev = page->list.prev;
+ prev->list.next = page->list.next;
+ }
+}
+static inline void
+page_list_move_tail(struct page_info *page, struct page_list_head *list,
+ struct page_list_head *head)
+{
+ page_list_del(page, list);
+ page_list_add_tail(page, head);
+}
+static inline struct page_info *
+page_list_remove_head(struct page_list_head *head)
+{
+ struct page_info *page = head->next;
+
+ if ( page )
+ page_list_del(page, head);
+
+ return page;
+}
+static inline void
+page_list_splice_init(struct page_list_head *list, struct page_list_head *head)
+{
+ if ( !page_list_empty(list) )
+ {
+ if ( head->next )
+ head->tail->list.next = page_to_mfn(list->next);
+ else
+ head->next = list->next;
+ head->tail = list->tail;
+ INIT_PAGE_LIST_HEAD(list);
+ }
+}
+
+#define page_list_for_each(pos, head) \
+ for ( pos = (head)->next; pos; pos = page_list_next(pos, head) )
+#define page_list_for_each_safe(pos, tmp, head) \
+ for ( pos = (head)->next; \
+ pos ? (tmp = page_list_next(pos, head), 1) : 0; \
+ pos = tmp )
+#define page_list_for_each_safe_reverse(pos, tmp, head) \
+ for ( pos = (head)->tail; \
+ pos ? (tmp = page_list_prev(pos, head), 1) : 0; \
+ pos = tmp )
+#else
+# define page_list_head list_head
+# define PAGE_LIST_HEAD_INIT LIST_HEAD_INIT
+# define PAGE_LIST_HEAD LIST_HEAD
+# define INIT_PAGE_LIST_HEAD INIT_LIST_HEAD
+# define INIT_PAGE_LIST_ENTRY INIT_LIST_HEAD
+# define page_list_empty list_empty
+# define page_list_first(hd) list_entry((hd)->next, \
+ struct page_info, list)
+# define page_list_next(pg, hd) list_entry((pg)->list.next, \
+ struct page_info, list)
+# define page_list_is_eol(pg, hd) (&(pg)->list == (hd))
+# define page_list_add(pg, hd) list_add(&(pg)->list, hd)
+# define page_list_add_tail(pg, hd) list_add_tail(&(pg)->list, hd)
+# define page_list_del(pg, hd) list_del(&(pg)->list)
+# define page_list_del2(pg, hd1, hd2) list_del(&(pg)->list)
+# define page_list_move_tail(pg, o, n) list_move_tail(&(pg)->list, n)
+# define page_list_remove_head(hd) (!page_list_empty(hd) ? \
+ ({ \
+ struct page_info *__pg = page_list_first(hd); \
+ list_del(&__pg->list); \
+ __pg; \
+ }) : NULL)
+# define page_list_splice_init list_splice_init
+# define page_list_for_each(pos, head) list_for_each_entry(pos, head, list)
+# define page_list_for_each_safe(pos, tmp, head) \
+ list_for_each_entry_safe(pos, tmp, head, list)
+# define page_list_for_each_safe_reverse(pos, tmp, head) \
+ list_for_each_entry_safe_reverse(pos, tmp, head, list)
+#endif
+
/* Automatic page scrubbing for dead domains. */
-extern struct list_head page_scrub_list;
-#define page_scrub_schedule_work() \
- do { \
- if ( !list_empty(&page_scrub_list) ) \
- raise_softirq(PAGE_SCRUB_SOFTIRQ); \
+extern struct page_list_head page_scrub_list;
+#define page_scrub_schedule_work() \
+ do { \
+ if ( !page_list_empty(&page_scrub_list) ) \
+ raise_softirq(PAGE_SCRUB_SOFTIRQ); \
} while ( 0 )
#define page_scrub_kick() \
do { \
- if ( !list_empty(&page_scrub_list) ) \
+ if ( !page_list_empty(&page_scrub_list) ) \
cpumask_raise_softirq(cpu_online_map, PAGE_SCRUB_SOFTIRQ); \
} while ( 0 )
unsigned long avail_scrub_pages(void);
-#include <asm/mm.h>
-
int guest_remove_page(struct domain *d, unsigned long gmfn);
/* Returns TRUE if the whole page at @mfn is ordinary RAM. */
--- 2009-01-30.orig/xen/include/xen/sched.h 2009-01-30 08:27:02.000000000 +0100
+++ 2009-01-30/xen/include/xen/sched.h 2009-01-30 10:13:53.000000000 +0100
@@ -171,8 +171,8 @@ struct domain
spinlock_t domain_lock;
spinlock_t page_alloc_lock; /* protects all the following fields */
- struct list_head page_list; /* linked list, of size tot_pages */
- struct list_head xenpage_list; /* linked list, of size xenheap_pages */
+ struct page_list_head page_list; /* linked list, of size tot_pages */
+ struct page_list_head xenpage_list; /* linked list (size xenheap_pages) */
unsigned int tot_pages; /* number of pages currently possesed */
unsigned int max_pages; /* maximum value for tot_pages */
unsigned int xenheap_pages; /* # pages allocated from Xen heap */