Hi Keir,
To prevent Xen from crashing, could you apply the following two
patches to xen-3.4.3?
changeset: 20196:45f109d149bd
date: Tue Sep 15 09:15:14 2009 +0100
Summary: p2m: Reorganize p2m_pod_demand_populate in preparation for EPT PoD
patch
Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
changeset: 20269:fd3d5d66c446
date: Thu Oct 01 12:29:33 2009 +0100
Summary: Fix recursive p2m lock acquisition in POD code
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
c/s 20196 doesn't look like a bug fix, but it fixes an issue where
p2m_lock is left locked on an error path.
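For illustration, here is a minimal, self-contained C sketch of that
bug pattern and of the lock-and-check wrapper the changeset
introduces. All names are hypothetical, and a pthread mutex stands in
for Xen's p2m lock; this is only a sketch, not the real code:

    #include <stddef.h>
    #include <pthread.h>

    struct p2m { pthread_mutex_t lock; };

    /* Stub allocator that always fails, to exercise the error path. */
    static void *alloc_page(void) { return NULL; }

    /* Before: the worker takes the lock itself, but the error path
     * returns without releasing it, leaving the lock held forever. */
    static int demand_populate_old(struct p2m *p2m)
    {
        pthread_mutex_lock(&p2m->lock);
        if ( alloc_page() == NULL )
            return -1;                  /* BUG: lock still held */
        pthread_mutex_unlock(&p2m->lock);
        return 0;
    }

    /* After: the worker never touches the lock... */
    static int demand_populate(struct p2m *p2m)
    {
        (void)p2m;
        return ( alloc_page() == NULL ) ? -1 : 0;
    }

    /* ...and a single lock-and-check wrapper owns it, so every exit
     * path releases it exactly once. */
    static int check_and_populate(struct p2m *p2m)
    {
        int r;
        pthread_mutex_lock(&p2m->lock);
        r = demand_populate(p2m);
        pthread_mutex_unlock(&p2m->lock);
        return r;
    }

The real changeset applies the same split to
p2m_pod_demand_populate() / p2m_pod_check_and_populate().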
Attached are my backports to xen-3.4-testing.hg.
Thanks,
Kouya
# HG changeset patch
# User Kouya Shimura <kouya@xxxxxxxxxxxxxx>
# Date 1259799022 -32400
# Node ID 5456555ecd1ccb328268da4c9d4c029919be152b
# Parent 13c5436cf6a70d4a7bfe6e3884db333646fd76c0
p2m: Reorganize p2m_pod_demand_populate in preparation for EPT PoD patch
p2m_pod_demand_populate is too non-EPT-p2m-centric. Reorganize code
to have a p2m-specific call that wraps a generic PoD call.
Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
This also includes a fix for xen-3.4 changeset 19826:8c5708bc87ad,
where p2m_lock is left locked on an error path.
xen-unstable changeset: 20196:45f109d149bd
xen-unstable date: Tue Sep 15 09:15:14 2009 +0100
diff -r 13c5436cf6a7 -r 5456555ecd1c xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Fri Dec 04 07:14:58 2009 +0000
+++ b/xen/arch/x86/mm/p2m.c Thu Dec 03 09:10:22 2009 +0900
@@ -987,32 +987,16 @@ p2m_pod_emergency_sweep(struct domain *d
}
-static int
+int
p2m_pod_demand_populate(struct domain *d, unsigned long gfn,
- mfn_t table_mfn,
- l1_pgentry_t *p2m_entry,
unsigned int order,
p2m_query_t q)
{
struct page_info *p = NULL; /* Compiler warnings */
unsigned long gfn_aligned;
mfn_t mfn;
- l1_pgentry_t entry_content = l1e_empty();
struct p2m_domain *p2md = d->arch.p2m;
int i;
-
- /* We need to grab the p2m lock here and re-check the entry to make
- * sure that someone else hasn't populated it for us, then hold it
- * until we're done. */
- p2m_lock(p2md);
- audit_p2m(d);
-
- /* Check to make sure this is still PoD */
- if ( p2m_flags_to_type(l1e_get_flags(*p2m_entry)) != p2m_populate_on_demand )
- {
- p2m_unlock(p2md);
- return 0;
- }
/* This check is done with the p2m lock held. This will make sure that
* even if d->is_dying changes under our feet, empty_pod_cache() won't start
@@ -1049,45 +1033,22 @@ p2m_pod_demand_populate(struct domain *d
spin_unlock(&d->page_alloc_lock);
- /* Fill in the entry in the p2m */
- switch ( order )
- {
- case 9:
- {
- l2_pgentry_t l2e_content;
-
- l2e_content = l2e_from_pfn(mfn_x(mfn),
- p2m_type_to_flags(p2m_ram_rw) | _PAGE_PSE);
-
- entry_content.l1 = l2e_content.l2;
- }
- break;
- case 0:
- entry_content = l1e_from_pfn(mfn_x(mfn),
- p2m_type_to_flags(p2m_ram_rw));
- break;
-
- }
-
gfn_aligned = (gfn >> order) << order;
- paging_write_p2m_entry(d, gfn_aligned, p2m_entry, table_mfn,
- entry_content, (order==9)?2:1);
+ set_p2m_entry(d, gfn_aligned, mfn, order, p2m_ram_rw);
for( i = 0 ; i < (1UL << order) ; i++ )
set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_aligned + i);
p2md->pod.entry_count -= (1 << order); /* Lock: p2m */
BUG_ON(p2md->pod.entry_count < 0);
- audit_p2m(d);
- p2m_unlock(p2md);
return 0;
out_of_memory:
spin_unlock(&d->page_alloc_lock);
- audit_p2m(d);
- p2m_unlock(p2md);
- printk("%s: Out of populate-on-demand memory!\n", __func__);
+
+ printk("%s: Out of populate-on-demand memory! tot_pages %" PRIu32 "
pod_entries %" PRIi32 "\n",
+ __func__, d->tot_pages, p2md->pod.entry_count);
domain_crash(d);
out_fail:
return -1;
@@ -1100,9 +1061,32 @@ remap_and_retry:
for(i=0; i<(1<<order); i++)
set_p2m_entry(d, gfn_aligned+i, _mfn(POPULATE_ON_DEMAND_MFN), 0,
p2m_populate_on_demand);
+
+ return 0;
+}
+
+/* Non-ept "lock-and-check" wrapper */
+static int p2m_pod_check_and_populate(struct domain *d, unsigned long gfn,
+ l1_pgentry_t *p2m_entry, int order,
+ p2m_query_t q)
+{
+ int r;
+ p2m_lock(d->arch.p2m);
audit_p2m(d);
- p2m_unlock(p2md);
- return 0;
+
+ /* Check to make sure this is still PoD */
+ if ( p2m_flags_to_type(l1e_get_flags(*p2m_entry)) != p2m_populate_on_demand )
+ {
+ p2m_unlock(d->arch.p2m);
+ return 0;
+ }
+
+ r = p2m_pod_demand_populate(d, gfn, order, q);
+
+ audit_p2m(d);
+ p2m_unlock(d->arch.p2m);
+
+ return r;
}
// Returns 0 on error (out of memory)
@@ -1275,8 +1259,8 @@ pod_retry_l2:
if ( p2m_flags_to_type(l2e_get_flags(*l2e)) == p2m_populate_on_demand )
{
if ( q != p2m_query ) {
- if( !p2m_pod_demand_populate(d, gfn, mfn,
- (l1_pgentry_t *)l2e, 9, q) )
+ if ( !p2m_pod_check_and_populate(d, gfn,
+ (l1_pgentry_t *)l2e, 9, q) )
goto pod_retry_l2;
} else
*t = p2m_populate_on_demand;
@@ -1307,8 +1291,8 @@ pod_retry_l1:
if ( p2m_flags_to_type(l1e_get_flags(*l1e)) == p2m_populate_on_demand )
{
if ( q != p2m_query ) {
- if( !p2m_pod_demand_populate(d, gfn, mfn,
- (l1_pgentry_t *)l1e, 0, q) )
+ if ( !p2m_pod_check_and_populate(d, gfn,
+ (l1_pgentry_t *)l1e, 0, q) )
goto pod_retry_l1;
} else
*t = p2m_populate_on_demand;
@@ -1367,8 +1351,8 @@ static mfn_t p2m_gfn_to_mfn_current(unsi
* exits at this point. */
if ( q != p2m_query )
{
- if( !p2m_pod_demand_populate(current->domain, gfn, mfn,
- p2m_entry, 9, q) )
+ if ( !p2m_pod_check_and_populate(current->domain, gfn,
+ p2m_entry, 9, q) )
goto pod_retry_l2;
/* Allocate failed. */
@@ -1423,9 +1407,8 @@ static mfn_t p2m_gfn_to_mfn_current(unsi
* exits at this point. */
if ( q != p2m_query )
{
- if( !p2m_pod_demand_populate(current->domain, gfn, mfn,
- (l1_pgentry_t *)p2m_entry, 0,
- q) )
+ if ( !p2m_pod_check_and_populate(current->domain, gfn,
+ (l1_pgentry_t *)p2m_entry, 0, q) )
goto pod_retry_l1;
/* Allocate failed. */
diff -r 13c5436cf6a7 -r 5456555ecd1c xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Fri Dec 04 07:14:58 2009 +0000
+++ b/xen/include/asm-x86/p2m.h Thu Dec 03 09:10:22 2009 +0900
@@ -312,6 +312,12 @@ p2m_pod_decrease_reservation(struct doma
xen_pfn_t gpfn,
unsigned int order);
+/* Called by p2m code when demand-populating a PoD page */
+int
+p2m_pod_demand_populate(struct domain *d, unsigned long gfn,
+ unsigned int order,
+ p2m_query_t q);
+
/* Add a page to a domain's p2m table */
int guest_physmap_add_entry(struct domain *d, unsigned long gfn,
unsigned long mfn, unsigned int page_order,
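As an aside, here is a rough sketch of the structure the
reorganization above aims at, with made-up types and predicates (the
EPT-side wrapper only arrives with the later EPT PoD patch, so this
is an assumption about its shape, not the actual code):

    #include <stdint.h>

    typedef uint64_t l1_pgentry_t;  /* stand-in for the legacy entry */
    typedef uint64_t ept_entry_t;   /* stand-in for the EPT entry */

    /* Made-up PoD predicates; the real code decodes entry flags. */
    static int l1e_is_pod(l1_pgentry_t e) { return (e & 1) != 0; }
    static int epte_is_pod(ept_entry_t e) { return (e & 2) != 0; }

    /* Generic worker: allocates and installs pages, with no knowledge
     * of the entry encoding (locking elided; see the other sketches). */
    static int pod_demand_populate(unsigned long gfn, unsigned int order)
    {
        (void)gfn; (void)order;
        return 0;
    }

    /* Thin per-implementation wrappers re-check their own entry
     * format, then hand off to the shared worker. */
    static int p2m_pod_check_and_populate(l1_pgentry_t *e,
                                          unsigned long gfn,
                                          unsigned int order)
    {
        return l1e_is_pod(*e) ? pod_demand_populate(gfn, order) : 0;
    }

    static int ept_pod_check_and_populate(ept_entry_t *e,
                                          unsigned long gfn,
                                          unsigned int order)
    {
        return epte_is_pod(*e) ? pod_demand_populate(gfn, order) : 0;
    }

Each wrapper re-checks its own entry format before calling the shared
worker, which no longer needs to know how entries are encoded.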
# HG changeset patch
# User Kouya Shimura <kouya@xxxxxxxxxxxxxx>
# Date 1259799590 -32400
# Node ID 30f13ccf564ff7bd01b7a1e4175c22a596aa7624
# Parent 5456555ecd1ccb328268da4c9d4c029919be152b
Fix recursive p2m lock acquisition in POD code
The POD code can take the p2m lock from inside a lookup. This causes
a crash if anyone calls gfn_to_mfn* with the p2m lock held, which is
quite a few places. Make the POD code understand that it may be
called with the lock held, and DTRT about taking or releasing it.
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
xen-unstable changeset: 20269:fd3d5d66c446
xen-unstable date: Thu Oct 01 12:29:33 2009 +0100
diff -r 5456555ecd1c -r 30f13ccf564f xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Thu Dec 03 09:10:22 2009 +0900
+++ b/xen/arch/x86/mm/p2m.c Thu Dec 03 09:19:50 2009 +0900
@@ -1070,21 +1070,29 @@ static int p2m_pod_check_and_populate(st
l1_pgentry_t *p2m_entry, int order,
p2m_query_t q)
{
+ /* Only take the lock if we don't already have it. Otherwise it
+ * wouldn't be safe to do p2m lookups with the p2m lock held */
+ int do_locking = !p2m_locked_by_me(d->arch.p2m);
int r;
- p2m_lock(d->arch.p2m);
+
+ if ( do_locking )
+ p2m_lock(d->arch.p2m);
+
audit_p2m(d);
/* Check to make sure this is still PoD */
if ( p2m_flags_to_type(l1e_get_flags(*p2m_entry)) != p2m_populate_on_demand )
{
- p2m_unlock(d->arch.p2m);
+ if ( do_locking )
+ p2m_unlock(d->arch.p2m);
return 0;
}
r = p2m_pod_demand_populate(d, gfn, order, q);
audit_p2m(d);
- p2m_unlock(d->arch.p2m);
+ if ( do_locking )
+ p2m_unlock(d->arch.p2m);
return r;
}
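For reference, the locking pattern the patch above applies, as a
minimal self-contained sketch. Names are hypothetical, and Xen's real
p2m lock records the locking CPU, which pthread ownership only
approximates here:

    #include <pthread.h>
    #include <stdbool.h>

    struct p2m {
        pthread_mutex_t lock;
        pthread_t owner;
        bool locked;
    };

    static bool p2m_locked_by_me(struct p2m *p)
    {
        /* Safe only as a "do I hold it?" query: if we hold the lock,
         * owner/locked are stable; if we don't, this returns false. */
        return p->locked && pthread_equal(p->owner, pthread_self());
    }

    static void p2m_lock(struct p2m *p)
    {
        pthread_mutex_lock(&p->lock);
        p->owner = pthread_self();
        p->locked = true;
    }

    static void p2m_unlock(struct p2m *p)
    {
        p->locked = false;
        pthread_mutex_unlock(&p->lock);
    }

    /* May be reached both with and without the lock held; take it
     * only if we don't already hold it, and release only what we
     * took. */
    static int check_and_populate(struct p2m *p)
    {
        bool do_locking = !p2m_locked_by_me(p);
        if ( do_locking )
            p2m_lock(p);
        /* ... re-check the entry and do the PoD work ... */
        if ( do_locking )
            p2m_unlock(p);
        return 0;
    }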
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel