# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1251968492 -3600
# Node ID 9c73804590c9cc4596f2ca5e43dcd02c2494691a
# Parent 088834c1abfb1ef787b170092ef0ec1433910825
x86: shadow_alloc_p2m_page() should call shadow_prealloc() before shadow_alloc()

shadow_alloc_p2m_page() fails to call shadow_prealloc() before calling
shadow_alloc(). In certain conditions, notably when PoD (populate-on-demand)
is being exercised, this may cause shadow_alloc() to fail, crashing Xen.

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
xen-unstable changeset: 20098:4b30cfb85529
xen-unstable date: Thu Aug 20 16:15:52 2009 +0100
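
For illustration, a minimal standalone sketch of the prealloc-before-alloc
pattern this patch restores: reclaim enough free pages up front so the
subsequent allocation cannot fail. This is not Xen code; pool_prealloc()
and pool_alloc() are hypothetical stand-ins for shadow_prealloc() and
shadow_alloc(), and the counters model the shadow pool very loosely.

/*
 * Hedged sketch only: stand-in names, not the real shadow-pool routines.
 */
#include <assert.h>
#include <stdio.h>

static int free_pages = 0;          /* pages currently free in the pool */
static int reclaimable_pages = 4;   /* pages prealloc could reclaim     */

/* Stand-in for shadow_prealloc(): ensure 'count' free pages exist,
 * reclaiming in-use pages if necessary. */
static void pool_prealloc(int count)
{
    while ( free_pages < count && reclaimable_pages > 0 )
    {
        reclaimable_pages--;
        free_pages++;
    }
    assert(free_pages >= count);    /* in Xen, failure here is fatal */
}

/* Stand-in for shadow_alloc(): must only be called with a page available;
 * calling it on an empty pool is the crash the patch avoids. */
static int pool_alloc(void)
{
    assert(free_pages > 0);
    return --free_pages;
}

int main(void)
{
    pool_prealloc(1);   /* the added call: guarantee a free page first */
    (void)pool_alloc(); /* now guaranteed to succeed */
    printf("allocation succeeded; %d pages still free\n", free_pages);
    return 0;
}

The point is only the ordering: the prealloc step may free up pages to
satisfy the request, so calling it first means the allocator never sees an
empty pool.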
---
xen/arch/x86/mm/shadow/common.c | 2 +-
1 files changed, 1 insertion(+), 1 deletion(-)
diff -r 088834c1abfb -r 9c73804590c9 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c Thu Sep 03 10:00:54 2009 +0100
+++ b/xen/arch/x86/mm/shadow/common.c Thu Sep 03 10:01:32 2009 +0100
@@ -1417,7 +1417,6 @@ static void _shadow_prealloc(
  * to avoid freeing shadows that the caller is currently working on. */
 void shadow_prealloc(struct domain *d, u32 type, unsigned int count)
 {
-    ASSERT(type != SH_type_p2m_table);
     return _shadow_prealloc(d, shadow_order(type), count);
 }
 
@@ -1665,6 +1664,7 @@ sh_alloc_p2m_pages(struct domain *d)
          < (shadow_min_acceptable_pages(d) + (1 << order)) )
         return 0; /* Not enough shadow memory: need to increase it first */
 
+    shadow_prealloc(d, SH_type_p2m_table, 1);
     pg = mfn_to_page(shadow_alloc(d, SH_type_p2m_table, 0));
     d->arch.paging.shadow.p2m_pages += (1 << order);
     d->arch.paging.shadow.total_pages -= (1 << order);