# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Node ID 22885e4c1275a540a95a64e1c30960d62be1e7c9
# Parent 3cc0b589c235fb1d624d9118e342df10cdba403d
[XEN] Don't kill shadowed guest for writing bogus PTEs.
If a guest writes a "bad" pagetable entry and we fail to unshadow the
page, don't kill the guest. This makes the behaviour the same as
if the bad PTE were already in place when we shadowed the page.
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
---
xen/arch/x86/mm/shadow/common.c | 41 ++++++++++++++++++++++++----------------
xen/arch/x86/mm/shadow/multi.c | 2 -
xen/include/asm-x86/shadow.h | 4 +--
3 files changed, 28 insertions(+), 19 deletions(-)
diff -r 3cc0b589c235 -r 22885e4c1275 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c Tue Oct 17 09:59:11 2006 +0100
+++ b/xen/arch/x86/mm/shadow/common.c Tue Oct 17 11:07:11 2006 +0100
@@ -343,8 +343,11 @@ shadow_validate_guest_pt_write(struct vc
if ( rc & SHADOW_SET_ERROR )
{
/* This page is probably not a pagetable any more: tear it out of the
- * shadows, along with any tables that reference it */
- shadow_remove_all_shadows_and_parents(v, gmfn);
+ * shadows, along with any tables that reference it.
+ * Since the validate call above will have made a "safe" (i.e. zero)
+ * shadow entry, we can let the domain live even if we can't fully
+ * unshadow the page. */
+ sh_remove_shadows(v, gmfn, 0, 0);
}
}
@@ -2058,17 +2061,20 @@ static int sh_remove_shadow_via_pointer(
return rc;
}
-void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int all)
+void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
/* Remove the shadows of this guest page.
- * If all != 0, find all shadows, if necessary by walking the tables.
- * Otherwise, just try the (much faster) heuristics, which will remove
- * at most one reference to each shadow of the page. */
+ * If fast != 0, just try the quick heuristic, which will remove
+ * at most one reference to each shadow of the page. Otherwise, walk
+ * all the shadow tables looking for refs to shadows of this gmfn.
+ * If all != 0, kill the domain if we can't find all the shadows.
+ * (all != 0 implies fast == 0)
+ */
{
struct page_info *pg;
mfn_t smfn;
u32 sh_flags;
unsigned char t;
-
+
/* Dispatch table for getting per-type functions: each level must
* be called with the function to remove a lower-level shadow. */
static hash_callback_t callbacks[16] = {
@@ -2128,6 +2134,7 @@ void sh_remove_shadows(struct vcpu *v, m
};
ASSERT(shadow_lock_is_acquired(v->domain));
+ ASSERT(!(all && fast));
pg = mfn_to_page(gmfn);
@@ -2147,20 +2154,20 @@ void sh_remove_shadows(struct vcpu *v, m
* call will remove at most one shadow, and terminate immediately when
* it does remove it, so we never walk the hash after doing a deletion. */
#define DO_UNSHADOW(_type) do { \
- t = (_type) >> PGC_SH_type_shift; \
- smfn = shadow_hash_lookup(v, mfn_x(gmfn), t); \
- if ( !sh_remove_shadow_via_pointer(v, smfn) && all ) \
+ t = (_type) >> PGC_SH_type_shift; \
+ smfn = shadow_hash_lookup(v, mfn_x(gmfn), t); \
+ if ( !sh_remove_shadow_via_pointer(v, smfn) && !fast ) \
hash_foreach(v, masks[t], callbacks, smfn); \
} while (0)
/* Top-level shadows need to be unpinned */
-#define DO_UNPIN(_type) do { \
+#define DO_UNPIN(_type) do { \
t = (_type) >> PGC_SH_type_shift; \
smfn = shadow_hash_lookup(v, mfn_x(gmfn), t); \
if ( mfn_to_page(smfn)->count_info & PGC_SH_pinned ) \
sh_unpin(v, smfn); \
if ( (_type) == PGC_SH_l3_pae_shadow ) \
- SHADOW_INTERNAL_NAME(sh_unpin_all_l3_subshadows,3,3)(v, smfn); \
+ SHADOW_INTERNAL_NAME(sh_unpin_all_l3_subshadows,3,3)(v, smfn); \
} while (0)
if ( sh_flags & SHF_L1_32 ) DO_UNSHADOW(PGC_SH_l1_32_shadow);
@@ -2190,11 +2197,13 @@ void sh_remove_shadows(struct vcpu *v, m
#endif
/* If that didn't catch the shadows, something is wrong */
- if ( all && (pg->count_info & PGC_page_table) )
- {
- SHADOW_ERROR("can't find all shadows of mfn %05lx (shadow_flags=%08x)\n",
+ if ( !fast && (pg->count_info & PGC_page_table) )
+ {
+ SHADOW_ERROR("can't find all shadows of mfn %05lx "
+ "(shadow_flags=%08x)\n",
mfn_x(gmfn), pg->shadow_flags);
- domain_crash(v->domain);
+ if ( all )
+ domain_crash(v->domain);
}
}
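As a reference for the new fast/all semantics documented in the comment above, here is a minimal standalone sketch (not Xen code; toy_remove_shadows and shadows_remain are invented names for illustration) of how the rewritten tail of sh_remove_shadows() treats the two flags: only a non-fast walk reports leftover shadows, and only all != 0 makes that fatal.

#include <assert.h>
#include <stdio.h>

static void toy_remove_shadows(int fast, int all, int shadows_remain)
{
    assert(!(all && fast));              /* mirrors the new ASSERT above */
    /* fast != 0: only the quick heuristic ran, so silence is acceptable */
    if ( !fast && shadows_remain )
    {
        printf("SHADOW_ERROR: can't find all shadows\n");
        if ( all )
            printf("domain_crash()\n");  /* only the all != 0 case is fatal */
    }
}

int main(void)
{
    toy_remove_shadows(1, 0, 1);  /* early unshadow heuristic: silent */
    toy_remove_shadows(0, 0, 1);  /* bogus PTE write: log it, let the domain live */
    toy_remove_shadows(0, 1, 1);  /* remove-all path: log it and crash the domain */
    return 0;
}

The combination (fast=1, all=1) is rejected by the assertion, matching the new ASSERT(!(all && fast)): demanding guaranteed-complete removal while only running the fast heuristic would be contradictory.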
diff -r 3cc0b589c235 -r 22885e4c1275 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c Tue Oct 17 09:59:11 2006 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c Tue Oct 17 11:07:11 2006 +0100
@@ -2825,7 +2825,7 @@ static inline void check_for_early_unsha
if ( !(flags & (SHF_L2_32|SHF_L3_PAE|SHF_L4_64)) )
{
perfc_incrc(shadow_early_unshadow);
- sh_remove_shadows(v, gmfn, 0 /* Can fail to unshadow */ );
+ sh_remove_shadows(v, gmfn, 1, 0 /* Fast, can fail to unshadow */ );
return;
}
/* SHF_unhooked_mappings is set to make sure we only unhook
diff -r 3cc0b589c235 -r 22885e4c1275 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h Tue Oct 17 09:59:11 2006 +0100
+++ b/xen/include/asm-x86/shadow.h Tue Oct 17 11:07:11 2006 +0100
@@ -549,13 +549,13 @@ shadow_remove_all_shadows_and_parents(st
* Unshadow it, and recursively unshadow pages that reference it. */
/* Remove all shadows of the guest mfn. */
-extern void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int all);
+extern void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all);
static inline void shadow_remove_all_shadows(struct vcpu *v, mfn_t gmfn)
{
int was_locked = shadow_lock_is_acquired(v->domain);
if ( !was_locked )
shadow_lock(v->domain);
- sh_remove_shadows(v, gmfn, 1);
+ sh_remove_shadows(v, gmfn, 0, 1);
if ( !was_locked )
shadow_unlock(v->domain);
}
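The shadow.h hunk above also shows the conditional-locking idiom the wrapper relies on: take the shadow lock only if the caller does not already hold it, and release it only if we took it ourselves. A minimal standalone sketch of that idiom (again not Xen code; toy_lock/toy_unlock and the lock_held flag are stand-ins for shadow_lock()/shadow_unlock() and shadow_lock_is_acquired()):

#include <stdbool.h>
#include <stdio.h>

static bool lock_held;                    /* stand-in for shadow_lock_is_acquired() */
static void toy_lock(void)   { lock_held = true;  }
static void toy_unlock(void) { lock_held = false; }

static void toy_remove_all_shadows(void)
{
    bool was_locked = lock_held;
    if ( !was_locked )
        toy_lock();
    printf("sh_remove_shadows(v, gmfn, 0, 1) under the lock\n");
    if ( !was_locked )
        toy_unlock();
}

int main(void)
{
    toy_remove_all_shadows();             /* caller does not hold the lock */
    toy_lock();
    toy_remove_all_shadows();             /* caller already holds the lock */
    toy_unlock();
    return 0;
}

This keeps shadow_remove_all_shadows() safe to call both from paths that already hold the shadow lock and from those that do not.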