WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] Merge

# HG changeset patch
# User Alastair Tse <atse@xxxxxxxxxxxxx>
# Node ID d293b17cd1c4794766056505b737138495bf1806
# Parent  3b8c07bd098012e9020a0249723ca179741b13f5
# Parent  b91f42ea5985b399343479e0f9395fc3467428cd
Merge
---
 xen/arch/x86/mm/shadow/common.c |   50 ++++++++++++++++++++++++++++++++++++++++
 1 files changed, 50 insertions(+)

diff -r 3b8c07bd0980 -r d293b17cd1c4 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Thu Nov 09 16:20:26 2006 +0000
+++ b/xen/arch/x86/mm/shadow/common.c   Thu Nov 09 16:20:58 2006 +0000
@@ -635,6 +635,56 @@ void shadow_prealloc(struct domain *d, u
     BUG();
 }
 
+#ifndef NDEBUG
+/* Deliberately free all the memory we can: this can be used to cause the
+ * guest's pagetables to be re-shadowed if we suspect that the shadows
+ * have somehow got out of sync */
+static void shadow_blow_tables(unsigned char c)
+{
+    struct list_head *l, *t;
+    struct page_info *pg;
+    struct domain *d;
+    struct vcpu *v;
+    mfn_t smfn;
+
+    for_each_domain(d)
+    {
+        if ( shadow_mode_enabled(d) && (v = d->vcpu[0]) != NULL)
+        {
+            shadow_lock(d);
+            printk("Blowing shadow tables for domain %u\n", d->domain_id);
+
+            /* Pass one: unpin all top-level pages */
+            list_for_each_backwards_safe(l,t, &d->arch.shadow.toplevel_shadows)
+            {
+                pg = list_entry(l, struct page_info, list);
+                smfn = page_to_mfn(pg);
+                sh_unpin(v, smfn);
+            }
+
+            /* Second pass: unhook entries of in-use shadows */
+            list_for_each_backwards_safe(l,t, &d->arch.shadow.toplevel_shadows)
+            {
+                pg = list_entry(l, struct page_info, list);
+                smfn = page_to_mfn(pg);
+                shadow_unhook_mappings(v, smfn);
+            }
+            
+            /* Make sure everyone sees the unshadowings */
+            flush_tlb_mask(d->domain_dirty_cpumask);
+            shadow_unlock(d);
+        }
+    }
+}
+
+/* Register this function in the Xen console keypress table */
+static __init int shadow_blow_tables_keyhandler_init(void)
+{
+    register_keyhandler('S', shadow_blow_tables, "reset shadow pagetables");
+    return 0;
+}
+__initcall(shadow_blow_tables_keyhandler_init);
+#endif /* !NDEBUG */
 
 /* Allocate another shadow's worth of (contiguous, aligned) pages,
  * and fill in the type and backpointer fields of their page_infos. 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]