[Xen-changelog] Add a defensive batched tlb flush to free_page_type(), to
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 42d4059108d4d9a4b0dfb33155cd5e85d08bc297
# Parent b21be984945a8b11698dc66ef6dde8a5babdf18f
Add a defensive batched tlb flush to free_page_type(), to
ensure the linear_pg_table remains in sync with modified
page table structure. Otherwise we can update stale entries
and screw reference counts (but probably only when running
a malicious domain).
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
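
The fix relies on Xen's existing deferred-operations machinery: rather than
flushing synchronously inside free_page_type(), the function ORs a flag into a
per-CPU bitmask, and the flush is carried out once, in process_deferred_ops(),
when the current batch of operations completes. The standalone C sketch below
illustrates only that batching pattern; every identifier in it (FLUSH_LOCAL,
FLUSH_ALL, process_batch, and so on) is invented for illustration and is not
the Xen code.

#include <stdio.h>

/*
 * A minimal sketch of batched, deferred TLB flushing. Operations do not
 * flush immediately; they OR a request into a pending-ops bitmask, and
 * one flush (the strongest requested) happens at the end of the batch.
 * All names here are illustrative, not Xen's.
 */

#define FLUSH_LOCAL (1 << 0)   /* flush only the current CPU's TLB         */
#define FLUSH_ALL   (1 << 1)   /* flush the TLBs of all the domain's CPUs  */

static unsigned int pending_ops;   /* per-CPU in the real code */

static void local_flush(void)     { puts("flush local TLB"); }
static void flush_all_cpus(void)  { puts("flush TLBs of all CPUs in mask"); }

/* A mapping update only invalidates local translations. */
static void update_mapping(void)  { pending_ops |= FLUSH_LOCAL; }

/*
 * Freeing a page table may leave stale linear_pg_table entries on any
 * CPU running this domain, so it must request the domain-wide flush.
 */
static void free_page_table(void) { pending_ops |= FLUSH_ALL; }

/* Runs once per batch, mirroring the role of process_deferred_ops(). */
static void process_batch(void)
{
    unsigned int ops = pending_ops;
    pending_ops = 0;

    if ( ops & (FLUSH_ALL | FLUSH_LOCAL) )
    {
        if ( ops & FLUSH_ALL )     /* stronger flush subsumes the weaker */
            flush_all_cpus();
        else
            local_flush();
    }
}

int main(void)
{
    update_mapping();
    free_page_table();   /* upgrades the pending flush to FLUSH_ALL */
    process_batch();     /* exactly one flush for the whole batch   */
    return 0;
}
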
diff -r b21be984945a -r 42d4059108d4 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Fri Dec 2 17:00:49 2005
+++ b/xen/arch/x86/mm.c Fri Dec 2 17:56:24 2005
@@ -128,8 +128,9 @@
 
 /* Used to defer flushing of memory structures. */
 static struct {
-#define DOP_FLUSH_TLB   (1<<0) /* Flush the TLB.                 */
-#define DOP_RELOAD_LDT  (1<<1) /* Reload the LDT shadow mapping. */
+#define DOP_FLUSH_TLB      (1<<0) /* Flush the local TLB.                    */
+#define DOP_FLUSH_ALL_TLBS (1<<1) /* Flush TLBs of all VCPUs of current dom. */
+#define DOP_RELOAD_LDT     (1<<2) /* Reload the LDT shadow mapping.          */
     unsigned int   deferred_ops;
     /* If non-NULL, specifies a foreign subject domain for some operations. */
     struct domain *foreign;
@@ -1323,14 +1324,28 @@
     struct domain *owner = page_get_owner(page);
     unsigned long gpfn;
 
-    if ( unlikely((owner != NULL) && shadow_mode_enabled(owner)) )
-    {
-        mark_dirty(owner, page_to_pfn(page));
-        if ( unlikely(shadow_mode_refcounts(owner)) )
-            return;
-        gpfn = __mfn_to_gpfn(owner, page_to_pfn(page));
-        ASSERT(VALID_M2P(gpfn));
-        remove_shadow(owner, gpfn, type & PGT_type_mask);
+    if ( likely(owner != NULL) )
+    {
+        /*
+         * We have to flush before the next use of the linear mapping
+         * (e.g., update_va_mapping()) or we could end up modifying a page
+         * that is no longer a page table (and hence screw up ref counts).
+         */
+        percpu_info[smp_processor_id()].deferred_ops |= DOP_FLUSH_ALL_TLBS;
+
+        if ( unlikely(shadow_mode_enabled(owner)) )
+        {
+            /* Raw page tables are rewritten during save/restore. */
+            if ( !shadow_mode_translate(owner) )
+                mark_dirty(owner, page_to_pfn(page));
+
+            if ( shadow_mode_refcounts(owner) )
+                return;
+
+            gpfn = __mfn_to_gpfn(owner, page_to_pfn(page));
+            ASSERT(VALID_M2P(gpfn));
+            remove_shadow(owner, gpfn, type & PGT_type_mask);
+        }
     }
 
     switch ( type & PGT_type_mask )
@@ -1600,11 +1615,14 @@
     deferred_ops = percpu_info[cpu].deferred_ops;
     percpu_info[cpu].deferred_ops = 0;
 
-    if ( deferred_ops & DOP_FLUSH_TLB )
+    if ( deferred_ops & (DOP_FLUSH_ALL_TLBS|DOP_FLUSH_TLB) )
     {
         if ( shadow_mode_enabled(d) )
             shadow_sync_all(d);
-        local_flush_tlb();
+        if ( deferred_ops & DOP_FLUSH_ALL_TLBS )
+            flush_tlb_mask(d->cpumask);
+        else
+            local_flush_tlb();
     }
 
     if ( deferred_ops & DOP_RELOAD_LDT )
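
The key design choice in the last hunk is that DOP_FLUSH_ALL_TLBS escalates
the flush from the local CPU to every physical CPU in the domain's cpumask: a
stale linear_pg_table entry on any CPU running one of the domain's VCPUs would
be enough to modify a page that is no longer a page table. The sketch below
illustrates mask-based flushing only in the abstract; flush_one_cpu() and the
other names are invented for illustration and are not Xen's flush_tlb_mask()
implementation.

#include <stdio.h>

typedef unsigned long cpumask_sketch_t;   /* one bit per physical CPU */

/* Stand-in for sending a flush IPI to one CPU. */
static void flush_one_cpu(int cpu) { printf("flush TLB on CPU %d\n", cpu); }

/*
 * Flush every CPU named in the mask -- the spirit of flush_tlb_mask().
 * A real implementation sends IPIs and waits for acknowledgement.
 */
static void flush_tlb_mask_sketch(cpumask_sketch_t mask)
{
    for ( int cpu = 0; mask != 0; cpu++, mask >>= 1 )
        if ( mask & 1 )
            flush_one_cpu(cpu);
}

int main(void)
{
    /* Domain's VCPUs currently scheduled on physical CPUs 0 and 2. */
    cpumask_sketch_t domain_mask = (1UL << 0) | (1UL << 2);
    flush_tlb_mask_sketch(domain_mask);
    return 0;
}
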
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog