# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1242830314 -3600
# Node ID b0966b6f5180507d5e1b5c32be08cac4ea58c569
# Parent cafab208441018ca2d7f4f0b015b647b2a9cb4fc
x86-64: also handle virtual aliases of Xen image pages
With the unification of the heaps, the pages freed from the Xen boot
image now can also end up being allocated to a domain, and hence the
respective aliases need handling when such pages get their
cacheability attributes changed.
Rather than establishing multiple mappings with non-WB attributes
(which temporarily still can cause aliasing issues), simply unmap
those pages from the Xen virtual space, and re-map them (to allow re-
establishing of eventual large page mappings) when the cacheability
attribute for them gets restored to normal (WB).
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
xen/arch/x86/mm.c | 33 +++++++++++++++++++--------------
1 files changed, 19 insertions(+), 14 deletions(-)
diff -r cafab2084410 -r b0966b6f5180 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed May 20 15:35:32 2009 +0100
+++ b/xen/arch/x86/mm.c Wed May 20 15:38:34 2009 +0100
@@ -709,6 +709,23 @@ int is_iomem_page(unsigned long mfn)
return (page_get_owner(page) == dom_io);
}
+static void update_xen_mappings(unsigned long mfn, unsigned long cacheattr)
+{
+#ifdef __x86_64__
+ bool_t alias = mfn >= PFN_DOWN(xen_phys_start) &&
+ mfn < PFN_UP(xen_phys_start + (unsigned long)_end - XEN_VIRT_START);
+ unsigned long xen_va =
+ XEN_VIRT_START + ((mfn - PFN_DOWN(xen_phys_start)) << PAGE_SHIFT);
+
+ if ( unlikely(alias) && cacheattr )
+ map_pages_to_xen(xen_va, mfn, 1, 0);
+ map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
+ PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
+ if ( unlikely(alias) && !cacheattr )
+ map_pages_to_xen(xen_va, mfn, 1, PAGE_HYPERVISOR);
+#endif
+}
+
int
get_page_from_l1e(
@@ -796,10 +813,7 @@ get_page_from_l1e(
y = cmpxchg(&page->count_info, x, nx);
}
-#ifdef __x86_64__
- map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
- PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
-#endif
+ update_xen_mappings(mfn, cacheattr);
}
return 1;
@@ -857,12 +871,6 @@ get_page_from_l2e(
return -EINVAL;
}
} while ( m++ < (mfn + (L1_PAGETABLE_ENTRIES-1)) );
-
-#ifdef __x86_64__
- map_pages_to_xen(
- (unsigned long)mfn_to_virt(mfn), mfn, L1_PAGETABLE_ENTRIES,
- PAGE_HYPERVISOR | l2e_get_flags(l2e));
-#endif
}
return rc;
@@ -2406,10 +2414,7 @@ void cleanup_page_cacheattr(struct page_
BUG_ON(is_xen_heap_page(page));
-#ifdef __x86_64__
- map_pages_to_xen((unsigned long)page_to_virt(page), page_to_mfn(page),
- 1, PAGE_HYPERVISOR);
-#endif
+ update_xen_mappings(page_to_mfn(page), 0);
}
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|