Running Xen on top of KVM exposed an issue that also exists latently on
real hardware: So far, updating any L3 entry caused the Xen-owned part
of the L2 table referenced by the final L3 entry to be re-initialized.
This was not only unnecessary, it also meant Xen relied on the TLB
entry mapping the L2 page being updated not going away in the interim:
as a first step, the full range of Xen-owned entries in the L2 was
replaced by the respective entries from the idle page table, and only
then were the per-domain entries rewritten to their intended values.
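
For illustration, a condensed sketch of the pre-patch ordering in
create_pae_xen_mappings() (reduced from the hunk removed below;
declarations, error handling, and the unrelated linear pagetable loop
are omitted):

    pl2e = map_domain_page(l3e_get_pfn(l3e3));

    /* Step 1: overwrite all Xen-owned slots, including the per-domain
     * ones, with the idle page table's entries.  Until step 2 below,
     * the per-domain slots hold the idle table's values, so accesses
     * through the mapping of this L2 page keep working only because
     * its TLB entry has not been flushed yet. */
    memcpy(&pl2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
           &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
           L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));

    /* Step 2: only now are the per-domain slots rewritten to their
     * intended values. */
    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
    {
        l2e = l2e_from_page(perdomain_pt_page(d, i), __PAGE_HYPERVISOR);
        l2e_write(&pl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i], l2e);
    }
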
It is sufficient to do this part of the initialization just once, when
the page becomes an L2-with-Xen-entries (PGT_pae_xen_l2) one, i.e. it
can be moved to alloc_l2_table(). Only the linear page table setup has
to remain where it always was.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
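
As a condensed view of the new placement (mirroring the hunk added to
alloc_l2_table() below), the one-time part now runs only when the page
first becomes a PGT_pae_xen_l2 one:

    if ( rc >= 0 && (type & PGT_pae_xen_l2) )
    {
        /* Xen private mappings: idle-table entries and per-domain
         * entries (plus, on i386, the page's own linear pagetable
         * slot).  Done once here, at L2 validation time, instead of
         * on every L3 entry update; see the added hunk below for the
         * full #ifdef variants. */
        ...
    }
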
--- 2009-07-10.orig/xen/arch/x86/mm.c 2009-07-14 11:14:26.000000000 +0200
+++ 2009-07-10/xen/arch/x86/mm.c 2009-07-14 11:22:42.000000000 +0200
@@ -1159,10 +1159,9 @@ static int alloc_l1_table(struct page_in
static int create_pae_xen_mappings(struct domain *d, l3_pgentry_t *pl3e)
{
struct page_info *page;
- l2_pgentry_t *pl2e;
l3_pgentry_t l3e3;
-#ifndef CONFIG_COMPAT
- l2_pgentry_t l2e;
+#ifdef __i386__
+ l2_pgentry_t *pl2e, l2e;
int i;
#endif
@@ -1198,17 +1197,9 @@ static int create_pae_xen_mappings(struc
return 0;
}
- /* Xen private mappings. */
+#ifdef __i386__
+ /* Xen linear pagetable mappings. */
pl2e = map_domain_page(l3e_get_pfn(l3e3));
-#ifndef CONFIG_COMPAT
- memcpy(&pl2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
- &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
- L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
- for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
- {
- l2e = l2e_from_page(perdomain_pt_page(d, i), __PAGE_HYPERVISOR);
- l2e_write(&pl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i], l2e);
- }
for ( i = 0; i < (LINEARPT_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
{
l2e = l2e_empty();
@@ -1216,13 +1207,8 @@ static int create_pae_xen_mappings(struc
l2e = l2e_from_pfn(l3e_get_pfn(pl3e[i]), __PAGE_HYPERVISOR);
l2e_write(&pl2e[l2_table_offset(LINEAR_PT_VIRT_START) + i], l2e);
}
-#else
- memcpy(&pl2e[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)],
- &compat_idle_pg_table_l2[
- l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
- COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*pl2e));
-#endif
unmap_domain_page(pl2e);
+#endif
return 1;
}
@@ -1315,6 +1301,27 @@ static int alloc_l2_table(struct page_in
adjust_guest_l2e(pl2e[i], d);
}
+ if ( rc >= 0 && (type & PGT_pae_xen_l2) )
+ {
+ /* Xen private mappings. */
+#if defined(__i386__)
+ memcpy(&pl2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
+ &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
+ L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
+ for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
+ l2e_write(&pl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i],
+ l2e_from_page(perdomain_pt_page(d, i),
+ __PAGE_HYPERVISOR));
+ pl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
+ l2e_from_pfn(pfn, __PAGE_HYPERVISOR);
+#elif defined(CONFIG_COMPAT)
+ memcpy(&pl2e[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)],
+ &compat_idle_pg_table_l2[
+ l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
+ COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*pl2e));
+#endif
+ }
+
unmap_domain_page(pl2e);
return rc > 0 ? 0 : rc;
}