# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Date 1170083098 0
# Node ID d401cb96d8a0da5febe737b86f453a88f1f45bb7
# Parent 8a4c107eae1e306674e4523bbbcca4b50773a186
[XEN] Snapshot guest entries when shadowing PAE tables.
This fixes a crash when migrating Solaris guests.
Also delete some old and unused linear pagetable definitions.
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
---
xen/arch/x86/hvm/svm/svm.c | 2
xen/arch/x86/mm/shadow/multi.c | 115 ++++++++++++++++------------------
xen/arch/x86/mm/shadow/page-guest32.h | 5 -
xen/arch/x86/mm/shadow/types.h | 2
xen/include/asm-x86/domain.h | 8 +-
xen/include/asm-x86/page.h | 3
6 files changed, 63 insertions(+), 72 deletions(-)
diff -r 8a4c107eae1e -r d401cb96d8a0 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Mon Jan 29 14:28:40 2007 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c Mon Jan 29 15:04:58 2007 +0000
@@ -2567,7 +2567,7 @@ void walk_shadow_and_guest_pt(unsigned l
shadow_sync_va(v, gva);
gpte.l1 = 0;
- __copy_from_user(&gpte, &linear_pg_table[ l1_linear_offset(gva) ],
+ __copy_from_user(&gpte, &__linear_l1_table[ l1_linear_offset(gva) ],
sizeof(gpte) );
printk( "G-PTE = %x, flags=%x\n", gpte.l1, l1e_get_flags(gpte) );
diff -r 8a4c107eae1e -r d401cb96d8a0 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c Mon Jan 29 14:28:40 2007 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c Mon Jan 29 15:04:58 2007 +0000
@@ -237,7 +237,8 @@ guest_walk_tables(struct vcpu *v, unsign
#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
/* Get l4e from the top level table */
gw->l4mfn = pagetable_get_mfn(v->arch.guest_table);
- gw->l4e = (guest_l4e_t *)v->arch.guest_vtable + guest_l4_table_offset(va);
+ gw->l4e = (guest_l4e_t *)v->arch.shadow.guest_vtable
+ + guest_l4_table_offset(va);
/* Walk down to the l3e */
if ( !(guest_l4e_get_flags(*gw->l4e) & _PAGE_PRESENT) ) return 0;
gw->l3mfn = vcpu_gfn_to_mfn(v, guest_l4e_get_gfn(*gw->l4e));
@@ -248,9 +249,8 @@ guest_walk_tables(struct vcpu *v, unsign
gw->l3e = ((guest_l3e_t *)sh_map_domain_page(gw->l3mfn))
+ guest_l3_table_offset(va);
#else /* PAE only... */
- /* Get l3e from the top level table */
- gw->l3mfn = pagetable_get_mfn(v->arch.guest_table);
- gw->l3e = (guest_l3e_t *)v->arch.guest_vtable + guest_l3_table_offset(va);
+ /* Get l3e from the cache of the guest's top level table */
+ gw->l3e = (guest_l3e_t *)&v->arch.shadow.gl3e[guest_l3_table_offset(va)];
#endif /* PAE or 64... */
/* Walk down to the l2e */
if ( !(guest_l3e_get_flags(*gw->l3e) & _PAGE_PRESENT) ) return 0;
@@ -264,7 +264,8 @@ guest_walk_tables(struct vcpu *v, unsign
#else /* 32-bit only... */
/* Get l2e from the top level table */
gw->l2mfn = pagetable_get_mfn(v->arch.guest_table);
- gw->l2e = (guest_l2e_t *)v->arch.guest_vtable + guest_l2_table_offset(va);
+ gw->l2e = (guest_l2e_t *)v->arch.shadow.guest_vtable
+ + guest_l2_table_offset(va);
#endif /* All levels... */
if ( !(guest_l2e_get_flags(*gw->l2e) & _PAGE_PRESENT) ) return 0;
@@ -357,8 +358,8 @@ static inline void print_gw(walk_t *gw)
SHADOW_PRINTK(" l4e=%p\n", gw->l4e);
if ( gw->l4e )
SHADOW_PRINTK(" *l4e=%" SH_PRI_gpte "\n", gw->l4e->l4);
+ SHADOW_PRINTK(" l3mfn=%" SH_PRI_mfn "\n", mfn_x(gw->l3mfn));
#endif /* PAE or 64... */
- SHADOW_PRINTK(" l3mfn=%" SH_PRI_mfn "\n", mfn_x(gw->l3mfn));
SHADOW_PRINTK(" l3e=%p\n", gw->l3e);
if ( gw->l3e )
SHADOW_PRINTK(" *l3e=%" SH_PRI_gpte "\n", gw->l3e->l3);
@@ -3127,8 +3128,7 @@ sh_update_linear_entries(struct vcpu *v)
#else /* GUEST_PAGING_LEVELS == 3 */
shadow_l3e = (shadow_l3e_t *)&v->arch.shadow.l3table;
- /* Always safe to use guest_vtable, because it's globally mapped */
- guest_l3e = v->arch.guest_vtable;
+ guest_l3e = (guest_l3e_t *)&v->arch.shadow.gl3e;
#endif /* GUEST_PAGING_LEVELS */
@@ -3226,38 +3226,36 @@ sh_update_linear_entries(struct vcpu *v)
}
-/* Removes vcpu->arch.guest_vtable and vcpu->arch.shadow_table[].
+/* Removes vcpu->arch.shadow.guest_vtable and vcpu->arch.shadow_table[].
* Does all appropriate management/bookkeeping/refcounting/etc...
*/
static void
sh_detach_old_tables(struct vcpu *v)
{
- struct domain *d = v->domain;
mfn_t smfn;
int i = 0;
////
- //// vcpu->arch.guest_vtable
+ //// vcpu->arch.shadow.guest_vtable
////
- if ( v->arch.guest_vtable )
- {
-#if GUEST_PAGING_LEVELS == 4
+
+#if GUEST_PAGING_LEVELS == 3
+ /* PAE guests don't have a mapping of the guest top-level table */
+ ASSERT(v->arch.shadow.guest_vtable == NULL);
+#else
+ if ( v->arch.shadow.guest_vtable )
+ {
+ struct domain *d = v->domain;
if ( shadow_mode_external(d) || shadow_mode_translate(d) )
- sh_unmap_domain_page_global(v->arch.guest_vtable);
-#elif GUEST_PAGING_LEVELS == 3
- if ( 1 || shadow_mode_external(d) || shadow_mode_translate(d) )
- sh_unmap_domain_page_global(v->arch.guest_vtable);
-#elif GUEST_PAGING_LEVELS == 2
- if ( shadow_mode_external(d) || shadow_mode_translate(d) )
- sh_unmap_domain_page_global(v->arch.guest_vtable);
-#endif
- v->arch.guest_vtable = NULL;
- }
+ sh_unmap_domain_page_global(v->arch.shadow.guest_vtable);
+ v->arch.shadow.guest_vtable = NULL;
+ }
+#endif
+
////
//// vcpu->arch.shadow_table[]
////
-
#if GUEST_PAGING_LEVELS == 3
/* PAE guests have four shadow_table entries */
@@ -3352,7 +3350,9 @@ sh_update_cr3(struct vcpu *v)
struct domain *d = v->domain;
mfn_t gmfn;
#if GUEST_PAGING_LEVELS == 3
+ guest_l3e_t *gl3e;
u32 guest_idx=0;
+ int i;
#endif
ASSERT(shadow_locked_by_me(v->domain));
@@ -3407,55 +3407,54 @@ sh_update_cr3(struct vcpu *v)
}
////
- //// vcpu->arch.guest_vtable
+ //// vcpu->arch.shadow.guest_vtable
////
#if GUEST_PAGING_LEVELS == 4
if ( shadow_mode_external(d) || shadow_mode_translate(d) )
{
- if ( v->arch.guest_vtable )
- sh_unmap_domain_page_global(v->arch.guest_vtable);
- v->arch.guest_vtable = sh_map_domain_page_global(gmfn);
+ if ( v->arch.shadow.guest_vtable )
+ sh_unmap_domain_page_global(v->arch.shadow.guest_vtable);
+ v->arch.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
}
else
- v->arch.guest_vtable = __linear_l4_table;
+ v->arch.shadow.guest_vtable = __linear_l4_table;
#elif GUEST_PAGING_LEVELS == 3
- if ( v->arch.guest_vtable )
- sh_unmap_domain_page_global(v->arch.guest_vtable);
- if ( shadow_mode_external(d) )
- {
- if ( shadow_vcpu_mode_translate(v) )
- /* Paging enabled: find where in the page the l3 table is */
- guest_idx = guest_index((void *)hvm_get_guest_ctrl_reg(v, 3));
- else
- /* Paging disabled: l3 is at the start of a page (in the p2m) */
- guest_idx = 0;
-
- // Ignore the low 2 bits of guest_idx -- they are really just
- // cache control.
- guest_idx &= ~3;
-
- // XXX - why does this need a global map?
- v->arch.guest_vtable =
- (guest_l3e_t *)sh_map_domain_page_global(gmfn) + guest_idx;
- }
+ /* On PAE guests we don't use a mapping of the guest's own top-level
+ * table. We cache the current state of that table and shadow that,
+ * until the next CR3 write makes us refresh our cache. */
+ ASSERT(v->arch.shadow.guest_vtable == NULL);
+
+ if ( shadow_mode_external(d) && shadow_vcpu_mode_translate(v) )
+ /* Paging enabled: find where in the page the l3 table is */
+ guest_idx = guest_index((void *)hvm_get_guest_ctrl_reg(v, 3));
else
- v->arch.guest_vtable = sh_map_domain_page_global(gmfn);
+ /* Paging disabled or PV: l3 is at the start of a page */
+ guest_idx = 0;
+
+ // Ignore the low 2 bits of guest_idx -- they are really just
+ // cache control.
+ guest_idx &= ~3;
+
+ gl3e = ((guest_l3e_t *)sh_map_domain_page(gmfn)) + guest_idx;
+ for ( i = 0; i < 4 ; i++ )
+ v->arch.shadow.gl3e[i] = gl3e[i];
+ sh_unmap_domain_page(gl3e);
#elif GUEST_PAGING_LEVELS == 2
if ( shadow_mode_external(d) || shadow_mode_translate(d) )
{
- if ( v->arch.guest_vtable )
- sh_unmap_domain_page_global(v->arch.guest_vtable);
- v->arch.guest_vtable = sh_map_domain_page_global(gmfn);
+ if ( v->arch.shadow.guest_vtable )
+ sh_unmap_domain_page_global(v->arch.shadow.guest_vtable);
+ v->arch.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
}
else
- v->arch.guest_vtable = __linear_l2_table;
+ v->arch.shadow.guest_vtable = __linear_l2_table;
#else
#error this should never happen
#endif
#if 0
- printk("%s %s %d gmfn=%05lx guest_vtable=%p\n",
- __func__, __FILE__, __LINE__, gmfn, v->arch.guest_vtable);
+ printk("%s %s %d gmfn=%05lx shadow.guest_vtable=%p\n",
+ __func__, __FILE__, __LINE__, gmfn, v->arch.shadow.guest_vtable);
#endif
////
@@ -3473,10 +3472,10 @@ sh_update_cr3(struct vcpu *v)
/* PAE guests have four shadow_table entries, based on the
* current values of the guest's four l3es. */
{
- int i, flush = 0;
+ int flush = 0;
gfn_t gl2gfn;
mfn_t gl2mfn;
- guest_l3e_t *gl3e = (guest_l3e_t*)v->arch.guest_vtable;
+ guest_l3e_t *gl3e = (guest_l3e_t*)&v->arch.shadow.gl3e;
/* First, make all four entries read-only. */
for ( i = 0; i < 4; i++ )
{
diff -r 8a4c107eae1e -r d401cb96d8a0 xen/arch/x86/mm/shadow/page-guest32.h
--- a/xen/arch/x86/mm/shadow/page-guest32.h Mon Jan 29 14:28:40 2007 +0000
+++ b/xen/arch/x86/mm/shadow/page-guest32.h Mon Jan 29 15:04:58 2007 +0000
@@ -87,11 +87,6 @@ static inline l2_pgentry_32_t l2e_from_p
#define l2_table_offset_32(a) \
(((a) >> L2_PAGETABLE_SHIFT_32) & (L2_PAGETABLE_ENTRIES_32 - 1))
-#define linear_l1_table_32 \
- ((l1_pgentry_32_t *)(LINEAR_PT_VIRT_START))
-
-#define linear_pg_table_32 linear_l1_table_32
-
#endif /* __X86_PAGE_GUEST_H__ */
/*
diff -r 8a4c107eae1e -r d401cb96d8a0 xen/arch/x86/mm/shadow/types.h
--- a/xen/arch/x86/mm/shadow/types.h Mon Jan 29 14:28:40 2007 +0000
+++ b/xen/arch/x86/mm/shadow/types.h Mon Jan 29 15:04:58 2007 +0000
@@ -453,10 +453,8 @@ struct shadow_walk_t
guest_l2e_t *l2e; /* Pointer to guest's level 2 entry */
guest_l1e_t *l1e; /* Pointer to guest's level 1 entry */
guest_l1e_t eff_l1e; /* Effective level 1 entry */
-#if GUEST_PAGING_LEVELS >= 3
#if GUEST_PAGING_LEVELS >= 4
mfn_t l4mfn; /* MFN that the level 4 entry is in */
-#endif
mfn_t l3mfn; /* MFN that the level 3 entry is in */
#endif
mfn_t l2mfn; /* MFN that the level 2 entry is in */
diff -r 8a4c107eae1e -r d401cb96d8a0 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h Mon Jan 29 14:28:40 2007 +0000
+++ b/xen/include/asm-x86/domain.h Mon Jan 29 15:04:58 2007 +0000
@@ -138,7 +138,11 @@ struct shadow_vcpu {
#if CONFIG_PAGING_LEVELS >= 3
/* PAE guests: per-vcpu shadow top-level table */
l3_pgentry_t l3table[4] __attribute__((__aligned__(32)));
-#endif
+ /* PAE guests: per-vcpu cache of the top-level *guest* entries */
+ l3_pgentry_t gl3e[4] __attribute__((__aligned__(32)));
+#endif
+ /* Non-PAE guests: pointer to guest top-level pagetable */
+ void *guest_vtable;
/* Pointers to mode-specific entry points. */
struct shadow_paging_mode *mode;
/* Last MFN that we emulated a write to. */
@@ -194,8 +198,6 @@ struct arch_vcpu
pagetable_t shadow_table[4]; /* (MFN) shadow(s) of guest */
pagetable_t monitor_table; /* (MFN) hypervisor PT (for HVM) */
unsigned long cr3; /* (MA) value to install in HW CR3
*/
-
- void *guest_vtable; /* virtual addr of pagetable */
/* Current LDT details. */
unsigned long shadow_ldt_mapcnt;
diff -r 8a4c107eae1e -r d401cb96d8a0 xen/include/asm-x86/page.h
--- a/xen/include/asm-x86/page.h Mon Jan 29 14:28:40 2007 +0000
+++ b/xen/include/asm-x86/page.h Mon Jan 29 15:04:58 2007 +0000
@@ -275,9 +275,6 @@ typedef struct { u64 pfn; } pagetable_t;
#define __linear_l4_table \
((l4_pgentry_t *)(__linear_l3_table + l3_linear_offset(LINEAR_PT_VIRT_START)))
-#define linear_l1_table __linear_l1_table
-#define linear_pg_table linear_l1_table
-#define linear_l2_table(v) ((l2_pgentry_t *)(v)->arch.guest_vtable)
#ifndef __ASSEMBLY__
#if CONFIG_PAGING_LEVELS == 3
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|