Clear all shadow caches when returning to real mode from protected mode,
so that, if the OS modifies some page tables in real mode and then
returns to protected mode, no outdated shadow table is used, because
the out-of-sync mechanism does not work in real mode.
Signed-off-by: Xiaofeng Ling <xiaofeng.ling@xxxxxxxxx>
# HG changeset patch
# User Xiaofeng Ling <xiaofeng.ling@xxxxxxxxx>
# Node ID aadb771248f677c4a957731a59b22c90a747646f
# Parent b4d615464054390c88e4b62dc2f26d3adeb86443
Clear all shadow caches when returning to real mode from protected mode,
so that, if the OS modifies some page tables in real mode and then
returns to protected mode, no outdated shadow table is used.
Signed-off-by: Xiaofeng Ling <xiaofeng.ling@xxxxxxxxx>
diff -r b4d615464054 -r aadb771248f6 xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c Thu Dec 8 15:09:46 2005 +0800
+++ b/xen/arch/x86/shadow32.c Thu Dec 8 15:09:49 2005 +0800
@@ -2982,6 +2982,23 @@
}
}
+/*
+ * Throw away every cached shadow page table for domain @d and reset the
+ * shadow hash table to an empty state.  Called when a VMX guest drops
+ * back to real mode: the out-of-sync mechanism does not track guest
+ * page-table writes made in real mode, so any existing shadows could be
+ * stale once the guest re-enters protected mode.
+ */
+void clear_all_shadow_status(struct domain *d)
+{
+ shadow_lock(d);
+ free_shadow_pages(d);
+ free_shadow_ht_entries(d);
+ d->arch.shadow_ht =
+ xmalloc_array(struct shadow_status, shadow_ht_buckets);
+ if ( d->arch.shadow_ht == NULL ) {
+ printk("clear all shadow status:xmalloc fail\n");
+ /* NOTE(review): assumes domain_crash_synchronous() never returns;
+ * otherwise the memset below would dereference NULL — confirm. */
+ domain_crash_synchronous();
+ }
+ memset(d->arch.shadow_ht, 0,
+ shadow_ht_buckets * sizeof(struct shadow_status));
+
+ free_out_of_sync_entries(d);
+ shadow_unlock(d);
+}
/************************************************************************/
/************************************************************************/
diff -r b4d615464054 -r aadb771248f6 xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c Thu Dec 8 15:09:46 2005 +0800
+++ b/xen/arch/x86/shadow_public.c Thu Dec 8 15:09:49 2005 +0800
@@ -1748,6 +1750,24 @@
shadow_unlock(d);
}
+/*
+ * Throw away every cached shadow page table for domain @d and reset the
+ * shadow hash table to an empty state.  Duplicated from shadow32.c for
+ * the shadow_public.c build of the shadow code; see the vmx.c caller,
+ * which invokes this when a VMX guest returns to real mode.
+ */
+void clear_all_shadow_status(struct domain *d)
+{
+ shadow_lock(d);
+ free_shadow_pages(d);
+ free_shadow_ht_entries(d);
+ d->arch.shadow_ht =
+ xmalloc_array(struct shadow_status, shadow_ht_buckets);
+ if ( d->arch.shadow_ht == NULL ) {
+ printk("clear all shadow status:xmalloc fail\n");
+ /* NOTE(review): assumes domain_crash_synchronous() never returns;
+ * otherwise the memset below would dereference NULL — confirm. */
+ domain_crash_synchronous();
+ }
+ memset(d->arch.shadow_ht, 0,
+ shadow_ht_buckets * sizeof(struct shadow_status));
+
+ free_out_of_sync_entries(d);
+ shadow_unlock(d);
+}
+
+
/*
* Local variables:
* mode: C
diff -r b4d615464054 -r aadb771248f6 xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c Thu Dec 8 15:09:46 2005 +0800
+++ b/xen/arch/x86/vmx.c Thu Dec 8 15:09:49 2005 +0800
@@ -1210,6 +1210,7 @@
}
}
+ clear_all_shadow_status(v->domain);
if (vmx_assist(v, VMX_ASSIST_INVOKE)) {
set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &v->arch.arch_vmx.cpu_state);
__vmread(GUEST_RIP, &eip);
diff -r b4d615464054 -r aadb771248f6 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h Thu Dec 8 15:09:46 2005 +0800
+++ b/xen/include/asm-x86/shadow.h Thu Dec 8 15:09:49 2005 +0800
@@ -1707,6 +1707,8 @@
}
}
+void clear_all_shadow_status(struct domain *d);
+
#if SHADOW_DEBUG
extern int _check_pagetable(struct vcpu *v, char *s);
extern int _check_all_pagetables(struct vcpu *v, char *s);
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|