# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Date 1185271808 -3600
# Node ID 531b8ccda9732e521ea96b4d0a6b6d58b8981149
# Parent 7bdc9f6407d324dbe4748adc24e2d4feab8953e1
[HVM] Shadow: release the shadow lock during the emulation path
and retake it only for the write-back at the end.
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
---
xen/arch/x86/mm/shadow/multi.c | 46 ++++++++++++++++++++++++++++++++---------
1 files changed, 36 insertions(+), 10 deletions(-)
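
Note: the change below drops the shadow lock in sh_page_fault() before
x86_emulate() runs, and makes each write-back helper take and release
the lock itself. As a rough illustration of the pattern (not Xen code:
a minimal standalone sketch with a pthread mutex standing in for the
shadow lock, and hypothetical emulate_one_insn() and
writeback_to_pagetable() helpers in place of the shadow code):

    /* Minimal sketch of the locking pattern, assuming POSIX threads.
     * emulate_one_insn() and writeback_to_pagetable() are hypothetical
     * stand-ins for the shadow code, not real Xen interfaces. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t shadow_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long pagetable_word;   /* stands in for a shadow PTE */

    /* Slow path: runs with the lock dropped, so other vcpus can make
     * progress while we decode and execute the faulting instruction. */
    static unsigned long emulate_one_insn(unsigned long val)
    {
        return val + 1;                    /* pretend to emulate */
    }

    /* Write-back: the only step that touches shared state, so the only
     * step that holds the lock.  Every return path must unlock. */
    static int writeback_to_pagetable(unsigned long val)
    {
        pthread_mutex_lock(&shadow_lock);
        if ( val == 0 )
        {
            pthread_mutex_unlock(&shadow_lock);
            return -1;                     /* error path unlocks first */
        }
        pagetable_word = val;
        pthread_mutex_unlock(&shadow_lock);
        return 0;
    }

    int main(void)
    {
        /* Mirrors sh_page_fault(): emulate with the lock dropped,
         * retake it only inside the write-back. */
        if ( writeback_to_pagetable(emulate_one_insn(41)) == 0 )
            printf("wrote back %lu\n", pagetable_word);
        return 0;
    }
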
diff -r 7bdc9f6407d3 -r 531b8ccda973 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c Mon Jul 23 10:03:17 2007 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c Tue Jul 24 11:10:08 2007 +0100
@@ -2920,6 +2920,15 @@ static int sh_page_fault(struct vcpu *v,
SHADOW_PRINTK("emulate: eip=%#lx esp=%#lx\n",
(unsigned long)regs->eip, (unsigned long)regs->esp);
+ /*
+ * We don't need to hold the lock for the whole emulation; we will
+ * take it again when we write to the pagetables.
+ */
+ sh_audit_gw(v, &gw);
+ unmap_walk(v, &gw);
+ shadow_audit_tables(v);
+ shadow_unlock(d);
+
emul_ops = shadow_init_emulation(&emul_ctxt, regs);
r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
@@ -2937,7 +2946,7 @@ static int sh_page_fault(struct vcpu *v,
/* If this is actually a page table, then we have a bug, and need
* to support more operations in the emulator. More likely,
* though, this is a hint that this page should not be shadowed. */
- sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
+ shadow_remove_all_shadows(v, gmfn);
}
#if GUEST_PAGING_LEVELS == 3 /* PAE guest */
@@ -2972,7 +2981,9 @@ static int sh_page_fault(struct vcpu *v,
/* Emulator has changed the user registers: write back */
if ( is_hvm_domain(d) )
hvm_load_cpu_guest_regs(v, regs);
- goto done;
+
+ SHADOW_PRINTK("emulated\n");
+ return EXCRET_fault_fixed;
mmio:
if ( !guest_mode(regs) )
@@ -4053,11 +4064,15 @@ sh_x86_emulate_write(struct vcpu *v, uns
if ( vaddr & (bytes-1) )
return X86EMUL_UNHANDLEABLE;
- ASSERT(shadow_locked_by_me(v->domain));
ASSERT(((vaddr & ~PAGE_MASK) + bytes) <= PAGE_SIZE);
-
- if ( (addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn)) == NULL )
+ shadow_lock(v->domain);
+
+ addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn);
+ if ( addr == NULL )
+ {
+ shadow_unlock(v->domain);
return X86EMUL_EXCEPTION;
+ }
skip = safe_not_to_verify_write(mfn, addr, src, bytes);
memcpy(addr, src, bytes);
@@ -4073,6 +4088,7 @@ sh_x86_emulate_write(struct vcpu *v, uns
sh_unmap_domain_page(addr);
shadow_audit_tables(v);
+ shadow_unlock(v->domain);
return X86EMUL_OKAY;
}
@@ -4086,14 +4102,18 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
unsigned long prev;
int rv = X86EMUL_OKAY, skip;
- ASSERT(shadow_locked_by_me(v->domain));
ASSERT(bytes <= sizeof(unsigned long));
if ( vaddr & (bytes-1) )
return X86EMUL_UNHANDLEABLE;
+ shadow_lock(v->domain);
- if ( (addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn)) == NULL )
+ addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn);
+ if ( addr == NULL )
+ {
+ shadow_unlock(v->domain);
return X86EMUL_EXCEPTION;
+ }
skip = safe_not_to_verify_write(mfn, &new, &old, bytes);
@@ -4129,6 +4149,7 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
sh_unmap_domain_page(addr);
shadow_audit_tables(v);
+ shadow_unlock(v->domain);
return rv;
}
@@ -4143,13 +4164,17 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
u64 old, new, prev;
int rv = X86EMUL_OKAY, skip;
- ASSERT(shadow_locked_by_me(v->domain));
-
if ( vaddr & 7 )
return X86EMUL_UNHANDLEABLE;
- if ( (addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn)) == NULL )
+ shadow_lock(v->domain);
+
+ addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn);
+ if ( addr == NULL )
+ {
+ shadow_unlock(v->domain);
return X86EMUL_EXCEPTION;
+ }
old = (((u64) old_hi) << 32) | (u64) old_lo;
new = (((u64) new_hi) << 32) | (u64) new_lo;
@@ -4173,6 +4198,7 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
sh_unmap_domain_page(addr);
shadow_audit_tables(v);
+ shadow_unlock(v->domain);
return rv;
}
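
All three write-back helpers above follow the same shape: check
alignment, take the lock, map the destination, do the write, unmap,
audit, unlock, with the early X86EMUL_EXCEPTION return releasing the
lock explicitly. A compilable sketch of that shape, assuming POSIX
threads, with hypothetical map_dest()/unmap_dest() helpers standing in
for emulate_map_dest() and sh_unmap_domain_page():

    /* Sketch of the write-back helper shape.  map_dest() and
     * unmap_dest() are hypothetical stand-ins, not Xen functions. */
    #include <pthread.h>
    #include <string.h>

    enum { EMUL_OKAY, EMUL_EXCEPTION, EMUL_UNHANDLEABLE };

    static pthread_mutex_t shadow_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned char page[4096];    /* stands in for the mapped mfn */

    static void *map_dest(unsigned long vaddr, unsigned int bytes)
    {
        if ( vaddr + bytes > sizeof(page) )
            return NULL;                /* translation failed */
        return &page[vaddr];
    }

    static void unmap_dest(void *addr) { (void)addr; }

    static int emulate_write(unsigned long vaddr, const void *src,
                             unsigned int bytes)
    {
        void *addr;

        /* Checks needing no shared state come before the lock
         * (bytes is assumed a power of two, as in the patch)... */
        if ( vaddr & (bytes - 1) )
            return EMUL_UNHANDLEABLE;

        pthread_mutex_lock(&shadow_lock);

        addr = map_dest(vaddr, bytes);
        if ( addr == NULL )
        {
            /* ...and every exit after taking it must unlock. */
            pthread_mutex_unlock(&shadow_lock);
            return EMUL_EXCEPTION;
        }

        memcpy(addr, src, bytes);
        unmap_dest(addr);
        pthread_mutex_unlock(&shadow_lock);
        return EMUL_OKAY;
    }

    int main(void)
    {
        unsigned int word = 0x12345678;
        return emulate_write(8, &word, sizeof(word)) == EMUL_OKAY ? 0 : 1;
    }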