commit 242318e2ae4229ba3ac6a5253f3f4722133348a6 Author: Andre Przywara Date: Sun Jul 25 14:33:21 2010 +0200 svm: implement instruction fetch part of DecodeAssist Newer SVM implementations (Bulldozer) copy up to 15 bytes from the instruction stream into the VMCB when a #PF or #NPF exception is intercepted. This patch makes use of this information if available. This saves us from a) traversing the guest's page tables, b) mapping the guest's memory and c) copying the instructions from there into the hypervisor's address space. This speeds up #NPF intercepts quite a lot and avoids cache and TLB thrashing. Signed-off-by: Andre Przywara diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c index af903c9..7074d83 100644 --- a/xen/arch/x86/hvm/emulate.c +++ b/xen/arch/x86/hvm/emulate.c @@ -995,15 +995,22 @@ int hvm_emulate_one( pfec |= PFEC_user_mode; hvmemul_ctxt->insn_buf_eip = regs->eip; - hvmemul_ctxt->insn_buf_bytes = - (hvm_virtual_to_linear_addr( - x86_seg_cs, &hvmemul_ctxt->seg_reg[x86_seg_cs], - regs->eip, sizeof(hvmemul_ctxt->insn_buf), - hvm_access_insn_fetch, hvmemul_ctxt->ctxt.addr_size, &addr) && - !hvm_fetch_from_guest_virt_nofault( - hvmemul_ctxt->insn_buf, addr, - sizeof(hvmemul_ctxt->insn_buf), pfec)) - ? sizeof(hvmemul_ctxt->insn_buf) : 0; + if (curr->arch.hvm_vcpu.guest_ins_len == 0) { + hvmemul_ctxt->insn_buf_bytes = + (hvm_virtual_to_linear_addr( + x86_seg_cs, &hvmemul_ctxt->seg_reg[x86_seg_cs], + regs->eip, sizeof(hvmemul_ctxt->insn_buf), + hvm_access_insn_fetch, hvmemul_ctxt->ctxt.addr_size, &addr) && + !hvm_fetch_from_guest_virt_nofault( + hvmemul_ctxt->insn_buf, addr, + sizeof(hvmemul_ctxt->insn_buf), pfec)) + ? 
sizeof(hvmemul_ctxt->insn_buf) : 0; + } else { + hvmemul_ctxt->insn_buf_bytes = curr->arch.hvm_vcpu.guest_ins_len; + memcpy(hvmemul_ctxt->insn_buf, curr->arch.hvm_vcpu.guest_ins, + hvmemul_ctxt->insn_buf_bytes); + curr->arch.hvm_vcpu.guest_ins_len = 0; + } hvmemul_ctxt->exn_pending = 0; diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c index 279220a..861c2c7 100644 --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -1589,6 +1589,18 @@ static void svm_invlpg_intercept(unsigned long vaddr) svm_asid_g_invlpg(curr, vaddr); } +static void svm_set_instruction_bytes(struct vmcb_struct *vmcb) +{ + struct vcpu *curr = current; + int len = vmcb->guest_ins_len & 0x0F; + + if (len == 0) + return; + curr->arch.hvm_vcpu.guest_ins_len = len; + curr->arch.hvm_vcpu.guest_ins = vmcb->guest_ins; + return; +} + static struct hvm_function_table __read_mostly svm_function_table = { .name = "SVM", .cpu_up_prepare = svm_cpu_up_prepare, @@ -1801,6 +1813,7 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs) unsigned long va; va = vmcb->exitinfo2; regs->error_code = vmcb->exitinfo1; + svm_set_instruction_bytes(vmcb); HVM_DBG_LOG(DBG_LEVEL_VMMU, "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx", (unsigned long)regs->eax, (unsigned long)regs->ebx, @@ -1809,6 +1822,7 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs) if ( paging_fault(va, regs) ) { + v->arch.hvm_vcpu.guest_ins_len = 0; if ( trace_will_trace_event(TRC_SHADOW) ) break; if ( hvm_long_mode_enabled(v) ) @@ -1817,6 +1831,7 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs) HVMTRACE_2D(PF_XEN, regs->error_code, va); break; } + v->arch.hvm_vcpu.guest_ins_len = 0; hvm_inject_exception(TRAP_page_fault, regs->error_code, va); break; @@ -1968,6 +1983,7 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs) case VMEXIT_NPF: perfc_incra(svmexits, VMEXIT_NPF_PERFC); regs->error_code = vmcb->exitinfo1; + svm_set_instruction_bytes(vmcb); 
svm_do_nested_pgfault(v, regs, vmcb->exitinfo2); break; diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h index eabecaa..741bee8 100644 --- a/xen/include/asm-x86/hvm/vcpu.h +++ b/xen/include/asm-x86/hvm/vcpu.h @@ -140,6 +140,9 @@ struct hvm_vcpu { unsigned long mmio_gva; unsigned long mmio_gpfn; + uint8_t guest_ins_len; + void *guest_ins; + /* Callback into x86_emulate when emulating FPU/MMX/XMM instructions. */ void (*fpu_exception_callback)(void *, struct cpu_user_regs *); void *fpu_exception_callback_arg;