# HG changeset patch
# User cegger
# Date 1302705903 -7200
Implement ASID emulation for nested virtualization. This allows the l1
guest to run its nested (l2) guest using a hardware ASID.

Signed-off-by: Christoph Egger
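The scheme, in brief: each vcpu now keeps two hardware ASIDs, nv_n1asid
to run the l1 guest and nv_n2asid to run the l2 guest, both drawn from
the existing per-core allocator. A do/while loop keeps the two slots
from ever aliasing, and a generation rollover still forces a full TLB
flush. The standalone model below is illustration only, not part of the
patch: all names are stand-ins, and the nv_new_vasid handling and the
erratum/disabled paths are left out.

/*
 * Simplified, standalone model of the allocator in
 * hvm_asid_handle_vmenter(). Illustration only; not Xen code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ASID 3u               /* tiny pool so a rollover happens quickly */

static uint64_t core_generation = 1;  /* per-core ASID generation */
static uint32_t next_asid = 1;        /* next unallocated ASID on this core */

struct vcpu_asids {
    uint64_t generation;  /* generation the vcpu's ASIDs belong to */
    uint32_t n1, n2;      /* hw ASID for the l1 resp. l2 guest, 0 = none */
};

/* Pick the hw ASID to enter the guest with; *need_flush is set when a
 * new generation started and the whole TLB must be flushed on entry. */
static uint32_t asid_for_vmenter(struct vcpu_asids *v, bool run_n2guest,
                                 bool *need_flush)
{
    *need_flush = false;

    /* Fast path: the requested slot is still valid in this generation. */
    if ( v->generation == core_generation )
    {
        if ( run_n2guest && v->n2 != 0 && next_asid > v->n2 )
            return v->n2;
        if ( !run_n2guest && v->n1 != 0 && next_asid > v->n1 )
            return v->n1;
    }

    do {
        if ( next_asid > MAX_ASID )     /* pool exhausted: new generation */
        {
            core_generation++;
            next_asid = 1;
        }
        if ( next_asid == 1 )           /* generation start: old ASIDs stale */
            *need_flush = true;

        if ( run_n2guest )
            v->n2 = next_asid++;
        else
            v->n1 = next_asid++;
        /* l1 and l2 must never share a hardware ASID. */
    } while ( v->n1 == v->n2 );

    v->generation = core_generation;
    return run_n2guest ? v->n2 : v->n1;
}

int main(void)
{
    struct vcpu_asids v = { 0 };
    bool flush;

    for ( int i = 0; i < 6; i++ )
    {
        bool l2 = i & 1;              /* alternate l1 and l2 entries */
        uint32_t asid = asid_for_vmenter(&v, l2, &flush);
        printf("enter %s guest: asid=%u%s\n",
               l2 ? "l2" : "l1", asid, flush ? " (flush TLB)" : "");
    }
    return 0;
}

The point of keeping two stable per-world ASIDs is that switching
between l1 and l2 no longer needs hvm_asid_flush_vcpu() on every
emulated VMRUN/VMEXIT, which is exactly what the nestedsvm.c hunks
below delete.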
diff -r e1895b172170 -r 07ce1a6ecad4 xen/arch/x86/hvm/asid.c
--- a/xen/arch/x86/hvm/asid.c
+++ b/xen/arch/x86/hvm/asid.c
@@ -102,10 +102,14 @@ void hvm_asid_flush_core(void)
         data->disabled = 1;
 }
 
-bool_t hvm_asid_handle_vmenter(void)
+bool_t hvm_asid_handle_vmenter(bool_t run_n2guest)
 {
+    int need_flush = 0;
     struct vcpu *curr = current;
     struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
+    struct nestedvcpu *nv;
+
+    nv = &vcpu_nestedhvm(curr);
 
     /* On erratum #170 systems we must flush the TLB.
      * Generation overruns are taken here, too. */
@@ -113,27 +117,69 @@ bool_t hvm_asid_handle_vmenter(void)
         goto disabled;
 
     /* Test if VCPU has valid ASID. */
-    if ( curr->arch.hvm_vcpu.asid_generation == data->core_asid_generation )
-        return 0;
-
-    /* If there are no free ASIDs, need to go to a new generation */
-    if ( unlikely(data->next_asid > data->max_asid) )
-    {
-        hvm_asid_flush_core();
-        data->next_asid = 1;
-        if ( data->disabled )
-            goto disabled;
+    if ( curr->arch.hvm_vcpu.asid_generation == data->core_asid_generation ) {
+        if ( run_n2guest ) {
+            if ( !nv->nv_new_vasid && data->next_asid > nv->nv_n2asid ) {
+                /* The l1 guest didn't request a new asid. */
+                /* When the asid generation last changed while we were
+                 * about to run the l1 guest, then
+                 * next_asid <= nv->nv_n2asid.
+                 */
+                ASSERT(nv->nv_n2asid != 0);
+                ASSERT(nv->nv_n1asid != nv->nv_n2asid);
+                curr->arch.hvm_vcpu.asid = nv->nv_n2asid;
+                return 0;
+            }
+        } else if ( data->next_asid > nv->nv_n1asid ) {
+            /* When the asid generation last changed while we were about
+             * to run the l2 guest, then next_asid <= nv->nv_n1asid.
+             */
+            ASSERT(nv->nv_n1asid != 0);
+            ASSERT(nv->nv_n1asid != nv->nv_n2asid);
+            curr->arch.hvm_vcpu.asid = nv->nv_n1asid;
+            return 0;
+        }
     }
 
-    /* Now guaranteed to be a free ASID. */
-    curr->arch.hvm_vcpu.asid = data->next_asid++;
+    do {
+        /* If there are no free ASIDs, need to go to a new generation */
+        if ( unlikely(data->next_asid > data->max_asid) )
+        {
+            hvm_asid_flush_core();
+            data->next_asid = 1;
+            if ( data->disabled )
+                goto disabled;
+        }
+
+        if ( data->next_asid == 1 ) {
+            /* A new generation begins, so all old ASID allocations
+             * are now stale. Keep the flush pending even if we
+             * iterate again.
+             */
+            need_flush = 1;
+        }
+
+        /* Now guaranteed to be a free ASID. */
+        if ( run_n2guest )
+            /* nv_n1asid might hold an asid from an old generation;
+             * we handle that on the next vmenter.
+             */
+            nv->nv_n2asid = data->next_asid++;
+        else
+            /* nv_n2asid might hold an asid from an old generation;
+             * we handle that on the next vmenter.
+             */
+            nv->nv_n1asid = data->next_asid++;
+
+    /* Make sure an asid is never used twice. */
+    } while ( nv->nv_n2asid == nv->nv_n1asid );
+
+    ASSERT(nv->nv_n1asid != 0);
+
+    curr->arch.hvm_vcpu.asid = (run_n2guest) ? nv->nv_n2asid : nv->nv_n1asid;
     curr->arch.hvm_vcpu.asid_generation = data->core_asid_generation;
 
-    /*
-     * When we assign ASID 1, flush all TLB entries as we are starting a new
-     * generation, and all old ASID allocations are now stale.
-     */
-    return (curr->arch.hvm_vcpu.asid == 1);
+    return need_flush;
 
 disabled:
     curr->arch.hvm_vcpu.asid = 0;
diff -r e1895b172170 -r 07ce1a6ecad4 xen/arch/x86/hvm/nestedhvm.c
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -61,6 +61,9 @@ nestedhvm_vcpu_reset(struct vcpu *v)
     nv->nv_vvmcxaddr = VMCX_EADDR;
     nv->nv_flushp2m = 0;
     nv->nv_p2m = NULL;
+    nv->nv_new_vasid = 0;
+    nv->nv_n1asid = 0;
+    nv->nv_n2asid = 0;
 
     if ( hvm_funcs.nhvm_vcpu_reset )
         hvm_funcs.nhvm_vcpu_reset(v);
diff -r e1895b172170 -r 07ce1a6ecad4 xen/arch/x86/hvm/svm/asid.c
--- a/xen/arch/x86/hvm/svm/asid.c
+++ b/xen/arch/x86/hvm/svm/asid.c
@@ -22,6 +22,7 @@
 #include <xen/lib.h>
 #include <xen/perfc.h>
 #include <asm/hvm/svm/asid.h>
+#include <asm/hvm/nestedhvm.h>
 
 void svm_asid_init(struct cpuinfo_x86 *c)
 {
@@ -42,7 +43,13 @@ asmlinkage void svm_asid_handle_vmrun(vo
 {
     struct vcpu *curr = current;
     struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
-    bool_t need_flush = hvm_asid_handle_vmenter();
+    bool_t need_flush;
+    bool_t vcpu_guestmode = 0;
+
+    if ( nestedhvm_enabled(curr->domain) && nestedhvm_vcpu_in_guestmode(curr) )
+        vcpu_guestmode = 1;
+
+    need_flush = hvm_asid_handle_vmenter(vcpu_guestmode);
 
     /* ASID 0 indicates that ASIDs are disabled. */
     if ( curr->arch.hvm_vcpu.asid == 0 )
diff -r e1895b172170 -r 07ce1a6ecad4 xen/arch/x86/hvm/svm/nestedsvm.c
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -261,8 +261,6 @@ int nsvm_vcpu_hostrestore(struct vcpu *v
     /* Cleanbits */
     n1vmcb->cleanbits.bytes = 0;
 
-    hvm_asid_flush_vcpu(v);
-
     return 0;
 }
 
@@ -408,9 +406,7 @@ static int nsvm_vmcb_prepare4vmrun(struc
     if (rc)
         return rc;
 
-    /* ASID */
-    hvm_asid_flush_vcpu(v);
-    /* n2vmcb->_guest_asid = ns_vmcb->_guest_asid; */
+    /* ASID - Emulation handled in hvm_asid_handle_vmenter() */
 
     /* TLB control */
     n2vmcb->tlb_control = n1vmcb->tlb_control | ns_vmcb->tlb_control;
@@ -605,8 +601,8 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct
     svm->ns_vmcb_guestcr3 = ns_vmcb->_cr3;
     svm->ns_vmcb_hostcr3 = ns_vmcb->_h_cr3;
 
-    nv->nv_flushp2m = (ns_vmcb->tlb_control
-        || (svm->ns_guest_asid != ns_vmcb->_guest_asid));
+    nv->nv_new_vasid = (svm->ns_guest_asid != ns_vmcb->_guest_asid);
+    nv->nv_flushp2m = (ns_vmcb->tlb_control || nv->nv_new_vasid);
     svm->ns_guest_asid = ns_vmcb->_guest_asid;
 
     /* nested paging for the guest */
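The svm.c change below routes VMEXIT_INVLPGA to its own handler instead
of the generic emulation path: the l1 guest names ASIDs in its own
numbering (0 is the l1 guest itself, any nonzero value its nested
guest), so Xen remaps that to the hardware ASID it actually assigned
before replaying the flush. A standalone sketch of the remap follows;
illustration only, the struct is a stand-in for the nestedvcpu fields.

/* Standalone sketch of the ASID remap done by svm_invlpga_intercept();
 * illustration only, not Xen code. */
#include <stdint.h>
#include <stdio.h>

struct nested_asids {
    uint32_t nv_n1asid;   /* hw ASID Xen uses to run the l1 guest */
    uint32_t nv_n2asid;   /* hw ASID Xen uses to run the l2 guest */
};

static uint32_t remap_invlpga_asid(const struct nested_asids *nv,
                                   uint32_t guest_asid)
{
    /* In the l1 guest's numbering, ASID 0 is the l1 guest itself. */
    return guest_asid == 0 ? nv->nv_n1asid : nv->nv_n2asid;
}

int main(void)
{
    struct nested_asids nv = { .nv_n1asid = 1, .nv_n2asid = 2 };

    /* INVLPGA takes rAX = vaddr, ECX = asid; only the ASID is remapped. */
    printf("guest asid 0 -> hw asid %u\n", remap_invlpga_asid(&nv, 0));
    printf("guest asid 7 -> hw asid %u\n", remap_invlpga_asid(&nv, 7));
    return 0;
}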
diff -r e1895b172170 -r 07ce1a6ecad4 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1580,6 +1580,17 @@ static void svm_vmexit_do_invalidate_cac
     __update_guest_eip(regs, inst_len);
 }
 
+static void svm_invlpga_intercept(struct vcpu *v,
+                                  unsigned long vaddr, uint32_t asid)
+{
+    struct nestedvcpu *nv = &vcpu_nestedhvm(v);
+    if ( asid == 0 )
+        asid = nv->nv_n1asid; /* remap to l1 guest asid */
+    else
+        asid = nv->nv_n2asid; /* remap to l2 guest asid */
+    svm_invlpga(vaddr, asid);
+}
+
 static void svm_invlpg_intercept(unsigned long vaddr)
 {
     struct vcpu *curr = current;
@@ -1892,10 +1903,12 @@ asmlinkage void svm_vmexit_handler(struc
     case VMEXIT_CR0_READ ... VMEXIT_CR15_READ:
     case VMEXIT_CR0_WRITE ... VMEXIT_CR15_WRITE:
     case VMEXIT_INVLPG:
-    case VMEXIT_INVLPGA:
         if ( !handle_mmio() )
             hvm_inject_exception(TRAP_gp_fault, 0, 0);
         break;
+    case VMEXIT_INVLPGA:
+        svm_invlpga_intercept(v, regs->rax, regs->ecx);
+        break;
 
     case VMEXIT_VMMCALL:
         if ( (inst_len = __get_instruction_length(v, INSTR_VMCALL)) == 0 )
diff -r e1895b172170 -r 07ce1a6ecad4 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2673,7 +2673,7 @@ asmlinkage void vmx_vmenter_helper(void)
         goto out;
 
     old_asid = curr->arch.hvm_vcpu.asid;
-    need_flush = hvm_asid_handle_vmenter();
+    need_flush = hvm_asid_handle_vmenter(0 /* false */);
     new_asid = curr->arch.hvm_vcpu.asid;
 
     if ( unlikely(new_asid != old_asid) )
diff -r e1895b172170 -r 07ce1a6ecad4 xen/include/asm-x86/hvm/asid.h
--- a/xen/include/asm-x86/hvm/asid.h
+++ b/xen/include/asm-x86/hvm/asid.h
@@ -35,7 +35,7 @@
 void hvm_asid_flush_core(void);
 
 /* Called before entry to guest context. Checks ASID allocation, returns a
  * boolean indicating whether all ASIDs must be flushed. */
-bool_t hvm_asid_handle_vmenter(void);
+bool_t hvm_asid_handle_vmenter(bool_t run_n2guest);
 
 #endif /* __ASM_X86_HVM_ASID_H__ */
diff -r e1895b172170 -r 07ce1a6ecad4 xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -49,6 +49,11 @@ struct nestedvcpu {
     uint64_t nv_n1vmcx_pa; /* host physical address of nv_n1vmcx */
     uint64_t nv_n2vmcx_pa; /* host physical address of nv_n2vmcx */
 
+    /* ASID emulation */
+    bool_t nv_new_vasid; /* true when the l1 guest requests a new virtual asid */
+    uint32_t nv_n1asid;  /* hw ASID used to run the l1 guest */
+    uint32_t nv_n2asid;  /* hw ASID used to run the l2 guest */
+
     /* SVM/VMX arch specific */
     union {
         struct nestedsvm nsvm;
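A final note on the contract kept in asid.h: the boolean returned by
hvm_asid_handle_vmenter() still means "flush all ASIDs before entering
the guest". Below is a minimal sketch of a caller honouring it,
assuming SVM-style semantics where the request becomes the VMCB's
tlb_control flag; stand-in names, not the patch's code (the real caller
is svm_asid_handle_vmrun() above).

/* Standalone sketch of the caller contract; illustration only. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_vmcb {
    uint32_t guest_asid;   /* hw ASID the guest will run under */
    uint8_t  tlb_control;  /* SVM: 1 requests a full TLB flush on VMRUN */
};

/* Stand-in for hvm_asid_handle_vmenter(): pretend a generation just began. */
static bool fake_asid_handle_vmenter(uint32_t *asid, bool run_n2guest)
{
    *asid = run_n2guest ? 2 : 1;
    return true;           /* new generation -> flush required */
}

static void fake_asid_handle_vmrun(struct fake_vmcb *vmcb, bool in_guestmode)
{
    uint32_t asid;
    bool need_flush = fake_asid_handle_vmenter(&asid, in_guestmode);

    vmcb->guest_asid = asid;
    vmcb->tlb_control = need_flush;   /* honour the flush request */
}

int main(void)
{
    struct fake_vmcb vmcb = { 0 };

    fake_asid_handle_vmrun(&vmcb, false);
    printf("vmrun: asid=%u tlb_control=%u\n",
           vmcb.guest_asid, vmcb.tlb_control);
    return 0;
}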