# HG changeset patch
# User Wei Huang
# Date 1306514223 18000
# Node ID 83e24e4232bf0f886543e56c7dac3ae958e6d3eb
# Parent 14eb8e1fcd828e4610df05e5d781e5f9693fd65c
HVM/SVM: enable tsc scaling ratio for SVM

Future AMD CPUs support TSC scaling. It allows a guest to run at a TSC
frequency different from that of the host, using this formula:

    guest_tsc = host_tsc * tsc_ratio + vmcb_offset

The tsc_ratio is a 64-bit MSR value containing a fixed-point number in
8.32 format (8 bits for the integer part, 32 bits for the fractional
part). For instance, 0x00000003_80000000 means tsc_ratio=3.5 (short
standalone sketches of this encoding and of the offset calculation
below follow the patch).

This patch enables the TSC scaling ratio for SVM. With it, guest VMs no
longer need to take a #VMEXIT to obtain a translated TSC value while
running in TSC emulation mode. This can substantially reduce rdtsc
overhead.

Signed-off-by: Wei Huang

diff -r 14eb8e1fcd82 -r 83e24e4232bf xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	Fri May 27 15:49:24 2011 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Fri May 27 11:37:03 2011 -0500
@@ -640,8 +640,23 @@
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     struct vmcb_struct *n1vmcb, *n2vmcb;
     uint64_t n2_tsc_offset = 0;
+    struct domain *d = v->domain;
 
-    if ( !nestedhvm_enabled(v->domain) ) {
+    if ( !nestedhvm_enabled(d) ) {
+        /* Re-adjust the offset value when TSC_RATIO is available */
+        if ( cpu_has_tsc_ratio && d->arch.vtsc )
+        {
+            uint64_t host_tsc, guest_tsc;
+
+            rdtscll(host_tsc);
+            guest_tsc = hvm_get_guest_tsc(v);
+
+            /* calculate hi,lo parts in 64bits to prevent overflow */
+            offset = (((host_tsc >> 32) * d->arch.tsc_khz / cpu_khz) << 32) +
+                     (host_tsc & 0xffffffffULL) * d->arch.tsc_khz / cpu_khz;
+            offset = guest_tsc - offset;
+        }
+
         vmcb_set_tsc_offset(vmcb, offset);
         return;
     }
@@ -749,6 +764,19 @@
     return 0;
 }
 
+static inline void svm_tsc_ratio_save(struct vcpu *v)
+{
+    /* Other vcpus might not have vtsc enabled. So disable TSC_RATIO here. */
+    if ( cpu_has_tsc_ratio && v->domain->arch.vtsc )
+        wrmsrl(MSR_AMD64_TSC_RATIO, DEFAULT_TSC_RATIO);
+}
+
+static inline void svm_tsc_ratio_load(struct vcpu *v)
+{
+    if ( cpu_has_tsc_ratio && v->domain->arch.vtsc )
+        wrmsrl(MSR_AMD64_TSC_RATIO, vcpu_tsc_ratio(v));
+}
+
 static void svm_ctxt_switch_from(struct vcpu *v)
 {
     int cpu = smp_processor_id();
@@ -758,6 +786,7 @@
     svm_save_dr(v);
     vpmu_save(v);
     svm_lwp_save(v);
+    svm_tsc_ratio_save(v);
 
     svm_sync_vmcb(v);
     svm_vmload(per_cpu(root_vmcb, cpu));
@@ -802,6 +831,7 @@
     vmcb->cleanbits.bytes = 0;
     vpmu_load(v);
     svm_lwp_load(v);
+    svm_tsc_ratio_load(v);
 
     if ( cpu_has_rdtscp )
         wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
diff -r 14eb8e1fcd82 -r 83e24e4232bf xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c	Fri May 27 15:49:24 2011 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c	Fri May 27 11:37:03 2011 -0500
@@ -128,7 +128,9 @@
     /* TSC. */
     vmcb->_tsc_offset = 0;
-    if ( v->domain->arch.vtsc )
+
+    /* Don't need to intercept RDTSC if CPU supports TSC rate scaling */
+    if ( v->domain->arch.vtsc && !cpu_has_tsc_ratio )
     {
         vmcb->_general1_intercepts |= GENERAL1_INTERCEPT_RDTSC;
         vmcb->_general2_intercepts |= GENERAL2_INTERCEPT_RDTSCP;
     }
diff -r 14eb8e1fcd82 -r 83e24e4232bf xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h	Fri May 27 15:49:24 2011 +0100
+++ b/xen/include/asm-x86/hvm/svm/svm.h	Fri May 27 11:37:03 2011 -0500
@@ -87,7 +87,15 @@
 #define cpu_has_svm_cleanbits cpu_has_svm_feature(SVM_FEATURE_VMCBCLEAN)
 #define cpu_has_svm_decode    cpu_has_svm_feature(SVM_FEATURE_DECODEASSISTS)
 #define cpu_has_pause_filter  cpu_has_svm_feature(SVM_FEATURE_PAUSEFILTER)
+#define cpu_has_tsc_ratio     cpu_has_svm_feature(SVM_FEATURE_TSCRATEMSR)
 
 #define SVM_PAUSEFILTER_INIT    3000
 
+/* TSC rate */
+#define DEFAULT_TSC_RATIO       0x0000000100000000ULL
+#define TSC_RATIO_RSVD_BITS     0xffffff0000000000ULL
+#define TSC_RATIO(g_khz, h_khz) ( (((u64)(g_khz)<<32)/(u64)(h_khz)) & \
+                                  ~TSC_RATIO_RSVD_BITS )
+#define vcpu_tsc_ratio(v)       TSC_RATIO((v)->domain->arch.tsc_khz, cpu_khz)
+
 #endif /* __ASM_X86_HVM_SVM_H__ */
diff -r 14eb8e1fcd82 -r 83e24e4232bf xen/include/asm-x86/msr-index.h
--- a/xen/include/asm-x86/msr-index.h	Fri May 27 15:49:24 2011 +0100
+++ b/xen/include/asm-x86/msr-index.h	Fri May 27 11:37:03 2011 -0500
@@ -266,6 +266,9 @@
 #define MSR_AMD_PATCHLEVEL		0x0000008b
 #define MSR_AMD_PATCHLOADER		0xc0010020
 
+/* AMD TSC RATE MSR */
+#define MSR_AMD64_TSC_RATIO		0xc0000104
+
 /* AMD Lightweight Profiling MSRs */
 #define MSR_AMD64_LWP_CFG		0xc0000105
 #define MSR_AMD64_LWP_CBADDR		0xc0000106
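
For reference, a minimal standalone C sketch of the 8.32 fixed-point
encoding described in the commit message and implemented by the
TSC_RATIO()/DEFAULT_TSC_RATIO definitions above. This is not part of the
patch; make_ratio() and the frequencies used below are illustrative only.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same mask as TSC_RATIO_RSVD_BITS: the ratio occupies bits 39:0. */
#define RSVD_BITS 0xffffff0000000000ULL

/* Mirrors the TSC_RATIO() macro: guest_khz/host_khz as 8.32 fixed point. */
static uint64_t make_ratio(uint64_t guest_khz, uint64_t host_khz)
{
    return ((guest_khz << 32) / host_khz) & ~RSVD_BITS;
}

int main(void)
{
    /* A guest running at 3.5x the host frequency, as in the commit message. */
    uint64_t ratio = make_ratio(3500000, 1000000);

    assert(ratio == 0x0000000380000000ULL);   /* i.e. tsc_ratio = 3.5 */
    printf("integer part:    %llu\n",
           (unsigned long long)(ratio >> 32));                    /* 3    */
    printf("fractional part: %.2f\n",
           (ratio & 0xffffffffULL) / 4294967296.0);               /* 0.50 */

    /* A 1:1 ratio encodes as 1 << 32, matching DEFAULT_TSC_RATIO. */
    assert(make_ratio(2400000, 2400000) == 0x0000000100000000ULL);
    return 0;
}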
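
Similarly, a small standalone sketch of the hi/lo scaling used when
re-adjusting the offset in the first svm.c hunk. The full 64-bit multiply
host_tsc * tsc_khz can overflow, so each 32-bit half is scaled separately;
scale_host_tsc() is an illustrative name, not Xen code, and the assumed
frequencies are only for the demonstration.

#include <assert.h>
#include <stdint.h>

/* Scale a 64-bit host TSC by guest_khz/host_khz without overflowing the
 * intermediate products: (hi * g/h) << 32 plus (lo * g/h). */
static uint64_t scale_host_tsc(uint64_t host_tsc,
                               uint64_t guest_khz, uint64_t host_khz)
{
    uint64_t hi = ((host_tsc >> 32) * guest_khz / host_khz) << 32;
    uint64_t lo = (host_tsc & 0xffffffffULL) * guest_khz / host_khz;

    return hi + lo;
}

int main(void)
{
    /* 2:1 guest/host frequency; the scaled TSC is exactly doubled. */
    assert(scale_host_tsc(0x0123456789abcdefULL, 2000000, 1000000) ==
           0x02468acf13579bdeULL);

    /* The hunk then sets offset = guest_tsc - scaled_host_tsc, so that the
     * hardware's host_tsc * tsc_ratio + offset matches hvm_get_guest_tsc(v)
     * at the moment the offset is programmed. */
    return 0;
}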