- use __clear_bit() rather than clear_bit()
- use switch statements instead of long series of if-s
- eliminate pointless casts
(Applies cleanly only on top of the previously sent SVM/EFER patch.)
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
Index: 2007-08-08/xen/arch/x86/hvm/hvm.c
===================================================================
--- 2007-08-08.orig/xen/arch/x86/hvm/hvm.c 2007-08-06 15:08:40.000000000 +0200
+++ 2007-08-08/xen/arch/x86/hvm/hvm.c 2007-08-08 11:45:25.000000000 +0200
@@ -614,37 +614,38 @@ void hvm_cpuid(unsigned int input, unsig
{
if ( !cpuid_hypervisor_leaves(input, eax, ebx, ecx, edx) )
{
+ struct vcpu *v = current;
+
cpuid(input, eax, ebx, ecx, edx);
- if ( input == 0x00000001 )
+ switch ( input )
{
- struct vcpu *v = current;
-
- clear_bit(X86_FEATURE_MWAIT & 31, ecx);
+ case 0x00000001:
+ __clear_bit(X86_FEATURE_MWAIT & 31, ecx);
if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
- clear_bit(X86_FEATURE_APIC & 31, edx);
+ __clear_bit(X86_FEATURE_APIC & 31, edx);
#if CONFIG_PAGING_LEVELS >= 3
if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
#endif
- clear_bit(X86_FEATURE_PAE & 31, edx);
- clear_bit(X86_FEATURE_PSE36 & 31, edx);
- }
- else if ( input == 0x80000001 )
- {
+ __clear_bit(X86_FEATURE_PAE & 31, edx);
+ __clear_bit(X86_FEATURE_PSE36 & 31, edx);
+ break;
+
+ case 0x80000001:
#if CONFIG_PAGING_LEVELS >= 3
- struct vcpu *v = current;
if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
#endif
- clear_bit(X86_FEATURE_NX & 31, edx);
+ __clear_bit(X86_FEATURE_NX & 31, edx);
#ifdef __i386__
/* Mask feature for Intel ia32e or AMD long mode. */
- clear_bit(X86_FEATURE_LAHF_LM & 31, ecx);
+ __clear_bit(X86_FEATURE_LAHF_LM & 31, ecx);
- clear_bit(X86_FEATURE_LM & 31, edx);
- clear_bit(X86_FEATURE_SYSCALL & 31, edx);
+ __clear_bit(X86_FEATURE_LM & 31, edx);
+ __clear_bit(X86_FEATURE_SYSCALL & 31, edx);
#endif
+ break;
}
}
}
Index: 2007-08-08/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- 2007-08-08.orig/xen/arch/x86/hvm/svm/svm.c 2007-08-08 11:43:53.000000000 +0200
+++ 2007-08-08/xen/arch/x86/hvm/svm/svm.c 2007-08-08 11:45:25.000000000 +0200
@@ -1117,8 +1117,9 @@ static void svm_vmexit_do_cpuid(struct v
hvm_cpuid(input, &eax, &ebx, &ecx, &edx);
- if ( input == 0x00000001 )
+ switch ( input )
{
+ case 0x00000001:
/* Clear out reserved bits. */
ecx &= ~SVM_VCPU_CPUID_L1_ECX_RESERVED;
edx &= ~SVM_VCPU_CPUID_L1_EDX_RESERVED;
@@ -1126,50 +1127,56 @@ static void svm_vmexit_do_cpuid(struct v
/* Guest should only see one logical processor.
* See details on page 23 of AMD CPUID Specification.
*/
- clear_bit(X86_FEATURE_HT & 31, &edx); /* clear the hyperthread bit */
+ __clear_bit(X86_FEATURE_HT & 31, &edx); /* clear the hyperthread bit */
ebx &= 0xFF00FFFF; /* clear the logical processor count when HTT=0 */
ebx |= 0x00010000; /* set to 1 just for precaution */
- }
- else if ( input == 0x80000001 )
- {
+ break;
+
+ case 0x80000001:
if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
- clear_bit(X86_FEATURE_APIC & 31, &edx);
+ __clear_bit(X86_FEATURE_APIC & 31, &edx);
#if CONFIG_PAGING_LEVELS >= 3
if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
#endif
- clear_bit(X86_FEATURE_PAE & 31, &edx);
+ __clear_bit(X86_FEATURE_PAE & 31, &edx);
- clear_bit(X86_FEATURE_PSE36 & 31, &edx);
+ __clear_bit(X86_FEATURE_PSE36 & 31, &edx);
/* Clear the Cmp_Legacy bit
* This bit is supposed to be zero when HTT = 0.
* See details on page 23 of AMD CPUID Specification.
*/
- clear_bit(X86_FEATURE_CMP_LEGACY & 31, &ecx);
+ __clear_bit(X86_FEATURE_CMP_LEGACY & 31, &ecx);
/* Make SVM feature invisible to the guest. */
- clear_bit(X86_FEATURE_SVME & 31, &ecx);
+ __clear_bit(X86_FEATURE_SVME & 31, &ecx);
+ __clear_bit(X86_FEATURE_SKINIT & 31, &ecx);
+
+ __clear_bit(X86_FEATURE_OSVW & 31, &ecx);
+ __clear_bit(X86_FEATURE_WDT & 31, &ecx);
/* So far, we do not support 3DNow for the guest. */
- clear_bit(X86_FEATURE_3DNOW & 31, &edx);
- clear_bit(X86_FEATURE_3DNOWEXT & 31, &edx);
- }
- else if ( input == 0x80000007 || input == 0x8000000A )
- {
+ __clear_bit(X86_FEATURE_3DNOW & 31, &edx);
+ __clear_bit(X86_FEATURE_3DNOWEXT & 31, &edx);
+ break;
+
+ case 0x80000007:
+ case 0x8000000A:
/* Mask out features of power management and SVM extension. */
eax = ebx = ecx = edx = 0;
- }
- else if ( input == 0x80000008 )
- {
+ break;
+
+ case 0x80000008:
/* Make sure Number of CPU core is 1 when HTT=0 */
ecx &= 0xFFFFFF00;
+ break;
}
- regs->eax = (unsigned long)eax;
- regs->ebx = (unsigned long)ebx;
- regs->ecx = (unsigned long)ecx;
- regs->edx = (unsigned long)edx;
+ regs->eax = eax;
+ regs->ebx = ebx;
+ regs->ecx = ecx;
+ regs->edx = edx;
HVMTRACE_3D(CPUID, v, input,
((uint64_t)eax << 32) | ebx, ((uint64_t)ecx << 32) | edx);
Index: 2007-08-08/xen/arch/x86/hvm/vmx/vmx.c
===================================================================
--- 2007-08-08.orig/xen/arch/x86/hvm/vmx/vmx.c 2007-08-07 15:00:27.000000000 +0200
+++ 2007-08-08/xen/arch/x86/hvm/vmx/vmx.c 2007-08-08 11:45:25.000000000 +0200
@@ -1326,15 +1326,9 @@ static void vmx_do_no_device_fault(void)
static void vmx_do_cpuid(struct cpu_user_regs *regs)
{
unsigned int input = (unsigned int)regs->eax;
- unsigned int count = (unsigned int)regs->ecx;
unsigned int eax, ebx, ecx, edx;
- if ( input == 0x00000004 )
- {
- cpuid_count(input, count, &eax, &ebx, &ecx, &edx);
- eax &= NUM_CORES_RESET_MASK;
- }
- else if ( input == 0x40000003 )
+ if ( input == 0x40000003 )
{
/*
* NB. Unsupported interface for private use of VMXASSIST only.
@@ -1360,37 +1354,52 @@ static void vmx_do_cpuid(struct cpu_user
unmap_domain_page(p);
gdprintk(XENLOG_INFO, "Output value is 0x%"PRIx64".\n", value);
- ecx = (u32)value;
- edx = (u32)(value >> 32);
- } else {
- hvm_cpuid(input, &eax, &ebx, &ecx, &edx);
+ regs->ecx = (u32)value;
+ regs->edx = (u32)(value >> 32);
+ return;
+ }
- if ( input == 0x00000001 )
- {
- /* Mask off reserved bits. */
- ecx &= ~VMX_VCPU_CPUID_L1_ECX_RESERVED;
+ hvm_cpuid(input, &eax, &ebx, &ecx, &edx);
- ebx &= NUM_THREADS_RESET_MASK;
+ switch ( input )
+ {
+ case 0x00000001:
+ /* Mask off reserved bits. */
+ ecx &= ~VMX_VCPU_CPUID_L1_ECX_RESERVED;
- /* Unsupportable for virtualised CPUs. */
- ecx &= ~(bitmaskof(X86_FEATURE_VMXE) |
- bitmaskof(X86_FEATURE_EST) |
- bitmaskof(X86_FEATURE_TM2) |
- bitmaskof(X86_FEATURE_CID));
+ ebx &= NUM_THREADS_RESET_MASK;
- edx &= ~(bitmaskof(X86_FEATURE_HT) |
- bitmaskof(X86_FEATURE_ACPI) |
- bitmaskof(X86_FEATURE_ACC));
- }
+ /* Unsupportable for virtualised CPUs. */
+ ecx &= ~(bitmaskof(X86_FEATURE_VMXE) |
+ bitmaskof(X86_FEATURE_EST) |
+ bitmaskof(X86_FEATURE_TM2) |
+ bitmaskof(X86_FEATURE_CID));
- if ( input == 0x00000006 || input == 0x00000009 || input == 0x0000000A )
- eax = ebx = ecx = edx = 0x0;
+ edx &= ~(bitmaskof(X86_FEATURE_HT) |
+ bitmaskof(X86_FEATURE_ACPI) |
+ bitmaskof(X86_FEATURE_ACC));
+
+ /* Unsupported for virtualised CPUs. */
+ ecx &= ~(bitmaskof(X86_FEATURE_PDCM));
+
+ break;
+
+ case 0x00000004:
+ cpuid_count(input, regs->ecx, &eax, &ebx, &ecx, &edx);
+ eax &= NUM_CORES_RESET_MASK;
+ break;
+
+ case 0x00000006:
+ case 0x00000009:
+ case 0x0000000A:
+ eax = ebx = ecx = edx = 0;
+ break;
}
- regs->eax = (unsigned long)eax;
- regs->ebx = (unsigned long)ebx;
- regs->ecx = (unsigned long)ecx;
- regs->edx = (unsigned long)edx;
+ regs->eax = eax;
+ regs->ebx = ebx;
+ regs->ecx = ecx;
+ regs->edx = edx;
HVMTRACE_3D(CPUID, current, input,
((uint64_t)eax << 32) | ebx, ((uint64_t)ecx << 32) | edx);
Index: 2007-08-08/xen/include/asm-x86/cpufeature.h
===================================================================
--- 2007-08-08.orig/xen/include/asm-x86/cpufeature.h 2007-08-08 11:45:13.000000000 +0200
+++ 2007-08-08/xen/include/asm-x86/cpufeature.h 2007-08-08 11:45:25.000000000 +0200
@@ -80,9 +80,15 @@
#define X86_FEATURE_VMXE (4*32+ 5) /* Virtual Machine Extensions */
#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */
#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental Streaming SIMD Extensions-3 */
#define X86_FEATURE_CID (4*32+10) /* Context ID */
#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
+#define X86_FEATURE_PDCM (4*32+15) /* Perf/Debug Capability MSR */
+#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_SSE4_1 (4*32+19) /* Streaming SIMD Extensions 4.1 */
+#define X86_FEATURE_SSE4_2 (4*32+20) /* Streaming SIMD Extensions 4.2 */
+#define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */
@@ -94,6 +100,15 @@
#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
#define X86_FEATURE_SVME (6*32+ 2) /* Secure Virtual Machine */
+#define X86_FEATURE_EXTAPICSPACE (6*32+ 3) /* Extended APIC space */
+#define X86_FEATURE_ALTMOVCR (6*32+ 4) /* LOCK MOV CR accesses CR+8 */
+#define X86_FEATURE_ABM (6*32+ 5) /* Advanced Bit Manipulation */
+#define X86_FEATURE_SSE4A (6*32+ 6) /* AMD Streaming SIMD Extensions-4a */
+#define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned SSE Access */
+#define X86_FEATURE_3DNOWPF (6*32+ 8) /* 3DNow! Prefetch */
+#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */
+#define X86_FEATURE_SKINIT (6*32+ 12) /* SKINIT, STGI/CLGI, DEV */
+#define X86_FEATURE_WDT (6*32+ 13) /* Watchdog Timer */
#define cpu_has(c, bit) test_bit(bit, (c)->x86_capability)
#define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability)
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|