Both the writing of certain MSRs and VCPUOP_get_physid also make sense
for dynamically (perhaps only temporarily) pinned vcpus. Likely a couple
of other MSR writes (MSR_K8_HWCR, MSR_AMD64_NB_CFG,
MSR_FAM10H_MMIO_CONF_BASE) would make sense to be restricted by an
is_pinned() check too, possibly also some MSR reads.

Signed-off-by: Jan Beulich

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1036,7 +1036,7 @@ arch_do_vcpu_op(
         struct vcpu_get_physid cpu_id;
 
         rc = -EINVAL;
-        if ( !v->domain->is_pinned )
+        if ( !is_pinned(v) )
             break;
 
         cpu_id.phys_id =
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2335,7 +2335,7 @@ static int emulate_privileged_op(struct
         case MSR_IA32_THERM_CONTROL:
             if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
                 goto fail;
-            if ( (v->domain->domain_id != 0) || !v->domain->is_pinned )
+            if ( (v->domain->domain_id != 0) || !is_pinned(v) )
                 break;
             if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
                 goto fail;
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -634,6 +634,8 @@ void watchdog_domain_destroy(struct doma
 
 #define is_hvm_domain(d) ((d)->is_hvm)
 #define is_hvm_vcpu(v)   (is_hvm_domain(v->domain))
+#define is_pinned(v)     ((v)->domain->is_pinned || \
+                          cpus_weight((v)->cpu_affinity) == 1)
 #define need_iommu(d)    ((d)->need_iommu)
 
 void set_vcpu_migration_delay(unsigned int delay);
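
For reference, cpus_weight() counts the CPUs set in a mask, so the new
is_pinned() macro treats a vcpu as pinned either when its whole domain
is pinned or when its current affinity mask names exactly one CPU.
Below is a minimal standalone sketch of that predicate; the struct
layouts and the popcount-based cpus_weight() stand-in are
simplifications for illustration, not Xen's actual cpumask
implementation:

/* Standalone illustration of the is_pinned() predicate. The types and
 * helpers are simplified stand-ins, not Xen's real cpumask API. */
#include <stdbool.h>
#include <stdio.h>

struct domain { bool is_pinned; };
struct vcpu {
    struct domain *domain;
    unsigned long cpu_affinity;        /* one bit per physical CPU */
};

/* Stand-in for cpus_weight(): number of CPUs present in the mask. */
static unsigned int cpus_weight(unsigned long mask)
{
    return __builtin_popcountl(mask);
}

/* Pinned if the domain as a whole is pinned, or if the affinity mask
 * currently permits exactly one CPU (dynamic, possibly temporary). */
static bool is_pinned(const struct vcpu *v)
{
    return v->domain->is_pinned || cpus_weight(v->cpu_affinity) == 1;
}

int main(void)
{
    struct domain d = { .is_pinned = false };
    struct vcpu v = { .domain = &d, .cpu_affinity = 1UL << 3 };

    printf("affinity {3}:   is_pinned = %d\n", is_pinned(&v)); /* 1 */
    v.cpu_affinity |= 1UL << 5;
    printf("affinity {3,5}: is_pinned = %d\n", is_pinned(&v)); /* 0 */
    return 0;
}

The point of basing the check on the vcpu rather than only on the
domain is that operations like the MSR_IA32_THERM_CONTROL write or
VCPUOP_get_physid are only meaningful while the vcpu cannot migrate
off its current physical CPU, and a single-CPU affinity mask provides
that guarantee just as well as a statically pinned domain does.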