diff --git a/arch/x86_64/kernel/xen_entry.S b/arch/x86_64/kernel/xen_entry.S
index 66efa07..2ffb030 100644
--- a/arch/x86_64/kernel/xen_entry.S
+++ b/arch/x86_64/kernel/xen_entry.S
@@ -12,19 +12,18 @@
 //#define preempt_enable(reg) decl threadinfo_preempt_count(reg)
 #define preempt_disable(reg)
 #define preempt_enable(reg)
-#define XEN_GET_VCPU_INFO(reg)	preempt_disable(%rbp)			; \
-				movq %gs:pda_cpunumber,reg		; \
-				shl $32, reg				; \
-				shr $32-sizeof_vcpu_shift,reg		; \
-				addq HYPERVISOR_shared_info,reg
 #define XEN_PUT_VCPU_INFO(reg) preempt_enable(%rbp)			; \
 #define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
 #else
-#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
 #define XEN_PUT_VCPU_INFO(reg)
 #define XEN_PUT_VCPU_INFO_fixup
 #endif
 
+#define XEN_GET_VCPU_INFO(reg)	preempt_disable(%rbp)			; \
+				movq %gs:pda_data_offset,reg		; \
+				addq $per_cpu__xen_vcpu,reg		; \
+				movq (reg), reg
+
 #define XEN_LOCKED_BLOCK_EVENTS(reg)	movb $1,evtchn_upcall_mask(reg)
 #define XEN_LOCKED_UNBLOCK_EVENTS(reg)	movb $0,evtchn_upcall_mask(reg)
 #define XEN_BLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
diff --git a/arch/x86_64/mm/init-xen.c b/arch/x86_64/mm/init-xen.c
index 72fbf89..d6bc553 100644
--- a/arch/x86_64/mm/init-xen.c
+++ b/arch/x86_64/mm/init-xen.c
@@ -728,6 +728,7 @@ static void xen_finish_init_mapping(void)
 	/* Switch to the real shared_info page, and clear the dummy page. */
 	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
 	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
 	memset(empty_zero_page, 0, sizeof(empty_zero_page));
 
 	/* Set up mapping of lowest 1MB of physical memory. */
diff --git a/drivers/xen/core/evtchn.c b/drivers/xen/core/evtchn.c
index 63ea957..554d1f0 100644
--- a/drivers/xen/core/evtchn.c
+++ b/drivers/xen/core/evtchn.c
@@ -242,14 +242,13 @@ asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
 	int irq;
 	unsigned int cpu = smp_processor_id();
 	shared_info_t *s = HYPERVISOR_shared_info;
-	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
 
 	exit_idle();
 	irq_enter();
 
 	do {
 		/* Avoid a callback storm when we reenable delivery. */
-		vcpu_info->evtchn_upcall_pending = 0;
+		vcpu_info(cpu)->evtchn_upcall_pending = 0;
 
 		/* Nested invocations bail immediately. */
 		if (unlikely(per_cpu(upcall_count, cpu)++))
@@ -259,7 +258,7 @@
 		/* Clear master flag /before/ clearing selector flag. */
 		wmb();
 #endif
-		l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
+		l1 = xchg(&vcpu_info(cpu)->evtchn_pending_sel, 0);
 
 		l1i = per_cpu(last_processed_l1i, cpu);
 		l2i = per_cpu(last_processed_l2i, cpu);
@@ -943,7 +942,6 @@ void unmask_evtchn(int port)
 {
 	shared_info_t *s = HYPERVISOR_shared_info;
 	unsigned int cpu = smp_processor_id();
-	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
 
 	BUG_ON(!irqs_disabled());
 
@@ -959,8 +957,8 @@
 	/* Did we miss an interrupt 'edge'? Re-fire if so. */
 	if (synch_test_bit(port, s->evtchn_pending) &&
 	    !synch_test_and_set_bit(port / BITS_PER_LONG,
-				    &vcpu_info->evtchn_pending_sel))
-		vcpu_info->evtchn_upcall_pending = 1;
+				    &vcpu_info(cpu)->evtchn_pending_sel))
+		vcpu_info(cpu)->evtchn_upcall_pending = 1;
 }
 
 EXPORT_SYMBOL_GPL(unmask_evtchn);
diff --git a/drivers/xen/core/smpboot.c b/drivers/xen/core/smpboot.c
index 80bd1bb..b285b8a 100644
--- a/drivers/xen/core/smpboot.c
+++ b/drivers/xen/core/smpboot.c
@@ -66,6 +66,40 @@ EXPORT_SYMBOL(x86_cpu_to_apicid);
 unsigned int maxcpus = NR_CPUS;
 #endif
 
+#ifdef __x86_64__
+DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
+DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu) =
+	{ (struct vcpu_info *)empty_zero_page };
+EXPORT_PER_CPU_SYMBOL(xen_vcpu);
+
+static void check_relocate_vcpus(void)
+{
+	struct vcpu_register_vcpu_info info;
+	struct vcpu_info *vcpup;
+	int rc, cpu, relocate=0;
+
+	if (num_possible_cpus() > MAX_VIRT_CPUS)
+		relocate = 1;
+
+	for_each_possible_cpu (cpu) {
+		if (relocate) {
+			vcpup = &per_cpu(xen_vcpu_info, cpu);
+			info.mfn = virt_to_mfn(vcpup);
+			info.offset = offset_in_page(vcpup);
+			rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu,
+						&info);
+			BUG_ON(rc);
+		} else {
+			/* use shared page so we can run on older xen without
+			 * VCPUOP_register_vcpu_info */
+			vcpup = &HYPERVISOR_shared_info->vcpu_info[cpu];
+		}
+
+		per_cpu(xen_vcpu, cpu) = vcpup;
+	}
+}
+#endif
+
 void __init prefill_possible_map(void)
 {
 	int i, rc;
@@ -364,6 +398,9 @@
 	if (!skip_ioapic_setup && nr_ioapics)
 		setup_IO_APIC();
 #endif
+#ifdef __x86_64__
+	check_relocate_vcpus();
+#endif
 }
 
 void __devinit smp_prepare_boot_cpu(void)
diff --git a/include/asm-i386/mach-xen/asm/hypervisor.h b/include/asm-i386/mach-xen/asm/hypervisor.h
index b38ab1b..dbe13e5 100644
--- a/include/asm-i386/mach-xen/asm/hypervisor.h
+++ b/include/asm-i386/mach-xen/asm/hypervisor.h
@@ -55,10 +55,16 @@
 #elif defined(__x86_64__) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
 # include <asm-generic/pgtable-nopud.h>
 #endif
 
+#if defined(__x86_64__)
+#include <linux/percpu.h>
+DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
+#define vcpu_info(cpu) (per_cpu(xen_vcpu, cpu))
+#else
+#define vcpu_info(cpu) (&HYPERVISOR_shared_info->vcpu_info[cpu])
+#endif
 extern shared_info_t *HYPERVISOR_shared_info;
 
-#define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu))
 #ifdef CONFIG_SMP
 #define current_vcpu_info() vcpu_info(smp_processor_id())
 #else
diff --git a/include/asm-x86_64/mach-xen/irq_vectors.h b/include/asm-x86_64/mach-xen/irq_vectors.h
index 4391b08..594048b 100644
--- a/include/asm-x86_64/mach-xen/irq_vectors.h
+++ b/include/asm-x86_64/mach-xen/irq_vectors.h
@@ -109,7 +109,7 @@
 #define NR_PIRQS	256
 
 #define DYNIRQ_BASE	(PIRQ_BASE + NR_PIRQS)
-#define NR_DYNIRQS	256
+#define NR_DYNIRQS	1024
 
 #define NR_IRQS		(NR_PIRQS + NR_DYNIRQS)
 #define NR_IRQ_VECTORS	NR_IRQS