The MAXSMP config option requires CPUMASK_OFFSTACK, which in turn
requires that we initialize the memory for the maps while bringing up
the cpus. MAXSMP also increases NR_CPUS to 4096. This increase in size
exposed an issue in the argument construction for multicalls from
xen_flush_tlb_others. The args should only need space for the actual
number of cpus, which for xen is currently at most 32.

Signed-off-by: Andrew Jones
---
 arch/x86/xen/mmu.c |    2 +-
 arch/x86/xen/smp.c |    7 +++++++
 2 files changed, 8 insertions(+), 1 deletions(-)

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3bf7b1d..0cf4d7c 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1293,7 +1293,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 {
 	struct {
 		struct mmuext_op op;
-		DECLARE_BITMAP(mask, NR_CPUS);
+		DECLARE_BITMAP(mask, num_processors);
 	} *args;
 	struct multicall_space mcs;
 
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index fe03eee..da5aec1 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -178,11 +178,18 @@ static void __init xen_smp_prepare_boot_cpu(void)
 static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned cpu;
+	unsigned int i;
 
 	xen_init_lock_cpu(0);
 
 	smp_store_cpu_info(0);
 	cpu_data(0).x86_max_cores = 1;
+
+	for_each_possible_cpu(i) {
+		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
+	}
 	set_cpu_sibling_map(0);
 
 	if (xen_smp_intr_init(0))
-- 
1.6.5.2
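
Background on why the zalloc_cpumask_var() loop is needed: with
CONFIG_CPUMASK_OFFSTACK, cpumask_var_t is a pointer rather than a
fixed NR_CPUS-bit array, so each per-cpu mask must be allocated and
zeroed before set_cpu_sibling_map() touches it. The userspace sketch
below illustrates the size trade-off; it is illustrative only (the
BITS_TO_LONGS helper mirrors the kernel macro, and the NR_CPUS value
and runtime cpu count of 32 are assumptions taken from the commit
message, not code from this patch):

/*
 * Userspace sketch of the CPUMASK_OFFSTACK idea (illustrative only,
 * not the kernel implementation). With MAXSMP, NR_CPUS is 4096, so a
 * fixed on-stack bitmap costs 512 bytes per instance; allocating for
 * the cpus actually present keeps the footprint proportional to the
 * runtime cpu count.
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS 4096	/* MAXSMP's compile-time limit */
#define BITS_TO_LONGS(n) \
	(((n) + CHAR_BIT * sizeof(long) - 1) / (CHAR_BIT * sizeof(long)))

static unsigned int nr_cpu_ids = 32;	/* assumed runtime cpu count */

/* Off-stack flavor: heap-allocated and zeroed at runtime, mirroring
 * what zalloc_cpumask_var(..., GFP_KERNEL) does in the kernel. */
static unsigned long *zalloc_cpu_mask(unsigned int cpus)
{
	return calloc(BITS_TO_LONGS(cpus), sizeof(unsigned long));
}

int main(void)
{
	/* On-stack flavor: always pays for all NR_CPUS bits. */
	unsigned long onstack_mask[BITS_TO_LONGS(NR_CPUS)];
	unsigned long *offstack_mask = zalloc_cpu_mask(nr_cpu_ids);

	if (!offstack_mask)
		return 1;

	memset(onstack_mask, 0, sizeof(onstack_mask));

	printf("on-stack mask:  %zu bytes (NR_CPUS = %d)\n",
	       sizeof(onstack_mask), NR_CPUS);
	printf("off-stack mask: %zu bytes (nr_cpu_ids = %u)\n",
	       BITS_TO_LONGS(nr_cpu_ids) * sizeof(unsigned long),
	       nr_cpu_ids);

	free(offstack_mask);
	return 0;
}

The same arithmetic motivates the mmu.c hunk: sizing the multicall
argument bitmap by the number of processors actually present instead
of NR_CPUS keeps each multicall argument area at a few bytes rather
than 512.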