diff --git a/xen/arch/x86/i8259.c b/xen/arch/x86/i8259.c --- a/xen/arch/x86/i8259.c +++ b/xen/arch/x86/i8259.c @@ -392,7 +392,7 @@ desc->handler = &i8259A_irq_type; per_cpu(vector_irq, cpu)[FIRST_LEGACY_VECTOR + irq] = irq; - cfg->domain = cpumask_of_cpu(cpu); + cfg->cpu_mask = cpumask_of_cpu(cpu); cfg->vector = FIRST_LEGACY_VECTOR + irq; } diff --git a/xen/arch/x86/io_apic.c b/xen/arch/x86/io_apic.c --- a/xen/arch/x86/io_apic.c +++ b/xen/arch/x86/io_apic.c @@ -412,7 +412,7 @@ if (!cfg->move_cleanup_count) goto unlock; - if (vector == cfg->vector && cpu_isset(me, cfg->domain)) + if (vector == cfg->vector && cpu_isset(me, cfg->cpu_mask)) goto unlock; irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); @@ -441,7 +441,7 @@ { cpumask_t cleanup_mask; - cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); + cpus_and(cleanup_mask, cfg->old_cpu_mask, cpu_online_map); cfg->move_cleanup_count = cpus_weight(cleanup_mask); genapic->send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); @@ -460,7 +460,7 @@ vector = get_irq_regs()->entry_vector; me = smp_processor_id(); - if (vector == cfg->vector && cpu_isset(me, cfg->domain)) + if (vector == cfg->vector && cpu_isset(me, cfg->cpu_mask)) send_cleanup_vector(cfg); } @@ -488,7 +488,7 @@ return BAD_APICID; cpus_copy(desc->affinity, mask); - cpus_and(dest_mask, desc->affinity, cfg->domain); + cpus_and(dest_mask, desc->affinity, cfg->cpu_mask); return cpu_mask_to_apicid(dest_mask); } @@ -638,8 +638,8 @@ continue; irq = pin_2_irq(irq_entry, ioapic, pin); cfg = irq_cfg(irq); - BUG_ON(cpus_empty(cfg->domain)); - set_ioapic_affinity_irq(irq, cfg->domain); + BUG_ON(cpus_empty(cfg->cpu_mask)); + set_ioapic_affinity_irq(irq, cfg->cpu_mask); } } @@ -1003,7 +1003,7 @@ } cfg = irq_cfg(irq); SET_DEST(entry.dest.dest32, entry.dest.logical.logical_dest, - cpu_mask_to_apicid(cfg->domain)); + cpu_mask_to_apicid(cfg->cpu_mask)); spin_lock_irqsave(&ioapic_lock, flags); io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1)); io_apic_write(apic, 
0x10+2*pin, *(((int *)&entry)+0)); @@ -2446,7 +2446,7 @@ rte.vector = cfg->vector; SET_DEST(rte.dest.dest32, rte.dest.logical.logical_dest, - cpu_mask_to_apicid(cfg->domain)); + cpu_mask_to_apicid(cfg->cpu_mask)); io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&rte) + 0)); io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&rte) + 1)); diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c --- a/xen/arch/x86/irq.c +++ b/xen/arch/x86/irq.c @@ -86,14 +86,14 @@ cpus_and(mask, domain, cpu_online_map); if (cpus_empty(mask)) return -EINVAL; - if ((cfg->vector == vector) && cpus_equal(cfg->domain, mask)) + if ((cfg->vector == vector) && cpus_equal(cfg->cpu_mask, mask)) return 0; if (cfg->vector != IRQ_VECTOR_UNASSIGNED) return -EBUSY; for_each_cpu_mask(cpu, mask) per_cpu(vector_irq, cpu)[vector] = irq; cfg->vector = vector; - cfg->domain = mask; + cfg->cpu_mask = mask; irq_status[irq] = IRQ_USED; if (IO_APIC_IRQ(irq)) irq_vector[irq] = vector; @@ -179,13 +179,13 @@ BUG_ON(!cfg->vector); vector = cfg->vector; - cpus_and(tmp_mask, cfg->domain, cpu_online_map); + cpus_and(tmp_mask, cfg->cpu_mask, cpu_online_map); for_each_cpu_mask(cpu, tmp_mask) per_cpu(vector_irq, cpu)[vector] = -1; cfg->vector = IRQ_VECTOR_UNASSIGNED; - cpus_clear(cfg->domain); + cpus_clear(cfg->cpu_mask); init_one_irq_status(irq); if (likely(!cfg->move_in_progress)) @@ -257,8 +257,8 @@ static void init_one_irq_cfg(struct irq_cfg *cfg) { cfg->vector = IRQ_VECTOR_UNASSIGNED; - cpus_clear(cfg->domain); - cpus_clear(cfg->old_domain); + cpus_clear(cfg->cpu_mask); + cpus_clear(cfg->old_cpu_mask); } int init_irq_data(void) @@ -354,7 +354,7 @@ old_vector = irq_to_vector(irq); if (old_vector) { cpus_and(tmp_mask, mask, cpu_online_map); - cpus_and(tmp_mask, cfg->domain, tmp_mask); + cpus_and(tmp_mask, cfg->cpu_mask, tmp_mask); if (!cpus_empty(tmp_mask)) { cfg->vector = old_vector; return 0; @@ -395,12 +395,12 @@ current_offset = offset; if (old_vector) { cfg->move_in_progress = 1; - cpus_copy(cfg->old_domain, cfg->domain); 
+ cpus_copy(cfg->old_cpu_mask, cfg->cpu_mask); } for_each_cpu_mask(new_cpu, tmp_mask) per_cpu(vector_irq, new_cpu)[vector] = irq; cfg->vector = vector; - cpus_copy(cfg->domain, tmp_mask); + cpus_copy(cfg->cpu_mask, tmp_mask); irq_status[irq] = IRQ_USED; if (IO_APIC_IRQ(irq)) @@ -424,7 +424,7 @@ ret = __assign_irq_vector(irq, cfg, TARGET_CPUS); if (!ret) { ret = cfg->vector; - cpus_copy(desc->affinity, cfg->domain); + cpus_copy(desc->affinity, cfg->cpu_mask); } spin_unlock_irqrestore(&vector_lock, flags); return ret; @@ -445,7 +445,7 @@ /* Mark the inuse vectors */ for (irq = 0; irq < nr_irqs; ++irq) { cfg = irq_cfg(irq); - if (!cpu_isset(cpu, cfg->domain)) + if (!cpu_isset(cpu, cfg->cpu_mask)) continue; vector = irq_to_vector(irq); per_cpu(vector_irq, cpu)[vector] = irq; diff --git a/xen/arch/x86/msi.c b/xen/arch/x86/msi.c --- a/xen/arch/x86/msi.c +++ b/xen/arch/x86/msi.c @@ -124,7 +124,7 @@ cpumask_t domain; struct irq_cfg *cfg = irq_cfg(irq); int vector = cfg->vector; - domain = cfg->domain; + domain = cfg->cpu_mask; if ( cpus_empty( domain ) ) { dprintk(XENLOG_ERR,"%s, compose msi message error!!\n", __func__); diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c --- a/xen/arch/x86/smpboot.c +++ b/xen/arch/x86/smpboot.c @@ -1015,7 +1015,7 @@ irq_vector[irq] = FIRST_HIPRIORITY_VECTOR + seridx + 1; per_cpu(vector_irq, cpu)[FIRST_HIPRIORITY_VECTOR + seridx + 1] = irq; irq_cfg[irq].vector = FIRST_HIPRIORITY_VECTOR + seridx + 1; - irq_cfg[irq].domain = (cpumask_t)CPU_MASK_ALL; + irq_cfg[irq].cpu_mask = (cpumask_t)CPU_MASK_ALL; } /* IPI for cleanuping vectors after irq move */ diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c --- a/xen/drivers/passthrough/vtd/iommu.c +++ b/xen/drivers/passthrough/vtd/iommu.c @@ -1837,7 +1837,7 @@ } cfg = irq_cfg(iommu->irq); - dma_msi_set_affinity(iommu->irq, cfg->domain); + dma_msi_set_affinity(iommu->irq, cfg->cpu_mask); clear_fault_bits(iommu); diff --git a/xen/include/asm-x86/irq.h 
b/xen/include/asm-x86/irq.h --- a/xen/include/asm-x86/irq.h +++ b/xen/include/asm-x86/irq.h @@ -25,8 +25,8 @@ struct irq_cfg { int vector; - cpumask_t domain; - cpumask_t old_domain; + cpumask_t cpu_mask; + cpumask_t old_cpu_mask; unsigned move_cleanup_count; u8 move_in_progress : 1; }; diff --git a/xen/include/xen/irq.h b/xen/include/xen/irq.h --- a/xen/include/xen/irq.h +++ b/xen/include/xen/irq.h @@ -117,7 +117,7 @@ struct irq_cfg { int vector; - cpumask_t domain; + cpumask_t cpu_mask; }; extern struct irq_cfg irq_cfg[];