ia64: fix the build

This addresses all remaining build problems introduced over the last
several months.

Signed-off-by: Jan Beulich

--- a/xen/arch/ia64/linux-xen/iosapic.c
+++ b/xen/arch/ia64/linux-xen/iosapic.c
@@ -275,12 +275,6 @@ set_rte (unsigned int gsi, unsigned int
 	iosapic_intr_info[vector].dest = dest;
 }

-static void
-nop (struct irq_desc *desc)
-{
-	/* do nothing... */
-}
-
 void
 kexec_disable_iosapic(void)
 {
@@ -428,7 +422,7 @@ iosapic_end_level_irq (struct irq_desc *
 #define iosapic_shutdown_level_irq mask_irq
 #define iosapic_enable_level_irq unmask_irq
 #define iosapic_disable_level_irq mask_irq
-#define iosapic_ack_level_irq nop
+#define iosapic_ack_level_irq irq_actor_none

 static hw_irq_controller irq_type_iosapic_level = {
 	.typename = "IO-SAPIC-level",
@@ -446,9 +440,9 @@ static hw_irq_controller irq_type_iosapi
  */

 static unsigned int
-iosapic_startup_edge_irq (unsigned int irq)
+iosapic_startup_edge_irq (struct irq_desc *desc)
 {
-	unmask_irq(irq);
+	unmask_irq(desc);
 	/*
 	 * IOSAPIC simply drops interrupts pended while the
 	 * corresponding pin was masked, so we can't know if an
@@ -458,23 +452,21 @@ iosapic_startup_edge_irq (unsigned int i
 }

 static void
-iosapic_ack_edge_irq (unsigned int irq)
+iosapic_ack_edge_irq (struct irq_desc *desc)
 {
-	irq_desc_t *idesc = irq_descp(irq);
-
-	move_irq(irq);
+	move_irq(desc->irq);
 	/*
 	 * Once we have recorded IRQ_PENDING already, we can mask the
 	 * interrupt for real. This prevents IRQ storms from unhandled
 	 * devices.
 	 */
-	if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) == (IRQ_PENDING|IRQ_DISABLED))
-		mask_irq(irq);
+	if ((desc->status & (IRQ_PENDING|IRQ_DISABLED)) == (IRQ_PENDING|IRQ_DISABLED))
+		mask_irq(desc);
 }

 #define iosapic_enable_edge_irq unmask_irq
-#define iosapic_disable_edge_irq nop
-#define iosapic_end_edge_irq nop
+#define iosapic_disable_edge_irq irq_disable_none
+#define iosapic_end_edge_irq irq_actor_none

 static hw_irq_controller irq_type_iosapic_edge = {
 	.typename = "IO-SAPIC-edge",
--- a/xen/arch/ia64/linux-xen/irq_ia64.c
+++ b/xen/arch/ia64/linux-xen/irq_ia64.c
@@ -242,6 +242,16 @@ static struct irqaction __read_mostly ip
 };
 #endif

+static hw_irq_controller irq_type_ia64_lsapic = {
+	.typename = "LSAPIC",
+	.startup = irq_startup_none,
+	.shutdown = irq_shutdown_none,
+	.enable = irq_enable_none,
+	.disable = irq_disable_none,
+	.ack = irq_actor_none,
+	.end = irq_actor_none
+};
+
 void
 register_percpu_irq (ia64_vector vec, struct irqaction *action)
 {
--- a/xen/arch/ia64/linux-xen/mca.c
+++ b/xen/arch/ia64/linux-xen/mca.c
@@ -428,9 +428,9 @@ void disable_irq_nosync(unsigned int irq
 		return;

 	spin_lock_irqsave(&desc->lock, flags);
-	if (!desc->depth++) {
+	if (!desc->arch.depth++) {
 		desc->status |= IRQ_DISABLED;
-		desc->handler->disable(irq);
+		desc->handler->disable(desc);
 	}
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
@@ -456,7 +456,7 @@ void enable_irq(unsigned int irq)
 		return;

 	spin_lock_irqsave(&desc->lock, flags);
-	switch (desc->depth) {
+	switch (desc->arch.depth) {
 	case 0:
 		WARN_ON(1);
 		break;
@@ -468,11 +468,11 @@ void enable_irq(unsigned int irq)
 			desc->status = status | IRQ_REPLAY;
 			hw_resend_irq(desc->handler,irq);
 		}
-		desc->handler->enable(irq);
+		desc->handler->enable(desc);
 		/* fall-through */
 	}
 	default:
-		desc->depth--;
+		desc->arch.depth--;
 	}
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
--- a/xen/arch/ia64/linux-xen/sn/kernel/irq.c
+++ b/xen/arch/ia64/linux-xen/sn/kernel/irq.c
@@ -72,6 +72,7 @@ void sn_intr_free(nasid_t local_nasid, i
 		(u64) sn_irq_info->irq_cookie, 0, 0);
 }

+#ifndef XEN
 static unsigned int
 sn_startup_irq(unsigned int irq)
 {
 	return 0;
@@ -88,9 +89,16 @@ static void sn_disable_irq(unsigned int
 static void sn_enable_irq(unsigned int irq)
 {
 }
+#endif

+#ifdef XEN
+static void sn_ack_irq(struct irq_desc *desc)
+{
+	unsigned int irq = desc->irq;
+#else
 static void sn_ack_irq(unsigned int irq)
 {
+#endif
 	u64 event_occurred, mask;

 	irq = irq & 0xff;
@@ -102,8 +110,14 @@ static void sn_ack_irq(unsigned int irq)
 	move_native_irq(irq);
 }

+#ifdef XEN
+static void sn_end_irq(struct irq_desc *desc)
+{
+	unsigned int irq = desc->irq;
+#else
 static void sn_end_irq(unsigned int irq)
 {
+#endif
 	int ivec;
 	u64 event_occurred;
@@ -224,13 +238,17 @@ static void sn_set_affinity_irq(unsigned
 static hw_irq_controller irq_type_sn = {
 #ifndef XEN
 	.name = "SN hub",
-#else
-	.typename = "SN hub",
-#endif
 	.startup = sn_startup_irq,
 	.shutdown = sn_shutdown_irq,
 	.enable = sn_enable_irq,
 	.disable = sn_disable_irq,
+#else
+	.typename = "SN hub",
+	.startup = irq_startup_none,
+	.shutdown = irq_shutdown_none,
+	.enable = irq_enable_none,
+	.disable = irq_disable_none,
+#endif
 	.ack = sn_ack_irq,
 	.end = sn_end_irq,
 #ifndef XEN
--- a/xen/arch/ia64/linux/Makefile
+++ b/xen/arch/ia64/linux/Makefile
@@ -9,7 +9,6 @@ obj-y += efi_stub.o
 obj-y += extable.o
 obj-y += flush.o
 obj-y += hpsim.o
-obj-y += irq_lsapic.o
 obj-y += linuxextable.o
 obj-y += machvec.o
 obj-y += memcpy_mck.o
--- a/xen/arch/ia64/linux/README.origin
+++ b/xen/arch/ia64/linux/README.origin
@@ -7,7 +7,6 @@ the instructions in the README there.
 efi_stub.S -> linux/arch/ia64/kernel/efi_stub.S
 extable.c -> linux/arch/ia64/mm/extable.c
 hpsim.S -> linux/arch/ia64/hp/sim/hpsim.S
-irq_lsapic.c -> linux/arch/ia64/kernel/irq_lsapic.c
 linuxextable.c -> linux/kernel/extable.c
 machvec.c -> linux/arch/ia64/kernel/machvec.c
 numa.c -> linux/arch/ia64/mm/numa.c
--- a/xen/arch/ia64/linux/irq_lsapic.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * LSAPIC Interrupt Controller
- *
- * This takes care of interrupts that are generated by the CPU's
- * internal Streamlined Advanced Programmable Interrupt Controller
- * (LSAPIC), such as the ITC and IPI interrupts.
- *
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond
- * Copyright (C) 2000 Hewlett-Packard Co
- * Copyright (C) 2000 David Mosberger-Tang
- */
-
-#include
-#include
-
-static unsigned int
-lsapic_noop_startup (unsigned int irq)
-{
-	return 0;
-}
-
-static void
-lsapic_noop (unsigned int irq)
-{
-	/* nuthing to do... */
-}
-
-hw_irq_controller irq_type_ia64_lsapic = {
-	.typename = "LSAPIC",
-	.startup = lsapic_noop_startup,
-	.shutdown = lsapic_noop,
-	.enable = lsapic_noop,
-	.disable = lsapic_noop,
-	.ack = lsapic_noop,
-	.end = lsapic_noop
-};
--- a/xen/arch/ia64/xen/dom0_ops.c
+++ b/xen/arch/ia64/xen/dom0_ops.c
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
--- a/xen/arch/ia64/xen/domain.c
+++ b/xen/arch/ia64/xen/domain.c
@@ -23,6 +23,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
--- a/xen/arch/ia64/xen/fw_emul.c
+++ b/xen/arch/ia64/xen/fw_emul.c
@@ -31,7 +31,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
--- a/xen/arch/ia64/xen/hypercall.c
+++ b/xen/arch/ia64/xen/hypercall.c
@@ -70,7 +70,8 @@ static long __do_pirq_guest_eoi(struct d
         evtchn_unmask(pirq_to_evtchn(d, pirq));
         spin_unlock(&d->event_lock);
     }
-    return pirq_guest_eoi(pirq);
+    pirq_guest_eoi(pirq_info(d, pirq));
+    return 0;
 }

 long do_pirq_guest_eoi(int pirq)
--- a/xen/arch/ia64/xen/irq.c
+++ b/xen/arch/ia64/xen/irq.c
@@ -95,8 +95,11 @@ int __init init_irq_data(void)
         struct irq_desc *desc = irq_to_desc(irq);

         desc->irq = irq;
-        init_one_irq_desc(desc);
+        if (init_one_irq_desc(desc))
+            BUG();
     }
+
+    return 0;
 }

 void __do_IRQ_guest(int irq);
@@ -105,14 +108,14 @@ void __do_IRQ_guest(int irq);
  * Special irq handlers.
  */

-static void ack_none(unsigned int irq)
+static void ack_none(struct irq_desc *desc)
 {
 	/*
 	 * 'what should we do if we get a hw irq event on an illegal vector'.
 	 * each architecture has to answer this themselves, it doesn't deserve
 	 * a generic callback i think.
 	 */
-	printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
+	printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", desc->irq, smp_processor_id());
 }

 hw_irq_controller no_irq_type = {
@@ -147,11 +150,11 @@ fastcall unsigned int __do_IRQ(unsigned
 		/*
 		 * No locking required for CPU-local interrupts:
 		 */
-		desc->handler->ack(irq);
+		desc->handler->ack(desc);
 		local_irq_enable();
 		desc->action->handler(irq, desc->action->dev_id, regs);
 		local_irq_disable();
-		desc->handler->end(irq);
+		desc->handler->end(desc);
 		return 1;
 	}

@@ -163,7 +166,7 @@ fastcall unsigned int __do_IRQ(unsigned
 		return 1;
 	}

-	desc->handler->ack(irq);
+	desc->handler->ack(desc);
 	status = desc->status & ~IRQ_REPLAY;
 	status |= IRQ_PENDING; /* we _want_ to handle it */

@@ -215,7 +218,7 @@ out:
 	 * The ->end() handler has to deal with interrupts which got
 	 * disabled while the handler was running.
 	 */
-	desc->handler->end(irq);
+	desc->handler->end(desc);
 	spin_unlock(&desc->lock);

 	return 1;
 }
@@ -248,10 +251,10 @@ int setup_vector(unsigned int vector, st
     *p = new;

-    desc->depth = 0;
+    desc->arch.depth = 0;
     desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_GUEST);
-    desc->handler->startup(vector);
-    desc->handler->enable(vector);
+    desc->handler->startup(desc);
+    desc->handler->enable(desc);
     desc->arch.vector = vector;

     spin_unlock_irqrestore(&desc->lock,flags);
@@ -287,9 +290,9 @@ void __init release_irq_vector(unsigned
     spin_lock_irqsave(&desc->lock, flags);
     clear_bit(vec, ia64_xen_vector);
     desc->action = NULL;
-    desc->depth = 1;
+    desc->arch.depth = 1;
     desc->status |= IRQ_DISABLED;
-    desc->handler->shutdown(vec);
+    desc->handler->shutdown(desc);
     desc->arch.vector = -1;

     spin_unlock_irqrestore(&desc->lock, flags);
@@ -336,7 +339,7 @@ static void _irq_guest_eoi(irq_desc_t *d
         clear_pirq_eoi(action->guest[i], vector);

     desc->status &= ~(IRQ_INPROGRESS|IRQ_GUEST_EOI_PENDING);
-    desc->handler->enable(vector);
+    desc->handler->enable(desc);
 }

 static struct timer irq_guest_eoi_timer[NR_IRQS];
@@ -383,7 +386,7 @@ void __do_IRQ_guest(int irq)
         if ( already_pending == action->nr_guests )
         {
             stop_timer(&irq_guest_eoi_timer[irq]);
-            desc->handler->disable(irq);
+            desc->handler->disable(desc);
             desc->status |= IRQ_GUEST_EOI_PENDING;
             for ( i = 0; i < already_pending; ++i )
             {
@@ -417,31 +420,28 @@ static int pirq_acktype(int irq)
     return ACKTYPE_NONE;
 }

-int pirq_guest_eoi(struct pirq *pirq)
+void pirq_guest_eoi(struct pirq *pirq)
 {
     irq_desc_t *desc;
     irq_guest_action_t *action;

-    desc = &irq_desc[irq];
+    desc = &irq_desc[pirq->pirq];
     spin_lock_irq(&desc->lock);

     action = (irq_guest_action_t *)desc->action;

     if ( action->ack_type == ACKTYPE_NONE )
     {
         ASSERT(!pirq->masked);
-        stop_timer(&irq_guest_eoi_timer[irq]);
+        stop_timer(&irq_guest_eoi_timer[pirq->pirq]);
         _irq_guest_eoi(desc);
     }

     if ( test_and_clear_bool(pirq->masked) && (--action->in_flight == 0) )
     {
         ASSERT(action->ack_type == ACKTYPE_UNMASK);
-        desc->handler->end(irq);
+        desc->handler->end(desc);
     }

     spin_unlock_irq(&desc->lock);
-
-    return 0;
-
 }

 int pirq_guest_unmask(struct domain *d)
@@ -505,12 +505,12 @@ int pirq_guest_bind(struct vcpu *v, stru
     action->nr_guests = 0;
     action->in_flight = 0;
     action->shareable = will_share;
-    action->ack_type = pirq_acktype(irq);
+    action->ack_type = pirq_acktype(pirq->pirq);

-    desc->depth = 0;
+    desc->arch.depth = 0;
     desc->status |= IRQ_GUEST;
     desc->status &= ~IRQ_DISABLED;
-    desc->handler->startup(pirq->pirq);
+    desc->handler->startup(desc);

     /* Attempt to bind the interrupt target to the correct CPU. */
 #if 0 /* FIXME CONFIG_SMP ??? */
@@ -549,9 +549,9 @@ int pirq_guest_bind(struct vcpu *v, stru
     return rc;
 }

-void pirq_guest_unbind(struct domain *d, int irq, struct pirq *pirq)
+void pirq_guest_unbind(struct domain *d, struct pirq *pirq)
 {
-    irq_desc_t *desc = &irq_desc[irq];
+    irq_desc_t *desc = &irq_desc[pirq->pirq];
     irq_guest_action_t *action;
     unsigned long flags;
     int i;
@@ -569,17 +569,17 @@ void pirq_guest_unbind(struct domain *d,
     if ( action->ack_type == ACKTYPE_UNMASK )
         if ( test_and_clear_bool(pirq->masked) &&
              (--action->in_flight == 0) )
-            desc->handler->end(irq);
+            desc->handler->end(desc);

     if ( !action->nr_guests )
     {
         BUG_ON(action->in_flight != 0);
         desc->action = NULL;
         xfree(action);
-        desc->depth = 1;
+        desc->arch.depth = 1;
         desc->status |= IRQ_DISABLED;
         desc->status &= ~IRQ_GUEST;
-        desc->handler->shutdown(irq);
+        desc->handler->shutdown(desc);
     }

     spin_unlock_irqrestore(&desc->lock, flags);
@@ -610,10 +610,24 @@ void pirq_set_affinity(struct domain *d,
     /* FIXME */
 }
+void (pirq_cleanup_check)(struct pirq *pirq, struct domain *d)
+{
+    /*
+     * Check whether all fields have their default values, and delete
+     * the entry from the tree if so.
+     *
+     * NB: Common parts were already checked.
+     */
+    if ( !pt_pirq_cleanup_check(&pirq->arch.dpci) )
+        return;
+
+    if ( radix_tree_delete(&d->pirq_tree, pirq->pirq) != pirq )
+        BUG();
+}

 /*
  * Exit an interrupt context. Process softirqs if needed and possible:
  */
 void irq_exit(void)
 {
-    sub_preempt_count(IRQ_EXIT_OFFSET);
+    preempt_count() -= IRQ_EXIT_OFFSET;/* sub_preempt_count(IRQ_EXIT_OFFSET); */
 }
--- a/xen/arch/ia64/xen/mm.c
+++ b/xen/arch/ia64/xen/mm.c
@@ -176,6 +176,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
--- a/xen/arch/ia64/xen/pci.c
+++ b/xen/arch/ia64/xen/pci.c
@@ -83,54 +83,57 @@ pci_sal_write (unsigned int seg, unsigne

 uint8_t pci_conf_read8(
-    unsigned int bus, unsigned int dev, unsigned int func, unsigned int reg)
+    unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
+    unsigned int reg)
 {
     uint32_t value;
-    BUG_ON((bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
-    pci_sal_read(0, bus, (dev<<3)|func, reg, 1, &value);
+    BUG_ON((seg > 65535) || (bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
+    pci_sal_read(seg, bus, (dev<<3)|func, reg, 1, &value);
     return (uint8_t)value;
 }

 uint16_t pci_conf_read16(
-    unsigned int bus, unsigned int dev, unsigned int func, unsigned int reg)
+    unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
+    unsigned int reg)
 {
     uint32_t value;
-    BUG_ON((bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
-    pci_sal_read(0, bus, (dev<<3)|func, reg, 2, &value);
+    BUG_ON((seg > 65535) || (bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
+    pci_sal_read(seg, bus, (dev<<3)|func, reg, 2, &value);
     return (uint16_t)value;
 }

 uint32_t pci_conf_read32(
-    unsigned int bus, unsigned int dev, unsigned int func, unsigned int reg)
+    unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
+    unsigned int reg)
 {
     uint32_t value;
-    BUG_ON((bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
-    pci_sal_read(0, bus, (dev<<3)|func, reg, 4, &value);
+    BUG_ON((seg > 65535) || (bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
+    pci_sal_read(seg, bus, (dev<<3)|func, reg, 4, &value);
     return (uint32_t)value;
 }

 void pci_conf_write8(
-    unsigned int bus, unsigned int dev, unsigned int func, unsigned int reg,
-    uint8_t data)
+    unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
+    unsigned int reg, uint8_t data)
 {
-    BUG_ON((bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
-    pci_sal_write(0, bus, (dev<<3)|func, reg, 1, data);
+    BUG_ON((seg > 65535) || (bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
+    pci_sal_write(seg, bus, (dev<<3)|func, reg, 1, data);
 }

 void pci_conf_write16(
-    unsigned int bus, unsigned int dev, unsigned int func, unsigned int reg,
-    uint16_t data)
+    unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
+    unsigned int reg, uint16_t data)
 {
-    BUG_ON((bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
-    pci_sal_write(0, bus, (dev<<3)|func, reg, 2, data);
+    BUG_ON((seg > 65535) || (bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
+    pci_sal_write(seg, bus, (dev<<3)|func, reg, 2, data);
 }

 void pci_conf_write32(
-    unsigned int bus, unsigned int dev, unsigned int func, unsigned int reg,
-    uint32_t data)
+    unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
+    unsigned int reg, uint32_t data)
 {
-    BUG_ON((bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
-    pci_sal_write(0, bus, (dev<<3)|func, reg, 4, data);
+    BUG_ON((seg > 65535) || (bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
+    pci_sal_write(seg, bus, (dev<<3)|func, reg, 4, data);
 }

 int pci_find_ext_capability(int seg, int bus, int devfn, int cap)
--- a/xen/arch/ia64/xen/tlb_track.c
+++ b/xen/arch/ia64/xen/tlb_track.c
@@ -22,6 +22,7 @@

 #include
 #include
+#include
 #include		/* for IA64_RR_SHIFT */
 #include		/* for VRN7 */
 #include		/* for PSCB() */
--- a/xen/arch/ia64/xen/vhpt.c
+++ b/xen/arch/ia64/xen/vhpt.c
@@ -516,7 +516,7 @@ void domain_flush_tlb_vhpt(struct domain
         on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
     else
         on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1);
-    cpumask_clear_cpu(d->domain_dirty_cpumask);
+    cpumask_clear(d->domain_dirty_cpumask);
 }

 void flush_tlb_for_log_dirty(struct domain *d)
@@ -545,7 +545,7 @@ void flush_tlb_for_log_dirty(struct doma
     } else {
         on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1);
     }
-    cpumask_clear_cpu(d->domain_dirty_cpumask);
+    cpumask_clear(d->domain_dirty_cpumask);
 }

 void flush_tlb_mask(const cpumask_t *mask)
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -27,7 +27,7 @@
 #include
 #include
 #include		/* for hvm_acpi_power_button */
-#include		/* for arch_do_domctl */
+#include		/* for arch_do_domctl */
 #include
 #include
 #include
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -365,8 +365,10 @@ static long evtchn_bind_pirq(evtchn_bind

     bind->port = port;

+#ifdef CONFIG_X86
     if ( is_hvm_domain(d) && domain_pirq_to_irq(d, pirq) > 0 )
         map_domain_emuirq_pirq(d, pirq, IRQ_PT);
+#endif

  out:
     spin_unlock(&d->event_lock);
@@ -421,8 +423,10 @@ static long __evtchn_close(struct domain
             pirq->evtchn = 0;
             pirq_cleanup_check(pirq, d1);
             unlink_pirq_port(chn1, d1->vcpu[chn1->notify_vcpu_id]);
+#ifdef CONFIG_X86
             if ( is_hvm_domain(d1) && domain_pirq_to_irq(d1, pirq->pirq) > 0 )
                 unmap_domain_pirq_emuirq(d1, pirq->pirq);
+#endif
             break;
         }
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -42,7 +42,12 @@
 #include
 #include
 #include
+#ifdef CONFIG_X86
 #include
+#else
+#define p2m_pod_offline_or_broken_hit(pg) 0
+#define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
+#endif

 /*
  * Comma-separated list of hexadecimal page numbers containing bad bytes.
--- a/xen/drivers/passthrough/vtd/dmar.c
+++ b/xen/drivers/passthrough/vtd/dmar.c
@@ -809,6 +809,7 @@ int platform_supports_intremap(void)
     return ((dmar_flags & flags) == DMAR_INTR_REMAP);
 }

+#ifdef CONFIG_X86
 int platform_supports_x2apic(void)
 {
     unsigned int flags = 0;
@@ -819,3 +820,4 @@ int platform_supports_x2apic(void)
         flags = DMAR_INTR_REMAP | DMAR_X2APIC_OPT_OUT;
     return ((dmar_flags & flags) == DMAR_INTR_REMAP);
 }
+#endif
--- a/xen/drivers/passthrough/vtd/intremap.c
+++ b/xen/drivers/passthrough/vtd/intremap.c
@@ -47,8 +47,8 @@
 })
 #define __ioapic_write_entry(apic, pin, raw, ent) ({ \
     ASSERT(raw); \
-    __io_apic_write(apic, 0x10 + 2 * (pin), ((u32 *)&_e_)[0]); \
-    __io_apic_write(apic, 0x11 + 2 * (pin), ((u32 *)&_e_)[1]); \
+    __io_apic_write(apic, 0x10 + 2 * (pin), ((u32 *)&(ent))[0]); \
+    __io_apic_write(apic, 0x11 + 2 * (pin), ((u32 *)&(ent))[1]); \
 })
 #else
 #include
@@ -392,7 +392,7 @@ unsigned int io_apic_read_remap_rte(
          ( (index = apic_pin_2_ir_idx[apic][ioapic_pin]) < 0 ) )
         return __io_apic_read(apic, reg);

-    old_rte = __ioapic_read_entry(apic, ioapic_pin, TRUE);
+    old_rte = __ioapic_read_entry(apic, ioapic_pin, 1);

     if ( remap_entry_to_ioapic_rte(iommu, index, &old_rte) )
         return __io_apic_read(apic, reg);
@@ -420,7 +420,7 @@ void io_apic_write_remap_rte(
         return;
     }

-    old_rte = __ioapic_read_entry(apic, ioapic_pin, TRUE);
+    old_rte = __ioapic_read_entry(apic, ioapic_pin, 1);

     remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;

@@ -440,7 +440,7 @@ void io_apic_write_remap_rte(
         __io_apic_write(apic, reg & ~1, *(u32 *)&old_rte);
     }
     else
-        __ioapic_write_entry(apic, ioapic_pin, TRUE, old_rte);
+        __ioapic_write_entry(apic, ioapic_pin, 1, old_rte);
 }

 #if defined(__i386__) || defined(__x86_64__)
@@ -838,6 +838,8 @@ out:
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 }

+#ifndef __ia64__
+
 /*
  * This function is used to enable Interrupt remapping when
  * enable x2apic
@@ -912,3 +914,5 @@ void iommu_disable_x2apic_IR(void)
     for_each_drhd_unit ( drhd )
         disable_qinval(drhd->iommu);
 }
+
+#endif /* !__ia64__ */
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -33,9 +33,11 @@
 #include
 #include
 #include
+#ifndef __ia64__
 #include
 #include
 #include
+#endif
 #include "iommu.h"
 #include "dmar.h"
 #include "extern.h"
@@ -990,7 +992,11 @@ static unsigned int dma_msi_startup(stru
     return 0;
 }

+#ifndef __ia64__
 static void dma_msi_end(struct irq_desc *desc, u8 vector)
+#else
+static void dma_msi_end(struct irq_desc *desc)
+#endif
 {
     dma_msi_unmask(desc);
     ack_APIC_irq();
@@ -1790,6 +1796,7 @@ void iommu_pte_flush(struct domain *d, u

 static int vtd_ept_page_compatible(struct iommu *iommu)
 {
+#ifndef __ia64__
     u64 ept_cap, vtd_cap = iommu->cap;

     /* EPT is not initialised yet, so we must check the capability in
@@ -1799,6 +1806,9 @@ static int vtd_ept_page_compatible(struc

     return ( ept_has_2mb(ept_cap) == cap_sps_2mb(vtd_cap) &&
              ept_has_1gb(ept_cap) == cap_sps_1gb(vtd_cap) );
+#else
+    return 0;
+#endif
 }

 /*
@@ -1806,6 +1816,7 @@ static int vtd_ept_page_compatible(struc
  */
 void iommu_set_pgd(struct domain *d)
 {
+#ifndef __ia64__
     struct hvm_iommu *hd = domain_hvm_iommu(d);
     mfn_t pgd_mfn;

@@ -1816,6 +1827,7 @@ void iommu_set_pgd(struct domain *d)

     pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
     hd->pgd_maddr = pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
+#endif
 }

 static int rmrr_identity_mapping(struct domain *d,
@@ -2107,7 +2119,7 @@ int __init intel_vtd_setup(void)
             iommu_intremap = 0;

         if ( !vtd_ept_page_compatible(iommu) )
-            iommu_hap_pt_share = FALSE;
+            iommu_hap_pt_share = 0;

         ret = iommu_set_interrupt(iommu);
         if ( ret < 0 )
--- a/xen/include/asm-ia64/config.h
+++ b/xen/include/asm-ia64/config.h
@@ -211,10 +211,6 @@ void sort_main_extable(void);
 // see common/keyhandler.c
 #define nop() asm volatile ("nop 0")

-// from include/linux/preempt.h (needs including from interrupt.h or smp.h)
-#define preempt_enable() do { } while (0)
-#define preempt_disable() do { } while (0)
-
 // needed for include/xen/linuxtime.h
 typedef s64 time_t;
 typedef s64 suseconds_t;
--- a/xen/include/asm-ia64/domain.h
+++ b/xen/include/asm-ia64/domain.h
@@ -317,12 +317,8 @@ struct arch_vcpu {
     cpumask_t cache_coherent_map;
 };

-struct arch_pirq {
-    struct hvm_pirq_dpci dpci;
-};
-
 #define pirq_dpci(pirq) ((pirq) ? &(pirq)->arch.dpci : NULL)
-#define dpci_pirq(dpci) container_of(dpci, struct pirq, arch.dpci)
+#define dpci_pirq(dp) container_of(dp, struct pirq, arch.dpci)

 #define alloc_pirq_struct(d) ({ \
     struct pirq *pirq = xmalloc(struct pirq); \
--- a/xen/include/asm-ia64/hvm/irq.h
+++ b/xen/include/asm-ia64/hvm/irq.h
@@ -22,7 +22,7 @@
 #ifndef __ASM_IA64_HVM_IRQ_H__
 #define __ASM_IA64_HVM_IRQ_H__

-#include
+#include

 #define VIOAPIC_NUM_PINS 48
--- a/xen/include/asm-ia64/linux-xen/asm/hw_irq.h
+++ b/xen/include/asm-ia64/linux-xen/asm/hw_irq.h
@@ -79,8 +79,6 @@ enum {
 extern __u8 isa_irq_to_vector_map[16];
 #define isa_irq_to_vector(x) isa_irq_to_vector_map[(x)]

-extern hw_irq_controller irq_type_ia64_lsapic; /* CPU-internal interrupt controller */
-
 extern int assign_irq_vector (int irq); /* allocate a free vector */
 extern void free_irq_vector (int vector);
 extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
--- a/xen/include/asm-ia64/linux-xen/asm/irq.h
+++ b/xen/include/asm-ia64/linux-xen/asm/irq.h
@@ -15,11 +15,18 @@
 #define NR_IRQS 256

 #ifdef XEN
+#include
+
 struct arch_irq_desc {
         int vector;
+        unsigned int depth;
         cpumask_var_t cpu_mask;
 };

+struct arch_pirq {
+        struct hvm_pirq_dpci dpci;
+};
+
 int init_irq_data(void);
 #endif

@@ -66,6 +73,8 @@ extern int request_irq_vector(unsigned i
 	while(!x)

 #define domain_pirq_to_irq(d, irq) domain_irq_to_vector(d, irq)
+
+#define hvm_domain_use_pirq(d, info) 0
 #endif

 #endif /* _ASM_IA64_IRQ_H */
--- a/xen/include/asm-ia64/linux-xen/asm/spinlock.h
+++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h
@@ -35,6 +35,17 @@ typedef struct {
 } raw_rwlock_t;
 #define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { 0, 0 }

+#define _raw_read_lock(rw) \
+do { \
+	raw_rwlock_t *__read_lock_ptr = (rw); \
+ \
+	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
+		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
+		while (*(volatile int *)__read_lock_ptr < 0) \
+			cpu_relax(); \
+	} \
+} while (0)
+
 #define _raw_read_unlock(rw) \
 do { \
 	raw_rwlock_t *__read_lock_ptr = (rw); \
@@ -68,7 +79,14 @@ do { \

 #endif /* !ASM_SUPPORTED */

-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define _raw_read_trylock(rw) ({ \
+	raw_rwlock_t *__read_lock_ptr = (rw); \
+	int orig = ia64_fetchadd(1, (int *) __read_lock_ptr, acq); \
+ \
+	if (unlikely(orig < 0)) \
+		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
+	(orig >= 0); \
+})

 #define _raw_write_unlock(x) \
 ({ \
--- a/xen/include/asm-ia64/linux-xen/linux/hardirq.h
+++ b/xen/include/asm-ia64/linux-xen/linux/hardirq.h
@@ -107,7 +107,8 @@ static inline void account_system_vtime(
 #define irq_enter() \
 	do { \
 		account_system_vtime(current); \
-		add_preempt_count(HARDIRQ_OFFSET); \
+		/*add_preempt_count(HARDIRQ_OFFSET);*/ \
+		preempt_count() += HARDIRQ_OFFSET; \
 	} while (0)

 extern void irq_exit(void);
--- a/xen/include/asm-ia64/xenoprof.h
+++ b/xen/include/asm-ia64/xenoprof.h
@@ -24,6 +24,8 @@
 #ifndef __ASM_XENOPROF_H__
 #define __ASM_XENOPROF_H__

+#include
+
 int xenoprof_arch_init(int *num_events, char *cpu_type);
 int xenoprof_arch_reserve_counters(void);
 int xenoprof_arch_counter(XEN_GUEST_HANDLE(void) arg);
--- a/xen/include/asm-x86/hypercall.h
+++ b/xen/include/asm-x86/hypercall.h
@@ -7,7 +7,6 @@
 #include
 #include		/* for do_mca */
-#include		/* for arch_do_domctl */
 #include

 /*
@@ -97,11 +96,6 @@ arch_do_vcpu_op(
     int cmd, struct vcpu *v, XEN_GUEST_HANDLE(void) arg);

 extern long
-arch_do_domctl(
-    struct xen_domctl *domctl,
-    XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
-
-extern long
 arch_do_sysctl(
     struct xen_sysctl *op,
     XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl);
--- a/xen/include/xen/acpi.h
+++ b/xen/include/xen/acpi.h
@@ -360,7 +360,9 @@ static inline unsigned int acpi_get_csta
 static inline void acpi_set_cstate_limit(unsigned int new_limit) { return; }
 #endif

+#ifdef XEN_GUEST_HANDLE
 int acpi_set_pdc_bits(u32 acpi_id, XEN_GUEST_HANDLE(uint32));
+#endif
 int arch_acpi_set_pdc_bits(u32 acpi_id, u32 *, u32 mask);

 #ifdef CONFIG_ACPI_NUMA
--- a/xen/include/xen/efi.h
+++ b/xen/include/xen/efi.h
@@ -1,10 +1,12 @@
 #ifndef __XEN_EFI_H__
 #define __XEN_EFI_H__

+#ifndef __ASSEMBLY__
 #include
+#endif

 #if defined(__ia64__)
-# #include
+# include_next
 #else
 # if defined(__i386__)
@@ -27,6 +29,8 @@ extern struct efi efi;

 #endif

+#ifndef __ASSEMBLY__
+
 union xenpf_efi_info;
 union compat_pf_efi_info;

@@ -44,4 +48,6 @@ int efi_runtime_call(struct xenpf_efi_ru
 int efi_compat_get_info(uint32_t idx, union compat_pf_efi_info *);
 int efi_compat_runtime_call(struct compat_pf_efi_runtime_call *);

+#endif /* !__ASSEMBLY__ */
+
 #endif /* __XEN_EFI_H__ */
--- a/xen/include/xen/hypercall.h
+++ b/xen/include/xen/hypercall.h
@@ -36,6 +36,11 @@ do_domctl(
     XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);

 extern long
+arch_do_domctl(
+    struct xen_domctl *domctl,
+    XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
+
+extern long
 do_sysctl(
     XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl);
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -35,7 +35,11 @@ extern bool_t iommu_debug;
 extern bool_t amd_iommu_perdev_intremap;

 /* Does this domain have a P2M table we can use as its IOMMU pagetable? */
+#ifndef __ia64__
 #define iommu_use_hap_pt(d) (hap_enabled(d) && iommu_hap_pt_share)
+#else
+#define iommu_use_hap_pt(d) 0
+#endif

 extern struct rangeset *mmio_ro_ranges;
--- a/xen/include/xen/irq.h
+++ b/xen/include/xen/irq.h
@@ -27,6 +27,7 @@ struct irqaction {
 #define IRQ_GUEST       (1u<<4) /* IRQ is handled by guest OS(es) */
 #define IRQ_MOVE_PENDING (1u<<5) /* IRQ is migrating to another CPUs */
 #define IRQ_PER_CPU     (1u<<6) /* IRQ is per CPU */
+#define IRQ_GUEST_EOI_PENDING (1u<<7) /* IRQ was disabled, pending a guest EOI */

 /* Special IRQ numbers. */
 #define AUTO_ASSIGN_IRQ (-1)
@@ -46,7 +47,11 @@ struct hw_interrupt_type {
     void (*enable)(struct irq_desc *);
     void (*disable)(struct irq_desc *);
     void (*ack)(struct irq_desc *);
+#ifdef CONFIG_X86
     void (*end)(struct irq_desc *, u8 vector);
+#else
+    void (*end)(struct irq_desc *);
+#endif
     void (*set_affinity)(struct irq_desc *, const cpumask_t *);
 };
--- a/xen/include/xen/pci.h
+++ b/xen/include/xen/pci.h
@@ -39,7 +39,9 @@ struct pci_dev_info {
         u8 bus;
         u8 devfn;
     } physfn;
-    vmask_t used_vectors;
+#ifdef CONFIG_X86
+    vmask_t used_vectors;
+#endif
 };

 struct pci_dev {
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -109,6 +109,10 @@ struct xsm_operations {
     int (*add_range) (struct domain *d, char *name, unsigned long s, unsigned long e);
     int (*remove_range) (struct domain *d, char *name, unsigned long s, unsigned long e);

+    int (*test_assign_device) (uint32_t machine_bdf);
+    int (*assign_device) (struct domain *d, uint32_t machine_bdf);
+    int (*deassign_device) (struct domain *d, uint32_t machine_bdf);
+
     long (*__do_xsm_op) (XEN_GUEST_HANDLE(xsm_op_t) op);

 #ifdef CONFIG_X86
@@ -146,9 +150,6 @@ struct xsm_operations {
                                 struct page_info *page);
     int (*add_to_physmap) (struct domain *d1, struct domain *d2);
     int (*sendtrigger) (struct domain *d);
-    int (*test_assign_device) (uint32_t machine_bdf);
-    int (*assign_device) (struct domain *d, uint32_t machine_bdf);
-    int (*deassign_device) (struct domain *d, uint32_t machine_bdf);
     int (*bind_pt_irq) (struct domain *d, struct xen_domctl_bind_pt_irq *bind);
     int (*pin_mem_cacheattr) (struct domain *d);
     int (*ext_vcpucontext) (struct domain *d, uint32_t cmd);
@@ -428,6 +429,21 @@ static inline int xsm_remove_range (stru
     return xsm_call(remove_range(d, name, s, e));
 }

+static inline int xsm_test_assign_device(uint32_t machine_bdf)
+{
+    return xsm_call(test_assign_device(machine_bdf));
+}
+
+static inline int xsm_assign_device(struct domain *d, uint32_t machine_bdf)
+{
+    return xsm_call(assign_device(d, machine_bdf));
+}
+
+static inline int xsm_deassign_device(struct domain *d, uint32_t machine_bdf)
+{
+    return xsm_call(deassign_device(d, machine_bdf));
+}
+
 static inline long __do_xsm_op (XEN_GUEST_HANDLE(xsm_op_t) op)
 {
 #ifdef XSM_ENABLE
@@ -612,21 +628,6 @@ static inline int xsm_sendtrigger(struct
     return xsm_call(sendtrigger(d));
 }

-static inline int xsm_test_assign_device(uint32_t machine_bdf)
-{
-    return xsm_call(test_assign_device(machine_bdf));
-}
-
-static inline int xsm_assign_device(struct domain *d, uint32_t machine_bdf)
-{
-    return xsm_call(assign_device(d, machine_bdf));
-}
-
-static inline int xsm_deassign_device(struct domain *d, uint32_t machine_bdf)
-{
-    return xsm_call(deassign_device(d, machine_bdf));
-}
-
 static inline int xsm_bind_pt_irq(struct domain *d,
                                   struct xen_domctl_bind_pt_irq *bind)
 {