[Xen-changelog] Reindent more xenlinux files. Remove defunct header file

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Reindent more xenlinux files. Remove defunct header file.
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 29 Sep 2005 10:16:11 +0000
Delivery-date: Thu, 29 Sep 2005 10:13:43 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID c317e0aca9f12b086bdfe1f442a7c2221605a2bd
# Parent  e04b0805febb44a922fcdd4744b9e71a65064417
Reindent more xenlinux files. Remove defunct header file.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
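
The reindentation below is mechanical: these files move from the old four-space, space-indented Xen style to the tab-indented Linux kernel style (8-column tabs, no padding spaces inside conditions, single statements unbraced). A minimal before/after sketch of the recurring pattern, composed for illustration rather than lifted from any one hunk:

	/* old Xen style */
	if ( irq == NR_IRQS )
	    panic("No available IRQ to bind to: increase NR_IRQS!\n");

	/* new Linux style */
	if (irq == NR_IRQS)
		panic("No available IRQ to bind to: increase NR_IRQS!\n");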

diff -r e04b0805febb -r c317e0aca9f1 linux-2.6-xen-sparse/arch/xen/kernel/devmem.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/devmem.c     Thu Sep 29 08:59:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/devmem.c     Thu Sep 29 10:10:27 2005
@@ -144,3 +144,13 @@
        .mmap           = mmap_mem,
        .open           = open_mem,
 };
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
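
The block appended above is an Emacs file-local variable list: when the file is opened, Emacs switches to the "linux" C style with literal tabs and an 8-column offset, keeping later edits consistent with the reindentation. For comparison (not part of the patch), the rough vim equivalent would be a one-line modeline:

	/* vim: set ts=8 sw=8 noet: */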
diff -r e04b0805febb -r c317e0aca9f1 linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c     Thu Sep 29 08:59:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c     Thu Sep 29 10:10:27 2005
@@ -3,7 +3,7 @@
  * 
  * Communication via Xen event channels.
  * 
- * Copyright (c) 2002-2004, K A Fraser
+ * Copyright (c) 2002-2005, K A Fraser
  * 
  * This file may be distributed separately from the Linux kernel, or
  * incorporated into other software packages, subject to the following license:
@@ -73,23 +73,23 @@
 static u8  cpu_evtchn[NR_EVENT_CHANNELS];
 static u32 cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/32];
 
-#define active_evtchns(cpu,sh,idx)              \
-    ((sh)->evtchn_pending[idx] &                \
-     cpu_evtchn_mask[cpu][idx] &                \
-     ~(sh)->evtchn_mask[idx])
+#define active_evtchns(cpu,sh,idx)             \
+       ((sh)->evtchn_pending[idx] &            \
+        cpu_evtchn_mask[cpu][idx] &            \
+        ~(sh)->evtchn_mask[idx])
 
 void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 {
-    clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
-    set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
-    cpu_evtchn[chn] = cpu;
+       clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
+       set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
+       cpu_evtchn[chn] = cpu;
 }
 
 #else
 
-#define active_evtchns(cpu,sh,idx)              \
-    ((sh)->evtchn_pending[idx] &                \
-     ~(sh)->evtchn_mask[idx])
+#define active_evtchns(cpu,sh,idx)             \
+       ((sh)->evtchn_pending[idx] &            \
+        ~(sh)->evtchn_mask[idx])
 
 void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 {
@@ -108,9 +108,9 @@
 #elif defined (__x86_64__)
 #define IRQ_REG orig_rax
 #endif
-#define do_IRQ(irq, regs) do {                  \
-    (regs)->IRQ_REG = (irq);                    \
-    do_IRQ((regs));                             \
+#define do_IRQ(irq, regs) do {                 \
+       (regs)->IRQ_REG = (irq);                \
+       do_IRQ((regs));                         \
 } while (0)
 #endif
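
The wrapper above works because of a C preprocessor rule: a macro's own name is not re-expanded inside its replacement list, so the inner do_IRQ((regs)) call reaches the real kernel function rather than recursing. The macro stashes the IRQ number in the saved-register slot that a native interrupt entry would have filled in (orig_rax on x86_64 as shown; presumably orig_eax in the i386 branch just above this hunk), so a call site such as

	do_IRQ(irq, regs);

expands, on x86_64, to

	do { (regs)->orig_rax = (irq); do_IRQ((regs)); } while (0);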
 
@@ -123,249 +123,236 @@
  */
 void force_evtchn_callback(void)
 {
-    (void)HYPERVISOR_xen_version(0, NULL);
+       (void)HYPERVISOR_xen_version(0, NULL);
 }
 EXPORT_SYMBOL(force_evtchn_callback);
 
 /* NB. Interrupts are disabled on entry. */
 asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
 {
-    u32     l1, l2;
-    unsigned int   l1i, l2i, port;
-    int            irq, cpu = smp_processor_id();
-    shared_info_t *s = HYPERVISOR_shared_info;
-    vcpu_info_t   *vcpu_info = &s->vcpu_data[cpu];
-
-    vcpu_info->evtchn_upcall_pending = 0;
-
-    /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
-    l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
-    while ( l1 != 0 )
-    {
-        l1i = __ffs(l1);
-        l1 &= ~(1 << l1i);
+       u32     l1, l2;
+       unsigned int   l1i, l2i, port;
+       int            irq, cpu = smp_processor_id();
+       shared_info_t *s = HYPERVISOR_shared_info;
+       vcpu_info_t   *vcpu_info = &s->vcpu_data[cpu];
+
+       vcpu_info->evtchn_upcall_pending = 0;
+
+       /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
+       l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
+       while (l1 != 0) {
+               l1i = __ffs(l1);
+               l1 &= ~(1 << l1i);
         
-        while ( (l2 = active_evtchns(cpu, s, l1i)) != 0 )
-        {
-            l2i = __ffs(l2);
-            l2 &= ~(1 << l2i);
+               while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
+                       l2i = __ffs(l2);
+                       l2 &= ~(1 << l2i);
             
-            port = (l1i << 5) + l2i;
-            if ( (irq = evtchn_to_irq[port]) != -1 ) {
-                do_IRQ(irq, regs);
-           } else
-                evtchn_device_upcall(port);
-        }
-    }
+                       port = (l1i << 5) + l2i;
+                       if ((irq = evtchn_to_irq[port]) != -1)
+                               do_IRQ(irq, regs);
+                       else
+                               evtchn_device_upcall(port);
+               }
+       }
 }
 EXPORT_SYMBOL(evtchn_do_upcall);
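
evtchn_do_upcall decodes a two-level pending bitmap: evtchn_pending_sel is a selector word in which bit l1i means "word l1i of the shared pending array may have bits set", each selected word is scanned with __ffs, and the port number is rebuilt as (l1i << 5) + l2i, i.e. word index times 32 plus bit index. A self-contained userspace sketch of the same decode, with hypothetical names and no Xen dependencies:

	#include <stdio.h>
	#include <strings.h>                 /* ffs(); __ffs() in the kernel */

	#define WORDS 32

	static unsigned int pending_sel;     /* level-1 selector word  */
	static unsigned int pending[WORDS];  /* level-2 pending words  */

	static void scan_pending(void)
	{
		unsigned int l1 = pending_sel, l2, l1i, l2i;

		pending_sel = 0;
		while (l1 != 0) {
			l1i = ffs(l1) - 1;
			l1 &= ~(1u << l1i);
			while ((l2 = pending[l1i]) != 0) {
				l2i = ffs(l2) - 1;
				pending[l1i] &= ~(1u << l2i);
				printf("port %u\n", (l1i << 5) + l2i);
			}
		}
	}

	int main(void)
	{
		pending[0] = 1u << 3;                  /* port 3  */
		pending[2] = 1u << 7;                  /* port 71 */
		pending_sel = (1u << 0) | (1u << 2);
		scan_pending();                        /* prints 3, then 71 */
		return 0;
	}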
 
 static int find_unbound_irq(void)
 {
-    int irq;
-
-    for ( irq = 0; irq < NR_IRQS; irq++ )
-        if ( irq_bindcount[irq] == 0 )
-            break;
-
-    if ( irq == NR_IRQS )
-        panic("No available IRQ to bind to: increase NR_IRQS!\n");
-
-    return irq;
+       int irq;
+
+       for (irq = 0; irq < NR_IRQS; irq++)
+               if (irq_bindcount[irq] == 0)
+                       break;
+
+       if (irq == NR_IRQS)
+               panic("No available IRQ to bind to: increase NR_IRQS!\n");
+
+       return irq;
 }
 
 int bind_virq_to_irq(int virq)
 {
-    evtchn_op_t op;
-    int evtchn, irq;
-    int cpu = smp_processor_id();
-
-    spin_lock(&irq_mapping_update_lock);
-
-    if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
-    {
-        op.cmd              = EVTCHNOP_bind_virq;
-        op.u.bind_virq.virq = virq;
-        if ( HYPERVISOR_event_channel_op(&op) != 0 )
-            panic("Failed to bind virtual IRQ %d\n", virq);
-        evtchn = op.u.bind_virq.port;
-
-        irq = find_unbound_irq();
-        evtchn_to_irq[evtchn] = irq;
-        irq_to_evtchn[irq]    = evtchn;
-
-        per_cpu(virq_to_irq, cpu)[virq] = irq;
-
-        bind_evtchn_to_cpu(evtchn, cpu);
-    }
-
-    irq_bindcount[irq]++;
-
-    spin_unlock(&irq_mapping_update_lock);
+       evtchn_op_t op;
+       int evtchn, irq;
+       int cpu = smp_processor_id();
+
+       spin_lock(&irq_mapping_update_lock);
+
+       if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
+               op.cmd              = EVTCHNOP_bind_virq;
+               op.u.bind_virq.virq = virq;
+               BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
+               evtchn = op.u.bind_virq.port;
+
+               irq = find_unbound_irq();
+               evtchn_to_irq[evtchn] = irq;
+               irq_to_evtchn[irq]    = evtchn;
+
+               per_cpu(virq_to_irq, cpu)[virq] = irq;
+
+               bind_evtchn_to_cpu(evtchn, cpu);
+       }
+
+       irq_bindcount[irq]++;
+
+       spin_unlock(&irq_mapping_update_lock);
     
-    return irq;
+       return irq;
 }
 EXPORT_SYMBOL(bind_virq_to_irq);
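
bind_virq_to_irq hands the caller a Linux IRQ number for a Xen virtual IRQ, allocating the event channel on first use and reference-counting thereafter; the caller still does its own request_irq. A hedged sketch of a consumer (handler name and flags are illustrative; the handler signature matches the one used further down this file):

	static irqreturn_t my_virq_handler(int irq, void *dev_id,
	                                   struct pt_regs *regs)
	{
		/* handle the virtual IRQ */
		return IRQ_HANDLED;
	}

	/* in some driver's init path */
	int irq = bind_virq_to_irq(VIRQ_DEBUG);
	if (request_irq(irq, my_virq_handler, SA_INTERRUPT,
	                "my-virq", NULL) != 0)
		unbind_virq_from_irq(VIRQ_DEBUG);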
 
 void unbind_virq_from_irq(int virq)
 {
-    evtchn_op_t op;
-    int cpu    = smp_processor_id();
-    int irq    = per_cpu(virq_to_irq, cpu)[virq];
-    int evtchn = irq_to_evtchn[irq];
-
-    spin_lock(&irq_mapping_update_lock);
-
-    if ( --irq_bindcount[irq] == 0 )
-    {
-        op.cmd          = EVTCHNOP_close;
-        op.u.close.dom  = DOMID_SELF;
-        op.u.close.port = evtchn;
-        if ( HYPERVISOR_event_channel_op(&op) != 0 )
-            panic("Failed to unbind virtual IRQ %d\n", virq);
-
-        /*
-         * This is a slight hack. Interdomain ports can be allocated directly 
-         * by userspace, and at that point they get bound by Xen to vcpu 0. We 
-         * therefore need to make sure that if we get an event on an event 
-         * channel we don't know about vcpu 0 handles it. Binding channels to 
-         * vcpu 0 when closing them achieves this.
-         */
-        bind_evtchn_to_cpu(evtchn, 0);
-        evtchn_to_irq[evtchn] = -1;
-        irq_to_evtchn[irq]    = -1;
-        per_cpu(virq_to_irq, cpu)[virq]     = -1;
-    }
-
-    spin_unlock(&irq_mapping_update_lock);
+       evtchn_op_t op;
+       int cpu    = smp_processor_id();
+       int irq    = per_cpu(virq_to_irq, cpu)[virq];
+       int evtchn = irq_to_evtchn[irq];
+
+       spin_lock(&irq_mapping_update_lock);
+
+       if (--irq_bindcount[irq] == 0) {
+               op.cmd          = EVTCHNOP_close;
+               op.u.close.dom  = DOMID_SELF;
+               op.u.close.port = evtchn;
+               BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
+
+               /*
+                * This is a slight hack. Interdomain ports can be allocated 
+                * directly by userspace, and at that point they get bound by 
+                * Xen to vcpu 0. We therefore need to make sure that if we get
+                * an event on an event channel we don't know about vcpu 0 
+                * handles it. Binding channels to vcpu 0 when closing them
+                * achieves this.
+                */
+               bind_evtchn_to_cpu(evtchn, 0);
+               evtchn_to_irq[evtchn] = -1;
+               irq_to_evtchn[irq]    = -1;
+               per_cpu(virq_to_irq, cpu)[virq] = -1;
+       }
+
+       spin_unlock(&irq_mapping_update_lock);
 }
 EXPORT_SYMBOL(unbind_virq_from_irq);
 
 int bind_ipi_to_irq(int ipi)
 {
-    evtchn_op_t op;
-    int evtchn, irq;
-    int cpu = smp_processor_id();
-
-    spin_lock(&irq_mapping_update_lock);
-
-    if ( (evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0 )
-    {
-        op.cmd = EVTCHNOP_bind_ipi;
-        if ( HYPERVISOR_event_channel_op(&op) != 0 )
-            panic("Failed to bind virtual IPI %d on cpu %d\n", ipi, cpu);
-        evtchn = op.u.bind_ipi.port;
-
-        irq = find_unbound_irq();
-        evtchn_to_irq[evtchn] = irq;
-        irq_to_evtchn[irq]    = evtchn;
-
-        per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
-
-        bind_evtchn_to_cpu(evtchn, cpu);
-    } 
-    else
-    {
-        irq = evtchn_to_irq[evtchn];
-    }
-
-    irq_bindcount[irq]++;
-
-    spin_unlock(&irq_mapping_update_lock);
-
-    return irq;
+       evtchn_op_t op;
+       int evtchn, irq;
+       int cpu = smp_processor_id();
+
+       spin_lock(&irq_mapping_update_lock);
+
+       if ((evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0) {
+               op.cmd = EVTCHNOP_bind_ipi;
+               BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
+               evtchn = op.u.bind_ipi.port;
+
+               irq = find_unbound_irq();
+               evtchn_to_irq[evtchn] = irq;
+               irq_to_evtchn[irq]    = evtchn;
+
+               per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
+
+               bind_evtchn_to_cpu(evtchn, cpu);
+       } else {
+               irq = evtchn_to_irq[evtchn];
+       }
+
+       irq_bindcount[irq]++;
+
+       spin_unlock(&irq_mapping_update_lock);
+
+       return irq;
 }
 EXPORT_SYMBOL(bind_ipi_to_irq);
 
 void unbind_ipi_from_irq(int ipi)
 {
-    evtchn_op_t op;
-    int cpu    = smp_processor_id();
-    int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
-    int irq    = evtchn_to_irq[evtchn];
-
-    spin_lock(&irq_mapping_update_lock);
-
-    if ( --irq_bindcount[irq] == 0 )
-    {
-        op.cmd          = EVTCHNOP_close;
-        op.u.close.dom  = DOMID_SELF;
-        op.u.close.port = evtchn;
-        if ( HYPERVISOR_event_channel_op(&op) != 0 )
-            panic("Failed to unbind virtual IPI %d on cpu %d\n", ipi, cpu);
-
-        /* See comments in unbind_virq_from_irq */
-        bind_evtchn_to_cpu(evtchn, 0);
-        evtchn_to_irq[evtchn] = -1;
-        irq_to_evtchn[irq]    = -1;
-        per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
-    }
-
-    spin_unlock(&irq_mapping_update_lock);
+       evtchn_op_t op;
+       int cpu    = smp_processor_id();
+       int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
+       int irq    = evtchn_to_irq[evtchn];
+
+       spin_lock(&irq_mapping_update_lock);
+
+       if (--irq_bindcount[irq] == 0) {
+               op.cmd          = EVTCHNOP_close;
+               op.u.close.dom  = DOMID_SELF;
+               op.u.close.port = evtchn;
+               BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
+
+               /* See comments in unbind_virq_from_irq */
+               bind_evtchn_to_cpu(evtchn, 0);
+               evtchn_to_irq[evtchn] = -1;
+               irq_to_evtchn[irq]    = -1;
+               per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
+       }
+
+       spin_unlock(&irq_mapping_update_lock);
 }
 EXPORT_SYMBOL(unbind_ipi_from_irq);
 
 int bind_evtchn_to_irq(unsigned int evtchn)
 {
-    int irq;
-
-    spin_lock(&irq_mapping_update_lock);
-
-    if ( (irq = evtchn_to_irq[evtchn]) == -1 )
-    {
-        irq = find_unbound_irq();
-        evtchn_to_irq[evtchn] = irq;
-        irq_to_evtchn[irq]    = evtchn;
-    }
-
-    irq_bindcount[irq]++;
-
-    spin_unlock(&irq_mapping_update_lock);
+       int irq;
+
+       spin_lock(&irq_mapping_update_lock);
+
+       if ((irq = evtchn_to_irq[evtchn]) == -1) {
+               irq = find_unbound_irq();
+               evtchn_to_irq[evtchn] = irq;
+               irq_to_evtchn[irq]    = evtchn;
+       }
+
+       irq_bindcount[irq]++;
+
+       spin_unlock(&irq_mapping_update_lock);
     
-    return irq;
+       return irq;
 }
 EXPORT_SYMBOL(bind_evtchn_to_irq);
 
 void unbind_evtchn_from_irq(unsigned int evtchn)
 {
-    int irq = evtchn_to_irq[evtchn];
-
-    spin_lock(&irq_mapping_update_lock);
-
-    if ( --irq_bindcount[irq] == 0 )
-    {
-        evtchn_to_irq[evtchn] = -1;
-        irq_to_evtchn[irq]    = -1;
-    }
-
-    spin_unlock(&irq_mapping_update_lock);
+       int irq = evtchn_to_irq[evtchn];
+
+       spin_lock(&irq_mapping_update_lock);
+
+       if (--irq_bindcount[irq] == 0) {
+               evtchn_to_irq[evtchn] = -1;
+               irq_to_evtchn[irq]    = -1;
+       }
+
+       spin_unlock(&irq_mapping_update_lock);
 }
 EXPORT_SYMBOL(unbind_evtchn_from_irq);
 
 int bind_evtchn_to_irqhandler(
-    unsigned int evtchn,
-    irqreturn_t (*handler)(int, void *, struct pt_regs *),
-    unsigned long irqflags,
-    const char *devname,
-    void *dev_id)
-{
-    unsigned int irq;
-    int retval;
-
-    irq = bind_evtchn_to_irq(evtchn);
-    retval = request_irq(irq, handler, irqflags, devname, dev_id);
-    if ( retval != 0 )
-        unbind_evtchn_from_irq(evtchn);
-
-    return retval;
+       unsigned int evtchn,
+       irqreturn_t (*handler)(int, void *, struct pt_regs *),
+       unsigned long irqflags,
+       const char *devname,
+       void *dev_id)
+{
+       unsigned int irq;
+       int retval;
+
+       irq = bind_evtchn_to_irq(evtchn);
+       retval = request_irq(irq, handler, irqflags, devname, dev_id);
+       if (retval != 0)
+               unbind_evtchn_from_irq(evtchn);
+
+       return retval;
 }
 EXPORT_SYMBOL(bind_evtchn_to_irqhandler);
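
bind_evtchn_to_irqhandler bundles the two steps above (map the event channel to an IRQ, then request_irq) and unwinds the mapping if request_irq fails. Typical frontend usage, sketched with hypothetical names:

	static irqreturn_t ring_int(int irq, void *dev_id, struct pt_regs *regs)
	{
		/* consume responses from a shared ring */
		return IRQ_HANDLED;
	}

	err = bind_evtchn_to_irqhandler(evtchn, ring_int,
	                                SA_SAMPLE_RANDOM, "myfront", info);
	if (err)
		return err;  /* the evtchn->irq mapping was already undone */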
 
 void unbind_evtchn_from_irqhandler(unsigned int evtchn, void *dev_id)
 {
-    unsigned int irq = evtchn_to_irq[evtchn];
-    free_irq(irq, dev_id);
-    unbind_evtchn_from_irq(evtchn);
+       unsigned int irq = evtchn_to_irq[evtchn];
+       free_irq(irq, dev_id);
+       unbind_evtchn_from_irq(evtchn);
 }
 EXPORT_SYMBOL(unbind_evtchn_from_irqhandler);
 
@@ -378,50 +365,50 @@
 /* Rebind an evtchn so that it gets delivered to a specific cpu */
 static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 {
-    evtchn_op_t op;
-    int evtchn;
-
-    spin_lock(&irq_mapping_update_lock);
-    evtchn = irq_to_evtchn[irq];
-    if (!VALID_EVTCHN(evtchn)) {
-        spin_unlock(&irq_mapping_update_lock);
-        return;
-    }
-
-    /* Tell Xen to send future instances of this interrupt to other vcpu. */
-    op.cmd = EVTCHNOP_bind_vcpu;
-    op.u.bind_vcpu.port = evtchn;
-    op.u.bind_vcpu.vcpu = tcpu;
-
-    /*
-     * If this fails, it usually just indicates that we're dealing with a virq 
-     * or IPI channel, which don't actually need to be rebound. Ignore it, 
-     * but don't do the xenlinux-level rebind in that case.
-     */
-    if (HYPERVISOR_event_channel_op(&op) >= 0)
-        bind_evtchn_to_cpu(evtchn, tcpu);
-
-    spin_unlock(&irq_mapping_update_lock);
-
-    /*
-     * Now send the new target processor a NOP IPI. When this returns, it 
-     * will check for any pending interrupts, and so service any that got 
-     * delivered to the wrong processor by mistake.
-     * 
-     * XXX: The only time this is called with interrupts disabled is from the 
-     * hotplug/hotunplug path. In that case, all cpus are stopped with 
-     * interrupts disabled, and the missed interrupts will be picked up when 
-     * they start again. This is kind of a hack.
-     */
-    if (!irqs_disabled())
-        smp_call_function(do_nothing_function, NULL, 0, 0);
+       evtchn_op_t op;
+       int evtchn;
+
+       spin_lock(&irq_mapping_update_lock);
+       evtchn = irq_to_evtchn[irq];
+       if (!VALID_EVTCHN(evtchn)) {
+               spin_unlock(&irq_mapping_update_lock);
+               return;
+       }
+
+       /* Send future instances of this interrupt to other vcpu. */
+       op.cmd = EVTCHNOP_bind_vcpu;
+       op.u.bind_vcpu.port = evtchn;
+       op.u.bind_vcpu.vcpu = tcpu;
+
+       /*
+        * If this fails, it usually just indicates that we're dealing with a 
+        * virq or IPI channel, which don't actually need to be rebound. Ignore
+        * it, but don't do the xenlinux-level rebind in that case.
+        */
+       if (HYPERVISOR_event_channel_op(&op) >= 0)
+               bind_evtchn_to_cpu(evtchn, tcpu);
+
+       spin_unlock(&irq_mapping_update_lock);
+
+       /*
+        * Now send the new target processor a NOP IPI. When this returns, it
+        * will check for any pending interrupts, and so service any that got 
+        * delivered to the wrong processor by mistake.
+        * 
+        * XXX: The only time this is called with interrupts disabled is from
+        * the hotplug/hotunplug path. In that case, all cpus are stopped with 
+        * interrupts disabled, and the missed interrupts will be picked up
+        * when they start again. This is kind of a hack.
+        */
+       if (!irqs_disabled())
+               smp_call_function(do_nothing_function, NULL, 0, 0);
 }
 
 
 static void set_affinity_irq(unsigned irq, cpumask_t dest)
 {
-    unsigned tcpu = first_cpu(dest);
-    rebind_irq_to_cpu(irq, tcpu);
+       unsigned tcpu = first_cpu(dest);
+       rebind_irq_to_cpu(irq, tcpu);
 }
 
 /*
@@ -430,83 +417,82 @@
 
 static unsigned int startup_dynirq(unsigned int irq)
 {
-    int evtchn = irq_to_evtchn[irq];
-
-    if ( !VALID_EVTCHN(evtchn) )
-        return 0;
-    unmask_evtchn(evtchn);
-    return 0;
+       int evtchn = irq_to_evtchn[irq];
+
+       if (!VALID_EVTCHN(evtchn))
+               return 0;
+       unmask_evtchn(evtchn);
+       return 0;
 }
 
 static void shutdown_dynirq(unsigned int irq)
 {
-    int evtchn = irq_to_evtchn[irq];
-
-    if ( !VALID_EVTCHN(evtchn) )
-        return;
-    mask_evtchn(evtchn);
+       int evtchn = irq_to_evtchn[irq];
+
+       if (!VALID_EVTCHN(evtchn))
+               return;
+       mask_evtchn(evtchn);
 }
 
 static void enable_dynirq(unsigned int irq)
 {
-    int evtchn = irq_to_evtchn[irq];
-
-    unmask_evtchn(evtchn);
+       int evtchn = irq_to_evtchn[irq];
+
+       unmask_evtchn(evtchn);
 }
 
 static void disable_dynirq(unsigned int irq)
 {
-    int evtchn = irq_to_evtchn[irq];
-
-    mask_evtchn(evtchn);
+       int evtchn = irq_to_evtchn[irq];
+
+       mask_evtchn(evtchn);
 }
 
 static void ack_dynirq(unsigned int irq)
 {
-    int evtchn = irq_to_evtchn[irq];
-
-    mask_evtchn(evtchn);
-    clear_evtchn(evtchn);
+       int evtchn = irq_to_evtchn[irq];
+
+       mask_evtchn(evtchn);
+       clear_evtchn(evtchn);
 }
 
 static void end_dynirq(unsigned int irq)
 {
-    int evtchn = irq_to_evtchn[irq];
-
-    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
-        unmask_evtchn(evtchn);
+       int evtchn = irq_to_evtchn[irq];
+
+       if (!(irq_desc[irq].status & IRQ_DISABLED))
+               unmask_evtchn(evtchn);
 }
 
 static struct hw_interrupt_type dynirq_type = {
-    "Dynamic-irq",
-    startup_dynirq,
-    shutdown_dynirq,
-    enable_dynirq,
-    disable_dynirq,
-    ack_dynirq,
-    end_dynirq,
-    set_affinity_irq
+       "Dynamic-irq",
+       startup_dynirq,
+       shutdown_dynirq,
+       enable_dynirq,
+       disable_dynirq,
+       ack_dynirq,
+       end_dynirq,
+       set_affinity_irq
 };
 
 static inline void pirq_unmask_notify(int pirq)
 {
-    physdev_op_t op;
-    if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
-    {
-        op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
-        (void)HYPERVISOR_physdev_op(&op);
-    }
+       physdev_op_t op;
+       if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
+               op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
+               (void)HYPERVISOR_physdev_op(&op);
+       }
 }
 
 static inline void pirq_query_unmask(int pirq)
 {
-    physdev_op_t op;
-    op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
-    op.u.irq_status_query.irq = pirq;
-    (void)HYPERVISOR_physdev_op(&op);
-    clear_bit(pirq, &pirq_needs_unmask_notify[0]);
-    if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
-        set_bit(pirq, &pirq_needs_unmask_notify[0]);
+       physdev_op_t op;
+       op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
+       op.u.irq_status_query.irq = pirq;
+       (void)HYPERVISOR_physdev_op(&op);
+       clear_bit(pirq, &pirq_needs_unmask_notify[0]);
+       if (op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
+               set_bit(pirq, &pirq_needs_unmask_notify[0]);
 }
 
 /*
@@ -517,218 +503,222 @@
 
 static unsigned int startup_pirq(unsigned int irq)
 {
-    evtchn_op_t op;
-    int evtchn;
-
-    op.cmd               = EVTCHNOP_bind_pirq;
-    op.u.bind_pirq.pirq  = irq;
-    /* NB. We are happy to share unless we are probing. */
-    op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
-    if ( HYPERVISOR_event_channel_op(&op) != 0 )
-    {
-        if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
-            printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
-        return 0;
-    }
-    evtchn = op.u.bind_pirq.port;
-
-    pirq_query_unmask(irq_to_pirq(irq));
-
-    bind_evtchn_to_cpu(evtchn, 0);
-    evtchn_to_irq[evtchn] = irq;
-    irq_to_evtchn[irq]    = evtchn;
-
-    unmask_evtchn(evtchn);
-    pirq_unmask_notify(irq_to_pirq(irq));
-
-    return 0;
+       evtchn_op_t op;
+       int evtchn;
+
+       op.cmd               = EVTCHNOP_bind_pirq;
+       op.u.bind_pirq.pirq  = irq;
+       /* NB. We are happy to share unless we are probing. */
+       op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
+       if (HYPERVISOR_event_channel_op(&op) != 0) {
+               if ( !probing_irq(irq) )
+                       printk(KERN_INFO "Failed to obtain physical "
+                              "IRQ %d\n", irq);
+               return 0;
+       }
+       evtchn = op.u.bind_pirq.port;
+
+       pirq_query_unmask(irq_to_pirq(irq));
+
+       bind_evtchn_to_cpu(evtchn, 0);
+       evtchn_to_irq[evtchn] = irq;
+       irq_to_evtchn[irq]    = evtchn;
+
+       unmask_evtchn(evtchn);
+       pirq_unmask_notify(irq_to_pirq(irq));
+
+       return 0;
 }
 
 static void shutdown_pirq(unsigned int irq)
 {
-    evtchn_op_t op;
-    int evtchn = irq_to_evtchn[irq];
-
-    if ( !VALID_EVTCHN(evtchn) )
-        return;
-
-    mask_evtchn(evtchn);
-
-    op.cmd          = EVTCHNOP_close;
-    op.u.close.dom  = DOMID_SELF;
-    op.u.close.port = evtchn;
-    if ( HYPERVISOR_event_channel_op(&op) != 0 )
-        panic("Failed to unbind physical IRQ %d\n", irq);
-
-    bind_evtchn_to_cpu(evtchn, 0);
-    evtchn_to_irq[evtchn] = -1;
-    irq_to_evtchn[irq]    = -1;
+       evtchn_op_t op;
+       int evtchn = irq_to_evtchn[irq];
+
+       if (!VALID_EVTCHN(evtchn))
+               return;
+
+       mask_evtchn(evtchn);
+
+       op.cmd          = EVTCHNOP_close;
+       op.u.close.dom  = DOMID_SELF;
+       op.u.close.port = evtchn;
+       BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
+
+       bind_evtchn_to_cpu(evtchn, 0);
+       evtchn_to_irq[evtchn] = -1;
+       irq_to_evtchn[irq]    = -1;
 }
 
 static void enable_pirq(unsigned int irq)
 {
-    int evtchn = irq_to_evtchn[irq];
-    if ( !VALID_EVTCHN(evtchn) )
-        return;
-    unmask_evtchn(evtchn);
-    pirq_unmask_notify(irq_to_pirq(irq));
+       int evtchn = irq_to_evtchn[irq];
+       if (!VALID_EVTCHN(evtchn))
+               return;
+       unmask_evtchn(evtchn);
+       pirq_unmask_notify(irq_to_pirq(irq));
 }
 
 static void disable_pirq(unsigned int irq)
 {
-    int evtchn = irq_to_evtchn[irq];
-    if ( !VALID_EVTCHN(evtchn) )
-        return;
-    mask_evtchn(evtchn);
+       int evtchn = irq_to_evtchn[irq];
+       if (!VALID_EVTCHN(evtchn))
+               return;
+       mask_evtchn(evtchn);
 }
 
 static void ack_pirq(unsigned int irq)
 {
-    int evtchn = irq_to_evtchn[irq];
-    if ( !VALID_EVTCHN(evtchn) )
-        return;
-    mask_evtchn(evtchn);
-    clear_evtchn(evtchn);
+       int evtchn = irq_to_evtchn[irq];
+       if (!VALID_EVTCHN(evtchn))
+               return;
+       mask_evtchn(evtchn);
+       clear_evtchn(evtchn);
 }
 
 static void end_pirq(unsigned int irq)
 {
-    int evtchn = irq_to_evtchn[irq];
-    if ( !VALID_EVTCHN(evtchn) )
-        return;
-    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
-    {
-        unmask_evtchn(evtchn);
-        pirq_unmask_notify(irq_to_pirq(irq));
-    }
+       int evtchn = irq_to_evtchn[irq];
+       if (!VALID_EVTCHN(evtchn))
+               return;
+       if (!(irq_desc[irq].status & IRQ_DISABLED)) {
+               unmask_evtchn(evtchn);
+               pirq_unmask_notify(irq_to_pirq(irq));
+       }
 }
 
 static struct hw_interrupt_type pirq_type = {
-    "Phys-irq",
-    startup_pirq,
-    shutdown_pirq,
-    enable_pirq,
-    disable_pirq,
-    ack_pirq,
-    end_pirq,
-    set_affinity_irq
+       "Phys-irq",
+       startup_pirq,
+       shutdown_pirq,
+       enable_pirq,
+       disable_pirq,
+       ack_pirq,
+       end_pirq,
+       set_affinity_irq
 };
 
 void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
 {
-    int evtchn = irq_to_evtchn[i];
-    shared_info_t *s = HYPERVISOR_shared_info;
-    if ( !VALID_EVTCHN(evtchn) )
-        return;
-    BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
-    synch_set_bit(evtchn, &s->evtchn_pending[0]);
+       int evtchn = irq_to_evtchn[i];
+       shared_info_t *s = HYPERVISOR_shared_info;
+       if (!VALID_EVTCHN(evtchn))
+               return;
+       BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
+       synch_set_bit(evtchn, &s->evtchn_pending[0]);
 }
 
 void irq_suspend(void)
 {
-    int pirq, virq, irq, evtchn;
-    int cpu = smp_processor_id(); /* XXX */
-
-    /* Unbind VIRQs from event channels. */
-    for ( virq = 0; virq < NR_VIRQS; virq++ )
-    {
-        if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
-            continue;
-        evtchn = irq_to_evtchn[irq];
-
-        /* Mark the event channel as unused in our table. */
-        evtchn_to_irq[evtchn] = -1;
-        irq_to_evtchn[irq]    = -1;
-    }
-
-    /* Check that no PIRQs are still bound. */
-    for ( pirq = 0; pirq < NR_PIRQS; pirq++ )
-        if ( (evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1 )
-            panic("Suspend attempted while PIRQ %d bound to evtchn %d.\n",
-                  pirq, evtchn);
+       int pirq, virq, irq, evtchn;
+       int cpu = smp_processor_id(); /* XXX */
+
+       /* Unbind VIRQs from event channels. */
+       for (virq = 0; virq < NR_VIRQS; virq++) {
+               if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
+                       continue;
+               evtchn = irq_to_evtchn[irq];
+
+               /* Mark the event channel as unused in our table. */
+               evtchn_to_irq[evtchn] = -1;
+               irq_to_evtchn[irq]    = -1;
+       }
+
+       /* Check that no PIRQs are still bound. */
+       for (pirq = 0; pirq < NR_PIRQS; pirq++)
+               if ((evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1)
+                       panic("Suspend attempted while PIRQ %d bound "
+                             "to evtchn %d.\n", pirq, evtchn);
 }
 
 void irq_resume(void)
 {
-    evtchn_op_t op;
-    int         virq, irq, evtchn;
-    int cpu = smp_processor_id(); /* XXX */
-
-    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
-        mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */
-
-    for ( virq = 0; virq < NR_VIRQS; virq++ )
-    {
-        if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
-            continue;
-
-        /* Get a new binding from Xen. */
-        op.cmd              = EVTCHNOP_bind_virq;
-        op.u.bind_virq.virq = virq;
-        if ( HYPERVISOR_event_channel_op(&op) != 0 )
-            panic("Failed to bind virtual IRQ %d\n", virq);
-        evtchn = op.u.bind_virq.port;
+       evtchn_op_t op;
+       int         virq, irq, evtchn;
+       int cpu = smp_processor_id(); /* XXX */
+
+       /* New event-channel space is not 'live' yet. */
+       for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
+               mask_evtchn(evtchn);
+
+       for (virq = 0; virq < NR_VIRQS; virq++) {
+               if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
+                       continue;
+
+               /* Get a new binding from Xen. */
+               op.cmd              = EVTCHNOP_bind_virq;
+               op.u.bind_virq.virq = virq;
+               BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
+               evtchn = op.u.bind_virq.port;
         
-        /* Record the new mapping. */
-        bind_evtchn_to_cpu(evtchn, 0);
-        evtchn_to_irq[evtchn] = irq;
-        irq_to_evtchn[irq]    = evtchn;
-
-        /* Ready for use. */
-        unmask_evtchn(evtchn);
-    }
+               /* Record the new mapping. */
+               bind_evtchn_to_cpu(evtchn, 0);
+               evtchn_to_irq[evtchn] = irq;
+               irq_to_evtchn[irq]    = evtchn;
+
+               /* Ready for use. */
+               unmask_evtchn(evtchn);
+       }
 }
 
 void __init init_IRQ(void)
 {
-    int i;
-    int cpu;
-
-    irq_ctx_init(0);
-
-    spin_lock_init(&irq_mapping_update_lock);
+       int i;
+       int cpu;
+
+       irq_ctx_init(0);
+
+       spin_lock_init(&irq_mapping_update_lock);
 
 #ifdef CONFIG_SMP
-    /* By default all event channels notify CPU#0. */
-    memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
+       /* By default all event channels notify CPU#0. */
+       memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
 #endif
 
-    for ( cpu = 0; cpu < NR_CPUS; cpu++ ) {
-        /* No VIRQ -> IRQ mappings. */
-        for ( i = 0; i < NR_VIRQS; i++ )
-            per_cpu(virq_to_irq, cpu)[i] = -1;
-    }
-
-    /* No event-channel -> IRQ mappings. */
-    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
-    {
-        evtchn_to_irq[i] = -1;
-        mask_evtchn(i); /* No event channels are 'live' right now. */
-    }
-
-    /* No IRQ -> event-channel mappings. */
-    for ( i = 0; i < NR_IRQS; i++ )
-        irq_to_evtchn[i] = -1;
-
-    for ( i = 0; i < NR_DYNIRQS; i++ )
-    {
-        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
-        irq_bindcount[dynirq_to_irq(i)] = 0;
-
-        irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
-        irq_desc[dynirq_to_irq(i)].action  = 0;
-        irq_desc[dynirq_to_irq(i)].depth   = 1;
-        irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
-    }
-
-    for ( i = 0; i < NR_PIRQS; i++ )
-    {
-        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
-        irq_bindcount[pirq_to_irq(i)] = 1;
-
-        irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
-        irq_desc[pirq_to_irq(i)].action  = 0;
-        irq_desc[pirq_to_irq(i)].depth   = 1;
-        irq_desc[pirq_to_irq(i)].handler = &pirq_type;
-    }
-}
+       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+               /* No VIRQ -> IRQ mappings. */
+               for (i = 0; i < NR_VIRQS; i++)
+                       per_cpu(virq_to_irq, cpu)[i] = -1;
+       }
+
+       /* No event-channel -> IRQ mappings. */
+       for (i = 0; i < NR_EVENT_CHANNELS; i++) {
+               evtchn_to_irq[i] = -1;
+               mask_evtchn(i); /* No event channels are 'live' right now. */
+       }
+
+       /* No IRQ -> event-channel mappings. */
+       for (i = 0; i < NR_IRQS; i++)
+               irq_to_evtchn[i] = -1;
+
+       /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
+       for (i = 0; i < NR_DYNIRQS; i++) {
+               irq_bindcount[dynirq_to_irq(i)] = 0;
+
+               irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
+               irq_desc[dynirq_to_irq(i)].action  = 0;
+               irq_desc[dynirq_to_irq(i)].depth   = 1;
+               irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
+       }
+
+       /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
+       for (i = 0; i < NR_PIRQS; i++)
+       {
+               irq_bindcount[pirq_to_irq(i)] = 1;
+
+               irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
+               irq_desc[pirq_to_irq(i)].action  = 0;
+               irq_desc[pirq_to_irq(i)].depth   = 1;
+               irq_desc[pirq_to_irq(i)].handler = &pirq_type;
+       }
+}
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r e04b0805febb -r c317e0aca9f1 linux-2.6-xen-sparse/arch/xen/kernel/fixup.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/fixup.c      Thu Sep 29 08:59:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/fixup.c      Thu Sep 29 10:10:27 2005
@@ -37,51 +37,57 @@
 
 #define DP(_f, _args...) printk(KERN_ALERT "  " _f "\n" , ## _args )
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#define __LINKAGE fastcall
-#else
-#define __LINKAGE asmlinkage
-#endif
+fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
+{
+       static unsigned long printed = 0;
+       char info[100];
+       int i;
 
-__LINKAGE void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
-{
-    static unsigned long printed = 0;
-    char info[100];
-    int i;
+       if (test_and_set_bit(0, &printed))
+               return;
 
-    if ( !test_and_set_bit(0, &printed) )
-    {
-        HYPERVISOR_vm_assist(VMASST_CMD_disable,
-                            VMASST_TYPE_4gb_segments_notify);
+       HYPERVISOR_vm_assist(
+               VMASST_CMD_disable, VMASST_TYPE_4gb_segments_notify);
 
-        sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
+       sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
 
-        DP("");
-        DP("***************************************************************");
-        DP("***************************************************************");
-        DP("** WARNING: Currently emulating unsupported memory accesses  **");
-        DP("**          in /lib/tls libraries. The emulation is very     **");
-        DP("**          slow. To ensure full performance you should      **");
-        DP("**          execute the following as root:                   **");
-        DP("**          mv /lib/tls /lib/tls.disabled                    **");
-        DP("** Offending process: %-38.38s **", info);
-        DP("***************************************************************");
-        DP("***************************************************************");
-        DP("");
 
-        for ( i = 5; i > 0; i-- )
-        {
-            printk("Pausing... %d", i);
-            mdelay(1000);
-            printk("\b\b\b\b\b\b\b\b\b\b\b\b");
-        }
-        printk("Continuing...\n\n");
-    }
+       DP("");
+       DP("***************************************************************");
+       DP("***************************************************************");
+       DP("** WARNING: Currently emulating unsupported memory accesses  **");
+       DP("**          in /lib/tls libraries. The emulation is very     **");
+       DP("**          slow. To ensure full performance you should      **");
+       DP("**          execute the following as root:                   **");
+       DP("**          mv /lib/tls /lib/tls.disabled                    **");
+       DP("** Offending process: %-38.38s **", info);
+       DP("***************************************************************");
+       DP("***************************************************************");
+       DP("");
+
+       for (i = 5; i > 0; i--) {
+               printk("Pausing... %d", i);
+               mdelay(1000);
+               printk("\b\b\b\b\b\b\b\b\b\b\b\b");
+       }
+
+       printk("Continuing...\n\n");
 }
 
 static int __init fixup_init(void)
 {
-    HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
-    return 0;
+       HYPERVISOR_vm_assist(
+               VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
+       return 0;
 }
 __initcall(fixup_init);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r e04b0805febb -r c317e0aca9f1 
linux-2.6-xen-sparse/arch/xen/kernel/reboot.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c     Thu Sep 29 08:59:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c     Thu Sep 29 10:10:27 2005
@@ -12,7 +12,6 @@
 #include <asm-xen/evtchn.h>
 #include <asm/hypervisor.h>
 #include <asm-xen/xen-public/dom0_ops.h>
-#include <asm-xen/queues.h>
 #include <asm-xen/xenbus.h>
 #include <linux/cpu.h>
 #include <linux/kthread.h>
@@ -43,12 +42,10 @@
        HYPERVISOR_shutdown();
 }
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 int reboot_thru_bios = 0;      /* for dmi_scan.c */
 EXPORT_SYMBOL(machine_restart);
 EXPORT_SYMBOL(machine_halt);
 EXPORT_SYMBOL(machine_power_off);
-#endif
 
 
 /******************************************************************************
@@ -66,227 +63,221 @@
 
 static int __do_suspend(void *ignore)
 {
-    int i, j, k, fpp;
+       int i, j, k, fpp;
 
 #ifdef CONFIG_XEN_USB_FRONTEND
-    extern void usbif_resume();
+       extern void usbif_resume();
 #else
 #define usbif_resume() do{}while(0)
 #endif
 
-    extern int gnttab_suspend(void);
-    extern int gnttab_resume(void);
-
-    extern void time_suspend(void);
-    extern void time_resume(void);
-    extern unsigned long max_pfn;
-    extern unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[];
-
-#ifdef CONFIG_SMP
-    extern void smp_suspend(void);
-    extern void smp_resume(void);
-
-    static vcpu_guest_context_t suspended_cpu_records[NR_CPUS];
-    cpumask_t prev_online_cpus, prev_present_cpus;
-
-    void save_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
-    int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
-#endif
-
-    extern void xencons_suspend(void);
-    extern void xencons_resume(void);
-
-    int err = 0;
-
-    BUG_ON(smp_processor_id() != 0);
-    BUG_ON(in_interrupt());
+       extern int gnttab_suspend(void);
+       extern int gnttab_resume(void);
+
+       extern void time_suspend(void);
+       extern void time_resume(void);
+       extern unsigned long max_pfn;
+       extern unsigned long *pfn_to_mfn_frame_list_list;
+       extern unsigned long *pfn_to_mfn_frame_list[];
+
+#ifdef CONFIG_SMP
+       extern void smp_suspend(void);
+       extern void smp_resume(void);
+
+       static vcpu_guest_context_t suspended_cpu_records[NR_CPUS];
+       cpumask_t prev_online_cpus, prev_present_cpus;
+
+       void save_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
+       int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
+#endif
+
+       extern void xencons_suspend(void);
+       extern void xencons_resume(void);
+
+       int err = 0;
+
+       BUG_ON(smp_processor_id() != 0);
+       BUG_ON(in_interrupt());
 
 #if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
-    if (num_online_cpus() > 1) {
-       printk(KERN_WARNING 
-               "Can't suspend SMP guests without CONFIG_HOTPLUG_CPU\n");
-       return -EOPNOTSUPP;
-    }
-#endif
-
-    preempt_disable();
-#ifdef CONFIG_SMP
-    /* Take all of the other cpus offline.  We need to be careful not
-       to get preempted between the final test for num_online_cpus()
-       == 1 and disabling interrupts, since otherwise userspace could
-       bring another cpu online, and then we'd be stuffed.  At the
-       same time, cpu_down can reschedule, so we need to enable
-       preemption while doing that.  This kind of sucks, but should be
-       correct. */
-    /* (We don't need to worry about other cpus bringing stuff up,
-       since by the time num_online_cpus() == 1, there aren't any
-       other cpus) */
-    cpus_clear(prev_online_cpus);
-    while (num_online_cpus() > 1) {
+       if (num_online_cpus() > 1) {
+               printk(KERN_WARNING "Can't suspend SMP guests "
+                      "without CONFIG_HOTPLUG_CPU\n");
+               return -EOPNOTSUPP;
+       }
+#endif
+
+       preempt_disable();
+#ifdef CONFIG_SMP
+       /* Take all of the other cpus offline.  We need to be careful not
+          to get preempted between the final test for num_online_cpus()
+          == 1 and disabling interrupts, since otherwise userspace could
+          bring another cpu online, and then we'd be stuffed.  At the
+          same time, cpu_down can reschedule, so we need to enable
+          preemption while doing that.  This kind of sucks, but should be
+          correct. */
+       /* (We don't need to worry about other cpus bringing stuff up,
+          since by the time num_online_cpus() == 1, there aren't any
+          other cpus) */
+       cpus_clear(prev_online_cpus);
+       while (num_online_cpus() > 1) {
+               preempt_enable();
+               for_each_online_cpu(i) {
+                       if (i == 0)
+                               continue;
+                       err = cpu_down(i);
+                       if (err != 0) {
+                               printk(KERN_CRIT "Failed to take all CPUs "
+                                      "down: %d.\n", err);
+                               goto out_reenable_cpus;
+                       }
+                       cpu_set(i, prev_online_cpus);
+               }
+               preempt_disable();
+       }
+#endif
+
+       __cli();
+
        preempt_enable();
-       for_each_online_cpu(i) {
-           if (i == 0)
-               continue;
-           err = cpu_down(i);
-           if (err != 0) {
-               printk(KERN_CRIT "Failed to take all CPUs down: %d.\n", err);
-               goto out_reenable_cpus;
-           }
-           cpu_set(i, prev_online_cpus);
-       }
-       preempt_disable();
-    }
-#endif
-
-    __cli();
-
-    preempt_enable();
-
-#ifdef CONFIG_SMP
-    cpus_clear(prev_present_cpus);
-    for_each_present_cpu(i) {
-       if (i == 0)
-           continue;
-       save_vcpu_context(i, &suspended_cpu_records[i]);
-       cpu_set(i, prev_present_cpus);
-    }
+
+#ifdef CONFIG_SMP
+       cpus_clear(prev_present_cpus);
+       for_each_present_cpu(i) {
+               if (i == 0)
+                       continue;
+               save_vcpu_context(i, &suspended_cpu_records[i]);
+               cpu_set(i, prev_present_cpus);
+       }
 #endif
 
 #ifdef __i386__
-    mm_pin_all();
-    kmem_cache_shrink(pgd_cache);
-#endif
-
-    time_suspend();
-
-#ifdef CONFIG_SMP
-    smp_suspend();
-#endif
-
-    xenbus_suspend();
-
-    xencons_suspend();
-
-    irq_suspend();
-
-    gnttab_suspend();
-
-    HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-    clear_fixmap(FIX_SHARED_INFO);
-
-    xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
-    xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);
-
-    /* We'll stop somewhere inside this hypercall.  When it returns,
-       we'll start resuming after the restore. */
-    HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
-
-    shutting_down = SHUTDOWN_INVALID; 
-
-    set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
-
-    HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
-
-    memset(empty_zero_page, 0, PAGE_SIZE);
+       mm_pin_all();
+       kmem_cache_shrink(pgd_cache);
+#endif
+
+       time_suspend();
+
+#ifdef CONFIG_SMP
+       smp_suspend();
+#endif
+
+       xenbus_suspend();
+
+       xencons_suspend();
+
+       irq_suspend();
+
+       gnttab_suspend();
+
+       HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
+       clear_fixmap(FIX_SHARED_INFO);
+
+       xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
+       xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);
+
+       /* We'll stop somewhere inside this hypercall.  When it returns,
+          we'll start resuming after the restore. */
+       HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
+
+       shutting_down = SHUTDOWN_INVALID; 
+
+       set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
+
+       HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+
+       memset(empty_zero_page, 0, PAGE_SIZE);
             
-    HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
+       HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
                virt_to_mfn(pfn_to_mfn_frame_list_list);
   
-    fpp = PAGE_SIZE/sizeof(unsigned long);
-    for ( i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++ )
-    {
-       if ( (j % fpp) == 0 )
-       {
-           k++;
-           pfn_to_mfn_frame_list_list[k] = 
-                   virt_to_mfn(pfn_to_mfn_frame_list[k]);
-           j=0;
-       }
-       pfn_to_mfn_frame_list[k][j] = 
-               virt_to_mfn(&phys_to_machine_mapping[i]);
-    }
-    HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
-
-    gnttab_resume();
-
-    irq_resume();
-
-    xencons_resume();
-
-    xenbus_resume();
-
-#ifdef CONFIG_SMP
-    smp_resume();
-#endif
-
-    time_resume();
-
-    usbif_resume();
-
-#ifdef CONFIG_SMP
-    for_each_cpu_mask(i, prev_present_cpus)
-       restore_vcpu_context(i, &suspended_cpu_records[i]);
-#endif
-
-    __sti();
+       fpp = PAGE_SIZE/sizeof(unsigned long);
+       for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
+               if ((j % fpp) == 0) {
+                       k++;
+                       pfn_to_mfn_frame_list_list[k] = 
+                               virt_to_mfn(pfn_to_mfn_frame_list[k]);
+                       j = 0;
+               }
+               pfn_to_mfn_frame_list[k][j] = 
+                       virt_to_mfn(&phys_to_machine_mapping[i]);
+       }
+       HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
+
+       gnttab_resume();
+
+       irq_resume();
+
+       xencons_resume();
+
+       xenbus_resume();
+
+#ifdef CONFIG_SMP
+       smp_resume();
+#endif
+
+       time_resume();
+
+       usbif_resume();
+
+#ifdef CONFIG_SMP
+       for_each_cpu_mask(i, prev_present_cpus)
+               restore_vcpu_context(i, &suspended_cpu_records[i]);
+#endif
+
+       __sti();
 
 #ifdef CONFIG_SMP
  out_reenable_cpus:
-    for_each_cpu_mask(i, prev_online_cpus) {
-       j = cpu_up(i);
-       if (j != 0) {
-           printk(KERN_CRIT "Failed to bring cpu %d back up (%d).\n",
-                  i, j);
-           err = j;
-       }
-    }
-#endif
-
-    return err;
+       for_each_cpu_mask(i, prev_online_cpus) {
+               j = cpu_up(i);
+               if (j != 0) {
+                       printk(KERN_CRIT "Failed to bring cpu "
+                              "%d back up (%d).\n",
+                              i, j);
+                       err = j;
+               }
+       }
+#endif
+
+       return err;
 }
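
A worked example makes the frame-list rebuild near the end of __do_suspend easier to follow. fpp = PAGE_SIZE/sizeof(unsigned long) is the number of entries per page: 4096/4 = 1024 on i386 (512 on x86_64, with 8-byte longs). Each pfn_to_mfn_frame_list[k] page indexes fpp pages of the phys_to_machine_mapping array and so covers fpp * fpp pfns; on i386 that is 1024 * 1024 pfns, or 4GB of guest memory per frame-list page. pfn_to_mfn_frame_list_list then holds one mfn per frame-list page, which is exactly what the loop records: i advances by fpp (one p2m page per entry) and a new frame-list page is started every fpp entries.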
 
 static int shutdown_process(void *__unused)
 {
-    static char *envp[] = { "HOME=/", "TERM=linux", 
-                            "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
-    static char *restart_argv[]  = { "/sbin/reboot", NULL };
-    static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
-
-    extern asmlinkage long sys_reboot(int magic1, int magic2,
-                                      unsigned int cmd, void *arg);
-
-    daemonize(
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-        "shutdown"
-#endif
-        );
-
-    switch ( shutting_down )
-    {
-    case SHUTDOWN_POWEROFF:
-        if ( execve("/sbin/poweroff", poweroff_argv, envp) < 0 )
-        {
-            sys_reboot(LINUX_REBOOT_MAGIC1,
-                       LINUX_REBOOT_MAGIC2,
-                       LINUX_REBOOT_CMD_POWER_OFF,
-                       NULL);
-        }
-        break;
-
-    case SHUTDOWN_REBOOT:
-        if ( execve("/sbin/reboot", restart_argv, envp) < 0 )
-        {
-            sys_reboot(LINUX_REBOOT_MAGIC1,
-                       LINUX_REBOOT_MAGIC2,
-                       LINUX_REBOOT_CMD_RESTART,
-                       NULL);
-        }
-        break;
-    }
-
-    shutting_down = SHUTDOWN_INVALID; /* could try again */
-
-    return 0;
+       static char *envp[] = { "HOME=/", "TERM=linux", 
+                               "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
+       static char *restart_argv[]  = { "/sbin/reboot", NULL };
+       static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
+
+       extern asmlinkage long sys_reboot(int magic1, int magic2,
+                                         unsigned int cmd, void *arg);
+
+       daemonize("shutdown");
+
+       switch (shutting_down) {
+       case SHUTDOWN_POWEROFF:
+               if (execve("/sbin/poweroff", poweroff_argv, envp) < 0) {
+                       sys_reboot(LINUX_REBOOT_MAGIC1,
+                                  LINUX_REBOOT_MAGIC2,
+                                  LINUX_REBOOT_CMD_POWER_OFF,
+                                  NULL);
+               }
+               break;
+
+       case SHUTDOWN_REBOOT:
+               if (execve("/sbin/reboot", restart_argv, envp) < 0) {
+                       sys_reboot(LINUX_REBOOT_MAGIC1,
+                                  LINUX_REBOOT_MAGIC2,
+                                  LINUX_REBOOT_CMD_RESTART,
+                                  NULL);
+               }
+               break;
+       }
+
+       shutting_down = SHUTDOWN_INVALID; /* could try again */
+
+       return 0;
 }
 
 static struct task_struct *kthread_create_on_cpu(int (*f)(void *arg),
@@ -294,113 +285,109 @@
                                                 const char *name,
                                                 int cpu)
 {
-    struct task_struct *p;
-    p = kthread_create(f, arg, name);
-    kthread_bind(p, cpu);
-    wake_up_process(p);
-    return p;
+       struct task_struct *p;
+       p = kthread_create(f, arg, name);
+       kthread_bind(p, cpu);
+       wake_up_process(p);
+       return p;
 }
 
 static void __shutdown_handler(void *unused)
 {
-    int err;
-
-    if ( shutting_down != SHUTDOWN_SUSPEND )
-    {
-        err = kernel_thread(shutdown_process, NULL, CLONE_FS | CLONE_FILES);
-        if ( err < 0 )
-            printk(KERN_ALERT "Error creating shutdown process!\n");
-    }
-    else
-    {
-       kthread_create_on_cpu(__do_suspend, NULL, "suspender", 0);
-    }
+       int err;
+
+       if (shutting_down != SHUTDOWN_SUSPEND) {
+               err = kernel_thread(shutdown_process, NULL,
+                                   CLONE_FS | CLONE_FILES);
+               if ( err < 0 )
+                       printk(KERN_ALERT "Error creating shutdown "
+                              "process!\n");
+       } else {
+               kthread_create_on_cpu(__do_suspend, NULL, "suspender", 0);
+       }
 }
 
 static void shutdown_handler(struct xenbus_watch *watch, const char *node)
 {
-    static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
-    char *str;
-    int err;
+       static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
+       char *str;
+       int err;
 
  again:
-    err = xenbus_transaction_start();
-    if (err)
-       return;
-    str = (char *)xenbus_read("control", "shutdown", NULL);
-    /* Ignore read errors and empty reads. */
-    if (XENBUS_IS_ERR_READ(str)) {
-       xenbus_transaction_end(1);
-       return;
-    }
-
-    xenbus_write("control", "shutdown", "");
-
-    err = xenbus_transaction_end(0);
-    if (err == -EAGAIN) {
+       err = xenbus_transaction_start();
+       if (err)
+               return;
+       str = (char *)xenbus_read("control", "shutdown", NULL);
+       /* Ignore read errors and empty reads. */
+       if (XENBUS_IS_ERR_READ(str)) {
+               xenbus_transaction_end(1);
+               return;
+       }
+
+       xenbus_write("control", "shutdown", "");
+
+       err = xenbus_transaction_end(0);
+       if (err == -EAGAIN) {
+               kfree(str);
+               goto again;
+       }
+
+       if (strcmp(str, "poweroff") == 0)
+               shutting_down = SHUTDOWN_POWEROFF;
+       else if (strcmp(str, "reboot") == 0)
+               shutting_down = SHUTDOWN_REBOOT;
+       else if (strcmp(str, "suspend") == 0)
+               shutting_down = SHUTDOWN_SUSPEND;
+       else {
+               printk("Ignoring shutdown request: %s\n", str);
+               shutting_down = SHUTDOWN_INVALID;
+       }
+
        kfree(str);
-       goto again;
-    }
-
-    if (strcmp(str, "poweroff") == 0)
-        shutting_down = SHUTDOWN_POWEROFF;
-    else if (strcmp(str, "reboot") == 0)
-        shutting_down = SHUTDOWN_REBOOT;
-    else if (strcmp(str, "suspend") == 0)
-        shutting_down = SHUTDOWN_SUSPEND;
-    else {
-        printk("Ignoring shutdown request: %s\n", str);
-        shutting_down = SHUTDOWN_INVALID;
-    }
-
-    kfree(str);
-
-    if (shutting_down != SHUTDOWN_INVALID)
-        schedule_work(&shutdown_work);
+
+       if (shutting_down != SHUTDOWN_INVALID)
+               schedule_work(&shutdown_work);
 }
 
 #ifdef CONFIG_MAGIC_SYSRQ
 static void sysrq_handler(struct xenbus_watch *watch, const char *node)
 {
-    char sysrq_key = '\0';
-    int err;
+       char sysrq_key = '\0';
+       int err;
 
  again:
-    err = xenbus_transaction_start();
-    if (err)
-       return;
-    if (!xenbus_scanf("control", "sysrq", "%c", &sysrq_key)) {
-        printk(KERN_ERR "Unable to read sysrq code in control/sysrq\n");
-       xenbus_transaction_end(1);
-       return;
-    }
-
-    if (sysrq_key != '\0')
-       xenbus_printf("control", "sysrq", "%c", '\0');
-
-    err = xenbus_transaction_end(0);
-    if (err == -EAGAIN)
-       goto again;
-
-    if (sysrq_key != '\0') {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-        handle_sysrq(sysrq_key, NULL, NULL);
-#else
-        handle_sysrq(sysrq_key, NULL, NULL, NULL);
-#endif
-    }
+       err = xenbus_transaction_start();
+       if (err)
+               return;
+       if (!xenbus_scanf("control", "sysrq", "%c", &sysrq_key)) {
+               printk(KERN_ERR "Unable to read sysrq code in "
+                      "control/sysrq\n");
+               xenbus_transaction_end(1);
+               return;
+       }
+
+       if (sysrq_key != '\0')
+               xenbus_printf("control", "sysrq", "%c", '\0');
+
+       err = xenbus_transaction_end(0);
+       if (err == -EAGAIN)
+               goto again;
+
+       if (sysrq_key != '\0') {
+               handle_sysrq(sysrq_key, NULL, NULL);
+       }
 }
 #endif
 
 static struct xenbus_watch shutdown_watch = {
-    .node = "control/shutdown",
-    .callback = shutdown_handler
+       .node = "control/shutdown",
+       .callback = shutdown_handler
 };
 
 #ifdef CONFIG_MAGIC_SYSRQ
 static struct xenbus_watch sysrq_watch = {
-    .node ="control/sysrq",
-    .callback = sysrq_handler
+       .node ="control/sysrq",
+       .callback = sysrq_handler
 };
 #endif
 
@@ -413,39 +400,50 @@
                                   unsigned long event,
                                   void *data)
 {
-    int err1 = 0;
+       int err1 = 0;
 #ifdef CONFIG_MAGIC_SYSRQ
-    int err2 = 0;
-#endif
-
-    BUG_ON(down_trylock(&xenbus_lock) == 0);
-
-    err1 = register_xenbus_watch(&shutdown_watch);
+       int err2 = 0;
+#endif
+
+       BUG_ON(down_trylock(&xenbus_lock) == 0);
+
+       err1 = register_xenbus_watch(&shutdown_watch);
 #ifdef CONFIG_MAGIC_SYSRQ
-    err2 = register_xenbus_watch(&sysrq_watch);
-#endif
-
-    if (err1) {
-        printk(KERN_ERR "Failed to set shutdown watcher\n");
-    }
+       err2 = register_xenbus_watch(&sysrq_watch);
+#endif
+
+       if (err1) {
+               printk(KERN_ERR "Failed to set shutdown watcher\n");
+       }
     
 #ifdef CONFIG_MAGIC_SYSRQ
-    if (err2) {
-        printk(KERN_ERR "Failed to set sysrq watcher\n");
-    }
-#endif
-
-    return NOTIFY_DONE;
+       if (err2) {
+               printk(KERN_ERR "Failed to set sysrq watcher\n");
+       }
+#endif
+
+       return NOTIFY_DONE;
 }
 
 static int __init setup_shutdown_event(void)
 {
     
-    xenstore_notifier.notifier_call = setup_shutdown_watcher;
-
-    register_xenstore_notifier(&xenstore_notifier);
+       xenstore_notifier.notifier_call = setup_shutdown_watcher;
+
+       register_xenstore_notifier(&xenstore_notifier);
     
-    return 0;
+       return 0;
 }
 
 subsys_initcall(setup_shutdown_event);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
+#
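
Both handlers above follow the same xenstore transaction idiom: start a
transaction, read and clear the control node, commit, and retry from
scratch when the commit fails with -EAGAIN. A minimal sketch of that
read-and-clear pattern, using the API exactly as it appears in this
patch ("control/example" is a hypothetical node, and the helper name is
invented for illustration):

	static char *read_and_clear(void)
	{
		char *str;
		int err;

	again:
		err = xenbus_transaction_start();
		if (err)
			return NULL;

		str = (char *)xenbus_read("control", "example", NULL);
		if (XENBUS_IS_ERR_READ(str)) {
			/* Abort the transaction on a failed or empty read. */
			xenbus_transaction_end(1);
			return NULL;
		}

		/* Clear the node so the request is consumed exactly once. */
		xenbus_write("control", "example", "");

		err = xenbus_transaction_end(0);
		if (err == -EAGAIN) {
			kfree(str);	/* lost a race with another writer: retry */
			goto again;
		}

		return str;	/* caller must kfree() */
	}
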
diff -r e04b0805febb -r c317e0aca9f1 linux-2.6-xen-sparse/arch/xen/kernel/smp.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/smp.c        Thu Sep 29 08:59:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/smp.c        Thu Sep 29 10:10:27 2005
@@ -11,6 +11,15 @@
 int setup_profiling_timer(unsigned int multiplier)
 {
        printk("setup_profiling_timer\n");
-
        return 0;
 }
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r e04b0805febb -r c317e0aca9f1 linux-2.6-xen-sparse/arch/xen/kernel/xen_proc.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/xen_proc.c   Thu Sep 29 08:59:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/xen_proc.c   Thu Sep 29 10:10:27 2005
@@ -6,13 +6,23 @@
 
 struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode)
 {
-    if ( xen_base == NULL )
-        if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL )
-            panic("Couldn't create /proc/xen");
-    return create_proc_entry(name, mode, xen_base);
+       if ( xen_base == NULL )
+               if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL )
+                       panic("Couldn't create /proc/xen");
+       return create_proc_entry(name, mode, xen_base);
 }
 
 void remove_xen_proc_entry(const char *name)
 {
-    remove_proc_entry(name, xen_base);
+       remove_proc_entry(name, xen_base);
 }
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
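
create_xen_proc_entry() creates the /proc/xen directory on first use and
then defers to create_proc_entry(). A hedged usage sketch -- the node
name "example" and its read handler are hypothetical, and the read_proc
signature is the 2.6-era procfs one assumed here:

	static int example_read(char *page, char **start, off_t off,
				int count, int *eof, void *data)
	{
		*eof = 1;
		return sprintf(page, "42\n");
	}

	static int __init example_init(void)
	{
		struct proc_dir_entry *ent;

		ent = create_xen_proc_entry("example", 0444);
		if (ent == NULL)
			return -ENOMEM;
		ent->read_proc = example_read;
		return 0;
	}

	static void __exit example_exit(void)
	{
		remove_xen_proc_entry("example");
	}
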
diff -r e04b0805febb -r c317e0aca9f1 linux-2.6-xen-sparse/include/asm-xen/balloon.h
--- a/linux-2.6-xen-sparse/include/asm-xen/balloon.h    Thu Sep 29 08:59:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/balloon.h    Thu Sep 29 10:10:27 2005
@@ -58,3 +58,13 @@
 #define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
 
 #endif /* __ASM_BALLOON_H__ */
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
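
balloon_unlock() above is spin_unlock_irqrestore() on the global
balloon_lock; assuming balloon_lock() is the matching
spin_lock_irqsave() wrapper, callers thread a flags word through, as in
this sketch:

	unsigned long flags;

	balloon_lock(flags);
	/* ... adjust the memory reservation under the lock ... */
	balloon_unlock(flags);
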
diff -r e04b0805febb -r c317e0aca9f1 linux-2.6-xen-sparse/include/asm-xen/driver_util.h
--- a/linux-2.6-xen-sparse/include/asm-xen/driver_util.h        Thu Sep 29 08:59:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/driver_util.h        Thu Sep 29 10:10:27 2005
@@ -14,3 +14,13 @@
 extern void unlock_vm_area(struct vm_struct *area);
 
 #endif /* __ASM_XEN_DRIVER_UTIL_H__ */
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r e04b0805febb -r c317e0aca9f1 linux-2.6-xen-sparse/include/asm-xen/evtchn.h
--- a/linux-2.6-xen-sparse/include/asm-xen/evtchn.h     Thu Sep 29 08:59:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/evtchn.h     Thu Sep 29 10:10:27 2005
@@ -4,7 +4,7 @@
  * Communication via Xen event channels.
  * Also definitions for the device that demuxes notifications to userspace.
  * 
- * Copyright (c) 2004, K A Fraser
+ * Copyright (c) 2004-2005, K A Fraser
  * 
  * This file may be distributed separately from the Linux kernel, or
  * incorporated into other software packages, subject to the following license:
@@ -61,11 +61,11 @@
  * You *cannot* trust the irq argument passed to the callback handler.
  */
 extern int  bind_evtchn_to_irqhandler(
-    unsigned int evtchn,
-    irqreturn_t (*handler)(int, void *, struct pt_regs *),
-    unsigned long irqflags,
-    const char *devname,
-    void *dev_id);
+       unsigned int evtchn,
+       irqreturn_t (*handler)(int, void *, struct pt_regs *),
+       unsigned long irqflags,
+       const char *devname,
+       void *dev_id);
 extern void unbind_evtchn_from_irqhandler(unsigned int evtchn, void *dev_id);
 
 extern void irq_suspend(void);
@@ -79,42 +79,42 @@
 
 static inline void mask_evtchn(int port)
 {
-    shared_info_t *s = HYPERVISOR_shared_info;
-    synch_set_bit(port, &s->evtchn_mask[0]);
+       shared_info_t *s = HYPERVISOR_shared_info;
+       synch_set_bit(port, &s->evtchn_mask[0]);
 }
 
 static inline void unmask_evtchn(int port)
 {
-    shared_info_t *s = HYPERVISOR_shared_info;
-    vcpu_info_t *vcpu_info = &s->vcpu_data[smp_processor_id()];
+       shared_info_t *s = HYPERVISOR_shared_info;
+       vcpu_info_t *vcpu_info = &s->vcpu_data[smp_processor_id()];
 
-    synch_clear_bit(port, &s->evtchn_mask[0]);
+       synch_clear_bit(port, &s->evtchn_mask[0]);
 
-    /*
-     * The following is basically the equivalent of 'hw_resend_irq'. Just like
-     * a real IO-APIC we 'lose the interrupt edge' if the channel is masked.
-     */
-    if (  synch_test_bit        (port,    &s->evtchn_pending[0]) && 
-         !synch_test_and_set_bit(port>>5, &vcpu_info->evtchn_pending_sel) )
-    {
-        vcpu_info->evtchn_upcall_pending = 1;
-        if ( !vcpu_info->evtchn_upcall_mask )
-            force_evtchn_callback();
-    }
+       /*
+        * The following is basically the equivalent of 'hw_resend_irq'. Just
+        * like a real IO-APIC we 'lose the interrupt edge' if the channel is
+        * masked.
+        */
+       if (synch_test_bit         (port,    &s->evtchn_pending[0]) && 
+           !synch_test_and_set_bit(port>>5, &vcpu_info->evtchn_pending_sel)) {
+               vcpu_info->evtchn_upcall_pending = 1;
+               if (!vcpu_info->evtchn_upcall_mask)
+                       force_evtchn_callback();
+       }
 }
 
 static inline void clear_evtchn(int port)
 {
-    shared_info_t *s = HYPERVISOR_shared_info;
-    synch_clear_bit(port, &s->evtchn_pending[0]);
+       shared_info_t *s = HYPERVISOR_shared_info;
+       synch_clear_bit(port, &s->evtchn_pending[0]);
 }
 
 static inline int notify_via_evtchn(int port)
 {
-    evtchn_op_t op;
-    op.cmd = EVTCHNOP_send;
-    op.u.send.local_port = port;
-    return HYPERVISOR_event_channel_op(&op);
+       evtchn_op_t op;
+       op.cmd = EVTCHNOP_send;
+       op.u.send.local_port = port;
+       return HYPERVISOR_event_channel_op(&op);
 }
 
 /*
@@ -133,3 +133,13 @@
 #define EVTCHN_UNBIND _IO('E', 3)
 
 #endif /* __ASM_EVTCHN_H__ */
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
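
As the header comment warns, the irq argument handed to a
bind_evtchn_to_irqhandler() callback cannot be trusted, so handlers
should identify their source via dev_id. A minimal sketch (the
event-channel number, device pointer and handler are hypothetical):

	static irqreturn_t my_handler(int irq, void *dev_id,
				      struct pt_regs *regs)
	{
		/* Do not use 'irq'; key off dev_id instead. */
		/* ... process the event-channel notification ... */
		return IRQ_HANDLED;
	}

	/* setup, with 'evtchn' already allocated and bound: */
	err = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
					"my_device", my_dev);
	...
	/* teardown: */
	unbind_evtchn_from_irqhandler(evtchn, my_dev);
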
diff -r e04b0805febb -r c317e0aca9f1 linux-2.6-xen-sparse/include/asm-xen/foreign_page.h
--- a/linux-2.6-xen-sparse/include/asm-xen/foreign_page.h       Thu Sep 29 08:59:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/foreign_page.h       Thu Sep 29 10:10:27 2005
@@ -28,3 +28,13 @@
        ( (void (*) (struct page *)) (page)->mapping )
 
 #endif /* __ASM_XEN_FOREIGN_PAGE_H__ */
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r e04b0805febb -r c317e0aca9f1 linux-2.6-xen-sparse/include/asm-xen/gnttab.h
--- a/linux-2.6-xen-sparse/include/asm-xen/gnttab.h     Thu Sep 29 08:59:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/gnttab.h     Thu Sep 29 10:10:27 2005
@@ -6,7 +6,7 @@
  * 2. Accessing others' memory reservations via grant references.
  * (i.e., mechanisms for both sender and recipient of grant references)
  * 
- * Copyright (c) 2004, K A Fraser
+ * Copyright (c) 2004-2005, K A Fraser
  * Copyright (c) 2005, Christopher Clark
  */
 
@@ -25,10 +25,10 @@
 #endif
 
 struct gnttab_free_callback {
-    struct gnttab_free_callback *next;
-    void (*fn)(void *);
-    void *arg;
-    u16 count;
+       struct gnttab_free_callback *next;
+       void (*fn)(void *);
+       void *arg;
+       u16 count;
 };
 
 int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
@@ -73,3 +73,13 @@
 #endif
 
 #endif /* __ASM_GNTTAB_H__ */
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
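
The gnttab_free_callback struct added above lets a driver ask to be
called back once enough grant references are free to retry a stalled
operation. A sketch of how it might be used -- the registration helper
gnttab_request_free_callback() is assumed from the matching C file and
is not shown in this hunk:

	static void got_refs(void *arg)
	{
		/* ... retry the transfer that ran out of grant refs ... */
	}

	static struct gnttab_free_callback cb;

	/* fn/arg/count correspond to the struct fields above; ask to be
	 * called back once at least 8 references are free again. */
	gnttab_request_free_callback(&cb, got_refs, my_dev, 8);
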
diff -r e04b0805febb -r c317e0aca9f1 linux-2.6-xen-sparse/include/asm-xen/xen_proc.h
--- a/linux-2.6-xen-sparse/include/asm-xen/xen_proc.h   Thu Sep 29 08:59:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/xen_proc.h   Thu Sep 29 10:10:27 2005
@@ -6,8 +6,18 @@
 #include <linux/proc_fs.h>
 
 extern struct proc_dir_entry *create_xen_proc_entry(
-    const char *name, mode_t mode);
+       const char *name, mode_t mode);
 extern void remove_xen_proc_entry(
-    const char *name);
+       const char *name);
 
 #endif /* __ASM_XEN_PROC_H__ */
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r e04b0805febb -r c317e0aca9f1 linux-2.6-xen-sparse/include/asm-xen/xenbus.h
--- a/linux-2.6-xen-sparse/include/asm-xen/xenbus.h     Thu Sep 29 08:59:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/xenbus.h     Thu Sep 29 10:10:27 2005
@@ -139,3 +139,13 @@
 #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE)
 
 #endif /* _ASM_XEN_XENBUS_H */
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
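
XENBUS_EXIST_ERR() classifies the two errno values that mean a node is
simply absent, letting callers treat "not written yet" differently from
a real xenstore failure. A sketch, assuming err holds a negative errno
returned by some xenbus read:

	if (err < 0) {
		if (XENBUS_EXIST_ERR(err))
			val = DEFAULT_VAL;	/* node absent: use a default */
		else
			return err;		/* genuine xenstore failure */
	}
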
diff -r e04b0805febb -r c317e0aca9f1 linux-2.6-xen-sparse/include/asm-xen/queues.h
--- a/linux-2.6-xen-sparse/include/asm-xen/queues.h     Thu Sep 29 08:59:46 2005
+++ /dev/null   Thu Sep 29 10:10:27 2005
@@ -1,81 +0,0 @@
-
-/*
- * Oh dear. Task queues were removed from Linux 2.6 and replaced by work 
- * queues. Unfortunately the semantics is not the same. With task queues we 
- * can defer work until a particular event occurs -- this is not
- * straightforwardly done with work queues (queued work is performed asap, or
- * after some fixed timeout). Conversely, work queues are a (slightly) neater
- * way of deferring work to a process context than using task queues in 2.4.
- * 
- * This is a bit of a needless reimplementation -- should have just pulled
- * the code from 2.4, but I tried leveraging work queues to simplify things.
- * They didn't help. :-(
- */
-
-#ifndef __QUEUES_H__
-#define __QUEUES_H__
-
-#include <linux/version.h>
-#include <linux/list.h>
-#include <linux/workqueue.h>
-
-struct tq_struct { 
-    void (*fn)(void *);
-    void *arg;
-    struct list_head list;
-    unsigned long pending;
-};
-#define INIT_TQUEUE(_name, _fn, _arg)               \
-    do {                                            \
-        INIT_LIST_HEAD(&(_name)->list);             \
-        (_name)->pending = 0;                       \
-        (_name)->fn = (_fn); (_name)->arg = (_arg); \
-    } while ( 0 )
-#define DECLARE_TQUEUE(_name, _fn, _arg)            \
-    struct tq_struct _name = { (_fn), (_arg), LIST_HEAD_INIT((_name).list), 0 }
-
-typedef struct {
-    struct list_head list;
-    spinlock_t       lock;
-} task_queue;
-#define DECLARE_TASK_QUEUE(_name) \
-    task_queue _name = { LIST_HEAD_INIT((_name).list), SPIN_LOCK_UNLOCKED }
-
-static inline int queue_task(struct tq_struct *tqe, task_queue *tql)
-{
-    unsigned long flags;
-    if ( test_and_set_bit(0, &tqe->pending) )
-        return 0;
-    spin_lock_irqsave(&tql->lock, flags);
-    list_add_tail(&tqe->list, &tql->list);
-    spin_unlock_irqrestore(&tql->lock, flags);
-    return 1;
-}
-
-static inline void run_task_queue(task_queue *tql)
-{
-    struct list_head head, *ent;
-    struct tq_struct *tqe;
-    unsigned long flags;
-    void (*fn)(void *);
-    void *arg;
-
-    spin_lock_irqsave(&tql->lock, flags);
-    list_add(&head, &tql->list);
-    list_del_init(&tql->list);
-    spin_unlock_irqrestore(&tql->lock, flags);
-
-    while ( !list_empty(&head) )
-    {
-        ent = head.next;
-        list_del_init(ent);
-        tqe = list_entry(ent, struct tq_struct, list);
-        fn  = tqe->fn;
-        arg = tqe->arg;
-        wmb();
-        tqe->pending = 0;
-        fn(arg);
-    }
-}
-
-#endif /* __QUEUES_H__ */
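
The deleted header reimplemented 2.4-style task queues because, as its
own comment notes, 2.6 work queues run queued work as soon as possible
rather than waiting for an explicit "run the queue now" event. With the
header gone, callers use plain 2.6 work queues, as shutdown_handler()
above already does; a minimal sketch (the handler name is illustrative,
and the two-argument DECLARE_WORK form is the 2.6-era one used in this
patch):

	#include <linux/workqueue.h>

	static void my_deferred_fn(void *arg)
	{
		/* Runs in process context, as soon as keventd gets to it;
		 * there is no separate run_task_queue() step. */
	}

	static DECLARE_WORK(my_work, my_deferred_fn, NULL);

	/* where 2.4 code would have done queue_task(&tqe, &tq): */
	schedule_work(&my_work);
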

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
