
xen-ppc-devel

[XenPPC] [linux-ppc-2.6] [ppc-code-merge] merge with linux-2.6.tip-xen.hg

To: xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
Subject: [XenPPC] [linux-ppc-2.6] [ppc-code-merge] merge with linux-2.6.tip-xen.hg
From: Xen patchbot-linux-ppc-2.6 <patchbot-linux-ppc-2.6@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 09 Jun 2006 13:47:20 +0000
Delivery-date: Fri, 09 Jun 2006 07:00:50 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-ppc-devel-request@lists.xensource.com?subject=help>
List-id: Xen PPC development <xen-ppc-devel.lists.xensource.com>
List-post: <mailto:xen-ppc-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ppc-devel>, <mailto:xen-ppc-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ppc-devel>, <mailto:xen-ppc-devel-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-ppc-devel-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
# Node ID 5c0c59eb5f73e7862590a8adca52bc74171bf2d5
# Parent  760669a37a3a04dd629823cf41a6bc8ee662e224
# Parent  0f9ec4b29f3a25cffcd90e03113f3790e644a3ac
[ppc-code-merge] merge with linux-2.6.tip-xen.hg
---
 arch/ia64/xen/drivers/Makefile              |   22 --
 arch/ia64/xen/drivers/coreMakefile          |   20 -
 arch/ia64/xen/drivers/evtchn_ia64.c         |  261 -------------------------
 arch/ia64/xen/xenconsole.c                  |   19 -
 arch/i386/kernel/head-xen.S                 |    2 
 arch/i386/kernel/process-xen.c              |   14 -
 arch/i386/kernel/time-xen.c                 |   20 +
 arch/i386/kernel/vm86.c                     |    4 
 arch/i386/mm/init-xen.c                     |   14 -
 arch/ia64/Kconfig                           |   36 ++-
 arch/ia64/kernel/iosapic.c                  |   11 -
 arch/ia64/kernel/irq_ia64.c                 |  197 ++++++++++++++++++-
 arch/ia64/kernel/setup.c                    |   21 +-
 arch/ia64/xen/Makefile                      |    2 
 arch/ia64/xen/drivers/xenia64_init.c        |    1 
 arch/ia64/xen/hypervisor.c                  |  285 ++++++++++++++++++++-------
 arch/ia64/xen/xenivt.S                      |   45 +++-
 arch/powerpc/Kconfig                        |    5 
 arch/x86_64/kernel/process-xen.c            |   15 -
 arch/x86_64/kernel/setup-xen.c              |   23 --
 arch/x86_64/kernel/smp-xen.c                |    2 
 arch/x86_64/mm/init-xen.c                   |   43 +++-
 drivers/xen/Kconfig                         |   15 -
 drivers/xen/Makefile                        |    2 
 drivers/xen/balloon/balloon.c               |    6 
 drivers/xen/blkfront/blkfront.c             |   21 +-
 drivers/xen/blkfront/block.h                |    1 
 drivers/xen/blkfront/vbd.c                  |    1 
 drivers/xen/core/Makefile                   |    6 
 drivers/xen/core/cpu_hotplug.c              |    2 
 drivers/xen/core/smpboot.c                  |   20 +
 drivers/xen/netback/loopback.c              |    2 
 drivers/xen/netback/netback.c               |  288 ++++++++++++++++++++++------
 drivers/xen/netback/xenbus.c                |   26 ++
 drivers/xen/netfront/netfront.c             |  173 +++++++++++++---
 drivers/xen/privcmd/privcmd.c               |   12 -
 include/asm-i386/mach-xen/asm/dma-mapping.h |    2 
 include/asm-i386/mach-xen/asm/hypercall.h   |    7 
 include/asm-i386/mach-xen/asm/system.h      |   10 
 include/asm-i386/mach-xen/setup_arch_post.h |    7 
 include/asm-ia64/hw_irq.h                   |    8 
 include/asm-ia64/hypercall.h                |   40 ++-
 include/asm-ia64/hypervisor.h               |    4 
 include/asm-ia64/irq.h                      |   31 +++
 include/asm-ia64/xen/privop.h               |   32 +--
 include/asm-x86_64/mach-xen/asm/hypercall.h |    7 
 include/asm-x86_64/mach-xen/asm/system.h    |    4 
 include/xen/cpu_hotplug.h                   |    6 
 include/xen/interface/arch-ia64.h           |    9 
 include/xen/interface/arch-x86_32.h         |   27 ++
 include/xen/interface/arch-x86_64.h         |   24 +-
 include/xen/interface/callback.h            |   15 +
 include/xen/interface/grant_table.h         |    2 
 include/xen/interface/io/netif.h            |    4 
 include/xen/interface/io/ring.h             |   16 +
 include/xen/interface/memory.h              |   10 
 include/xen/interface/sched_ctl.h           |    2 
 include/xen/interface/xen.h                 |   22 +-
 58 files changed, 1243 insertions(+), 683 deletions(-)

diff -r 760669a37a3a -r 5c0c59eb5f73 arch/i386/kernel/head-xen.S
--- a/arch/i386/kernel/head-xen.S       Wed Jun 07 19:53:53 2006 -0400
+++ b/arch/i386/kernel/head-xen.S       Thu Jun 08 15:10:05 2006 -0400
@@ -173,7 +173,7 @@ ENTRY(cpu_gdt_table)
        .ascii           "|pae_pgdir_above_4gb"
        .ascii           "|supervisor_mode_kernel"
 #ifdef CONFIG_X86_PAE
-       .ascii  ",PAE=yes"
+       .ascii  ",PAE=yes[extended-cr3]"
 #else
        .ascii  ",PAE=no"
 #endif
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/i386/kernel/process-xen.c
--- a/arch/i386/kernel/process-xen.c    Wed Jun 07 19:53:53 2006 -0400
+++ b/arch/i386/kernel/process-xen.c    Thu Jun 08 15:10:05 2006 -0400
@@ -54,6 +54,7 @@
 
 #include <xen/interface/physdev.h>
 #include <xen/interface/vcpu.h>
+#include <xen/cpu_hotplug.h>
 
 #include <linux/err.h>
 
@@ -100,8 +101,6 @@ EXPORT_SYMBOL(enable_hlt);
 EXPORT_SYMBOL(enable_hlt);
 
 /* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
-extern void stop_hz_timer(void);
-extern void start_hz_timer(void);
 void xen_idle(void)
 {
        local_irq_disable();
@@ -111,10 +110,7 @@ void xen_idle(void)
        else {
                clear_thread_flag(TIF_POLLING_NRFLAG);
                smp_mb__after_clear_bit();
-               stop_hz_timer();
-               /* Blocking includes an implicit local_irq_enable(). */
-               HYPERVISOR_block();
-               start_hz_timer();
+               safe_halt();
                set_thread_flag(TIF_POLLING_NRFLAG);
        }
 }
@@ -131,11 +127,7 @@ static inline void play_dead(void)
        cpu_clear(smp_processor_id(), cpu_initialized);
        preempt_enable_no_resched();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
-       /* Same as drivers/xen/core/smpboot.c:cpu_bringup(). */
-       cpu_init();
-       touch_softlockup_watchdog();
-       preempt_disable();
-       local_irq_enable();
+       cpu_bringup();
 }
 #else
 static inline void play_dead(void)
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/i386/kernel/time-xen.c
--- a/arch/i386/kernel/time-xen.c       Wed Jun 07 19:53:53 2006 -0400
+++ b/arch/i386/kernel/time-xen.c       Thu Jun 08 15:10:05 2006 -0400
@@ -973,7 +973,7 @@ EXPORT_SYMBOL(jiffies_to_st);
  * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
  * These functions are based on implementations from arch/s390/kernel/time.c
  */
-void stop_hz_timer(void)
+static void stop_hz_timer(void)
 {
        unsigned int cpu = smp_processor_id();
        unsigned long j;
@@ -993,10 +993,26 @@ void stop_hz_timer(void)
        BUG_ON(HYPERVISOR_set_timer_op(jiffies_to_st(j)) != 0);
 }
 
-void start_hz_timer(void)
+static void start_hz_timer(void)
 {
        cpu_clear(smp_processor_id(), nohz_cpu_mask);
 }
+
+void safe_halt(void)
+{
+       stop_hz_timer();
+       /* Blocking includes an implicit local_irq_enable(). */
+       HYPERVISOR_block();
+       start_hz_timer();
+}
+EXPORT_SYMBOL(safe_halt);
+
+void halt(void)
+{
+       if (irqs_disabled())
+               HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
+}
+EXPORT_SYMBOL(halt);
 
 /* No locking required. We are only CPU running, and interrupts are off. */
 void time_resume(void)
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/i386/kernel/vm86.c
--- a/arch/i386/kernel/vm86.c   Wed Jun 07 19:53:53 2006 -0400
+++ b/arch/i386/kernel/vm86.c   Thu Jun 08 15:10:05 2006 -0400
@@ -132,7 +132,9 @@ struct pt_regs * fastcall save_v86_state
        current->thread.sysenter_cs = __KERNEL_CS;
        load_esp0(tss, &current->thread);
        current->thread.saved_esp0 = 0;
+#ifndef CONFIG_X86_NO_TSS
        put_cpu();
+#endif
 
        loadsegment(fs, current->thread.saved_fs);
        loadsegment(gs, current->thread.saved_gs);
@@ -310,7 +312,9 @@ static void do_sys_vm86(struct kernel_vm
        if (cpu_has_sep)
                tsk->thread.sysenter_cs = 0;
        load_esp0(tss, &tsk->thread);
+#ifndef CONFIG_X86_NO_TSS
        put_cpu();
+#endif
 
        tsk->thread.screen_bitmap = info->screen_bitmap;
        if (info->flags & VM86_SCREEN_BITMAP)
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/i386/mm/init-xen.c
--- a/arch/i386/mm/init-xen.c   Wed Jun 07 19:53:53 2006 -0400
+++ b/arch/i386/mm/init-xen.c   Thu Jun 08 15:10:05 2006 -0400
@@ -558,15 +558,11 @@ void __init paging_init(void)
 
        kmap_init();
 
-       if (!xen_feature(XENFEAT_auto_translated_physmap) ||
-           xen_start_info->shared_info >= xen_start_info->nr_pages) {
-               /* Switch to the real shared_info page, and clear the
-                * dummy page. */
-               set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
-               HYPERVISOR_shared_info =
-                       (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
-               memset(empty_zero_page, 0, sizeof(empty_zero_page));
-       }
+       /* Switch to the real shared_info page, and clear the
+        * dummy page. */
+       set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
+       HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+       memset(empty_zero_page, 0, sizeof(empty_zero_page));
 
        /* Setup mapping of lower 1st MB */
        for (i = 0; i < NR_FIX_ISAMAPS; i++)
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/ia64/Kconfig
--- a/arch/ia64/Kconfig Wed Jun 07 19:53:53 2006 -0400
+++ b/arch/ia64/Kconfig Thu Jun 08 15:10:05 2006 -0400
@@ -81,7 +81,7 @@ config XEN_IA64_DOM0_VP
 
 config XEN_IA64_DOM0_NON_VP
        bool
-       depends on !(XEN && XEN_IA64_DOM0_VP)
+       depends on XEN && !XEN_IA64_DOM0_VP
        default y
        help
          dom0 P=M model
@@ -535,15 +535,39 @@ source "security/Kconfig"
 
 source "crypto/Kconfig"
 
+#
 # override default values of drivers/xen/Kconfig
-if !XEN_IA64_DOM0_VP
+#
+if XEN
+config XEN_UTIL
+       default n if XEN_IA64_DOM0_VP
+
 config HAVE_ARCH_ALLOC_SKB
-        bool
-        default n
+       default n if !XEN_IA64_DOM0_VP
 
 config HAVE_ARCH_DEV_ALLOC_SKB
-        bool
-        default n
+       default n if !XEN_IA64_DOM0_VP
+
+config XEN_BALLOON
+       default n if !XEN_IA64_DOM0_VP
+
+config XEN_SKBUFF
+       default n if !XEN_IA64_DOM0_VP
+
+config XEN_NETDEV_BACKEND
+       default n if !XEN_IA64_DOM0_VP
+
+config XEN_NETDEV_FRONTEND
+       default n if !XEN_IA64_DOM0_VP
+
+config XEN_DEVMEM
+       default n
+
+config XEN_REBOOT
+       default n
+
+config XEN_SMPBOOT
+       default n
 endif
 
 source "drivers/xen/Kconfig"
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/ia64/kernel/iosapic.c
--- a/arch/ia64/kernel/iosapic.c        Wed Jun 07 19:53:53 2006 -0400
+++ b/arch/ia64/kernel/iosapic.c        Thu Jun 08 15:10:05 2006 -0400
@@ -191,7 +191,7 @@ static inline void xen_iosapic_write(cha
 
 static inline unsigned int iosapic_read(char __iomem *iosapic, unsigned int reg)
 {
-       if (!running_on_xen) {
+       if (!is_running_on_xen()) {
                writel(reg, iosapic + IOSAPIC_REG_SELECT);
                return readl(iosapic + IOSAPIC_WINDOW);
        } else
@@ -200,7 +200,7 @@ static inline unsigned int iosapic_read(
 
 static inline void iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
 {
-       if (!running_on_xen) {
+       if (!is_running_on_xen()) {
                writel(reg, iosapic + IOSAPIC_REG_SELECT);
                writel(val, iosapic + IOSAPIC_WINDOW);
        } else
@@ -712,6 +712,11 @@ register_intr (unsigned int gsi, int vec
        iosapic_intr_info[vector].polarity = polarity;
        iosapic_intr_info[vector].dmode    = delivery;
        iosapic_intr_info[vector].trigger  = trigger;
+
+#ifdef CONFIG_XEN
+       if (is_running_on_xen())
+               return 0;
+#endif
 
        if (trigger == IOSAPIC_EDGE)
                irq_type = &irq_type_iosapic_edge;
@@ -1076,7 +1081,7 @@ iosapic_system_init (int system_pcat_com
 
        pcat_compat = system_pcat_compat;
 #ifdef CONFIG_XEN
-       if (running_on_xen)
+       if (is_running_on_xen())
                return;
 #endif
        if (pcat_compat) {
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/ia64/kernel/irq_ia64.c
--- a/arch/ia64/kernel/irq_ia64.c       Wed Jun 07 19:53:53 2006 -0400
+++ b/arch/ia64/kernel/irq_ia64.c       Thu Jun 08 15:10:05 2006 -0400
@@ -68,7 +68,7 @@ assign_irq_vector (int irq)
        int pos, vector;
 #ifdef CONFIG_XEN
        extern int xen_assign_irq_vector(int);
-       if (running_on_xen)
+       if (is_running_on_xen())
                return xen_assign_irq_vector(irq);
 #endif /* CONFIG_XEN */
  again:
@@ -229,6 +229,151 @@ static struct irqaction ipi_irqaction = 
 };
 #endif
 
+#ifdef CONFIG_XEN
+#include <xen/evtchn.h>
+#include <xen/interface/callback.h>
+
+static char timer_name[NR_CPUS][15];
+static char ipi_name[NR_CPUS][15];
+static char resched_name[NR_CPUS][15];
+
+struct saved_irq {
+       unsigned int irq;
+       struct irqaction *action;
+};
+/* 16 should be far optimistic value, since only several percpu irqs
+ * are registered early.
+ */
+#define MAX_LATE_IRQ   16
+static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
+static unsigned short late_irq_cnt = 0;
+static unsigned short saved_irq_cnt = 0;
+static int xen_slab_ready = 0;
+
+/* Dummy stub. Though we may check RESCHEDULE_VECTOR before __do_IRQ,
+ * it ends up to issue several memory accesses upon percpu data and
+ * thus adds unnecessary traffic to other paths.
+ */
+static irqreturn_t
+handle_reschedule(int irq, void *dev_id, struct pt_regs *regs)
+{
+
+       return IRQ_HANDLED;
+}
+
+static struct irqaction resched_irqaction = {
+       .handler =      handle_reschedule,
+       .flags =        SA_INTERRUPT,
+       .name =         "RESCHED"
+};
+
+/*
+ * This is xen version percpu irq registration, which needs bind
+ * to xen specific evtchn sub-system. One trick here is that xen
+ * evtchn binding interface depends on kmalloc because related
+ * port needs to be freed at device/cpu down. So we cache the
+ * registration on BSP before slab is ready and then deal them
+ * at later point. For rest instances happening after slab ready,
+ * we hook them to xen evtchn immediately.
+ *
+ * FIXME: MCA is not supported by far, and thus "nomca" boot param is
+ * required.
+ */
+void
+xen_register_percpu_irq (unsigned int irq, struct irqaction *action, int save)
+{
+       char name[15];
+       unsigned int cpu = smp_processor_id();
+       int ret = 0;
+
+       if (xen_slab_ready) {
+               switch (irq) {
+               case IA64_TIMER_VECTOR:
+                       sprintf(timer_name[cpu], "%s%d", action->name, cpu);
+                       ret = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
+                               action->handler, action->flags,
+                               timer_name[cpu], action->dev_id);
+                       printk(KERN_INFO "register VIRQ_ITC (%s) to xen irq (%d)\n", name, ret);
+                       break;
+               case IA64_IPI_RESCHEDULE:
+                       sprintf(resched_name[cpu], "%s%d", action->name, cpu);
+                       ret = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, cpu,
+                               action->handler, action->flags,
+                               resched_name[cpu], action->dev_id);
+                       printk(KERN_INFO "register RESCHEDULE_VECTOR (%s) to xen irq (%d)\n", name, ret);
+                       break;
+               case IA64_IPI_VECTOR:
+                       sprintf(ipi_name[cpu], "%s%d", action->name, cpu);
+                       ret = bind_ipi_to_irqhandler(IPI_VECTOR, cpu,
+                               action->handler, action->flags,
+                               ipi_name[cpu], action->dev_id);
+                       printk(KERN_INFO "register IPI_VECTOR (%s) to xen irq (%d)\n", name, ret);
+                       break;
+               default:
+                       printk(KERN_WARNING "Percpu irq %d is unsupported by xen!\n", irq);
+                       break;
+               }
+               BUG_ON(ret < 0);
+       } 
+
+       /* For BSP, we cache registered percpu irqs, and then re-walk
+        * them when initializing APs
+        */
+       if (!cpu && save) {
+               BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
+               saved_percpu_irqs[saved_irq_cnt].irq = irq;
+               saved_percpu_irqs[saved_irq_cnt].action = action;
+               saved_irq_cnt++;
+               if (!xen_slab_ready)
+                       late_irq_cnt++;
+       }
+}
+
+static void
+xen_bind_early_percpu_irq (void)
+{
+       int i;
+
+       xen_slab_ready = 1;
+       /* There's no race when accessing this cached array, since only
+        * BSP will face with such step shortly
+        */
+       for (i = 0; i < late_irq_cnt; i++)
+               xen_register_percpu_irq(saved_percpu_irqs[i].irq,
+                       saved_percpu_irqs[i].action, 0);
+}
+
+/* FIXME: There's no obvious point to check whether slab is ready. So
+ * a hack is used here by utilizing a late time hook.
+ */
+extern void (*late_time_init)(void);
+extern char xen_event_callback;
+extern void xen_init_IRQ(void);
+
+DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
+void xen_smp_intr_init(void)
+{
+#ifdef CONFIG_SMP
+       unsigned int cpu = smp_processor_id();
+       unsigned int i = 0;
+       struct callback_register event = {
+               .type = CALLBACKTYPE_event,
+               .address = (unsigned long)&xen_event_callback,
+       };
+
+       if (!cpu)
+               return;
+
+       /* This should be piggyback when setup vcpu guest context */
+       BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
+
+       for (i = 0; i < saved_irq_cnt; i++)
+               xen_register_percpu_irq(saved_percpu_irqs[i].irq,
+                       saved_percpu_irqs[i].action, 0);
+#endif /* CONFIG_SMP */
+}
+#endif /* CONFIG_XEN */
+
 void
 register_percpu_irq (ia64_vector vec, struct irqaction *action)
 {
@@ -237,6 +382,10 @@ register_percpu_irq (ia64_vector vec, st
 
        for (irq = 0; irq < NR_IRQS; ++irq)
                if (irq_to_vector(irq) == vec) {
+#ifdef CONFIG_XEN
+                       if (is_running_on_xen())
+                               return xen_register_percpu_irq(vec, action, 1);
+#endif
                        desc = irq_descp(irq);
                        desc->status |= IRQ_PER_CPU;
                        desc->handler = &irq_type_ia64_lsapic;
@@ -248,6 +397,21 @@ void __init
 void __init
 init_IRQ (void)
 {
+#ifdef CONFIG_XEN
+       /* Maybe put into platform_irq_init later */
+       if (is_running_on_xen()) {
+               struct callback_register event = {
+                       .type = CALLBACKTYPE_event,
+                       .address = (unsigned long)&xen_event_callback,
+               };
+               xen_init_IRQ();
+               BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
+               late_time_init = xen_bind_early_percpu_irq;
+#ifdef CONFIG_SMP
+               register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
+#endif /* CONFIG_SMP */
+       }
+#endif /* CONFIG_XEN */
        register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
 #ifdef CONFIG_SMP
        register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
@@ -266,10 +430,33 @@ ia64_send_ipi (int cpu, int vector, int 
        unsigned long phys_cpu_id;
 
 #ifdef CONFIG_XEN
-        if (running_on_xen) {
-                extern void xen_send_ipi (int cpu, int vec);
-                xen_send_ipi (cpu, vector);
-                return;
+        if (is_running_on_xen()) {
+               int irq = -1;
+
+               /* TODO: we need to call vcpu_up here */
+               if (unlikely(vector == ap_wakeup_vector)) {
+                       extern void xen_send_ipi (int cpu, int vec);
+                       xen_send_ipi (cpu, vector);
+                       //vcpu_prepare_and_up(cpu);
+                       return;
+               }
+
+               switch(vector) {
+               case IA64_IPI_VECTOR:
+                       irq = per_cpu(ipi_to_irq, cpu)[IPI_VECTOR];
+                       break;
+               case IA64_IPI_RESCHEDULE:
+                       irq = per_cpu(ipi_to_irq, cpu)[RESCHEDULE_VECTOR];
+                       break;
+               default:
+                       printk(KERN_WARNING"Unsupported IPI type 0x%x\n", vector);
+                       irq = 0;
+                       break;
+               }               
+       
+               BUG_ON(irq < 0);
+               notify_remote_via_irq(irq);
+               return;
         }
 #endif /* CONFIG_XEN */
 
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/ia64/kernel/setup.c
--- a/arch/ia64/kernel/setup.c  Wed Jun 07 19:53:53 2006 -0400
+++ b/arch/ia64/kernel/setup.c  Thu Jun 08 15:10:05 2006 -0400
@@ -248,7 +248,7 @@ reserve_memory (void)
        n++;
 
 #ifdef CONFIG_XEN
-       if (running_on_xen) {
+       if (is_running_on_xen()) {
                rsvd_region[n].start = (unsigned long)__va((HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
                rsvd_region[n].end   = rsvd_region[n].start + PAGE_SIZE;
                n++;
@@ -347,8 +347,14 @@ early_console_setup (char *cmdline)
        int earlycons = 0;
 
 #ifdef CONFIG_XEN
-       if (!early_xen_console_setup(cmdline))
+#ifndef CONFIG_IA64_HP_SIM
+       if (is_running_on_xen()) {
+               extern struct console hpsim_cons;
+               hpsim_cons.flags |= CON_BOOT;
+               register_console(&hpsim_cons);
                earlycons++;
+       }
+#endif
 #endif
 #ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
        {
@@ -419,7 +425,7 @@ setup_arch (char **cmdline_p)
 {
        unw_init();
 #ifdef CONFIG_XEN
-       if (running_on_xen)
+       if (is_running_on_xen())
                setup_xen_features();
 #endif
 
@@ -500,7 +506,7 @@ setup_arch (char **cmdline_p)
 # endif
        }
 #ifdef CONFIG_XEN
-       if (running_on_xen) {
+       if (is_running_on_xen()) {
                extern shared_info_t *HYPERVISOR_shared_info;
                extern int xen_init (void);
 
@@ -911,6 +917,13 @@ cpu_init (void)
        /* size of physical stacked register partition plus 8 bytes: */
        __get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
        platform_cpu_init();
+#ifdef CONFIG_XEN
+       /* Need to be moved into platform_cpu_init later */
+       if (is_running_on_xen()) {
+               extern void xen_smp_intr_init(void);
+               xen_smp_intr_init();
+       }
+#endif
        pm_idle = default_idle;
 }
 
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/ia64/xen/Makefile
--- a/arch/ia64/xen/Makefile    Wed Jun 07 19:53:53 2006 -0400
+++ b/arch/ia64/xen/Makefile    Thu Jun 08 15:10:05 2006 -0400
@@ -2,7 +2,7 @@
 # Makefile for Xen components
 #
 
-obj-y := hypercall.o xenivt.o xenentry.o xensetup.o xenpal.o xenhpski.o xenconsole.o
+obj-y := hypercall.o xenivt.o xenentry.o xensetup.o xenpal.o xenhpski.o
 
 obj-$(CONFIG_XEN_IA64_DOM0_VP) += hypervisor.o pci-dma-xen.o util.o
 pci-dma-xen-$(CONFIG_XEN_IA64_DOM0_VP) := ../../i386/kernel/pci-dma-xen.o
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/ia64/xen/drivers/xenia64_init.c
--- a/arch/ia64/xen/drivers/xenia64_init.c      Wed Jun 07 19:53:53 2006 -0400
+++ b/arch/ia64/xen/drivers/xenia64_init.c      Thu Jun 08 15:10:05 2006 -0400
@@ -33,7 +33,6 @@ int xen_init(void)
                s->arch.start_info_pfn, xen_start_info->nr_pages,
                xen_start_info->flags);
 
-       evtchn_init();
        initialized = 1;
        return 0;
 }
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/ia64/xen/hypervisor.c
--- a/arch/ia64/xen/hypervisor.c        Wed Jun 07 19:53:53 2006 -0400
+++ b/arch/ia64/xen/hypervisor.c        Thu Jun 08 15:10:05 2006 -0400
@@ -314,12 +314,6 @@ gnttab_map_grant_ref_pre(struct gnttab_m
        uint32_t flags;
 
        flags = uop->flags;
-       if (flags & GNTMAP_readonly) {
-#if 0
-               xprintd("GNTMAP_readonly is not supported yet\n");
-#endif
-               flags &= ~GNTMAP_readonly;
-       }
 
        if (flags & GNTMAP_host_map) {
                if (flags & GNTMAP_application_map) {
@@ -360,52 +354,179 @@ struct address_space xen_ia64_foreign_du
 
 ///////////////////////////////////////////////////////////////////////////
 // foreign mapping
+#include <linux/efi.h>
+#include <asm/meminit.h> // for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}()
+
+static unsigned long privcmd_resource_min = 0;
+// Xen/ia64 currently can handle pseudo physical address bits up to
+// (PAGE_SHIFT * 3)
+static unsigned long privcmd_resource_max = GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
+static unsigned long privcmd_resource_align = IA64_GRANULE_SIZE;
+
+static unsigned long
+md_end_addr(const efi_memory_desc_t *md)
+{
+       return md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+}
+
+#define XEN_IA64_PRIVCMD_LEAST_GAP_SIZE        (1024 * 1024 * 1024UL)
+static int
+xen_ia64_privcmd_check_size(unsigned long start, unsigned long end)
+{
+       return (start < end &&
+               (end - start) > XEN_IA64_PRIVCMD_LEAST_GAP_SIZE);
+}
+
+static int __init
+xen_ia64_privcmd_init(void)
+{
+       void *efi_map_start, *efi_map_end, *p;
+       u64 efi_desc_size;
+       efi_memory_desc_t *md;
+       unsigned long tmp_min;
+       unsigned long tmp_max;
+       unsigned long gap_size;
+       unsigned long prev_end;
+
+       if (!is_running_on_xen())
+               return -1;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       // at first check the used highest address
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               // nothing
+       }
+       md = p - efi_desc_size;
+       privcmd_resource_min = GRANULEROUNDUP(md_end_addr(md));
+       if (xen_ia64_privcmd_check_size(privcmd_resource_min,
+                                       privcmd_resource_max)) {
+               goto out;
+       }
+
+       // the used highest address is too large. try to find the largest gap.
+       tmp_min = privcmd_resource_max;
+       tmp_max = 0;
+       gap_size = 0;
+       prev_end = 0;
+       for (p = efi_map_start;
+            p < efi_map_end - efi_desc_size;
+            p += efi_desc_size) {
+               unsigned long end;
+               efi_memory_desc_t* next;
+               unsigned long next_start;
+
+               md = p;
+               end = md_end_addr(md);
+               if (end > privcmd_resource_max) {
+                       break;
+               }
+               if (end < prev_end) {
+                       // work around. 
+                       // Xen may pass incompletely sorted memory
+                       // descriptors like
+                       // [x, x + length]
+                       // [x, x]
+                       // this order should be reversed.
+                       continue;
+               }
+               next = p + efi_desc_size;
+               next_start = next->phys_addr;
+               if (next_start > privcmd_resource_max) {
+                       next_start = privcmd_resource_max;
+               }
+               if (end < next_start && gap_size < (next_start - end)) {
+                       tmp_min = end;
+                       tmp_max = next_start;
+                       gap_size = tmp_max - tmp_min;
+               }
+               prev_end = end;
+       }
+
+       privcmd_resource_min = GRANULEROUNDUP(tmp_min);
+       if (xen_ia64_privcmd_check_size(privcmd_resource_min, tmp_max)) {
+               privcmd_resource_max = tmp_max;
+               goto out;
+       }
+
+       privcmd_resource_min = tmp_min;
+       privcmd_resource_max = tmp_max;
+       if (!xen_ia64_privcmd_check_size(privcmd_resource_min,
+                                        privcmd_resource_max)) {
+               // Any large enough gap isn't found.
+               // go ahead anyway with the warning hoping that large region
+               // won't be requested.
+               printk(KERN_WARNING "xen privcmd: large enough region for privcmd mmap is not found.\n");
+       }
+
+out:
+       printk(KERN_INFO "xen privcmd uses pseudo physical addr range [0x%lx, 0x%lx] (%ldMB)\n",
+              privcmd_resource_min, privcmd_resource_max, 
+              (privcmd_resource_max - privcmd_resource_min) >> 20);
+       BUG_ON(privcmd_resource_min >= privcmd_resource_max);
+       return 0;
+}
+late_initcall(xen_ia64_privcmd_init);
 
 struct xen_ia64_privcmd_entry {
        atomic_t        map_count;
-       struct page*    page;
+#define INVALID_GPFN   (~0UL)
+       unsigned long   gpfn;
+};
+
+struct xen_ia64_privcmd_range {
+       atomic_t                        ref_count;
+       unsigned long                   pgoff; // in PAGE_SIZE
+       struct resource*                res;
+
+       unsigned long                   num_entries;
+       struct xen_ia64_privcmd_entry   entries[0];
+};
+
+struct xen_ia64_privcmd_vma {
+       struct xen_ia64_privcmd_range*  range;
+
+       unsigned long                   num_entries;
+       struct xen_ia64_privcmd_entry*  entries;
 };
 
 static void
 xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry* entry)
 {
        atomic_set(&entry->map_count, 0);
-       entry->page = NULL;
-}
-
-//TODO alloc_page() to allocate pseudo physical address space is 
-//     waste of memory.
-//     When vti domain is created, qemu maps all of vti domain pages which 
-//     reaches to several hundred megabytes at least.
-//     remove alloc_page().
+       entry->gpfn = INVALID_GPFN;
+}
+
 static int
 xen_ia64_privcmd_entry_mmap(struct vm_area_struct* vma,
                            unsigned long addr,
-                           struct xen_ia64_privcmd_entry* entry,
+                           struct xen_ia64_privcmd_range* privcmd_range,
+                           int i,
                            unsigned long mfn,
                            pgprot_t prot,
                            domid_t domid)
 {
        int error = 0;
-       struct page* page;
+       struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
        unsigned long gpfn;
+       unsigned long flags;
 
        BUG_ON((addr & ~PAGE_MASK) != 0);
        BUG_ON(mfn == INVALID_MFN);
 
-       if (entry->page != NULL) {
+       if (entry->gpfn != INVALID_GPFN) {
                error = -EBUSY;
                goto out;
        }
-       page = alloc_page(GFP_KERNEL);
-       if (page == NULL) {
-               error = -ENOMEM;
-               goto out;
-       }
-       gpfn = page_to_pfn(page);
-
-       error = HYPERVISOR_add_physmap(gpfn, mfn, 0/* prot:XXX */,
-                                      domid);
+       gpfn = (privcmd_range->res->start >> PAGE_SHIFT) + i;
+
+       flags = ASSIGN_writable;
+       if (pgprot_val(prot) == PROT_READ) {
+               flags = ASSIGN_readonly;
+       }
+       error = HYPERVISOR_add_physmap(gpfn, mfn, flags, domid);
        if (error != 0) {
                goto out;
        }
@@ -413,15 +534,13 @@ xen_ia64_privcmd_entry_mmap(struct vm_ar
        prot = vma->vm_page_prot;
        error = remap_pfn_range(vma, addr, gpfn, 1 << PAGE_SHIFT, prot);
        if (error != 0) {
-               (void)HYPERVISOR_zap_physmap(gpfn, 0);
-               error = HYPERVISOR_populate_physmap(gpfn, 0, 0);
+               error = HYPERVISOR_zap_physmap(gpfn, 0);
                if (error) {
                        BUG();//XXX
                }
-               __free_page(page);
        } else {
                atomic_inc(&entry->map_count);
-               entry->page = page;
+               entry->gpfn = gpfn;
        }
 
 out:
@@ -429,30 +548,28 @@ out:
 }
 
 static void
-xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_entry* entry)
-{
-       struct page* page = entry->page;
-       unsigned long gpfn = page_to_pfn(page);
+xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range* privcmd_range,
+                             int i)
+{
+       struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
+       unsigned long gpfn = entry->gpfn;
+       //gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
+       //      (vma->vm_pgoff - privcmd_range->pgoff);
        int error;
 
        error = HYPERVISOR_zap_physmap(gpfn, 0);
        if (error) {
                BUG();//XXX
        }
-
-       error = HYPERVISOR_populate_physmap(gpfn, 0, 0);
-       if (error) {
-               BUG();//XXX
-       }
-
-       entry->page = NULL;
-       __free_page(page);
+       entry->gpfn = INVALID_GPFN;
 }
 
 static int
-xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_entry* entry)
-{
-       if (entry->page != NULL) {
+xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range* privcmd_range,
+                           int i)
+{
+       struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
+       if (entry->gpfn != INVALID_GPFN) {
                atomic_inc(&entry->map_count);
        } else {
                BUG_ON(atomic_read(&entry->map_count) != 0);
@@ -460,27 +577,15 @@ xen_ia64_privcmd_entry_open(struct xen_i
 }
 
 static int
-xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_entry* entry)
-{
-       if (entry->page != NULL && atomic_dec_and_test(&entry->map_count)) {
-               xen_ia64_privcmd_entry_munmap(entry);
-       }
-}
-
-struct xen_ia64_privcmd_range {
-       atomic_t                        ref_count;
-       unsigned long                   pgoff; // in PAGE_SIZE
-
-       unsigned long                   num_entries;
-       struct xen_ia64_privcmd_entry   entries[0];
-};
-
-struct xen_ia64_privcmd_vma {
-       struct xen_ia64_privcmd_range*  range;
-
-       unsigned long                   num_entries;
-       struct xen_ia64_privcmd_entry*  entries;
-};
+xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range* privcmd_range,
+                            int i)
+{
+       struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
+       if (entry->gpfn != INVALID_GPFN &&
+           atomic_dec_and_test(&entry->map_count)) {
+               xen_ia64_privcmd_entry_munmap(privcmd_range, i);
+       }
+}
 
 static void xen_ia64_privcmd_vma_open(struct vm_area_struct* vma);
 static void xen_ia64_privcmd_vma_close(struct vm_area_struct* vma);
@@ -507,7 +612,7 @@ __xen_ia64_privcmd_vma_open(struct vm_ar
        privcmd_vma->entries = &privcmd_range->entries[entry_offset];
        vma->vm_private_data = privcmd_vma;
        for (i = 0; i < privcmd_vma->num_entries; i++) {
-               xen_ia64_privcmd_entry_open(&privcmd_vma->entries[i]);
+               xen_ia64_privcmd_entry_open(privcmd_range, entry_offset + i);
        }
 
        vma->vm_private_data = privcmd_vma;
@@ -533,10 +638,11 @@ xen_ia64_privcmd_vma_close(struct vm_are
        struct xen_ia64_privcmd_vma* privcmd_vma =
                (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
        struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
+       unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
        unsigned long i;
 
        for (i = 0; i < privcmd_vma->num_entries; i++) {
-               xen_ia64_privcmd_entry_close(&privcmd_vma->entries[i]);
+               xen_ia64_privcmd_entry_close(privcmd_range, entry_offset + i);
        }
        vma->vm_private_data = NULL;
        kfree(privcmd_vma);
@@ -547,9 +653,11 @@ xen_ia64_privcmd_vma_close(struct vm_are
                        struct xen_ia64_privcmd_entry* entry =
                                &privcmd_range->entries[i];
                        BUG_ON(atomic_read(&entry->map_count) != 0);
-                       BUG_ON(entry->page != NULL);
+                       BUG_ON(entry->gpfn != INVALID_GPFN);
                }
 #endif
+               release_resource(privcmd_range->res);
+               kfree(privcmd_range->res);
                vfree(privcmd_range);
        }
 }
@@ -557,13 +665,18 @@ int
 int
 privcmd_mmap(struct file * file, struct vm_area_struct * vma)
 {
-       unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-       struct xen_ia64_privcmd_range* privcmd_range;
-       struct xen_ia64_privcmd_vma* privcmd_vma;
+       int error;
+       unsigned long size = vma->vm_end - vma->vm_start;
+       unsigned long num_entries = size >> PAGE_SHIFT;
+       struct xen_ia64_privcmd_range* privcmd_range = NULL;
+       struct xen_ia64_privcmd_vma* privcmd_vma = NULL;
+       struct resource* res = NULL;
        unsigned long i;
-       BUG_ON(!running_on_xen);
+       BUG_ON(!is_running_on_xen());
 
        BUG_ON(file->private_data != NULL);
+
+       error = -ENOMEM;
        privcmd_range =
                vmalloc(sizeof(*privcmd_range) +
                        sizeof(privcmd_range->entries[0]) * num_entries);
@@ -574,6 +687,18 @@ privcmd_mmap(struct file * file, struct 
        if (privcmd_vma == NULL) {
                goto out_enomem1;
        }
+       res = kzalloc(sizeof(*res), GFP_KERNEL);
+       if (res == NULL) {
+               goto out_enomem1;
+       }
+       res->name = "Xen privcmd mmap";
+       error = allocate_resource(&iomem_resource, res, size,
+                                 privcmd_resource_min, privcmd_resource_max,
+                                 privcmd_resource_align, NULL, NULL);
+       if (error) {
+               goto out_enomem1;
+       }
+       privcmd_range->res = res;
 
        /* DONTCOPY is essential for Xen as copy_page_range is broken. */
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
@@ -589,10 +714,11 @@ privcmd_mmap(struct file * file, struct 
        return 0;
 
 out_enomem1:
+       kfree(res);
        kfree(privcmd_vma);
 out_enomem0:
        vfree(privcmd_range);
-       return -ENOMEM;
+       return error;
 }
 
 int
@@ -605,10 +731,13 @@ direct_remap_pfn_range(struct vm_area_st
 {
        struct xen_ia64_privcmd_vma* privcmd_vma =
                (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
+       struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
+       unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
+
        unsigned long i;
        unsigned long offset;
        int error = 0;
-       BUG_ON(!running_on_xen);
+       BUG_ON(!is_running_on_xen());
 
 #if 0
        if (prot != vm->vm_page_prot) {
@@ -618,9 +747,7 @@ direct_remap_pfn_range(struct vm_area_st
 
        i = (address - vma->vm_start) >> PAGE_SHIFT;
        for (offset = 0; offset < size; offset += PAGE_SIZE) {
-               struct xen_ia64_privcmd_entry* entry =
-                       &privcmd_vma->entries[i];
-               error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, entry, mfn, prot, domid);
+               error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, privcmd_range, entry_offset + i, mfn, prot, domid);
                if (error != 0) {
                        break;
                }
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/ia64/xen/xenivt.S
--- a/arch/ia64/xen/xenivt.S    Wed Jun 07 19:53:53 2006 -0400
+++ b/arch/ia64/xen/xenivt.S    Thu Jun 08 15:10:05 2006 -0400
@@ -206,9 +206,9 @@ ENTRY(vhpt_miss)
        mov r24=r8
        mov r8=r18
        ;;
-(p10)  XEN_HYPER_ITC_D
-       ;;
-(p11)  XEN_HYPER_ITC_I
+(p10)  XEN_HYPER_ITC_I
+       ;;
+(p11)  XEN_HYPER_ITC_D
        ;;
        mov r8=r24
        ;;
@@ -799,7 +799,16 @@ 1: ld8 r18=[r17]
        ;;
 (p6)   cmp.eq p6,p7=r26,r18                    // Only compare if page is present
        ;;
+#ifdef CONFIG_XEN
+(p6)   mov r18=r8
+(p6)   mov r8=r25
+       ;;
+(p6)   XEN_HYPER_ITC_D
+       ;;
+(p6)   mov r8=r18
+#else
 (p6)   itc.d r25                               // install updated PTE
+#endif 
        ;;
        /*
         * Tell the assemblers dependency-violation checker that the above "itc" instructions
@@ -2038,6 +2047,7 @@ GLOBAL_ENTRY(xen_bsw1)
        ld8 r28=[r30],16; ld8 r29=[r31],16;;
        ld8 r30=[r30]; ld8 r31=[r31];;
        br.ret.sptk.many b0
+END(xen_bsw1)
 #endif
 
        .org ia64_ivt+0x7f00
@@ -2130,5 +2140,32 @@ non_ia32_syscall:
        mov rp=r15
        br.ret.sptk.many rp
 END(dispatch_to_ia32_handler)
-
 #endif /* CONFIG_IA32_SUPPORT */
+
+#ifdef CONFIG_XEN
+       .section .text,"ax"
+GLOBAL_ENTRY(xen_event_callback)
+       mov r31=pr              // prepare to save predicates
+       ;;
+       SAVE_MIN_WITH_COVER     // uses r31; defines r2 and r3
+       ;;
+       movl r3=XSI_PSR_IC
+       mov r14=1
+       ;;
+       st4 [r3]=r14
+       ;;
+       adds r3=8,r2            // set up second base pointer for SAVE_REST
+       srlz.i                  // ensure everybody knows psr.ic is back on
+       ;;
+       SAVE_REST
+       ;;
+       alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
+       add out0=16,sp          // pass pointer to pt_regs as first arg
+       ;;
+       srlz.d                  // make sure we see the effect of cr.ivr
+       movl r14=ia64_leave_kernel
+       ;;
+       mov rp=r14
+       br.call.sptk.many b6=evtchn_do_upcall
+END(xen_event_callback)
+#endif
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/powerpc/Kconfig
--- a/arch/powerpc/Kconfig      Wed Jun 07 19:53:53 2006 -0400
+++ b/arch/powerpc/Kconfig      Thu Jun 08 15:10:05 2006 -0400
@@ -400,9 +400,14 @@ config PPC_XEN
 config PPC_XEN
        bool "Enable Xen compatible kernel"
        depends PPC_MULTIPLATFORM && PPC64 && PPC_MAPLE && PPC_PSERIES && SMP
+       select XEN
        select XEN_PRIVILEGED_GUEST
        select XEN_UNPRIVILEGED_GUEST
        select XEN_XENCOMM
+       reverse XEN_BALLOON
+       reverse XEN_REBOOT
+       reverse XEN_SMPBOOT
+       
        help
          This option will compile a kernel compatible with Xen hypervisor
 
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/x86_64/kernel/process-xen.c
--- a/arch/x86_64/kernel/process-xen.c  Wed Jun 07 19:53:53 2006 -0400
+++ b/arch/x86_64/kernel/process-xen.c  Thu Jun 08 15:10:05 2006 -0400
@@ -60,6 +60,8 @@
 #include <asm/ia32.h>
 #include <asm/idle.h>
 
+#include <xen/cpu_hotplug.h>
+
 asmlinkage extern void ret_from_fork(void);
 
 unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
@@ -111,8 +113,6 @@ void exit_idle(void)
 }
 
 /* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
-extern void stop_hz_timer(void);
-extern void start_hz_timer(void);
 void xen_idle(void)
 {
        local_irq_disable();
@@ -122,10 +122,7 @@ void xen_idle(void)
        else {
                clear_thread_flag(TIF_POLLING_NRFLAG);
                smp_mb__after_clear_bit();
-               stop_hz_timer();
-               /* Blocking includes an implicit local_irq_enable(). */
-               HYPERVISOR_block();
-               start_hz_timer();
+               safe_halt();
                set_thread_flag(TIF_POLLING_NRFLAG);
        }
 }
@@ -138,11 +135,7 @@ static inline void play_dead(void)
        cpu_clear(smp_processor_id(), cpu_initialized);
        preempt_enable_no_resched();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
-       /* Same as drivers/xen/core/smpboot.c:cpu_bringup(). */
-       cpu_init();
-       touch_softlockup_watchdog();
-       preempt_disable();
-       local_irq_enable();
+       cpu_bringup();
 }
 #else
 static inline void play_dead(void)
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/x86_64/kernel/setup-xen.c
--- a/arch/x86_64/kernel/setup-xen.c    Wed Jun 07 19:53:53 2006 -0400
+++ b/arch/x86_64/kernel/setup-xen.c    Thu Jun 08 15:10:05 2006 -0400
@@ -693,13 +693,6 @@ void __init setup_arch(char **cmdline_p)
 
        setup_xen_features();
 
-       if (xen_feature(XENFEAT_auto_translated_physmap) &&
-           xen_start_info->shared_info < xen_start_info->nr_pages) {
-               HYPERVISOR_shared_info =
-                       (shared_info_t *)__va(xen_start_info->shared_info);
-               memset(empty_zero_page, 0, sizeof(empty_zero_page));
-       }
-
        HYPERVISOR_vm_assist(VMASST_CMD_enable,
                             VMASST_TYPE_writable_pagetables);
 
@@ -860,14 +853,6 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_XEN
        {
                int i, j, k, fpp;
-               unsigned long va;
-
-               /* 'Initial mapping' of initrd must be destroyed. */
-               for (va = xen_start_info->mod_start;
-                    va < (xen_start_info->mod_start+xen_start_info->mod_len);
-                    va += PAGE_SIZE) {
-                       HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
-               }
 
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        /* Make sure we have a large enough P->M table. */
@@ -882,14 +867,6 @@ void __init setup_arch(char **cmdline_p)
                                __pa(xen_start_info->mfn_list),
                                PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
                                                sizeof(unsigned long))));
-
-                       /* Destroyed 'initial mapping' of old p2m table. */
-                       for (va = xen_start_info->mfn_list;
-                            va < (xen_start_info->mfn_list +
-                                  (xen_start_info->nr_pages*sizeof(unsigned 
long)));
-                            va += PAGE_SIZE) {
-                               HYPERVISOR_update_va_mapping(va, __pte_ma(0), 
0);
-                       }
 
                        /*
                         * Initialise the list of the frames that specify the
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/x86_64/kernel/smp-xen.c
--- a/arch/x86_64/kernel/smp-xen.c      Wed Jun 07 19:53:53 2006 -0400
+++ b/arch/x86_64/kernel/smp-xen.c      Thu Jun 08 15:10:05 2006 -0400
@@ -488,7 +488,7 @@ static void smp_really_stop_cpu(void *du
 {
        smp_stop_cpu(); 
        for (;;) 
-               asm("hlt"); 
+               halt();
 } 
 
 void smp_send_stop(void)
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/x86_64/mm/init-xen.c
--- a/arch/x86_64/mm/init-xen.c Wed Jun 07 19:53:53 2006 -0400
+++ b/arch/x86_64/mm/init-xen.c Thu Jun 08 15:10:05 2006 -0400
@@ -710,7 +710,34 @@ void __meminit init_memory_mapping(unsig
                        set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
        }
 
-       BUG_ON(!after_bootmem && start_pfn != table_end);
+       if (!after_bootmem) {
+               BUG_ON(start_pfn != table_end);
+
+               /* Re-vector virtual addresses pointing into the initial
+                  mapping to the just-established permanent ones. */
+               xen_start_info = __va(__pa(xen_start_info));
+               xen_start_info->pt_base = (unsigned long)
+                       __va(__pa(xen_start_info->pt_base));
+               if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+                       phys_to_machine_mapping =
+                               __va(__pa(xen_start_info->mfn_list));
+                       xen_start_info->mfn_list = (unsigned long)
+                               phys_to_machine_mapping;
+               }
+               if (xen_start_info->mod_start)
+                       xen_start_info->mod_start = (unsigned long)
+                               __va(__pa(xen_start_info->mod_start));
+
+               /* Destroy the Xen-created mappings beyond the kernel image as
+                * well as the temporary mappings created above. Prevents
+                * overlap with modules area (if init mapping is very big).
+                */
+               start = PAGE_ALIGN((unsigned long)_end);
+               end   = __START_KERNEL_map + (table_end << PAGE_SHIFT);
+               for (; start < end; start += PAGE_SIZE)
+                       WARN_ON(HYPERVISOR_update_va_mapping(
+                               start, __pte_ma(0), 0));
+       }
 
        __flush_tlb_all();
 }
@@ -796,15 +823,11 @@ void __init paging_init(void)
        free_area_init_node(0, NODE_DATA(0), zones,
                            __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
 
-       if (!xen_feature(XENFEAT_auto_translated_physmap) ||
-           xen_start_info->shared_info >= xen_start_info->nr_pages) {
-               /* Switch to the real shared_info page, and clear the
-                * dummy page. */
-               set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
-               HYPERVISOR_shared_info =
-                       (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
-               memset(empty_zero_page, 0, sizeof(empty_zero_page));
-       }
+       /* Switch to the real shared_info page, and clear the
+        * dummy page. */
+       set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
+       HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+       memset(empty_zero_page, 0, sizeof(empty_zero_page));
 
        init_mm.context.pinned = 1;
 
diff -r 760669a37a3a -r 5c0c59eb5f73 drivers/xen/Kconfig
--- a/drivers/xen/Kconfig       Wed Jun 07 19:53:53 2006 -0400
+++ b/drivers/xen/Kconfig       Thu Jun 08 15:10:05 2006 -0400
@@ -6,7 +6,7 @@ mainmenu "Xen Configuration"
 
 config XEN
        bool
-       default y if X86_XEN || X86_64_XEN || PPC_XEN
+       default y if X86_XEN || X86_64_XEN
        help
          This is the Linux Xen port.
 
@@ -234,24 +234,27 @@ config XEN_UTIL
 
 config XEN_BALLOON
        bool
-       default y if ! PPC_XEN
+       default y
+
+config XEN_DEVMEM
+       bool
+       default y
 
 config XEN_SKBUFF
        bool
+       default y
        depends on NET
-       default y
 
 config XEN_REBOOT
        bool
-       default y if ! PPC_XEN
+       default y
 
 config XEN_SMPBOOT
        bool
+       default y
        depends on SMP
-       default y if ! PPC_XEN
 
 config XEN_XENCOMM
        bool
        default n
-
 endif
diff -r 760669a37a3a -r 5c0c59eb5f73 drivers/xen/Makefile
--- a/drivers/xen/Makefile      Wed Jun 07 19:53:53 2006 -0400
+++ b/drivers/xen/Makefile      Thu Jun 08 15:10:05 2006 -0400
@@ -1,5 +1,4 @@ obj-y   += core/
 obj-y  += core/
-obj-y  += char/
 obj-y  += console/
 obj-y  += evtchn/
 obj-y  += privcmd/
@@ -7,6 +6,7 @@ obj-y   += xenbus/
 
 obj-$(CONFIG_XEN_UTIL)                 += util.o
 obj-$(CONFIG_XEN_BALLOON)              += balloon/
+obj-$(CONFIG_XEN_DEVMEM)               += char/
 obj-$(CONFIG_XEN_BLKDEV_BACKEND)       += blkback/
 obj-$(CONFIG_XEN_NETDEV_BACKEND)       += netback/
 obj-$(CONFIG_XEN_TPMDEV_BACKEND)       += tpmback/
diff -r 760669a37a3a -r 5c0c59eb5f73 drivers/xen/balloon/balloon.c
--- a/drivers/xen/balloon/balloon.c     Wed Jun 07 19:53:53 2006 -0400
+++ b/drivers/xen/balloon/balloon.c     Thu Jun 08 15:10:05 2006 -0400
@@ -360,12 +360,6 @@ static void balloon_process(void *unused
 /* Resets the Xen limit, sets new target, and kicks off processing. */
 static void set_new_target(unsigned long target)
 {
-       unsigned long min_target;
-
-       /* Do not allow target to reduce below 2% of maximum memory size. */
-       min_target = max_pfn / 50;
-       target = max(target, min_target);
-
        /* No need for lock. Not read-modify-write updates. */
        hard_limit   = ~0UL;
        target_pages = target;
diff -r 760669a37a3a -r 5c0c59eb5f73 drivers/xen/blkfront/blkfront.c
--- a/drivers/xen/blkfront/blkfront.c   Wed Jun 07 19:53:53 2006 -0400
+++ b/drivers/xen/blkfront/blkfront.c   Thu Jun 08 15:10:05 2006 -0400
@@ -452,10 +452,6 @@ int blkif_ioctl(struct inode *inode, str
                      command, (long)argument, inode->i_rdev);
 
        switch (command) {
-       case HDIO_GETGEO:
-               /* return ENOSYS to use defaults */
-               return -ENOSYS;
-
        case CDROMMULTISESSION:
                DPRINTK("FIXME: support multisession CDs later\n");
                for (i = 0; i < sizeof(struct cdrom_multisession); i++)
@@ -469,6 +465,23 @@ int blkif_ioctl(struct inode *inode, str
                return -EINVAL; /* same return as native Linux */
        }
 
+       return 0;
+}
+
+
+int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
+{
+       /* We don't have real geometry info, but let's at least return
+          values consistent with the size of the device */
+       sector_t nsect = get_capacity(bd->bd_disk);
+       sector_t cylinders = nsect;
+
+       hg->heads = 0xff;
+       hg->sectors = 0x3f;
+       sector_div(cylinders, hg->heads * hg->sectors);
+       hg->cylinders = cylinders;
+       if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
+               hg->cylinders = 0xffff;
        return 0;
 }
 
diff -r 760669a37a3a -r 5c0c59eb5f73 drivers/xen/blkfront/block.h
--- a/drivers/xen/blkfront/block.h      Wed Jun 07 19:53:53 2006 -0400
+++ b/drivers/xen/blkfront/block.h      Thu Jun 08 15:10:05 2006 -0400
@@ -140,6 +140,7 @@ extern int blkif_release(struct inode *i
 extern int blkif_release(struct inode *inode, struct file *filep);
 extern int blkif_ioctl(struct inode *inode, struct file *filep,
                        unsigned command, unsigned long argument);
+extern int blkif_getgeo(struct block_device *, struct hd_geometry *);
 extern int blkif_check(dev_t dev);
 extern int blkif_revalidate(dev_t dev);
 extern void do_blkif_request (request_queue_t *rq);
diff -r 760669a37a3a -r 5c0c59eb5f73 drivers/xen/blkfront/vbd.c
--- a/drivers/xen/blkfront/vbd.c        Wed Jun 07 19:53:53 2006 -0400
+++ b/drivers/xen/blkfront/vbd.c        Thu Jun 08 15:10:05 2006 -0400
@@ -91,6 +91,7 @@ static struct block_device_operations xl
        .open = blkif_open,
        .release = blkif_release,
        .ioctl  = blkif_ioctl,
+       .getgeo = blkif_getgeo
 };
 
 DEFINE_SPINLOCK(blkif_io_lock);
diff -r 760669a37a3a -r 5c0c59eb5f73 drivers/xen/core/Makefile
--- a/drivers/xen/core/Makefile Wed Jun 07 19:53:53 2006 -0400
+++ b/drivers/xen/core/Makefile Thu Jun 08 15:10:05 2006 -0400
@@ -1,15 +1,17 @@
+#
+# Makefile for the linux kernel.
+#
 #
 # Makefile for the linux kernel.
 #
 
 obj-y := evtchn.o gnttab.o features.o
 
-
 obj-$(CONFIG_PROC_FS)          += xen_proc.o
 obj-$(CONFIG_SYSFS)            += hypervisor_sysfs.o
 obj-$(CONFIG_HOTPLUG_CPU)      += cpu_hotplug.o
 obj-$(CONFIG_XEN_SYSFS)                += xen_sysfs.o
+obj-$(CONFIG_IA64)             += xenia64_init.o
 obj-$(CONFIG_XEN_SKBUFF)       += skbuff.o
 obj-$(CONFIG_XEN_REBOOT)       += reboot.o
 obj-$(CONFIG_XEN_SMPBOOT)      += smpboot.o
-obj-$(CONFIG_XEN_XENCOMM)      += xencomm.o
diff -r 760669a37a3a -r 5c0c59eb5f73 drivers/xen/core/cpu_hotplug.c
--- a/drivers/xen/core/cpu_hotplug.c    Wed Jun 07 19:53:53 2006 -0400
+++ b/drivers/xen/core/cpu_hotplug.c    Thu Jun 08 15:10:05 2006 -0400
@@ -160,7 +160,7 @@ void smp_resume(void)
                vcpu_hotplug(cpu);
 }
 
-int cpu_up_is_allowed(unsigned int cpu)
+int cpu_up_check(unsigned int cpu)
 {
        int rc = 0;
 
diff -r 760669a37a3a -r 5c0c59eb5f73 drivers/xen/core/smpboot.c
--- a/drivers/xen/core/smpboot.c        Wed Jun 07 19:53:53 2006 -0400
+++ b/drivers/xen/core/smpboot.c        Thu Jun 08 15:10:05 2006 -0400
@@ -87,9 +87,8 @@ void __init prefill_possible_map(void)
 
        for (i = 0; i < NR_CPUS; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
-               if (rc == -ENOENT)
-                       break;
-               cpu_set(i, cpu_possible_map);
+               if (rc >= 0)
+                       cpu_set(i, cpu_possible_map);
        }
 }
 
@@ -148,12 +147,17 @@ static void xen_smp_intr_exit(unsigned i
 }
 #endif
 
-static void cpu_bringup(void)
+void cpu_bringup(void)
 {
        cpu_init();
        touch_softlockup_watchdog();
        preempt_disable();
        local_irq_enable();
+}
+
+static void cpu_bringup_and_idle(void)
+{
+       cpu_bringup();
        cpu_idle();
 }
 
@@ -178,7 +182,7 @@ void cpu_initialize_context(unsigned int
        ctxt.user_regs.fs = 0;
        ctxt.user_regs.gs = 0;
        ctxt.user_regs.ss = __KERNEL_DS;
-       ctxt.user_regs.eip = (unsigned long)cpu_bringup;
+       ctxt.user_regs.eip = (unsigned long)cpu_bringup_and_idle;
        ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000; /* IOPL_RING1 */
 
        memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
@@ -202,7 +206,7 @@ void cpu_initialize_context(unsigned int
        ctxt.failsafe_callback_cs  = __KERNEL_CS;
        ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
 
-       ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;
+       ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
 #else /* __x86_64__ */
        ctxt.user_regs.cs = __KERNEL_CS;
        ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);
@@ -214,7 +218,7 @@ void cpu_initialize_context(unsigned int
        ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
        ctxt.syscall_callback_eip  = (unsigned long)system_call;
 
-       ctxt.ctrlreg[3] = virt_to_mfn(init_level4_pgt) << PAGE_SHIFT;
+       ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(init_level4_pgt));
 
        ctxt.gs_base_kernel = (unsigned long)(cpu_pda(cpu));
 #endif
@@ -395,7 +399,7 @@ int __devinit __cpu_up(unsigned int cpu)
 {
        int rc;
 
-       rc = cpu_up_is_allowed(cpu);
+       rc = cpu_up_check(cpu);
        if (rc)
                return rc;
 
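
The prefill_possible_map() change above is worth calling out: rather than stopping at the first VCPUOP_is_up failure, every slot up to NR_CPUS is probed and any vcpu the hypervisor answers for is marked possible, so a hole in the vcpu numbering no longer hides the vcpus behind it. A standalone sketch of the difference, separate from the patch, with a stubbed probe (the is_up_stub() table is an assumption for illustration):

    #include <stdio.h>

    #define NR_VCPUS 8
    #define ENOENT   2

    /* Stubbed VCPUOP_is_up: vcpus 0, 1, 3 and 4 exist, vcpu 2 is a hole. */
    static int is_up_stub(int cpu)
    {
            static const int exists[NR_VCPUS] = { 1, 1, 0, 1, 1, 0, 0, 0 };
            return exists[cpu] ? 0 : -ENOENT;
    }

    int main(void)
    {
            int i, old_way = 0, new_way = 0;

            for (i = 0; i < NR_VCPUS; i++) {        /* old behaviour: stop at first hole */
                    if (is_up_stub(i) == -ENOENT)
                            break;
                    old_way++;
            }

            for (i = 0; i < NR_VCPUS; i++)          /* new behaviour: probe every slot */
                    if (is_up_stub(i) >= 0)
                            new_way++;

            printf("possible cpus: old=%d new=%d\n", old_way, new_way);
            return 0;
    }
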
diff -r 760669a37a3a -r 5c0c59eb5f73 drivers/xen/netback/loopback.c
--- a/drivers/xen/netback/loopback.c    Wed Jun 07 19:53:53 2006 -0400
+++ b/drivers/xen/netback/loopback.c    Thu Jun 08 15:10:05 2006 -0400
@@ -146,11 +146,13 @@ static void loopback_construct(struct ne
        dev->hard_start_xmit = loopback_start_xmit;
        dev->get_stats       = loopback_get_stats;
        dev->set_multicast_list = loopback_set_multicast_list;
+       dev->change_mtu      = NULL; /* allow arbitrary mtu */
 
        dev->tx_queue_len    = 0;
 
        dev->features        = (NETIF_F_HIGHDMA |
                                NETIF_F_LLTX |
+                               NETIF_F_SG |
                                NETIF_F_IP_CSUM);
 
        SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
diff -r 760669a37a3a -r 5c0c59eb5f73 drivers/xen/netback/netback.c
--- a/drivers/xen/netback/netback.c     Wed Jun 07 19:53:53 2006 -0400
+++ b/drivers/xen/netback/netback.c     Thu Jun 08 15:10:05 2006 -0400
@@ -458,6 +458,9 @@ inline static void net_tx_action_dealloc
        dc = dealloc_cons;
        dp = dealloc_prod;
 
+       /* Ensure we see all indexes enqueued by netif_idx_release(). */
+       smp_rmb();
+
        /*
         * Free up any grants we have finished using
         */
@@ -487,6 +490,177 @@ inline static void net_tx_action_dealloc
        }
 }
 
+static void netbk_tx_err(netif_t *netif, RING_IDX end)
+{
+       RING_IDX cons = netif->tx.req_cons;
+
+       do {
+               netif_tx_request_t *txp = RING_GET_REQUEST(&netif->tx, cons);
+               make_tx_response(netif, txp->id, NETIF_RSP_ERROR);
+       } while (++cons < end);
+       netif->tx.req_cons = cons;
+       netif_schedule_work(netif);
+       netif_put(netif);
+}
+
+static int netbk_count_requests(netif_t *netif, netif_tx_request_t *txp,
+                               int work_to_do)
+{
+       netif_tx_request_t *first = txp;
+       RING_IDX cons = netif->tx.req_cons;
+       int frags = 1;
+
+       while (txp->flags & NETTXF_more_data) {
+               if (frags >= work_to_do) {
+                       DPRINTK("Need more frags\n");
+                       return -frags;
+               }
+
+               txp = RING_GET_REQUEST(&netif->tx, cons + frags);
+               if (txp->size > first->size) {
+                       DPRINTK("Frags galore\n");
+                       return -frags;
+               }
+
+               first->size -= txp->size;
+               frags++;
+
+               if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
+                       DPRINTK("txp->offset: %x, size: %u\n",
+                               txp->offset, txp->size);
+                       return -frags;
+               }
+       }
+
+       return frags;
+}
+
+static gnttab_map_grant_ref_t *netbk_get_requests(netif_t *netif,
+                                                 struct sk_buff *skb,
+                                                 gnttab_map_grant_ref_t *mop)
+{
+       struct skb_shared_info *shinfo = skb_shinfo(skb);
+       skb_frag_t *frags = shinfo->frags;
+       netif_tx_request_t *txp;
+       unsigned long pending_idx = *((u16 *)skb->data);
+       RING_IDX cons = netif->tx.req_cons + 1;
+       int i, start;
+
+       /* Skip first skb fragment if it is on same page as header fragment. */
+       start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+
+       for (i = start; i < shinfo->nr_frags; i++) {
+               txp = RING_GET_REQUEST(&netif->tx, cons++);
+               pending_idx = pending_ring[MASK_PEND_IDX(pending_cons++)];
+
+               gnttab_set_map_op(mop++, MMAP_VADDR(pending_idx),
+                                 GNTMAP_host_map | GNTMAP_readonly,
+                                 txp->gref, netif->domid);
+
+               memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
+               netif_get(netif);
+               pending_tx_info[pending_idx].netif = netif;
+               frags[i].page = (void *)pending_idx;
+       }
+
+       return mop;
+}
+
+static int netbk_tx_check_mop(struct sk_buff *skb,
+                              gnttab_map_grant_ref_t **mopp)
+{
+       gnttab_map_grant_ref_t *mop = *mopp;
+       int pending_idx = *((u16 *)skb->data);
+       netif_t *netif = pending_tx_info[pending_idx].netif;
+       netif_tx_request_t *txp;
+       struct skb_shared_info *shinfo = skb_shinfo(skb);
+       int nr_frags = shinfo->nr_frags;
+       int i, err, start;
+
+       /* Check status of header. */
+       err = mop->status;
+       if (unlikely(err)) {
+               txp = &pending_tx_info[pending_idx].req;
+               make_tx_response(netif, txp->id, NETIF_RSP_ERROR);
+               pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+               netif_put(netif);
+       } else {
+               set_phys_to_machine(
+                       __pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT,
+                       FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
+               grant_tx_handle[pending_idx] = mop->handle;
+       }
+
+       /* Skip first skb fragment if it is on same page as header fragment. */
+       start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+
+       for (i = start; i < nr_frags; i++) {
+               int j, newerr;
+
+               pending_idx = (unsigned long)shinfo->frags[i].page;
+
+               /* Check error status: if okay then remember grant handle. */
+               newerr = (++mop)->status;
+               if (likely(!newerr)) {
+                       set_phys_to_machine(
+                               __pa(MMAP_VADDR(pending_idx))>>PAGE_SHIFT,
+                               FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
+                       grant_tx_handle[pending_idx] = mop->handle;
+                       /* Had a previous error? Invalidate this fragment. */
+                       if (unlikely(err))
+                               netif_idx_release(pending_idx);
+                       continue;
+               }
+
+               /* Error on this fragment: respond to client with an error. */
+               txp = &pending_tx_info[pending_idx].req;
+               make_tx_response(netif, txp->id, NETIF_RSP_ERROR);
+               pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+               netif_put(netif);
+
+               /* Not the first error? Preceding frags already invalidated. */
+               if (err)
+                       continue;
+
+               /* First error: invalidate header and preceding fragments. */
+               pending_idx = *((u16 *)skb->data);
+               netif_idx_release(pending_idx);
+               for (j = start; j < i; j++) {
+                       pending_idx = (unsigned long)shinfo->frags[i].page;
+                       netif_idx_release(pending_idx);
+               }
+
+               /* Remember the error: invalidate all subsequent fragments. */
+               err = newerr;
+       }
+
+       *mopp = mop + 1;
+       return err;
+}
+
+static void netbk_fill_frags(struct sk_buff *skb)
+{
+       struct skb_shared_info *shinfo = skb_shinfo(skb);
+       int nr_frags = shinfo->nr_frags;
+       int i;
+
+       for (i = 0; i < nr_frags; i++) {
+               skb_frag_t *frag = shinfo->frags + i;
+               netif_tx_request_t *txp;
+               unsigned long pending_idx;
+
+               pending_idx = (unsigned long)frag->page;
+               txp = &pending_tx_info[pending_idx].req;
+               frag->page = virt_to_page(MMAP_VADDR(pending_idx));
+               frag->size = txp->size;
+               frag->page_offset = txp->offset;
+
+               skb->len += txp->size;
+               skb->data_len += txp->size;
+               skb->truesize += txp->size;
+       }
+}
+
 /* Called after netfront has transmitted */
 static void net_tx_action(unsigned long unused)
 {
@@ -504,7 +678,7 @@ static void net_tx_action(unsigned long 
                net_tx_action_dealloc();
 
        mop = tx_map_ops;
-       while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
+       while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
                !list_empty(&net_schedule_list)) {
                /* Get a netif from the list with work to do. */
                ent = net_schedule_list.next;
@@ -552,38 +726,44 @@ static void net_tx_action(unsigned long 
                }
                netif->remaining_credit -= txreq.size;
 
-               netif->tx.req_cons++;
-
-               netif_schedule_work(netif);
-
-               if (unlikely(txreq.size < ETH_HLEN) || 
-                   unlikely(txreq.size > ETH_FRAME_LEN)) {
+               ret = netbk_count_requests(netif, &txreq, work_to_do);
+               if (unlikely(ret < 0)) {
+                       netbk_tx_err(netif, i - ret);
+                       continue;
+               }
+               i += ret;
+
+               if (unlikely(ret > MAX_SKB_FRAGS + 1)) {
+                       DPRINTK("Too many frags\n");
+                       netbk_tx_err(netif, i);
+                       continue;
+               }
+
+               if (unlikely(txreq.size < ETH_HLEN)) {
                        DPRINTK("Bad packet size: %d\n", txreq.size);
-                       make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-                       netif_put(netif);
+                       netbk_tx_err(netif, i);
                        continue; 
                }
 
                /* No crossing a page as the payload mustn't fragment. */
-               if (unlikely((txreq.offset + txreq.size) >= PAGE_SIZE)) {
+               if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
                        DPRINTK("txreq.offset: %x, size: %u, end: %lu\n", 
                                txreq.offset, txreq.size, 
                                (txreq.offset &~PAGE_MASK) + txreq.size);
-                       make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-                       netif_put(netif);
+                       netbk_tx_err(netif, i);
                        continue;
                }
 
                pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
 
-               data_len = (txreq.size > PKT_PROT_LEN) ?
+               data_len = (txreq.size > PKT_PROT_LEN &&
+                           ret < MAX_SKB_FRAGS + 1) ?
                        PKT_PROT_LEN : txreq.size;
 
                skb = alloc_skb(data_len+16, GFP_ATOMIC);
                if (unlikely(skb == NULL)) {
                        DPRINTK("Can't allocate a skb in start_xmit.\n");
-                       make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-                       netif_put(netif);
+                       netbk_tx_err(netif, i);
                        break;
                }
 
@@ -600,9 +780,23 @@ static void net_tx_action(unsigned long 
                pending_tx_info[pending_idx].netif = netif;
                *((u16 *)skb->data) = pending_idx;
 
+               __skb_put(skb, data_len);
+
+               skb_shinfo(skb)->nr_frags = ret - 1;
+               if (data_len < txreq.size) {
+                       skb_shinfo(skb)->nr_frags++;
+                       skb_shinfo(skb)->frags[0].page =
+                               (void *)(unsigned long)pending_idx;
+               }
+
                __skb_queue_tail(&tx_queue, skb);
 
                pending_cons++;
+
+               mop = netbk_get_requests(netif, skb, mop);
+
+               netif->tx.req_cons = i;
+               netif_schedule_work(netif);
 
                if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
                        break;
@@ -617,75 +811,56 @@ static void net_tx_action(unsigned long 
 
        mop = tx_map_ops;
        while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
+               netif_tx_request_t *txp;
+
                pending_idx = *((u16 *)skb->data);
                netif       = pending_tx_info[pending_idx].netif;
-               memcpy(&txreq, &pending_tx_info[pending_idx].req,
-                      sizeof(txreq));
+               txp         = &pending_tx_info[pending_idx].req;
 
                /* Check the remap error code. */
-               if (unlikely(mop->status)) {
+               if (unlikely(netbk_tx_check_mop(skb, &mop))) {
                        printk(KERN_ALERT "#### netback grant fails\n");
-                       make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-                       netif_put(netif);
+                       skb_shinfo(skb)->nr_frags = 0;
                        kfree_skb(skb);
-                       mop++;
-                       pending_ring[MASK_PEND_IDX(pending_prod++)] =
-                               pending_idx;
                        continue;
                }
-               set_phys_to_machine(
-                       __pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT,
-                       FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
-               grant_tx_handle[pending_idx] = mop->handle;
-
-               data_len = (txreq.size > PKT_PROT_LEN) ?
-                       PKT_PROT_LEN : txreq.size;
-
-               __skb_put(skb, data_len);
+
+               data_len = skb->len;
                memcpy(skb->data, 
-                      (void *)(MMAP_VADDR(pending_idx)|txreq.offset),
+                      (void *)(MMAP_VADDR(pending_idx)|txp->offset),
                       data_len);
-               if (data_len < txreq.size) {
+               if (data_len < txp->size) {
                        /* Append the packet payload as a fragment. */
-                       skb_shinfo(skb)->frags[0].page        = 
-                               virt_to_page(MMAP_VADDR(pending_idx));
-                       skb_shinfo(skb)->frags[0].size        =
-                               txreq.size - data_len;
-                       skb_shinfo(skb)->frags[0].page_offset = 
-                               txreq.offset + data_len;
-                       skb_shinfo(skb)->nr_frags = 1;
+                       txp->offset += data_len;
+                       txp->size -= data_len;
                } else {
                        /* Schedule a response immediately. */
                        netif_idx_release(pending_idx);
                }
-
-               skb->data_len  = txreq.size - data_len;
-               skb->len      += skb->data_len;
-               skb->truesize += skb->data_len;
-
-               skb->dev      = netif->dev;
-               skb->protocol = eth_type_trans(skb, skb->dev);
 
                /*
                 * Old frontends do not assert data_validated but we
                 * can infer it from csum_blank so test both flags.
                 */
-               if (txreq.flags & (NETTXF_data_validated|NETTXF_csum_blank)) {
+               if (txp->flags & (NETTXF_data_validated|NETTXF_csum_blank)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        skb->proto_data_valid = 1;
                } else {
                        skb->ip_summed = CHECKSUM_NONE;
                        skb->proto_data_valid = 0;
                }
-               skb->proto_csum_blank = !!(txreq.flags & NETTXF_csum_blank);
-
-               netif->stats.rx_bytes += txreq.size;
+               skb->proto_csum_blank = !!(txp->flags & NETTXF_csum_blank);
+
+               netbk_fill_frags(skb);
+
+               skb->dev      = netif->dev;
+               skb->protocol = eth_type_trans(skb, skb->dev);
+
+               netif->stats.rx_bytes += skb->len;
                netif->stats.rx_packets++;
 
                netif_rx(skb);
                netif->dev->last_rx = jiffies;
-
-               mop++;
        }
 }
 
@@ -695,7 +870,10 @@ static void netif_idx_release(u16 pendin
        unsigned long flags;
 
        spin_lock_irqsave(&_lock, flags);
-       dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
+       dealloc_ring[MASK_PEND_IDX(dealloc_prod)] = pending_idx;
+       /* Sync with net_tx_action_dealloc: insert idx /then/ incr producer. */
+       smp_wmb();
+       dealloc_prod++;
        spin_unlock_irqrestore(&_lock, flags);
 
        tasklet_schedule(&net_tx_tasklet);
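
The two barriers added to netback pair up: netif_idx_release() stores the index into dealloc_ring and only then advances dealloc_prod (smp_wmb), while net_tx_action_dealloc() reads dealloc_prod before it reads the ring entries (smp_rmb), so the consumer can never observe the new producer value without also seeing the slot it covers. Here is a standalone sketch of the same single-producer/single-consumer pattern, not from the patch, using C11 release/acquire in place of the kernel barriers (ring size and payload are illustrative):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define RING_SIZE 256
    #define MASK(i)   ((i) & (RING_SIZE - 1))

    static unsigned short ring[RING_SIZE];
    static atomic_uint prod;

    static void produce(unsigned short idx)
    {
            unsigned int p = atomic_load_explicit(&prod, memory_order_relaxed);

            ring[MASK(p)] = idx;
            /* Publish the slot before the new producer index (smp_wmb analogue). */
            atomic_store_explicit(&prod, p + 1, memory_order_release);
    }

    static void *consumer(void *arg)
    {
            unsigned int seen = 0;

            while (seen < 200) {
                    /* Read the producer first, the entries after it (smp_rmb analogue). */
                    unsigned int p = atomic_load_explicit(&prod, memory_order_acquire);

                    while (seen < p)
                            (void)ring[MASK(seen++)];   /* would release grant 'idx' here */
            }
            return arg;
    }

    int main(void)
    {
            pthread_t t;
            unsigned int i;

            pthread_create(&t, NULL, consumer, NULL);
            for (i = 0; i < 200; i++)
                    produce((unsigned short)i);
            pthread_join(t, NULL);
            puts("all indexes observed in order");
            return 0;
    }
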
diff -r 760669a37a3a -r 5c0c59eb5f73 drivers/xen/netback/xenbus.c
--- a/drivers/xen/netback/xenbus.c      Wed Jun 07 19:53:53 2006 -0400
+++ b/drivers/xen/netback/xenbus.c      Thu Jun 08 15:10:05 2006 -0400
@@ -69,6 +69,8 @@ static int netback_probe(struct xenbus_d
 static int netback_probe(struct xenbus_device *dev,
                         const struct xenbus_device_id *id)
 {
+       const char *message;
+       xenbus_transaction_t xbt;
        int err;
        struct backend_info *be = kzalloc(sizeof(struct backend_info),
                                          GFP_KERNEL);
@@ -86,6 +88,27 @@ static int netback_probe(struct xenbus_d
        if (err)
                goto fail;
 
+       do {
+               err = xenbus_transaction_start(&xbt);
+               if (err) {
+                       xenbus_dev_fatal(dev, err, "starting transaction");
+                       goto fail;
+               }
+
+               err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
+               if (err) {
+                       message = "writing feature-sg";
+                       goto abort_transaction;
+               }
+
+               err = xenbus_transaction_end(xbt, 0);
+       } while (err == -EAGAIN);
+
+       if (err) {
+               xenbus_dev_fatal(dev, err, "completing transaction");
+               goto fail;
+       }
+
        err = xenbus_switch_state(dev, XenbusStateInitWait);
        if (err) {
                goto fail;
@@ -93,6 +116,9 @@ static int netback_probe(struct xenbus_d
 
        return 0;
 
+abort_transaction:
+       xenbus_transaction_end(xbt, 1);
+       xenbus_dev_fatal(dev, err, "%s", message);
 fail:
        DPRINTK("failed");
        netback_remove(dev);
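
The feature-sg advertisement uses the usual xenbus transaction shape: start, write, end, and retry the whole block while the end returns -EAGAIN because another writer committed first. The kernel calls obviously do not compile outside the tree, so this is a sketch of just the retry structure, with stub functions standing in for xenbus_transaction_start/xenbus_printf/xenbus_transaction_end (the stubs and the forced first-attempt conflict are assumptions for illustration):

    #include <errno.h>
    #include <stdio.h>

    /* Stubs standing in for the xenbus transaction calls used above. */
    static int txn_start(int *xbt)           { *xbt = 1; return 0; }
    static int txn_write(int xbt, int value) { (void)xbt; (void)value; return 0; }
    static int txn_end(int xbt, int abort)
    {
            static int attempts;

            (void)xbt;
            if (abort)
                    return 0;
            /* Pretend the first commit races with another writer. */
            return (attempts++ == 0) ? -EAGAIN : 0;
    }

    int main(void)
    {
            int xbt, err;

            do {
                    err = txn_start(&xbt);
                    if (err)
                            break;

                    err = txn_write(xbt, 1);        /* "feature-sg" = 1 */
                    if (err) {
                            txn_end(xbt, 1);        /* abort on write failure */
                            break;
                    }

                    err = txn_end(xbt, 0);          /* commit; may race -> -EAGAIN */
            } while (err == -EAGAIN);

            printf("transaction committed, err = %d\n", err);
            return 0;
    }
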
diff -r 760669a37a3a -r 5c0c59eb5f73 drivers/xen/netfront/netfront.c
--- a/drivers/xen/netfront/netfront.c   Wed Jun 07 19:53:53 2006 -0400
+++ b/drivers/xen/netfront/netfront.c   Thu Jun 08 15:10:05 2006 -0400
@@ -45,6 +45,7 @@
 #include <linux/bitops.h>
 #include <linux/ethtool.h>
 #include <linux/in.h>
+#include <linux/if_ether.h>
 #include <net/sock.h>
 #include <net/pkt_sched.h>
 #include <net/arp.h>
@@ -173,6 +174,11 @@ static void xennet_sysfs_delif(struct ne
 #define xennet_sysfs_delif(dev) do { } while(0)
 #endif
 
+static inline int xennet_can_sg(struct net_device *dev)
+{
+       return dev->features & NETIF_F_SG;
+}
+
 /**
  * Entry point to this code when a new device is created.  Allocate the basic
  * structures and the ring buffers for communication with the backend, and
@@ -307,8 +313,6 @@ again:
                goto destroy_ring;
        }
 
-       xenbus_switch_state(dev, XenbusStateConnected);
-
        return 0;
 
  abort_transaction:
@@ -370,12 +374,9 @@ static int setup_device(struct xenbus_de
                goto fail;
 
        memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
-       network_connect(netdev);
        info->irq = bind_evtchn_to_irqhandler(
                info->evtchn, netif_int, SA_SAMPLE_RANDOM, netdev->name,
                netdev);
-       (void)send_fake_arp(netdev);
-       show_device(info);
 
        return 0;
 
@@ -391,15 +392,24 @@ static void backend_changed(struct xenbu
 static void backend_changed(struct xenbus_device *dev,
                            enum xenbus_state backend_state)
 {
+       struct netfront_info *np = dev->data;
+       struct net_device *netdev = np->netdev;
+
        DPRINTK("\n");
 
        switch (backend_state) {
        case XenbusStateInitialising:
-       case XenbusStateInitWait:
        case XenbusStateInitialised:
        case XenbusStateConnected:
        case XenbusStateUnknown:
        case XenbusStateClosed:
+               break;
+
+       case XenbusStateInitWait:
+               network_connect(netdev);
+               xenbus_switch_state(dev, XenbusStateConnected);
+               (void)send_fake_arp(netdev);
+               show_device(np);
                break;
 
        case XenbusStateClosing:
@@ -452,13 +462,17 @@ static int network_open(struct net_devic
        return 0;
 }
 
+static inline int netfront_tx_slot_available(struct netfront_info *np)
+{
+       return RING_FREE_REQUESTS(&np->tx) >= MAX_SKB_FRAGS + 1;
+}
+
 static inline void network_maybe_wake_tx(struct net_device *dev)
 {
        struct netfront_info *np = netdev_priv(dev);
 
        if (unlikely(netif_queue_stopped(dev)) &&
-           !RING_FULL(&np->tx) &&
-           !gnttab_empty_grant_references(&np->gref_tx_head) &&
+           netfront_tx_slot_available(np) &&
            likely(netif_running(dev)))
                netif_wake_queue(dev);
 }
@@ -485,7 +499,7 @@ static void network_tx_buf_gc(struct net
                                printk(KERN_ALERT "network_tx_buf_gc: warning "
                                       "-- grant still in use by backend "
                                       "domain.\n");
-                               break; /* bail immediately */
+                               BUG();
                        }
                        gnttab_end_foreign_access_ref(
                                np->grant_tx_ref[id], GNTMAP_readonly);
@@ -638,36 +652,95 @@ static void network_alloc_rx_buffers(str
        RING_PUSH_REQUESTS(&np->rx);
 }
 
+static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
+                             struct netif_tx_request *tx)
+{
+       struct netfront_info *np = netdev_priv(dev);
+       char *data = skb->data;
+       unsigned long mfn;
+       RING_IDX prod = np->tx.req_prod_pvt;
+       int frags = skb_shinfo(skb)->nr_frags;
+       unsigned int offset = offset_in_page(data);
+       unsigned int len = skb_headlen(skb);
+       unsigned int id;
+       grant_ref_t ref;
+       int i;
+
+       while (len > PAGE_SIZE - offset) {
+               tx->size = PAGE_SIZE - offset;
+               tx->flags |= NETTXF_more_data;
+               len -= tx->size;
+               data += tx->size;
+               offset = 0;
+
+               id = get_id_from_freelist(np->tx_skbs);
+               np->tx_skbs[id] = skb_get(skb);
+               tx = RING_GET_REQUEST(&np->tx, prod++);
+               tx->id = id;
+               ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+               BUG_ON((signed short)ref < 0);
+
+               mfn = virt_to_mfn(data);
+               gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
+                                               mfn, GNTMAP_readonly);
+
+               tx->gref = np->grant_tx_ref[id] = ref;
+               tx->offset = offset;
+               tx->size = len;
+               tx->flags = 0;
+       }
+
+       for (i = 0; i < frags; i++) {
+               skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+
+               tx->flags |= NETTXF_more_data;
+
+               id = get_id_from_freelist(np->tx_skbs);
+               np->tx_skbs[id] = skb_get(skb);
+               tx = RING_GET_REQUEST(&np->tx, prod++);
+               tx->id = id;
+               ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+               BUG_ON((signed short)ref < 0);
+
+               mfn = pfn_to_mfn(page_to_pfn(frag->page));
+               gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
+                                               mfn, GNTMAP_readonly);
+
+               tx->gref = np->grant_tx_ref[id] = ref;
+               tx->offset = frag->page_offset;
+               tx->size = frag->size;
+               tx->flags = 0;
+       }
+
+       np->tx.req_prod_pvt = prod;
+}
 
 static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        unsigned short id;
        struct netfront_info *np = netdev_priv(dev);
        struct netif_tx_request *tx;
+       char *data = skb->data;
        RING_IDX i;
        grant_ref_t ref;
        unsigned long mfn;
        int notify;
-
-       if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
-                    PAGE_SIZE)) {
-               struct sk_buff *nskb;
-               nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC|__GFP_NOWARN);
-               if (unlikely(nskb == NULL))
-                       goto drop;
-               skb_put(nskb, skb->len);
-               memcpy(nskb->data, skb->data, skb->len);
-               /* Copy only the header fields we use in this driver. */
-               nskb->dev = skb->dev;
-               nskb->ip_summed = skb->ip_summed;
-               nskb->proto_data_valid = skb->proto_data_valid;
-               dev_kfree_skb(skb);
-               skb = nskb;
+       int frags = skb_shinfo(skb)->nr_frags;
+       unsigned int offset = offset_in_page(data);
+       unsigned int len = skb_headlen(skb);
+
+       frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
+       if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
+               printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
+                      frags);
+               dump_stack();
+               goto drop;
        }
 
        spin_lock_irq(&np->tx_lock);
 
-       if (unlikely(!netif_carrier_ok(dev))) {
+       if (unlikely(!netif_carrier_ok(dev) ||
+                    (frags > 1 && !xennet_can_sg(dev)))) {
                spin_unlock_irq(&np->tx_lock);
                goto drop;
        }
@@ -682,12 +755,12 @@ static int network_start_xmit(struct sk_
        tx->id   = id;
        ref = gnttab_claim_grant_reference(&np->gref_tx_head);
        BUG_ON((signed short)ref < 0);
-       mfn = virt_to_mfn(skb->data);
+       mfn = virt_to_mfn(data);
        gnttab_grant_foreign_access_ref(
                ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
        tx->gref = np->grant_tx_ref[id] = ref;
-       tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
-       tx->size = skb->len;
+       tx->offset = offset;
+       tx->size = len;
 
        tx->flags = 0;
        if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
@@ -696,14 +769,17 @@ static int network_start_xmit(struct sk_
                tx->flags |= NETTXF_data_validated;
 
        np->tx.req_prod_pvt = i + 1;
+
+       xennet_make_frags(skb, dev, tx);
+       tx->size = skb->len;
+
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
        if (notify)
                notify_remote_via_irq(np->irq);
 
        network_tx_buf_gc(dev);
 
-       if (RING_FULL(&np->tx) ||
-           gnttab_empty_grant_references(&np->gref_tx_head))
+       if (!netfront_tx_slot_available(np))
                netif_stop_queue(dev);
 
        spin_unlock_irq(&np->tx_lock);
@@ -963,12 +1039,46 @@ static struct net_device_stats *network_
        return &np->stats;
 }
 
+static int xennet_change_mtu(struct net_device *dev, int mtu)
+{
+       int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
+
+       if (mtu > max)
+               return -EINVAL;
+       dev->mtu = mtu;
+       return 0;
+}
+
+static int xennet_set_sg(struct net_device *dev, u32 data)
+{
+       if (data) {
+               struct netfront_info *np = netdev_priv(dev);
+               int val;
+
+               if (xenbus_scanf(XBT_NULL, np->xbdev->otherend, "feature-sg",
+                                "%d", &val) < 0)
+                       val = 0;
+               if (!val)
+                       return -ENOSYS;
+       } else if (dev->mtu > ETH_DATA_LEN)
+               dev->mtu = ETH_DATA_LEN;
+
+       return ethtool_op_set_sg(dev, data);
+}
+
+static void xennet_set_features(struct net_device *dev)
+{
+       xennet_set_sg(dev, 1);
+}
+
 static void network_connect(struct net_device *dev)
 {
        struct netfront_info *np;
        int i, requeue_idx;
        struct netif_tx_request *tx;
        struct sk_buff *skb;
+
+       xennet_set_features(dev);
 
        np = netdev_priv(dev);
        spin_lock_irq(&np->tx_lock);
@@ -1081,6 +1191,8 @@ static struct ethtool_ops network_ethtoo
 {
        .get_tx_csum = ethtool_op_get_tx_csum,
        .set_tx_csum = ethtool_op_set_tx_csum,
+       .get_sg = ethtool_op_get_sg,
+       .set_sg = xennet_set_sg,
 };
 
 #ifdef CONFIG_SYSFS
@@ -1297,6 +1409,7 @@ static struct net_device * __devinit cre
        netdev->poll            = netif_poll;
        netdev->set_multicast_list = network_set_multicast_list;
        netdev->uninit          = netif_uninit;
+       netdev->change_mtu      = xennet_change_mtu;
        netdev->weight          = 64;
        netdev->features        = NETIF_F_IP_CSUM;
 
diff -r 760669a37a3a -r 5c0c59eb5f73 drivers/xen/privcmd/privcmd.c
--- a/drivers/xen/privcmd/privcmd.c     Wed Jun 07 19:53:53 2006 -0400
+++ b/drivers/xen/privcmd/privcmd.c     Thu Jun 08 15:10:05 2006 -0400
@@ -63,11 +63,11 @@ static int privcmd_ioctl(struct inode *i
                __asm__ __volatile__ (
                        "pushl %%ebx; pushl %%ecx; pushl %%edx; "
                        "pushl %%esi; pushl %%edi; "
-                       "movl  4(%%eax),%%ebx ;"
-                       "movl  8(%%eax),%%ecx ;"
-                       "movl 12(%%eax),%%edx ;"
-                       "movl 16(%%eax),%%esi ;"
-                       "movl 20(%%eax),%%edi ;"
+                       "movl  8(%%eax),%%ebx ;"
+                       "movl 16(%%eax),%%ecx ;"
+                       "movl 24(%%eax),%%edx ;"
+                       "movl 32(%%eax),%%esi ;"
+                       "movl 40(%%eax),%%edi ;"
                        "movl   (%%eax),%%eax ;"
                        "shll $5,%%eax ;"
                        "addl $hypercall_page,%%eax ;"
@@ -214,7 +214,7 @@ static int privcmd_ioctl(struct inode *i
        batch_err:
                printk("batch_err ret=%d vma=%p addr=%lx "
                       "num=%d arr=%p %lx-%lx\n", 
-                      ret, vma, m.addr, m.num, m.arr,
+                      ret, vma, (unsigned long)m.addr, m.num, m.arr,
                       vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
                break;
        }
diff -r 760669a37a3a -r 5c0c59eb5f73 include/asm-i386/mach-xen/asm/dma-mapping.h
--- a/include/asm-i386/mach-xen/asm/dma-mapping.h       Wed Jun 07 19:53:53 2006 -0400
+++ b/include/asm-i386/mach-xen/asm/dma-mapping.h       Thu Jun 08 15:10:05 2006 -0400
@@ -128,8 +128,6 @@ dma_get_cache_alignment(void)
         * maximum possible, to be safe */
        return (1 << INTERNODE_CACHE_SHIFT);
 }
-#else
-extern int dma_get_cache_alignment(void);
 #endif
 
 #define dma_is_consistent(d)   (1)
diff -r 760669a37a3a -r 5c0c59eb5f73 include/asm-i386/mach-xen/asm/hypercall.h
--- a/include/asm-i386/mach-xen/asm/hypercall.h Wed Jun 07 19:53:53 2006 -0400
+++ b/include/asm-i386/mach-xen/asm/hypercall.h Thu Jun 08 15:10:05 2006 -0400
@@ -260,6 +260,13 @@ HYPERVISOR_event_channel_op(
 }
 
 static inline int
+HYPERVISOR_acm_op(
+       int cmd, void *arg)
+{
+       return _hypercall2(int, acm_op, cmd, arg);
+}
+
+static inline int
 HYPERVISOR_xen_version(
        int cmd, void *arg)
 {
diff -r 760669a37a3a -r 5c0c59eb5f73 include/asm-i386/mach-xen/asm/system.h
--- a/include/asm-i386/mach-xen/asm/system.h    Wed Jun 07 19:53:53 2006 -0400
+++ b/include/asm-i386/mach-xen/asm/system.h    Thu Jun 08 15:10:05 2006 -0400
@@ -115,10 +115,12 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t"
        __asm__ ( \
                "movl %%cr3,%0\n\t" \
                :"=r" (__dummy)); \
-       machine_to_phys(__dummy); \
+       __dummy = xen_cr3_to_pfn(__dummy); \
+       mfn_to_pfn(__dummy) << PAGE_SHIFT; \
 })
 #define write_cr3(x) ({                                                \
-       maddr_t __dummy = phys_to_machine(x);                   \
+       unsigned int __dummy = pfn_to_mfn((x) >> PAGE_SHIFT);   \
+       __dummy = xen_pfn_to_cr3(__dummy);                      \
        __asm__ __volatile__("movl %0,%%cr3": :"r" (__dummy));  \
 })
 
@@ -519,8 +521,8 @@ do {                                                        \
                preempt_enable_no_resched();                            \
 } while (0)
 
-#define safe_halt()            ((void)0)
-#define halt()                 ((void)0)
+void safe_halt(void);
+void halt(void);
 
 #define __save_and_cli(x)                                              \
 do {                                                                   \
diff -r 760669a37a3a -r 5c0c59eb5f73 include/asm-i386/mach-xen/setup_arch_post.h
--- a/include/asm-i386/mach-xen/setup_arch_post.h       Wed Jun 07 19:53:53 2006 -0400
+++ b/include/asm-i386/mach-xen/setup_arch_post.h       Thu Jun 08 15:10:05 2006 -0400
@@ -61,13 +61,6 @@ static void __init machine_specific_arch
                .address = { __KERNEL_CS, (unsigned long)nmi },
        };
 
-       if (xen_feature(XENFEAT_auto_translated_physmap) &&
-           xen_start_info->shared_info < xen_start_info->nr_pages) {
-               HYPERVISOR_shared_info =
-                       (shared_info_t *)__va(xen_start_info->shared_info);
-               memset(empty_zero_page, 0, sizeof(empty_zero_page));
-       }
-
        ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
        if (ret == 0)
                ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
diff -r 760669a37a3a -r 5c0c59eb5f73 include/asm-ia64/hw_irq.h
--- a/include/asm-ia64/hw_irq.h Wed Jun 07 19:53:53 2006 -0400
+++ b/include/asm-ia64/hw_irq.h Thu Jun 08 15:10:05 2006 -0400
@@ -15,7 +15,11 @@
 #include <asm/ptrace.h>
 #include <asm/smp.h>
 
+#ifndef CONFIG_XEN
 typedef u8 ia64_vector;
+#else
+typedef u16 ia64_vector;
+#endif
 
 /*
  * 0 special
@@ -86,11 +90,15 @@ extern void ia64_send_ipi (int cpu, int 
 extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
 extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
 
+#ifndef CONFIG_XEN
 static inline void
 hw_resend_irq (struct hw_interrupt_type *h, unsigned int vector)
 {
        platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
 }
+#else
+extern void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i);
+#endif /* CONFIG_XEN */
 
 /*
  * Default implementations for the irq-descriptor API:
diff -r 760669a37a3a -r 5c0c59eb5f73 include/asm-ia64/hypercall.h
--- a/include/asm-ia64/hypercall.h      Wed Jun 07 19:53:53 2006 -0400
+++ b/include/asm-ia64/hypercall.h      Thu Jun 08 15:10:05 2006 -0400
@@ -247,6 +247,13 @@ HYPERVISOR_event_channel_op(
 }
 
 static inline int
+HYPERVISOR_acm_op(
+       unsigned int cmd, void *arg)
+{
+    return _hypercall2(int, acm_op, cmd, arg);
+}
+
+static inline int
 HYPERVISOR_xen_version(
     int cmd, void *arg)
 {
@@ -313,9 +320,20 @@ HYPERVISOR_suspend(
        return rc;
 }
 
+static inline int
+HYPERVISOR_callback_op(
+       int cmd, void *arg)
+{
+       return _hypercall2(int, callback_op, cmd, arg);
+}
+
 extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
 static inline void exit_idle(void) {}
-#define do_IRQ(irq, regs) __do_IRQ((irq), (regs))
+#define do_IRQ(irq, regs) ({                   \
+       irq_enter();                            \
+       __do_IRQ((irq), (regs));                \
+       irq_exit();                             \
+})
 
 #ifdef CONFIG_XEN_IA64_DOM0_VP
 #include <linux/err.h>
@@ -418,12 +436,14 @@ HYPERVISOR_ioremap(unsigned long ioaddr,
 HYPERVISOR_ioremap(unsigned long ioaddr, unsigned long size)
 {
        unsigned long ret = ioaddr;
-       if (running_on_xen) {
+       if (is_running_on_xen()) {
                ret = __HYPERVISOR_ioremap(ioaddr, size);
-               if (unlikely(IS_ERR_VALUE(ret)))
+               if (unlikely(ret == -ENOSYS))
                        panic("hypercall %s failed with %ld. "
                              "Please check Xen and Linux config mismatch\n",
                              __func__, -ret);
+               else if (unlikely(IS_ERR_VALUE(ret)))
+                       ret = ioaddr;
        }
        return ret;
 }
@@ -439,7 +459,7 @@ HYPERVISOR_phystomach(unsigned long gpfn
 HYPERVISOR_phystomach(unsigned long gpfn)
 {
        unsigned long ret = gpfn;
-       if (running_on_xen) {
+       if (is_running_on_xen()) {
                ret = __HYPERVISOR_phystomach(gpfn);
        }
        return ret;
@@ -456,7 +476,7 @@ HYPERVISOR_machtophys(unsigned long mfn)
 HYPERVISOR_machtophys(unsigned long mfn)
 {
        unsigned long ret = mfn;
-       if (running_on_xen) {
+       if (is_running_on_xen()) {
                ret = __HYPERVISOR_machtophys(mfn);
        }
        return ret;
@@ -473,7 +493,7 @@ HYPERVISOR_zap_physmap(unsigned long gpf
 HYPERVISOR_zap_physmap(unsigned long gpfn, unsigned int extent_order)
 {
        unsigned long ret = 0;
-       if (running_on_xen) {
+       if (is_running_on_xen()) {
                ret = __HYPERVISOR_zap_physmap(gpfn, extent_order);
        }
        return ret;
@@ -481,7 +501,7 @@ HYPERVISOR_zap_physmap(unsigned long gpf
 
 static inline unsigned long
 __HYPERVISOR_add_physmap(unsigned long gpfn, unsigned long mfn,
-                        unsigned int flags, domid_t domid)
+                        unsigned long flags, domid_t domid)
 {
        return _hypercall_imm4(unsigned long, ia64_dom0vp_op,
                               IA64_DOM0VP_add_physmap, gpfn, mfn, flags,
@@ -490,11 +510,11 @@ __HYPERVISOR_add_physmap(unsigned long g
 
 static inline unsigned long
 HYPERVISOR_add_physmap(unsigned long gpfn, unsigned long mfn,
-                      unsigned int flags, domid_t domid)
+                      unsigned long flags, domid_t domid)
 {
        unsigned long ret = 0;
-       BUG_ON(!running_on_xen);//XXX
-       if (running_on_xen) {
+       BUG_ON(!is_running_on_xen());//XXX
+       if (is_running_on_xen()) {
                ret = __HYPERVISOR_add_physmap(gpfn, mfn, flags, domid);
        }
        return ret;
diff -r 760669a37a3a -r 5c0c59eb5f73 include/asm-ia64/hypervisor.h
--- a/include/asm-ia64/hypervisor.h     Wed Jun 07 19:53:53 2006 -0400
+++ b/include/asm-ia64/hypervisor.h     Thu Jun 08 15:10:05 2006 -0400
@@ -46,14 +46,12 @@
 #include <asm/hypercall.h>
 #include <asm/ptrace.h>
 #include <asm/page.h>
-#include <asm/xen/privop.h> // for running_on_xen
+#include <asm/xen/privop.h> // for is_running_on_xen()
 
 extern shared_info_t *HYPERVISOR_shared_info;
 extern start_info_t *xen_start_info;
 
 void force_evtchn_callback(void);
-
-#define is_running_on_xen() running_on_xen
 
 /* Turn jiffies into Xen system time. XXX Implement me. */
 #define jiffies_to_st(j)       0
diff -r 760669a37a3a -r 5c0c59eb5f73 include/asm-ia64/irq.h
--- a/include/asm-ia64/irq.h    Wed Jun 07 19:53:53 2006 -0400
+++ b/include/asm-ia64/irq.h    Thu Jun 08 15:10:05 2006 -0400
@@ -11,8 +11,39 @@
  * 02/29/00     D.Mosberger    moved most things into hw_irq.h
  */
 
+#ifndef CONFIG_XEN
 #define NR_IRQS                256
 #define NR_IRQ_VECTORS NR_IRQS
+#else
+/*
+ * The flat IRQ space is divided into two regions:
+ *  1. A one-to-one mapping of real physical IRQs. This space is only used
+ *     if we have physical device-access privilege. This region is at the 
+ *     start of the IRQ space so that existing device drivers do not need
+ *     to be modified to translate physical IRQ numbers into our IRQ space.
+ *  2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
+ *     are bound using the provided bind/unbind functions.
+ */
+
+#define PIRQ_BASE              0
+#define NR_PIRQS               256
+
+#define DYNIRQ_BASE            (PIRQ_BASE + NR_PIRQS)
+#define NR_DYNIRQS             256
+
+#define NR_IRQS                        (NR_PIRQS + NR_DYNIRQS)
+#define NR_IRQ_VECTORS         NR_IRQS
+
+#define pirq_to_irq(_x)                ((_x) + PIRQ_BASE)
+#define irq_to_pirq(_x)                ((_x) - PIRQ_BASE)
+
+#define dynirq_to_irq(_x)      ((_x) + DYNIRQ_BASE)
+#define irq_to_dynirq(_x)      ((_x) - DYNIRQ_BASE)
+
+#define RESCHEDULE_VECTOR      0
+#define IPI_VECTOR             1
+#define NR_IPIS                        2
+#endif /* CONFIG_XEN */
 
 /*
  * IRQ line status macro IRQ_PER_CPU is used
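
To make the new ia64 IRQ layout concrete: physical IRQs occupy [PIRQ_BASE, PIRQ_BASE + NR_PIRQS) and dynamically bound event-channel IRQs follow at DYNIRQ_BASE, so the conversion macros are plain offset arithmetic. A standalone demo using the constants from this hunk (the demo itself is not part of the patch):

    #include <stdio.h>

    #define PIRQ_BASE    0
    #define NR_PIRQS     256

    #define DYNIRQ_BASE  (PIRQ_BASE + NR_PIRQS)
    #define NR_DYNIRQS   256

    #define NR_IRQS      (NR_PIRQS + NR_DYNIRQS)

    #define pirq_to_irq(_x)    ((_x) + PIRQ_BASE)
    #define dynirq_to_irq(_x)  ((_x) + DYNIRQ_BASE)
    #define irq_to_dynirq(_x)  ((_x) - DYNIRQ_BASE)

    int main(void)
    {
            /* A physical IRQ and an event-channel-backed dynamic IRQ. */
            printf("pirq 9   -> irq %d\n", pirq_to_irq(9));
            printf("dynirq 4 -> irq %d\n", dynirq_to_irq(4));
            printf("irq 260  -> dynirq %d (of %d total irqs)\n",
                   irq_to_dynirq(260), NR_IRQS);
            return 0;
    }
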
diff -r 760669a37a3a -r 5c0c59eb5f73 include/asm-ia64/xen/privop.h
--- a/include/asm-ia64/xen/privop.h     Wed Jun 07 19:53:53 2006 -0400
+++ b/include/asm-ia64/xen/privop.h     Thu Jun 08 15:10:05 2006 -0400
@@ -43,6 +43,7 @@
 
 #ifndef __ASSEMBLY__
 extern int running_on_xen;
+#define is_running_on_xen() running_on_xen
 
 #define        XEN_HYPER_SSM_I         asm("break %0" : : "i" (HYPERPRIVOP_SSM_I))
 #define        XEN_HYPER_GET_IVR       asm("break %0" : : "i" (HYPERPRIVOP_GET_IVR))
@@ -122,7 +123,7 @@ extern void xen_set_eflag(unsigned long)
 
 #define xen_ia64_intrin_local_irq_restore(x)                           \
 {                                                                      \
-     if (running_on_xen) {                                             \
+     if (is_running_on_xen()) {                                                \
        if ((x) & IA64_PSR_I) { xen_ssm_i(); }                          \
        else { xen_rsm_i(); }                                           \
     }                                                                  \
@@ -131,7 +132,7 @@ extern void xen_set_eflag(unsigned long)
 
 #define        xen_get_psr_i()                                                 \
 (                                                                      \
-       (running_on_xen) ?                                              \
+       (is_running_on_xen()) ?                                         \
                (xen_get_virtual_psr_i() ? IA64_PSR_I : 0)              \
                : __ia64_get_psr_i()                                    \
 )
@@ -139,7 +140,7 @@ extern void xen_set_eflag(unsigned long)
 #define xen_ia64_ssm(mask)                                             \
 {                                                                      \
        if ((mask)==IA64_PSR_I) {                                       \
-               if (running_on_xen) { xen_ssm_i(); }                    \
+               if (is_running_on_xen()) { xen_ssm_i(); }                       \
                else { __ia64_ssm(mask); }                              \
        }                                                               \
        else { __ia64_ssm(mask); }                                      \
@@ -148,7 +149,7 @@ extern void xen_set_eflag(unsigned long)
 #define xen_ia64_rsm(mask)                                             \
 {                                                                      \
        if ((mask)==IA64_PSR_I) {                                       \
-               if (running_on_xen) { xen_rsm_i(); }                    \
+               if (is_running_on_xen()) { xen_rsm_i(); }                       \
                else { __ia64_rsm(mask); }                              \
        }                                                               \
        else { __ia64_rsm(mask); }                                      \
@@ -167,10 +168,11 @@ extern void xen_set_rr(unsigned long ind
 extern void xen_set_rr(unsigned long index, unsigned long val);
 extern unsigned long xen_get_rr(unsigned long index);
 extern void xen_set_kr(unsigned long index, unsigned long val);
-
-/* Note: It may look wrong to test for running_on_xen in each case.
+extern void xen_ptcga(unsigned long addr, unsigned long size);
+
+/* Note: It may look wrong to test for is_running_on_xen() in each case.
  * However regnum is always a constant so, as written, the compiler
- * eliminates the switch statement, whereas running_on_xen must be
+ * eliminates the switch statement, whereas is_running_on_xen() must be
  * tested dynamically. */
 #define xen_ia64_getreg(regnum)                                                \
 ({                                                                     \
@@ -178,17 +180,17 @@ extern void xen_set_kr(unsigned long ind
                                                                        \
        switch(regnum) {                                                \
        case _IA64_REG_CR_IVR:                                          \
-               ia64_intri_res = (running_on_xen) ?                     \
+               ia64_intri_res = (is_running_on_xen()) ?                        \
                        xen_get_ivr() :                                 \
                        __ia64_getreg(regnum);                          \
                break;                                                  \
        case _IA64_REG_CR_TPR:                                          \
-               ia64_intri_res = (running_on_xen) ?                     \
+               ia64_intri_res = (is_running_on_xen()) ?                        \
                        xen_get_tpr() :                                 \
                        __ia64_getreg(regnum);                          \
                break;                                                  \
        case _IA64_REG_AR_EFLAG:                                        \
-               ia64_intri_res = (running_on_xen) ?                     \
+               ia64_intri_res = (is_running_on_xen()) ?                        \
                        xen_get_eflag() :                               \
                        __ia64_getreg(regnum);                          \
                break;                                                  \
@@ -203,27 +205,27 @@ extern void xen_set_kr(unsigned long ind
 ({                                                                     \
        switch(regnum) {                                                \
        case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:                     \
-               (running_on_xen) ?                                      \
+               (is_running_on_xen()) ?                                 \
                        xen_set_kr((regnum-_IA64_REG_AR_KR0), val) :    \
                        __ia64_setreg(regnum,val);                      \
                break;                                                  \
        case _IA64_REG_CR_ITM:                                          \
-               (running_on_xen) ?                                      \
+               (is_running_on_xen()) ?                                 \
                        xen_set_itm(val) :                              \
                        __ia64_setreg(regnum,val);                      \
                break;                                                  \
        case _IA64_REG_CR_TPR:                                          \
-               (running_on_xen) ?                                      \
+               (is_running_on_xen()) ?                                 \
                        xen_set_tpr(val) :                              \
                        __ia64_setreg(regnum,val);                      \
                break;                                                  \
        case _IA64_REG_CR_EOI:                                          \
-               (running_on_xen) ?                                      \
+               (is_running_on_xen()) ?                                 \
                        xen_eoi() :                                     \
                        __ia64_setreg(regnum,val);                      \
                break;                                                  \
        case _IA64_REG_AR_EFLAG:                                        \
-               (running_on_xen) ?                                      \
+               (is_running_on_xen()) ?                                 \
                        xen_set_eflag(val) :                            \
                        __ia64_setreg(regnum,val);                      \
                break;                                                  \
diff -r 760669a37a3a -r 5c0c59eb5f73 include/asm-x86_64/mach-xen/asm/hypercall.h
--- a/include/asm-x86_64/mach-xen/asm/hypercall.h       Wed Jun 07 19:53:53 2006 -0400
+++ b/include/asm-x86_64/mach-xen/asm/hypercall.h       Thu Jun 08 15:10:05 2006 -0400
@@ -258,6 +258,13 @@ HYPERVISOR_event_channel_op(
 }
 
 static inline int
+HYPERVISOR_acm_op(
+       int cmd, void *arg)
+{
+       return _hypercall2(int, acm_op, cmd, arg);
+}
+
+static inline int
 HYPERVISOR_xen_version(
        int cmd, void *arg)
 {
diff -r 760669a37a3a -r 5c0c59eb5f73 include/asm-x86_64/mach-xen/asm/system.h
--- a/include/asm-x86_64/mach-xen/asm/system.h  Wed Jun 07 19:53:53 2006 -0400
+++ b/include/asm-x86_64/mach-xen/asm/system.h  Thu Jun 08 15:10:05 2006 -0400
@@ -418,8 +418,8 @@ do {                                                        \
        preempt_enable_no_resched();                                    \
        ___x; })
 
-#define safe_halt()            ((void)0)
-#define halt()                 ((void)0)
+void safe_halt(void);
+void halt(void);
 
 void cpu_idle_wait(void);
 
diff -r 760669a37a3a -r 5c0c59eb5f73 include/xen/cpu_hotplug.h
--- a/include/xen/cpu_hotplug.h Wed Jun 07 19:53:53 2006 -0400
+++ b/include/xen/cpu_hotplug.h Thu Jun 08 15:10:05 2006 -0400
@@ -13,14 +13,16 @@ void cpu_initialize_context(unsigned int
 #define cpu_initialize_context(cpu)    ((void)0)
 #endif
 
-int cpu_up_is_allowed(unsigned int cpu);
+int cpu_up_check(unsigned int cpu);
 void init_xenbus_allowed_cpumask(void);
 int smp_suspend(void);
 void smp_resume(void);
 
+void cpu_bringup(void);
+
 #else /* !defined(CONFIG_HOTPLUG_CPU) */
 
-#define cpu_up_is_allowed(cpu)         (1)
+#define cpu_up_check(cpu)              (0)
 #define init_xenbus_allowed_cpumask()  ((void)0)
 
 static inline int smp_suspend(void)
diff -r 760669a37a3a -r 5c0c59eb5f73 include/xen/interface/arch-ia64.h
--- a/include/xen/interface/arch-ia64.h Wed Jun 07 19:53:53 2006 -0400
+++ b/include/xen/interface/arch-ia64.h Thu Jun 08 15:10:05 2006 -0400
@@ -26,6 +26,9 @@ DEFINE_XEN_GUEST_HANDLE(int);
 DEFINE_XEN_GUEST_HANDLE(int);
 DEFINE_XEN_GUEST_HANDLE(long);
 DEFINE_XEN_GUEST_HANDLE(void);
+
+typedef unsigned long xen_pfn_t;
+DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
 #endif
 
 /* Arch specific VIRQs definition */
@@ -320,6 +323,8 @@ struct arch_initrd_info {
 };
 typedef struct arch_initrd_info arch_initrd_info_t;
 
+typedef unsigned long xen_callback_t;
+
 #define IA64_COMMAND_LINE_SIZE 512
 struct vcpu_guest_context {
 #define VGCF_FPU_VALID (1<<0)
@@ -367,6 +372,10 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_guest_conte
 #define IA64_DOM0VP_add_physmap         18      // assign machine page frame
                                                 // to dom0's pseudo physical
                                                 // address space.
+// flags for page assignment to pseudo physical address space
+#define _ASSIGN_readonly                0
+#define ASSIGN_readonly                 (1UL << _ASSIGN_readonly)
+#define ASSIGN_writable                 (0UL << _ASSIGN_readonly) // dummy flag
 
 #endif /* !__ASSEMBLY__ */
 
diff -r 760669a37a3a -r 5c0c59eb5f73 include/xen/interface/arch-x86_32.h
--- a/include/xen/interface/arch-x86_32.h       Wed Jun 07 19:53:53 2006 -0400
+++ b/include/xen/interface/arch-x86_32.h       Thu Jun 08 15:10:05 2006 -0400
@@ -28,6 +28,9 @@ DEFINE_XEN_GUEST_HANDLE(int);
 DEFINE_XEN_GUEST_HANDLE(int);
 DEFINE_XEN_GUEST_HANDLE(long);
 DEFINE_XEN_GUEST_HANDLE(void);
+
+typedef unsigned long xen_pfn_t;
+DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
 #endif
 
 /*
@@ -138,9 +141,17 @@ struct vcpu_guest_context {
 struct vcpu_guest_context {
     /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
     struct { char x[512]; } fpu_ctxt;       /* User-level FPU registers     */
-#define VGCF_I387_VALID (1<<0)
-#define VGCF_HVM_GUEST  (1<<1)
-#define VGCF_IN_KERNEL  (1<<2)
+#define VGCF_I387_VALID                (1<<0)
+#define VGCF_HVM_GUEST                 (1<<1)
+#define VGCF_IN_KERNEL                 (1<<2)
+#define _VGCF_i387_valid               0
+#define VGCF_i387_valid                (1<<_VGCF_i387_valid)
+#define _VGCF_hvm_guest                1
+#define VGCF_hvm_guest                 (1<<_VGCF_hvm_guest)
+#define _VGCF_in_kernel                2
+#define VGCF_in_kernel                 (1<<_VGCF_in_kernel)
+#define _VGCF_failsafe_disables_events 3
+#define VGCF_failsafe_disables_events  (1<<_VGCF_failsafe_disables_events)
     unsigned long flags;                    /* VGCF_* flags                 */
     struct cpu_user_regs user_regs;         /* User-level CPU registers     */
     struct trap_info trap_ctxt[256];        /* Virtual IDT                  */
@@ -158,10 +169,18 @@ typedef struct vcpu_guest_context vcpu_g
 typedef struct vcpu_guest_context vcpu_guest_context_t;
 DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
 
+/*
+ * Page-directory addresses above 4GB do not fit into architectural %cr3.
+ * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
+ * must use the following accessor macros to pack/unpack valid MFNs.
+ */
+#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
+#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
+
 struct arch_shared_info {
     unsigned long max_pfn;                  /* max pfn that appears in table */
     /* Frame containing list of mfns containing list of mfns containing p2m. */
-    unsigned long pfn_to_mfn_frame_list_list;
+    xen_pfn_t     pfn_to_mfn_frame_list_list;
     unsigned long nmi_reason;
 };
 typedef struct arch_shared_info arch_shared_info_t;
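
The x86_32 accessors deserve a note: with PAE a page-directory MFN can exceed 20 bits, so it cannot simply be shifted into the architectural %cr3 layout. The macros rotate the MFN so its high bits land in the low 12 bits of the packed value, and the unpack macro rotates them back. A standalone check, separate from the patch, that the encoding round-trips for a frame above the 4GB boundary:

    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the hunk above; 32-bit arithmetic is the point. */
    #define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
    #define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))

    int main(void)
    {
            uint32_t mfn = 0x123456;        /* > 20 bits: frame lives above 4GB */
            uint32_t cr3 = xen_pfn_to_cr3(mfn);

            printf("mfn %#x -> packed cr3 %#x -> mfn %#x\n",
                   mfn, cr3, xen_cr3_to_pfn(cr3));
            return 0;
    }
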
diff -r 760669a37a3a -r 5c0c59eb5f73 include/xen/interface/arch-x86_64.h
--- a/include/xen/interface/arch-x86_64.h       Wed Jun 07 19:53:53 2006 -0400
+++ b/include/xen/interface/arch-x86_64.h       Thu Jun 08 15:10:05 2006 -0400
@@ -28,6 +28,9 @@ DEFINE_XEN_GUEST_HANDLE(int);
 DEFINE_XEN_GUEST_HANDLE(int);
 DEFINE_XEN_GUEST_HANDLE(long);
 DEFINE_XEN_GUEST_HANDLE(void);
+
+typedef unsigned long xen_pfn_t;
+DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
 #endif
 
 /*
@@ -211,9 +214,19 @@ struct vcpu_guest_context {
 struct vcpu_guest_context {
     /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
     struct { char x[512]; } fpu_ctxt;       /* User-level FPU registers     */
-#define VGCF_I387_VALID (1<<0)
-#define VGCF_HVM_GUEST  (1<<1)
-#define VGCF_IN_KERNEL  (1<<2)
+#define VGCF_I387_VALID                (1<<0)
+#define VGCF_HVM_GUEST                 (1<<1)
+#define VGCF_IN_KERNEL                 (1<<2)
+#define _VGCF_i387_valid               0
+#define VGCF_i387_valid                (1<<_VGCF_i387_valid)
+#define _VGCF_hvm_guest                1
+#define VGCF_hvm_guest                 (1<<_VGCF_hvm_guest)
+#define _VGCF_in_kernel                2
+#define VGCF_in_kernel                 (1<<_VGCF_in_kernel)
+#define _VGCF_failsafe_disables_events 3
+#define VGCF_failsafe_disables_events  (1<<_VGCF_failsafe_disables_events)
+#define _VGCF_syscall_disables_events  4
+#define VGCF_syscall_disables_events   (1<<_VGCF_syscall_disables_events)
     unsigned long flags;                    /* VGCF_* flags                 */
     struct cpu_user_regs user_regs;         /* User-level CPU registers     */
     struct trap_info trap_ctxt[256];        /* Virtual IDT                  */
@@ -234,10 +247,13 @@ typedef struct vcpu_guest_context vcpu_g
 typedef struct vcpu_guest_context vcpu_guest_context_t;
 DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
 
+#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
+#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12)
+
 struct arch_shared_info {
     unsigned long max_pfn;                  /* max pfn that appears in table */
     /* Frame containing list of mfns containing list of mfns containing p2m. */
-    unsigned long pfn_to_mfn_frame_list_list;
+    xen_pfn_t     pfn_to_mfn_frame_list_list;
     unsigned long nmi_reason;
 };
 typedef struct arch_shared_info arch_shared_info_t;
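
A hedged, standalone sketch of why both spellings of the VGCF flags are kept: the VGCF_* masks suit plain C expressions, while the new _VGCF_* bit numbers suit bit-indexed helpers such as test_bit() (emulated below with a shift so the sketch stands alone).

    #include <stdio.h>

    /* Flag definitions as above: _VGCF_* is the bit number, VGCF_* the mask. */
    #define _VGCF_i387_valid               0
    #define VGCF_i387_valid                (1<<_VGCF_i387_valid)
    #define _VGCF_in_kernel                2
    #define VGCF_in_kernel                 (1<<_VGCF_in_kernel)

    int main(void)
    {
        unsigned long flags = 0;

        /* Mask form for building the flags word... */
        flags |= VGCF_i387_valid | VGCF_in_kernel;

        /* ...bit-number form for bit-indexed helpers, emulated here with a
         * plain shift instead of the kernel's test_bit(). */
        int in_kernel = (int)((flags >> _VGCF_in_kernel) & 1);

        printf("flags=%#lx in_kernel=%d\n", flags, in_kernel);
        return 0;
    }
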
diff -r 760669a37a3a -r 5c0c59eb5f73 include/xen/interface/callback.h
--- a/include/xen/interface/callback.h  Wed Jun 07 19:53:53 2006 -0400
+++ b/include/xen/interface/callback.h  Thu Jun 08 15:10:05 2006 -0400
@@ -29,12 +29,20 @@
 #define CALLBACKTYPE_nmi                   4
 
 /*
+ * Disable event delivery during callback? This flag is ignored for event and
+ * NMI callbacks: event delivery is unconditionally disabled.
+ */
+#define _CALLBACKF_mask_events             0
+#define CALLBACKF_mask_events              (1U << _CALLBACKF_mask_events)
+
+/*
  * Register a callback.
  */
 #define CALLBACKOP_register                0
 struct callback_register {
-     int type;
-     xen_callback_t address;
+    uint16_t type;
+    uint16_t flags;
+    xen_callback_t address;
 };
 typedef struct callback_register callback_register_t;
 DEFINE_XEN_GUEST_HANDLE(callback_register_t);
@@ -47,7 +55,8 @@ DEFINE_XEN_GUEST_HANDLE(callback_registe
  */
 #define CALLBACKOP_unregister              1
 struct callback_unregister {
-     int type;
+    uint16_t type;
+    uint16_t _unused;
 };
 typedef struct callback_unregister callback_unregister_t;
 DEFINE_XEN_GUEST_HANDLE(callback_unregister_t);
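
A hedged sketch of how a guest might fill the widened callback_register, registering a failsafe callback that wants events masked on entry (the analogue of VGCF_failsafe_disables_events above). The typedefs are local stand-ins in the x86_64 flavour, where xen_callback_t is assumed to be a plain code address, and the entry stub is hypothetical.

    #include <stdint.h>
    #include <stdio.h>

    typedef unsigned long xen_callback_t;       /* x86_64-style: plain address */

    #define CALLBACKTYPE_failsafe              1   /* value assumed from callback.h */
    #define _CALLBACKF_mask_events             0
    #define CALLBACKF_mask_events              (1U << _CALLBACKF_mask_events)

    struct callback_register {
        uint16_t       type;
        uint16_t       flags;
        xen_callback_t address;
    };

    static void failsafe_enter(void) { /* placeholder guest entry stub */ }

    int main(void)
    {
        struct callback_register reg = {
            .type    = CALLBACKTYPE_failsafe,
            .flags   = CALLBACKF_mask_events,   /* enter with events masked */
            .address = (xen_callback_t)failsafe_enter,
        };

        /* A real guest would now pass &reg to the CALLBACKOP_register
         * callback_op hypercall; here we only print the fields. */
        printf("type=%u flags=%#x address=%#lx\n",
               reg.type, reg.flags, reg.address);
        return 0;
    }
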
diff -r 760669a37a3a -r 5c0c59eb5f73 include/xen/interface/grant_table.h
--- a/include/xen/interface/grant_table.h       Wed Jun 07 19:53:53 2006 -0400
+++ b/include/xen/interface/grant_table.h       Thu Jun 08 15:10:05 2006 -0400
@@ -244,7 +244,7 @@ DEFINE_XEN_GUEST_HANDLE(gnttab_dump_tabl
 #define GNTTABOP_transfer                4
 struct gnttab_transfer {
     /* IN parameters. */
-    unsigned long mfn;
+    xen_pfn_t     mfn;
     domid_t       domid;
     grant_ref_t   ref;
     /* OUT parameters. */
diff -r 760669a37a3a -r 5c0c59eb5f73 include/xen/interface/io/netif.h
--- a/include/xen/interface/io/netif.h  Wed Jun 07 19:53:53 2006 -0400
+++ b/include/xen/interface/io/netif.h  Thu Jun 08 15:10:05 2006 -0400
@@ -26,6 +26,10 @@
 /* Packet data has been validated against protocol checksum. */
 #define _NETTXF_data_validated (1)
 #define  NETTXF_data_validated (1U<<_NETTXF_data_validated)
+
+/* Packet continues in the next request. */
+#define _NETTXF_more_data      (2)
+#define  NETTXF_more_data      (1U<<_NETTXF_more_data)
 
 struct netif_tx_request {
     grant_ref_t gref;      /* Reference to buffer page */
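
A hedged, self-contained sketch of the new flag in use: the request layout below is a local stand-in, and the rule shown (every request of a multi-request packet except the last sets NETTXF_more_data) is the intended reading of the comment above.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t grant_ref_t;          /* local stand-in */

    #define _NETTXF_more_data      (2)
    #define  NETTXF_more_data      (1U << _NETTXF_more_data)

    struct netif_tx_request {              /* cut-down stand-in */
        grant_ref_t gref;
        uint16_t offset;
        uint16_t flags;                    /* NETTXF_* */
        uint16_t id;
        uint16_t size;
    };

    /* Fill tx requests for a packet spanning nfrags page fragments.  Every
     * request except the last carries NETTXF_more_data, telling the backend
     * that the packet continues in the next request. */
    static void fill_tx_requests(struct netif_tx_request *req,
                                 const grant_ref_t *grefs,
                                 const uint16_t *sizes, int nfrags)
    {
        for (int i = 0; i < nfrags; i++) {
            req[i].gref   = grefs[i];
            req[i].offset = 0;
            req[i].id     = (uint16_t)i;
            req[i].size   = sizes[i];
            req[i].flags  = (i < nfrags - 1) ? NETTXF_more_data : 0;
        }
    }

    int main(void)
    {
        grant_ref_t grefs[3] = { 10, 11, 12 };      /* made-up grant refs */
        uint16_t sizes[3]    = { 4096, 4096, 1200 };
        struct netif_tx_request req[3];

        fill_tx_requests(req, grefs, sizes, 3);
        for (int i = 0; i < 3; i++)
            printf("req %d: size %u more_data=%d\n", i,
                   (unsigned)req[i].size,
                   !!(req[i].flags & NETTXF_more_data));
        return 0;
    }
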
diff -r 760669a37a3a -r 5c0c59eb5f73 include/xen/interface/io/ring.h
--- a/include/xen/interface/io/ring.h   Wed Jun 07 19:53:53 2006 -0400
+++ b/include/xen/interface/io/ring.h   Thu Jun 08 15:10:05 2006 -0400
@@ -151,19 +151,27 @@ typedef struct __name##_back_ring __name
 #define RING_SIZE(_r)                                                   \
     ((_r)->nr_ents)
 
+/* Number of free requests (for use on front side only). */
+#define RING_FREE_REQUESTS(_r)                                         \
+    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
+
 /* Test if there is an empty slot available on the front ring.
  * (This is only meaningful from the front. )
  */
 #define RING_FULL(_r)                                                   \
-    (((_r)->req_prod_pvt - (_r)->rsp_cons) == RING_SIZE(_r))
+    (RING_FREE_REQUESTS(_r) == 0)
 
 /* Test if there are outstanding messages to be processed on a ring. */
 #define RING_HAS_UNCONSUMED_RESPONSES(_r)                               \
-    ((_r)->rsp_cons != (_r)->sring->rsp_prod)
+    ((_r)->sring->rsp_prod - (_r)->rsp_cons)
 
 #define RING_HAS_UNCONSUMED_REQUESTS(_r)                                \
-    (((_r)->req_cons != (_r)->sring->req_prod) &&                       \
-     (((_r)->req_cons - (_r)->rsp_prod_pvt) != RING_SIZE(_r)))
+    ({                                                                 \
+       unsigned int req = (_r)->sring->req_prod - (_r)->req_cons;      \
+       unsigned int rsp = RING_SIZE(_r) -                              \
+                          ((_r)->req_cons - (_r)->rsp_prod_pvt);       \
+       req < rsp ? req : rsp;                                          \
+    })
 
 /* Direct access to individual ring elements, by index. */
 #define RING_GET_REQUEST(_r, _idx)                                      \
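
For a feel of the reworked RING_HAS_UNCONSUMED_REQUESTS, a standalone sketch that re-implements the same min() as a function over a cut-down ring: the cap keeps a backend from consuming more requests than it has response slots for, even if a buggy frontend over-advances req_prod. The counter values are made up.

    #include <stdio.h>

    /* Cut-down back ring: just the free-running counters the macro uses. */
    struct demo_ring {
        unsigned int req_cons;        /* requests consumed by the backend   */
        unsigned int rsp_prod_pvt;    /* responses produced (private copy)  */
        unsigned int sring_req_prod;  /* requests produced by the frontend  */
        unsigned int nr_ents;         /* ring size                          */
    };

    /* Same computation as the new RING_HAS_UNCONSUMED_REQUESTS. */
    static unsigned int unconsumed_requests(const struct demo_ring *r)
    {
        unsigned int req = r->sring_req_prod - r->req_cons;
        unsigned int rsp = r->nr_ents - (r->req_cons - r->rsp_prod_pvt);
        return req < rsp ? req : rsp;
    }

    int main(void)
    {
        /* 8-entry ring; a buggy frontend claims 20 outstanding requests.  The
         * backend has consumed 6 and answered 2, so only 8 - (6 - 2) = 4
         * response slots remain and at most 4 more requests are reported. */
        struct demo_ring r = { .req_cons = 6, .rsp_prod_pvt = 2,
                               .sring_req_prod = 20, .nr_ents = 8 };

        printf("unconsumed = %u\n", unconsumed_requests(&r));
        return 0;
    }
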
diff -r 760669a37a3a -r 5c0c59eb5f73 include/xen/interface/memory.h
--- a/include/xen/interface/memory.h    Wed Jun 07 19:53:53 2006 -0400
+++ b/include/xen/interface/memory.h    Thu Jun 08 15:10:05 2006 -0400
@@ -29,7 +29,7 @@ struct xen_memory_reservation {
      *   OUT: GMFN bases of extents that were allocated
      *   (NB. This command also updates the mach_to_phys translation table)
      */
-    XEN_GUEST_HANDLE(ulong) extent_start;
+    XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
 
     /* Number of extents, and size/alignment of each (2^extent_order pages). */
     unsigned long  nr_extents;
@@ -87,7 +87,7 @@ struct xen_machphys_mfn_list {
      * any large discontiguities in the machine address space, 2MB gaps in
      * the machphys table will be represented by an MFN base of zero.
      */
-    XEN_GUEST_HANDLE(ulong) extent_start;
+    XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
 
     /*
      * Number of extents written to the above array. This will be smaller
@@ -117,7 +117,7 @@ struct xen_add_to_physmap {
     unsigned long idx;
 
     /* GPFN where the source mapping page should appear. */
-    unsigned long gpfn;
+    xen_pfn_t     gpfn;
 };
 typedef struct xen_add_to_physmap xen_add_to_physmap_t;
 DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
@@ -135,13 +135,13 @@ struct xen_translate_gpfn_list {
     unsigned long nr_gpfns;
 
     /* List of GPFNs to translate. */
-    XEN_GUEST_HANDLE(ulong) gpfn_list;
+    XEN_GUEST_HANDLE(xen_pfn_t) gpfn_list;
 
     /*
      * Output list to contain MFN translations. May be the same as the input
      * list (in which case each input GPFN is overwritten with the output MFN).
      */
-    XEN_GUEST_HANDLE(ulong) mfn_list;
+    XEN_GUEST_HANDLE(xen_pfn_t) mfn_list;
 };
 typedef struct xen_translate_gpfn_list xen_translate_gpfn_list_t;
 DEFINE_XEN_GUEST_HANDLE(xen_translate_gpfn_list_t);
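
A hedged sketch of the in-place translation the comment above permits, with local stand-ins: plain pointers take the place of XEN_GUEST_HANDLE and no hypercall is issued.

    #include <stdint.h>
    #include <stdio.h>

    typedef unsigned long xen_pfn_t;       /* local stand-in */

    struct demo_translate_gpfn_list {      /* cut-down stand-in */
        uint16_t      domid;
        unsigned long nr_gpfns;
        xen_pfn_t    *gpfn_list;           /* IN:  GPFNs to translate       */
        xen_pfn_t    *mfn_list;            /* OUT: may alias the input list */
    };

    int main(void)
    {
        xen_pfn_t frames[4] = { 0x100, 0x101, 0x102, 0x103 };

        /* Point both handles at the same array so each GPFN is overwritten
         * in place with its MFN, as the interface comment permits. */
        struct demo_translate_gpfn_list op = {
            .domid     = 0,                /* DOMID_SELF in a real call */
            .nr_gpfns  = 4,
            .gpfn_list = frames,
            .mfn_list  = frames,
        };

        /* A real guest would now issue the translate-gpfn-list memory_op
         * hypercall with this structure; here we only report the setup. */
        printf("translating %lu gpfns in place\n", op.nr_gpfns);
        return 0;
    }
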
diff -r 760669a37a3a -r 5c0c59eb5f73 include/xen/interface/sched_ctl.h
--- a/include/xen/interface/sched_ctl.h Wed Jun 07 19:53:53 2006 -0400
+++ b/include/xen/interface/sched_ctl.h Thu Jun 08 15:10:05 2006 -0400
@@ -49,7 +49,7 @@ struct sched_adjdom_cmd {
             uint32_t extratime;
             uint32_t weight;
         } sedf;
-        struct csched_domain {
+        struct sched_credit_adjdom {
             uint16_t weight;
             uint16_t cap;
         } credit;
diff -r 760669a37a3a -r 5c0c59eb5f73 include/xen/interface/xen.h
--- a/include/xen/interface/xen.h       Wed Jun 07 19:53:53 2006 -0400
+++ b/include/xen/interface/xen.h       Thu Jun 08 15:10:05 2006 -0400
@@ -199,7 +199,7 @@ struct mmuext_op {
     unsigned int cmd;
     union {
         /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
-        unsigned long mfn;
+        xen_pfn_t     mfn;
         /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
         unsigned long linear_addr;
     } arg1;
@@ -236,10 +236,24 @@ DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
  */
 #define VMASST_CMD_enable                0
 #define VMASST_CMD_disable               1
+
+/* x86/32 guests: simulate full 4GB segment limits. */
 #define VMASST_TYPE_4gb_segments         0
+
+/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */
 #define VMASST_TYPE_4gb_segments_notify  1
+
+/*
+ * x86 guests: support writes to bottom-level PTEs.
+ * NB1. Page-directory entries cannot be written.
+ * NB2. Guest must continue to remove all writable mappings of PTEs.
+ */
 #define VMASST_TYPE_writable_pagetables  2
-#define MAX_VMASST_TYPE 2
+
+/* x86/PAE guests: support PDPTs above 4GB. */
+#define VMASST_TYPE_pae_extended_cr3     3
+
+#define MAX_VMASST_TYPE                  3
 
 #ifndef __ASSEMBLY__
 
@@ -449,9 +463,9 @@ struct start_info {
     unsigned long nr_pages;     /* Total pages allocated to this domain.  */
     unsigned long shared_info;  /* MACHINE address of shared info struct. */
     uint32_t flags;             /* SIF_xxx flags.                         */
-    unsigned long store_mfn;    /* MACHINE page number of shared page.    */
+    xen_pfn_t store_mfn;        /* MACHINE page number of shared page.    */
     uint32_t store_evtchn;      /* Event channel for store communication. */
-    unsigned long console_mfn;  /* MACHINE address of console page.       */
+    xen_pfn_t console_mfn;      /* MACHINE page number of console page.   */
     uint32_t console_evtchn;    /* Event channel for console messages.    */
     /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME).     */
     unsigned long pt_base;      /* VIRTUAL address of page directory.     */
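
A hedged sketch of how a PAE guest might opt in to the new assist during early boot; the hypercall wrapper below is a logging stub so the sketch stands alone.

    #include <stdio.h>

    /* Constants as defined above. */
    #define VMASST_CMD_enable                0
    #define VMASST_TYPE_pae_extended_cr3     3

    /* Stub standing in for the real HYPERVISOR_vm_assist hypercall wrapper;
     * it only logs the request so the sketch runs standalone. */
    static int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
    {
        printf("vm_assist(cmd=%u, type=%u)\n", cmd, type);
        return 0;
    }

    int main(void)
    {
        /* A PAE guest asks Xen to accept packed "extended" cr3 values (see
         * the xen_pfn_to_cr3 accessors) before installing a page directory
         * that may live above 4GB.  On failure the guest must keep its
         * PDPT below 4GB. */
        if (HYPERVISOR_vm_assist(VMASST_CMD_enable,
                                 VMASST_TYPE_pae_extended_cr3) != 0)
            printf("pae_extended_cr3 not supported; keeping PDPT below 4GB\n");
        return 0;
    }
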
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/ia64/xen/drivers/Makefile
--- a/arch/ia64/xen/drivers/Makefile    Wed Jun 07 19:53:53 2006 -0400
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,22 +0,0 @@
-
-ifneq ($(CONFIG_XEN_IA64_DOM0_VP),y)
-obj-y   += util.o
-endif
-
-obj-y  += core/
-#obj-y += char/
-obj-y  += console/
-obj-y  += evtchn/
-obj-$(CONFIG_XEN_IA64_DOM0_VP) += balloon/
-obj-y  += privcmd/
-obj-y  += xenbus/
-
-obj-$(CONFIG_XEN_BLKDEV_BACKEND)       += blkback/
-obj-$(CONFIG_XEN_NETDEV_BACKEND)       += netback/
-obj-$(CONFIG_XEN_TPMDEV_BACKEND)       += tpmback/
-obj-$(CONFIG_XEN_BLKDEV_FRONTEND)      += blkfront/
-obj-$(CONFIG_XEN_NETDEV_FRONTEND)      += netfront/
-obj-$(CONFIG_XEN_BLKDEV_TAP)           += blktap/
-obj-$(CONFIG_XEN_TPMDEV_FRONTEND)      += tpmfront/
-obj-$(CONFIG_XEN_PCIDEV_BACKEND)       += pciback/
-obj-$(CONFIG_XEN_PCIDEV_FRONTEND)      += pcifront/
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/ia64/xen/drivers/coreMakefile
--- a/arch/ia64/xen/drivers/coreMakefile        Wed Jun 07 19:53:53 2006 -0400
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,20 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-obj-y   := gnttab.o features.o
-obj-$(CONFIG_PROC_FS) += xen_proc.o
-
-ifeq ($(ARCH),ia64)
-obj-y   += evtchn_ia64.o
-obj-y   += xenia64_init.o
-ifeq ($(CONFIG_XEN_IA64_DOM0_VP),y)
-obj-$(CONFIG_NET)     += skbuff.o
-endif
-else
-obj-y   += reboot.o evtchn.o fixup.o 
-obj-$(CONFIG_SMP)     += smp.o         # setup_profiling_timer def'd in ia64
-obj-$(CONFIG_NET)     += skbuff.o      # until networking is up on ia64
-endif
-obj-$(CONFIG_SYSFS)   += hypervisor_sysfs.o
-obj-$(CONFIG_XEN_SYSFS) += xen_sysfs.o
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/ia64/xen/drivers/evtchn_ia64.c
--- a/arch/ia64/xen/drivers/evtchn_ia64.c       Wed Jun 07 19:53:53 2006 -0400
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,261 +0,0 @@
-/* NOTE: This file split off from evtchn.c because there was
-   some discussion that the mechanism is sufficiently different.
-   It may be possible to merge it back in the future... djm */
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <asm/hw_irq.h>
-#include <xen/evtchn.h>
-
-#define MAX_EVTCHN 1024
-
-/* Xen will never allocate port zero for any purpose. */
-#define VALID_EVTCHN(_chn) (((_chn) != 0) && ((_chn) < MAX_EVTCHN))
-
-/* Binding types. Hey, only IRQT_VIRQ and IRQT_EVTCHN are supported now
- * for XEN/IA64 - ktian1
- */
-enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };
-
-/* Constructor for packed IRQ information. */
-#define mk_irq_info(type, index, evtchn)                               \
-       (((u32)(type) << 24) | ((u32)(index) << 16) | (u32)(evtchn))
-/* Convenient shorthand for packed representation of an unbound IRQ. */
-#define IRQ_UNBOUND    mk_irq_info(IRQT_UNBOUND, 0, 0)
-/* Accessor macros for packed IRQ information. */
-#define evtchn_from_irq(irq) ((u16)(irq_info[irq]))
-#define index_from_irq(irq)  ((u8)(irq_info[irq] >> 16))
-#define type_from_irq(irq)   ((u8)(irq_info[irq] >> 24))
-
-/* Packed IRQ information: binding type, sub-type index, and event channel. */
-static u32 irq_info[NR_IRQS];
-
-/* One note for XEN/IA64 is that we have all event channels bound to one
- * physical irq vector. So we always mean evtchn vector identical to 'irq'
- * vector in this context. - ktian1
- */
-static struct {
-       irqreturn_t (*handler)(int, void *, struct pt_regs *);
-       void *dev_id;
-       char opened;    /* Whether allocated */
-} evtchns[MAX_EVTCHN];
-
-/*
- * This lock protects updates to the following mapping and reference-count
- * arrays. The lock does not need to be acquired to read the mapping tables.
- */
-static spinlock_t irq_mapping_update_lock;
-
-void mask_evtchn(int port)
-{
-       shared_info_t *s = HYPERVISOR_shared_info;
-       synch_set_bit(port, &s->evtchn_mask[0]);
-}
-EXPORT_SYMBOL(mask_evtchn);
-
-void unmask_evtchn(int port)
-{
-       shared_info_t *s = HYPERVISOR_shared_info;
-       unsigned int cpu = smp_processor_id();
-       vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
-
-#if 0  // FIXME: diverged from x86 evtchn.c
-       /* Slow path (hypercall) if this is a non-local port. */
-       if (unlikely(cpu != cpu_from_evtchn(port))) {
-               struct evtchn_unmask op = { .port = port };
-               (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op);
-               return;
-       }
-#endif
-
-       synch_clear_bit(port, &s->evtchn_mask[0]);
-
-       /*
-        * The following is basically the equivalent of 'hw_resend_irq'. Just
-        * like a real IO-APIC we 'lose the interrupt edge' if the channel is
-        * masked.
-        */
-       if (synch_test_bit(port, &s->evtchn_pending[0]) && 
-           !synch_test_and_set_bit(port / BITS_PER_LONG,
-                                   &vcpu_info->evtchn_pending_sel)) {
-               vcpu_info->evtchn_upcall_pending = 1;
-               if (!vcpu_info->evtchn_upcall_mask)
-                       force_evtchn_callback();
-       }
-}
-EXPORT_SYMBOL(unmask_evtchn);
-
-
-#define unbound_irq(e) (VALID_EVTCHN(e) && (!evtchns[(e)].opened))
-int bind_virq_to_irqhandler(
-       unsigned int virq,
-       unsigned int cpu,
-       irqreturn_t (*handler)(int, void *, struct pt_regs *),
-       unsigned long irqflags,
-       const char *devname,
-       void *dev_id)
-{
-    struct evtchn_bind_virq bind_virq;
-    int evtchn;
-
-    spin_lock(&irq_mapping_update_lock);
-
-    bind_virq.virq = virq;
-    bind_virq.vcpu = cpu;
-    if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq) != 0)
-        BUG();
-    evtchn = bind_virq.port;
-
-    if (!unbound_irq(evtchn)) {
-        evtchn = -EINVAL;
-        goto out;
-    }
-
-    evtchns[evtchn].handler = handler;
-    evtchns[evtchn].dev_id = dev_id;
-    evtchns[evtchn].opened = 1;
-    irq_info[evtchn] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
-
-    unmask_evtchn(evtchn);
-out:
-    spin_unlock(&irq_mapping_update_lock);
-    return evtchn;
-}
-
-int bind_evtchn_to_irqhandler(unsigned int evtchn,
-                   irqreturn_t (*handler)(int, void *, struct pt_regs *),
-                   unsigned long irqflags, const char * devname, void *dev_id)
-{
-    spin_lock(&irq_mapping_update_lock);
-
-    if (!unbound_irq(evtchn)) {
-       evtchn = -EINVAL;
-       goto out;
-    }
-
-    evtchns[evtchn].handler = handler;
-    evtchns[evtchn].dev_id = dev_id;
-    evtchns[evtchn].opened = 1;
-    irq_info[evtchn] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
-
-    unmask_evtchn(evtchn);
-out:
-    spin_unlock(&irq_mapping_update_lock);
-    return evtchn;
-}
-
-int bind_ipi_to_irqhandler(
-       unsigned int ipi,
-       unsigned int cpu,
-       irqreturn_t (*handler)(int, void *, struct pt_regs *),
-       unsigned long irqflags,
-       const char *devname,
-       void *dev_id)
-{
-    printk("%s is called which has not been supported now...?\n", 
__FUNCTION__);
-    while(1);
-}
-
-void unbind_from_irqhandler(unsigned int irq, void *dev_id)
-{
-    struct evtchn_close close;
-    int evtchn = evtchn_from_irq(irq);
-
-    spin_lock(&irq_mapping_update_lock);
-
-    if (unbound_irq(irq))
-        goto out;
-
-    close.port = evtchn;
-    if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
-        BUG();
-
-    switch (type_from_irq(irq)) {
-       case IRQT_VIRQ:
-           /* Add smp stuff later... */
-           break;
-       case IRQT_IPI:
-           /* Add smp stuff later... */
-           break;
-       default:
-           break;
-    }
-
-    mask_evtchn(evtchn);
-    evtchns[evtchn].handler = NULL;
-    evtchns[evtchn].opened = 0;
-
-out:
-    spin_unlock(&irq_mapping_update_lock);
-}
-
-void notify_remote_via_irq(int irq)
-{
-       int evtchn = evtchn_from_irq(irq);
-
-       if (!unbound_irq(evtchn))
-               notify_remote_via_evtchn(evtchn);
-}
-
-irqreturn_t evtchn_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
-    unsigned long  l1, l2;
-    unsigned int   l1i, l2i, port;
-    irqreturn_t (*handler)(int, void *, struct pt_regs *);
-    shared_info_t *s = HYPERVISOR_shared_info;
-    vcpu_info_t   *vcpu_info = &s->vcpu_info[smp_processor_id()];
-
-    vcpu_info->evtchn_upcall_mask = 1;
-    vcpu_info->evtchn_upcall_pending = 0;
-
-    /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
-    l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
-    while ( l1 != 0 )
-    {
-        l1i = __ffs(l1);
-        l1 &= ~(1UL << l1i);
-
-        while ( (l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i]) != 0 )
-        {
-            l2i = __ffs(l2);
-            l2 &= ~(1UL << l2i);
-
-            port = (l1i * BITS_PER_LONG) + l2i;
-            if ( (handler = evtchns[port].handler) != NULL )
-           {
-               clear_evtchn(port);
-                handler(port, evtchns[port].dev_id, regs);
-           }
-            else
-           {
-                evtchn_device_upcall(port);
-           }
-        }
-    }
-    vcpu_info->evtchn_upcall_mask = 0;
-    return IRQ_HANDLED;
-}
-
-void force_evtchn_callback(void)
-{
-       //(void)HYPERVISOR_xen_version(0, NULL);
-}
-
-static struct irqaction evtchn_irqaction = {
-       .handler =      evtchn_interrupt,
-       .flags =        SA_INTERRUPT,
-       .name =         "xen-event-channel"
-};
-
-static int evtchn_irq = 0xe9;
-void __init evtchn_init(void)
-{
-    shared_info_t *s = HYPERVISOR_shared_info;
-
-    register_percpu_irq(evtchn_irq, &evtchn_irqaction);
-
-    s->arch.evtchn_vector = evtchn_irq;
-    printk("xen-event-channel using irq %d\n", evtchn_irq);
-
-    spin_lock_init(&irq_mapping_update_lock);
-    memset(evtchns, 0, sizeof(evtchns));
-}
diff -r 760669a37a3a -r 5c0c59eb5f73 arch/ia64/xen/xenconsole.c
--- a/arch/ia64/xen/xenconsole.c        Wed Jun 07 19:53:53 2006 -0400
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,19 +0,0 @@
-#include <linux/config.h>
-#include <linux/console.h>
-
-int
-early_xen_console_setup (char *cmdline)
-{
-#ifdef CONFIG_XEN
-#ifndef CONFIG_IA64_HP_SIM
-       extern int running_on_xen;
-       if (running_on_xen) {
-               extern struct console hpsim_cons;
-               hpsim_cons.flags |= CON_BOOT;
-               register_console(&hpsim_cons);
-               return 0;
-       }
-#endif
-#endif
-       return -1;
-}
