xen-ppc-devel

[XenPPC] [xenppc-unstable] [ppc] merge

# HG changeset patch
# User Hollis Blanchard <hollisb@xxxxxxxxxx>
# Node ID 91ee504ed40e4513e196673d0554260904f1ce4f
# Parent  29861ae27914ea6905d75276fd0d612969878874
# Parent  fdc26ec44145cecc52518bc790e0611e831259f5
[ppc] merge
Signed-off-by: Hollis Blanchard <hollisb@xxxxxxxxxx>
---
 linux-2.6-xen-sparse/arch/ia64/xen/drivers/Makefile              |   22 
 linux-2.6-xen-sparse/arch/ia64/xen/drivers/coreMakefile          |   20 
 linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c         |  261 ---
 linux-2.6-xen-sparse/arch/ia64/xen/xenconsole.c                  |   19 
 tools/vtpm/tpm_emulator-0.2b-x86_64.patch                        |  431 -----
 .hgignore                                                        |    6 
 Config.mk                                                        |   14 
 Makefile                                                         |    2 
 buildconfigs/linux-defconfig_xen0_ia64                           |    2 
 buildconfigs/linux-defconfig_xen_ia64                            |    2 
 config/ia64.mk                                                   |    4 
 config/x86_32.mk                                                 |    9 
 config/x86_64.mk                                                 |    9 
 extras/mini-os/Makefile                                          |    3 
 extras/mini-os/include/mm.h                                      |   21 
 extras/mini-os/minios-x86_32.lds                                 |    2 
 extras/mini-os/minios-x86_64.lds                                 |    2 
 extras/mini-os/mm.c                                              |   90 -
 extras/mini-os/x86_32.S                                          |    4 
 extras/mini-os/x86_64.S                                          |    4 
 linux-2.6-xen-sparse/arch/i386/kernel/head-xen.S                 |    2 
 linux-2.6-xen-sparse/arch/i386/kernel/process-xen.c              |   14 
 linux-2.6-xen-sparse/arch/i386/kernel/time-xen.c                 |   20 
 linux-2.6-xen-sparse/arch/i386/kernel/vm86.c                     |    4 
 linux-2.6-xen-sparse/arch/ia64/Kconfig                           |   36 
 linux-2.6-xen-sparse/arch/ia64/kernel/iosapic.c                  |   11 
 linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c                 |  197 ++
 linux-2.6-xen-sparse/arch/ia64/kernel/setup.c                    |   21 
 linux-2.6-xen-sparse/arch/ia64/xen-mkbuildtree-pre               |    7 
 linux-2.6-xen-sparse/arch/ia64/xen/Makefile                      |    2 
 linux-2.6-xen-sparse/arch/ia64/xen/drivers/xenia64_init.c        |    1 
 linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c                  |  285 ++-
 linux-2.6-xen-sparse/arch/ia64/xen/xenivt.S                      |   45 
 linux-2.6-xen-sparse/arch/x86_64/kernel/process-xen.c            |   15 
 linux-2.6-xen-sparse/arch/x86_64/kernel/smp-xen.c                |    2 
 linux-2.6-xen-sparse/drivers/xen/Kconfig                         |   26 
 linux-2.6-xen-sparse/drivers/xen/Makefile                        |    9 
 linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c               |    6 
 linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c             |   21 
 linux-2.6-xen-sparse/drivers/xen/blkfront/block.h                |    1 
 linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c                  |    1 
 linux-2.6-xen-sparse/drivers/xen/core/Makefile                   |   16 
 linux-2.6-xen-sparse/drivers/xen/core/smpboot.c                  |   18 
 linux-2.6-xen-sparse/drivers/xen/netback/loopback.c              |    2 
 linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/dma-mapping.h |    2 
 linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypercall.h   |    7 
 linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/system.h      |   10 
 linux-2.6-xen-sparse/include/asm-ia64/hw_irq.h                   |  145 +
 linux-2.6-xen-sparse/include/asm-ia64/hypercall.h                |   40 
 linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h               |    4 
 linux-2.6-xen-sparse/include/asm-ia64/irq.h                      |   69 
 linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h               |   32 
 linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/hypercall.h |    7 
 linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/system.h    |    4 
 linux-2.6-xen-sparse/include/xen/cpu_hotplug.h                   |    2 
 tools/Makefile                                                   |   47 
 tools/Rules.mk                                                   |    2 
 tools/debugger/libxendebug/xendebug.c                            |    5 
 tools/examples/network-bridge                                    |    1 
 tools/firmware/hvmloader/Makefile                                |   12 
 tools/firmware/rombios/Makefile                                  |    4 
 tools/ioemu/hw/cirrus_vga.c                                      |    5 
 tools/ioemu/hw/ne2000.c                                          |   36 
 tools/ioemu/hw/pc.c                                              |    5 
 tools/ioemu/hw/vga.c                                             |    2 
 tools/ioemu/vl.c                                                 |   21 
 tools/ioemu/vl.h                                                 |    2 
 tools/libxc/Makefile                                             |   77 
 tools/libxc/xc_core.c                                            |    2 
 tools/libxc/xc_hvm_build.c                                       |    2 
 tools/libxc/xc_linux_build.c                                     |   26 
 tools/libxc/xc_linux_restore.c                                   |    4 
 tools/libxc/xc_linux_save.c                                      |    6 
 tools/libxc/xc_load_elf.c                                        |    8 
 tools/libxc/xc_pagetab.c                                         |    2 
 tools/libxc/xc_ptrace.c                                          |   11 
 tools/libxc/xc_ptrace_core.c                                     |   15 
 tools/libxc/xg_private.c                                         |   15 
 tools/libxc/xg_private.h                                         |    8 
 tools/misc/Makefile                                              |    2 
 tools/python/xen/util/security.py                                |    9 
 tools/python/xen/xm/main.py                                      |    2 
 tools/vtpm/Makefile                                              |   14 
 tools/vtpm/Rules.mk                                              |    1 
 tools/vtpm/tpm_emulator-0.3-x86_64.patch                         |  381 ++++
 tools/vtpm/vtpm.patch                                            |  407 +++-
 xen/arch/ia64/Rules.mk                                           |   28 
 xen/arch/ia64/linux-xen/entry.S                                  |   17 
 xen/arch/ia64/linux-xen/iosapic.c                                |    2 
 xen/arch/ia64/linux-xen/sal.c                                    |    5 
 xen/arch/ia64/linux-xen/smp.c                                    |    2 
 xen/arch/ia64/linux-xen/smpboot.c                                |    7 
 xen/arch/ia64/linux-xen/time.c                                   |    8 
 xen/arch/ia64/tools/sparse-merge                                 |   10 
 xen/arch/ia64/vmx/pal_emul.c                                     |   24 
 xen/arch/ia64/vmx/vmmu.c                                         |   73 
 xen/arch/ia64/vmx/vmx_init.c                                     |    2 
 xen/arch/ia64/vmx/vmx_interrupt.c                                |   11 
 xen/arch/ia64/vmx/vmx_ivt.S                                      |  816 ++++------
 xen/arch/ia64/vmx/vmx_process.c                                  |  226 --
 xen/arch/ia64/vmx/vtlb.c                                         |  680 +++-----
 xen/arch/ia64/xen/dom_fw.c                                       |   63 
 xen/arch/ia64/xen/domain.c                                       |  133 +
 xen/arch/ia64/xen/efi_emul.c                                     |    8 
 xen/arch/ia64/xen/hypercall.c                                    |   99 +
 xen/arch/ia64/xen/hyperprivop.S                                  |    5 
 xen/arch/ia64/xen/irq.c                                          |   78 
 xen/arch/ia64/xen/privop.c                                       |    8 
 xen/arch/ia64/xen/process.c                                      |  160 +
 xen/arch/ia64/xen/vcpu.c                                         |   54 
 xen/arch/ia64/xen/xensetup.c                                     |    6 
 xen/arch/ia64/xen/xentime.c                                      |    6 
 xen/arch/x86/audit.c                                             |    4 
 xen/arch/x86/cpu/mtrr/main.c                                     |    2 
 xen/arch/x86/dom0_ops.c                                          |    2 
 xen/arch/x86/domain.c                                            |   20 
 xen/arch/x86/domain_build.c                                      |    6 
 xen/arch/x86/hvm/io.c                                            |   10 
 xen/arch/x86/hvm/platform.c                                      |   24 
 xen/arch/x86/hvm/svm/svm.c                                       |  240 ++
 xen/arch/x86/hvm/vmx/vmx.c                                       |   12 
 xen/arch/x86/i8259.c                                             |    2 
 xen/arch/x86/microcode.c                                         |    2 
 xen/arch/x86/mm.c                                                |  143 -
 xen/arch/x86/setup.c                                             |    2 
 xen/arch/x86/shadow.c                                            |   12 
 xen/arch/x86/shadow32.c                                          |   17 
 xen/arch/x86/shadow_public.c                                     |   14 
 xen/arch/x86/smp.c                                               |    2 
 xen/arch/x86/smpboot.c                                           |   17 
 xen/arch/x86/time.c                                              |    6 
 xen/arch/x86/traps.c                                             |   10 
 xen/arch/x86/x86_32/domain_page.c                                |    2 
 xen/arch/x86/x86_32/mm.c                                         |    3 
 xen/arch/x86/x86_64/mm.c                                         |    3 
 xen/arch/x86/x86_64/traps.c                                      |    2 
 xen/arch/x86/x86_emulate.c                                       |   15 
 xen/common/dom0_ops.c                                            |    2 
 xen/common/domain.c                                              |  134 +
 xen/common/page_alloc.c                                          |    4 
 xen/common/perfc.c                                               |    2 
 xen/common/sched_bvt.c                                           |   36 
 xen/common/sched_credit.c                                        |   30 
 xen/common/sched_sedf.c                                          |   39 
 xen/common/schedule.c                                            |  108 -
 xen/common/trace.c                                               |   12 
 xen/common/xmalloc.c                                             |    2 
 xen/drivers/char/console.c                                       |    6 
 xen/include/asm-ia64/dom_fw.h                                    |   19 
 xen/include/asm-ia64/domain.h                                    |   17 
 xen/include/asm-ia64/event.h                                     |    4 
 xen/include/asm-ia64/linux-xen/asm/pgtable.h                     |    4 
 xen/include/asm-ia64/linux-xen/linux/interrupt.h                 |    4 
 xen/include/asm-ia64/linux/asm/README.origin                     |    1 
 xen/include/asm-ia64/linux/asm/fpswa.h                           |   73 
 xen/include/asm-ia64/mm.h                                        |    3 
 xen/include/asm-ia64/shadow.h                                    |    6 
 xen/include/asm-ia64/vcpu.h                                      |    2 
 xen/include/asm-ia64/vmmu.h                                      |  170 --
 xen/include/asm-ia64/vmx_pal.h                                   |    1 
 xen/include/asm-ia64/vmx_vcpu.h                                  |    5 
 xen/include/asm-x86/page.h                                       |   11 
 xen/include/asm-x86/shadow.h                                     |   31 
 xen/include/public/arch-ia64.h                                   |    6 
 xen/include/public/arch-x86_32.h                                 |    8 
 xen/include/public/arch-x86_64.h                                 |    3 
 xen/include/xen/console.h                                        |    2 
 xen/include/xen/domain.h                                         |   23 
 xen/include/xen/perfc.h                                          |    6 
 xen/include/xen/sched-if.h                                       |   11 
 xen/include/xen/sched.h                                          |   12 
 xen/tools/Makefile                                               |    3 
 xen/tools/figlet/Makefile                                        |    3 
 173 files changed, 4006 insertions(+), 3045 deletions(-)

diff -r 29861ae27914 -r 91ee504ed40e .hgignore
--- a/.hgignore Tue May 30 15:24:31 2006 -0500
+++ b/.hgignore Fri Jun 02 12:31:48 2006 -0500
@@ -113,9 +113,9 @@
 ^tools/firmware/acpi/acpigen$
 ^tools/firmware/hvmloader/hvmloader$
 ^tools/firmware/hvmloader/roms\.h$
-^tools/firmware/rombios/BIOS-bochs-latest$
-^tools/firmware/rombios/_rombios_\.c$
-^tools/firmware/rombios/rombios\.s$
+^tools/firmware/rombios/BIOS-bochs-[^/]*$
+^tools/firmware/rombios/_rombios[^/]*_\.c$
+^tools/firmware/rombios/rombios[^/]*\.s$
 ^tools/firmware/vmxassist/acpi\.h$
 ^tools/firmware/vmxassist/gen$
 ^tools/firmware/vmxassist/offsets\.h$
diff -r 29861ae27914 -r 91ee504ed40e Config.mk
--- a/Config.mk Tue May 30 15:24:31 2006 -0500
+++ b/Config.mk Fri Jun 02 12:31:48 2006 -0500
@@ -38,19 +38,7 @@ CFLAGS    += -g
 CFLAGS    += -g
 endif
 
-ifeq ($(XEN_TARGET_ARCH),x86_32)
-CFLAGS  += -m32 -march=i686
-endif
-
-ifeq ($(XEN_TARGET_ARCH),x86_64)
-CFLAGS  += -m64
-endif
-
-ifeq ($(XEN_TARGET_ARCH),x86_64)
-LIBDIR = lib64
-else
-LIBDIR = lib
-endif
+include $(XEN_ROOT)/config/$(XEN_TARGET_ARCH).mk
 
 ifneq ($(EXTRA_PREFIX),)
 EXTRA_INCLUDES += $(EXTRA_PREFIX)/include
diff -r 29861ae27914 -r 91ee504ed40e Makefile
--- a/Makefile  Tue May 30 15:24:31 2006 -0500
+++ b/Makefile  Fri Jun 02 12:31:48 2006 -0500
@@ -17,7 +17,7 @@ endif
 .PHONY: all
 all: dist
 
-XEN_ROOT=$(CURDIR)
+export XEN_ROOT=$(CURDIR)
 include Config.mk
 include buildconfigs/Rules.mk
 
diff -r 29861ae27914 -r 91ee504ed40e buildconfigs/linux-defconfig_xen0_ia64
--- a/buildconfigs/linux-defconfig_xen0_ia64    Tue May 30 15:24:31 2006 -0500
+++ b/buildconfigs/linux-defconfig_xen0_ia64    Fri Jun 02 12:31:48 2006 -0500
@@ -1023,7 +1023,7 @@ CONFIG_SND_AC97_BUS=y
 CONFIG_SND_AC97_BUS=y
 CONFIG_SND_DUMMY=y
 CONFIG_SND_VIRMIDI=y
-CONFIG_SND_MTPAV=y
+# CONFIG_SND_MTPAV is not set
 CONFIG_SND_SERIAL_U16550=y
 CONFIG_SND_MPU401=y
 
diff -r 29861ae27914 -r 91ee504ed40e buildconfigs/linux-defconfig_xen_ia64
--- a/buildconfigs/linux-defconfig_xen_ia64     Tue May 30 15:24:31 2006 -0500
+++ b/buildconfigs/linux-defconfig_xen_ia64     Fri Jun 02 12:31:48 2006 -0500
@@ -1029,7 +1029,7 @@ CONFIG_SND_AC97_BUS=y
 CONFIG_SND_AC97_BUS=y
 CONFIG_SND_DUMMY=y
 CONFIG_SND_VIRMIDI=y
-CONFIG_SND_MTPAV=y
+# CONFIG_SND_MTPAV is not set
 CONFIG_SND_SERIAL_U16550=y
 CONFIG_SND_MPU401=y
 
diff -r 29861ae27914 -r 91ee504ed40e extras/mini-os/Makefile
--- a/extras/mini-os/Makefile   Tue May 30 15:24:31 2006 -0500
+++ b/extras/mini-os/Makefile   Fri Jun 02 12:31:48 2006 -0500
@@ -1,7 +1,8 @@ debug ?= y
 debug ?= y
 pae ?= n
 
-include $(CURDIR)/../../Config.mk
+XEN_ROOT = ../..
+include $(XEN_ROOT)/Config.mk
 
 # Set TARGET_ARCH
 override TARGET_ARCH     := $(XEN_TARGET_ARCH)
diff -r 29861ae27914 -r 91ee504ed40e extras/mini-os/include/mm.h
--- a/extras/mini-os/include/mm.h       Tue May 30 15:24:31 2006 -0500
+++ b/extras/mini-os/include/mm.h       Fri Jun 02 12:31:48 2006 -0500
@@ -53,7 +53,7 @@
 #define PADDR_BITS              32
 #define PADDR_MASK              (~0UL)
 
-#define UNMAPPED_PT_FRAMES        1
+#define NOT_L1_FRAMES           1
 #define PRIpte "08lx"
 typedef unsigned long pgentry_t;
 
@@ -71,7 +71,12 @@ typedef unsigned long pgentry_t;
 
 #define L2_MASK  ((1UL << L3_PAGETABLE_SHIFT) - 1)
 
-#define UNMAPPED_PT_FRAMES        2
+/*
+ * If starting from virtual address greater than 0xc0000000,
+ * this value will be 2 to account for final mid-level page
+ * directory which is always mapped in at this location.
+ */
+#define NOT_L1_FRAMES           3
 #define PRIpte "016llx"
 typedef uint64_t pgentry_t;
 
@@ -94,20 +99,10 @@ typedef uint64_t pgentry_t;
 #define PADDR_MASK              ((1UL << PADDR_BITS)-1)
 #define VADDR_MASK              ((1UL << VADDR_BITS)-1)
 
-/* Get physical address of page mapped by pte (paddr_t). */
-#define l1e_get_paddr(x)           \
-    ((unsigned long)(((x) & (PADDR_MASK&PAGE_MASK))))
-#define l2e_get_paddr(x)           \
-    ((unsigned long)(((x) & (PADDR_MASK&PAGE_MASK))))
-#define l3e_get_paddr(x)           \
-    ((unsigned long)(((x) & (PADDR_MASK&PAGE_MASK))))
-#define l4e_get_paddr(x)           \
-    ((unsigned long)(((x) & (PADDR_MASK&PAGE_MASK))))
-
 #define L2_MASK  ((1UL << L3_PAGETABLE_SHIFT) - 1)
 #define L3_MASK  ((1UL << L4_PAGETABLE_SHIFT) - 1)
 
-#define UNMAPPED_PT_FRAMES        3
+#define NOT_L1_FRAMES           3
 #define PRIpte "016lx"
 typedef unsigned long pgentry_t;
 
diff -r 29861ae27914 -r 91ee504ed40e extras/mini-os/minios-x86_32.lds
--- a/extras/mini-os/minios-x86_32.lds  Tue May 30 15:24:31 2006 -0500
+++ b/extras/mini-os/minios-x86_32.lds  Fri Jun 02 12:31:48 2006 -0500
@@ -3,7 +3,7 @@ ENTRY(_start)
 ENTRY(_start)
 SECTIONS
 {
-  . = 0xC0000000;
+  . = 0x0;
   _text = .;                   /* Text and read-only data */
   .text : {
        *(.text)
diff -r 29861ae27914 -r 91ee504ed40e extras/mini-os/minios-x86_64.lds
--- a/extras/mini-os/minios-x86_64.lds  Tue May 30 15:24:31 2006 -0500
+++ b/extras/mini-os/minios-x86_64.lds  Fri Jun 02 12:31:48 2006 -0500
@@ -3,7 +3,7 @@ ENTRY(_start)
 ENTRY(_start)
 SECTIONS
 {
-  . = 0xFFFFFFFF80000000;
+  . = 0x0;
   _text = .;                   /* Text and read-only data */
   .text : {
        *(.text)
diff -r 29861ae27914 -r 91ee504ed40e extras/mini-os/mm.c
--- a/extras/mini-os/mm.c       Tue May 30 15:24:31 2006 -0500
+++ b/extras/mini-os/mm.c       Fri Jun 02 12:31:48 2006 -0500
@@ -375,7 +375,7 @@ void new_pt_frame(unsigned long *pt_pfn,
     struct mmuext_op pin_request;
     
     DEBUG("Allocating new L%d pt frame for pt_pfn=%lx, "
-           "prev_l_mfn=%lx, offset=%lx\n", 
+           "prev_l_mfn=%lx, offset=%lx", 
            level, *pt_pfn, prev_l_mfn, offset);
 
     /* We need to clear the page, otherwise we might fail to map it
@@ -442,12 +442,64 @@ void new_pt_frame(unsigned long *pt_pfn,
     mmu_updates[0].ptr = ((pgentry_t)prev_l_mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
     mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | prot_t;
     if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0) 
-    {            
+    {
        printk("ERROR: mmu_update failed\n");
        do_exit();
     }
 
     *pt_pfn += 1;
+}
+
+/* Checks if a pagetable frame is needed (if weren't allocated by Xen) */
+static int need_pt_frame(unsigned long virt_address, int level)
+{
+    unsigned long hyp_virt_start = HYPERVISOR_VIRT_START;
+#if defined(__x86_64__)
+    unsigned long hyp_virt_end = HYPERVISOR_VIRT_END;
+#else
+    unsigned long hyp_virt_end = 0xffffffff;
+#endif
+
+    /* In general frames will _not_ be needed if they were already
+       allocated to map the hypervisor into our VA space */
+#if defined(__x86_64__)
+    if(level == L3_FRAME)
+    {
+        if(l4_table_offset(virt_address) >= 
+           l4_table_offset(hyp_virt_start) &&
+           l4_table_offset(virt_address) <= 
+           l4_table_offset(hyp_virt_end))
+            return 0;
+        return 1;
+    } else
+#endif
+
+#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
+    if(level == L2_FRAME)
+    {
+#if defined(__x86_64__)
+        if(l4_table_offset(virt_address) >= 
+           l4_table_offset(hyp_virt_start) &&
+           l4_table_offset(virt_address) <= 
+           l4_table_offset(hyp_virt_end))
+#endif
+            if(l3_table_offset(virt_address) >= 
+               l3_table_offset(hyp_virt_start) &&
+               l3_table_offset(virt_address) <= 
+               l3_table_offset(hyp_virt_end))
+                return 0;
+
+        return 1;
+    } else 
+#endif /* defined(__x86_64__) || defined(CONFIG_X86_PAE) */
+
+    /* Always need l1 frames */
+    if(level == L1_FRAME)
+        return 1;
+
+    printk("ERROR: Unknown frame level %d, hypervisor %llx,%llx\n", 
+        level, hyp_virt_start, hyp_virt_end);
+    return -1;
 }
 
 void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
@@ -460,11 +512,21 @@ void build_pagetable(unsigned long *star
     unsigned long offset;
     int count = 0;
 
-    pfn_to_map = (start_info.nr_pt_frames - UNMAPPED_PT_FRAMES) * L1_PAGETABLE_ENTRIES;
+    pfn_to_map = (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES;
+
+    if (*max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START))
+    {
+        printk("WARNING: Mini-OS trying to use Xen virtual space. "
+               "Truncating memory from %dMB to ",
+               ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
+        *max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE);
+        printk("%dMB\n",
+               ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
+    }
 
     start_address = (unsigned long)pfn_to_virt(pfn_to_map);
     end_address = (unsigned long)pfn_to_virt(*max_pfn);
-    
+
     /* We worked out the virtual memory range to map, now mapping loop */
     printk("Mapping memory range 0x%lx - 0x%lx\n", start_address, end_address);
 
@@ -477,8 +539,9 @@ void build_pagetable(unsigned long *star
         offset = l4_table_offset(start_address);
         /* Need new L3 pt frame */
         if(!(start_address & L3_MASK)) 
-            new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
-        
+            if(need_pt_frame(start_address, L3_FRAME)) 
+                new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
+
         page = tab[offset];
         mfn = pte_to_mfn(page);
         tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
@@ -486,8 +549,9 @@ void build_pagetable(unsigned long *star
 #if defined(__x86_64__) || defined(CONFIG_X86_PAE)
         offset = l3_table_offset(start_address);
         /* Need new L2 pt frame */
-        if(!(start_address & L2_MASK)) 
-            new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
+        if(!(start_address & L2_MASK))
+            if(need_pt_frame(start_address, L2_FRAME))
+                new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
 
         page = tab[offset];
         mfn = pte_to_mfn(page);
@@ -495,16 +559,16 @@ void build_pagetable(unsigned long *star
 #endif
         offset = l2_table_offset(start_address);        
         /* Need new L1 pt frame */
-        if(!(start_address & L1_MASK)) 
-            new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
-       
+        if(!(start_address & L1_MASK))
+            if(need_pt_frame(start_address, L1_FRAME)) 
+                new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
+
         page = tab[offset];
         mfn = pte_to_mfn(page);
         offset = l1_table_offset(start_address);
 
         mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
-        mmu_updates[count].val = 
-            (pgentry_t)pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT;
+        mmu_updates[count].val = (pgentry_t)pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT;
         count++;
         if (count == L1_PAGETABLE_ENTRIES || pfn_to_map == *max_pfn)
         {
diff -r 29861ae27914 -r 91ee504ed40e extras/mini-os/x86_32.S
--- a/extras/mini-os/x86_32.S   Tue May 30 15:24:31 2006 -0500
+++ b/extras/mini-os/x86_32.S   Fri Jun 02 12:31:48 2006 -0500
@@ -4,8 +4,8 @@
 .section __xen_guest
        .ascii  "GUEST_OS=Mini-OS"
        .ascii  ",XEN_VER=xen-3.0"
-       .ascii  ",VIRT_BASE=0xc0000000" /* &_text from minios_x86_32.lds */
-       .ascii  ",ELF_PADDR_OFFSET=0xc0000000"
+       .ascii  ",VIRT_BASE=0x0" /* &_text from minios_x86_32.lds */
+       .ascii  ",ELF_PADDR_OFFSET=0x0"
        .ascii  ",HYPERCALL_PAGE=0x2"
 #ifdef CONFIG_X86_PAE
        .ascii  ",PAE=yes"
diff -r 29861ae27914 -r 91ee504ed40e extras/mini-os/x86_64.S
--- a/extras/mini-os/x86_64.S   Tue May 30 15:24:31 2006 -0500
+++ b/extras/mini-os/x86_64.S   Fri Jun 02 12:31:48 2006 -0500
@@ -4,8 +4,8 @@
 .section __xen_guest
        .ascii  "GUEST_OS=Mini-OS"
        .ascii  ",XEN_VER=xen-3.0"
-       .ascii  ",VIRT_BASE=0xffffffff80000000" /* &_text from minios_x86_64.lds */
-       .ascii  ",ELF_PADDR_OFFSET=0xffffffff80000000"
+       .ascii  ",VIRT_BASE=0x0" /* &_text from minios_x86_64.lds */
+       .ascii  ",ELF_PADDR_OFFSET=0x0"
        .ascii  ",HYPERCALL_PAGE=0x2"
        .ascii  ",LOADER=generic"
        .byte   0
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/arch/i386/kernel/head-xen.S
--- a/linux-2.6-xen-sparse/arch/i386/kernel/head-xen.S  Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/head-xen.S  Fri Jun 02 12:31:48 2006 -0500
@@ -173,7 +173,7 @@ ENTRY(cpu_gdt_table)
        .ascii           "|pae_pgdir_above_4gb"
        .ascii           "|supervisor_mode_kernel"
 #ifdef CONFIG_X86_PAE
-       .ascii  ",PAE=yes"
+       .ascii  ",PAE=yes[extended-cr3]"
 #else
        .ascii  ",PAE=no"
 #endif
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/arch/i386/kernel/process-xen.c
--- a/linux-2.6-xen-sparse/arch/i386/kernel/process-xen.c       Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/process-xen.c       Fri Jun 02 12:31:48 2006 -0500
@@ -55,6 +55,7 @@
 
 #include <xen/interface/physdev.h>
 #include <xen/interface/vcpu.h>
+#include <xen/cpu_hotplug.h>
 
 #include <linux/err.h>
 
@@ -101,8 +102,6 @@ EXPORT_SYMBOL(enable_hlt);
 EXPORT_SYMBOL(enable_hlt);
 
 /* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
-extern void stop_hz_timer(void);
-extern void start_hz_timer(void);
 void xen_idle(void)
 {
        local_irq_disable();
@@ -112,10 +111,7 @@ void xen_idle(void)
        else {
                clear_thread_flag(TIF_POLLING_NRFLAG);
                smp_mb__after_clear_bit();
-               stop_hz_timer();
-               /* Blocking includes an implicit local_irq_enable(). */
-               HYPERVISOR_block();
-               start_hz_timer();
+               safe_halt();
                set_thread_flag(TIF_POLLING_NRFLAG);
        }
 }
@@ -132,11 +128,7 @@ static inline void play_dead(void)
        cpu_clear(smp_processor_id(), cpu_initialized);
        preempt_enable_no_resched();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
-       /* Same as drivers/xen/core/smpboot.c:cpu_bringup(). */
-       cpu_init();
-       touch_softlockup_watchdog();
-       preempt_disable();
-       local_irq_enable();
+       cpu_bringup();
 }
 #else
 static inline void play_dead(void)
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/arch/i386/kernel/time-xen.c
--- a/linux-2.6-xen-sparse/arch/i386/kernel/time-xen.c  Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/time-xen.c  Fri Jun 02 12:31:48 2006 -0500
@@ -973,7 +973,7 @@ EXPORT_SYMBOL(jiffies_to_st);
  * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
  * These functions are based on implementations from arch/s390/kernel/time.c
  */
-void stop_hz_timer(void)
+static void stop_hz_timer(void)
 {
        unsigned int cpu = smp_processor_id();
        unsigned long j;
@@ -993,10 +993,26 @@ void stop_hz_timer(void)
        BUG_ON(HYPERVISOR_set_timer_op(jiffies_to_st(j)) != 0);
 }
 
-void start_hz_timer(void)
+static void start_hz_timer(void)
 {
        cpu_clear(smp_processor_id(), nohz_cpu_mask);
 }
+
+void safe_halt(void)
+{
+       stop_hz_timer();
+       /* Blocking includes an implicit local_irq_enable(). */
+       HYPERVISOR_block();
+       start_hz_timer();
+}
+EXPORT_SYMBOL(safe_halt);
+
+void halt(void)
+{
+       if (irqs_disabled())
+               HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
+}
+EXPORT_SYMBOL(halt);
 
 /* No locking required. We are only CPU running, and interrupts are off. */
 void time_resume(void)
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/arch/i386/kernel/vm86.c
--- a/linux-2.6-xen-sparse/arch/i386/kernel/vm86.c      Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/vm86.c      Fri Jun 02 12:31:48 2006 -0500
@@ -132,7 +132,9 @@ struct pt_regs * fastcall save_v86_state
        current->thread.sysenter_cs = __KERNEL_CS;
        load_esp0(tss, &current->thread);
        current->thread.saved_esp0 = 0;
+#ifndef CONFIG_X86_NO_TSS
        put_cpu();
+#endif
 
        loadsegment(fs, current->thread.saved_fs);
        loadsegment(gs, current->thread.saved_gs);
@@ -310,7 +312,9 @@ static void do_sys_vm86(struct kernel_vm
        if (cpu_has_sep)
                tsk->thread.sysenter_cs = 0;
        load_esp0(tss, &tsk->thread);
+#ifndef CONFIG_X86_NO_TSS
        put_cpu();
+#endif
 
        tsk->thread.screen_bitmap = info->screen_bitmap;
        if (info->flags & VM86_SCREEN_BITMAP)
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/arch/ia64/Kconfig
--- a/linux-2.6-xen-sparse/arch/ia64/Kconfig    Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/ia64/Kconfig    Fri Jun 02 12:31:48 2006 -0500
@@ -73,7 +73,7 @@ config XEN_IA64_DOM0_VP
 
 config XEN_IA64_DOM0_NON_VP
        bool
-       depends on !(XEN && XEN_IA64_DOM0_VP)
+       depends on XEN && !XEN_IA64_DOM0_VP
        default y
        help
          dom0 P=M model
@@ -496,15 +496,39 @@ source "security/Kconfig"
 
 source "crypto/Kconfig"
 
+#
 # override default values of drivers/xen/Kconfig
-if !XEN_IA64_DOM0_VP
+#
+if XEN
+config XEN_UTIL
+       default n if XEN_IA64_DOM0_VP
+
 config HAVE_ARCH_ALLOC_SKB
-        bool
-        default n
+       default n if !XEN_IA64_DOM0_VP
 
 config HAVE_ARCH_DEV_ALLOC_SKB
-        bool
-        default n
+       default n if !XEN_IA64_DOM0_VP
+
+config XEN_BALLOON
+       default n if !XEN_IA64_DOM0_VP
+
+config XEN_SKBUFF
+       default n if !XEN_IA64_DOM0_VP
+
+config XEN_NETDEV_BACKEND
+       default n if !XEN_IA64_DOM0_VP
+
+config XEN_NETDEV_FRONTEND
+       default n if !XEN_IA64_DOM0_VP
+
+config XEN_DEVMEM
+       default n
+
+config XEN_REBOOT
+       default n
+
+config XEN_SMPBOOT
+       default n
 endif
 
 source "drivers/xen/Kconfig"
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/arch/ia64/kernel/iosapic.c
--- a/linux-2.6-xen-sparse/arch/ia64/kernel/iosapic.c   Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/iosapic.c   Fri Jun 02 12:31:48 2006 -0500
@@ -171,7 +171,7 @@ static inline void xen_iosapic_write(cha
 
 static inline unsigned int iosapic_read(char __iomem *iosapic, unsigned int reg)
 {
-       if (!running_on_xen) {
+       if (!is_running_on_xen()) {
                writel(reg, iosapic + IOSAPIC_REG_SELECT);
                return readl(iosapic + IOSAPIC_WINDOW);
        } else
@@ -180,7 +180,7 @@ static inline unsigned int iosapic_read(
 
 static inline void iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
 {
-       if (!running_on_xen) {
+       if (!is_running_on_xen()) {
                writel(reg, iosapic + IOSAPIC_REG_SELECT);
                writel(val, iosapic + IOSAPIC_WINDOW);
        } else
@@ -669,6 +669,11 @@ register_intr (unsigned int gsi, int vec
        iosapic_intr_info[vector].polarity = polarity;
        iosapic_intr_info[vector].dmode    = delivery;
        iosapic_intr_info[vector].trigger  = trigger;
+
+#ifdef CONFIG_XEN
+       if (is_running_on_xen())
+               return 0;
+#endif
 
        if (trigger == IOSAPIC_EDGE)
                irq_type = &irq_type_iosapic_edge;
@@ -1013,7 +1018,7 @@ iosapic_system_init (int system_pcat_com
 
        pcat_compat = system_pcat_compat;
 #ifdef CONFIG_XEN
-       if (running_on_xen)
+       if (is_running_on_xen())
                return;
 #endif
        if (pcat_compat) {
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c
--- a/linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c  Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c  Fri Jun 02 12:31:48 2006 -0500
@@ -68,7 +68,7 @@ assign_irq_vector (int irq)
        int pos, vector;
 #ifdef CONFIG_XEN
        extern int xen_assign_irq_vector(int);
-       if (running_on_xen)
+       if (is_running_on_xen())
                return xen_assign_irq_vector(irq);
 #endif /* CONFIG_XEN */
  again:
@@ -229,6 +229,151 @@ static struct irqaction ipi_irqaction = 
 };
 #endif
 
+#ifdef CONFIG_XEN
+#include <xen/evtchn.h>
+#include <xen/interface/callback.h>
+
+static char timer_name[NR_CPUS][15];
+static char ipi_name[NR_CPUS][15];
+static char resched_name[NR_CPUS][15];
+
+struct saved_irq {
+       unsigned int irq;
+       struct irqaction *action;
+};
+/* 16 should be far optimistic value, since only several percpu irqs
+ * are registered early.
+ */
+#define MAX_LATE_IRQ   16
+static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
+static unsigned short late_irq_cnt = 0;
+static unsigned short saved_irq_cnt = 0;
+static int xen_slab_ready = 0;
+
+/* Dummy stub. Though we may check RESCHEDULE_VECTOR before __do_IRQ,
+ * it ends up to issue several memory accesses upon percpu data and
+ * thus adds unnecessary traffic to other paths.
+ */
+static irqreturn_t
+handle_reschedule(int irq, void *dev_id, struct pt_regs *regs)
+{
+
+       return IRQ_HANDLED;
+}
+
+static struct irqaction resched_irqaction = {
+       .handler =      handle_reschedule,
+       .flags =        SA_INTERRUPT,
+       .name =         "RESCHED"
+};
+
+/*
+ * This is xen version percpu irq registration, which needs bind
+ * to xen specific evtchn sub-system. One trick here is that xen
+ * evtchn binding interface depends on kmalloc because related
+ * port needs to be freed at device/cpu down. So we cache the
+ * registration on BSP before slab is ready and then deal them
+ * at later point. For rest instances happening after slab ready,
+ * we hook them to xen evtchn immediately.
+ *
+ * FIXME: MCA is not supported by far, and thus "nomca" boot param is
+ * required.
+ */
+void
+xen_register_percpu_irq (unsigned int irq, struct irqaction *action, int save)
+{
+       char name[15];
+       unsigned int cpu = smp_processor_id();
+       int ret = 0;
+
+       if (xen_slab_ready) {
+               switch (irq) {
+               case IA64_TIMER_VECTOR:
+                       sprintf(timer_name[cpu], "%s%d", action->name, cpu);
+                       ret = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
+                               action->handler, action->flags,
+                               timer_name[cpu], action->dev_id);
+                       printk(KERN_INFO "register VIRQ_ITC (%s) to xen irq (%d)\n", name, ret);
+                       break;
+               case IA64_IPI_RESCHEDULE:
+                       sprintf(resched_name[cpu], "%s%d", action->name, cpu);
+                       ret = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, cpu,
+                               action->handler, action->flags,
+                               resched_name[cpu], action->dev_id);
+                       printk(KERN_INFO "register RESCHEDULE_VECTOR (%s) to xen irq (%d)\n", name, ret);
+                       break;
+               case IA64_IPI_VECTOR:
+                       sprintf(ipi_name[cpu], "%s%d", action->name, cpu);
+                       ret = bind_ipi_to_irqhandler(IPI_VECTOR, cpu,
+                               action->handler, action->flags,
+                               ipi_name[cpu], action->dev_id);
+                       printk(KERN_INFO "register IPI_VECTOR (%s) to xen irq (%d)\n", name, ret);
+                       break;
+               default:
+                       printk(KERN_WARNING "Percpu irq %d is unsupported by xen!\n", irq);
+                       break;
+               }
+               BUG_ON(ret < 0);
+       } 
+
+       /* For BSP, we cache registered percpu irqs, and then re-walk
+        * them when initializing APs
+        */
+       if (!cpu && save) {
+               BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
+               saved_percpu_irqs[saved_irq_cnt].irq = irq;
+               saved_percpu_irqs[saved_irq_cnt].action = action;
+               saved_irq_cnt++;
+               if (!xen_slab_ready)
+                       late_irq_cnt++;
+       }
+}
+
+static void
+xen_bind_early_percpu_irq (void)
+{
+       int i;
+
+       xen_slab_ready = 1;
+       /* There's no race when accessing this cached array, since only
+        * BSP will face with such step shortly
+        */
+       for (i = 0; i < late_irq_cnt; i++)
+               xen_register_percpu_irq(saved_percpu_irqs[i].irq,
+                       saved_percpu_irqs[i].action, 0);
+}
+
+/* FIXME: There's no obvious point to check whether slab is ready. So
+ * a hack is used here by utilizing a late time hook.
+ */
+extern void (*late_time_init)(void);
+extern char xen_event_callback;
+extern void xen_init_IRQ(void);
+
+DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
+void xen_smp_intr_init(void)
+{
+#ifdef CONFIG_SMP
+       unsigned int cpu = smp_processor_id();
+       unsigned int i = 0;
+       struct callback_register event = {
+               .type = CALLBACKTYPE_event,
+               .address = (unsigned long)&xen_event_callback,
+       };
+
+       if (!cpu)
+               return;
+
+       /* This should be piggyback when setup vcpu guest context */
+       BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
+
+       for (i = 0; i < saved_irq_cnt; i++)
+               xen_register_percpu_irq(saved_percpu_irqs[i].irq,
+                       saved_percpu_irqs[i].action, 0);
+#endif /* CONFIG_SMP */
+}
+#endif /* CONFIG_XEN */
+
 void
 register_percpu_irq (ia64_vector vec, struct irqaction *action)
 {
@@ -237,6 +382,10 @@ register_percpu_irq (ia64_vector vec, st
 
        for (irq = 0; irq < NR_IRQS; ++irq)
                if (irq_to_vector(irq) == vec) {
+#ifdef CONFIG_XEN
+                       if (is_running_on_xen())
+                               return xen_register_percpu_irq(vec, action, 1);
+#endif
                        desc = irq_descp(irq);
                        desc->status |= IRQ_PER_CPU;
                        desc->handler = &irq_type_ia64_lsapic;
@@ -248,6 +397,21 @@ void __init
 void __init
 init_IRQ (void)
 {
+#ifdef CONFIG_XEN
+       /* Maybe put into platform_irq_init later */
+       if (is_running_on_xen()) {
+               struct callback_register event = {
+                       .type = CALLBACKTYPE_event,
+                       .address = (unsigned long)&xen_event_callback,
+               };
+               xen_init_IRQ();
+               BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
+               late_time_init = xen_bind_early_percpu_irq;
+#ifdef CONFIG_SMP
+               register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
+#endif /* CONFIG_SMP */
+       }
+#endif /* CONFIG_XEN */
        register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
 #ifdef CONFIG_SMP
        register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
@@ -266,10 +430,33 @@ ia64_send_ipi (int cpu, int vector, int 
        unsigned long phys_cpu_id;
 
 #ifdef CONFIG_XEN
-        if (running_on_xen) {
-                extern void xen_send_ipi (int cpu, int vec);
-                xen_send_ipi (cpu, vector);
-                return;
+        if (is_running_on_xen()) {
+               int irq = -1;
+
+               /* TODO: we need to call vcpu_up here */
+               if (unlikely(vector == ap_wakeup_vector)) {
+                       extern void xen_send_ipi (int cpu, int vec);
+                       xen_send_ipi (cpu, vector);
+                       //vcpu_prepare_and_up(cpu);
+                       return;
+               }
+
+               switch(vector) {
+               case IA64_IPI_VECTOR:
+                       irq = per_cpu(ipi_to_irq, cpu)[IPI_VECTOR];
+                       break;
+               case IA64_IPI_RESCHEDULE:
+                       irq = per_cpu(ipi_to_irq, cpu)[RESCHEDULE_VECTOR];
+                       break;
+               default:
+                       printk(KERN_WARNING"Unsupported IPI type 0x%x\n", vector);
+                       irq = 0;
+                       break;
+               }               
+       
+               BUG_ON(irq < 0);
+               notify_remote_via_irq(irq);
+               return;
         }
 #endif /* CONFIG_XEN */
 
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/arch/ia64/kernel/setup.c
--- a/linux-2.6-xen-sparse/arch/ia64/kernel/setup.c     Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/setup.c     Fri Jun 02 12:31:48 2006 -0500
@@ -248,7 +248,7 @@ reserve_memory (void)
        n++;
 
 #ifdef CONFIG_XEN
-       if (running_on_xen) {
+       if (is_running_on_xen()) {
                rsvd_region[n].start = (unsigned long)__va((HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
                rsvd_region[n].end   = rsvd_region[n].start + PAGE_SIZE;
                n++;
@@ -347,8 +347,14 @@ early_console_setup (char *cmdline)
        int earlycons = 0;
 
 #ifdef CONFIG_XEN
-       if (!early_xen_console_setup(cmdline))
+#ifndef CONFIG_IA64_HP_SIM
+       if (is_running_on_xen()) {
+               extern struct console hpsim_cons;
+               hpsim_cons.flags |= CON_BOOT;
+               register_console(&hpsim_cons);
                earlycons++;
+       }
+#endif
 #endif
 #ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
        {
@@ -411,7 +417,7 @@ setup_arch (char **cmdline_p)
 {
        unw_init();
 #ifdef CONFIG_XEN
-       if (running_on_xen)
+       if (is_running_on_xen())
                setup_xen_features();
 #endif
 
@@ -512,7 +518,7 @@ setup_arch (char **cmdline_p)
 # endif
        }
 #ifdef CONFIG_XEN
-       if (running_on_xen) {
+       if (is_running_on_xen()) {
                extern shared_info_t *HYPERVISOR_shared_info;
                extern int xen_init (void);
 
@@ -923,6 +929,13 @@ cpu_init (void)
        /* size of physical stacked register partition plus 8 bytes: */
        __get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
        platform_cpu_init();
+#ifdef CONFIG_XEN
+       /* Need to be moved into platform_cpu_init later */
+       if (is_running_on_xen()) {
+               extern void xen_smp_intr_init(void);
+               xen_smp_intr_init();
+       }
+#endif
        pm_idle = default_idle;
 }
 
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/arch/ia64/xen-mkbuildtree-pre
--- a/linux-2.6-xen-sparse/arch/ia64/xen-mkbuildtree-pre        Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/ia64/xen-mkbuildtree-pre        Fri Jun 02 12:31:48 2006 -0500
@@ -10,15 +10,8 @@
 #eventually asm-xsi-offsets needs to be part of hypervisor.h/hypercall.h
 ln -sf ../../../../xen/include/asm-ia64/asm-xsi-offsets.h include/asm-ia64/xen/
 
-#ia64 drivers/xen isn't fully functional yet, workaround...
-#also ignore core/evtchn.c which uses a different irq mechanism than ia64
-#(warning: there be dragons here if these files diverge)
-ln -sf ../../arch/ia64/xen/drivers/Makefile drivers/xen/Makefile
-ln -sf ../../../arch/ia64/xen/drivers/coreMakefile drivers/xen/core/Makefile
-
 #not sure where these ia64-specific files will end up in the future
 ln -sf ../../../arch/ia64/xen/drivers/xenia64_init.c drivers/xen/core
-ln -sf ../../../arch/ia64/xen/drivers/evtchn_ia64.c drivers/xen/core
 
 #still a few x86-ism's in various drivers/xen files, patch them
 #cd drivers/xen
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/arch/ia64/xen/Makefile
--- a/linux-2.6-xen-sparse/arch/ia64/xen/Makefile       Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/Makefile       Fri Jun 02 12:31:48 2006 -0500
@@ -2,7 +2,7 @@
 # Makefile for Xen components
 #
 
-obj-y := hypercall.o xenivt.o xenentry.o xensetup.o xenpal.o xenhpski.o xenconsole.o
+obj-y := hypercall.o xenivt.o xenentry.o xensetup.o xenpal.o xenhpski.o
 
 obj-$(CONFIG_XEN_IA64_DOM0_VP) += hypervisor.o pci-dma-xen.o util.o
 pci-dma-xen-$(CONFIG_XEN_IA64_DOM0_VP) := ../../i386/kernel/pci-dma-xen.o
diff -r 29861ae27914 -r 91ee504ed40e 
linux-2.6-xen-sparse/arch/ia64/xen/drivers/xenia64_init.c
--- a/linux-2.6-xen-sparse/arch/ia64/xen/drivers/xenia64_init.c Tue May 30 
15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/drivers/xenia64_init.c Fri Jun 02 
12:31:48 2006 -0500
@@ -33,7 +33,6 @@ int xen_init(void)
                s->arch.start_info_pfn, xen_start_info->nr_pages,
                xen_start_info->flags);
 
-       evtchn_init();
        initialized = 1;
        return 0;
 }
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c
--- a/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c   Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c   Fri Jun 02 12:31:48 2006 -0500
@@ -314,12 +314,6 @@ gnttab_map_grant_ref_pre(struct gnttab_m
        uint32_t flags;
 
        flags = uop->flags;
-       if (flags & GNTMAP_readonly) {
-#if 0
-               xprintd("GNTMAP_readonly is not supported yet\n");
-#endif
-               flags &= ~GNTMAP_readonly;
-       }
 
        if (flags & GNTMAP_host_map) {
                if (flags & GNTMAP_application_map) {
@@ -360,52 +354,179 @@ struct address_space xen_ia64_foreign_du
 
 ///////////////////////////////////////////////////////////////////////////
 // foreign mapping
+#include <linux/efi.h>
+#include <asm/meminit.h> // for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}()
+
+static unsigned long privcmd_resource_min = 0;
+// Xen/ia64 currently can handle pseudo physical address bits up to
+// (PAGE_SHIFT * 3)
+static unsigned long privcmd_resource_max = GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
+static unsigned long privcmd_resource_align = IA64_GRANULE_SIZE;
+
+static unsigned long
+md_end_addr(const efi_memory_desc_t *md)
+{
+       return md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+}
+
+#define XEN_IA64_PRIVCMD_LEAST_GAP_SIZE        (1024 * 1024 * 1024UL)
+static int
+xen_ia64_privcmd_check_size(unsigned long start, unsigned long end)
+{
+       return (start < end &&
+               (end - start) > XEN_IA64_PRIVCMD_LEAST_GAP_SIZE);
+}
+
+static int __init
+xen_ia64_privcmd_init(void)
+{
+       void *efi_map_start, *efi_map_end, *p;
+       u64 efi_desc_size;
+       efi_memory_desc_t *md;
+       unsigned long tmp_min;
+       unsigned long tmp_max;
+       unsigned long gap_size;
+       unsigned long prev_end;
+
+       if (!is_running_on_xen())
+               return -1;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       // at first check the used highest address
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               // nothing
+       }
+       md = p - efi_desc_size;
+       privcmd_resource_min = GRANULEROUNDUP(md_end_addr(md));
+       if (xen_ia64_privcmd_check_size(privcmd_resource_min,
+                                       privcmd_resource_max)) {
+               goto out;
+       }
+
+       // the used highest address is too large. try to find the largest gap.
+       tmp_min = privcmd_resource_max;
+       tmp_max = 0;
+       gap_size = 0;
+       prev_end = 0;
+       for (p = efi_map_start;
+            p < efi_map_end - efi_desc_size;
+            p += efi_desc_size) {
+               unsigned long end;
+               efi_memory_desc_t* next;
+               unsigned long next_start;
+
+               md = p;
+               end = md_end_addr(md);
+               if (end > privcmd_resource_max) {
+                       break;
+               }
+               if (end < prev_end) {
+                       // work around. 
+                       // Xen may pass incompletely sorted memory
+                       // descriptors like
+                       // [x, x + length]
+                       // [x, x]
+                       // this order should be reversed.
+                       continue;
+               }
+               next = p + efi_desc_size;
+               next_start = next->phys_addr;
+               if (next_start > privcmd_resource_max) {
+                       next_start = privcmd_resource_max;
+               }
+               if (end < next_start && gap_size < (next_start - end)) {
+                       tmp_min = end;
+                       tmp_max = next_start;
+                       gap_size = tmp_max - tmp_min;
+               }
+               prev_end = end;
+       }
+
+       privcmd_resource_min = GRANULEROUNDUP(tmp_min);
+       if (xen_ia64_privcmd_check_size(privcmd_resource_min, tmp_max)) {
+               privcmd_resource_max = tmp_max;
+               goto out;
+       }
+
+       privcmd_resource_min = tmp_min;
+       privcmd_resource_max = tmp_max;
+       if (!xen_ia64_privcmd_check_size(privcmd_resource_min,
+                                        privcmd_resource_max)) {
+               // Any large enough gap isn't found.
+               // go ahead anyway with the warning hoping that large region
+               // won't be requested.
+               printk(KERN_WARNING "xen privcmd: large enough region for privcmd mmap is not found.\n");
+       }
+
+out:
+       printk(KERN_INFO "xen privcmd uses pseudo physical addr range [0x%lx, 0x%lx] (%ldMB)\n",
+              privcmd_resource_min, privcmd_resource_max, (privcmd_resource_max - privcmd_resource_min) >> 20);
+       BUG_ON(privcmd_resource_min >= privcmd_resource_max);
+       return 0;
+}
+late_initcall(xen_ia64_privcmd_init);
 
 struct xen_ia64_privcmd_entry {
        atomic_t        map_count;
-       struct page*    page;
+#define INVALID_GPFN   (~0UL)
+       unsigned long   gpfn;
+};
+
+struct xen_ia64_privcmd_range {
+       atomic_t                        ref_count;
+       unsigned long                   pgoff; // in PAGE_SIZE
+       struct resource*                res;
+
+       unsigned long                   num_entries;
+       struct xen_ia64_privcmd_entry   entries[0];
+};
+
+struct xen_ia64_privcmd_vma {
+       struct xen_ia64_privcmd_range*  range;
+
+       unsigned long                   num_entries;
+       struct xen_ia64_privcmd_entry*  entries;
 };
 
 static void
 xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry* entry)
 {
        atomic_set(&entry->map_count, 0);
-       entry->page = NULL;
-}
-
-//TODO alloc_page() to allocate pseudo physical address space is 
-//     waste of memory.
-//     When vti domain is created, qemu maps all of vti domain pages which 
-//     reaches to several hundred megabytes at least.
-//     remove alloc_page().
+       entry->gpfn = INVALID_GPFN;
+}
+
 static int
 xen_ia64_privcmd_entry_mmap(struct vm_area_struct* vma,
                            unsigned long addr,
-                           struct xen_ia64_privcmd_entry* entry,
+                           struct xen_ia64_privcmd_range* privcmd_range,
+                           int i,
                            unsigned long mfn,
                            pgprot_t prot,
                            domid_t domid)
 {
        int error = 0;
-       struct page* page;
+       struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
        unsigned long gpfn;
+       unsigned long flags;
 
        BUG_ON((addr & ~PAGE_MASK) != 0);
        BUG_ON(mfn == INVALID_MFN);
 
-       if (entry->page != NULL) {
+       if (entry->gpfn != INVALID_GPFN) {
                error = -EBUSY;
                goto out;
        }
-       page = alloc_page(GFP_KERNEL);
-       if (page == NULL) {
-               error = -ENOMEM;
-               goto out;
-       }
-       gpfn = page_to_pfn(page);
-
-       error = HYPERVISOR_add_physmap(gpfn, mfn, 0/* prot:XXX */,
-                                      domid);
+       gpfn = (privcmd_range->res->start >> PAGE_SHIFT) + i;
+
+       flags = ASSIGN_writable;
+       if (pgprot_val(prot) == PROT_READ) {
+               flags = ASSIGN_readonly;
+       }
+       error = HYPERVISOR_add_physmap(gpfn, mfn, flags, domid);
        if (error != 0) {
                goto out;
        }
@@ -413,15 +534,13 @@ xen_ia64_privcmd_entry_mmap(struct vm_ar
        prot = vma->vm_page_prot;
        error = remap_pfn_range(vma, addr, gpfn, 1 << PAGE_SHIFT, prot);
        if (error != 0) {
-               (void)HYPERVISOR_zap_physmap(gpfn, 0);
-               error = HYPERVISOR_populate_physmap(gpfn, 0, 0);
+               error = HYPERVISOR_zap_physmap(gpfn, 0);
                if (error) {
                        BUG();//XXX
                }
-               __free_page(page);
        } else {
                atomic_inc(&entry->map_count);
-               entry->page = page;
+               entry->gpfn = gpfn;
        }
 
 out:
@@ -429,30 +548,28 @@ out:
 }
 
 static void
-xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_entry* entry)
-{
-       struct page* page = entry->page;
-       unsigned long gpfn = page_to_pfn(page);
+xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range* privcmd_range,
+                             int i)
+{
+       struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
+       unsigned long gpfn = entry->gpfn;
+       //gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
+       //      (vma->vm_pgoff - privcmd_range->pgoff);
        int error;
 
        error = HYPERVISOR_zap_physmap(gpfn, 0);
        if (error) {
                BUG();//XXX
        }
-
-       error = HYPERVISOR_populate_physmap(gpfn, 0, 0);
-       if (error) {
-               BUG();//XXX
-       }
-
-       entry->page = NULL;
-       __free_page(page);
+       entry->gpfn = INVALID_GPFN;
 }
 
 static int
-xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_entry* entry)
-{
-       if (entry->page != NULL) {
+xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range* privcmd_range,
+                           int i)
+{
+       struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
+       if (entry->gpfn != INVALID_GPFN) {
                atomic_inc(&entry->map_count);
        } else {
                BUG_ON(atomic_read(&entry->map_count) != 0);
@@ -460,27 +577,15 @@ xen_ia64_privcmd_entry_open(struct xen_i
 }
 
 static int
-xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_entry* entry)
-{
-       if (entry->page != NULL && atomic_dec_and_test(&entry->map_count)) {
-               xen_ia64_privcmd_entry_munmap(entry);
-       }
-}
-
-struct xen_ia64_privcmd_range {
-       atomic_t                        ref_count;
-       unsigned long                   pgoff; // in PAGE_SIZE
-
-       unsigned long                   num_entries;
-       struct xen_ia64_privcmd_entry   entries[0];
-};
-
-struct xen_ia64_privcmd_vma {
-       struct xen_ia64_privcmd_range*  range;
-
-       unsigned long                   num_entries;
-       struct xen_ia64_privcmd_entry*  entries;
-};
+xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range* privcmd_range,
+                            int i)
+{
+       struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
+       if (entry->gpfn != INVALID_GPFN &&
+           atomic_dec_and_test(&entry->map_count)) {
+               xen_ia64_privcmd_entry_munmap(privcmd_range, i);
+       }
+}
 
 static void xen_ia64_privcmd_vma_open(struct vm_area_struct* vma);
 static void xen_ia64_privcmd_vma_close(struct vm_area_struct* vma);
@@ -507,7 +612,7 @@ __xen_ia64_privcmd_vma_open(struct vm_ar
        privcmd_vma->entries = &privcmd_range->entries[entry_offset];
        vma->vm_private_data = privcmd_vma;
        for (i = 0; i < privcmd_vma->num_entries; i++) {
-               xen_ia64_privcmd_entry_open(&privcmd_vma->entries[i]);
+               xen_ia64_privcmd_entry_open(privcmd_range, entry_offset + i);
        }
 
        vma->vm_private_data = privcmd_vma;
@@ -533,10 +638,11 @@ xen_ia64_privcmd_vma_close(struct vm_are
        struct xen_ia64_privcmd_vma* privcmd_vma =
                (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
        struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
+       unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
        unsigned long i;
 
        for (i = 0; i < privcmd_vma->num_entries; i++) {
-               xen_ia64_privcmd_entry_close(&privcmd_vma->entries[i]);
+               xen_ia64_privcmd_entry_close(privcmd_range, entry_offset + i);
        }
        vma->vm_private_data = NULL;
        kfree(privcmd_vma);
@@ -547,9 +653,11 @@ xen_ia64_privcmd_vma_close(struct vm_are
                        struct xen_ia64_privcmd_entry* entry =
                                &privcmd_range->entries[i];
                        BUG_ON(atomic_read(&entry->map_count) != 0);
-                       BUG_ON(entry->page != NULL);
+                       BUG_ON(entry->gpfn != INVALID_GPFN);
                }
 #endif
+               release_resource(privcmd_range->res);
+               kfree(privcmd_range->res);
                vfree(privcmd_range);
        }
 }
@@ -557,13 +665,18 @@ int
 int
 privcmd_mmap(struct file * file, struct vm_area_struct * vma)
 {
-       unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-       struct xen_ia64_privcmd_range* privcmd_range;
-       struct xen_ia64_privcmd_vma* privcmd_vma;
+       int error;
+       unsigned long size = vma->vm_end - vma->vm_start;
+       unsigned long num_entries = size >> PAGE_SHIFT;
+       struct xen_ia64_privcmd_range* privcmd_range = NULL;
+       struct xen_ia64_privcmd_vma* privcmd_vma = NULL;
+       struct resource* res = NULL;
        unsigned long i;
-       BUG_ON(!running_on_xen);
+       BUG_ON(!is_running_on_xen());
 
        BUG_ON(file->private_data != NULL);
+
+       error = -ENOMEM;
        privcmd_range =
                vmalloc(sizeof(*privcmd_range) +
                        sizeof(privcmd_range->entries[0]) * num_entries);
@@ -574,6 +687,18 @@ privcmd_mmap(struct file * file, struct 
        if (privcmd_vma == NULL) {
                goto out_enomem1;
        }
+       res = kzalloc(sizeof(*res), GFP_KERNEL);
+       if (res == NULL) {
+               goto out_enomem1;
+       }
+       res->name = "Xen privcmd mmap";
+       error = allocate_resource(&iomem_resource, res, size,
+                                 privcmd_resource_min, privcmd_resource_max,
+                                 privcmd_resource_align, NULL, NULL);
+       if (error) {
+               goto out_enomem1;
+       }
+       privcmd_range->res = res;
 
        /* DONTCOPY is essential for Xen as copy_page_range is broken. */
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
@@ -589,10 +714,11 @@ privcmd_mmap(struct file * file, struct 
        return 0;
 
 out_enomem1:
+       kfree(res);
        kfree(privcmd_vma);
 out_enomem0:
        vfree(privcmd_range);
-       return -ENOMEM;
+       return error;
 }
 
 int
@@ -605,10 +731,13 @@ direct_remap_pfn_range(struct vm_area_st
 {
        struct xen_ia64_privcmd_vma* privcmd_vma =
                (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
+       struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
+       unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
+
        unsigned long i;
        unsigned long offset;
        int error = 0;
-       BUG_ON(!running_on_xen);
+       BUG_ON(!is_running_on_xen());
 
 #if 0
        if (prot != vm->vm_page_prot) {
@@ -618,9 +747,7 @@ direct_remap_pfn_range(struct vm_area_st
 
        i = (address - vma->vm_start) >> PAGE_SHIFT;
        for (offset = 0; offset < size; offset += PAGE_SIZE) {
-               struct xen_ia64_privcmd_entry* entry =
-                       &privcmd_vma->entries[i];
-               error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, entry, mfn, prot, domid);
+               error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, privcmd_range, entry_offset + i, mfn, prot, domid);
                if (error != 0) {
                        break;
                }
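
The reworked privcmd_mmap() above now reserves a struct resource for each
mapped range and unwinds its allocations through goto labels on failure,
returning the real error code instead of a hard-coded -ENOMEM.  A minimal
user-space sketch of that staged-allocation/unwind pattern follows; the
function name and sizes below are illustrative stand-ins, not code from the
patch.

    /* Staged allocation with goto-based unwind, as in privcmd_mmap(). */
    #include <stdlib.h>
    #include <errno.h>

    static int make_mapping(size_t num_entries)
    {
        int error = -ENOMEM;    /* assume failure until everything is set up */
        void *range, *vma_desc, *res;

        range = malloc(num_entries * 64);   /* stands in for the vmalloc()ed range */
        if (range == NULL)
            goto out0;

        vma_desc = malloc(128);             /* stands in for the per-vma descriptor */
        if (vma_desc == NULL)
            goto out1;

        res = calloc(1, 32);                /* stands in for the kzalloc()ed resource */
        if (res == NULL)
            goto out2;

        /* ... reserve the resource and hand everything over to the vma ... */
        return 0;

    out2:
        free(vma_desc);
    out1:
        free(range);
    out0:
        return error;
    }

    int main(void)
    {
        return make_mapping(16) == 0 ? 0 : 1;
    }
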
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/arch/ia64/xen/xenivt.S
--- a/linux-2.6-xen-sparse/arch/ia64/xen/xenivt.S       Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/xenivt.S       Fri Jun 02 12:31:48 2006 -0500
@@ -206,9 +206,9 @@ ENTRY(vhpt_miss)
        mov r24=r8
        mov r8=r18
        ;;
-(p10)  XEN_HYPER_ITC_D
-       ;;
-(p11)  XEN_HYPER_ITC_I
+(p10)  XEN_HYPER_ITC_I
+       ;;
+(p11)  XEN_HYPER_ITC_D
        ;;
        mov r8=r24
        ;;
@@ -799,7 +799,16 @@ 1: ld8 r18=[r17]
        ;;
 (p6)   cmp.eq p6,p7=r26,r18                    // Only compare if page is present
        ;;
+#ifdef CONFIG_XEN
+(p6)   mov r18=r8
+(p6)   mov r8=r25
+       ;;
+(p6)   XEN_HYPER_ITC_D
+       ;;
+(p6)   mov r8=r18
+#else
 (p6)   itc.d r25                               // install updated PTE
+#endif 
        ;;
        /*
         * Tell the assemblers dependency-violation checker that the above "itc" instructions
@@ -2038,6 +2047,7 @@ GLOBAL_ENTRY(xen_bsw1)
        ld8 r28=[r30],16; ld8 r29=[r31],16;;
        ld8 r30=[r30]; ld8 r31=[r31];;
        br.ret.sptk.many b0
+END(xen_bsw1)
 #endif
 
        .org ia64_ivt+0x7f00
@@ -2130,5 +2140,32 @@ non_ia32_syscall:
        mov rp=r15
        br.ret.sptk.many rp
 END(dispatch_to_ia32_handler)
-
 #endif /* CONFIG_IA32_SUPPORT */
+
+#ifdef CONFIG_XEN
+       .section .text,"ax"
+GLOBAL_ENTRY(xen_event_callback)
+       mov r31=pr              // prepare to save predicates
+       ;;
+       SAVE_MIN_WITH_COVER     // uses r31; defines r2 and r3
+       ;;
+       movl r3=XSI_PSR_IC
+       mov r14=1
+       ;;
+       st4 [r3]=r14
+       ;;
+       adds r3=8,r2            // set up second base pointer for SAVE_REST
+       srlz.i                  // ensure everybody knows psr.ic is back on
+       ;;
+       SAVE_REST
+       ;;
+       alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
+       add out0=16,sp          // pass pointer to pt_regs as first arg
+       ;;
+       srlz.d                  // make sure we see the effect of cr.ivr
+       movl r14=ia64_leave_kernel
+       ;;
+       mov rp=r14
+       br.call.sptk.many b6=evtchn_do_upcall
+END(xen_event_callback)
+#endif
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/arch/x86_64/kernel/process-xen.c
--- a/linux-2.6-xen-sparse/arch/x86_64/kernel/process-xen.c     Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/process-xen.c     Fri Jun 02 12:31:48 2006 -0500
@@ -60,6 +60,8 @@
 #include <asm/ia32.h>
 #include <asm/idle.h>
 
+#include <xen/cpu_hotplug.h>
+
 asmlinkage extern void ret_from_fork(void);
 
 unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
@@ -118,8 +120,6 @@ void exit_idle(void)
 }
 
 /* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
-extern void stop_hz_timer(void);
-extern void start_hz_timer(void);
 void xen_idle(void)
 {
        local_irq_disable();
@@ -129,10 +129,7 @@ void xen_idle(void)
        else {
                clear_thread_flag(TIF_POLLING_NRFLAG);
                smp_mb__after_clear_bit();
-               stop_hz_timer();
-               /* Blocking includes an implicit local_irq_enable(). */
-               HYPERVISOR_block();
-               start_hz_timer();
+               safe_halt();
                set_thread_flag(TIF_POLLING_NRFLAG);
        }
 }
@@ -145,11 +142,7 @@ static inline void play_dead(void)
        cpu_clear(smp_processor_id(), cpu_initialized);
        preempt_enable_no_resched();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
-       /* Same as drivers/xen/core/smpboot.c:cpu_bringup(). */
-       cpu_init();
-       touch_softlockup_watchdog();
-       preempt_disable();
-       local_irq_enable();
+       cpu_bringup();
 }
 #else
 static inline void play_dead(void)
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/arch/x86_64/kernel/smp-xen.c
--- a/linux-2.6-xen-sparse/arch/x86_64/kernel/smp-xen.c Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/smp-xen.c Fri Jun 02 12:31:48 2006 -0500
@@ -488,7 +488,7 @@ static void smp_really_stop_cpu(void *du
 {
        smp_stop_cpu(); 
        for (;;) 
-               asm("hlt"); 
+               halt();
 } 
 
 void smp_send_stop(void)
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/drivers/xen/Kconfig
--- a/linux-2.6-xen-sparse/drivers/xen/Kconfig  Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/drivers/xen/Kconfig  Fri Jun 02 12:31:48 2006 -0500
@@ -228,4 +228,30 @@ config NO_IDLE_HZ
        bool
        default y
 
+config XEN_UTIL
+       bool
+       default y
+
+config XEN_BALLOON
+       bool
+       default y
+
+config XEN_DEVMEM
+       bool
+       default y
+
+config XEN_SKBUFF
+       bool
+       default y
+       depends on NET
+
+config XEN_REBOOT
+       bool
+       default y
+
+config XEN_SMPBOOT
+       bool
+       default y
+       depends on SMP
+
 endif
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/drivers/xen/Makefile
--- a/linux-2.6-xen-sparse/drivers/xen/Makefile Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/drivers/xen/Makefile Fri Jun 02 12:31:48 2006 -0500
@@ -1,14 +1,12 @@
-
-obj-y  += util.o
-
 obj-y  += core/
-obj-y  += char/
 obj-y  += console/
 obj-y  += evtchn/
-obj-y  += balloon/
 obj-y  += privcmd/
 obj-y  += xenbus/
 
+obj-$(CONFIG_XEN_UTIL)                 += util.o
+obj-$(CONFIG_XEN_BALLOON)              += balloon/
+obj-$(CONFIG_XEN_DEVMEM)               += char/
 obj-$(CONFIG_XEN_BLKDEV_BACKEND)       += blkback/
 obj-$(CONFIG_XEN_NETDEV_BACKEND)       += netback/
 obj-$(CONFIG_XEN_TPMDEV_BACKEND)       += tpmback/
@@ -17,4 +15,3 @@ obj-$(CONFIG_XEN_BLKDEV_TAP)          += blkt
 obj-$(CONFIG_XEN_BLKDEV_TAP)           += blktap/
 obj-$(CONFIG_XEN_PCIDEV_BACKEND)       += pciback/
 obj-$(CONFIG_XEN_PCIDEV_FRONTEND)      += pcifront/
-
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c
--- a/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c        Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c        Fri Jun 02 12:31:48 2006 -0500
@@ -360,12 +360,6 @@ static void balloon_process(void *unused
 /* Resets the Xen limit, sets new target, and kicks off processing. */
 static void set_new_target(unsigned long target)
 {
-       unsigned long min_target;
-
-       /* Do not allow target to reduce below 2% of maximum memory size. */
-       min_target = max_pfn / 50;
-       target = max(target, min_target);
-
        /* No need for lock. Not read-modify-write updates. */
        hard_limit   = ~0UL;
        target_pages = target;
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c      Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c      Fri Jun 02 12:31:48 2006 -0500
@@ -452,10 +452,6 @@ int blkif_ioctl(struct inode *inode, str
                      command, (long)argument, inode->i_rdev);
 
        switch (command) {
-       case HDIO_GETGEO:
-               /* return ENOSYS to use defaults */
-               return -ENOSYS;
-
        case CDROMMULTISESSION:
                DPRINTK("FIXME: support multisession CDs later\n");
                for (i = 0; i < sizeof(struct cdrom_multisession); i++)
@@ -469,6 +465,23 @@ int blkif_ioctl(struct inode *inode, str
                return -EINVAL; /* same return as native Linux */
        }
 
+       return 0;
+}
+
+
+int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
+{
+       /* We don't have real geometry info, but let's at least return
+          values consistent with the size of the device */
+       sector_t nsect = get_capacity(bd->bd_disk);
+       sector_t cylinders = nsect;
+
+       hg->heads = 0xff;
+       hg->sectors = 0x3f;
+       sector_div(cylinders, hg->heads * hg->sectors);
+       hg->cylinders = cylinders;
+       if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
+               hg->cylinders = 0xffff;
        return 0;
 }
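
blkif_getgeo() above has no real geometry to report for a virtual disk, so
it fabricates a 255-head, 63-sector layout, derives the cylinder count from
the capacity, and saturates at 0xffff when the disk is too large for CHS.
A stand-alone sketch of the same arithmetic; the struct below is a
simplified stand-in for the kernel's struct hd_geometry, not the real one.

    #include <stdio.h>
    #include <stdint.h>

    struct geo_sketch {
        unsigned char  heads;
        unsigned char  sectors;
        unsigned short cylinders;
    };

    static void fake_geometry(uint64_t nsect, struct geo_sketch *hg)
    {
        uint64_t cylinders;

        hg->heads   = 0xff;                 /* 255 heads */
        hg->sectors = 0x3f;                 /* 63 sectors per track */
        cylinders   = nsect / (hg->heads * hg->sectors);
        hg->cylinders = (unsigned short)cylinders;

        /* If the capacity still does not fit, clamp the cylinder count. */
        if ((uint64_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
            hg->cylinders = 0xffff;
    }

    int main(void)
    {
        struct geo_sketch hg;

        fake_geometry(8388608, &hg);        /* 8M sectors = a 4 GiB disk */
        printf("C/H/S = %d/%d/%d\n", hg.cylinders, hg.heads, hg.sectors);
        return 0;
    }
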
 
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/drivers/xen/blkfront/block.h
--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h Fri Jun 02 12:31:48 2006 -0500
@@ -140,6 +140,7 @@ extern int blkif_release(struct inode *i
 extern int blkif_release(struct inode *inode, struct file *filep);
 extern int blkif_ioctl(struct inode *inode, struct file *filep,
                        unsigned command, unsigned long argument);
+extern int blkif_getgeo(struct block_device *, struct hd_geometry *);
 extern int blkif_check(dev_t dev);
 extern int blkif_revalidate(dev_t dev);
 extern void do_blkif_request (request_queue_t *rq);
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c   Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c   Fri Jun 02 12:31:48 2006 -0500
@@ -91,6 +91,7 @@ static struct block_device_operations xl
        .open = blkif_open,
        .release = blkif_release,
        .ioctl  = blkif_ioctl,
+       .getgeo = blkif_getgeo
 };
 
 DEFINE_SPINLOCK(blkif_io_lock);
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/drivers/xen/core/Makefile
--- a/linux-2.6-xen-sparse/drivers/xen/core/Makefile    Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/drivers/xen/core/Makefile    Fri Jun 02 12:31:48 2006 -0500
@@ -2,11 +2,13 @@
 # Makefile for the linux kernel.
 #
 
-obj-y   := evtchn.o reboot.o gnttab.o features.o
+obj-y := evtchn.o gnttab.o features.o
 
-obj-$(CONFIG_PROC_FS)     += xen_proc.o
-obj-$(CONFIG_NET)         += skbuff.o
-obj-$(CONFIG_SMP)         += smpboot.o
-obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
-obj-$(CONFIG_SYSFS)       += hypervisor_sysfs.o
-obj-$(CONFIG_XEN_SYSFS)   += xen_sysfs.o
+obj-$(CONFIG_PROC_FS)          += xen_proc.o
+obj-$(CONFIG_SYSFS)            += hypervisor_sysfs.o
+obj-$(CONFIG_HOTPLUG_CPU)      += cpu_hotplug.o
+obj-$(CONFIG_XEN_SYSFS)                += xen_sysfs.o
+obj-$(CONFIG_IA64)             += xenia64_init.o
+obj-$(CONFIG_XEN_SKBUFF)       += skbuff.o
+obj-$(CONFIG_XEN_REBOOT)       += reboot.o
+obj-$(CONFIG_XEN_SMPBOOT)      += smpboot.o
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/drivers/xen/core/smpboot.c
--- a/linux-2.6-xen-sparse/drivers/xen/core/smpboot.c   Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/drivers/xen/core/smpboot.c   Fri Jun 02 12:31:48 2006 -0500
@@ -89,9 +89,8 @@ void __init prefill_possible_map(void)
 
        for (i = 0; i < NR_CPUS; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
-               if (rc == -ENOENT)
-                       break;
-               cpu_set(i, cpu_possible_map);
+               if (rc >= 0)
+                       cpu_set(i, cpu_possible_map);
        }
 }
 
@@ -150,12 +149,17 @@ static void xen_smp_intr_exit(unsigned i
 }
 #endif
 
-static void cpu_bringup(void)
+void cpu_bringup(void)
 {
        cpu_init();
        touch_softlockup_watchdog();
        preempt_disable();
        local_irq_enable();
+}
+
+static void cpu_bringup_and_idle(void)
+{
+       cpu_bringup();
        cpu_idle();
 }
 
@@ -180,7 +184,7 @@ void cpu_initialize_context(unsigned int
        ctxt.user_regs.fs = 0;
        ctxt.user_regs.gs = 0;
        ctxt.user_regs.ss = __KERNEL_DS;
-       ctxt.user_regs.eip = (unsigned long)cpu_bringup;
+       ctxt.user_regs.eip = (unsigned long)cpu_bringup_and_idle;
        ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000; /* IOPL_RING1 */
 
        memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
@@ -204,7 +208,7 @@ void cpu_initialize_context(unsigned int
        ctxt.failsafe_callback_cs  = __KERNEL_CS;
        ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
 
-       ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;
+       ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
 #else /* __x86_64__ */
        ctxt.user_regs.cs = __KERNEL_CS;
        ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);
@@ -216,7 +220,7 @@ void cpu_initialize_context(unsigned int
        ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
        ctxt.syscall_callback_eip  = (unsigned long)system_call;
 
-       ctxt.ctrlreg[3] = virt_to_mfn(init_level4_pgt) << PAGE_SHIFT;
+       ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(init_level4_pgt));
 
        ctxt.gs_base_kernel = (unsigned long)(cpu_pda(cpu));
 #endif
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/drivers/xen/netback/loopback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/loopback.c       Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/loopback.c       Fri Jun 02 12:31:48 2006 -0500
@@ -146,11 +146,13 @@ static void loopback_construct(struct ne
        dev->hard_start_xmit = loopback_start_xmit;
        dev->get_stats       = loopback_get_stats;
        dev->set_multicast_list = loopback_set_multicast_list;
+       dev->change_mtu      = NULL; /* allow arbitrary mtu */
 
        dev->tx_queue_len    = 0;
 
        dev->features        = (NETIF_F_HIGHDMA |
                                NETIF_F_LLTX |
+                               NETIF_F_SG |
                                NETIF_F_IP_CSUM);
 
        SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/dma-mapping.h
--- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/dma-mapping.h  Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/dma-mapping.h  Fri Jun 02 12:31:48 2006 -0500
@@ -128,8 +128,6 @@ dma_get_cache_alignment(void)
         * maximum possible, to be safe */
        return (1 << INTERNODE_CACHE_SHIFT);
 }
-#else
-extern int dma_get_cache_alignment(void);
 #endif
 
 #define dma_is_consistent(d)   (1)
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypercall.h
--- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypercall.h    Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypercall.h    Fri Jun 02 12:31:48 2006 -0500
@@ -260,6 +260,13 @@ HYPERVISOR_event_channel_op(
 }
 
 static inline int
+HYPERVISOR_acm_op(
+       int cmd, void *arg)
+{
+       return _hypercall2(int, acm_op, cmd, arg);
+}
+
+static inline int
 HYPERVISOR_xen_version(
        int cmd, void *arg)
 {
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/system.h
--- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/system.h       Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/system.h       Fri Jun 02 12:31:48 2006 -0500
@@ -116,10 +116,12 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t"
        __asm__ ( \
                "movl %%cr3,%0\n\t" \
                :"=r" (__dummy)); \
-       machine_to_phys(__dummy); \
+       __dummy = xen_cr3_to_pfn(__dummy); \
+       mfn_to_pfn(__dummy) << PAGE_SHIFT; \
 })
 #define write_cr3(x) ({                                                \
-       maddr_t __dummy = phys_to_machine(x);                   \
+       unsigned int __dummy = pfn_to_mfn((x) >> PAGE_SHIFT);   \
+       __dummy = xen_pfn_to_cr3(__dummy);                      \
        __asm__ __volatile__("movl %0,%%cr3": :"r" (__dummy));  \
 })
 
@@ -625,8 +627,8 @@ do {                                                           \
                preempt_enable_no_resched();                            \
 } while (0)
 
-#define safe_halt()            ((void)0)
-#define halt()                 ((void)0)
+void safe_halt(void);
+void halt(void);
 
 #define __save_and_cli(x)                                              \
 do {                                                                   \
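
The read_cr3()/write_cr3() changes above stop treating CR3 as a raw machine
address and instead go through an explicit chain: pseudo-physical frame ->
machine frame (via the pfn_to_mfn()/mfn_to_pfn() tables) -> the
xen_pfn_to_cr3()/xen_cr3_to_pfn() encoding.  A toy sketch of that layering;
the made-up p2m table and the simple "frame << 12" encoding are assumptions
for illustration only, not the real Xen definitions.

    #include <stdio.h>

    #define PAGE_SHIFT 12

    static unsigned long toy_p2m[] = { 7, 3, 9, 1 };   /* guest pfn -> mfn */

    static unsigned long toy_pfn_to_mfn(unsigned long pfn)
    {
        return toy_p2m[pfn];
    }

    static unsigned long toy_pfn_to_cr3(unsigned long frame)
    {
        return frame << PAGE_SHIFT;     /* non-PAE style encoding, for the example */
    }

    /* Mirrors the shape of the new write_cr3(): paddr -> pfn -> mfn -> cr3. */
    static unsigned long toy_write_cr3_value(unsigned long paddr)
    {
        unsigned long mfn = toy_pfn_to_mfn(paddr >> PAGE_SHIFT);
        return toy_pfn_to_cr3(mfn);
    }

    int main(void)
    {
        /* Page directory at pseudo-physical address 0x2000 (pfn 2). */
        printf("cr3 = 0x%lx\n", toy_write_cr3_value(0x2000));
        return 0;
    }
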
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/include/asm-ia64/hypercall.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h Fri Jun 02 12:31:48 2006 -0500
@@ -247,6 +247,13 @@ HYPERVISOR_event_channel_op(
 }
 
 static inline int
+HYPERVISOR_acm_op(
+       unsigned int cmd, void *arg)
+{
+    return _hypercall2(int, acm_op, cmd, arg);
+}
+
+static inline int
 HYPERVISOR_xen_version(
     int cmd, void *arg)
 {
@@ -313,9 +320,20 @@ HYPERVISOR_suspend(
        return rc;
 }
 
+static inline int
+HYPERVISOR_callback_op(
+       int cmd, void *arg)
+{
+       return _hypercall2(int, callback_op, cmd, arg);
+}
+
 extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
 static inline void exit_idle(void) {}
-#define do_IRQ(irq, regs) __do_IRQ((irq), (regs))
+#define do_IRQ(irq, regs) ({                   \
+       irq_enter();                            \
+       __do_IRQ((irq), (regs));                \
+       irq_exit();                             \
+})
 
 #ifdef CONFIG_XEN_IA64_DOM0_VP
 #include <linux/err.h>
@@ -418,12 +436,14 @@ HYPERVISOR_ioremap(unsigned long ioaddr,
 HYPERVISOR_ioremap(unsigned long ioaddr, unsigned long size)
 {
        unsigned long ret = ioaddr;
-       if (running_on_xen) {
+       if (is_running_on_xen()) {
                ret = __HYPERVISOR_ioremap(ioaddr, size);
-               if (unlikely(IS_ERR_VALUE(ret)))
+               if (unlikely(ret == -ENOSYS))
                        panic("hypercall %s failed with %ld. "
                              "Please check Xen and Linux config mismatch\n",
                              __func__, -ret);
+               else if (unlikely(IS_ERR_VALUE(ret)))
+                       ret = ioaddr;
        }
        return ret;
 }
@@ -439,7 +459,7 @@ HYPERVISOR_phystomach(unsigned long gpfn
 HYPERVISOR_phystomach(unsigned long gpfn)
 {
        unsigned long ret = gpfn;
-       if (running_on_xen) {
+       if (is_running_on_xen()) {
                ret = __HYPERVISOR_phystomach(gpfn);
        }
        return ret;
@@ -456,7 +476,7 @@ HYPERVISOR_machtophys(unsigned long mfn)
 HYPERVISOR_machtophys(unsigned long mfn)
 {
        unsigned long ret = mfn;
-       if (running_on_xen) {
+       if (is_running_on_xen()) {
                ret = __HYPERVISOR_machtophys(mfn);
        }
        return ret;
@@ -473,7 +493,7 @@ HYPERVISOR_zap_physmap(unsigned long gpf
 HYPERVISOR_zap_physmap(unsigned long gpfn, unsigned int extent_order)
 {
        unsigned long ret = 0;
-       if (running_on_xen) {
+       if (is_running_on_xen()) {
                ret = __HYPERVISOR_zap_physmap(gpfn, extent_order);
        }
        return ret;
@@ -481,7 +501,7 @@ HYPERVISOR_zap_physmap(unsigned long gpf
 
 static inline unsigned long
 __HYPERVISOR_add_physmap(unsigned long gpfn, unsigned long mfn,
-                        unsigned int flags, domid_t domid)
+                        unsigned long flags, domid_t domid)
 {
        return _hypercall_imm4(unsigned long, ia64_dom0vp_op,
                               IA64_DOM0VP_add_physmap, gpfn, mfn, flags,
@@ -490,11 +510,11 @@ __HYPERVISOR_add_physmap(unsigned long g
 
 static inline unsigned long
 HYPERVISOR_add_physmap(unsigned long gpfn, unsigned long mfn,
-                      unsigned int flags, domid_t domid)
+                      unsigned long flags, domid_t domid)
 {
        unsigned long ret = 0;
-       BUG_ON(!running_on_xen);//XXX
-       if (running_on_xen) {
+       BUG_ON(!is_running_on_xen());//XXX
+       if (is_running_on_xen()) {
                ret = __HYPERVISOR_add_physmap(gpfn, mfn, flags, domid);
        }
        return ret;
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h        Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h        Fri Jun 02 12:31:48 2006 -0500
@@ -46,14 +46,12 @@
 #include <asm/hypercall.h>
 #include <asm/ptrace.h>
 #include <asm/page.h>
-#include <asm/xen/privop.h> // for running_on_xen
+#include <asm/xen/privop.h> // for is_running_on_xen()
 
 extern shared_info_t *HYPERVISOR_shared_info;
 extern start_info_t *xen_start_info;
 
 void force_evtchn_callback(void);
-
-#define is_running_on_xen() running_on_xen
 
 /* Turn jiffies into Xen system time. XXX Implement me. */
 #define jiffies_to_st(j)       0
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h        Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h        Fri Jun 02 12:31:48 2006 -0500
@@ -43,6 +43,7 @@
 
 #ifndef __ASSEMBLY__
 extern int running_on_xen;
+#define is_running_on_xen() running_on_xen
 
 #define        XEN_HYPER_SSM_I         asm("break %0" : : "i" (HYPERPRIVOP_SSM_I))
 #define        XEN_HYPER_GET_IVR       asm("break %0" : : "i" (HYPERPRIVOP_GET_IVR))
@@ -122,7 +123,7 @@ extern void xen_set_eflag(unsigned long)
 
 #define xen_ia64_intrin_local_irq_restore(x)                           \
 {                                                                      \
-     if (running_on_xen) {                                             \
+     if (is_running_on_xen()) {                                             \
        if ((x) & IA64_PSR_I) { xen_ssm_i(); }                          \
        else { xen_rsm_i(); }                                           \
     }                                                                  \
@@ -131,7 +132,7 @@ extern void xen_set_eflag(unsigned long)
 
 #define        xen_get_psr_i()                                                 \
 (                                                                      \
-       (running_on_xen) ?                                              \
+       (is_running_on_xen()) ?                                         \
                (xen_get_virtual_psr_i() ? IA64_PSR_I : 0)              \
                : __ia64_get_psr_i()                                    \
 )
@@ -139,7 +140,7 @@ extern void xen_set_eflag(unsigned long)
 #define xen_ia64_ssm(mask)                                             \
 {                                                                      \
        if ((mask)==IA64_PSR_I) {                                       \
-               if (running_on_xen) { xen_ssm_i(); }                    \
+               if (is_running_on_xen()) { xen_ssm_i(); }                       \
                else { __ia64_ssm(mask); }                              \
        }                                                               \
        else { __ia64_ssm(mask); }                                      \
@@ -148,7 +149,7 @@ extern void xen_set_eflag(unsigned long)
 #define xen_ia64_rsm(mask)                                             \
 {                                                                      \
        if ((mask)==IA64_PSR_I) {                                       \
-               if (running_on_xen) { xen_rsm_i(); }                    \
+               if (is_running_on_xen()) { xen_rsm_i(); }                       \
                else { __ia64_rsm(mask); }                              \
        }                                                               \
        else { __ia64_rsm(mask); }                                      \
@@ -167,10 +168,11 @@ extern void xen_set_rr(unsigned long ind
 extern void xen_set_rr(unsigned long index, unsigned long val);
 extern unsigned long xen_get_rr(unsigned long index);
 extern void xen_set_kr(unsigned long index, unsigned long val);
-
-/* Note: It may look wrong to test for running_on_xen in each case.
+extern void xen_ptcga(unsigned long addr, unsigned long size);
+
+/* Note: It may look wrong to test for is_running_on_xen() in each case.
  * However regnum is always a constant so, as written, the compiler
- * eliminates the switch statement, whereas running_on_xen must be
+ * eliminates the switch statement, whereas is_running_on_xen() must be
  * tested dynamically. */
 #define xen_ia64_getreg(regnum)                                                \
 ({                                                                     \
@@ -178,17 +180,17 @@ extern void xen_set_kr(unsigned long ind
                                                                        \
        switch(regnum) {                                                \
        case _IA64_REG_CR_IVR:                                          \
-               ia64_intri_res = (running_on_xen) ?                     \
+               ia64_intri_res = (is_running_on_xen()) ?                        \
                        xen_get_ivr() :                                 \
                        __ia64_getreg(regnum);                          \
                break;                                                  \
        case _IA64_REG_CR_TPR:                                          \
-               ia64_intri_res = (running_on_xen) ?                     \
+               ia64_intri_res = (is_running_on_xen()) ?                        \
                        xen_get_tpr() :                                 \
                        __ia64_getreg(regnum);                          \
                break;                                                  \
        case _IA64_REG_AR_EFLAG:                                        \
-               ia64_intri_res = (running_on_xen) ?                     \
+               ia64_intri_res = (is_running_on_xen()) ?                        \
                        xen_get_eflag() :                               \
                        __ia64_getreg(regnum);                          \
                break;                                                  \
@@ -203,27 +205,27 @@ extern void xen_set_kr(unsigned long ind
 ({                                                                     \
        switch(regnum) {                                                \
        case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:                     \
-               (running_on_xen) ?                                      \
+               (is_running_on_xen()) ?                                 \
                        xen_set_kr((regnum-_IA64_REG_AR_KR0), val) :    \
                        __ia64_setreg(regnum,val);                      \
                break;                                                  \
        case _IA64_REG_CR_ITM:                                          \
-               (running_on_xen) ?                                      \
+               (is_running_on_xen()) ?                                 \
                        xen_set_itm(val) :                              \
                        __ia64_setreg(regnum,val);                      \
                break;                                                  \
        case _IA64_REG_CR_TPR:                                          \
-               (running_on_xen) ?                                      \
+               (is_running_on_xen()) ?                                 \
                        xen_set_tpr(val) :                              \
                        __ia64_setreg(regnum,val);                      \
                break;                                                  \
        case _IA64_REG_CR_EOI:                                          \
-               (running_on_xen) ?                                      \
+               (is_running_on_xen()) ?                                 \
                        xen_eoi() :                                     \
                        __ia64_setreg(regnum,val);                      \
                break;                                                  \
        case _IA64_REG_AR_EFLAG:                                        \
-               (running_on_xen) ?                                      \
+               (is_running_on_xen()) ?                                 \
                        xen_set_eflag(val) :                            \
                        __ia64_setreg(regnum,val);                      \
                break;                                                  \
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/hypercall.h
--- a/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/hypercall.h  Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/hypercall.h  Fri Jun 02 12:31:48 2006 -0500
@@ -258,6 +258,13 @@ HYPERVISOR_event_channel_op(
 }
 
 static inline int
+HYPERVISOR_acm_op(
+       int cmd, void *arg)
+{
+       return _hypercall2(int, acm_op, cmd, arg);
+}
+
+static inline int
 HYPERVISOR_xen_version(
        int cmd, void *arg)
 {
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/system.h
--- a/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/system.h     Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/system.h     Fri Jun 02 12:31:48 2006 -0500
@@ -424,8 +424,8 @@ do {                                                           \
        preempt_enable_no_resched();                                    \
        ___x; })
 
-#define safe_halt()            ((void)0)
-#define halt()                 ((void)0)
+void safe_halt(void);
+void halt(void);
 
 void cpu_idle_wait(void);
 
diff -r 29861ae27914 -r 91ee504ed40e linux-2.6-xen-sparse/include/xen/cpu_hotplug.h
--- a/linux-2.6-xen-sparse/include/xen/cpu_hotplug.h    Tue May 30 15:24:31 2006 -0500
+++ b/linux-2.6-xen-sparse/include/xen/cpu_hotplug.h    Fri Jun 02 12:31:48 2006 -0500
@@ -17,6 +17,8 @@ void init_xenbus_allowed_cpumask(void);
 void init_xenbus_allowed_cpumask(void);
 int smp_suspend(void);
 void smp_resume(void);
+
+void cpu_bringup(void);
 
 #else /* !defined(CONFIG_HOTPLUG_CPU) */
 
diff -r 29861ae27914 -r 91ee504ed40e tools/Makefile
--- a/tools/Makefile    Tue May 30 15:24:31 2006 -0500
+++ b/tools/Makefile    Fri Jun 02 12:31:48 2006 -0500
@@ -1,39 +1,38 @@ XEN_ROOT = ../
 XEN_ROOT = ../
 include $(XEN_ROOT)/tools/Rules.mk
 
-SUBDIRS :=
-SUBDIRS += libxc
-SUBDIRS += xenstore
-SUBDIRS += misc
-SUBDIRS += examples
-SUBDIRS += xentrace
-SUBDIRS += xcutils
-SUBDIRS += firmware
-SUBDIRS += security
-SUBDIRS += console
-SUBDIRS += xenmon
-SUBDIRS += guest-headers
-ifeq ($(VTPM_TOOLS),y)
-SUBDIRS += vtpm_manager
-SUBDIRS += vtpm
-endif
-SUBDIRS += xenstat
+SUBDIRS-y :=
+SUBDIRS-y += libxc
+SUBDIRS-y += xenstore
+SUBDIRS-y += misc
+SUBDIRS-y += examples
+SUBDIRS-y += xentrace
+SUBDIRS-$(CONFIG_X86) += xcutils
+SUBDIRS-y += firmware
+SUBDIRS-y += security
+SUBDIRS-y += console
+SUBDIRS-y += xenmon
+SUBDIRS-y += guest-headers
+SUBDIRS-$(VTPM_TOOLS) += vtpm_manager
+SUBDIRS-$(VTPM_TOOLS) += vtpm
+SUBDIRS-y += xenstat
+
 # These don't cross-compile
 ifeq ($(XEN_COMPILE_ARCH),$(XEN_TARGET_ARCH))
-SUBDIRS += python
-SUBDIRS += pygrub
+SUBDIRS-y += python
+SUBDIRS-y += pygrub
 endif
 
 .PHONY: all
 all: check
-       @set -e; for subdir in $(SUBDIRS); do \
+       @set -e; for subdir in $(SUBDIRS-y); do \
                $(MAKE) -C $$subdir $@; \
        done
        $(MAKE) ioemu
 
 .PHONY: install
 install: check
-       @set -e; for subdir in $(SUBDIRS); do \
+       @set -e; for subdir in $(SUBDIRS-y); do \
                $(MAKE) -C $$subdir $@; \
        done
        $(MAKE) ioemuinstall
@@ -41,7 +40,7 @@ install: check
 
 .PHONY: clean
 clean: check_clean
-       @set -e; for subdir in $(SUBDIRS); do \
+       @set -e; for subdir in $(SUBDIRS-y); do \
                $(MAKE) -C $$subdir $@; \
        done
        $(MAKE) ioemuclean
@@ -55,10 +54,10 @@ check_clean:
        $(MAKE) -C check clean
 
 .PHONY: ioemu ioemuinstall ioemuclean
-ifndef XEN_NO_IOEMU
+ifdef CONFIG_IOEMU
 ioemu ioemuinstall ioemuclean:
        [ -f ioemu/config-host.h ] || \
-       (cd ioemu; ./configure --prefix=usr)
+       (cd ioemu; sh ./configure --prefix=usr)
        $(MAKE) -C ioemu $(patsubst ioemu%,%,$@)
 else
 ioemu ioemuinstall ioemuclean:
diff -r 29861ae27914 -r 91ee504ed40e tools/Rules.mk
--- a/tools/Rules.mk    Tue May 30 15:24:31 2006 -0500
+++ b/tools/Rules.mk    Fri Jun 02 12:31:48 2006 -0500
@@ -4,6 +4,8 @@ all:
 all:
 
 include $(XEN_ROOT)/Config.mk
+
+CONFIG_$(shell uname -s) := y
 
 XEN_XC             = $(XEN_ROOT)/tools/python/xen/lowlevel/xc
 XEN_LIBXC          = $(XEN_ROOT)/tools/libxc
diff -r 29861ae27914 -r 91ee504ed40e tools/debugger/libxendebug/xendebug.c
--- a/tools/debugger/libxendebug/xendebug.c     Tue May 30 15:24:31 2006 -0500
+++ b/tools/debugger/libxendebug/xendebug.c     Fri Jun 02 12:31:48 2006 -0500
@@ -346,8 +346,9 @@ xendebug_memory_page (domain_context_p c
         ctxt->cr3_phys[vcpu] = vcpu_ctxt->ctrlreg[3];
         if ( ctxt->cr3_virt[vcpu] )
             munmap(ctxt->cr3_virt[vcpu], PAGE_SIZE);
-        ctxt->cr3_virt[vcpu] = xc_map_foreign_range(xc_handle, ctxt->domid,
-                    PAGE_SIZE, PROT_READ, ctxt->cr3_phys[vcpu] >> PAGE_SHIFT);
+        ctxt->cr3_virt[vcpu] = xc_map_foreign_range(
+            xc_handle, ctxt->domid, PAGE_SIZE, PROT_READ,
+            xen_cr3_to_pfn(ctxt->cr3_phys[vcpu]));
         if ( ctxt->cr3_virt[vcpu] == NULL )
             return 0;
     } 
diff -r 29861ae27914 -r 91ee504ed40e tools/examples/network-bridge
--- a/tools/examples/network-bridge     Tue May 30 15:24:31 2006 -0500
+++ b/tools/examples/network-bridge     Fri Jun 02 12:31:48 2006 -0500
@@ -60,6 +60,7 @@ evalVariables "$@"
 evalVariables "$@"
 
 vifnum=${vifnum:-$(ip route list | awk '/^default / { print $NF }' | sed 's/^[^0-9]*//')}
+vifnum=${vifnum:-0}
 bridge=${bridge:-xenbr${vifnum}}
 netdev=${netdev:-eth${vifnum}}
 antispoof=${antispoof:-no}
diff -r 29861ae27914 -r 91ee504ed40e tools/firmware/hvmloader/Makefile
--- a/tools/firmware/hvmloader/Makefile Tue May 30 15:24:31 2006 -0500
+++ b/tools/firmware/hvmloader/Makefile Fri Jun 02 12:31:48 2006 -0500
@@ -51,12 +51,12 @@ hvmloader: roms.h hvmloader.c acpi_madt.
        $(OBJCOPY) hvmloader.tmp hvmloader
        rm -f hvmloader.tmp
 
-roms.h:        ../rombios/BIOS-bochs-latest ../vgabios/VGABIOS-lgpl-latest.bin ../vgabios/VGABIOS-lgpl-latest.cirrus.bin ../vmxassist/vmxassist.bin
-       ./mkhex rombios ../rombios/BIOS-bochs-latest > roms.h
-       ./mkhex vgabios_stdvga ../vgabios/VGABIOS-lgpl-latest.bin >> roms.h
-       ./mkhex vgabios_cirrusvga ../vgabios/VGABIOS-lgpl-latest.cirrus.bin >> roms.h
-       ./mkhex vmxassist ../vmxassist/vmxassist.bin >> roms.h
-       ./mkhex acpi ../acpi/acpi.bin >> roms.h
+roms.h:        ../rombios/BIOS-bochs-8-processors ../vgabios/VGABIOS-lgpl-latest.bin ../vgabios/VGABIOS-lgpl-latest.cirrus.bin ../vmxassist/vmxassist.bin
+       sh ./mkhex rombios ../rombios/BIOS-bochs-8-processors > roms.h
+       sh ./mkhex vgabios_stdvga ../vgabios/VGABIOS-lgpl-latest.bin >> roms.h
+       sh ./mkhex vgabios_cirrusvga ../vgabios/VGABIOS-lgpl-latest.cirrus.bin >> roms.h
+       sh ./mkhex vmxassist ../vmxassist/vmxassist.bin >> roms.h
+       sh ./mkhex acpi ../acpi/acpi.bin >> roms.h
 
 .PHONY: clean
 clean:
diff -r 29861ae27914 -r 91ee504ed40e tools/firmware/rombios/Makefile
--- a/tools/firmware/rombios/Makefile   Tue May 30 15:24:31 2006 -0500
+++ b/tools/firmware/rombios/Makefile   Fri Jun 02 12:31:48 2006 -0500
@@ -1,7 +1,7 @@ BIOS_BUILDS = BIOS-bochs-latest
-BIOS_BUILDS = BIOS-bochs-latest
+#BIOS_BUILDS = BIOS-bochs-latest
 #BIOS_BUILDS += BIOS-bochs-2-processors
 #BIOS_BUILDS += BIOS-bochs-4-processors
-#BIOS_BUILDS += BIOS-bochs-8-processors
+BIOS_BUILDS += BIOS-bochs-8-processors
 
 .PHONY: all
 all: bios
diff -r 29861ae27914 -r 91ee504ed40e tools/ioemu/hw/cirrus_vga.c
--- a/tools/ioemu/hw/cirrus_vga.c       Tue May 30 15:24:31 2006 -0500
+++ b/tools/ioemu/hw/cirrus_vga.c       Fri Jun 02 12:31:48 2006 -0500
@@ -2460,7 +2460,6 @@ static CPUWriteMemoryFunc *cirrus_linear
 };
 
 extern FILE *logfile;
-#if defined(__i386__) || defined (__x86_64__)
 static void * set_vram_mapping(unsigned long begin, unsigned long end)
 {
     unsigned long * extent_start = NULL;
@@ -2540,10 +2539,6 @@ static int unset_vram_mapping(unsigned l
     return 0;
 }
 
-#elif defined(__ia64__)
-static void * set_vram_mapping(unsigned long addr, unsigned long end) {}
-static int unset_vram_mapping(unsigned long addr, unsigned long end) {}
-#endif
 extern int vga_accelerate;
 
 /* Compute the memory access functions */
diff -r 29861ae27914 -r 91ee504ed40e tools/ioemu/hw/ne2000.c
--- a/tools/ioemu/hw/ne2000.c   Tue May 30 15:24:31 2006 -0500
+++ b/tools/ioemu/hw/ne2000.c   Fri Jun 02 12:31:48 2006 -0500
@@ -147,9 +147,33 @@ static void ne2000_reset(NE2000State *s)
     }
 }
 
+static int ne2000_buffer_full(NE2000State *s)
+{
+    int avail, index, boundary;
+
+    index = s->curpag << 8;
+    boundary = s->boundary << 8;
+    if (index <= boundary)
+        /* when index == boundary, we should assume the
+         * buffer is full instead of empty!
+         */
+        avail = boundary - index;
+    else
+        avail = (s->stop - s->start) - (index - boundary);
+
+    return (avail < (MAX_ETH_FRAME_SIZE + 4));
+}
+
 static void ne2000_update_irq(NE2000State *s)
 {
     int isr;
+
+    if (ne2000_buffer_full(s)) {
+        /* The freeing space is not enough, tell the ne2k driver
+         * to fetch these packets!
+         */
+        s->isr |= ENISR_RX;
+    }
     isr = s->isr & s->imr;
 #if defined(DEBUG_NE2000)
     printf("NE2000: Set IRQ line %d to %d (%02x %02x)\n",
@@ -168,19 +192,11 @@ static int ne2000_can_receive(void *opaq
 static int ne2000_can_receive(void *opaque)
 {
     NE2000State *s = opaque;
-    int avail, index, boundary;
     
     if (s->cmd & E8390_STOP)
         return 0;
-    index = s->curpag << 8;
-    boundary = s->boundary << 8;
-    if (index < boundary)
-        avail = boundary - index;
-    else
-        avail = (s->stop - s->start) - (index - boundary);
-    if (avail < (MAX_ETH_FRAME_SIZE + 4))
-        return 0;
-    return MAX_ETH_FRAME_SIZE;
+
+    return (ne2000_buffer_full(s) ? 0 : MAX_ETH_FRAME_SIZE);
 }
 
 #define MIN_BUF_SIZE 60
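
The ne2000 change above factors the receive-ring accounting into
ne2000_buffer_full() and, crucially, treats curpag == boundary as a full
ring rather than an empty one, raising ENISR_RX so the guest driver drains
its packets.  A stand-alone sketch of that free-space calculation; the
struct layout and the example values are illustrative, not taken from QEMU.

    #include <stdio.h>

    #define MAX_ETH_FRAME_SIZE 1514

    struct ne2000_sketch {
        int curpag;         /* next page the card will write */
        int boundary;       /* last page the driver has read */
        int start, stop;    /* receive ring limits, in bytes */
    };

    static int buffer_full(const struct ne2000_sketch *s)
    {
        int index = s->curpag << 8;         /* one NE2000 page = 256 bytes */
        int boundary = s->boundary << 8;
        int avail;

        if (index <= boundary)              /* equal means full, not empty */
            avail = boundary - index;
        else
            avail = (s->stop - s->start) - (index - boundary);

        return avail < (MAX_ETH_FRAME_SIZE + 4);
    }

    int main(void)
    {
        struct ne2000_sketch s = { .curpag = 0x4c, .boundary = 0x4c,
                                   .start = 0x4c00, .stop = 0x8000 };
        printf("full=%d\n", buffer_full(&s));   /* prints full=1 */
        return 0;
    }
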
diff -r 29861ae27914 -r 91ee504ed40e tools/ioemu/hw/pc.c
--- a/tools/ioemu/hw/pc.c       Tue May 30 15:24:31 2006 -0500
+++ b/tools/ioemu/hw/pc.c       Fri Jun 02 12:31:48 2006 -0500
@@ -537,8 +537,11 @@ void pc_init(uint64_t ram_size, int vga_
     for(i = 0; i < MAX_SERIAL_PORTS; i++) {
         if (serial_hds[i]) {
             sp = serial_init(serial_io[i], serial_irq[i], serial_hds[i]);
-            if (i == SUMMA_PORT)
+            if (i == serial_summa_port) {
                summa_init(sp, serial_hds[i]);
+               fprintf(stderr, "Serial port %d (COM%d) initialized for Summagraphics\n",
+                       i, i+1);
+           }
         }
     }
 
diff -r 29861ae27914 -r 91ee504ed40e tools/ioemu/hw/vga.c
--- a/tools/ioemu/hw/vga.c      Tue May 30 15:24:31 2006 -0500
+++ b/tools/ioemu/hw/vga.c      Fri Jun 02 12:31:48 2006 -0500
@@ -1995,6 +1995,7 @@ void vga_common_init(VGAState *s, Displa
     s->get_resolution = vga_get_resolution;
     /* XXX: currently needed for display */
     vga_state = s;
+    vga_bios_init(s);
 }
 
 
@@ -2082,7 +2083,6 @@ int vga_initialize(PCIBus *bus, DisplayS
 #endif
     }
 
-    vga_bios_init(s);
     return 0;
 }
 
diff -r 29861ae27914 -r 91ee504ed40e tools/ioemu/vl.c
--- a/tools/ioemu/vl.c  Tue May 30 15:24:31 2006 -0500
+++ b/tools/ioemu/vl.c  Fri Jun 02 12:31:48 2006 -0500
@@ -146,6 +146,7 @@ int repeat_key = 1;
 int repeat_key = 1;
 TextConsole *vga_console;
 CharDriverState *serial_hds[MAX_SERIAL_PORTS];
+int serial_summa_port = -1;
 int xc_handle;
 time_t timeoffset = 0;
 
@@ -2498,7 +2499,7 @@ int set_mm_mapping(int xc_handle,
     xc_domain_getinfo(xc_handle, domid, 1, &info);
 
     if ( xc_domain_setmaxmem(xc_handle, domid,
-                             (info.nr_pages + nr_pages) * PAGE_SIZE/1024) != 0)
+                             info.max_memkb + nr_pages * PAGE_SIZE/1024) !=0)
     {
         fprintf(logfile, "set maxmem returned error %d\n", errno);
         return -1;
@@ -2588,8 +2589,8 @@ int main(int argc, char **argv)
     pstrcpy(monitor_device, sizeof(monitor_device), "vc");
 
     pstrcpy(serial_devices[0], sizeof(serial_devices[0]), "vc");
-    pstrcpy(serial_devices[1], sizeof(serial_devices[1]), "null");
-    for(i = 2; i < MAX_SERIAL_PORTS; i++)
+    serial_summa_port = -1;
+    for(i = 1; i < MAX_SERIAL_PORTS; i++)
         serial_devices[i][0] = '\0';
     serial_device_index = 0;
 
@@ -3173,6 +3174,20 @@ int main(int argc, char **argv)
     }
     monitor_init(monitor_hd, !nographic);
 
+    /* Find which port should be the Summagraphics port */
+    /* It's the first unspecified serial line. Note that COM1 is set */
+    /* by default, so the Summagraphics port would be COM2 or higher */
+
+    for(i = 0; i < MAX_SERIAL_PORTS; i++) {
+      if (serial_devices[i][0] != '\0')
+       continue;
+      serial_summa_port = i;
+      pstrcpy(serial_devices[serial_summa_port], sizeof(serial_devices[0]), "null");
+      break;
+    }
+
+    /* Now, open the ports */
+
     for(i = 0; i < MAX_SERIAL_PORTS; i++) {
         if (serial_devices[i][0] != '\0') {
             serial_hds[i] = qemu_chr_open(serial_devices[i]);
diff -r 29861ae27914 -r 91ee504ed40e tools/ioemu/vl.h
--- a/tools/ioemu/vl.h  Tue May 30 15:24:31 2006 -0500
+++ b/tools/ioemu/vl.h  Fri Jun 02 12:31:48 2006 -0500
@@ -238,9 +238,9 @@ void console_select(unsigned int index);
 /* serial ports */
 
 #define MAX_SERIAL_PORTS 4
-#define SUMMA_PORT     1
 
 extern CharDriverState *serial_hds[MAX_SERIAL_PORTS];
+extern int serial_summa_port;
 
 /* network redirectors support */
 
diff -r 29861ae27914 -r 91ee504ed40e tools/libxc/Makefile
--- a/tools/libxc/Makefile      Tue May 30 15:24:31 2006 -0500
+++ b/tools/libxc/Makefile      Fri Jun 02 12:31:48 2006 -0500
@@ -10,43 +10,30 @@ XEN_ROOT = ../..
 XEN_ROOT = ../..
 include $(XEN_ROOT)/tools/Rules.mk
 
-SRCS       :=
-SRCS       += xc_bvtsched.c
-SRCS       += xc_core.c
-SRCS       += xc_domain.c
-SRCS       += xc_evtchn.c
-SRCS       += xc_misc.c
-SRCS       += xc_acm.c   
-SRCS       += xc_physdev.c
-SRCS       += xc_private.c
-SRCS       += xc_sedf.c
-SRCS       += xc_csched.c
-SRCS       += xc_tbuf.c
+CTRL_SRCS-y       :=
+CTRL_SRCS-y       += xc_bvtsched.c
+CTRL_SRCS-y       += xc_core.c
+CTRL_SRCS-y       += xc_domain.c
+CTRL_SRCS-y       += xc_evtchn.c
+CTRL_SRCS-y       += xc_misc.c
+CTRL_SRCS-y       += xc_acm.c   
+CTRL_SRCS-y       += xc_physdev.c
+CTRL_SRCS-y       += xc_private.c
+CTRL_SRCS-y       += xc_sedf.c
+CTRL_SRCS-y       += xc_csched.c
+CTRL_SRCS-y       += xc_tbuf.c
+CTRL_SRCS-$(CONFIG_X86) += xc_ptrace.c xc_ptrace_core.c xc_pagetab.c
+CTRL_SRCS-$(CONFIG_Linux) += xc_linux.c
 
-ifeq ($(patsubst x86%,x86,$(XEN_TARGET_ARCH)),x86)
-SRCS       += xc_ptrace.c
-SRCS       += xc_ptrace_core.c
-SRCS       += xc_pagetab.c
-endif
-
-SRCS_Linux += xc_linux.c
-
-SRCS       += $(SRCS_Linux)
-
-BUILD_SRCS :=
-#BUILD_SRCS += xc_linux_build.c
-BUILD_SRCS += xc_load_bin.c
-BUILD_SRCS += xc_load_elf.c
-#BUILD_SRCS += xg_private.c
-
-ifeq ($(XEN_TARGET_ARCH),ia64)
-BUILD_SRCS += xc_ia64_stubs.c
-else
-BUILD_SRCS += xc_load_aout9.c
-BUILD_SRCS += xc_linux_restore.c
-BUILD_SRCS += xc_linux_save.c
-BUILD_SRCS += xc_hvm_build.c
-endif
+GUEST_SRCS-y :=
+GUEST_SRCS-y += xc_linux_build.c
+GUEST_SRCS-y += xc_load_bin.c
+GUEST_SRCS-y += xc_load_elf.c
+GUEST_SRCS-y += xg_private.c
+GUEST_SRCS-$(CONFIG_IA64) += xc_ia64_stubs.c
+GUEST_SRCS-$(CONFIG_PLAN9) += xc_load_aout9.c
+GUEST_SRCS-$(CONFIG_MIGRATE) += xc_linux_restore.c xc_linux_save.c
+GUEST_SRCS-$(CONFIG_HVM) += xc_hvm_build.c
 
 CFLAGS   += -Werror
 CFLAGS   += -fno-strict-aliasing
@@ -61,11 +48,11 @@ LDFLAGS  += -L.
 LDFLAGS  += -L.
 DEPS     = .*.d
 
-LIB_OBJS := $(patsubst %.c,%.o,$(SRCS))
-PIC_OBJS := $(patsubst %.c,%.opic,$(SRCS))
+CTRL_LIB_OBJS := $(patsubst %.c,%.o,$(CTRL_SRCS-y))
+CTRL_PIC_OBJS := $(patsubst %.c,%.opic,$(CTRL_SRCS-y))
 
-LIB_BUILD_OBJS := $(patsubst %.c,%.o,$(BUILD_SRCS))
-PIC_BUILD_OBJS := $(patsubst %.c,%.opic,$(BUILD_SRCS))
+GUEST_LIB_OBJS := $(patsubst %.c,%.o,$(GUEST_SRCS-y))
+GUEST_PIC_OBJS := $(patsubst %.c,%.opic,$(GUEST_SRCS-y))
 
 LIB := libxenctrl.a
 LIB += libxenctrl.so libxenctrl.so.$(MAJOR) libxenctrl.so.$(MAJOR).$(MINOR)
@@ -125,7 +112,7 @@ rpm: build
 
 # libxenctrl
 
-libxenctrl.a: $(LIB_OBJS)
+libxenctrl.a: $(CTRL_LIB_OBJS)
        $(AR) rc $@ $^
 
 libxenctrl.so: libxenctrl.so.$(MAJOR)
@@ -133,12 +120,12 @@ libxenctrl.so.$(MAJOR): libxenctrl.so.$(
 libxenctrl.so.$(MAJOR): libxenctrl.so.$(MAJOR).$(MINOR)
        ln -sf $< $@
 
-libxenctrl.so.$(MAJOR).$(MINOR): $(PIC_OBJS)
+libxenctrl.so.$(MAJOR).$(MINOR): $(CTRL_PIC_OBJS)
        $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-soname -Wl,libxenctrl.so.$(MAJOR) -shared -o $@ $^
 
 # libxenguest
 
-libxenguest.a: $(LIB_BUILD_OBJS)
+libxenguest.a: $(GUEST_LIB_OBJS)
        $(AR) rc $@ $^
 
 libxenguest.so: libxenguest.so.$(MAJOR)
@@ -146,7 +133,7 @@ libxenguest.so.$(MAJOR): libxenguest.so.
 libxenguest.so.$(MAJOR): libxenguest.so.$(MAJOR).$(MINOR)
        ln -sf $< $@
 
-libxenguest.so.$(MAJOR).$(MINOR): $(PIC_BUILD_OBJS) libxenctrl.so
-       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-soname -Wl,libxenguest.so.$(MAJOR) -shared -o $@ $^ -lxenctrl
+libxenguest.so.$(MAJOR).$(MINOR): $(GUEST_PIC_OBJS) libxenctrl.so
+       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-soname -Wl,libxenguest.so.$(MAJOR) -shared -o $@ $^ -lz -lxenctrl
 
 -include $(DEPS)
diff -r 29861ae27914 -r 91ee504ed40e tools/libxc/xc_core.c
--- a/tools/libxc/xc_core.c     Tue May 30 15:24:31 2006 -0500
+++ b/tools/libxc/xc_core.c     Fri Jun 02 12:31:48 2006 -0500
@@ -1,6 +1,4 @@
 #include "xg_private.h"
-#define ELFSIZE 32
-#include "xc_elf.h"
 #include <stdlib.h>
 #include <unistd.h>
 
diff -r 29861ae27914 -r 91ee504ed40e tools/libxc/xc_hvm_build.c
--- a/tools/libxc/xc_hvm_build.c        Tue May 30 15:24:31 2006 -0500
+++ b/tools/libxc/xc_hvm_build.c        Fri Jun 02 12:31:48 2006 -0500
@@ -2,9 +2,9 @@
  * xc_hvm_build.c
  */
 
+#define ELFSIZE 32
 #include <stddef.h>
 #include "xg_private.h"
-#define ELFSIZE 32
 #include "xc_elf.h"
 #include <stdlib.h>
 #include <unistd.h>
diff -r 29861ae27914 -r 91ee504ed40e tools/libxc/xc_linux_build.c
--- a/tools/libxc/xc_linux_build.c      Tue May 30 15:24:31 2006 -0500
+++ b/tools/libxc/xc_linux_build.c      Fri Jun 02 12:31:48 2006 -0500
@@ -5,14 +5,6 @@
 #include "xg_private.h"
 #include "xc_private.h"
 #include <xenctrl.h>
-
-#if defined(__i386__)
-#define ELFSIZE 32
-#endif
-
-#if defined(__x86_64__) || defined(__ia64__)
-#define ELFSIZE 64
-#endif
 
 #include "xc_elf.h"
 #include "xc_aout9.h"
@@ -213,9 +205,9 @@ static int setup_pg_tables(int xc_handle
     alloc_pt(l2tab, vl2tab, pl2tab);
     vl2e = &vl2tab[l2_table_offset(dsi_v_start)];
     if (shadow_mode_enabled)
-        ctxt->ctrlreg[3] = pl2tab;
+        ctxt->ctrlreg[3] = xen_pfn_to_cr3(pl2tab >> PAGE_SHIFT);
     else
-        ctxt->ctrlreg[3] = l2tab;
+        ctxt->ctrlreg[3] = xen_pfn_to_cr3(l2tab >> PAGE_SHIFT);
 
     for ( count = 0; count < ((v_end - dsi_v_start) >> PAGE_SHIFT); count++ )
     {
@@ -276,9 +268,9 @@ static int setup_pg_tables_pae(int xc_ha
     alloc_pt(l3tab, vl3tab, pl3tab);
     vl3e = &vl3tab[l3_table_offset_pae(dsi_v_start)];
     if (shadow_mode_enabled)
-        ctxt->ctrlreg[3] = pl3tab;
+        ctxt->ctrlreg[3] = xen_pfn_to_cr3(pl3tab >> PAGE_SHIFT);
     else
-        ctxt->ctrlreg[3] = l3tab;
+        ctxt->ctrlreg[3] = xen_pfn_to_cr3(l3tab >> PAGE_SHIFT);
 
     for ( count = 0; count < ((v_end - dsi_v_start) >> PAGE_SHIFT); count++)
     {
@@ -369,9 +361,9 @@ static int setup_pg_tables_64(int xc_han
     alloc_pt(l4tab, vl4tab, pl4tab);
     vl4e = &vl4tab[l4_table_offset(dsi_v_start)];
     if (shadow_mode_enabled)
-        ctxt->ctrlreg[3] = pl4tab;
+        ctxt->ctrlreg[3] = xen_pfn_to_cr3(pl4tab >> PAGE_SHIFT);
     else
-        ctxt->ctrlreg[3] = l4tab;
+        ctxt->ctrlreg[3] = xen_pfn_to_cr3(l4tab >> PAGE_SHIFT);
 
     for ( count = 0; count < ((v_end-dsi_v_start)>>PAGE_SHIFT); count++)
     {
@@ -835,13 +827,13 @@ static int setup_guest(int xc_handle,
         if ( dsi.pae_kernel )
         {
             if ( pin_table(xc_handle, MMUEXT_PIN_L3_TABLE,
-                           ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
+                           xen_cr3_to_pfn(ctxt->ctrlreg[3]), dom) )
                 goto error_out;
         }
         else
         {
             if ( pin_table(xc_handle, MMUEXT_PIN_L2_TABLE,
-                           ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
+                           xen_cr3_to_pfn(ctxt->ctrlreg[3]), dom) )
                 goto error_out;
         }
     }
@@ -853,7 +845,7 @@ static int setup_guest(int xc_handle,
      * correct protection for the page
      */
     if ( pin_table(xc_handle, MMUEXT_PIN_L4_TABLE,
-                   ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
+                   xen_cr3_to_pfn(ctxt->ctrlreg[3]), dom) )
         goto error_out;
 #endif
 
diff -r 29861ae27914 -r 91ee504ed40e tools/libxc/xc_linux_restore.c
--- a/tools/libxc/xc_linux_restore.c    Tue May 30 15:24:31 2006 -0500
+++ b/tools/libxc/xc_linux_restore.c    Fri Jun 02 12:31:48 2006 -0500
@@ -536,7 +536,7 @@ int xc_linux_restore(int xc_handle, int 
     }
 
     /* Uncanonicalise the page table base pointer. */
-    pfn = ctxt.ctrlreg[3] >> PAGE_SHIFT;
+    pfn = xen_cr3_to_pfn(ctxt.ctrlreg[3]);
 
     if (pfn >= max_pfn) {
         ERR("PT base is bad: pfn=%lu max_pfn=%lu type=%08lx",
@@ -552,7 +552,7 @@ int xc_linux_restore(int xc_handle, int 
         goto out;
     }
 
-    ctxt.ctrlreg[3] = p2m[pfn] << PAGE_SHIFT;
+    ctxt.ctrlreg[3] = xen_pfn_to_cr3(p2m[pfn]);
 
     /* clear any pending events and the selector */
     memset(&(shared_info->evtchn_pending[0]), 0,
diff -r 29861ae27914 -r 91ee504ed40e tools/libxc/xc_linux_save.c
--- a/tools/libxc/xc_linux_save.c       Tue May 30 15:24:31 2006 -0500
+++ b/tools/libxc/xc_linux_save.c       Fri Jun 02 12:31:48 2006 -0500
@@ -1129,12 +1129,12 @@ int xc_linux_save(int xc_handle, int io_
     }
 
     /* Canonicalise the page table base pointer. */
-    if ( !MFN_IS_IN_PSEUDOPHYS_MAP(ctxt.ctrlreg[3] >> PAGE_SHIFT) ) {
+    if ( !MFN_IS_IN_PSEUDOPHYS_MAP(xen_cr3_to_pfn(ctxt.ctrlreg[3])) ) {
         ERR("PT base is not in range of pseudophys map");
         goto out;
     }
-    ctxt.ctrlreg[3] = mfn_to_pfn(ctxt.ctrlreg[3] >> PAGE_SHIFT) <<
-        PAGE_SHIFT;
+    ctxt.ctrlreg[3] = 
+        xen_pfn_to_cr3(mfn_to_pfn(xen_cr3_to_pfn(ctxt.ctrlreg[3])));
 
     if (!write_exact(io_fd, &ctxt, sizeof(ctxt)) ||
         !write_exact(io_fd, live_shinfo, PAGE_SIZE)) {
diff -r 29861ae27914 -r 91ee504ed40e tools/libxc/xc_load_elf.c
--- a/tools/libxc/xc_load_elf.c Tue May 30 15:24:31 2006 -0500
+++ b/tools/libxc/xc_load_elf.c Fri Jun 02 12:31:48 2006 -0500
@@ -3,14 +3,6 @@
  */
 
 #include "xg_private.h"
-
-#if defined(__i386__)
-#define ELFSIZE 32
-#endif
-#if defined(__x86_64__) || defined(__ia64__) || defined(__powerpc64__)
-#define ELFSIZE 64
-#endif
-
 #include "xc_elf.h"
 #include <stdlib.h>
 #include <endian.h>
diff -r 29861ae27914 -r 91ee504ed40e tools/libxc/xc_pagetab.c
--- a/tools/libxc/xc_pagetab.c  Tue May 30 15:24:31 2006 -0500
+++ b/tools/libxc/xc_pagetab.c  Fri Jun 02 12:31:48 2006 -0500
@@ -78,7 +78,7 @@ unsigned long xc_translate_foreign_addre
         fprintf(stderr, "failed to retreive vcpu context\n");
         goto out;
     }
-    cr3 = ctx.ctrlreg[3];
+    cr3 = ((unsigned long long)xen_cr3_to_pfn(ctx.ctrlreg[3])) << PAGE_SHIFT;
 
     /* Page Map Level 4 */
 
diff -r 29861ae27914 -r 91ee504ed40e tools/libxc/xc_ptrace.c
--- a/tools/libxc/xc_ptrace.c   Tue May 30 15:24:31 2006 -0500
+++ b/tools/libxc/xc_ptrace.c   Fri Jun 02 12:31:48 2006 -0500
@@ -190,7 +190,8 @@ map_domain_va_32(
     static void *v[MAX_VIRT_CPUS];
 
     l2 = xc_map_foreign_range(
-         xc_handle, current_domid, PAGE_SIZE, PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
+         xc_handle, current_domid, PAGE_SIZE, PROT_READ,
+         xen_cr3_to_pfn(ctxt[cpu].ctrlreg[3]));
     if ( l2 == NULL )
         return NULL;
 
@@ -230,7 +231,8 @@ map_domain_va_pae(
     static void *v[MAX_VIRT_CPUS];
 
     l3 = xc_map_foreign_range(
-        xc_handle, current_domid, PAGE_SIZE, PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
+        xc_handle, current_domid, PAGE_SIZE, PROT_READ,
+        xen_cr3_to_pfn(ctxt[cpu].ctrlreg[3]));
     if ( l3 == NULL )
         return NULL;
 
@@ -282,8 +284,9 @@ map_domain_va_64(
     if ((ctxt[cpu].ctrlreg[4] & 0x20) == 0 ) /* legacy ia32 mode */
         return map_domain_va_32(xc_handle, cpu, guest_va, perm);
 
-    l4 = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
-            PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
+    l4 = xc_map_foreign_range(
+        xc_handle, current_domid, PAGE_SIZE, PROT_READ,
+        xen_cr3_to_pfn(ctxt[cpu].ctrlreg[3]));
     if ( l4 == NULL )
         return NULL;
 
diff -r 29861ae27914 -r 91ee504ed40e tools/libxc/xc_ptrace_core.c
--- a/tools/libxc/xc_ptrace_core.c      Tue May 30 15:24:31 2006 -0500
+++ b/tools/libxc/xc_ptrace_core.c      Fri Jun 02 12:31:48 2006 -0500
@@ -12,8 +12,8 @@ static long   nr_pages = 0;
 static long   nr_pages = 0;
 static unsigned long  *p2m_array = NULL;
 static unsigned long  *m2p_array = NULL;
-static unsigned long            pages_offset;
-static unsigned long            cr3[MAX_VIRT_CPUS];
+static unsigned long   pages_offset;
+static unsigned long   cr3[MAX_VIRT_CPUS];
 
 /* --------------------- */
 
@@ -47,7 +47,7 @@ map_domain_va_core(unsigned long domfd, 
             munmap(cr3_virt[cpu], PAGE_SIZE);
         v = mmap(
             NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
-            map_mtop_offset(cr3_phys[cpu]));
+            map_mtop_offset(xen_cr3_to_pfn(cr3_phys[cpu])));
         if (v == MAP_FAILED)
         {
             perror("mmap failed");
@@ -127,14 +127,15 @@ xc_waitdomain_core(
             sizeof(vcpu_guest_context_t)*nr_vcpus)
             return -1;
 
-        for (i = 0; i < nr_vcpus; i++) {
+        for (i = 0; i < nr_vcpus; i++)
             cr3[i] = ctxt[i].ctrlreg[3];
-        }
+
         if ((p2m_array = malloc(nr_pages * sizeof(unsigned long))) == NULL)
         {
             printf("Could not allocate p2m_array\n");
             return -1;
         }
+
         if (read(domfd, p2m_array, sizeof(unsigned long)*nr_pages) !=
             sizeof(unsigned long)*nr_pages)
             return -1;
@@ -146,10 +147,8 @@ xc_waitdomain_core(
         }
         bzero(m2p_array, sizeof(unsigned long)* 1 << 20);
 
-        for (i = 0; i < nr_pages; i++) {
+        for (i = 0; i < nr_pages; i++)
             m2p_array[p2m_array[i]] = i;
-        }
-
     }
     return 0;
 }
diff -r 29861ae27914 -r 91ee504ed40e tools/libxc/xg_private.c
--- a/tools/libxc/xg_private.c  Tue May 30 15:24:31 2006 -0500
+++ b/tools/libxc/xg_private.c  Fri Jun 02 12:31:48 2006 -0500
@@ -145,3 +145,18 @@ unsigned long csum_page(void *page)
 
     return sum ^ (sum>>32);
 }
+
+__attribute__((weak)) int xc_hvm_build(
+    int xc_handle,
+    uint32_t domid,
+    int memsize,
+    const char *image_name,
+    unsigned int vcpus,
+    unsigned int pae,
+    unsigned int acpi,
+    unsigned int apic,
+    unsigned int store_evtchn,
+    unsigned long *store_mfn)
+{
+    return -ENOSYS;
+}
diff -r 29861ae27914 -r 91ee504ed40e tools/libxc/xg_private.h
--- a/tools/libxc/xg_private.h  Tue May 30 15:24:31 2006 -0500
+++ b/tools/libxc/xg_private.h  Fri Jun 02 12:31:48 2006 -0500
@@ -25,6 +25,14 @@
 #define DECLARE_DOM0_OP dom0_op_t op
 #endif
 
+#ifndef ELFSIZE
+#include <limits.h>
+#if UINT_MAX == ULONG_MAX
+#define ELFSIZE 32
+#else
+#define ELFSIZE 64
+#endif
+#endif
 
 char *xc_read_image(const char *filename, unsigned long *size);
 char *xc_inflate_buffer(const char *in_buf,
diff -r 29861ae27914 -r 91ee504ed40e tools/misc/Makefile
--- a/tools/misc/Makefile       Tue May 30 15:24:31 2006 -0500
+++ b/tools/misc/Makefile       Fri Jun 02 12:31:48 2006 -0500
@@ -25,7 +25,7 @@ build: $(TARGETS)
 build: $(TARGETS)
        $(MAKE) -C miniterm
        $(MAKE) -C cpuperf
-ifneq ($(XEN_TARGET_ARCH),ia64)
+ifeq ($(CONFIG_MBOOTPACK),y)
        $(MAKE) -C mbootpack
 endif
        $(MAKE) -C lomount
diff -r 29861ae27914 -r 91ee504ed40e tools/python/xen/util/security.py
--- a/tools/python/xen/util/security.py Tue May 30 15:24:31 2006 -0500
+++ b/tools/python/xen/util/security.py Fri Jun 02 12:31:48 2006 -0500
@@ -426,6 +426,15 @@ def get_decision(arg1, arg2):
             err("Argument type not supported.")
         ssidref = label2ssidref(arg2[2][1], arg2[1][1])
         arg2 = ['ssidref', str(ssidref)]
+
+    # accept only int or string types for domid and ssidref
+    if isinstance(arg1[1], int):
+        arg1[1] = str(arg1[1])
+    if isinstance(arg2[1], int):
+        arg2[1] = str(arg2[1])
+    if not isinstance(arg1[1], str) or not isinstance(arg2[1], str):
+        err("Invalid id or ssidref type, string or int required")
+
     try:
         decision = acm.getdecision(arg1[0], arg1[1], arg2[0], arg2[1])
     except:
diff -r 29861ae27914 -r 91ee504ed40e tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py       Tue May 30 15:24:31 2006 -0500
+++ b/tools/python/xen/xm/main.py       Fri Jun 02 12:31:48 2006 -0500
@@ -806,7 +806,7 @@ def xm_top(args):
     os.execvp('xentop', ['xentop'])
 
 def xm_dmesg(args):
-    arg_check(args, "dmesg", 0)
+    arg_check(args, "dmesg", 0, 1)
     
     gopts = Opts(use="""[-c|--clear]
 
diff -r 29861ae27914 -r 91ee504ed40e tools/vtpm/Makefile
--- a/tools/vtpm/Makefile       Tue May 30 15:24:31 2006 -0500
+++ b/tools/vtpm/Makefile       Fri Jun 02 12:31:48 2006 -0500
@@ -9,7 +9,7 @@ VTPM_DIR = vtpm
 VTPM_DIR = vtpm
 
 # Emulator tarball name
-TPM_EMULATOR_TARFILE = tpm_emulator-0.2b.tar.gz
+TPM_EMULATOR_TARFILE = tpm_emulator-0.3.tar.gz
 
 GMP_HEADER = /usr/include/gmp.h
 
@@ -47,23 +47,23 @@ mrproper:
 
 # Create vtpm and TPM emulator dirs
 # apply patches for 1) used as dom0 tpm driver 2) used as vtpm device instance
-$(TPM_EMULATOR_DIR): $(TPM_EMULATOR_TARFILE) tpm_emulator.patch tpm_emulator-0.2b-x86_64.patch
+$(TPM_EMULATOR_DIR): $(TPM_EMULATOR_TARFILE) tpm_emulator.patch tpm_emulator-0.3-x86_64.patch
        if [ "$(BUILD_EMULATOR)" = "y" ]; then \
                tar -xzf $(TPM_EMULATOR_TARFILE); \
                rm -rf $(TPM_EMULATOR_DIR); \
-               mv tpm_emulator-0.2 $(TPM_EMULATOR_DIR); \
+               mv tpm_emulator-0.3 $(TPM_EMULATOR_DIR); \
                cd $(TPM_EMULATOR_DIR); \
-               patch -p1 < ../tpm_emulator-0.2b-x86_64.patch; \
+               patch -p1 < ../tpm_emulator-0.3-x86_64.patch; \
                patch -p1 <../tpm_emulator.patch; \
        fi
 
-$(VTPM_DIR): $(TPM_EMULATOR_TARFILE) tpm_emulator-0.2b-x86_64.patch vtpm.patch
+$(VTPM_DIR): $(TPM_EMULATOR_TARFILE) tpm_emulator-0.3-x86_64.patch vtpm.patch
        tar -xzf $(TPM_EMULATOR_TARFILE);  
        rm -rf $(VTPM_DIR)
-       mv tpm_emulator-0.2 $(VTPM_DIR); 
+       mv tpm_emulator-0.3 $(VTPM_DIR); 
 
        cd $(VTPM_DIR); \
-       patch -p1 < ../tpm_emulator-0.2b-x86_64.patch; \
+       patch -p1 < ../tpm_emulator-0.3-x86_64.patch; \
        patch -p1 <../vtpm.patch
 
 .PHONY: build_sub
diff -r 29861ae27914 -r 91ee504ed40e tools/vtpm/Rules.mk
--- a/tools/vtpm/Rules.mk       Tue May 30 15:24:31 2006 -0500
+++ b/tools/vtpm/Rules.mk       Fri Jun 02 12:31:48 2006 -0500
@@ -33,6 +33,7 @@ OBJS  = $(patsubst %.c,%.o,$(SRCS))
 
 -include $(DEP_FILES)
 
+# Emulator does not work on 64-bit systems, and may be broken on 32 right now
 BUILD_EMULATOR = n
 
 # Make sure these are just rules
diff -r 29861ae27914 -r 91ee504ed40e tools/vtpm/vtpm.patch
--- a/tools/vtpm/vtpm.patch     Tue May 30 15:24:31 2006 -0500
+++ b/tools/vtpm/vtpm.patch     Fri Jun 02 12:31:48 2006 -0500
@@ -1,23 +1,24 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
-diff -uprN orig/tpm_emulator-0.2-x86_64/AUTHORS vtpm/AUTHORS
---- orig/tpm_emulator-0.2-x86_64/AUTHORS       2005-08-15 00:58:57.000000000 -0700
-+++ vtpm/AUTHORS       2006-05-17 09:31:11.000000000 -0700
-@@ -1 +1,2 @@
+diff -uprN orig/tpm_emulator-0.3-x86_64/AUTHORS vtpm/AUTHORS
+--- orig/tpm_emulator-0.3-x86_64/AUTHORS       2006-01-10 04:21:45.000000000 -0800
++++ vtpm/AUTHORS       2006-05-30 12:23:26.000000000 -0700
+@@ -1,2 +1,3 @@
  Mario Strasser <mast@xxxxxxx>
-+INTEL Corp <>
-diff -uprN orig/tpm_emulator-0.2-x86_64/ChangeLog vtpm/ChangeLog
---- orig/tpm_emulator-0.2-x86_64/ChangeLog     2005-08-15 00:58:57.000000000 -0700
-+++ vtpm/ChangeLog     2006-05-17 09:31:11.000000000 -0700
+ Heiko Stamer <stamer@xxxxxxxx> [DAA]
++INTEL Corp <> [VTPM Extensions]
+diff -uprN orig/tpm_emulator-0.3-x86_64/ChangeLog vtpm/ChangeLog
+--- orig/tpm_emulator-0.3-x86_64/ChangeLog     2006-01-10 04:21:45.000000000 -0800
++++ vtpm/ChangeLog     2006-05-30 12:23:26.000000000 -0700
 @@ -1,3 +1,7 @@
 +2005-08-16 Intel Corp
-+      Moved module out of kernel to run as a ring 3 app
-+      Modified save_to_file and load_from_file to call a xen backend driver to call a VTPM manager
-+
- 2005-08-15  Mario Strasser <mast@xxxxxxx>
-       * all: some typos corrected
-       * tpm_integrity.c: bug in TPM_Extend fixed
-diff -uprN orig/tpm_emulator-0.2-x86_64/crypto/gmp_kernel_wrapper.c vtpm/crypto/gmp_kernel_wrapper.c
---- orig/tpm_emulator-0.2-x86_64/crypto/gmp_kernel_wrapper.c   2006-05-17 09:34:13.000000000 -0700
-+++ vtpm/crypto/gmp_kernel_wrapper.c   2006-05-17 09:31:11.000000000 -0700
++      * Moved module out of kernel to run as a ring 3 app
++      * Modified save_to_file and load_from_file to call a xen backend driver to call a VTPM manager
++
+ 2005-12-24  Mario Strasser <mast@xxxxxxx>
+       * tpm_transport.c, tpm_marshalling.c, tpm_structures.h:
+               Transport session functionality added
+diff -uprN orig/tpm_emulator-0.3-x86_64/crypto/gmp_kernel_wrapper.c vtpm/crypto/gmp_kernel_wrapper.c
+--- orig/tpm_emulator-0.3-x86_64/crypto/gmp_kernel_wrapper.c   2006-05-30 12:28:02.000000000 -0700
++++ vtpm/crypto/gmp_kernel_wrapper.c   2006-05-30 12:23:26.000000000 -0700
 @@ -1,5 +1,6 @@
  /* Software-Based Trusted Platform Module (TPM) Emulator for Linux
   * Copyright (C) 2004 Mario Strasser <mast@xxxxxxx>,
@@ -77,9 +78,9 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
    }
  }
  
-diff -uprN orig/tpm_emulator-0.2-x86_64/crypto/rsa.c vtpm/crypto/rsa.c
---- orig/tpm_emulator-0.2-x86_64/crypto/rsa.c  2005-08-15 00:58:57.000000000 -0700
-+++ vtpm/crypto/rsa.c  2006-05-17 09:31:11.000000000 -0700
+diff -uprN orig/tpm_emulator-0.3-x86_64/crypto/rsa.c vtpm/crypto/rsa.c
+--- orig/tpm_emulator-0.3-x86_64/crypto/rsa.c  2006-01-10 04:21:45.000000000 -0800
++++ vtpm/crypto/rsa.c  2006-05-30 12:23:26.000000000 -0700
 @@ -1,5 +1,6 @@
  /* Software-Based Trusted Platform Module (TPM) Emulator for Linux
   * Copyright (C) 2004 Mario Strasser <mast@xxxxxxx>,
@@ -87,7 +88,7 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
   *
   * This module is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published
-@@ -363,7 +364,7 @@ static int encode_message(int type, uint
+@@ -381,7 +382,7 @@ static int encode_message(int type, uint
        msg[0] = 0x00;
        get_random_bytes(&msg[1], SHA1_DIGEST_LENGTH);
        sha1_init(&ctx);
@@ -96,7 +97,7 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
        sha1_final(&ctx, &msg[1 + SHA1_DIGEST_LENGTH]);
        memset(&msg[1 + 2 * SHA1_DIGEST_LENGTH], 0x00, 
          msg_len - data_len - 2 * SHA1_DIGEST_LENGTH - 2);
-@@ -411,7 +412,7 @@ static int decode_message(int type, uint
+@@ -429,7 +430,7 @@ static int decode_message(int type, uint
        mask_generation(&msg[1], SHA1_DIGEST_LENGTH,
          &msg[1 + SHA1_DIGEST_LENGTH], msg_len - SHA1_DIGEST_LENGTH - 1);
        sha1_init(&ctx);
@@ -105,10 +106,10 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
        sha1_final(&ctx, &msg[1]);
        if (memcmp(&msg[1], &msg[1 + SHA1_DIGEST_LENGTH], 
            SHA1_DIGEST_LENGTH) != 0) return -1;
-diff -uprN orig/tpm_emulator-0.2-x86_64/linux_module.c vtpm/linux_module.c
---- orig/tpm_emulator-0.2-x86_64/linux_module.c        2006-05-17 09:34:13.000000000 -0700
+diff -uprN orig/tpm_emulator-0.3-x86_64/linux_module.c vtpm/linux_module.c
+--- orig/tpm_emulator-0.3-x86_64/linux_module.c        2006-05-30 12:28:02.000000000 -0700
 +++ vtpm/linux_module.c        1969-12-31 16:00:00.000000000 -0800
-@@ -1,163 +0,0 @@
+@@ -1,194 +0,0 @@
 -/* Software-Based Trusted Platform Module (TPM) Emulator for Linux 
 - * Copyright (C) 2004 Mario Strasser <mast@xxxxxxx>,
 - *
@@ -122,7 +123,7 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the 
 - * GNU General Public License for more details.
 - *
-- * $Id: linux_module.c 19 2005-05-18 08:29:37Z mast $
+- * $Id: linux_module.c 76 2006-01-02 22:17:58Z hstamer $
 - */
 -
 -#include <linux/module.h>
@@ -140,11 +141,11 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
 -
 -/* module startup parameters */
 -char *startup = "save";
--MODULE_PARM(startup, "s");
+-module_param(startup, charp, 0444);
 -MODULE_PARM_DESC(startup, " Sets the startup mode of the TPM. "
 -  "Possible values are 'clear', 'save' (default) and 'deactivated.");
--char *storage_file = "/var/tpm/tpm_emulator-1.2.0.1";
--MODULE_PARM(storage_file, "s");
+-char *storage_file = "/var/tpm/tpm_emulator-1.2.0.2";
+-module_param(storage_file, charp, 0644);
 -MODULE_PARM_DESC(storage_file, " Sets the persistent-data storage " 
 -  "file of the TPM.");
 -
@@ -172,6 +173,12 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
 -{
 -  debug("%s()", __FUNCTION__);
 -  clear_bit(STATE_IS_OPEN, (void*)&module_state);
+-  down(&tpm_mutex);
+-  if (tpm_response.data != NULL) {
+-    kfree(tpm_response.data);
+-    tpm_response.data = NULL;
+-  }
+-  up(&tpm_mutex);
 -  return 0;
 -}
 -
@@ -183,6 +190,10 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
 -    count = min(count, (size_t)tpm_response.size - (size_t)*ppos);
 -    count -= copy_to_user(buf, &tpm_response.data[*ppos], count);
 -    *ppos += count;
+-    if ((size_t)tpm_response.size == (size_t)*ppos) {
+-      kfree(tpm_response.data);
+-      tpm_response.data = NULL;
+-    }
 -  } else {
 -    count = 0;
 -  }
@@ -205,9 +216,29 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
 -  return count;
 -}
 -
+-#define TPMIOC_CANCEL   _IO('T', 0x00)
+-#define TPMIOC_TRANSMIT _IO('T', 0x01)
+-
 -static int tpm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
 -{
--  debug("%s(%d, %ld)", __FUNCTION__, cmd, arg);
+-  debug("%s(%d, %p)", __FUNCTION__, cmd, (char*)arg);
+-  if (cmd == TPMIOC_TRANSMIT) {
+-    uint32_t count = ntohl(*(uint32_t*)(arg + 2));
+-    down(&tpm_mutex);
+-    if (tpm_response.data != NULL) kfree(tpm_response.data);
+-    if (tpm_handle_command((char*)arg, count, &tpm_response.data,
+-                           &tpm_response.size) == 0) {
+-      tpm_response.size -= copy_to_user((char*)arg, tpm_response.data,
+-                            tpm_response.size);
+-      kfree(tpm_response.data);
+-      tpm_response.data = NULL;
+-    } else {
+-      tpm_response.size = 0;
+-      tpm_response.data = NULL;
+-    }
+-    up(&tpm_mutex);
+-    return tpm_response.size;
+-  }
 -  return -1;
 -}
 -
@@ -240,7 +271,7 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
 -  /* initialize TPM emulator */
 -  if (!strcmp(startup, "clear")) {
 -    tpm_emulator_init(1);
--  } else if (!strcmp(startup, "save")) { 
+-  } else if (!strcmp(startup, "save")) {
 -    tpm_emulator_init(2);
 -  } else if (!strcmp(startup, "deactivated")) {
 -    tpm_emulator_init(3);
@@ -257,6 +288,7 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
 -{
 -  tpm_emulator_shutdown();
 -  misc_deregister(&tpm_dev);
+-  if (tpm_response.data != NULL) kfree(tpm_response.data);
 -}
 -
 -module_init(init_tpm_module);
@@ -264,7 +296,7 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
 -
 -uint64_t tpm_get_ticks(void)
 -{
--  static struct timespec old_time = {0, 0}; 
+-  static struct timespec old_time = {0, 0};
 -  struct timespec new_time = current_kernel_time();
 -  uint64_t ticks = (uint64_t)(old_time.tv_sec - new_time.tv_sec) * 1000000
 -                   + (old_time.tv_nsec - new_time.tv_nsec) / 1000;
@@ -272,9 +304,9 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
 -  return (ticks > 0) ? ticks : 1;
 -}
 -
-diff -uprN orig/tpm_emulator-0.2-x86_64/linux_module.h vtpm/linux_module.h
---- orig/tpm_emulator-0.2-x86_64/linux_module.h        2006-05-17 09:34:13.000000000 -0700
-+++ vtpm/linux_module.h        2006-05-17 09:31:11.000000000 -0700
+diff -uprN orig/tpm_emulator-0.3-x86_64/linux_module.h vtpm/linux_module.h
+--- orig/tpm_emulator-0.3-x86_64/linux_module.h        2006-05-30 12:28:02.000000000 -0700
++++ vtpm/linux_module.h        2006-05-30 12:23:26.000000000 -0700
 @@ -1,5 +1,6 @@
  /* Software-Based Trusted Platform Module (TPM) Emulator for Linux
   * Copyright (C) 2004 Mario Strasser <mast@xxxxxxx>,
@@ -374,15 +406,15 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
  #define BE16_TO_CPU(x) __be16_to_cpu(x)
  #define LE16_TO_CPU(x) __le16_to_cpu(x)
  
-diff -uprN orig/tpm_emulator-0.2-x86_64/Makefile vtpm/Makefile
---- orig/tpm_emulator-0.2-x86_64/Makefile      2006-05-17 09:34:13.000000000 -0700
-+++ vtpm/Makefile      2006-05-17 09:31:11.000000000 -0700
+diff -uprN orig/tpm_emulator-0.3-x86_64/Makefile vtpm/Makefile
+--- orig/tpm_emulator-0.3-x86_64/Makefile      2006-05-30 12:28:02.000000000 -0700
++++ vtpm/Makefile      2006-05-30 12:23:26.000000000 -0700
 @@ -1,22 +1,31 @@
  # Software-Based Trusted Platform Module (TPM) Emulator for Linux
  # Copyright (C) 2004 Mario Strasser <mast@xxxxxxx>
-+# Copyright (C) 2005 INTEL Corp.
++# Copyright (C) 2006 INTEL Corp.
  #
- # $Id: Makefile 10 2005-04-26 20:59:50Z mast $
+ # $Id: Makefile 69 2005-12-13 12:55:52Z mast $
  
 -# kernel settings
 -KERNEL_RELEASE := $(shell uname -r)
@@ -394,11 +426,11 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
 -MODULE_NAME    := tpm_emulator
 +BIN            := vtpmd
  VERSION_MAJOR  := 0
- VERSION_MINOR  := 2
+ VERSION_MINOR  := 3
  VERSION_BUILD  := $(shell date +"%s")
  
 -# enable/disable DEBUG messages
--EXTRA_CFLAGS   += -DDEBUG -g  
+-EXTRA_CFLAGS   += -Wall -DDEBUG -g  
 +# Installation program and options
 +INSTALL         = install
 +INSTALL_PROG    = $(INSTALL) -m0755
@@ -468,10 +500,10 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
  
  $(src)/crypto/libgmp.a:
        test -f $(src)/crypto/libgmp.a || ln -s $(GMP_LIB) $(src)/crypto/libgmp.a
-diff -uprN orig/tpm_emulator-0.2-x86_64/README vtpm/README
---- orig/tpm_emulator-0.2-x86_64/README        2006-05-17 09:34:13.000000000 -0700
-+++ vtpm/README        2006-05-17 09:31:11.000000000 -0700
-@@ -13,7 +13,8 @@ $Id: README 8 2005-01-25 21:11:45Z jmoli
+diff -uprN orig/tpm_emulator-0.3-x86_64/README vtpm/README
+--- orig/tpm_emulator-0.3-x86_64/README        2006-05-30 12:28:02.000000000 -0700
++++ vtpm/README        2006-05-30 12:23:26.000000000 -0700
+@@ -13,7 +13,8 @@ $Id: README 78 2006-01-07 10:45:39Z mast
  Copyright
  --------------------------------------------------------------------------
  Copyright (C) 2004 Mario Strasser <mast@xxxxxxx> and Swiss Federal 
@@ -481,9 +513,9 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
                
  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
-diff -uprN orig/tpm_emulator-0.2-x86_64/tpm/tpm_audit.c vtpm/tpm/tpm_audit.c
---- orig/tpm_emulator-0.2-x86_64/tpm/tpm_audit.c       2005-08-15 00:58:57.000000000 -0700
-+++ vtpm/tpm/tpm_audit.c       2006-05-17 09:31:11.000000000 -0700
+diff -uprN orig/tpm_emulator-0.3-x86_64/tpm/tpm_audit.c vtpm/tpm/tpm_audit.c
+--- orig/tpm_emulator-0.3-x86_64/tpm/tpm_audit.c       2006-01-10 04:21:45.000000000 -0800
++++ vtpm/tpm/tpm_audit.c       2006-05-30 12:23:26.000000000 -0700
 @@ -1,6 +1,7 @@
  /* Software-Based Trusted Platform Module (TPM) Emulator for Linux
   * Copyright (C) 2004 Mario Strasser <mast@xxxxxxx>,
@@ -546,9 +578,9 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
    return TPM_SUCCESS;
  }
 -
-diff -uprN orig/tpm_emulator-0.2-x86_64/tpm/tpm_authorization.c vtpm/tpm/tpm_authorization.c
---- orig/tpm_emulator-0.2-x86_64/tpm/tpm_authorization.c       2005-08-15 00:58:57.000000000 -0700
-+++ vtpm/tpm/tpm_authorization.c       2006-05-17 09:31:11.000000000 -0700
+diff -uprN orig/tpm_emulator-0.3-x86_64/tpm/tpm_authorization.c vtpm/tpm/tpm_authorization.c
+--- orig/tpm_emulator-0.3-x86_64/tpm/tpm_authorization.c       2006-01-10 04:21:45.000000000 -0800
++++ vtpm/tpm/tpm_authorization.c       2006-05-30 12:23:26.000000000 -0700
 @@ -1,6 +1,7 @@
  /* Software-Based Trusted Platform Module (TPM) Emulator for Linux
   * Copyright (C) 2004 Mario Strasser <mast@xxxxxxx>,
@@ -557,7 +589,7 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
   *
   * This module is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published
-@@ -268,7 +269,7 @@ TPM_RESULT tpm_verify_auth(TPM_AUTH *aut
+@@ -279,7 +280,7 @@ TPM_RESULT tpm_verify_auth(TPM_AUTH *aut
  {
    hmac_ctx_t ctx;
    TPM_SESSION_DATA *session;
@@ -565,16 +597,10 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
 +  UINT32 auth_handle = CPU_TO_BE32(auth->authHandle);
    
    info("tpm_verify_auth(%08x)", auth->authHandle);
-   /* get dedicated authorization session */
-@@ -316,5 +317,3 @@ void tpm_decrypt_auth_secret(TPM_ENCAUTH
-   for (i = 0; i < sizeof(TPM_SECRET); i++)
-     plainAuth[i] ^= encAuth[i];
- }
--
--
-diff -uprN orig/tpm_emulator-0.2-x86_64/tpm/tpm_capability.c vtpm/tpm/tpm_capability.c
---- orig/tpm_emulator-0.2-x86_64/tpm/tpm_capability.c  2005-08-15 00:58:57.000000000 -0700
-+++ vtpm/tpm/tpm_capability.c  2006-05-17 09:31:11.000000000 -0700
+   /* get dedicated authorization or transport session */
+diff -uprN orig/tpm_emulator-0.3-x86_64/tpm/tpm_capability.c vtpm/tpm/tpm_capability.c
+--- orig/tpm_emulator-0.3-x86_64/tpm/tpm_capability.c  2006-01-10 04:21:45.000000000 -0800
++++ vtpm/tpm/tpm_capability.c  2006-05-30 12:23:26.000000000 -0700
 @@ -1,6 +1,7 @@
  /* Software-Based Trusted Platform Module (TPM) Emulator for Linux
   * Copyright (C) 2004 Mario Strasser <mast@xxxxxxx>,
@@ -583,7 +609,7 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
   *
   * This module is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published
-@@ -398,7 +399,7 @@ TPM_RESULT TPM_GetCapability(TPM_CAPABIL
+@@ -406,7 +407,7 @@ TPM_RESULT TPM_GetCapability(TPM_CAPABIL
  
      case TPM_CAP_KEY_HANDLE:
        debug("[TPM_CAP_KEY_HANDLE]");
@@ -592,14 +618,14 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
        return cap_handle(4, (BYTE*)&subCapSize, respSize, resp);
  
      case TPM_CAP_CHECK_LOADED:
-@@ -472,4 +473,3 @@ TPM_RESULT TPM_GetCapability(TPM_CAPABIL
+@@ -480,4 +481,3 @@ TPM_RESULT TPM_GetCapability(TPM_CAPABIL
        return TPM_BAD_MODE;
    }
  }
 -
-diff -uprN orig/tpm_emulator-0.2-x86_64/tpm/tpm_cmd_handler.c vtpm/tpm/tpm_cmd_handler.c
---- orig/tpm_emulator-0.2-x86_64/tpm/tpm_cmd_handler.c 2005-08-15 00:58:57.000000000 -0700
-+++ vtpm/tpm/tpm_cmd_handler.c 2006-05-17 09:31:11.000000000 -0700
+diff -uprN orig/tpm_emulator-0.3-x86_64/tpm/tpm_cmd_handler.c vtpm/tpm/tpm_cmd_handler.c
+--- orig/tpm_emulator-0.3-x86_64/tpm/tpm_cmd_handler.c 2006-01-10 04:21:45.000000000 -0800
++++ vtpm/tpm/tpm_cmd_handler.c 2006-05-30 12:23:26.000000000 -0700
 @@ -1,6 +1,7 @@
  /* Software-Based Trusted Platform Module (TPM) Emulator for Linux
   * Copyright (C) 2004 Mario Strasser <mast@xxxxxxx>,
@@ -608,17 +634,17 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
   *
   * This module is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published
-@@ -26,7 +27,7 @@ static void tpm_compute_in_param_digest(
+@@ -73,7 +74,7 @@ void tpm_compute_in_param_digest(TPM_REQ
  {
    sha1_ctx_t sha1;
-   UINT32 offset;
+   UINT32 offset = tpm_get_param_offset(req->ordinal);
 -  UINT32 ord = cpu_to_be32(req->ordinal);
 +  UINT32 ord = CPU_TO_BE32(req->ordinal);
  
-   /* skip all key-handles at the beginning */
-   switch (req->ordinal) {
-@@ -82,8 +83,8 @@ static void tpm_compute_in_param_digest(
- static void tpm_compute_out_param_digest(TPM_COMMAND_CODE ordinal, TPM_RESPONSE *rsp)
+   /* compute SHA1 hash */
+   if (offset <= req->paramSize) {
+@@ -89,8 +90,8 @@ void tpm_compute_in_param_digest(TPM_REQ
+ void tpm_compute_out_param_digest(TPM_COMMAND_CODE ordinal, TPM_RESPONSE *rsp)
  {
    sha1_ctx_t sha1;
 -  UINT32 res = cpu_to_be32(rsp->result);
@@ -628,7 +654,7 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
  
    /* compute SHA1 hash */
    sha1_init(&sha1);
-@@ -3081,7 +3082,7 @@ static void tpm_setup_rsp_auth(TPM_COMMA
+@@ -3123,7 +3124,7 @@ static void tpm_setup_rsp_auth(TPM_COMMA
        hmac_update(&hmac, rsp->auth2->digest, sizeof(rsp->auth2->digest));
  #if 0
        if (tpm_get_auth(rsp->auth2->authHandle)->type == TPM_ST_OIAP) {
@@ -637,7 +663,7 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
          hmac_update(&hmac, (BYTE*)&handle, 4);
        }
  #endif
-@@ -3096,7 +3097,7 @@ static void tpm_setup_rsp_auth(TPM_COMMA
+@@ -3138,7 +3139,7 @@ static void tpm_setup_rsp_auth(TPM_COMMA
        hmac_update(&hmac, rsp->auth1->digest, sizeof(rsp->auth1->digest));
  #if 0
        if (tpm_get_auth(rsp->auth1->authHandle)->type == TPM_ST_OIAP) {
@@ -646,25 +672,20 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
          hmac_update(&hmac, (BYTE*)&handle, 4);
        }
  #endif
-@@ -3179,7 +3180,9 @@ extern const char *tpm_error_to_string(T
- static void tpm_execute_command(TPM_REQUEST *req, TPM_RESPONSE *rsp)
+@@ -3221,7 +3222,9 @@ extern const char *tpm_error_to_string(T
+ void tpm_execute_command(TPM_REQUEST *req, TPM_RESPONSE *rsp)
  {
    TPM_RESULT res;
 -  
 +
-+  req->tag = (BYTE) req->tag;  // VIN HACK!!! 
++  req->tag = (BYTE) req->tag;  // FIXME: Why is this here
 +
    /* setup authorisation as well as response tag and size */
    memset(rsp, 0, sizeof(*rsp));
    switch (req->tag) {
-@@ -3878,4 +3881,3 @@ int tpm_handle_command(const uint8_t *in
-   tpm_free(rsp.param);
-   return 0;
- }
--
-diff -uprN orig/tpm_emulator-0.2-x86_64/tpm/tpm_crypto.c vtpm/tpm/tpm_crypto.c
---- orig/tpm_emulator-0.2-x86_64/tpm/tpm_crypto.c      2006-05-17 09:34:13.000000000 -0700
-+++ vtpm/tpm/tpm_crypto.c      2006-05-17 09:31:11.000000000 -0700
+diff -uprN orig/tpm_emulator-0.3-x86_64/tpm/tpm_crypto.c vtpm/tpm/tpm_crypto.c
+--- orig/tpm_emulator-0.3-x86_64/tpm/tpm_crypto.c      2006-05-30 12:28:02.000000000 -0700
++++ vtpm/tpm/tpm_crypto.c      2006-05-30 12:23:26.000000000 -0700
 @@ -1,6 +1,7 @@
  /* Software-Based Trusted Platform Module (TPM) Emulator for Linux
   * Copyright (C) 2004 Mario Strasser <mast@xxxxxxx>,
@@ -683,13 +704,170 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
      if (rsa_sign(&key->key, RSA_SSA_PKCS1_SHA1, 
          buf, areaToSignSize + 30, *sig)) {
 @@ -383,4 +384,3 @@ TPM_RESULT TPM_CertifyKey2(TPM_KEY_HANDL
-   }  
+   }
    return TPM_SUCCESS;
  }
 -
-diff -uprN orig/tpm_emulator-0.2-x86_64/tpm/tpm_data.c vtpm/tpm/tpm_data.c
---- orig/tpm_emulator-0.2-x86_64/tpm/tpm_data.c        2006-05-17 09:34:13.000000000 -0700
-+++ vtpm/tpm/tpm_data.c        2006-05-17 09:31:11.000000000 -0700
+diff -uprN orig/tpm_emulator-0.3-x86_64/tpm/tpm_daa.c vtpm/tpm/tpm_daa.c
+--- orig/tpm_emulator-0.3-x86_64/tpm/tpm_daa.c 2006-01-10 04:21:45.000000000 -0800
++++ vtpm/tpm/tpm_daa.c 2006-05-30 12:23:26.000000000 -0700
+@@ -700,14 +700,14 @@ info("tested until here");
+           sizeof(session->DAA_tpmSpecific.DAA_rekey));
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_count, 
+           sizeof(session->DAA_tpmSpecific.DAA_count));
+-      sha1_update(&sha1, "\x00", 1);
++      sha1_update(&sha1, (BYTE *) "\x00", 1);
+       sha1_final(&sha1, scratch);
+       sha1_init(&sha1);
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_rekey, 
+           sizeof(session->DAA_tpmSpecific.DAA_rekey));
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_count, 
+           sizeof(session->DAA_tpmSpecific.DAA_count));
+-      sha1_update(&sha1, "\x01", 1);
++      sha1_update(&sha1, (BYTE *) "\x01", 1);
+       sha1_final(&sha1, scratch + SHA1_DIGEST_LENGTH);
+       mpz_init(f), mpz_init(q);
+       mpz_import(f, 2 * SHA1_DIGEST_LENGTH, 1, 1, 0, 0, scratch);
+@@ -787,14 +787,14 @@ info("tested until here");
+           sizeof(session->DAA_tpmSpecific.DAA_rekey));
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_count, 
+           sizeof(session->DAA_tpmSpecific.DAA_count));
+-      sha1_update(&sha1, "\x00", 1);
++      sha1_update(&sha1, (BYTE *) "\x00", 1);
+       sha1_final(&sha1, scratch);
+       sha1_init(&sha1);
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_rekey, 
+           sizeof(session->DAA_tpmSpecific.DAA_rekey));
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_count, 
+           sizeof(session->DAA_tpmSpecific.DAA_count));
+-      sha1_update(&sha1, "\x01", 1);
++      sha1_update(&sha1, (BYTE *) "\x01", 1);
+       sha1_final(&sha1, scratch + SHA1_DIGEST_LENGTH);
+       mpz_init(f), mpz_init(q);
+       mpz_import(f, 2 * SHA1_DIGEST_LENGTH, 1, 1, 0, 0, scratch);
+@@ -1440,14 +1440,14 @@ info("tested until here");
+           sizeof(session->DAA_tpmSpecific.DAA_rekey));
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_count, 
+           sizeof(session->DAA_tpmSpecific.DAA_count));
+-      sha1_update(&sha1, "\x00", 1);
++      sha1_update(&sha1, (BYTE *) "\x00", 1);
+       sha1_final(&sha1, scratch);
+       sha1_init(&sha1);
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_rekey, 
+           sizeof(session->DAA_tpmSpecific.DAA_rekey));
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_count, 
+           sizeof(session->DAA_tpmSpecific.DAA_count));
+-      sha1_update(&sha1, "\x01", 1);
++      sha1_update(&sha1, (BYTE *) "\x01", 1);
+       sha1_final(&sha1, scratch + SHA1_DIGEST_LENGTH);
+       mpz_init(f), mpz_init(q);
+       mpz_import(f, 2 * SHA1_DIGEST_LENGTH, 1, 1, 0, 0, scratch);
+@@ -1660,14 +1660,14 @@ info("tested until here");
+           sizeof(session->DAA_tpmSpecific.DAA_rekey));
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_count, 
+           sizeof(session->DAA_tpmSpecific.DAA_count));
+-      sha1_update(&sha1, "\x00", 1);
++      sha1_update(&sha1, (BYTE *) "\x00", 1);
+       sha1_final(&sha1, scratch);
+       sha1_init(&sha1);
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_rekey, 
+           sizeof(session->DAA_tpmSpecific.DAA_rekey));
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_count, 
+           sizeof(session->DAA_tpmSpecific.DAA_count));
+-      sha1_update(&sha1, "\x01", 1);
++      sha1_update(&sha1, (BYTE *) "\x01", 1);
+       sha1_final(&sha1, scratch + SHA1_DIGEST_LENGTH);
+       mpz_init(f), mpz_init(q);
+       mpz_import(f, 2 * SHA1_DIGEST_LENGTH, 1, 1, 0, 0, scratch);
+@@ -1740,14 +1740,14 @@ info("tested until here");
+           sizeof(session->DAA_tpmSpecific.DAA_rekey));
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_count, 
+           sizeof(session->DAA_tpmSpecific.DAA_count));
+-      sha1_update(&sha1, "\x00", 1);
++      sha1_update(&sha1, (BYTE *) "\x00", 1);
+       sha1_final(&sha1, scratch);
+       sha1_init(&sha1);
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_rekey, 
+           sizeof(session->DAA_tpmSpecific.DAA_rekey));
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_count, 
+           sizeof(session->DAA_tpmSpecific.DAA_count));
+-      sha1_update(&sha1, "\x01", 1);
++      sha1_update(&sha1, (BYTE *) "\x01", 1);
+       sha1_final(&sha1, scratch + SHA1_DIGEST_LENGTH);
+       mpz_init(f), mpz_init(q);
+       mpz_import(f, 2 * SHA1_DIGEST_LENGTH, 1, 1, 0, 0, scratch);
+@@ -2828,14 +2828,14 @@ TPM_RESULT TPM_DAA_Sign(TPM_HANDLE handl
+           sizeof(session->DAA_tpmSpecific.DAA_rekey));
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_count, 
+           sizeof(session->DAA_tpmSpecific.DAA_count));
+-      sha1_update(&sha1, "\x00", 1);
++      sha1_update(&sha1, (BYTE *) "\x00", 1);
+       sha1_final(&sha1, scratch);
+       sha1_init(&sha1);
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_rekey, 
+           sizeof(session->DAA_tpmSpecific.DAA_rekey));
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_count, 
+           sizeof(session->DAA_tpmSpecific.DAA_count));
+-      sha1_update(&sha1, "\x01", 1);
++      sha1_update(&sha1, (BYTE *) "\x01", 1);
+       sha1_final(&sha1, scratch + SHA1_DIGEST_LENGTH);
+       mpz_init(f), mpz_init(q);
+       mpz_import(f, 2 * SHA1_DIGEST_LENGTH, 1, 1, 0, 0, scratch);
+@@ -3050,7 +3050,7 @@ TPM_RESULT TPM_DAA_Sign(TPM_HANDLE handl
+         sha1_init(&sha1);
+         sha1_update(&sha1, (BYTE*) &session->DAA_session.DAA_digest, 
+           sizeof(session->DAA_session.DAA_digest));
+-        sha1_update(&sha1, "\x01", 1);
++        sha1_update(&sha1, (BYTE *) "\x01", 1);
+         sha1_update(&sha1, inputData1, inputSize1);
+         sha1_final(&sha1, (BYTE*) &session->DAA_session.DAA_digest);
+       }
+@@ -3078,7 +3078,7 @@ TPM_RESULT TPM_DAA_Sign(TPM_HANDLE handl
+         sha1_init(&sha1);
+         sha1_update(&sha1, (BYTE*) &session->DAA_session.DAA_digest, 
+           sizeof(session->DAA_session.DAA_digest));
+-        sha1_update(&sha1, "\x01", 1);
++        sha1_update(&sha1, (BYTE *) "\x01", 1);
+         rsa_export_modulus(&aikData->key, scratch, &size);
+         sha1_update(&sha1, scratch, size);
+         sha1_final(&sha1, (BYTE*) &session->DAA_session.DAA_digest);
+@@ -3134,14 +3134,14 @@ TPM_RESULT TPM_DAA_Sign(TPM_HANDLE handl
+           sizeof(session->DAA_tpmSpecific.DAA_rekey));
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_count, 
+           sizeof(session->DAA_tpmSpecific.DAA_count));
+-      sha1_update(&sha1, "\x00", 1);
++      sha1_update(&sha1, (BYTE *) "\x00", 1);
+       sha1_final(&sha1, scratch);
+       sha1_init(&sha1);
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_rekey, 
+           sizeof(session->DAA_tpmSpecific.DAA_rekey));
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_count, 
+           sizeof(session->DAA_tpmSpecific.DAA_count));
+-      sha1_update(&sha1, "\x01", 1);
++      sha1_update(&sha1, (BYTE *) "\x01", 1);
+       sha1_final(&sha1, scratch + SHA1_DIGEST_LENGTH);
+       mpz_init(f), mpz_init(q);
+       mpz_import(f, 2 * SHA1_DIGEST_LENGTH, 1, 1, 0, 0, scratch);
+@@ -3213,14 +3213,14 @@ TPM_RESULT TPM_DAA_Sign(TPM_HANDLE handl
+           sizeof(session->DAA_tpmSpecific.DAA_rekey));
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_count, 
+           sizeof(session->DAA_tpmSpecific.DAA_count));
+-      sha1_update(&sha1, "\x00", 1);
++      sha1_update(&sha1, (BYTE *) "\x00", 1);
+       sha1_final(&sha1, scratch);
+       sha1_init(&sha1);
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_rekey, 
+           sizeof(session->DAA_tpmSpecific.DAA_rekey));
+       sha1_update(&sha1, (BYTE*) &session->DAA_tpmSpecific.DAA_count, 
+           sizeof(session->DAA_tpmSpecific.DAA_count));
+-      sha1_update(&sha1, "\x01", 1);
++      sha1_update(&sha1, (BYTE *) "\x01", 1);
+       sha1_final(&sha1, scratch + SHA1_DIGEST_LENGTH);
+       mpz_init(f), mpz_init(q);
+       mpz_import(f, 2 * SHA1_DIGEST_LENGTH, 1, 1, 0, 0, scratch);
+diff -uprN orig/tpm_emulator-0.3-x86_64/tpm/tpm_data.c vtpm/tpm/tpm_data.c
+--- orig/tpm_emulator-0.3-x86_64/tpm/tpm_data.c        2006-05-30 12:28:02.000000000 -0700
++++ vtpm/tpm/tpm_data.c        2006-05-30 12:23:26.000000000 -0700
 @@ -1,6 +1,7 @@
  /* Software-Based Trusted Platform Module (TPM) Emulator for Linux
   * Copyright (C) 2004 Mario Strasser <mast@xxxxxxx>,
@@ -698,8 +876,8 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
   *
   * This module is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published
-@@ -15,9 +16,15 @@
-  * $Id: tpm_data.c 9 2005-04-26 18:15:31Z mast $
+@@ -15,10 +16,15 @@
+  * $Id: tpm_data.c 36 2005-10-26 20:31:19Z hstamer $
   */
  
 +#include <sys/types.h>
@@ -710,11 +888,12 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
  #include "tpm_emulator.h"
  #include "tpm_structures.h"
  #include "tpm_marshalling.h"
+-#include "linux_module.h"
 +#include "vtpm_manager.h"
  
  TPM_DATA tpmData;
  
-@@ -28,6 +35,7 @@ BOOL tpm_get_physical_presence(void)
+@@ -39,6 +45,7 @@ static inline void init_pcr_attr(int pcr
  
  void tpm_init_data(void)
  {
@@ -722,7 +901,7 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
    /* endorsement key */
    uint8_t ek_n[] =  "\xa8\xdb\xa9\x42\xa8\xf3\xb8\x06\x85\x90\x76\x93\xad\xf7"
      "\x74\xec\x3f\xd3\x3d\x9d\xe8\x2e\xff\x15\xed\x0e\xce\x5f\x93"
-@@ -66,6 +74,8 @@ void tpm_init_data(void)
+@@ -77,6 +84,8 @@ void tpm_init_data(void)
      "\xd1\xc0\x8b\x5b\xa2\x2e\xa7\x15\xca\x50\x75\x10\x48\x9c\x2b"
      "\x18\xb9\x67\x8f\x5d\x64\xc3\x28\x9f\x2f\x16\x2f\x08\xda\x47"
      "\xec\x86\x43\x0c\x80\x99\x07\x34\x0f";
@@ -731,18 +910,7 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
    int i;
    /* reset all data to NULL, FALSE or 0 */
    memset(&tpmData, 0, sizeof(tpmData));
-@@ -85,6 +95,10 @@ void tpm_init_data(void)
-   tpmData.permanent.data.version.revMinor = VERSION_MINOR;
-   /* setup PCR attributes */
-   for (i = 0; i < TPM_NUM_PCR; i++) {
-+    int j;
-+    for (j=0; j < TPM_NUM_LOCALITY; j++) {
-+      tpmData.permanent.data.pcrAttrib[i].pcrExtendLocal[j] = TRUE;
-+    }
-     tpmData.permanent.data.pcrAttrib[i].pcrReset = TRUE;
-   }
-   /* set tick type */
-@@ -115,49 +129,235 @@ void tpm_release_data(void)
+@@ -150,49 +159,235 @@ void tpm_release_data(void)
  
  #ifdef TPM_STORE_TO_FILE
  
@@ -1009,7 +1177,7 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
  }
  
  #else
-@@ -232,7 +432,6 @@ int tpm_restore_permanent_data(void)
+@@ -267,7 +462,6 @@ int tpm_restore_permanent_data(void)
  
  int tpm_erase_permanent_data(void)
  {
@@ -1018,9 +1186,9 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
    return res;
  }
 -
-diff -uprN orig/tpm_emulator-0.2-x86_64/tpm/tpm_deprecated.c vtpm/tpm/tpm_deprecated.c
---- orig/tpm_emulator-0.2-x86_64/tpm/tpm_deprecated.c  2005-08-15 00:58:57.000000000 -0700
-+++ vtpm/tpm/tpm_deprecated.c  2006-05-17 09:31:11.000000000 -0700
+diff -uprN orig/tpm_emulator-0.3-x86_64/tpm/tpm_deprecated.c vtpm/tpm/tpm_deprecated.c
+--- orig/tpm_emulator-0.3-x86_64/tpm/tpm_deprecated.c  2006-01-10 04:21:45.000000000 -0800
++++ vtpm/tpm/tpm_deprecated.c  2006-05-30 12:23:26.000000000 -0700
 @@ -1,6 +1,7 @@
  /* Software-Based Trusted Platform Module (TPM) Emulator for Linux
   * Copyright (C) 2004 Mario Strasser <mast@xxxxxxx>,
@@ -1047,9 +1215,9 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
                          authContextSize, &contextBlob);
    if (res != TPM_SUCCESS) return res;
    len = *authContextSize;
-diff -uprN orig/tpm_emulator-0.2-x86_64/tpm/tpm_emulator.h vtpm/tpm/tpm_emulator.h
---- orig/tpm_emulator-0.2-x86_64/tpm/tpm_emulator.h    2005-08-15 00:58:57.000000000 -0700
-+++ vtpm/tpm/tpm_emulator.h    2006-05-17 09:31:11.000000000 -0700
+diff -uprN orig/tpm_emulator-0.3-x86_64/tpm/tpm_emulator.h vtpm/tpm/tpm_emulator.h
+--- orig/tpm_emulator-0.3-x86_64/tpm/tpm_emulator.h    2006-01-10 04:21:45.000000000 -0800
++++ vtpm/tpm/tpm_emulator.h    2006-05-30 12:23:26.000000000 -0700
 @@ -1,5 +1,6 @@
  /* Software-Based Trusted Platform Module (TPM) Emulator for Linux
   * Copyright (C) 2004 Mario Strasser <mast@xxxxxxx>,
@@ -1064,12 +1232,12 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
 -#undef  TPM_GENERATE_EK
 +//#undef  TPM_GENERATE_EK
 +#define  TPM_GENERATE_EK
- 
- /**
-  * tpm_emulator_init - initialises and starts the TPM emulator
-diff -uprN orig/tpm_emulator-0.2-x86_64/tpm/tpm_integrity.c vtpm/tpm/tpm_integrity.c
---- orig/tpm_emulator-0.2-x86_64/tpm/tpm_integrity.c   2005-08-15 00:58:57.000000000 -0700
-+++ vtpm/tpm/tpm_integrity.c   2006-05-17 09:31:11.000000000 -0700
+ #undef  TPM_GENERATE_SEED_DAA
+ 
+ #define TPM_MANUFACTURER 0x4554485A /* 'ETHZ' */        
+diff -uprN orig/tpm_emulator-0.3-x86_64/tpm/tpm_integrity.c vtpm/tpm/tpm_integrity.c
+--- orig/tpm_emulator-0.3-x86_64/tpm/tpm_integrity.c   2006-01-10 04:21:45.000000000 -0800
++++ vtpm/tpm/tpm_integrity.c   2006-05-30 12:23:26.000000000 -0700
 @@ -1,6 +1,7 @@
  /* Software-Based Trusted Platform Module (TPM) Emulator for Linux
   * Copyright (C) 2004 Mario Strasser <mast@xxxxxxx>,
@@ -1083,9 +1251,9 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
    return TPM_SUCCESS;
  }
 -
-diff -uprN orig/tpm_emulator-0.2-x86_64/tpm/tpm_structures.h vtpm/tpm/tpm_structures.h
---- orig/tpm_emulator-0.2-x86_64/tpm/tpm_structures.h  2005-08-15 00:58:57.000000000 -0700
-+++ vtpm/tpm/tpm_structures.h  2006-05-17 09:31:11.000000000 -0700
+diff -uprN orig/tpm_emulator-0.3-x86_64/tpm/tpm_structures.h vtpm/tpm/tpm_structures.h
+--- orig/tpm_emulator-0.3-x86_64/tpm/tpm_structures.h  2006-01-10 04:21:45.000000000 -0800
++++ vtpm/tpm/tpm_structures.h  2006-05-30 12:23:26.000000000 -0700
 @@ -1,6 +1,7 @@
  /* Software-Based Trusted Platform Module (TPM) Emulator for Linux
   * Copyright (C) 2004 Mario Strasser <mast@xxxxxxx>,
@@ -1103,9 +1271,9 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
  #include "crypto/rsa.h"
  
  /*
-diff -uprN orig/tpm_emulator-0.2-x86_64/tpm/tpm_testing.c vtpm/tpm/tpm_testing.c
---- orig/tpm_emulator-0.2-x86_64/tpm/tpm_testing.c     2005-08-15 00:58:57.000000000 -0700
-+++ vtpm/tpm/tpm_testing.c     2006-05-17 09:31:11.000000000 -0700
+diff -uprN orig/tpm_emulator-0.3-x86_64/tpm/tpm_testing.c vtpm/tpm/tpm_testing.c
+--- orig/tpm_emulator-0.3-x86_64/tpm/tpm_testing.c     2006-01-10 04:21:45.000000000 -0800
++++ vtpm/tpm/tpm_testing.c     2006-05-30 12:23:26.000000000 -0700
 @@ -1,6 +1,7 @@
  /* Software-Based Trusted Platform Module (TPM) Emulator for Linux
   * Copyright (C) 2004 Mario Strasser <mast@xxxxxxx>,
@@ -1221,9 +1389,9 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
    rsa_private_key_t priv_key;
    rsa_public_key_t pub_key;
  
-diff -uprN orig/tpm_emulator-0.2-x86_64/tpm/tpm_ticks.c vtpm/tpm/tpm_ticks.c
---- orig/tpm_emulator-0.2-x86_64/tpm/tpm_ticks.c       2005-08-15 00:58:57.000000000 -0700
-+++ vtpm/tpm/tpm_ticks.c       2006-05-17 09:31:11.000000000 -0700
+diff -uprN orig/tpm_emulator-0.3-x86_64/tpm/tpm_ticks.c vtpm/tpm/tpm_ticks.c
+--- orig/tpm_emulator-0.3-x86_64/tpm/tpm_ticks.c       2006-01-10 04:21:45.000000000 -0800
++++ vtpm/tpm/tpm_ticks.c       2006-05-30 12:23:26.000000000 -0700
 @@ -1,6 +1,7 @@
  /* Software-Based Trusted Platform Module (TPM) Emulator for Linux
   * Copyright (C) 2004 Mario Strasser <mast@xxxxxxx>,
@@ -1306,9 +1474,69 @@ diff -uprN orig/tpm_emulator-0.2-x86_64/
  }
    
  
-diff -uprN orig/tpm_emulator-0.2-x86_64/tpmd.c vtpm/tpmd.c
---- orig/tpm_emulator-0.2-x86_64/tpmd.c        1969-12-31 16:00:00.000000000 -0800
-+++ vtpm/tpmd.c        2006-05-17 09:31:11.000000000 -0700
+diff -uprN orig/tpm_emulator-0.3-x86_64/tpm/tpm_transport.c vtpm/tpm/tpm_transport.c
+--- orig/tpm_emulator-0.3-x86_64/tpm/tpm_transport.c   2006-01-10 04:21:45.000000000 -0800
++++ vtpm/tpm/tpm_transport.c   2006-05-30 12:23:26.000000000 -0700
+@@ -59,7 +59,7 @@ static int decrypt_transport_auth(TPM_KE
+ static void transport_log_in(TPM_COMMAND_CODE ordinal, BYTE parameters[20],
+                              BYTE pubKeyHash[20], TPM_DIGEST *transDigest)
+ {
+-  UINT32 tag = cpu_to_be32(TPM_TAG_TRANSPORT_LOG_IN);
++  UINT32 tag = CPU_TO_BE32(TPM_TAG_TRANSPORT_LOG_IN);
+   BYTE *ptr, buf[sizeof_TPM_TRANSPORT_LOG_IN(x)];
+   UINT32 len = sizeof(buf);
+   sha1_ctx_t sha1;
+@@ -76,7 +76,7 @@ static void transport_log_in(TPM_COMMAND
+ static void transport_log_out(TPM_CURRENT_TICKS *currentTicks, BYTE parameters[20],
+                               TPM_MODIFIER_INDICATOR locality, TPM_DIGEST *transDigest)
+ {
+-  UINT32 tag = cpu_to_be32(TPM_TAG_TRANSPORT_LOG_OUT);
++  UINT32 tag = CPU_TO_BE32(TPM_TAG_TRANSPORT_LOG_OUT);
+   BYTE *ptr, buf[sizeof_TPM_TRANSPORT_LOG_OUT(x)];
+   UINT32 len = sizeof(buf);
+   sha1_ctx_t sha1;
+@@ -191,7 +191,7 @@ static void decrypt_wrapped_command(BYTE
+     sha1_update(&sha1, auth->nonceOdd.nonce, sizeof(auth->nonceOdd.nonce));
+     sha1_update(&sha1, "in", 2);
+     sha1_update(&sha1, secret, sizeof(TPM_SECRET));
+-    j = cpu_to_be32(i);
++    j = CPU_TO_BE32(i);
+     sha1_update(&sha1, (BYTE*)&j, 4);
+     sha1_final(&sha1, mask);
+     for (j = 0; j < sizeof(mask) && buf_len > 0; j++) { 
+@@ -213,7 +213,7 @@ static void encrypt_wrapped_command(BYTE
+     sha1_update(&sha1, auth->nonceOdd.nonce, sizeof(auth->nonceOdd.nonce));
+     sha1_update(&sha1, "out", 3);
+     sha1_update(&sha1, secret, sizeof(TPM_SECRET));
+-    j = cpu_to_be32(i);
++    j = CPU_TO_BE32(i);
+     sha1_update(&sha1, (BYTE*)&j, 4);
+     sha1_final(&sha1, mask);
+     for (j = 0; j < sizeof(mask) && buf_len > 0; j++) { 
+@@ -253,9 +253,9 @@ TPM_RESULT TPM_ExecuteTransport(UINT32 i
+   /* verify authorization */
+   tpm_compute_in_param_digest(&req);
+   sha1_init(&sha1);
+-  res = cpu_to_be32(TPM_ORD_ExecuteTransport);
++  res = CPU_TO_BE32(TPM_ORD_ExecuteTransport);
+   sha1_update(&sha1, (BYTE*)&res, 4);
+-  res = cpu_to_be32(inWrappedCmdSize);
++  res = CPU_TO_BE32(inWrappedCmdSize);
+   sha1_update(&sha1, (BYTE*)&res, 4);
+   sha1_update(&sha1, req.auth1.digest, sizeof(req.auth1.digest));
+   sha1_final(&sha1, auth1->digest);
+@@ -357,7 +357,7 @@ TPM_RESULT TPM_ReleaseTransportSigned(TP
+   /* setup a TPM_SIGN_INFO structure */
+   memcpy(&buf[0], "\x05\x00TRAN", 6);
+   memcpy(&buf[6], antiReplay->nonce, 20);
+-  *(UINT32*)&buf[26] = cpu_to_be32(20);
++  *(UINT32*)&buf[26] = CPU_TO_BE32(20);
+   memcpy(&buf[30], session->transInternal.transDigest.digest, 20);
+   /* sign info structure */ 
+   res = tpm_sign(key, auth1, TRUE, buf, sizeof(buf), signature, signSize);
+diff -uprN orig/tpm_emulator-0.3-x86_64/tpmd.c vtpm/tpmd.c
+--- orig/tpm_emulator-0.3-x86_64/tpmd.c        1969-12-31 16:00:00.000000000 -0800
++++ vtpm/tpmd.c        2006-05-30 12:23:26.000000000 -0700
 @@ -0,0 +1,207 @@
 +/* Software-Based Trusted Platform Module (TPM) Emulator for Linux
 + * Copyright (C) 2005 INTEL Corp
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/Rules.mk
--- a/xen/arch/ia64/Rules.mk    Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/Rules.mk    Fri Jun 02 12:31:48 2006 -0500
@@ -11,25 +11,25 @@ endif
 endif
 
 # Used only by linux/Makefile.
-AFLAGS_KERNEL  += -mconstant-gp
+AFLAGS_KERNEL  += -mconstant-gp -nostdinc $(CPPFLAGS)
 
 # Note: .S -> .o rule uses AFLAGS and CFLAGS.
 
-CFLAGS  += -nostdinc -fno-builtin -fno-common -fno-strict-aliasing
-CFLAGS  += -mconstant-gp
+CFLAGS += -nostdinc -fno-builtin -fno-common -fno-strict-aliasing
+CFLAGS += -mconstant-gp
 #CFLAGS  += -O3                # -O3 over-inlines making debugging tough!
-CFLAGS  += -O2         # but no optimization causes compile errors!
-CFLAGS  += -fomit-frame-pointer -D__KERNEL__
-CFLAGS  += -iwithprefix include
-CPPFLAGS+= -I$(BASEDIR)/include                                         \
-           -I$(BASEDIR)/include/asm-ia64                                \
-           -I$(BASEDIR)/include/asm-ia64/linux                                 \
-           -I$(BASEDIR)/include/asm-ia64/linux-xen                     \
+CFLAGS += -O2          # but no optimization causes compile errors!
+CFLAGS += -fomit-frame-pointer -D__KERNEL__
+CFLAGS += -iwithprefix include
+CPPFLAGS+= -I$(BASEDIR)/include                                                \
+          -I$(BASEDIR)/include/asm-ia64                                \
+          -I$(BASEDIR)/include/asm-ia64/linux                          \
+          -I$(BASEDIR)/include/asm-ia64/linux-xen                      \
           -I$(BASEDIR)/include/asm-ia64/linux-null                     \
-           -I$(BASEDIR)/arch/ia64/linux -I$(BASEDIR)/arch/ia64/linux-xen
-CFLAGS += $(CPPFLAGS)
+          -I$(BASEDIR)/arch/ia64/linux -I$(BASEDIR)/arch/ia64/linux-xen
+CFLAGS += $(CPPFLAGS)
 #CFLAGS  += -Wno-pointer-arith -Wredundant-decls
-CFLAGS  += -DIA64 -DXEN -DLINUX_2_6 -DV_IOSAPIC_READY
+CFLAGS += -DIA64 -DXEN -DLINUX_2_6 -DV_IOSAPIC_READY
 CFLAGS += -ffixed-r13 -mfixed-range=f2-f5,f12-f127
 CFLAGS += -g
 #CFLAGS  += -DVTI_DEBUG
@@ -40,7 +40,7 @@ CFLAGS        += -DCONFIG_XEN_IA64_DOM0_VP
 CFLAGS += -DCONFIG_XEN_IA64_DOM0_VP
 endif
 ifeq ($(no_warns),y)
-CFLAGS += -Wa,--fatal-warnings
+CFLAGS += -Wa,--fatal-warnings -Werror -Wno-uninitialized
 endif
 
 LDFLAGS := -g
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/linux-xen/entry.S
--- a/xen/arch/ia64/linux-xen/entry.S   Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/linux-xen/entry.S   Fri Jun 02 12:31:48 2006 -0500
@@ -906,17 +906,12 @@ GLOBAL_ENTRY(ia64_leave_kernel)
     ;;
        alloc loc0=ar.pfs,0,1,1,0
        adds out0=16,r12
-    adds r7 = PT(EML_UNAT)+16,r12
-       ;;
-    ld8 r7 = [r7]
-       ;;
-#if 0
-leave_kernel_self:
-    cmp.ne p8,p0 = r0, r7
-(p8) br.sptk.few leave_kernel_self
-       ;; 
-#endif
-(pUStk)        br.call.sptk.many b0=deliver_pending_interrupt
+       adds r7 = PT(EML_UNAT)+16,r12
+       ;;
+       ld8 r7 = [r7]
+       ;;
+(pUStk)        br.call.sptk.many b0=reflect_event
+//(pUStk)      br.call.sptk.many b0=deliver_pending_interrupt
     ;;
        mov ar.pfs=loc0
        mov ar.unat=r7  /* load eml_unat  */
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/linux-xen/iosapic.c
--- a/xen/arch/ia64/linux-xen/iosapic.c Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/linux-xen/iosapic.c Fri Jun 02 12:31:48 2006 -0500
@@ -1112,12 +1112,14 @@ map_iosapic_to_node(unsigned int gsi_bas
 }
 #endif
 
+#ifndef XEN
 static int __init iosapic_enable_kmalloc (void)
 {
        iosapic_kmalloc_ok = 1;
        return 0;
 }
 core_initcall (iosapic_enable_kmalloc);
+#endif
 
 #ifdef XEN
 /* nop for now */
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/linux-xen/sal.c
--- a/xen/arch/ia64/linux-xen/sal.c     Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/linux-xen/sal.c     Fri Jun 02 12:31:48 2006 -0500
@@ -229,7 +229,12 @@ ia64_sal_init (struct ia64_sal_systab *s
                return;
        }
 
+#ifdef XEN /* warning cleanup */
+       if (strncmp((char *)systab->signature, "SST_", 4) != 0)
+#else
        if (strncmp(systab->signature, "SST_", 4) != 0)
+#endif
+               
                printk(KERN_ERR "bad signature in system table!");
 
        check_versions(systab);
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/linux-xen/smp.c
--- a/xen/arch/ia64/linux-xen/smp.c     Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/linux-xen/smp.c     Fri Jun 02 12:31:48 2006 -0500
@@ -196,7 +196,9 @@ handle_IPI (int irq, void *dev_id, struc
                mb();   /* Order data access and bit testing. */
        }
        put_cpu();
+#ifndef XEN
        return IRQ_HANDLED;
+#endif
 }
 
 /*
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/linux-xen/smpboot.c
--- a/xen/arch/ia64/linux-xen/smpboot.c Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/linux-xen/smpboot.c Fri Jun 02 12:31:48 2006 -0500
@@ -62,6 +62,7 @@
 #include <asm/unistd.h>
 
 #ifdef XEN
+#include <xen/domain.h>
 #include <asm/hw_irq.h>
 int ht_per_core = 1;
 #ifndef CONFIG_SMP
@@ -197,7 +198,11 @@ sync_master (void *arg)
  * negative that it is behind.
  */
 static inline long
+#ifdef XEN /* warning cleanup */
+get_delta (unsigned long *rt, unsigned long *master)
+#else
 get_delta (long *rt, long *master)
+#endif
 {
        unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
        unsigned long tcenter, t0, t1, tm;
@@ -483,7 +488,7 @@ do_rest:
 #else
        struct vcpu *v;
 
-       v = idle_vcpu[cpu] = alloc_vcpu(idle_vcpu[0]->domain, cpu, cpu);
+       v = alloc_idle_vcpu(cpu);
        BUG_ON(v == NULL);
 
        //printf ("do_boot_cpu: cpu=%d, domain=%p, vcpu=%p\n", cpu, idle, v);
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/linux-xen/time.c
--- a/xen/arch/ia64/linux-xen/time.c    Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/linux-xen/time.c    Fri Jun 02 12:31:48 2006 -0500
@@ -158,7 +158,11 @@ ia64_init_itm (void)
 {
        unsigned long platform_base_freq, itc_freq;
        struct pal_freq_ratio itc_ratio, proc_ratio;
+#ifdef XEN /* warning cleanup */
+       unsigned long status, platform_base_drift, itc_drift;
+#else
        long status, platform_base_drift, itc_drift;
+#endif
 
        /*
         * According to SAL v2.6, we need to use a SAL call to determine the platform base
@@ -197,7 +201,11 @@ ia64_init_itm (void)
        itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;
 
        local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
+#ifdef XEN /* warning cleanup */
+       printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
+#else
        printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, "
+#endif
               "ITC freq=%lu.%03luMHz", smp_processor_id(),
               platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
               itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/tools/sparse-merge
--- a/xen/arch/ia64/tools/sparse-merge  Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/tools/sparse-merge  Fri Jun 02 12:31:48 2006 -0500
@@ -33,9 +33,17 @@ cd $LINUXPATH || exit 1
 cd $LINUXPATH || exit 1
 OLDCSET=$(hg parents | awk '/^changeset:/{print($2)}' | cut -f 1 -d :)
 for t in $OLDTAG $NEWTAG; do
+    [[ $t == *.* ]] || continue
     if ! hg tags | cut -f1 -d' ' | grep -Fx $t; then
        echo "Tag $t not found, ketching up"
-       hg up -C ${t%.*} || exit 1
+       if [[ $t == *-* ]]; then
+           # rc/pre/git versions start at the previous stable release
+           micro=${t%%-*}; micro=${micro##*.}
+           stable=${t%%-*}; stable=${stable%.*}.$((micro-1))
+           hg up -C $stable
+       else
+           hg up -C ${t%.*} || exit 1
+       fi
        ketchup ${t#v} || exit 1
        hg addremove
        hg ci -m $t
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/vmx/pal_emul.c
--- a/xen/arch/ia64/vmx/pal_emul.c      Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/vmx/pal_emul.c      Fri Jun 02 12:31:48 2006 -0500
@@ -21,6 +21,7 @@
 #include <asm/vmx_vcpu.h>
 #include <asm/pal.h>
 #include <asm/sal.h>
+#include <asm/dom_fw.h>
 #include <asm/tlb.h>
 #include <asm/vmx_mm_def.h>
 
@@ -42,7 +43,14 @@ set_pal_result (VCPU *vcpu,struct ia64_p
        vcpu_set_gr(vcpu,11, result.v2,0);
 }
 
-
+static void
+set_sal_result (VCPU *vcpu,struct sal_ret_values result) {
+
+       vcpu_set_gr(vcpu,8, result.r8,0);
+       vcpu_set_gr(vcpu,9, result.r9,0);
+       vcpu_set_gr(vcpu,10, result.r10,0);
+       vcpu_set_gr(vcpu,11, result.r11,0);
+}
 static struct ia64_pal_retval
 pal_cache_flush (VCPU *vcpu) {
        UINT64 gr28,gr29, gr30, gr31;
@@ -196,6 +204,10 @@ pal_debug_info(VCPU *vcpu){
 
 static struct ia64_pal_retval
 pal_fixed_addr(VCPU *vcpu){
+       struct ia64_pal_retval result;
+
+       result.status= -1; //unimplemented
+       return result;
 }
 
 static struct ia64_pal_retval
@@ -450,4 +462,12 @@ pal_emul( VCPU *vcpu) {
                set_pal_result (vcpu, result);
 }
 
-
+void
+sal_emul(VCPU *v) {
+       struct sal_ret_values result;
+       result = sal_emulator(vcpu_get_gr(v,32),vcpu_get_gr(v,33),
+                             vcpu_get_gr(v,34),vcpu_get_gr(v,35),
+                             vcpu_get_gr(v,36),vcpu_get_gr(v,37),
+                             vcpu_get_gr(v,38),vcpu_get_gr(v,39));
+       set_sal_result(v, result);      
+}
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/vmx/vmmu.c  Fri Jun 02 12:31:48 2006 -0500
@@ -199,8 +199,11 @@ void machine_tlb_insert(struct vcpu *d, 
     mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
     mtlb.ppn = get_mfn(d->domain,tlb->ppn);
     mtlb_ppn=mtlb.ppn;
+
+#if 0
     if (mtlb_ppn == INVALID_MFN)
         panic_domain(vcpu_regs(d),"Machine tlb insert with invalid mfn 
number.\n");
+#endif
 
     psr = ia64_clear_ic();
     if ( cl == ISIDE_TLB ) {
@@ -319,17 +322,17 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
 //        if( tlb == NULL )
 //             tlb = vtlb_lookup(vcpu, gip, DSIDE_TLB );
         if (tlb)
-               gpip = (tlb->ppn >>(tlb->ps-12)<<tlb->ps) | ( gip & (PSIZE(tlb->ps)-1) );
+            gpip = (tlb->ppn >>(tlb->ps-12)<<tlb->ps) | ( gip & (PSIZE(tlb->ps)-1) );
     }
     if( gpip){
-        mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
-       if( mfn == INVALID_MFN )  panic_domain(vcpu_regs(vcpu),"fetch_code: invalid memory\n");
-       vpa =(u64 *)__va( (gip & (PAGE_SIZE-1)) | (mfn<<PAGE_SHIFT));
+        mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
+        if( mfn == INVALID_MFN )  panic_domain(vcpu_regs(vcpu),"fetch_code: invalid memory\n");
+        vpa =(u64 *)__va( (gip & (PAGE_SIZE-1)) | (mfn<<PAGE_SHIFT));
     }else{
-       tlb = vhpt_lookup(gip);
-       if( tlb == NULL)
-           panic_domain(vcpu_regs(vcpu),"No entry found in ITLB and DTLB\n");
-       vpa =(u64 *)__va((tlb->ppn>>(PAGE_SHIFT-ARCH_PAGE_SHIFT)<<PAGE_SHIFT)|(gip&(PAGE_SIZE-1)));
+        tlb = vhpt_lookup(gip);
+        if( tlb == NULL)
+            panic_domain(vcpu_regs(vcpu),"No entry found in ITLB and DTLB\n");
+        vpa =(u64 *)__va((tlb->ppn>>(PAGE_SHIFT-ARCH_PAGE_SHIFT)<<PAGE_SHIFT)|(gip&(PAGE_SIZE-1)));
     }
     *code1 = *vpa++;
     *code2 = *vpa;
@@ -338,6 +341,7 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
 
 IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
 {
+#ifdef VTLB_DEBUG
     int slot;
     u64 ps, va;
     ps = itir_ps(itir);
@@ -348,14 +352,17 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN
         panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
         return IA64_FAULT;
     }
+#endif //VTLB_DEBUG    
     thash_purge_and_insert(vcpu, pte, itir, ifa);
     return IA64_NO_FAULT;
 }
 
 IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
 {
+    u64 gpfn;
+#ifdef VTLB_DEBUG    
     int slot;
-    u64 ps, va, gpfn;
+    u64 ps, va;
     ps = itir_ps(itir);
     va = PAGEALIGN(ifa, ps);
     slot = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
@@ -364,9 +371,18 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN
         panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
         return IA64_FAULT;
     }
+#endif //VTLB_DEBUG
     gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
-    if(VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain,gpfn))
-        pte |= VTLB_PTE_IO;
+    if (VMX_DOMAIN(vcpu)) {
+        if (__gpfn_is_io(vcpu->domain, gpfn))
+            pte |= VTLB_PTE_IO;
+        else
+            /* Ensure WB attribute if pte is related to a normal mem page,
+             * which is required by vga acceleration since qemu maps shared
+             * vram buffer with WB.
+             */
+            pte &= ~_PAGE_MA_MASK;
+    }
     thash_purge_and_insert(vcpu, pte, itir, ifa);
     return IA64_NO_FAULT;
 
@@ -377,11 +393,14 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN
 
 IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
 {
+#ifdef VTLB_DEBUG
     int index;
+#endif    
     u64 ps, va, rid;
-
+    thash_data_t * p_itr;
     ps = itir_ps(itir);
     va = PAGEALIGN(ifa, ps);
+#ifdef VTLB_DEBUG    
     index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
     if (index >=0) {
         // generate MCA.
@@ -389,9 +408,11 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64
         return IA64_FAULT;
     }
     thash_purge_entries(vcpu, va, ps);
+#endif    
     vcpu_get_rr(vcpu, va, &rid);
     rid = rid& RR_RID_MASK;
-    vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.itrs[slot], pte, itir, va, rid);
+    p_itr = (thash_data_t *)&vcpu->arch.itrs[slot];
+    vmx_vcpu_set_tr(p_itr, pte, itir, va, rid);
     vcpu_quick_region_set(PSCBX(vcpu,itr_regions),va);
     return IA64_NO_FAULT;
 }
@@ -399,11 +420,15 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64
 
 IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
 {
+#ifdef VTLB_DEBUG
     int index;
-    u64 ps, va, gpfn, rid;
-
+    u64 gpfn;
+#endif    
+    u64 ps, va, rid;
+    thash_data_t * p_dtr;
     ps = itir_ps(itir);
     va = PAGEALIGN(ifa, ps);
+#ifdef VTLB_DEBUG    
     index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
     if (index>=0) {
         // generate MCA.
@@ -412,10 +437,12 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
     }
     thash_purge_entries(vcpu, va, ps);
     gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
-    if(__gpfn_is_io(vcpu->domain,gpfn))
+    if(VMX_DOMAIN(vcpu) && _gpfn_is_io(vcpu->domain,gpfn))
         pte |= VTLB_PTE_IO;
+#endif    
     vcpu_get_rr(vcpu, va, &rid);
     rid = rid& RR_RID_MASK;
+    p_dtr = (thash_data_t *)&vcpu->arch.dtrs[slot];
     vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.dtrs[slot], pte, itir, va, rid);
     vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),va);
     return IA64_NO_FAULT;
@@ -432,7 +459,6 @@ IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT
     index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
     if (index>=0) {
         vcpu->arch.dtrs[index].pte.p=0;
-        index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
     }
     thash_purge_entries(vcpu, va, ps);
     return IA64_NO_FAULT;
@@ -447,7 +473,6 @@ IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT
     index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
     if (index>=0) {
         vcpu->arch.itrs[index].pte.p=0;
-        index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
     }
     thash_purge_entries(vcpu, va, ps);
     return IA64_NO_FAULT;
@@ -530,7 +555,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
     visr.ir=pt_isr.ir;
     vpsr.val = vmx_vcpu_get_psr(vcpu);
     if(vpsr.ic==0){
-         visr.ni=1;
+        visr.ni=1;
     }
     visr.na=1;
     data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
@@ -648,14 +673,14 @@ long
 long
 __domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len)
 {
-    unsigned long      mpfn, gpfn, m, n = *len;
-    unsigned long      end;    /* end of the area mapped by current entry */
-    thash_data_t       *entry;
+    unsigned long  mpfn, gpfn, m, n = *len;
+    unsigned long  end;   /* end of the area mapped by current entry */
+    thash_data_t   *entry;
     struct vcpu *v = current;
 
     entry = vtlb_lookup(v, va, DSIDE_TLB);
     if (entry == NULL)
-       return -EFAULT;
+        return -EFAULT;
 
     gpfn =(entry->ppn>>(PAGE_SHIFT-12));
     gpfn =PAGEALIGN(gpfn,(entry->ps-PAGE_SHIFT));
@@ -668,7 +693,7 @@ __domain_va_to_ma(unsigned long va, unsi
     /*end = PAGEALIGN(m, entry->ps) + PSIZE(entry->ps);*/
     /* Current entry can't map all requested area */
     if ((m + n) > end)
-       n = end - m;
+        n = end - m;
 
     *ma = m;
     *len = n;
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/vmx/vmx_init.c      Fri Jun 02 12:31:48 2006 -0500
@@ -346,7 +346,7 @@ int vmx_build_physmap_table(struct domai
            for (j = io_ranges[i].start;
                 j < io_ranges[i].start + io_ranges[i].size;
                 j += PAGE_SIZE)
-               __assign_domain_page(d, j, io_ranges[i].type);
+               __assign_domain_page(d, j, io_ranges[i].type, ASSIGN_writable);
        }
 
        /* Map normal memory below 3G */
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/vmx/vmx_interrupt.c
--- a/xen/arch/ia64/vmx/vmx_interrupt.c Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/vmx/vmx_interrupt.c Fri Jun 02 12:31:48 2006 -0500
@@ -390,3 +390,14 @@ page_not_present(VCPU *vcpu, u64 vadr)
     inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
 }
 
+/* Deal with
+ *  Data access rights vector
+ */
+void
+data_access_rights(VCPU *vcpu, u64 vadr)
+{
+    /* If vPSR.ic, IFA, ITIR */
+    set_ifa_itir_iha (vcpu, vadr, 1, 1, 0);
+    inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
+}
+
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/vmx/vmx_ivt.S
--- a/xen/arch/ia64/vmx/vmx_ivt.S       Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/vmx/vmx_ivt.S       Fri Jun 02 12:31:48 2006 -0500
@@ -2,10 +2,10 @@
  * arch/ia64/kernel/vmx_ivt.S
  *
  * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
- *     Stephane Eranian <eranian@xxxxxxxxxx>
- *     David Mosberger <davidm@xxxxxxxxxx>
+ *      Stephane Eranian <eranian@xxxxxxxxxx>
+ *      David Mosberger <davidm@xxxxxxxxxx>
  * Copyright (C) 2000, 2002-2003 Intel Co
- *     Asit Mallick <asit.k.mallick@xxxxxxxxx>
+ *      Asit Mallick <asit.k.mallick@xxxxxxxxx>
  *      Suresh Siddha <suresh.b.siddha@xxxxxxxxx>
  *      Kenneth Chen <kenneth.w.chen@xxxxxxxxx>
  *      Fenghua Yu <fenghua.yu@xxxxxxxxx>
@@ -31,7 +31,7 @@
  *
  *  For each entry, the comment is as follows:
  *
- *             // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
+ *              // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
  *  entry offset ----/     /         /                  /          /
  *  entry number ---------/         /                  /          /
  *  size of the entry -------------/                  /          /
@@ -96,13 +96,13 @@ vmx_fault_##n:;          \
     ;;                  \
 
 
-#define VMX_REFLECT(n)                         \
-       mov r31=pr;                                                                     \
-       mov r19=n;                      /* prepare to save predicates */                \
-    mov r29=cr.ipsr;        \
+#define VMX_REFLECT(n)    \
+    mov r31=pr;           \
+    mov r19=n;       /* prepare to save predicates */ \
+    mov r29=cr.ipsr;      \
     ;;      \
     tbit.z p6,p7=r29,IA64_PSR_VM_BIT;       \
-(p7) br.sptk.many vmx_dispatch_reflection;        \
+(p7)br.sptk.many vmx_dispatch_reflection;        \
     VMX_FAULT(n);            \
 
 
@@ -115,10 +115,10 @@ END(vmx_panic)
 
 
 
-       .section .text.ivt,"ax"
-
-       .align 32768    // align on 32KB boundary
-       .global vmx_ia64_ivt
+    .section .text.ivt,"ax"
+
+    .align 32768    // align on 32KB boundary
+    .global vmx_ia64_ivt
 vmx_ia64_ivt:
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
@@ -127,7 +127,7 @@ ENTRY(vmx_vhpt_miss)
     VMX_FAULT(0)
 END(vmx_vhpt_miss)
 
-       .org vmx_ia64_ivt+0x400
+    .org vmx_ia64_ivt+0x400
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
 ENTRY(vmx_itlb_miss)
@@ -141,9 +141,13 @@ ENTRY(vmx_itlb_miss)
     mov r16 = cr.ifa
     ;;
     thash r17 = r16
-    ;;
     ttag r20 = r16
-    mov r18 = r17      
+    ;;
+    mov r18 = r17
+    adds r28 = VLE_TITAG_OFFSET,r17
+    adds r19 = VLE_CCHAIN_OFFSET, r17
+    ;;
+    ld8 r17 = [r19]
     ;;
 vmx_itlb_loop:
     cmp.eq p6,p0 = r0, r17
@@ -161,43 +165,22 @@ vmx_itlb_loop:
 (p7)mov r17 = r23;
 (p7)br.sptk vmx_itlb_loop
     ;;
-    adds r23 = VLE_PGFLAGS_OFFSET, r17
-    adds r24 = VLE_ITIR_OFFSET, r17
-    ;;
-    ld8 r25 = [r23]
-    ld8 r26 = [r24]
-    ;;
-    cmp.eq p6,p7=r18,r17
-(p6) br vmx_itlb_loop1
-    ;;
+    ld8 r25 = [r17]
     ld8 r27 = [r18]
-    ;;
-    extr.u r19 = r27, 56, 8
-    extr.u r20 = r25, 56, 8
-    ;;
-    dep r27 = r20, r27, 56, 8
-    dep r25 = r19, r25, 56, 8
-    ;;
-    st8 [r18] = r25,8
-    st8 [r23] = r27
-    ;;
-    ld8 r28 = [r18]
-    ;;
-    st8 [r18] = r26,8
-    st8 [r24] = r28
-    ;;
-    ld8 r30 = [r18]
-    ;;
-    st8 [r18] = r22
-    st8 [r16] = r30 
-    ;;
-vmx_itlb_loop1:
-    mov cr.itir = r26
+    ld8 r29 = [r28]
+    ;;
+    st8 [r16] = r29
+    st8 [r28] = r22
+    extr.u r19 = r27, 56, 4
+    ;;
+    dep r27 = r0, r27, 56, 4
+    dep r25 = r19, r25, 56, 4
+    ;;
+    st8 [r18] = r25
+    st8 [r17] = r27
     ;;
     itc.i r25
-    ;;
-    srlz.i
-    ;;
+    dv_serialize_data
     mov r17=cr.isr
     mov r23=r31
     mov r22=b0
@@ -219,7 +202,7 @@ vmx_itlb_out:
     VMX_FAULT(1);
 END(vmx_itlb_miss)
 
-       .org vmx_ia64_ivt+0x0800
+    .org vmx_ia64_ivt+0x0800
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
 ENTRY(vmx_dtlb_miss)
@@ -232,9 +215,13 @@ ENTRY(vmx_dtlb_miss)
     mov r16 = cr.ifa
     ;;
     thash r17 = r16
-    ;;
     ttag r20 = r16
-    mov r18 = r17      
+    ;;
+    mov r18 = r17
+    adds r28 = VLE_TITAG_OFFSET,r17
+    adds r19 = VLE_CCHAIN_OFFSET, r17
+    ;;
+    ld8 r17 = [r19]
     ;;
 vmx_dtlb_loop:
     cmp.eq p6,p0 = r0, r17
@@ -252,43 +239,22 @@ vmx_dtlb_loop:
 (p7)mov r17 = r23;
 (p7)br.sptk vmx_dtlb_loop
     ;;
-    adds r23 = VLE_PGFLAGS_OFFSET, r17
-    adds r24 = VLE_ITIR_OFFSET, r17
-    ;;
-    ld8 r25 = [r23]
-    ld8 r26 = [r24]
-    ;;
-    cmp.eq p6,p7=r18,r17
-(p6) br vmx_dtlb_loop1
-    ;;
+    ld8 r25 = [r17]
     ld8 r27 = [r18]
-    ;;
-    extr.u r19 = r27, 56, 8
-    extr.u r20 = r25, 56, 8
-    ;;
-    dep r27 = r20, r27, 56, 8
-    dep r25 = r19, r25, 56, 8
-    ;;
-    st8 [r18] = r25,8
-    st8 [r23] = r27
-    ;;
-    ld8 r28 = [r18]
-    ;;
-    st8 [r18] = r26,8
-    st8 [r24] = r28
-    ;;
-    ld8 r30 = [r18]
-    ;;
-    st8 [r18] = r22
-    st8 [r16] = r30 
-    ;;
-vmx_dtlb_loop1:
-    mov cr.itir = r26
-    ;;
+    ld8 r29 = [r28]
+    ;;
+    st8 [r16] = r29
+    st8 [r28] = r22
+    extr.u r19 = r27, 56, 4
+    ;;
+    dep r27 = r0, r27, 56, 4
+    dep r25 = r19, r25, 56, 4
+    ;;
+    st8 [r18] = r25
+    st8 [r17] = r27
+    ;;    
     itc.d r25
-    ;;
-    srlz.d;
-    ;;
+    dv_serialize_data
     mov r17=cr.isr
     mov r23=r31
     mov r22=b0
@@ -310,7 +276,7 @@ vmx_dtlb_out:
     VMX_FAULT(2);
 END(vmx_dtlb_miss)
 
-       .org vmx_ia64_ivt+0x0c00
+    .org vmx_ia64_ivt+0x0c00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
 ENTRY(vmx_alt_itlb_miss)
@@ -321,88 +287,84 @@ ENTRY(vmx_alt_itlb_miss)
     tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
 (p7)br.spnt vmx_fault_3
 vmx_alt_itlb_miss_1:
-       mov r16=cr.ifa          // get address that caused the TLB miss
+    mov r16=cr.ifa    // get address that caused the TLB miss
     ;;
     tbit.z p6,p7=r16,63
 (p6)br.spnt vmx_fault_3
     ;;
-       movl r17=PAGE_KERNEL
-       mov r24=cr.ipsr
-       movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
-       ;;
-       and r19=r19,r16         // clear ed, reserved bits, and PTE control bits
-       shr.u r18=r16,55        // move address bit 59 to bit 4
-       ;;
-       and r18=0x10,r18        // bit 4=address-bit(61)
-       or r19=r17,r19          // insert PTE control bits into r19
-       ;;
-       movl r20=IA64_GRANULE_SHIFT<<2
-       or r19=r19,r18          // set bit 4 (uncached) if the access was to region 6
-       ;;
-       mov cr.itir=r20
-       ;;
-       srlz.i
-       ;;
-       itc.i r19               // insert the TLB entry
-       mov pr=r31,-1
-       rfi
+    movl r17=PAGE_KERNEL
+    mov r24=cr.ipsr
+    movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
+    ;;
+    and r19=r19,r16     // clear ed, reserved bits, and PTE control bits
+    shr.u r18=r16,55    // move address bit 59 to bit 4
+    ;;
+    and r18=0x10,r18    // bit 4=address-bit(61)
+    or r19=r17,r19      // insert PTE control bits into r19
+    ;;
+    movl r20=IA64_GRANULE_SHIFT<<2
+    or r19=r19,r18     // set bit 4 (uncached) if the access was to region 6
+    ;;
+    mov cr.itir=r20
+    ;;
+    itc.i r19          // insert the TLB entry
+    mov pr=r31,-1
+    rfi
     VMX_FAULT(3);
 END(vmx_alt_itlb_miss)
 
 
-       .org vmx_ia64_ivt+0x1000
+    .org vmx_ia64_ivt+0x1000
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
 ENTRY(vmx_alt_dtlb_miss)
     VMX_DBG_FAULT(4)
-       mov r31=pr
+    mov r31=pr
     mov r29=cr.ipsr;
     ;;
     tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
 (p7)br.spnt vmx_fault_4
 vmx_alt_dtlb_miss_1:
-       mov r16=cr.ifa          // get address that caused the TLB miss
+    mov r16=cr.ifa             // get address that caused the TLB miss
     ;;
 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
-       // Test for the address of virtual frame_table
-       shr r22=r16,56;;
-       cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
-(p8)   br.cond.sptk frametable_miss ;;
+    // Test for the address of virtual frame_table
+    shr r22=r16,56;;
+    cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
+(p8)br.cond.sptk frametable_miss ;;
 #endif
     tbit.z p6,p7=r16,63
 (p6)br.spnt vmx_fault_4
     ;;
-       movl r17=PAGE_KERNEL
-       mov r20=cr.isr
-       movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
-       mov r24=cr.ipsr
-       ;;
-       and r22=IA64_ISR_CODE_MASK,r20          // get the isr.code field
-       tbit.nz p6,p7=r20,IA64_ISR_SP_BIT       // is speculation bit on?
-       shr.u r18=r16,55                        // move address bit 59 to bit 4
-       and r19=r19,r16                         // clear ed, reserved bits, and PTE control bits
-       tbit.nz p9,p0=r20,IA64_ISR_NA_BIT       // is non-access bit on?
-       ;;
-       and r18=0x10,r18        // bit 4=address-bit(61)
-(p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22    // check isr.code field
-       dep r24=-1,r24,IA64_PSR_ED_BIT,1
-       or r19=r19,r17          // insert PTE control bits into r19
-       ;;
-       or r19=r19,r18          // set bit 4 (uncached) if the access was to region 6
-(p6) mov cr.ipsr=r24
-       movl r20=IA64_GRANULE_SHIFT<<2
-       ;;
-       mov cr.itir=r20
-       ;;
-       srlz.i
-       ;;
-(p7) itc.d r19         // insert the TLB entry
-       mov pr=r31,-1
-       rfi
+    movl r17=PAGE_KERNEL
+    mov r20=cr.isr
+    movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
+    mov r24=cr.ipsr
+    ;;
+    and r22=IA64_ISR_CODE_MASK,r20             // get the isr.code field
+    tbit.nz p6,p7=r20,IA64_ISR_SP_BIT          // is speculation bit on?
+    shr.u r18=r16,55                           // move address bit 59 to bit 4
+    and r19=r19,r16                            // clear ed, reserved bits, and PTE control bits
+    tbit.nz p9,p0=r20,IA64_ISR_NA_BIT          // is non-access bit on?
+    ;;
+    and r18=0x10,r18                           // bit 4=address-bit(61)
+(p9)cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22     // check isr.code field
+    dep r24=-1,r24,IA64_PSR_ED_BIT,1
+    or r19=r19,r17                             // insert PTE control bits into r19
+    ;;
+    or r19=r19,r18                             // set bit 4 (uncached) if the access was to region 6
+(p6)mov cr.ipsr=r24
+    movl r20=IA64_GRANULE_SHIFT<<2
+    ;;
+    mov cr.itir=r20
+    ;;
+(p7)itc.d r19          // insert the TLB entry
+    mov pr=r31,-1
+    rfi
     VMX_FAULT(4);
 END(vmx_alt_dtlb_miss)
 
-       .org vmx_ia64_ivt+0x1400
+    .org vmx_ia64_ivt+0x1400
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
 ENTRY(vmx_nested_dtlb_miss)
@@ -410,52 +372,52 @@ ENTRY(vmx_nested_dtlb_miss)
     VMX_FAULT(5)
 END(vmx_nested_dtlb_miss)
 
-       .org vmx_ia64_ivt+0x1800
+    .org vmx_ia64_ivt+0x1800
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
 ENTRY(vmx_ikey_miss)
     VMX_DBG_FAULT(6)
-       VMX_REFLECT(6)
+    VMX_REFLECT(6)
 END(vmx_ikey_miss)
 
-       .org vmx_ia64_ivt+0x1c00
+    .org vmx_ia64_ivt+0x1c00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 ENTRY(vmx_dkey_miss)
     VMX_DBG_FAULT(7)
-       VMX_REFLECT(7)
+    VMX_REFLECT(7)
 END(vmx_dkey_miss)
 
-       .org vmx_ia64_ivt+0x2000
+    .org vmx_ia64_ivt+0x2000
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
 ENTRY(vmx_dirty_bit)
     VMX_DBG_FAULT(8)
-       VMX_REFLECT(8)
+    VMX_REFLECT(8)
 END(vmx_dirty_bit)
 
-       .org vmx_ia64_ivt+0x2400
+    .org vmx_ia64_ivt+0x2400
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
 ENTRY(vmx_iaccess_bit)
     VMX_DBG_FAULT(9)
-       VMX_REFLECT(9)
+    VMX_REFLECT(9)
 END(vmx_iaccess_bit)
 
-       .org vmx_ia64_ivt+0x2800
+    .org vmx_ia64_ivt+0x2800
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
 ENTRY(vmx_daccess_bit)
     VMX_DBG_FAULT(10)
-       VMX_REFLECT(10)
+    VMX_REFLECT(10)
 END(vmx_daccess_bit)
 
-       .org vmx_ia64_ivt+0x2c00
+    .org vmx_ia64_ivt+0x2c00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
 ENTRY(vmx_break_fault)
     VMX_DBG_FAULT(11)
-       mov r31=pr
+    mov r31=pr
     mov r19=11
     mov r30=cr.iim
     movl r29=0x1100
@@ -473,12 +435,12 @@ ENTRY(vmx_break_fault)
     VMX_FAULT(11);
 END(vmx_break_fault)
 
-       .org vmx_ia64_ivt+0x3000
+    .org vmx_ia64_ivt+0x3000
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
 ENTRY(vmx_interrupt)
 //    VMX_DBG_FAULT(12)
-       mov r31=pr              // prepare to save predicates
+    mov r31=pr         // prepare to save predicates
     mov r19=12
     mov r29=cr.ipsr
     ;;
@@ -487,58 +449,58 @@ ENTRY(vmx_interrupt)
     ;;
 (p7) br.sptk vmx_dispatch_interrupt
     ;;
-       mov r27=ar.rsc                  /* M */
-       mov r20=r1                      /* A */
-       mov r25=ar.unat         /* M */
-       mov r26=ar.pfs                  /* I */
-       mov r28=cr.iip                  /* M */
-       cover               /* B (or nothing) */
-       ;;
-       mov r1=sp
-       ;;
-       invala                          /* M */
-       mov r30=cr.ifs
-       ;;
+    mov r27=ar.rsc             /* M */
+    mov r20=r1                 /* A */
+    mov r25=ar.unat            /* M */
+    mov r26=ar.pfs             /* I */
+    mov r28=cr.iip             /* M */
+    cover                      /* B (or nothing) */
+    ;;
+    mov r1=sp
+    ;;
+    invala                     /* M */
+    mov r30=cr.ifs
+    ;;
     addl r1=-IA64_PT_REGS_SIZE,r1
     ;;
-       adds r17=2*L1_CACHE_BYTES,r1            /* really: biggest cache-line size */
-       adds r16=PT(CR_IPSR),r1
-       ;;
-       lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
-       st8 [r16]=r29           /* save cr.ipsr */
-       ;;
-       lfetch.fault.excl.nt1 [r17]
-       mov r29=b0
-       ;;
-       adds r16=PT(R8),r1      /* initialize first base pointer */
-       adds r17=PT(R9),r1      /* initialize second base pointer */
-       mov r18=r0                      /* make sure r18 isn't NaT */
-       ;;
+    adds r17=2*L1_CACHE_BYTES,r1       /* really: biggest cache-line size */
+    adds r16=PT(CR_IPSR),r1
+    ;;
+    lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
+    st8 [r16]=r29                      /* save cr.ipsr */
+    ;;
+    lfetch.fault.excl.nt1 [r17]
+    mov r29=b0
+    ;;
+    adds r16=PT(R8),r1         /* initialize first base pointer */
+    adds r17=PT(R9),r1         /* initialize second base pointer */
+    mov r18=r0                 /* make sure r18 isn't NaT */
+    ;;
 .mem.offset 0,0; st8.spill [r16]=r8,16
 .mem.offset 8,0; st8.spill [r17]=r9,16
         ;;
 .mem.offset 0,0; st8.spill [r16]=r10,24
 .mem.offset 8,0; st8.spill [r17]=r11,24
         ;;
-       st8 [r16]=r28,16        /* save cr.iip */
-       st8 [r17]=r30,16        /* save cr.ifs */
-       mov r8=ar.fpsr          /* M */
-       mov r9=ar.csd
-       mov r10=ar.ssd
-       movl r11=FPSR_DEFAULT   /* L-unit */
-       ;;
-       st8 [r16]=r25,16        /* save ar.unat */
-       st8 [r17]=r26,16        /* save ar.pfs */
-       shl r18=r18,16          /* compute ar.rsc to be used for "loadrs" */
-       ;;
-    st8 [r16]=r27,16   /* save ar.rsc */
-    adds r17=16,r17    /* skip over ar_rnat field */
-    ;;          /* avoid RAW on r16 & r17 */
-    st8 [r17]=r31,16   /* save predicates */
-    adds r16=16,r16    /* skip over ar_bspstore field */
-    ;;
-    st8 [r16]=r29,16   /* save b0 */
-    st8 [r17]=r18,16   /* save ar.rsc value for "loadrs" */
+    st8 [r16]=r28,16           /* save cr.iip */
+    st8 [r17]=r30,16           /* save cr.ifs */
+    mov r8=ar.fpsr             /* M */
+    mov r9=ar.csd
+    mov r10=ar.ssd
+    movl r11=FPSR_DEFAULT      /* L-unit */
+    ;;
+    st8 [r16]=r25,16           /* save ar.unat */
+    st8 [r17]=r26,16           /* save ar.pfs */
+    shl r18=r18,16             /* compute ar.rsc to be used for "loadrs" */
+    ;;
+    st8 [r16]=r27,16           /* save ar.rsc */
+    adds r17=16,r17            /* skip over ar_rnat field */
+    ;;
+    st8 [r17]=r31,16           /* save predicates */
+    adds r16=16,r16            /* skip over ar_bspstore field */
+    ;;
+    st8 [r16]=r29,16           /* save b0 */
+    st8 [r17]=r18,16           /* save ar.rsc value for "loadrs" */
     ;;
 .mem.offset 0,0; st8.spill [r16]=r20,16    /* save original r1 */
 .mem.offset 8,0; st8.spill [r17]=r12,16
@@ -561,18 +523,18 @@ ENTRY(vmx_interrupt)
     ;;                                          \
     bsw.1
     ;;
-       alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
-       mov out0=cr.ivr         // pass cr.ivr as first arg
-       add out1=16,sp          // pass pointer to pt_regs as second arg
-
-       ssm psr.ic
+    alloc r14=ar.pfs,0,0,2,0   // must be first in an insn group
+    mov out0=cr.ivr            // pass cr.ivr as first arg
+    add out1=16,sp             // pass pointer to pt_regs as second arg
+
+    ssm psr.ic
     ;;
     srlz.i
-       ;;
+    ;;
     (p15) ssm psr.i
-       adds r3=8,r2            // set up second base pointer for SAVE_REST
-       srlz.i                  // ensure everybody knows psr.ic is back on
-       ;;
+    adds r3=8,r2               // set up second base pointer for SAVE_REST
+    srlz.i                     // ensure everybody knows psr.ic is back on
+    ;;
 .mem.offset 0,0; st8.spill [r2]=r16,16
 .mem.offset 8,0; st8.spill [r3]=r17,16
     ;;
@@ -599,8 +561,8 @@ ENTRY(vmx_interrupt)
 .mem.offset 0,0; st8.spill [r2]=r30,16
 .mem.offset 8,0; st8.spill [r3]=r31,32
     ;;
-    mov ar.fpsr=r11     /* M-unit */
-    st8 [r2]=r8,8      /* ar.ccv */
+    mov ar.fpsr=r11       /* M-unit */
+    st8 [r2]=r8,8         /* ar.ccv */
     adds r24=PT(B6)-PT(F7),r3
     ;;
     stf.spill [r2]=f6,32
@@ -619,95 +581,95 @@ ENTRY(vmx_interrupt)
     st8 [r24]=r9           /* ar.csd */
     st8 [r25]=r10          /* ar.ssd */
     ;;
-       srlz.d                  // make sure we see the effect of cr.ivr
-       movl r14=ia64_leave_nested
-       ;;
-       mov rp=r14
-       br.call.sptk.many b6=ia64_handle_irq
-       ;;
+    srlz.d             // make sure we see the effect of cr.ivr
+    movl r14=ia64_leave_nested
+    ;;
+    mov rp=r14
+    br.call.sptk.many b6=ia64_handle_irq
+    ;;
 END(vmx_interrupt)
 
-       .org vmx_ia64_ivt+0x3400
+    .org vmx_ia64_ivt+0x3400
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x3400 Entry 13 (size 64 bundles) Reserved
 ENTRY(vmx_virtual_exirq)
-       VMX_DBG_FAULT(13)
-       mov r31=pr
-        mov r19=13
-        br.sptk vmx_dispatch_vexirq
+    VMX_DBG_FAULT(13)
+    mov r31=pr
+    mov r19=13
+    br.sptk vmx_dispatch_vexirq
 END(vmx_virtual_exirq)
 
-       .org vmx_ia64_ivt+0x3800
+    .org vmx_ia64_ivt+0x3800
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x3800 Entry 14 (size 64 bundles) Reserved
     VMX_DBG_FAULT(14)
-       VMX_FAULT(14)
-
-
-       .org vmx_ia64_ivt+0x3c00
+    VMX_FAULT(14)
+
+
+    .org vmx_ia64_ivt+0x3c00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x3c00 Entry 15 (size 64 bundles) Reserved
     VMX_DBG_FAULT(15)
-       VMX_FAULT(15)
-
-
-       .org vmx_ia64_ivt+0x4000
+    VMX_FAULT(15)
+
+
+    .org vmx_ia64_ivt+0x4000
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x4000 Entry 16 (size 64 bundles) Reserved
     VMX_DBG_FAULT(16)
-       VMX_FAULT(16)
-
-       .org vmx_ia64_ivt+0x4400
+    VMX_FAULT(16)
+
+    .org vmx_ia64_ivt+0x4400
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x4400 Entry 17 (size 64 bundles) Reserved
     VMX_DBG_FAULT(17)
-       VMX_FAULT(17)
-
-       .org vmx_ia64_ivt+0x4800
+    VMX_FAULT(17)
+
+    .org vmx_ia64_ivt+0x4800
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x4800 Entry 18 (size 64 bundles) Reserved
     VMX_DBG_FAULT(18)
-       VMX_FAULT(18)
-
-       .org vmx_ia64_ivt+0x4c00
+    VMX_FAULT(18)
+
+    .org vmx_ia64_ivt+0x4c00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x4c00 Entry 19 (size 64 bundles) Reserved
     VMX_DBG_FAULT(19)
-       VMX_FAULT(19)
+    VMX_FAULT(19)
 
     .org vmx_ia64_ivt+0x5000
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x5000 Entry 20 (size 16 bundles) Page Not Present
 ENTRY(vmx_page_not_present)
-       VMX_DBG_FAULT(20)
-       VMX_REFLECT(20)
+    VMX_DBG_FAULT(20)
+    VMX_REFLECT(20)
 END(vmx_page_not_present)
 
     .org vmx_ia64_ivt+0x5100
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x5100 Entry 21 (size 16 bundles) Key Permission vector
 ENTRY(vmx_key_permission)
-       VMX_DBG_FAULT(21)
-       VMX_REFLECT(21)
+    VMX_DBG_FAULT(21)
+    VMX_REFLECT(21)
 END(vmx_key_permission)
 
     .org vmx_ia64_ivt+0x5200
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
 ENTRY(vmx_iaccess_rights)
-       VMX_DBG_FAULT(22)
-       VMX_REFLECT(22)
+    VMX_DBG_FAULT(22)
+    VMX_REFLECT(22)
 END(vmx_iaccess_rights)
 
-       .org vmx_ia64_ivt+0x5300
+    .org vmx_ia64_ivt+0x5300
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
 ENTRY(vmx_daccess_rights)
-       VMX_DBG_FAULT(23)
-       VMX_REFLECT(23)
+    VMX_DBG_FAULT(23)
+    VMX_REFLECT(23)
 END(vmx_daccess_rights)
 
-       .org vmx_ia64_ivt+0x5400
+    .org vmx_ia64_ivt+0x5400
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
 ENTRY(vmx_general_exception)
@@ -716,106 +678,106 @@ ENTRY(vmx_general_exception)
 //    VMX_FAULT(24)
 END(vmx_general_exception)
 
-       .org vmx_ia64_ivt+0x5500
+    .org vmx_ia64_ivt+0x5500
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
 ENTRY(vmx_disabled_fp_reg)
-       VMX_DBG_FAULT(25)
-       VMX_REFLECT(25)
+    VMX_DBG_FAULT(25)
+    VMX_REFLECT(25)
 END(vmx_disabled_fp_reg)
 
-       .org vmx_ia64_ivt+0x5600
+    .org vmx_ia64_ivt+0x5600
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
 ENTRY(vmx_nat_consumption)
-       VMX_DBG_FAULT(26)
-       VMX_REFLECT(26)
+    VMX_DBG_FAULT(26)
+    VMX_REFLECT(26)
 END(vmx_nat_consumption)
 
-       .org vmx_ia64_ivt+0x5700
+    .org vmx_ia64_ivt+0x5700
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
 ENTRY(vmx_speculation_vector)
-       VMX_DBG_FAULT(27)
-       VMX_REFLECT(27)
+    VMX_DBG_FAULT(27)
+    VMX_REFLECT(27)
 END(vmx_speculation_vector)
 
-       .org vmx_ia64_ivt+0x5800
+    .org vmx_ia64_ivt+0x5800
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x5800 Entry 28 (size 16 bundles) Reserved
     VMX_DBG_FAULT(28)
-       VMX_FAULT(28)
-
-       .org vmx_ia64_ivt+0x5900
+    VMX_FAULT(28)
+
+    .org vmx_ia64_ivt+0x5900
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
 ENTRY(vmx_debug_vector)
     VMX_DBG_FAULT(29)
-       VMX_FAULT(29)
+    VMX_FAULT(29)
 END(vmx_debug_vector)
 
-       .org vmx_ia64_ivt+0x5a00
+    .org vmx_ia64_ivt+0x5a00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
 ENTRY(vmx_unaligned_access)
-       VMX_DBG_FAULT(30)
-       VMX_REFLECT(30)
+    VMX_DBG_FAULT(30)
+    VMX_REFLECT(30)
 END(vmx_unaligned_access)
 
-       .org vmx_ia64_ivt+0x5b00
+    .org vmx_ia64_ivt+0x5b00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
 ENTRY(vmx_unsupported_data_reference)
-       VMX_DBG_FAULT(31)
-       VMX_REFLECT(31)
+    VMX_DBG_FAULT(31)
+    VMX_REFLECT(31)
 END(vmx_unsupported_data_reference)
 
-       .org vmx_ia64_ivt+0x5c00
+    .org vmx_ia64_ivt+0x5c00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
 ENTRY(vmx_floating_point_fault)
-       VMX_DBG_FAULT(32)
-       VMX_REFLECT(32)
+    VMX_DBG_FAULT(32)
+    VMX_REFLECT(32)
 END(vmx_floating_point_fault)
 
-       .org vmx_ia64_ivt+0x5d00
+    .org vmx_ia64_ivt+0x5d00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
 ENTRY(vmx_floating_point_trap)
-       VMX_DBG_FAULT(33)
-       VMX_REFLECT(33)
+    VMX_DBG_FAULT(33)
+    VMX_REFLECT(33)
 END(vmx_floating_point_trap)
 
-       .org vmx_ia64_ivt+0x5e00
+    .org vmx_ia64_ivt+0x5e00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
 ENTRY(vmx_lower_privilege_trap)
-       VMX_DBG_FAULT(34)
-       VMX_REFLECT(34)
+    VMX_DBG_FAULT(34)
+    VMX_REFLECT(34)
 END(vmx_lower_privilege_trap)
 
-       .org vmx_ia64_ivt+0x5f00
+    .org vmx_ia64_ivt+0x5f00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
 ENTRY(vmx_taken_branch_trap)
-       VMX_DBG_FAULT(35)
-       VMX_REFLECT(35)
+    VMX_DBG_FAULT(35)
+    VMX_REFLECT(35)
 END(vmx_taken_branch_trap)
 
-       .org vmx_ia64_ivt+0x6000
+    .org vmx_ia64_ivt+0x6000
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
 ENTRY(vmx_single_step_trap)
-       VMX_DBG_FAULT(36)
-       VMX_REFLECT(36)
+    VMX_DBG_FAULT(36)
+    VMX_REFLECT(36)
 END(vmx_single_step_trap)
 
-       .org vmx_ia64_ivt+0x6100
+    .org vmx_ia64_ivt+0x6100
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
 ENTRY(vmx_virtualization_fault)
 //    VMX_DBG_FAULT(37)
-       mov r31=pr
+    mov r31=pr
     mov r19=37
     adds r16 = IA64_VCPU_CAUSE_OFFSET,r21
     adds r17 = IA64_VCPU_OPCODE_OFFSET,r21
@@ -826,197 +788,197 @@ ENTRY(vmx_virtualization_fault)
     br.sptk vmx_dispatch_virtualization_fault
 END(vmx_virtualization_fault)
 
-       .org vmx_ia64_ivt+0x6200
+    .org vmx_ia64_ivt+0x6200
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x6200 Entry 38 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(38)
-       VMX_FAULT(38)
-
-       .org vmx_ia64_ivt+0x6300
+    VMX_DBG_FAULT(38)
+    VMX_FAULT(38)
+
+    .org vmx_ia64_ivt+0x6300
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x6300 Entry 39 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(39)
-       VMX_FAULT(39)
-
-       .org vmx_ia64_ivt+0x6400
+    VMX_DBG_FAULT(39)
+    VMX_FAULT(39)
+
+    .org vmx_ia64_ivt+0x6400
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x6400 Entry 40 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(40)
-       VMX_FAULT(40)
-
-       .org vmx_ia64_ivt+0x6500
+    VMX_DBG_FAULT(40)
+    VMX_FAULT(40)
+
+    .org vmx_ia64_ivt+0x6500
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x6500 Entry 41 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(41)
-       VMX_FAULT(41)
-
-       .org vmx_ia64_ivt+0x6600
+    VMX_DBG_FAULT(41)
+    VMX_FAULT(41)
+
+    .org vmx_ia64_ivt+0x6600
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x6600 Entry 42 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(42)
-       VMX_FAULT(42)
-
-       .org vmx_ia64_ivt+0x6700
+    VMX_DBG_FAULT(42)
+    VMX_FAULT(42)
+
+    .org vmx_ia64_ivt+0x6700
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x6700 Entry 43 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(43)
-       VMX_FAULT(43)
-
-       .org vmx_ia64_ivt+0x6800
+    VMX_DBG_FAULT(43)
+    VMX_FAULT(43)
+
+    .org vmx_ia64_ivt+0x6800
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x6800 Entry 44 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(44)
-       VMX_FAULT(44)
-
-       .org vmx_ia64_ivt+0x6900
+    VMX_DBG_FAULT(44)
+    VMX_FAULT(44)
+
+    .org vmx_ia64_ivt+0x6900
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x6900 Entry 45 (size 16 bundles) IA-32 Exeception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
 ENTRY(vmx_ia32_exception)
-       VMX_DBG_FAULT(45)
-       VMX_FAULT(45)
+    VMX_DBG_FAULT(45)
+    VMX_FAULT(45)
 END(vmx_ia32_exception)
 
-       .org vmx_ia64_ivt+0x6a00
+    .org vmx_ia64_ivt+0x6a00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept  (30,31,59,70,71)
 ENTRY(vmx_ia32_intercept)
-       VMX_DBG_FAULT(46)
-       VMX_FAULT(46)
+    VMX_DBG_FAULT(46)
+    VMX_FAULT(46)
 END(vmx_ia32_intercept)
 
-       .org vmx_ia64_ivt+0x6b00
+    .org vmx_ia64_ivt+0x6b00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt  (74)
 ENTRY(vmx_ia32_interrupt)
-       VMX_DBG_FAULT(47)
-       VMX_FAULT(47)
+    VMX_DBG_FAULT(47)
+    VMX_FAULT(47)
 END(vmx_ia32_interrupt)
 
-       .org vmx_ia64_ivt+0x6c00
+    .org vmx_ia64_ivt+0x6c00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x6c00 Entry 48 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(48)
-       VMX_FAULT(48)
-
-       .org vmx_ia64_ivt+0x6d00
+    VMX_DBG_FAULT(48)
+    VMX_FAULT(48)
+
+    .org vmx_ia64_ivt+0x6d00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x6d00 Entry 49 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(49)
-       VMX_FAULT(49)
-
-       .org vmx_ia64_ivt+0x6e00
+    VMX_DBG_FAULT(49)
+    VMX_FAULT(49)
+
+    .org vmx_ia64_ivt+0x6e00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x6e00 Entry 50 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(50)
-       VMX_FAULT(50)
-
-       .org vmx_ia64_ivt+0x6f00
+    VMX_DBG_FAULT(50)
+    VMX_FAULT(50)
+
+    .org vmx_ia64_ivt+0x6f00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x6f00 Entry 51 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(51)
-       VMX_FAULT(51)
-
-       .org vmx_ia64_ivt+0x7000
+    VMX_DBG_FAULT(51)
+    VMX_FAULT(51)
+
+    .org vmx_ia64_ivt+0x7000
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x7000 Entry 52 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(52)
-       VMX_FAULT(52)
-
-       .org vmx_ia64_ivt+0x7100
+    VMX_DBG_FAULT(52)
+    VMX_FAULT(52)
+
+    .org vmx_ia64_ivt+0x7100
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x7100 Entry 53 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(53)
-       VMX_FAULT(53)
-
-       .org vmx_ia64_ivt+0x7200
+    VMX_DBG_FAULT(53)
+    VMX_FAULT(53)
+
+    .org vmx_ia64_ivt+0x7200
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x7200 Entry 54 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(54)
-       VMX_FAULT(54)
-
-       .org vmx_ia64_ivt+0x7300
+    VMX_DBG_FAULT(54)
+    VMX_FAULT(54)
+
+    .org vmx_ia64_ivt+0x7300
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x7300 Entry 55 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(55)
-       VMX_FAULT(55)
-
-       .org vmx_ia64_ivt+0x7400
+    VMX_DBG_FAULT(55)
+    VMX_FAULT(55)
+
+    .org vmx_ia64_ivt+0x7400
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x7400 Entry 56 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(56)
-       VMX_FAULT(56)
-
-       .org vmx_ia64_ivt+0x7500
+    VMX_DBG_FAULT(56)
+    VMX_FAULT(56)
+
+    .org vmx_ia64_ivt+0x7500
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x7500 Entry 57 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(57)
-       VMX_FAULT(57)
-
-       .org vmx_ia64_ivt+0x7600
+    VMX_DBG_FAULT(57)
+    VMX_FAULT(57)
+
+    .org vmx_ia64_ivt+0x7600
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x7600 Entry 58 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(58)
-       VMX_FAULT(58)
-
-       .org vmx_ia64_ivt+0x7700
+    VMX_DBG_FAULT(58)
+    VMX_FAULT(58)
+
+    .org vmx_ia64_ivt+0x7700
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x7700 Entry 59 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(59)
-       VMX_FAULT(59)
-
-       .org vmx_ia64_ivt+0x7800
+    VMX_DBG_FAULT(59)
+    VMX_FAULT(59)
+
+    .org vmx_ia64_ivt+0x7800
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x7800 Entry 60 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(60)
-       VMX_FAULT(60)
-
-       .org vmx_ia64_ivt+0x7900
+    VMX_DBG_FAULT(60)
+    VMX_FAULT(60)
+
+    .org vmx_ia64_ivt+0x7900
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x7900 Entry 61 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(61)
-       VMX_FAULT(61)
-
-       .org vmx_ia64_ivt+0x7a00
+    VMX_DBG_FAULT(61)
+    VMX_FAULT(61)
+
+    .org vmx_ia64_ivt+0x7a00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x7a00 Entry 62 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(62)
-       VMX_FAULT(62)
-
-       .org vmx_ia64_ivt+0x7b00
+    VMX_DBG_FAULT(62)
+    VMX_FAULT(62)
+
+    .org vmx_ia64_ivt+0x7b00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x7b00 Entry 63 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(63)
-       VMX_FAULT(63)
-
-       .org vmx_ia64_ivt+0x7c00
+    VMX_DBG_FAULT(63)
+    VMX_FAULT(63)
+
+    .org vmx_ia64_ivt+0x7c00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x7c00 Entry 64 (size 16 bundles) Reserved
     VMX_DBG_FAULT(64)
-       VMX_FAULT(64)
-
-       .org vmx_ia64_ivt+0x7d00
+    VMX_FAULT(64)
+
+    .org vmx_ia64_ivt+0x7d00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x7d00 Entry 65 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(65)
-       VMX_FAULT(65)
-
-       .org vmx_ia64_ivt+0x7e00
+    VMX_DBG_FAULT(65)
+    VMX_FAULT(65)
+
+    .org vmx_ia64_ivt+0x7e00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x7e00 Entry 66 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(66)
-       VMX_FAULT(66)
-
-       .org vmx_ia64_ivt+0x7f00
+    VMX_DBG_FAULT(66)
+    VMX_FAULT(66)
+
+    .org vmx_ia64_ivt+0x7f00
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x7f00 Entry 67 (size 16 bundles) Reserved
-       VMX_DBG_FAULT(67)
-       VMX_FAULT(67)
-
-       .org vmx_ia64_ivt+0x8000
-    // There is no particular reason for this code to be here, other than that
-    // there happens to be space here that would go unused otherwise.  If this
-    // fault ever gets "unreserved", simply moved the following code to a more
-    // suitable spot...
+    VMX_DBG_FAULT(67)
+    VMX_FAULT(67)
+
+    .org vmx_ia64_ivt+0x8000
+// There is no particular reason for this code to be here, other than that
+// there happens to be space here that would go unused otherwise.  If this
+// fault ever gets "unreserved", simply moved the following code to a more
+// suitable spot...
 
 
 ENTRY(vmx_dispatch_reflection)
@@ -1165,24 +1127,24 @@ END(vmx_hypercall_dispatch)
 
 
 ENTRY(vmx_dispatch_interrupt)
-       VMX_SAVE_MIN_WITH_COVER_R19     // uses r31; defines r2 and r3
-       ;;
-       alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
-       mov out0=cr.ivr         // pass cr.ivr as first arg
-       adds r3=8,r2            // set up second base pointer for SAVE_REST
-    ;;
-       ssm psr.ic
-       ;;
+    VMX_SAVE_MIN_WITH_COVER_R19        // uses r31; defines r2 and r3
+    ;;
+    alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
+    mov out0=cr.ivr            // pass cr.ivr as first arg
+    adds r3=8,r2               // set up second base pointer for SAVE_REST
+    ;;
+    ssm psr.ic
+    ;;
     srlz.i
     ;;
     (p15) ssm psr.i
-       movl r14=ia64_leave_hypervisor
-       ;;
-       VMX_SAVE_REST
-       mov rp=r14
-       ;;
-       add out1=16,sp          // pass pointer to pt_regs as second arg
-       br.call.sptk.many b6=ia64_handle_irq
+    movl r14=ia64_leave_hypervisor
+    ;;
+    VMX_SAVE_REST
+    mov rp=r14
+    ;;
+    add out1=16,sp             // pass pointer to pt_regs as second arg
+    br.call.sptk.many b6=ia64_handle_irq
 END(vmx_dispatch_interrupt)
 
 
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c   Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/vmx/vmx_process.c   Fri Jun 02 12:31:48 2006 -0500
@@ -64,6 +64,7 @@ extern void ivhpt_fault (VCPU *vcpu, u64
 extern void ivhpt_fault (VCPU *vcpu, u64 vadr);
 
 #define DOMN_PAL_REQUEST    0x110000
+#define DOMN_SAL_REQUEST    0x110001
 
 static UINT64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000, 0x1400,0x1800,
     0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,0x4000,
@@ -96,85 +97,6 @@ void vmx_reflect_interruption(UINT64 ifa
     inject_guest_interruption(vcpu, vector);
 }
 
-static void
-vmx_handle_hypercall (VCPU *v, REGS *regs)
-{
-    struct ia64_pal_retval y;
-    struct sal_ret_values x;
-    unsigned long i, sal_param[8];
-
-    switch (regs->r2) {
-        case FW_HYPERCALL_PAL_CALL:
-            //printf("*** PAL hypercall: index=%d\n",regs->r28);
-            //FIXME: This should call a C routine
-            y = pal_emulator_static(VCPU(v, vgr[12]));
-            regs->r8 = y.status; regs->r9 = y.v0;
-            regs->r10 = y.v1; regs->r11 = y.v2;
-#if 0
-            if (regs->r8)
-                printk("Failed vpal emulation, with index:0x%lx\n",
-                       VCPU(v, vgr[12]));
-#endif
-            break;
-        case FW_HYPERCALL_SAL_CALL:
-            for (i = 0; i < 8; i++)
-                vcpu_get_gr_nat(v, 32+i, &sal_param[i]);
-            x = sal_emulator(sal_param[0], sal_param[1],
-                             sal_param[2], sal_param[3],
-                             sal_param[4], sal_param[5],
-                             sal_param[6], sal_param[7]);
-            regs->r8 = x.r8; regs->r9 = x.r9;
-            regs->r10 = x.r10; regs->r11 = x.r11;
-#if 0
-            if (regs->r8)
-                printk("Failed vsal emulation, with index:0x%lx\n",
-                       sal_param[0]);
-#endif
-            break;
-        case FW_HYPERCALL_EFI_RESET_SYSTEM:
-            printf("efi.reset_system called ");
-            if (current->domain == dom0) {
-                printf("(by dom0)\n ");
-                (*efi.reset_system)(EFI_RESET_WARM,0,0,NULL);
-            }
-            printf("(not supported for non-0 domain)\n");
-            regs->r8 = EFI_UNSUPPORTED;
-            break;
-        case FW_HYPERCALL_EFI_GET_TIME:
-            {
-                unsigned long *tv, *tc;
-                vcpu_get_gr_nat(v, 32, (u64 *)&tv);
-                vcpu_get_gr_nat(v, 33, (u64 *)&tc);
-                printf("efi_get_time(%p,%p) called...",tv,tc);
-                tv = __va(translate_domain_mpaddr((unsigned long)tv));
-                if (tc) tc = __va(translate_domain_mpaddr((unsigned long)tc));
-                regs->r8 = (*efi.get_time)((efi_time_t *)tv,(efi_time_cap_t *)tc);
-                printf("and returns %lx\n",regs->r8);
-            }
-            break;
-        case FW_HYPERCALL_EFI_SET_TIME:
-        case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
-        case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
-            // FIXME: need fixes in efi.h from 2.6.9
-        case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
-            // FIXME: WARNING!! IF THIS EVER GETS IMPLEMENTED
-            // SOME OF THE OTHER EFI EMULATIONS WILL CHANGE AS
-            // POINTER ARGUMENTS WILL BE VIRTUAL!!
-        case FW_HYPERCALL_EFI_GET_VARIABLE:
-            // FIXME: need fixes in efi.h from 2.6.9
-        case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
-        case FW_HYPERCALL_EFI_SET_VARIABLE:
-        case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
-            // FIXME: need fixes in efi.h from 2.6.9
-            regs->r8 = EFI_UNSUPPORTED;
-            break;
-    }
-#if 0
-    if (regs->r8)
-        printk("Failed vgfw emulation, with index:0x%lx\n",
-               regs->r2);
-#endif
-}
 
 IA64FAULT
 vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
@@ -183,12 +105,12 @@ vmx_ia64_handle_break (unsigned long ifa
     struct vcpu *v = current;
 
 #ifdef CRASH_DEBUG
-       if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs) &&
+    if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs) &&
         IS_VMM_ADDRESS(regs->cr_iip)) {
-               if (iim == 0)
-                       show_registers(regs);
-               debugger_trap_fatal(0 /* don't care */, regs);
-       } else
+        if (iim == 0)
+            show_registers(regs);
+        debugger_trap_fatal(0 /* don't care */, regs);
+    } else
 #endif
     {
         if (iim == 0) 
@@ -197,13 +119,17 @@ vmx_ia64_handle_break (unsigned long ifa
         if (!user_mode(regs)) {
             /* Allow hypercalls only when cpl = 0.  */
             if (iim == d->arch.breakimm) {
-                vmx_handle_hypercall (v ,regs);
-                vmx_vcpu_increment_iip(current);
+                ia64_hypercall(regs);
+                vmx_vcpu_increment_iip(v);
                 return IA64_NO_FAULT;
             }
             else if(iim == DOMN_PAL_REQUEST){
-                pal_emul(current);
-                vmx_vcpu_increment_iip(current);
+                pal_emul(v);
+                vmx_vcpu_increment_iip(v);
+                return IA64_NO_FAULT;
+            }else if(iim == DOMN_SAL_REQUEST){
+                sal_emul(v);
+                vmx_vcpu_increment_iip(v);
                 return IA64_NO_FAULT;
             }
         }
@@ -247,45 +173,45 @@ void save_banked_regs_to_vpd(VCPU *v, RE
 // NEVER successful if already reflecting a trap/fault because psr.i==0
 void leave_hypervisor_tail(struct pt_regs *regs)
 {
-       struct domain *d = current->domain;
-       struct vcpu *v = current;
-       // FIXME: Will this work properly if doing an RFI???
-       if (!is_idle_domain(d) ) {      // always comes from guest
-               extern void vmx_dorfirfi(void);
-               struct pt_regs *user_regs = vcpu_regs(current);
-               if (local_softirq_pending())
-                       do_softirq();
-               local_irq_disable();
- 
-               if (user_regs != regs)
-                       printk("WARNING: checking pending interrupt in nested interrupt!!!\n");
-
-               /* VMX Domain N has other interrupt source, saying DM  */
-                if (test_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags))
+    struct domain *d = current->domain;
+    struct vcpu *v = current;
+    // FIXME: Will this work properly if doing an RFI???
+    if (!is_idle_domain(d) ) { // always comes from guest
+        extern void vmx_dorfirfi(void);
+        struct pt_regs *user_regs = vcpu_regs(current);
+        if (local_softirq_pending())
+            do_softirq();
+        local_irq_disable();
+
+        if (user_regs != regs)
+            printk("WARNING: checking pending interrupt in nested interrupt!!!\n");
+
+        /* VMX Domain N has other interrupt source, saying DM  */
+        if (test_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags))
                       vmx_intr_assist(v);
 
-               /* FIXME: Check event pending indicator, and set
-                * pending bit if necessary to inject back to guest.
-                * Should be careful about window between this check
-                * and above assist, since IOPACKET_PORT shouldn't be
-                * injected into vmx domain.
-                *
-                * Now hardcode the vector as 0x10 temporarily
-                */
-//             if (event_pending(v)&&(!(VLSAPIC_INSVC(v,0)&(1UL<<0x10)))) {
-//                     VCPU(v, irr[0]) |= 1UL << 0x10;
-//                     v->arch.irq_new_pending = 1;
-//             }
-
-               if ( v->arch.irq_new_pending ) {
-                       v->arch.irq_new_pending = 0;
-                       vmx_check_pending_irq(v);
-               }
+        /* FIXME: Check event pending indicator, and set
+         * pending bit if necessary to inject back to guest.
+         * Should be careful about window between this check
+         * and above assist, since IOPACKET_PORT shouldn't be
+         * injected into vmx domain.
+         *
+         * Now hardcode the vector as 0x10 temporarily
+         */
+//       if (event_pending(v)&&(!(VLSAPIC_INSVC(v,0)&(1UL<<0x10)))) {
+//           VCPU(v, irr[0]) |= 1UL << 0x10;
+//           v->arch.irq_new_pending = 1;
+//       }
+
+        if ( v->arch.irq_new_pending ) {
+            v->arch.irq_new_pending = 0;
+            vmx_check_pending_irq(v);
+        }
 //        if (VCPU(v,vac).a_bsw){
 //            save_banked_regs_to_vpd(v,regs);
 //        }
 
-       }
+    }
 }
 
 extern ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
@@ -302,7 +228,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
 {
     IA64_PSR vpsr;
     int type=ISIDE_TLB;
-    u64 vhpt_adr, gppa;
+    u64 vhpt_adr, gppa, pteval, rr, itir;
     ISR misr;
 //    REGS *regs;
     thash_data_t *data;
@@ -314,18 +240,6 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
     vpsr.val = vmx_vcpu_get_psr(v);
     misr.val=VMX(v,cr_isr);
 
-/*  TODO
-    if(v->domain->id && vec == 2 &&
-       vpsr.dt == 0 && is_gpa_io(MASK_PMA(vaddr))){
-        emulate_ins(&v);
-        return;
-    }
-*/
-/*    if(vadr == 0x1ea18c00 ){
-        ia64_clear_ic();
-        while(1);
-    }
- */
     if(is_physical_mode(v)&&(!(vadr<<1>>62))){
        if(vec==2){
            if(v->domain!=dom0&&__gpfn_is_io(v->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
@@ -338,31 +252,25 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
     }
     if(vec == 1) type = ISIDE_TLB;
     else if(vec == 2) type = DSIDE_TLB;
-    else panic_domain(regs,"wrong vec:%0xlx\n",vec);
+    else panic_domain(regs,"wrong vec:%lx\n",vec);
 
 //    prepare_if_physical_mode(v);
 
     if((data=vtlb_lookup(v, vadr,type))!=0){
-//     gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
-//        if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
+//       gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
+//       if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
         if(v->domain!=dom0 && data->io && type==DSIDE_TLB ){
-               gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
-            emulate_io_inst(v, gppa, data->ma);
+            if(data->pl >= ((regs->cr_ipsr>>IA64_PSR_CPL0_BIT)&3)){
+                gppa = 
(vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
+                emulate_io_inst(v, gppa, data->ma);
+            }else{
+                vcpu_set_isr(v,misr.val);
+                data_access_rights(v, vadr);
+            }
             return IA64_FAULT;
         }
 
-//     if ( data->ps != vrr.ps ) {
-//             machine_tlb_insert(v, data);
-//     }
-//     else {
-/*        if ( data->contiguous&&(!data->tc)){
-               machine_tlb_insert(v, data);
-        }
-        else{
- */
-            thash_vhpt_insert(&v->arch.vhpt,data->page_flags, data->itir ,vadr);
-//        }
-//         }
+        thash_vhpt_insert(v,data->page_flags, data->itir ,vadr);
     }else if(type == DSIDE_TLB){
         if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
             if(vpsr.ic){
@@ -381,7 +289,13 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
             }
         } else{
             vmx_vcpu_thash(v, vadr, &vhpt_adr);
-            if(vhpt_lookup(vhpt_adr) ||  vtlb_lookup(v, vhpt_adr, DSIDE_TLB)){
+            if(!guest_vhpt_lookup(vhpt_adr, &pteval)){
+                if (pteval & _PAGE_P){
+                    vcpu_get_rr(v, vadr, &rr);
+                    itir = rr&(RR_RID_MASK | RR_PS_MASK);
+                    thash_purge_and_insert(v, pteval, itir , vadr);
+                    return IA64_NO_FAULT;
+                }
                 if(vpsr.ic){
                     vcpu_set_isr(v, misr.val);
                     dtlb_fault(v, vadr);
@@ -423,7 +337,13 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
             return IA64_FAULT;
         } else{
             vmx_vcpu_thash(v, vadr, &vhpt_adr);
-            if(vhpt_lookup(vhpt_adr) || vtlb_lookup(v, vhpt_adr, DSIDE_TLB)){
+            if(!guest_vhpt_lookup(vhpt_adr, &pteval)){
+                if (pteval & _PAGE_P){
+                    vcpu_get_rr(v, vadr, &rr);
+                    itir = rr&(RR_RID_MASK | RR_PS_MASK);
+                    thash_purge_and_insert(v, pteval, itir , vadr);
+                    return IA64_NO_FAULT;
+                }
                 if(!vpsr.ic){
                     misr.ni=1;
                 }
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c  Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/vmx/vtlb.c  Fri Jun 02 12:31:48 2006 -0500
@@ -36,27 +36,26 @@ thash_data_t *__alloc_chain(thash_cb_t *
 
 static void cch_mem_init(thash_cb_t *hcb)
 {
-    thash_data_t *p, *q;
+    int num;
+    thash_data_t *p;
 
     hcb->cch_freelist = p = hcb->cch_buf;
-
-    for ( q=p+1; (u64)(q + 1) <= (u64)hcb->cch_buf + hcb->cch_sz;
-        p++, q++ ) {
-        p->next = q;
-    }
+    num = (hcb->cch_sz/sizeof(thash_data_t))-1;
+    do{
+        p->next =p+1;
+        p++;
+        num--;
+    }while(num);
     p->next = NULL;
 }
 
 static thash_data_t *cch_alloc(thash_cb_t *hcb)
 {
     thash_data_t *p;
-
     if ( (p = hcb->cch_freelist) != NULL ) {
         hcb->cch_freelist = p->next;
-        return p;
-    }else{
-        return NULL;
-    }
+    }
+    return p;
 }
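/*
 * Illustrative sketch, not part of this changeset: the cch_* helpers above
 * manage the collision-chain entries as a singly linked free list threaded
 * through the entries themselves -- cch_mem_init() strings every entry of
 * the buffer onto the list (it assumes at least two entries, matching the
 * do/while above), cch_alloc() pops the head, cch_free() pushes one back.
 * A minimal standalone version with a simplified node type:
 */
#include <stddef.h>

struct cch_node {
    struct cch_node *next;
    unsigned long payload;              /* stands in for the real TLB fields */
};

/* Thread all 'count' entries of 'buf' onto a free list; returns its head. */
static struct cch_node *cch_freelist_init(struct cch_node *buf, size_t count)
{
    size_t i;
    if (count == 0)
        return NULL;
    for (i = 0; i + 1 < count; i++)
        buf[i].next = &buf[i + 1];
    buf[count - 1].next = NULL;
    return buf;
}

/* Pop one entry, or NULL when the pool is exhausted. */
static struct cch_node *cch_freelist_alloc(struct cch_node **head)
{
    struct cch_node *p = *head;
    if (p != NULL)
        *head = p->next;
    return p;
}

/* Push an entry back onto the pool. */
static void cch_freelist_free(struct cch_node **head, struct cch_node *p)
{
    p->next = *head;
    *head = p;
}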
 
 static void cch_free(thash_cb_t *hcb, thash_data_t *cch)
@@ -101,17 +100,13 @@ static void __rem_hash_head(thash_cb_t *
 static void __rem_hash_head(thash_cb_t *hcb, thash_data_t *hash)
 {
     thash_data_t *next=hash->next;
-
-/*    if ( hcb->remove_notifier ) {
-        (hcb->remove_notifier)(hcb,hash);
-    } */
-    if ( next != NULL ) {
+    if ( next) {
         next->len=hash->len-1;
         *hash = *next;
         cch_free (hcb, next);
     }
     else {
-        INVALIDATE_HASH_HEADER(hcb, hash);
+        hash->ti=1;
     }
 }
 
@@ -145,125 +140,109 @@ thash_data_t *__vtr_lookup(VCPU *vcpu, u
 }
 
 
-/*
- * Get the machine format of VHPT entry.
- *    PARAS:
- *  1: tlb: means the tlb format hash entry converting to VHPT.
- *  2: va means the guest virtual address that must be coverd by
- *     the translated machine VHPT.
- *  3: vhpt: means the machine format VHPT converting from tlb.
- *    NOTES:
- *  1: In case of the machine address is discontiguous,
- *     "tlb" needs to be covered by several machine VHPT. va
- *     is used to choice one of them.
- *  2: Foreign map is supported in this API.
- *    RETURN:
- *  0/1: means successful or fail.
- *
- */
-int __tlb_to_vhpt(thash_cb_t *hcb, thash_data_t *vhpt, u64 va)
-{
-    u64 padr,pte;
-    ASSERT ( hcb->ht == THASH_VHPT );
-    padr = vhpt->ppn >>(vhpt->ps-ARCH_PAGE_SHIFT)<<vhpt->ps;
-    padr += va&((1UL<<vhpt->ps)-1);
-    pte=lookup_domain_mpa(current->domain,padr);
-    if((pte>>56))
-        return 0;
-    vhpt->etag = ia64_ttag(va);
-    vhpt->ps = PAGE_SHIFT;
-    vhpt->ppn = (pte&((1UL<<IA64_MAX_PHYS_BITS)-(1UL<<PAGE_SHIFT)))>>ARCH_PAGE_SHIFT;
-    vhpt->next = 0;
-    return 1;
-}
-
-static void thash_remove_cch(thash_cb_t *hcb, thash_data_t *hash)
+static void thash_recycle_cch(thash_cb_t *hcb, thash_data_t *hash)
 {
     thash_data_t *p;
-    if(hash->next){
-        p=hash->next;
-        while(p->next)
-            p=p->next;
-        p->next=hcb->cch_freelist;
-        hcb->cch_freelist=hash->next;
-        hash->next=0;
+    int i=0;
+    
+    p=hash;
+    for(i=0; i < MAX_CCN_DEPTH; i++){
+        p=p->next;
+    }
+    p->next=hcb->cch_freelist;
+    hcb->cch_freelist=hash->next;
+    hash->len=0;
+    hash->next=0;
+}
+
+
+
+
+static void vmx_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
+{
+    u64 tag;
+    thash_data_t *head, *cch;
+    pte = pte & ~PAGE_FLAGS_RV_MASK;
+
+    head = (thash_data_t *)ia64_thash(ifa);
+    tag = ia64_ttag(ifa);
+    if( INVALID_VHPT(head) ) {
+        head->page_flags = pte;
+        head->etag = tag;
+        return;
+    }
+
+    if(head->len>=MAX_CCN_DEPTH){
+        thash_recycle_cch(hcb, head);
+        cch = cch_alloc(hcb);
+    }
+    else{
+        cch = __alloc_chain(hcb);
+    }
+    cch->page_flags=head->page_flags;
+    cch->etag=head->etag;
+    cch->next=head->next;
+    head->page_flags=pte;
+    head->etag=tag;
+    head->next = cch;
+    head->len = cch->len+1;
+    cch->len = 0;
+    return;
+}
+
+void thash_vhpt_insert(VCPU *v, u64 pte, u64 itir, u64 va)
+{
+    u64 phy_pte;
+    phy_pte=translate_phy_pte(v, &pte, itir, va);
+    vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va);
+}
+/*
+ *   vhpt lookup
+ */
+
+thash_data_t * vhpt_lookup(u64 va)
+{
+    thash_data_t *hash, *head;
+    u64 tag, pte;
+    head = (thash_data_t *)ia64_thash(va);
+    hash=head;
+    tag = ia64_ttag(va);
+    do{
+        if(hash->etag == tag)
+            break;
+        hash=hash->next;
+    }while(hash);
+    if(hash && hash!=head){
+        pte = hash->page_flags;
+        hash->page_flags = head->page_flags;
+        head->page_flags = pte;
+        tag = hash->etag;
+        hash->etag = head->etag;
+        head->etag = tag;
+        head->len = hash->len;
         hash->len=0;
-    }
-}
-
-/*  vhpt only has entries with PAGE_SIZE page size */
-
-void thash_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
-{
-    thash_data_t   vhpt_entry, *hash_table, *cch;
-    vhpt_entry.page_flags = pte & ~PAGE_FLAGS_RV_MASK;
-    vhpt_entry.itir=itir;
-
-    if ( !__tlb_to_vhpt(hcb, &vhpt_entry, ifa) ) {
-        return;
-    //panic("Can't convert to machine VHPT entry\n");
-    }
-
-    hash_table = (thash_data_t *)ia64_thash(ifa);
-    if( INVALID_VHPT(hash_table) ) {
-        *hash_table = vhpt_entry;
-        hash_table->next = 0;
-       return;
-    }
-
-    cch = hash_table;
-    while(cch){
-        if(cch->etag == vhpt_entry.etag){
-            if(cch->ppn == vhpt_entry.ppn)
-                return;
-            else
-                while(1);
-        }
-        cch = cch->next;
-    }
-
-    if(hash_table->len>=MAX_CCN_DEPTH){
-       thash_remove_cch(hcb, hash_table);
-       cch = cch_alloc(hcb);
-       *cch = *hash_table;
-        *hash_table = vhpt_entry;
-       hash_table->len = 1;
-        hash_table->next = cch;
-       return;
-    }
-
-    // TODO: Add collision chain length limitation.
-     cch = __alloc_chain(hcb);
-     if(cch == NULL){
-           *hash_table = vhpt_entry;
-            hash_table->next = 0;
-     }else{
-            *cch = *hash_table;
-            *hash_table = vhpt_entry;
-            hash_table->next = cch;
-           hash_table->len = cch->len + 1;
-           cch->len = 0;
-
-    }
-    return /*hash_table*/;
-}
-
-/*
- *   vhpt lookup
- */
-
-thash_data_t * vhpt_lookup(u64 va)
-{
-    thash_data_t *hash;
-    u64 tag;
-    hash = (thash_data_t *)ia64_thash(va);
-    tag = ia64_ttag(va);
-    while(hash){
-       if(hash->etag == tag)
-               return hash;
-        hash=hash->next;
-    }
-    return NULL;
+        return head;
+    }
+    return hash;
+}
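/*
 * Illustrative sketch, not from the changeset: the rewritten vhpt_lookup()
 * above walks the collision chain and, when the match sits deeper than the
 * fixed head slot, swaps the matched entry's contents (tag, page_flags, len)
 * into the head so the hardware VHPT walker hits it directly on the next
 * miss.  The same move-to-front-by-swap idea on a simplified entry type:
 */
#include <stddef.h>

struct entry {
    unsigned long tag;
    unsigned long data;
    struct entry *next;        /* head is a fixed slot, so contents are swapped */
};

static struct entry *chain_lookup_mtf(struct entry *head, unsigned long tag)
{
    struct entry *p = head;
    while (p && p->tag != tag)
        p = p->next;
    if (p && p != head) {
        unsigned long t;
        t = p->tag;  p->tag  = head->tag;  head->tag  = t;
        t = p->data; p->data = head->data; head->data = t;
        return head;           /* the hot translation now lives in the head slot */
    }
    return p;                  /* head hit, or NULL on a miss */
}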
+
+u64 guest_vhpt_lookup(u64 iha, u64 *pte)
+{
+    u64 ret;
+    vhpt_lookup(iha);
+    asm volatile ("rsm psr.ic|psr.i;;"
+                  "srlz.d;;"
+                  "ld8.s r9=[%1];;"
+                  "tnat.nz p6,p7=r9;;"
+                  "(p6) mov %0=1;"
+                  "(p6) mov r9=r0;"
+                  "(p7) mov %0=r0;"
+                  "(p7) st8 [%2]=r9;;"
+                  "ssm psr.ic;;"
+                  "srlz.d;;"
+                  "ssm psr.i;;"
+             : "=r"(ret) : "r"(iha), "r"(pte):"memory");
+    return ret;
 }
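/*
 * Illustrative sketch, not from the changeset: guest_vhpt_lookup() first
 * calls vhpt_lookup(iha) to prime the machine VHPT for the guest hash
 * address, then the inline asm disables psr.ic/psr.i, performs a speculative
 * 8-byte load (ld8.s) from the guest VHPT entry and tests the NaT bit
 * (tnat.nz): if the load deferred a fault it returns 1 and leaves *pte
 * untouched, otherwise it stores the loaded PTE and returns 0.  A plain-C
 * approximation; try_read_u64() is a hypothetical stand-in for the
 * speculative load and here simply dereferences the address:
 */
#include <stdint.h>

static int try_read_u64(uint64_t addr, uint64_t *val)
{
    *val = *(const uint64_t *)(uintptr_t)addr;   /* real code must survive a miss */
    return 0;
}

/* 0: *pte filled from the guest VHPT entry; 1: the entry was not readable. */
static uint64_t guest_vhpt_lookup_sketch(uint64_t iha, uint64_t *pte)
{
    uint64_t val;

    if (try_read_u64(iha, &val))
        return 1;
    *pte = val;
    return 0;
}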
 
 
@@ -310,7 +289,6 @@ static void vtlb_purge(thash_cb_t *hcb, 
 /*
  *  purge VHPT and machine TLB
  */
-
 static void vhpt_purge(thash_cb_t *hcb, u64 va, u64 ps)
 {
     thash_data_t *hash_table, *prev, *next;
@@ -332,7 +310,7 @@ static void vhpt_purge(thash_cb_t *hcb, 
                     prev->next=next->next;
                     cch_free(hcb,next);
                     hash_table->len--;
-                    break;
+                    break; 
                 }
                 prev=next;
                 next=next->next;
@@ -347,16 +325,21 @@ static void vhpt_purge(thash_cb_t *hcb, 
  * Recycle all collisions chain in VTLB or VHPT.
  *
  */
-
-void thash_recycle_cch(thash_cb_t *hcb)
-{
-    thash_data_t    *hash_table;
-
-    hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
-    for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
-        thash_remove_cch(hcb,hash_table);
-    }
-}
+void thash_recycle_cch_all(thash_cb_t *hcb)
+{
+    int num;
+    thash_data_t *head;
+    head=hcb->hash;
+    num = (hcb->hash_sz/sizeof(thash_data_t));
+    do{
+        head->len = 0;
+        head->next = 0;
+        head++;
+        num--;
+    }while(num);
+    cch_mem_init(hcb);
+}
+
 
 thash_data_t *__alloc_chain(thash_cb_t *hcb)
 {
@@ -364,7 +347,7 @@ thash_data_t *__alloc_chain(thash_cb_t *
 
     cch = cch_alloc(hcb);
     if(cch == NULL){
-        thash_recycle_cch(hcb);
+        thash_recycle_cch_all(hcb);
         cch = cch_alloc(hcb);
     }
     return cch;
@@ -385,51 +368,38 @@ void vtlb_insert(thash_cb_t *hcb, u64 pt
     /* int flag; */
     ia64_rr vrr;
     /* u64 gppn, ppns, ppne; */
-    u64 tag, ps;
-    ps = itir_ps(itir);
+    u64 tag;
     vcpu_get_rr(current, va, &vrr.rrval);
-    if (vrr.ps != ps) {
+#ifdef VTLB_DEBUG    
+    if (vrr.ps != itir_ps(itir)) {
 //        machine_tlb_insert(hcb->vcpu, entry);
         panic_domain(NULL, "not preferred ps with va: 0x%lx vrr.ps=%d ps=%ld\n",
-                     va, vrr.ps, ps);
+             va, vrr.ps, itir_ps(itir));
         return;
     }
+#endif
     hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag);
     if( INVALID_TLB(hash_table) ) {
         hash_table->page_flags = pte;
         hash_table->itir=itir;
         hash_table->etag=tag;
         hash_table->next = 0;
-    }
-    else if (hash_table->len>=MAX_CCN_DEPTH){
-        thash_remove_cch(hcb, hash_table);
+        return;
+    }
+    if (hash_table->len>=MAX_CCN_DEPTH){
+        thash_recycle_cch(hcb, hash_table);
         cch = cch_alloc(hcb);
-        *cch = *hash_table;
-        hash_table->page_flags = pte;
-        hash_table->itir=itir;
-        hash_table->etag=tag;
-        hash_table->len = 1;
-        hash_table->next = cch;
-    }
-
+    }
     else {
-        // TODO: Add collision chain length limitation.
         cch = __alloc_chain(hcb);
-        if(cch == NULL){
-            hash_table->page_flags = pte;
-            hash_table->itir=itir;
-            hash_table->etag=tag;
-            hash_table->next = 0;
-        }else{
-            *cch = *hash_table;
-            hash_table->page_flags = pte;
-            hash_table->itir=itir;
-            hash_table->etag=tag;
-            hash_table->next = cch;
-            hash_table->len = cch->len + 1;
-            cch->len = 0;
-        }
-    }
+    }
+    *cch = *hash_table;
+    hash_table->page_flags = pte;
+    hash_table->itir=itir;
+    hash_table->etag=tag;
+    hash_table->next = cch;
+    hash_table->len = cch->len + 1;
+    cch->len = 0;
     return ;
 }
 
@@ -473,6 +443,23 @@ void thash_purge_entries(VCPU *v, u64 va
     vhpt_purge(&v->arch.vhpt, va, ps);
 }
 
+u64 translate_phy_pte(VCPU *v, u64 *pte, u64 itir, u64 va)
+{
+    u64 ps, addr;
+    union pte_flags phy_pte;
+    ps = itir_ps(itir);
+    phy_pte.val = *pte;
+    addr = *pte;
+    addr = ((addr & _PAGE_PPN_MASK)>>ps<<ps)|(va&((1UL<<ps)-1));
+    addr = lookup_domain_mpa(v->domain, addr);
+    if(addr & GPFN_IO_MASK){
+        *pte |= VTLB_PTE_IO;
+        return -1;
+    }
+    phy_pte.ppn = addr >> ARCH_PAGE_SHIFT;
+    return phy_pte.val;
+}
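/*
 * Illustrative sketch, not from the changeset: translate_phy_pte() above
 * rebuilds the guest-physical address covered by the PTE -- page-frame bits
 * come from the PTE's ppn field truncated to the page size 2^ps, the offset
 * comes from the faulting va -- and then asks lookup_domain_mpa() for the
 * machine address, flagging I/O regions via GPFN_IO_MASK.  The address
 * arithmetic in isolation, with a worked example:
 */
#include <stdint.h>
#include <assert.h>

static uint64_t page_plus_offset(uint64_t pte_paddr, uint64_t va, unsigned ps)
{
    uint64_t page_mask = (1ULL << ps) - 1;
    return (pte_paddr & ~page_mask) | (va & page_mask);
}

int main(void)
{
    /* 16KB pages (ps = 14): page 0x4000000 plus in-page offset 0x123 */
    assert(page_plus_offset(0x4000abcULL, 0x7fff0123ULL, 14) == 0x4000123ULL);
    return 0;
}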
+
 
 /*
  * Purge overlap TCs and then insert the new entry to emulate itc ops.
@@ -480,59 +467,79 @@ void thash_purge_entries(VCPU *v, u64 va
  */
 void thash_purge_and_insert(VCPU *v, u64 pte, u64 itir, u64 ifa)
 {
-    u64 ps, va;
+    u64 ps;//, va;
+    u64 phy_pte;
     ps = itir_ps(itir);
-    va = PAGEALIGN(ifa,ps);
-    if(vcpu_quick_region_check(v->arch.tc_regions,va))
-        vtlb_purge(&v->arch.vtlb, va, ps);
-    vhpt_purge(&v->arch.vhpt, va, ps);
-    if((ps!=PAGE_SHIFT)||(pte&VTLB_PTE_IO)){
-        vtlb_insert(&v->arch.vtlb, pte, itir, va);
-       vcpu_quick_region_set(PSCBX(v,tc_regions),va);
-    }  
-    if(!(pte&VTLB_PTE_IO)){
-        va = PAGEALIGN(ifa,PAGE_SHIFT);
-        thash_vhpt_insert(&v->arch.vhpt, pte, itir, va);
-    }
-}
-
-
+
+    if(VMX_DOMAIN(v)){
+        phy_pte = translate_phy_pte(v, &pte, itir, ifa);
+        if(ps==PAGE_SHIFT){
+            if(!(pte&VTLB_PTE_IO)){
+                vhpt_purge(&v->arch.vhpt, ifa, ps);
+                vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
+            }
+            else{
+                vhpt_purge(&v->arch.vhpt, ifa, ps);
+                vtlb_insert(&v->arch.vtlb, pte, itir, ifa);
+                vcpu_quick_region_set(PSCBX(v,tc_regions),ifa);
+            }
+        }
+        else{
+            vhpt_purge(&v->arch.vhpt, ifa, ps);
+            vtlb_insert(&v->arch.vtlb, pte, itir, ifa);
+            vcpu_quick_region_set(PSCBX(v,tc_regions),ifa);
+            if(!(pte&VTLB_PTE_IO)){
+                vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
+            }
+        }
+    }
+    else{
+        phy_pte = translate_phy_pte(v, &pte, itir, ifa);
+        if(ps!=PAGE_SHIFT){
+            vtlb_insert(&v->arch.vtlb, pte, itir, ifa);
+            vcpu_quick_region_set(PSCBX(v,tc_regions),ifa);
+        }
+        machine_tlb_purge(ifa, ps);
+        vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
+    }
+}
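/*
 * Illustrative sketch, not from the changeset: for a VMX domain the
 * rewritten thash_purge_and_insert() above decides, per mapping, which
 * software structure receives the translation -- ordinary PAGE_SHIFT-sized
 * RAM mappings go only into the machine VHPT, I/O mappings go only into the
 * software vTLB (so accesses keep faulting and get emulated), and non-default
 * page sizes are tracked in the vTLB and additionally mapped in the VHPT.
 * The decision reduced to a small table (PAGE_SHIFT assumed to be 14):
 */
#include <stdio.h>

#define PAGE_SHIFT_SKETCH 14

enum { INS_VHPT = 1, INS_VTLB = 2 };

static int placement(unsigned ps, int is_io)
{
    if (is_io)
        return INS_VTLB;                     /* keep faulting, emulate the access */
    if (ps != PAGE_SHIFT_SKETCH)
        return INS_VTLB | INS_VHPT;          /* remember the large page, map a piece */
    return INS_VHPT;                         /* common case: machine VHPT only */
}

int main(void)
{
    printf("16KB RAM -> %d\n", placement(14, 0));   /* 1 */
    printf("16MB RAM -> %d\n", placement(24, 0));   /* 3 */
    printf("I/O page -> %d\n", placement(14, 1));   /* 2 */
    return 0;
}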
 
 /*
  * Purge all TCs or VHPT entries including those in Hash table.
  *
  */
 
-// TODO: add sections.
+//TODO: add sections.
 void thash_purge_all(VCPU *v)
 {
-    thash_data_t    *hash_table;
-    /* thash_data_t    *entry; */
-    thash_cb_t  *hcb,*vhpt;
-    /* u64 i, start, end; */
-    hcb =&v->arch.vtlb;
+    int num;
+    thash_data_t *head;
+    thash_cb_t  *vtlb,*vhpt;
+    vtlb =&v->arch.vtlb;
     vhpt =&v->arch.vhpt;
-#ifdef  VTLB_DEBUG
-       extern u64  sanity_check;
-    static u64 statistics_before_purge_all=0;
-    if ( statistics_before_purge_all ) {
-       sanity_check = 1;
-        check_vtlb_sanity(hcb);
-    }
-#endif
-    ASSERT ( hcb->ht == THASH_TLB );
-
-    hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
-    for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
-        INVALIDATE_TLB_HEADER(hash_table);
-    }
-    cch_mem_init (hcb);
-
-    hash_table = (thash_data_t*)((u64)vhpt->hash + vhpt->hash_sz);
-    for (--hash_table;(u64)hash_table >= (u64)vhpt->hash;hash_table--) {
-        INVALIDATE_VHPT_HEADER(hash_table);
-    }
-    cch_mem_init (vhpt);
+
+    head=vtlb->hash;
+    num = (vtlb->hash_sz/sizeof(thash_data_t));
+    do{
+        head->page_flags = 0;
+        head->etag = 1UL<<63;
+        head->next = 0;
+        head++;
+        num--;
+    }while(num);
+    cch_mem_init(vtlb);
+    
+    head=vhpt->hash;
+    num = (vhpt->hash_sz/sizeof(thash_data_t));
+    do{
+        head->page_flags = 0;
+        head->etag = 1UL<<63;
+        head->next = 0;
+        head++;
+        num--;
+    }while(num);
+    cch_mem_init(vhpt);
+
     local_flush_tlb_all();
 }
 
@@ -547,7 +554,7 @@ void thash_purge_all(VCPU *v)
 
 thash_data_t *vtlb_lookup(VCPU *v, u64 va,int is_data)
 {
-    thash_data_t    *hash_table, *cch;
+    thash_data_t  *cch;
     u64     tag;
     ia64_rr vrr;
     thash_cb_t * hcb= &v->arch.vtlb;
@@ -559,18 +566,14 @@ thash_data_t *vtlb_lookup(VCPU *v, u64 v
     if(vcpu_quick_region_check(v->arch.tc_regions,va)==0)
         return NULL;
     
-
     vcpu_get_rr(v,va,&vrr.rrval);
-    hash_table = vsa_thash( hcb->pta, va, vrr.rrval, &tag);
-
-    if ( INVALID_ENTRY(hcb, hash_table ) )
-        return NULL;
-
-
-    for (cch=hash_table; cch; cch = cch->next) {
+    cch = vsa_thash( hcb->pta, va, vrr.rrval, &tag);
+
+    do{
         if(cch->etag == tag)
             return cch;
-    }
+        cch = cch->next;
+    }while(cch);
     return NULL;
 }
 
@@ -580,198 +583,33 @@ thash_data_t *vtlb_lookup(VCPU *v, u64 v
  */
 void thash_init(thash_cb_t *hcb, u64 sz)
 {
-    thash_data_t    *hash_table;
-
-    cch_mem_init (hcb);
+    int num;
+    thash_data_t *head, *p;
+
     hcb->pta.val = (unsigned long)hcb->hash;
     hcb->pta.vf = 1;
     hcb->pta.ve = 1;
     hcb->pta.size = sz;
-//    hcb->get_rr_fn = vmmu_get_rr;
-    ASSERT ( hcb->hash_sz % sizeof(thash_data_t) == 0 );
-    hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
-
-    for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
-        INVALIDATE_HASH_HEADER(hcb,hash_table);
-    }
-}
-
-#ifdef  VTLB_DEBUG
-/*
-static  u64 cch_length_statistics[MAX_CCH_LENGTH+1];
-u64  sanity_check=0;
-u64 vtlb_chain_sanity(thash_cb_t *vtlb, thash_cb_t *vhpt, thash_data_t *hash)
-{
-    thash_data_t *cch;
-    thash_data_t    *ovl;
-    search_section_t s_sect;
-    u64     num=0;
-
-    s_sect.v = 0;
-    for (cch=hash; cch; cch=cch->next) {
-        ovl = thash_find_overlap(vhpt, cch, s_sect);
-        while ( ovl != NULL ) {
-            ovl->checked = 1;
-            ovl = (vhpt->next_overlap)(vhpt);
-        };
-        num ++;
-    }
-    if ( num >= MAX_CCH_LENGTH ) {
-       cch_length_statistics[MAX_CCH_LENGTH] ++;
-    }
-    else {
-       cch_length_statistics[num] ++;
-    }
-    return num;
-}
-
-void check_vtlb_sanity(thash_cb_t *vtlb)
-{
-//    struct page_info *page;
-    u64  hash_num, i, psr;
-    static u64 check_ok_num, check_fail_num,check_invalid;
-//  void *vb1, *vb2;
-    thash_data_t  *hash, *cch;
-    thash_data_t    *ovl;
-    search_section_t s_sect;
-    thash_cb_t *vhpt = vtlb->vhpt;
-    u64   invalid_ratio;
- 
-    if ( sanity_check == 0 ) return;
-    sanity_check --;
-    s_sect.v = 0;
-//    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0);
-//    if ( page == NULL ) {
-//        panic("No enough contiguous memory for init_domain_mm\n");
-//    };
-//    vb1 = page_to_virt(page);
-//    printf("Allocated page=%lp vbase=%lp\n", page, vb1);
-//    vb2 = vb1 + vtlb->hash_sz;
-    hash_num = vhpt->hash_sz / sizeof(thash_data_t);
-//    printf("vb2=%lp, size=%lx hash_num=%lx\n", vb2, vhpt->hash_sz, hash_num);
-    printf("vtlb=%p, hash=%p size=0x%lx; vhpt=%p, hash=%p size=0x%lx\n", 
-                vtlb, vtlb->hash,vtlb->hash_sz,
-                vhpt, vhpt->hash, vhpt->hash_sz);
-    //memcpy(vb1, vtlb->hash, vtlb->hash_sz);
-    //memcpy(vb2, vhpt->hash, vhpt->hash_sz);
-    for ( i=0; i < sizeof(cch_length_statistics)/sizeof(cch_length_statistics[0]); i++ ) {
-       cch_length_statistics[i] = 0;
-    }
-
-    local_irq_save(psr);
-
-    hash = vhpt->hash;
-    for (i=0; i < hash_num; i++) {
-        if ( !INVALID_ENTRY(vhpt, hash) ) {
-            for ( cch= hash; cch; cch=cch->next) {
-                cch->checked = 0;
-            }
-        }
-        hash ++;
-    }
-    printf("Done vhpt clear checked flag, hash_num=0x%lx\n", hash_num);
-    check_invalid = 0;
-    check_ok_num=0;
-    hash = vtlb->hash;
-    for ( i=0; i< hash_num; i++ ) {
-        if ( !INVALID_ENTRY(vtlb, hash) ) {
-            check_ok_num += vtlb_chain_sanity(vtlb, vhpt, hash);
-        }
-        else {
-            check_invalid++;
-        }
-        hash ++;
-    }
-    printf("Done vtlb entry check, hash=%p\n", hash);
-    printf("check_ok_num = 0x%lx check_invalid=0x%lx\n", check_ok_num,check_invalid);
-    invalid_ratio = 1000*check_invalid / hash_num;
-    printf("%02ld.%01ld%% entries are invalid\n", 
-               invalid_ratio/10, invalid_ratio % 10 );
-    for (i=0; i<NDTRS; i++) {
-        ovl = thash_find_overlap(vhpt, &vtlb->ts->dtr[i], s_sect);
-        while ( ovl != NULL ) {
-            ovl->checked = 1;
-            ovl = (vhpt->next_overlap)(vhpt);
-        };
-    }
-    printf("Done dTR\n");
-    for (i=0; i<NITRS; i++) {
-        ovl = thash_find_overlap(vhpt, &vtlb->ts->itr[i], s_sect);
-        while ( ovl != NULL ) {
-            ovl->checked = 1;
-            ovl = (vhpt->next_overlap)(vhpt);
-        };
-    }
-    printf("Done iTR\n");
-    check_fail_num = 0;
-    check_invalid = 0;
-    check_ok_num=0;
-    hash = vhpt->hash;
-    for (i=0; i < hash_num; i++) {
-        if ( !INVALID_ENTRY(vhpt, hash) ) {
-            for ( cch= hash; cch; cch=cch->next) {
-                if ( !cch->checked ) {
-                    printf ("!!!Hash=%p cch=%p not within vtlb\n", hash, cch);
-                    check_fail_num ++;
-                }
-                else {
-                    check_ok_num++;
-                }
-            }
-        }
-        else {
-            check_invalid ++;
-        }
-        hash ++;
-    }
-    local_irq_restore(psr);
-    printf("check_ok_num=0x%lx check_fail_num=0x%lx check_invalid=0x%lx\n", 
-            check_ok_num, check_fail_num, check_invalid);
-    //memcpy(vtlb->hash, vb1, vtlb->hash_sz);
-    //memcpy(vhpt->hash, vb2, vhpt->hash_sz);
-    printf("The statistics of collision chain length is listed\n");
-    for ( i=0; i < sizeof(cch_length_statistics)/sizeof(cch_length_statistics[0]); i++ ) {
-       printf("CCH length=%02ld, chain number=%ld\n", i, cch_length_statistics[i]);
-    }
-//    free_domheap_pages(page, VCPU_TLB_ORDER);
-    printf("Done check_vtlb\n");
-}
-
-void dump_vtlb(thash_cb_t *vtlb)
-{
-    static u64  dump_vtlb=0;
-    thash_data_t  *hash, *cch, *tr;
-    u64     hash_num,i;
-
-    if ( dump_vtlb == 0 ) return;
-    dump_vtlb --;
-    hash_num = vtlb->hash_sz / sizeof(thash_data_t);
-    hash = vtlb->hash;
-
-    printf("Dump vTC\n");
-    for ( i = 0; i < hash_num; i++ ) {
-        if ( !INVALID_ENTRY(vtlb, hash) ) {
-            printf("VTLB at hash=%p\n", hash);
-            for (cch=hash; cch; cch=cch->next) {
-                printf("Entry %p va=%lx ps=%d rid=%d\n",
-                    cch, cch->vadr, cch->ps, cch->rid);
-            }
-        }
-        hash ++;
-    }
-    printf("Dump vDTR\n");
-    for (i=0; i<NDTRS; i++) {
-        tr = &DTR(vtlb,i);
-        printf("Entry %p va=%lx ps=%d rid=%d\n",
-                    tr, tr->vadr, tr->ps, tr->rid);
-    }
-    printf("Dump vITR\n");
-    for (i=0; i<NITRS; i++) {
-        tr = &ITR(vtlb,i);
-        printf("Entry %p va=%lx ps=%d rid=%d\n",
-                    tr, tr->vadr, tr->ps, tr->rid);
-    }
-    printf("End of vTLB dump\n");
-}
-*/
-#endif
+    hcb->cch_rec_head = hcb->hash;
+    
+    head=hcb->hash;
+    num = (hcb->hash_sz/sizeof(thash_data_t));
+    do{
+        head->itir = PAGE_SHIFT<<2;
+        head->etag = 1UL<<63;
+        head->next = 0;
+        head++;
+        num--;
+    }while(num);
+    
+    hcb->cch_freelist = p = hcb->cch_buf;
+    num = (hcb->cch_sz/sizeof(thash_data_t))-1;
+    do{
+        p->itir = PAGE_SHIFT<<2;
+        p->next =p+1;
+        p++;
+        num--;
+    }while(num);
+    p->itir = PAGE_SHIFT<<2;
+    p->next = NULL;
+}
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/xen/dom_fw.c
--- a/xen/arch/ia64/xen/dom_fw.c        Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/xen/dom_fw.c        Fri Jun 02 12:31:48 2006 -0500
@@ -15,6 +15,7 @@
 #include <asm/pal.h>
 #include <asm/sal.h>
 #include <asm/meminit.h>
+#include <asm/fpswa.h>
 #include <xen/compile.h>
 #include <xen/acpi.h>
 
@@ -60,6 +61,29 @@ dom_pa(unsigned long imva)
     } while (0)
 
 // builds a hypercall bundle at domain physical address
+static void dom_fpswa_hypercall_patch(struct domain *d)
+{
+       unsigned long *entry_imva, *patch_imva;
+       unsigned long entry_paddr = FW_HYPERCALL_FPSWA_ENTRY_PADDR;
+       unsigned long patch_paddr = FW_HYPERCALL_FPSWA_PATCH_PADDR;
+
+#ifndef CONFIG_XEN_IA64_DOM0_VP
+       if (d == dom0) {
+               entry_paddr += dom0_start;
+               patch_paddr += dom0_start;
+       }
+#endif
+       ASSIGN_NEW_DOMAIN_PAGE_IF_DOM0(d, entry_paddr);
+       ASSIGN_NEW_DOMAIN_PAGE_IF_DOM0(d, patch_paddr);
+       entry_imva = (unsigned long *) domain_mpa_to_imva(d, entry_paddr);
+       patch_imva = (unsigned long *) domain_mpa_to_imva(d, patch_paddr);
+
+       *entry_imva++ = patch_paddr;
+       *entry_imva   = 0;
+       build_hypercall_bundle(patch_imva, d->arch.breakimm, FW_HYPERCALL_FPSWA, 1);
+}
+
+// builds a hypercall bundle at domain physical address
 static void dom_efi_hypercall_patch(struct domain *d, unsigned long paddr, unsigned long hypercall)
 {
        unsigned long *imva;
@@ -71,7 +95,6 @@ static void dom_efi_hypercall_patch(stru
        imva = (unsigned long *) domain_mpa_to_imva(d, paddr);
        build_hypercall_bundle(imva, d->arch.breakimm, hypercall, 1);
 }
-
 
 // builds a hypercall bundle at domain physical address
 static void dom_fw_hypercall_patch(struct domain *d, unsigned long paddr, 
unsigned long hypercall,unsigned long ret)
@@ -697,6 +720,7 @@ struct dom0_passthrough_arg {
 struct dom0_passthrough_arg {
 #ifdef CONFIG_XEN_IA64_DOM0_VP
     struct domain*      d;
+    int                 flags;
 #endif
     efi_memory_desc_t *md;
     int*                i;
@@ -711,7 +735,7 @@ dom_fw_dom0_passthrough(efi_memory_desc_
 #ifdef CONFIG_XEN_IA64_DOM0_VP
     struct domain* d = arg->d;
     u64 start = md->phys_addr;
-    u64 end = start + (md->num_pages << EFI_PAGE_SHIFT);
+    u64 size = md->num_pages << EFI_PAGE_SHIFT;
 
     if (md->type == EFI_MEMORY_MAPPED_IO ||
         md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
@@ -720,13 +744,12 @@ dom_fw_dom0_passthrough(efi_memory_desc_
         //    It requires impractical memory to map such a huge region
         //    to a domain.
         //    For now we don't map it, but later we must fix this.
-        if (md->type == EFI_MEMORY_MAPPED_IO &&
-            ((md->num_pages << EFI_PAGE_SHIFT) > 0x100000000UL))
+        if (md->type == EFI_MEMORY_MAPPED_IO && (size > 0x100000000UL))
             return 0;
 
-        paddr = assign_domain_mmio_page(d, start, end - start);
+        paddr = assign_domain_mmio_page(d, start, size);
     } else
-        paddr = assign_domain_mach_page(d, start, end - start);
+        paddr = assign_domain_mach_page(d, start, size, arg->flags);
 #else
     paddr = md->phys_addr;
 #endif
@@ -771,6 +794,7 @@ dom_fw_init (struct domain *d, const cha
        struct ia64_sal_systab *sal_systab;
        struct ia64_sal_desc_entry_point *sal_ed;
        struct ia64_sal_desc_ap_wakeup *sal_wakeup;
+       fpswa_interface_t *fpswa_inf;
        efi_memory_desc_t *efi_memmap, *md;
        struct ia64_boot_param *bp;
        unsigned long *pfn;
@@ -812,6 +836,7 @@ dom_fw_init (struct domain *d, const cha
        sal_systab  = (void *) cp; cp += sizeof(*sal_systab);
        sal_ed      = (void *) cp; cp += sizeof(*sal_ed);
        sal_wakeup  = (void *) cp; cp += sizeof(*sal_wakeup);
+       fpswa_inf   = (void *) cp; cp += sizeof(*fpswa_inf);
        efi_memmap  = (void *) cp; cp += NUM_MEM_DESCS*sizeof(*efi_memmap);
        bp          = (void *) cp; cp += sizeof(*bp);
        pfn         = (void *) cp; cp += NFUNCPTRS * 2 * sizeof(pfn);
@@ -819,6 +844,7 @@ dom_fw_init (struct domain *d, const cha
 
        /* Initialise for EFI_SET_VIRTUAL_ADDRESS_MAP emulation */
        d->arch.efi_runtime = efi_runtime;
+       d->arch.fpswa_inf   = fpswa_inf;
 
        if (args) {
                if (arglen >= 1024)
@@ -874,9 +900,10 @@ dom_fw_init (struct domain *d, const cha
        }
        if (d == dom0) {
 #ifdef CONFIG_XEN_IA64_DOM0_VP
-# define ASSIGN_DOMAIN_MACH_PAGE(d, p) assign_domain_mach_page(d, p, PAGE_SIZE)
+# define ASSIGN_DOMAIN_MACH_PAGE(d, p) \
+        assign_domain_mach_page((d), (p), PAGE_SIZE, ASSIGN_readonly)
 #else
-# define ASSIGN_DOMAIN_MACH_PAGE(d, p) ({p;})
+# define ASSIGN_DOMAIN_MACH_PAGE(d, p) (p)
 #endif
 
                printf("Domain0 EFI passthrough:");
@@ -960,6 +987,11 @@ dom_fw_init (struct domain *d, const cha
                checksum += *cp;
 
        sal_systab->checksum = -checksum;
+
+       /* Fill in the FPSWA interface: */
+       fpswa_inf->revision = fpswa_interface->revision;
+       dom_fpswa_hypercall_patch(d);
+       fpswa_inf->fpswa = (void *) FW_HYPERCALL_FPSWA_ENTRY_PADDR + start_mpaddr;
 
        i = 0;
        if (d == dom0) {
@@ -990,17 +1022,24 @@ dom_fw_init (struct domain *d, const cha
                /* pass through the I/O port space */
                if (!running_on_sim) {
                        struct dom0_passthrough_arg arg;
+                       arg.md = &efi_memmap[i];
+                       arg.i = &i;
 #ifdef CONFIG_XEN_IA64_DOM0_VP
                        arg.d = d;
-#endif
-                       arg.md = &efi_memmap[i];
-                       arg.i = &i;
+                       arg.flags = ASSIGN_writable;
+#endif
                        //XXX Is this needed?
                        efi_memmap_walk_type(EFI_RUNTIME_SERVICES_CODE,
                                             dom_fw_dom0_passthrough, &arg);
                        // for ACPI table.
+#ifdef CONFIG_XEN_IA64_DOM0_VP
+                       arg.flags = ASSIGN_readonly;
+#endif
                        efi_memmap_walk_type(EFI_RUNTIME_SERVICES_DATA,
                                             dom_fw_dom0_passthrough, &arg);
+#ifdef CONFIG_XEN_IA64_DOM0_VP
+                       arg.flags = ASSIGN_writable;
+#endif
                        efi_memmap_walk_type(EFI_ACPI_RECLAIM_MEMORY,
                                             dom_fw_dom0_passthrough, &arg);
                        efi_memmap_walk_type(EFI_MEMORY_MAPPED_IO,
@@ -1037,7 +1076,7 @@ dom_fw_init (struct domain *d, const cha
        bp->console_info.num_rows = 25;
        bp->console_info.orig_x = 0;
        bp->console_info.orig_y = 24;
-       bp->fpswa = 0;
+       bp->fpswa = dom_pa((unsigned long) fpswa_inf);
        if (d == dom0) {
                // XXX CONFIG_XEN_IA64_DOM0_VP
                // initrd_start address is hard coded in start_kernel()
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/xen/domain.c        Fri Jun 02 12:31:48 2006 -0500
@@ -42,6 +42,7 @@
 
 #include <asm/vcpu.h>   /* for function declarations */
 #include <public/arch-ia64.h>
+#include <xen/domain.h>
 #include <asm/vmx.h>
 #include <asm/vmx_vcpu.h>
 #include <asm/vmx_vpd.h>
@@ -79,6 +80,31 @@ void build_physmap_table(struct domain *
 
 static void try_to_clear_PGC_allocate(struct domain* d,
                                       struct page_info* page);
+
+#ifdef CONFIG_XEN_IA64_DOM0_VP
+static struct domain *dom_xen, *dom_io;
+
+// followings are stolen from arch_init_memory() @ xen/arch/x86/mm.c
+void
+alloc_dom_xen_and_dom_io(void)
+{
+    /*
+     * Initialise our DOMID_XEN domain.
+     * Any Xen-heap pages that we will allow to be mapped will have
+     * their domain field set to dom_xen.
+     */
+    dom_xen = alloc_domain(DOMID_XEN);
+    BUG_ON(dom_xen == NULL);
+
+    /*
+     * Initialise our DOMID_IO domain.
+     * This domain owns I/O pages that are within the range of the page_info
+     * array. Mappings occur at the priv of the caller.
+     */
+    dom_io = alloc_domain(DOMID_IO);
+    BUG_ON(dom_io == NULL);
+}
+#endif
 
 /* this belongs in include/asm, but there doesn't seem to be a suitable place */
 void arch_domain_destroy(struct domain *d)
@@ -612,6 +638,12 @@ share_xen_page_with_guest(struct page_in
     spin_unlock(&d->page_alloc_lock);
 }
 
+void
+share_xen_page_with_privileged_guests(struct page_info *page, int readonly)
+{
+    share_xen_page_with_guest(page, dom_xen, readonly);
+}
+
 //XXX !xxx_present() should be used instread of !xxx_none()?
 static pte_t*
 lookup_alloc_domain_pte(struct domain* d, unsigned long mpaddr)
@@ -793,17 +825,19 @@ assign_new_domain0_page(struct domain *d
 }
 
 /* map a physical address to the specified metaphysical addr */
+// flags: currently only ASSIGN_readonly
 void
 __assign_domain_page(struct domain *d,
-                     unsigned long mpaddr, unsigned long physaddr)
+                     unsigned long mpaddr, unsigned long physaddr,
+                     unsigned long flags)
 {
     pte_t *pte;
+    unsigned long arflags = (flags & ASSIGN_readonly)? _PAGE_AR_R: _PAGE_AR_RWX;
 
     pte = lookup_alloc_domain_pte(d, mpaddr);
     if (pte_none(*pte)) {
-        set_pte(pte,
-                pfn_pte(physaddr >> PAGE_SHIFT,
-                        __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
+        set_pte(pte, pfn_pte(physaddr >> PAGE_SHIFT,
+                             __pgprot(__DIRTY_BITS | _PAGE_PL_2 | arflags)));
         mb ();
     } else
         printk("%s: mpaddr %lx already mapped!\n", __func__, mpaddr);
@@ -820,7 +854,7 @@ assign_domain_page(struct domain *d,
     BUG_ON((physaddr & GPFN_IO_MASK) != GPFN_MEM);
     ret = get_page(page, d);
     BUG_ON(ret == 0);
-    __assign_domain_page(d, mpaddr, physaddr);
+    __assign_domain_page(d, mpaddr, physaddr, ASSIGN_writable);
 
     //XXX CONFIG_XEN_IA64_DOM0_VP
     //    TODO racy
@@ -830,12 +864,13 @@ assign_domain_page(struct domain *d,
 #ifdef CONFIG_XEN_IA64_DOM0_VP
 static void
 assign_domain_same_page(struct domain *d,
-                          unsigned long mpaddr, unsigned long size)
+                        unsigned long mpaddr, unsigned long size,
+                        unsigned long flags)
 {
     //XXX optimization
     unsigned long end = mpaddr + size;
     for (; mpaddr < end; mpaddr += PAGE_SIZE) {
-        __assign_domain_page(d, mpaddr, mpaddr);
+        __assign_domain_page(d, mpaddr, mpaddr, flags);
     }
 }
 
@@ -902,15 +937,16 @@ assign_domain_mmio_page(struct domain *d
                 __func__, __LINE__, d, mpaddr, size);
         return -EINVAL;
     }
-    assign_domain_same_page(d, mpaddr, size);
+    assign_domain_same_page(d, mpaddr, size, ASSIGN_writable);
     return mpaddr;
 }
 
 unsigned long
 assign_domain_mach_page(struct domain *d,
-                        unsigned long mpaddr, unsigned long size)
-{
-    assign_domain_same_page(d, mpaddr, size);
+                        unsigned long mpaddr, unsigned long size,
+                        unsigned long flags)
+{
+    assign_domain_same_page(d, mpaddr, size, flags);
     return mpaddr;
 }
 
@@ -1072,15 +1108,14 @@ unsigned long lookup_domain_mpa(struct d
                }
                pteval = pfn_pte(mpaddr >> PAGE_SHIFT,
                        __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX));
-               pte = &pteval;
-               return *(unsigned long *)pte;
+               return pte_val(pteval);
        }
 #endif
        pte = lookup_noalloc_domain_pte(d, mpaddr);
        if (pte != NULL) {
                if (pte_present(*pte)) {
 //printk("lookup_domain_page: found mapping for %lx, 
pte=%lx\n",mpaddr,pte_val(*pte));
-                       return *(unsigned long *)pte;
+                       return pte_val(*pte);
                } else if (VMX_DOMAIN(d->vcpu[0]))
                        return GPFN_INV_MASK;
        }
@@ -1094,7 +1129,10 @@ unsigned long lookup_domain_mpa(struct d
                printk("%s: bad mpa 0x%lx (=> 0x%lx)\n", __func__,
                       mpaddr, (unsigned long)d->max_pages << PAGE_SHIFT);
        mpafoo(mpaddr);
-       return 0;
+
+       //XXX This is a work around until the emulation memory access to a region
+       //    where memory or device are attached is implemented.
+       return pte_val(pfn_pte(0, __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
 }
 
 #ifdef CONFIG_XEN_IA64_DOM0_VP
@@ -1118,21 +1156,23 @@ out:
 
 // caller must get_page(mfn_to_page(mfn)) before
 // caller must call set_gpfn_from_mfn().
+// flags: currently only ASSIGN_readonly
 static void
 assign_domain_page_replace(struct domain *d, unsigned long mpaddr,
-                           unsigned long mfn, unsigned int flags)
+                           unsigned long mfn, unsigned long flags)
 {
     struct mm_struct *mm = &d->arch.mm;
     pte_t* pte;
     pte_t old_pte;
     pte_t npte;
+    unsigned long arflags = (flags & ASSIGN_readonly)? _PAGE_AR_R: _PAGE_AR_RWX;
 
     pte = lookup_alloc_domain_pte(d, mpaddr);
 
     // update pte
-    npte = pfn_pte(mfn, __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX));
+    npte = pfn_pte(mfn, __pgprot(__DIRTY_BITS | _PAGE_PL_2 | arflags));
     old_pte = ptep_xchg(mm, mpaddr, pte, npte);
-    if (!pte_none(old_pte)) {
+    if (pte_mem(old_pte)) {
         unsigned long old_mfn;
         struct page_info* old_page;
 
@@ -1159,16 +1199,31 @@ assign_domain_page_replace(struct domain
 
 unsigned long
 dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn,
-                   unsigned int flags, domid_t domid)
+                   unsigned long flags, domid_t domid)
 {
     int error = 0;
-
     struct domain* rd;
+
     rd = find_domain_by_id(domid);
     if (unlikely(rd == NULL)) {
-        error = -EINVAL;
-        goto out0;
-    }
+        switch (domid) {
+        case DOMID_XEN:
+            rd = dom_xen;
+            break;
+        case DOMID_IO:
+            rd = dom_io;
+            break;
+        default:
+            DPRINTK("d 0x%p domid %d "
+                    "pgfn 0x%lx mfn 0x%lx flags 0x%lx domid %d\n",
+                    d, d->domain_id, gpfn, mfn, flags, domid);
+            error = -ESRCH;
+            goto out0;
+        }
+        BUG_ON(rd == NULL);
+        get_knownalive_domain(rd);
+    }
+
     if (unlikely(rd == d)) {
         error = -EINVAL;
         goto out1;
@@ -1178,7 +1233,7 @@ dom0vp_add_physmap(struct domain* d, uns
         goto out1;
     }
 
-    assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, 0/* flags:XXX */);
+    assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, flags);
     //don't update p2m table because this page belongs to rd, not d.
 out1:
     put_domain(rd);
@@ -1198,23 +1253,18 @@ create_grant_host_mapping(unsigned long 
     struct page_info* page;
     int ret;
 
-    if (flags & (GNTMAP_application_map | GNTMAP_contains_pte)) {
+    if (flags & (GNTMAP_device_map | 
+                 GNTMAP_application_map | GNTMAP_contains_pte)) {
         DPRINTK("%s: flags 0x%x\n", __func__, flags);
         return GNTST_general_error;
-    }
-    if (flags & GNTMAP_readonly) {
-#if 0
-        DPRINTK("%s: GNTMAP_readonly is not implemented yet. flags %x\n",
-                __func__, flags);
-#endif
-        flags &= ~GNTMAP_readonly;
     }
 
     page = mfn_to_page(mfn);
     ret = get_page(page, page_get_owner(page));
     BUG_ON(ret == 0);
-    assign_domain_page_replace(d, gpaddr, mfn, flags);
-
+
+    assign_domain_page_replace(d, gpaddr, mfn, (flags & GNTMAP_readonly)?
+                                              ASSIGN_readonly: ASSIGN_writable);
     return GNTST_okay;
 }
 
@@ -1233,22 +1283,17 @@ destroy_grant_host_mapping(unsigned long
         DPRINTK("%s: flags 0x%x\n", __func__, flags);
         return GNTST_general_error;
     }
-    if (flags & GNTMAP_readonly) {
-#if 0
-        DPRINTK("%s: GNTMAP_readonly is not implemented yet. flags %x\n",
-                __func__, flags);
-#endif
-        flags &= ~GNTMAP_readonly;
-    }
 
     pte = lookup_noalloc_domain_pte(d, gpaddr);
     if (pte == NULL || !pte_present(*pte) || pte_pfn(*pte) != mfn)
-        return GNTST_general_error;//XXX GNTST_bad_pseudo_phys_addr
+        return GNTST_general_error;
 
     // update pte
     old_pte = ptep_get_and_clear(&d->arch.mm, gpaddr, pte);
     if (pte_present(old_pte)) {
-        old_mfn = pte_pfn(old_pte);//XXX
+        old_mfn = pte_pfn(old_pte);
+    } else {
+        return GNTST_general_error;
     }
     domain_page_flush(d, gpaddr, old_mfn, INVALID_MFN);
 
@@ -1349,7 +1394,7 @@ guest_physmap_add_page(struct domain *d,
 
     ret = get_page(mfn_to_page(mfn), d);
     BUG_ON(ret == 0);
-    assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, 0/* XXX */);
+    assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, ASSIGN_writable);
     set_gpfn_from_mfn(mfn, gpfn);//XXX SMP
 
     //BUG_ON(mfn != ((lookup_domain_mpa(d, gpfn << PAGE_SHIFT) & _PFN_MASK) >> PAGE_SHIFT));
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/xen/efi_emul.c
--- a/xen/arch/ia64/xen/efi_emul.c      Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/xen/efi_emul.c      Fri Jun 02 12:31:48 2006 -0500
@@ -21,6 +21,7 @@
 #include <asm/pgalloc.h>
 #include <asm/vcpu.h>
 #include <asm/dom_fw.h>
+#include <asm/fpswa.h>
 #include <public/sched.h>
 
 extern unsigned long translate_domain_mpaddr(unsigned long);
@@ -75,6 +76,7 @@ efi_emulate_set_virtual_address_map(
        unsigned long *vfn;
        struct domain *d = current->domain;
        efi_runtime_services_t *efi_runtime = d->arch.efi_runtime;
+       fpswa_interface_t *fpswa_inf = d->arch.fpswa_inf;
 
        if (descriptor_version != EFI_MEMDESC_VERSION) {
                printf ("efi_emulate_set_virtual_address_map: memory descriptor 
version unmatched\n");
@@ -119,6 +121,12 @@ efi_emulate_set_virtual_address_map(
                
EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_variable,EFI_SET_VARIABLE);
                
EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_next_high_mono_count,EFI_GET_NEXT_HIGH_MONO_COUNT);
                
EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->reset_system,EFI_RESET_SYSTEM);
+
+               vfn = (unsigned long *) domain_mpa_to_imva(d, (unsigned long) fpswa_inf->fpswa);
+               *vfn++ = FW_HYPERCALL_FPSWA_PATCH_INDEX * 16UL + md->virt_addr;
+               *vfn   = 0;
+               fpswa_inf->fpswa = (void *) (FW_HYPERCALL_FPSWA_ENTRY_INDEX * 16UL + md->virt_addr);
+               break;
        }
 
        /* The virtual address map has been applied. */
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/xen/hypercall.c
--- a/xen/arch/ia64/xen/hypercall.c     Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/xen/hypercall.c     Fri Jun 02 12:31:48 2006 -0500
@@ -14,6 +14,7 @@
 
 #include <linux/efi.h> /* FOR EFI_UNIMPLEMENTED */
 #include <asm/sal.h>   /* FOR struct ia64_sal_retval */
+#include <asm/fpswa.h> /* FOR struct fpswa_ret_t */
 
 #include <asm/vcpu.h>
 #include <asm/dom_fw.h>
@@ -25,9 +26,12 @@
 #include <asm/hw_irq.h>
 #include <public/physdev.h>
 #include <xen/domain.h>
+#include <public/callback.h>
+#include <xen/event.h>
 
 static long do_physdev_op_compat(XEN_GUEST_HANDLE(physdev_op_t) uop);
 static long do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg);
+static long do_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg);
 /* FIXME: where these declarations should be there ? */
 extern int dump_privop_counts_to_user(char *, int);
 extern int zero_privop_counts_to_user(char *, int);
@@ -67,7 +71,7 @@ hypercall_t ia64_hypercall_table[] =
        (hypercall_t)do_ni_hypercall,           /* do_acm_op */
        (hypercall_t)do_ni_hypercall,           /* do_nmi_op */
        (hypercall_t)do_sched_op,
-       (hypercall_t)do_ni_hypercall,           /*  */                          /* 30 */
+       (hypercall_t)do_callback_op,            /*  */                  /* 30 */
        (hypercall_t)do_ni_hypercall,           /*  */
        (hypercall_t)do_event_channel_op,
        (hypercall_t)do_physdev_op,
@@ -178,12 +182,19 @@ fw_hypercall_ipi (struct pt_regs *regs)
        return;
 }
 
+static fpswa_ret_t
+fw_hypercall_fpswa (struct vcpu *v)
+{
+       return PSCBX(v, fpswa_ret);
+}
+
 static IA64FAULT
 fw_hypercall (struct pt_regs *regs)
 {
        struct vcpu *v = current;
        struct sal_ret_values x;
        efi_status_t efi_ret_value;
+       fpswa_ret_t fpswa_ret;
        IA64FAULT fault; 
        unsigned long index = regs->r2 & FW_HYPERCALL_NUM_MASK_HIGH;
 
@@ -200,11 +211,8 @@ fw_hypercall (struct pt_regs *regs)
                VCPU(v,pending_interruption) = 1;
 #endif
                if (regs->r28 == PAL_HALT_LIGHT) {
-                       int pi;
-#define SPURIOUS_VECTOR 15
-                       pi = vcpu_check_pending_interrupts(v);
-                       if (pi != SPURIOUS_VECTOR) {
-                               if (!VCPU(v,pending_interruption))
+                       if (vcpu_deliverable_interrupts(v) ||
+                               event_pending(v)) {
                                        idle_when_pending++;
                                vcpu_pend_unspecified_interrupt(v);
 //printf("idle w/int#%d pending!\n",pi);
@@ -253,6 +261,13 @@ fw_hypercall (struct pt_regs *regs)
            case FW_HYPERCALL_IPI:
                fw_hypercall_ipi (regs);
                break;
+           case FW_HYPERCALL_FPSWA:
+               fpswa_ret = fw_hypercall_fpswa (v);
+               regs->r8  = fpswa_ret.status;
+               regs->r9  = fpswa_ret.err0;
+               regs->r10 = fpswa_ret.err1;
+               regs->r11 = fpswa_ret.err2;
+               break;
            default:
                printf("unknown ia64 fw hypercall %lx\n", regs->r2);
                regs->r8 = do_ni_hypercall();
@@ -435,3 +450,75 @@ long do_event_channel_op_compat(XEN_GUES
 
     return do_event_channel_op(op.cmd, guest_handle_from_ptr(&uop.p->u, void));
 }
+
+static long register_guest_callback(struct callback_register *reg)
+{
+    long ret = 0;
+    struct vcpu *v = current;
+
+    if (IS_VMM_ADDRESS(reg->address))
+        return -EINVAL;
+
+    switch ( reg->type )
+    {
+    case CALLBACKTYPE_event:
+        v->arch.event_callback_ip    = reg->address;
+        break;
+
+    case CALLBACKTYPE_failsafe:
+        v->arch.failsafe_callback_ip = reg->address;
+        break;
+
+    default:
+        ret = -EINVAL;
+        break;
+    }
+
+    return ret;
+}
+
+static long unregister_guest_callback(struct callback_unregister *unreg)
+{
+    return -EINVAL ;
+}
+
+/* First time to add callback to xen/ia64, so let's just stick to
+ * the newer callback interface.
+ */
+static long do_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg)
+{
+    long ret;
+
+    switch ( cmd )
+    {
+    case CALLBACKOP_register:
+    {
+        struct callback_register reg;
+
+        ret = -EFAULT;
+        if ( copy_from_guest(&reg, arg, 1) )
+            break;
+
+        ret = register_guest_callback(&reg);
+    }
+    break;
+
+    case CALLBACKOP_unregister:
+    {
+        struct callback_unregister unreg;
+
+        ret = -EFAULT;
+        if ( copy_from_guest(&unreg, arg, 1) )
+            break;
+
+        ret = unregister_guest_callback(&unreg);
+    }
+    break;
+
+    default:
+        ret = -EINVAL;
+        break;
+    }
+
+    return ret;
+}
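/*
 * Illustrative sketch, not from the changeset: a guest registers its event
 * callback through the new callback_op hypercall (slot 30 above) with
 * CALLBACKOP_register/CALLBACKTYPE_event.  Everything below is hypothetical
 * guest-side plumbing -- the constant values, the field layout of struct
 * callback_register and xen_callback_op_stub() are assumptions for
 * illustration; real guests use the definitions from public/callback.h and
 * the real hypercall stub.
 */
#include <stdint.h>
#include <stdio.h>

#define CALLBACKTYPE_event   0              /* assumed value */
#define CALLBACKOP_register  0              /* assumed value */

struct callback_register {                  /* assumed layout */
    int      type;
    uint64_t address;                       /* guest virtual address of the handler */
};

/* Stub standing in for the real callback_op hypercall. */
static long xen_callback_op_stub(int cmd, void *arg)
{
    struct callback_register *reg = arg;
    printf("callback_op cmd=%d type=%d address=0x%llx\n",
           cmd, reg->type, (unsigned long long)reg->address);
    return 0;
}

static long register_event_callback(uint64_t handler_va)
{
    struct callback_register reg = {
        .type    = CALLBACKTYPE_event,
        .address = handler_va,
    };
    return xen_callback_op_stub(CALLBACKOP_register, &reg);
}

int main(void)
{
    return (int)register_event_callback(0xa000000000001000ULL);
}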
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/xen/hyperprivop.S
--- a/xen/arch/ia64/xen/hyperprivop.S   Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/xen/hyperprivop.S   Fri Jun 02 12:31:48 2006 -0500
@@ -106,6 +106,11 @@ GLOBAL_ENTRY(fast_hyperprivop)
        or r23=r23,r24; or r21=r21,r22;;
        or r20=r23,r21;;
 1:     // when we get to here r20=~=interrupts pending
+       // Check pending event indication
+(p7)   adds r20=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18;;
+(p7)   ld8 r20=[r20];;
+(p7)   adds r20=-1,r20;;
+(p7)   ld1 r20=[r20];;
 
        // HYPERPRIVOP_RFI?
        cmp.eq p7,p6=HYPERPRIVOP_RFI,r17
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/xen/irq.c
--- a/xen/arch/ia64/xen/irq.c   Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/xen/irq.c   Fri Jun 02 12:31:48 2006 -0500
@@ -70,11 +70,13 @@
  */
 irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
        [0 ... NR_IRQS-1] = {
-               .status = IRQ_DISABLED | IRQ_GUEST,
+               .status = IRQ_DISABLED,
                .handler = &no_irq_type,
                .lock = SPIN_LOCK_UNLOCKED
        }
 };
+
+void __do_IRQ_guest(int irq);
 
 /*
  * Special irq handlers.
@@ -167,9 +169,7 @@ fastcall unsigned int __do_IRQ(unsigned 
        spin_lock(&desc->lock);
 
        if (desc->status & IRQ_GUEST) {
-               /* __do_IRQ_guest(irq); */
-               vcpu_pend_interrupt(dom0->vcpu[0],irq);
-               vcpu_wake(dom0->vcpu[0]);
+               __do_IRQ_guest(irq);
                spin_unlock(&desc->lock);
                return 1;
        }
@@ -392,6 +392,9 @@ typedef struct {
     u8 nr_guests;
     u8 in_flight;
     u8 shareable;
+    u8 ack_type;
+#define ACKTYPE_NONE   0     /* No final acknowledgement is required */
+#define ACKTYPE_UNMASK 1     /* Unmask notification is required */
     struct domain *guest[IRQ_MAX_GUESTS];
 } irq_guest_action_t;
 
@@ -405,10 +408,24 @@ void __do_IRQ_guest(int irq)
     for ( i = 0; i < action->nr_guests; i++ )
     {
         d = action->guest[i];
-        if ( !test_and_set_bit(irq, &d->pirq_mask) )
+        if ( (action->ack_type != ACKTYPE_NONE) &&
+             !test_and_set_bit(irq, &d->pirq_mask) )
             action->in_flight++;
         send_guest_pirq(d, irq);
     }
+}
+
+int pirq_acktype(int irq)
+{
+    irq_desc_t *desc = &irq_desc[irq];
+
+    if (!strcmp(desc->handler->typename, "IO-SAPIC-level"))
+        return ACKTYPE_UNMASK;
+
+    if (!strcmp(desc->handler->typename, "IO-SAPIC-edge"))
+        return ACKTYPE_NONE;
+
+    return ACKTYPE_NONE;
 }
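/*
 * Illustrative sketch, not from the changeset: the new ack_type separates
 * level-triggered IO-SAPIC lines, which must stay masked until every guest
 * that received the pirq has EOI'd it (tracked per-domain via pirq_mask and
 * per-irq via in_flight), from edge-triggered lines that need no final
 * acknowledgement.  A reduced model of that accounting (the real code also
 * uses test_and_set_bit so each domain is counted at most once):
 */
#include <stdbool.h>
#include <stdio.h>

#define ACK_NONE   0
#define ACK_UNMASK 1

struct pirq_state {
    int  ack_type;
    int  in_flight;          /* guests still holding a level-triggered pirq */
    bool line_masked;
};

static void deliver(struct pirq_state *p, int nr_guests)
{
    if (p->ack_type == ACK_UNMASK) {
        p->in_flight  += nr_guests;   /* each guest must EOI before the unmask */
        p->line_masked = true;
    }
    /* send_guest_pirq() to each guest would happen here */
}

static void guest_eoi(struct pirq_state *p)
{
    if (p->ack_type == ACK_UNMASK && --p->in_flight == 0)
        p->line_masked = false;       /* desc->handler->end(irq) in the real code */
}

int main(void)
{
    struct pirq_state p = { ACK_UNMASK, 0, false };
    deliver(&p, 2);
    guest_eoi(&p);
    guest_eoi(&p);
    printf("masked after all EOIs: %d\n", (int)p.line_masked);   /* 0 */
    return 0;
}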
 
 int pirq_guest_eoi(struct domain *d, int irq)
@@ -422,7 +439,10 @@ int pirq_guest_eoi(struct domain *d, int
     spin_lock_irq(&desc->lock);
     if ( test_and_clear_bit(irq, &d->pirq_mask) &&
          (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
+    {
+        ASSERT(action->ack_type == ACKTYPE_UNMASK);
         desc->handler->end(irq);
+    }
     spin_unlock_irq(&desc->lock);
 
     return 0;
@@ -431,7 +451,6 @@ int pirq_guest_eoi(struct domain *d, int
 
 int pirq_guest_unmask(struct domain *d)
 {
-    irq_desc_t    *desc;
     int            irq;
     shared_info_t *s = d->shared_info;
 
@@ -439,13 +458,9 @@ int pirq_guest_unmask(struct domain *d)
           irq < NR_IRQS;
           irq = find_next_bit(d->pirq_mask, NR_IRQS, irq+1) )
     {
-        desc = &irq_desc[irq];
-        spin_lock_irq(&desc->lock);
-        if ( !test_bit(d->pirq_to_evtchn[irq], &s->evtchn_mask[0]) &&
-             test_and_clear_bit(irq, &d->pirq_mask) &&
-             (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
-            desc->handler->end(irq);
-        spin_unlock_irq(&desc->lock);
+        if ( !test_bit(d->pirq_to_evtchn[irq], &s->evtchn_mask[0]) )
+            pirq_guest_eoi(d, irq);
+
     }
 
     return 0;
@@ -459,6 +474,11 @@ int pirq_guest_bind(struct vcpu *v, int 
     int                 rc = 0;
 
     spin_lock_irqsave(&desc->lock, flags);
+
+    if (desc->handler == &no_irq_type) {
+        spin_unlock_irqrestore(&desc->lock, flags);
+        return -ENOSYS;
+    }
 
     action = (irq_guest_action_t *)desc->action;
 
@@ -483,6 +503,7 @@ int pirq_guest_bind(struct vcpu *v, int 
         action->nr_guests = 0;
         action->in_flight = 0;
         action->shareable = will_share;
+        action->ack_type  = pirq_acktype(irq);
         
         desc->depth = 0;
         desc->status |= IRQ_GUEST;
@@ -529,26 +550,26 @@ int pirq_guest_unbind(struct domain *d, 
 
     action = (irq_guest_action_t *)desc->action;
 
-    if ( test_and_clear_bit(irq, &d->pirq_mask) &&
-         (--action->in_flight == 0) )
-        desc->handler->end(irq);
-
-    if ( action->nr_guests == 1 )
-    {
+    i = 0;
+    while ( action->guest[i] && (action->guest[i] != d) )
+        i++;
+    memmove(&action->guest[i], &action->guest[i+1], IRQ_MAX_GUESTS-i-1);
+    action->nr_guests--;
+
+    if ( action->ack_type == ACKTYPE_UNMASK )
+        if ( test_and_clear_bit(irq, &d->pirq_mask) &&
+             (--action->in_flight == 0) )
+            desc->handler->end(irq);
+
+    if ( !action->nr_guests )
+    {
+        BUG_ON(action->in_flight != 0);
         desc->action = NULL;
         xfree(action);
         desc->depth   = 1;
         desc->status |= IRQ_DISABLED;
         desc->status &= ~IRQ_GUEST;
         desc->handler->shutdown(irq);
-    }
-    else
-    {
-        i = 0;
-        while ( action->guest[i] != d )
-            i++;
-        memmove(&action->guest[i], &action->guest[i+1], IRQ_MAX_GUESTS-i-1);
-        action->nr_guests--;
     }
 
     spin_unlock_irqrestore(&desc->lock, flags);    
@@ -598,10 +619,9 @@ void process_soft_irq(void)
 
 // this is a temporary hack until real console input is implemented
 extern void domain_pend_keyboard_interrupt(int irq);
-irqreturn_t guest_forward_keyboard_input(int irq, void *nada, struct pt_regs *regs)
+void guest_forward_keyboard_input(int irq, void *nada, struct pt_regs *regs)
 {
        domain_pend_keyboard_interrupt(irq);
-       return 0;
 }
 
 void serial_input_init(void)
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/xen/privop.c
--- a/xen/arch/ia64/xen/privop.c        Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/xen/privop.c        Fri Jun 02 12:31:48 2006 -0500
@@ -129,10 +129,10 @@ IA64FAULT priv_ptc_l(VCPU *vcpu, INST64 
 IA64FAULT priv_ptc_l(VCPU *vcpu, INST64 inst)
 {
        UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
-       UINT64 addr_range;
-
-       addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
-       return vcpu_ptc_l(vcpu,vadr,addr_range);
+       UINT64 log_range;
+
+       log_range = ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
+       return vcpu_ptc_l(vcpu,vadr,log_range);
 }
 
 IA64FAULT priv_ptc_e(VCPU *vcpu, INST64 inst)
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/xen/process.c
--- a/xen/arch/ia64/xen/process.c       Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/xen/process.c       Fri Jun 02 12:31:48 2006 -0500
@@ -33,6 +33,7 @@
 #include "hpsim_ssc.h"
 #include <xen/multicall.h>
 #include <asm/debugger.h>
+#include <asm/fpswa.h>
 
 extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
 /* FIXME: where these declarations shold be there ? */
@@ -85,6 +86,8 @@ u64 translate_domain_pte(u64 pteval, u64
        struct domain *d = current->domain;
        ia64_itir_t itir = {.itir = itir__};
        u64 mask, mpaddr, pteval2;
+       u64 arflags;
+       u64 arflags2;
 
        pteval &= ((1UL << 53) - 1);// ignore [63:53] bits
 
@@ -123,6 +126,20 @@ u64 translate_domain_pte(u64 pteval, u64
        }
 #endif
        pteval2 = lookup_domain_mpa(d,mpaddr);
+       arflags  = pteval  & _PAGE_AR_MASK;
+       arflags2 = pteval2 & _PAGE_AR_MASK;
+       if (arflags != _PAGE_AR_R && arflags2 == _PAGE_AR_R) {
+#if 0
+               DPRINTK("%s:%d "
+                       "pteval 0x%lx arflag 0x%lx address 0x%lx itir 0x%lx "
+                       "pteval2 0x%lx arflags2 0x%lx mpaddr 0x%lx\n",
+                       __func__, __LINE__,
+                       pteval, arflags, address, itir__,
+                       pteval2, arflags2, mpaddr);
+#endif
+               pteval = (pteval & ~_PAGE_AR_MASK) | _PAGE_AR_R;
+}
+
        pteval2 &= _PAGE_PPN_MASK; // ignore non-addr bits
        pteval2 |= (pteval & _PAGE_ED);
        pteval2 |= _PAGE_PL_2; // force PL0->2 (PL3 is unaffected)
@@ -246,6 +263,40 @@ printf("*#*#*#* about to deliver early t
        reflect_interruption(isr,regs,IA64_EXTINT_VECTOR);
 }
 
+void reflect_event(struct pt_regs *regs)
+{
+       unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
+       struct vcpu *v = current;
+
+       /* Sanity check */
+       if (is_idle_vcpu(v) || !user_mode(regs)) {
+               //printk("WARN: invocation to reflect_event in nested xen\n");
+               return;
+       }
+
+       if (!event_pending(v))
+               return;
+
+       if (!PSCB(v,interrupt_collection_enabled))
+               printf("psr.ic off, delivering event, ipsr=%lx,iip=%lx,isr=%lx,viip=0x%lx\n",
+                      regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
+       PSCB(v,unat) = regs->ar_unat;  // not sure if this is really needed?
+       PSCB(v,precover_ifs) = regs->cr_ifs;
+       vcpu_bsw0(v);
+       PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
+       PSCB(v,isr) = isr;
+       PSCB(v,iip) = regs->cr_iip;
+       PSCB(v,ifs) = 0;
+       PSCB(v,incomplete_regframe) = 0;
+
+       regs->cr_iip = v->arch.event_callback_ip;
+       regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
+       regs->r31 = XSI_IPSR;
+
+       v->vcpu_info->evtchn_upcall_mask = 1;
+       PSCB(v,interrupt_collection_enabled) = 0;
+}
+
 // ONLY gets called from ia64_leave_kernel
 // ONLY call with interrupts disabled?? (else might miss one?)
 // NEVER successful if already reflecting a trap/fault because psr.i==0
@@ -255,7 +306,6 @@ void deliver_pending_interrupt(struct pt
        struct vcpu *v = current;
        // FIXME: Will this work properly if doing an RFI???
        if (!is_idle_domain(d) && user_mode(regs)) {
-               //vcpu_poke_timer(v);
                if (vcpu_deliverable_interrupts(v))
                        reflect_extint(regs);
                else if (PSCB(v,pending_interruption))
@@ -343,6 +393,98 @@ void ia64_do_page_fault (unsigned long a
        PSCB(current,iha) = iha;
        PSCB(current,ifa) = address;
        reflect_interruption(isr, regs, fault);
+}
+
+fpswa_interface_t *fpswa_interface = 0;
+
+void trap_init (void)
+{
+       if (ia64_boot_param->fpswa)
+               /* FPSWA fixup: make the interface pointer a virtual address: */
+               fpswa_interface = __va(ia64_boot_param->fpswa);
+       else
+               printk("No FPSWA supported.\n");
+}
+
+static fpswa_ret_t
+fp_emulate (int fp_fault, void *bundle, unsigned long *ipsr,
+           unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
+           unsigned long *ifs, struct pt_regs *regs)
+{
+       fp_state_t fp_state;
+       fpswa_ret_t ret;
+
+       if (!fpswa_interface)
+               return ((fpswa_ret_t) {-1, 0, 0, 0});
+
+       memset(&fp_state, 0, sizeof(fp_state_t));
+
+       /*
+        * compute fp_state.  only FP registers f6 - f11 are used by the
+        * kernel, so set those bits in the mask and set the low volatile
+        * pointer to point to these registers.
+        */
+       fp_state.bitmask_low64 = 0xfc0;  /* bit6..bit11 */
+
+       fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
+       /*
+        * unsigned long (*EFI_FPSWA) (
+        *      unsigned long    trap_type,
+        *      void             *Bundle,
+        *      unsigned long    *pipsr,
+        *      unsigned long    *pfsr,
+        *      unsigned long    *pisr,
+        *      unsigned long    *ppreds,
+        *      unsigned long    *pifs,
+        *      void             *fp_state);
+        */
+       ret = (*fpswa_interface->fpswa)(fp_fault, bundle,
+                                       ipsr, fpsr, isr, pr, ifs, &fp_state);
+
+       return ret;
+}
+
+/*
+ * Handle floating-point assist faults and traps for domain.
+ */
+static unsigned long
+handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
+{
+       struct vcpu *v = current;
+       IA64_BUNDLE bundle;
+       IA64_BUNDLE __get_domain_bundle(UINT64);
+       unsigned long fault_ip;
+       fpswa_ret_t ret;
+
+       fault_ip = regs->cr_iip;
+       /*
+        * When the FP trap occurs, the trapping instruction is completed.
+        * If ipsr.ri == 0, the trapping instruction is in the previous bundle.
+        */
+       if (!fp_fault && (ia64_psr(regs)->ri == 0))
+               fault_ip -= 16;
+       bundle = __get_domain_bundle(fault_ip);
+       if (!bundle.i64[0] && !bundle.i64[1]) {
+               printk("%s: floating-point bundle at 0x%lx not mapped\n",
+                      __FUNCTION__, fault_ip);
+               return -1;
+       }
+
+       ret = fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
+                        &isr, &regs->pr, &regs->cr_ifs, regs);
+
+       if (ret.status) {
+               PSCBX(v, fpswa_ret) = ret;
+               printk("%s(%s): fp_emulate() returned %ld\n",
+                      __FUNCTION__, fp_fault?"fault":"trap", ret.status);
+       } else {
+               if (fp_fault) {
+                       /* emulation was successful */
+                       vcpu_increment_iip(v);
+               }
+       }
+
+       return ret.status;
 }
 
 void
@@ -709,29 +851,31 @@ ia64_handle_reflection (unsigned long if
                        vector = IA64_NAT_CONSUMPTION_VECTOR; break;
                }
 #endif
-printf("*** NaT fault... attempting to handle as privop\n");
-printf("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
-       isr, ifa, regs->cr_iip, psr);
+               printf("*** NaT fault... attempting to handle as privop\n");
+               printf("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
+                      isr, ifa, regs->cr_iip, psr);
                //regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
                // certain NaT faults are higher priority than privop faults
                vector = priv_emulate(v,regs,isr);
                if (vector == IA64_NO_FAULT) {
-printf("*** Handled privop masquerading as NaT fault\n");
+                       printf("*** Handled privop masquerading as NaT fault\n");
                        return;
                }
                vector = IA64_NAT_CONSUMPTION_VECTOR; break;
            case 27:
-//printf("*** Handled speculation vector, itc=%lx!\n",ia64_get_itc());
+               //printf("*** Handled speculation vector, itc=%lx!\n",ia64_get_itc());
                PSCB(current,iim) = iim;
                vector = IA64_SPECULATION_VECTOR; break;
            case 30:
                // FIXME: Should we handle unaligned refs in Xen??
                vector = IA64_UNALIGNED_REF_VECTOR; break;
            case 32:
-               printf("ia64_handle_reflection: handling FP fault");
+               if (!(handle_fpu_swa(1, regs, isr))) return;
+               printf("ia64_handle_reflection: handling FP fault\n");
                vector = IA64_FP_FAULT_VECTOR; break;
            case 33:
-               printf("ia64_handle_reflection: handling FP trap");
+               if (!(handle_fpu_swa(0, regs, isr))) return;
+               printf("ia64_handle_reflection: handling FP trap\n");
                vector = IA64_FP_TRAP_VECTOR; break;
            case 34:
                printf("ia64_handle_reflection: handling lowerpriv trap");
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c  Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/xen/vcpu.c  Fri Jun 02 12:31:48 2006 -0500
@@ -18,6 +18,7 @@
 #include <asm/vhpt.h>
 #include <asm/tlbflush.h>
 #include <xen/event.h>
+#include <asm/vmx_phy_mode.h>
 
 /* FIXME: where these declarations should be there ? */
 extern void getreg(unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs);
@@ -649,16 +650,18 @@ void vcpu_pend_interrupt(VCPU *vcpu, UIN
                printf("vcpu_pend_interrupt: bad vector\n");
                return;
        }
-    if ( VMX_DOMAIN(vcpu) ) {
-           set_bit(vector,VCPU(vcpu,irr));
-    } else
-    {
-       if (test_bit(vector,PSCBX(vcpu,irr))) {
-//printf("vcpu_pend_interrupt: overrun\n");
-       }
-       set_bit(vector,PSCBX(vcpu,irr));
-       PSCB(vcpu,pending_interruption) = 1;
-    }
+
+       if (vcpu->arch.event_callback_ip) {
+               printf("Deprecated interface. Move to new event based solution\n");
+               return;
+       }
+               
+       if ( VMX_DOMAIN(vcpu) ) {
+               set_bit(vector,VCPU(vcpu,irr));
+       } else {
+               set_bit(vector,PSCBX(vcpu,irr));
+               PSCB(vcpu,pending_interruption) = 1;
+       }
 }
 
 #define        IA64_TPR_MMI    0x10000
@@ -673,6 +676,9 @@ UINT64 vcpu_check_pending_interrupts(VCP
 UINT64 vcpu_check_pending_interrupts(VCPU *vcpu)
 {
        UINT64 *p, *r, bits, bitnum, mask, i, vector;
+
+       if (vcpu->arch.event_callback_ip)
+               return SPURIOUS_VECTOR;
 
        /* Always check pending event, since guest may just ack the
         * event injection without handle. Later guest may throw out
@@ -1151,7 +1157,16 @@ void vcpu_pend_timer(VCPU *vcpu)
                // don't deliver another
                return;
        }
-       vcpu_pend_interrupt(vcpu, itv);
+       if (vcpu->arch.event_callback_ip) {
+               /* A small window may occur when injecting vIRQ while related
+                * handler has not been registered. Don't fire in such case.
+                */
+               if (vcpu->virq_to_evtchn[VIRQ_ITC]) {
+                       send_guest_vcpu_virq(vcpu, VIRQ_ITC);
+                       PSCBX(vcpu, domain_itm_last) = PSCBX(vcpu, domain_itm);
+               }
+       } else
+               vcpu_pend_interrupt(vcpu, itv);
 }
 
 // returns true if ready to deliver a timer interrupt too early
@@ -1834,12 +1849,17 @@ IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 
        return IA64_NO_FAULT;
 }
 
-IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
-{
-       printk("vcpu_ptc_l: called, not implemented yet\n");
-       return IA64_ILLOP_FAULT;
-}
-
+IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 log_range)
+{
+       /* Purge TC  */
+       vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
+       vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
+       
+       /*Purge all tlb and vhpt*/
+       vcpu_flush_tlb_vhpt_range (vadr, log_range);
+
+       return IA64_NO_FAULT;
+}
 // At privlvl=0, fc performs no access rights or protection key checks, while
 // at privlvl!=0, fc performs access rights checks as if it were a 1-byte
 // read but no protection key check.  Thus in order to avoid an unexpected
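
The vcpu.c changes route timer delivery through the new event-channel path when the guest has registered an event callback, and keep the old IRR-bitmap path otherwise. A self-contained toy version of that dispatch decision (struct layout and vector numbers are illustrative only):

#include <stdio.h>
#include <string.h>

struct toy_vcpu {
    unsigned long event_callback_ip;  /* nonzero once the guest registers it */
    int virq_itc_bound;               /* VIRQ_ITC bound to an event channel? */
    unsigned long irr[4];             /* 256-bit pending-interrupt bitmap */
};

static void toy_pend_timer(struct toy_vcpu *v, unsigned int itv)
{
    if (v->event_callback_ip) {
        if (v->virq_itc_bound)
            printf("send_guest_vcpu_virq(VIRQ_ITC)\n");
        /* else: handler not yet registered, so do not fire (as the patch does) */
    } else {
        v->irr[itv / 64] |= 1UL << (itv % 64);   /* legacy IRR path */
        printf("pended vector 0x%x in IRR\n", itv);
    }
}

int main(void)
{
    struct toy_vcpu v;
    memset(&v, 0, sizeof(v));

    toy_pend_timer(&v, 0xef);             /* legacy guest: vector into IRR */
    v.event_callback_ip = 0xf0000000UL;
    v.virq_itc_bound = 1;
    toy_pend_timer(&v, 0xef);             /* event-aware guest: VIRQ_ITC */
    return 0;
}
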
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/xen/xensetup.c
--- a/xen/arch/ia64/xen/xensetup.c      Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/xen/xensetup.c      Fri Jun 02 12:31:48 2006 -0500
@@ -35,8 +35,6 @@ char saved_command_line[COMMAND_LINE_SIZ
 char saved_command_line[COMMAND_LINE_SIZE];
 char dom0_command_line[COMMAND_LINE_SIZE];
 
-struct vcpu *idle_vcpu[NR_CPUS];
-
 cpumask_t cpu_present_map;
 
 extern unsigned long domain0_ready;
@@ -53,6 +51,7 @@ extern void setup_per_cpu_areas(void);
 extern void setup_per_cpu_areas(void);
 extern void mem_init(void);
 extern void init_IRQ(void);
+extern void trap_init(void);
 
 /* opt_nosmp: If true, secondary processors are ignored. */
 static int opt_nosmp = 0;
@@ -321,6 +320,8 @@ void start_kernel(void)
 
     init_frametable();
 
+    trap_init();
+
     alloc_dom0();
 
     end_boot_allocator();
@@ -337,6 +338,7 @@ printk("About to call scheduler_init()\n
     BUG_ON(idle_domain == NULL);
 
     late_setup_arch((char **) &cmdline);
+    alloc_dom_xen_and_dom_io();
     setup_per_cpu_areas();
     mem_init();
 
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/ia64/xen/xentime.c
--- a/xen/arch/ia64/xen/xentime.c       Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/ia64/xen/xentime.c       Fri Jun 02 12:31:48 2006 -0500
@@ -105,7 +105,7 @@ void do_settime(unsigned long secs, unsi
     return;
 }
 
-irqreturn_t
+void
 xen_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
 {
        unsigned long new_itm, old_itc;
@@ -134,7 +134,7 @@ xen_timer_interrupt (int irq, void *dev_
        new_itm = local_cpu_data->itm_next;
 
        if (!VMX_DOMAIN(current) && !time_after(ia64_get_itc(), new_itm))
-               return IRQ_HANDLED;
+               return;
 
        while (1) {
                new_itm += local_cpu_data->itm_delta;
@@ -185,8 +185,6 @@ xen_timer_interrupt (int irq, void *dev_
                /* double check, in case we got hit by a (slow) PMI: */
        } while (time_after_eq(ia64_get_itc(), new_itm));
        raise_softirq(TIMER_SOFTIRQ);
-
-       return IRQ_HANDLED;
 }
 
 static struct irqaction xen_timer_irqaction = {
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/audit.c
--- a/xen/arch/x86/audit.c      Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/audit.c      Fri Jun 02 12:31:48 2006 -0500
@@ -432,10 +432,10 @@ int audit_adjust_pgtables(struct domain 
 
         for_each_vcpu(d, v)
         {
-            if ( pagetable_get_paddr(v->arch.guest_table) )
+            if ( !pagetable_is_null(v->arch.guest_table) )
                 adjust(mfn_to_page(pagetable_get_pfn(v->arch.guest_table)),
                        !shadow_mode_refcounts(d));
-            if ( pagetable_get_paddr(v->arch.shadow_table) )
+            if ( !pagetable_is_null(v->arch.shadow_table) )
                 adjust(mfn_to_page(pagetable_get_pfn(v->arch.shadow_table)),
                        0);
             if ( v->arch.monitor_shadow_ref )
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/cpu/mtrr/main.c
--- a/xen/arch/x86/cpu/mtrr/main.c      Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/cpu/mtrr/main.c      Fri Jun 02 12:31:48 2006 -0500
@@ -43,7 +43,7 @@
 #include "mtrr.h"
 
 /* No blocking mutexes in Xen. Spin instead. */
-#define DECLARE_MUTEX(_m) spinlock_t _m = SPIN_LOCK_UNLOCKED
+#define DECLARE_MUTEX(_m) DEFINE_SPINLOCK(_m)
 #define down(_m) spin_lock(_m)
 #define up(_m) spin_unlock(_m)
 #define lock_cpu_hotplug() ((void)0)
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/dom0_ops.c
--- a/xen/arch/x86/dom0_ops.c   Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/dom0_ops.c   Fri Jun 02 12:31:48 2006 -0500
@@ -467,7 +467,7 @@ void arch_getdomaininfo_ctxt(
     if ( hvm_guest(v) )
         c->flags |= VGCF_HVM_GUEST;
 
-    c->ctrlreg[3] = pagetable_get_paddr(v->arch.guest_table);
+    c->ctrlreg[3] = xen_pfn_to_cr3(pagetable_get_pfn(v->arch.guest_table));
 
     c->vm_assist = v->domain->vm_assist;
 }
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/domain.c     Fri Jun 02 12:31:48 2006 -0500
@@ -259,7 +259,7 @@ int arch_set_info_guest(
     struct vcpu *v, struct vcpu_guest_context *c)
 {
     struct domain *d = v->domain;
-    unsigned long phys_basetab = INVALID_MFN;
+    unsigned long cr3_pfn;
     int i, rc;
 
     if ( !(c->flags & VGCF_HVM_GUEST) )
@@ -322,12 +322,8 @@ int arch_set_info_guest(
 
     if ( !(c->flags & VGCF_HVM_GUEST) )
     {
-        phys_basetab = c->ctrlreg[3];
-        phys_basetab =
-            (gmfn_to_mfn(d, phys_basetab >> PAGE_SHIFT) << PAGE_SHIFT) |
-            (phys_basetab & ~PAGE_MASK);
-
-        v->arch.guest_table = mk_pagetable(phys_basetab);
+        cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c->ctrlreg[3]));
+        v->arch.guest_table = pagetable_from_pfn(cr3_pfn);
     }
 
     if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
@@ -335,14 +331,14 @@ int arch_set_info_guest(
 
     if ( c->flags & VGCF_HVM_GUEST )
     {
-        v->arch.guest_table = mk_pagetable(0);
+        v->arch.guest_table = pagetable_null();
 
         if ( !hvm_initialize_guest_resources(v) )
             return -EINVAL;
     }
     else if ( shadow_mode_refcounts(d) )
     {
-        if ( !get_page(mfn_to_page(phys_basetab>>PAGE_SHIFT), d) )
+        if ( !get_page(mfn_to_page(cr3_pfn), d) )
         {
             destroy_gdt(v);
             return -EINVAL;
@@ -350,7 +346,7 @@ int arch_set_info_guest(
     }
     else
     {
-        if ( !get_page_and_type(mfn_to_page(phys_basetab>>PAGE_SHIFT), d,
+        if ( !get_page_and_type(mfn_to_page(cr3_pfn), d,
                                 PGT_base_page_table) )
         {
             destroy_gdt(v);
@@ -935,7 +931,7 @@ void domain_relinquish_resources(struct 
                 put_page_type(mfn_to_page(pfn));
             put_page(mfn_to_page(pfn));
 
-            v->arch.guest_table = mk_pagetable(0);
+            v->arch.guest_table = pagetable_null();
         }
 
         if ( (pfn = pagetable_get_pfn(v->arch.guest_table_user)) != 0 )
@@ -944,7 +940,7 @@ void domain_relinquish_resources(struct 
                 put_page_type(mfn_to_page(pfn));
             put_page(mfn_to_page(pfn));
 
-            v->arch.guest_table_user = mk_pagetable(0);
+            v->arch.guest_table_user = pagetable_null();
         }
     }
 
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/domain_build.c       Fri Jun 02 12:31:48 2006 -0500
@@ -443,13 +443,13 @@ int construct_dom0(struct domain *d,
         l2tab[(LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT)+i] =
             l2e_from_paddr((u32)l2tab + i*PAGE_SIZE, __PAGE_HYPERVISOR);
     }
-    v->arch.guest_table = mk_pagetable((unsigned long)l3start);
+    v->arch.guest_table = pagetable_from_paddr((unsigned long)l3start);
 #else
     l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
     memcpy(l2tab, idle_pg_table, PAGE_SIZE);
     l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
         l2e_from_paddr((unsigned long)l2start, __PAGE_HYPERVISOR);
-    v->arch.guest_table = mk_pagetable((unsigned long)l2start);
+    v->arch.guest_table = pagetable_from_paddr((unsigned long)l2start);
 #endif
 
     for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
@@ -577,7 +577,7 @@ int construct_dom0(struct domain *d,
         l4e_from_paddr(__pa(l4start), __PAGE_HYPERVISOR);
     l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
         l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
-    v->arch.guest_table = mk_pagetable(__pa(l4start));
+    v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
 
     l4tab += l4_table_offset(dsi.v_start);
     mfn = alloc_spfn;
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c     Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/hvm/io.c     Fri Jun 02 12:31:48 2006 -0500
@@ -648,6 +648,16 @@ static void hvm_mmio_assist(struct vcpu 
             regs->eflags &= ~X86_EFLAGS_CF;
 
         break;
+
+    case INSTR_XCHG:
+       if (src & REGISTER) {
+               index = operand_index(src);
+               set_reg_value(size, index, 0, regs, p->u.data);
+       } else {
+               index = operand_index(dst);
+               set_reg_value(size, index, 0, regs, p->u.data);
+       }
+       break;
     }
 
     hvm_load_cpu_guest_regs(v, regs);
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c       Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/hvm/platform.c       Fri Jun 02 12:31:48 2006 -0500
@@ -954,10 +954,26 @@ void handle_mmio(unsigned long va, unsig
         mmio_opp->instr = mmio_inst.instr;
         mmio_opp->operand[0] = mmio_inst.operand[0]; /* source */
         mmio_opp->operand[1] = mmio_inst.operand[1]; /* destination */
-
-        /* send the request and wait for the value */
-        send_mmio_req(IOREQ_TYPE_XCHG, gpa, 1,
-                      mmio_inst.op_size, 0, IOREQ_WRITE, 0);
+       if (mmio_inst.operand[0] & REGISTER) {
+               long value;
+               unsigned long operand = mmio_inst.operand[0];
+               value = get_reg_value(operand_size(operand), 
+                                     operand_index(operand), 0,
+                                     mmio_opp->inst_decoder_regs);
+               /* send the request and wait for the value */
+               send_mmio_req(IOREQ_TYPE_XCHG, gpa, 1,
+                      mmio_inst.op_size, value, IOREQ_WRITE, 0);
+       } else {
+               /* the destination is a register */
+               long value;
+               unsigned long operand = mmio_inst.operand[1];
+               value = get_reg_value(operand_size(operand), 
+                                     operand_index(operand), 0,
+                                     mmio_opp->inst_decoder_regs);
+               /* send the request and wait for the value */
+               send_mmio_req(IOREQ_TYPE_XCHG, gpa, 1,
+                      mmio_inst.op_size, value, IOREQ_WRITE, 0);
+       }
         break;
 
     default:
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/hvm/svm/svm.c        Fri Jun 02 12:31:48 2006 -0500
@@ -51,9 +51,6 @@
 
 #define SVM_EXTRA_DEBUG
 
-/* Useful define */
-#define MAX_INST_SIZE  15
-
 #define set_segment_register(name, value)  \
        __asm__ __volatile__ ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
 
@@ -74,6 +71,9 @@ void svm_dump_regs(const char *from, str
 void svm_dump_regs(const char *from, struct cpu_user_regs *regs);
 
 static void svm_relinquish_guest_resources(struct domain *d);
+static int svm_do_vmmcall_reset_to_realmode(struct vcpu *v,
+        struct cpu_user_regs *regs);
+
 
 
 extern void set_hsa_to_guest( struct arch_svm_struct *arch_svm );
@@ -84,28 +84,26 @@ struct svm_percore_globals svm_globals[N
 /*
  * Initializes the POOL of ASID used by the guests per core.
  */
-void asidpool_init( int core )
+void asidpool_init(int core)
 {
     int i;
-    svm_globals[core].ASIDpool.asid_lock = SPIN_LOCK_UNLOCKED;
-    spin_lock(&svm_globals[core].ASIDpool.asid_lock);
+
+    spin_lock_init(&svm_globals[core].ASIDpool.asid_lock);
+
     /* Host ASID is always in use */
     svm_globals[core].ASIDpool.asid[INITIAL_ASID] = ASID_INUSE;
-    for( i=1; i<ASID_MAX; i++ )
-    {
+    for ( i = 1; i < ASID_MAX; i++ )
        svm_globals[core].ASIDpool.asid[i] = ASID_AVAILABLE;
-    }
-    spin_unlock(&svm_globals[core].ASIDpool.asid_lock);
 }
 
 
 /* internal function to get the next available ASID */
-static int asidpool_fetch_next( struct vmcb_struct *vmcb, int core )
+static int asidpool_fetch_next(struct vmcb_struct *vmcb, int core)
 {
     int i;   
-    for( i = 1; i < ASID_MAX; i++ )
-    {
-        if( svm_globals[core].ASIDpool.asid[i] == ASID_AVAILABLE )
+    for ( i = 1; i < ASID_MAX; i++ )
+    {
+        if ( svm_globals[core].ASIDpool.asid[i] == ASID_AVAILABLE )
         {
             vmcb->guest_asid = i;
             svm_globals[core].ASIDpool.asid[i] = ASID_INUSE;
@@ -438,6 +436,42 @@ unsigned long svm_get_ctrl_reg(struct vc
     return 0;                   /* dummy */
 }
 
+
+/* SVM-specific initialization code for VCPU application processors */
+void svm_init_ap_context(struct vcpu_guest_context *ctxt, 
+        int vcpuid, int trampoline_vector)
+{
+    int i;
+    struct vcpu *v, *bsp = current;
+    struct domain *d = bsp->domain;
+    cpu_user_regs_t *regs;
+
+  
+    if ((v = d->vcpu[vcpuid]) == NULL)
+    {
+        printk("vcpuid %d is invalid!  good-bye.\n", vcpuid);
+        domain_crash_synchronous();
+    }
+    regs = &v->arch.guest_context.user_regs;
+
+    memset(ctxt, 0, sizeof(*ctxt));
+    for (i = 0; i < 256; ++i)
+    {
+        ctxt->trap_ctxt[i].vector = i;
+        ctxt->trap_ctxt[i].cs = FLAT_KERNEL_CS;
+    }
+
+
+    /*
+     * We execute the trampoline code in real mode. The trampoline vector
+     * passed to us is page aligned and is the physical frame number for
+     * the code. We will execute this code in real mode. 
+     */
+    ctxt->user_regs.eip = 0x0;
+    ctxt->user_regs.cs = (trampoline_vector << 8); 
+    ctxt->flags = VGCF_HVM_GUEST;
+}
+
 int start_svm(void)
 {
     u32 eax, ecx, edx;
@@ -484,6 +518,7 @@ int start_svm(void)
     hvm_funcs.paging_enabled = svm_paging_enabled;
     hvm_funcs.instruction_length = svm_instruction_length;
     hvm_funcs.get_guest_ctrl_reg = svm_get_ctrl_reg;
+    hvm_funcs.init_ap_context = svm_init_ap_context;
 
     hvm_enabled = 1;    
 
@@ -660,6 +695,20 @@ static void arch_svm_do_launch(struct vc
     if (svm_dbg_on)
         svm_dump_host_regs(__func__);
 #endif
+    if (v->vcpu_id != 0) 
+    {
+       u16     cs_sel = regs->cs;
+       /*
+        * This is the launch of an AP; set state so that we begin executing
+        * the trampoline code in real-mode.
+        */
+       svm_do_vmmcall_reset_to_realmode(v, regs);      
+       /* Adjust the state to execute the trampoline code.*/
+       v->arch.hvm_svm.vmcb->rip = 0;
+       v->arch.hvm_svm.vmcb->cs.sel= cs_sel;
+       v->arch.hvm_svm.vmcb->cs.base = (cs_sel << 4);
+    }
+       
     reset_stack_and_jump(svm_asm_do_launch);
 }
 
@@ -695,34 +744,34 @@ static void svm_ctxt_switch_to(struct vc
 
 void svm_final_setup_guest(struct vcpu *v)
 {
+    struct domain *d = v->domain;
+    struct vcpu *vc;
+
     v->arch.schedule_tail    = arch_svm_do_launch;
     v->arch.ctxt_switch_from = svm_ctxt_switch_from;
     v->arch.ctxt_switch_to   = svm_ctxt_switch_to;
 
-    if (v == v->domain->vcpu[0]) 
-    {
-       struct domain *d = v->domain;
-       struct vcpu *vc;
-
-       /* Initialize monitor page table */
-       for_each_vcpu(d, vc)
-           vc->arch.monitor_table = mk_pagetable(0);
-
-        /* 
-         * Required to do this once per domain
-         * TODO: add a seperate function to do these.
-         */
-        memset(&d->shared_info->evtchn_mask[0], 0xff, 
-               sizeof(d->shared_info->evtchn_mask));       
-
-        /* 
-         * Put the domain in shadow mode even though we're going to be using
-         * the shared 1:1 page table initially. It shouldn't hurt 
-         */
-        shadow_mode_enable(d, 
-                SHM_enable|SHM_refcounts|
-               SHM_translate|SHM_external|SHM_wr_pt_pte);
-    }
+    if ( v != d->vcpu[0] )
+        return;
+
+    /* Initialize monitor page table */
+    for_each_vcpu( d, vc )
+        vc->arch.monitor_table = pagetable_null();
+
+    /* 
+     * Required to do this once per domain
+     * TODO: add a separate function to do these.
+     */
+    memset(&d->shared_info->evtchn_mask[0], 0xff, 
+           sizeof(d->shared_info->evtchn_mask));       
+
+    /* 
+     * Put the domain in shadow mode even though we're going to be using
+     * the shared 1:1 page table initially. It shouldn't hurt 
+     */
+    shadow_mode_enable(d,
+                       SHM_enable|SHM_refcounts|
+                       SHM_translate|SHM_external|SHM_wr_pt_pte);
 }
 
 
@@ -819,7 +868,7 @@ static int svm_do_page_fault(unsigned lo
     /* Use 1:1 page table to identify MMIO address space */
     if (mmio_space(gpa))
     {
-       /* No support for APIC */
+        /* No support for APIC */
         if (!hvm_apic_support(v->domain) && gpa >= 0xFEC00000)
         { 
             int inst_len;
@@ -896,8 +945,10 @@ static void svm_do_general_protection_fa
     svm_inject_exception(v, TRAP_gp_fault, 1, error_code);
 }
 
-/* Reserved bits: [31:14], [12:1] */
-#define SVM_VCPU_CPUID_L1_RESERVED 0xffffdffe
+/* Reserved bits ECX: [31:14], [12:4], [2:1]*/
+#define SVM_VCPU_CPUID_L1_ECX_RESERVED 0xffffdff6
+/* Reserved bits EDX: [31:29], [27], [22:20], [18], [10] */
+#define SVM_VCPU_CPUID_L1_EDX_RESERVED 0xe8740400
 
 static void svm_vmexit_do_cpuid(struct vmcb_struct *vmcb, unsigned long input, 
         struct cpu_user_regs *regs) 
@@ -920,20 +971,17 @@ static void svm_vmexit_do_cpuid(struct v
 
     cpuid(input, &eax, &ebx, &ecx, &edx);
 
-    if (input == 1)
+    if (input == 0x00000001)
     {
         if ( !hvm_apic_support(v->domain) ||
                 !vlapic_global_enabled((VLAPIC(v))) )
         {
-            clear_bit(X86_FEATURE_APIC, &edx);
-            /* Since the apic is disabled, avoid any confusion about SMP cpus being available */
-            clear_bit(X86_FEATURE_HT, &edx);  /* clear the hyperthread bit */
-            ebx &= 0xFF00FFFF;  /* set the logical processor count to 1 */
-            ebx |= 0x00010000;
-        }
-           
+            /* Since the apic is disabled, avoid any confusion 
+              about SMP cpus being available */
+           clear_bit(X86_FEATURE_APIC, &edx);
+        }
+
 #if CONFIG_PAGING_LEVELS < 3
-        clear_bit(X86_FEATURE_NX, &edx);
         clear_bit(X86_FEATURE_PAE, &edx);
         clear_bit(X86_FEATURE_PSE, &edx);
         clear_bit(X86_FEATURE_PSE36, &edx);
@@ -942,24 +990,90 @@ static void svm_vmexit_do_cpuid(struct v
         {
             if ( !v->domain->arch.hvm_domain.pae_enabled )
             {
-               clear_bit(X86_FEATURE_PAE, &edx);
-               clear_bit(X86_FEATURE_NX, &edx);
+               clear_bit(X86_FEATURE_PAE, &edx);
             }
             clear_bit(X86_FEATURE_PSE, &edx);
             clear_bit(X86_FEATURE_PSE36, &edx);
         }
 #endif 
         /* Clear out reserved bits. */
-        ecx &= ~SVM_VCPU_CPUID_L1_RESERVED; /* mask off reserved bits */
+        ecx &= ~SVM_VCPU_CPUID_L1_ECX_RESERVED;
+        edx &= ~SVM_VCPU_CPUID_L1_EDX_RESERVED;
+
         clear_bit(X86_FEATURE_MWAIT & 31, &ecx);
-    }
+
+       /* Guest should only see one logical processor.
+        * See details on page 23 of AMD CPUID Specification. 
+       */
+       clear_bit(X86_FEATURE_HT, &edx);  /* clear the hyperthread bit */
+       ebx &= 0xFF00FFFF;  /* clear the logical processor count when HTT=0 */
+       ebx |= 0x00010000;  /* set to 1 just for precaution */
+    }
+    else if ( ( input > 0x00000005 ) && ( input < 0x80000000 ) )
+    {
+       eax = ebx = ecx = edx = 0x0;
+    }
+    else if ( input == 0x80000001 )
+    {
+       /* We duplicate some CPUID_00000001 code because many bits of 
+          CPUID_80000001_EDX overlaps with CPUID_00000001_EDX. */
+
+        if ( !hvm_apic_support(v->domain) ||
+            !vlapic_global_enabled((VLAPIC(v))) )
+        {
+            /* Since the apic is disabled, avoid any confusion 
+              about SMP cpus being available */
+           clear_bit(X86_FEATURE_APIC, &edx);
+        }
+
+       /* Clear the Cmp_Legacy bit 
+        * This bit is supposed to be zero when HTT = 0.
+        * See details on page 23 of AMD CPUID Specification. 
+       */
+       clear_bit(X86_FEATURE_CMP_LEGACY & 31, &ecx);
+
 #ifdef __i386__
-    else if ( input == 0x80000001 )
-    {
         /* Mask feature for Intel ia32e or AMD long mode. */
+        clear_bit(X86_FEATURE_LAHF_LM & 31, &ecx);
+
         clear_bit(X86_FEATURE_LM & 31, &edx);
-    }
+        clear_bit(X86_FEATURE_SYSCALL & 31, &edx);
 #endif
+
+#if CONFIG_PAGING_LEVELS < 3
+       clear_bit(X86_FEATURE_NX & 31, &edx);
+        clear_bit(X86_FEATURE_PAE, &edx);
+        clear_bit(X86_FEATURE_PSE, &edx);
+        clear_bit(X86_FEATURE_PSE36, &edx);
+#else
+        if ( v->domain->arch.ops->guest_paging_levels == PAGING_L2 )
+        {
+            if ( !v->domain->arch.hvm_domain.pae_enabled )
+            {
+               clear_bit(X86_FEATURE_NX & 31, &edx);
+               clear_bit(X86_FEATURE_PAE, &edx);
+            }
+            clear_bit(X86_FEATURE_PSE, &edx);
+            clear_bit(X86_FEATURE_PSE36, &edx);
+        }
+#endif 
+
+        /* Make SVM feature invisible to the guest. */
+        clear_bit(X86_FEATURE_SVME & 31, &ecx);
+       
+       /* So far, we do not support 3DNow for the guest. */
+       clear_bit(X86_FEATURE_3DNOW & 31, &edx);
+       clear_bit(X86_FEATURE_3DNOWEXT & 31, &edx);
+    }
+    else if ( ( input == 0x80000007 ) || ( input == 0x8000000A  ) )
+    {
+       /* Mask out features of power management and SVM extension. */
+       eax = ebx = ecx = edx = 0;
+    }
+    else if ( input == 0x80000008 )
+    {
+       ecx &= 0xFFFFFF00; /* Make sure Number of CPU core is 1 when HTT=0 */
+    }
 
     regs->eax = (unsigned long)eax;
     regs->ebx = (unsigned long)ebx;
@@ -1454,7 +1568,7 @@ static int svm_set_cr0(unsigned long val
         }
 
         /* Now arch.guest_table points to machine physical. */
-        v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
+        v->arch.guest_table = pagetable_from_pfn(mfn);
         update_pagetables(v);
 
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx", 
@@ -1474,7 +1588,7 @@ static int svm_set_cr0(unsigned long val
         if ( v->arch.hvm_svm.cpu_cr3 ) {
             put_page(mfn_to_page(get_mfn_from_gpfn(
                       v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT)));
-            v->arch.guest_table = mk_pagetable(0);
+            v->arch.guest_table = pagetable_null();
         }
 
     /*
@@ -1483,7 +1597,7 @@ static int svm_set_cr0(unsigned long val
      * created.
      */
     if ((value & X86_CR0_PE) == 0) {
-       if (value & X86_CR0_PG) {
+        if (value & X86_CR0_PG) {
             svm_inject_exception(v, TRAP_gp_fault, 1, 0);
             return 0;
         }
@@ -1624,7 +1738,7 @@ static int mov_to_cr(int gpreg, int cr, 
             }
 
             old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-            v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
+            v->arch.guest_table = pagetable_from_pfn(mfn);
 
             if (old_base_mfn)
                 put_page(mfn_to_page(old_base_mfn));
@@ -1681,7 +1795,7 @@ static int mov_to_cr(int gpreg, int cr, 
                  * Now arch.guest_table points to machine physical.
                  */
 
-                v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
+                v->arch.guest_table = pagetable_from_pfn(mfn);
                 update_pagetables(v);
 
                 HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
@@ -2036,7 +2150,7 @@ void svm_handle_invlpg(const short invlp
 void svm_handle_invlpg(const short invlpga, struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
-    u8 opcode[MAX_INST_SIZE], prefix, length = MAX_INST_SIZE;
+    u8 opcode[MAX_INST_LEN], prefix, length = MAX_INST_LEN;
     unsigned long g_vaddr;
     int inst_len;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
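
A side note on the CPUID masking style used above: X86_FEATURE_* values encode a word index plus a bit index, so "& 31" keeps only the in-register bit before clearing it. A tiny standalone demonstration with a made-up feature value:

#include <stdio.h>
#include <stdint.h>

#define FAKE_FEATURE_SVME  (6 * 32 + 2)   /* word 6, bit 2 (illustrative) */

static void clear_feature(uint32_t *reg, int feature)
{
    *reg &= ~(1u << (feature & 31));      /* keep only the in-word bit index */
}

int main(void)
{
    uint32_t ecx = 0xffffffffu;
    clear_feature(&ecx, FAKE_FEATURE_SVME);
    printf("ecx after masking: 0x%08x\n", ecx);   /* bit 2 now clear */
    return 0;
}
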
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Fri Jun 02 12:31:48 2006 -0500
@@ -66,7 +66,7 @@ void vmx_final_setup_guest(struct vcpu *
 
         /* Initialize monitor page table */
         for_each_vcpu(d, vc)
-            vc->arch.monitor_table = mk_pagetable(0);
+            vc->arch.monitor_table = pagetable_null();
 
         /*
          * Required to do this once per domain
@@ -1223,7 +1223,7 @@ vmx_world_restore(struct vcpu *v, struct
         if(!get_page(mfn_to_page(mfn), v->domain))
                 return 0;
         old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-        v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
+        v->arch.guest_table = pagetable_from_pfn(mfn);
         if (old_base_mfn)
              put_page(mfn_to_page(old_base_mfn));
         /*
@@ -1459,7 +1459,7 @@ static int vmx_set_cr0(unsigned long val
         /*
          * Now arch.guest_table points to machine physical.
          */
-        v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
+        v->arch.guest_table = pagetable_from_pfn(mfn);
         update_pagetables(v);
 
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
@@ -1477,7 +1477,7 @@ static int vmx_set_cr0(unsigned long val
         if ( v->arch.hvm_vmx.cpu_cr3 ) {
             put_page(mfn_to_page(get_mfn_from_gpfn(
                       v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)));
-            v->arch.guest_table = mk_pagetable(0);
+            v->arch.guest_table = pagetable_null();
         }
 
     /*
@@ -1635,7 +1635,7 @@ static int mov_to_cr(int gp, int cr, str
                 domain_crash_synchronous(); /* need to take a clean path */
             }
             old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-            v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
+            v->arch.guest_table = pagetable_from_pfn(mfn);
             if (old_base_mfn)
                 put_page(mfn_to_page(old_base_mfn));
             /*
@@ -1690,7 +1690,7 @@ static int mov_to_cr(int gp, int cr, str
                  * Now arch.guest_table points to machine physical.
                  */
 
-                v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
+                v->arch.guest_table = pagetable_from_pfn(mfn);
                 update_pagetables(v);
 
                 HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/i8259.c
--- a/xen/arch/x86/i8259.c      Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/i8259.c      Fri Jun 02 12:31:48 2006 -0500
@@ -102,7 +102,7 @@ BUILD_SMP_INTERRUPT(thermal_interrupt,TH
  * moves to arch independent land
  */
 
-spinlock_t i8259A_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(i8259A_lock);
 
 static void disable_8259A_vector(unsigned int vector)
 {
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/microcode.c
--- a/xen/arch/x86/microcode.c  Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/microcode.c  Fri Jun 02 12:31:48 2006 -0500
@@ -83,7 +83,7 @@
 #include <asm/processor.h>
 
 #define pr_debug(x...) ((void)0)
-#define DECLARE_MUTEX(_m) spinlock_t _m = SPIN_LOCK_UNLOCKED
+#define DECLARE_MUTEX(_m) DEFINE_SPINLOCK(_m)
 #define down(_m) spin_lock(_m)
 #define up(_m) spin_unlock(_m)
 #define vmalloc(_s) xmalloc_bytes(_s)
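
Several files in this changeset replace open-coded "spinlock_t x = SPIN_LOCK_UNLOCKED" with DEFINE_SPINLOCK(x), which bundles declaration and initializer in one macro. A toy equivalent, assuming nothing about Xen's real spinlock layout:

#include <stdio.h>

typedef struct { volatile int held; } toy_spinlock_t;
#define TOY_DEFINE_SPINLOCK(name) toy_spinlock_t name = { 0 }

static TOY_DEFINE_SPINLOCK(demo_lock);   /* definition and initializer in one */

int main(void)
{
    printf("demo_lock starts %s\n", demo_lock.held ? "held" : "unlocked");
    return 0;
}
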
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/mm.c Fri Jun 02 12:31:48 2006 -0500
@@ -89,6 +89,7 @@
 #include <xen/kernel.h>
 #include <xen/lib.h>
 #include <xen/mm.h>
+#include <xen/domain.h>
 #include <xen/sched.h>
 #include <xen/errno.h>
 #include <xen/perfc.h>
@@ -187,20 +188,16 @@ void arch_init_memory(void)
      * Any Xen-heap pages that we will allow to be mapped will have
      * their domain field set to dom_xen.
      */
-    dom_xen = alloc_domain();
-    spin_lock_init(&dom_xen->page_alloc_lock);
-    atomic_set(&dom_xen->refcnt, 1);
-    dom_xen->domain_id = DOMID_XEN;
+    dom_xen = alloc_domain(DOMID_XEN);
+    BUG_ON(dom_xen == NULL);
 
     /*
      * Initialise our DOMID_IO domain.
      * This domain owns I/O pages that are within the range of the page_info
      * array. Mappings occur at the priv of the caller.
      */
-    dom_io = alloc_domain();
-    spin_lock_init(&dom_io->page_alloc_lock);
-    atomic_set(&dom_io->refcnt, 1);
-    dom_io->domain_id = DOMID_IO;
+    dom_io = alloc_domain(DOMID_IO);
+    BUG_ON(dom_io == NULL);
 
     /* First 1MB of RAM is historically marked as I/O. */
     for ( i = 0; i < 0x100; i++ )
@@ -266,8 +263,15 @@ void share_xen_page_with_privileged_gues
 /* Only PDPTs above 4GB boundary need to be shadowed in low memory. */
 #define l3tab_needs_shadow(mfn) (mfn >= 0x100000)
 #else
-/* In debug builds we aggressively shadow PDPTs to exercise code paths. */
-#define l3tab_needs_shadow(mfn) ((mfn << PAGE_SHIFT) != __pa(idle_pg_table))
+/*
+ * In debug builds we aggressively shadow PDPTs to exercise code paths.
+ * We cannot safely shadow the idle page table, nor shadow-mode page tables
+ * (detected by lack of an owning domain). Always shadow PDPTs above 4GB.
+ */
+#define l3tab_needs_shadow(mfn)                         \
+    ((((mfn << PAGE_SHIFT) != __pa(idle_pg_table)) &&   \
+      (page_get_owner(mfn_to_page(mfn)) != NULL)) ||    \
+     (mfn >= 0x100000))
 #endif
 
 static l1_pgentry_t *fix_pae_highmem_pl1e;
@@ -1598,12 +1602,18 @@ int get_page_type(struct page_info *page
             {
                 if ( unlikely((x & PGT_type_mask) != (type & PGT_type_mask) ) )
                 {
-                    if ( current->domain == page_get_owner(page) )
+                    if ( (current->domain == page_get_owner(page)) &&
+                         ((x & PGT_type_mask) == PGT_writable_page) )
                     {
                         /*
                          * This ensures functions like set_gdt() see up-to-date
                          * type info without needing to clean up writable p.t.
-                         * state on the fast path.
+                         * state on the fast path. We take this path only
+                         * when the current type is writable because:
+                         *  1. It's the only type that this path can decrement.
+                         *  2. If we take this path more liberally then we can
+                         *     enter a recursive loop via get_page_from_l1e()
+                         *     during pagetable revalidation.
                          */
                         LOCK_BIGLOCK(current->domain);
                         cleanup_writable_pagetable(current->domain);
@@ -1704,7 +1714,7 @@ int new_guest_cr3(unsigned long mfn)
         {
             /* Switch to idle pagetable: this VCPU has no active p.t. now. */
             old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-            v->arch.guest_table = mk_pagetable(0);
+            v->arch.guest_table = pagetable_null();
             update_pagetables(v);
             write_cr3(__pa(idle_pg_table));
             if ( old_base_mfn != 0 )
@@ -1726,7 +1736,7 @@ int new_guest_cr3(unsigned long mfn)
     invalidate_shadow_ldt(v);
 
     old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-    v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
+    v->arch.guest_table = pagetable_from_pfn(mfn);
     update_pagetables(v); /* update shadow_table and monitor_table */
 
     write_ptbase(v);
@@ -1993,7 +2003,7 @@ int do_mmuext_op(
             {
                 unsigned long old_mfn =
                     pagetable_get_pfn(v->arch.guest_table_user);
-                v->arch.guest_table_user = mk_pagetable(mfn << PAGE_SHIFT);
+                v->arch.guest_table_user = pagetable_from_pfn(mfn);
                 if ( old_mfn != 0 )
                     put_page_and_type(mfn_to_page(old_mfn));
             }
@@ -2200,99 +2210,88 @@ int do_mmu_update(
 
             switch ( (type_info = page->u.inuse.type_info) & PGT_type_mask )
             {
-            case PGT_l1_page_table: 
-                ASSERT( !shadow_mode_refcounts(d) );
-                if ( likely(get_page_type(
+            case PGT_l1_page_table:
+            case PGT_l2_page_table:
+            case PGT_l3_page_table:
+            case PGT_l4_page_table:
+            {
+                ASSERT(!shadow_mode_refcounts(d));
+                if ( unlikely(!get_page_type(
                     page, type_info & (PGT_type_mask|PGT_va_mask))) )
+                    goto not_a_pt;
+
+                switch ( type_info & PGT_type_mask )
                 {
-                    l1_pgentry_t l1e;
-
-                    /* FIXME: doesn't work with PAE */
-                    l1e = l1e_from_intpte(req.val);
+                case PGT_l1_page_table:
+                {
+                    l1_pgentry_t l1e = l1e_from_intpte(req.val);
                     okay = mod_l1_entry(va, l1e);
                     if ( okay && unlikely(shadow_mode_enabled(d)) )
                         shadow_l1_normal_pt_update(
                             d, req.ptr, l1e, &sh_mapcache);
-                    put_page_type(page);
                 }
                 break;
-            case PGT_l2_page_table:
-                ASSERT( !shadow_mode_refcounts(d) );
-                if ( likely(get_page_type(
-                    page, type_info & (PGT_type_mask|PGT_va_mask))) )
+                case PGT_l2_page_table:
                 {
-                    l2_pgentry_t l2e;
-
-                    /* FIXME: doesn't work with PAE */
-                    l2e = l2e_from_intpte(req.val);
+                    l2_pgentry_t l2e = l2e_from_intpte(req.val);
                     okay = mod_l2_entry(
                         (l2_pgentry_t *)va, l2e, mfn, type_info);
                     if ( okay && unlikely(shadow_mode_enabled(d)) )
                         shadow_l2_normal_pt_update(
                             d, req.ptr, l2e, &sh_mapcache);
-                    put_page_type(page);
                 }
                 break;
 #if CONFIG_PAGING_LEVELS >= 3
-            case PGT_l3_page_table:
-                ASSERT( !shadow_mode_refcounts(d) );
-                if ( likely(get_page_type(
-                    page, type_info & (PGT_type_mask|PGT_va_mask))) )
+                case PGT_l3_page_table:
                 {
-                    l3_pgentry_t l3e;
-
-                    /* FIXME: doesn't work with PAE */
-                    l3e = l3e_from_intpte(req.val);
+                    l3_pgentry_t l3e = l3e_from_intpte(req.val);
                     okay = mod_l3_entry(va, l3e, mfn, type_info);
                     if ( okay && unlikely(shadow_mode_enabled(d)) )
                         shadow_l3_normal_pt_update(
                             d, req.ptr, l3e, &sh_mapcache);
-                    put_page_type(page);
                 }
                 break;
 #endif
 #if CONFIG_PAGING_LEVELS >= 4
-            case PGT_l4_page_table:
-                ASSERT( !shadow_mode_refcounts(d) );
-                if ( likely(get_page_type(
-                    page, type_info & (PGT_type_mask|PGT_va_mask))) )
+                case PGT_l4_page_table:
                 {
-                    l4_pgentry_t l4e;
-
-                    l4e = l4e_from_intpte(req.val);
+                    l4_pgentry_t l4e = l4e_from_intpte(req.val);
                     okay = mod_l4_entry(va, l4e, mfn, type_info);
                     if ( okay && unlikely(shadow_mode_enabled(d)) )
                         shadow_l4_normal_pt_update(
                             d, req.ptr, l4e, &sh_mapcache);
-                    put_page_type(page);
                 }
                 break;
 #endif
+                }
+
+                put_page_type(page);
+            }
+            break;
+
             default:
-                if ( likely(get_page_type(page, PGT_writable_page)) )
+            not_a_pt:
+            {
+                if ( unlikely(!get_page_type(page, PGT_writable_page)) )
+                    break;
+
+                if ( shadow_mode_enabled(d) )
                 {
-                    if ( shadow_mode_enabled(d) )
-                    {
-                        shadow_lock(d);
-
-                        __mark_dirty(d, mfn);
-
-                        if ( page_is_page_table(page) &&
-                             !page_out_of_sync(page) )
-                        {
-                            shadow_mark_mfn_out_of_sync(v, gmfn, mfn);
-                        }
-                    }
-
-                    *(intpte_t *)va = req.val;
-                    okay = 1;
-
-                    if ( shadow_mode_enabled(d) )
-                        shadow_unlock(d);
-
-                    put_page_type(page);
+                    shadow_lock(d);
+                    __mark_dirty(d, mfn);
+                    if ( page_is_page_table(page) && !page_out_of_sync(page) )
+                        shadow_mark_mfn_out_of_sync(v, gmfn, mfn);
                 }
-                break;
+
+                *(intpte_t *)va = req.val;
+                okay = 1;
+
+                if ( shadow_mode_enabled(d) )
+                    shadow_unlock(d);
+
+                put_page_type(page);
+            }
+            break;
             }
 
             unmap_domain_page_with_cache(va, &mapcache);
@@ -3696,7 +3695,7 @@ int map_pages_to_xen(
             {
                 local_flush_tlb_pge();
                 if ( !(l2e_get_flags(ol2e) & _PAGE_PSE) )
-                    free_xen_pagetable(l2e_get_page(*pl2e));
+                    free_xen_pagetable(l2e_get_page(ol2e));
             }
 
             virt    += 1UL << L2_PAGETABLE_SHIFT;
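
The mk_pagetable() call sites throughout this merge move to typed accessors (pagetable_from_pfn, pagetable_from_paddr, pagetable_null, pagetable_is_null), so callers stop doing the pfn<<PAGE_SHIFT arithmetic themselves. A rough standalone sketch of that accessor pattern; the struct layout here is illustrative, not Xen's:

#include <stdio.h>
#include <stdint.h>

#define TOY_PAGE_SHIFT 12

typedef struct { uint64_t pfn; } toy_pagetable_t;

static toy_pagetable_t toy_pagetable_from_pfn(uint64_t pfn)
{
    toy_pagetable_t pt = { pfn };
    return pt;
}

static toy_pagetable_t toy_pagetable_null(void)
{
    return toy_pagetable_from_pfn(0);
}

static int toy_pagetable_is_null(toy_pagetable_t pt)
{
    return pt.pfn == 0;
}

static uint64_t toy_pagetable_get_paddr(toy_pagetable_t pt)
{
    return pt.pfn << TOY_PAGE_SHIFT;
}

int main(void)
{
    toy_pagetable_t gt = toy_pagetable_from_pfn(0x1234);

    printf("paddr=0x%llx null=%d\n",
           (unsigned long long)toy_pagetable_get_paddr(gt),
           toy_pagetable_is_null(gt));
    gt = toy_pagetable_null();
    printf("after reset: null=%d\n", toy_pagetable_is_null(gt));
    return 0;
}
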
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/setup.c      Fri Jun 02 12:31:48 2006 -0500
@@ -85,8 +85,6 @@ extern void early_cpu_init(void);
 
 struct tss_struct init_tss[NR_CPUS];
 
-struct vcpu *idle_vcpu[NR_CPUS];
-
 extern unsigned long cpu0_stack[];
 
 struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/shadow.c     Fri Jun 02 12:31:48 2006 -0500
@@ -1724,7 +1724,8 @@ static int resync_all(struct domain *d, 
                         unshadow_l1 = 1;
                     else {
                         need_flush |= error;
-                        set_guest_back_ptr(d, *sl1e_p, smfn, i);
+                        if ( l1e_get_flags(*sl1e_p) & _PAGE_PRESENT )
+                            set_guest_back_ptr(d, *sl1e_p, smfn, i);
                     }
                     // can't update snapshots of linear page tables -- they
                     // are used multiple times...
@@ -2471,7 +2472,7 @@ static void shadow_update_pagetables(str
     if ( !get_shadow_ref(smfn) )
         BUG();
     old_smfn = pagetable_get_pfn(v->arch.shadow_table);
-    v->arch.shadow_table = mk_pagetable((u64)smfn << PAGE_SHIFT);
+    v->arch.shadow_table = pagetable_from_pfn(smfn);
     if ( old_smfn )
         put_shadow_ref(old_smfn);
 
@@ -3480,15 +3481,16 @@ static void shadow_set_l2e_64(unsigned l
 
     __shadow_get_l3e(v, va, &sl3e);
     if (!(l3e_get_flags(sl3e) & _PAGE_PRESENT)) {
-         if (create_l2_shadow) {
+        if (create_l2_shadow) {
             perfc_incrc(shadow_set_l2e_force_map);
             shadow_map_into_current(v, va, PAGING_L2, PAGING_L3);
             __shadow_get_l3e(v, va, &sl3e);
         } else {
             printk("For non HVM shadow, create_l1_shadow:%d\n", 
create_l2_shadow);
         }
-         shadow_update_min_max(l4e_get_pfn(sl4e), l3_table_offset(va));
-
+
+        if ( v->domain->arch.ops->guest_paging_levels == PAGING_L4 )
+            shadow_update_min_max(l4e_get_pfn(sl4e), l3_table_offset(va));
     }
 
     if ( put_ref_check ) {
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c   Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/shadow32.c   Fri Jun 02 12:31:48 2006 -0500
@@ -583,7 +583,7 @@ static void free_shadow_pages(struct dom
         if ( pagetable_get_paddr(v->arch.shadow_table) )
         {
             put_shadow_ref(pagetable_get_pfn(v->arch.shadow_table));
-            v->arch.shadow_table = mk_pagetable(0);
+            v->arch.shadow_table = pagetable_null();
 
             if ( shadow_mode_external(d) )
             {
@@ -765,7 +765,7 @@ static void alloc_monitor_pagetable(stru
     mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = l2e_empty();
     mpl2e[l2_table_offset(RO_MPT_VIRT_START)] = l2e_empty();
 
-    v->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
+    v->arch.monitor_table = pagetable_from_pfn(mmfn);
     v->arch.monitor_vtable = mpl2e;
 
     if ( v->vcpu_id == 0 )
@@ -830,7 +830,7 @@ void free_monitor_pagetable(struct vcpu 
     unmap_domain_page_global(v->arch.monitor_vtable);
     free_domheap_page(mfn_to_page(mfn));
 
-    v->arch.monitor_table = mk_pagetable(0);
+    v->arch.monitor_table = pagetable_null();
     v->arch.monitor_vtable = 0;
 }
 
@@ -992,7 +992,7 @@ alloc_p2m_table(struct domain *d)
 
         l1tab = map_domain_page(page_to_mfn(page));
         memset(l1tab, 0, PAGE_SIZE);
-        d->arch.phys_table = mk_pagetable(page_to_maddr(page));
+        d->arch.phys_table = pagetable_from_page(page);
     }
 
     list_ent = d->page_list.next;
@@ -1126,7 +1126,7 @@ int shadow_direct_map_init(struct domain
     memset(root, 0, PAGE_SIZE);
     unmap_domain_page(root);
 
-    d->arch.phys_table = mk_pagetable(page_to_maddr(page));
+    d->arch.phys_table = pagetable_from_page(page);
 
     return 1;
 }
@@ -1156,7 +1156,7 @@ void shadow_direct_map_clean(struct doma
 
     unmap_domain_page(l2e);
 
-    d->arch.phys_table = mk_pagetable(0);
+    d->arch.phys_table = pagetable_null();
 }
 
 int __shadow_mode_enable(struct domain *d, unsigned int mode)
@@ -2691,7 +2691,8 @@ static int resync_all(struct domain *d, 
                         unshadow_l1 = 1;
                     else {
                         need_flush |= error;
-                        set_guest_back_ptr(d, shadow1[i], smfn, i);
+                        if ( l1e_get_flags(shadow1[i]) & _PAGE_PRESENT )
+                            set_guest_back_ptr(d, shadow1[i], smfn, i);
                     }
 
                     // can't update snapshots of linear page tables -- they
@@ -3230,7 +3231,7 @@ void __update_pagetables(struct vcpu *v)
     if ( !get_shadow_ref(smfn) )
         BUG();
     old_smfn = pagetable_get_pfn(v->arch.shadow_table);
-    v->arch.shadow_table = mk_pagetable(smfn << PAGE_SHIFT);
+    v->arch.shadow_table = pagetable_from_pfn(smfn);
     if ( old_smfn )
         put_shadow_ref(old_smfn);
 
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c      Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/shadow_public.c      Fri Jun 02 12:31:48 2006 -0500
@@ -50,7 +50,7 @@ int shadow_direct_map_init(struct domain
     memset(root, 0, PAGE_SIZE);
     root[PAE_SHADOW_SELF_ENTRY] = l3e_from_page(page, __PAGE_HYPERVISOR);
 
-    d->arch.phys_table = mk_pagetable(page_to_maddr(page));
+    d->arch.phys_table = pagetable_from_page(page);
 
     unmap_domain_page(root);
     return 1;
@@ -92,7 +92,7 @@ void shadow_direct_map_clean(struct doma
 
     unmap_domain_page(l3e);
 
-    d->arch.phys_table = mk_pagetable(0);
+    d->arch.phys_table = pagetable_null();
 }
 
 /****************************************************************************/
@@ -338,7 +338,7 @@ static void alloc_monitor_pagetable(stru
 
     /* map the phys_to_machine map into the per domain Read-Only MPT space */
 
-    v->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
+    v->arch.monitor_table = pagetable_from_pfn(mmfn);
     v->arch.monitor_vtable = (l2_pgentry_t *) mpl4e;
     mpl4e[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
 
@@ -380,7 +380,7 @@ void free_monitor_pagetable(struct vcpu 
     unmap_domain_page_global(v->arch.monitor_vtable);
     free_domheap_page(mfn_to_page(mfn));
 
-    v->arch.monitor_table = mk_pagetable(0);
+    v->arch.monitor_table = pagetable_null();
     v->arch.monitor_vtable = 0;
 }
 #elif CONFIG_PAGING_LEVELS == 3
@@ -431,7 +431,7 @@ static void alloc_monitor_pagetable(stru
     for ( i = 0; i < (MACHPHYS_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
         mpl2e[l2_table_offset(RO_MPT_VIRT_START) + i] = l2e_empty();
 
-    v->arch.monitor_table = mk_pagetable(m3mfn << PAGE_SHIFT); /* < 4GB */
+    v->arch.monitor_table = pagetable_from_pfn(m3mfn);
     v->arch.monitor_vtable = (l2_pgentry_t *) mpl3e;
 
     if ( v->vcpu_id == 0 )
@@ -492,7 +492,7 @@ void free_monitor_pagetable(struct vcpu 
     unmap_domain_page_global(v->arch.monitor_vtable);
     free_domheap_page(mfn_to_page(m3mfn));
 
-    v->arch.monitor_table = mk_pagetable(0);
+    v->arch.monitor_table = pagetable_null();
     v->arch.monitor_vtable = 0;
 }
 #endif
@@ -924,7 +924,7 @@ void free_shadow_pages(struct domain *d)
         if ( pagetable_get_paddr(v->arch.shadow_table) )
         {
             put_shadow_ref(pagetable_get_pfn(v->arch.shadow_table));
-            v->arch.shadow_table = mk_pagetable(0);
+            v->arch.shadow_table = pagetable_null();
 
             if ( shadow_mode_external(d) )
             {
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/smp.c
--- a/xen/arch/x86/smp.c        Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/smp.c        Fri Jun 02 12:31:48 2006 -0500
@@ -161,7 +161,7 @@ void send_IPI_mask_phys(cpumask_t mask, 
     local_irq_restore(flags);
 }
 
-static spinlock_t flush_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(flush_lock);
 static cpumask_t flush_cpumask;
 static unsigned long flush_va;
 
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/smpboot.c    Fri Jun 02 12:31:48 2006 -0500
@@ -37,6 +37,7 @@
 #include <xen/init.h>
 #include <xen/kernel.h>
 #include <xen/mm.h>
+#include <xen/domain.h>
 #include <xen/sched.h>
 #include <xen/irq.h>
 #include <xen/delay.h>
@@ -886,28 +887,16 @@ static int __devinit do_boot_cpu(int api
        int timeout;
        unsigned long start_eip;
        unsigned short nmi_high = 0, nmi_low = 0;
-       struct domain *d;
        struct vcpu *v;
-       int vcpu_id;
 
        ++cpucount;
 
        booting_cpu = cpu;
 
-       if ((vcpu_id = cpu % MAX_VIRT_CPUS) == 0) {
-               d = domain_create(IDLE_DOMAIN_ID, cpu);
-               BUG_ON(d == NULL);
-               v = d->vcpu[0];
-       } else {
-               d = idle_vcpu[cpu - vcpu_id]->domain;
-               BUG_ON(d == NULL);
-               v = alloc_vcpu(d, vcpu_id, cpu);
-       }
-
-       idle_vcpu[cpu] = v;
+       v = alloc_idle_vcpu(cpu);
        BUG_ON(v == NULL);
 
-       v->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
+       v->arch.monitor_table = pagetable_from_paddr(__pa(idle_pg_table));
 
        /* start_eip had better be page-aligned! */
        start_eip = setup_trampoline();
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/time.c
--- a/xen/arch/x86/time.c       Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/time.c       Fri Jun 02 12:31:48 2006 -0500
@@ -40,10 +40,10 @@ boolean_param("hpet_force", opt_hpet_for
 
 unsigned long cpu_khz;  /* CPU clock frequency in kHz. */
 unsigned long hpet_address;
-spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(rtc_lock);
 unsigned long volatile jiffies;
 static u32 wc_sec, wc_nsec; /* UTC time at last 'time update'. */
-static spinlock_t wc_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(wc_lock);
 
 struct time_scale {
     int shift;
@@ -67,7 +67,7 @@ static s_time_t stime_platform_stamp;
 static s_time_t stime_platform_stamp;
 static u64 platform_timer_stamp;
 static struct time_scale platform_timer_scale;
-static spinlock_t platform_timer_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(platform_timer_lock);
 static u64 (*read_platform_count)(void);
 
 /*
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/traps.c      Fri Jun 02 12:31:48 2006 -0500
@@ -876,7 +876,7 @@ static int emulate_privileged_op(struct 
                     PAGE_FAULT(regs->edi, USER_WRITE_FAULT);
                 break;
             }
-            regs->edi += (regs->eflags & EF_DF) ? -(int)op_bytes : op_bytes;
+            regs->edi += (int)((regs->eflags & EF_DF) ? -op_bytes : op_bytes);
             break;
 
         case 0x6e: /* OUTSB */
@@ -902,7 +902,7 @@ static int emulate_privileged_op(struct 
                 outl_user((u32)data, (u16)regs->edx, v, regs);
                 break;
             }
-            regs->esi += (regs->eflags & EF_DF) ? -(int)op_bytes : op_bytes;
+            regs->esi += (int)((regs->eflags & EF_DF) ? -op_bytes : op_bytes);
             break;
         }
 
@@ -1034,8 +1034,8 @@ static int emulate_privileged_op(struct 
             break;
             
         case 3: /* Read CR3 */
-            *reg = pfn_to_paddr(mfn_to_gmfn(v->domain,
-                                    pagetable_get_pfn(v->arch.guest_table)));
+            *reg = xen_pfn_to_cr3(mfn_to_gmfn(
+                v->domain, pagetable_get_pfn(v->arch.guest_table)));
             break;
 
         case 4: /* Read CR4 */
@@ -1085,7 +1085,7 @@ static int emulate_privileged_op(struct 
         case 3: /* Write CR3 */
             LOCK_BIGLOCK(v->domain);
             cleanup_writable_pagetable(v->domain);
-            (void)new_guest_cr3(gmfn_to_mfn(v->domain, paddr_to_pfn(*reg)));
+            (void)new_guest_cr3(gmfn_to_mfn(v->domain, xen_cr3_to_pfn(*reg)));
             UNLOCK_BIGLOCK(v->domain);
             break;
 
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/x86_32/domain_page.c Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/x86_32/domain_page.c Fri Jun 02 12:31:48 2006 -0500
@@ -183,7 +183,7 @@ static unsigned long inuse[BITS_TO_LONGS
 static unsigned long inuse[BITS_TO_LONGS(GLOBALMAP_BITS)];
 static unsigned long garbage[BITS_TO_LONGS(GLOBALMAP_BITS)];
 static unsigned int inuse_cursor;
-static spinlock_t globalmap_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(globalmap_lock);
 
 void *map_domain_page_global(unsigned long pfn)
 {
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c  Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/x86_32/mm.c  Fri Jun 02 12:31:48 2006 -0500
@@ -75,7 +75,8 @@ void __init paging_init(void)
     printk("PAE disabled.\n");
 #endif
 
-    idle_vcpu[0]->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
+    idle_vcpu[0]->arch.monitor_table =
+        pagetable_from_paddr(__pa(idle_pg_table));
 
     if ( cpu_has_pge )
     {
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/x86_64/mm.c  Fri Jun 02 12:31:48 2006 -0500
@@ -81,7 +81,8 @@ void __init paging_init(void)
     l2_pgentry_t *l2_ro_mpt;
     struct page_info *pg;
 
-    idle_vcpu[0]->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
+    idle_vcpu[0]->arch.monitor_table =
+        pagetable_from_paddr(__pa(idle_pg_table));
 
     /* Create user-accessible L2 directory to map the MPT for guests. */
     l3_ro_mpt = alloc_xenheap_page();
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/x86_64/traps.c
--- a/xen/arch/x86/x86_64/traps.c       Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/x86_64/traps.c       Fri Jun 02 12:31:48 2006 -0500
@@ -195,7 +195,7 @@ unsigned long do_iret(void)
     /* Returning to user mode? */
     if ( (iret_saved.cs & 3) == 3 )
     {
-        if ( unlikely(pagetable_get_paddr(v->arch.guest_table_user) == 0) )
+        if ( unlikely(pagetable_is_null(v->arch.guest_table_user)) )
         {
             DPRINTK("Guest switching to user mode with no user page tables\n");
             domain_crash_synchronous();
diff -r 29861ae27914 -r 91ee504ed40e xen/arch/x86/x86_emulate.c
--- a/xen/arch/x86/x86_emulate.c        Tue May 30 15:24:31 2006 -0500
+++ b/xen/arch/x86/x86_emulate.c        Fri Jun 02 12:31:48 2006 -0500
@@ -380,11 +380,12 @@ do{ __asm__ __volatile__ (              
       ((reg) & ((1UL << (ad_bytes << 3)) - 1))))
 #define register_address_increment(reg, inc)                            \
 do {                                                                    \
+    int _inc = (inc); /* signed type ensures sign extension to long */  \
     if ( ad_bytes == sizeof(unsigned long) )                            \
-        (reg) += (inc);                                                 \
+        (reg) += _inc;                                                  \
     else                                                                \
         (reg) = ((reg) & ~((1UL << (ad_bytes << 3)) - 1)) |             \
-                (((reg) + (inc)) & ((1UL << (ad_bytes << 3)) - 1));     \
+                (((reg) + _inc) & ((1UL << (ad_bytes << 3)) - 1));      \
 } while (0)
 
 void *
@@ -858,7 +859,7 @@ x86_emulate_memop(
                                          &dst.val, 8, ctxt)) != 0 )
                     goto done;
             }
-            register_address_increment(_regs.esp, -(int)dst.bytes);
+            register_address_increment(_regs.esp, -dst.bytes);
             if ( (rc = ops->write_std(register_address(_regs.ss, _regs.esp),
                                       dst.val, dst.bytes, ctxt)) != 0 )
                 goto done;
@@ -942,9 +943,9 @@ x86_emulate_memop(
                 goto done;
         }
         register_address_increment(
-            _regs.esi, (_regs.eflags & EFLG_DF) ? -(int)dst.bytes : dst.bytes);
+            _regs.esi, (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
         register_address_increment(
-            _regs.edi, (_regs.eflags & EFLG_DF) ? -(int)dst.bytes : dst.bytes);
+            _regs.edi, (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
         break;
     case 0xa6 ... 0xa7: /* cmps */
         DPRINTF("Urk! I don't handle CMPS.\n");
@@ -955,7 +956,7 @@ x86_emulate_memop(
         dst.ptr   = (unsigned long *)cr2;
         dst.val   = _regs.eax;
         register_address_increment(
-            _regs.edi, (_regs.eflags & EFLG_DF) ? -(int)dst.bytes : dst.bytes);
+            _regs.edi, (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
         break;
     case 0xac ... 0xad: /* lods */
         dst.type  = OP_REG;
@@ -964,7 +965,7 @@ x86_emulate_memop(
         if ( (rc = ops->read_emulated(cr2, &dst.val, dst.bytes, ctxt)) != 0 )
             goto done;
         register_address_increment(
-            _regs.esi, (_regs.eflags & EFLG_DF) ? -(int)dst.bytes : dst.bytes);
+            _regs.esi, (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
         break;
     case 0xae ... 0xaf: /* scas */
         DPRINTF("Urk! I don't handle SCAS.\n");
diff -r 29861ae27914 -r 91ee504ed40e xen/common/dom0_ops.c
--- a/xen/common/dom0_ops.c     Tue May 30 15:24:31 2006 -0500
+++ b/xen/common/dom0_ops.c     Fri Jun 02 12:31:48 2006 -0500
@@ -95,7 +95,7 @@ long do_dom0_op(XEN_GUEST_HANDLE(dom0_op
     long ret = 0;
     struct dom0_op curop, *op = &curop;
     void *ssid = NULL; /* save security ptr between pre and post/fail hooks */
-    static spinlock_t dom0_lock = SPIN_LOCK_UNLOCKED;
+    static DEFINE_SPINLOCK(dom0_lock);
 
     if ( !IS_PRIV(current->domain) )
         return -EPERM;
diff -r 29861ae27914 -r 91ee504ed40e xen/common/domain.c
--- a/xen/common/domain.c       Tue May 30 15:24:31 2006 -0500
+++ b/xen/common/domain.c       Fri Jun 02 12:31:48 2006 -0500
@@ -32,22 +32,111 @@ struct domain *domain_list;
 
 struct domain *dom0;
 
-struct domain *domain_create(domid_t dom_id, unsigned int cpu)
-{
-    struct domain *d, **pd;
-    struct vcpu *v;
-
-    if ( (d = alloc_domain()) == NULL )
+struct vcpu *idle_vcpu[NR_CPUS];
+
+struct domain *alloc_domain(domid_t domid)
+{
+    struct domain *d;
+
+    if ( (d = xmalloc(struct domain)) == NULL )
         return NULL;
 
-    d->domain_id = dom_id;
-
+    memset(d, 0, sizeof(*d));
+    d->domain_id = domid;
     atomic_set(&d->refcnt, 1);
-
     spin_lock_init(&d->big_lock);
     spin_lock_init(&d->page_alloc_lock);
     INIT_LIST_HEAD(&d->page_list);
     INIT_LIST_HEAD(&d->xenpage_list);
+
+    return d;
+}
+
+
+void free_domain(struct domain *d)
+{
+    struct vcpu *v;
+    int i;
+
+    sched_destroy_domain(d);
+
+    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
+        if ( (v = d->vcpu[i]) != NULL )
+            free_vcpu_struct(v);
+
+    xfree(d);
+}
+
+
+struct vcpu *alloc_vcpu(
+    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
+{
+    struct vcpu *v;
+
+    BUG_ON(d->vcpu[vcpu_id] != NULL);
+
+    if ( (v = alloc_vcpu_struct(d, vcpu_id)) == NULL )
+        return NULL;
+
+    v->domain = d;
+    v->vcpu_id = vcpu_id;
+    v->processor = cpu_id;
+    atomic_set(&v->pausecnt, 0);
+    v->vcpu_info = &d->shared_info->vcpu_info[vcpu_id];
+
+    v->cpu_affinity = is_idle_domain(d) ?
+        cpumask_of_cpu(cpu_id) : CPU_MASK_ALL;
+
+    v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
+    v->runstate.state_entry_time = NOW();
+
+    if ( (vcpu_id != 0) && !is_idle_domain(d) )
+        set_bit(_VCPUF_down, &v->vcpu_flags);
+
+    if ( sched_init_vcpu(v) < 0 )
+    {
+        free_vcpu_struct(v);
+        return NULL;
+    }
+
+    d->vcpu[vcpu_id] = v;
+    if ( vcpu_id != 0 )
+        d->vcpu[v->vcpu_id-1]->next_in_list = v;
+
+    return v;
+}
+
+struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
+{
+    struct domain *d;
+    struct vcpu *v;
+    unsigned int vcpu_id;
+
+    if ((vcpu_id = cpu_id % MAX_VIRT_CPUS) == 0)
+    {
+        d = domain_create(IDLE_DOMAIN_ID, cpu_id);
+        BUG_ON(d == NULL);
+        v = d->vcpu[0];
+    }
+    else
+    {
+        d = idle_vcpu[cpu_id - vcpu_id]->domain;
+        BUG_ON(d == NULL);
+        v = alloc_vcpu(d, vcpu_id, cpu_id);
+    }
+
+    idle_vcpu[cpu_id] = v;
+
+    return v;
+}
+
+struct domain *domain_create(domid_t domid, unsigned int cpu)
+{
+    struct domain *d, **pd;
+    struct vcpu *v;
+
+    if ( (d = alloc_domain(domid)) == NULL )
+        return NULL;
 
     rangeset_domain_initialise(d);
 
@@ -74,14 +163,14 @@ struct domain *domain_create(domid_t dom
     if ( !is_idle_domain(d) )
     {
         write_lock(&domlist_lock);
-        pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
+        pd = &domain_list; /* NB. domain_list maintained in order of domid. */
         for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
             if ( (*pd)->domain_id > d->domain_id )
                 break;
         d->next_in_list = *pd;
         *pd = d;
-        d->next_in_hashbucket = domain_hash[DOMAIN_HASH(dom_id)];
-        domain_hash[DOMAIN_HASH(dom_id)] = d;
+        d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)];
+        domain_hash[DOMAIN_HASH(domid)] = d;
         write_unlock(&domlist_lock);
     }
 
@@ -126,19 +215,16 @@ struct domain *find_domain_by_id(domid_t
 
 void domain_kill(struct domain *d)
 {
-    struct vcpu *v;
-
     domain_pause(d);
-    if ( !test_and_set_bit(_DOMF_dying, &d->domain_flags) )
-    {
-        for_each_vcpu(d, v)
-            sched_rem_domain(v);
-        gnttab_release_mappings(d);
-        domain_relinquish_resources(d);
-        put_domain(d);
-
-        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
-    }
+
+    if ( test_and_set_bit(_DOMF_dying, &d->domain_flags) )
+        return;
+
+    gnttab_release_mappings(d);
+    domain_relinquish_resources(d);
+    put_domain(d);
+
+    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
 }
 
 
diff -r 29861ae27914 -r 91ee504ed40e xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Tue May 30 15:24:31 2006 -0500
+++ b/xen/common/page_alloc.c   Fri Jun 02 12:31:48 2006 -0500
@@ -59,7 +59,7 @@ custom_param("lowmem_emergency_pool", pa
 #define round_pgdown(_p)  ((_p)&PAGE_MASK)
 #define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
 
-static spinlock_t page_scrub_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(page_scrub_lock);
 LIST_HEAD(page_scrub_list);
 
 /*********************
@@ -250,7 +250,7 @@ static struct list_head heap[NR_ZONES][M
 
 static unsigned long avail[NR_ZONES];
 
-static spinlock_t heap_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(heap_lock);
 
 void end_boot_allocator(void)
 {
diff -r 29861ae27914 -r 91ee504ed40e xen/common/perfc.c
--- a/xen/common/perfc.c        Tue May 30 15:24:31 2006 -0500
+++ b/xen/common/perfc.c        Fri Jun 02 12:31:48 2006 -0500
@@ -209,7 +209,7 @@ static int perfc_copy_info(XEN_GUEST_HAN
 /* Dom0 control of perf counters */
 int perfc_control(dom0_perfccontrol_t *pc)
 {
-    static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+    static DEFINE_SPINLOCK(lock);
     u32 op = pc->op;
     int rc;
 
diff -r 29861ae27914 -r 91ee504ed40e xen/common/sched_bvt.c
--- a/xen/common/sched_bvt.c    Tue May 30 15:24:31 2006 -0500
+++ b/xen/common/sched_bvt.c    Fri Jun 02 12:31:48 2006 -0500
@@ -160,15 +160,14 @@ static inline u32 calc_evt(struct vcpu *
 }
 
 /**
- * bvt_alloc_task - allocate BVT private structures for a task
- * @p:              task to allocate private structures for
- *
+ * bvt_init_vcpu - allocate BVT private structures for a VCPU.
  * Returns non-zero on failure.
  */
-static int bvt_alloc_task(struct vcpu *v)
+static int bvt_init_vcpu(struct vcpu *v)
 {
     struct domain *d = v->domain;
     struct bvt_dom_info *inf;
+    struct bvt_vcpu_info *einf;
 
     if ( (d->sched_priv == NULL) )
     {
@@ -199,15 +198,7 @@ static int bvt_alloc_task(struct vcpu *v
         init_timer(&inf->unwarp_timer, unwarp_timer_fn, inf, v->processor);
     }
 
-    return 0;
-}
-
-/*
- * Add and remove a domain
- */
-static void bvt_add_task(struct vcpu *v) 
-{
-    struct bvt_vcpu_info *einf = EBVT_INFO(v);
+    einf = EBVT_INFO(v);
 
     /* Allocate per-CPU context if this is the first domain to be added. */
     if ( CPU_INFO(v->processor) == NULL )
@@ -223,13 +214,15 @@ static void bvt_add_task(struct vcpu *v)
         einf->avt = einf->evt = ~0U;
         BUG_ON(__task_on_runqueue(v));
         __add_to_runqueue_head(v);
-    } 
+    }
     else 
     {
         /* Set avt and evt to system virtual time. */
         einf->avt = CPU_SVT(v->processor);
         einf->evt = CPU_SVT(v->processor);
     }
+
+    return 0;
 }
 
 static void bvt_wake(struct vcpu *v)
@@ -298,10 +291,9 @@ static int bvt_set_affinity(struct vcpu 
 
 
 /**
- * bvt_free_task - free BVT private structures for a task
- * @d:             task
- */
-static void bvt_free_task(struct domain *d)
+ * bvt_destroy_domain - free BVT private structures for a domain.
+ */
+static void bvt_destroy_domain(struct domain *d)
 {
     struct bvt_dom_info *inf = BVT_INFO(d);
 
@@ -568,10 +560,10 @@ struct scheduler sched_bvt_def = {
     .name     = "Borrowed Virtual Time",
     .opt_name = "bvt",
     .sched_id = SCHED_BVT,
-    
-    .alloc_task     = bvt_alloc_task,
-    .add_task       = bvt_add_task,
-    .free_task      = bvt_free_task,
+
+    .init_vcpu      = bvt_init_vcpu,
+    .destroy_domain = bvt_destroy_domain,
+
     .do_schedule    = bvt_do_schedule,
     .control        = bvt_ctl,
     .adjdom         = bvt_adjdom,
diff -r 29861ae27914 -r 91ee504ed40e xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Tue May 30 15:24:31 2006 -0500
+++ b/xen/common/sched_credit.c Fri Jun 02 12:31:48 2006 -0500
@@ -75,14 +75,13 @@
     } while ( 0 );
 
 #define CSCHED_STATS_EXPAND_SCHED(_MACRO)   \
-    _MACRO(vcpu_alloc)                      \
-    _MACRO(vcpu_add)                        \
+    _MACRO(vcpu_init)                       \
     _MACRO(vcpu_sleep)                      \
     _MACRO(vcpu_wake_running)               \
     _MACRO(vcpu_wake_onrunq)                \
     _MACRO(vcpu_wake_runnable)              \
     _MACRO(vcpu_wake_not_runnable)          \
-    _MACRO(dom_free)                        \
+    _MACRO(dom_destroy)                     \
     _MACRO(schedule)                        \
     _MACRO(tickle_local_idler)              \
     _MACRO(tickle_local_over)               \
@@ -429,14 +428,14 @@ __csched_vcpu_acct_idle_locked(struct cs
 }
 
 static int
-csched_vcpu_alloc(struct vcpu *vc)
+csched_vcpu_init(struct vcpu *vc)
 {
     struct domain * const dom = vc->domain;
     struct csched_dom *sdom;
     struct csched_vcpu *svc;
     int16_t pri;
 
-    CSCHED_STAT_CRANK(vcpu_alloc);
+    CSCHED_STAT_CRANK(vcpu_init);
 
     /* Allocate, if appropriate, per-domain info */
     if ( is_idle_vcpu(vc) )
@@ -489,19 +488,13 @@ csched_vcpu_alloc(struct vcpu *vc)
     if ( likely(sdom != NULL) )
         csched_vcpu_acct(svc, 0);
 
-    return 0;
-}
-
-static void
-csched_vcpu_add(struct vcpu *vc) 
-{
-    CSCHED_STAT_CRANK(vcpu_add);
-
     /* Allocate per-PCPU info */
     if ( unlikely(!CSCHED_PCPU(vc->processor)) )
         csched_pcpu_init(vc->processor);
 
     CSCHED_VCPU_CHECK(vc);
+
+    return 0;
 }
 
 static void
@@ -644,12 +637,12 @@ csched_dom_cntl(
 }
 
 static void
-csched_dom_free(struct domain *dom)
+csched_dom_destroy(struct domain *dom)
 {
     struct csched_dom * const sdom = CSCHED_DOM(dom);
     int i;
 
-    CSCHED_STAT_CRANK(dom_free);
+    CSCHED_STAT_CRANK(dom_destroy);
 
     for ( i = 0; i < MAX_VIRT_CPUS; i++ )
     {
@@ -1215,14 +1208,15 @@ struct scheduler sched_credit_def = {
     .opt_name       = "credit",
     .sched_id       = SCHED_CREDIT,
 
-    .alloc_task     = csched_vcpu_alloc,
-    .add_task       = csched_vcpu_add,
+    .init_vcpu      = csched_vcpu_init,
+    .destroy_domain = csched_dom_destroy,
+
     .sleep          = csched_vcpu_sleep,
     .wake           = csched_vcpu_wake,
+
     .set_affinity   = csched_vcpu_set_affinity,
 
     .adjdom         = csched_dom_cntl,
-    .free_task      = csched_dom_free,
 
     .tick           = csched_tick,
     .do_schedule    = csched_schedule,
diff -r 29861ae27914 -r 91ee504ed40e xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c   Tue May 30 15:24:31 2006 -0500
+++ b/xen/common/sched_sedf.c   Fri Jun 02 12:31:48 2006 -0500
@@ -328,11 +328,9 @@ static inline void __add_to_runqueue_sor
 }
 
 
-/* Allocates memory for per domain private scheduling data*/
-static int sedf_alloc_task(struct vcpu *v)
-{
-    PRINT(2, "sedf_alloc_task was called, domain-id %i.%i\n",
-          v->domain->domain_id, v->vcpu_id);
+static int sedf_init_vcpu(struct vcpu *v)
+{
+    struct sedf_vcpu_info *inf;
 
     if ( v->domain->sched_priv == NULL )
     {
@@ -344,23 +342,11 @@ static int sedf_alloc_task(struct vcpu *
 
     if ( (v->sched_priv = xmalloc(struct sedf_vcpu_info)) == NULL )
         return -1;
-
     memset(v->sched_priv, 0, sizeof(struct sedf_vcpu_info));
 
-    return 0;
-}
-
-
-/* Setup the sedf_dom_info */
-static void sedf_add_task(struct vcpu *v)
-{
-    struct sedf_vcpu_info *inf = EDOM_INFO(v);
-
+    inf = EDOM_INFO(v);
     inf->vcpu = v;
  
-    PRINT(2,"sedf_add_task was called, domain-id %i.%i\n",
-          v->domain->domain_id, v->vcpu_id);
-
     /* Allocate per-CPU context if this is the first domain to be added. */
     if ( unlikely(schedule_data[v->processor].sched_priv == NULL) )
     {
@@ -408,14 +394,13 @@ static void sedf_add_task(struct vcpu *v
         EDOM_INFO(v)->deadl_abs = 0;
         EDOM_INFO(v)->status &= ~SEDF_ASLEEP;
     }
-}
-
-/* Frees memory used by domain info */
-static void sedf_free_task(struct domain *d)
+
+    return 0;
+}
+
+static void sedf_destroy_domain(struct domain *d)
 {
     int i;
-
-    PRINT(2,"sedf_free_task was called, domain-id %i\n",d->domain_id);
 
     xfree(d->sched_priv);
  
@@ -1452,9 +1437,9 @@ struct scheduler sched_sedf_def = {
     .opt_name = "sedf",
     .sched_id = SCHED_SEDF,
     
-    .alloc_task     = sedf_alloc_task,
-    .add_task       = sedf_add_task,
-    .free_task      = sedf_free_task,
+    .init_vcpu      = sedf_init_vcpu,
+    .destroy_domain = sedf_destroy_domain,
+
     .do_schedule    = sedf_do_schedule,
     .dump_cpu_state = sedf_dump_cpu_state,
     .sleep          = sedf_sleep,
diff -r 29861ae27914 -r 91ee504ed40e xen/common/schedule.c
--- a/xen/common/schedule.c     Tue May 30 15:24:31 2006 -0500
+++ b/xen/common/schedule.c     Fri Jun 02 12:31:48 2006 -0500
@@ -99,74 +99,7 @@ void vcpu_runstate_get(struct vcpu *v, s
     }
 }
 
-struct domain *alloc_domain(void)
-{
-    struct domain *d;
-
-    if ( (d = xmalloc(struct domain)) != NULL )
-        memset(d, 0, sizeof(*d));
-
-    return d;
-}
-
-void free_domain(struct domain *d)
-{
-    struct vcpu *v;
-    int i;
-
-    for_each_vcpu ( d, v )
-        sched_rem_domain(v);
-
-    SCHED_OP(free_task, d);
-
-    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
-        if ( (v = d->vcpu[i]) != NULL )
-            free_vcpu_struct(v);
-
-    xfree(d);
-}
-
-struct vcpu *alloc_vcpu(
-    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
-{
-    struct vcpu *v;
-
-    BUG_ON(d->vcpu[vcpu_id] != NULL);
-
-    if ( (v = alloc_vcpu_struct(d, vcpu_id)) == NULL )
-        return NULL;
-
-    v->domain = d;
-    v->vcpu_id = vcpu_id;
-    v->processor = cpu_id;
-    atomic_set(&v->pausecnt, 0);
-    v->vcpu_info = &d->shared_info->vcpu_info[vcpu_id];
-
-    v->cpu_affinity = is_idle_domain(d) ?
-        cpumask_of_cpu(cpu_id) : CPU_MASK_ALL;
-
-    v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
-    v->runstate.state_entry_time = NOW();
-
-    if ( (vcpu_id != 0) && !is_idle_domain(d) )
-        set_bit(_VCPUF_down, &v->vcpu_flags);
-
-    if ( SCHED_OP(alloc_task, v) < 0 )
-    {
-        free_vcpu_struct(v);
-        return NULL;
-    }
-
-    d->vcpu[vcpu_id] = v;
-    if ( vcpu_id != 0 )
-        d->vcpu[v->vcpu_id-1]->next_in_list = v;
-
-    sched_add_domain(v);
-
-    return v;
-}
-
-void sched_add_domain(struct vcpu *v) 
+int sched_init_vcpu(struct vcpu *v) 
 {
     /* Initialise the per-domain timers. */
     init_timer(&v->timer, vcpu_timer_fn, v, v->processor);
@@ -179,17 +112,23 @@ void sched_add_domain(struct vcpu *v)
         set_bit(_VCPUF_running, &v->vcpu_flags);
     }
 
-    SCHED_OP(add_task, v);
     TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);
-}
-
-void sched_rem_domain(struct vcpu *v) 
-{
-    kill_timer(&v->timer);
-    kill_timer(&v->poll_timer);
-
-    SCHED_OP(rem_task, v);
-    TRACE_2D(TRC_SCHED_DOM_REM, v->domain->domain_id, v->vcpu_id);
+
+    return SCHED_OP(init_vcpu, v);
+}
+
+void sched_destroy_domain(struct domain *d)
+{
+    struct vcpu *v;
+
+    for_each_vcpu ( d, v )
+    {
+        kill_timer(&v->timer);
+        kill_timer(&v->poll_timer);
+        TRACE_2D(TRC_SCHED_DOM_REM, v->domain->domain_id, v->vcpu_id);
+    }
+
+    SCHED_OP(destroy_domain, d);
 }
 
 void vcpu_sleep_nosync(struct vcpu *v)
@@ -663,7 +602,7 @@ static void poll_timer_fn(void *data)
 /* Initialise the data structures. */
 void __init scheduler_init(void)
 {
-    int i, rc;
+    int i;
 
     open_softirq(SCHEDULE_SOFTIRQ, __enter_scheduler);
 
@@ -686,17 +625,6 @@ void __init scheduler_init(void)
 
     printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
     SCHED_OP(init);
-
-    if ( idle_vcpu[0] != NULL )
-    {
-        schedule_data[0].curr = idle_vcpu[0];
-        schedule_data[0].idle = idle_vcpu[0];
-
-        rc = SCHED_OP(alloc_task, idle_vcpu[0]);
-        BUG_ON(rc < 0);
-
-        sched_add_domain(idle_vcpu[0]);
-    }
 }
 
 /*
diff -r 29861ae27914 -r 91ee504ed40e xen/common/trace.c
--- a/xen/common/trace.c        Tue May 30 15:24:31 2006 -0500
+++ b/xen/common/trace.c        Fri Jun 02 12:31:48 2006 -0500
@@ -173,25 +173,17 @@ void init_trace_bufs(void)
  */
 int tb_control(dom0_tbufcontrol_t *tbc)
 {
-    static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+    static DEFINE_SPINLOCK(lock);
     int rc = 0;
 
     spin_lock(&lock);
-
-    if ( !tb_init_done &&
-         (tbc->op != DOM0_TBUF_SET_SIZE) &&
-         (tbc->op != DOM0_TBUF_ENABLE) )
-    {
-        spin_unlock(&lock);
-        return -EINVAL;
-    }
 
     switch ( tbc->op )
     {
     case DOM0_TBUF_GET_INFO:
         tbc->cpu_mask   = tb_cpu_mask;
         tbc->evt_mask   = tb_event_mask;
-        tbc->buffer_mfn = __pa(t_bufs[0]) >> PAGE_SHIFT;
+        tbc->buffer_mfn = opt_tbuf_size ? virt_to_mfn(t_bufs[0]) : 0UL;
         tbc->size       = opt_tbuf_size * PAGE_SIZE;
         break;
     case DOM0_TBUF_SET_CPU_MASK:
diff -r 29861ae27914 -r 91ee504ed40e xen/common/xmalloc.c
--- a/xen/common/xmalloc.c      Tue May 30 15:24:31 2006 -0500
+++ b/xen/common/xmalloc.c      Fri Jun 02 12:31:48 2006 -0500
@@ -35,7 +35,7 @@
 #include <xen/prefetch.h>
 
 static LIST_HEAD(freelist);

_______________________________________________
Xen-ppc-devel mailing list
Xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ppc-devel