[Xen-changelog] We no longer need linux sources to build xen.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] We no longer need linux sources to build xen.
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 26 Aug 2005 09:10:10 +0000
Delivery-date: Fri, 26 Aug 2005 09:08:43 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User adsharma@xxxxxxxxxxxxxxxxxxxxx
# Node ID e2127f19861b842e572682619dd37651c2b5441e
# Parent  e173a853dc46f7b7f066a6dea2990a54e2a86295
We no longer need linux sources to build xen.

Signed-off-by: Arun Sharma <arun.sharma@xxxxxxxxx>
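For background on the Makefile change below: GNU make's VPATH variable gives
make a search path for prerequisites that are not present in the current
directory, so the Linux-derived files now carried under xen/arch/ia64/linux
can be found without unpacking an external kernel tree. A minimal sketch of
the mechanism (file names here are illustrative only, not part of this patch):

    # With "VPATH = linux", a prerequisite such as foo.c that is missing from
    # the current directory is resolved as linux/foo.c; $< expands to the
    # path where it was found.
    VPATH = linux

    foo.o: foo.c
            $(CC) $(CFLAGS) -c $< -o $@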

diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/Makefile
--- a/xen/arch/ia64/Makefile    Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/Makefile    Tue Aug  2 23:59:09 2005
@@ -1,4 +1,6 @@
 include $(BASEDIR)/Rules.mk
+
+VPATH = linux
 
 # libs-y       += arch/ia64/lib/lib.a
 
@@ -75,7 +77,7 @@
                -o xen.lds.s xen.lds.S
 
 ia64lib.o:
-       $(MAKE) -C lib && cp lib/ia64lib.o .
+       $(MAKE) -C linux/lib && cp linux/lib/ia64lib.o .
 
 clean:
        rm -f *.o *~ core  xen.lds.s $(BASEDIR)/include/asm-ia64/.offsets.h.stamp asm-offsets.s
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/Rules.mk
--- a/xen/arch/ia64/Rules.mk    Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/Rules.mk    Tue Aug  2 23:59:09 2005
@@ -6,14 +6,16 @@
 CROSS_COMPILE ?= /usr/local/sp_env/v2.2.5/i686/bin/ia64-unknown-linux-
 endif
 AFLAGS  += -D__ASSEMBLY__
-CPPFLAGS  += -I$(BASEDIR)/include -I$(BASEDIR)/include/asm-ia64
+CPPFLAGS  += -I$(BASEDIR)/include -I$(BASEDIR)/include/asm-ia64 \
+             -I$(BASEDIR)/include/asm-ia64/linux -I$(BASEDIR)/arch/ia64/linux
 CFLAGS  := -nostdinc -fno-builtin -fno-common -fno-strict-aliasing
 #CFLAGS  += -O3                # -O3 over-inlines making debugging tough!
 CFLAGS  += -O2         # but no optimization causes compile errors!
 #CFLAGS  += -iwithprefix include -Wall -DMONITOR_BASE=$(MONITOR_BASE)
 CFLAGS  += -iwithprefix include -Wall
 CFLAGS  += -fomit-frame-pointer -I$(BASEDIR)/include -D__KERNEL__
-CFLAGS  += -I$(BASEDIR)/include/asm-ia64
+CFLAGS  += -I$(BASEDIR)/include/asm-ia64 -I$(BASEDIR)/include/asm-ia64/linux \
+           -I$(BASEDIR)/arch/ia64/linux -I$(BASEDIR)/arch/ia64
 CFLAGS  += -Wno-pointer-arith -Wredundant-decls
 CFLAGS  += -DIA64 -DXEN -DLINUX_2_6
 CFLAGS += -ffixed-r13 -mfixed-range=f12-f15,f32-f127
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/tools/mkbuildtree
--- a/xen/arch/ia64/tools/mkbuildtree   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/tools/mkbuildtree   Tue Aug  2 23:59:09 2005
@@ -3,15 +3,10 @@
 # run in xen-X.X/xen directory after unpacking linux in same directory
 
 XEN=$PWD
-LINUX=$XEN/../../linux-2.6.11
-LINUXPATCH=$XEN/arch/ia64/patch/linux-2.6.11
-XENPATCH=$XEN/arch/ia64/patch/xen-2.0.1
 
 cp_patch ()
 {
-       #diff -u $LINUX/$1 $XEN/$2 > $LINUXPATCH/$3
-       cp $LINUX/$1 $XEN/$2
-       patch <$LINUXPATCH/$3 $XEN/$2
+       true;
 }
 
 xen_patch ()
@@ -22,34 +17,13 @@
 
 softlink ()
 {
-       ln -s $LINUX/$1 $XEN/$2
+       true;
 }
 
 null ()
 {
-       touch $XEN/$1
+       true;
 }
-
-
-# ensure linux directory is set up
-if [ ! -d $LINUX ]; then
-       echo "ERROR: $LINUX directory doesn't exist"
-       exit
-fi
-
-# setup
-
-#mkdir arch/ia64
-#mkdir arch/ia64/lib
-#mkdir include/asm-ia64
-mkdir include/asm-generic
-mkdir include/asm-ia64/linux
-mkdir include/asm-ia64/linux/byteorder
-mkdir include/asm-ia64/sn
-# use "gcc -Iinclude/asm-ia64" to find these linux includes
-#ln -s $XEN/include/xen $XEN/include/linux
-#ln -s $XEN/include/asm-ia64/linux $XEN/include/asm-ia64/xen 
-ln -s ../slab.h include/asm-ia64/linux/slab.h
 
 # prepare for building asm-offsets (circular dependency)
 #echo '#define IA64_TASK_SIZE 0' > include/asm-ia64/asm-offsets.h
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/efi.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/efi.c       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,866 @@
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 0.9 April 30, 1999
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ * Copyright (C) 1999-2003 Hewlett-Packard Co.
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *
+ * All EFI Runtime Services are not implemented yet as EFI only
+ * supports physical mode addressing on SoftSDV. This is to be fixed
+ * in a future version.  --drummond 1999-07-20
+ *
+ * Implemented EFI runtime services and virtual mode calls.  --davidm
+ *
+ * Goutham Rao: <goutham.rao@xxxxxxxxx>
+ *     Skip non-WB memory and ignore empty memory ranges.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/efi.h>
+
+#include <asm/io.h>
+#include <asm/kregs.h>
+#include <asm/meminit.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/mca.h>
+
+#define EFI_DEBUG      0
+
+extern efi_status_t efi_call_phys (void *, ...);
+
+struct efi efi;
+EXPORT_SYMBOL(efi);
+static efi_runtime_services_t *runtime;
+static unsigned long mem_limit = ~0UL, max_addr = ~0UL;
+
+#define efi_call_virt(f, args...)      (*(f))(args)
+
+#define STUB_GET_TIME(prefix, adjust_arg) \
+static efi_status_t \
+prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
+{ \
+       struct ia64_fpreg fr[6]; \
+       efi_time_cap_t *atc = NULL; \
+       efi_status_t ret; \
+ \
+       if (tc) \
+               atc = adjust_arg(tc); \
+       ia64_save_scratch_fpregs(fr); \
+       ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), atc); \
+       ia64_load_scratch_fpregs(fr); \
+       return ret; \
+}
+
+#define STUB_SET_TIME(prefix, adjust_arg) \
+static efi_status_t \
+prefix##_set_time (efi_time_t *tm) \
+{ \
+       struct ia64_fpreg fr[6]; \
+       efi_status_t ret; \
+ \
+       ia64_save_scratch_fpregs(fr); \
+       ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), adjust_arg(tm)); \
+       ia64_load_scratch_fpregs(fr); \
+       return ret; \
+}
+
+#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
+static efi_status_t \
+prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm) \
+{ \
+       struct ia64_fpreg fr[6]; \
+       efi_status_t ret; \
+ \
+       ia64_save_scratch_fpregs(fr); \
+       ret = efi_call_##prefix((efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
+                               adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
+       ia64_load_scratch_fpregs(fr); \
+       return ret; \
+}
+
+#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
+static efi_status_t \
+prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
+{ \
+       struct ia64_fpreg fr[6]; \
+       efi_time_t *atm = NULL; \
+       efi_status_t ret; \
+ \
+       if (tm) \
+               atm = adjust_arg(tm); \
+       ia64_save_scratch_fpregs(fr); \
+       ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
+                               enabled, atm); \
+       ia64_load_scratch_fpregs(fr); \
+       return ret; \
+}
+
+#define STUB_GET_VARIABLE(prefix, adjust_arg) \
+static efi_status_t \
+prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
+                      unsigned long *data_size, void *data) \
+{ \
+       struct ia64_fpreg fr[6]; \
+       u32 *aattr = NULL; \
+       efi_status_t ret; \
+ \
+       if (attr) \
+               aattr = adjust_arg(attr); \
+       ia64_save_scratch_fpregs(fr); \
+       ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable), \
+                               adjust_arg(name), adjust_arg(vendor), aattr, \
+                               adjust_arg(data_size), adjust_arg(data)); \
+       ia64_load_scratch_fpregs(fr); \
+       return ret; \
+}
+
+#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
+static efi_status_t \
+prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) \
+{ \
+       struct ia64_fpreg fr[6]; \
+       efi_status_t ret; \
+ \
+       ia64_save_scratch_fpregs(fr); \
+       ret = efi_call_##prefix((efi_get_next_variable_t *) __va(runtime->get_next_variable), \
+                               adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
+       ia64_load_scratch_fpregs(fr); \
+       return ret; \
+}
+
+#define STUB_SET_VARIABLE(prefix, adjust_arg) \
+static efi_status_t \
+prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, unsigned long attr, \
+                      unsigned long data_size, void *data) \
+{ \
+       struct ia64_fpreg fr[6]; \
+       efi_status_t ret; \
+ \
+       ia64_save_scratch_fpregs(fr); \
+       ret = efi_call_##prefix((efi_set_variable_t *) __va(runtime->set_variable), \
+                               adjust_arg(name), adjust_arg(vendor), attr, data_size, \
+                               adjust_arg(data)); \
+       ia64_load_scratch_fpregs(fr); \
+       return ret; \
+}
+
+#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
+static efi_status_t \
+prefix##_get_next_high_mono_count (u32 *count) \
+{ \
+       struct ia64_fpreg fr[6]; \
+       efi_status_t ret; \
+ \
+       ia64_save_scratch_fpregs(fr); \
+       ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
+                               __va(runtime->get_next_high_mono_count), adjust_arg(count)); \
+       ia64_load_scratch_fpregs(fr); \
+       return ret; \
+}
+
+#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
+static void \
+prefix##_reset_system (int reset_type, efi_status_t status, \
+                      unsigned long data_size, efi_char16_t *data) \
+{ \
+       struct ia64_fpreg fr[6]; \
+       efi_char16_t *adata = NULL; \
+ \
+       if (data) \
+               adata = adjust_arg(data); \
+ \
+       ia64_save_scratch_fpregs(fr); \
+       efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system), \
+                         reset_type, status, data_size, adata); \
+       /* should not return, but just in case... */ \
+       ia64_load_scratch_fpregs(fr); \
+}
+
+#define phys_ptr(arg)  ((__typeof__(arg)) ia64_tpa(arg))
+
+STUB_GET_TIME(phys, phys_ptr)
+STUB_SET_TIME(phys, phys_ptr)
+STUB_GET_WAKEUP_TIME(phys, phys_ptr)
+STUB_SET_WAKEUP_TIME(phys, phys_ptr)
+STUB_GET_VARIABLE(phys, phys_ptr)
+STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
+STUB_SET_VARIABLE(phys, phys_ptr)
+STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
+STUB_RESET_SYSTEM(phys, phys_ptr)
+
+#define id(arg)        arg
+
+STUB_GET_TIME(virt, id)
+STUB_SET_TIME(virt, id)
+STUB_GET_WAKEUP_TIME(virt, id)
+STUB_SET_WAKEUP_TIME(virt, id)
+STUB_GET_VARIABLE(virt, id)
+STUB_GET_NEXT_VARIABLE(virt, id)
+STUB_SET_VARIABLE(virt, id)
+STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
+STUB_RESET_SYSTEM(virt, id)
+
+void
+efi_gettimeofday (struct timespec *ts)
+{
+       efi_time_t tm;
+
+       memset(ts, 0, sizeof(ts));
+       if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS)
+               return;
+
+       ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
+       ts->tv_nsec = tm.nanosecond;
+}
+
+static int
+is_available_memory (efi_memory_desc_t *md)
+{
+       if (!(md->attribute & EFI_MEMORY_WB))
+               return 0;
+
+       switch (md->type) {
+             case EFI_LOADER_CODE:
+             case EFI_LOADER_DATA:
+             case EFI_BOOT_SERVICES_CODE:
+             case EFI_BOOT_SERVICES_DATA:
+             case EFI_CONVENTIONAL_MEMORY:
+               return 1;
+       }
+       return 0;
+}
+
+/*
+ * Trim descriptor MD so its starts at address START_ADDR.  If the descriptor covers
+ * memory that is normally available to the kernel, issue a warning that some memory
+ * is being ignored.
+ */
+static void
+trim_bottom (efi_memory_desc_t *md, u64 start_addr)
+{
+       u64 num_skipped_pages;
+
+       if (md->phys_addr >= start_addr || !md->num_pages)
+               return;
+
+       num_skipped_pages = (start_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
+       if (num_skipped_pages > md->num_pages)
+               num_skipped_pages = md->num_pages;
+
+       if (is_available_memory(md))
+               printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
+                      "at 0x%lx\n", __FUNCTION__,
+                      (num_skipped_pages << EFI_PAGE_SHIFT) >> 10,
+                      md->phys_addr, start_addr - IA64_GRANULE_SIZE);
+       /*
+        * NOTE: Don't set md->phys_addr to START_ADDR because that could cause the memory
+        * descriptor list to become unsorted.  In such a case, md->num_pages will be
+        * zero, so the Right Thing will happen.
+        */
+       md->phys_addr += num_skipped_pages << EFI_PAGE_SHIFT;
+       md->num_pages -= num_skipped_pages;
+}
+
+static void
+trim_top (efi_memory_desc_t *md, u64 end_addr)
+{
+       u64 num_dropped_pages, md_end_addr;
+
+       md_end_addr = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+
+       if (md_end_addr <= end_addr || !md->num_pages)
+               return;
+
+       num_dropped_pages = (md_end_addr - end_addr) >> EFI_PAGE_SHIFT;
+       if (num_dropped_pages > md->num_pages)
+               num_dropped_pages = md->num_pages;
+
+       if (is_available_memory(md))
+               printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
+                      "at 0x%lx\n", __FUNCTION__,
+                      (num_dropped_pages << EFI_PAGE_SHIFT) >> 10,
+                      md->phys_addr, end_addr);
+       md->num_pages -= num_dropped_pages;
+}
+
+/*
+ * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
+ * has memory that is available for OS use.
+ */
+void
+efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
+{
+       int prev_valid = 0;
+       struct range {
+               u64 start;
+               u64 end;
+       } prev, curr;
+       void *efi_map_start, *efi_map_end, *p, *q;
+       efi_memory_desc_t *md, *check_md;
+       u64 efi_desc_size, start, end, granule_addr, last_granule_addr, first_non_wb_addr = 0;
+       unsigned long total_mem = 0;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               md = p;
+
+               /* skip over non-WB memory descriptors; that's all we're interested in... */
+               if (!(md->attribute & EFI_MEMORY_WB))
+                       continue;
+
+#ifdef XEN
+// this works around a problem in the ski bootloader
+{
+               extern long running_on_sim;
+               if (running_on_sim && md->type != EFI_CONVENTIONAL_MEMORY)
+                       continue;
+}
+// this is a temporary hack to avoid CONFIG_VIRTUAL_MEM_MAP
+               if (md->phys_addr >= 0x100000000) continue;
+#endif
+               /*
+                * granule_addr is the base of md's first granule.
+                * [granule_addr - first_non_wb_addr) is guaranteed to
+                * be contiguous WB memory.
+                */
+               granule_addr = GRANULEROUNDDOWN(md->phys_addr);
+               first_non_wb_addr = max(first_non_wb_addr, granule_addr);
+
+               if (first_non_wb_addr < md->phys_addr) {
+                       trim_bottom(md, granule_addr + IA64_GRANULE_SIZE);
+                       granule_addr = GRANULEROUNDDOWN(md->phys_addr);
+               first_non_wb_addr = max(first_non_wb_addr, granule_addr);
+               }
+
+               for (q = p; q < efi_map_end; q += efi_desc_size) {
+                       check_md = q;
+
+                       if ((check_md->attribute & EFI_MEMORY_WB) &&
+                           (check_md->phys_addr == first_non_wb_addr))
+                               first_non_wb_addr += check_md->num_pages << EFI_PAGE_SHIFT;
+                       else
+                               break;          /* non-WB or hole */
+               }
+
+               last_granule_addr = GRANULEROUNDDOWN(first_non_wb_addr);
+               if (last_granule_addr < md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT))
+                       trim_top(md, last_granule_addr);
+
+               if (is_available_memory(md)) {
+                       if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) >= max_addr) {
+                               if (md->phys_addr >= max_addr)
+                                       continue;
+                               md->num_pages = (max_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
+                               first_non_wb_addr = max_addr;
+                       }
+
+                       if (total_mem >= mem_limit)
+                               continue;
+
+                       if (total_mem + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) {
+                               unsigned long limit_addr = md->phys_addr;
+
+                               limit_addr += mem_limit - total_mem;
+                               limit_addr = GRANULEROUNDDOWN(limit_addr);
+
+                               if (md->phys_addr > limit_addr)
+                                       continue;
+
+                               md->num_pages = (limit_addr - md->phys_addr) >>
+                                               EFI_PAGE_SHIFT;
+                               first_non_wb_addr = max_addr = md->phys_addr +
+                                             (md->num_pages << EFI_PAGE_SHIFT);
+                       }
+                       total_mem += (md->num_pages << EFI_PAGE_SHIFT);
+
+                       if (md->num_pages == 0)
+                               continue;
+
+                       curr.start = PAGE_OFFSET + md->phys_addr;
+                       curr.end   = curr.start + (md->num_pages << EFI_PAGE_SHIFT);
+
+                       if (!prev_valid) {
+                               prev = curr;
+                               prev_valid = 1;
+                       } else {
+                               if (curr.start < prev.start)
+                                       printk(KERN_ERR "Oops: EFI memory table not ordered!\n");
+
+                               if (prev.end == curr.start) {
+                                       /* merge two consecutive memory ranges */
+                                       prev.end = curr.end;
+                               } else {
+                                       start = PAGE_ALIGN(prev.start);
+                                       end = prev.end & PAGE_MASK;
+                                       if ((end > start) && (*callback)(start, end, arg) < 0)
+                                               return;
+                                       prev = curr;
+                               }
+                       }
+               }
+       }
+       if (prev_valid) {
+               start = PAGE_ALIGN(prev.start);
+               end = prev.end & PAGE_MASK;
+               if (end > start)
+                       (*callback)(start, end, arg);
+       }
+}
+
+/*
+ * Look for the PAL_CODE region reported by EFI and maps it using an
+ * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
+ * Abstraction Layer chapter 11 in ADAG
+ */
+
+void *
+efi_get_pal_addr (void)
+{
+       void *efi_map_start, *efi_map_end, *p;
+       efi_memory_desc_t *md;
+       u64 efi_desc_size;
+       int pal_code_count = 0;
+       u64 vaddr, mask;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               md = p;
+               if (md->type != EFI_PAL_CODE)
+                       continue;
+
+               if (++pal_code_count > 1) {
+                       printk(KERN_ERR "Too many EFI Pal Code memory ranges, dropped @ %lx\n",
+                              md->phys_addr);
+                       continue;
+               }
+               /*
+                * The only ITLB entry in region 7 that is used is the one installed by
+                * __start().  That entry covers a 64MB range.
+                */
+               mask  = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
+               vaddr = PAGE_OFFSET + md->phys_addr;
+
+               /*
+                * We must check that the PAL mapping won't overlap with the kernel
+                * mapping.
+                *
+                * PAL code is guaranteed to be aligned on a power of 2 between 4k and
+                * 256KB and that only one ITR is needed to map it. This implies that the
+                * PAL code is always aligned on its size, i.e., the closest matching page
+                * size supported by the TLB. Therefore PAL code is guaranteed never to
+                * cross a 64MB unless it is bigger than 64MB (very unlikely!).  So for
+                * now the following test is enough to determine whether or not we need a
+                * dedicated ITR for the PAL code.
+                */
+               if ((vaddr & mask) == (KERNEL_START & mask)) {
+                       printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
+                              __FUNCTION__);
+                       continue;
+               }
+
+               if (md->num_pages << EFI_PAGE_SHIFT > IA64_GRANULE_SIZE)
+                       panic("Woah!  PAL code size bigger than a granule!");
+
+#if EFI_DEBUG
+               mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
+
+               printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
+                       smp_processor_id(), md->phys_addr,
+                       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
+                       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
+#endif
+               return __va(md->phys_addr);
+       }
+       printk(KERN_WARNING "%s: no PAL-code memory-descriptor found",
+              __FUNCTION__);
+       return NULL;
+}
+
+void
+efi_map_pal_code (void)
+{
+       void *pal_vaddr = efi_get_pal_addr ();
+       u64 psr;
+
+       if (!pal_vaddr)
+               return;
+
+       /*
+        * Cannot write to CRx with PSR.ic=1
+        */
+       psr = ia64_clear_ic();
+       ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
+                pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
+                IA64_GRANULE_SHIFT);
+       ia64_set_psr(psr);              /* restore psr */
+       ia64_srlz_i();
+}
+
+void __init
+efi_init (void)
+{
+       void *efi_map_start, *efi_map_end;
+       efi_config_table_t *config_tables;
+       efi_char16_t *c16;
+       u64 efi_desc_size;
+       char *cp, *end, vendor[100] = "unknown";
+       extern char saved_command_line[];
+       int i;
+
+       /* it's too early to be able to use the standard kernel command line support... */
+       for (cp = saved_command_line; *cp; ) {
+               if (memcmp(cp, "mem=", 4) == 0) {
+                       cp += 4;
+                       mem_limit = memparse(cp, &end);
+                       if (end != cp)
+                               break;
+                       cp = end;
+               } else if (memcmp(cp, "max_addr=", 9) == 0) {
+                       cp += 9;
+                       max_addr = GRANULEROUNDDOWN(memparse(cp, &end));
+                       if (end != cp)
+                               break;
+                       cp = end;
+               } else {
+                       while (*cp != ' ' && *cp)
+                               ++cp;
+                       while (*cp == ' ')
+                               ++cp;
+               }
+       }
+       if (max_addr != ~0UL)
+               printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);
+
+       efi.systab = __va(ia64_boot_param->efi_systab);
+
+       /*
+        * Verify the EFI Table
+        */
+       if (efi.systab == NULL)
+               panic("Woah! Can't find EFI system table.\n");
+       if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+               panic("Woah! EFI system table signature incorrect\n");
+       if ((efi.systab->hdr.revision ^ EFI_SYSTEM_TABLE_REVISION) >> 16 != 0)
+               printk(KERN_WARNING "Warning: EFI system table major version mismatch: "
+                      "got %d.%02d, expected %d.%02d\n",
+                      efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff,
+                      EFI_SYSTEM_TABLE_REVISION >> 16, EFI_SYSTEM_TABLE_REVISION & 0xffff);
+
+       config_tables = __va(efi.systab->tables);
+
+       /* Show what we know for posterity */
+       c16 = __va(efi.systab->fw_vendor);
+       if (c16) {
+               for (i = 0;i < (int) sizeof(vendor) && *c16; ++i)
+                       vendor[i] = *c16++;
+               vendor[i] = '\0';
+       }
+
+       printk(KERN_INFO "EFI v%u.%.02u by %s:",
+              efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);
+
+       for (i = 0; i < (int) efi.systab->nr_tables; i++) {
+               if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
+                       efi.mps = __va(config_tables[i].table);
+                       printk(" MPS=0x%lx", config_tables[i].table);
+               } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
+                       efi.acpi20 = __va(config_tables[i].table);
+                       printk(" ACPI 2.0=0x%lx", config_tables[i].table);
+               } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
+                       efi.acpi = __va(config_tables[i].table);
+                       printk(" ACPI=0x%lx", config_tables[i].table);
+               } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
+                       efi.smbios = __va(config_tables[i].table);
+                       printk(" SMBIOS=0x%lx", config_tables[i].table);
+               } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
+                       efi.sal_systab = __va(config_tables[i].table);
+                       printk(" SALsystab=0x%lx", config_tables[i].table);
+               } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
+                       efi.hcdp = __va(config_tables[i].table);
+                       printk(" HCDP=0x%lx", config_tables[i].table);
+               }
+       }
+       printk("\n");
+
+       runtime = __va(efi.systab->runtime);
+       efi.get_time = phys_get_time;
+       efi.set_time = phys_set_time;
+       efi.get_wakeup_time = phys_get_wakeup_time;
+       efi.set_wakeup_time = phys_set_wakeup_time;
+       efi.get_variable = phys_get_variable;
+       efi.get_next_variable = phys_get_next_variable;
+       efi.set_variable = phys_set_variable;
+       efi.get_next_high_mono_count = phys_get_next_high_mono_count;
+       efi.reset_system = phys_reset_system;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+#if EFI_DEBUG
+       /* print EFI memory map: */
+       {
+               efi_memory_desc_t *md;
+               void *p;
+
+               for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
+                       md = p;
+                       printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
+                              i, md->type, md->attribute, md->phys_addr,
+                              md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
+                              md->num_pages >> (20 - EFI_PAGE_SHIFT));
+               }
+       }
+#endif
+
+       efi_map_pal_code();
+       efi_enter_virtual_mode();
+}
+
+void
+efi_enter_virtual_mode (void)
+{
+       void *efi_map_start, *efi_map_end, *p;
+       efi_memory_desc_t *md;
+       efi_status_t status;
+       u64 efi_desc_size;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               md = p;
+               if (md->attribute & EFI_MEMORY_RUNTIME) {
+                       /*
+                        * Some descriptors have multiple bits set, so the order of
+                        * the tests is relevant.
+                        */
+                       if (md->attribute & EFI_MEMORY_WB) {
+                               md->virt_addr = (u64) __va(md->phys_addr);
+                       } else if (md->attribute & EFI_MEMORY_UC) {
+                               md->virt_addr = (u64) ioremap(md->phys_addr, 0);
+                       } else if (md->attribute & EFI_MEMORY_WC) {
+#if 0
+                               md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
+                                                                          | _PAGE_D
+                                                                          | _PAGE_MA_WC
+                                                                          | _PAGE_PL_0
+                                                                          | _PAGE_AR_RW));
+#else
+                               printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
+                               md->virt_addr = (u64) ioremap(md->phys_addr, 0);
+#endif
+                       } else if (md->attribute & EFI_MEMORY_WT) {
+#if 0
+                               md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
+                                                                          | _PAGE_D | _PAGE_MA_WT
+                                                                          | _PAGE_PL_0
+                                                                          | _PAGE_AR_RW));
+#else
+                               printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
+                               md->virt_addr = (u64) ioremap(md->phys_addr, 0);
+#endif
+                       }
+               }
+       }
+
+       status = efi_call_phys(__va(runtime->set_virtual_address_map),
+                              ia64_boot_param->efi_memmap_size,
+                              efi_desc_size, ia64_boot_param->efi_memdesc_version,
+                              ia64_boot_param->efi_memmap);
+       if (status != EFI_SUCCESS) {
+               printk(KERN_WARNING "warning: unable to switch EFI into virtual mode "
+                      "(status=%lu)\n", status);
+               return;
+       }
+
+       /*
+        * Now that EFI is in virtual mode, we call the EFI functions more efficiently:
+        */
+       efi.get_time = virt_get_time;
+       efi.set_time = virt_set_time;
+       efi.get_wakeup_time = virt_get_wakeup_time;
+       efi.set_wakeup_time = virt_set_wakeup_time;
+       efi.get_variable = virt_get_variable;
+       efi.get_next_variable = virt_get_next_variable;
+       efi.set_variable = virt_set_variable;
+       efi.get_next_high_mono_count = virt_get_next_high_mono_count;
+       efi.reset_system = virt_reset_system;
+}
+
+/*
+ * Walk the EFI memory map looking for the I/O port range.  There can only be one entry of
+ * this type, other I/O port ranges should be described via ACPI.
+ */
+u64
+efi_get_iobase (void)
+{
+       void *efi_map_start, *efi_map_end, *p;
+       efi_memory_desc_t *md;
+       u64 efi_desc_size;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               md = p;
+               if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
+                       if (md->attribute & EFI_MEMORY_UC)
+                               return md->phys_addr;
+               }
+       }
+       return 0;
+}
+
+#ifdef XEN
+// variation of efi_get_iobase which returns entire memory descriptor
+efi_memory_desc_t *
+efi_get_io_md (void)
+{
+       void *efi_map_start, *efi_map_end, *p;
+       efi_memory_desc_t *md;
+       u64 efi_desc_size;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               md = p;
+               if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
+                       if (md->attribute & EFI_MEMORY_UC)
+                               return md;
+               }
+       }
+       return 0;
+}
+#endif
+
+u32
+efi_mem_type (unsigned long phys_addr)
+{
+       void *efi_map_start, *efi_map_end, *p;
+       efi_memory_desc_t *md;
+       u64 efi_desc_size;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               md = p;
+
+               if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
+                        return md->type;
+       }
+       return 0;
+}
+
+u64
+efi_mem_attributes (unsigned long phys_addr)
+{
+       void *efi_map_start, *efi_map_end, *p;
+       efi_memory_desc_t *md;
+       u64 efi_desc_size;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               md = p;
+
+               if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
+                       return md->attribute;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(efi_mem_attributes);
+
+int
+valid_phys_addr_range (unsigned long phys_addr, unsigned long *size)
+{
+       void *efi_map_start, *efi_map_end, *p;
+       efi_memory_desc_t *md;
+       u64 efi_desc_size;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               md = p;
+
+               if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT)) {
+                       if (!(md->attribute & EFI_MEMORY_WB))
+                               return 0;
+
+                       if (*size > md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr)
+                               *size = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr;
+                       return 1;
+               }
+       }
+       return 0;
+}
+
+int __init
+efi_uart_console_only(void)
+{
+       efi_status_t status;
+       char *s, name[] = "ConOut";
+       efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
+       efi_char16_t *utf16, name_utf16[32];
+       unsigned char data[1024];
+       unsigned long size = sizeof(data);
+       struct efi_generic_dev_path *hdr, *end_addr;
+       int uart = 0;
+
+       /* Convert to UTF-16 */
+       utf16 = name_utf16;
+       s = name;
+       while (*s)
+               *utf16++ = *s++ & 0x7f;
+       *utf16 = 0;
+
+       status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
+       if (status != EFI_SUCCESS) {
+               printk(KERN_ERR "No EFI %s variable?\n", name);
+               return 0;
+       }
+
+       hdr = (struct efi_generic_dev_path *) data;
+       end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
+       while (hdr < end_addr) {
+               if (hdr->type == EFI_DEV_MSG &&
+                   hdr->sub_type == EFI_DEV_MSG_UART)
+                       uart = 1;
+               else if (hdr->type == EFI_DEV_END_PATH ||
+                         hdr->type == EFI_DEV_END_PATH2) {
+                       if (!uart)
+                               return 0;
+                       if (hdr->sub_type == EFI_DEV_END_ENTIRE)
+                               return 1;
+                       uart = 0;
+               }
+               hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length);
+       }
+       printk(KERN_ERR "Malformed %s value\n", name);
+       return 0;
+}
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/entry.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/entry.S     Tue Aug  2 23:59:09 2005
@@ -0,0 +1,1653 @@
+/*
+ * ia64/kernel/entry.S
+ *
+ * Kernel entry points.
+ *
+ * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 1999, 2002-2003
+ *     Asit Mallick <Asit.K.Mallick@xxxxxxxxx>
+ *     Don Dugger <Don.Dugger@xxxxxxxxx>
+ *     Suresh Siddha <suresh.b.siddha@xxxxxxxxx>
+ *     Fenghua Yu <fenghua.yu@xxxxxxxxx>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ */
+/*
+ * ia64_switch_to now places correct virtual mapping in in TR2 for
+ * kernel stack. This allows us to handle interrupts without changing
+ * to physical mode.
+ *
+ * Jonathan Nicklin    <nicklin@xxxxxxxxxxxxxxxxxxxxxxxx>
+ * Patrick O'Rourke    <orourke@xxxxxxxxxxxxxxxxxxxxxxxx>
+ * 11/07/2000
+ */
+/*
+ * Global (preserved) predicate usage on syscall entry/exit path:
+ *
+ *     pKStk:          See entry.h.
+ *     pUStk:          See entry.h.
+ *     pSys:           See entry.h.
+ *     pNonSys:        !pSys
+ */
+
+#include <linux/config.h>
+
+#include <asm/asmmacro.h>
+#include <asm/cache.h>
+#include <asm/errno.h>
+#include <asm/kregs.h>
+#include <asm/offsets.h>
+#include <asm/pgtable.h>
+#include <asm/percpu.h>
+#include <asm/processor.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+
+#include "minstate.h"
+
+#ifndef XEN
+       /*
+        * execve() is special because in case of success, we need to
+        * setup a null register window frame.
+        */
+ENTRY(ia64_execve)
+       /*
+        * Allocate 8 input registers since ptrace() may clobber them
+        */
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+       alloc loc1=ar.pfs,8,2,4,0
+       mov loc0=rp
+       .body
+       mov out0=in0                    // filename
+       ;;                              // stop bit between alloc and call
+       mov out1=in1                    // argv
+       mov out2=in2                    // envp
+       add out3=16,sp                  // regs
+       br.call.sptk.many rp=sys_execve
+.ret0:
+#ifdef CONFIG_IA32_SUPPORT
+       /*
+        * Check if we're returning to ia32 mode. If so, we need to restore ia32 registers
+        * from pt_regs.
+        */
+       adds r16=PT(CR_IPSR)+16,sp
+       ;;
+       ld8 r16=[r16]
+#endif
+       cmp4.ge p6,p7=r8,r0
+       mov ar.pfs=loc1                 // restore ar.pfs
+       sxt4 r8=r8                      // return 64-bit result
+       ;;
+       stf.spill [sp]=f0
+(p6)   cmp.ne pKStk,pUStk=r0,r0        // a successful execve() lands us in user-mode...
+       mov rp=loc0
+(p6)   mov ar.pfs=r0                   // clear ar.pfs on success
+(p7)   br.ret.sptk.many rp
+
+       /*
+        * In theory, we'd have to zap this state only to prevent leaking of
+        * security sensitive state (e.g., if current->mm->dumpable is zero).  However,
+        * this executes in less than 20 cycles even on Itanium, so it's not worth
+        * optimizing for...).
+        */
+       mov ar.unat=0;          mov ar.lc=0
+       mov r4=0;               mov f2=f0;              mov b1=r0
+       mov r5=0;               mov f3=f0;              mov b2=r0
+       mov r6=0;               mov f4=f0;              mov b3=r0
+       mov r7=0;               mov f5=f0;              mov b4=r0
+       ldf.fill f12=[sp];      mov f13=f0;             mov b5=r0
+       ldf.fill f14=[sp];      ldf.fill f15=[sp];      mov f16=f0
+       ldf.fill f17=[sp];      ldf.fill f18=[sp];      mov f19=f0
+       ldf.fill f20=[sp];      ldf.fill f21=[sp];      mov f22=f0
+       ldf.fill f23=[sp];      ldf.fill f24=[sp];      mov f25=f0
+       ldf.fill f26=[sp];      ldf.fill f27=[sp];      mov f28=f0
+       ldf.fill f29=[sp];      ldf.fill f30=[sp];      mov f31=f0
+#ifdef CONFIG_IA32_SUPPORT
+       tbit.nz p6,p0=r16, IA64_PSR_IS_BIT
+       movl loc0=ia64_ret_from_ia32_execve
+       ;;
+(p6)   mov rp=loc0
+#endif
+       br.ret.sptk.many rp
+END(ia64_execve)
+
+/*
+ * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr,
+ *           u64 tls)
+ */
+GLOBAL_ENTRY(sys_clone2)
+       /*
+        * Allocate 8 input registers since ptrace() may clobber them
+        */
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+       alloc r16=ar.pfs,8,2,6,0
+       DO_SAVE_SWITCH_STACK
+       adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
+       mov loc0=rp
+       mov loc1=r16                            // save ar.pfs across do_fork
+       .body
+       mov out1=in1
+       mov out3=in2
+       tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
+       mov out4=in3    // parent_tidptr: valid only w/CLONE_PARENT_SETTID
+       ;;
+(p6)   st8 [r2]=in5                            // store TLS in r16 for copy_thread()
+       mov out5=in4    // child_tidptr:  valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
+       adds out2=IA64_SWITCH_STACK_SIZE+16,sp  // out2 = &regs
+       mov out0=in0                            // out0 = clone_flags
+       br.call.sptk.many rp=do_fork
+.ret1: .restore sp
+       adds sp=IA64_SWITCH_STACK_SIZE,sp       // pop the switch stack
+       mov ar.pfs=loc1
+       mov rp=loc0
+       br.ret.sptk.many rp
+END(sys_clone2)
+
+/*
+ * sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls)
+ *     Deprecated.  Use sys_clone2() instead.
+ */
+GLOBAL_ENTRY(sys_clone)
+       /*
+        * Allocate 8 input registers since ptrace() may clobber them
+        */
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+       alloc r16=ar.pfs,8,2,6,0
+       DO_SAVE_SWITCH_STACK
+       adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
+       mov loc0=rp
+       mov loc1=r16                            // save ar.pfs across do_fork
+       .body
+       mov out1=in1
+       mov out3=16                             // stacksize (compensates for 16-byte scratch area)
+       tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
+       mov out4=in2    // parent_tidptr: valid only w/CLONE_PARENT_SETTID
+       ;;
+(p6)   st8 [r2]=in4                            // store TLS in r13 (tp)
+       mov out5=in3    // child_tidptr:  valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
+       adds out2=IA64_SWITCH_STACK_SIZE+16,sp  // out2 = &regs
+       mov out0=in0                            // out0 = clone_flags
+       br.call.sptk.many rp=do_fork
+.ret2: .restore sp
+       adds sp=IA64_SWITCH_STACK_SIZE,sp       // pop the switch stack
+       mov ar.pfs=loc1
+       mov rp=loc0
+       br.ret.sptk.many rp
+END(sys_clone)
+#endif /* !XEN */
+
+/*
+ * prev_task <- ia64_switch_to(struct task_struct *next)
+ *     With Ingo's new scheduler, interrupts are disabled when this routine gets
+ *     called.  The code starting at .map relies on this.  The rest of the code
+ *     doesn't care about the interrupt masking status.
+ */
+GLOBAL_ENTRY(ia64_switch_to)
+       .prologue
+       alloc r16=ar.pfs,1,0,0,0
+       DO_SAVE_SWITCH_STACK
+       .body
+
+       adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
+       movl r25=init_task
+       mov r27=IA64_KR(CURRENT_STACK)
+       adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
+#ifdef XEN
+       dep r20=0,in0,60,4              // physical address of "next"
+#else
+       dep r20=0,in0,61,3              // physical address of "next"
+#endif
+       ;;
+       st8 [r22]=sp                    // save kernel stack pointer of old task
+       shr.u r26=r20,IA64_GRANULE_SHIFT
+       cmp.eq p7,p6=r25,in0
+       ;;
+       /*
+        * If we've already mapped this task's page, we can skip doing it again.
+        */
+(p6)   cmp.eq p7,p6=r26,r27
+(p6)   br.cond.dpnt .map
+       ;;
+.done:
+(p6)   ssm psr.ic                      // if we had to map, reenable the psr.ic bit FIRST!!!
+       ;;
+(p6)   srlz.d
+       ld8 sp=[r21]                    // load kernel stack pointer of new task
+       mov IA64_KR(CURRENT)=in0        // update "current" application register
+       mov r8=r13                      // return pointer to previously running task
+       mov r13=in0                     // set "current" pointer
+       ;;
+       DO_LOAD_SWITCH_STACK
+
+#ifdef CONFIG_SMP
+       sync.i                          // ensure "fc"s done by this CPU are visible on other CPUs
+#endif
+       br.ret.sptk.many rp             // boogie on out in new context
+
+.map:
+#ifdef XEN
+       // avoid overlapping with kernel TR
+       movl r25=KERNEL_START
+       dep  r23=0,in0,0,KERNEL_TR_PAGE_SHIFT
+       ;;
+       cmp.eq p7,p0=r25,r23
+       ;;
+(p7)   mov IA64_KR(CURRENT_STACK)=r26  // remember last page we mapped...
+(p7)   br.cond.sptk .done
+#endif
+       rsm psr.ic                      // interrupts (psr.i) are already disabled here
+       movl r25=PAGE_KERNEL
+       ;;
+       srlz.d
+       or r23=r25,r20                  // construct PA | page properties
+       mov r25=IA64_GRANULE_SHIFT<<2
+       ;;
+       mov cr.itir=r25
+       mov cr.ifa=in0                  // VA of next task...
+       ;;
+       mov r25=IA64_TR_CURRENT_STACK
+       mov IA64_KR(CURRENT_STACK)=r26  // remember last page we mapped...
+       ;;
+       itr.d dtr[r25]=r23              // wire in new mapping...
+       br.cond.sptk .done
+END(ia64_switch_to)
+
+/*
+ * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
+ * means that we may get an interrupt with "sp" pointing to the new kernel stack while
+ * ar.bspstore is still pointing to the old kernel backing store area.  Since ar.rsc,
+ * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a
+ * problem.  Also, we don't need to specify unwind information for preserved registers
+ * that are not modified in save_switch_stack as the right unwind information is already
+ * specified at the call-site of save_switch_stack.
+ */
+
+/*
+ * save_switch_stack:
+ *     - r16 holds ar.pfs
+ *     - b7 holds address to return to
+ *     - rp (b0) holds return address to save
+ */
+GLOBAL_ENTRY(save_switch_stack)
+       .prologue
+       .altrp b7
+       flushrs                 // flush dirty regs to backing store (must be first in insn group)
+       .save @priunat,r17
+       mov r17=ar.unat         // preserve caller's
+       .body
+#ifdef CONFIG_ITANIUM
+       adds r2=16+128,sp
+       adds r3=16+64,sp
+       adds r14=SW(R4)+16,sp
+       ;;
+       st8.spill [r14]=r4,16           // spill r4
+       lfetch.fault.excl.nt1 [r3],128
+       ;;
+       lfetch.fault.excl.nt1 [r2],128
+       lfetch.fault.excl.nt1 [r3],128
+       ;;
+       lfetch.fault.excl [r2]
+       lfetch.fault.excl [r3]
+       adds r15=SW(R5)+16,sp
+#else
+       add r2=16+3*128,sp
+       add r3=16,sp
+       add r14=SW(R4)+16,sp
+       ;;
+       st8.spill [r14]=r4,SW(R6)-SW(R4)        // spill r4 and prefetch offset 0x1c0
+       lfetch.fault.excl.nt1 [r3],128  //              prefetch offset 0x010
+       ;;
+       lfetch.fault.excl.nt1 [r3],128  //              prefetch offset 0x090
+       lfetch.fault.excl.nt1 [r2],128  //              prefetch offset 0x190
+       ;;
+       lfetch.fault.excl.nt1 [r3]      //              prefetch offset 0x110
+       lfetch.fault.excl.nt1 [r2]      //              prefetch offset 0x210
+       adds r15=SW(R5)+16,sp
+#endif
+       ;;
+       st8.spill [r15]=r5,SW(R7)-SW(R5)        // spill r5
+       mov.m ar.rsc=0                  // put RSE in mode: enforced lazy, little endian, pl 0
+       add r2=SW(F2)+16,sp             // r2 = &sw->f2
+       ;;
+       st8.spill [r14]=r6,SW(B0)-SW(R6)        // spill r6
+       mov.m r18=ar.fpsr               // preserve fpsr
+       add r3=SW(F3)+16,sp             // r3 = &sw->f3
+       ;;
+       stf.spill [r2]=f2,32
+       mov.m r19=ar.rnat
+       mov r21=b0
+
+       stf.spill [r3]=f3,32
+       st8.spill [r15]=r7,SW(B2)-SW(R7)        // spill r7
+       mov r22=b1
+       ;;
+       // since we're done with the spills, read and save ar.unat:
+       mov.m r29=ar.unat
+       mov.m r20=ar.bspstore
+       mov r23=b2
+       stf.spill [r2]=f4,32
+       stf.spill [r3]=f5,32
+       mov r24=b3
+       ;;
+       st8 [r14]=r21,SW(B1)-SW(B0)             // save b0
+       st8 [r15]=r23,SW(B3)-SW(B2)             // save b2
+       mov r25=b4
+       mov r26=b5
+       ;;
+       st8 [r14]=r22,SW(B4)-SW(B1)             // save b1
+       st8 [r15]=r24,SW(AR_PFS)-SW(B3)         // save b3
+       mov r21=ar.lc           // I-unit
+       stf.spill [r2]=f12,32
+       stf.spill [r3]=f13,32
+       ;;
+       st8 [r14]=r25,SW(B5)-SW(B4)             // save b4
+       st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS)      // save ar.pfs
+       stf.spill [r2]=f14,32
+       stf.spill [r3]=f15,32
+       ;;
+       st8 [r14]=r26                           // save b5
+       st8 [r15]=r21                           // save ar.lc
+       stf.spill [r2]=f16,32
+       stf.spill [r3]=f17,32
+       ;;
+       stf.spill [r2]=f18,32
+       stf.spill [r3]=f19,32
+       ;;
+       stf.spill [r2]=f20,32
+       stf.spill [r3]=f21,32
+       ;;
+       stf.spill [r2]=f22,32
+       stf.spill [r3]=f23,32
+       ;;
+       stf.spill [r2]=f24,32
+       stf.spill [r3]=f25,32
+       ;;
+       stf.spill [r2]=f26,32
+       stf.spill [r3]=f27,32
+       ;;
+       stf.spill [r2]=f28,32
+       stf.spill [r3]=f29,32
+       ;;
+       stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30)
+       stf.spill [r3]=f31,SW(PR)-SW(F31)
+       add r14=SW(CALLER_UNAT)+16,sp
+       ;;
+       st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT)    // save ar.unat
+       st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat
+       mov r21=pr
+       ;;
+       st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat
+       st8 [r3]=r21                            // save predicate registers
+       ;;
+       st8 [r2]=r20                            // save ar.bspstore
+       st8 [r14]=r18                           // save fpsr
+       mov ar.rsc=3            // put RSE back into eager mode, pl 0
+       br.cond.sptk.many b7
+END(save_switch_stack)
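For orientation, the SW() offsets used by save_switch_stack and load_switch_stack index a save
area on the kernel stack that corresponds roughly to the C structure sketched below.  This is
only an illustrative sketch: the authoritative layout is struct switch_stack in the ia64 ptrace
headers, and the field order here simply mirrors the spills performed above.

    /* Illustrative sketch only; real definition is struct switch_stack. */
    struct ia64_fpreg_sketch { unsigned long u[2]; };   /* 16-byte FP register image */

    struct switch_stack_sketch {
        unsigned long caller_unat;                /* caller's ar.unat                 */
        unsigned long ar_fpsr;                    /* floating-point status            */
        struct ia64_fpreg_sketch f2, f3, f4, f5;  /* preserved low FP registers       */
        struct ia64_fpreg_sketch f12_f15[4];      /* f12-f15                          */
        struct ia64_fpreg_sketch f16_f31[16];     /* f16-f31                          */
        unsigned long r4, r5, r6, r7;             /* preserved general registers      */
        unsigned long b0, b1, b2, b3, b4, b5;     /* branch registers                 */
        unsigned long ar_pfs, ar_lc;              /* previous frame state, loop count */
        unsigned long ar_unat, ar_rnat, ar_bspstore;
        unsigned long pr;                         /* predicate registers              */
    };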
+
+/*
+ * load_switch_stack:
+ *     - "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK)
+ *     - b7 holds address to return to
+ *     - must not touch r8-r11
+ */
+#ifdef XEN
+GLOBAL_ENTRY(load_switch_stack)
+#else
+ENTRY(load_switch_stack)
+#endif
+       .prologue
+       .altrp b7
+
+       .body
+       lfetch.fault.nt1 [sp]
+       adds r2=SW(AR_BSPSTORE)+16,sp
+       adds r3=SW(AR_UNAT)+16,sp
+       mov ar.rsc=0                                            // put RSE into enforced lazy mode
+       adds r14=SW(CALLER_UNAT)+16,sp
+       adds r15=SW(AR_FPSR)+16,sp
+       ;;
+       ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE))   // bspstore
+       ld8 r29=[r3],(SW(B1)-SW(AR_UNAT))       // unat
+       ;;
+       ld8 r21=[r2],16         // restore b0
+       ld8 r22=[r3],16         // restore b1
+       ;;
+       ld8 r23=[r2],16         // restore b2
+       ld8 r24=[r3],16         // restore b3
+       ;;
+       ld8 r25=[r2],16         // restore b4
+       ld8 r26=[r3],16         // restore b5
+       ;;
+       ld8 r16=[r2],(SW(PR)-SW(AR_PFS))        // ar.pfs
+       ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC))    // ar.lc
+       ;;
+       ld8 r28=[r2]            // restore pr
+       ld8 r30=[r3]            // restore rnat
+       ;;
+       ld8 r18=[r14],16        // restore caller's unat
+       ld8 r19=[r15],24        // restore fpsr
+       ;;
+       ldf.fill f2=[r14],32
+       ldf.fill f3=[r15],32
+       ;;
+       ldf.fill f4=[r14],32
+       ldf.fill f5=[r15],32
+       ;;
+       ldf.fill f12=[r14],32
+       ldf.fill f13=[r15],32
+       ;;
+       ldf.fill f14=[r14],32
+       ldf.fill f15=[r15],32
+       ;;
+       ldf.fill f16=[r14],32
+       ldf.fill f17=[r15],32
+       ;;
+       ldf.fill f18=[r14],32
+       ldf.fill f19=[r15],32
+       mov b0=r21
+       ;;
+       ldf.fill f20=[r14],32
+       ldf.fill f21=[r15],32
+       mov b1=r22
+       ;;
+       ldf.fill f22=[r14],32
+       ldf.fill f23=[r15],32
+       mov b2=r23
+       ;;
+       mov ar.bspstore=r27
+       mov ar.unat=r29         // establish unat holding the NaT bits for r4-r7
+       mov b3=r24
+       ;;
+       ldf.fill f24=[r14],32
+       ldf.fill f25=[r15],32
+       mov b4=r25
+       ;;
+       ldf.fill f26=[r14],32
+       ldf.fill f27=[r15],32
+       mov b5=r26
+       ;;
+       ldf.fill f28=[r14],32
+       ldf.fill f29=[r15],32
+       mov ar.pfs=r16
+       ;;
+       ldf.fill f30=[r14],32
+       ldf.fill f31=[r15],24
+       mov ar.lc=r17
+       ;;
+       ld8.fill r4=[r14],16
+       ld8.fill r5=[r15],16
+       mov pr=r28,-1
+       ;;
+       ld8.fill r6=[r14],16
+       ld8.fill r7=[r15],16
+
+       mov ar.unat=r18                         // restore caller's unat
+       mov ar.rnat=r30                         // must restore after bspstore but before rsc!
+       mov ar.fpsr=r19                         // restore fpsr
+       mov ar.rsc=3                            // put RSE back into eager mode, pl 0
+       br.cond.sptk.many b7
+END(load_switch_stack)
+
+#ifndef XEN
+GLOBAL_ENTRY(__ia64_syscall)
+       .regstk 6,0,0,0
+       mov r15=in5                             // put syscall number in place
+       break __BREAK_SYSCALL
+       movl r2=errno
+       cmp.eq p6,p7=-1,r10
+       ;;
+(p6)   st4 [r2]=r8
+(p6)   mov r8=-1
+       br.ret.sptk.many rp
+END(__ia64_syscall)
+
+GLOBAL_ENTRY(execve)
+       mov r15=__NR_execve                     // put syscall number in place
+       break __BREAK_SYSCALL
+       br.ret.sptk.many rp
+END(execve)
+
+GLOBAL_ENTRY(clone)
+       mov r15=__NR_clone                      // put syscall number in place
+       break __BREAK_SYSCALL
+       br.ret.sptk.many rp
+END(clone)
+
+       /*
+        * Invoke a system call, but do some tracing before and after the call.
+        * We MUST preserve the current register frame throughout this routine
+        * because some system calls (such as ia64_execve) directly
+        * manipulate ar.pfs.
+        */
+GLOBAL_ENTRY(ia64_trace_syscall)
+       PT_REGS_UNWIND_INFO(0)
+       /*
+        * We need to preserve the scratch registers f6-f11 in case the system
+        * call is sigreturn.
+        */
+       adds r16=PT(F6)+16,sp
+       adds r17=PT(F7)+16,sp
+       ;;
+       stf.spill [r16]=f6,32
+       stf.spill [r17]=f7,32
+       ;;
+       stf.spill [r16]=f8,32
+       stf.spill [r17]=f9,32
+       ;;
+       stf.spill [r16]=f10
+       stf.spill [r17]=f11
+       br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
+       adds r16=PT(F6)+16,sp
+       adds r17=PT(F7)+16,sp
+       ;;
+       ldf.fill f6=[r16],32
+       ldf.fill f7=[r17],32
+       ;;
+       ldf.fill f8=[r16],32
+       ldf.fill f9=[r17],32
+       ;;
+       ldf.fill f10=[r16]
+       ldf.fill f11=[r17]
+       // the syscall number may have changed, so re-load it and re-calculate the
+       // syscall entry-point:
+       adds r15=PT(R15)+16,sp                  // r15 = &pt_regs.r15 (syscall #)
+       ;;
+       ld8 r15=[r15]
+       mov r3=NR_syscalls - 1
+       ;;
+       adds r15=-1024,r15
+       movl r16=sys_call_table
+       ;;
+       shladd r20=r15,3,r16                    // r20 = sys_call_table + 8*(syscall-1024)
+       cmp.leu p6,p7=r15,r3
+       ;;
+(p6)   ld8 r20=[r20]                           // load address of syscall entry point
+(p7)   movl r20=sys_ni_syscall
+       ;;
+       mov b6=r20
+       br.call.sptk.many rp=b6                 // do the syscall
+.strace_check_retval:
+       cmp.lt p6,p0=r8,r0                      // syscall failed?
+       adds r2=PT(R8)+16,sp                    // r2 = &pt_regs.r8
+       adds r3=PT(R10)+16,sp                   // r3 = &pt_regs.r10
+       mov r10=0
+(p6)   br.cond.sptk strace_error               // syscall failed ->
+       ;;                                      // avoid RAW on r10
+.strace_save_retval:
+.mem.offset 0,0; st8.spill [r2]=r8             // store return value in slot for r8
+.mem.offset 8,0; st8.spill [r3]=r10            // clear error indication in slot for r10
+       br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
+.ret3: br.cond.sptk .work_pending_syscall_end
+
+strace_error:
+       ld8 r3=[r2]                             // load pt_regs.r8
+       sub r9=0,r8                             // negate return value to get errno value
+       ;;
+       cmp.ne p6,p0=r3,r0                      // is pt_regs.r8!=0?
+       adds r3=16,r2                           // r3=&pt_regs.r10
+       ;;
+(p6)   mov r10=-1
+(p6)   mov r8=r9
+       br.cond.sptk .strace_save_retval
+END(ia64_trace_syscall)
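After syscall_trace_enter returns, the tracer may have rewritten the syscall number in
pt_regs.r15, so ia64_trace_syscall reloads it and recomputes the entry point from
sys_call_table, falling back to sys_ni_syscall for out-of-range numbers.  A minimal C sketch
of that lookup follows; the function and parameter names are illustrative, not from the patch.

    /* Sketch of the post-trace dispatch: ia64 syscall numbers start at 1024
     * and index an 8-byte-per-entry sys_call_table of NR_syscalls entries. */
    typedef long (*syscall_fn_t)(long, long, long, long, long, long);

    static syscall_fn_t resolve_syscall(unsigned long r15,
                                        syscall_fn_t *sys_call_table,
                                        unsigned long nr_syscalls,
                                        syscall_fn_t sys_ni_syscall)
    {
        unsigned long idx = r15 - 1024;          /* adds r15=-1024,r15           */
        if (idx <= nr_syscalls - 1)              /* cmp.leu p6,p7=r15,r3         */
            return sys_call_table[idx];          /* (p6) ld8 r20=[r20]           */
        return sys_ni_syscall;                   /* (p7) movl r20=sys_ni_syscall */
    }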
+
+       /*
+        * When traced and returning from sigreturn, we invoke syscall_trace but then
+        * go straight to ia64_leave_kernel rather than ia64_leave_syscall.
+        */
+GLOBAL_ENTRY(ia64_strace_leave_kernel)
+       PT_REGS_UNWIND_INFO(0)
+{      /*
+        * Some versions of gas generate bad unwind info if the first instruction of a
+        * procedure doesn't go into the first slot of a bundle.  This is a workaround.
+        */
+       nop.m 0
+       nop.i 0
+       br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
+}
+.ret4: br.cond.sptk ia64_leave_kernel
+END(ia64_strace_leave_kernel)
+#endif
+
+GLOBAL_ENTRY(ia64_ret_from_clone)
+       PT_REGS_UNWIND_INFO(0)
+{      /*
+        * Some versions of gas generate bad unwind info if the first instruction of a
+        * procedure doesn't go into the first slot of a bundle.  This is a workaround.
+        */
+       nop.m 0
+       nop.i 0
+       /*
+        * We need to call schedule_tail() to complete the scheduling process.
+        * Called by ia64_switch_to() after do_fork()->copy_thread().  r8 contains the
+        * address of the previously executing task.
+        */
+       br.call.sptk.many rp=ia64_invoke_schedule_tail
+}
+#ifdef XEN
+       // new domains are cloned but not exec'ed so switch to user mode here
+       cmp.ne pKStk,pUStk=r0,r0
+#ifdef CONFIG_VTI
+       br.cond.spnt ia64_leave_hypervisor
+#else // CONFIG_VTI
+       br.cond.spnt ia64_leave_kernel
+#endif // CONFIG_VTI
+#else
+.ret8:
+       adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
+       ;;
+       ld4 r2=[r2]
+       ;;
+       mov r8=0
+       and r2=_TIF_SYSCALL_TRACEAUDIT,r2
+       ;;
+       cmp.ne p6,p0=r2,r0
+(p6)   br.cond.spnt .strace_check_retval
+#endif
+       ;;                                      // added stop bits to prevent r8 dependency
+END(ia64_ret_from_clone)
+       // fall through
+GLOBAL_ENTRY(ia64_ret_from_syscall)
+       PT_REGS_UNWIND_INFO(0)
+       cmp.ge p6,p7=r8,r0                      // syscall executed successfully?
+       adds r2=PT(R8)+16,sp                    // r2 = &pt_regs.r8
+       mov r10=r0                              // clear error indication in r10
+(p7)   br.cond.spnt handle_syscall_error       // handle potential syscall failure
+END(ia64_ret_from_syscall)
+       // fall through
+/*
+ * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
+ *     need to switch to bank 0 and doesn't restore the scratch registers.
+ *     To avoid leaking kernel bits, the scratch registers are set to
+ *     the following known-to-be-safe values:
+ *
+ *               r1: restored (global pointer)
+ *               r2: cleared
+ *               r3: 1 (when returning to user-level)
+ *           r8-r11: restored (syscall return value(s))
+ *              r12: restored (user-level stack pointer)
+ *              r13: restored (user-level thread pointer)
+ *              r14: cleared
+ *              r15: restored (syscall #)
+ *          r16-r17: cleared
+ *              r18: user-level b6
+ *              r19: cleared
+ *              r20: user-level ar.fpsr
+ *              r21: user-level b0
+ *              r22: cleared
+ *              r23: user-level ar.bspstore
+ *              r24: user-level ar.rnat
+ *              r25: user-level ar.unat
+ *              r26: user-level ar.pfs
+ *              r27: user-level ar.rsc
+ *              r28: user-level ip
+ *              r29: user-level psr
+ *              r30: user-level cfm
+ *              r31: user-level pr
+ *           f6-f11: cleared
+ *               pr: restored (user-level pr)
+ *               b0: restored (user-level rp)
+ *               b6: restored
+ *               b7: cleared
+ *          ar.unat: restored (user-level ar.unat)
+ *           ar.pfs: restored (user-level ar.pfs)
+ *           ar.rsc: restored (user-level ar.rsc)
+ *          ar.rnat: restored (user-level ar.rnat)
+ *      ar.bspstore: restored (user-level ar.bspstore)
+ *          ar.fpsr: restored (user-level ar.fpsr)
+ *           ar.ccv: cleared
+ *           ar.csd: cleared
+ *           ar.ssd: cleared
+ */
+ENTRY(ia64_leave_syscall)
+       PT_REGS_UNWIND_INFO(0)
+       /*
+        * work.need_resched etc. mustn't get changed by this CPU before it returns to
+        * user- or fsys-mode, hence we disable interrupts early on.
+        *
+        * p6 controls whether current_thread_info()->flags needs to be checked for
+        * extra work.  We always check for extra work when returning to user-level.
+        * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
+        * is 0.  After extra work processing has been completed, execution
+        * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
+        * needs to be redone.
+        */
+#ifdef CONFIG_PREEMPT
+       rsm psr.i                               // disable interrupts
+       cmp.eq pLvSys,p0=r0,r0                  // pLvSys=1: leave from syscall
+(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
+       ;;
+       .pred.rel.mutex pUStk,pKStk
+(pKStk) ld4 r21=[r20]                  // r21 <- preempt_count
+(pUStk)        mov r21=0                       // r21 <- 0
+       ;;
+       cmp.eq p6,p0=r21,r0             // p6 <- pUStk || (preempt_count == 0)
+#else /* !CONFIG_PREEMPT */
+(pUStk)        rsm psr.i
+       cmp.eq pLvSys,p0=r0,r0          // pLvSys=1: leave from syscall
+(pUStk)        cmp.eq.unc p6,p0=r0,r0          // p6 <- pUStk
+#endif
+.work_processed_syscall:
+       adds r2=PT(LOADRS)+16,r12
+       adds r3=PT(AR_BSPSTORE)+16,r12
+#ifdef XEN
+       ;;
+#else
+       adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
+       ;;
+(p6)   ld4 r31=[r18]                           // load current_thread_info()->flags
+#endif
+       ld8 r19=[r2],PT(B6)-PT(LOADRS)          // load ar.rsc value for "loadrs"
+       mov b7=r0               // clear b7
+       ;;
+       ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)    // load ar.bspstore (may be garbage)
+       ld8 r18=[r2],PT(R9)-PT(B6)              // load b6
+#ifndef XEN
+(p6)   and r15=TIF_WORK_MASK,r31               // any work other than TIF_SYSCALL_TRACE?
+#endif
+       ;;
+       mov r16=ar.bsp                          // M2  get existing backing store pointer
+#ifndef XEN
+(p6)   cmp4.ne.unc p6,p0=r15, r0               // any special work pending?
+(p6)   br.cond.spnt .work_pending_syscall
+#endif
+       ;;
+       // start restoring the state saved on the kernel stack (struct pt_regs):
+       ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
+       ld8 r11=[r3],PT(CR_IIP)-PT(R11)
+       mov f6=f0               // clear f6
+       ;;
+       invala                  // M0|1 invalidate ALAT
+       rsm psr.i | psr.ic      // M2 initiate turning off of interrupt and interruption collection
+       mov f9=f0               // clear f9
+
+       ld8 r29=[r2],16         // load cr.ipsr
+       ld8 r28=[r3],16                 // load cr.iip
+       mov f8=f0               // clear f8
+       ;;
+       ld8 r30=[r2],16         // M0|1 load cr.ifs
+       mov.m ar.ssd=r0         // M2 clear ar.ssd
+       cmp.eq p9,p0=r0,r0      // set p9 to indicate that we should restore cr.ifs
+       ;;
+       ld8 r25=[r3],16         // M0|1 load ar.unat
+       mov.m ar.csd=r0         // M2 clear ar.csd
+       mov r22=r0              // clear r22
+       ;;
+       ld8 r26=[r2],PT(B0)-PT(AR_PFS)  // M0|1 load ar.pfs
+(pKStk)        mov r22=psr             // M2 read PSR now that interrupts are disabled
+       mov f10=f0              // clear f10
+       ;;
+       ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // load b0
+       ld8 r27=[r3],PT(PR)-PT(AR_RSC)  // load ar.rsc
+       mov f11=f0              // clear f11
+       ;;
+       ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)    // load ar.rnat (may be garbage)
+       ld8 r31=[r3],PT(R1)-PT(PR)              // load predicates
+(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
+       ;;
+       ld8 r20=[r2],PT(R12)-PT(AR_FPSR)        // load ar.fpsr
+       ld8.fill r1=[r3],16     // load r1
+(pUStk) mov r17=1
+       ;;
+       srlz.d                  // M0  ensure interruption collection is off
+       ld8.fill r13=[r3],16
+       mov f7=f0               // clear f7
+       ;;
+       ld8.fill r12=[r2]       // restore r12 (sp)
+       ld8.fill r15=[r3]       // restore r15
+#ifdef XEN
+       movl r3=THIS_CPU(ia64_phys_stacked_size_p8)
+#else
+       addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0
+#endif
+       ;;
+(pUStk)        ld4 r3=[r3]             // r3 = cpu_data->phys_stacked_size_p8
+(pUStk) st1 [r14]=r17
+       mov b6=r18              // I0  restore b6
+       ;;
+       mov r14=r0              // clear r14
+       shr.u r18=r19,16        // I0|1 get byte size of existing "dirty" partition
+(pKStk) br.cond.dpnt.many skip_rbs_switch
+
+       mov.m ar.ccv=r0         // clear ar.ccv
+(pNonSys) br.cond.dpnt.many dont_preserve_current_frame
+       br.cond.sptk.many rbs_switch
+END(ia64_leave_syscall)
+
+#ifdef CONFIG_IA32_SUPPORT
+GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
+       PT_REGS_UNWIND_INFO(0)
+       adds r2=PT(R8)+16,sp                    // r2 = &pt_regs.r8
+       adds r3=PT(R10)+16,sp                   // r3 = &pt_regs.r10
+       ;;
+       .mem.offset 0,0
+       st8.spill [r2]=r8       // store return value in slot for r8 and set unat bit
+       .mem.offset 8,0
+       st8.spill [r3]=r0       // clear error indication in slot for r10 and set unat bit
+END(ia64_ret_from_ia32_execve)
+       // fall through
+#endif /* CONFIG_IA32_SUPPORT */
+GLOBAL_ENTRY(ia64_leave_kernel)
+       PT_REGS_UNWIND_INFO(0)
+       /*
+        * work.need_resched etc. mustn't get changed by this CPU before it returns to
+        * user- or fsys-mode, hence we disable interrupts early on.
+        *
+        * p6 controls whether current_thread_info()->flags needs to be checked for
+        * extra work.  We always check for extra work when returning to user-level.
+        * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
+        * is 0.  After extra work processing has been completed, execution
+        * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
+        * needs to be redone.
+        */
+#ifdef CONFIG_PREEMPT
+       rsm psr.i                               // disable interrupts
+       cmp.eq p0,pLvSys=r0,r0                  // pLvSys=0: leave from kernel
+(pKStk)        adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
+       ;;
+       .pred.rel.mutex pUStk,pKStk
+(pKStk)        ld4 r21=[r20]                   // r21 <- preempt_count
+(pUStk)        mov r21=0                       // r21 <- 0
+       ;;
+       cmp.eq p6,p0=r21,r0             // p6 <- pUStk || (preempt_count == 0)
+#else
+(pUStk)        rsm psr.i
+       cmp.eq p0,pLvSys=r0,r0          // pLvSys=0: leave from kernel
+(pUStk)        cmp.eq.unc p6,p0=r0,r0          // p6 <- pUStk
+#endif
+.work_processed_kernel:
+#ifdef XEN
+       alloc loc0=ar.pfs,0,1,1,0
+       adds out0=16,r12
+       ;;
+(p6)   br.call.sptk.many b0=deliver_pending_interrupt
+       mov ar.pfs=loc0
+       mov r31=r0
+#else
+       adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
+       ;;
+(p6)   ld4 r31=[r17]                           // load current_thread_info()->flags
+#endif
+       adds r21=PT(PR)+16,r12
+       ;;
+
+       lfetch [r21],PT(CR_IPSR)-PT(PR)
+       adds r2=PT(B6)+16,r12
+       adds r3=PT(R16)+16,r12
+       ;;
+       lfetch [r21]
+       ld8 r28=[r2],8          // load b6
+       adds r29=PT(R24)+16,r12
+
+       ld8.fill r16=[r3]
+       adds r30=PT(AR_CCV)+16,r12
+(p6)   and r19=TIF_WORK_MASK,r31               // any work other than TIF_SYSCALL_TRACE?
+       ;;
+       adds r3=PT(AR_CSD)-PT(R16),r3
+       ld8.fill r24=[r29]
+       ld8 r15=[r30]           // load ar.ccv
+(p6)   cmp4.ne.unc p6,p0=r19, r0               // any special work pending?
+       ;;
+       ld8 r29=[r2],16         // load b7
+       ld8 r30=[r3],16         // load ar.csd
+#ifndef XEN
+(p6)   br.cond.spnt .work_pending
+#endif
+       ;;
+       ld8 r31=[r2],16         // load ar.ssd
+       ld8.fill r8=[r3],16
+       ;;
+       ld8.fill r9=[r2],16
+       ld8.fill r10=[r3],PT(R17)-PT(R10)
+       ;;
+       ld8.fill r11=[r2],PT(R18)-PT(R11)
+       ld8.fill r17=[r3],16
+       ;;
+       ld8.fill r18=[r2],16
+       ld8.fill r19=[r3],16
+       ;;
+       ld8.fill r20=[r2],16
+       ld8.fill r21=[r3],16
+       mov ar.csd=r30
+       mov ar.ssd=r31
+       ;;
+       rsm psr.i | psr.ic      // initiate turning off of interrupt and interruption collection
+       invala                  // invalidate ALAT
+       ;;
+       ld8.fill r22=[r2],24
+       ld8.fill r23=[r3],24
+       mov b6=r28
+       ;;
+       ld8.fill r25=[r2],16
+       ld8.fill r26=[r3],16
+       mov b7=r29
+       ;;
+       ld8.fill r27=[r2],16
+       ld8.fill r28=[r3],16
+       ;;
+       ld8.fill r29=[r2],16
+       ld8.fill r30=[r3],24
+       ;;
+       ld8.fill r31=[r2],PT(F9)-PT(R31)
+       adds r3=PT(F10)-PT(F6),r3
+       ;;
+       ldf.fill f9=[r2],PT(F6)-PT(F9)
+       ldf.fill f10=[r3],PT(F8)-PT(F10)
+       ;;
+       ldf.fill f6=[r2],PT(F7)-PT(F6)
+       ;;
+       ldf.fill f7=[r2],PT(F11)-PT(F7)
+       ldf.fill f8=[r3],32
+       ;;
+       srlz.i                  // ensure interruption collection is off
+       mov ar.ccv=r15
+       ;;
+       ldf.fill f11=[r2]
+       bsw.0                   // switch back to bank 0 (no stop bit required beforehand...)
+       ;;
+(pUStk)        mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
+       adds r16=PT(CR_IPSR)+16,r12
+       adds r17=PT(CR_IIP)+16,r12
+
+(pKStk)        mov r22=psr             // M2 read PSR now that interrupts are disabled
+       nop.i 0
+       nop.i 0
+       ;;
+       ld8 r29=[r16],16        // load cr.ipsr
+       ld8 r28=[r17],16        // load cr.iip
+       ;;
+       ld8 r30=[r16],16        // load cr.ifs
+       ld8 r25=[r17],16        // load ar.unat
+       ;;
+       ld8 r26=[r16],16        // load ar.pfs
+       ld8 r27=[r17],16        // load ar.rsc
+       cmp.eq p9,p0=r0,r0      // set p9 to indicate that we should restore cr.ifs
+       ;;
+       ld8 r24=[r16],16        // load ar.rnat (may be garbage)
+       ld8 r23=[r17],16        // load ar.bspstore (may be garbage)
+       ;;
+       ld8 r31=[r16],16        // load predicates
+       ld8 r21=[r17],16        // load b0
+       ;;
+       ld8 r19=[r16],16        // load ar.rsc value for "loadrs"
+       ld8.fill r1=[r17],16    // load r1
+       ;;
+       ld8.fill r12=[r16],16
+       ld8.fill r13=[r17],16
+(pUStk)        adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
+       ;;
+       ld8 r20=[r16],16        // ar.fpsr
+       ld8.fill r15=[r17],16
+       ;;
+       ld8.fill r14=[r16],16
+       ld8.fill r2=[r17]
+(pUStk)        mov r17=1
+       ;;
+       ld8.fill r3=[r16]
+(pUStk)        st1 [r18]=r17           // restore current->thread.on_ustack
+       shr.u r18=r19,16        // get byte size of existing "dirty" partition
+       ;;
+       mov r16=ar.bsp          // get existing backing store pointer
+#ifdef XEN
+       movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
+#else
+       addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
+#endif
+       ;;
+       ld4 r17=[r17]           // r17 = cpu_data->phys_stacked_size_p8
+(pKStk)        br.cond.dpnt skip_rbs_switch
+
+       /*
+        * Restore user backing store.
+        *
+        * NOTE: alloc, loadrs, and cover can't be predicated.
+        */
+(pNonSys) br.cond.dpnt dont_preserve_current_frame
+
+rbs_switch:
+       cover                           // add current frame into dirty partition and set cr.ifs
+       ;;
+       mov r19=ar.bsp                  // get new backing store pointer
+       sub r16=r16,r18                 // krbs = old bsp - size of dirty partition
+       cmp.ne p9,p0=r0,r0              // clear p9 to skip restore of cr.ifs
+       ;;
+       sub r19=r19,r16                 // calculate total byte size of dirty partition
+       add r18=64,r18                  // don't force in0-in7 into memory...
+       ;;
+       shl r19=r19,16                  // shift size of dirty partition into loadrs position
+       ;;
+dont_preserve_current_frame:
+       /*
+        * To prevent leaking bits between the kernel and user-space,
+        * we must clear the stacked registers in the "invalid" partition here.
+        * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
+        * 5 registers/cycle on McKinley).
+        */
+#      define pRecurse p6
+#      define pReturn  p7
+#ifdef CONFIG_ITANIUM
+#      define Nregs    10
+#else
+#      define Nregs    14
+#endif
+       alloc loc0=ar.pfs,2,Nregs-2,2,0
+       shr.u loc1=r18,9                // RNaTslots <= floor(dirtySize / (64*8))
+       sub r17=r17,r18                 // r17 = (physStackedSize + 8) - dirtySize
+       ;;
+       mov ar.rsc=r19                  // load ar.rsc to be used for "loadrs"
+       shladd in0=loc1,3,r17
+       mov in1=0
+       ;;
+       TEXT_ALIGN(32)
+rse_clear_invalid:
+#ifdef CONFIG_ITANIUM
+       // cycle 0
+ { .mii
+       alloc loc0=ar.pfs,2,Nregs-2,2,0
+       cmp.lt pRecurse,p0=Nregs*8,in0  // if more than Nregs regs left to clear, (re)curse
+       add out0=-Nregs*8,in0
+}{ .mfb
+       add out1=1,in1                  // increment recursion count
+       nop.f 0
+       nop.b 0                         // can't do br.call here because of alloc (WAW on CFM)
+       ;;
+}{ .mfi        // cycle 1
+       mov loc1=0
+       nop.f 0
+       mov loc2=0
+}{ .mib
+       mov loc3=0
+       mov loc4=0
+(pRecurse) br.call.sptk.many b0=rse_clear_invalid
+
+}{ .mfi        // cycle 2
+       mov loc5=0
+       nop.f 0
+       cmp.ne pReturn,p0=r0,in1        // if recursion count != 0, we need to do a br.ret
+}{ .mib
+       mov loc6=0
+       mov loc7=0
+(pReturn) br.ret.sptk.many b0
+}
+#else /* !CONFIG_ITANIUM */
+       alloc loc0=ar.pfs,2,Nregs-2,2,0
+       cmp.lt pRecurse,p0=Nregs*8,in0  // if more than Nregs regs left to clear, (re)curse
+       add out0=-Nregs*8,in0
+       add out1=1,in1                  // increment recursion count
+       mov loc1=0
+       mov loc2=0
+       ;;
+       mov loc3=0
+       mov loc4=0
+       mov loc5=0
+       mov loc6=0
+       mov loc7=0
+(pRecurse) br.call.sptk.few b0=rse_clear_invalid
+       ;;
+       mov loc8=0
+       mov loc9=0
+       cmp.ne pReturn,p0=r0,in1        // if recursion count != 0, we need to do a br.ret
+       mov loc10=0
+       mov loc11=0
+(pReturn) br.ret.sptk.many b0
+#endif /* !CONFIG_ITANIUM */
+#      undef pRecurse
+#      undef pReturn
+       ;;
+       alloc r17=ar.pfs,0,0,0,0        // drop current register frame
+       ;;
+       loadrs
+       ;;
+skip_rbs_switch:
+       mov ar.unat=r25         // M2
+(pKStk)        extr.u r22=r22,21,1     // I0 extract current value of psr.pp from r22
+(pLvSys)mov r19=r0             // A  clear r19 for leave_syscall, no-op otherwise
+       ;;
+(pUStk)        mov ar.bspstore=r23     // M2
+(pKStk)        dep r29=r22,r29,21,1    // I0 update ipsr.pp with psr.pp
+(pLvSys)mov r16=r0             // A  clear r16 for leave_syscall, no-op otherwise
+       ;;
+       mov cr.ipsr=r29         // M2
+       mov ar.pfs=r26          // I0
+(pLvSys)mov r17=r0             // A  clear r17 for leave_syscall, no-op otherwise
+
+(p9)   mov cr.ifs=r30          // M2
+       mov b0=r21              // I0
+(pLvSys)mov r18=r0             // A  clear r18 for leave_syscall, no-op otherwise
+
+       mov ar.fpsr=r20         // M2
+       mov cr.iip=r28          // M2
+       nop 0
+       ;;
+(pUStk)        mov ar.rnat=r24         // M2 must happen with RSE in lazy mode
+       nop 0
+(pLvSys)mov r2=r0
+
+       mov ar.rsc=r27          // M2
+       mov pr=r31,-1           // I0
+       rfi                     // B
+
+#ifndef XEN
+       /*
+        * On entry:
+        *      r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
+        *      r31 = current->thread_info->flags
+        * On exit:
+        *      p6 = TRUE if work-pending-check needs to be redone
+        */
+.work_pending_syscall:
+       add r2=-8,r2
+       add r3=-8,r3
+       ;;
+       st8 [r2]=r8
+       st8 [r3]=r10
+.work_pending:
+       tbit.nz p6,p0=r31,TIF_SIGDELAYED                // signal delayed from MCA/INIT/NMI/PMI context?
+(p6)   br.cond.sptk.few .sigdelayed
+       ;;
+       tbit.z p6,p0=r31,TIF_NEED_RESCHED               // current_thread_info()->need_resched==0?
+(p6)   br.cond.sptk.few .notify
+#ifdef CONFIG_PREEMPT
+(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
+       ;;
+(pKStk) st4 [r20]=r21
+       ssm psr.i               // enable interrupts
+#endif
+       br.call.spnt.many rp=schedule
+.ret9: cmp.eq p6,p0=r0,r0                              // p6 <- 1
+       rsm psr.i               // disable interrupts
+       ;;
+#ifdef CONFIG_PREEMPT
+(pKStk)        adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
+       ;;
+(pKStk)        st4 [r20]=r0            // preempt_count() <- 0
+#endif
+(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
+       br.cond.sptk.many .work_processed_kernel        // re-check
+
+.notify:
+(pUStk)        br.call.spnt.many rp=notify_resume_user
+.ret10:        cmp.ne p6,p0=r0,r0                              // p6 <- 0
+(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
+       br.cond.sptk.many .work_processed_kernel        // don't re-check
+
+// There is a delayed signal that was detected in MCA/INIT/NMI/PMI context where
+// it could not be delivered.  Deliver it now.  The signal might be for us and
+// may set TIF_SIGPENDING, so redrive ia64_leave_* after processing the delayed
+// signal.
+
+.sigdelayed:
+       br.call.sptk.many rp=do_sigdelayed
+       cmp.eq p6,p0=r0,r0                              // p6 <- 1, always re-check
+(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
+       br.cond.sptk.many .work_processed_kernel        // re-check
+
+.work_pending_syscall_end:
+       adds r2=PT(R8)+16,r12
+       adds r3=PT(R10)+16,r12
+       ;;
+       ld8 r8=[r2]
+       ld8 r10=[r3]
+       br.cond.sptk.many .work_processed_syscall       // re-check
+#endif
+
+END(ia64_leave_kernel)
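The rse_clear_invalid loop above zeroes the now-invalid part of the register backing store
Nregs registers per step: each level allocates a fresh register frame, clears its locals,
recurses while more than Nregs*8 bytes remain, and then unwinds with one br.ret per recursion
level.  A rough C model of that control flow is sketched below; names are purely illustrative.

    /* C model of rse_clear_invalid.  bytes_left stands in for in0 and depth
     * for in1; NREGS is 10 on Itanium and 14 on McKinley per the #defines
     * above.  Each call level corresponds to one alloc that zeroes NREGS
     * stacked registers. */
    enum { NREGS = 14 };

    static void clear_invalid_partition(long bytes_left, long depth)
    {
        /* loc0..locN are cleared here in the real code */
        if (bytes_left > NREGS * 8)                              /* cmp.lt pRecurse */
            clear_invalid_partition(bytes_left - NREGS * 8, depth + 1);
        /* each level entered with depth != 0 returns via (pReturn) br.ret */
        (void)depth;
    }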
+
+ENTRY(handle_syscall_error)
+       /*
+        * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
+        * lead us to mistake a negative return value as a failed syscall.  Those syscalls
+        * must deposit a non-zero value in pt_regs.r8 to indicate an error.  If
+        * pt_regs.r8 is zero, we assume that the call completed successfully.
+        */
+       PT_REGS_UNWIND_INFO(0)
+       ld8 r3=[r2]             // load pt_regs.r8
+       ;;
+       cmp.eq p6,p7=r3,r0      // is pt_regs.r8==0?
+       ;;
+(p7)   mov r10=-1
+(p7)   sub r8=0,r8             // negate return value to get errno
+       br.cond.sptk ia64_leave_syscall
+END(handle_syscall_error)
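handle_syscall_error implements the ia64 syscall error convention: on failure r10 is set to
-1 and r8 carries the (positive) errno, while on success r10 is 0 and r8 is the return value.
A hedged sketch of how a C-level caller would decode that pair; the helper name is
illustrative and not part of the patch.

    /* Decode the r8/r10 pair produced by handle_syscall_error and by the
     * strace_error path earlier in this file.  Not code from the patch. */
    static long decode_ia64_syscall_result(long r8, long r10, int *err)
    {
        if (r10 == -1) {           /* kernel signalled failure            */
            *err = (int)r8;        /* r8 holds the positive errno         */
            return -1;
        }
        *err = 0;
        return r8;                 /* success: r8 is the syscall's result */
    }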
+
+       /*
+        * Invoke schedule_tail(task) while preserving in0-in7, which may be needed
+        * in case a system call gets restarted.
+        */
+GLOBAL_ENTRY(ia64_invoke_schedule_tail)
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+       alloc loc1=ar.pfs,8,2,1,0
+       mov loc0=rp
+       mov out0=r8                             // Address of previous task
+       ;;
+       br.call.sptk.many rp=schedule_tail
+.ret11:        mov ar.pfs=loc1
+       mov rp=loc0
+       br.ret.sptk.many rp
+END(ia64_invoke_schedule_tail)
+
+#ifndef XEN
+       /*
+        * Setup stack and call do_notify_resume_user().  Note that pSys and pNonSys need to
+        * be set up by the caller.  We declare 8 input registers so the system call
+        * args get preserved, in case we need to restart a system call.
+        */
+ENTRY(notify_resume_user)
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+       alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
+       mov r9=ar.unat
+       mov loc0=rp                             // save return address
+       mov out0=0                              // there is no "oldset"
+       adds out1=8,sp                          // out1=&sigscratch->ar_pfs
+(pSys) mov out2=1                              // out2==1 => we're in a syscall
+       ;;
+(pNonSys) mov out2=0                           // out2==0 => not a syscall
+       .fframe 16
+       .spillpsp ar.unat, 16                   // (note that offset is relative to psp+0x10!)
+       st8 [sp]=r9,-16                         // allocate space for ar.unat and save it
+       st8 [out1]=loc1,-8                      // save ar.pfs, out1=&sigscratch
+       .body
+       br.call.sptk.many rp=do_notify_resume_user
+.ret15:        .restore sp
+       adds sp=16,sp                           // pop scratch stack space
+       ;;
+       ld8 r9=[sp]                             // load new unat from sigscratch->scratch_unat
+       mov rp=loc0
+       ;;
+       mov ar.unat=r9
+       mov ar.pfs=loc1
+       br.ret.sptk.many rp
+END(notify_resume_user)
+
+GLOBAL_ENTRY(sys_rt_sigsuspend)
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+       alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
+       mov r9=ar.unat
+       mov loc0=rp                             // save return address
+       mov out0=in0                            // mask
+       mov out1=in1                            // sigsetsize
+       adds out2=8,sp                          // out2=&sigscratch->ar_pfs
+       ;;
+       .fframe 16
+       .spillpsp ar.unat, 16                   // (note that offset is relative to psp+0x10!)
+       st8 [sp]=r9,-16                         // allocate space for ar.unat and save it
+       st8 [out2]=loc1,-8                      // save ar.pfs, out2=&sigscratch
+       .body
+       br.call.sptk.many rp=ia64_rt_sigsuspend
+.ret17:        .restore sp
+       adds sp=16,sp                           // pop scratch stack space
+       ;;
+       ld8 r9=[sp]                             // load new unat from sw->caller_unat
+       mov rp=loc0
+       ;;
+       mov ar.unat=r9
+       mov ar.pfs=loc1
+       br.ret.sptk.many rp
+END(sys_rt_sigsuspend)
+
+ENTRY(sys_rt_sigreturn)
+       PT_REGS_UNWIND_INFO(0)
+       /*
+        * Allocate 8 input registers since ptrace() may clobber them
+        */
+       alloc r2=ar.pfs,8,0,1,0
+       .prologue
+       PT_REGS_SAVES(16)
+       adds sp=-16,sp
+       .body
+       cmp.eq pNonSys,pSys=r0,r0               // sigreturn isn't a normal syscall...
+       ;;
+       /*
+        * leave_kernel() restores f6-f11 from pt_regs, but since the streamlined
+        * syscall-entry path does not save them we save them here instead.  Note: we
+        * don't need to save any other registers that are not saved by the stream-lined
+        * syscall path, because restore_sigcontext() restores them.
+        */
+       adds r16=PT(F6)+32,sp
+       adds r17=PT(F7)+32,sp
+       ;;
+       stf.spill [r16]=f6,32
+       stf.spill [r17]=f7,32
+       ;;
+       stf.spill [r16]=f8,32
+       stf.spill [r17]=f9,32
+       ;;
+       stf.spill [r16]=f10
+       stf.spill [r17]=f11
+       adds out0=16,sp                         // out0 = &sigscratch
+       br.call.sptk.many rp=ia64_rt_sigreturn
+.ret19:        .restore sp 0
+       adds sp=16,sp
+       ;;
+       ld8 r9=[sp]                             // load new ar.unat
+       mov.sptk b7=r8,ia64_leave_kernel
+       ;;
+       mov ar.unat=r9
+       br.many b7
+END(sys_rt_sigreturn)
+#endif
+
+GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
+       .prologue
+       /*
+        * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
+        */
+       mov r16=r0
+       DO_SAVE_SWITCH_STACK
+       br.call.sptk.many rp=ia64_handle_unaligned      // stack frame setup in ivt
+.ret21:        .body
+       DO_LOAD_SWITCH_STACK
+       br.cond.sptk.many rp                            // goes to ia64_leave_kernel
+END(ia64_prepare_handle_unaligned)
+
+#ifndef XEN
+       //
+       // unw_init_running(void (*callback)(info, arg), void *arg)
+       //
+#      define EXTRA_FRAME_SIZE ((UNW_FRAME_INFO_SIZE+15)&~15)
+
+GLOBAL_ENTRY(unw_init_running)
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
+       alloc loc1=ar.pfs,2,3,3,0
+       ;;
+       ld8 loc2=[in0],8
+       mov loc0=rp
+       mov r16=loc1
+       DO_SAVE_SWITCH_STACK
+       .body
+
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
+       .fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE
+       SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE)
+       adds sp=-EXTRA_FRAME_SIZE,sp
+       .body
+       ;;
+       adds out0=16,sp                         // &info
+       mov out1=r13                            // current
+       adds out2=16+EXTRA_FRAME_SIZE,sp        // &switch_stack
+       br.call.sptk.many rp=unw_init_frame_info
+1:     adds out0=16,sp                         // &info
+       mov b6=loc2
+       mov loc2=gp                             // save gp across indirect function call
+       ;;
+       ld8 gp=[in0]
+       mov out1=in1                            // arg
+       br.call.sptk.many rp=b6                 // invoke the callback function
+1:     mov gp=loc2                             // restore gp
+
+       // For now, we don't allow changing registers from within
+       // unw_init_running; if we ever want to allow that, we'd
+       // have to do a load_switch_stack here:
+       .restore sp
+       adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp
+
+       mov ar.pfs=loc1
+       mov rp=loc0
+       br.ret.sptk.many rp
+END(unw_init_running)
+
+       .rodata
+       .align 8
+       .globl sys_call_table
+sys_call_table:
+       data8 sys_ni_syscall            //  This must be sys_ni_syscall!  See ivt.S.
+       data8 sys_exit                          // 1025
+       data8 sys_read
+       data8 sys_write
+       data8 sys_open
+       data8 sys_close
+       data8 sys_creat                         // 1030
+       data8 sys_link
+       data8 sys_unlink
+       data8 ia64_execve
+       data8 sys_chdir
+       data8 sys_fchdir                        // 1035
+       data8 sys_utimes
+       data8 sys_mknod
+       data8 sys_chmod
+       data8 sys_chown
+       data8 sys_lseek                         // 1040
+       data8 sys_getpid
+       data8 sys_getppid
+       data8 sys_mount
+       data8 sys_umount
+       data8 sys_setuid                        // 1045
+       data8 sys_getuid
+       data8 sys_geteuid
+       data8 sys_ptrace
+       data8 sys_access
+       data8 sys_sync                          // 1050
+       data8 sys_fsync
+       data8 sys_fdatasync
+       data8 sys_kill
+       data8 sys_rename
+       data8 sys_mkdir                         // 1055
+       data8 sys_rmdir
+       data8 sys_dup
+       data8 sys_pipe
+       data8 sys_times
+       data8 ia64_brk                          // 1060
+       data8 sys_setgid
+       data8 sys_getgid
+       data8 sys_getegid
+       data8 sys_acct
+       data8 sys_ioctl                         // 1065
+       data8 sys_fcntl
+       data8 sys_umask
+       data8 sys_chroot
+       data8 sys_ustat
+       data8 sys_dup2                          // 1070
+       data8 sys_setreuid
+       data8 sys_setregid
+       data8 sys_getresuid
+       data8 sys_setresuid
+       data8 sys_getresgid                     // 1075
+       data8 sys_setresgid
+       data8 sys_getgroups
+       data8 sys_setgroups
+       data8 sys_getpgid
+       data8 sys_setpgid                       // 1080
+       data8 sys_setsid
+       data8 sys_getsid
+       data8 sys_sethostname
+       data8 sys_setrlimit
+       data8 sys_getrlimit                     // 1085
+       data8 sys_getrusage
+       data8 sys_gettimeofday
+       data8 sys_settimeofday
+       data8 sys_select
+       data8 sys_poll                          // 1090
+       data8 sys_symlink
+       data8 sys_readlink
+       data8 sys_uselib
+       data8 sys_swapon
+       data8 sys_swapoff                       // 1095
+       data8 sys_reboot
+       data8 sys_truncate
+       data8 sys_ftruncate
+       data8 sys_fchmod
+       data8 sys_fchown                        // 1100
+       data8 ia64_getpriority
+       data8 sys_setpriority
+       data8 sys_statfs
+       data8 sys_fstatfs
+       data8 sys_gettid                        // 1105
+       data8 sys_semget
+       data8 sys_semop
+       data8 sys_semctl
+       data8 sys_msgget
+       data8 sys_msgsnd                        // 1110
+       data8 sys_msgrcv
+       data8 sys_msgctl
+       data8 sys_shmget
+       data8 ia64_shmat
+       data8 sys_shmdt                         // 1115
+       data8 sys_shmctl
+       data8 sys_syslog
+       data8 sys_setitimer
+       data8 sys_getitimer
+       data8 sys_ni_syscall                    // 1120         /* was: ia64_oldstat */
+       data8 sys_ni_syscall                                    /* was: ia64_oldlstat */
+       data8 sys_ni_syscall                                    /* was: ia64_oldfstat */
+       data8 sys_vhangup
+       data8 sys_lchown
+       data8 sys_remap_file_pages              // 1125
+       data8 sys_wait4
+       data8 sys_sysinfo
+       data8 sys_clone
+       data8 sys_setdomainname
+       data8 sys_newuname                      // 1130
+       data8 sys_adjtimex
+       data8 sys_ni_syscall                                    /* was: ia64_create_module */
+       data8 sys_init_module
+       data8 sys_delete_module
+       data8 sys_ni_syscall                    // 1135         /* was: sys_get_kernel_syms */
+       data8 sys_ni_syscall                                    /* was: sys_query_module */
+       data8 sys_quotactl
+       data8 sys_bdflush
+       data8 sys_sysfs
+       data8 sys_personality                   // 1140
+       data8 sys_ni_syscall            // sys_afs_syscall
+       data8 sys_setfsuid
+       data8 sys_setfsgid
+       data8 sys_getdents
+       data8 sys_flock                         // 1145
+       data8 sys_readv
+       data8 sys_writev
+       data8 sys_pread64
+       data8 sys_pwrite64
+       data8 sys_sysctl                        // 1150
+       data8 sys_mmap
+       data8 sys_munmap
+       data8 sys_mlock
+       data8 sys_mlockall
+       data8 sys_mprotect                      // 1155
+       data8 ia64_mremap
+       data8 sys_msync
+       data8 sys_munlock
+       data8 sys_munlockall
+       data8 sys_sched_getparam                // 1160
+       data8 sys_sched_setparam
+       data8 sys_sched_getscheduler
+       data8 sys_sched_setscheduler
+       data8 sys_sched_yield
+       data8 sys_sched_get_priority_max        // 1165
+       data8 sys_sched_get_priority_min
+       data8 sys_sched_rr_get_interval
+       data8 sys_nanosleep
+       data8 sys_nfsservctl
+       data8 sys_prctl                         // 1170
+       data8 sys_getpagesize
+       data8 sys_mmap2
+       data8 sys_pciconfig_read
+       data8 sys_pciconfig_write
+       data8 sys_perfmonctl                    // 1175
+       data8 sys_sigaltstack
+       data8 sys_rt_sigaction
+       data8 sys_rt_sigpending
+       data8 sys_rt_sigprocmask
+       data8 sys_rt_sigqueueinfo               // 1180
+       data8 sys_rt_sigreturn
+       data8 sys_rt_sigsuspend
+       data8 sys_rt_sigtimedwait
+       data8 sys_getcwd
+       data8 sys_capget                        // 1185
+       data8 sys_capset
+       data8 sys_sendfile64
+       data8 sys_ni_syscall            // sys_getpmsg (STREAMS)
+       data8 sys_ni_syscall            // sys_putpmsg (STREAMS)
+       data8 sys_socket                        // 1190
+       data8 sys_bind
+       data8 sys_connect
+       data8 sys_listen
+       data8 sys_accept
+       data8 sys_getsockname                   // 1195
+       data8 sys_getpeername
+       data8 sys_socketpair
+       data8 sys_send
+       data8 sys_sendto
+       data8 sys_recv                          // 1200
+       data8 sys_recvfrom
+       data8 sys_shutdown
+       data8 sys_setsockopt
+       data8 sys_getsockopt
+       data8 sys_sendmsg                       // 1205
+       data8 sys_recvmsg
+       data8 sys_pivot_root
+       data8 sys_mincore
+       data8 sys_madvise
+       data8 sys_newstat                       // 1210
+       data8 sys_newlstat
+       data8 sys_newfstat
+       data8 sys_clone2
+       data8 sys_getdents64
+       data8 sys_getunwind                     // 1215
+       data8 sys_readahead
+       data8 sys_setxattr
+       data8 sys_lsetxattr
+       data8 sys_fsetxattr
+       data8 sys_getxattr                      // 1220
+       data8 sys_lgetxattr
+       data8 sys_fgetxattr
+       data8 sys_listxattr
+       data8 sys_llistxattr
+       data8 sys_flistxattr                    // 1225
+       data8 sys_removexattr
+       data8 sys_lremovexattr
+       data8 sys_fremovexattr
+       data8 sys_tkill
+       data8 sys_futex                         // 1230
+       data8 sys_sched_setaffinity
+       data8 sys_sched_getaffinity
+       data8 sys_set_tid_address
+       data8 sys_fadvise64_64
+       data8 sys_tgkill                        // 1235
+       data8 sys_exit_group
+       data8 sys_lookup_dcookie
+       data8 sys_io_setup
+       data8 sys_io_destroy
+       data8 sys_io_getevents                  // 1240
+       data8 sys_io_submit
+       data8 sys_io_cancel
+       data8 sys_epoll_create
+       data8 sys_epoll_ctl
+       data8 sys_epoll_wait                    // 1245
+       data8 sys_restart_syscall
+       data8 sys_semtimedop
+       data8 sys_timer_create
+       data8 sys_timer_settime
+       data8 sys_timer_gettime                 // 1250
+       data8 sys_timer_getoverrun
+       data8 sys_timer_delete
+       data8 sys_clock_settime
+       data8 sys_clock_gettime
+       data8 sys_clock_getres                  // 1255
+       data8 sys_clock_nanosleep
+       data8 sys_fstatfs64
+       data8 sys_statfs64
+       data8 sys_mbind
+       data8 sys_get_mempolicy                 // 1260
+       data8 sys_set_mempolicy
+       data8 sys_mq_open
+       data8 sys_mq_unlink
+       data8 sys_mq_timedsend
+       data8 sys_mq_timedreceive               // 1265
+       data8 sys_mq_notify
+       data8 sys_mq_getsetattr
+       data8 sys_ni_syscall                    // reserved for kexec_load
+       data8 sys_ni_syscall                    // reserved for vserver
+       data8 sys_waitid                        // 1270
+       data8 sys_add_key
+       data8 sys_request_key
+       data8 sys_keyctl
+       data8 sys_ni_syscall
+       data8 sys_ni_syscall                    // 1275
+       data8 sys_ni_syscall
+       data8 sys_ni_syscall
+       data8 sys_ni_syscall
+       data8 sys_ni_syscall
+
+       .org sys_call_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
+#endif
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/entry.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/entry.h     Tue Aug  2 23:59:09 2005
@@ -0,0 +1,97 @@
+#include <linux/config.h>
+
+/*
+ * Preserved registers that are shared between code in ivt.S and
+ * entry.S.  Be careful not to step on these!
+ */
+#define PRED_LEAVE_SYSCALL     1 /* TRUE iff leave from syscall */
+#define PRED_KERNEL_STACK      2 /* returning to kernel-stacks? */
+#define PRED_USER_STACK                3 /* returning to user-stacks? */
+#ifdef CONFIG_VTI
+#define PRED_EMUL              2 /* Need to save r4-r7 for inst emulation */
+#define PRED_NON_EMUL          3 /* No need to save r4-r7 for normal path */
+#define PRED_BN0               6 /* Guest is in bank 0 */
+#define PRED_BN1               7 /* Guest is in bank 1 */
+#endif // CONFIG_VTI
+#define PRED_SYSCALL           4 /* inside a system call? */
+#define PRED_NON_SYSCALL       5 /* complement of PRED_SYSCALL */
+
+#ifdef __ASSEMBLY__
+# define PASTE2(x,y)   x##y
+# define PASTE(x,y)    PASTE2(x,y)
+
+# define pLvSys                PASTE(p,PRED_LEAVE_SYSCALL)
+# define pKStk         PASTE(p,PRED_KERNEL_STACK)
+# define pUStk         PASTE(p,PRED_USER_STACK)
+#ifdef CONFIG_VTI
+# define pEml          PASTE(p,PRED_EMUL)
+# define pNonEml       PASTE(p,PRED_NON_EMUL)
+# define pBN0          PASTE(p,PRED_BN0)
+# define pBN1          PASTE(p,PRED_BN1)
+#endif // CONFIG_VTI
+# define pSys          PASTE(p,PRED_SYSCALL)
+# define pNonSys       PASTE(p,PRED_NON_SYSCALL)
+#endif
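The two-level PASTE/PASTE2 trick above lets the predicate numbers expand before the token
paste runs, so pLvSys becomes p1, pKStk p2, pUStk p3, pSys p4 and pNonSys p5.  A tiny
stand-alone C check of the expansion (not part of the header) is shown below.

    /* Demonstrates why the paste has to go through two macro levels:
     * the inner expansion turns PRED_LEAVE_SYSCALL into 1 before ## runs. */
    #define PRED_LEAVE_SYSCALL 1
    #define PASTE2(x, y) x##y
    #define PASTE(x, y)  PASTE2(x, y)

    int p1 = 0;                                  /* stands in for predicate p1 */
    int *demo = &PASTE(p, PRED_LEAVE_SYSCALL);   /* expands to &p1             */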
+
+#define PT(f)          (IA64_PT_REGS_##f##_OFFSET)
+#define SW(f)          (IA64_SWITCH_STACK_##f##_OFFSET)
+#ifdef CONFIG_VTI
+#define VPD(f)      (VPD_##f##_START_OFFSET)
+#endif // CONFIG_VTI
+
+#define PT_REGS_SAVES(off)                     \
+       .unwabi 3, 'i';                         \
+       .fframe IA64_PT_REGS_SIZE+16+(off);     \
+       .spillsp rp, PT(CR_IIP)+16+(off);       \
+       .spillsp ar.pfs, PT(CR_IFS)+16+(off);   \
+       .spillsp ar.unat, PT(AR_UNAT)+16+(off); \
+       .spillsp ar.fpsr, PT(AR_FPSR)+16+(off); \
+       .spillsp pr, PT(PR)+16+(off);
+
+#define PT_REGS_UNWIND_INFO(off)               \
+       .prologue;                              \
+       PT_REGS_SAVES(off);                     \
+       .body
+
+#define SWITCH_STACK_SAVES(off)                                                 \
+       .savesp ar.unat,SW(CALLER_UNAT)+16+(off);                               \
+       .savesp ar.fpsr,SW(AR_FPSR)+16+(off);                                   \
+       .spillsp f2,SW(F2)+16+(off); .spillsp f3,SW(F3)+16+(off);               \
+       .spillsp f4,SW(F4)+16+(off); .spillsp f5,SW(F5)+16+(off);               \
+       .spillsp f16,SW(F16)+16+(off); .spillsp f17,SW(F17)+16+(off);           \
+       .spillsp f18,SW(F18)+16+(off); .spillsp f19,SW(F19)+16+(off);           \
+       .spillsp f20,SW(F20)+16+(off); .spillsp f21,SW(F21)+16+(off);           \
+       .spillsp f22,SW(F22)+16+(off); .spillsp f23,SW(F23)+16+(off);           \
+       .spillsp f24,SW(F24)+16+(off); .spillsp f25,SW(F25)+16+(off);           \
+       .spillsp f26,SW(F26)+16+(off); .spillsp f27,SW(F27)+16+(off);           \
+       .spillsp f28,SW(F28)+16+(off); .spillsp f29,SW(F29)+16+(off);           \
+       .spillsp f30,SW(F30)+16+(off); .spillsp f31,SW(F31)+16+(off);           \
+       .spillsp r4,SW(R4)+16+(off); .spillsp r5,SW(R5)+16+(off);               \
+       .spillsp r6,SW(R6)+16+(off); .spillsp r7,SW(R7)+16+(off);               \
+       .spillsp b0,SW(B0)+16+(off); .spillsp b1,SW(B1)+16+(off);               \
+       .spillsp b2,SW(B2)+16+(off); .spillsp b3,SW(B3)+16+(off);               \
+       .spillsp b4,SW(B4)+16+(off); .spillsp b5,SW(B5)+16+(off);               \
+       .spillsp ar.pfs,SW(AR_PFS)+16+(off); .spillsp ar.lc,SW(AR_LC)+16+(off); \
+       .spillsp @priunat,SW(AR_UNAT)+16+(off);                                 \
+       .spillsp ar.rnat,SW(AR_RNAT)+16+(off);                                  \
+       .spillsp ar.bspstore,SW(AR_BSPSTORE)+16+(off);                          \
+       .spillsp pr,SW(PR)+16+(off))
+
+#define DO_SAVE_SWITCH_STACK                   \
+       movl r28=1f;                            \
+       ;;                                      \
+       .fframe IA64_SWITCH_STACK_SIZE;         \
+       adds sp=-IA64_SWITCH_STACK_SIZE,sp;     \
+       mov.ret.sptk b7=r28,1f;                 \
+       SWITCH_STACK_SAVES(0);                  \
+       br.cond.sptk.many save_switch_stack;    \
+1:
+
+#define DO_LOAD_SWITCH_STACK                   \
+       movl r28=1f;                            \
+       ;;                                      \
+       invala;                                 \
+       mov.ret.sptk b7=r28,1f;                 \
+       br.cond.sptk.many load_switch_stack;    \
+1:     .restore sp;                            \
+       adds sp=IA64_SWITCH_STACK_SIZE,sp
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/head.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/head.S      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,1026 @@
+/*
+ * Here is where the ball gets rolling as far as the kernel is concerned.
+ * When control is transferred to _start, the bootloader has already
+ * loaded us to the correct address.  All that's left to do here is
+ * to set up the kernel's global pointer and jump to the kernel
+ * entry point.
+ *
+ * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ * Copyright (C) 1999 Intel Corp.
+ * Copyright (C) 1999 Asit Mallick <Asit.K.Mallick@xxxxxxxxx>
+ * Copyright (C) 1999 Don Dugger <Don.Dugger@xxxxxxxxx>
+ * Copyright (C) 2002 Fenghua Yu <fenghua.yu@xxxxxxxxx>
+ *   -Optimize __ia64_save_fpu() and __ia64_load_fpu() for Itanium 2.
+ */
+
+#include <linux/config.h>
+
+#include <asm/asmmacro.h>
+#include <asm/fpu.h>
+#include <asm/kregs.h>
+#include <asm/mmu_context.h>
+#include <asm/offsets.h>
+#include <asm/pal.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+
+       .section __special_page_section,"ax"
+
+       .global empty_zero_page
+empty_zero_page:
+       .skip PAGE_SIZE
+
+       .global swapper_pg_dir
+swapper_pg_dir:
+       .skip PAGE_SIZE
+
+       .rodata
+halt_msg:
+       stringz "Halting kernel\n"
+
+       .text
+
+       .global start_ap
+
+       /*
+        * Start the kernel.  When the bootloader passes control to _start(), r28
+        * points to the address of the boot parameter area.  Execution reaches
+        * here in physical mode.
+        */
+GLOBAL_ENTRY(_start)
+start_ap:
+       .prologue
+       .save rp, r0            // terminate unwind chain with a NULL rp
+       .body
+
+       rsm psr.i | psr.ic
+       ;;
+       srlz.i
+       ;;
+       /*
+        * Initialize kernel region registers:
+        *      rr[0]: VHPT enabled, page size = PAGE_SHIFT
+        *      rr[1]: VHPT enabled, page size = PAGE_SHIFT
+        *      rr[2]: VHPT enabled, page size = PAGE_SHIFT
+        *      rr[3]: VHPT enabled, page size = PAGE_SHIFT
+        *      rr[4]: VHPT enabled, page size = PAGE_SHIFT
+        *      rr[5]: VHPT enabled, page size = PAGE_SHIFT
+        *      rr[6]: VHPT disabled, page size = IA64_GRANULE_SHIFT
+        *      rr[7]: VHPT disabled, page size = IA64_GRANULE_SHIFT
+        * We initialize all of them to prevent inadvertently assuming
+        * something about the state of address translation early in boot.
+        */
+       movl r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
+       movl r7=(0<<61)
+       movl r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
+       movl r9=(1<<61)
+       movl r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
+       movl r11=(2<<61)
+       movl r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
+       movl r13=(3<<61)
+       movl r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
+       movl r15=(4<<61)
+       movl r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
+       movl r17=(5<<61)
+       movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
+       movl r19=(6<<61)
+       movl r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
+       movl r21=(7<<61)
+       ;;
+       mov rr[r7]=r6
+       mov rr[r9]=r8
+       mov rr[r11]=r10
+       mov rr[r13]=r12
+       mov rr[r15]=r14
+       mov rr[r17]=r16
+       mov rr[r19]=r18
+       mov rr[r21]=r20
+       ;;
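Each value written into rr[0]..rr[7] above packs a region id, a log2 page size and the VHPT
enable bit, as described in the comment before the movl block.  A small C sketch of the
encoding follows; the helper name and the exact bit-field boundaries are assumptions here,
while ia64_rid() and the shift constants come from the headers included at the top of head.S.

    /* Region register layout assumed here: bit 0 = VHPT enable (ve),
     * bits 7..2 = log2 page size (ps), bits 31..8 = region id (rid). */
    static unsigned long make_region_reg(unsigned long rid,
                                         unsigned long page_shift,
                                         int vhpt_on)
    {
        return (rid << 8) | (page_shift << 2) | (vhpt_on ? 1UL : 0UL);
    }

    /* Regions 0-5: make_region_reg(ia64_rid(..., n<<61), PAGE_SHIFT, 1)
     * Regions 6-7: make_region_reg(ia64_rid(..., n<<61), IA64_GRANULE_SHIFT, 0) */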
+       /*
+        * Now pin mappings into the TLB for kernel text and data
+        */
+       mov r18=KERNEL_TR_PAGE_SHIFT<<2
+       movl r17=KERNEL_START
+       ;;
+       mov cr.itir=r18
+       mov cr.ifa=r17
+       mov r16=IA64_TR_KERNEL
+       mov r3=ip
+       movl r18=PAGE_KERNEL
+       ;;
+       dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
+       ;;
+       or r18=r2,r18
+       ;;
+       srlz.i
+       ;;
+       itr.i itr[r16]=r18
+       ;;
+       itr.d dtr[r16]=r18
+       ;;
+       srlz.i
+
+       /*
+        * Switch into virtual mode:
+        */
+#ifdef CONFIG_VTI
+       movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH \
+                 |IA64_PSR_DI)
+#else // CONFIG_VTI
+       movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
+                 |IA64_PSR_DI)
+#endif // CONFIG_VTI
+       ;;
+       mov cr.ipsr=r16
+       movl r17=1f
+       ;;
+       mov cr.iip=r17
+       mov cr.ifs=r0
+       ;;
+       rfi
+       ;;
+1:     // now we are in virtual mode
+
+       // set IVT entry point---can't access I/O ports without it
+#ifdef CONFIG_VTI
+    movl r3=vmx_ia64_ivt
+#else // CONFIG_VTI
+       movl r3=ia64_ivt
+#endif // CONFIG_VTI
+       ;;
+       mov cr.iva=r3
+       movl r2=FPSR_DEFAULT
+       ;;
+       srlz.i
+       movl gp=__gp
+
+       mov ar.fpsr=r2
+       ;;
+
+#define isAP   p2      // are we an Application Processor?
+#define isBP   p3      // are we the Bootstrap Processor?
+
+#ifdef CONFIG_SMP
+       /*
+        * Find the init_task for the currently booting CPU.  At poweron, and in
+        * UP mode, task_for_booting_cpu is NULL.
+        */
+       movl r3=task_for_booting_cpu
+       ;;
+       ld8 r3=[r3]
+       movl r2=init_task
+       ;;
+       cmp.eq isBP,isAP=r3,r0
+       ;;
+(isAP) mov r2=r3
+#else
+       movl r2=init_task
+       cmp.eq isBP,isAP=r0,r0
+#endif
+       ;;
+       tpa r3=r2               // r3 == phys addr of task struct
+       mov r16=-1
+(isBP) br.cond.dpnt .load_current // BP stack is on region 5 --- no need to map it
+
+       // load mapping for stack (virtaddr in r2, physaddr in r3)
+       rsm psr.ic
+       movl r17=PAGE_KERNEL
+       ;;
+       srlz.d
+       dep r18=0,r3,0,12
+       ;;
+       or r18=r17,r18
+#ifdef XEN
+       dep r2=-1,r3,60,4       // IMVA of task
+#else
+       dep r2=-1,r3,61,3       // IMVA of task
+#endif
+       ;;
+       mov r17=rr[r2]
+       shr.u r16=r3,IA64_GRANULE_SHIFT
+       ;;
+       dep r17=0,r17,8,24
+       ;;
+       mov cr.itir=r17
+       mov cr.ifa=r2
+
+       mov r19=IA64_TR_CURRENT_STACK
+       ;;
+       itr.d dtr[r19]=r18
+       ;;
+       ssm psr.ic
+       srlz.d
+       ;;
+
+.load_current:
+       // load the "current" pointer (r13) and ar.k6 with the current task
+#ifdef CONFIG_VTI
+       mov r21=r2              // virtual address
+       ;;
+       bsw.1
+       ;;
+#else // CONFIG_VTI
+       mov IA64_KR(CURRENT)=r2         // virtual address
+       mov IA64_KR(CURRENT_STACK)=r16
+#endif // CONFIG_VTI
+       mov r13=r2
+       /*
+        * Reserve space at the top of the stack for "struct pt_regs".  Kernel threads
+        * don't store interesting values in that structure, but the space still needs
+        * to be there because time-critical stuff such as the context switching can
+        * be implemented more efficiently (for example, __switch_to()
+        * always sets the psr.dfh bit of the task it is switching to).
+        */
+       addl r12=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16,r2
+       addl r2=IA64_RBS_OFFSET,r2      // initialize the RSE
+       mov ar.rsc=0            // place RSE in enforced lazy mode
+       ;;
+       loadrs                  // clear the dirty partition
+       ;;
+       mov ar.bspstore=r2      // establish the new RSE stack
+       ;;
+       mov ar.rsc=0x3          // place RSE in eager mode
+
+#ifdef XEN
+(isBP) dep r28=-1,r28,60,4     // make address virtual
+#else
+(isBP) dep r28=-1,r28,61,3     // make address virtual
+#endif
+(isBP) movl r2=ia64_boot_param
+       ;;
+(isBP) st8 [r2]=r28            // save the address of the boot param area passed by the bootloader
+
+#ifdef CONFIG_SMP
+(isAP) br.call.sptk.many rp=start_secondary
+.ret0:
+(isAP) br.cond.sptk self
+#endif
+
+       // This is executed by the bootstrap processor (bsp) only:
+
+#ifdef CONFIG_IA64_FW_EMU
+       // initialize PAL & SAL emulator:
+       br.call.sptk.many rp=sys_fw_init
+.ret1:
+#endif
+       br.call.sptk.many rp=start_kernel
+.ret2: addl r3=@ltoff(halt_msg),gp
+       ;;
+       alloc r2=ar.pfs,8,0,2,0
+       ;;
+       ld8 out0=[r3]
+       br.call.sptk.many b0=console_print
+
+self:  hint @pause
+       ;;
+       br.sptk.many self               // endless loop
+       ;;
+END(_start)
+
+GLOBAL_ENTRY(ia64_save_debug_regs)
+       alloc r16=ar.pfs,1,0,0,0
+       mov r20=ar.lc                   // preserve ar.lc
+       mov ar.lc=IA64_NUM_DBG_REGS-1
+       mov r18=0
+       add r19=IA64_NUM_DBG_REGS*8,in0
+       ;;
+1:     mov r16=dbr[r18]
+#ifdef CONFIG_ITANIUM
+       ;;
+       srlz.d
+#endif
+       mov r17=ibr[r18]
+       add r18=1,r18
+       ;;
+       st8.nta [in0]=r16,8
+       st8.nta [r19]=r17,8
+       br.cloop.sptk.many 1b
+       ;;
+       mov ar.lc=r20                   // restore ar.lc
+       br.ret.sptk.many rp
+END(ia64_save_debug_regs)
+
+GLOBAL_ENTRY(ia64_load_debug_regs)
+       alloc r16=ar.pfs,1,0,0,0
+       lfetch.nta [in0]
+       mov r20=ar.lc                   // preserve ar.lc
+       add r19=IA64_NUM_DBG_REGS*8,in0
+       mov ar.lc=IA64_NUM_DBG_REGS-1
+       mov r18=-1
+       ;;
+1:     ld8.nta r16=[in0],8
+       ld8.nta r17=[r19],8
+       add r18=1,r18
+       ;;
+       mov dbr[r18]=r16
+#ifdef CONFIG_ITANIUM
+       ;;
+       srlz.d                          // Errata 132 (NoFix status)
+#endif
+       mov ibr[r18]=r17
+       br.cloop.sptk.many 1b
+       ;;
+       mov ar.lc=r20                   // restore ar.lc
+       br.ret.sptk.many rp
+END(ia64_load_debug_regs)
+
+GLOBAL_ENTRY(__ia64_save_fpu)
+       alloc r2=ar.pfs,1,4,0,0
+       adds loc0=96*16-16,in0
+       adds loc1=96*16-16-128,in0
+       ;;
+       stf.spill.nta [loc0]=f127,-256
+       stf.spill.nta [loc1]=f119,-256
+       ;;
+       stf.spill.nta [loc0]=f111,-256
+       stf.spill.nta [loc1]=f103,-256
+       ;;
+       stf.spill.nta [loc0]=f95,-256
+       stf.spill.nta [loc1]=f87,-256
+       ;;
+       stf.spill.nta [loc0]=f79,-256
+       stf.spill.nta [loc1]=f71,-256
+       ;;
+       stf.spill.nta [loc0]=f63,-256
+       stf.spill.nta [loc1]=f55,-256
+       adds loc2=96*16-32,in0
+       ;;
+       stf.spill.nta [loc0]=f47,-256
+       stf.spill.nta [loc1]=f39,-256
+       adds loc3=96*16-32-128,in0
+       ;;
+       stf.spill.nta [loc2]=f126,-256
+       stf.spill.nta [loc3]=f118,-256
+       ;;
+       stf.spill.nta [loc2]=f110,-256
+       stf.spill.nta [loc3]=f102,-256
+       ;;
+       stf.spill.nta [loc2]=f94,-256
+       stf.spill.nta [loc3]=f86,-256
+       ;;
+       stf.spill.nta [loc2]=f78,-256
+       stf.spill.nta [loc3]=f70,-256
+       ;;
+       stf.spill.nta [loc2]=f62,-256
+       stf.spill.nta [loc3]=f54,-256
+       adds loc0=96*16-48,in0
+       ;;
+       stf.spill.nta [loc2]=f46,-256
+       stf.spill.nta [loc3]=f38,-256
+       adds loc1=96*16-48-128,in0
+       ;;
+       stf.spill.nta [loc0]=f125,-256
+       stf.spill.nta [loc1]=f117,-256
+       ;;
+       stf.spill.nta [loc0]=f109,-256
+       stf.spill.nta [loc1]=f101,-256
+       ;;
+       stf.spill.nta [loc0]=f93,-256
+       stf.spill.nta [loc1]=f85,-256
+       ;;
+       stf.spill.nta [loc0]=f77,-256
+       stf.spill.nta [loc1]=f69,-256
+       ;;
+       stf.spill.nta [loc0]=f61,-256
+       stf.spill.nta [loc1]=f53,-256
+       adds loc2=96*16-64,in0
+       ;;
+       stf.spill.nta [loc0]=f45,-256
+       stf.spill.nta [loc1]=f37,-256
+       adds loc3=96*16-64-128,in0
+       ;;
+       stf.spill.nta [loc2]=f124,-256
+       stf.spill.nta [loc3]=f116,-256
+       ;;
+       stf.spill.nta [loc2]=f108,-256
+       stf.spill.nta [loc3]=f100,-256
+       ;;
+       stf.spill.nta [loc2]=f92,-256
+       stf.spill.nta [loc3]=f84,-256
+       ;;
+       stf.spill.nta [loc2]=f76,-256
+       stf.spill.nta [loc3]=f68,-256
+       ;;
+       stf.spill.nta [loc2]=f60,-256
+       stf.spill.nta [loc3]=f52,-256
+       adds loc0=96*16-80,in0
+       ;;
+       stf.spill.nta [loc2]=f44,-256
+       stf.spill.nta [loc3]=f36,-256
+       adds loc1=96*16-80-128,in0
+       ;;
+       stf.spill.nta [loc0]=f123,-256
+       stf.spill.nta [loc1]=f115,-256
+       ;;
+       stf.spill.nta [loc0]=f107,-256
+       stf.spill.nta [loc1]=f99,-256
+       ;;
+       stf.spill.nta [loc0]=f91,-256
+       stf.spill.nta [loc1]=f83,-256
+       ;;
+       stf.spill.nta [loc0]=f75,-256
+       stf.spill.nta [loc1]=f67,-256
+       ;;
+       stf.spill.nta [loc0]=f59,-256
+       stf.spill.nta [loc1]=f51,-256
+       adds loc2=96*16-96,in0
+       ;;
+       stf.spill.nta [loc0]=f43,-256
+       stf.spill.nta [loc1]=f35,-256
+       adds loc3=96*16-96-128,in0
+       ;;
+       stf.spill.nta [loc2]=f122,-256
+       stf.spill.nta [loc3]=f114,-256
+       ;;
+       stf.spill.nta [loc2]=f106,-256
+       stf.spill.nta [loc3]=f98,-256
+       ;;
+       stf.spill.nta [loc2]=f90,-256
+       stf.spill.nta [loc3]=f82,-256
+       ;;
+       stf.spill.nta [loc2]=f74,-256
+       stf.spill.nta [loc3]=f66,-256
+       ;;
+       stf.spill.nta [loc2]=f58,-256
+       stf.spill.nta [loc3]=f50,-256
+       adds loc0=96*16-112,in0
+       ;;
+       stf.spill.nta [loc2]=f42,-256
+       stf.spill.nta [loc3]=f34,-256
+       adds loc1=96*16-112-128,in0
+       ;;
+       stf.spill.nta [loc0]=f121,-256
+       stf.spill.nta [loc1]=f113,-256
+       ;;
+       stf.spill.nta [loc0]=f105,-256
+       stf.spill.nta [loc1]=f97,-256
+       ;;
+       stf.spill.nta [loc0]=f89,-256
+       stf.spill.nta [loc1]=f81,-256
+       ;;
+       stf.spill.nta [loc0]=f73,-256
+       stf.spill.nta [loc1]=f65,-256
+       ;;
+       stf.spill.nta [loc0]=f57,-256
+       stf.spill.nta [loc1]=f49,-256
+       adds loc2=96*16-128,in0
+       ;;
+       stf.spill.nta [loc0]=f41,-256
+       stf.spill.nta [loc1]=f33,-256
+       adds loc3=96*16-128-128,in0
+       ;;
+       stf.spill.nta [loc2]=f120,-256
+       stf.spill.nta [loc3]=f112,-256
+       ;;
+       stf.spill.nta [loc2]=f104,-256
+       stf.spill.nta [loc3]=f96,-256
+       ;;
+       stf.spill.nta [loc2]=f88,-256
+       stf.spill.nta [loc3]=f80,-256
+       ;;
+       stf.spill.nta [loc2]=f72,-256
+       stf.spill.nta [loc3]=f64,-256
+       ;;
+       stf.spill.nta [loc2]=f56,-256
+       stf.spill.nta [loc3]=f48,-256
+       ;;
+       stf.spill.nta [loc2]=f40
+       stf.spill.nta [loc3]=f32
+       br.ret.sptk.many rp
+END(__ia64_save_fpu)
+
+GLOBAL_ENTRY(__ia64_load_fpu)
+       alloc r2=ar.pfs,1,2,0,0
+       adds r3=128,in0
+       adds r14=256,in0
+       adds r15=384,in0
+       mov loc0=512
+       mov loc1=-1024+16
+       ;;
+       ldf.fill.nta f32=[in0],loc0
+       ldf.fill.nta f40=[ r3],loc0
+       ldf.fill.nta f48=[r14],loc0
+       ldf.fill.nta f56=[r15],loc0
+       ;;
+       ldf.fill.nta f64=[in0],loc0
+       ldf.fill.nta f72=[ r3],loc0
+       ldf.fill.nta f80=[r14],loc0
+       ldf.fill.nta f88=[r15],loc0
+       ;;
+       ldf.fill.nta f96=[in0],loc1
+       ldf.fill.nta f104=[ r3],loc1
+       ldf.fill.nta f112=[r14],loc1
+       ldf.fill.nta f120=[r15],loc1
+       ;;
+       ldf.fill.nta f33=[in0],loc0
+       ldf.fill.nta f41=[ r3],loc0
+       ldf.fill.nta f49=[r14],loc0
+       ldf.fill.nta f57=[r15],loc0
+       ;;
+       ldf.fill.nta f65=[in0],loc0
+       ldf.fill.nta f73=[ r3],loc0
+       ldf.fill.nta f81=[r14],loc0
+       ldf.fill.nta f89=[r15],loc0
+       ;;
+       ldf.fill.nta f97=[in0],loc1
+       ldf.fill.nta f105=[ r3],loc1
+       ldf.fill.nta f113=[r14],loc1
+       ldf.fill.nta f121=[r15],loc1
+       ;;
+       ldf.fill.nta f34=[in0],loc0
+       ldf.fill.nta f42=[ r3],loc0
+       ldf.fill.nta f50=[r14],loc0
+       ldf.fill.nta f58=[r15],loc0
+       ;;
+       ldf.fill.nta f66=[in0],loc0
+       ldf.fill.nta f74=[ r3],loc0
+       ldf.fill.nta f82=[r14],loc0
+       ldf.fill.nta f90=[r15],loc0
+       ;;
+       ldf.fill.nta f98=[in0],loc1
+       ldf.fill.nta f106=[ r3],loc1
+       ldf.fill.nta f114=[r14],loc1
+       ldf.fill.nta f122=[r15],loc1
+       ;;
+       ldf.fill.nta f35=[in0],loc0
+       ldf.fill.nta f43=[ r3],loc0
+       ldf.fill.nta f51=[r14],loc0
+       ldf.fill.nta f59=[r15],loc0
+       ;;
+       ldf.fill.nta f67=[in0],loc0
+       ldf.fill.nta f75=[ r3],loc0
+       ldf.fill.nta f83=[r14],loc0
+       ldf.fill.nta f91=[r15],loc0
+       ;;
+       ldf.fill.nta f99=[in0],loc1
+       ldf.fill.nta f107=[ r3],loc1
+       ldf.fill.nta f115=[r14],loc1
+       ldf.fill.nta f123=[r15],loc1
+       ;;
+       ldf.fill.nta f36=[in0],loc0
+       ldf.fill.nta f44=[ r3],loc0
+       ldf.fill.nta f52=[r14],loc0
+       ldf.fill.nta f60=[r15],loc0
+       ;;
+       ldf.fill.nta f68=[in0],loc0
+       ldf.fill.nta f76=[ r3],loc0
+       ldf.fill.nta f84=[r14],loc0
+       ldf.fill.nta f92=[r15],loc0
+       ;;
+       ldf.fill.nta f100=[in0],loc1
+       ldf.fill.nta f108=[ r3],loc1
+       ldf.fill.nta f116=[r14],loc1
+       ldf.fill.nta f124=[r15],loc1
+       ;;
+       ldf.fill.nta f37=[in0],loc0
+       ldf.fill.nta f45=[ r3],loc0
+       ldf.fill.nta f53=[r14],loc0
+       ldf.fill.nta f61=[r15],loc0
+       ;;
+       ldf.fill.nta f69=[in0],loc0
+       ldf.fill.nta f77=[ r3],loc0
+       ldf.fill.nta f85=[r14],loc0
+       ldf.fill.nta f93=[r15],loc0
+       ;;
+       ldf.fill.nta f101=[in0],loc1
+       ldf.fill.nta f109=[ r3],loc1
+       ldf.fill.nta f117=[r14],loc1
+       ldf.fill.nta f125=[r15],loc1
+       ;;
+       ldf.fill.nta f38 =[in0],loc0
+       ldf.fill.nta f46 =[ r3],loc0
+       ldf.fill.nta f54 =[r14],loc0
+       ldf.fill.nta f62 =[r15],loc0
+       ;;
+       ldf.fill.nta f70 =[in0],loc0
+       ldf.fill.nta f78 =[ r3],loc0
+       ldf.fill.nta f86 =[r14],loc0
+       ldf.fill.nta f94 =[r15],loc0
+       ;;
+       ldf.fill.nta f102=[in0],loc1
+       ldf.fill.nta f110=[ r3],loc1
+       ldf.fill.nta f118=[r14],loc1
+       ldf.fill.nta f126=[r15],loc1
+       ;;
+       ldf.fill.nta f39 =[in0],loc0
+       ldf.fill.nta f47 =[ r3],loc0
+       ldf.fill.nta f55 =[r14],loc0
+       ldf.fill.nta f63 =[r15],loc0
+       ;;
+       ldf.fill.nta f71 =[in0],loc0
+       ldf.fill.nta f79 =[ r3],loc0
+       ldf.fill.nta f87 =[r14],loc0
+       ldf.fill.nta f95 =[r15],loc0
+       ;;
+       ldf.fill.nta f103=[in0]
+       ldf.fill.nta f111=[ r3]
+       ldf.fill.nta f119=[r14]
+       ldf.fill.nta f127=[r15]
+       br.ret.sptk.many rp
+END(__ia64_load_fpu)
+
+GLOBAL_ENTRY(__ia64_init_fpu)
+       stf.spill [sp]=f0               // M3
+       mov      f32=f0                 // F
+       nop.b    0
+
+       ldfps    f33,f34=[sp]           // M0
+       ldfps    f35,f36=[sp]           // M1
+       mov      f37=f0                 // F
+       ;;
+
+       setf.s   f38=r0                 // M2
+       setf.s   f39=r0                 // M3
+       mov      f40=f0                 // F
+
+       ldfps    f41,f42=[sp]           // M0
+       ldfps    f43,f44=[sp]           // M1
+       mov      f45=f0                 // F
+
+       setf.s   f46=r0                 // M2
+       setf.s   f47=r0                 // M3
+       mov      f48=f0                 // F
+
+       ldfps    f49,f50=[sp]           // M0
+       ldfps    f51,f52=[sp]           // M1
+       mov      f53=f0                 // F
+
+       setf.s   f54=r0                 // M2
+       setf.s   f55=r0                 // M3
+       mov      f56=f0                 // F
+
+       ldfps    f57,f58=[sp]           // M0
+       ldfps    f59,f60=[sp]           // M1
+       mov      f61=f0                 // F
+
+       setf.s   f62=r0                 // M2
+       setf.s   f63=r0                 // M3
+       mov      f64=f0                 // F
+
+       ldfps    f65,f66=[sp]           // M0
+       ldfps    f67,f68=[sp]           // M1
+       mov      f69=f0                 // F
+
+       setf.s   f70=r0                 // M2
+       setf.s   f71=r0                 // M3
+       mov      f72=f0                 // F
+
+       ldfps    f73,f74=[sp]           // M0
+       ldfps    f75,f76=[sp]           // M1
+       mov      f77=f0                 // F
+
+       setf.s   f78=r0                 // M2
+       setf.s   f79=r0                 // M3
+       mov      f80=f0                 // F
+
+       ldfps    f81,f82=[sp]           // M0
+       ldfps    f83,f84=[sp]           // M1
+       mov      f85=f0                 // F
+
+       setf.s   f86=r0                 // M2
+       setf.s   f87=r0                 // M3
+       mov      f88=f0                 // F
+
+       /*
+        * When the instructions are cached, it would be faster to initialize
+        * the remaining registers with simply mov instructions (F-unit).
+        * This gets the time down to ~29 cycles.  However, this would use up
+        * 33 bundles, whereas continuing with the above pattern yields
+        * 10 bundles and ~30 cycles.
+        */
+
+       ldfps    f89,f90=[sp]           // M0
+       ldfps    f91,f92=[sp]           // M1
+       mov      f93=f0                 // F
+
+       setf.s   f94=r0                 // M2
+       setf.s   f95=r0                 // M3
+       mov      f96=f0                 // F
+
+       ldfps    f97,f98=[sp]           // M0
+       ldfps    f99,f100=[sp]          // M1
+       mov      f101=f0                // F
+
+       setf.s   f102=r0                // M2
+       setf.s   f103=r0                // M3
+       mov      f104=f0                // F
+
+       ldfps    f105,f106=[sp]         // M0
+       ldfps    f107,f108=[sp]         // M1
+       mov      f109=f0                // F
+
+       setf.s   f110=r0                // M2
+       setf.s   f111=r0                // M3
+       mov      f112=f0                // F
+
+       ldfps    f113,f114=[sp]         // M0
+       ldfps    f115,f116=[sp]         // M1
+       mov      f117=f0                // F
+
+       setf.s   f118=r0                // M2
+       setf.s   f119=r0                // M3
+       mov      f120=f0                // F
+
+       ldfps    f121,f122=[sp]         // M0
+       ldfps    f123,f124=[sp]         // M1
+       mov      f125=f0                // F
+
+       setf.s   f126=r0                // M2
+       setf.s   f127=r0                // M3
+       br.ret.sptk.many rp             // F
+END(__ia64_init_fpu)
+
+/*
+ * Switch execution mode from virtual to physical
+ *
+ * Inputs:
+ *     r16 = new psr to establish
+ * Output:
+ *     r19 = old virtual address of ar.bsp
+ *     r20 = old virtual address of sp
+ *
+ * Note: RSE must already be in enforced lazy mode
+ */
+GLOBAL_ENTRY(ia64_switch_mode_phys)
+ {
+       alloc r2=ar.pfs,0,0,0,0
+       rsm psr.i | psr.ic              // disable interrupts and interrupt collection
+       mov r15=ip
+ }
+       ;;
+ {
+       flushrs                         // must be first insn in group
+       srlz.i
+ }
+       ;;
+       mov cr.ipsr=r16                 // set new PSR
+       add r3=1f-ia64_switch_mode_phys,r15
+
+       mov r19=ar.bsp
+       mov r20=sp
+       mov r14=rp                      // get return address into a general register
+       ;;
+
+       // going to physical mode, use tpa to translate virt->phys
+       tpa r17=r19
+       tpa r3=r3
+       tpa sp=sp
+       tpa r14=r14
+       ;;
+
+       mov r18=ar.rnat                 // save ar.rnat
+       mov ar.bspstore=r17             // this steps on ar.rnat
+       mov cr.iip=r3
+       mov cr.ifs=r0
+       ;;
+       mov ar.rnat=r18                 // restore ar.rnat
+       rfi                             // must be last insn in group
+       ;;
+1:     mov rp=r14
+       br.ret.sptk.many rp
+END(ia64_switch_mode_phys)
+
+/*
+ * Switch execution mode from physical to virtual
+ *
+ * Inputs:
+ *     r16 = new psr to establish
+ *     r19 = new bspstore to establish
+ *     r20 = new sp to establish
+ *
+ * Note: RSE must already be in enforced lazy mode
+ */
+GLOBAL_ENTRY(ia64_switch_mode_virt)
+ {
+       alloc r2=ar.pfs,0,0,0,0
+       rsm psr.i | psr.ic              // disable interrupts and interrupt collection
+       mov r15=ip
+ }
+       ;;
+ {
+       flushrs                         // must be first insn in group
+       srlz.i
+ }
+       ;;
+       mov cr.ipsr=r16                 // set new PSR
+       add r3=1f-ia64_switch_mode_virt,r15
+
+       mov r14=rp                      // get return address into a general register
+       ;;
+
+       // going to virtual
+       //   - for code addresses, set upper bits of addr to KERNEL_START
+       //   - for stack addresses, copy from input argument
+       movl r18=KERNEL_START
+       dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
+       dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
+       mov sp=r20
+       ;;
+       or r3=r3,r18
+       or r14=r14,r18
+       ;;
+
+       mov r18=ar.rnat                 // save ar.rnat
+       mov ar.bspstore=r19             // this steps on ar.rnat
+       mov cr.iip=r3
+       mov cr.ifs=r0
+       ;;
+       mov ar.rnat=r18                 // restore ar.rnat
+       rfi                             // must be last insn in group
+       ;;
+1:     mov rp=r14
+       br.ret.sptk.many rp
+END(ia64_switch_mode_virt)
+
+GLOBAL_ENTRY(ia64_delay_loop)
+       .prologue
+{      nop 0                   // work around GAS unwind info generation bug...
+       .save ar.lc,r2
+       mov r2=ar.lc
+       .body
+       ;;
+       mov ar.lc=r32
+}
+       ;;
+       // force loop to be 32-byte aligned (GAS bug means we cannot use .align
+       // inside function body without corrupting unwind info).
+{      nop 0 }
+1:     br.cloop.sptk.few 1b
+       ;;
+       mov ar.lc=r2
+       br.ret.sptk.many rp
+END(ia64_delay_loop)
+
+/*
+ * Return a CPU-local timestamp in nano-seconds.  This timestamp is
+ * NOT synchronized across CPUs; its return value must never be
+ * compared against the values returned on another CPU.  The usage in
+ * kernel/sched.c ensures that.
+ *
+ * The return-value of sched_clock() is NOT supposed to wrap-around.
+ * If it did, it would cause some scheduling hiccups (at the worst).
+ * Fortunately, with a 64-bit cycle-counter ticking at 100GHz, even
+ * that would happen only once every 5+ years.
+ *
+ * The code below basically calculates:
+ *
+ *   (ia64_get_itc() * local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT
+ *
+ * except that the multiplication and the shift are done with 128-bit
+ * intermediate precision so that we can produce a full 64-bit result.
+ */
+GLOBAL_ENTRY(sched_clock)
+#ifdef XEN
+       movl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET
+#else
+       addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+#endif
+       mov.m r9=ar.itc         // fetch cycle-counter                          (35 cyc)
+       ;;
+       ldf8 f8=[r8]
+       ;;
+       setf.sig f9=r9          // certain to stall, so issue it _after_ ldf8...
+       ;;
+       xmpy.lu f10=f9,f8       // calculate low 64 bits of 128-bit product     (4 cyc)
+       xmpy.hu f11=f9,f8       // calculate high 64 bits of 128-bit product
+       ;;
+       getf.sig r8=f10         //                                              (5 cyc)
+       getf.sig r9=f11
+       ;;
+       shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
+       br.ret.sptk.many rp
+END(sched_clock)
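
The xmpy.lu/xmpy.hu/shrp sequence above is the 64x64->128-bit multiply-and-shift described in the comment.  A rough C equivalent (illustrative only; it assumes a compiler with unsigned __int128 support, and nsec_per_cyc is the per-CPU scale factor set up elsewhere, roughly (10^9 << IA64_NSEC_PER_CYC_SHIFT) / itc frequency, which is not part of this hunk):

        /* Sketch of the conversion performed by sched_clock above. */
        static inline unsigned long cycles_to_ns(unsigned long itc,
                                                 unsigned long nsec_per_cyc)
        {
                unsigned __int128 prod = (unsigned __int128)itc * nsec_per_cyc;

                return (unsigned long)(prod >> IA64_NSEC_PER_CYC_SHIFT);
        }
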
+
+GLOBAL_ENTRY(start_kernel_thread)
+       .prologue
+       .save rp, r0                            // this is the end of the call-chain
+       .body
+       alloc r2 = ar.pfs, 0, 0, 2, 0
+       mov out0 = r9
+       mov out1 = r11;;
+       br.call.sptk.many rp = kernel_thread_helper;;
+       mov out0 = r8
+       br.call.sptk.many rp = sys_exit;;
+1:     br.sptk.few 1b                          // not reached
+END(start_kernel_thread)
+
+#ifdef CONFIG_IA64_BRL_EMU
+
+/*
+ *  Assembly routines used by brl_emu.c to set preserved register state.
+ */
+
+#define SET_REG(reg)                           \
+ GLOBAL_ENTRY(ia64_set_##reg);                 \
+       alloc r16=ar.pfs,1,0,0,0;               \
+       mov reg=r32;                            \
+       ;;                                      \
+       br.ret.sptk.many rp;                    \
+ END(ia64_set_##reg)
+
+SET_REG(b1);
+SET_REG(b2);
+SET_REG(b3);
+SET_REG(b4);
+SET_REG(b5);
+
+#endif /* CONFIG_IA64_BRL_EMU */
+
+#ifdef CONFIG_SMP
+       /*
+        * This routine handles spinlock contention.  It uses a non-standard calling
+        * convention to avoid converting leaf routines into interior routines.  Because
+        * of this special convention, there are several restrictions:
+        *
+        * - do not use gp relative variables, this code is called from the kernel
+        *   and from modules, r1 is undefined.
+        * - do not use stacked registers, the caller owns them.
+        * - do not use the scratch stack space, the caller owns it.
+        * - do not use any registers other than the ones listed below
+        *
+        * Inputs:
+        *   ar.pfs - saved CFM of caller
+        *   ar.ccv - 0 (and available for use)
+        *   r27    - flags from spin_lock_irqsave or 0.  Must be preserved.
+        *   r28    - available for use.
+        *   r29    - available for use.
+        *   r30    - available for use.
+        *   r31    - address of lock, available for use.
+        *   b6     - return address
+        *   p14    - available for use.
+        *   p15    - used to track flag status.
+        *
+        * If you patch this code to use more registers, do not forget to update
+        * the clobber lists for spin_lock() in include/asm-ia64/spinlock.h.
+        */
+
+#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+
+GLOBAL_ENTRY(ia64_spinlock_contention_pre3_4)
+       .prologue
+       .save ar.pfs, r0        // this code effectively has a zero frame size
+       .save rp, r28
+       .body
+       nop 0
+       tbit.nz p15,p0=r27,IA64_PSR_I_BIT
+       .restore sp             // pop existing prologue after next insn
+       mov b6 = r28
+       .prologue
+       .save ar.pfs, r0
+       .altrp b6
+       .body
+       ;;
+(p15)  ssm psr.i               // reenable interrupts if they were on
+                               // DavidM says that srlz.d is slow and is not required in this case
+.wait:
+       // exponential backoff, kdb, lockmeter etc. go in here
+       hint @pause
+       ld4 r30=[r31]           // don't use ld4.bias; if it's contended, we won't write the word
+       nop 0
+       ;;
+       cmp4.ne p14,p0=r30,r0
+(p14)  br.cond.sptk.few .wait
+(p15)  rsm psr.i               // disable interrupts if we reenabled them
+       br.cond.sptk.few b6     // lock is now free, try to acquire
+       .global ia64_spinlock_contention_pre3_4_end     // for kernprof
+ia64_spinlock_contention_pre3_4_end:
+END(ia64_spinlock_contention_pre3_4)
+
+#else
+
+GLOBAL_ENTRY(ia64_spinlock_contention)
+       .prologue
+       .altrp b6
+       .body
+       tbit.nz p15,p0=r27,IA64_PSR_I_BIT
+       ;;
+.wait:
+(p15)  ssm psr.i               // reenable interrupts if they were on
+                               // DavidM says that srlz.d is slow and is not required in this case
+.wait2:
+       // exponential backoff, kdb, lockmeter etc. go in here
+       hint @pause
+       ld4 r30=[r31]           // don't use ld4.bias; if it's contended, we won't write the word
+       ;;
+       cmp4.ne p14,p0=r30,r0
+       mov r30 = 1
+(p14)  br.cond.sptk.few .wait2
+(p15)  rsm psr.i               // disable interrupts if we reenabled them
+       ;;
+       cmpxchg4.acq r30=[r31], r30, ar.ccv
+       ;;
+       cmp4.ne p14,p0=r0,r30
+(p14)  br.cond.sptk.few .wait
+
+       br.ret.sptk.many b6     // lock is now taken
+END(ia64_spinlock_contention)
+
+#endif
+
+#endif /* CONFIG_SMP */
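
In C terms, both contention routines above spin with ordinary loads until the lock word reads zero and only then attempt the atomic acquire (the newer variant issues the cmpxchg itself, the pre-gcc-3.4 variant branches back to the caller's acquire path).  A minimal sketch of that wait/retry logic using a GCC builtin (illustrative only; it ignores the interrupt enable/disable handling and the register-only calling convention):

        /* Sketch: wait until the lock looks free, then try a cmpxchg-acquire. */
        static void spin_contend(volatile unsigned int *lock)
        {
                for (;;) {
                        while (*lock != 0)
                                ;       /* hint @pause / backoff would go here */
                        if (__sync_val_compare_and_swap(lock, 0, 1) == 0)
                                return; /* cmpxchg4.acq succeeded, lock taken */
                }
        }
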
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/irq_ia64.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/irq_ia64.c  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,381 @@
+/*
+ * linux/arch/ia64/kernel/irq.c
+ *
+ * Copyright (C) 1998-2001 Hewlett-Packard Co
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *
+ *  6/10/99: Updated to bring in sync with x86 version to facilitate
+ *          support for SMP and different interrupt controllers.
+ *
+ * 09/15/00 Goutham Rao <goutham.rao@xxxxxxxxx> Implemented pci_irq_to_vector
+ *                      PCI to vector allocation routine.
+ * 04/14/2004 Ashok Raj <ashok.raj@xxxxxxxxx>
+ *                                             Added CPU Hotplug handling for IPF.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/jiffies.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/kernel_stat.h>
+#include <linux/slab.h>
+#include <linux/ptrace.h>
+#include <linux/random.h>      /* for rand_initialize_irq() */
+#include <linux/signal.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/threads.h>
+#include <linux/bitops.h>
+
+#include <asm/delay.h>
+#include <asm/intrinsics.h>
+#include <asm/io.h>
+#include <asm/hw_irq.h>
+#include <asm/machvec.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_PERFMON
+# include <asm/perfmon.h>
+#endif
+
+#define IRQ_DEBUG      0
+
+/* default base addr of IPI table */
+void __iomem *ipi_base_addr = ((void __iomem *)
+                              (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
+
+/*
+ * Legacy IRQ to IA-64 vector translation table.
+ */
+__u8 isa_irq_to_vector_map[16] = {
+       /* 8259 IRQ translation, first 16 entries */
+       0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
+       0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
+};
+EXPORT_SYMBOL(isa_irq_to_vector_map);
+
+static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];
+
+int
+assign_irq_vector (int irq)
+{
+       int pos, vector;
+ again:
+       pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
+       vector = IA64_FIRST_DEVICE_VECTOR + pos;
+       if (vector > IA64_LAST_DEVICE_VECTOR)
+               /* XXX could look for sharable vectors instead of panic'ing... */
+               panic("assign_irq_vector: out of interrupt vectors!");
+       if (test_and_set_bit(pos, ia64_vector_mask))
+               goto again;
+       return vector;
+}
+
+void
+free_irq_vector (int vector)
+{
+       int pos;
+
+       if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
+               return;
+
+       pos = vector - IA64_FIRST_DEVICE_VECTOR;
+       if (!test_and_clear_bit(pos, ia64_vector_mask))
+               printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
+}
+
+#ifdef CONFIG_SMP
+#      define IS_RESCHEDULE(vec)       (vec == IA64_IPI_RESCHEDULE)
+#else
+#      define IS_RESCHEDULE(vec)       (0)
+#endif
+/*
+ * That's where the IVT branches when we get an external
+ * interrupt. This branches to the correct hardware IRQ handler via
+ * function ptr.
+ */
+void
+ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
+{
+       unsigned long saved_tpr;
+
+#if IRQ_DEBUG
+#ifdef XEN
+       xen_debug_irq(vector, regs);
+#endif
+       {
+               unsigned long bsp, sp;
+
+               /*
+                * Note: if the interrupt happened while executing in
+                * the context switch routine (ia64_switch_to), we may
+                * get a spurious stack overflow here.  This is
+                * because the register and the memory stack are not
+                * switched atomically.
+                */
+               bsp = ia64_getreg(_IA64_REG_AR_BSP);
+               sp = ia64_getreg(_IA64_REG_SP);
+
+               if ((sp - bsp) < 1024) {
+                       static unsigned char count;
+                       static long last_time;
+
+                       if (jiffies - last_time > 5*HZ)
+                               count = 0;
+                       if (++count < 5) {
+                               last_time = jiffies;
+                               printk("ia64_handle_irq: DANGER: less than "
+                                      "1KB of free stack space!!\n"
+                                      "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
+                       }
+               }
+       }
+#endif /* IRQ_DEBUG */
+
+       /*
+        * Always set TPR to limit maximum interrupt nesting depth to
+        * 16 (without this, it would be ~240, which could easily lead
+        * to kernel stack overflows).
+        */
+       irq_enter();
+       saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
+       ia64_srlz_d();
+       while (vector != IA64_SPURIOUS_INT_VECTOR) {
+               if (!IS_RESCHEDULE(vector)) {
+                       ia64_setreg(_IA64_REG_CR_TPR, vector);
+                       ia64_srlz_d();
+
+#ifdef XEN
+                       if (!xen_do_IRQ(vector))
+#endif
+                       __do_IRQ(local_vector_to_irq(vector), regs);
+
+                       /*
+                        * Disable interrupts and send EOI:
+                        */
+                       local_irq_disable();
+                       ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
+               }
+               ia64_eoi();
+               vector = ia64_get_ivr();
+       }
+       /*
+        * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
+        * handler needs to be able to wait for further keyboard interrupts, which can't
+        * come through until ia64_eoi() has been done.
+        */
+       irq_exit();
+}
+
+#ifdef  CONFIG_VTI
+#define vmx_irq_enter()                \
+       add_preempt_count(HARDIRQ_OFFSET);
+
+/* Now softirq will be checked when leaving hypervisor, or else
+ * scheduler irq will be executed too early.
+ */
+#define vmx_irq_exit(void)     \
+       sub_preempt_count(HARDIRQ_OFFSET);
+/*
+ * That's where the IVT branches when we get an external
+ * interrupt. This branches to the correct hardware IRQ handler via
+ * function ptr.
+ */
+void
+vmx_ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
+{
+       unsigned long saved_tpr;
+       int     wake_dom0 = 0;
+
+
+#if IRQ_DEBUG
+       {
+               unsigned long bsp, sp;
+
+               /*
+                * Note: if the interrupt happened while executing in
+                * the context switch routine (ia64_switch_to), we may
+                * get a spurious stack overflow here.  This is
+                * because the register and the memory stack are not
+                * switched atomically.
+                */
+               bsp = ia64_getreg(_IA64_REG_AR_BSP);
+               sp = ia64_getreg(_IA64_REG_AR_SP);
+
+               if ((sp - bsp) < 1024) {
+                       static unsigned char count;
+                       static long last_time;
+
+                       if (jiffies - last_time > 5*HZ)
+                               count = 0;
+                       if (++count < 5) {
+                               last_time = jiffies;
+                               printk("ia64_handle_irq: DANGER: less than "
+                                      "1KB of free stack space!!\n"
+                                      "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
+                       }
+               }
+       }
+#endif /* IRQ_DEBUG */
+
+       /*
+        * Always set TPR to limit maximum interrupt nesting depth to
+        * 16 (without this, it would be ~240, which could easily lead
+        * to kernel stack overflows).
+        */
+       vmx_irq_enter();
+       saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
+       ia64_srlz_d();
+       while (vector != IA64_SPURIOUS_INT_VECTOR) {
+           if (!IS_RESCHEDULE(vector)) {
+               ia64_setreg(_IA64_REG_CR_TPR, vector);
+               ia64_srlz_d();
+
+               if (vector != IA64_TIMER_VECTOR) {
+                       /* FIXME: Leave IRQ re-route later */
+                       vmx_vcpu_pend_interrupt(dom0->vcpu[0],vector);
+                       wake_dom0 = 1;
+               }
+               else {  // FIXME: Handle Timer only now
+                       __do_IRQ(local_vector_to_irq(vector), regs);
+               }
+               
+               /*
+                * Disable interrupts and send EOI:
+                */
+               local_irq_disable();
+               ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
+           }
+           else {
+                printf("Oops: RESCHEDULE IPI absorbed by HV\n");
+            }
+           ia64_eoi();
+           vector = ia64_get_ivr();
+       }
+       /*
+        * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
+        * handler needs to be able to wait for further keyboard interrupts, which can't
+        * come through until ia64_eoi() has been done.
+        */
+       vmx_irq_exit();
+       if ( wake_dom0 && current != dom0 ) 
+               domain_wake(dom0->vcpu[0]);
+}
+#endif
+
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * This function emulates interrupt processing when a cpu is about to be
+ * brought down.
+ */
+void ia64_process_pending_intr(void)
+{
+       ia64_vector vector;
+       unsigned long saved_tpr;
+       extern unsigned int vectors_in_migration[NR_IRQS];
+
+       vector = ia64_get_ivr();
+
+        irq_enter();
+        saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
+        ia64_srlz_d();
+
+        /*
+         * Perform normal interrupt style processing
+         */
+       while (vector != IA64_SPURIOUS_INT_VECTOR) {
+               if (!IS_RESCHEDULE(vector)) {
+                       ia64_setreg(_IA64_REG_CR_TPR, vector);
+                       ia64_srlz_d();
+
+                       /*
+                        * Now try calling normal ia64_handle_irq as it would have got called
+                        * from a real intr handler. Try passing null for pt_regs, hopefully
+                        * it will work. I hope it works!
+                        * Probably could share code.
+                        */
+                       vectors_in_migration[local_vector_to_irq(vector)]=0;
+                       __do_IRQ(local_vector_to_irq(vector), NULL);
+
+                       /*
+                        * Disable interrupts and send EOI
+                        */
+                       local_irq_disable();
+                       ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
+               }
+               ia64_eoi();
+               vector = ia64_get_ivr();
+       }
+       irq_exit();
+}
+#endif
+
+
+#ifdef CONFIG_SMP
+extern irqreturn_t handle_IPI (int irq, void *dev_id, struct pt_regs *regs);
+
+static struct irqaction ipi_irqaction = {
+       .handler =      handle_IPI,
+       .flags =        SA_INTERRUPT,
+       .name =         "IPI"
+};
+#endif
+
+void
+register_percpu_irq (ia64_vector vec, struct irqaction *action)
+{
+       irq_desc_t *desc;
+       unsigned int irq;
+
+       for (irq = 0; irq < NR_IRQS; ++irq)
+               if (irq_to_vector(irq) == vec) {
+                       desc = irq_descp(irq);
+                       desc->status |= IRQ_PER_CPU;
+                       desc->handler = &irq_type_ia64_lsapic;
+                       if (action)
+                               setup_irq(irq, action);
+               }
+}
+
+void __init
+init_IRQ (void)
+{
+       register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
+#ifdef CONFIG_SMP
+       register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
+#endif
+#ifdef CONFIG_PERFMON
+       pfm_init_percpu();
+#endif
+       platform_irq_init();
+}
+
+void
+ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
+{
+       void __iomem *ipi_addr;
+       unsigned long ipi_data;
+       unsigned long phys_cpu_id;
+
+#ifdef CONFIG_SMP
+       phys_cpu_id = cpu_physical_id(cpu);
+#else
+       phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
+#endif
+
+       /*
+        * cpu number is in 8bit ID and 8bit EID
+        */
+
+       ipi_data = (delivery_mode << 8) | (vector & 0xff);
+       ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));
+
+       writeq(ipi_data, ipi_addr);
+}
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/cmdline.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/cmdline.c     Tue Aug  2 23:59:09 2005
@@ -0,0 +1,120 @@
+/*
+ * linux/lib/cmdline.c
+ * Helper functions generally used for parsing kernel command line
+ * and module options.
+ *
+ * Code and copyrights come from init/main.c and arch/i386/kernel/setup.c.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2.  See the file COPYING for more details.
+ *
+ * GNU Indent formatting options for this file: -kr -i8 -npsl -pcs
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+
+/**
+ *     get_option - Parse integer from an option string
+ *     @str: option string
+ *     @pint: (output) integer value parsed from @str
+ *
+ *     Read an int from an option string; if available accept a subsequent
+ *     comma as well.
+ *
+ *     Return values:
+ *     0 : no int in string
+ *     1 : int found, no subsequent comma
+ *     2 : int found including a subsequent comma
+ */
+
+int get_option (char **str, int *pint)
+{
+       char *cur = *str;
+
+       if (!cur || !(*cur))
+               return 0;
+       *pint = simple_strtol (cur, str, 0);
+       if (cur == *str)
+               return 0;
+       if (**str == ',') {
+               (*str)++;
+               return 2;
+       }
+
+       return 1;
+}
+
+/**
+ *     get_options - Parse a string into a list of integers
+ *     @str: String to be parsed
+ *     @nints: size of integer array
+ *     @ints: integer array
+ *
+ *     This function parses a string containing a comma-separated
+ *     list of integers.  The parse halts when the array is
+ *     full, or when no more numbers can be retrieved from the
+ *     string.
+ *
+ *     Return value is the character in the string which caused
+ *     the parse to end (typically a null terminator, if @str is
+ *     completely parseable).
+ */
+ 
+char *get_options(const char *str, int nints, int *ints)
+{
+       int res, i = 1;
+
+       while (i < nints) {
+               res = get_option ((char **)&str, ints + i);
+               if (res == 0)
+                       break;
+               i++;
+               if (res == 1)
+                       break;
+       }
+       ints[0] = i - 1;
+       return (char *)str;
+}
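
A short usage sketch of the two parsers above (values are hypothetical, not taken from this patch):

        /* get_options() fills ints[1..] and stores the count in ints[0]. */
        int ints[5];
        char *rest = get_options("1,4,9", 5, ints);
        /* ints[0] == 3, ints[1] == 1, ints[2] == 4, ints[3] == 9, *rest == '\0' */
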
+
+/**
+ *     memparse - parse a string with mem suffixes into a number
+ *     @ptr: Where parse begins
+ *     @retptr: (output) Pointer to next char after parse completes
+ *
+ *     Parses a string into a number.  The number stored at @ptr is
+ *     potentially suffixed with %K (for kilobytes, or 1024 bytes),
+ *     %M (for megabytes, or 1048576 bytes), or %G (for gigabytes, or
+ *     1073741824).  If the number is suffixed with K, M, or G, then
+ *     the return value is the number multiplied by one kilobyte, one
+ *     megabyte, or one gigabyte, respectively.
+ */
+
+unsigned long long memparse (char *ptr, char **retptr)
+{
+       unsigned long long ret = simple_strtoull (ptr, retptr, 0);
+
+       switch (**retptr) {
+       case 'G':
+       case 'g':
+               ret <<= 10;
+       case 'M':
+       case 'm':
+               ret <<= 10;
+       case 'K':
+       case 'k':
+               ret <<= 10;
+               (*retptr)++;
+       default:
+               break;
+       }
+       return ret;
+}
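
For illustration, each suffix level in the fall-through switch above multiplies by another 1024 (hypothetical values):

        char buf[] = "16M", *end;
        unsigned long long bytes = memparse(buf, &end); /* 16 << 20 == 16777216 */
        /* "64K" would give 65536, "2G" would give 2147483648; *end points past the suffix */
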
+
+
+EXPORT_SYMBOL(memparse);
+EXPORT_SYMBOL(get_option);
+EXPORT_SYMBOL(get_options);
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/efi_stub.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/efi_stub.S    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,86 @@
+/*
+ * EFI call stub.
+ *
+ * Copyright (C) 1999-2001 Hewlett-Packard Co
+ *     David Mosberger <davidm@xxxxxxxxxx>
+ *
+ * This stub allows us to make EFI calls in physical mode with interrupts
+ * turned off.  We need this because we can't call SetVirtualMap() until
+ * the kernel has booted far enough to allow allocation of struct vma_struct
+ * entries (which we would need to map stuff with memory attributes other
+ * than uncached or writeback...).  Since the GetTime() service gets called
+ * earlier than that, we need to be able to make physical mode EFI calls from
+ * the kernel.
+ */
+
+/*
+ * PSR settings as per SAL spec (Chapter 8 in the "IA-64 System
+ * Abstraction Layer Specification", revision 2.6e).  Note that
+ * psr.dfl and psr.dfh MUST be cleared, despite what this manual says.
+ * Otherwise, SAL dies whenever it's trying to do an IA-32 BIOS call
+ * (the br.ia instruction fails unless psr.dfl and psr.dfh are
+ * cleared).  Fortunately, SAL promises not to touch the floating
+ * point regs, so at least we don't have to save f2-f127.
+ */
+#define PSR_BITS_TO_CLEAR                                              \
+       (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT |         \
+        IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED |        \
+        IA64_PSR_DFL | IA64_PSR_DFH)
+
+#define PSR_BITS_TO_SET                                                \
+       (IA64_PSR_BN)
+
+#include <asm/processor.h>
+#include <asm/asmmacro.h>
+
+/*
+ * Inputs:
+ *     in0 = address of function descriptor of EFI routine to call
+ *     in1..in7 = arguments to routine
+ *
+ * Outputs:
+ *     r8 = EFI_STATUS returned by called function
+ */
+
+GLOBAL_ENTRY(efi_call_phys)
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+       alloc loc1=ar.pfs,8,7,7,0
+       ld8 r2=[in0],8                  // load EFI function's entry point
+       mov loc0=rp
+       .body
+       ;;
+       mov loc2=gp                     // save global pointer
+       mov loc4=ar.rsc                 // save RSE configuration
+       mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
+       ;;
+       ld8 gp=[in0]                    // load EFI function's global pointer
+       movl r16=PSR_BITS_TO_CLEAR
+       mov loc3=psr                    // save processor status word
+       movl r17=PSR_BITS_TO_SET
+       ;;
+       or loc3=loc3,r17
+       mov b6=r2
+       ;;
+       andcm r16=loc3,r16              // get psr with IT, DT, and RT bits cleared
+       br.call.sptk.many rp=ia64_switch_mode_phys
+.ret0: mov out4=in5
+       mov out0=in1
+       mov out1=in2
+       mov out2=in3
+       mov out3=in4
+       mov out5=in6
+       mov out6=in7
+       mov loc5=r19
+       mov loc6=r20
+       br.call.sptk.many rp=b6         // call the EFI function
+.ret1: mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
+       mov r16=loc3
+       mov r19=loc5
+       mov r20=loc6
+       br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
+.ret2: mov ar.rsc=loc4                 // restore RSE configuration
+       mov ar.pfs=loc1
+       mov rp=loc0
+       mov gp=loc2
+       br.ret.sptk.many rp
+END(efi_call_phys)
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/extable.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/extable.c     Tue Aug  2 23:59:09 2005
@@ -0,0 +1,93 @@
+/*
+ * Kernel exception handling table support.  Derived from arch/alpha/mm/extable.c.
+ *
+ * Copyright (C) 1998, 1999, 2001-2002, 2004 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <linux/config.h>
+
+#include <asm/uaccess.h>
+#include <asm/module.h>
+
+static inline int
+compare_entries (struct exception_table_entry *l, struct exception_table_entry *r)
+{
+       u64 lip = (u64) &l->addr + l->addr;
+       u64 rip = (u64) &r->addr + r->addr;
+
+       if (lip < rip)
+               return -1;
+       if (lip == rip)
+               return 0;
+       else
+               return 1;
+}
+
+static inline void
+swap_entries (struct exception_table_entry *l, struct exception_table_entry *r)
+{
+       u64 delta = (u64) r - (u64) l;
+       struct exception_table_entry tmp;
+
+       tmp = *l;
+       l->addr = r->addr + delta;
+       l->cont = r->cont + delta;
+       r->addr = tmp.addr - delta;
+       r->cont = tmp.cont - delta;
+}
+
+/*
+ * Sort the exception table.  It's usually already sorted, but there may be unordered
+ * entries due to multiple text sections (such as the .init text section).  Note that the
+ * exception-table-entries contain location-relative addresses, which requires a bit of
+ * care during sorting to avoid overflows in the offset members (e.g., it would not be
+ * safe to make a temporary copy of an exception-table entry on the stack, because the
+ * stack may be more than 2GB away from the exception-table).
+ */
+void
+sort_extable (struct exception_table_entry *start, struct exception_table_entry *finish)
+{
+       struct exception_table_entry *p, *q;
+
+       /* insertion sort */
+       for (p = start + 1; p < finish; ++p)
+               /* start .. p-1 is sorted; push p down to its proper place */
+               for (q = p; q > start && compare_entries(&q[0], &q[-1]) < 0; --q)
+                       swap_entries(&q[0], &q[-1]);
+}
+
+const struct exception_table_entry *
+search_extable (const struct exception_table_entry *first,
+               const struct exception_table_entry *last,
+               unsigned long ip)
+{
+       const struct exception_table_entry *mid;
+       unsigned long mid_ip;
+       long diff;
+
+        while (first <= last) {
+               mid = &first[(last - first)/2];
+               mid_ip = (u64) &mid->addr + mid->addr;
+               diff = mid_ip - ip;
+                if (diff == 0)
+                        return mid;
+                else if (diff < 0)
+                        first = mid + 1;
+                else
+                        last = mid - 1;
+        }
+        return NULL;
+}
+
+void
+ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e)
+{
+       long fix = (u64) &e->cont + e->cont;
+
+       regs->r8 = -EFAULT;
+       if (fix & 4)
+               regs->r9 = 0;
+       regs->cr_iip = fix & ~0xf;
+       ia64_psr(regs)->ri = fix & 0x3;         /* set continuation slot number */
+}
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/hpsim.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/hpsim.S       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,10 @@
+#include <asm/asmmacro.h>
+
+/*
+ * Simulator system call.
+ */
+GLOBAL_ENTRY(ia64_ssc)
+       mov r15=r36
+       break 0x80001
+       br.ret.sptk.many rp
+END(ia64_ssc)
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/ia64_ksyms.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/ia64_ksyms.c  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,127 @@
+/*
+ * Architecture-specific kernel symbols
+ *
+ * Don't put any exports here unless it's defined in an assembler file.
+ * All other exports should be put directly after the definition.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/string.h>
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memchr);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memscan);
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strncat);
+EXPORT_SYMBOL(strncmp);
+EXPORT_SYMBOL(strncpy);
+EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(strstr);
+EXPORT_SYMBOL(strpbrk);
+
+#include <asm/checksum.h>
+EXPORT_SYMBOL(ip_fast_csum);           /* hand-coded assembly */
+
+#include <asm/semaphore.h>
+EXPORT_SYMBOL(__down);
+EXPORT_SYMBOL(__down_interruptible);
+EXPORT_SYMBOL(__down_trylock);
+EXPORT_SYMBOL(__up);
+
+#include <asm/page.h>
+EXPORT_SYMBOL(clear_page);
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+#include <linux/bootmem.h>
+EXPORT_SYMBOL(max_low_pfn);    /* defined by bootmem.c, but not exported by generic code */
+#endif
+
+#include <asm/processor.h>
+EXPORT_SYMBOL(per_cpu__cpu_info);
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
+#endif
+
+#include <asm/uaccess.h>
+EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(__do_clear_user);
+EXPORT_SYMBOL(__strlen_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(__strnlen_user);
+
+#include <asm/unistd.h>
+EXPORT_SYMBOL(__ia64_syscall);
+
+/* from arch/ia64/lib */
+extern void __divsi3(void);
+extern void __udivsi3(void);
+extern void __modsi3(void);
+extern void __umodsi3(void);
+extern void __divdi3(void);
+extern void __udivdi3(void);
+extern void __moddi3(void);
+extern void __umoddi3(void);
+
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__divdi3);
+EXPORT_SYMBOL(__udivdi3);
+EXPORT_SYMBOL(__moddi3);
+EXPORT_SYMBOL(__umoddi3);
+
+#if defined(CONFIG_MD_RAID5) || defined(CONFIG_MD_RAID5_MODULE)
+extern void xor_ia64_2(void);
+extern void xor_ia64_3(void);
+extern void xor_ia64_4(void);
+extern void xor_ia64_5(void);
+
+EXPORT_SYMBOL(xor_ia64_2);
+EXPORT_SYMBOL(xor_ia64_3);
+EXPORT_SYMBOL(xor_ia64_4);
+EXPORT_SYMBOL(xor_ia64_5);
+#endif
+
+#include <asm/pal.h>
+EXPORT_SYMBOL(ia64_pal_call_phys_stacked);
+EXPORT_SYMBOL(ia64_pal_call_phys_static);
+EXPORT_SYMBOL(ia64_pal_call_stacked);
+EXPORT_SYMBOL(ia64_pal_call_static);
+EXPORT_SYMBOL(ia64_load_scratch_fpregs);
+EXPORT_SYMBOL(ia64_save_scratch_fpregs);
+
+#include <asm/unwind.h>
+EXPORT_SYMBOL(unw_init_running);
+
+#ifdef ASM_SUPPORTED
+# ifdef CONFIG_SMP
+#  if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+/*
+ * This is not a normal routine and we don't want a function descriptor for it, so we use
+ * a fake declaration here.
+ */
+extern char ia64_spinlock_contention_pre3_4;
+EXPORT_SYMBOL(ia64_spinlock_contention_pre3_4);
+#  else
+/*
+ * This is not a normal routine and we don't want a function descriptor for it, so we use
+ * a fake declaration here.
+ */
+extern char ia64_spinlock_contention;
+EXPORT_SYMBOL(ia64_spinlock_contention);
+#  endif
+# endif
+#endif
+
+extern char ia64_ivt[];
+EXPORT_SYMBOL(ia64_ivt);
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/irq_lsapic.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/irq_lsapic.c  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,37 @@
+/*
+ * LSAPIC Interrupt Controller
+ *
+ * This takes care of interrupts that are generated by the CPU's
+ * internal Streamlined Advanced Programmable Interrupt Controller
+ * (LSAPIC), such as the ITC and IPI interrupts.
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ * Copyright (C) 2000 Hewlett-Packard Co
+ * Copyright (C) 2000 David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <linux/sched.h>
+#include <linux/irq.h>
+
+static unsigned int
+lsapic_noop_startup (unsigned int irq)
+{
+       return 0;
+}
+
+static void
+lsapic_noop (unsigned int irq)
+{
+       /* nothing to do... */
+}
+
+struct hw_interrupt_type irq_type_ia64_lsapic = {
+       .typename =     "LSAPIC",
+       .startup =      lsapic_noop_startup,
+       .shutdown =     lsapic_noop,
+       .enable =       lsapic_noop,
+       .disable =      lsapic_noop,
+       .ack =          lsapic_noop,
+       .end =          lsapic_noop
+};
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/Makefile
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/Makefile  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,44 @@
+#
+# Makefile for ia64-specific library routines..
+#
+
+include $(BASEDIR)/Rules.mk
+
+OBJS := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o                  \
+       __divdi3.o __udivdi3.o __moddi3.o __umoddi3.o                   \
+       bitop.o checksum.o clear_page.o csum_partial_copy.o copy_page.o \
+       clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o   \
+       flush.o ip_fast_csum.o do_csum.o copy_user.o                    \
+       memset.o strlen.o memcpy.o 
+
+default: $(OBJS)
+       $(LD) -r -o ia64lib.o $(OBJS)
+
+AFLAGS += -I$(BASEDIR)/include -D__ASSEMBLY__
+
+__divdi3.o: idiv64.S
+       $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -o $@ $<
+
+__udivdi3.o: idiv64.S
+       $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -DUNSIGNED -c -o $@ $<
+
+__moddi3.o: idiv64.S
+       $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -DMODULO -c -o $@ $<
+
+__umoddi3.o: idiv64.S
+       $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -DMODULO -DUNSIGNED -c -o $@ $<
+
+__divsi3.o: idiv32.S
+       $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -o $@ $<
+
+__udivsi3.o: idiv32.S
+       $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -DUNSIGNED -c -o $@ $<
+
+__modsi3.o: idiv32.S
+       $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -DMODULO -c -o $@ $<
+
+__umodsi3.o: idiv32.S
+       $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -DMODULO -DUNSIGNED -c -o $@ $<
+
+clean:
+       rm -f *.o *~
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/bitop.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/bitop.c   Tue Aug  2 23:59:09 2005
@@ -0,0 +1,88 @@
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/intrinsics.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+
+/*
+ * Find next zero bit in a bitmap reasonably efficiently..
+ */
+
+int __find_next_zero_bit (const void *addr, unsigned long size, unsigned long offset)
+{
+       unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
+       unsigned long result = offset & ~63UL;
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;
+       size -= result;
+       offset &= 63UL;
+       if (offset) {
+               tmp = *(p++);
+               tmp |= ~0UL >> (64-offset);
+               if (size < 64)
+                       goto found_first;
+               if (~tmp)
+                       goto found_middle;
+               size -= 64;
+               result += 64;
+       }
+       while (size & ~63UL) {
+               if (~(tmp = *(p++)))
+                       goto found_middle;
+               result += 64;
+               size -= 64;
+       }
+       if (!size)
+               return result;
+       tmp = *p;
+found_first:
+       tmp |= ~0UL << size;
+       if (tmp == ~0UL)                /* any bits zero? */
+               return result + size;   /* nope */
+found_middle:
+       return result + ffz(tmp);
+}
+EXPORT_SYMBOL(__find_next_zero_bit);
+
+/*
+ * Find next bit in a bitmap reasonably efficiently..
+ */
+int __find_next_bit(const void *addr, unsigned long size, unsigned long offset)
+{
+       unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
+       unsigned long result = offset & ~63UL;
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;
+       size -= result;
+       offset &= 63UL;
+       if (offset) {
+               tmp = *(p++);
+               tmp &= ~0UL << offset;
+               if (size < 64)
+                       goto found_first;
+               if (tmp)
+                       goto found_middle;
+               size -= 64;
+               result += 64;
+       }
+       while (size & ~63UL) {
+               if ((tmp = *(p++)))
+                       goto found_middle;
+               result += 64;
+               size -= 64;
+       }
+       if (!size)
+               return result;
+       tmp = *p;
+  found_first:
+       tmp &= ~0UL >> (64-size);
+       if (tmp == 0UL)         /* Are any bits set? */
+               return result + size; /* Nope. */
+  found_middle:
+       return result + __ffs(tmp);
+}
+EXPORT_SYMBOL(__find_next_bit);
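
[Illustration, not part of the patch: for readers who want to sanity-check the word-at-a-time bitmap search above, here is a small user-space reference in C. Function and variable names are invented, and it assumes a 64-bit unsigned long as on ia64; it scans bit by bit, which is what __find_next_zero_bit() computes a word at a time.]

#include <stdio.h>

/* bit i of the map lives in word i>>6, at position i&63 (64-bit words) */
static unsigned long find_next_zero_bit_ref(const unsigned long *addr,
                                            unsigned long size,
                                            unsigned long offset)
{
        unsigned long i;

        for (i = offset; i < size; i++)
                if (!((addr[i >> 6] >> (i & 63)) & 1UL))
                        return i;               /* first clear bit found */
        return size;                            /* no zero bit in range  */
}

int main(void)
{
        unsigned long map[2] = { ~0UL, 0xffUL };        /* bits 0..71 set */

        /* first zero bit at or after bit 3 should be bit 72 */
        printf("%lu\n", find_next_zero_bit_ref(map, 128, 3));
        return 0;
}
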
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/carta_random.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/carta_random.S    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,54 @@
+/*
+ * Fast, simple, yet decent quality random number generator based on
+ * a paper by David G. Carta ("Two Fast Implementations of the
+ * `Minimal Standard' Random Number Generator," Communications of the
+ * ACM, January, 1990).
+ *
+ * Copyright (C) 2002 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <asm/asmmacro.h>
+
+#define a      r2
+#define m      r3
+#define lo     r8
+#define hi     r9
+#define t0     r16
+#define t1     r17
+#define        seed    r32
+
+GLOBAL_ENTRY(carta_random32)
+       movl    a = (16807 << 16) | 16807
+       ;;
+       pmpyshr2.u t0 = a, seed, 0
+       pmpyshr2.u t1 = a, seed, 16
+       ;;
+       unpack2.l t0 = t1, t0
+       dep     m = -1, r0, 0, 31
+       ;;
+       zxt4    lo = t0
+       shr.u   hi = t0, 32
+       ;;
+       dep     t0 = 0, hi, 15, 49      // t0 = (hi & 0x7fff)
+       ;;
+       shl     t0 = t0, 16             // t0 = (hi & 0x7fff) << 16
+       shr     t1 = hi, 15             // t1 = (hi >> 15)
+       ;;
+       add     lo = lo, t0
+       ;;
+       cmp.gtu p6, p0 = lo, m
+       ;;
+(p6)   and     lo = lo, m
+       ;;
+(p6)   add     lo = 1, lo
+       ;;
+       add     lo = lo, t1
+       ;;
+       cmp.gtu p6, p0 = lo, m
+       ;;
+(p6)   and     lo = lo, m
+       ;;
+(p6)   add     lo = 1, lo
+       br.ret.sptk.many rp
+END(carta_random32)
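
[Illustration, not part of the patch: a plain C restatement of Carta's carry-folding trick for the "minimal standard" Lehmer generator x' = 16807*x mod (2^31 - 1). It is a sketch of the idea only, not a bit-exact transcription of carta_random32 above, and the helper name is invented.]

#include <stdio.h>
#include <stdint.h>

/* x' = 16807 * x mod (2^31 - 1), computed without a division */
static uint32_t carta_random31(uint32_t x)
{
        uint64_t p  = (uint64_t)16807 * x;              /* up to ~46 bits       */
        uint32_t lo = (uint32_t)(p & 0x7fffffff);       /* low 31 bits          */
        uint32_t hi = (uint32_t)(p >> 31);              /* the carried-out part */
        uint32_t r  = lo + hi;                          /* fold carry back in   */

        if (r > 0x7fffffff)                             /* still >= 2^31? fold once more */
                r = (r & 0x7fffffff) + 1;
        return r;
}

int main(void)
{
        uint32_t x = 1;                                 /* any seed in (0, 2^31-1) */
        int i;

        for (i = 0; i < 3; i++) {
                x = carta_random31(x);                  /* 16807, 282475249, ... */
                printf("%u\n", x);
        }
        return 0;
}
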
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/checksum.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/checksum.c        Tue Aug  2 23:59:09 2005
@@ -0,0 +1,102 @@
+/*
+ * Network checksum routines
+ *
+ * Copyright (C) 1999, 2003 Hewlett-Packard Co
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *
+ * Most of the code coming from arch/alpha/lib/checksum.c
+ *
+ * This file contains network checksum routines that are better done
+ * in an architecture-specific manner due to speed..
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <asm/byteorder.h>
+
+static inline unsigned short
+from64to16 (unsigned long x)
+{
+       /* add up 32-bit words for 33 bits */
+       x = (x & 0xffffffff) + (x >> 32);
+       /* add up 16-bit and 17-bit words for 17+c bits */
+       x = (x & 0xffff) + (x >> 16);
+       /* add up 16-bit and 2-bit for 16+c bit */
+       x = (x & 0xffff) + (x >> 16);
+       /* add up carry.. */
+       x = (x & 0xffff) + (x >> 16);
+       return x;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented.
+ */
+unsigned short int
+csum_tcpudp_magic (unsigned long saddr, unsigned long daddr, unsigned short len,
+                  unsigned short proto, unsigned int sum)
+{
+       return ~from64to16(saddr + daddr + sum + ((unsigned long) ntohs(len) << 16) +
+                          ((unsigned long) proto << 8));
+}
+
+EXPORT_SYMBOL(csum_tcpudp_magic);
+
+unsigned int
+csum_tcpudp_nofold (unsigned long saddr, unsigned long daddr, unsigned short len,
+                   unsigned short proto, unsigned int sum)
+{
+       unsigned long result;
+
+       result = (saddr + daddr + sum +
+                 ((unsigned long) ntohs(len) << 16) +
+                 ((unsigned long) proto << 8));
+
+       /* Fold down to 32-bits so we don't lose in the typedef-less network stack.  */
+       /* 64 to 33 */
+       result = (result & 0xffffffff) + (result >> 32);
+       /* 33 to 32 */
+       result = (result & 0xffffffff) + (result >> 32);
+       return result;
+}
+
+extern unsigned long do_csum (const unsigned char *, long);
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+unsigned int
+csum_partial (const unsigned char * buff, int len, unsigned int sum)
+{
+       unsigned long result = do_csum(buff, len);
+
+       /* add in old sum, and carry.. */
+       result += sum;
+       /* 32+c bits -> 32 bits */
+       result = (result & 0xffffffff) + (result >> 32);
+       return result;
+}
+
+EXPORT_SYMBOL(csum_partial);
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+unsigned short
+ip_compute_csum (unsigned char * buff, int len)
+{
+       return ~do_csum(buff,len);
+}
+
+EXPORT_SYMBOL(ip_compute_csum);
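
[Illustration, not part of the patch: the folding scheme used by from64to16()/do_csum() can be checked against a simple byte-at-a-time reference. The sketch below uses invented names and the RFC 1071 style of summing 16-bit big-endian words and taking the one's complement; it is not claimed to be bit-for-bit identical to do_csum() on a little-endian machine.]

#include <stdio.h>

/* the same 64 -> 33 -> 17+c -> 16 bit folding as from64to16() above */
static unsigned short fold64to16(unsigned long long x)
{
        x = (x & 0xffffffffULL) + (x >> 32);
        x = (x & 0xffffULL) + (x >> 16);
        x = (x & 0xffffULL) + (x >> 16);
        x = (x & 0xffffULL) + (x >> 16);
        return (unsigned short)x;
}

/* sum 16-bit words, pad an odd trailing byte, complement at the end */
static unsigned short ip_csum_ref(const unsigned char *buf, int len)
{
        unsigned long long sum = 0;
        int i;

        for (i = 0; i + 1 < len; i += 2)
                sum += (unsigned)(buf[i] << 8 | buf[i + 1]);
        if (len & 1)
                sum += (unsigned)(buf[len - 1] << 8);

        return (unsigned short)~fold64to16(sum);
}

int main(void)
{
        /* worked example from RFC 1071; expected checksum is 0x220d */
        unsigned char data[] = { 0x00, 0x01, 0xf2, 0x03, 0xf4, 0xf5, 0xf6, 0xf7 };

        printf("0x%04x\n", ip_csum_ref(data, sizeof data));
        return 0;
}
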
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/clear_page.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/clear_page.S      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 1999-2002 Hewlett-Packard Co
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 2002 Ken Chen <kenneth.w.chen@xxxxxxxxx>
+ *
+ * 1/06/01 davidm      Tuned for Itanium.
+ * 2/12/02 kchen       Tuned for both Itanium and McKinley
+ * 3/08/02 davidm      Some more tweaking
+ */
+#include <linux/config.h>
+
+#include <asm/asmmacro.h>
+#include <asm/page.h>
+
+#ifdef CONFIG_ITANIUM
+# define L3_LINE_SIZE  64      // Itanium L3 line size
+# define PREFETCH_LINES        9       // magic number
+#else
+# define L3_LINE_SIZE  128     // McKinley L3 line size
+# define PREFETCH_LINES        12      // magic number
+#endif
+
+#define saved_lc       r2
+#define dst_fetch      r3
+#define dst1           r8
+#define dst2           r9
+#define dst3           r10
+#define dst4           r11
+
+#define dst_last       r31
+
+GLOBAL_ENTRY(clear_page)
+       .prologue
+       .regstk 1,0,0,0
+       mov r16 = PAGE_SIZE/L3_LINE_SIZE-1      // main loop count, -1=repeat/until
+       .save ar.lc, saved_lc
+       mov saved_lc = ar.lc
+
+       .body
+       mov ar.lc = (PREFETCH_LINES - 1)
+       mov dst_fetch = in0
+       adds dst1 = 16, in0
+       adds dst2 = 32, in0
+       ;;
+.fetch:        stf.spill.nta [dst_fetch] = f0, L3_LINE_SIZE
+       adds dst3 = 48, in0             // executing this multiple times is harmless
+       br.cloop.sptk.few .fetch
+       ;;
+       addl dst_last = (PAGE_SIZE - PREFETCH_LINES*L3_LINE_SIZE), dst_fetch
+       mov ar.lc = r16                 // one L3 line per iteration
+       adds dst4 = 64, in0
+       ;;
+#ifdef CONFIG_ITANIUM
+       // Optimized for Itanium
+1:     stf.spill.nta [dst1] = f0, 64
+       stf.spill.nta [dst2] = f0, 64
+       cmp.lt p8,p0=dst_fetch, dst_last
+       ;;
+#else
+       // Optimized for McKinley
+1:     stf.spill.nta [dst1] = f0, 64
+       stf.spill.nta [dst2] = f0, 64
+       stf.spill.nta [dst3] = f0, 64
+       stf.spill.nta [dst4] = f0, 128
+       cmp.lt p8,p0=dst_fetch, dst_last
+       ;;
+       stf.spill.nta [dst1] = f0, 64
+       stf.spill.nta [dst2] = f0, 64
+#endif
+       stf.spill.nta [dst3] = f0, 64
+(p8)   stf.spill.nta [dst_fetch] = f0, L3_LINE_SIZE
+       br.cloop.sptk.few 1b
+       ;;
+       mov ar.lc = saved_lc            // restore lc
+       br.ret.sptk.many rp
+END(clear_page)
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/clear_user.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/clear_user.S      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,209 @@
+/*
+ * This routine clears to zero a linear memory buffer in user space.
+ *
+ * Inputs:
+ *     in0:    address of buffer
+ *     in1:    length of buffer in bytes
+ * Outputs:
+ *     r8:     number of bytes that didn't get cleared due to a fault
+ *
+ * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ */
+
+#include <asm/asmmacro.h>
+
+//
+// arguments
+//
+#define buf            r32
+#define len            r33
+
+//
+// local registers
+//
+#define cnt            r16
+#define buf2           r17
+#define saved_lc       r18
+#define saved_pfs      r19
+#define tmp            r20
+#define len2           r21
+#define len3           r22
+
+//
+// Theory of operations:
+//     - we check whether or not the buffer is small, i.e., less than 17
+//       in which case we do the byte by byte loop.
+//
+//     - Otherwise we go progressively from 1 byte store to 8byte store in
+//       the head part, the body is a 16byte store loop and we finish with the
+//       tail for the last 15 bytes.
+//       The good point about this breakdown is that the long buffer handling
+//       contains only 2 branches.
+//
+//     The reason for not using shifting & masking for both the head and the
+//     tail is to stay semantically correct. This routine is not supposed
+//     to write bytes outside of the buffer. While most of the time this would
+//     be ok, we can't tolerate a mistake. A classical example is the case
+//     of multithreaded code where the extra bytes touched are actually owned
+//     by another thread which runs concurrently to ours. Another, less likely,
+//     example is with device drivers where reading an I/O mapped location may
+//     have side effects (same thing for writing).
+//
+
+GLOBAL_ENTRY(__do_clear_user)
+       .prologue
+       .save ar.pfs, saved_pfs
+       alloc   saved_pfs=ar.pfs,2,0,0,0
+       cmp.eq p6,p0=r0,len             // check for zero length
+       .save ar.lc, saved_lc
+       mov saved_lc=ar.lc              // preserve ar.lc (slow)
+       .body
+       ;;                              // avoid WAW on CFM
+       adds tmp=-1,len                 // br.ctop is repeat/until
+       mov ret0=len                    // return value is length at this point
+(p6)   br.ret.spnt.many rp
+       ;;
+       cmp.lt p6,p0=16,len             // if len > 16 then long memset
+       mov ar.lc=tmp                   // initialize lc for small count
+(p6)   br.cond.dptk .long_do_clear
+       ;;                              // WAR on ar.lc
+       //
+       // worst case 16 iterations, avg 8 iterations
+       //
+       // We could have played with the predicates to use the extra
+       // M slot for 2 stores/iteration but the cost the initialization
+       // the various counters compared to how long the loop is supposed
+       // to last on average does not make this solution viable.
+       //
+1:
+       EX( .Lexit1, st1 [buf]=r0,1 )
+       adds len=-1,len                 // countdown length using len
+       br.cloop.dptk 1b
+       ;;                              // avoid RAW on ar.lc
+       //
+       // .Lexit4: comes from byte by byte loop
+       //          len contains bytes left
+.Lexit1:
+       mov ret0=len                    // faster than using ar.lc
+       mov ar.lc=saved_lc
+       br.ret.sptk.many rp             // end of short clear_user
+
+
+       //
+       // At this point we know we have more than 16 bytes to copy
+       // so we focus on alignment (no branches required)
+       //
+       // The use of len/len2 for countdown of the number of bytes left
+       // instead of ret0 is due to the fact that the exception code
+       // changes the values of r8.
+       //
+.long_do_clear:
+       tbit.nz p6,p0=buf,0             // odd alignment (for long_do_clear)
+       ;;
+       EX( .Lexit3, (p6) st1 [buf]=r0,1 )      // 1-byte aligned
+(p6)   adds len=-1,len;;               // sync because buf is modified
+       tbit.nz p6,p0=buf,1
+       ;;
+       EX( .Lexit3, (p6) st2 [buf]=r0,2 )      // 2-byte aligned
+(p6)   adds len=-2,len;;
+       tbit.nz p6,p0=buf,2
+       ;;
+       EX( .Lexit3, (p6) st4 [buf]=r0,4 )      // 4-byte aligned
+(p6)   adds len=-4,len;;
+       tbit.nz p6,p0=buf,3
+       ;;
+       EX( .Lexit3, (p6) st8 [buf]=r0,8 )      // 8-byte aligned
+(p6)   adds len=-8,len;;
+       shr.u cnt=len,4         // number of 128-bit (2x64bit) words
+       ;;
+       cmp.eq p6,p0=r0,cnt
+       adds tmp=-1,cnt
+(p6)   br.cond.dpnt .dotail            // we have less than 16 bytes left
+       ;;
+       adds buf2=8,buf                 // setup second base pointer
+       mov ar.lc=tmp
+       ;;
+
+       //
+       // 16bytes/iteration core loop
+       //
+       // The second store can never generate a fault because
+       // we come into the loop only when we are 16-byte aligned.
+       // This means that if we cross a page then it will always be
+       // in the first store and never in the second.
+       //
+       //
+       // We need to keep track of the remaining length. A possible (optimistic)
+       // way would be to use ar.lc and derive how many byte were left by
+       // doing : left= 16*ar.lc + 16.  this would avoid the addition at
+       // every iteration.
+       // However we need to keep the synchronization point. A template
+       // M;;MB does not exist and thus we can keep the addition at no
+       // extra cycle cost (use a nop slot anyway). It also simplifies the
+       // (unlikely)  error recovery code
+       //
+
+2:     EX(.Lexit3, st8 [buf]=r0,16 )
+       ;;                              // needed to get len correct when error
+       st8 [buf2]=r0,16
+       adds len=-16,len
+       br.cloop.dptk 2b
+       ;;
+       mov ar.lc=saved_lc
+       //
+       // tail correction based on len only
+       //
+       // We alternate the use of len3,len2 to allow parallelism and correct
+       // error handling. We also reuse p6/p7 to return correct value.
+       // The addition of len2/len3 does not cost anything more compared to
+       // the regular memset as we had empty slots.
+       //
+.dotail:
+       mov len2=len                    // for parallelization of error handling
+       mov len3=len
+       tbit.nz p6,p0=len,3
+       ;;
+       EX( .Lexit2, (p6) st8 [buf]=r0,8 )      // at least 8 bytes
+(p6)   adds len3=-8,len2
+       tbit.nz p7,p6=len,2
+       ;;
+       EX( .Lexit2, (p7) st4 [buf]=r0,4 )      // at least 4 bytes
+(p7)   adds len2=-4,len3
+       tbit.nz p6,p7=len,1
+       ;;
+       EX( .Lexit2, (p6) st2 [buf]=r0,2 )      // at least 2 bytes
+(p6)   adds len3=-2,len2
+       tbit.nz p7,p6=len,0
+       ;;
+       EX( .Lexit2, (p7) st1 [buf]=r0 )        // only 1 byte left
+       mov ret0=r0                             // success
+       br.ret.sptk.many rp                     // end of most likely path
+
+       //
+       // Outlined error handling code
+       //
+
+       //
+       // .Lexit3: comes from core loop, need restore pr/lc
+       //          len contains bytes left
+       //
+       //
+       // .Lexit2:
+       //      if p6 -> coming from st8 or st2 : len2 contains what's left
+       //      if p7 -> coming from st4 or st1 : len3 contains what's left
+       // We must restore lc/pr even though they might not have been used.
+.Lexit2:
+       .pred.rel "mutex", p6, p7
+(p6)   mov len=len2
+(p7)   mov len=len3
+       ;;
+       //
+       // .Lexit4: comes from head, need not restore pr/lc
+       //          len contains bytes left
+       //
+.Lexit3:
+       mov ret0=len
+       mov ar.lc=saved_lc
+       br.ret.sptk.many rp
+END(__do_clear_user)
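
[Illustration, not part of the patch: a rough C sketch of the head/body/tail strategy __do_clear_user implements, with invented names, minus the EX()-based fault recovery that makes up most of the real routine, and with the byte-by-byte small-buffer/tail loop simplified to memset.]

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* head: align up with 1/2/4/8-byte stores; body: 16 bytes per iteration;
 * tail (and buffers shorter than 16 bytes): fall back to a plain clear.
 * Never touches a byte outside [buf, buf+len), which is the property the
 * comment above insists on. */
static void clear_buffer_sketch(void *buf, size_t len)
{
        uintptr_t p = (uintptr_t)buf;

        if (len >= 16) {
                if (p & 1) { *(uint8_t  *)p = 0; p += 1; len -= 1; }
                if (p & 2) { *(uint16_t *)p = 0; p += 2; len -= 2; }
                if (p & 4) { *(uint32_t *)p = 0; p += 4; len -= 4; }
                if (p & 8) { *(uint64_t *)p = 0; p += 8; len -= 8; }

                while (len >= 16) {                     /* two 8-byte stores */
                        ((uint64_t *)p)[0] = 0;
                        ((uint64_t *)p)[1] = 0;
                        p += 16;
                        len -= 16;
                }
        }

        memset((void *)p, 0, len);                      /* at most 15 bytes left */
}
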
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/copy_page.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/copy_page.S       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,98 @@
+/*
+ *
+ * Optimized version of the standard copy_page() function
+ *
+ * Inputs:
+ *     in0:    address of target page
+ *     in1:    address of source page
+ * Output:
+ *     no return value
+ *
+ * Copyright (C) 1999, 2001 Hewlett-Packard Co
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *     David Mosberger <davidm@xxxxxxxxxx>
+ *
+ * 4/06/01 davidm      Tuned to make it perform well both for cached and uncached copies.
+ */
+#include <asm/asmmacro.h>
+#include <asm/page.h>
+
+#define PIPE_DEPTH     3
+#define EPI            p[PIPE_DEPTH-1]
+
+#define lcount         r16
+#define saved_pr       r17
+#define saved_lc       r18
+#define saved_pfs      r19
+#define src1           r20
+#define src2           r21
+#define tgt1           r22
+#define tgt2           r23
+#define srcf           r24
+#define tgtf           r25
+#define tgt_last       r26
+
+#define Nrot           ((8*PIPE_DEPTH+7)&~7)
+
+GLOBAL_ENTRY(copy_page)
+       .prologue
+       .save ar.pfs, saved_pfs
+       alloc saved_pfs=ar.pfs,3,Nrot-3,0,Nrot
+
+       .rotr t1[PIPE_DEPTH], t2[PIPE_DEPTH], t3[PIPE_DEPTH], t4[PIPE_DEPTH], \
+             t5[PIPE_DEPTH], t6[PIPE_DEPTH], t7[PIPE_DEPTH], t8[PIPE_DEPTH]
+       .rotp p[PIPE_DEPTH]
+
+       .save ar.lc, saved_lc
+       mov saved_lc=ar.lc
+       mov ar.ec=PIPE_DEPTH
+
+       mov lcount=PAGE_SIZE/64-1
+       .save pr, saved_pr
+       mov saved_pr=pr
+       mov pr.rot=1<<16
+
+       .body
+
+       mov src1=in1
+       adds src2=8,in1
+       mov tgt_last = PAGE_SIZE
+       ;;
+       adds tgt2=8,in0
+       add srcf=512,in1
+       mov ar.lc=lcount
+       mov tgt1=in0
+       add tgtf=512,in0
+       add tgt_last = tgt_last, in0
+       ;;
+1:
+(p[0]) ld8 t1[0]=[src1],16
+(EPI)  st8 [tgt1]=t1[PIPE_DEPTH-1],16
+(p[0]) ld8 t2[0]=[src2],16
+(EPI)  st8 [tgt2]=t2[PIPE_DEPTH-1],16
+       cmp.ltu p6,p0 = tgtf, tgt_last
+       ;;
+(p[0]) ld8 t3[0]=[src1],16
+(EPI)  st8 [tgt1]=t3[PIPE_DEPTH-1],16
+(p[0]) ld8 t4[0]=[src2],16
+(EPI)  st8 [tgt2]=t4[PIPE_DEPTH-1],16
+       ;;
+(p[0]) ld8 t5[0]=[src1],16
+(EPI)  st8 [tgt1]=t5[PIPE_DEPTH-1],16
+(p[0]) ld8 t6[0]=[src2],16
+(EPI)  st8 [tgt2]=t6[PIPE_DEPTH-1],16
+       ;;
+(p[0]) ld8 t7[0]=[src1],16
+(EPI)  st8 [tgt1]=t7[PIPE_DEPTH-1],16
+(p[0]) ld8 t8[0]=[src2],16
+(EPI)  st8 [tgt2]=t8[PIPE_DEPTH-1],16
+
+(p6)   lfetch [srcf], 64
+(p6)   lfetch [tgtf], 64
+       br.ctop.sptk.few 1b
+       ;;
+       mov pr=saved_pr,0xffffffffffff0000      // restore predicates
+       mov ar.pfs=saved_pfs
+       mov ar.lc=saved_lc
+       br.ret.sptk.many rp
+END(copy_page)
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/copy_page_mck.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/copy_page_mck.S   Tue Aug  2 23:59:09 2005
@@ -0,0 +1,185 @@
+/*
+ * McKinley-optimized version of copy_page().
+ *
+ * Copyright (C) 2002 Hewlett-Packard Co
+ *     David Mosberger <davidm@xxxxxxxxxx>
+ *
+ * Inputs:
+ *     in0:    address of target page
+ *     in1:    address of source page
+ * Output:
+ *     no return value
+ *
+ * General idea:
+ *     - use regular loads and stores to prefetch data to avoid consuming M-slot just for
+ *       lfetches => good for in-cache performance
+ *     - avoid l2 bank-conflicts by not storing into the same 16-byte bank within a single
+ *       cycle
+ *
+ * Principle of operation:
+ *     First, note that L1 has a line-size of 64 bytes and L2 a line-size of 128 bytes.
+ *     To avoid secondary misses in L2, we prefetch both source and destination with a line-size
+ *     of 128 bytes.  When both of these lines are in the L2 and the first half of the
+ *     source line is in L1, we start copying the remaining words.  The second half of the
+ *     source line is prefetched in an earlier iteration, so that by the time we start
+ *     accessing it, it's also present in the L1.
+ *
+ *     We use a software-pipelined loop to control the overall operation.  The pipeline
+ *     has 2*PREFETCH_DIST+K stages.  The first PREFETCH_DIST stages are used for prefetching
+ *     source cache-lines.  The second PREFETCH_DIST stages are used for prefetching destination
+ *     cache-lines, the last K stages are used to copy the cache-line words not copied by
+ *     the prefetches.  The four relevant points in the pipeline are called A, B, C, D:
+ *     p[A] is TRUE if a source-line should be prefetched, p[B] is TRUE if a destination-line
+ *     should be prefetched, p[C] is TRUE if the second half of an L2 line should be brought
+ *     into L1D and p[D] is TRUE if a cacheline needs to be copied.
+ *
+ *     This all sounds very complicated, but thanks to the modulo-scheduled loop support,
+ *     the resulting code is very regular and quite easy to follow (once you get the idea).
+ *
+ *     As a secondary optimization, the first 2*PREFETCH_DIST iterations are implemented
+ *     as the separate .prefetch_loop.  Logically, this loop performs exactly like the
+ *     main-loop (.line_copy), but has all known-to-be-predicated-off instructions removed,
+ *     so that each loop iteration is faster (again, good for cached case).
+ *
+ *     When reading the code, it helps to keep the following picture in mind:
+ *
+ *            word 0 word 1
+ *            +------+------+---
+ *           | v[x] |  t1  | ^
+ *           | t2   |  t3  | |
+ *           | t4   |  t5  | |
+ *           | t6   |  t7  | | 128 bytes
+ *           | n[y] |  t9  | | (L2 cache line)
+ *           | t10  |  t11 | |
+ *           | t12  |  t13 | |
+ *           | t14  |  t15 | v
+ *           +------+------+---
+ *
+ *     Here, v[x] is copied by the (memory) prefetch.  n[y] is loaded at p[C]
+ *     to fetch the second-half of the L2 cache line into L1, and the tX words are copied in
+ *     an order that avoids bank conflicts.
+ */
+#include <asm/asmmacro.h>
+#include <asm/page.h>
+
+#define PREFETCH_DIST  8               // McKinley sustains 16 outstanding L2 misses (8 ld, 8 st)
+
+#define src0           r2
+#define src1           r3
+#define dst0           r9
+#define dst1           r10
+#define src_pre_mem    r11
+#define dst_pre_mem    r14
+#define src_pre_l2     r15
+#define dst_pre_l2     r16
+#define t1             r17
+#define t2             r18
+#define t3             r19
+#define t4             r20
+#define t5             t1      // alias!
+#define t6             t2      // alias!
+#define t7             t3      // alias!
+#define t9             t5      // alias!
+#define t10            t4      // alias!
+#define t11            t7      // alias!
+#define t12            t6      // alias!
+#define t14            t10     // alias!
+#define t13            r21
+#define t15            r22
+
+#define saved_lc       r23
+#define saved_pr       r24
+
+#define        A       0
+#define B      (PREFETCH_DIST)
+#define C      (B + PREFETCH_DIST)
+#define D      (C + 3)
+#define N      (D + 1)
+#define Nrot   ((N + 7) & ~7)
+
+GLOBAL_ENTRY(copy_page)
+       .prologue
+       alloc r8 = ar.pfs, 2, Nrot-2, 0, Nrot
+
+       .rotr v[2*PREFETCH_DIST], n[D-C+1]
+       .rotp p[N]
+
+       .save ar.lc, saved_lc
+       mov saved_lc = ar.lc
+       .save pr, saved_pr
+       mov saved_pr = pr
+       .body
+
+       mov src_pre_mem = in1
+       mov pr.rot = 0x10000
+       mov ar.ec = 1                           // special unrolled loop
+
+       mov dst_pre_mem = in0
+       mov ar.lc = 2*PREFETCH_DIST - 1
+
+       add src_pre_l2 = 8*8, in1
+       add dst_pre_l2 = 8*8, in0
+       add src0 = 8, in1                       // first t1 src
+       add src1 = 3*8, in1                     // first t3 src
+       add dst0 = 8, in0                       // first t1 dst
+       add dst1 = 3*8, in0                     // first t3 dst
+       mov t1 = (PAGE_SIZE/128) - (2*PREFETCH_DIST) - 1
+       nop.m 0
+       nop.i 0
+       ;;
+       // same as .line_copy loop, but with all predicated-off instructions removed:
+.prefetch_loop:
+(p[A]) ld8 v[A] = [src_pre_mem], 128           // M0
+(p[B]) st8 [dst_pre_mem] = v[B], 128           // M2
+       br.ctop.sptk .prefetch_loop
+       ;;
+       cmp.eq p16, p0 = r0, r0                 // reset p16 to 1 (br.ctop cleared it to zero)
+       mov ar.lc = t1                          // with 64KB pages, t1 is too big to fit in 8 bits!
+       mov ar.ec = N                           // # of stages in pipeline
+       ;;
+.line_copy:
+(p[D]) ld8 t2 = [src0], 3*8                    // M0
+(p[D]) ld8 t4 = [src1], 3*8                    // M1
+(p[B]) st8 [dst_pre_mem] = v[B], 128           // M2 prefetch dst from memory
+(p[D]) st8 [dst_pre_l2] = n[D-C], 128          // M3 prefetch dst from L2
+       ;;
+(p[A]) ld8 v[A] = [src_pre_mem], 128           // M0 prefetch src from memory
+(p[C]) ld8 n[0] = [src_pre_l2], 128            // M1 prefetch src from L2
+(p[D]) st8 [dst0] =  t1, 8                     // M2
+(p[D]) st8 [dst1] =  t3, 8                     // M3
+       ;;
+(p[D]) ld8  t5 = [src0], 8
+(p[D]) ld8  t7 = [src1], 3*8
+(p[D]) st8 [dst0] =  t2, 3*8
+(p[D]) st8 [dst1] =  t4, 3*8
+       ;;
+(p[D]) ld8  t6 = [src0], 3*8
+(p[D]) ld8 t10 = [src1], 8
+(p[D]) st8 [dst0] =  t5, 8
+(p[D]) st8 [dst1] =  t7, 3*8
+       ;;
+(p[D]) ld8  t9 = [src0], 3*8
+(p[D]) ld8 t11 = [src1], 3*8
+(p[D]) st8 [dst0] =  t6, 3*8
+(p[D]) st8 [dst1] = t10, 8
+       ;;
+(p[D]) ld8 t12 = [src0], 8
+(p[D]) ld8 t14 = [src1], 8
+(p[D]) st8 [dst0] =  t9, 3*8
+(p[D]) st8 [dst1] = t11, 3*8
+       ;;
+(p[D]) ld8 t13 = [src0], 4*8
+(p[D]) ld8 t15 = [src1], 4*8
+(p[D]) st8 [dst0] = t12, 8
+(p[D]) st8 [dst1] = t14, 8
+       ;;
+(p[D-1])ld8  t1 = [src0], 8
+(p[D-1])ld8  t3 = [src1], 8
+(p[D]) st8 [dst0] = t13, 4*8
+(p[D]) st8 [dst1] = t15, 4*8
+       br.ctop.sptk .line_copy
+       ;;
+       mov ar.lc = saved_lc
+       mov pr = saved_pr, -1
+       br.ret.sptk.many rp
+END(copy_page)
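
[Illustration, not part of the patch: a simplified C model of the prefetch pipeline described in the header comment above. It uses GCC's __builtin_prefetch hint purely to show the "stay PREFETCH_DIST lines ahead" idea, whereas the assembly deliberately uses ordinary loads and stores rather than lfetch; the page size and names here are example assumptions.]

#include <string.h>

#define LINE            128             /* L2 line size on McKinley          */
#define PAGE_SZ         16384           /* example page size (assumption)    */
#define PREFETCH_DIST   8               /* lines kept in flight, as above    */

static void copy_page_model(char *dst, const char *src)
{
        long i, nlines = PAGE_SZ / LINE;

        for (i = 0; i < nlines; i++) {
                /* stages A/B of the pipeline: start pulling in the lines that
                 * the copy will need PREFETCH_DIST iterations from now */
                if (i + PREFETCH_DIST < nlines) {
                        __builtin_prefetch(src + (i + PREFETCH_DIST) * LINE, 0);
                        __builtin_prefetch(dst + (i + PREFETCH_DIST) * LINE, 1);
                }
                /* stage D: copy the current (by now cached) line */
                memcpy(dst + i * LINE, src + i * LINE, LINE);
        }
}
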
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/copy_user.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/copy_user.S       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,610 @@
+/*
+ *
+ * Optimized version of the copy_user() routine.
+ * It is used to copy data across the kernel/user boundary.
+ *
+ * The source and destination are always on opposite side of
+ * the boundary. When reading from user space we must catch
+ * faults on loads. When writing to user space we must catch
+ * errors on stores. Note that because of the nature of the copy
+ * we don't need to worry about overlapping regions.
+ *
+ *
+ * Inputs:
+ *     in0     address of source buffer
+ *     in1     address of destination buffer
+ *     in2     number of bytes to copy
+ *
+ * Outputs:
+ *     ret0    0 in case of success. The number of bytes NOT copied in
+ *             case of error.
+ *
+ * Copyright (C) 2000-2001 Hewlett-Packard Co
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *
+ * Fixme:
+ *     - handle the case where we have more than 16 bytes and the alignments
+ *       are different.
+ *     - more benchmarking
+ *     - fix extraneous stop bit introduced by the EX() macro.
+ */
+
+#include <asm/asmmacro.h>
+
+//
+// Tuneable parameters
+//
+#define COPY_BREAK     16      // we do byte copy below (must be >=16)
+#define PIPE_DEPTH     21      // pipe depth
+
+#define EPI            p[PIPE_DEPTH-1]
+
+//
+// arguments
+//
+#define dst            in0
+#define src            in1
+#define len            in2
+
+//
+// local registers
+//
+#define t1             r2      // rshift in bytes
+#define t2             r3      // lshift in bytes
+#define rshift         r14     // right shift in bits
+#define lshift         r15     // left shift in bits
+#define word1          r16
+#define word2          r17
+#define cnt            r18
+#define len2           r19
+#define saved_lc       r20
+#define saved_pr       r21
+#define tmp            r22
+#define val            r23
+#define src1           r24
+#define dst1           r25
+#define src2           r26
+#define dst2           r27
+#define len1           r28
+#define enddst         r29
+#define endsrc         r30
+#define saved_pfs      r31
+
+GLOBAL_ENTRY(__copy_user)
+       .prologue
+       .save ar.pfs, saved_pfs
+       alloc saved_pfs=ar.pfs,3,((2*PIPE_DEPTH+7)&~7),0,((2*PIPE_DEPTH+7)&~7)
+
+       .rotr val1[PIPE_DEPTH],val2[PIPE_DEPTH]
+       .rotp p[PIPE_DEPTH]
+
+       adds len2=-1,len        // br.ctop is repeat/until
+       mov ret0=r0
+
+       ;;                      // RAW of cfm when len=0
+       cmp.eq p8,p0=r0,len     // check for zero length
+       .save ar.lc, saved_lc
+       mov saved_lc=ar.lc      // preserve ar.lc (slow)
+(p8)   br.ret.spnt.many rp     // empty memcpy()
+       ;;
+       add enddst=dst,len      // first byte after end of destination
+       add endsrc=src,len      // first byte after end of source
+       .save pr, saved_pr
+       mov saved_pr=pr         // preserve predicates
+
+       .body
+
+       mov dst1=dst            // copy because of rotation
+       mov ar.ec=PIPE_DEPTH
+       mov pr.rot=1<<16        // p16=true all others are false
+
+       mov src1=src            // copy because of rotation
+       mov ar.lc=len2          // initialize lc for small count
+       cmp.lt p10,p7=COPY_BREAK,len    // if len > COPY_BREAK then long copy
+
+       xor tmp=src,dst         // same alignment test prepare
+(p10)  br.cond.dptk .long_copy_user
+       ;;                      // RAW pr.rot/p16 ?
+       //
+       // Now we do the byte by byte loop with software pipeline
+       //
+       // p7 is necessarily false by now
+1:
+       EX(.failure_in_pipe1,(p16) ld1 val1[0]=[src1],1)
+       EX(.failure_out,(EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1)
+       br.ctop.dptk.few 1b
+       ;;
+       mov ar.lc=saved_lc
+       mov pr=saved_pr,0xffffffffffff0000
+       mov ar.pfs=saved_pfs            // restore ar.ec
+       br.ret.sptk.many rp             // end of short memcpy
+
+       //
+       // Not 8-byte aligned
+       //
+.diff_align_copy_user:
+       // At this point we know we have more than 16 bytes to copy
+       // and also that src and dest do _not_ have the same alignment.
+       and src2=0x7,src1                               // src offset
+       and dst2=0x7,dst1                               // dst offset
+       ;;
+       // The basic idea is that we copy byte-by-byte at the head so
+       // that we can reach 8-byte alignment for both src1 and dst1.
+       // Then copy the body using software pipelined 8-byte copy,
+       // shifting the two back-to-back words right and left, then copy
+       // the tail by copying byte-by-byte.
+       //
+       // Fault handling. If the byte-by-byte at the head fails on the
+       // load, then restart and finish the pipeline by copying zeros
+       // to the dst1. Then copy zeros for the rest of dst1.
+       // If 8-byte software pipeline fails on the load, do the same as
+       // failure_in3 does. If the byte-by-byte at the tail fails, it is
+       // handled simply by failure_in_pipe1.
+       //
+       // The case p14 represents the source has more bytes in the
+       // first word (by the shifted part), whereas the p15 needs to
+       // copy some bytes from the 2nd word of the source that has the
+       // tail of the 1st of the destination.
+       //
+
+       //
+       // Optimization. If dst1 is 8-byte aligned (quite common), we don't need
+       // to copy the head to dst1, to start 8-byte copy software pipeline.
+       // We know src1 is not 8-byte aligned in this case.
+       //
+       cmp.eq p14,p15=r0,dst2
+(p15)  br.cond.spnt 1f
+       ;;
+       sub t1=8,src2
+       mov t2=src2
+       ;;
+       shl rshift=t2,3
+       sub len1=len,t1                                 // set len1
+       ;;
+       sub lshift=64,rshift
+       ;;
+       br.cond.spnt .word_copy_user
+       ;;
+1:
+       cmp.leu p14,p15=src2,dst2
+       sub t1=dst2,src2
+       ;;
+       .pred.rel "mutex", p14, p15
+(p14)  sub word1=8,src2                                // (8 - src offset)
+(p15)  sub t1=r0,t1                                    // absolute value
+(p15)  sub word1=8,dst2                                // (8 - dst offset)
+       ;;
+       // For the case p14, we don't need to copy the shifted part to
+       // the 1st word of destination.
+       sub t2=8,t1
+(p14)  sub word1=word1,t1
+       ;;
+       sub len1=len,word1                              // resulting len
+(p15)  shl rshift=t1,3                                 // in bits
+(p14)  shl rshift=t2,3
+       ;;
+(p14)  sub len1=len1,t1
+       adds cnt=-1,word1
+       ;;
+       sub lshift=64,rshift
+       mov ar.ec=PIPE_DEPTH
+       mov pr.rot=1<<16        // p16=true all others are false
+       mov ar.lc=cnt
+       ;;
+2:
+       EX(.failure_in_pipe2,(p16) ld1 val1[0]=[src1],1)
+       EX(.failure_out,(EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1)
+       br.ctop.dptk.few 2b
+       ;;
+       clrrrb
+       ;;
+.word_copy_user:
+       cmp.gtu p9,p0=16,len1
+(p9)   br.cond.spnt 4f                 // if (16 > len1) skip 8-byte copy
+       ;;
+       shr.u cnt=len1,3                // number of 64-bit words
+       ;;
+       adds cnt=-1,cnt
+       ;;
+       .pred.rel "mutex", p14, p15
+(p14)  sub src1=src1,t2
+(p15)  sub src1=src1,t1
+       //
+       // Now both src1 and dst1 point to an 8-byte aligned address. And
+       // we have more than 8 bytes to copy.
+       //
+       mov ar.lc=cnt
+       mov ar.ec=PIPE_DEPTH
+       mov pr.rot=1<<16        // p16=true all others are false
+       ;;
+3:
+       //
+       // The pipeline consists of 3 stages:
+       // 1 (p16):     Load a word from src1
+       // 2 (EPI_1):   Shift right pair, saving to tmp
+       // 3 (EPI):     Store tmp to dst1
+       //
+       // To make it simple, use at least 2 (p16) loops to set up val1[n]
+       // because we need 2 back-to-back val1[] to get tmp.
+       // Note that this implies EPI_2 must be p18 or greater.
+       //
+
+#define EPI_1          p[PIPE_DEPTH-2]
+#define SWITCH(pred, shift)    cmp.eq pred,p0=shift,rshift
+#define CASE(pred, shift)      \
+       (pred)  br.cond.spnt .copy_user_bit##shift
+#define BODY(rshift)                                           \
+.copy_user_bit##rshift:                                                \
+1:                                                             \
+       EX(.failure_out,(EPI) st8 [dst1]=tmp,8);                \
+(EPI_1) shrp tmp=val1[PIPE_DEPTH-2],val1[PIPE_DEPTH-1],rshift; \
+       EX(3f,(p16) ld8 val1[1]=[src1],8);                      \
+(p16)  mov val1[0]=r0;                                         \
+       br.ctop.dptk 1b;                                        \
+       ;;                                                      \
+       br.cond.sptk.many .diff_align_do_tail;                  \
+2:                                                             \
+(EPI)  st8 [dst1]=tmp,8;                                       \
+(EPI_1)        shrp tmp=val1[PIPE_DEPTH-2],val1[PIPE_DEPTH-1],rshift;  \
+3:                                                             \
+(p16)  mov val1[1]=r0;                                         \
+(p16)  mov val1[0]=r0;                                         \
+       br.ctop.dptk 2b;                                        \
+       ;;                                                      \
+       br.cond.sptk.many .failure_in2
+
+       //
+       // Since the instruction 'shrp' requires a fixed 128-bit value
+       // specifying the bits to shift, we need to provide 7 cases
+       // below.
+       //
+       SWITCH(p6, 8)
+       SWITCH(p7, 16)
+       SWITCH(p8, 24)
+       SWITCH(p9, 32)
+       SWITCH(p10, 40)
+       SWITCH(p11, 48)
+       SWITCH(p12, 56)
+       ;;
+       CASE(p6, 8)
+       CASE(p7, 16)
+       CASE(p8, 24)
+       CASE(p9, 32)
+       CASE(p10, 40)
+       CASE(p11, 48)
+       CASE(p12, 56)
+       ;;
+       BODY(8)
+       BODY(16)
+       BODY(24)
+       BODY(32)
+       BODY(40)
+       BODY(48)
+       BODY(56)
+       ;;
+.diff_align_do_tail:
+       .pred.rel "mutex", p14, p15
+(p14)  sub src1=src1,t1
+(p14)  adds dst1=-8,dst1
+(p15)  sub dst1=dst1,t1
+       ;;
+4:
+       // Tail correction.
+       //
+       // The problem with this pipelined loop is that the last word is not
+       // loaded and thus part of the last word written is not correct.
+       // To fix that, we simply copy the tail byte by byte.
+
+       sub len1=endsrc,src1,1
+       clrrrb
+       ;;
+       mov ar.ec=PIPE_DEPTH
+       mov pr.rot=1<<16        // p16=true all others are false
+       mov ar.lc=len1
+       ;;
+5:
+       EX(.failure_in_pipe1,(p16) ld1 val1[0]=[src1],1)
+       EX(.failure_out,(EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1)
+       br.ctop.dptk.few 5b
+       ;;
+       mov ar.lc=saved_lc
+       mov pr=saved_pr,0xffffffffffff0000
+       mov ar.pfs=saved_pfs
+       br.ret.sptk.many rp
+
+       //
+       // Beginning of long memcpy (i.e. > 16 bytes)
+       //
+.long_copy_user:
+       tbit.nz p6,p7=src1,0    // odd alignment
+       and tmp=7,tmp
+       ;;
+       cmp.eq p10,p8=r0,tmp
+       mov len1=len            // copy because of rotation
+(p8)   br.cond.dpnt .diff_align_copy_user
+       ;;
+       // At this point we know we have more than 16 bytes to copy
+       // and also that both src and dest have the same alignment
+       // which may not be the one we want. So for now we must move
+       // forward slowly until we reach 16byte alignment: no need to
+       // worry about reaching the end of buffer.
+       //
+       EX(.failure_in1,(p6) ld1 val1[0]=[src1],1)      // 1-byte aligned
+(p6)   adds len1=-1,len1;;
+       tbit.nz p7,p0=src1,1
+       ;;
+       EX(.failure_in1,(p7) ld2 val1[1]=[src1],2)      // 2-byte aligned
+(p7)   adds len1=-2,len1;;
+       tbit.nz p8,p0=src1,2
+       ;;
+       //
+       // Stop bit not required after ld4 because if we fail on ld4
+       // we have never executed the ld1, therefore st1 is not executed.
+       //
+       EX(.failure_in1,(p8) ld4 val2[0]=[src1],4)      // 4-byte aligned
+       ;;
+       EX(.failure_out,(p6) st1 [dst1]=val1[0],1)
+       tbit.nz p9,p0=src1,3
+       ;;
+       //
+       // Stop bit not required after ld8 because if we fail on ld8
+       // we have never executed the ld2, therefore st2 is not executed.
+       //
+       EX(.failure_in1,(p9) ld8 val2[1]=[src1],8)      // 8-byte aligned
+       EX(.failure_out,(p7) st2 [dst1]=val1[1],2)
+(p8)   adds len1=-4,len1
+       ;;
+       EX(.failure_out, (p8) st4 [dst1]=val2[0],4)
+(p9)   adds len1=-8,len1;;
+       shr.u cnt=len1,4                // number of 128-bit (2x64bit) words
+       ;;
+       EX(.failure_out, (p9) st8 [dst1]=val2[1],8)
+       tbit.nz p6,p0=len1,3
+       cmp.eq p7,p0=r0,cnt
+       adds tmp=-1,cnt                 // br.ctop is repeat/until
+(p7)   br.cond.dpnt .dotail            // we have less than 16 bytes left
+       ;;
+       adds src2=8,src1
+       adds dst2=8,dst1
+       mov ar.lc=tmp
+       ;;
+       //
+       // 16bytes/iteration
+       //
+2:
+       EX(.failure_in3,(p16) ld8 val1[0]=[src1],16)
+(p16)  ld8 val2[0]=[src2],16
+
+       EX(.failure_out, (EPI)  st8 [dst1]=val1[PIPE_DEPTH-1],16)
+(EPI)  st8 [dst2]=val2[PIPE_DEPTH-1],16
+       br.ctop.dptk 2b
+       ;;                      // RAW on src1 when fall through from loop
+       //
+       // Tail correction based on len only
+       //
+       // No matter where we come from (loop or test) the src1 pointer
+       // is 16 byte aligned AND we have less than 16 bytes to copy.
+       //
+.dotail:
+       EX(.failure_in1,(p6) ld8 val1[0]=[src1],8)      // at least 8 bytes
+       tbit.nz p7,p0=len1,2
+       ;;
+       EX(.failure_in1,(p7) ld4 val1[1]=[src1],4)      // at least 4 bytes
+       tbit.nz p8,p0=len1,1
+       ;;
+       EX(.failure_in1,(p8) ld2 val2[0]=[src1],2)      // at least 2 bytes
+       tbit.nz p9,p0=len1,0
+       ;;
+       EX(.failure_out, (p6) st8 [dst1]=val1[0],8)
+       ;;
+       EX(.failure_in1,(p9) ld1 val2[1]=[src1])        // only 1 byte left
+       mov ar.lc=saved_lc
+       ;;
+       EX(.failure_out,(p7) st4 [dst1]=val1[1],4)
+       mov pr=saved_pr,0xffffffffffff0000
+       ;;
+       EX(.failure_out, (p8)   st2 [dst1]=val2[0],2)
+       mov ar.pfs=saved_pfs
+       ;;
+       EX(.failure_out, (p9)   st1 [dst1]=val2[1])
+       br.ret.sptk.many rp
+
+
+       //
+       // Here we handle the case where the byte by byte copy fails
+       // on the load.
+       // Several factors make the zeroing of the rest of the buffer kind of
+       // tricky:
+       //      - the pipeline: loads/stores are not in sync (pipeline)
+       //
+       //        In the same loop iteration, the dst1 pointer does not directly
+       //        reflect where the faulty load was.
+       //
+       //      - pipeline effect
+       //        When you get a fault on load, you may have valid data from
+       //        previous loads not yet stored (still in transit). Such data must be
+       //        stored normally before moving on to zeroing the rest.
+       //
+       //      - single/multi dispersal independence.
+       //
+       // solution:
+       //      - we don't disrupt the pipeline, i.e. data in transit in
+       //        the software pipeline will eventually be moved to memory.
+       //        We simply replace the load with a simple mov and keep the
+       //        pipeline going. We can't really do this inline because
+       //        p16 is always reset to 1 when lc > 0.
+       //
+.failure_in_pipe1:
+       sub ret0=endsrc,src1    // number of bytes to zero, i.e. not copied
+1:
+(p16)  mov val1[0]=r0
+(EPI)  st1 [dst1]=val1[PIPE_DEPTH-1],1
+       br.ctop.dptk 1b
+       ;;
+       mov pr=saved_pr,0xffffffffffff0000
+       mov ar.lc=saved_lc
+       mov ar.pfs=saved_pfs
+       br.ret.sptk.many rp
+
+       //
+       // This is the case where the byte by byte copy fails on the load
+       // when we copy the head. We need to finish the pipeline and copy
+       // zeros for the rest of the destination. Since this happens
+       // at the top we still need to fill the body and tail.
+.failure_in_pipe2:
+       sub ret0=endsrc,src1    // number of bytes to zero, i.e. not copied
+2:
+(p16)  mov val1[0]=r0
+(EPI)  st1 [dst1]=val1[PIPE_DEPTH-1],1
+       br.ctop.dptk 2b
+       ;;
+       sub len=enddst,dst1,1           // precompute len
+       br.cond.dptk.many .failure_in1bis
+       ;;
+
+       //
+       // Here we handle the head & tail part when we check for alignment.
+       // The following code handles only the load failures. The
+       // main difficulty comes from the fact that loads/stores are
+       // scheduled. So when you fail on a load, the stores corresponding
+       // to previous successful loads must be executed.
+       //
+       // However some simplifications are possible given the way
+       // things work.
+       //
+       // 1) HEAD
+       // Theory of operation:
+       //
+       //  Page A   | Page B
+       //  ---------|-----
+       //          1|8 x
+       //        1 2|8 x
+       //          4|8 x
+       //        1 4|8 x
+       //        2 4|8 x
+       //      1 2 4|8 x
+       //           |1
+       //           |2 x
+       //           |4 x
+       //
+       // page_size >= 4k (2^12).  (x means 4, 2, 1)
+       // Here we suppose Page A exists and Page B does not.
+       //
+       // As we move towards eight byte alignment we may encounter faults.
+       // The numbers on each page show the size of the load (current alignment).
+       //
+       // Key point:
+       //      - if you fail on 1, 2, 4 then you have never executed any smaller
+       //        size loads, e.g. failing ld4 means no ld1 nor ld2 executed
+       //        before.
+       //
+       // This allows us to simplify the cleanup code, because basically you
+       // only have to worry about "pending" stores in the case of a failing
+       // ld8(). Given the way the code is written today, this means only
+       // worry about st2, st4. There we can use the information encapsulated
+       // into the predicates.
+       //
+       // Other key point:
+       //      - if you fail on the ld8 in the head, it means you went straight
+       //        to it, i.e. 8byte alignment within a nonexistent page.
+       // Again this comes from the fact that if you crossed just for the ld8 then
+       // you are 8byte aligned but also 16byte aligned, therefore you would
+       // either go for the 16byte copy loop OR the ld8 in the tail part.
+       // The combination ld1, ld2, ld4, ld8 where you fail on ld8 is impossible
+       // because it would mean you had 15bytes to copy in which case you
+       // would have defaulted to the byte by byte copy.
+       //
+       //
+       // 2) TAIL
+       // Here we know we have less than 16 bytes AND we are either 8 or 16 byte
+       // aligned.
+       //
+       // Key point:
+       // This means that we either:
+       //              - are right on a page boundary
+       //      OR
+       //              - are at more than 16 bytes from a page boundary with
+       //                at most 15 bytes to copy: no chance of crossing.
+       //
+       // This allows us to assume that if we fail on a load we haven't possibly
+       // executed any of the previous (tail) ones, so we don't need to do
+       // any stores. For instance, if we fail on ld2, this means we had
+       // 2 or 3 bytes left to copy and we did not execute the ld8 nor ld4.
+       //
+       // This means that we are in a situation similar to a fault in the
+       // head part. That's nice!
+       //
+.failure_in1:
+       sub ret0=endsrc,src1    // number of bytes to zero, i.e. not copied
+       sub len=endsrc,src1,1
+       //
+       // we know that ret0 can never be zero at this point
+       // because we failed while trying to do a load, i.e. there is still
+       // some work to do.
+       // The failure_in1bis and length problem is taken care of at the
+       // calling side.
+       //
+       ;;
+.failure_in1bis:               // from (.failure_in3)
+       mov ar.lc=len           // Continue with a stupid byte store.
+       ;;
+5:
+       st1 [dst1]=r0,1
+       br.cloop.dptk 5b
+       ;;
+       mov pr=saved_pr,0xffffffffffff0000
+       mov ar.lc=saved_lc
+       mov ar.pfs=saved_pfs
+       br.ret.sptk.many rp
+
+       //
+       // Here we simply restart the loop but instead
+       // of doing loads we fill the pipeline with zeroes
+       // We can't simply store r0 because we may have valid
+       // data in transit in the pipeline.
+       // ar.lc and ar.ec are setup correctly at this point
+       //
+       // we MUST use src1/endsrc here and not dst1/enddst because
+       // of the pipeline effect.
+       //
+.failure_in3:
+       sub ret0=endsrc,src1    // number of bytes to zero, i.e. not copied
+       ;;
+2:
+(p16)  mov val1[0]=r0
+(p16)  mov val2[0]=r0
+(EPI)  st8 [dst1]=val1[PIPE_DEPTH-1],16
+(EPI)  st8 [dst2]=val2[PIPE_DEPTH-1],16
+       br.ctop.dptk 2b
+       ;;
+       cmp.ne p6,p0=dst1,enddst        // Do we need to finish the tail ?
+       sub len=enddst,dst1,1           // precompute len
+(p6)   br.cond.dptk .failure_in1bis
+       ;;
+       mov pr=saved_pr,0xffffffffffff0000
+       mov ar.lc=saved_lc
+       mov ar.pfs=saved_pfs
+       br.ret.sptk.many rp
+
+.failure_in2:
+       sub ret0=endsrc,src1
+       cmp.ne p6,p0=dst1,enddst        // Do we need to finish the tail ?
+       sub len=enddst,dst1,1           // precompute len
+(p6)   br.cond.dptk .failure_in1bis
+       ;;
+       mov pr=saved_pr,0xffffffffffff0000
+       mov ar.lc=saved_lc
+       mov ar.pfs=saved_pfs
+       br.ret.sptk.many rp
+
+       //
+       // handling of failures on stores: that's the easy part
+       //
+.failure_out:
+       sub ret0=enddst,dst1
+       mov pr=saved_pr,0xffffffffffff0000
+       mov ar.lc=saved_lc
+
+       mov ar.pfs=saved_pfs
+       br.ret.sptk.many rp
+END(__copy_user)
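
[Illustration, not part of the patch: the trickiest part of __copy_user is the shrp-based body for mismatched alignment. The stand-alone C sketch below, with invented names and fault handling omitted, shows the same idea on a little-endian machine: load the source 8-byte aligned and merge adjacent words with a pair of shifts.]

#include <stddef.h>
#include <stdint.h>

/* Copy n aligned 8-byte words to dst from a source that is off bytes
 * (1..7) past an 8-byte boundary.  Each destination word is built from two
 * adjacent aligned source words, which is what the shrp in BODY(rshift)
 * computes.  Note the read-ahead: the aligned stream is read one word
 * further than the bytes actually written, which is why the real routine
 * finishes the tail byte by byte instead. */
static void copy_misaligned_words(uint64_t *dst, const uint8_t *src,
                                  size_t n, unsigned off)
{
        const uint64_t *s = (const uint64_t *)(src - off);      /* aligned base */
        unsigned rshift = off * 8;                              /* in bits      */
        unsigned lshift = 64 - rshift;
        uint64_t cur = *s++;
        size_t i;

        for (i = 0; i < n; i++) {
                uint64_t next = *s++;
                dst[i] = (cur >> rshift) | (next << lshift);    /* shrp next,cur */
                cur = next;
        }
}
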
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/csum_partial_copy.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/csum_partial_copy.c       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,151 @@
+/*
+ * Network Checksum & Copy routine
+ *
+ * Copyright (C) 1999, 2003-2004 Hewlett-Packard Co
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *
+ * Most of the code has been imported from Linux/Alpha
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+#include <asm/uaccess.h>
+
+/*
+ * XXX Fixme: those 2 inlines are meant for debugging and will go away
+ */
+static inline unsigned
+short from64to16(unsigned long x)
+{
+       /* add up 32-bit words for 33 bits */
+       x = (x & 0xffffffff) + (x >> 32);
+       /* add up 16-bit and 17-bit words for 17+c bits */
+       x = (x & 0xffff) + (x >> 16);
+       /* add up 16-bit and 2-bit for 16+c bit */
+       x = (x & 0xffff) + (x >> 16);
+       /* add up carry.. */
+       x = (x & 0xffff) + (x >> 16);
+       return x;
+}
+
+static inline
+unsigned long do_csum_c(const unsigned char * buff, int len, unsigned int psum)
+{
+       int odd, count;
+       unsigned long result = (unsigned long)psum;
+
+       if (len <= 0)
+               goto out;
+       odd = 1 & (unsigned long) buff;
+       if (odd) {
+               result = *buff << 8;
+               len--;
+               buff++;
+       }
+       count = len >> 1;               /* nr of 16-bit words.. */
+       if (count) {
+               if (2 & (unsigned long) buff) {
+                       result += *(unsigned short *) buff;
+                       count--;
+                       len -= 2;
+                       buff += 2;
+               }
+               count >>= 1;            /* nr of 32-bit words.. */
+               if (count) {
+                       if (4 & (unsigned long) buff) {
+                               result += *(unsigned int *) buff;
+                               count--;
+                               len -= 4;
+                               buff += 4;
+                       }
+                       count >>= 1;    /* nr of 64-bit words.. */
+                       if (count) {
+                               unsigned long carry = 0;
+                               do {
+                                       unsigned long w = *(unsigned long *) buff;
+                                       count--;
+                                       buff += 8;
+                                       result += carry;
+                                       result += w;
+                                       carry = (w > result);
+                               } while (count);
+                               result += carry;
+                               result = (result & 0xffffffff) + (result >> 32);
+                       }
+                       if (len & 4) {
+                               result += *(unsigned int *) buff;
+                               buff += 4;
+                       }
+               }
+               if (len & 2) {
+                       result += *(unsigned short *) buff;
+                       buff += 2;
+               }
+       }
+       if (len & 1)
+               result += *buff;
+
+       result = from64to16(result);
+
+       if (odd)
+               result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
+
+out:
+       return result;
+}
+
+/*
+ * XXX Fixme
+ *
+ * This is very ugly but temporary. THIS NEEDS SERIOUS ENHANCEMENTS.
+ * But it's very tricky to get right even in C.
+ */
+extern unsigned long do_csum(const unsigned char *, long);
+
+static unsigned int
+do_csum_partial_copy_from_user (const unsigned char __user *src, unsigned char *dst,
+                               int len, unsigned int psum, int *errp)
+{
+       unsigned long result;
+
+       /* XXX Fixme
+        * for now we separate the copy from checksum for obvious
+        * alignment difficulties. Look at the Alpha code and you'll be
+        * scared.
+        */
+
+       if (__copy_from_user(dst, src, len) != 0 && errp)
+               *errp = -EFAULT;
+
+       result = do_csum(dst, len);
+
+       /* add in old sum, and carry.. */
+       result += psum;
+       /* 32+c bits -> 32 bits */
+       result = (result & 0xffffffff) + (result >> 32);
+       return result;
+}
+
+unsigned int
+csum_partial_copy_from_user (const unsigned char __user *src, unsigned char *dst,
+                            int len, unsigned int sum, int *errp)
+{
+       if (!access_ok(VERIFY_READ, src, len)) {
+               *errp = -EFAULT;
+               memset(dst, 0, len);
+               return sum;
+       }
+
+       return do_csum_partial_copy_from_user(src, dst, len, sum, errp);
+}
+
+unsigned int
+csum_partial_copy_nocheck(const unsigned char __user *src, unsigned char *dst,
+                         int len, unsigned int sum)
+{
+       return do_csum_partial_copy_from_user(src, dst, len, sum, NULL);
+}
+
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/dec_and_lock.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/dec_and_lock.c    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2003 Jerome Marchand, Bull S.A.
+ *     Cleaned up by David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *
+ * This file is released under the GPLv2, or at your option any later version.
+ *
+ * ia64 version of "atomic_dec_and_lock()" using the atomic "cmpxchg" instruction.  This
+ * code is an adaptation of the x86 version of "atomic_dec_and_lock()".
+ */
+
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+
+/*
+ * Decrement REFCOUNT and if the count reaches zero, acquire the spinlock.  Both of these
+ * operations have to be done atomically, so that the count doesn't drop to zero without
+ * acquiring the spinlock first.
+ */
+int
+_atomic_dec_and_lock (atomic_t *refcount, spinlock_t *lock)
+{
+       int old, new;
+
+       do {
+               old = atomic_read(refcount);
+               new = old - 1;
+
+               if (unlikely (old == 1)) {
+                       /* oops, we may be decrementing to zero, do it the slow way... */
+                       spin_lock(lock);
+                       if (atomic_dec_and_test(refcount))
+                               return 1;
+                       spin_unlock(lock);
+                       return 0;
+               }
+       } while (cmpxchg(&refcount->counter, old, new) != old);
+       return 0;
+}
+
+EXPORT_SYMBOL(_atomic_dec_and_lock);
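A typical use of this primitive is releasing a refcounted object whose
teardown must run under a list lock.  A hedged sketch (struct my_obj and
list_lock are illustrative, not part of this patch):

    void put_object(struct my_obj *obj)
    {
            if (_atomic_dec_and_lock(&obj->refcount, &list_lock)) {
                    /* the count hit zero and we now hold list_lock */
                    list_del(&obj->node);
                    spin_unlock(&list_lock);
                    kfree(obj);
            }
            /* otherwise the count was only decremented; no lock was taken */
    }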
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/do_csum.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/do_csum.S Tue Aug  2 23:59:09 2005
@@ -0,0 +1,323 @@
+/*
+ *
+ * Optimized version of the standard do_csum() function
+ *
+ * Return: a 64bit quantity containing the 16bit Internet checksum
+ *
+ * Inputs:
+ *     in0: address of buffer to checksum (char *)
+ *     in1: length of the buffer (int)
+ *
+ * Copyright (C) 1999, 2001-2002 Hewlett-Packard Co
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *
+ * 02/04/22    Ken Chen <kenneth.w.chen@xxxxxxxxx>
+ *             Data locality study on the checksum buffer.
+ *             More optimization cleanup - remove excessive stop bits.
+ * 02/04/08    David Mosberger <davidm@xxxxxxxxxx>
+ *             More cleanup and tuning.
+ * 01/04/18    Jun Nakajima <jun.nakajima@xxxxxxxxx>
+ *             Clean up and optimize and the software pipeline, loading two
+ *             back-to-back 8-byte words per loop. Clean up the initialization
+ *             Clean up and optimize the software pipeline, loading two
+ *             Set CONFIG_IA64_LOAD_LATENCY to 1 or 2 (default).
+ */
+
+#include <asm/asmmacro.h>
+
+//
+// Theory of operations:
+//     The goal is to go as quickly as possible to the point where
+//     we can checksum 16 bytes/loop. Before reaching that point we must
+//     take care of incorrect alignment of first byte.
+//
+//     The code hereafter also takes care of the "tail" part of the buffer
+//     before entering the core loop, if any. The checksum is a sum so it
+//     allows us to commute operations. So we do the "head" and "tail"
+//     first to finish at full speed in the body. Once we get the head and
+//     tail values, we feed them into the pipeline, very handy initialization.
+//
+//     Of course we deal with the special case where the whole buffer fits
+//     into one 8 byte word. In this case we have only one entry in the pipeline.
+//
+//     We use a (LOAD_LATENCY+2)-stage pipeline in the loop to account for
+//     possible load latency and also to accommodate for head and tail.
+//
+//     The end of the function deals with folding the checksum from 64bits
+//     down to 16bits taking care of the carry.
+//
+//     This version avoids synchronization in the core loop by also using a
+//     pipeline for the accumulation of the checksum in resultx[] (x=1,2).
+//
+//      wordx[] (x=1,2)
+//     |---|
+//      |   | 0                        : new value loaded in pipeline
+//     |---|
+//      |   | -                        : in transit data
+//     |---|
+//      |   | LOAD_LATENCY     : current value to add to checksum
+//     |---|
+//      |   | LOAD_LATENCY+1   : previous value added to checksum
+//      |---|                  (previous iteration)
+//
+//     resultx[] (x=1,2)
+//     |---|
+//      |   | 0                        : initial value
+//     |---|
+//      |   | LOAD_LATENCY-1   : new checksum
+//     |---|
+//      |   | LOAD_LATENCY     : previous value of checksum
+//     |---|
+//      |   | LOAD_LATENCY+1   : final checksum when out of the loop
+//      |---|
+//
+//
+//     See RFC1071 "Computing the Internet Checksum" for various techniques for
+//     calculating the Internet checksum.
+//
+// NOT YET DONE:
+//     - Maybe another algorithm which would take care of the folding at the
+//       end in a different manner
+//     - Work with people more knowledgeable than me on the network stack
+//       to figure out if we could not split the function depending on the
+//       type of packet or alignment we get. Like the ip_fast_csum() routine
+//       where we know we have at least 20bytes worth of data to checksum.
+//     - Do a better job of handling small packets.
+//     - Note on prefetching: it was found that under various loads, i.e. ftp read/write,
+//       nfs read/write, the L1 cache hit rate is at 60% and L2 cache hit rate is at 99.8%
+//       on the data that buffer points to (partly because the checksum is often preceded by
+//       a copy_from_user()).  This finding indicates that lfetch will not be beneficial since
+//       the data is already in the cache.
+//
+
+#define saved_pfs      r11
+#define hmask          r16
+#define tmask          r17
+#define first1         r18
+#define firstval       r19
+#define firstoff       r20
+#define last           r21
+#define lastval                r22
+#define lastoff                r23
+#define saved_lc       r24
+#define saved_pr       r25
+#define tmp1           r26
+#define tmp2           r27
+#define tmp3           r28
+#define carry1         r29
+#define carry2         r30
+#define first2         r31
+
+#define buf            in0
+#define len            in1
+
+#define LOAD_LATENCY   2       // XXX fix me
+
+#if (LOAD_LATENCY != 1) && (LOAD_LATENCY != 2)
+# error "Only 1 or 2 is supported/tested for LOAD_LATENCY."
+#endif
+
+#define PIPE_DEPTH                     (LOAD_LATENCY+2)
+#define ELD    p[LOAD_LATENCY]         // end of load
+#define ELD_1  p[LOAD_LATENCY+1]       // and next stage
+
+// unsigned long do_csum(unsigned char *buf,long len)
+
+GLOBAL_ENTRY(do_csum)
+       .prologue
+       .save ar.pfs, saved_pfs
+       alloc saved_pfs=ar.pfs,2,16,0,16
+       .rotr word1[4], word2[4],result1[LOAD_LATENCY+2],result2[LOAD_LATENCY+2]
+       .rotp p[PIPE_DEPTH], pC1[2], pC2[2]
+       mov ret0=r0             // in case we have zero length
+       cmp.lt p0,p6=r0,len     // check for zero length or negative (32bit len)
+       ;;
+       add tmp1=buf,len        // last byte's address
+       .save pr, saved_pr
+       mov saved_pr=pr         // preserve predicates (rotation)
+(p6)   br.ret.spnt.many rp     // return if zero or negative length
+
+       mov hmask=-1            // initialize head mask
+       tbit.nz p15,p0=buf,0    // is buf an odd address?
+       and first1=-8,buf       // 8-byte align down address of first1 element
+
+       and firstoff=7,buf      // how many bytes off for first1 element
+       mov tmask=-1            // initialize tail mask
+
+       ;;
+       adds tmp2=-1,tmp1       // last-1
+       and lastoff=7,tmp1      // how many bytes off for last element
+       ;;
+       sub tmp1=8,lastoff      // complement to lastoff
+       and last=-8,tmp2        // address of word containing last byte
+       ;;
+       sub tmp3=last,first1    // tmp3=distance from first1 to last
+       .save ar.lc, saved_lc
+       mov saved_lc=ar.lc      // save lc
+       cmp.eq p8,p9=last,first1        // everything fits in one word ?
+
+       ld8 firstval=[first1],8 // load, ahead of time, "first1" word
+       and tmp1=7, tmp1        // make sure that if tmp1==8 -> tmp1=0
+       shl tmp2=firstoff,3     // number of bits
+       ;;
+(p9)   ld8 lastval=[last]      // load, ahead of time, "last" word, if needed
+       shl tmp1=tmp1,3         // number of bits
+(p9)   adds tmp3=-8,tmp3       // effectively loaded
+       ;;
+(p8)   mov lastval=r0          // we don't need lastval if first1==last
+       shl hmask=hmask,tmp2    // build head mask, mask off [0,first1off[
+       shr.u tmask=tmask,tmp1  // build tail mask, mask off ]8,lastoff]
+       ;;
+       .body
+#define count tmp3
+
+(p8)   and hmask=hmask,tmask   // apply tail mask to head mask if 1 word only
+(p9)   and word2[0]=lastval,tmask      // mask last it as appropriate
+       shr.u count=count,3     // how many 8-byte?
+       ;;
+       // If count is odd, finish this 8-byte word so that we can
+       // load two back-to-back 8-byte words per loop thereafter.
+       and word1[0]=firstval,hmask     // and mask it as appropriate
+       tbit.nz p10,p11=count,0         // if (count is odd)
+       ;;
+(p8)   mov result1[0]=word1[0]
+(p9)   add result1[0]=word1[0],word2[0]
+       ;;
+       cmp.ltu p6,p0=result1[0],word1[0]       // check the carry
+       cmp.eq.or.andcm p8,p0=0,count           // exit if zero 8-byte
+       ;;
+(p6)   adds result1[0]=1,result1[0]
+(p8)   br.cond.dptk .do_csum_exit      // if (within an 8-byte word)
+(p11)  br.cond.dptk .do_csum16         // if (count is even)
+
+       // Here count is odd.
+       ld8 word1[1]=[first1],8         // load an 8-byte word
+       cmp.eq p9,p10=1,count           // if (count == 1)
+       adds count=-1,count             // loaded an 8-byte word
+       ;;
+       add result1[0]=result1[0],word1[1]
+       ;;
+       cmp.ltu p6,p0=result1[0],word1[1]
+       ;;
+(p6)   adds result1[0]=1,result1[0]
+(p9)   br.cond.sptk .do_csum_exit      // if (count == 1) exit
+       // Fall through to calculate the checksum, feeding result1[0] as
+       // the initial value in result1[0].
+       //
+       // Calculate the checksum loading two 8-byte words per loop.
+       //
+.do_csum16:
+       add first2=8,first1
+       shr.u count=count,1     // we do 16 bytes per loop
+       ;;
+       adds count=-1,count
+       mov carry1=r0
+       mov carry2=r0
+       brp.loop.imp 1f,2f
+       ;;
+       mov ar.ec=PIPE_DEPTH
+       mov ar.lc=count // set lc
+       mov pr.rot=1<<16
+       // result1[0] must be initialized in advance.
+       mov result2[0]=r0
+       ;;
+       .align 32
+1:
+(ELD_1)        cmp.ltu pC1[0],p0=result1[LOAD_LATENCY],word1[LOAD_LATENCY+1]
+(pC1[1])adds carry1=1,carry1
+(ELD_1)        cmp.ltu pC2[0],p0=result2[LOAD_LATENCY],word2[LOAD_LATENCY+1]
+(pC2[1])adds carry2=1,carry2
+(ELD)  add result1[LOAD_LATENCY-1]=result1[LOAD_LATENCY],word1[LOAD_LATENCY]
+(ELD)  add result2[LOAD_LATENCY-1]=result2[LOAD_LATENCY],word2[LOAD_LATENCY]
+2:
+(p[0]) ld8 word1[0]=[first1],16
+(p[0]) ld8 word2[0]=[first2],16
+       br.ctop.sptk 1b
+       ;;
+       // Since len is a 32-bit value, carry cannot be larger than a 64-bit value.
+(pC1[1])adds carry1=1,carry1   // since we miss the last one
+(pC2[1])adds carry2=1,carry2
+       ;;
+       add result1[LOAD_LATENCY+1]=result1[LOAD_LATENCY+1],carry1
+       add result2[LOAD_LATENCY+1]=result2[LOAD_LATENCY+1],carry2
+       ;;
+       cmp.ltu p6,p0=result1[LOAD_LATENCY+1],carry1
+       cmp.ltu p7,p0=result2[LOAD_LATENCY+1],carry2
+       ;;
+(p6)   adds result1[LOAD_LATENCY+1]=1,result1[LOAD_LATENCY+1]
+(p7)   adds result2[LOAD_LATENCY+1]=1,result2[LOAD_LATENCY+1]
+       ;;
+       add result1[0]=result1[LOAD_LATENCY+1],result2[LOAD_LATENCY+1]
+       ;;
+       cmp.ltu p6,p0=result1[0],result2[LOAD_LATENCY+1]
+       ;;
+(p6)   adds result1[0]=1,result1[0]
+       ;;
+.do_csum_exit:
+       //
+       // now fold 64 into 16 bits taking care of carry
+       // that's not very good because it has lots of sequentiality
+       //
+       mov tmp3=0xffff
+       zxt4 tmp1=result1[0]
+       shr.u tmp2=result1[0],32
+       ;;
+       add result1[0]=tmp1,tmp2
+       ;;
+       and tmp1=result1[0],tmp3
+       shr.u tmp2=result1[0],16
+       ;;
+       add result1[0]=tmp1,tmp2
+       ;;
+       and tmp1=result1[0],tmp3
+       shr.u tmp2=result1[0],16
+       ;;
+       add result1[0]=tmp1,tmp2
+       ;;
+       and tmp1=result1[0],tmp3
+       shr.u tmp2=result1[0],16
+       ;;
+       add ret0=tmp1,tmp2
+       mov pr=saved_pr,0xffffffffffff0000
+       ;;
+       // if buf was odd then swap bytes
+       mov ar.pfs=saved_pfs            // restore ar.ec
+(p15)  mux1 ret0=ret0,@rev             // reverse word
+       ;;
+       mov ar.lc=saved_lc
+(p15)  shr.u ret0=ret0,64-16   // + shift back to position = swap bytes
+       br.ret.sptk.many rp
+
+//     I (Jun Nakajima) wrote equivalent code (see below), but it was
+//     not much better than the original. So the original is kept here so that
+//     someone else can take up the challenge.
+//
+//     shr.u word1[0]=result1[0],32
+//     zxt4 result1[0]=result1[0]
+//     ;;
+//     add result1[0]=result1[0],word1[0]
+//     ;;
+//     zxt2 result2[0]=result1[0]
+//     extr.u word1[0]=result1[0],16,16
+//     shr.u carry1=result1[0],32
+//     ;;
+//     add result2[0]=result2[0],word1[0]
+//     ;;
+//     add result2[0]=result2[0],carry1
+//     ;;
+//     extr.u ret0=result2[0],16,16
+//     ;;
+//     add ret0=ret0,result2[0]
+//     ;;
+//     zxt2 ret0=ret0
+//     mov ar.pfs=saved_pfs             // restore ar.ec
+//     mov pr=saved_pr,0xffffffffffff0000
+//     ;;
+//     // if buf was odd then swap bytes
+//     mov ar.lc=saved_lc
+//(p15)        mux1 ret0=ret0,@rev             // reverse word
+//     ;;
+//(p15)        shr.u ret0=ret0,64-16   // + shift back to position = swap bytes
+//     br.ret.sptk.many rp
+
+END(do_csum)
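Stripped of the head/tail masking and the software pipeline, the value
do_csum accumulates is just a 64-bit ones'-complement sum over the buffer.
A minimal C model, assuming an aligned buffer and a length that is a
multiple of 8 (the function name is illustrative):

    static unsigned long do_csum_model(const unsigned char *buf, long len)
    {
            const unsigned long *p = (const unsigned long *) buf;
            unsigned long sum = 0;

            while (len >= 8) {
                    unsigned long w = *p++;

                    sum += w;
                    if (sum < w)    /* carry out of bit 63 ...              */
                            sum++;  /* ... wraps around, ones'-complement   */
                    len -= 8;
            }
            return sum;             /* the caller folds this down to 16 bits */
    }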
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/flush.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/flush.S   Tue Aug  2 23:59:09 2005
@@ -0,0 +1,39 @@
+/*
+ * Cache flushing routines.
+ *
+ * Copyright (C) 1999-2001 Hewlett-Packard Co
+ * Copyright (C) 1999-2001 David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+#include <asm/asmmacro.h>
+#include <asm/page.h>
+
+       /*
+        * flush_icache_range(start,end)
+        *      Must flush range from start to end-1 but nothing else (need to
+        *      be careful not to touch addresses that may be unmapped).
+        */
+GLOBAL_ENTRY(flush_icache_range)
+       .prologue
+       alloc r2=ar.pfs,2,0,0,0
+       sub r8=in1,in0,1
+       ;;
+       shr.u r8=r8,5                   // we flush 32 bytes per iteration
+       .save ar.lc, r3
+       mov r3=ar.lc                    // save ar.lc
+       ;;
+
+       .body
+
+       mov ar.lc=r8
+       ;;
+.Loop: fc in0                          // issuable on M0 only
+       add in0=32,in0
+       br.cloop.sptk.few .Loop
+       ;;
+       sync.i
+       ;;
+       srlz.i
+       ;;
+       mov ar.lc=r3                    // restore ar.lc
+       br.ret.sptk.many rp
+END(flush_icache_range)
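In C terms the loop above issues one fc per 32-byte line and then
serializes.  A sketch using GCC inline assembly on ia64 (illustrative only;
the kernel normally goes through its ia64_fc/sync.i wrappers):

    static void flush_icache_range_model(unsigned long start, unsigned long end)
    {
            unsigned long addr;

            for (addr = start; addr < end; addr += 32)
                    asm volatile ("fc %0" :: "r"(addr) : "memory");  /* flush line */
            asm volatile (";;sync.i;;srlz.i;;" ::: "memory");        /* serialize  */
    }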
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/idiv32.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/idiv32.S  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2000 Hewlett-Packard Co
+ * Copyright (C) 2000 David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *
+ * 32-bit integer division.
+ *
+ * This code is based on the application note entitled "Divide, Square Root
+ * and Remainder Algorithms for the IA-64 Architecture".  This document
+ * is available as Intel document number 248725-002 or via the web at
+ * http://developer.intel.com/software/opensource/numerics/
+ *
+ * For more details on the theory behind these algorithms, see "IA-64
+ * and Elementary Functions" by Peter Markstein; HP Professional Books
+ * (http://www.hp.com/go/retailbooks/)
+ */
+
+#include <asm/asmmacro.h>
+
+#ifdef MODULO
+# define OP    mod
+#else
+# define OP    div
+#endif
+
+#ifdef UNSIGNED
+# define SGN   u
+# define EXTEND        zxt4
+# define INT_TO_FP(a,b)        fcvt.xuf.s1 a=b
+# define FP_TO_INT(a,b)        fcvt.fxu.trunc.s1 a=b
+#else
+# define SGN
+# define EXTEND        sxt4
+# define INT_TO_FP(a,b)        fcvt.xf a=b
+# define FP_TO_INT(a,b)        fcvt.fx.trunc.s1 a=b
+#endif
+
+#define PASTE1(a,b)    a##b
+#define PASTE(a,b)     PASTE1(a,b)
+#define NAME           PASTE(PASTE(__,SGN),PASTE(OP,si3))
+
+GLOBAL_ENTRY(NAME)
+       .regstk 2,0,0,0
+       // Transfer inputs to FP registers.
+       mov r2 = 0xffdd                 // r2 = -34 + 65535 (fp reg format bias)
+       EXTEND in0 = in0                // in0 = a
+       EXTEND in1 = in1                // in1 = b
+       ;;
+       setf.sig f8 = in0
+       setf.sig f9 = in1
+#ifdef MODULO
+       sub in1 = r0, in1               // in1 = -b
+#endif
+       ;;
+       // Convert the inputs to FP, to avoid FP software-assist faults.
+       INT_TO_FP(f8, f8)
+       INT_TO_FP(f9, f9)
+       ;;
+       setf.exp f7 = r2                // f7 = 2^-34
+       frcpa.s1 f6, p6 = f8, f9        // y0 = frcpa(b)
+       ;;
+(p6)   fmpy.s1 f8 = f8, f6             // q0 = a*y0
+(p6)   fnma.s1 f6 = f9, f6, f1         // e0 = -b*y0 + 1 
+       ;;
+#ifdef MODULO
+       setf.sig f9 = in1               // f9 = -b
+#endif
+(p6)   fma.s1 f8 = f6, f8, f8          // q1 = e0*q0 + q0
+(p6)   fma.s1 f6 = f6, f6, f7          // e1 = e0*e0 + 2^-34
+       ;;
+#ifdef MODULO
+       setf.sig f7 = in0
+#endif
+(p6)   fma.s1 f6 = f6, f8, f8          // q2 = e1*q1 + q1
+       ;;
+       FP_TO_INT(f6, f6)               // q = trunc(q2)
+       ;;
+#ifdef MODULO
+       xma.l f6 = f6, f9, f7           // r = q*(-b) + a
+       ;;
+#endif
+       getf.sig r8 = f6                // transfer result to result register
+       br.ret.sptk.many rp
+END(NAME)
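The assembly above is the frcpa-plus-Newton-Raphson scheme from the Intel
application note: start from an ~8-bit-accurate reciprocal, refine it twice,
and bias by 2^-34 so truncation lands on the correct quotient.  A rough C
rendering of the unsigned case (an illustration of the algebra only, not
bit-exact IA-64 arithmetic):

    static unsigned int udiv32_model(unsigned int a, unsigned int b)
    {
            double y0 = 1.0 / (double) b;          /* frcpa-style approximation */
            double q0 = (double) a * y0;           /* q0 = a*y0                 */
            double e0 = 1.0 - (double) b * y0;     /* e0 = -b*y0 + 1            */
            double q1 = e0 * q0 + q0;              /* q1 = e0*q0 + q0           */
            double e1 = e0 * e0 + 0x1p-34;         /* e1 = e0*e0 + 2^-34        */
            double q2 = e1 * q1 + q1;              /* q2 = e1*q1 + q1           */

            return (unsigned int) q2;              /* q  = trunc(q2)            */
    }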
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/idiv64.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/idiv64.S  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 1999-2000 Hewlett-Packard Co
+ * Copyright (C) 1999-2000 David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *
+ * 64-bit integer division.
+ *
+ * This code is based on the application note entitled "Divide, Square Root
+ * and Remainder Algorithms for the IA-64 Architecture".  This document
+ * is available as Intel document number 248725-002 or via the web at
+ * http://developer.intel.com/software/opensource/numerics/
+ *
+ * For more details on the theory behind these algorithms, see "IA-64
+ * and Elementary Functions" by Peter Markstein; HP Professional Books
+ * (http://www.hp.com/go/retailbooks/)
+ */
+
+#include <asm/asmmacro.h>
+
+#ifdef MODULO
+# define OP    mod
+#else
+# define OP    div
+#endif
+
+#ifdef UNSIGNED
+# define SGN   u
+# define INT_TO_FP(a,b)        fcvt.xuf.s1 a=b
+# define FP_TO_INT(a,b)        fcvt.fxu.trunc.s1 a=b
+#else
+# define SGN
+# define INT_TO_FP(a,b)        fcvt.xf a=b
+# define FP_TO_INT(a,b)        fcvt.fx.trunc.s1 a=b
+#endif
+
+#define PASTE1(a,b)    a##b
+#define PASTE(a,b)     PASTE1(a,b)
+#define NAME           PASTE(PASTE(__,SGN),PASTE(OP,di3))
+
+GLOBAL_ENTRY(NAME)
+       .regstk 2,0,0,0
+       // Transfer inputs to FP registers.
+       setf.sig f8 = in0
+       setf.sig f9 = in1
+       ;;
+       // Convert the inputs to FP, to avoid FP software-assist faults.
+       INT_TO_FP(f8, f8)
+       INT_TO_FP(f9, f9)
+       ;;
+       frcpa.s1 f11, p6 = f8, f9       // y0 = frcpa(b)
+       ;;
+(p6)   fmpy.s1 f7 = f8, f11            // q0 = a*y0
+(p6)   fnma.s1 f6 = f9, f11, f1        // e0 = -b*y0 + 1
+       ;;
+(p6)   fma.s1 f10 = f7, f6, f7         // q1 = q0*e0 + q0
+(p6)   fmpy.s1 f7 = f6, f6             // e1 = e0*e0
+       ;;
+#ifdef MODULO
+       sub in1 = r0, in1               // in1 = -b
+#endif
+(p6)   fma.s1 f10 = f10, f7, f10       // q2 = q1*e1 + q1
+(p6)   fma.s1 f6 = f11, f6, f11        // y1 = y0*e0 + y0
+       ;;
+(p6)   fma.s1 f6 = f6, f7, f6          // y2 = y1*e1 + y1
+(p6)   fnma.s1 f7 = f9, f10, f8        // r = -b*q2 + a
+       ;;
+#ifdef MODULO
+       setf.sig f8 = in0               // f8 = a
+       setf.sig f9 = in1               // f9 = -b
+#endif
+(p6)   fma.s1 f11 = f7, f6, f10        // q3 = r*y2 + q2
+       ;;
+       FP_TO_INT(f11, f11)             // q = trunc(q3)
+       ;;
+#ifdef MODULO
+       xma.l f11 = f11, f9, f8         // r = q*(-b) + a
+       ;;
+#endif
+       getf.sig r8 = f11               // transfer result to result register
+       br.ret.sptk.many rp
+END(NAME)
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/io.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/io.c      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,165 @@
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+
+/*
+ * Copy data from IO memory space to "real" memory space.
+ * This needs to be optimized.
+ */
+void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
+{
+       char *dst = to;
+
+       while (count) {
+               count--;
+               *dst++ = readb(from++);
+       }
+}
+EXPORT_SYMBOL(memcpy_fromio);
+
+/*
+ * Copy data from "real" memory space to IO memory space.
+ * This needs to be optimized.
+ */
+void memcpy_toio(volatile void __iomem *to, const void *from, long count)
+{
+       const char *src = from;
+
+       while (count) {
+               count--;
+               writeb(*src++, to++);
+       }
+}
+EXPORT_SYMBOL(memcpy_toio);
+
+/*
+ * "memset" on IO memory space.
+ * This needs to be optimized.
+ */
+void memset_io(volatile void __iomem *dst, int c, long count)
+{
+       unsigned char ch = (char)(c & 0xff);
+
+       while (count) {
+               count--;
+               writeb(ch, dst);
+               dst++;
+       }
+}
+EXPORT_SYMBOL(memset_io);
+
+#ifdef CONFIG_IA64_GENERIC
+
+#undef __ia64_inb
+#undef __ia64_inw
+#undef __ia64_inl
+#undef __ia64_outb
+#undef __ia64_outw
+#undef __ia64_outl
+#undef __ia64_readb
+#undef __ia64_readw
+#undef __ia64_readl
+#undef __ia64_readq
+#undef __ia64_readb_relaxed
+#undef __ia64_readw_relaxed
+#undef __ia64_readl_relaxed
+#undef __ia64_readq_relaxed
+#undef __ia64_writeb
+#undef __ia64_writew
+#undef __ia64_writel
+#undef __ia64_writeq
+#undef __ia64_mmiowb
+
+unsigned int
+__ia64_inb (unsigned long port)
+{
+       return ___ia64_inb(port);
+}
+
+unsigned int
+__ia64_inw (unsigned long port)
+{
+       return ___ia64_inw(port);
+}
+
+unsigned int
+__ia64_inl (unsigned long port)
+{
+       return ___ia64_inl(port);
+}
+
+void
+__ia64_outb (unsigned char val, unsigned long port)
+{
+       ___ia64_outb(val, port);
+}
+
+void
+__ia64_outw (unsigned short val, unsigned long port)
+{
+       ___ia64_outw(val, port);
+}
+
+void
+__ia64_outl (unsigned int val, unsigned long port)
+{
+       ___ia64_outl(val, port);
+}
+
+unsigned char
+__ia64_readb (void __iomem *addr)
+{
+       return ___ia64_readb (addr);
+}
+
+unsigned short
+__ia64_readw (void __iomem *addr)
+{
+       return ___ia64_readw (addr);
+}
+
+unsigned int
+__ia64_readl (void __iomem *addr)
+{
+       return ___ia64_readl (addr);
+}
+
+unsigned long
+__ia64_readq (void __iomem *addr)
+{
+       return ___ia64_readq (addr);
+}
+
+unsigned char
+__ia64_readb_relaxed (void __iomem *addr)
+{
+       return ___ia64_readb (addr);
+}
+
+unsigned short
+__ia64_readw_relaxed (void __iomem *addr)
+{
+       return ___ia64_readw (addr);
+}
+
+unsigned int
+__ia64_readl_relaxed (void __iomem *addr)
+{
+       return ___ia64_readl (addr);
+}
+
+unsigned long
+__ia64_readq_relaxed (void __iomem *addr)
+{
+       return ___ia64_readq (addr);
+}
+
+void
+__ia64_mmiowb(void)
+{
+       ___ia64_mmiowb();
+}
+
+#endif /* CONFIG_IA64_GENERIC */
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/ip_fast_csum.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/ip_fast_csum.S    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,90 @@
+/*
+ * Optimized version of the ip_fast_csum() function
+ * Used for calculating IP header checksum
+ *
+ * Return: 16bit checksum, complemented
+ *
+ * Inputs:
+ *      in0: address of buffer to checksum (char *)
+ *      in1: length of the buffer (int)
+ *
+ * Copyright (C) 2002 Intel Corp.
+ * Copyright (C) 2002 Ken Chen <kenneth.w.chen@xxxxxxxxx>
+ */
+
+#include <asm/asmmacro.h>
+
+/*
+ * Since we know that most likely this function is called with buf aligned
+ * on a 4-byte boundary and 20 bytes in length, we can execute rather quickly
+ * versus calling the generic version of do_csum, which has lots of overhead in
+ * handling various alignments and sizes.  However, due to the lack of constraints
+ * put on the function input argument, cases with alignment not on 4-byte or
+ * size not equal to 20 bytes will be handled by the generic do_csum function.
+ */
+
+#define in0    r32
+#define in1    r33
+#define ret0   r8
+
+GLOBAL_ENTRY(ip_fast_csum)
+       .prologue
+       .body
+       cmp.ne  p6,p7=5,in1     // size other than 20 byte?
+       and     r14=3,in0       // is it aligned on 4-byte?
+       add     r15=4,in0       // second source pointer
+       ;;
+       cmp.ne.or.andcm p6,p7=r14,r0
+       ;;
+(p7)   ld4     r20=[in0],8
+(p7)   ld4     r21=[r15],8
+(p6)   br.spnt .generic
+       ;;
+       ld4     r22=[in0],8
+       ld4     r23=[r15],8
+       ;;
+       ld4     r24=[in0]
+       add     r20=r20,r21
+       add     r22=r22,r23
+       ;;
+       add     r20=r20,r22
+       ;;
+       add     r20=r20,r24
+       ;;
+       shr.u   ret0=r20,16     // now need to add the carry
+       zxt2    r20=r20
+       ;;
+       add     r20=ret0,r20
+       ;;
+       shr.u   ret0=r20,16     // add carry again
+       zxt2    r20=r20
+       ;;
+       add     r20=ret0,r20
+       ;;
+       shr.u   ret0=r20,16
+       zxt2    r20=r20
+       ;;
+       add     r20=ret0,r20
+       ;;
+       andcm   ret0=-1,r20
+       .restore sp             // reset frame state
+       br.ret.sptk.many b0
+       ;;
+
+.generic:
+       .prologue
+       .save ar.pfs, r35
+       alloc   r35=ar.pfs,2,2,2,0
+       .save rp, r34
+       mov     r34=b0
+       .body
+       dep.z   out1=in1,2,30
+       mov     out0=in0
+       ;;
+       br.call.sptk.many b0=do_csum
+       ;;
+       andcm   ret0=-1,ret0
+       mov     ar.pfs=r35
+       mov     b0=r34
+       br.ret.sptk.many b0
+END(ip_fast_csum)
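For the common case the fast path handles (4-byte-aligned buffer, 20-byte
IPv4 header), the computation reduces to summing five 32-bit words, folding
the carries, and complementing.  A hedged C sketch of just that path:

    static unsigned short ip_fast_csum_model(const void *iph)
    {
            const unsigned int *w = iph;
            unsigned long sum = (unsigned long) w[0] + w[1] + w[2] + w[3] + w[4];

            sum = (sum & 0xffff) + (sum >> 16);   /* fold carries, as the   */
            sum = (sum & 0xffff) + (sum >> 16);   /* three shr.u/zxt2/add   */
            sum = (sum & 0xffff) + (sum >> 16);   /* rounds above do        */
            return (unsigned short) ~sum;         /* return the complement  */
    }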
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/memcpy.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/memcpy.S  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,301 @@
+/*
+ *
+ * Optimized version of the standard memcpy() function
+ *
+ * Inputs:
+ *     in0:    destination address
+ *     in1:    source address
+ *     in2:    number of bytes to copy
+ * Output:
+ *     no return value
+ *
+ * Copyright (C) 2000-2001 Hewlett-Packard Co
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+#include <asm/asmmacro.h>
+
+GLOBAL_ENTRY(memcpy)
+
+#      define MEM_LAT  21              /* latency to memory */
+
+#      define dst      r2
+#      define src      r3
+#      define retval   r8
+#      define saved_pfs r9
+#      define saved_lc r10
+#      define saved_pr r11
+#      define cnt      r16
+#      define src2     r17
+#      define t0       r18
+#      define t1       r19
+#      define t2       r20
+#      define t3       r21
+#      define t4       r22
+#      define src_end  r23
+
+#      define N        (MEM_LAT + 4)
+#      define Nrot     ((N + 7) & ~7)
+
+       /*
+        * First, check if everything (src, dst, len) is a multiple of eight.  If
+        * so, we handle everything with no taken branches (other than the loop
+        * itself) and a small icache footprint.  Otherwise, we jump off to
+        * the more general copy routine handling arbitrary
+        * sizes/alignment etc.
+        */
+       .prologue
+       .save ar.pfs, saved_pfs
+       alloc saved_pfs=ar.pfs,3,Nrot,0,Nrot
+       .save ar.lc, saved_lc
+       mov saved_lc=ar.lc
+       or t0=in0,in1
+       ;;
+
+       or t0=t0,in2
+       .save pr, saved_pr
+       mov saved_pr=pr
+
+       .body
+
+       cmp.eq p6,p0=in2,r0     // zero length?
+       mov retval=in0          // return dst
+(p6)   br.ret.spnt.many rp     // zero length, return immediately
+       ;;
+
+       mov dst=in0             // copy because of rotation
+       shr.u cnt=in2,3         // number of 8-byte words to copy
+       mov pr.rot=1<<16
+       ;;
+
+       adds cnt=-1,cnt         // br.ctop is repeat/until
+       cmp.gtu p7,p0=16,in2    // copying less than 16 bytes?
+       mov ar.ec=N
+       ;;
+
+       and t0=0x7,t0
+       mov ar.lc=cnt
+       ;;
+       cmp.ne p6,p0=t0,r0
+
+       mov src=in1             // copy because of rotation
+(p7)   br.cond.spnt.few .memcpy_short
+(p6)   br.cond.spnt.few .memcpy_long
+       ;;
+       nop.m   0
+       ;;
+       nop.m   0
+       nop.i   0
+       ;;
+       nop.m   0
+       ;;
+       .rotr val[N]
+       .rotp p[N]
+       .align 32
+1: { .mib
+(p[0]) ld8 val[0]=[src],8
+       nop.i 0
+       brp.loop.imp 1b, 2f
+}
+2: { .mfb
+(p[N-1])st8 [dst]=val[N-1],8
+       nop.f 0
+       br.ctop.dptk.few 1b
+}
+       ;;
+       mov ar.lc=saved_lc
+       mov pr=saved_pr,-1
+       mov ar.pfs=saved_pfs
+       br.ret.sptk.many rp
+
+       /*
+        * Small (<16 bytes) unaligned copying is done via a simple byte-at-a-time
+        * copy loop.  This performs relatively poorly on Itanium, but it doesn't
+        * get used very often (gcc inlines small copies) and due to atomicity
+        * issues, we want to avoid read-modify-write of entire words.
+        */
+       .align 32
+.memcpy_short:
+       adds cnt=-1,in2         // br.ctop is repeat/until
+       mov ar.ec=MEM_LAT
+       brp.loop.imp 1f, 2f
+       ;;
+       mov ar.lc=cnt
+       ;;
+       nop.m   0
+       ;;
+       nop.m   0
+       nop.i   0
+       ;;
+       nop.m   0
+       ;;
+       nop.m   0
+       ;;
+       /*
+        * It is faster to put a stop bit in the loop here because it makes
+        * the pipeline shorter (and latency is what matters on short copies).
+        */
+       .align 32
+1: { .mib
+(p[0]) ld1 val[0]=[src],1
+       nop.i 0
+       brp.loop.imp 1b, 2f
+} ;;
+2: { .mfb
+(p[MEM_LAT-1])st1 [dst]=val[MEM_LAT-1],1
+       nop.f 0
+       br.ctop.dptk.few 1b
+} ;;
+       mov ar.lc=saved_lc
+       mov pr=saved_pr,-1
+       mov ar.pfs=saved_pfs
+       br.ret.sptk.many rp
+
+       /*
+        * Large (>= 16 bytes) copying is done in a fancy way.  Latency isn't
+        * an overriding concern here, but throughput is.  We first do
+        * sub-word copying until the destination is aligned, then we check
+        * if the source is also aligned.  If so, we do a simple load/store-loop
+        * until there are less than 8 bytes left over and then we do the tail,
+        * by storing the last few bytes using sub-word copying.  If the source
+        * is not aligned, we branch off to the non-congruent loop.
+        *
+        *   stage:   op:
+        *         0  ld
+        *         :
+        * MEM_LAT+3  shrp
+        * MEM_LAT+4  st
+        *
+        * On Itanium, the pipeline itself runs without stalls.  However, br.ctop
+        * seems to introduce an unavoidable bubble in the pipeline so the overall
+        * latency is 2 cycles/iteration.  This gives us a _copy_ throughput
+        * of 4 byte/cycle.  Still not bad.
+        */
+#      undef N
+#      undef Nrot
+#      define N        (MEM_LAT + 5)           /* number of stages */
+#      define Nrot     ((N+1 + 2 + 7) & ~7)    /* number of rotating regs */
+
+#define LOG_LOOP_SIZE  6
+
+.memcpy_long:
+       alloc t3=ar.pfs,3,Nrot,0,Nrot   // resize register frame
+       and t0=-8,src           // t0 = src & ~7
+       and t2=7,src            // t2 = src & 7
+       ;;
+       ld8 t0=[t0]             // t0 = 1st source word
+       adds src2=7,src         // src2 = (src + 7)
+       sub t4=r0,dst           // t4 = -dst
+       ;;
+       and src2=-8,src2        // src2 = (src + 7) & ~7
+       shl t2=t2,3             // t2 = 8*(src & 7)
+       shl t4=t4,3             // t4 = 8*(dst & 7)
+       ;;
+       ld8 t1=[src2]           // t1 = 1st source word if src is 8-byte aligned, 2nd otherwise
+       sub t3=64,t2            // t3 = 64-8*(src & 7)
+       shr.u t0=t0,t2
+       ;;
+       add src_end=src,in2
+       shl t1=t1,t3
+       mov pr=t4,0x38          // (p5,p4,p3)=(dst & 7)
+       ;;
+       or t0=t0,t1
+       mov cnt=r0
+       adds src_end=-1,src_end
+       ;;
+(p3)   st1 [dst]=t0,1
+(p3)   shr.u t0=t0,8
+(p3)   adds cnt=1,cnt
+       ;;
+(p4)   st2 [dst]=t0,2
+(p4)   shr.u t0=t0,16
+(p4)   adds cnt=2,cnt
+       ;;
+(p5)   st4 [dst]=t0,4
+(p5)   adds cnt=4,cnt
+       and src_end=-8,src_end  // src_end = last word of source buffer
+       ;;
+
+       // At this point, dst is aligned to 8 bytes and there are at least 16-7=9 bytes left to copy:
+
+1:{    add src=cnt,src                 // make src point to remainder of source buffer
+       sub cnt=in2,cnt                 // cnt = number of bytes left to copy
+       mov t4=ip
+  }    ;;
+       and src2=-8,src                 // align source pointer
+       adds t4=.memcpy_loops-1b,t4
+       mov ar.ec=N
+
+       and t0=7,src                    // t0 = src & 7
+       shr.u t2=cnt,3                  // t2 = number of 8-byte words left to copy
+       shl cnt=cnt,3                   // move bits 0-2 to 3-5
+       ;;
+
+       .rotr val[N+1], w[2]
+       .rotp p[N]
+
+       cmp.ne p6,p0=t0,r0              // is src aligned, too?
+       shl t0=t0,LOG_LOOP_SIZE         // t0 = 8*(src & 7)
+       adds t2=-1,t2                   // br.ctop is repeat/until
+       ;;
+       add t4=t0,t4
+       mov pr=cnt,0x38                 // set (p5,p4,p3) to # of last-word bytes to copy
+       mov ar.lc=t2
+       ;;
+       nop.m   0
+       ;;
+       nop.m   0
+       nop.i   0
+       ;;
+       nop.m   0
+       ;;
+(p6)   ld8 val[1]=[src2],8             // prime the pump...
+       mov b6=t4
+       br.sptk.few b6
+       ;;
+
+.memcpy_tail:
+       // At this point, (p5,p4,p3) are set to the number of bytes left to copy (which is
+       // less than 8) and t0 contains the last few bytes of the src buffer:
+(p5)   st4 [dst]=t0,4
+(p5)   shr.u t0=t0,32
+       mov ar.lc=saved_lc
+       ;;
+(p4)   st2 [dst]=t0,2
+(p4)   shr.u t0=t0,16
+       mov ar.pfs=saved_pfs
+       ;;
+(p3)   st1 [dst]=t0
+       mov pr=saved_pr,-1
+       br.ret.sptk.many rp
+
+///////////////////////////////////////////////////////
+       .align 64
+
+#define COPY(shift,index)                                                      \
+ 1: { .mib                                                                     \
+       (p[0])          ld8 val[0]=[src2],8;                                    \
+       (p[MEM_LAT+3])  shrp w[0]=val[MEM_LAT+3],val[MEM_LAT+4-index],shift;    \
+                       brp.loop.imp 1b, 2f                                     \
+    };                                                                         \
+ 2: { .mfb                                                                     \
+       (p[MEM_LAT+4])  st8 [dst]=w[1],8;                                       \
+                       nop.f 0;                                                \
+                       br.ctop.dptk.few 1b;                                    \
+    };                                                                         \
+                       ;;                                                      \
+                       ld8 val[N-1]=[src_end]; /* load last word (may be same as val[N]) */ \
+                       ;;                                                      \
+                       shrp t0=val[N-1],val[N-index],shift;                    \
+                       br .memcpy_tail
+.memcpy_loops:
+       COPY(0, 1) /* no point special casing this---it doesn't go any faster without shrp */
+       COPY(8, 0)
+       COPY(16, 0)
+       COPY(24, 0)
+       COPY(32, 0)
+       COPY(40, 0)
+       COPY(48, 0)
+       COPY(56, 0)
+
+END(memcpy)
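The COPY() jump-table entries above implement the non-congruent case: the
source is read as aligned 8-byte words and each destination word is stitched
together from two adjacent source words with shrp.  A conceptual little-endian
C sketch of that stitching (dst 8-byte aligned, src misaligned by off bytes,
n a multiple of 8; like the assembly, it reads whole aligned source words):

    static void shift_copy_model(unsigned long *dst, const unsigned char *src,
                                 long n, int off)            /* off in 1..7 */
    {
            const unsigned long *s = (const unsigned long *) (src - off);
            unsigned long lo = *s++;           /* aligned word holding src[0] */

            while (n >= 8) {
                    unsigned long hi = *s++;   /* next aligned source word    */

                    *dst++ = (lo >> (8 * off)) | (hi << (64 - 8 * off));
                    lo = hi;
                    n -= 8;
            }
    }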
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/memcpy_mck.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/memcpy_mck.S      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,661 @@
+/*
+ * Itanium 2-optimized version of memcpy and copy_user function
+ *
+ * Inputs:
+ *     in0:    destination address
+ *     in1:    source address
+ *     in2:    number of bytes to copy
+ * Output:
+ *     0 if success, or number of bytes NOT copied if error occurred.
+ *
+ * Copyright (C) 2002 Intel Corp.
+ * Copyright (C) 2002 Ken Chen <kenneth.w.chen@xxxxxxxxx>
+ */
+#include <linux/config.h>
+#include <asm/asmmacro.h>
+#include <asm/page.h>
+
+#define EK(y...) EX(y)
+
+/* McKinley specific optimization */
+
+#define retval         r8
+#define saved_pfs      r31
+#define saved_lc       r10
+#define saved_pr       r11
+#define saved_in0      r14
+#define saved_in1      r15
+#define saved_in2      r16
+
+#define src0           r2
+#define src1           r3
+#define dst0           r17
+#define dst1           r18
+#define cnt            r9
+
+/* r19-r30 are temp for each code section */
+#define PREFETCH_DIST  8
+#define src_pre_mem    r19
+#define dst_pre_mem    r20
+#define src_pre_l2     r21
+#define dst_pre_l2     r22
+#define t1             r23
+#define t2             r24
+#define t3             r25
+#define t4             r26
+#define t5             t1      // alias!
+#define t6             t2      // alias!
+#define t7             t3      // alias!
+#define n8             r27
+#define t9             t5      // alias!
+#define t10            t4      // alias!
+#define t11            t7      // alias!
+#define t12            t6      // alias!
+#define t14            t10     // alias!
+#define t13            r28
+#define t15            r29
+#define tmp            r30
+
+/* defines for long_copy block */
+#define        A       0
+#define B      (PREFETCH_DIST)
+#define C      (B + PREFETCH_DIST)
+#define D      (C + 1)
+#define N      (D + 1)
+#define Nrot   ((N + 7) & ~7)
+
+/* alias */
+#define in0            r32
+#define in1            r33
+#define in2            r34
+
+GLOBAL_ENTRY(memcpy)
+       and     r28=0x7,in0
+       and     r29=0x7,in1
+       mov     f6=f0
+       br.cond.sptk .common_code
+       ;;
+GLOBAL_ENTRY(__copy_user)
+       .prologue
+// check dest alignment
+       and     r28=0x7,in0
+       and     r29=0x7,in1
+       mov     f6=f1
+       mov     saved_in0=in0   // save dest pointer
+       mov     saved_in1=in1   // save src pointer
+       mov     saved_in2=in2   // save len
+       ;;
+.common_code:
+       cmp.gt  p15,p0=8,in2    // check for small size
+       cmp.ne  p13,p0=0,r28    // check dest alignment
+       cmp.ne  p14,p0=0,r29    // check src alignment
+       add     src0=0,in1
+       sub     r30=8,r28       // for .align_dest
+       mov     retval=r0       // initialize return value
+       ;;
+       add     dst0=0,in0
+       add     dst1=1,in0      // dest odd index
+       cmp.le  p6,p0 = 1,r30   // for .align_dest
+(p15)  br.cond.dpnt .memcpy_short
+(p13)  br.cond.dpnt .align_dest
+(p14)  br.cond.dpnt .unaligned_src
+       ;;
+
+// both dest and src are aligned on 8-byte boundary
+.aligned_src:
+       .save ar.pfs, saved_pfs
+       alloc   saved_pfs=ar.pfs,3,Nrot-3,0,Nrot
+       .save pr, saved_pr
+       mov     saved_pr=pr
+
+       shr.u   cnt=in2,7       // this much cache line
+       ;;
+       cmp.lt  p6,p0=2*PREFETCH_DIST,cnt
+       cmp.lt  p7,p8=1,cnt
+       .save ar.lc, saved_lc
+       mov     saved_lc=ar.lc
+       .body
+       add     cnt=-1,cnt
+       add     src_pre_mem=0,in1       // prefetch src pointer
+       add     dst_pre_mem=0,in0       // prefetch dest pointer
+       ;;
+(p7)   mov     ar.lc=cnt       // prefetch count
+(p8)   mov     ar.lc=r0
+(p6)   br.cond.dpnt .long_copy
+       ;;
+
+.prefetch:
+       lfetch.fault      [src_pre_mem], 128
+       lfetch.fault.excl [dst_pre_mem], 128
+       br.cloop.dptk.few .prefetch
+       ;;
+
+.medium_copy:
+       and     tmp=31,in2      // copy length after iteration
+       shr.u   r29=in2,5       // number of 32-byte iteration
+       add     dst1=8,dst0     // 2nd dest pointer
+       ;;
+       add     cnt=-1,r29      // ctop iteration adjustment
+       cmp.eq  p10,p0=r29,r0   // do we really need to loop?
+       add     src1=8,src0     // 2nd src pointer
+       cmp.le  p6,p0=8,tmp
+       ;;
+       cmp.le  p7,p0=16,tmp
+       mov     ar.lc=cnt       // loop setup
+       cmp.eq  p16,p17 = r0,r0
+       mov     ar.ec=2
+(p10)  br.dpnt.few .aligned_src_tail
+       ;;
+       TEXT_ALIGN(32)
+1:
+EX(.ex_handler, (p16)  ld8     r34=[src0],16)
+EK(.ex_handler, (p16)  ld8     r38=[src1],16)
+EX(.ex_handler, (p17)  st8     [dst0]=r33,16)
+EK(.ex_handler, (p17)  st8     [dst1]=r37,16)
+       ;;
+EX(.ex_handler, (p16)  ld8     r32=[src0],16)
+EK(.ex_handler, (p16)  ld8     r36=[src1],16)
+EX(.ex_handler, (p16)  st8     [dst0]=r34,16)
+EK(.ex_handler, (p16)  st8     [dst1]=r38,16)
+       br.ctop.dptk.few 1b
+       ;;
+
+.aligned_src_tail:
+EX(.ex_handler, (p6)   ld8     t1=[src0])
+       mov     ar.lc=saved_lc
+       mov     ar.pfs=saved_pfs
+EX(.ex_hndlr_s, (p7)   ld8     t2=[src1],8)
+       cmp.le  p8,p0=24,tmp
+       and     r21=-8,tmp
+       ;;
+EX(.ex_hndlr_s, (p8)   ld8     t3=[src1])
+EX(.ex_handler, (p6)   st8     [dst0]=t1)      // store byte 1
+       and     in2=7,tmp       // remaining length
+EX(.ex_hndlr_d, (p7)   st8     [dst1]=t2,8)    // store byte 2
+       add     src0=src0,r21   // setting up src pointer
+       add     dst0=dst0,r21   // setting up dest pointer
+       ;;
+EX(.ex_handler, (p8)   st8     [dst1]=t3)      // store byte 3
+       mov     pr=saved_pr,-1
+       br.dptk.many .memcpy_short
+       ;;
+
+/* code taken from copy_page_mck */
+.long_copy:
+       .rotr v[2*PREFETCH_DIST]
+       .rotp p[N]
+
+       mov src_pre_mem = src0
+       mov pr.rot = 0x10000
+       mov ar.ec = 1                           // special unrolled loop
+
+       mov dst_pre_mem = dst0
+
+       add src_pre_l2 = 8*8, src0
+       add dst_pre_l2 = 8*8, dst0
+       ;;
+       add src0 = 8, src_pre_mem               // first t1 src
+       mov ar.lc = 2*PREFETCH_DIST - 1
+       shr.u cnt=in2,7                         // number of lines
+       add src1 = 3*8, src_pre_mem             // first t3 src
+       add dst0 = 8, dst_pre_mem               // first t1 dst
+       add dst1 = 3*8, dst_pre_mem             // first t3 dst
+       ;;
+       and tmp=127,in2                         // remaining bytes after this block
+       add cnt = -(2*PREFETCH_DIST) - 1, cnt
+       // same as .line_copy loop, but with all predicated-off instructions removed:
+.prefetch_loop:
+EX(.ex_hndlr_lcpy_1, (p[A])    ld8 v[A] = [src_pre_mem], 128)          // M0
+EK(.ex_hndlr_lcpy_1, (p[B])    st8 [dst_pre_mem] = v[B], 128)          // M2
+       br.ctop.sptk .prefetch_loop
+       ;;
+       cmp.eq p16, p0 = r0, r0                 // reset p16 to 1
+       mov ar.lc = cnt
+       mov ar.ec = N                           // # of stages in pipeline
+       ;;
+.line_copy:
+EX(.ex_handler,        (p[D])  ld8 t2 = [src0], 3*8)                   // M0
+EK(.ex_handler,        (p[D])  ld8 t4 = [src1], 3*8)                   // M1
+EX(.ex_handler_lcpy,   (p[B])  st8 [dst_pre_mem] = v[B], 128)          // M2 prefetch dst from memory
+EK(.ex_handler_lcpy,   (p[D])  st8 [dst_pre_l2] = n8, 128)             // M3 prefetch dst from L2
+       ;;
+EX(.ex_handler_lcpy,   (p[A])  ld8 v[A] = [src_pre_mem], 128)          // M0 prefetch src from memory
+EK(.ex_handler_lcpy,   (p[C])  ld8 n8 = [src_pre_l2], 128)             // M1 prefetch src from L2
+EX(.ex_handler,        (p[D])  st8 [dst0] =  t1, 8)                    // M2
+EK(.ex_handler,        (p[D])  st8 [dst1] =  t3, 8)                    // M3
+       ;;
+EX(.ex_handler,        (p[D])  ld8  t5 = [src0], 8)
+EK(.ex_handler,        (p[D])  ld8  t7 = [src1], 3*8)
+EX(.ex_handler,        (p[D])  st8 [dst0] =  t2, 3*8)
+EK(.ex_handler,        (p[D])  st8 [dst1] =  t4, 3*8)
+       ;;
+EX(.ex_handler,        (p[D])  ld8  t6 = [src0], 3*8)
+EK(.ex_handler,        (p[D])  ld8 t10 = [src1], 8)
+EX(.ex_handler,        (p[D])  st8 [dst0] =  t5, 8)
+EK(.ex_handler,        (p[D])  st8 [dst1] =  t7, 3*8)
+       ;;
+EX(.ex_handler,        (p[D])  ld8  t9 = [src0], 3*8)
+EK(.ex_handler,        (p[D])  ld8 t11 = [src1], 3*8)
+EX(.ex_handler,        (p[D])  st8 [dst0] =  t6, 3*8)
+EK(.ex_handler,        (p[D])  st8 [dst1] = t10, 8)
+       ;;
+EX(.ex_handler,        (p[D])  ld8 t12 = [src0], 8)
+EK(.ex_handler,        (p[D])  ld8 t14 = [src1], 8)
+EX(.ex_handler,        (p[D])  st8 [dst0] =  t9, 3*8)
+EK(.ex_handler,        (p[D])  st8 [dst1] = t11, 3*8)
+       ;;
+EX(.ex_handler,        (p[D])  ld8 t13 = [src0], 4*8)
+EK(.ex_handler,        (p[D])  ld8 t15 = [src1], 4*8)
+EX(.ex_handler,        (p[D])  st8 [dst0] = t12, 8)
+EK(.ex_handler,        (p[D])  st8 [dst1] = t14, 8)
+       ;;
+EX(.ex_handler,        (p[C])  ld8  t1 = [src0], 8)
+EK(.ex_handler,        (p[C])  ld8  t3 = [src1], 8)
+EX(.ex_handler,        (p[D])  st8 [dst0] = t13, 4*8)
+EK(.ex_handler,        (p[D])  st8 [dst1] = t15, 4*8)
+       br.ctop.sptk .line_copy
+       ;;
+
+       add dst0=-8,dst0
+       add src0=-8,src0
+       mov in2=tmp
+       .restore sp
+       br.sptk.many .medium_copy
+       ;;
+
+#define BLOCK_SIZE     128*32
+#define blocksize      r23
+#define curlen         r24
+
+// dest is on 8-byte boundary, src is not. We need to do
+// ld8-ld8, shrp, then st8.  Max 8 byte copy per cycle.
+.unaligned_src:
+       .prologue
+       .save ar.pfs, saved_pfs
+       alloc   saved_pfs=ar.pfs,3,5,0,8
+       .save ar.lc, saved_lc
+       mov     saved_lc=ar.lc
+       .save pr, saved_pr
+       mov     saved_pr=pr
+       .body
+.4k_block:
+       mov     saved_in0=dst0  // need to save all input arguments
+       mov     saved_in2=in2
+       mov     blocksize=BLOCK_SIZE
+       ;;
+       cmp.lt  p6,p7=blocksize,in2
+       mov     saved_in1=src0
+       ;;
+(p6)   mov     in2=blocksize
+       ;;
+       shr.u   r21=in2,7       // this much cache line
+       shr.u   r22=in2,4       // number of 16-byte iteration
+       and     curlen=15,in2   // copy length after iteration
+       and     r30=7,src0      // source alignment
+       ;;
+       cmp.lt  p7,p8=1,r21
+       add     cnt=-1,r21
+       ;;
+
+       add     src_pre_mem=0,src0      // prefetch src pointer
+       add     dst_pre_mem=0,dst0      // prefetch dest pointer
+       and     src0=-8,src0            // 1st src pointer
+(p7)   mov     ar.lc = r21
+(p8)   mov     ar.lc = r0
+       ;;
+       TEXT_ALIGN(32)
+1:     lfetch.fault      [src_pre_mem], 128
+       lfetch.fault.excl [dst_pre_mem], 128
+       br.cloop.dptk.few 1b
+       ;;
+
+       shladd  dst1=r22,3,dst0 // 2nd dest pointer
+       shladd  src1=r22,3,src0 // 2nd src pointer
+       cmp.eq  p8,p9=r22,r0    // do we really need to loop?
+       cmp.le  p6,p7=8,curlen; // have at least 8 byte remaining?
+       add     cnt=-1,r22      // ctop iteration adjustment
+       ;;
+EX(.ex_handler, (p9)   ld8     r33=[src0],8)   // loop primer
+EK(.ex_handler, (p9)   ld8     r37=[src1],8)
+(p8)   br.dpnt.few .noloop
+       ;;
+
+// The jump address is calculated based on src alignment. The COPYU
+// macro below needs to confine its size to a power of two, so an entry
+// can be calculated using shl instead of an expensive multiply. The
+// size is then hard coded by the following #define to match the
+// actual size.  This makes it somewhat tedious when the COPYU macro gets
+// changed and this needs to be adjusted to match.
+#define LOOP_SIZE 6
+1:
+       mov     r29=ip          // jmp_table thread
+       mov     ar.lc=cnt
+       ;;
+       add     r29=.jump_table - 1b - (.jmp1-.jump_table), r29
+       shl     r28=r30, LOOP_SIZE      // jmp_table thread
+       mov     ar.ec=2         // loop setup
+       ;;
+       add     r29=r29,r28             // jmp_table thread
+       cmp.eq  p16,p17=r0,r0
+       ;;
+       mov     b6=r29                  // jmp_table thread
+       ;;
+       br.cond.sptk.few b6
+
+// for 8-15 byte case
+// We will skip the loop, but need to replicate the side effect
+// that the loop produces.
+.noloop:
+EX(.ex_handler, (p6)   ld8     r37=[src1],8)
+       add     src0=8,src0
+(p6)   shl     r25=r30,3
+       ;;
+EX(.ex_handler, (p6)   ld8     r27=[src1])
+(p6)   shr.u   r28=r37,r25
+(p6)   sub     r26=64,r25
+       ;;
+(p6)   shl     r27=r27,r26
+       ;;
+(p6)   or      r21=r28,r27
+
+.unaligned_src_tail:
+/* check if we have more than blocksize to copy, if so go back */
+       cmp.gt  p8,p0=saved_in2,blocksize
+       ;;
+(p8)   add     dst0=saved_in0,blocksize
+(p8)   add     src0=saved_in1,blocksize
+(p8)   sub     in2=saved_in2,blocksize
+(p8)   br.dpnt .4k_block
+       ;;
+
+/* we have up to 15 byte to copy in the tail.
+ * part of work is already done in the jump table code
+ * we are at the following state.
+ * src side:
+ * 
+ *   xxxxxx xx                   <----- r21 has xxxxxxxx already
+ * -------- -------- --------
+ * 0        8        16
+ *          ^
+ *          |
+ *          src1
+ * 
+ * dst
+ * -------- -------- --------
+ * ^
+ * |
+ * dst1
+ */
+EX(.ex_handler, (p6)   st8     [dst1]=r21,8)   // more than 8 byte to copy
+(p6)   add     curlen=-8,curlen        // update length
+       mov     ar.pfs=saved_pfs
+       ;;
+       mov     ar.lc=saved_lc
+       mov     pr=saved_pr,-1
+       mov     in2=curlen      // remaining length
+       mov     dst0=dst1       // dest pointer
+       add     src0=src1,r30   // forward by src alignment
+       ;;
+
+// 7 byte or smaller.
+.memcpy_short:
+       cmp.le  p8,p9   = 1,in2
+       cmp.le  p10,p11 = 2,in2
+       cmp.le  p12,p13 = 3,in2
+       cmp.le  p14,p15 = 4,in2
+       add     src1=1,src0     // second src pointer
+       add     dst1=1,dst0     // second dest pointer
+       ;;
+
+EX(.ex_handler_short, (p8)     ld1     t1=[src0],2)
+EK(.ex_handler_short, (p10)    ld1     t2=[src1],2)
+(p9)   br.ret.dpnt rp          // 0 byte copy
+       ;;
+
+EX(.ex_handler_short, (p8)     st1     [dst0]=t1,2)
+EK(.ex_handler_short, (p10)    st1     [dst1]=t2,2)
+(p11)  br.ret.dpnt rp          // 1 byte copy
+
+EX(.ex_handler_short, (p12)    ld1     t3=[src0],2)
+EK(.ex_handler_short, (p14)    ld1     t4=[src1],2)
+(p13)  br.ret.dpnt rp          // 2 byte copy
+       ;;
+
+       cmp.le  p6,p7   = 5,in2
+       cmp.le  p8,p9   = 6,in2
+       cmp.le  p10,p11 = 7,in2
+
+EX(.ex_handler_short, (p12)    st1     [dst0]=t3,2)
+EK(.ex_handler_short, (p14)    st1     [dst1]=t4,2)
+(p15)  br.ret.dpnt rp          // 3 byte copy
+       ;;
+
+EX(.ex_handler_short, (p6)     ld1     t5=[src0],2)
+EK(.ex_handler_short, (p8)     ld1     t6=[src1],2)
+(p7)   br.ret.dpnt rp          // 4 byte copy
+       ;;
+
+EX(.ex_handler_short, (p6)     st1     [dst0]=t5,2)
+EK(.ex_handler_short, (p8)     st1     [dst1]=t6,2)
+(p9)   br.ret.dptk rp          // 5 byte copy
+
+EX(.ex_handler_short, (p10)    ld1     t7=[src0],2)
+(p11)  br.ret.dptk rp          // 6 byte copy
+       ;;
+
+EX(.ex_handler_short, (p10)    st1     [dst0]=t7,2)
+       br.ret.dptk rp          // done all cases
+
+
+/* Align dest to nearest 8-byte boundary. We know we have at
+ * least 7 bytes to copy, enough to crawl to 8-byte boundary.
+ * The actual number of bytes to crawl depends on the dest alignment.
+ * 7 bytes or fewer are taken care of at .memcpy_short
+
+ * src0 - source even index
+ * src1 - source  odd index
+ * dst0 - dest even index
+ * dst1 - dest  odd index
+ * r30  - distance to 8-byte boundary
+ */
+
+.align_dest:
+       add     src1=1,in1      // source odd index
+       cmp.le  p7,p0 = 2,r30   // for .align_dest
+       cmp.le  p8,p0 = 3,r30   // for .align_dest
+EX(.ex_handler_short, (p6)     ld1     t1=[src0],2)
+       cmp.le  p9,p0 = 4,r30   // for .align_dest
+       cmp.le  p10,p0 = 5,r30
+       ;;
+EX(.ex_handler_short, (p7)     ld1     t2=[src1],2)
+EK(.ex_handler_short, (p8)     ld1     t3=[src0],2)
+       cmp.le  p11,p0 = 6,r30
+EX(.ex_handler_short, (p6)     st1     [dst0] = t1,2)
+       cmp.le  p12,p0 = 7,r30
+       ;;
+EX(.ex_handler_short, (p9)     ld1     t4=[src1],2)
+EK(.ex_handler_short, (p10)    ld1     t5=[src0],2)
+EX(.ex_handler_short, (p7)     st1     [dst1] = t2,2)
+EK(.ex_handler_short, (p8)     st1     [dst0] = t3,2)
+       ;;
+EX(.ex_handler_short, (p11)    ld1     t6=[src1],2)
+EK(.ex_handler_short, (p12)    ld1     t7=[src0],2)
+       cmp.eq  p6,p7=r28,r29
+EX(.ex_handler_short, (p9)     st1     [dst1] = t4,2)
+EK(.ex_handler_short, (p10)    st1     [dst0] = t5,2)
+       sub     in2=in2,r30
+       ;;
+EX(.ex_handler_short, (p11)    st1     [dst1] = t6,2)
+EK(.ex_handler_short, (p12)    st1     [dst0] = t7)
+       add     dst0=in0,r30    // setup arguments
+       add     src0=in1,r30
+(p6)   br.cond.dptk .aligned_src
+(p7)   br.cond.dpnt .unaligned_src
+       ;;
+
+/* main loop body in jump table format */
+#define COPYU(shift)                                                                   \
+1:                                                                                     \
+EX(.ex_handler,  (p16) ld8     r32=[src0],8);          /* 1 */                         \
+EK(.ex_handler,  (p16) ld8     r36=[src1],8);                                          \
+                (p17)  shrp    r35=r33,r34,shift;;     /* 1 */                         \
+EX(.ex_handler,  (p6)  ld8     r22=[src1]);    /* common, prime for tail section */    \
+                nop.m  0;                                                              \
+                (p16)  shrp    r38=r36,r37,shift;                                      \
+EX(.ex_handler,  (p17) st8     [dst0]=r35,8);          /* 1 */                         \
+EK(.ex_handler,  (p17) st8     [dst1]=r39,8);                                          \
+                br.ctop.dptk.few 1b;;                                                  \
+                (p7)   add     src1=-8,src1;   /* back out for <8 byte case */         \
+                shrp   r21=r22,r38,shift;      /* speculative work */                  \
+                br.sptk.few .unaligned_src_tail /* branch out of jump table */         \
+                ;;
+       TEXT_ALIGN(32)
+.jump_table:
+       COPYU(8)        // unaligned cases
+.jmp1:
+       COPYU(16)
+       COPYU(24)
+       COPYU(32)
+       COPYU(40)
+       COPYU(48)
+       COPYU(56)
+
+#undef A
+#undef B
+#undef C
+#undef D
+END(memcpy)
+
+/*
+ * Due to lack of local tag support in gcc 2.x assembler, it is not clear which
+ * instruction failed in the bundle.  The exception algorithm is that we
+ * first figure out the faulting address, then detect if there is any
+ * progress made on the copy; if so, redo the copy from the last known copied
+ * location up to the faulting address (exclusive). In the copy_from_user
+ * case, the remaining bytes in the kernel buffer will be zeroed.
+ *
+ * Take copy_from_user as an example: in the code there are multiple loads
+ * in a bundle, and those loads could span two pages, so the
+ * faulting address is calculated as page_round_down(max(src0, src1)).
+ * This is based on knowledge that if we can access one byte in a page, we
+ * can access any byte in that page.
+ *
+ * predicate used in the exception handler:
+ * p6-p7: direction
+ * p10-p11: src faulting addr calculation
+ * p12-p13: dst faulting addr calculation
+ */
+
+#define A      r19
+#define B      r20
+#define C      r21
+#define D      r22
+#define F      r28
+
+#define memset_arg0    r32
+#define memset_arg2    r33
+
+#define saved_retval   loc0
+#define saved_rtlink   loc1
+#define saved_pfs_stack        loc2
+
+.ex_hndlr_s:
+       add     src0=8,src0
+       br.sptk .ex_handler
+       ;;
+.ex_hndlr_d:
+       add     dst0=8,dst0
+       br.sptk .ex_handler
+       ;;
+.ex_hndlr_lcpy_1:
+       mov     src1=src_pre_mem
+       mov     dst1=dst_pre_mem
+       cmp.gtu p10,p11=src_pre_mem,saved_in1
+       cmp.gtu p12,p13=dst_pre_mem,saved_in0
+       ;;
+(p10)  add     src0=8,saved_in1
+(p11)  mov     src0=saved_in1
+(p12)  add     dst0=8,saved_in0
+(p13)  mov     dst0=saved_in0
+       br.sptk .ex_handler
+.ex_handler_lcpy:
+       // in the line_copy block, the preload addresses should always be ahead
+       // of the other two src/dst pointers.  Furthermore, src1/dst1 should
+       // always be ahead of src0/dst0.
+       mov     src1=src_pre_mem
+       mov     dst1=dst_pre_mem
+.ex_handler:
+       mov     pr=saved_pr,-1          // first restore pr, lc, and pfs
+       mov     ar.lc=saved_lc
+       mov     ar.pfs=saved_pfs
+       ;;
+.ex_handler_short: // fault occurred in sections that didn't change pr, lc, pfs
+       cmp.ltu p6,p7=saved_in0, saved_in1      // get the copy direction
+       cmp.ltu p10,p11=src0,src1
+       cmp.ltu p12,p13=dst0,dst1
+       fcmp.eq p8,p0=f6,f0             // is it memcpy?
+       mov     tmp = dst0
+       ;;
+(p11)  mov     src1 = src0             // pick the larger of the two
+(p13)  mov     dst0 = dst1             // make dst0 the smaller one
+(p13)  mov     dst1 = tmp              // and dst1 the larger one
+       ;;
+(p6)   dep     F = r0,dst1,0,PAGE_SHIFT // usr dst round down to page boundary
+(p7)   dep     F = r0,src1,0,PAGE_SHIFT // usr src round down to page boundary
+       ;;
+(p6)   cmp.le  p14,p0=dst0,saved_in0   // no progress has been made on store
+(p7)   cmp.le  p14,p0=src0,saved_in1   // no progress has been made on load
+       mov     retval=saved_in2
+(p8)   ld1     tmp=[src1]              // force an oops for memcpy call
+(p8)   st1     [dst1]=r0               // force an oops for memcpy call
+(p14)  br.ret.sptk.many rp
+
+/*
+ * The number of remaining bytes to copy is calculated as:
+ *
+ * A = (faulting_addr - orig_src)      -> len to faulting ld address
+ *     or 
+ *     (faulting_addr - orig_dst)      -> len to faulting st address
+ * B = (cur_dst - orig_dst)            -> len copied so far
+ * C = A - B                           -> len that needs to be copied
+ * D = orig_len - A                    -> len that needs to be zeroed
+ */
+(p6)   sub     A = F, saved_in0
+(p7)   sub     A = F, saved_in1
+       clrrrb
+       ;;
+       alloc   saved_pfs_stack=ar.pfs,3,3,3,0
+       sub     B = dst0, saved_in0     // how many byte copied so far
+       ;;
+       sub     C = A, B
+       sub     D = saved_in2, A
+       ;;
+       cmp.gt  p8,p0=C,r0              // more than 1 byte?
+       add     memset_arg0=saved_in0, A
+(p6)   mov     memset_arg2=0           // copy_to_user should not call memset
+(p7)   mov     memset_arg2=D           // copy_from_user need to have kbuf zeroed
+       mov     r8=0
+       mov     saved_retval = D
+       mov     saved_rtlink = b0
+
+       add     out0=saved_in0, B
+       add     out1=saved_in1, B
+       mov     out2=C
+(p8)   br.call.sptk.few b0=__copy_user // recursive call
+       ;;
+
+       add     saved_retval=saved_retval,r8    // above might return non-zero value
+       cmp.gt  p8,p0=memset_arg2,r0    // more than 1 byte?
+       mov     out0=memset_arg0        // *s
+       mov     out1=r0                 // c
+       mov     out2=memset_arg2        // n
+(p8)   br.call.sptk.few b0=memset
+       ;;
+
+       mov     retval=saved_retval
+       mov     ar.pfs=saved_pfs_stack
+       mov     b0=saved_rtlink
+       br.ret.sptk.many rp
+
+/* end of McKinley specific optimization */
+END(__copy_user)
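The comment blocks above describe the fixup arithmetic in terms of A, B, C and D.  As a rough C sketch (illustrative only: the function and variable names below are invented for the example, and the real handler re-enters __copy_user for the redo and calls memset only on the copy_from_user path):

    #include <stddef.h>
    #include <string.h>

    /* Sketch of the A/B/C/D bookkeeping used by the exception handler above;
     * returns the number of bytes that could not be copied. */
    static size_t fixup_copy_from_user(char *orig_dst, const char *orig_src,
                                       size_t orig_len,
                                       const char *faulting_addr, char *cur_dst)
    {
            size_t A = (size_t)(faulting_addr - orig_src); /* len to faulting load */
            size_t B = (size_t)(cur_dst - orig_dst);       /* len copied so far    */
            size_t C = A - B;                              /* len still copyable   */
            size_t D = orig_len - A;                       /* len to zero in kbuf  */

            memcpy(orig_dst + B, orig_src + B, C);  /* redo up to the fault        */
            memset(orig_dst + A, 0, D);             /* zero the kernel-buffer tail */
            return D;
    }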
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/memset.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/memset.S  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,362 @@
+/* Optimized version of the standard memset() function.
+
+   Copyright (c) 2002 Hewlett-Packard Co/CERN
+       Sverre Jarp <Sverre.Jarp@xxxxxxx>
+
+   Return: dest
+
+   Inputs:
+        in0:    dest
+        in1:    value
+        in2:    count
+
+   The algorithm is fairly straightforward: set byte by byte until we
+   get to a 16B-aligned address, then loop on 128B chunks using an
+   early store as prefetching, then loop on 32B chunks, then clear remaining
+   words, finally clear remaining bytes.
+   Since a stf.spill f0 can store 16B in one go, we use this instruction
+   to get peak speed when value = 0.  */
+
+#include <asm/asmmacro.h>
+#undef ret
+
+#define dest           in0
+#define value          in1
+#define        cnt             in2
+
+#define tmp            r31
+#define save_lc                r30
+#define ptr0           r29
+#define ptr1           r28
+#define ptr2           r27
+#define ptr3           r26
+#define ptr9           r24
+#define        loopcnt         r23
+#define linecnt                r22
+#define bytecnt                r21
+
+#define fvalue         f6
+
+// This routine uses only scratch predicate registers (p6 - p15)
+#define p_scr          p6                      // default register for same-cycle branches
+#define p_nz           p7
+#define p_zr           p8
+#define p_unalgn       p9
+#define p_y            p11
+#define p_n            p12
+#define p_yy           p13
+#define p_nn           p14
+
+#define MIN1           15
+#define MIN1P1HALF     8
+#define LINE_SIZE      128
+#define LSIZE_SH        7                      // shift amount
+#define PREF_AHEAD     8
+
+GLOBAL_ENTRY(memset)
+{ .mmi
+       .prologue
+       alloc   tmp = ar.pfs, 3, 0, 0, 0
+       .body
+       lfetch.nt1 [dest]                       //
+       .save   ar.lc, save_lc
+       mov.i   save_lc = ar.lc
+} { .mmi
+       mov     ret0 = dest                     // return value
+       cmp.ne  p_nz, p_zr = value, r0          // use stf.spill if value is zero
+       cmp.eq  p_scr, p0 = cnt, r0
+;; }
+{ .mmi
+       and     ptr2 = -(MIN1+1), dest          // aligned address
+       and     tmp = MIN1, dest                // prepare to check for correct alignment
+       tbit.nz p_y, p_n = dest, 0              // Do we have an odd address? (M_B_U)
+} { .mib
+       mov     ptr1 = dest
+       mux1    value = value, @brcst           // create 8 identical bytes in word
+(p_scr)        br.ret.dpnt.many rp                     // return immediately if count = 0
+;; }
+{ .mib
+       cmp.ne  p_unalgn, p0 = tmp, r0          //
+} { .mib
+       sub     bytecnt = (MIN1+1), tmp         // NB: # of bytes to move is 1 higher than loopcnt
+       cmp.gt  p_scr, p0 = 16, cnt             // is it a minimalistic task?
+(p_scr)        br.cond.dptk.many .move_bytes_unaligned // go move just a few (M_B_U)
+;; }
+{ .mmi
+(p_unalgn) add ptr1 = (MIN1+1), ptr2           // after alignment
+(p_unalgn) add ptr2 = MIN1P1HALF, ptr2         // after alignment
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3   // should we do a st8 ?
+;; }
+{ .mib
+(p_y)  add     cnt = -8, cnt                   //
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2 // should we do a st4 ?
+} { .mib
+(p_y)  st8     [ptr2] = value,-4               //
+(p_n)  add     ptr2 = 4, ptr2                  //
+;; }
+{ .mib
+(p_yy) add     cnt = -4, cnt                   //
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1   // should we do a st2 ?
+} { .mib
+(p_yy) st4     [ptr2] = value,-2               //
+(p_nn) add     ptr2 = 2, ptr2                  //
+;; }
+{ .mmi
+       mov     tmp = LINE_SIZE+1               // for compare
+(p_y)  add     cnt = -2, cnt                   //
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0 // should we do a st1 ?
+} { .mmi
+       setf.sig fvalue=value                   // transfer value to FLP side
+(p_y)  st2     [ptr2] = value,-1               //
+(p_n)  add     ptr2 = 1, ptr2                  //
+;; }
+
+{ .mmi
+(p_yy) st1     [ptr2] = value                  //
+       cmp.gt  p_scr, p0 = tmp, cnt            // is it a minimalistic task?
+} { .mbb
+(p_yy) add     cnt = -1, cnt                   //
+(p_scr)        br.cond.dpnt.many .fraction_of_line     // go move just a few
+;; }
+
+{ .mib
+       nop.m 0
+       shr.u   linecnt = cnt, LSIZE_SH
+(p_zr) br.cond.dptk.many .l1b                  // Jump to use stf.spill
+;; }
+
+       TEXT_ALIGN(32) // --------------------- //  L1A: store ahead into cache lines; fill later
+{ .mmi
+       and     tmp = -(LINE_SIZE), cnt         // compute end of range
+       mov     ptr9 = ptr1                     // used for prefetching
+       and     cnt = (LINE_SIZE-1), cnt        // remainder
+} { .mmi
+       mov     loopcnt = PREF_AHEAD-1          // default prefetch loop
+       cmp.gt  p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
+;; }
+{ .mmi
+(p_scr)        add     loopcnt = -1, linecnt           //
+       add     ptr2 = 8, ptr1                  // start of stores (beyond prefetch stores)
+       add     ptr1 = tmp, ptr1                // first address beyond total range
+;; }
+{ .mmi
+       add     tmp = -1, linecnt               // next loop count
+       mov.i   ar.lc = loopcnt                 //
+;; }
+.pref_l1a:
+{ .mib
+       stf8 [ptr9] = fvalue, 128               // Do stores one cache line apart
+       nop.i   0
+       br.cloop.dptk.few .pref_l1a
+;; }
+{ .mmi
+       add     ptr0 = 16, ptr2                 // Two stores in parallel
+       mov.i   ar.lc = tmp                     //
+;; }
+.l1ax:
+ { .mmi
+       stf8 [ptr2] = fvalue, 8
+       stf8 [ptr0] = fvalue, 8
+ ;; }
+ { .mmi
+       stf8 [ptr2] = fvalue, 24
+       stf8 [ptr0] = fvalue, 24
+ ;; }
+ { .mmi
+       stf8 [ptr2] = fvalue, 8
+       stf8 [ptr0] = fvalue, 8
+ ;; }
+ { .mmi
+       stf8 [ptr2] = fvalue, 24
+       stf8 [ptr0] = fvalue, 24
+ ;; }
+ { .mmi
+       stf8 [ptr2] = fvalue, 8
+       stf8 [ptr0] = fvalue, 8
+ ;; }
+ { .mmi
+       stf8 [ptr2] = fvalue, 24
+       stf8 [ptr0] = fvalue, 24
+ ;; }
+ { .mmi
+       stf8 [ptr2] = fvalue, 8
+       stf8 [ptr0] = fvalue, 32
+       cmp.lt  p_scr, p0 = ptr9, ptr1          // do we need more prefetching?
+ ;; }
+{ .mmb
+       stf8 [ptr2] = fvalue, 24
+(p_scr)        stf8 [ptr9] = fvalue, 128
+       br.cloop.dptk.few .l1ax
+;; }
+{ .mbb
+       cmp.le  p_scr, p0 = 8, cnt              // just a few bytes left ?
+(p_scr) br.cond.dpnt.many  .fraction_of_line   // Branch no. 2
+       br.cond.dpnt.many  .move_bytes_from_alignment   // Branch no. 3
+;; }
+
+       TEXT_ALIGN(32)
+.l1b:  // ------------------------------------ //  L1B: store ahead into cache lines; fill later
+{ .mmi
+       and     tmp = -(LINE_SIZE), cnt         // compute end of range
+       mov     ptr9 = ptr1                     // used for prefetching
+       and     cnt = (LINE_SIZE-1), cnt        // remainder
+} { .mmi
+       mov     loopcnt = PREF_AHEAD-1          // default prefetch loop
+       cmp.gt  p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
+;; }
+{ .mmi
+(p_scr)        add     loopcnt = -1, linecnt
+       add     ptr2 = 16, ptr1                 // start of stores (beyond prefetch stores)
+       add     ptr1 = tmp, ptr1                // first address beyond total range
+;; }
+{ .mmi
+       add     tmp = -1, linecnt               // next loop count
+       mov.i   ar.lc = loopcnt
+;; }
+.pref_l1b:
+{ .mib
+       stf.spill [ptr9] = f0, 128              // Do stores one cache line apart
+       nop.i   0
+       br.cloop.dptk.few .pref_l1b
+;; }
+{ .mmi
+       add     ptr0 = 16, ptr2                 // Two stores in parallel
+       mov.i   ar.lc = tmp
+;; }
+.l1bx:
+ { .mmi
+       stf.spill [ptr2] = f0, 32
+       stf.spill [ptr0] = f0, 32
+ ;; }
+ { .mmi
+       stf.spill [ptr2] = f0, 32
+       stf.spill [ptr0] = f0, 32
+ ;; }
+ { .mmi
+       stf.spill [ptr2] = f0, 32
+       stf.spill [ptr0] = f0, 64
+       cmp.lt  p_scr, p0 = ptr9, ptr1          // do we need more prefetching?
+ ;; }
+{ .mmb
+       stf.spill [ptr2] = f0, 32
+(p_scr)        stf.spill [ptr9] = f0, 128
+       br.cloop.dptk.few .l1bx
+;; }
+{ .mib
+       cmp.gt  p_scr, p0 = 8, cnt              // just a few bytes left ?
+(p_scr)        br.cond.dpnt.many  .move_bytes_from_alignment   //
+;; }
+
+.fraction_of_line:
+{ .mib
+       add     ptr2 = 16, ptr1
+       shr.u   loopcnt = cnt, 5                // loopcnt = cnt / 32
+;; }
+{ .mib
+       cmp.eq  p_scr, p0 = loopcnt, r0
+       add     loopcnt = -1, loopcnt
+(p_scr)        br.cond.dpnt.many .store_words
+;; }
+{ .mib
+       and     cnt = 0x1f, cnt                 // compute the remaining cnt
+       mov.i   ar.lc = loopcnt
+;; }
+       TEXT_ALIGN(32)
+.l2:   // ------------------------------------ //  L2A:  store 32B in 2 cycles
+{ .mmb
+       stf8    [ptr1] = fvalue, 8
+       stf8    [ptr2] = fvalue, 8
+;; } { .mmb
+       stf8    [ptr1] = fvalue, 24
+       stf8    [ptr2] = fvalue, 24
+       br.cloop.dptk.many .l2
+;; }
+.store_words:
+{ .mib
+       cmp.gt  p_scr, p0 = 8, cnt              // just a few bytes left ?
+(p_scr)        br.cond.dpnt.many .move_bytes_from_alignment    // Branch
+;; }
+
+{ .mmi
+       stf8    [ptr1] = fvalue, 8              // store
+       cmp.le  p_y, p_n = 16, cnt
+       add     cnt = -8, cnt                   // subtract
+;; }
+{ .mmi
+(p_y)  stf8    [ptr1] = fvalue, 8              // store
+(p_y)  cmp.le.unc p_yy, p_nn = 16, cnt
+(p_y)  add     cnt = -8, cnt                   // subtract
+;; }
+{ .mmi                                         // store
+(p_yy) stf8    [ptr1] = fvalue, 8
+(p_yy) add     cnt = -8, cnt                   // subtract
+;; }
+
+.move_bytes_from_alignment:
+{ .mib
+       cmp.eq  p_scr, p0 = cnt, r0
+       tbit.nz.unc p_y, p0 = cnt, 2            // should we terminate with a st4 ?
+(p_scr)        br.cond.dpnt.few .restore_and_exit
+;; }
+{ .mib
+(p_y)  st4     [ptr1] = value,4
+       tbit.nz.unc p_yy, p0 = cnt, 1           // should we terminate with a st2 ?
+;; }
+{ .mib
+(p_yy) st2     [ptr1] = value,2
+       tbit.nz.unc p_y, p0 = cnt, 0            // should we terminate with a st1 ?
+;; }
+
+{ .mib
+(p_y)  st1     [ptr1] = value
+;; }
+.restore_and_exit:
+{ .mib
+       nop.m   0
+       mov.i   ar.lc = save_lc
+       br.ret.sptk.many rp
+;; }
+
+.move_bytes_unaligned:
+{ .mmi
+       .pred.rel "mutex",p_y, p_n
+       .pred.rel "mutex",p_yy, p_nn
+(p_n)  cmp.le  p_yy, p_nn = 4, cnt
+(p_y)  cmp.le  p_yy, p_nn = 5, cnt
+(p_n)  add     ptr2 = 2, ptr1
+} { .mmi
+(p_y)  add     ptr2 = 3, ptr1
+(p_y)  st1     [ptr1] = value, 1               // fill 1 (odd-aligned) byte [15, 14 (or less) left]
+(p_y)  add     cnt = -1, cnt
+;; }
+{ .mmi
+(p_yy) cmp.le.unc p_y, p0 = 8, cnt
+       add     ptr3 = ptr1, cnt                // prepare last store
+       mov.i   ar.lc = save_lc
+} { .mmi
+(p_yy) st2     [ptr1] = value, 4               // fill 2 (aligned) bytes
+(p_yy) st2     [ptr2] = value, 4               // fill 2 (aligned) bytes [11, 10 (or less) left]
+(p_yy) add     cnt = -4, cnt
+;; }
+{ .mmi
+(p_y)  cmp.le.unc p_yy, p0 = 8, cnt
+       add     ptr3 = -1, ptr3                 // last store
+       tbit.nz p_scr, p0 = cnt, 1              // will there be a st2 at the end ?
+} { .mmi
+(p_y)  st2     [ptr1] = value, 4               // fill 2 (aligned) bytes
+(p_y)  st2     [ptr2] = value, 4               // fill 2 (aligned) bytes [7, 6 (or less) left]
+(p_y)  add     cnt = -4, cnt
+;; }
+{ .mmi
+(p_yy) st2     [ptr1] = value, 4               // fill 2 (aligned) bytes
+(p_yy) st2     [ptr2] = value, 4               // fill 2 (aligned) bytes [3, 2 (or less) left]
+       tbit.nz p_y, p0 = cnt, 0                // will there be a st1 at the end ?
+} { .mmi
+(p_yy) add     cnt = -4, cnt
+;; }
+{ .mmb
+(p_scr)        st2     [ptr1] = value                  // fill 2 (aligned) bytes
+(p_y)  st1     [ptr3] = value                  // fill last byte (using ptr3)
+       br.ret.sptk.many rp
+}
+END(memset)
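The header comment above lays out the staging; a simplified C model of that flow (illustrative only: memset_sketch is not part of the patch, and the prefetching, predication and the stf.spill fast path for value == 0 are omitted) is:

    #include <stddef.h>
    #include <stdint.h>

    static void *memset_sketch(void *dest, int value, size_t cnt)
    {
            unsigned char *p = dest;
            uint64_t v = (unsigned char)value;

            v |= v << 8;  v |= v << 16;  v |= v << 32;       /* 8 identical bytes     */
            while (cnt && ((uintptr_t)p & 15)) {             /* bytes up to 16B align  */
                    *p++ = (unsigned char)value; cnt--;
            }
            for (; cnt >= 128; cnt -= 128)                   /* 128B cache lines       */
                    for (int i = 0; i < 16; i++) { *(uint64_t *)p = v; p += 8; }
            for (; cnt >= 32; cnt -= 32)                     /* 32B chunks             */
                    for (int i = 0; i < 4; i++)  { *(uint64_t *)p = v; p += 8; }
            for (; cnt >= 8; cnt -= 8) { *(uint64_t *)p = v; p += 8; }  /* 8B words    */
            while (cnt--) *p++ = (unsigned char)value;       /* trailing bytes         */
            return dest;
    }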
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/strlen.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/strlen.S  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,192 @@
+/*
+ *
+ * Optimized version of the standard strlen() function
+ *
+ *
+ * Inputs:
+ *     in0     address of string
+ *
+ * Outputs:
+ *     ret0    the number of characters in the string (0 if empty string)
+ *     does not count the \0
+ *
+ * Copyright (C) 1999, 2001 Hewlett-Packard Co
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *
+ * 09/24/99 S.Eranian add speculation recovery code
+ */
+
+#include <asm/asmmacro.h>
+
+//
+//
+// This is an enhanced version of the basic strlen. it includes a combination
+// of compute zero index (czx), parallel comparisons, speculative loads and
+// loop unroll using rotating registers.
+//
+// General Ideas about the algorithm:
+//       The goal is to look at the string in chunks of 8 bytes.
+//       so we need to do a few extra checks at the beginning because the
+//       string may not be 8-byte aligned. In this case we load the 8byte
+//       quantity which includes the start of the string and mask the unused
+//       bytes with 0xff to avoid confusing czx.
+//       We use speculative loads and software pipelining to hide memory
+//       latency and do read ahead safely. This way we defer any exception.
+//
+//       Because we don't want the kernel to be relying on particular
+//       settings of the DCR register, we provide recovery code in case
+//       speculation fails. The recovery code is going to "redo" the work using
+//       only normal loads. If we still get a fault then we generate a
+//       kernel panic. Otherwise we return the strlen as usual.
+//
+//       The fact that speculation may fail can be caused, for instance, by
+//       the DCR.dm bit being set. In this case TLB misses are deferred, i.e.,
+//       a NaT bit will be set if the translation is not present. The normal
+//       load, on the other hand, will cause the translation to be inserted
+//       if the mapping exists.
+//
+//       It should be noted that we execute recovery code only when we need
+//       to use the data that has been speculatively loaded: we don't execute
+//       recovery code on pure read ahead data.
+//
+// Remarks:
+//     - the cmp r0,r0 is used as a fast way to initialize a predicate
+//       register to 1. This is required to make sure that we get the parallel
+//       compare correct.
+//
+//     - we don't use the epilogue counter to exit the loop but we need to set
+//       it to zero beforehand.
+//
+//     - after the loop we must test for Nat values because neither the
+//       czx nor cmp instruction raise a NaT consumption fault. We must be
+//       careful not to look too far for a Nat for which we don't care.
+//       For instance we don't need to look at a NaT in val2 if the zero byte
+//       was in val1.
+//
+//     - Clearly performance tuning is required.
+//
+//
+//
+#define saved_pfs      r11
+#define        tmp             r10
+#define base           r16
+#define orig           r17
+#define saved_pr       r18
+#define src            r19
+#define mask           r20
+#define val            r21
+#define val1           r22
+#define val2           r23
+
+GLOBAL_ENTRY(strlen)
+       .prologue
+       .save ar.pfs, saved_pfs
+       alloc saved_pfs=ar.pfs,11,0,0,8 // rotating must be multiple of 8
+
+       .rotr v[2], w[2]        // declares our 4 aliases
+
+       extr.u tmp=in0,0,3      // tmp=least significant 3 bits
+       mov orig=in0            // keep track of initial byte address
+       dep src=0,in0,0,3       // src=8byte-aligned in0 address
+       .save pr, saved_pr
+       mov saved_pr=pr         // preserve predicates (rotation)
+       ;;
+
+       .body
+
+       ld8 v[1]=[src],8        // must not speculate: can fail here
+       shl tmp=tmp,3           // multiply by 8bits/byte
+       mov mask=-1             // our mask
+       ;;
+       ld8.s w[1]=[src],8      // speculatively load next
+       cmp.eq p6,p0=r0,r0      // sets p6 to true for cmp.and
+       sub tmp=64,tmp          // how many bits to shift our mask on the right
+       ;;
+       shr.u   mask=mask,tmp   // zero enough bits to hold v[1] valuable part
+       mov ar.ec=r0            // clear epilogue counter (saved in ar.pfs)
+       ;;
+       add base=-16,src        // keep track of aligned base
+       or v[1]=v[1],mask       // now we have a safe initial byte pattern
+       ;;
+1:
+       ld8.s v[0]=[src],8      // speculatively load next
+       czx1.r val1=v[1]        // search 0 byte from right
+       czx1.r val2=w[1]        // search 0 byte from right following 8bytes
+       ;;
+       ld8.s w[0]=[src],8      // speculatively load next to next
+       cmp.eq.and p6,p0=8,val1 // p6 = p6 and val1==8
+       cmp.eq.and p6,p0=8,val2 // p6 = p6 and mask==8
+(p6)   br.wtop.dptk 1b         // loop until p6 == 0
+       ;;
+       //
+       // We must try the recovery code iff
+       // val1_is_nat || (val1==8 && val2_is_nat)
+       //
+       // XXX Fixme
+       //      - there must be a better way of doing the test
+       //
+       cmp.eq  p8,p9=8,val1    // p6 = val1 had zero (disambiguate)
+       tnat.nz p6,p7=val1      // test NaT on val1
+(p6)   br.cond.spnt .recover   // jump to recovery if val1 is NaT
+       ;;
+       //
+       // if we come here p7 is true, i.e., initialized for // cmp
+       //
+       cmp.eq.and  p7,p0=8,val1// val1==8?
+       tnat.nz.and p7,p0=val2  // test NaT if val2
+(p7)   br.cond.spnt .recover   // jump to recovery if val2 is NaT
+       ;;
+(p8)   mov val1=val2           // the other test got us out of the loop
+(p8)   adds src=-16,src        // correct position when 3 ahead
+(p9)   adds src=-24,src        // correct position when 4 ahead
+       ;;
+       sub ret0=src,orig       // distance from base
+       sub tmp=8,val1          // which byte in word
+       mov pr=saved_pr,0xffffffffffff0000
+       ;;
+       sub ret0=ret0,tmp       // adjust
+       mov ar.pfs=saved_pfs    // because of ar.ec, restore no matter what
+       br.ret.sptk.many rp     // end of normal execution
+
+       //
+       // Outlined recovery code when speculation failed
+       //
+       // This time we don't use speculation and rely on the normal exception
+       // mechanism. that's why the loop is not as good as the previous one
+       // because read ahead is not possible
+       //
+       // IMPORTANT:
+       // Please note that in the case of strlen() as opposed to strlen_user()
+       // we don't use the exception mechanism, as this function is not
+       // supposed to fail. If that happens it means we have a bug and the
+       // code will cause of kernel fault.
+       //
+       // XXX Fixme
+       //      - today we restart from the beginning of the string instead
+       //        of trying to continue where we left off.
+       //
+.recover:
+       ld8 val=[base],8        // will fail if unrecoverable fault
+       ;;
+       or val=val,mask         // remask first bytes
+       cmp.eq p0,p6=r0,r0      // nullify first ld8 in loop
+       ;;
+       //
+       // ar.ec is still zero here
+       //
+2:
+(p6)   ld8 val=[base],8        // will fail if unrecoverable fault
+       ;;
+       czx1.r val1=val         // search 0 byte from right
+       ;;
+       cmp.eq p6,p0=8,val1     // val1==8 ?
+(p6)   br.wtop.dptk 2b         // loop until p6 == 0
+       ;;                      // (avoid WAW on p63)
+       sub ret0=base,orig      // distance from base
+       sub tmp=8,val1
+       mov pr=saved_pr,0xffffffffffff0000
+       ;;
+       sub ret0=ret0,tmp       // length=now - back -1
+       mov ar.pfs=saved_pfs    // because of ar.ec, restore no matter what
+       br.ret.sptk.many rp     // end of successful recovery code
+END(strlen)
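Stripped of speculation, register rotation and the recovery path, the czx-based scan above amounts to the following C sketch (illustrative only; first_zero_byte stands in for what czx1.r roughly computes on little-endian data):

    #include <stddef.h>
    #include <stdint.h>

    static int first_zero_byte(uint64_t w)              /* returns 8 if no zero byte */
    {
            for (int i = 0; i < 8; i++)
                    if (((w >> (8 * i)) & 0xff) == 0)
                            return i;
            return 8;
    }

    static size_t strlen_sketch(const char *s)
    {
            uintptr_t addr = (uintptr_t)s & ~(uintptr_t)7;  /* 8-byte aligned base */
            uint64_t w = *(const uint64_t *)addr;           /* reads whole aligned words */
            int skip = (int)((uintptr_t)s & 7);

            /* mask the bytes before the start of the string with 0xff so they
             * cannot look like a terminator (the "safe initial byte pattern"). */
            for (int i = 0; i < skip; i++)
                    w |= (uint64_t)0xff << (8 * i);

            for (;;) {
                    int z = first_zero_byte(w);
                    if (z != 8)
                            return (addr + z) - (uintptr_t)s;
                    addr += 8;
                    w = *(const uint64_t *)addr;
            }
    }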
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/strlen_user.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/strlen_user.S     Tue Aug  2 23:59:09 2005
@@ -0,0 +1,198 @@
+/*
+ * Optimized version of the strlen_user() function
+ *
+ * Inputs:
+ *     in0     address of buffer
+ *
+ * Outputs:
+ *     ret0    0 in case of fault, strlen(buffer)+1 otherwise
+ *
+ * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *
+ * 01/19/99 S.Eranian heavily enhanced version (see details below)
+ * 09/24/99 S.Eranian added speculation recovery code
+ */
+
+#include <asm/asmmacro.h>
+
+//
+// int strlen_user(char *)
+// ------------------------
+// Returns:
+//     - length of string + 1
+//     - 0 in case an exception is raised
+//
+// This is an enhanced version of the basic strlen_user. it includes a
+// combination of compute zero index (czx), parallel comparisons, speculative
+// loads and loop unroll using rotating registers.
+//
+// General Ideas about the algorithm:
+//       The goal is to look at the string in chunks of 8 bytes.
+//       so we need to do a few extra checks at the beginning because the
+//       string may not be 8-byte aligned. In this case we load the 8byte
+//       quantity which includes the start of the string and mask the unused
+//       bytes with 0xff to avoid confusing czx.
+//       We use speculative loads and software pipelining to hide memory
+//       latency and do read ahead safely. This way we defer any exception.
+//
+//       Because we don't want the kernel to be relying on particular
+//       settings of the DCR register, we provide recovery code in case
+//       speculation fails. The recovery code is going to "redo" the work using
+//       only normal loads. If we still get a fault then we return an
+//       error (ret0=0). Otherwise we return the strlen+1 as usual.
+//       The fact that speculation may fail can be caused, for instance, by
+//       the DCR.dm bit being set. In this case TLB misses are deferred, i.e.,
+//       a NaT bit will be set if the translation is not present. The normal
+//       load, on the other hand, will cause the translation to be inserted
+//       if the mapping exists.
+//
+//       It should be noted that we execute recovery code only when we need
+//       to use the data that has been speculatively loaded: we don't execute
+//       recovery code on pure read ahead data.
+//
+// Remarks:
+//     - the cmp r0,r0 is used as a fast way to initialize a predicate
+//       register to 1. This is required to make sure that we get the parallel
+//       compare correct.
+//
+//     - we don't use the epilogue counter to exit the loop but we need to set
+//       it to zero beforehand.
+//
+//     - after the loop we must test for Nat values because neither the
+//       czx nor cmp instruction raise a NaT consumption fault. We must be
+//       careful not to look too far for a Nat for which we don't care.
+//       For instance we don't need to look at a NaT in val2 if the zero byte
+//       was in val1.
+//
+//     - Clearly performance tuning is required.
+//
+
+#define saved_pfs      r11
+#define        tmp             r10
+#define base           r16
+#define orig           r17
+#define saved_pr       r18
+#define src            r19
+#define mask           r20
+#define val            r21
+#define val1           r22
+#define val2           r23
+
+GLOBAL_ENTRY(__strlen_user)
+       .prologue
+       .save ar.pfs, saved_pfs
+       alloc saved_pfs=ar.pfs,11,0,0,8
+
+       .rotr v[2], w[2]        // declares our 4 aliases
+
+       extr.u tmp=in0,0,3      // tmp=least significant 3 bits
+       mov orig=in0            // keep track of initial byte address
+       dep src=0,in0,0,3       // src=8byte-aligned in0 address
+       .save pr, saved_pr
+       mov saved_pr=pr         // preserve predicates (rotation)
+       ;;
+
+       .body
+
+       ld8.s v[1]=[src],8      // load the initial 8bytes (must speculate)
+       shl tmp=tmp,3           // multiply by 8bits/byte
+       mov mask=-1             // our mask
+       ;;
+       ld8.s w[1]=[src],8      // load next 8 bytes in 2nd pipeline
+       cmp.eq p6,p0=r0,r0      // sets p6 (required because of // cmp.and)
+       sub tmp=64,tmp          // how many bits to shift our mask on the right
+       ;;
+       shr.u   mask=mask,tmp   // zero enough bits to hold v[1] valuable part
+       mov ar.ec=r0            // clear epilogue counter (saved in ar.pfs)
+       ;;
+       add base=-16,src        // keep track of aligned base
+       chk.s v[1], .recover    // if already NaT, then directly skip to recover
+       or v[1]=v[1],mask       // now we have a safe initial byte pattern
+       ;;
+1:
+       ld8.s v[0]=[src],8      // speculatively load next
+       czx1.r val1=v[1]        // search 0 byte from right
+       czx1.r val2=w[1]        // search 0 byte from right following 8bytes
+       ;;
+       ld8.s w[0]=[src],8      // speculatively load next to next
+       cmp.eq.and p6,p0=8,val1 // p6 = p6 and val1==8
+       cmp.eq.and p6,p0=8,val2 // p6 = p6 and mask==8
+(p6)   br.wtop.dptk.few 1b     // loop until p6 == 0
+       ;;
+       //
+       // We must try the recovery code iff
+       // val1_is_nat || (val1==8 && val2_is_nat)
+       //
+       // XXX Fixme
+       //      - there must be a better way of doing the test
+       //
+       cmp.eq  p8,p9=8,val1    // p6 = val1 had zero (disambiguate)
+       tnat.nz p6,p7=val1      // test NaT on val1
+(p6)   br.cond.spnt .recover   // jump to recovery if val1 is NaT
+       ;;
+       //
+       // if we come here p7 is true, i.e., initialized for // cmp
+       //
+       cmp.eq.and  p7,p0=8,val1// val1==8?
+       tnat.nz.and p7,p0=val2  // test NaT if val2
+(p7)   br.cond.spnt .recover   // jump to recovery if val2 is NaT
+       ;;
+(p8)   mov val1=val2           // val2 contains the value
+(p8)   adds src=-16,src        // correct position when 3 ahead
+(p9)   adds src=-24,src        // correct position when 4 ahead
+       ;;
+       sub ret0=src,orig       // distance from origin
+       sub tmp=7,val1          // 7=8-1 because this strlen returns strlen+1
+       mov pr=saved_pr,0xffffffffffff0000
+       ;;
+       sub ret0=ret0,tmp       // length=now - back -1
+       mov ar.pfs=saved_pfs    // because of ar.ec, restore no matter what
+       br.ret.sptk.many rp     // end of normal execution
+
+       //
+       // Outlined recovery code when speculation failed
+       //
+       // This time we don't use speculation and rely on the normal exception
+       // mechanism. that's why the loop is not as good as the previous one
+       // because read ahead is not possible
+       //
+       // XXX Fixme
+       //      - today we restart from the beginning of the string instead
+       //        of trying to continue where we left off.
+       //
+.recover:
+       EX(.Lexit1, ld8 val=[base],8)   // load the initial bytes
+       ;;
+       or val=val,mask                 // remask first bytes
+       cmp.eq p0,p6=r0,r0              // nullify first ld8 in loop
+       ;;
+       //
+       // ar.ec is still zero here
+       //
+2:
+       EX(.Lexit1, (p6) ld8 val=[base],8)
+       ;;
+       czx1.r val1=val         // search 0 byte from right
+       ;;
+       cmp.eq p6,p0=8,val1     // val1==8 ?
+(p6)   br.wtop.dptk.few 2b     // loop until p6 == 0
+       ;;
+       sub ret0=base,orig      // distance from base
+       sub tmp=7,val1          // 7=8-1 because this strlen returns strlen+1
+       mov pr=saved_pr,0xffffffffffff0000
+       ;;
+       sub ret0=ret0,tmp       // length=now - back -1
+       mov ar.pfs=saved_pfs    // because of ar.ec, restore no matter what
+       br.ret.sptk.many rp     // end of successful recovery code
+
+       //
+       // We failed even on the normal load (called from exception handler)
+       //
+.Lexit1:
+       mov ret0=0
+       mov pr=saved_pr,0xffffffffffff0000
+       mov ar.pfs=saved_pfs    // because of ar.ec, restore no matter what
+       br.ret.sptk.many rp
+END(__strlen_user)
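Behaviourally, __strlen_user differs from strlen above mainly in its return convention; roughly, in C (illustrative only; in the real routine every byte access may fault, and a fault makes the result 0 via the EX()/chk.s machinery):

    #include <stddef.h>

    static size_t strlen_user_sketch(const char *ubuf)
    {
            size_t len = 0;

            while (ubuf[len] != '\0')   /* each access may fault in reality */
                    len++;
            return len + 1;             /* 0 is returned on a fault instead */
    }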
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/strncpy_from_user.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/strncpy_from_user.S       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,44 @@
+/*
+ * Just like strncpy() except that if a fault occurs during copying,
+ * -EFAULT is returned.
+ *
+ * Inputs:
+ *     in0:    address of destination buffer
+ *     in1:    address of string to be copied
+ *     in2:    length of buffer in bytes
+ * Outputs:
+ *     r8:     -EFAULT in case of fault or number of bytes copied if no fault
+ *
+ * Copyright (C) 1998-2001 Hewlett-Packard Co
+ * Copyright (C) 1998-2001 David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *
+ * 00/03/06 D. Mosberger Fixed to return proper return value (bug found
+ *                      by Andreas Schwab <schwab@xxxxxxx>).
+ */
+
+#include <asm/asmmacro.h>
+
+GLOBAL_ENTRY(__strncpy_from_user)
+       alloc r2=ar.pfs,3,0,0,0
+       mov r8=0
+       mov r9=in1
+       ;;
+       add r10=in1,in2
+       cmp.eq p6,p0=r0,in2
+(p6)   br.ret.spnt.many rp
+
+       // XXX braindead copy loop---this needs to be optimized
+.Loop1:
+       EX(.Lexit, ld1 r8=[in1],1)
+       ;;
+       EX(.Lexit, st1 [in0]=r8,1)
+       cmp.ne p6,p7=r8,r0
+       ;;
+(p6)   cmp.ne.unc p8,p0=in1,r10
+(p8)   br.cond.dpnt.few .Loop1
+       ;;
+(p6)   mov r8=in2              // buffer filled up---return buffer length
+(p7)   sub r8=in1,r9,1         // return string length (excluding NUL character)
+[.Lexit:]
+       br.ret.sptk.many rp
+END(__strncpy_from_user)
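Ignoring the fault handling (the EX() annotations route faulting accesses to .Lexit), the copy loop above behaves roughly like this C sketch (illustrative only):

    #include <stddef.h>

    /* Copies at most len bytes, stopping after a NUL.  Returns the string
     * length (excluding the NUL) if a NUL was copied, or len if the buffer
     * filled up first; faults are reported through the exception table in
     * the real routine (see the header comment above). */
    static long strncpy_from_user_sketch(char *to, const char *from, size_t len)
    {
            size_t i;

            if (len == 0)
                    return 0;
            for (i = 0; i < len; i++) {
                    char c = from[i];
                    to[i] = c;
                    if (c == '\0')
                            return (long)i;
            }
            return (long)len;
    }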
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/strnlen_user.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/strnlen_user.S    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,45 @@
+/*
+ * Returns 0 if exception before NUL or reaching the supplied limit (N),
+ * a value greater than N if the string is longer than the limit, else
+ * strlen(buffer) + 1.
+ *
+ * Inputs:
+ *     in0:    address of buffer
+ *     in1:    string length limit N
+ * Outputs:
+ *     r8:     0 in case of fault, strlen(buffer)+1 otherwise
+ *
+ * Copyright (C) 1999, 2001 David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <asm/asmmacro.h>
+
+GLOBAL_ENTRY(__strnlen_user)
+       .prologue
+       alloc r2=ar.pfs,2,0,0,0
+       .save ar.lc, r16
+       mov r16=ar.lc                   // preserve ar.lc
+
+       .body
+
+       add r3=-1,in1
+       ;;
+       mov ar.lc=r3
+       mov r9=0
+       ;;
+       // XXX braindead strlen loop---this needs to be optimized
+.Loop1:
+       EXCLR(.Lexit, ld1 r8=[in0],1)
+       add r9=1,r9
+       ;;
+       cmp.eq p6,p0=r8,r0
+(p6)   br.cond.dpnt .Lexit
+       br.cloop.dptk.few .Loop1
+
+       add r9=1,in1                    // NUL not found---return N+1
+       ;;
+.Lexit:
+       mov r8=r9
+       mov ar.lc=r16                   // restore ar.lc
+       br.ret.sptk.many rp
+END(__strnlen_user)
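The loop above is the straightforward bounded scan described in the header; as a C sketch (illustrative only, with the fault case omitted):

    #include <stddef.h>

    /* Returns strlen(buf) + 1 if a NUL occurs within the first n bytes,
     * n + 1 otherwise; the real routine returns 0 if a byte access faults. */
    static size_t strnlen_user_sketch(const char *buf, size_t n)
    {
            size_t i;

            for (i = 0; i < n; i++)
                    if (buf[i] == '\0')
                            return i + 1;
            return n + 1;
    }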
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/lib/xor.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/lib/xor.S     Tue Aug  2 23:59:09 2005
@@ -0,0 +1,184 @@
+/*
+ * arch/ia64/lib/xor.S
+ *
+ * Optimized RAID-5 checksumming functions for IA-64.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <asm/asmmacro.h>
+
+GLOBAL_ENTRY(xor_ia64_2)
+       .prologue
+       .fframe 0
+       .save ar.pfs, r31
+       alloc r31 = ar.pfs, 3, 0, 13, 16
+       .save ar.lc, r30
+       mov r30 = ar.lc
+       .save pr, r29
+       mov r29 = pr
+       ;;
+       .body
+       mov r8 = in1
+       mov ar.ec = 6 + 2
+       shr in0 = in0, 3
+       ;;
+       adds in0 = -1, in0
+       mov r16 = in1
+       mov r17 = in2
+       ;;
+       mov ar.lc = in0
+       mov pr.rot = 1 << 16
+       ;;
+       .rotr s1[6+1], s2[6+1], d[2]
+       .rotp p[6+2]
+0:
+(p[0]) ld8.nta s1[0] = [r16], 8
+(p[0]) ld8.nta s2[0] = [r17], 8
+(p[6]) xor d[0] = s1[6], s2[6]
+(p[6+1])st8.nta [r8] = d[1], 8
+       nop.f 0
+       br.ctop.dptk.few 0b
+       ;;
+       mov ar.lc = r30
+       mov pr = r29, -1
+       br.ret.sptk.few rp
+END(xor_ia64_2)
+
+GLOBAL_ENTRY(xor_ia64_3)
+       .prologue
+       .fframe 0
+       .save ar.pfs, r31
+       alloc r31 = ar.pfs, 4, 0, 20, 24
+       .save ar.lc, r30
+       mov r30 = ar.lc
+       .save pr, r29
+       mov r29 = pr
+       ;;
+       .body
+       mov r8 = in1
+       mov ar.ec = 6 + 2
+       shr in0 = in0, 3
+       ;;
+       adds in0 = -1, in0
+       mov r16 = in1
+       mov r17 = in2
+       ;;
+       mov r18 = in3
+       mov ar.lc = in0
+       mov pr.rot = 1 << 16
+       ;;
+       .rotr s1[6+1], s2[6+1], s3[6+1], d[2]
+       .rotp p[6+2]
+0:
+(p[0]) ld8.nta s1[0] = [r16], 8
+(p[0]) ld8.nta s2[0] = [r17], 8
+(p[6]) xor d[0] = s1[6], s2[6]
+       ;;
+(p[0]) ld8.nta s3[0] = [r18], 8
+(p[6+1])st8.nta [r8] = d[1], 8
+(p[6]) xor d[0] = d[0], s3[6]
+       br.ctop.dptk.few 0b
+       ;;
+       mov ar.lc = r30
+       mov pr = r29, -1
+       br.ret.sptk.few rp
+END(xor_ia64_3)
+
+GLOBAL_ENTRY(xor_ia64_4)
+       .prologue
+       .fframe 0
+       .save ar.pfs, r31
+       alloc r31 = ar.pfs, 5, 0, 27, 32
+       .save ar.lc, r30
+       mov r30 = ar.lc
+       .save pr, r29
+       mov r29 = pr
+       ;;
+       .body
+       mov r8 = in1
+       mov ar.ec = 6 + 2
+       shr in0 = in0, 3
+       ;;
+       adds in0 = -1, in0
+       mov r16 = in1
+       mov r17 = in2
+       ;;
+       mov r18 = in3
+       mov ar.lc = in0
+       mov pr.rot = 1 << 16
+       mov r19 = in4
+       ;;
+       .rotr s1[6+1], s2[6+1], s3[6+1], s4[6+1], d[2]
+       .rotp p[6+2]
+0:
+(p[0]) ld8.nta s1[0] = [r16], 8
+(p[0]) ld8.nta s2[0] = [r17], 8
+(p[6]) xor d[0] = s1[6], s2[6]
+(p[0]) ld8.nta s3[0] = [r18], 8
+(p[0]) ld8.nta s4[0] = [r19], 8
+(p[6]) xor r20 = s3[6], s4[6]
+       ;;
+(p[6+1])st8.nta [r8] = d[1], 8
+(p[6]) xor d[0] = d[0], r20
+       br.ctop.dptk.few 0b
+       ;;
+       mov ar.lc = r30
+       mov pr = r29, -1
+       br.ret.sptk.few rp
+END(xor_ia64_4)
+
+GLOBAL_ENTRY(xor_ia64_5)
+       .prologue
+       .fframe 0
+       .save ar.pfs, r31
+       alloc r31 = ar.pfs, 6, 0, 34, 40
+       .save ar.lc, r30
+       mov r30 = ar.lc
+       .save pr, r29
+       mov r29 = pr
+       ;;
+       .body
+       mov r8 = in1
+       mov ar.ec = 6 + 2
+       shr in0 = in0, 3
+       ;;
+       adds in0 = -1, in0
+       mov r16 = in1
+       mov r17 = in2
+       ;;
+       mov r18 = in3
+       mov ar.lc = in0
+       mov pr.rot = 1 << 16
+       mov r19 = in4
+       mov r20 = in5
+       ;;
+       .rotr s1[6+1], s2[6+1], s3[6+1], s4[6+1], s5[6+1], d[2]
+       .rotp p[6+2]
+0:
+(p[0]) ld8.nta s1[0] = [r16], 8
+(p[0]) ld8.nta s2[0] = [r17], 8
+(p[6]) xor d[0] = s1[6], s2[6]
+(p[0]) ld8.nta s3[0] = [r18], 8
+(p[0]) ld8.nta s4[0] = [r19], 8
+(p[6]) xor r21 = s3[6], s4[6]
+       ;;
+(p[0]) ld8.nta s5[0] = [r20], 8
+(p[6+1])st8.nta [r8] = d[1], 8
+(p[6]) xor d[0] = d[0], r21
+       ;;
+(p[6])   xor d[0] = d[0], s5[6]
+       nop.f 0
+       br.ctop.dptk.few 0b
+       ;;
+       mov ar.lc = r30
+       mov pr = r29, -1
+       br.ret.sptk.few rp
+END(xor_ia64_5)
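Functionally, each xor_ia64_N routine XORs N-1 further source blocks into the first block, one 64-bit word at a time; the two-block case corresponds roughly to the C sketch below (illustrative only; the software pipelining, .nta hints and rotating registers are what the assembly adds):

    #include <stddef.h>

    static void xor_2_sketch(unsigned long bytes, unsigned long *p1,
                             const unsigned long *p2)
    {
            size_t words = bytes / sizeof(*p1);    /* 8-byte words on ia64 */

            for (size_t i = 0; i < words; i++)
                    p1[i] ^= p2[i];
    }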
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/linuxextable.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/linuxextable.c        Tue Aug  2 23:59:09 2005
@@ -0,0 +1,67 @@
+/* Rewritten by Rusty Russell, on the backs of many others...
+   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*/
+#include <linux/module.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+#include <asm/sections.h>
+
+extern struct exception_table_entry __start___ex_table[];
+extern struct exception_table_entry __stop___ex_table[];
+
+/* Sort the kernel's built-in exception table */
+void __init sort_main_extable(void)
+{
+       sort_extable(__start___ex_table, __stop___ex_table);
+}
+
+/* Given an address, look for it in the exception tables. */
+const struct exception_table_entry *search_exception_tables(unsigned long addr)
+{
+       const struct exception_table_entry *e;
+
+       e = search_extable(__start___ex_table, __stop___ex_table-1, addr);
+       if (!e)
+               e = search_module_extables(addr);
+       return e;
+}
+
+static int core_kernel_text(unsigned long addr)
+{
+       if (addr >= (unsigned long)_stext &&
+           addr <= (unsigned long)_etext)
+               return 1;
+
+       if (addr >= (unsigned long)_sinittext &&
+           addr <= (unsigned long)_einittext)
+               return 1;
+       return 0;
+}
+
+int __kernel_text_address(unsigned long addr)
+{
+       if (core_kernel_text(addr))
+               return 1;
+       return __module_text_address(addr) != NULL;
+}
+
+int kernel_text_address(unsigned long addr)
+{
+       if (core_kernel_text(addr))
+               return 1;
+       return module_text_address(addr) != NULL;
+}
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/machvec.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/machvec.c     Tue Aug  2 23:59:09 2005
@@ -0,0 +1,70 @@
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <asm/machvec.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_IA64_GENERIC
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <asm/page.h>
+
+struct ia64_machine_vector ia64_mv;
+EXPORT_SYMBOL(ia64_mv);
+
+static struct ia64_machine_vector *
+lookup_machvec (const char *name)
+{
+       extern struct ia64_machine_vector machvec_start[];
+       extern struct ia64_machine_vector machvec_end[];
+       struct ia64_machine_vector *mv;
+
+       for (mv = machvec_start; mv < machvec_end; ++mv)
+               if (strcmp (mv->name, name) == 0)
+                       return mv;
+
+       return 0;
+}
+
+void
+machvec_init (const char *name)
+{
+       struct ia64_machine_vector *mv;
+
+       mv = lookup_machvec(name);
+       if (!mv) {
+               panic("generic kernel failed to find machine vector for platform %s!", name);
+       }
+       ia64_mv = *mv;
+       printk(KERN_INFO "booting generic kernel on platform %s\n", name);
+}
+
+#endif /* CONFIG_IA64_GENERIC */
+
+void
+machvec_setup (char **arg)
+{
+}
+EXPORT_SYMBOL(machvec_setup);
+
+void
+machvec_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
+{
+}
+EXPORT_SYMBOL(machvec_timer_interrupt);
+
+void
+machvec_dma_sync_single (struct device *hwdev, dma_addr_t dma_handle, size_t size, int dir)
+{
+       mb();
+}
+EXPORT_SYMBOL(machvec_dma_sync_single);
+
+void
+machvec_dma_sync_sg (struct device *hwdev, struct scatterlist *sg, int n, int dir)
+{
+       mb();
+}
+EXPORT_SYMBOL(machvec_dma_sync_sg);
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/minstate.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/minstate.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,251 @@
+#include <linux/config.h>
+
+#include <asm/cache.h>
+
+#include "entry.h"
+
+/*
+ * For ivt.s we want to access the stack virtually so we don't have to disable translation
+ * on interrupts.
+ *
+ *  On entry:
+ *     r1:     pointer to current task (ar.k6)
+ */
+#define MINSTATE_START_SAVE_MIN_VIRT                                                           \
+(pUStk)        mov ar.rsc=0;           /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */     \
+       ;;                                                                                      \
+(pUStk)        mov.m r24=ar.rnat;                                                              \
+(pUStk)        addl r22=IA64_RBS_OFFSET,r1;                    /* compute base of RBS */       \
+(pKStk) mov r1=sp;                                     /* get sp  */                           \
+       ;;                                                                                      \
+(pUStk) lfetch.fault.excl.nt1 [r22];                                                           \
+(pUStk)        addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;   /* compute base of memory stack */      \
+(pUStk)        mov r23=ar.bspstore;                            /* save ar.bspstore */          \
+       ;;                                                                                      \
+(pUStk)        mov ar.bspstore=r22;                            /* switch to kernel RBS */      \
+(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;                 /* if in kernel mode, use sp (r12) */   \
+       ;;                                                                                      \
+(pUStk)        mov r18=ar.bsp;                                                                 \
+(pUStk)        mov ar.rsc=0x3;         /* set eager mode, pl 0, little-endian, loadrs=0 */     \
+
+#define MINSTATE_END_SAVE_MIN_VIRT                                                             \
+       bsw.1;                  /* switch back to bank 1 (must be last in insn group) */        \
+       ;;
+
+/*
+ * For mca_asm.S we want to access the stack physically since the state is saved before we
+ * go virtual and don't want to destroy the iip or ipsr.
+ */
+#define MINSTATE_START_SAVE_MIN_PHYS                                                           \
+(pKStk) mov r3=IA64_KR(PER_CPU_DATA);;                                                         \
+(pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;;                                                   \
+(pKStk) ld8 r3 = [r3];;                                                                        \
+(pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;;                                            \
+(pKStk) addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3;                                          \
+(pUStk)        mov ar.rsc=0;           /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */     \
+(pUStk)        addl r22=IA64_RBS_OFFSET,r1;            /* compute base of register backing store */    \
+       ;;                                                                                      \
+(pUStk)        mov r24=ar.rnat;                                                                \
+(pUStk)        addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;   /* compute base of memory stack */      \
+(pUStk)        mov r23=ar.bspstore;                            /* save ar.bspstore */          \
+(pUStk)        dep r22=-1,r22,61,3;                    /* compute kernel virtual addr of RBS */        \
+       ;;                                                                                      \
+(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;         /* if in kernel mode, use sp (r12) */           \
+(pUStk)        mov ar.bspstore=r22;                    /* switch to kernel RBS */              \
+       ;;                                                                                      \
+(pUStk)        mov r18=ar.bsp;                                                                 \
+(pUStk)        mov ar.rsc=0x3;         /* set eager mode, pl 0, little-endian, loadrs=0 */     \
+
+#define MINSTATE_END_SAVE_MIN_PHYS                                                             \
+       dep r12=-1,r12,61,3;            /* make sp a kernel virtual address */                  \
+       ;;
+
+#ifdef MINSTATE_VIRT
+# define MINSTATE_GET_CURRENT(reg)     mov reg=IA64_KR(CURRENT)
+# define MINSTATE_START_SAVE_MIN       MINSTATE_START_SAVE_MIN_VIRT
+# define MINSTATE_END_SAVE_MIN         MINSTATE_END_SAVE_MIN_VIRT
+#endif
+
+#ifdef MINSTATE_PHYS
+# define MINSTATE_GET_CURRENT(reg)     mov reg=IA64_KR(CURRENT);; tpa reg=reg
+# define MINSTATE_START_SAVE_MIN       MINSTATE_START_SAVE_MIN_PHYS
+# define MINSTATE_END_SAVE_MIN         MINSTATE_END_SAVE_MIN_PHYS
+#endif
+
+/*
+ * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
+ * the minimum state necessary that allows us to turn psr.ic back
+ * on.
+ *
+ * Assumed state upon entry:
+ *     psr.ic: off
+ *     r31:    contains saved predicates (pr)
+ *
+ * Upon exit, the state is as follows:
+ *     psr.ic: off
+ *      r2 = points to &pt_regs.r16
+ *      r8 = contents of ar.ccv
+ *      r9 = contents of ar.csd
+ *     r10 = contents of ar.ssd
+ *     r11 = FPSR_DEFAULT
+ *     r12 = kernel sp (kernel virtual address)
+ *     r13 = points to current task_struct (kernel virtual address)
+ *     p15 = TRUE if psr.i is set in cr.ipsr
+ *     predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
+ *             preserved
+ *
+ * Note that psr.ic is NOT turned on by this macro.  This is so that
+ * we can pass interruption state as arguments to a handler.
+ */
+#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)                                                       \
+       MINSTATE_GET_CURRENT(r16);      /* M (or M;;I) */                                       \
+       mov r27=ar.rsc;                 /* M */                                                 \
+       mov r20=r1;                     /* A */                                                 \
+       mov r25=ar.unat;                /* M */                                                 \
+       mov r29=cr.ipsr;                /* M */                                                 \
+       mov r26=ar.pfs;                 /* I */                                                 \
+       mov r28=cr.iip;                 /* M */                                                 \
+       mov r21=ar.fpsr;                /* M */                                                 \
+       COVER;                          /* B;; (or nothing) */                                  \
+       ;;                                                                                      \
+       adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16;                                         \
+       ;;                                                                                      \
+       ld1 r17=[r16];                          /* load current->thread.on_ustack flag */       \
+       st1 [r16]=r0;                           /* clear current->thread.on_ustack flag */      \
+       adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16                                          \
+       /* switch from user to kernel RBS: */                                                   \
+       ;;                                                                                      \
+       invala;                         /* M */                                                 \
+       SAVE_IFS;                                                                               \
+       cmp.eq pKStk,pUStk=r0,r17;              /* are we in kernel mode already? */            \
+       ;;                                                                                      \
+       MINSTATE_START_SAVE_MIN                                                                 \
+       adds r17=2*L1_CACHE_BYTES,r1;           /* really: biggest cache-line size */           \
+       adds r16=PT(CR_IPSR),r1;                                                                \
+       ;;                                                                                      \
+       lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES;                                             \
+       st8 [r16]=r29;          /* save cr.ipsr */                                              \
+       ;;                                                                                      \
+       lfetch.fault.excl.nt1 [r17];                                                            \
+       tbit.nz p15,p0=r29,IA64_PSR_I_BIT;                                                      \
+       mov r29=b0                                                                              \
+       ;;                                                                                      \
+       adds r16=PT(R8),r1;     /* initialize first base pointer */                             \
+       adds r17=PT(R9),r1;     /* initialize second base pointer */                            \
+(pKStk)        mov r18=r0;             /* make sure r18 isn't NaT */                           \
+       ;;                                                                                      \
+.mem.offset 0,0; st8.spill [r16]=r8,16;                                                        \
+.mem.offset 8,0; st8.spill [r17]=r9,16;                                                        \
+        ;;                                                                                     \
+.mem.offset 0,0; st8.spill [r16]=r10,24;                                                       \
+.mem.offset 8,0; st8.spill [r17]=r11,24;                                                       \
+        ;;                                                                                     \
+       st8 [r16]=r28,16;       /* save cr.iip */                                               \
+       st8 [r17]=r30,16;       /* save cr.ifs */                                               \
+(pUStk)        sub r18=r18,r22;        /* r18=RSE.ndirty*8 */                                  \
+       mov r8=ar.ccv;                                                                          \
+       mov r9=ar.csd;                                                                          \
+       mov r10=ar.ssd;                                                                         \
+       movl r11=FPSR_DEFAULT;   /* L-unit */                                                   \
+       ;;                                                                                      \
+       st8 [r16]=r25,16;       /* save ar.unat */                                              \
+       st8 [r17]=r26,16;       /* save ar.pfs */                                               \
+       shl r18=r18,16;         /* compute ar.rsc to be used for "loadrs" */                    \
+       ;;                                                                                      \
+       st8 [r16]=r27,16;       /* save ar.rsc */                                               \
+(pUStk)        st8 [r17]=r24,16;       /* save ar.rnat */                                      \
+(pKStk)        adds r17=16,r17;        /* skip over ar_rnat field */                           \
+       ;;                      /* avoid RAW on r16 & r17 */                                    \
+(pUStk)        st8 [r16]=r23,16;       /* save ar.bspstore */                                  \
+       st8 [r17]=r31,16;       /* save predicates */                                           \
+(pKStk)        adds r16=16,r16;        /* skip over ar_bspstore field */                       \
+       ;;                                                                                      \
+       st8 [r16]=r29,16;       /* save b0 */                                                   \
+       st8 [r17]=r18,16;       /* save ar.rsc value for "loadrs" */                            \
+       cmp.eq pNonSys,pSys=r0,r0       /* initialize pSys=0, pNonSys=1 */                      \
+       ;;                                                                                      \
+.mem.offset 0,0; st8.spill [r16]=r20,16;       /* save original r1 */                          \
+.mem.offset 8,0; st8.spill [r17]=r12,16;                                                       \
+       adds r12=-16,r1;        /* switch to kernel memory stack (with 16 bytes of scratch) */  \
+       ;;                                                                                      \
+.mem.offset 0,0; st8.spill [r16]=r13,16;                                                       \
+.mem.offset 8,0; st8.spill [r17]=r21,16;       /* save ar.fpsr */                              \
+       mov r13=IA64_KR(CURRENT);       /* establish `current' */                               \
+       ;;                                                                                      \
+.mem.offset 0,0; st8.spill [r16]=r15,16;                                                       \
+.mem.offset 8,0; st8.spill [r17]=r14,16;                                                       \
+       ;;                                                                                      \
+.mem.offset 0,0; st8.spill [r16]=r2,16;                                                        \
+.mem.offset 8,0; st8.spill [r17]=r3,16;                                                        \
+       adds r2=IA64_PT_REGS_R16_OFFSET,r1;                                                     \
+       ;;                                                                                      \
+       EXTRA;                                                                                  \
+       movl r1=__gp;           /* establish kernel global pointer */                           \
+       ;;                                                                                      \
+       MINSTATE_END_SAVE_MIN
+
+/*
+ * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
+ *
+ * Assumed state upon entry:
+ *     psr.ic: on
+ *     r2:     points to &pt_regs.r16
+ *     r3:     points to &pt_regs.r17
+ *     r8:     contents of ar.ccv
+ *     r9:     contents of ar.csd
+ *     r10:    contents of ar.ssd
+ *     r11:    FPSR_DEFAULT
+ *
+ * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
+ */
+#define SAVE_REST                              \
+.mem.offset 0,0; st8.spill [r2]=r16,16;                \
+.mem.offset 8,0; st8.spill [r3]=r17,16;                \
+       ;;                                      \
+.mem.offset 0,0; st8.spill [r2]=r18,16;                \
+.mem.offset 8,0; st8.spill [r3]=r19,16;                \
+       ;;                                      \
+.mem.offset 0,0; st8.spill [r2]=r20,16;                \
+.mem.offset 8,0; st8.spill [r3]=r21,16;                \
+       mov r18=b6;                             \
+       ;;                                      \
+.mem.offset 0,0; st8.spill [r2]=r22,16;                \
+.mem.offset 8,0; st8.spill [r3]=r23,16;                \
+       mov r19=b7;                             \
+       ;;                                      \
+.mem.offset 0,0; st8.spill [r2]=r24,16;                \
+.mem.offset 8,0; st8.spill [r3]=r25,16;                \
+       ;;                                      \
+.mem.offset 0,0; st8.spill [r2]=r26,16;                \
+.mem.offset 8,0; st8.spill [r3]=r27,16;                \
+       ;;                                      \
+.mem.offset 0,0; st8.spill [r2]=r28,16;                \
+.mem.offset 8,0; st8.spill [r3]=r29,16;                \
+       ;;                                      \
+.mem.offset 0,0; st8.spill [r2]=r30,16;                \
+.mem.offset 8,0; st8.spill [r3]=r31,32;                \
+       ;;                                      \
+       mov ar.fpsr=r11;        /* M-unit */    \
+       st8 [r2]=r8,8;          /* ar.ccv */    \
+       adds r24=PT(B6)-PT(F7),r3;              \
+       ;;                                      \
+       stf.spill [r2]=f6,32;                   \
+       stf.spill [r3]=f7,32;                   \
+       ;;                                      \
+       stf.spill [r2]=f8,32;                   \
+       stf.spill [r3]=f9,32;                   \
+       ;;                                      \
+       stf.spill [r2]=f10;                     \
+       stf.spill [r3]=f11;                     \
+       adds r25=PT(B7)-PT(F11),r3;             \
+       ;;                                      \
+       st8 [r24]=r18,16;       /* b6 */        \
+       st8 [r25]=r19,16;       /* b7 */        \
+       ;;                                      \
+       st8 [r24]=r9;           /* ar.csd */    \
+       st8 [r25]=r10;          /* ar.ssd */    \
+       ;;
+
+#define SAVE_MIN_WITH_COVER    DO_SAVE_MIN(cover, mov r30=cr.ifs,)
+#define SAVE_MIN_WITH_COVER_R19        DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
+#define SAVE_MIN               DO_SAVE_MIN(     , mov r30=r0, )
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/patch.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/patch.c       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,189 @@
+/*
+ * Instruction-patching support.
+ *
+ * Copyright (C) 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+#include <linux/init.h>
+#include <linux/string.h>
+
+#include <asm/patch.h>
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/system.h>
+#include <asm/unistd.h>
+
+/*
+ * This was adapted from code written by Tony Luck:
+ *
+ * The 64-bit value in a "movl reg=value" is scattered between the two words of the bundle
+ * like this:
+ *
+ * 6  6         5         4         3         2         1
+ * 3210987654321098765432109876543210987654321098765432109876543210
+ * ABBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCDEEEEEFFFFFFFFFGGGGGGG
+ *
+ * CCCCCCCCCCCCCCCCCCxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ * xxxxAFFFFFFFFFEEEEEDxGGGGGGGxxxxxxxxxxxxxBBBBBBBBBBBBBBBBBBBBBBB
+ */
+static u64
+get_imm64 (u64 insn_addr)
+{
+       u64 *p = (u64 *) (insn_addr & -16);     /* mask out slot number */
+
+       return ( (p[1] & 0x0800000000000000UL) << 4)  | /*A*/
+               ((p[1] & 0x00000000007fffffUL) << 40) | /*B*/
+               ((p[0] & 0xffffc00000000000UL) >> 24) | /*C*/
+               ((p[1] & 0x0000100000000000UL) >> 23) | /*D*/
+               ((p[1] & 0x0003e00000000000UL) >> 29) | /*E*/
+               ((p[1] & 0x07fc000000000000UL) >> 43) | /*F*/
+               ((p[1] & 0x000007f000000000UL) >> 36);  /*G*/
+}
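
A quick way to trust the bit diagram above is to confirm that the seven masks used by get_imm64() together account for all 64 bits of the immediate. A small standalone C sketch (not part of the patch; it assumes a GCC/Clang-style __builtin_popcountll) that performs the check:

    #include <stdio.h>

    int main(void)
    {
            /* field masks copied from get_imm64() above */
            unsigned long long field[] = {
                    0x0800000000000000ULL,  /* A:  1 bit  */
                    0x00000000007fffffULL,  /* B: 23 bits */
                    0xffffc00000000000ULL,  /* C: 18 bits */
                    0x0000100000000000ULL,  /* D:  1 bit  */
                    0x0003e00000000000ULL,  /* E:  5 bits */
                    0x07fc000000000000ULL,  /* F:  9 bits */
                    0x000007f000000000ULL,  /* G:  7 bits */
            };
            int i, total = 0;

            for (i = 0; i < 7; i++)
                    total += __builtin_popcountll(field[i]);
            printf("bits covered by A..G: %d\n", total);  /* prints 64 */
            return 0;
    }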
+
+/* Patch instruction with "val" where "mask" has 1 bits. */
+void
+ia64_patch (u64 insn_addr, u64 mask, u64 val)
+{
+       u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16);
+#      define insn_mask ((1UL << 41) - 1)
+       unsigned long shift;
+
+       b0 = b[0]; b1 = b[1];
+       shift = 5 + 41 * (insn_addr % 16); /* 5 bits of template, then 3 x 41-bit instructions */
+       if (shift >= 64) {
+               m1 = mask << (shift - 64);
+               v1 = val << (shift - 64);
+       } else {
+               m0 = mask << shift; m1 = mask >> (64 - shift);
+               v0 = val  << shift; v1 = val >> (64 - shift);
+               b[0] = (b0 & ~m0) | (v0 & m0);
+       }
+       b[1] = (b1 & ~m1) | (v1 & m1);
+}
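
ia64_patch() only needs the slot number to know how a 41-bit instruction straddles the two 64-bit words of a bundle, via shift = 5 + 41 * slot. A throwaway C sketch (not part of the patch) tabulating that split for the three slots:

    #include <stdio.h>

    int main(void)
    {
            int slot;

            for (slot = 0; slot < 3; slot++) {
                    int shift = 5 + 41 * slot;      /* 5 template bits, then 3 x 41-bit slots */
                    int in_word0 = shift >= 64 ? 0 :
                                   (shift + 41 <= 64 ? 41 : 64 - shift);
                    int in_word1 = 41 - in_word0;

                    printf("slot %d: shift %2d -> %2d bits in b[0], %2d bits in b[1]\n",
                           slot, shift, in_word0, in_word1);
            }
            return 0;       /* slot 0: 41/0, slot 1: 18/23, slot 2: 0/41 */
    }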
+
+void
+ia64_patch_imm64 (u64 insn_addr, u64 val)
+{
+       ia64_patch(insn_addr,
+                  0x01fffefe000UL, (  ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */
+                                    | ((val & 0x0000000000200000UL) <<  0) /* bit 21 -> 21 */
+                                    | ((val & 0x00000000001f0000UL) <<  6) /* bit 16 -> 22 */
+                                    | ((val & 0x000000000000ff80UL) << 20) /* bit  7 -> 27 */
+                                    | ((val & 0x000000000000007fUL) << 13) /* bit  0 -> 13 */));
+       ia64_patch(insn_addr - 1, 0x1ffffffffffUL, val >> 22);
+}
+
+void
+ia64_patch_imm60 (u64 insn_addr, u64 val)
+{
+       ia64_patch(insn_addr,
+                  0x011ffffe000UL, (  ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */
+                                    | ((val & 0x00000000000fffffUL) << 13) /* bit  0 -> 13 */));
+       ia64_patch(insn_addr - 1, 0x1fffffffffcUL, val >> 18);
+}
+
+/*
+ * We sometimes need to load the physical address of a kernel
+ * object.  Often we can convert the virtual address to physical
+ * at execution time, but sometimes (either for performance reasons
+ * or during error recovery) we cannot do this.  Patch the marked
+ * bundles to load the physical address.
+ */
+void __init
+ia64_patch_vtop (unsigned long start, unsigned long end)
+{
+       s32 *offp = (s32 *) start;
+       u64 ip;
+
+       while (offp < (s32 *) end) {
+               ip = (u64) offp + *offp;
+
+               /* replace virtual address with corresponding physical address: */
+               ia64_patch_imm64(ip, ia64_tpa(get_imm64(ip)));
+               ia64_fc((void *) ip);
+               ++offp;
+       }
+       ia64_sync_i();
+       ia64_srlz_i();
+}
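
The patchlist walked here is a table of self-relative 32-bit offsets: each entry stores the distance from the entry itself to the bundle to patch, so the target is recovered as (u64) offp + *offp. A minimal user-space C sketch of that addressing scheme (not part of the patch; the real tables are emitted by the linker, whereas the offsets here are filled in at run time purely for illustration):

    #include <stdint.h>
    #include <stdio.h>

    static int target_a, target_b;          /* stand-ins for patchable bundles */
    static int32_t offsets[2];              /* stand-in for a patchlist section */

    int main(void)
    {
            size_t i;

            /* each entry = target address minus address of the entry itself */
            offsets[0] = (int32_t)((intptr_t)&target_a - (intptr_t)&offsets[0]);
            offsets[1] = (int32_t)((intptr_t)&target_b - (intptr_t)&offsets[1]);

            for (i = 0; i < 2; i++) {
                    void *ip = (char *)&offsets[i] + offsets[i];
                    printf("entry %zu resolves to %p (expected %p)\n", i, ip,
                           i == 0 ? (void *)&target_a : (void *)&target_b);
            }
            return 0;
    }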
+
+void
+ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
+{
+       static int first_time = 1;
+       int need_workaround;
+       s32 *offp = (s32 *) start;
+       u64 *wp;
+
+       need_workaround = (local_cpu_data->family == 0x1f && local_cpu_data->model == 0);
+
+       if (first_time) {
+               first_time = 0;
+               if (need_workaround)
+                       printk(KERN_INFO "Leaving McKinley Errata 9 workaround enabled\n");
+               else
+                       printk(KERN_INFO "McKinley Errata 9 workaround not needed; "
+                              "disabling it\n");
+       }
+       if (need_workaround)
+               return;
+
+       while (offp < (s32 *) end) {
+               wp = (u64 *) ia64_imva((char *) offp + *offp);
+               wp[0] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */
+               wp[1] = 0x0004000000000200UL;
+               wp[2] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */
+               wp[3] = 0x0084006880000200UL;
+               ia64_fc(wp); ia64_fc(wp + 2);
+               ++offp;
+       }
+       ia64_sync_i();
+       ia64_srlz_i();
+}
+
+static void
+patch_fsyscall_table (unsigned long start, unsigned long end)
+{
+       extern unsigned long fsyscall_table[NR_syscalls];
+       s32 *offp = (s32 *) start;
+       u64 ip;
+
+       while (offp < (s32 *) end) {
+               ip = (u64) ia64_imva((char *) offp + *offp);
+               ia64_patch_imm64(ip, (u64) fsyscall_table);
+               ia64_fc((void *) ip);
+               ++offp;
+       }
+       ia64_sync_i();
+       ia64_srlz_i();
+}
+
+static void
+patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
+{
+       extern char fsys_bubble_down[];
+       s32 *offp = (s32 *) start;
+       u64 ip;
+
+       while (offp < (s32 *) end) {
+               ip = (u64) offp + *offp;
+               ia64_patch_imm60((u64) ia64_imva((void *) ip),
+                                (u64) (fsys_bubble_down - (ip & -16)) / 16);
+               ia64_fc((void *) ip);
+               ++offp;
+       }
+       ia64_sync_i();
+       ia64_srlz_i();
+}
+
+void
+ia64_patch_gate (void)
+{
+#      define START(name)      ((unsigned long) __start_gate_##name##_patchlist)
+#      define END(name)        ((unsigned long)__end_gate_##name##_patchlist)
+
+       patch_fsyscall_table(START(fsyscall), END(fsyscall));
+       patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down));
+       ia64_patch_vtop(START(vtop), END(vtop));
+       ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9));
+}
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/pcdp.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/pcdp.h        Tue Aug  2 23:59:09 2005
@@ -0,0 +1,84 @@
+/*
+ * Definitions for PCDP-defined console devices
+ *
+ * v1.0a: http://www.dig64.org/specifications/DIG64_HCDPv10a_01.pdf
+ * v2.0:  http://www.dig64.org/specifications/DIG64_HCDPv20_042804.pdf
+ *
+ * (c) Copyright 2002, 2004 Hewlett-Packard Development Company, L.P.
+ *     Khalid Aziz <khalid.aziz@xxxxxx>
+ *     Bjorn Helgaas <bjorn.helgaas@xxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define PCDP_CONSOLE                   0
+#define PCDP_DEBUG                     1
+#define PCDP_CONSOLE_OUTPUT            2
+#define PCDP_CONSOLE_INPUT             3
+
+#define PCDP_UART                      (0 << 3)
+#define PCDP_VGA                       (1 << 3)
+#define PCDP_USB                       (2 << 3)
+
+/* pcdp_uart.type and pcdp_device.type */
+#define PCDP_CONSOLE_UART              (PCDP_UART | PCDP_CONSOLE)
+#define PCDP_DEBUG_UART                        (PCDP_UART | PCDP_DEBUG)
+#define PCDP_CONSOLE_VGA               (PCDP_VGA  | PCDP_CONSOLE_OUTPUT)
+#define PCDP_CONSOLE_USB               (PCDP_USB  | PCDP_CONSOLE_INPUT)
+
+/* pcdp_uart.flags */
+#define PCDP_UART_EDGE_SENSITIVE       (1 << 0)
+#define PCDP_UART_ACTIVE_LOW           (1 << 1)
+#define PCDP_UART_PRIMARY_CONSOLE      (1 << 2)
+#define PCDP_UART_IRQ                  (1 << 6) /* in pci_func for rev < 3 */
+#define PCDP_UART_PCI                  (1 << 7) /* in pci_func for rev < 3 */
+
+struct pcdp_uart {
+       u8                              type;
+       u8                              bits;
+       u8                              parity;
+       u8                              stop_bits;
+       u8                              pci_seg;
+       u8                              pci_bus;
+       u8                              pci_dev;
+       u8                              pci_func;
+       u64                             baud;
+       struct acpi_generic_address     addr;
+       u16                             pci_dev_id;
+       u16                             pci_vendor_id;
+       u32                             gsi;
+       u32                             clock_rate;
+       u8                              pci_prog_intfc;
+       u8                              flags;
+};
+
+struct pcdp_vga {
+       u8                      count;          /* address space descriptors */
+};
+
+/* pcdp_device.flags */
+#define PCDP_PRIMARY_CONSOLE   1
+
+struct pcdp_device {
+       u8                      type;
+       u8                      flags;
+       u16                     length;
+       u16                     efi_index;
+};
+
+struct pcdp {
+       u8                      signature[4];
+       u32                     length;
+       u8                      rev;            /* PCDP v2.0 is rev 3 */
+       u8                      chksum;
+       u8                      oemid[6];
+       u8                      oem_tabid[8];
+       u32                     oem_rev;
+       u8                      creator_id[4];
+       u32                     creator_rev;
+       u32                     num_uarts;
+       struct pcdp_uart        uart[0];        /* actual size is num_uarts */
+       /* remainder of table is pcdp_device structures */
+};
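
The type values defined earlier in this header pack a console use (the low bits) and a device class (a value shifted left by 3) into one byte. A tiny C sketch (not part of the patch) decoding a type byte under those definitions:

    #include <stdio.h>

    #define PCDP_CONSOLE            0
    #define PCDP_DEBUG              1
    #define PCDP_CONSOLE_OUTPUT     2
    #define PCDP_CONSOLE_INPUT      3

    #define PCDP_UART               (0 << 3)
    #define PCDP_VGA                (1 << 3)
    #define PCDP_USB                (2 << 3)

    int main(void)
    {
            unsigned char type = PCDP_VGA | PCDP_CONSOLE_OUTPUT;    /* PCDP_CONSOLE_VGA */

            /* low three bits: console use; remaining bits: device class */
            printf("use = %u, device class = %u\n", type & 7, type >> 3);
            return 0;       /* prints: use = 2, device class = 1 */
    }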
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/linux/sal.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/linux/sal.c Tue Aug  2 23:59:09 2005
@@ -0,0 +1,302 @@
+/*
+ * System Abstraction Layer (SAL) interface routines.
+ *
+ * Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ */
+#include <linux/config.h>
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#include <asm/page.h>
+#include <asm/sal.h>
+#include <asm/pal.h>
+
+ __cacheline_aligned DEFINE_SPINLOCK(sal_lock);
+unsigned long sal_platform_features;
+
+unsigned short sal_revision;
+unsigned short sal_version;
+
+#define SAL_MAJOR(x) ((x) >> 8)
+#define SAL_MINOR(x) ((x) & 0xff)
+
+static struct {
+       void *addr;     /* function entry point */
+       void *gpval;    /* gp value to use */
+} pdesc;
+
+static long
+default_handler (void)
+{
+       return -1;
+}
+
+ia64_sal_handler ia64_sal = (ia64_sal_handler) default_handler;
+ia64_sal_desc_ptc_t *ia64_ptc_domain_info;
+
+const char *
+ia64_sal_strerror (long status)
+{
+       const char *str;
+       switch (status) {
+             case 0: str = "Call completed without error"; break;
+             case 1: str = "Effect a warm boot of the system to complete "
+                             "the update"; break;
+             case -1: str = "Not implemented"; break;
+             case -2: str = "Invalid argument"; break;
+             case -3: str = "Call completed with error"; break;
+             case -4: str = "Virtual address not registered"; break;
+             case -5: str = "No information available"; break;
+             case -6: str = "Insufficient space to add the entry"; break;
+             case -7: str = "Invalid entry_addr value"; break;
+             case -8: str = "Invalid interrupt vector"; break;
+             case -9: str = "Requested memory not available"; break;
+             case -10: str = "Unable to write to the NVM device"; break;
+             case -11: str = "Invalid partition type specified"; break;
+             case -12: str = "Invalid NVM_Object id specified"; break;
+             case -13: str = "NVM_Object already has the maximum number "
+                               "of partitions"; break;
+             case -14: str = "Insufficient space in partition for the "
+                               "requested write sub-function"; break;
+             case -15: str = "Insufficient data buffer space for the "
+                               "requested read record sub-function"; break;
+             case -16: str = "Scratch buffer required for the write/delete "
+                               "sub-function"; break;
+             case -17: str = "Insufficient space in the NVM_Object for the "
+                               "requested create sub-function"; break;
+             case -18: str = "Invalid value specified in the partition_rec "
+                               "argument"; break;
+             case -19: str = "Record oriented I/O not supported for this "
+                               "partition"; break;
+             case -20: str = "Bad format of record to be written or "
+                               "required keyword variable not "
+                               "specified"; break;
+             default: str = "Unknown SAL status code"; break;
+       }
+       return str;
+}
+
+void __init
+ia64_sal_handler_init (void *entry_point, void *gpval)
+{
+       /* fill in the SAL procedure descriptor and point ia64_sal to it: */
+       pdesc.addr = entry_point;
+       pdesc.gpval = gpval;
+       ia64_sal = (ia64_sal_handler) &pdesc;
+}
+
+static void __init
+check_versions (struct ia64_sal_systab *systab)
+{
+       sal_revision = (systab->sal_rev_major << 8) | systab->sal_rev_minor;
+       sal_version = (systab->sal_b_rev_major << 8) | systab->sal_b_rev_minor;
+
+       /* Check for broken firmware */
+       if ((sal_revision == SAL_VERSION_CODE(49, 29))
+           && (sal_version == SAL_VERSION_CODE(49, 29)))
+       {
+               /*
+                * Old firmware for zx2000 prototypes has this weird version number,
+                * reset it to something sane.
+                */
+               sal_revision = SAL_VERSION_CODE(2, 8);
+               sal_version = SAL_VERSION_CODE(0, 0);
+       }
+}
+
+static void __init
+sal_desc_entry_point (void *p)
+{
+       struct ia64_sal_desc_entry_point *ep = p;
+       ia64_pal_handler_init(__va(ep->pal_proc));
+       ia64_sal_handler_init(__va(ep->sal_proc), __va(ep->gp));
+}
+
+#ifdef CONFIG_SMP
+static void __init
+set_smp_redirect (int flag)
+{
+#ifndef CONFIG_HOTPLUG_CPU
+       if (no_int_routing)
+               smp_int_redirect &= ~flag;
+       else
+               smp_int_redirect |= flag;
+#else
+       /*
+        * For CPU hotplug we don't want to do any chipset-supported
+        * interrupt redirection. The reason is that this would require
+        * all interrupts to be stopped and the irq to be hard-bound to a cpu.
+        * Later, when the interrupt fires, we would need to set the redirection
+        * hint again in the vector. This is cumbersome for something that the
+        * user-mode irq balancer will solve anyway.
+        */
+       no_int_routing=1;
+       smp_int_redirect &= ~flag;
+#endif
+}
+#else
+#define set_smp_redirect(flag) do { } while (0)
+#endif
+
+static void __init
+sal_desc_platform_feature (void *p)
+{
+       struct ia64_sal_desc_platform_feature *pf = p;
+       sal_platform_features = pf->feature_mask;
+
+       printk(KERN_INFO "SAL Platform features:");
+       if (!sal_platform_features) {
+               printk(" None\n");
+               return;
+       }
+
+       if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_BUS_LOCK)
+               printk(" BusLock");
+       if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT) {
+               printk(" IRQ_Redirection");
+               set_smp_redirect(SMP_IRQ_REDIRECTION);
+       }
+       if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT) {
+               printk(" IPI_Redirection");
+               set_smp_redirect(SMP_IPI_REDIRECTION);
+       }
+       if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)
+               printk(" ITC_Drift");
+       printk("\n");
+}
+
+#ifdef CONFIG_SMP
+static void __init
+sal_desc_ap_wakeup (void *p)
+{
+       struct ia64_sal_desc_ap_wakeup *ap = p;
+
+       switch (ap->mechanism) {
+       case IA64_SAL_AP_EXTERNAL_INT:
+               ap_wakeup_vector = ap->vector;
+               printk(KERN_INFO "SAL: AP wakeup using external interrupt "
+                               "vector 0x%lx\n", ap_wakeup_vector);
+               break;
+       default:
+               printk(KERN_ERR "SAL: AP wakeup mechanism unsupported!\n");
+               break;
+       }
+}
+
+static void __init
+chk_nointroute_opt(void)
+{
+       char *cp;
+       extern char saved_command_line[];
+
+       for (cp = saved_command_line; *cp; ) {
+               if (memcmp(cp, "nointroute", 10) == 0) {
+                       no_int_routing = 1;
+                       printk ("no_int_routing on\n");
+                       break;
+               } else {
+                       while (*cp != ' ' && *cp)
+                               ++cp;
+                       while (*cp == ' ')
+                               ++cp;
+               }
+       }
+}
+
+#else
+static void __init sal_desc_ap_wakeup(void *p) { }
+#endif
+
+void __init
+ia64_sal_init (struct ia64_sal_systab *systab)
+{
+       char *p;
+       int i;
+
+       if (!systab) {
+               printk(KERN_WARNING "Hmm, no SAL System Table.\n");
+               return;
+       }
+
+       if (strncmp(systab->signature, "SST_", 4) != 0)
+               printk(KERN_ERR "bad signature in system table!");
+
+       check_versions(systab);
+#ifdef CONFIG_SMP
+       chk_nointroute_opt();
+#endif
+
+       /* revisions are coded in BCD, so %x does the job for us */
+       printk(KERN_INFO "SAL %x.%x: %.32s %.32s%sversion %x.%x\n",
+                       SAL_MAJOR(sal_revision), SAL_MINOR(sal_revision),
+                       systab->oem_id, systab->product_id,
+                       systab->product_id[0] ? " " : "",
+                       SAL_MAJOR(sal_version), SAL_MINOR(sal_version));
+
+       p = (char *) (systab + 1);
+       for (i = 0; i < systab->entry_count; i++) {
+               /*
+                * The first byte of each entry type contains the type
+                * descriptor.
+                */
+               switch (*p) {
+               case SAL_DESC_ENTRY_POINT:
+                       sal_desc_entry_point(p);
+                       break;
+               case SAL_DESC_PLATFORM_FEATURE:
+                       sal_desc_platform_feature(p);
+                       break;
+               case SAL_DESC_PTC:
+                       ia64_ptc_domain_info = (ia64_sal_desc_ptc_t *)p;
+                       break;
+               case SAL_DESC_AP_WAKEUP:
+                       sal_desc_ap_wakeup(p);
+                       break;
+               }
+               p += SAL_DESC_SIZE(*p);
+       }
+}
+
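
ia64_sal_init() prints the SAL revision with %x because the major and minor numbers are binary-coded decimal. A minimal C sketch of that decoding (not part of the patch; the 0x0302 revision value is made up for the example):

    #include <stdio.h>

    #define SAL_MAJOR(x) ((x) >> 8)
    #define SAL_MINOR(x) ((x) & 0xff)

    int main(void)
    {
            unsigned short sal_revision = 0x0302;   /* hypothetical BCD-coded 3.2 */

            /* BCD digits print correctly with %x, exactly as in ia64_sal_init() */
            printf("SAL %x.%x\n", SAL_MAJOR(sal_revision), SAL_MINOR(sal_revision));
            return 0;
    }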
+int
+ia64_sal_oemcall(struct ia64_sal_retval *isrvp, u64 oemfunc, u64 arg1,
+                u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7)
+{
+       if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
+               return -1;
+       SAL_CALL(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+       return 0;
+}
+EXPORT_SYMBOL(ia64_sal_oemcall);
+
+int
+ia64_sal_oemcall_nolock(struct ia64_sal_retval *isrvp, u64 oemfunc, u64 arg1,
+                       u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6,
+                       u64 arg7)
+{
+       if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
+               return -1;
+       SAL_CALL_NOLOCK(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6,
+                       arg7);
+       return 0;
+}
+EXPORT_SYMBOL(ia64_sal_oemcall_nolock);
+
+int
+ia64_sal_oemcall_reentrant(struct ia64_sal_retval *isrvp, u64 oemfunc,
+                          u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5,
+                          u64 arg6, u64 arg7)
+{
+       if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
+               return -1;
+       SAL_CALL_REENTRANT(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6,
+                          arg7);
+       return 0;
+}
+EXPORT_SYMBOL(ia64_sal_oemcall_reentrant);
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/mm_contig.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/mm_contig.c Tue Aug  2 23:59:09 2005
@@ -0,0 +1,305 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ * Copyright (C) 2000, Rohit Seth <rohit.seth@xxxxxxxxx>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
+ *
+ * Routines used by ia64 machines with contiguous (or virtually contiguous)
+ * memory.
+ */
+#include <linux/config.h>
+#include <linux/bootmem.h>
+#include <linux/efi.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+
+#include <asm/meminit.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+#include <asm/mca.h>
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+static unsigned long num_dma_physpages;
+#endif
+
+/**
+ * show_mem - display a memory statistics summary
+ *
+ * Just walks the pages in the system and describes where they're allocated.
+ */
+#ifndef XEN
+void
+show_mem (void)
+{
+       int i, total = 0, reserved = 0;
+       int shared = 0, cached = 0;
+
+       printk("Mem-info:\n");
+       show_free_areas();
+
+       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+       i = max_mapnr;
+       while (i-- > 0) {
+               if (!pfn_valid(i))
+                       continue;
+               total++;
+               if (PageReserved(mem_map+i))
+                       reserved++;
+               else if (PageSwapCache(mem_map+i))
+                       cached++;
+               else if (page_count(mem_map + i))
+                       shared += page_count(mem_map + i) - 1;
+       }
+       printk("%d pages of RAM\n", total);
+       printk("%d reserved pages\n", reserved);
+       printk("%d pages shared\n", shared);
+       printk("%d pages swap cached\n", cached);
+       printk("%ld pages in page table cache\n", pgtable_cache_size);
+}
+#endif
+
+/* physical address where the bootmem map is located */
+unsigned long bootmap_start;
+
+/**
+ * find_max_pfn - adjust the maximum page number callback
+ * @start: start of range
+ * @end: end of range
+ * @arg: address of pointer to global max_pfn variable
+ *
+ * Passed as a callback function to efi_memmap_walk() to determine the highest
+ * available page frame number in the system.
+ */
+int
+find_max_pfn (unsigned long start, unsigned long end, void *arg)
+{
+       unsigned long *max_pfnp = arg, pfn;
+
+       pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
+       if (pfn > *max_pfnp)
+               *max_pfnp = pfn;
+       return 0;
+}
+
+/**
+ * find_bootmap_location - callback to find a memory area for the bootmap
+ * @start: start of region
+ * @end: end of region
+ * @arg: unused callback data
+ *
+ * Find a place to put the bootmap and return its starting address in
+ * bootmap_start.  This address must be page-aligned.
+ */
+int
+find_bootmap_location (unsigned long start, unsigned long end, void *arg)
+{
+       unsigned long needed = *(unsigned long *)arg;
+       unsigned long range_start, range_end, free_start;
+       int i;
+
+#if IGNORE_PFN0
+       if (start == PAGE_OFFSET) {
+               start += PAGE_SIZE;
+               if (start >= end)
+                       return 0;
+       }
+#endif
+
+       free_start = PAGE_OFFSET;
+
+       for (i = 0; i < num_rsvd_regions; i++) {
+               range_start = max(start, free_start);
+               range_end   = min(end, rsvd_region[i].start & PAGE_MASK);
+
+               free_start = PAGE_ALIGN(rsvd_region[i].end);
+
+               if (range_end <= range_start)
+                       continue; /* skip over empty range */
+
+               if (range_end - range_start >= needed) {
+                       bootmap_start = __pa(range_start);
+                       return -1;      /* done */
+               }
+
+               /* nothing more available in this segment */
+               if (range_end == end)
+                       return 0;
+       }
+       return 0;
+}
+
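
find_bootmap_location() is a first-fit search over the gaps left between the sorted reserved regions inside one free segment. A standalone C sketch of that search (not part of the patch; the region addresses and required size are invented for the example, and the real code relies on the ~0UL end-of-memory marker instead of the explicit tail check used here):

    #include <stdio.h>

    struct region { unsigned long start, end; };

    /* sorted, non-overlapping reserved regions inside [0x0, 0x10000) */
    static struct region rsvd[] = {
            { 0x1000, 0x2000 },
            { 0x6000, 0x9000 },
    };

    int main(void)
    {
            unsigned long seg_start = 0x0, seg_end = 0x10000, needed = 0x3000;
            unsigned long free_start = seg_start;
            size_t i;
            int found = 0;

            for (i = 0; i < sizeof(rsvd) / sizeof(rsvd[0]); i++) {
                    unsigned long gap_end = rsvd[i].start;

                    if (gap_end > free_start && gap_end - free_start >= needed) {
                            found = 1;
                            break;
                    }
                    free_start = rsvd[i].end;       /* skip over the reserved region */
            }
            if (!found && seg_end - free_start >= needed)
                    found = 1;                      /* tail gap after the last region */

            if (found)
                    printf("bootmap fits at 0x%lx\n", free_start);   /* 0x2000 */
            return 0;
    }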
+/**
+ * find_memory - setup memory map
+ *
+ * Walk the EFI memory map and find usable memory for the system, taking
+ * into account reserved areas.
+ */
+#ifndef XEN
+void
+find_memory (void)
+{
+       unsigned long bootmap_size;
+
+       reserve_memory();
+
+       /* first find highest page frame number */
+       max_pfn = 0;
+       efi_memmap_walk(find_max_pfn, &max_pfn);
+
+       /* how many bytes to cover all the pages */
+       bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
+
+       /* look for a location to hold the bootmap */
+       bootmap_start = ~0UL;
+       efi_memmap_walk(find_bootmap_location, &bootmap_size);
+       if (bootmap_start == ~0UL)
+               panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
+
+       bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);
+
+       /* Free all available memory, then mark bootmem-map as being in use. */
+       efi_memmap_walk(filter_rsvd_memory, free_bootmem);
+       reserve_bootmem(bootmap_start, bootmap_size);
+
+       find_initrd();
+}
+#endif
+
+#ifdef CONFIG_SMP
+/**
+ * per_cpu_init - setup per-cpu variables
+ *
+ * Allocate and setup per-cpu data areas.
+ */
+void *
+per_cpu_init (void)
+{
+       void *cpu_data;
+       int cpu;
+
+       /*
+        * get_free_pages() cannot be used before cpu_init() done.  BSP
+        * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
+        * get_zeroed_page().
+        */
+       if (smp_processor_id() == 0) {
+               cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
+                                          PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+               for (cpu = 0; cpu < NR_CPUS; cpu++) {
+                       memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
+                       __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
+                       cpu_data += PERCPU_PAGE_SIZE;
+                       per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
+               }
+       }
+       return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
+}
+#endif /* CONFIG_SMP */
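
per_cpu_init() gives each CPU its own copy of the per-cpu data area and records, per CPU, the byte offset from the template area to that copy; a per-cpu access then just adds the offset. A user-space C sketch of the same idea (not part of the patch; NCPU, template_area and per_cpu_ptr() are inventions for the example):

    #include <stdio.h>
    #include <string.h>

    #define NCPU 4

    static long template_area[8];           /* stands in for __per_cpu_start..__per_cpu_end */
    static long copies[NCPU][8];            /* one private copy per CPU */
    static long per_cpu_offset[NCPU];       /* stands in for __per_cpu_offset[] */

    #define per_cpu_ptr(var, cpu) \
            ((long *)((char *)&(var) + per_cpu_offset[cpu]))

    int main(void)
    {
            int cpu;

            template_area[0] = 42;          /* initial value copied to every CPU */
            for (cpu = 0; cpu < NCPU; cpu++) {
                    memcpy(copies[cpu], template_area, sizeof(template_area));
                    per_cpu_offset[cpu] = (char *)copies[cpu] - (char *)template_area;
            }

            *per_cpu_ptr(template_area[0], 2) = 1002;       /* touch CPU 2's copy only */
            printf("cpu2 copy = %ld, template = %ld\n", copies[2][0], template_area[0]);
            return 0;       /* prints: cpu2 copy = 1002, template = 42 */
    }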
+
+static int
+count_pages (u64 start, u64 end, void *arg)
+{
+       unsigned long *count = arg;
+
+       *count += (end - start) >> PAGE_SHIFT;
+       return 0;
+}
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+static int
+count_dma_pages (u64 start, u64 end, void *arg)
+{
+       unsigned long *count = arg;
+
+       if (start < MAX_DMA_ADDRESS)
+               *count += (min(end, MAX_DMA_ADDRESS) - start) >> PAGE_SHIFT;
+       return 0;
+}
+#endif
+
+/*
+ * Set up the page tables.
+ */
+
+#ifndef XEN
+void
+paging_init (void)
+{
+       unsigned long max_dma;
+       unsigned long zones_size[MAX_NR_ZONES];
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+       unsigned long zholes_size[MAX_NR_ZONES];
+       unsigned long max_gap;
+#endif
+
+       /* initialize mem_map[] */
+
+       memset(zones_size, 0, sizeof(zones_size));
+
+       num_physpages = 0;
+       efi_memmap_walk(count_pages, &num_physpages);
+
+       max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+       memset(zholes_size, 0, sizeof(zholes_size));
+
+       num_dma_physpages = 0;
+       efi_memmap_walk(count_dma_pages, &num_dma_physpages);
+
+       if (max_low_pfn < max_dma) {
+               zones_size[ZONE_DMA] = max_low_pfn;
+               zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
+       } else {
+               zones_size[ZONE_DMA] = max_dma;
+               zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
+               if (num_physpages > num_dma_physpages) {
+                       zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
+                       zholes_size[ZONE_NORMAL] =
+                               ((max_low_pfn - max_dma) -
+                                (num_physpages - num_dma_physpages));
+               }
+       }
+
+       max_gap = 0;
+       efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
+       if (max_gap < LARGE_GAP) {
+               vmem_map = (struct page *) 0;
+               free_area_init_node(0, &contig_page_data, zones_size, 0,
+                                   zholes_size);
+       } else {
+               unsigned long map_size;
+
+               /* allocate virtual_mem_map */
+
+               map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+               vmalloc_end -= map_size;
+               vmem_map = (struct page *) vmalloc_end;
+               efi_memmap_walk(create_mem_map_page_table, NULL);
+
+               mem_map = contig_page_data.node_mem_map = vmem_map;
+               free_area_init_node(0, &contig_page_data, zones_size,
+                                   0, zholes_size);
+
+               printk("Virtual mem_map starts at 0x%p\n", mem_map);
+       }
+#else /* !CONFIG_VIRTUAL_MEM_MAP */
+       if (max_low_pfn < max_dma)
+               zones_size[ZONE_DMA] = max_low_pfn;
+       else {
+               zones_size[ZONE_DMA] = max_dma;
+               zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
+       }
+       free_area_init(zones_size);
+#endif /* !CONFIG_VIRTUAL_MEM_MAP */
+       zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
+}
+#endif /* !XEN */
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/pal.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/pal.S       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,310 @@
+/*
+ * PAL Firmware support
+ * IA-64 Processor Programmers Reference Vol 2
+ *
+ * Copyright (C) 1999 Don Dugger <don.dugger@xxxxxxxxx>
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ * Copyright (C) 1999-2001, 2003 Hewlett-Packard Co
+ *     David Mosberger <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *
+ * 05/22/2000 eranian Added support for stacked register calls
+ * 05/24/2000 eranian Added support for physical mode static calls
+ */
+
+#include <asm/asmmacro.h>
+#include <asm/processor.h>
+
+       .data
+pal_entry_point:
+       data8 ia64_pal_default_handler
+       .text
+
+/*
+ * Set the PAL entry point address.  This could be written in C code, but we do it here
+ * to keep it all in one module (besides, it's so trivial that it's
+ * not a big deal).
+ *
+ * in0         Address of the PAL entry point (text address, NOT a function descriptor).
+ */
+GLOBAL_ENTRY(ia64_pal_handler_init)
+       alloc r3=ar.pfs,1,0,0,0
+       movl r2=pal_entry_point
+       ;;
+       st8 [r2]=in0
+       br.ret.sptk.many rp
+END(ia64_pal_handler_init)
+
+/*
+ * Default PAL call handler.  This needs to be coded in assembly because it uses
+ * the static calling convention, i.e., the RSE may not be used and calls are
+ * done via "br.cond" (not "br.call").
+ */
+GLOBAL_ENTRY(ia64_pal_default_handler)
+       mov r8=-1
+       br.cond.sptk.many rp
+END(ia64_pal_default_handler)
+
+/*
+ * Make a PAL call using the static calling convention.
+ *
+ * in0         Index of PAL service
+ * in1 - in3   Remaining PAL arguments
+ * in4        1 ==> clear psr.ic,  0 ==> don't clear psr.ic
+ *
+ */
+GLOBAL_ENTRY(ia64_pal_call_static)
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
+       alloc loc1 = ar.pfs,5,5,0,0
+       movl loc2 = pal_entry_point
+1:     {
+         mov r28 = in0
+         mov r29 = in1
+         mov r8 = ip
+       }
+       ;;
+       ld8 loc2 = [loc2]               // loc2 <- entry point
+       tbit.nz p6,p7 = in4, 0
+       adds r8 = 1f-1b,r8
+       mov loc4=ar.rsc                 // save RSE configuration
+       ;;
+       mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
+       mov loc3 = psr
+       mov loc0 = rp
+       .body
+       mov r30 = in2
+
+(p6)   rsm psr.i | psr.ic
+       mov r31 = in3
+       mov b7 = loc2
+
+(p7)   rsm psr.i
+       ;;
+(p6)   srlz.i
+       mov rp = r8
+       br.cond.sptk.many b7
+1:     mov psr.l = loc3
+       mov ar.rsc = loc4               // restore RSE configuration
+       mov ar.pfs = loc1
+       mov rp = loc0
+       ;;
+       srlz.d                          // serialize restoration of psr.l
+       br.ret.sptk.many b0
+END(ia64_pal_call_static)
+
+/*
+ * Make a PAL call using the stacked registers calling convention.
+ *
+ * Inputs:
+ *     in0         Index of PAL service
+ *     in2 - in3   Remaining PAL arguments
+ */
+GLOBAL_ENTRY(ia64_pal_call_stacked)
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
+       alloc loc1 = ar.pfs,4,4,4,0
+       movl loc2 = pal_entry_point
+
+       mov r28  = in0                  // Index MUST be copied to r28
+       mov out0 = in0                  // AND in0 of PAL function
+       mov loc0 = rp
+       .body
+       ;;
+       ld8 loc2 = [loc2]               // loc2 <- entry point
+       mov out1 = in1
+       mov out2 = in2
+       mov out3 = in3
+       mov loc3 = psr
+       ;;
+       rsm psr.i
+       mov b7 = loc2
+       ;;
+       br.call.sptk.many rp=b7         // now make the call
+.ret0: mov psr.l  = loc3
+       mov ar.pfs = loc1
+       mov rp = loc0
+       ;;
+       srlz.d                          // serialize restoration of psr.l
+       br.ret.sptk.many b0
+END(ia64_pal_call_stacked)
+
+/*
+ * Make a physical mode PAL call using the static registers calling convention.
+ *
+ * Inputs:
+ *     in0         Index of PAL service
+ *     in2 - in3   Remaining PAL arguments
+ *
+ * PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel.
+ * So we don't need to clear them.
+ */
+#define PAL_PSR_BITS_TO_CLEAR                                                  \
+       (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT  | IA64_PSR_DB | IA64_PSR_RT |  \
+        IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED |                \
+        IA64_PSR_DFL | IA64_PSR_DFH)
+
+#define PAL_PSR_BITS_TO_SET                                                    \
+       (IA64_PSR_BN)
+
+
+GLOBAL_ENTRY(ia64_pal_call_phys_static)
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
+       alloc loc1 = ar.pfs,4,7,0,0
+       movl loc2 = pal_entry_point
+1:     {
+         mov r28  = in0                // copy procedure index
+         mov r8   = ip                 // save ip to compute branch
+         mov loc0 = rp                 // save rp
+       }
+       .body
+       ;;
+       ld8 loc2 = [loc2]               // loc2 <- entry point
+       mov r29  = in1                  // first argument
+       mov r30  = in2                  // copy arg2
+       mov r31  = in3                  // copy arg3
+       ;;
+       mov loc3 = psr                  // save psr
+       adds r8  = 1f-1b,r8             // calculate return address for call
+       ;;
+       mov loc4=ar.rsc                 // save RSE configuration
+#ifdef XEN
+       dep.z loc2=loc2,0,60            // convert pal entry point to physical
+#else // XEN
+       dep.z loc2=loc2,0,61            // convert pal entry point to physical
+#endif // XEN
+       tpa r8=r8                       // convert rp to physical
+       ;;
+       mov b7 = loc2                   // install target to branch reg
+       mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
+       movl r16=PAL_PSR_BITS_TO_CLEAR
+       movl r17=PAL_PSR_BITS_TO_SET
+       ;;
+       or loc3=loc3,r17                // add in psr the bits to set
+       ;;
+       andcm r16=loc3,r16              // removes bits to clear from psr
+       br.call.sptk.many rp=ia64_switch_mode_phys
+.ret1: mov rp = r8                     // install return address (physical)
+       mov loc5 = r19
+       mov loc6 = r20
+       br.cond.sptk.many b7
+1:
+       mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
+       mov r16=loc3                    // r16= original psr
+       mov r19=loc5
+       mov r20=loc6
+       br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
+.ret2:
+       mov psr.l = loc3                // restore init PSR
+
+       mov ar.pfs = loc1
+       mov rp = loc0
+       ;;
+       mov ar.rsc=loc4                 // restore RSE configuration
+       srlz.d                          // serialize restoration of psr.l
+       br.ret.sptk.many b0
+END(ia64_pal_call_phys_static)
+
+/*
+ * Make a PAL call using the stacked registers in physical mode.
+ *
+ * Inputs:
+ *     in0         Index of PAL service
+ *     in2 - in3   Remaining PAL arguments
+ */
+GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
+       alloc   loc1 = ar.pfs,5,7,4,0
+       movl    loc2 = pal_entry_point
+1:     {
+         mov r28  = in0                // copy procedure index
+         mov loc0 = rp         // save rp
+       }
+       .body
+       ;;
+       ld8 loc2 = [loc2]               // loc2 <- entry point
+       mov out0 = in0          // first argument
+       mov out1 = in1          // copy arg2
+       mov out2 = in2          // copy arg3
+       mov out3 = in3          // copy arg3
+       ;;
+       mov loc3 = psr          // save psr
+       ;;
+       mov loc4=ar.rsc                 // save RSE configuration
+#ifdef XEN
+       dep.z loc2=loc2,0,60            // convert pal entry point to physical
+#else // XEN
+       dep.z loc2=loc2,0,61            // convert pal entry point to physical
+#endif // XEN
+       ;;
+       mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
+       movl r16=PAL_PSR_BITS_TO_CLEAR
+       movl r17=PAL_PSR_BITS_TO_SET
+       ;;
+       or loc3=loc3,r17                // add in psr the bits to set
+       mov b7 = loc2                   // install target to branch reg
+       ;;
+       andcm r16=loc3,r16              // removes bits to clear from psr
+       br.call.sptk.many rp=ia64_switch_mode_phys
+.ret6:
+       mov loc5 = r19
+       mov loc6 = r20
+       br.call.sptk.many rp=b7         // now make the call
+.ret7:
+       mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
+       mov r16=loc3                    // r16= original psr
+       mov r19=loc5
+       mov r20=loc6
+       br.call.sptk.many rp=ia64_switch_mode_virt      // return to virtual mode
+
+.ret8: mov psr.l  = loc3               // restore init PSR
+       mov ar.pfs = loc1
+       mov rp = loc0
+       ;;
+       mov ar.rsc=loc4                 // restore RSE configuration
+       srlz.d                          // serialize restoration of psr.l
+       br.ret.sptk.many b0
+END(ia64_pal_call_phys_stacked)
+
+/*
+ * Save scratch fp regs which aren't saved in pt_regs already (fp10-fp15).
+ *
+ * NOTE: We need to do this since firmware (SAL and PAL) may use any of the scratch
+ * regs in the fp-low partition.
+ *
+ * Inputs:
+ *      in0    Address of stack storage for fp regs
+ */
+GLOBAL_ENTRY(ia64_save_scratch_fpregs)
+       alloc r3=ar.pfs,1,0,0,0
+       add r2=16,in0
+       ;;
+       stf.spill [in0] = f10,32
+       stf.spill [r2]  = f11,32
+       ;;
+       stf.spill [in0] = f12,32
+       stf.spill [r2]  = f13,32
+       ;;
+       stf.spill [in0] = f14,32
+       stf.spill [r2]  = f15,32
+       br.ret.sptk.many rp
+END(ia64_save_scratch_fpregs)
+
+/*
+ * Load scratch fp regs (fp10-fp15)
+ *
+ * Inputs:
+ *      in0    Address of stack storage for fp regs
+ */
+GLOBAL_ENTRY(ia64_load_scratch_fpregs)
+       alloc r3=ar.pfs,1,0,0,0
+       add r2=16,in0
+       ;;
+       ldf.fill  f10 = [in0],32
+       ldf.fill  f11 = [r2],32
+       ;;
+       ldf.fill  f12 = [in0],32
+       ldf.fill  f13 = [r2],32
+       ;;
+       ldf.fill  f14 = [in0],32
+       ldf.fill  f15 = [r2],32
+       br.ret.sptk.many rp
+END(ia64_load_scratch_fpregs)
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/setup.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/setup.c     Tue Aug  2 23:59:09 2005
@@ -0,0 +1,773 @@
+/*
+ * Architecture-specific setup.
+ *
+ * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ * Copyright (C) 2000, Rohit Seth <rohit.seth@xxxxxxxxx>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ *
+ * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
+ * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
+ * 03/31/00 R.Seth     cpu_initialized and current->processor fixes
+ * 02/04/00 D.Mosberger        some more get_cpuinfo fixes...
+ * 02/01/00 R.Seth     fixed get_cpuinfo for SMP
+ * 01/07/99 S.Eranian  added the support for command line argument
+ * 06/24/99 W.Drummond added boot_cpu_data.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/acpi.h>
+#include <linux/bootmem.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/threads.h>
+#include <linux/tty.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/efi.h>
+#include <linux/initrd.h>
+
+#include <asm/ia32.h>
+#include <asm/machvec.h>
+#include <asm/mca.h>
+#include <asm/meminit.h>
+#include <asm/page.h>
+#include <asm/patch.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/sal.h>
+#include <asm/sections.h>
+#include <asm/serial.h>
+#include <asm/setup.h>
+#include <asm/smp.h>
+#include <asm/system.h>
+#include <asm/unistd.h>
+#ifdef CONFIG_VTI
+#include <asm/vmx.h>
+#endif // CONFIG_VTI
+#include <asm/io.h>
+
+#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
+# error "struct cpuinfo_ia64 too big!"
+#endif
+
+#ifdef CONFIG_SMP
+unsigned long __per_cpu_offset[NR_CPUS];
+EXPORT_SYMBOL(__per_cpu_offset);
+#endif
+
+DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
+DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
+unsigned long ia64_cycles_per_usec;
+struct ia64_boot_param *ia64_boot_param;
+struct screen_info screen_info;
+
+unsigned long ia64_max_cacheline_size;
+unsigned long ia64_iobase;     /* virtual address for I/O accesses */
+EXPORT_SYMBOL(ia64_iobase);
+struct io_space io_space[MAX_IO_SPACES];
+EXPORT_SYMBOL(io_space);
+unsigned int num_io_spaces;
+
+unsigned char aux_device_present = 0xaa;        /* XXX remove this when legacy I/O is gone */
+
+/*
+ * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
+ * mask specifies a mask of address bits that must be 0 in order for two buffers to be
+ * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
+ * address of the second buffer must be aligned to (merge_mask+1) in order to be
+ * mergeable).  By default, we assume there is no I/O MMU which can merge physically
+ * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an iommu
+ * page-size of 2^64.
+ */
+unsigned long ia64_max_iommu_merge_mask = ~0UL;
+EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
+
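
The comment above defines mergeability in terms of alignment: two buffers may be merged by the I/O MMU only if the end of the first and the start of the second are both multiples of (merge_mask + 1). A tiny C sketch of that test (not part of the patch; the 4 KiB IOMMU page size is an assumption for the example):

    #include <stdio.h>

    static int buffers_mergeable(unsigned long end1, unsigned long start2,
                                 unsigned long merge_mask)
    {
            return (end1 & merge_mask) == 0 && (start2 & merge_mask) == 0;
    }

    int main(void)
    {
            unsigned long iommu_mask = 0xfffUL;     /* assumed 4 KiB IOMMU page size */

            printf("%d %d\n",
                   buffers_mergeable(0x2000, 0x2000, iommu_mask),   /* 1: both aligned */
                   buffers_mergeable(0x2100, 0x2100, ~0UL));        /* 0: default mask */
            return 0;
    }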
+/*
+ * We use a special marker for the end of memory and it uses the extra (+1) slot
+ */
+struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
+int num_rsvd_regions;
+
+
+/*
+ * Filter incoming memory segments based on the primitive map created from the boot
+ * parameters. Segments contained in the map are removed from the memory ranges. A
+ * caller-specified function is called with the memory ranges that remain after filtering.
+ * This routine does not assume the incoming segments are sorted.
+ */
+int
+filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
+{
+       unsigned long range_start, range_end, prev_start;
+       void (*func)(unsigned long, unsigned long, int);
+       int i;
+
+#if IGNORE_PFN0
+       if (start == PAGE_OFFSET) {
+               printk(KERN_WARNING "warning: skipping physical page 0\n");
+               start += PAGE_SIZE;
+               if (start >= end) return 0;
+       }
+#endif
+       /*
+        * lowest possible address(walker uses virtual)
+        */
+       prev_start = PAGE_OFFSET;
+       func = arg;
+
+       for (i = 0; i < num_rsvd_regions; ++i) {
+               range_start = max(start, prev_start);
+               range_end   = min(end, rsvd_region[i].start);
+
+               if (range_start < range_end)
+#ifdef XEN
+               {
+               /* init_boot_pages requires "ps, pe" */
+                       printk("Init boot pages: 0x%lx -> 0x%lx.\n",
+                               __pa(range_start), __pa(range_end));
+                       (*func)(__pa(range_start), __pa(range_end), 0);
+               }
+#else
+                       call_pernode_memory(__pa(range_start), range_end - range_start, func);
+#endif
+
+               /* nothing more available in this segment */
+               if (range_end == end) return 0;
+
+               prev_start = rsvd_region[i].end;
+       }
+       /* end of memory marker allows full processing inside loop body */
+       return 0;
+}
+
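
filter_rsvd_memory() clips each incoming segment against the sorted reserved regions and hands only the surviving sub-ranges to the callback; the ~0UL end-of-memory marker lets the final gap be handled inside the loop. A standalone C sketch of that clipping (not part of the patch; the addresses are made up for the example):

    #include <stdio.h>

    struct region { unsigned long start, end; };

    static struct region rsvd[] = {         /* sorted reserved regions */
            { 0x3000, 0x4000 },
            { 0x7000, 0x8000 },
            { ~0UL,   ~0UL   },             /* end-of-memory marker */
    };

    static void emit(unsigned long ps, unsigned long pe)
    {
            printf("usable: 0x%lx -> 0x%lx\n", ps, pe);
    }

    int main(void)
    {
            unsigned long start = 0x2000, end = 0x9000;     /* incoming segment */
            unsigned long prev_start = 0;
            size_t i;

            for (i = 0; i < sizeof(rsvd) / sizeof(rsvd[0]); i++) {
                    unsigned long range_start = start > prev_start ? start : prev_start;
                    unsigned long range_end = end < rsvd[i].start ? end : rsvd[i].start;

                    if (range_start < range_end)
                            emit(range_start, range_end);
                    if (range_end == end)
                            break;          /* nothing more in this segment */
                    prev_start = rsvd[i].end;
            }
            return 0;       /* emits 0x2000-0x3000, 0x4000-0x7000, 0x8000-0x9000 */
    }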
+static void
+sort_regions (struct rsvd_region *rsvd_region, int max)
+{
+       int j;
+
+       /* simple bubble sorting */
+       while (max--) {
+               for (j = 0; j < max; ++j) {
+                       if (rsvd_region[j].start > rsvd_region[j+1].start) {
+                               struct rsvd_region tmp;
+                               tmp = rsvd_region[j];
+                               rsvd_region[j] = rsvd_region[j + 1];
+                               rsvd_region[j + 1] = tmp;
+                       }
+               }
+       }
+}
+
+/**
+ * reserve_memory - setup reserved memory areas
+ *
+ * Setup the reserved memory areas set aside for the boot parameters,
+ * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
+ * see include/asm-ia64/meminit.h if you need to define more.
+ */
+void
+reserve_memory (void)
+{
+       int n = 0;
+
+       /*
+        * none of the entries in this table overlap
+        */
+       rsvd_region[n].start = (unsigned long) ia64_boot_param;
+       rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
+       n++;
+
+       rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
+       rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
+       n++;
+
+       rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
+       rsvd_region[n].end   = (rsvd_region[n].start
+                               + strlen(__va(ia64_boot_param->command_line)) + 1);
+       n++;
+
+       rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
+#ifdef XEN
+       /* Reserve xen image/bitmap/xen-heap */
+       rsvd_region[n].end   = rsvd_region[n].start + xenheap_size;
+#else
+       rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
+#endif
+       n++;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+       if (ia64_boot_param->initrd_start) {
+               rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
+               rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
+               n++;
+       }
+#endif
+
+       /* end of memory marker */
+       rsvd_region[n].start = ~0UL;
+       rsvd_region[n].end   = ~0UL;
+       n++;
+
+       num_rsvd_regions = n;
+
+       sort_regions(rsvd_region, num_rsvd_regions);
+}
+
+/**
+ * find_initrd - get initrd parameters from the boot parameter structure
+ *
+ * Grab the initrd start and end from the boot parameter struct given us by
+ * the boot loader.
+ */
+void
+find_initrd (void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+       if (ia64_boot_param->initrd_start) {
+               initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
+               initrd_end   = initrd_start+ia64_boot_param->initrd_size;
+
+               printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
+                      initrd_start, ia64_boot_param->initrd_size);
+       }
+#endif
+}
+
+static void __init
+io_port_init (void)
+{
+       extern unsigned long ia64_iobase;
+       unsigned long phys_iobase;
+
+       /*
+        *  Set `iobase' to the appropriate address in region 6 (uncached access range).
+        *
+        *  The EFI memory map is the "preferred" location to get the I/O port space base,
+        *  rather than relying on AR.KR0. This should become more clear in future SAL
+        *  specs. We'll fall back to getting it out of AR.KR0 if no appropriate entry is
+        *  found in the memory map.
+        */
+       phys_iobase = efi_get_iobase();
+       if (phys_iobase)
+               /* set AR.KR0 since this is all we use it for anyway */
+               ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
+       else {
+               phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
+               printk(KERN_INFO "No I/O port range found in EFI memory map, falling back "
+                      "to AR.KR0\n");
+               printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
+       }
+       ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
+
+       /* setup legacy IO port space */
+       io_space[0].mmio_base = ia64_iobase;
+       io_space[0].sparse = 1;
+       num_io_spaces = 1;
+}
+
+/**
+ * early_console_setup - setup debugging console
+ *
+ * Consoles started here require little enough setup that we can start using
+ * them very early in the boot process, either right after the machine
+ * vector initialization, or even before if the drivers can detect their hw.
+ *
+ * Returns non-zero if a console couldn't be setup.
+ */
+static inline int __init
+early_console_setup (char *cmdline)
+{
+#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
+       {
+               extern int sn_serial_console_early_setup(void);
+               if (!sn_serial_console_early_setup())
+                       return 0;
+       }
+#endif
+#ifdef CONFIG_EFI_PCDP
+       if (!efi_setup_pcdp_console(cmdline))
+               return 0;
+#endif
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+       if (!early_serial_console_init(cmdline))
+               return 0;
+#endif
+
+       return -1;
+}
+
+static inline void
+mark_bsp_online (void)
+{
+#ifdef CONFIG_SMP
+       /* If we register an early console, allow CPU 0 to printk */
+       cpu_set(smp_processor_id(), cpu_online_map);
+#endif
+}
+
+void __init
+#ifdef XEN
+early_setup_arch (char **cmdline_p)
+#else
+setup_arch (char **cmdline_p)
+#endif
+{
+       unw_init();
+
+       ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
+
+       *cmdline_p = __va(ia64_boot_param->command_line);
+#ifdef XEN
+       efi_init();
+#else
+       strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+
+       efi_init();
+       io_port_init();
+#endif
+
+#ifdef CONFIG_IA64_GENERIC
+       {
+               const char *mvec_name = strstr (*cmdline_p, "machvec=");
+               char str[64];
+
+               if (mvec_name) {
+                       const char *end;
+                       size_t len;
+
+                       mvec_name += 8;
+                       end = strchr (mvec_name, ' ');
+                       if (end)
+                               len = end - mvec_name;
+                       else
+                               len = strlen (mvec_name);
+                       len = min(len, sizeof (str) - 1);
+                       strncpy (str, mvec_name, len);
+                       str[len] = '\0';
+                       mvec_name = str;
+               } else
+                       mvec_name = acpi_get_sysname();
+               machvec_init(mvec_name);
+       }
+#endif
+
+#ifdef XEN
+       early_cmdline_parse(cmdline_p);
+       cmdline_parse(*cmdline_p);
+#undef CONFIG_ACPI_BOOT
+#endif
+       if (early_console_setup(*cmdline_p) == 0)
+               mark_bsp_online();
+
+#ifdef CONFIG_ACPI_BOOT
+       /* Initialize the ACPI boot-time table parser */
+       acpi_table_init();
+# ifdef CONFIG_ACPI_NUMA
+       acpi_numa_init();
+# endif
+#else
+# ifdef CONFIG_SMP
+       smp_build_cpu_map();    /* happens, e.g., with the Ski simulator */
+# endif
+#endif /* CONFIG_ACPI_BOOT */
+
+#ifndef XEN
+       find_memory();
+#else
+       io_port_init();
+}
+
+void __init
+late_setup_arch (char **cmdline_p)
+{
+#undef CONFIG_ACPI_BOOT
+       acpi_table_init();
+#endif
+       /* process SAL system table: */
+       ia64_sal_init(efi.sal_systab);
+
+#ifdef CONFIG_SMP
+       cpu_physical_id(0) = hard_smp_processor_id();
+#endif
+
+#ifdef CONFIG_VTI
+       identify_vmx_feature();
+#endif // CONFIG_VTI
+
+       cpu_init();     /* initialize the bootstrap CPU */
+
+#ifdef CONFIG_ACPI_BOOT
+       acpi_boot_init();
+#endif
+
+#ifdef CONFIG_VT
+       if (!conswitchp) {
+# if defined(CONFIG_DUMMY_CONSOLE)
+               conswitchp = &dummy_con;
+# endif
+# if defined(CONFIG_VGA_CONSOLE)
+               /*
+                * Non-legacy systems may route legacy VGA MMIO range to system
+                * memory.  vga_con probes the MMIO hole, so memory looks like
+                * a VGA device to it.  The EFI memory map can tell us if it's
+                * memory so we can avoid this problem.
+                */
+               if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
+                       conswitchp = &vga_con;
+# endif
+       }
+#endif
+
+       /* enable IA-64 Machine Check Abort Handling unless disabled */
+       if (!strstr(saved_command_line, "nomca"))
+               ia64_mca_init();
+
+       platform_setup(cmdline_p);
+       paging_init();
+}
+
+/*
+ * Display cpu info for all cpu's.
+ */
+static int
+show_cpuinfo (struct seq_file *m, void *v)
+{
+#ifdef CONFIG_SMP
+#      define lpj      c->loops_per_jiffy
+#      define cpunum   c->cpu
+#else
+#      define lpj      loops_per_jiffy
+#      define cpunum   0
+#endif
+       static struct {
+               unsigned long mask;
+               const char *feature_name;
+       } feature_bits[] = {
+               { 1UL << 0, "branchlong" },
+               { 1UL << 1, "spontaneous deferral"},
+               { 1UL << 2, "16-byte atomic ops" }
+       };
+       char family[32], features[128], *cp, sep;
+       struct cpuinfo_ia64 *c = v;
+       unsigned long mask;
+       int i;
+
+       mask = c->features;
+
+       switch (c->family) {
+             case 0x07:        memcpy(family, "Itanium", 8); break;
+             case 0x1f:        memcpy(family, "Itanium 2", 10); break;
+             default:          sprintf(family, "%u", c->family); break;
+       }
+
+       /* build the feature string: */
+       memcpy(features, " standard", 10);
+       cp = features;
+       sep = 0;
+       for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
+               if (mask & feature_bits[i].mask) {
+                       if (sep)
+                               *cp++ = sep;
+                       sep = ',';
+                       *cp++ = ' ';
+                       strcpy(cp, feature_bits[i].feature_name);
+                       cp += strlen(feature_bits[i].feature_name);
+                       mask &= ~feature_bits[i].mask;
+               }
+       }
+       if (mask) {
+               /* print unknown features as a hex value: */
+               if (sep)
+                       *cp++ = sep;
+               sprintf(cp, " 0x%lx", mask);
+       }
+
+       seq_printf(m,
+                  "processor  : %d\n"
+                  "vendor     : %s\n"
+                  "arch       : IA-64\n"
+                  "family     : %s\n"
+                  "model      : %u\n"
+                  "revision   : %u\n"
+                  "archrev    : %u\n"
+                  "features   :%s\n"   /* don't change this---it _is_ right! */
+                  "cpu number : %lu\n"
+                  "cpu regs   : %u\n"
+                  "cpu MHz    : %lu.%06lu\n"
+                  "itc MHz    : %lu.%06lu\n"
+                  "BogoMIPS   : %lu.%02lu\n\n",
+                  cpunum, c->vendor, family, c->model, c->revision, c->archrev,
+                  features, c->ppn, c->number,
+                  c->proc_freq / 1000000, c->proc_freq % 1000000,
+                  c->itc_freq / 1000000, c->itc_freq % 1000000,
+                  lpj*HZ/500000, (lpj*HZ/5000) % 100);
+       return 0;
+}
+
+static void *
+c_start (struct seq_file *m, loff_t *pos)
+{
+#ifdef CONFIG_SMP
+       while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
+               ++*pos;
+#endif
+       return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
+}
+
+static void *
+c_next (struct seq_file *m, void *v, loff_t *pos)
+{
+       ++*pos;
+       return c_start(m, pos);
+}
+
+static void
+c_stop (struct seq_file *m, void *v)
+{
+}
+
+#ifndef XEN
+struct seq_operations cpuinfo_op = {
+       .start =        c_start,
+       .next =         c_next,
+       .stop =         c_stop,
+       .show =         show_cpuinfo
+};
+#endif
+
+void
+identify_cpu (struct cpuinfo_ia64 *c)
+{
+       union {
+               unsigned long bits[5];
+               struct {
+                       /* id 0 & 1: */
+                       char vendor[16];
+
+                       /* id 2 */
+                       u64 ppn;                /* processor serial number */
+
+                       /* id 3: */
+                       unsigned number         :  8;
+                       unsigned revision       :  8;
+                       unsigned model          :  8;
+                       unsigned family         :  8;
+                       unsigned archrev        :  8;
+                       unsigned reserved       : 24;
+
+                       /* id 4: */
+                       u64 features;
+               } field;
+       } cpuid;
+       pal_vm_info_1_u_t vm1;
+       pal_vm_info_2_u_t vm2;
+       pal_status_t status;
+       unsigned long impl_va_msb = 50, phys_addr_size = 44;    /* Itanium defaults */
+       int i;
+
+       for (i = 0; i < 5; ++i)
+               cpuid.bits[i] = ia64_get_cpuid(i);
+
+       memcpy(c->vendor, cpuid.field.vendor, 16);
+#ifdef CONFIG_SMP
+       c->cpu = smp_processor_id();
+#endif
+       c->ppn = cpuid.field.ppn;
+       c->number = cpuid.field.number;
+       c->revision = cpuid.field.revision;
+       c->model = cpuid.field.model;
+       c->family = cpuid.field.family;
+       c->archrev = cpuid.field.archrev;
+       c->features = cpuid.field.features;
+
+       status = ia64_pal_vm_summary(&vm1, &vm2);
+       if (status == PAL_STATUS_SUCCESS) {
+               impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
+               phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
+       }
+       c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
+       c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
+
+#ifdef CONFIG_VTI
+       /* If vmx feature is on, do necessary initialization for vmx */
+       if (vmx_enabled)
+               vmx_init_env();
+#endif
+}
+
+void
+setup_per_cpu_areas (void)
+{
+       /* start_kernel() requires this... */
+}
+
+static void
+get_max_cacheline_size (void)
+{
+       unsigned long line_size, max = 1;
+       u64 l, levels, unique_caches;
+        pal_cache_config_info_t cci;
+        s64 status;
+
+        status = ia64_pal_cache_summary(&levels, &unique_caches);
+        if (status != 0) {
+                printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
+                       __FUNCTION__, status);
+                max = SMP_CACHE_BYTES;
+               goto out;
+        }
+
+       for (l = 0; l < levels; ++l) {
+               status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
+                                                   &cci);
+               if (status != 0) {
+                       printk(KERN_ERR
+                              "%s: ia64_pal_cache_config_info(l=%lu) failed (status=%ld)\n",
+                              __FUNCTION__, l, status);
+                       max = SMP_CACHE_BYTES;
+               }
+               line_size = 1 << cci.pcci_line_size;
+               if (line_size > max)
+                       max = line_size;
+        }
+  out:
+       if (max > ia64_max_cacheline_size)
+               ia64_max_cacheline_size = max;
+}
+
+/*
+ * cpu_init() initializes state that is per-CPU.  This function acts
+ * as a 'CPU state barrier', nothing should get across.
+ */
+void
+cpu_init (void)
+{
+       extern void __devinit ia64_mmu_init (void *);
+       unsigned long num_phys_stacked;
+       pal_vm_info_2_u_t vmi;
+       unsigned int max_ctx;
+       struct cpuinfo_ia64 *cpu_info;
+       void *cpu_data;
+
+       cpu_data = per_cpu_init();
+
+       /*
+        * We set ar.k3 so that assembly code in MCA handler can compute
+        * physical addresses of per cpu variables with a simple:
+        *   phys = ar.k3 + &per_cpu_var
+        */
+       ia64_set_kr(IA64_KR_PER_CPU_DATA,
+                   ia64_tpa(cpu_data) - (long) __per_cpu_start);
+
+       get_max_cacheline_size();
+
+       /*
+        * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
+        * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
+        * depends on the data returned by identify_cpu().  We break the dependency by
+        * accessing cpu_data() through the canonical per-CPU address.
+        */
+       cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
+       identify_cpu(cpu_info);
+
+#ifdef CONFIG_MCKINLEY
+       {
+#              define FEATURE_SET 16
+               struct ia64_pal_retval iprv;
+
+               if (cpu_info->family == 0x1f) {
+                       PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
+                       if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
+                               PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
+                                             (iprv.v1 | 0x80), FEATURE_SET, 0);
+               }
+       }
+#endif
+
+       /* Clear the stack memory reserved for pt_regs: */
+       memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
+
+       ia64_set_kr(IA64_KR_FPU_OWNER, 0);
+
+       /*
+        * Initialize default control register to defer all speculative faults.  The
+        * kernel MUST NOT depend on a particular setting of these bits (in other words,
+        * the kernel must have recovery code for all speculative accesses).  Turn on
+        * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
+        * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
+        * be fine).
+        */
+       ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
+                                       | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
+       atomic_inc(&init_mm.mm_count);
+       current->active_mm = &init_mm;
+#ifdef XEN
+       if (current->domain->arch.mm)
+#else
+       if (current->mm)
+#endif
+               BUG();
+
+       ia64_mmu_init(ia64_imva(cpu_data));
+       ia64_mca_cpu_init(ia64_imva(cpu_data));
+
+#ifdef CONFIG_IA32_SUPPORT
+       ia32_cpu_init();
+#endif
+
+       /* Clear ITC to eliminate sched_clock() overflows in human time.  */
+       ia64_set_itc(0);
+
+       /* disable all local interrupt sources: */
+       ia64_set_itv(1 << 16);
+       ia64_set_lrr0(1 << 16);
+       ia64_set_lrr1(1 << 16);
+       ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
+       ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
+
+       /* clear TPR & XTP to enable all interrupt classes: */
+       ia64_setreg(_IA64_REG_CR_TPR, 0);
+#ifdef CONFIG_SMP
+       normal_xtp();
+#endif
+
+       /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
+       if (ia64_pal_vm_summary(NULL, &vmi) == 0)
+               max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
+       else {
+               printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
+               max_ctx = (1U << 15) - 1;       /* use architected minimum */
+       }
+       while (max_ctx < ia64_ctx.max_ctx) {
+               unsigned int old = ia64_ctx.max_ctx;
+               if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
+                       break;
+       }
+
+       if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
+               printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
+                      "stacked regs\n");
+               num_phys_stacked = 96;
+       }
+       /* size of physical stacked register partition plus 8 bytes: */
+       __get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
+       platform_cpu_init();
+}
+
+void
+check_bugs (void)
+{
+       ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
+                              (unsigned long) __end___mckinley_e9_bundles);
+}
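
The max_ctx computation in cpu_init() above is pure bit arithmetic on the PAL-reported region-ID width. A minimal standalone sketch of that calculation (the helper name below is hypothetical and not part of the patch):

#include <stdio.h>

/* Mirrors the max_ctx computation in cpu_init(): of the rid_size region-ID
 * bits reported by PAL, 3 are reserved for the region number, leaving
 * (rid_size - 3) bits for mm context numbers. */
static unsigned int max_ctx_for_rid_size(unsigned int rid_size)
{
	return (1U << (rid_size - 3)) - 1;
}

int main(void)
{
	/* 18 RID bits is the fallback cpu_init() assumes when PAL fails. */
	printf("rid_size=18 -> max_ctx=%u\n", max_ctx_for_rid_size(18));
	printf("rid_size=24 -> max_ctx=%u\n", max_ctx_for_rid_size(24));
	return 0;
}

With the 18-bit fallback this yields 32767, which matches the architected minimum used in cpu_init() above.
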
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/time.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/time.c      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,264 @@
+/*
+ * linux/arch/ia64/kernel/time.c
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *     David Mosberger <davidm@xxxxxxxxxx>
+ * Copyright (C) 1999 Don Dugger <don.dugger@xxxxxxxxx>
+ * Copyright (C) 1999-2000 VA Linux Systems
+ * Copyright (C) 1999-2000 Walt Drummond <drummond@xxxxxxxxxxx>
+ */
+#include <linux/config.h>
+
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/profile.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/efi.h>
+#include <linux/profile.h>
+#include <linux/timex.h>
+
+#include <asm/machvec.h>
+#include <asm/delay.h>
+#include <asm/hw_irq.h>
+#include <asm/ptrace.h>
+#include <asm/sal.h>
+#include <asm/sections.h>
+#include <asm/system.h>
+#ifdef XEN
+#include <linux/jiffies.h>     // not included by xen/sched.h
+#endif
+
+extern unsigned long wall_jiffies;
+
+u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
+#define TIME_KEEPER_ID 0       /* smp_processor_id() of time-keeper */
+
+#ifdef CONFIG_IA64_DEBUG_IRQ
+
+unsigned long last_cli_ip;
+EXPORT_SYMBOL(last_cli_ip);
+
+#endif
+
+#ifndef XEN
+static struct time_interpolator itc_interpolator = {
+       .shift = 16,
+       .mask = 0xffffffffffffffffLL,
+       .source = TIME_SOURCE_CPU
+};
+
+static irqreturn_t
+timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
+{
+       unsigned long new_itm;
+
+       if (unlikely(cpu_is_offline(smp_processor_id()))) {
+               return IRQ_HANDLED;
+       }
+
+       platform_timer_interrupt(irq, dev_id, regs);
+
+       new_itm = local_cpu_data->itm_next;
+
+       if (!time_after(ia64_get_itc(), new_itm))
+               printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
+                      ia64_get_itc(), new_itm);
+
+       profile_tick(CPU_PROFILING, regs);
+
+       while (1) {
+               update_process_times(user_mode(regs));
+
+               new_itm += local_cpu_data->itm_delta;
+
+               if (smp_processor_id() == TIME_KEEPER_ID) {
+                       /*
+                        * Here we are in the timer irq handler. We have irqs locally
+                        * disabled, but we don't know if the timer_bh is running on
+                        * another CPU. We need to avoid an SMP race by acquiring the
+                        * xtime_lock.
+                        */
+                       write_seqlock(&xtime_lock);
+                       do_timer(regs);
+                       local_cpu_data->itm_next = new_itm;
+                       write_sequnlock(&xtime_lock);
+               } else
+                       local_cpu_data->itm_next = new_itm;
+
+               if (time_after(new_itm, ia64_get_itc()))
+                       break;
+       }
+
+       do {
+               /*
+                * If we're too close to the next clock tick for
+                * comfort, we increase the safety margin by
+                * intentionally dropping the next tick(s).  We do NOT
+                * update itm.next because that would force us to call
+                * do_timer() which in turn would let our clock run
+                * too fast (with the potentially devastating effect
+                * of losing monotony of time).
+                */
+               while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
+                       new_itm += local_cpu_data->itm_delta;
+               ia64_set_itm(new_itm);
+               /* double check, in case we got hit by a (slow) PMI: */
+       } while (time_after_eq(ia64_get_itc(), new_itm));
+       return IRQ_HANDLED;
+}
+#endif
+
+/*
+ * Encapsulate access to the itm structure for SMP.
+ */
+void
+ia64_cpu_local_tick (void)
+{
+       int cpu = smp_processor_id();
+       unsigned long shift = 0, delta;
+
+       /* arrange for the cycle counter to generate a timer interrupt: */
+       ia64_set_itv(IA64_TIMER_VECTOR);
+
+       delta = local_cpu_data->itm_delta;
+       /*
+        * Stagger the timer tick for each CPU so they don't occur all at (almost) the
+        * same time:
+        */
+       if (cpu) {
+               unsigned long hi = 1UL << ia64_fls(cpu);
+               shift = (2*(cpu - hi) + 1) * delta/hi/2;
+       }
+       local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
+       ia64_set_itm(local_cpu_data->itm_next);
+}
+
+static int nojitter;
+
+static int __init nojitter_setup(char *str)
+{
+       nojitter = 1;
+       printk("Jitter checking for ITC timers disabled\n");
+       return 1;
+}
+
+__setup("nojitter", nojitter_setup);
+
+
+void __devinit
+ia64_init_itm (void)
+{
+       unsigned long platform_base_freq, itc_freq;
+       struct pal_freq_ratio itc_ratio, proc_ratio;
+       long status, platform_base_drift, itc_drift;
+
+       /*
+        * According to SAL v2.6, we need to use a SAL call to determine the platform base
+        * frequency and then a PAL call to determine the frequency ratio between the ITC
+        * and the base frequency.
+        */
+       status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
+                                   &platform_base_freq, &platform_base_drift);
+       if (status != 0) {
+               printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
+       } else {
+               status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
+               if (status != 0)
+                       printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
+       }
+       if (status != 0) {
+               /* invent "random" values */
+               printk(KERN_ERR
+                      "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
+               platform_base_freq = 100000000;
+               platform_base_drift = -1;       /* no drift info */
+               itc_ratio.num = 3;
+               itc_ratio.den = 1;
+       }
+       if (platform_base_freq < 40000000) {
+               printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
+                      platform_base_freq);
+               platform_base_freq = 75000000;
+               platform_base_drift = -1;
+       }
+       if (!proc_ratio.den)
+               proc_ratio.den = 1;     /* avoid division by zero */
+       if (!itc_ratio.den)
+               itc_ratio.den = 1;      /* avoid division by zero */
+
+       itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;
+
+       local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
+       printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, "
+              "ITC freq=%lu.%03luMHz", smp_processor_id(),
+              platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
+              itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);
+
+       if (platform_base_drift != -1) {
+               itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
+               printk("+/-%ldppm\n", itc_drift);
+       } else {
+               itc_drift = -1;
+               printk("\n");
+       }
+
+       local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
+       local_cpu_data->itc_freq = itc_freq;
+       local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
+       local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
+                                       + itc_freq/2)/itc_freq;
+
+       if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
+#ifndef XEN
+               itc_interpolator.frequency = local_cpu_data->itc_freq;
+               itc_interpolator.drift = itc_drift;
+#ifdef CONFIG_SMP
+               /* On IA64 in an SMP configuration ITCs are never accurately synchronized.
+                * Jitter compensation requires a cmpxchg which may limit
+                * the scalability of the syscalls for retrieving time.
+                * The ITC synchronization is usually successful to within a few
+                * ITC ticks but this is not a sure thing. If you need to improve
+                * timer performance in SMP situations then boot the kernel with the
+                * "nojitter" option. However, doing so may result in time fluctuating (maybe
+                * even going backward) if the ITC offsets between the individual CPUs
+                * are too large.
+                */
+               if (!nojitter) itc_interpolator.jitter = 1;
+#endif
+               register_time_interpolator(&itc_interpolator);
+#endif
+       }
+
+       /* Setup the CPU local timer tick */
+       ia64_cpu_local_tick();
+}
+
+#ifndef XEN
+static struct irqaction timer_irqaction = {
+       .handler =      timer_interrupt,
+       .flags =        SA_INTERRUPT,
+       .name =         "timer"
+};
+
+void __init
+time_init (void)
+{
+       register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
+       efi_gettimeofday(&xtime);
+       ia64_init_itm();
+
+       /*
+        * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
+        * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
+        */
+       set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
+}
+#endif
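
ia64_cpu_local_tick() above staggers each CPU's first timer interrupt so the ticks do not all fire at the same instant. A minimal standalone sketch of the stagger formula, assuming ia64_fls() returns the bit index of the most-significant set bit (the helper below is a stand-in for the real intrinsic, and the delta value is hypothetical):

#include <stdio.h>

/* Stand-in for ia64_fls(): index of the most-significant set bit.
 * (Assumption for illustration; the real routine is an IA-64 intrinsic.) */
static unsigned long fls_index(unsigned long x)
{
	unsigned long i = 0;

	while (x >>= 1)
		++i;
	return i;
}

int main(void)
{
	unsigned long delta = 1000000;	/* hypothetical itm_delta in ITC cycles */
	int cpu;

	/* CPU 0 takes no shift; the others are spread over (0, delta). */
	for (cpu = 1; cpu < 8; ++cpu) {
		unsigned long hi = 1UL << fls_index(cpu);
		unsigned long shift = (2 * (cpu - hi) + 1) * delta / hi / 2;

		printf("cpu %d: shift = %lu cycles (%.2f of delta)\n",
		       cpu, shift, (double) shift / delta);
	}
	return 0;
}

For CPUs 1..7 this places the offsets at 1/2, 1/4, 3/4, 1/8, 3/8, 5/8 and 7/8 of itm_delta, spreading the per-CPU ticks across one tick period.
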
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/tlb.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/tlb.c       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,199 @@
+/*
+ * TLB support routines.
+ *
+ * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *
+ * 08/02/00 A. Mallick <asit.k.mallick@xxxxxxxxx>
+ *             Modified RID allocation for SMP
+ *          Goutham Rao <goutham.rao@xxxxxxxxx>
+ *              IPI based ptc implementation and A-step IPI implementation.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+
+#include <asm/delay.h>
+#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
+#include <asm/pal.h>
+#include <asm/tlbflush.h>
+
+static struct {
+       unsigned long mask;     /* mask of supported purge page-sizes */
+       unsigned long max_bits; /* log2() of largest supported purge page-size */
+} purge;
+
+struct ia64_ctx ia64_ctx = {
+       .lock =         SPIN_LOCK_UNLOCKED,
+       .next =         1,
+       .limit =        (1 << 15) - 1,          /* start out with the safe (architected) limit */
+       .max_ctx =      ~0U
+};
+
+DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
+
+/*
+ * Acquire the ia64_ctx.lock before calling this function!
+ */
+void
+wrap_mmu_context (struct mm_struct *mm)
+{
+#ifdef XEN
+printf("wrap_mmu_context: called, not implemented\n");
+#else
+       unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
+       struct task_struct *tsk;
+       int i;
+
+       if (ia64_ctx.next > max_ctx)
+               ia64_ctx.next = 300;    /* skip daemons */
+       ia64_ctx.limit = max_ctx + 1;
+
+       /*
+        * Scan all the task's mm->context and set proper safe range
+        */
+
+       read_lock(&tasklist_lock);
+  repeat:
+       for_each_process(tsk) {
+               if (!tsk->mm)
+                       continue;
+               tsk_context = tsk->mm->context;
+               if (tsk_context == ia64_ctx.next) {
+                       if (++ia64_ctx.next >= ia64_ctx.limit) {
+                               /* empty range: reset the range limit and start over */
+                               if (ia64_ctx.next > max_ctx)
+                                       ia64_ctx.next = 300;
+                               ia64_ctx.limit = max_ctx + 1;
+                               goto repeat;
+                       }
+               }
+               if ((tsk_context > ia64_ctx.next) && (tsk_context < ia64_ctx.limit))
+                       ia64_ctx.limit = tsk_context;
+       }
+       read_unlock(&tasklist_lock);
+       /* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
+       {
+               int cpu = get_cpu(); /* prevent preemption/migration */
+               for (i = 0; i < NR_CPUS; ++i)
+                       if (cpu_online(i) && (i != cpu))
+                               per_cpu(ia64_need_tlb_flush, i) = 1;
+               put_cpu();
+       }
+       local_flush_tlb_all();
+#endif
+}
+
+void
+ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
+{
+       static DEFINE_SPINLOCK(ptcg_lock);
+
+       /* HW requires global serialization of ptc.ga.  */
+       spin_lock(&ptcg_lock);
+       {
+               do {
+                       /*
+                        * Flush ALAT entries also.
+                        */
+                       ia64_ptcga(start, (nbits<<2));
+                       ia64_srlz_i();
+                       start += (1UL << nbits);
+               } while (start < end);
+       }
+       spin_unlock(&ptcg_lock);
+}
+
+void
+local_flush_tlb_all (void)
+{
+       unsigned long i, j, flags, count0, count1, stride0, stride1, addr;
+
+       addr    = local_cpu_data->ptce_base;
+       count0  = local_cpu_data->ptce_count[0];
+       count1  = local_cpu_data->ptce_count[1];
+       stride0 = local_cpu_data->ptce_stride[0];
+       stride1 = local_cpu_data->ptce_stride[1];
+
+       local_irq_save(flags);
+       for (i = 0; i < count0; ++i) {
+               for (j = 0; j < count1; ++j) {
+                       ia64_ptce(addr);
+                       addr += stride1;
+               }
+               addr += stride0;
+       }
+       local_irq_restore(flags);
+       ia64_srlz_i();                  /* srlz.i implies srlz.d */
+}
+EXPORT_SYMBOL(local_flush_tlb_all);
+
+void
+flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+#ifdef XEN
+printf("flush_tlb_range: called, not implemented\n");
+#else
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long size = end - start;
+       unsigned long nbits;
+
+       if (mm != current->active_mm) {
+               /* this does happen, but perhaps it's not worth optimizing for? */
+#ifdef CONFIG_SMP
+               flush_tlb_all();
+#else
+               mm->context = 0;
+#endif
+               return;
+       }
+
+       nbits = ia64_fls(size + 0xfff);
+       while (unlikely (((1UL << nbits) & purge.mask) == 0) && (nbits < purge.max_bits))
+               ++nbits;
+       if (nbits > purge.max_bits)
+               nbits = purge.max_bits;
+       start &= ~((1UL << nbits) - 1);
+
+# ifdef CONFIG_SMP
+       platform_global_tlb_purge(start, end, nbits);
+# else
+       do {
+               ia64_ptcl(start, (nbits<<2));
+               start += (1UL << nbits);
+       } while (start < end);
+# endif
+
+       ia64_srlz_i();                  /* srlz.i implies srlz.d */
+#endif
+}
+EXPORT_SYMBOL(flush_tlb_range);
+
+void __devinit
+ia64_tlb_init (void)
+{
+       ia64_ptce_info_t ptce_info;
+       unsigned long tr_pgbits;
+       long status;
+
+       if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
+               printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld;"
+                      "defaulting to architected purge page-sizes.\n", status);
+               purge.mask = 0x115557000UL;
+       }
+       purge.max_bits = ia64_fls(purge.mask);
+
+       ia64_get_ptce(&ptce_info);
+       local_cpu_data->ptce_base = ptce_info.base;
+       local_cpu_data->ptce_count[0] = ptce_info.count[0];
+       local_cpu_data->ptce_count[1] = ptce_info.count[1];
+       local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
+       local_cpu_data->ptce_stride[1] = ptce_info.stride[1];
+
+       local_flush_tlb_all();          /* nuke left overs from bootstrapping... */
+}
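
flush_tlb_range() above rounds the purge size up to the nearest page size the hardware can purge, as advertised in purge.mask, and caps it at purge.max_bits. A minimal standalone sketch of that selection, using the architected fallback mask from ia64_tlb_init() as a hypothetical input and a stand-in for ia64_fls():

#include <stdio.h>

/* Stand-in for ia64_fls(): index of the most-significant set bit. */
static unsigned long long fls_index(unsigned long long x)
{
	unsigned long long i = 0;

	while (x >>= 1)
		++i;
	return i;
}

/* Mirrors the page-size selection in flush_tlb_range(): start at log2 of
 * the (page-rounded) range size and walk upward until the size is one the
 * hardware can purge, capping at the largest supported size. */
static unsigned long long purge_nbits(unsigned long long size,
				      unsigned long long purge_mask,
				      unsigned long long max_bits)
{
	unsigned long long nbits = fls_index(size + 0xfff);

	while ((((1ULL << nbits) & purge_mask) == 0) && (nbits < max_bits))
		++nbits;
	if (nbits > max_bits)
		nbits = max_bits;
	return nbits;
}

int main(void)
{
	/* Architected fallback purge mask from ia64_tlb_init(), used here as
	 * a hypothetical input. */
	unsigned long long mask = 0x115557000ULL;
	unsigned long long max_bits = fls_index(mask);

	printf("64KB range -> purge page size 2^%llu bytes\n",
	       purge_nbits(64 * 1024, mask, max_bits));
	printf("1MB range  -> purge page size 2^%llu bytes\n",
	       purge_nbits(1024 * 1024, mask, max_bits));
	return 0;
}
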
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/unaligned.c
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/unaligned.c Tue Aug  2 23:59:09 2005
@@ -0,0 +1,1653 @@
+/*
+ * Architecture-specific unaligned trap handling.
+ *
+ * Copyright (C) 1999-2002, 2004 Hewlett-Packard Co
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *
+ * 2002/12/09   Fix rotating register handling (off-by-1 error, missing fr-rotation).  Fix
+ *             get_rse_reg() to not leak kernel bits to user-level (reading an out-of-frame
+ *             stacked register returns an undefined value; it does NOT trigger a
+ *             "rsvd register fault").
+ * 2001/10/11  Fix unaligned access to rotating registers in s/w pipelined loops.
+ * 2001/08/13  Correct size of extended floats (float_fsz) from 16 to 10 bytes.
+ * 2001/01/17  Add support emulation of unaligned kernel accesses.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp_lock.h>
+#include <linux/tty.h>
+
+#include <asm/intrinsics.h>
+#include <asm/processor.h>
+#include <asm/rse.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+extern void die_if_kernel(char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
+
+#undef DEBUG_UNALIGNED_TRAP
+
+#ifdef DEBUG_UNALIGNED_TRAP
+# define DPRINT(a...)  do { printk("%s %u: ", __FUNCTION__, __LINE__); printk (a); } while (0)
+# define DDUMP(str,vp,len)     dump(str, vp, len)
+
+static void
+dump (const char *str, void *vp, size_t len)
+{
+       unsigned char *cp = vp;
+       int i;
+
+       printk("%s", str);
+       for (i = 0; i < len; ++i)
+               printk (" %02x", *cp++);
+       printk("\n");
+}
+#else
+# define DPRINT(a...)
+# define DDUMP(str,vp,len)
+#endif
+
+#define IA64_FIRST_STACKED_GR  32
+#define IA64_FIRST_ROTATING_FR 32
+#define SIGN_EXT9              0xffffffffffffff00ul
+
+/*
+ * For M-unit:
+ *
+ *  opcode |   m  |   x6    |
+ * --------|------|---------|
+ * [40-37] | [36] | [35:30] |
+ * --------|------|---------|
+ *     4   |   1  |    6    | = 11 bits
+ * --------------------------
+ * However bits [31:30] are not directly useful to distinguish between
+ * load/store so we can use [35:32] instead, which gives the following
+ * mask ([40:32]) using 9 bits. The 'e' comes from the fact that we defer
+ * checking the m-bit until later in the load/store emulation.
+ */
+#define IA64_OPCODE_MASK       0x1ef
+#define IA64_OPCODE_SHIFT      32
+
+/*
+ * Table C-28 Integer Load/Store
+ *
+ * We ignore [35:32]= 0x6, 0x7, 0xE, 0xF
+ *
+ * ld8.fill, st8.fill  MUST be aligned because the RNATs are based on
+ * the address (bits [8:3]), so we must fail.
+ */
+#define LD_OP            0x080
+#define LDS_OP           0x081
+#define LDA_OP           0x082
+#define LDSA_OP          0x083
+#define LDBIAS_OP        0x084
+#define LDACQ_OP         0x085
+/* 0x086, 0x087 are not relevant */
+#define LDCCLR_OP        0x088
+#define LDCNC_OP         0x089
+#define LDCCLRACQ_OP     0x08a
+#define ST_OP            0x08c
+#define STREL_OP         0x08d
+/* 0x08e,0x8f are not relevant */
+
+/*
+ * Table C-29 Integer Load +Reg
+ *
+ * we use the ld->m (bit [36:36]) field to determine whether or not we have
+ * a load/store of this form.
+ */
+
+/*
+ * Table C-30 Integer Load/Store +Imm
+ *
+ * We ignore [35:32]= 0x6, 0x7, 0xE, 0xF
+ *
+ * ld8.fill, st8.fill  must be aligned because the Nat register are based on
+ * the address, so we must fail and the program must be fixed.
+ */
+#define LD_IMM_OP            0x0a0
+#define LDS_IMM_OP           0x0a1
+#define LDA_IMM_OP           0x0a2
+#define LDSA_IMM_OP          0x0a3
+#define LDBIAS_IMM_OP        0x0a4
+#define LDACQ_IMM_OP         0x0a5
+/* 0x0a6, 0xa7 are not relevant */
+#define LDCCLR_IMM_OP        0x0a8
+#define LDCNC_IMM_OP         0x0a9
+#define LDCCLRACQ_IMM_OP     0x0aa
+#define ST_IMM_OP            0x0ac
+#define STREL_IMM_OP         0x0ad
+/* 0x0ae,0xaf are not relevant */
+
+/*
+ * Table C-32 Floating-point Load/Store
+ */
+#define LDF_OP           0x0c0
+#define LDFS_OP          0x0c1
+#define LDFA_OP          0x0c2
+#define LDFSA_OP         0x0c3
+/* 0x0c6 is irrelevant */
+#define LDFCCLR_OP       0x0c8
+#define LDFCNC_OP        0x0c9
+/* 0x0cb is irrelevant  */
+#define STF_OP           0x0cc
+
+/*
+ * Table C-33 Floating-point Load +Reg
+ *
+ * we use the ld->m (bit [36:36]) field to determine whether or not we have
+ * a load/store of this form.
+ */
+
+/*
+ * Table C-34 Floating-point Load/Store +Imm
+ */
+#define LDF_IMM_OP       0x0e0
+#define LDFS_IMM_OP      0x0e1
+#define LDFA_IMM_OP      0x0e2
+#define LDFSA_IMM_OP     0x0e3
+/* 0x0e6 is irrelevant */
+#define LDFCCLR_IMM_OP   0x0e8
+#define LDFCNC_IMM_OP    0x0e9
+#define STF_IMM_OP       0x0ec
+
+typedef struct {
+       unsigned long    qp:6;  /* [0:5]   */
+       unsigned long    r1:7;  /* [6:12]  */
+       unsigned long   imm:7;  /* [13:19] */
+       unsigned long    r3:7;  /* [20:26] */
+       unsigned long     x:1;  /* [27:27] */
+       unsigned long  hint:2;  /* [28:29] */
+       unsigned long x6_sz:2;  /* [30:31] */
+       unsigned long x6_op:4;  /* [32:35], x6 = x6_sz|x6_op */
+       unsigned long     m:1;  /* [36:36] */
+       unsigned long    op:4;  /* [37:40] */
+       unsigned long   pad:23; /* [41:63] */
+} load_store_t;
+
+
+typedef enum {
+       UPD_IMMEDIATE,  /* ldXZ r1=[r3],imm(9) */
+       UPD_REG         /* ldXZ r1=[r3],r2     */
+} update_t;
+
+/*
+ * We use tables to keep track of the offsets of registers in the saved state.
+ * This way we save having big switch/case statements.
+ *
+ * We use bit 0 to indicate switch_stack or pt_regs.
+ * The offset is simply shifted by 1 bit.
+ * A 2-byte value should be enough to hold any kind of offset
+ *
+ * In case the calling convention changes (and thus pt_regs/switch_stack)
+ * simply use RSW instead of RPT or vice-versa.
+ */
+
+#define RPO(x) ((size_t) &((struct pt_regs *)0)->x)
+#define RSO(x) ((size_t) &((struct switch_stack *)0)->x)
+
+#define RPT(x)         (RPO(x) << 1)
+#define RSW(x)         (1| RSO(x)<<1)
+
+#define GR_OFFS(x)     (gr_info[x]>>1)
+#define GR_IN_SW(x)    (gr_info[x] & 0x1)
+
+#define FR_OFFS(x)     (fr_info[x]>>1)
+#define FR_IN_SW(x)    (fr_info[x] & 0x1)
+
+static u16 gr_info[32]={
+       0,                      /* r0 is read-only : WE SHOULD NEVER GET THIS */
+
+       RPT(r1), RPT(r2), RPT(r3),
+
+#ifdef  CONFIG_VTI
+       RPT(r4), RPT(r5), RPT(r6), RPT(r7),
+#else   //CONFIG_VTI
+       RSW(r4), RSW(r5), RSW(r6), RSW(r7),
+#endif  //CONFIG_VTI
+
+       RPT(r8), RPT(r9), RPT(r10), RPT(r11),
+       RPT(r12), RPT(r13), RPT(r14), RPT(r15),
+
+       RPT(r16), RPT(r17), RPT(r18), RPT(r19),
+       RPT(r20), RPT(r21), RPT(r22), RPT(r23),
+       RPT(r24), RPT(r25), RPT(r26), RPT(r27),
+       RPT(r28), RPT(r29), RPT(r30), RPT(r31)
+};
+
+static u16 fr_info[32]={
+       0,                      /* constant : WE SHOULD NEVER GET THIS */
+       0,                      /* constant : WE SHOULD NEVER GET THIS */
+
+       RSW(f2), RSW(f3), RSW(f4), RSW(f5),
+
+       RPT(f6), RPT(f7), RPT(f8), RPT(f9),
+       RPT(f10), RPT(f11),
+
+       RSW(f12), RSW(f13), RSW(f14),
+       RSW(f15), RSW(f16), RSW(f17), RSW(f18), RSW(f19),
+       RSW(f20), RSW(f21), RSW(f22), RSW(f23), RSW(f24),
+       RSW(f25), RSW(f26), RSW(f27), RSW(f28), RSW(f29),
+       RSW(f30), RSW(f31)
+};
+
+/* Invalidate ALAT entry for integer register REGNO.  */
+static void
+invala_gr (int regno)
+{
+#      define F(reg)   case reg: ia64_invala_gr(reg); break
+
+       switch (regno) {
+               F(  0); F(  1); F(  2); F(  3); F(  4); F(  5); F(  6); F(  7);
+               F(  8); F(  9); F( 10); F( 11); F( 12); F( 13); F( 14); F( 15);
+               F( 16); F( 17); F( 18); F( 19); F( 20); F( 21); F( 22); F( 23);
+               F( 24); F( 25); F( 26); F( 27); F( 28); F( 29); F( 30); F( 31);
+               F( 32); F( 33); F( 34); F( 35); F( 36); F( 37); F( 38); F( 39);
+               F( 40); F( 41); F( 42); F( 43); F( 44); F( 45); F( 46); F( 47);
+               F( 48); F( 49); F( 50); F( 51); F( 52); F( 53); F( 54); F( 55);
+               F( 56); F( 57); F( 58); F( 59); F( 60); F( 61); F( 62); F( 63);
+               F( 64); F( 65); F( 66); F( 67); F( 68); F( 69); F( 70); F( 71);
+               F( 72); F( 73); F( 74); F( 75); F( 76); F( 77); F( 78); F( 79);
+               F( 80); F( 81); F( 82); F( 83); F( 84); F( 85); F( 86); F( 87);
+               F( 88); F( 89); F( 90); F( 91); F( 92); F( 93); F( 94); F( 95);
+               F( 96); F( 97); F( 98); F( 99); F(100); F(101); F(102); F(103);
+               F(104); F(105); F(106); F(107); F(108); F(109); F(110); F(111);
+               F(112); F(113); F(114); F(115); F(116); F(117); F(118); F(119);
+               F(120); F(121); F(122); F(123); F(124); F(125); F(126); F(127);
+       }
+#      undef F
+}
+
+/* Invalidate ALAT entry for floating-point register REGNO.  */
+static void
+invala_fr (int regno)
+{
+#      define F(reg)   case reg: ia64_invala_fr(reg); break
+
+       switch (regno) {
+               F(  0); F(  1); F(  2); F(  3); F(  4); F(  5); F(  6); F(  7);
+               F(  8); F(  9); F( 10); F( 11); F( 12); F( 13); F( 14); F( 15);
+               F( 16); F( 17); F( 18); F( 19); F( 20); F( 21); F( 22); F( 23);
+               F( 24); F( 25); F( 26); F( 27); F( 28); F( 29); F( 30); F( 31);
+               F( 32); F( 33); F( 34); F( 35); F( 36); F( 37); F( 38); F( 39);
+               F( 40); F( 41); F( 42); F( 43); F( 44); F( 45); F( 46); F( 47);
+               F( 48); F( 49); F( 50); F( 51); F( 52); F( 53); F( 54); F( 55);
+               F( 56); F( 57); F( 58); F( 59); F( 60); F( 61); F( 62); F( 63);
+               F( 64); F( 65); F( 66); F( 67); F( 68); F( 69); F( 70); F( 71);
+               F( 72); F( 73); F( 74); F( 75); F( 76); F( 77); F( 78); F( 79);
+               F( 80); F( 81); F( 82); F( 83); F( 84); F( 85); F( 86); F( 87);
+               F( 88); F( 89); F( 90); F( 91); F( 92); F( 93); F( 94); F( 95);
+               F( 96); F( 97); F( 98); F( 99); F(100); F(101); F(102); F(103);
+               F(104); F(105); F(106); F(107); F(108); F(109); F(110); F(111);
+               F(112); F(113); F(114); F(115); F(116); F(117); F(118); F(119);
+               F(120); F(121); F(122); F(123); F(124); F(125); F(126); F(127);
+       }
+#      undef F
+}
+
+static inline unsigned long
+rotate_reg (unsigned long sor, unsigned long rrb, unsigned long reg)
+{
+       reg += rrb;
+       if (reg >= sor)
+               reg -= sor;
+       return reg;
+}
+
+#ifdef CONFIG_VTI
+static void
+set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
+{
+       struct switch_stack *sw = (struct switch_stack *) regs - 1;
+       unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end;
+       unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
+       unsigned long rnats, nat_mask;
+    unsigned long old_rsc,new_rsc;
+       unsigned long on_kbs,rnat;
+       long sof = (regs->cr_ifs) & 0x7f;
+       long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
+       long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
+       long ridx = r1 - 32;
+
+       if (ridx >= sof) {
+               /* this should never happen, as the "rsvd register fault" has higher priority */
+               DPRINT("ignoring write to r%lu; only %lu registers are allocated!\n", r1, sof);
+               return;
+       }
+
+       if (ridx < sor)
+               ridx = rotate_reg(sor, rrb_gr, ridx);
+
+    old_rsc=ia64_get_rsc();
+    new_rsc=old_rsc&(~0x3);
+    ia64_set_rsc(new_rsc);
+
+    bspstore = ia64_get_bspstore();
+    bsp =kbs + (regs->loadrs >> 19);//16+3
+
+       addr = ia64_rse_skip_regs(bsp, -sof + ridx);
+    nat_mask = 1UL << ia64_rse_slot_num(addr);
+       rnat_addr = ia64_rse_rnat_addr(addr);
+
+    if(addr >= bspstore){
+
+        ia64_flushrs ();
+        ia64_mf ();
+               *addr = val;
+        bspstore = ia64_get_bspstore();
+       rnat = ia64_get_rnat ();
+        if(bspstore < rnat_addr){
+            rnat=rnat&(~nat_mask);
+        }else{
+            *rnat_addr = (*rnat_addr)&(~nat_mask);
+        }
+        ia64_mf();
+        ia64_loadrs();
+        ia64_set_rnat(rnat);
+    }else{
+
+       rnat = ia64_get_rnat ();
+               *addr = val;
+        if(bspstore < rnat_addr){
+            rnat=rnat&(~nat_mask);
+        }else{
+            *rnat_addr = (*rnat_addr)&(~nat_mask);
+        }
+        ia64_set_bspstore (bspstore);
+        ia64_set_rnat(rnat);
+    }
+    ia64_set_rsc(old_rsc);
+}
+
+
+static void
+get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, unsigned long *nat)
+{
+       struct switch_stack *sw = (struct switch_stack *) regs - 1;
+       unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
+       unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
+       unsigned long rnats, nat_mask;
+       unsigned long on_kbs;
+    unsigned long old_rsc, new_rsc;
+       long sof = (regs->cr_ifs) & 0x7f;
+       long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
+       long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
+       long ridx = r1 - 32;
+
+       if (ridx >= sof) {
+               /* read of out-of-frame register returns an undefined value; 0 in our case.  */
+               DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
+               panic("wrong stack register number");
+       }
+
+       if (ridx < sor)
+               ridx = rotate_reg(sor, rrb_gr, ridx);
+
+    old_rsc=ia64_get_rsc();
+    new_rsc=old_rsc&(~(0x3));
+    ia64_set_rsc(new_rsc);
+
+    bspstore = ia64_get_bspstore();
+    bsp =kbs + (regs->loadrs >> 19); //16+3;
+
+       addr = ia64_rse_skip_regs(bsp, -sof + ridx);
+    nat_mask = 1UL << ia64_rse_slot_num(addr);
+       rnat_addr = ia64_rse_rnat_addr(addr);
+
+    if(addr >= bspstore){
+
+        ia64_flushrs ();
+        ia64_mf ();
+        bspstore = ia64_get_bspstore();
+    }
+       *val=*addr;
+    if(bspstore < rnat_addr){
+        *nat=!!(ia64_get_rnat()&nat_mask);
+    }else{
+        *nat = !!((*rnat_addr)&nat_mask);
+    }
+    ia64_set_rsc(old_rsc);
+}
+#else // CONFIG_VTI
+static void
+set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
+{
+       struct switch_stack *sw = (struct switch_stack *) regs - 1;
+       unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end;
+       unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
+       unsigned long rnats, nat_mask;
+       unsigned long on_kbs;
+       long sof = (regs->cr_ifs) & 0x7f;
+       long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
+       long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
+       long ridx = r1 - 32;
+
+       if (ridx >= sof) {
+               /* this should never happen, as the "rsvd register fault" has higher priority */
+               DPRINT("ignoring write to r%lu; only %lu registers are allocated!\n", r1, sof);
+               return;
+       }
+
+       if (ridx < sor)
+               ridx = rotate_reg(sor, rrb_gr, ridx);
+
+       DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
+              r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);
+
+       on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
+       addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
+       if (addr >= kbs) {
+               /* the register is on the kernel backing store: easy... */
+               rnat_addr = ia64_rse_rnat_addr(addr);
+               if ((unsigned long) rnat_addr >= sw->ar_bspstore)
+                       rnat_addr = &sw->ar_rnat;
+               nat_mask = 1UL << ia64_rse_slot_num(addr);
+
+               *addr = val;
+               if (nat)
+                       *rnat_addr |=  nat_mask;
+               else
+                       *rnat_addr &= ~nat_mask;
+               return;
+       }
+
+       if (!user_stack(current, regs)) {
+               DPRINT("ignoring kernel write to r%lu; register isn't on the kernel RBS!", r1);
+               return;
+       }
+
+       bspstore = (unsigned long *)regs->ar_bspstore;
+       ubs_end = ia64_rse_skip_regs(bspstore, on_kbs);
+       bsp     = ia64_rse_skip_regs(ubs_end, -sof);
+       addr    = ia64_rse_skip_regs(bsp, ridx);
+
+       DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr);
+
+       ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);
+
+       rnat_addr = ia64_rse_rnat_addr(addr);
+
+       ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
+       DPRINT("rnat @%p = 0x%lx nat=%d old nat=%ld\n",
+              (void *) rnat_addr, rnats, nat, (rnats >> ia64_rse_slot_num(addr)) & 1);
+
+       nat_mask = 1UL << ia64_rse_slot_num(addr);
+       if (nat)
+               rnats |=  nat_mask;
+       else
+               rnats &= ~nat_mask;
+       ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, rnats);
+
+       DPRINT("rnat changed to @%p = 0x%lx\n", (void *) rnat_addr, rnats);
+}
+
+
+static void
+get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, int *nat)
+{
+       struct switch_stack *sw = (struct switch_stack *) regs - 1;
+       unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
+       unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
+       unsigned long rnats, nat_mask;
+       unsigned long on_kbs;
+       long sof = (regs->cr_ifs) & 0x7f;
+       long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
+       long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
+       long ridx = r1 - 32;
+
+       if (ridx >= sof) {
+               /* read of out-of-frame register returns an undefined value; 0 in our case.  */
+               DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
+               goto fail;
+       }
+
+       if (ridx < sor)
+               ridx = rotate_reg(sor, rrb_gr, ridx);
+
+       DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
+              r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);
+
+       on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
+       addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
+       if (addr >= kbs) {
+               /* the register is on the kernel backing store: easy... */
+               *val = *addr;
+               if (nat) {
+                       rnat_addr = ia64_rse_rnat_addr(addr);
+                       if ((unsigned long) rnat_addr >= sw->ar_bspstore)
+                               rnat_addr = &sw->ar_rnat;
+                       nat_mask = 1UL << ia64_rse_slot_num(addr);
+                       *nat = (*rnat_addr & nat_mask) != 0;
+               }
+               return;
+       }
+
+       if (!user_stack(current, regs)) {
+               DPRINT("ignoring kernel read of r%lu; register isn't on the RBS!", r1);
+               goto fail;
+       }
+
+       bspstore = (unsigned long *)regs->ar_bspstore;
+       ubs_end = ia64_rse_skip_regs(bspstore, on_kbs);
+       bsp     = ia64_rse_skip_regs(ubs_end, -sof);
+       addr    = ia64_rse_skip_regs(bsp, ridx);
+
+       DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr);
+
+       ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);
+
+       if (nat) {
+               rnat_addr = ia64_rse_rnat_addr(addr);
+               nat_mask = 1UL << ia64_rse_slot_num(addr);
+
+               DPRINT("rnat @%p = 0x%lx\n", (void *) rnat_addr, rnats);
+
+               ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
+               *nat = (rnats & nat_mask) != 0;
+       }
+       return;
+
+  fail:
+       *val = 0;
+       if (nat)
+               *nat = 0;
+       return;
+}
+#endif // CONFIG_VTI
+
+
+#ifdef XEN
+void
+#else
+static void
+#endif
+setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
+{
+       struct switch_stack *sw = (struct switch_stack *) regs - 1;
+       unsigned long addr;
+       unsigned long bitmask;
+       unsigned long *unat;
+
+       /*
+        * First takes care of stacked registers
+        */
+       if (regnum >= IA64_FIRST_STACKED_GR) {
+               set_rse_reg(regs, regnum, val, nat);
+               return;
+       }
+
+       /*
+        * Using r0 as a target raises a General Exception fault which has higher priority
+        * than the Unaligned Reference fault.
+        */
+
+       /*
+        * Now look at registers in [0-31] range and init correct UNAT
+        */
+       if (GR_IN_SW(regnum)) {
+               addr = (unsigned long)sw;
+               unat = &sw->ar_unat;
+       } else {
+               addr = (unsigned long)regs;
+#ifdef CONFIG_VTI
+               unat = &regs->eml_unat;
+#else //CONFIG_VTI
+               unat = &sw->caller_unat;
+#endif  //CONFIG_VTI
+       }
+       DPRINT("tmp_base=%lx switch_stack=%s offset=%d\n",
+              addr, unat==&sw->ar_unat ? "yes":"no", GR_OFFS(regnum));
+       /*
+        * add offset from base of struct
+        * and do it !
+        */
+       addr += GR_OFFS(regnum);
+
+       *(unsigned long *)addr = val;
+
+       /*
+        * We need to clear the corresponding UNAT bit to fully emulate the load
+        * UNAT bit_pos = GR[r3]{8:3} from EAS-2.4
+        */
+       bitmask   = 1UL << (addr >> 3 & 0x3f);
+       DPRINT("*0x%lx=0x%lx NaT=%d prev_unat @%p=%lx\n", addr, val, nat, (void *) unat, *unat);
+       if (nat) {
+               *unat |= bitmask;
+       } else {
+               *unat &= ~bitmask;
+       }
+       DPRINT("*0x%lx=0x%lx NaT=%d new unat: %p=%lx\n", addr, val, nat, (void *) unat,*unat);
+}
+
+/*
+ * Return the (rotated) index for floating point register REGNUM (REGNUM must be in the
+ * range from 32-127, result is in the range from 0-95.
+ */
+static inline unsigned long
+fph_index (struct pt_regs *regs, long regnum)
+{
+       unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
+       return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
+}
+
+static void
+setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
+{
+       struct switch_stack *sw = (struct switch_stack *)regs - 1;
+       unsigned long addr;
+
+       /*
+        * From EAS-2.5: FPDisableFault has higher priority than Unaligned
+        * Fault. Thus, when we get here, we know the partition is enabled.
+        * To update f32-f127, there are three choices:
+        *
+        *      (1) save f32-f127 to thread.fph and update the values there
+        *      (2) use a gigantic switch statement to directly access the registers
+        *      (3) generate code on the fly to update the desired register
+        *
+        * For now, we are using approach (1).
+        */
+       if (regnum >= IA64_FIRST_ROTATING_FR) {
+               ia64_sync_fph(current);
+#ifdef XEN
+               current->arch._thread.fph[fph_index(regs, regnum)] = *fpval;
+#else
+               current->thread.fph[fph_index(regs, regnum)] = *fpval;
+#endif
+       } else {
+               /*
+                * pt_regs or switch_stack ?
+                */
+               if (FR_IN_SW(regnum)) {
+                       addr = (unsigned long)sw;
+               } else {
+                       addr = (unsigned long)regs;
+               }
+
+               DPRINT("tmp_base=%lx offset=%d\n", addr, FR_OFFS(regnum));
+
+               addr += FR_OFFS(regnum);
+               *(struct ia64_fpreg *)addr = *fpval;
+
+               /*
+                * mark the low partition as being used now
+                *
+                * It is highly unlikely that this bit is not already set, but
+                * let's do it for safety.
+                */
+               regs->cr_ipsr |= IA64_PSR_MFL;
+       }
+}
+
+/*
+ * Those 2 inline functions generate the spilled versions of the constant floating point
+ * registers which can be used with stfX
+ */
+static inline void
+float_spill_f0 (struct ia64_fpreg *final)
+{
+       ia64_stf_spill(final, 0);
+}
+
+static inline void
+float_spill_f1 (struct ia64_fpreg *final)
+{
+       ia64_stf_spill(final, 1);
+}
+
+static void
+getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
+{
+       struct switch_stack *sw = (struct switch_stack *) regs - 1;
+       unsigned long addr;
+
+       /*
+        * From EAS-2.5: FPDisableFault has higher priority than
+        * Unaligned Fault. Thus, when we get here, we know the partition is
+        * enabled.
+        *
+        * When regnum > 31, the register is still live and we need to force a save
+        * to current->thread.fph to get access to it.  See discussion in setfpreg()
+        * for reasons and other ways of doing this.
+        */
+       if (regnum >= IA64_FIRST_ROTATING_FR) {
+               ia64_flush_fph(current);
+#ifdef XEN
+               *fpval = current->arch._thread.fph[fph_index(regs, regnum)];
+#else
+               *fpval = current->thread.fph[fph_index(regs, regnum)];
+#endif
+       } else {
+               /*
+                * f0 = 0.0, f1= 1.0. Those registers are constant and are thus
+                * not saved, we must generate their spilled form on the fly
+                */
+               switch(regnum) {
+               case 0:
+                       float_spill_f0(fpval);
+                       break;
+               case 1:
+                       float_spill_f1(fpval);
+                       break;
+               default:
+                       /*
+                        * pt_regs or switch_stack ?
+                        */
+                       addr =  FR_IN_SW(regnum) ? (unsigned long)sw
+                                                : (unsigned long)regs;
+
+                       DPRINT("is_sw=%d tmp_base=%lx offset=0x%x\n",
+                              FR_IN_SW(regnum), addr, FR_OFFS(regnum));
+
+                       addr  += FR_OFFS(regnum);
+                       *fpval = *(struct ia64_fpreg *)addr;
+               }
+       }
+}
+
+
+#ifdef XEN
+void
+#else
+static void
+#endif
+getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
+{
+       struct switch_stack *sw = (struct switch_stack *) regs - 1;
+       unsigned long addr, *unat;
+
+       if (regnum >= IA64_FIRST_STACKED_GR) {
+               get_rse_reg(regs, regnum, val, nat);
+               return;
+       }
+
+       /*
+        * take care of r0 (read-only always evaluate to 0)
+        */
+       if (regnum == 0) {
+               *val = 0;
+               if (nat)
+                       *nat = 0;
+               return;
+       }
+
+       /*
+        * Now look at registers in [0-31] range and init correct UNAT
+        */
+       if (GR_IN_SW(regnum)) {
+               addr = (unsigned long)sw;
+               unat = &sw->ar_unat;
+       } else {
+               addr = (unsigned long)regs;
+#ifdef  CONFIG_VTI
+               unat = &regs->eml_unat;
+#else   //CONFIG_VTI
+               unat = &sw->caller_unat;
+#endif  //CONFIG_VTI
+       }
+
+       DPRINT("addr_base=%lx offset=0x%x\n", addr,  GR_OFFS(regnum));
+
+       addr += GR_OFFS(regnum);
+
+       *val  = *(unsigned long *)addr;
+
+       /*
+        * do it only when requested
+        */
+       if (nat)
+               *nat  = (*unat >> (addr >> 3 & 0x3f)) & 0x1UL;
+}
+
+static void
+emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsigned long ifa)
+{
+       /*
+        * IMPORTANT:
+        * Given the way we handle unaligned speculative loads, we should
+        * not get to this point in the code but we keep this sanity check,
+        * just in case.
+        */
+       if (ld.x6_op == 1 || ld.x6_op == 3) {
+               printk(KERN_ERR "%s: register update on speculative load, error\n", __FUNCTION__);
+               die_if_kernel("unaligned reference on speculative load with register update\n",
+                             regs, 30);
+       }
+
+
+       /*
+        * at this point, we know that the base register to update is valid i.e.,
+        * it's not r0
+        */
+       if (type == UPD_IMMEDIATE) {
+               unsigned long imm;
+
+               /*
+                * Load +Imm: ldXZ r1=[r3],imm(9)
+                *
+                *
+                * form imm9: [13:19] contain the first 7 bits
+                */
+               imm = ld.x << 7 | ld.imm;
+
+               /*
+                * sign extend (1+8bits) if m set
+                */
+               if (ld.m) imm |= SIGN_EXT9;
+
+               /*
+                * ifa == r3 and we know that the NaT bit on r3 was clear so
+                * we can directly use ifa.
+                */
+               ifa += imm;
+
+               setreg(ld.r3, ifa, 0, regs);
+
+               DPRINT("ld.x=%d ld.m=%d imm=%ld r3=0x%lx\n", ld.x, ld.m, imm, ifa);
+
+       } else if (ld.m) {
+               unsigned long r2;
+               int nat_r2;
+
+               /*
+                * Load +Reg Opcode: ldXZ r1=[r3],r2
+                *
+                * Note: that we update r3 even in the case of ldfX.a
+                * (where the load does not happen)
+                *
+                * The way the load algorithm works, we know that r3 does not
+                * have its NaT bit set (would have gotten NaT consumption
+                * before getting the unaligned fault). So we can use ifa
+                * which equals r3 at this point.
+                *
+                * IMPORTANT:
+                * The above statement holds ONLY because we know that we
+                * never reach this code when trying to do a ldX.s.
+                * If we ever make it to here on an ldfX.s then
+                */
+               getreg(ld.imm, &r2, &nat_r2, regs);
+
+               ifa += r2;
+
+               /*
+                * propagate Nat r2 -> r3
+                */
+               setreg(ld.r3, ifa, nat_r2, regs);
+
+               DPRINT("imm=%d r2=%ld r3=0x%lx nat_r2=%d\n", ld.imm, r2, ifa, nat_r2);
+       }
+}
+
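To make the imm9 decoding above concrete -- this is a sketch only, and it assumes SIGN_EXT9 is the usual 0xffffffffffffff00UL mask defined earlier in this file:

    /* Sketch only: rebuild the signed 9-bit post-increment of ldXZ r1=[r3],imm9.
     * m, x and imm7 mirror the load_store_t fields used above. */
    static unsigned long decode_imm9(unsigned int m, unsigned int x, unsigned int imm7)
    {
            unsigned long imm = (unsigned long)x << 7 | imm7;  /* low 8 bits */
            if (m)                                             /* m is the sign bit */
                    imm |= 0xffffffffffffff00UL;               /* SIGN_EXT9 (assumed value) */
            return imm;
    }

    /* Example: "ld8 r1=[r3],-8" encodes m=1, x=1, imm7=0x78, so imm becomes
     * 0xf8 | SIGN_EXT9 = 0xfffffffffffffff8, i.e. -8. */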
+
+static int
+emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+{
+       unsigned int len = 1 << ld.x6_sz;
+       unsigned long val = 0;
+
+       /*
+        * r0, as target, doesn't need to be checked because Illegal Instruction
+        * faults have higher priority than unaligned faults.
+        *
+        * r0 cannot be found as the base as it would never generate an
+        * unaligned reference.
+        */
+
+       /*
+        * ldX.a we will emulate load and also invalidate the ALAT entry.
+        * See comment below for explanation on how we handle ldX.a
+        */
+
+       if (len != 2 && len != 4 && len != 8) {
+               DPRINT("unknown size: x6=%d\n", ld.x6_sz);
+               return -1;
+       }
+       /* this assumes little-endian byte-order: */
+       if (copy_from_user(&val, (void __user *) ifa, len))
+               return -1;
+       setreg(ld.r1, val, 0, regs);
+
+       /*
+        * check for updates on any kind of loads
+        */
+       if (ld.op == 0x5 || ld.m)
+               emulate_load_updates(ld.op == 0x5 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa);
+
+       /*
+        * handling of various loads (based on EAS2.4):
+        *
+        * ldX.acq (ordered load):
+        *      - acquire semantics would have been used, so force fence instead.
+        *
+        * ldX.c.clr (check load and clear):
+        *      - if we get to this handler, it's because the entry was not in the ALAT.
+        *        Therefore the operation reverts to a normal load
+        *
+        * ldX.c.nc (check load no clear):
+        *      - same as previous one
+        *
+        * ldX.c.clr.acq (ordered check load and clear):
+        *      - same as above for c.clr part. The load needs to have acquire semantics. So
+        *        we use the fence semantics which is stronger and thus ensures correctness.
+        *
+        * ldX.a (advanced load):
+        *      - suppose ldX.a r1=[r3]. If we get to the unaligned trap it's because the
+        *        address doesn't match requested size alignment. This means that we would
+        *        possibly need more than one load to get the result.
+        *
+        *        The load part can be handled just like a normal load, however the difficult
+        *        part is to get the right thing into the ALAT. The critical piece of information
+        *        is the base address of the load & size. To do that, a ld.a must be executed,
+        *        clearly any address can be pushed into the table by using ld1.a r1=[r3]. Now
+        *        if we use the same target register, we will be okay for the check.a instruction.
+        *        If we look at the store, basically a stX [r3]=r1 checks the ALAT for any entry
+        *        which would overlap within [r3,r3+X] (the size of the load was stored in the
+        *        ALAT). If such an entry is found the entry is invalidated. But this is not good
+        *        enough, take the following example:
+        *              r3=3
+        *              ld4.a r1=[r3]
+        *
+        *        Could be emulated by doing:
+        *              ld1.a r1=[r3],1
+        *              store to temporary;
+        *              ld1.a r1=[r3],1
+        *              store & shift to temporary;
+        *              ld1.a r1=[r3],1
+        *              store & shift to temporary;
+        *              ld1.a r1=[r3]
+        *              store & shift to temporary;
+        *              r1=temporary
+        *
+        *        So in this case, you would get the right value in r1 but the wrong info in
+        *        the ALAT.  Notice that you could do it in reverse to finish with address 3
+        *        but you would still get the size wrong.  To get the size right, one needs to
+        *        execute exactly the same kind of load. You could do it from an aligned
+        *        temporary location, but you would get the address wrong.
+        *
+        *        So no matter what, it is not possible to emulate an advanced load
+        *        correctly. But is that really critical?
+        *
+        *        We will always convert ld.a into a normal load with ALAT invalidated.  This
+        *        will enable the compiler to do optimizations where certain code paths after ld.a
+        *        are not required to have ld.c/chk.a, e.g., code paths with no intervening stores.
+        *
+        *        If there is a store after the advanced load, one must either do a ld.c.* or
+        *        chk.a.* to reuse the value stored in the ALAT. Both can "fail" (meaning no
+        *        entry found in ALAT), and that's perfectly ok because:
+        *
+        *              - ld.c.*, if the entry is not present a normal load is executed
+        *              - chk.a.*, if the entry is not present, execution jumps to recovery code
+        *
+        *        In either case, the load can be potentially retried in another form.
+        *
+        *        ALAT must be invalidated for the register (so that chk.a or ld.c don't pick
+        *        up a stale entry later). The register base update MUST also be performed.
+        */
+
+       /*
+        * when the load has the .acq completer then
+        * use ordering fence.
+        */
+       if (ld.x6_op == 0x5 || ld.x6_op == 0xa)
+               mb();
+
+       /*
+        * invalidate ALAT entry in case of advanced load
+        */
+       if (ld.x6_op == 0x2)
+               invala_gr(ld.r1);
+
+       return 0;
+}
+
+static int
+emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+{
+       unsigned long r2;
+       unsigned int len = 1 << ld.x6_sz;
+
+       /*
+        * if we get to this handler, Nat bits on both r3 and r2 have already
+        * been checked. so we don't need to do it
+        *
+        * extract the value to be stored
+        */
+       getreg(ld.imm, &r2, NULL, regs);
+
+       /*
+        * we rely on the macros in unaligned.h for now i.e.,
+        * we let the compiler figure out how to read memory gracefully.
+        *
+        * We need this switch/case because of the way the inline function
+        * works. The code is optimized by the compiler and looks like
+        * a single switch/case.
+        */
+       DPRINT("st%d [%lx]=%lx\n", len, ifa, r2);
+
+       if (len != 2 && len != 4 && len != 8) {
+               DPRINT("unknown size: x6=%d\n", ld.x6_sz);
+               return -1;
+       }
+
+       /* this assumes little-endian byte-order: */
+       if (copy_to_user((void __user *) ifa, &r2, len))
+               return -1;
+
+       /*
+        * stX [r3]=r2,imm(9)
+        *
+        * NOTE:
+        * ld.r3 can never be r0, because r0 would not generate an
+        * unaligned access.
+        */
+       if (ld.op == 0x5) {
+               unsigned long imm;
+
+               /*
+                * form imm9: [12:6] contain first 7bits
+                */
+               imm = ld.x << 7 | ld.r1;
+               /*
+                * sign extend (8bits) if m set
+                */
+               if (ld.m) imm |= SIGN_EXT9;
+               /*
+                * ifa == r3 (NaT is necessarily cleared)
+                */
+               ifa += imm;
+
+               DPRINT("imm=%lx r3=%lx\n", imm, ifa);
+
+               setreg(ld.r3, ifa, 0, regs);
+       }
+       /*
+        * we don't have alat_invalidate_multiple() so we need
+        * to do the complete flush :-<<
+        */
+       ia64_invala();
+
+       /*
+        * stX.rel: use fence instead of release
+        */
+       if (ld.x6_op == 0xd)
+               mb();
+
+       return 0;
+}
+
+/*
+ * floating point operations sizes in bytes
+ */
+static const unsigned char float_fsz[4]={
+       10, /* extended precision (e) */
+       8,  /* integer (8)            */
+       4,  /* single precision (s)   */
+       8   /* double precision (d)   */
+};
+
+static inline void
+mem2float_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
+{
+       ia64_ldfe(6, init);
+       ia64_stop();
+       ia64_stf_spill(final, 6);
+}
+
+static inline void
+mem2float_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
+{
+       ia64_ldf8(6, init);
+       ia64_stop();
+       ia64_stf_spill(final, 6);
+}
+
+static inline void
+mem2float_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
+{
+       ia64_ldfs(6, init);
+       ia64_stop();
+       ia64_stf_spill(final, 6);
+}
+
+static inline void
+mem2float_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
+{
+       ia64_ldfd(6, init);
+       ia64_stop();
+       ia64_stf_spill(final, 6);
+}
+
+static inline void
+float2mem_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
+{
+       ia64_ldf_fill(6, init);
+       ia64_stop();
+       ia64_stfe(final, 6);
+}
+
+static inline void
+float2mem_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
+{
+       ia64_ldf_fill(6, init);
+       ia64_stop();
+       ia64_stf8(final, 6);
+}
+
+static inline void
+float2mem_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
+{
+       ia64_ldf_fill(6, init);
+       ia64_stop();
+       ia64_stfs(final, 6);
+}
+
+static inline void
+float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
+{
+       ia64_ldf_fill(6, init);
+       ia64_stop();
+       ia64_stfd(final, 6);
+}
+
+static int
+emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+{
+       struct ia64_fpreg fpr_init[2];
+       struct ia64_fpreg fpr_final[2];
+       unsigned long len = float_fsz[ld.x6_sz];
+
+       /*
+        * fr0 & fr1 don't need to be checked because Illegal Instruction faults have
+        * higher priority than unaligned faults.
+        *
+        * r0 cannot be found as the base as it would never generate an unaligned
+        * reference.
+        */
+
+       /*
+        * make sure we get clean buffers
+        */
+       memset(&fpr_init, 0, sizeof(fpr_init));
+       memset(&fpr_final, 0, sizeof(fpr_final));
+
+       /*
+        * ldfpX.a: we don't try to emulate anything but we must
+        * invalidate the ALAT entry and execute updates, if any.
+        */
+       if (ld.x6_op != 0x2) {
+               /*
+                * This assumes little-endian byte-order.  Note that there is no "ldfpe"
+                * instruction:
+                */
+               if (copy_from_user(&fpr_init[0], (void __user *) ifa, len)
+                   || copy_from_user(&fpr_init[1], (void __user *) (ifa + len), len))
+                       return -1;
+
+               DPRINT("ld.r1=%d ld.imm=%d x6_sz=%d\n", ld.r1, ld.imm, ld.x6_sz);
+               DDUMP("fpr_init =", &fpr_init, 2*len);
+               /*
+                * XXX fixme
+                * Could optimize inlines by using ldfpX & 2 spills
+                */
+               switch( ld.x6_sz ) {
+                       case 0:
+                               mem2float_extended(&fpr_init[0], &fpr_final[0]);
+                               mem2float_extended(&fpr_init[1], &fpr_final[1]);
+                               break;
+                       case 1:
+                               mem2float_integer(&fpr_init[0], &fpr_final[0]);
+                               mem2float_integer(&fpr_init[1], &fpr_final[1]);
+                               break;
+                       case 2:
+                               mem2float_single(&fpr_init[0], &fpr_final[0]);
+                               mem2float_single(&fpr_init[1], &fpr_final[1]);
+                               break;
+                       case 3:
+                               mem2float_double(&fpr_init[0], &fpr_final[0]);
+                               mem2float_double(&fpr_init[1], &fpr_final[1]);
+                               break;
+               }
+               DDUMP("fpr_final =", &fpr_final, 2*len);
+               /*
+                * XXX fixme
+                *
+                * A possible optimization would be to drop fpr_final and directly
+                * use the storage from the saved context i.e., the actual final
+                * destination (pt_regs, switch_stack or thread structure).
+                */
+               setfpreg(ld.r1, &fpr_final[0], regs);
+               setfpreg(ld.imm, &fpr_final[1], regs);
+       }
+
+       /*
+        * Check for updates: only immediate updates are available for this
+        * instruction.
+        */
+       if (ld.m) {
+               /*
+                * the immediate is implicit given the ldsz of the operation:
+                * single: 8 (2x4) and for  all others it's 16 (2x8)
+                */
+               ifa += len<<1;
+
+               /*
+                * IMPORTANT:
+                * the fact that we force the NaT of r3 to zero is ONLY valid
+                * as long as we don't come here with a ldfpX.s.
+                * For this reason we keep this sanity check
+                */
+               if (ld.x6_op == 1 || ld.x6_op == 3)
+                       printk(KERN_ERR "%s: register update on speculative load pair, error\n",
+                              __FUNCTION__);
+
+               setreg(ld.r3, ifa, 0, regs);
+       }
+
+       /*
+        * Invalidate ALAT entries, if any, for both registers.
+        */
+       if (ld.x6_op == 0x2) {
+               invala_fr(ld.r1);
+               invala_fr(ld.imm);
+       }
+       return 0;
+}
+
+
+static int
+emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+{
+       struct ia64_fpreg fpr_init;
+       struct ia64_fpreg fpr_final;
+       unsigned long len = float_fsz[ld.x6_sz];
+
+       /*
+        * fr0 & fr1 don't need to be checked because Illegal Instruction
+        * faults have higher priority than unaligned faults.
+        *
+        * r0 cannot be found as the base as it would never generate an
+        * unaligned reference.
+        */
+
+       /*
+        * make sure we get clean buffers
+        */
+       memset(&fpr_init,0, sizeof(fpr_init));
+       memset(&fpr_final,0, sizeof(fpr_final));
+
+       /*
+        * ldfX.a we don't try to emulate anything but we must
+        * invalidate the ALAT entry.
+        * See comments in ldX for descriptions on how the various loads are handled.
+        */
+       if (ld.x6_op != 0x2) {
+               if (copy_from_user(&fpr_init, (void __user *) ifa, len))
+                       return -1;
+
+               DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
+               DDUMP("fpr_init =", &fpr_init, len);
+               /*
+                * we only do something for x6_op={0,8,9}
+                */
+               switch( ld.x6_sz ) {
+                       case 0:
+                               mem2float_extended(&fpr_init, &fpr_final);
+                               break;
+                       case 1:
+                               mem2float_integer(&fpr_init, &fpr_final);
+                               break;
+                       case 2:
+                               mem2float_single(&fpr_init, &fpr_final);
+                               break;
+                       case 3:
+                               mem2float_double(&fpr_init, &fpr_final);
+                               break;
+               }
+               DDUMP("fpr_final =", &fpr_final, len);
+               /*
+                * XXX fixme
+                *
+                * A possible optimization would be to drop fpr_final and directly
+                * use the storage from the saved context i.e., the actual final
+                * destination (pt_regs, switch_stack or thread structure).
+                */
+               setfpreg(ld.r1, &fpr_final, regs);
+       }
+
+       /*
+        * check for updates on any loads
+        */
+       if (ld.op == 0x7 || ld.m)
+               emulate_load_updates(ld.op == 0x7 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa);
+
+       /*
+        * invalidate ALAT entry in case of advanced floating point loads
+        */
+       if (ld.x6_op == 0x2)
+               invala_fr(ld.r1);
+
+       return 0;
+}
+
+
+static int
+emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+{
+       struct ia64_fpreg fpr_init;
+       struct ia64_fpreg fpr_final;
+       unsigned long len = float_fsz[ld.x6_sz];
+
+       /*
+        * make sure we get clean buffers
+        */
+       memset(&fpr_init,0, sizeof(fpr_init));
+       memset(&fpr_final,0, sizeof(fpr_final));
+
+       /*
+        * if we get to this handler, Nat bits on both r3 and r2 have already
+        * been checked. so we don't need to do it
+        *
+        * extract the value to be stored
+        */
+       getfpreg(ld.imm, &fpr_init, regs);
+       /*
+        * during this step, we extract the spilled registers from the saved
+        * context i.e., we refill. Then we store (no spill) to temporary
+        * aligned location
+        */
+       switch( ld.x6_sz ) {
+               case 0:
+                       float2mem_extended(&fpr_init, &fpr_final);
+                       break;
+               case 1:
+                       float2mem_integer(&fpr_init, &fpr_final);
+                       break;
+               case 2:
+                       float2mem_single(&fpr_init, &fpr_final);
+                       break;
+               case 3:
+                       float2mem_double(&fpr_init, &fpr_final);
+                       break;
+       }
+       DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
+       DDUMP("fpr_init =", &fpr_init, len);
+       DDUMP("fpr_final =", &fpr_final, len);
+
+       if (copy_to_user((void __user *) ifa, &fpr_final, len))
+               return -1;
+
+       /*
+        * stfX [r3]=r2,imm(9)
+        *
+        * NOTE:
+        * ld.r3 can never be r0, because r0 would not generate an
+        * unaligned access.
+        */
+       if (ld.op == 0x7) {
+               unsigned long imm;
+
+               /*
+                * form imm9: [12:6] contain first 7bits
+                */
+               imm = ld.x << 7 | ld.r1;
+               /*
+                * sign extend (8bits) if m set
+                */
+               if (ld.m)
+                       imm |= SIGN_EXT9;
+               /*
+                * ifa == r3 (NaT is necessarily cleared)
+                */
+               ifa += imm;
+
+               DPRINT("imm=%lx r3=%lx\n", imm, ifa);
+
+               setreg(ld.r3, ifa, 0, regs);
+       }
+       /*
+        * we don't have alat_invalidate_multiple() so we need
+        * to do the complete flush :-<<
+        */
+       ia64_invala();
+
+       return 0;
+}
+
+/*
+ * Make sure we log the unaligned access, so that user/sysadmin can notice it and
+ * eventually fix the program.  However, we don't want to do that for every access so we
+ * pace it with jiffies.  This isn't really MP-safe, but it doesn't really have to be
+ * either...
+ */
+static int
+within_logging_rate_limit (void)
+{
+       static unsigned long count, last_time;
+
+       if (jiffies - last_time > 5*HZ)
+               count = 0;
+       if (++count < 5) {
+               last_time = jiffies;
+               return 1;
+       }
+       return 0;
+
+}
+
+void
+ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
+{
+#ifdef XEN
+printk("ia64_handle_unaligned: called, not working yet\n");
+#else
+       struct ia64_psr *ipsr = ia64_psr(regs);
+       mm_segment_t old_fs = get_fs();
+       unsigned long bundle[2];
+       unsigned long opcode;
+       struct siginfo si;
+       const struct exception_table_entry *eh = NULL;
+       union {
+               unsigned long l;
+               load_store_t insn;
+       } u;
+       int ret = -1;
+
+       if (ia64_psr(regs)->be) {
+               /* we don't support big-endian accesses */
+               die_if_kernel("big-endian unaligned accesses are not supported", regs, 0);
+               goto force_sigbus;
+       }
+
+       /*
+        * Treat kernel accesses for which there is an exception handler entry the same as
+        * user-level unaligned accesses.  Otherwise, a clever program could trick this
+        * handler into reading arbitrary kernel addresses...
+        */
+       if (!user_mode(regs))
+               eh = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
+       if (user_mode(regs) || eh) {
+               if ((current->thread.flags & IA64_THREAD_UAC_SIGBUS) != 0)
+                       goto force_sigbus;
+
+               if (!(current->thread.flags & IA64_THREAD_UAC_NOPRINT)
+                   && within_logging_rate_limit())
+               {
+                       char buf[200];  /* comm[] is at most 16 bytes... */
+                       size_t len;
+
+                       len = sprintf(buf, "%s(%d): unaligned access to 0x%016lx, "
+                                     "ip=0x%016lx\n\r", current->comm, current->pid,
+                                     ifa, regs->cr_iip + ipsr->ri);
+                       /*
+                        * Don't call tty_write_message() if we're in the kernel; we might
+                        * be holding locks...
+                        */
+                       if (user_mode(regs))
+                               tty_write_message(current->signal->tty, buf);
+                       buf[len-1] = '\0';      /* drop '\r' */
+                       printk(KERN_WARNING "%s", buf); /* watch for command names containing %s */
+               }
+       } else {
+               if (within_logging_rate_limit())
+                       printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n",
+                              ifa, regs->cr_iip + ipsr->ri);
+               set_fs(KERNEL_DS);
+       }
+
+       DPRINT("iip=%lx ifa=%lx isr=%lx (ei=%d, sp=%d)\n",
+              regs->cr_iip, ifa, regs->cr_ipsr, ipsr->ri, ipsr->it);
+
+       if (__copy_from_user(bundle, (void __user *) regs->cr_iip, 16))
+               goto failure;
+
+       /*
+        * extract the instruction from the bundle given the slot number
+        */
+       switch (ipsr->ri) {
+             case 0: u.l = (bundle[0] >>  5); break;
+             case 1: u.l = (bundle[0] >> 46) | (bundle[1] << 18); break;
+             case 2: u.l = (bundle[1] >> 23); break;
+       }
+       opcode = (u.l >> IA64_OPCODE_SHIFT) & IA64_OPCODE_MASK;
+
+       DPRINT("opcode=%lx ld.qp=%d ld.r1=%d ld.imm=%d ld.r3=%d ld.x=%d ld.hint=%d "
+              "ld.x6=0x%x ld.m=%d ld.op=%d\n", opcode, u.insn.qp, u.insn.r1, u.insn.imm,
+              u.insn.r3, u.insn.x, u.insn.hint, u.insn.x6_sz, u.insn.m, u.insn.op);
+
+       /*
+        * IMPORTANT:
+        * Notice that the switch statement DOES NOT cover all possible instructions
+        * that DO generate unaligned references. This is made on purpose because for some
+        * instructions it DOES NOT make sense to try and emulate the access. Sometimes it
+        * is WRONG to try and emulate. Here is a list of instructions we don't emulate i.e.,
+        * the program will get a signal and die:
+        *
+        *      load/store:
+        *              - ldX.spill
+        *              - stX.spill
+        *      Reason: RNATs are based on addresses
+        *
+        *      synchronization:
+        *              - cmpxchg
+        *              - fetchadd
+        *              - xchg
+        *      Reason: ATOMIC operations cannot be emulated properly using multiple
+        *              instructions.
+        *
+        *      speculative loads:
+        *              - ldX.sZ
+        *      Reason: side effects, code must be ready to deal with failure so simpler
+        *              to let the load fail.
+        * ---------------------------------------------------------------------------------
+        * XXX fixme
+        *
+        * I would like to get rid of this switch case and do something
+        * more elegant.
+        */
+       switch (opcode) {
+             case LDS_OP:
+             case LDSA_OP:
+             case LDS_IMM_OP:
+             case LDSA_IMM_OP:
+             case LDFS_OP:
+             case LDFSA_OP:
+             case LDFS_IMM_OP:
+               /*
+                * The instruction will be retried with deferred exceptions turned on, and
+                * we should get the NaT bit installed
+                *
+                * IMPORTANT: When PSR_ED is set, the register & immediate update forms
+                * are actually executed even though the operation failed. So we don't
+                * need to take care of this.
+                */
+               DPRINT("forcing PSR_ED\n");
+               regs->cr_ipsr |= IA64_PSR_ED;
+               goto done;
+
+             case LD_OP:
+             case LDA_OP:
+             case LDBIAS_OP:
+             case LDACQ_OP:
+             case LDCCLR_OP:
+             case LDCNC_OP:
+             case LDCCLRACQ_OP:
+             case LD_IMM_OP:
+             case LDA_IMM_OP:
+             case LDBIAS_IMM_OP:
+             case LDACQ_IMM_OP:
+             case LDCCLR_IMM_OP:
+             case LDCNC_IMM_OP:
+             case LDCCLRACQ_IMM_OP:
+               ret = emulate_load_int(ifa, u.insn, regs);
+               break;
+
+             case ST_OP:
+             case STREL_OP:
+             case ST_IMM_OP:
+             case STREL_IMM_OP:
+               ret = emulate_store_int(ifa, u.insn, regs);
+               break;
+
+             case LDF_OP:
+             case LDFA_OP:
+             case LDFCCLR_OP:
+             case LDFCNC_OP:
+             case LDF_IMM_OP:
+             case LDFA_IMM_OP:
+             case LDFCCLR_IMM_OP:
+             case LDFCNC_IMM_OP:
+               if (u.insn.x)
+                       ret = emulate_load_floatpair(ifa, u.insn, regs);
+               else
+                       ret = emulate_load_float(ifa, u.insn, regs);
+               break;
+
+             case STF_OP:
+             case STF_IMM_OP:
+               ret = emulate_store_float(ifa, u.insn, regs);
+               break;
+
+             default:
+               goto failure;
+       }
+       DPRINT("ret=%d\n", ret);
+       if (ret)
+               goto failure;
+
+       if (ipsr->ri == 2)
+               /*
+                * given today's architecture this case is not likely to happen because a
+                * memory access instruction (M) can never be in the last slot of a
+                * bundle. But let's keep it for now.
+                */
+               regs->cr_iip += 16;
+       ipsr->ri = (ipsr->ri + 1) & 0x3;
+
+       DPRINT("ipsr->ri=%d iip=%lx\n", ipsr->ri, regs->cr_iip);
+  done:
+       set_fs(old_fs);         /* restore original address limit */
+       return;
+
+  failure:
+       /* something went wrong... */
+       if (!user_mode(regs)) {
+               if (eh) {
+                       ia64_handle_exception(regs, eh);
+                       goto done;
+               }
+               die_if_kernel("error during unaligned kernel access\n", regs, ret);
+               /* NOT_REACHED */
+       }
+  force_sigbus:
+       si.si_signo = SIGBUS;
+       si.si_errno = 0;
+       si.si_code = BUS_ADRALN;
+       si.si_addr = (void __user *) ifa;
+       si.si_flags = 0;
+       si.si_isr = 0;
+       si.si_imm = 0;
+       force_sig_info(SIGBUS, &si, current);
+       goto done;
+#endif
+}
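A note on the slot-extraction switch in ia64_handle_unaligned() above: an IA-64 bundle is 128 bits, a 5-bit template followed by three 41-bit instruction slots at bit offsets 5, 46 and 87, which is where the shift amounts come from (slot 1 straddles the two 64-bit words: 18 bits from word 0, 23 from word 1). A generic sketch of the same extraction, offered as an illustrative helper rather than anything in this patch:

    /* Sketch only: extract 41-bit slot ri (0..2) from a little-endian
     * 128-bit bundle held as two 64-bit words; equivalent to the switch
     * above, except that it also masks off the bits above the slot. */
    static unsigned long bundle_slot(const unsigned long bundle[2], unsigned int ri)
    {
            unsigned int start = 5 + ri * 41;       /* bit offset of the slot */
            unsigned long slot;

            if (start >= 64)                        /* slot 2: entirely in word 1 */
                    slot = bundle[1] >> (start - 64);
            else if (start + 41 > 64)               /* slot 1: straddles both words */
                    slot = (bundle[0] >> start) | (bundle[1] << (64 - start));
            else                                    /* slot 0: entirely in word 0 */
                    slot = bundle[0] >> start;
            return slot & ((1UL << 41) - 1);
    }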
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/xen.lds.S
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/arch/ia64/xen.lds.S   Tue Aug  2 23:59:09 2005
@@ -0,0 +1,251 @@
+#include <linux/config.h>
+
+#include <asm/cache.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+
+#define LOAD_OFFSET    (KERNEL_START - KERNEL_TR_PAGE_SIZE)
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_FORMAT("elf64-ia64-little")
+OUTPUT_ARCH(ia64)
+ENTRY(phys_start)
+jiffies = jiffies_64;
+PHDRS {
+  code   PT_LOAD;
+  percpu PT_LOAD;
+  data   PT_LOAD;
+}
+SECTIONS
+{
+  /* Sections to be discarded */
+  /DISCARD/ : {
+       *(.exit.text)
+       *(.exit.data)
+       *(.exitcall.exit)
+       *(.IA_64.unwind.exit.text)
+       *(.IA_64.unwind_info.exit.text)
+       }
+
+  v = PAGE_OFFSET;     /* this symbol is here to make debugging easier... */
+  phys_start = _start - LOAD_OFFSET;
+
+  code : { } :code
+  . = KERNEL_START;
+
+  _text = .;
+  _stext = .;
+
+  .text : AT(ADDR(.text) - LOAD_OFFSET)
+    {
+       *(.text.ivt)
+       *(.text)
+       SCHED_TEXT
+       LOCK_TEXT
+       *(.gnu.linkonce.t*)
+    }
+  .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
+       { *(.text2) }
+#ifdef CONFIG_SMP
+  .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET)
+       { *(.text.lock) }
+#endif
+  _etext = .;
+
+  /* Read-only data */
+
+  /* Exception table */
+  . = ALIGN(16);
+  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET)
+       {
+         __start___ex_table = .;
+         *(__ex_table)
+         __stop___ex_table = .;
+       }
+
+  .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
+       {
+         __start___vtop_patchlist = .;
+         *(.data.patch.vtop)
+         __end___vtop_patchlist = .;
+       }
+
+  .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
+       {
+         __start___mckinley_e9_bundles = .;
+         *(.data.patch.mckinley_e9)
+         __end___mckinley_e9_bundles = .;
+       }
+
+  /* Global data */
+  _data = .;
+
+#if defined(CONFIG_IA64_GENERIC)
+  /* Machine Vector */
+  . = ALIGN(16);
+  .machvec : AT(ADDR(.machvec) - LOAD_OFFSET)
+       {
+         machvec_start = .;
+         *(.machvec)
+         machvec_end = .;
+       }
+#endif
+
+  /* Unwind info & table: */
+  . = ALIGN(8);
+  .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET)
+       { *(.IA_64.unwind_info*) }
+  .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET)
+       {
+         __start_unwind = .;
+         *(.IA_64.unwind*)
+         __end_unwind = .;
+       }
+
+  RODATA
+
+  .opd : AT(ADDR(.opd) - LOAD_OFFSET)
+       { *(.opd) }
+
+  /* Initialization code and data: */
+
+  . = ALIGN(PAGE_SIZE);
+  __init_begin = .;
+  .init.text : AT(ADDR(.init.text) - LOAD_OFFSET)
+       {
+         _sinittext = .;
+         *(.init.text)
+         _einittext = .;
+       }
+
+  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET)
+       { *(.init.data) }
+
+  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET)
+       {
+         __initramfs_start = .;
+         *(.init.ramfs)
+         __initramfs_end = .;
+       }
+
+   . = ALIGN(16);
+  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET)
+        {
+         __setup_start = .;
+         *(.init.setup)
+         __setup_end = .;
+       }
+  .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET)
+       {
+         __initcall_start = .;
+         *(.initcall1.init)
+         *(.initcall2.init)
+         *(.initcall3.init)
+         *(.initcall4.init)
+         *(.initcall5.init)
+         *(.initcall6.init)
+         *(.initcall7.init)
+         __initcall_end = .;
+       }
+   __con_initcall_start = .;
+  .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
+       { *(.con_initcall.init) }
+  __con_initcall_end = .;
+  __security_initcall_start = .;
+  .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET)
+       { *(.security_initcall.init) }
+  __security_initcall_end = .;
+  . = ALIGN(PAGE_SIZE);
+  __init_end = .;
+
+  /* The initial task and kernel stack */
+  .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET)
+       { *(.data.init_task) }
+
+  .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
+        { *(__special_page_section)
+         __start_gate_section = .;
+         *(.data.gate)
+         __stop_gate_section = .;
+       }
+  . = ALIGN(PAGE_SIZE);                /* make sure the gate page doesn't expose kernel data */
+
+  .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET)
+        { *(.data.cacheline_aligned) }
+
+  /* Per-cpu data: */
+  percpu : { } :percpu
+  . = ALIGN(PERCPU_PAGE_SIZE);
+  __phys_per_cpu_start = .;
+  .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
+       {
+               __per_cpu_start = .;
+               *(.data.percpu)
+               __per_cpu_end = .;
+       }
+  . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into percpu page size */
+
+  data : { } :data
+  .data : AT(ADDR(.data) - LOAD_OFFSET)
+       { *(.data) *(.data1) *(.gnu.linkonce.d*) CONSTRUCTORS }
+
+  . = ALIGN(16);       /* gp must be 16-byte aligned for exc. table */
+  .got : AT(ADDR(.got) - LOAD_OFFSET)
+       { *(.got.plt) *(.got) }
+  __gp = ADDR(.got) + 0x200000;
+  /* We want the small data sections together, so single-instruction offsets
+     can access them all, and initialized data all before uninitialized, so
+     we can shorten the on-disk segment size.  */
+  .sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
+       { *(.sdata) *(.sdata1) *(.srdata) }
+  _edata  =  .;
+  _bss = .;
+  .sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
+       { *(.sbss) *(.scommon) }
+  .bss : AT(ADDR(.bss) - LOAD_OFFSET)
+       { *(.bss) *(COMMON) }
+
+  _end = .;
+
+  code : { } :code
+  /* Stabs debugging sections.  */
+  .stab 0 : { *(.stab) }
+  .stabstr 0 : { *(.stabstr) }
+  .stab.excl 0 : { *(.stab.excl) }
+  .stab.exclstr 0 : { *(.stab.exclstr) }
+  .stab.index 0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  /* DWARF debug sections.
+     Symbols in the DWARF debugging sections are relative to the beginning
+     of the section so we begin them at 0.  */
+  /* DWARF 1 */
+  .debug          0 : { *(.debug) }
+  .line           0 : { *(.line) }
+  /* GNU DWARF 1 extensions */
+  .debug_srcinfo  0 : { *(.debug_srcinfo) }
+  .debug_sfnames  0 : { *(.debug_sfnames) }
+  /* DWARF 1.1 and DWARF 2 */
+  .debug_aranges  0 : { *(.debug_aranges) }
+  .debug_pubnames 0 : { *(.debug_pubnames) }
+  /* DWARF 2 */
+  .debug_info     0 : { *(.debug_info) }
+  .debug_abbrev   0 : { *(.debug_abbrev) }
+  .debug_line     0 : { *(.debug_line) }
+  .debug_frame    0 : { *(.debug_frame) }
+  .debug_str      0 : { *(.debug_str) }
+  .debug_loc      0 : { *(.debug_loc) }
+  .debug_macinfo  0 : { *(.debug_macinfo) }
+  /* SGI/MIPS DWARF 2 extensions */
+  .debug_weaknames 0 : { *(.debug_weaknames) }
+  .debug_funcnames 0 : { *(.debug_funcnames) }
+  .debug_typenames 0 : { *(.debug_typenames) }
+  .debug_varnames  0 : { *(.debug_varnames) }
+  /* These must appear regardless of  .  */
+  /* Discard them for now since Intel SoftSDV cannot handle them.
+  .comment 0 : { *(.comment) }
+  .note 0 : { *(.note) }
+  */
+  /DISCARD/ : { *(.comment) }
+  /DISCARD/ : { *(.note) }
+}
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/gcc_intrin.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/gcc_intrin.h Tue Aug  2 23:59:09 2005
@@ -0,0 +1,657 @@
+#ifndef _ASM_IA64_GCC_INTRIN_H
+#define _ASM_IA64_GCC_INTRIN_H
+/*
+ *
+ * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@xxxxxxxxx>
+ * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@xxxxxxxxx>
+ */
+
+#include <linux/compiler.h>
+
+/* define this macro to get some asm stmts included in 'c' files */
+#define ASM_SUPPORTED
+
+/* Optimization barrier */
+/* The "volatile" is due to gcc bugs */
+#define ia64_barrier() asm volatile ("":::"memory")
+
+#define ia64_stop()    asm volatile (";;"::)
+
+#define ia64_invala_gr(regnum) asm volatile ("invala.e r%0" :: "i"(regnum))
+
+#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum))
+
+extern void ia64_bad_param_for_setreg (void);
+extern void ia64_bad_param_for_getreg (void);
+
+register unsigned long ia64_r13 asm ("r13") __attribute_used__;
+
+#define ia64_setreg(regnum, val)                                       \
+({                                                                     \
+       switch (regnum) {                                               \
+           case _IA64_REG_PSR_L:                                       \
+                   asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \
+                   break;                                              \
+           case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:                  \
+                   asm volatile ("mov ar%0=%1" ::                      \
+                                         "i" (regnum - _IA64_REG_AR_KR0), \
+                                         "r"(val): "memory");          \
+                   break;                                              \
+           case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:                \
+                   asm volatile ("mov cr%0=%1" ::                      \
+                                         "i" (regnum - _IA64_REG_CR_DCR), \
+                                         "r"(val): "memory" );         \
+                   break;                                              \
+           case _IA64_REG_SP:                                          \
+                   asm volatile ("mov r12=%0" ::                       \
+                                         "r"(val): "memory");          \
+                   break;                                              \
+           case _IA64_REG_GP:                                          \
+                   asm volatile ("mov gp=%0" :: "r"(val) : "memory");  \
+               break;                                                  \
+           default:                                                    \
+                   ia64_bad_param_for_setreg();                        \
+                   break;                                              \
+       }                                                               \
+})
+
+#define ia64_getreg(regnum)                                            \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+                                                                       \
+       switch (regnum) {                                               \
+       case _IA64_REG_GP:                                              \
+               asm volatile ("mov %0=gp" : "=r"(ia64_intri_res));      \
+               break;                                                  \
+       case _IA64_REG_IP:                                              \
+               asm volatile ("mov %0=ip" : "=r"(ia64_intri_res));      \
+               break;                                                  \
+       case _IA64_REG_PSR:                                             \
+               asm volatile ("mov %0=psr" : "=r"(ia64_intri_res));     \
+               break;                                                  \
+       case _IA64_REG_TP:      /* for current() */                     \
+               ia64_intri_res = ia64_r13;                              \
+               break;                                                  \
+       case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:                      \
+               asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res)     \
+                                     : "i"(regnum - _IA64_REG_AR_KR0)); \
+               break;                                                  \
+       case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:                    \
+               asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res)     \
+                                     : "i" (regnum - _IA64_REG_CR_DCR)); \
+               break;                                                  \
+       case _IA64_REG_SP:                                              \
+               asm volatile ("mov %0=sp" : "=r" (ia64_intri_res));     \
+               break;                                                  \
+       default:                                                        \
+               ia64_bad_param_for_getreg();                            \
+               break;                                                  \
+       }                                                               \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_hint_pause 0
+
+#define ia64_hint(mode)                                                \
+({                                                             \
+       switch (mode) {                                         \
+       case ia64_hint_pause:                                   \
+               asm volatile ("hint @pause" ::: "memory");      \
+               break;                                          \
+       }                                                       \
+})
+
+
+/* Integer values for mux1 instruction */
+#define ia64_mux1_brcst 0
+#define ia64_mux1_mix   8
+#define ia64_mux1_shuf  9
+#define ia64_mux1_alt  10
+#define ia64_mux1_rev  11
+
+#define ia64_mux1(x, mode)                                             \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+                                                                       \
+       switch (mode) {                                                 \
+       case ia64_mux1_brcst:                                           \
+               asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \
+               break;                                                  \
+       case ia64_mux1_mix:                                             \
+               asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \
+               break;                                                  \
+       case ia64_mux1_shuf:                                            \
+               asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \
+               break;                                                  \
+       case ia64_mux1_alt:                                             \
+               asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \
+               break;                                                  \
+       case ia64_mux1_rev:                                             \
+               asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \
+               break;                                                  \
+       }                                                               \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_popcnt(x)                                         \
+({                                                             \
+       __u64 ia64_intri_res;                                   \
+       asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
+                                                               \
+       ia64_intri_res;                                         \
+})
+
+#define ia64_getf_exp(x)                                       \
+({                                                             \
+       long ia64_intri_res;                                    \
+                                                               \
+       asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
+                                                               \
+       ia64_intri_res;                                         \
+})
+
+#define ia64_shrp(a, b, count)                                         \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+       asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_ldfs(regnum, x)                                   \
+({                                                             \
+       register double __f__ asm ("f"#regnum);                 \
+       asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x));     \
+})
+
+#define ia64_ldfd(regnum, x)                                   \
+({                                                             \
+       register double __f__ asm ("f"#regnum);                 \
+       asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x));     \
+})
+
+#define ia64_ldfe(regnum, x)                                   \
+({                                                             \
+       register double __f__ asm ("f"#regnum);                 \
+       asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x));     \
+})
+
+#define ia64_ldf8(regnum, x)                                   \
+({                                                             \
+       register double __f__ asm ("f"#regnum);                 \
+       asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x));     \
+})
+
+#define ia64_ldf_fill(regnum, x)                               \
+({                                                             \
+       register double __f__ asm ("f"#regnum);                 \
+       asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \
+})
+
+#define ia64_stfs(x, regnum)                                           \
+({                                                                     \
+       register double __f__ asm ("f"#regnum);                         \
+       asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stfd(x, regnum)                                           \
+({                                                                     \
+       register double __f__ asm ("f"#regnum);                         \
+       asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stfe(x, regnum)                                           \
+({                                                                     \
+       register double __f__ asm ("f"#regnum);                         \
+       asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stf8(x, regnum)                                           \
+({                                                                     \
+       register double __f__ asm ("f"#regnum);                         \
+       asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stf_spill(x, regnum)                                      \
+({                                                                     \
+       register double __f__ asm ("f"#regnum);                         \
+       asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_fetchadd4_acq(p, inc)                                     \
+({                                                                     \
+                                                                       \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("fetchadd4.acq %0=[%1],%2"                        \
+                               : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
+                               : "memory");                            \
+                                                                       \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_fetchadd4_rel(p, inc)                                     \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("fetchadd4.rel %0=[%1],%2"                        \
+                               : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
+                               : "memory");                            \
+                                                                       \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_fetchadd8_acq(p, inc)                                     \
+({                                                                     \
+                                                                       \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("fetchadd8.acq %0=[%1],%2"                        \
+                               : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
+                               : "memory");                            \
+                                                                       \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_fetchadd8_rel(p, inc)                                     \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("fetchadd8.rel %0=[%1],%2"                        \
+                               : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
+                               : "memory");                            \
+                                                                       \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_xchg1(ptr,x)                                              \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("xchg1 %0=[%1],%2"                                \
+                     : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory"); \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_xchg2(ptr,x)                                              \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res)        \
+                     : "r" (ptr), "r" (x) : "memory");                 \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_xchg4(ptr,x)                                              \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res)        \
+                     : "r" (ptr), "r" (x) : "memory");                 \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_xchg8(ptr,x)                                              \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res)        \
+                     : "r" (ptr), "r" (x) : "memory");                 \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_cmpxchg1_acq(ptr, new, old)                               \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));                  \
+       asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv":                 \
+                             "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_cmpxchg1_rel(ptr, new, old)                               \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));                  \
+       asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv":                 \
+                             "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_cmpxchg2_acq(ptr, new, old)                               \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));                  \
+       asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv":                 \
+                             "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_cmpxchg2_rel(ptr, new, old)                               \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));                  \
+                                                                       \
+       asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv":                 \
+                             "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_cmpxchg4_acq(ptr, new, old)                               \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));                  \
+       asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv":                 \
+                             "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_cmpxchg4_rel(ptr, new, old)                               \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));                  \
+       asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv":                 \
+                             "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_cmpxchg8_acq(ptr, new, old)                               \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));                  \
+       asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv":                 \
+                             "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_cmpxchg8_rel(ptr, new, old)                               \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));                  \
+                                                                       \
+       asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv":                 \
+                             "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+       ia64_intri_res;                                                 \
+})
+
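For illustration only (not part of this patch), a minimal sketch of how the acquire-form compare-and-exchange above is typically used, e.g. to build an atomic 32-bit increment; the helper name is hypothetical:

    /* Hypothetical helper: the macro returns the value that was in *counter
     * before the attempt; the store only happens when it matches 'old'. */
    static inline __u32 example_atomic_inc(volatile __u32 *counter)
    {
            __u32 old;

            do {
                    old = *counter;                 /* expected current value */
            } while (ia64_cmpxchg4_acq(counter, old + 1, old) != old);

            return old + 1;
    }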
+#define ia64_mf()      asm volatile ("mf" ::: "memory")
+#define ia64_mfa()     asm volatile ("mf.a" ::: "memory")
+
+#ifdef CONFIG_VTI
+/*
+ * Flush the register stack engine contents to the backing store (flushrs).
+ */
+#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
+
+#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
+
+#define ia64_get_rsc()                          \
+({                                  \
+    unsigned long val;                     \
+    asm volatile ("mov %0=ar.rsc;;" : "=r"(val) :: "memory");  \
+    val;                               \
+})
+
+#define ia64_set_rsc(val)                       \
+    asm volatile ("mov ar.rsc=%0;;" :: "r"(val) : "memory")
+
+#define ia64_get_bspstore()     \
+({                                  \
+    unsigned long val;                     \
+    asm volatile ("mov %0=ar.bspstore;;" : "=r"(val) :: "memory");  \
+    val;                               \
+})
+
+#define ia64_set_bspstore(val)                       \
+    asm volatile ("mov ar.bspstore=%0;;" :: "r"(val) : "memory")
+
+#define ia64_get_rnat()     \
+({                                  \
+    unsigned long val;                     \
+    asm volatile ("mov %0=ar.rnat;" : "=r"(val) :: "memory");  \
+    val;                               \
+})
+
+#define ia64_set_rnat(val)                       \
+    asm volatile ("mov ar.rnat=%0;;" :: "r"(val) : "memory")
+
+#define ia64_ttag(addr)                                                                \
+({                                                                                     \
+       __u64 ia64_intri_res;                                                           \
+       asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr));                \
+       ia64_intri_res;                                                                 \
+})
+
+#define ia64_get_dcr()                          \
+({                                      \
+    __u64 result;                               \
+    asm volatile ("mov %0=cr.dcr" : "=r"(result) : );           \
+    result;                                 \
+})
+
+#define ia64_set_dcr(val)                           \
+({                                      \
+    asm volatile ("mov cr.dcr=%0" :: "r"(val) );            \
+})
+
+#endif // CONFIG_VTI
+
+
+#define ia64_invala() asm volatile ("invala" ::: "memory")
+
+#define ia64_thash(addr)                                                               \
+({                                                                                     \
+       __u64 ia64_intri_res;                                                           \
+       asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr));               \
+       ia64_intri_res;                                                                 \
+})
+
+#define ia64_srlz_i()  asm volatile (";; srlz.i ;;" ::: "memory")
+#define ia64_srlz_d()  asm volatile (";; srlz.d" ::: "memory");
+
+#ifdef HAVE_SERIALIZE_DIRECTIVE
+# define ia64_dv_serialize_data()              asm volatile (".serialize.data");
+# define ia64_dv_serialize_instruction()       asm volatile (".serialize.instruction");
+#else
+# define ia64_dv_serialize_data()
+# define ia64_dv_serialize_instruction()
+#endif
+
+#define ia64_nop(x)    asm volatile ("nop %0"::"i"(x));
+
+#define ia64_itci(addr)        asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
+
+#define ia64_itcd(addr)        asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
+
+
+#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"                        \
+                                            :: "r"(trnum), "r"(addr) : "memory")
+
+#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"                        \
+                                            :: "r"(trnum), "r"(addr) : "memory")
+
+#define ia64_tpa(addr)                                                                 \
+({                                                                                     \
+       __u64 ia64_pa;                                                                  \
+       asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory");            \
+       ia64_pa;                                                                        \
+})
+
+#define __ia64_set_dbr(index, val)                                                     \
+       asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_ibr(index, val)                                                       \
+       asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_pkr(index, val)                                                       \
+       asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_pmc(index, val)                                                       \
+       asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_pmd(index, val)                                                       \
+       asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_rr(index, val)                                                        \
+       asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
+
+#define ia64_get_cpuid(index)                                                          \
+({                                                                                     \
+       __u64 ia64_intri_res;                                                           \
+       asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index));        \
+       ia64_intri_res;                                                                 \
+})
+
+#define __ia64_get_dbr(index)                                                          \
+({                                                                                     \
+       __u64 ia64_intri_res;                                                           \
+       asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index));            \
+       ia64_intri_res;                                                                 \
+})
+
+#define ia64_get_ibr(index)                                                            \
+({                                                                                     \
+       __u64 ia64_intri_res;                                                           \
+       asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index));            \
+       ia64_intri_res;                                                                 \
+})
+
+#define ia64_get_pkr(index)                                                            \
+({                                                                                     \
+       __u64 ia64_intri_res;                                                           \
+       asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index));            \
+       ia64_intri_res;                                                                 \
+})
+
+#define ia64_get_pmc(index)                                                            \
+({                                                                                     \
+       __u64 ia64_intri_res;                                                           \
+       asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index));            \
+       ia64_intri_res;                                                                 \
+})
+
+
+#define ia64_get_pmd(index)                                                            \
+({                                                                                     \
+       __u64 ia64_intri_res;                                                           \
+       asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index));            \
+       ia64_intri_res;                                                                 \
+})
+
+#define ia64_get_rr(index)                                                             \
+({                                                                                     \
+       __u64 ia64_intri_res;                                                           \
+       asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index));            \
+       ia64_intri_res;                                                                 \
+})
+
+#define ia64_fc(addr)  asm volatile ("fc %0" :: "r"(addr) : "memory")
+
+
+#define ia64_sync_i()  asm volatile (";; sync.i" ::: "memory")
+
+#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory")
+#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory")
+#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory")
+#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory")
+
+#define ia64_ptce(addr)        asm volatile ("ptc.e %0" :: "r"(addr))
+
+#define ia64_ptcga(addr, size)                                                         \
+do {                                                                                   \
+       asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory");               \
+       ia64_dv_serialize_data();                                                       \
+} while (0)
+
+#define ia64_ptcl(addr, size)                                                          \
+do {                                                                                   \
+       asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory");                \
+       ia64_dv_serialize_data();                                                       \
+} while (0)
+
+#define ia64_ptri(addr, size)                                          \
+       asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
+
+#define ia64_ptrd(addr, size)                                          \
+       asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
+
+/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
+
+#define ia64_lfhint_none   0
+#define ia64_lfhint_nt1    1
+#define ia64_lfhint_nt2    2
+#define ia64_lfhint_nta    3
+
+#define ia64_lfetch(lfhint, y)                                 \
+({                                                             \
+        switch (lfhint) {                                      \
+        case ia64_lfhint_none:                                 \
+                asm volatile ("lfetch [%0]" : : "r"(y));       \
+                break;                                         \
+        case ia64_lfhint_nt1:                                  \
+                asm volatile ("lfetch.nt1 [%0]" : : "r"(y));   \
+                break;                                         \
+        case ia64_lfhint_nt2:                                  \
+                asm volatile ("lfetch.nt2 [%0]" : : "r"(y));   \
+                break;                                         \
+        case ia64_lfhint_nta:                                  \
+                asm volatile ("lfetch.nta [%0]" : : "r"(y));   \
+                break;                                         \
+        }                                                      \
+})
+
+#define ia64_lfetch_excl(lfhint, y)                                    \
+({                                                                     \
+        switch (lfhint) {                                              \
+        case ia64_lfhint_none:                                         \
+                asm volatile ("lfetch.excl [%0]" :: "r"(y));           \
+                break;                                                 \
+        case ia64_lfhint_nt1:                                          \
+                asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y));       \
+                break;                                                 \
+        case ia64_lfhint_nt2:                                          \
+                asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y));       \
+                break;                                                 \
+        case ia64_lfhint_nta:                                          \
+                asm volatile ("lfetch.excl.nta [%0]" :: "r"(y));       \
+                break;                                                 \
+        }                                                              \
+})
+
+#define ia64_lfetch_fault(lfhint, y)                                   \
+({                                                                     \
+        switch (lfhint) {                                              \
+        case ia64_lfhint_none:                                         \
+                asm volatile ("lfetch.fault [%0]" : : "r"(y));         \
+                break;                                                 \
+        case ia64_lfhint_nt1:                                          \
+                asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y));     \
+                break;                                                 \
+        case ia64_lfhint_nt2:                                          \
+                asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y));     \
+                break;                                                 \
+        case ia64_lfhint_nta:                                          \
+                asm volatile ("lfetch.fault.nta [%0]" : : "r"(y));     \
+                break;                                                 \
+        }                                                              \
+})
+
+#define ia64_lfetch_fault_excl(lfhint, y)                              \
+({                                                                     \
+        switch (lfhint) {                                              \
+        case ia64_lfhint_none:                                         \
+                asm volatile ("lfetch.fault.excl [%0]" :: "r"(y));     \
+                break;                                                 \
+        case ia64_lfhint_nt1:                                          \
+                asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
+                break;                                                 \
+        case ia64_lfhint_nt2:                                          \
+                asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
+                break;                                                 \
+        case ia64_lfhint_nta:                                          \
+                asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
+                break;                                                 \
+        }                                                              \
+})
+
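For illustration only (not part of this patch), a sketch of how the prefetch wrappers above might be used; the list type and function are hypothetical:

    /* Hypothetical example: prefetch the next node with a non-temporal hint
     * while the current node is being summed. */
    struct example_node { struct example_node *next; long payload; };

    static long example_sum_list(struct example_node *head)
    {
            struct example_node *p;
            long sum = 0;

            for (p = head; p != NULL; p = p->next) {
                    if (p->next)
                            ia64_lfetch(ia64_lfhint_nta, p->next);
                    sum += p->payload;
            }
            return sum;
    }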
+#define ia64_intrin_local_irq_restore(x)                       \
+do {                                                           \
+       asm volatile (";;   cmp.ne p6,p7=%0,r0;;"               \
+                     "(p6) ssm psr.i;"                         \
+                     "(p7) rsm psr.i;;"                        \
+                     "(p6) srlz.d"                             \
+                     :: "r"((x)) : "p6", "p7", "memory");      \
+} while (0)
+
+#endif /* _ASM_IA64_GCC_INTRIN_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/hpsim_ssc.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/hpsim_ssc.h  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,55 @@
+/*
+ * Platform dependent support for HP simulator.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 1999 Vijay Chander <vijay@xxxxxxxxxxxx>
+ */
+#ifndef _IA64_PLATFORM_HPSIM_SSC_H
+#define _IA64_PLATFORM_HPSIM_SSC_H
+
+/* Simulator system calls: */
+
+#define SSC_CONSOLE_INIT               20
+#define SSC_GETCHAR                    21
+#define SSC_PUTCHAR                    31
+#define SSC_CONNECT_INTERRUPT          58
+#define SSC_GENERATE_INTERRUPT         59
+#define SSC_SET_PERIODIC_INTERRUPT     60
+#define SSC_GET_RTC                    65
+#define SSC_EXIT                       66
+#define SSC_LOAD_SYMBOLS               69
+#define SSC_GET_TOD                    74
+#define SSC_CTL_TRACE                  76
+
+#define SSC_NETDEV_PROBE               100
+#define SSC_NETDEV_SEND                        101
+#define SSC_NETDEV_RECV                        102
+#define SSC_NETDEV_ATTACH              103
+#define SSC_NETDEV_DETACH              104
+
+/*
+ * Simulator system call.
+ */
+extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
+
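For context (not part of this patch), a minimal sketch of how this entry point is typically combined with the SSC numbers above, e.g. to emit a character on the simulated console; the wrapper name is hypothetical:

    /* Hypothetical wrapper: write one character via the HP simulator console. */
    static void example_simcons_putc(int ch)
    {
            ia64_ssc(ch, 0, 0, 0, SSC_PUTCHAR);
    }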
+#ifdef XEN
+/* Note: These are declared in linux/arch/ia64/hp/sim/simscsi.c but belong
+ * in linux/include/asm-ia64/hpsim_ssc.h, hence their addition here */
+#define SSC_OPEN                       50
+#define SSC_CLOSE                      51
+#define SSC_READ                       52
+#define SSC_WRITE                      53
+#define SSC_GET_COMPLETION             54
+#define SSC_WAIT_COMPLETION            55
+
+#define SSC_WRITE_ACCESS               2
+#define SSC_READ_ACCESS                        1
+
+struct ssc_disk_req {
+       unsigned long addr;
+       unsigned long len;
+};
+#endif
+
+#endif /* _IA64_PLATFORM_HPSIM_SSC_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/ia64regs.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/ia64regs.h   Tue Aug  2 23:59:09 2005
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2002,2003 Intel Corp.
+ *      Jun Nakajima <jun.nakajima@xxxxxxxxx>
+ *      Suresh Siddha <suresh.b.siddha@xxxxxxxxx>
+ */
+
+#ifndef _ASM_IA64_IA64REGS_H
+#define _ASM_IA64_IA64REGS_H
+
+/*
+ * Register Names for getreg() and setreg().
+ *
+ * The "magic" numbers happen to match the values used by the Intel compiler's
+ * getreg()/setreg() intrinsics.
+ */
+
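As an illustration only (not part of this patch), these encodings are meant to be passed to the ia64_getreg()/ia64_setreg() intrinsics that accompany them elsewhere in these headers, e.g. sampling the interval time counter; the wrapper name is hypothetical:

    /* Hypothetical wrapper: read the cycle counter (ar.itc) via its encoding. */
    static inline unsigned long example_read_itc(void)
    {
            return ia64_getreg(_IA64_REG_AR_ITC);
    }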
+/* Special Registers */
+
+#define _IA64_REG_IP           1016    /* getreg only */
+#define _IA64_REG_PSR          1019
+#define _IA64_REG_PSR_L                1019
+
+/* General Integer Registers */
+
+#define _IA64_REG_GP           1025    /* R1 */
+#define _IA64_REG_R8           1032    /* R8 */
+#define _IA64_REG_R9           1033    /* R9 */
+#define _IA64_REG_SP           1036    /* R12 */
+#define _IA64_REG_TP           1037    /* R13 */
+
+/* Application Registers */
+
+#define _IA64_REG_AR_KR0       3072
+#define _IA64_REG_AR_KR1       3073
+#define _IA64_REG_AR_KR2       3074
+#define _IA64_REG_AR_KR3       3075
+#define _IA64_REG_AR_KR4       3076
+#define _IA64_REG_AR_KR5       3077
+#define _IA64_REG_AR_KR6       3078
+#define _IA64_REG_AR_KR7       3079
+#define _IA64_REG_AR_RSC       3088
+#define _IA64_REG_AR_BSP       3089
+#define _IA64_REG_AR_BSPSTORE  3090
+#define _IA64_REG_AR_RNAT      3091
+#define _IA64_REG_AR_FCR       3093
+#define _IA64_REG_AR_EFLAG     3096
+#define _IA64_REG_AR_CSD       3097
+#define _IA64_REG_AR_SSD       3098
+#define _IA64_REG_AR_CFLAG     3099
+#define _IA64_REG_AR_FSR       3100
+#define _IA64_REG_AR_FIR       3101
+#define _IA64_REG_AR_FDR       3102
+#define _IA64_REG_AR_CCV       3104
+#define _IA64_REG_AR_UNAT      3108
+#define _IA64_REG_AR_FPSR      3112
+#define _IA64_REG_AR_ITC       3116
+#define _IA64_REG_AR_PFS       3136
+#define _IA64_REG_AR_LC                3137
+#define _IA64_REG_AR_EC                3138
+
+/* Control Registers */
+
+#define _IA64_REG_CR_DCR       4096
+#define _IA64_REG_CR_ITM       4097
+#define _IA64_REG_CR_IVA       4098
+#define _IA64_REG_CR_PTA       4104
+#define _IA64_REG_CR_IPSR      4112
+#define _IA64_REG_CR_ISR       4113
+#define _IA64_REG_CR_IIP       4115
+#define _IA64_REG_CR_IFA       4116
+#define _IA64_REG_CR_ITIR      4117
+#define _IA64_REG_CR_IIPA      4118
+#define _IA64_REG_CR_IFS       4119
+#define _IA64_REG_CR_IIM       4120
+#define _IA64_REG_CR_IHA       4121
+#define _IA64_REG_CR_LID       4160
+#define _IA64_REG_CR_IVR       4161    /* getreg only */
+#define _IA64_REG_CR_TPR       4162
+#define _IA64_REG_CR_EOI       4163
+#define _IA64_REG_CR_IRR0      4164    /* getreg only */
+#define _IA64_REG_CR_IRR1      4165    /* getreg only */
+#define _IA64_REG_CR_IRR2      4166    /* getreg only */
+#define _IA64_REG_CR_IRR3      4167    /* getreg only */
+#define _IA64_REG_CR_ITV       4168
+#define _IA64_REG_CR_PMV       4169
+#define _IA64_REG_CR_CMCV      4170
+#define _IA64_REG_CR_LRR0      4176
+#define _IA64_REG_CR_LRR1      4177
+
+#ifdef  CONFIG_VTI
+#define IA64_REG_CR_DCR   0
+#define IA64_REG_CR_ITM   1
+#define IA64_REG_CR_IVA   2
+#define IA64_REG_CR_PTA   8
+#define IA64_REG_CR_IPSR  16
+#define IA64_REG_CR_ISR   17
+#define IA64_REG_CR_IIP   19
+#define IA64_REG_CR_IFA   20
+#define IA64_REG_CR_ITIR  21
+#define IA64_REG_CR_IIPA  22
+#define IA64_REG_CR_IFS   23
+#define IA64_REG_CR_IIM   24
+#define IA64_REG_CR_IHA   25
+#define IA64_REG_CR_LID   64
+#define IA64_REG_CR_IVR   65
+#define IA64_REG_CR_TPR   66
+#define IA64_REG_CR_EOI   67
+#define IA64_REG_CR_IRR0  68
+#define IA64_REG_CR_IRR1  69
+#define IA64_REG_CR_IRR2  70
+#define IA64_REG_CR_IRR3  71
+#define IA64_REG_CR_ITV   72
+#define IA64_REG_CR_PMV   73
+#define IA64_REG_CR_CMCV  74
+#define IA64_REG_CR_LRR0  80
+#define IA64_REG_CR_LRR1  81
+#endif  //  CONFIG_VTI
+
+/* Indirect Registers for getindreg() and setindreg() */
+
+#define _IA64_REG_INDR_CPUID   9000    /* getindreg only */
+#define _IA64_REG_INDR_DBR     9001
+#define _IA64_REG_INDR_IBR     9002
+#define _IA64_REG_INDR_PKR     9003
+#define _IA64_REG_INDR_PMC     9004
+#define _IA64_REG_INDR_PMD     9005
+#define _IA64_REG_INDR_RR      9006
+
+#endif /* _ASM_IA64_IA64REGS_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/io.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/io.h Tue Aug  2 23:59:09 2005
@@ -0,0 +1,488 @@
+#ifndef _ASM_IA64_IO_H
+#define _ASM_IA64_IO_H
+
+/*
+ * This file contains the definitions for the emulated IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated to
+ * (a) handle it all in a way that makes gcc able to optimize it as
+ * well as possible and (b) trying to avoid writing the same thing
+ * over and over again with slight variations and possibly making a
+ * mistake somewhere.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@xxxxxxxxx>
+ * Copyright (C) 1999 Don Dugger <don.dugger@xxxxxxxxx>
+ */
+
+/* We don't use IO slowdowns on the ia64, but.. */
+#define __SLOW_DOWN_IO do { } while (0)
+#define SLOW_DOWN_IO   do { } while (0)
+
+#ifdef XEN
+#define __IA64_UNCACHED_OFFSET 0xe800000000000000UL
+#else
+#define __IA64_UNCACHED_OFFSET 0xc000000000000000UL    /* region 6 */
+#endif
+
+/*
+ * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
+ * large machines may have multiple other I/O spaces so we can't place any a priori limit
+ * on IO_SPACE_LIMIT.  These additional spaces are described in ACPI.
+ */
+#define IO_SPACE_LIMIT         0xffffffffffffffffUL
+
+#define MAX_IO_SPACES_BITS             4
+#define MAX_IO_SPACES                  (1UL << MAX_IO_SPACES_BITS)
+#define IO_SPACE_BITS                  24
+#define IO_SPACE_SIZE                  (1UL << IO_SPACE_BITS)
+
+#define IO_SPACE_NR(port)              ((port) >> IO_SPACE_BITS)
+#define IO_SPACE_BASE(space)           ((space) << IO_SPACE_BITS)
+#define IO_SPACE_PORT(port)            ((port) & (IO_SPACE_SIZE - 1))
+
+#define IO_SPACE_SPARSE_ENCODING(p)    ((((p) >> 2) << 12) | (p & 0xfff))
+
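As a worked example (not part of this patch) of the arithmetic behind the sparse-encoding macro above:

    /* Sparse-encode legacy port 0x1234:
     *   (0x1234 >> 2) << 12 = 0x48d000
     *    0x1234 & 0xfff     = 0x000234
     *   encoded offset      = 0x48d234
     */
    unsigned long off = IO_SPACE_SPARSE_ENCODING(0x1234);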
+struct io_space {
+       unsigned long mmio_base;        /* base in MMIO space */
+       int sparse;
+};
+
+extern struct io_space io_space[];
+extern unsigned int num_io_spaces;
+
+# ifdef __KERNEL__
+
+/*
+ * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
+ *     0xCxxxxxxxxxxxxxxx      MMIO cookie (return from ioremap)
+ *     0x000000001SPPPPPP      PIO cookie (S=space number, P..P=port)
+ *
+ * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
+ * code that uses bare port numbers without the prerequisite pci_iomap().
+ */
+#define PIO_OFFSET             (1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
+#define PIO_MASK               (PIO_OFFSET - 1)
+#define PIO_RESERVED           __IA64_UNCACHED_OFFSET
+#define HAVE_ARCH_PIO_SIZE
+
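For illustration only (not part of this patch), a sketch of what a PIO cookie looks like under the layout described above, assuming legacy space 0 and port 0x3f8:

    /* PIO_OFFSET is 1UL << 28 here, so the cookie is 0x100003f8, i.e. the
     * 0x000000001SPPPPPP layout with S = 0 and P..P = 0x0003f8. */
    unsigned long cookie = PIO_OFFSET | IO_SPACE_BASE(0) | IO_SPACE_PORT(0x3f8);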
+#include <asm/intrinsics.h>
+#include <asm/machvec.h>
+#include <asm/page.h>
+#include <asm/system.h>
+#include <asm-generic/iomap.h>
+
+/*
+ * Change virtual addresses to physical addresses and vv.
+ */
+static inline unsigned long
+virt_to_phys (volatile void *address)
+{
+       return (unsigned long) address - PAGE_OFFSET;
+}
+
+static inline void*
+phys_to_virt (unsigned long address)
+{
+       return (void *) (address + PAGE_OFFSET);
+}
+
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */
+
+/*
+ * The following two macros are deprecated and scheduled for removal.
+ * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
+ */
+#define bus_to_virt    phys_to_virt
+#define virt_to_bus    virt_to_phys
+#define page_to_bus    page_to_phys
+
+# endif /* KERNEL */
+
+/*
+ * Memory fence w/accept.  This should never be used in code that is
+ * not IA-64 specific.
+ */
+#define __ia64_mf_a()  ia64_mfa()
+
+/**
+ * ___ia64_mmiowb - I/O write barrier
+ *
+ * Ensure ordering of I/O space writes.  This will make sure that writes
+ * following the barrier will arrive after all previous writes.  For most
+ * ia64 platforms, this is a simple 'mf.a' instruction.
+ *
+ * See Documentation/DocBook/deviceiobook.tmpl for more information.
+ */
+static inline void ___ia64_mmiowb(void)
+{
+       ia64_mfa();
+}
+
+static inline const unsigned long
+__ia64_get_io_port_base (void)
+{
+       extern unsigned long ia64_iobase;
+
+       return ia64_iobase;
+}
+
+static inline void*
+__ia64_mk_io_addr (unsigned long port)
+{
+       struct io_space *space;
+       unsigned long offset;
+
+       space = &io_space[IO_SPACE_NR(port)];
+       port = IO_SPACE_PORT(port);
+       if (space->sparse)
+               offset = IO_SPACE_SPARSE_ENCODING(port);
+       else
+               offset = port;
+
+       return (void *) (space->mmio_base | offset);
+}
+
+#define __ia64_inb     ___ia64_inb
+#define __ia64_inw     ___ia64_inw
+#define __ia64_inl     ___ia64_inl
+#define __ia64_outb    ___ia64_outb
+#define __ia64_outw    ___ia64_outw
+#define __ia64_outl    ___ia64_outl
+#define __ia64_readb   ___ia64_readb
+#define __ia64_readw   ___ia64_readw
+#define __ia64_readl   ___ia64_readl
+#define __ia64_readq   ___ia64_readq
+#define __ia64_readb_relaxed   ___ia64_readb
+#define __ia64_readw_relaxed   ___ia64_readw
+#define __ia64_readl_relaxed   ___ia64_readl
+#define __ia64_readq_relaxed   ___ia64_readq
+#define __ia64_writeb  ___ia64_writeb
+#define __ia64_writew  ___ia64_writew
+#define __ia64_writel  ___ia64_writel
+#define __ia64_writeq  ___ia64_writeq
+#define __ia64_mmiowb  ___ia64_mmiowb
+
+/*
+ * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
+ * that the access has completed before executing other I/O accesses.  Since we're doing
+ * the accesses through an uncachable (UC) translation, the CPU will execute them in
+ * program order.  However, we still need to tell the compiler not to shuffle them around
+ * during optimization, which is why we use "volatile" pointers.
+ */
+
+static inline unsigned int
+___ia64_inb (unsigned long port)
+{
+       volatile unsigned char *addr = __ia64_mk_io_addr(port);
+       unsigned char ret;
+
+       ret = *addr;
+       __ia64_mf_a();
+       return ret;
+}
+
+static inline unsigned int
+___ia64_inw (unsigned long port)
+{
+       volatile unsigned short *addr = __ia64_mk_io_addr(port);
+       unsigned short ret;
+
+       ret = *addr;
+       __ia64_mf_a();
+       return ret;
+}
+
+static inline unsigned int
+___ia64_inl (unsigned long port)
+{
+       volatile unsigned int *addr = __ia64_mk_io_addr(port);
+       unsigned int ret;
+
+       ret = *addr;
+       __ia64_mf_a();
+       return ret;
+}
+
+static inline void
+___ia64_outb (unsigned char val, unsigned long port)
+{
+       volatile unsigned char *addr = __ia64_mk_io_addr(port);
+
+       *addr = val;
+       __ia64_mf_a();
+}
+
+static inline void
+___ia64_outw (unsigned short val, unsigned long port)
+{
+       volatile unsigned short *addr = __ia64_mk_io_addr(port);
+
+       *addr = val;
+       __ia64_mf_a();
+}
+
+static inline void
+___ia64_outl (unsigned int val, unsigned long port)
+{
+       volatile unsigned int *addr = __ia64_mk_io_addr(port);
+
+       *addr = val;
+       __ia64_mf_a();
+}
+
+static inline void
+__insb (unsigned long port, void *dst, unsigned long count)
+{
+       unsigned char *dp = dst;
+
+       while (count--)
+               *dp++ = platform_inb(port);
+}
+
+static inline void
+__insw (unsigned long port, void *dst, unsigned long count)
+{
+       unsigned short *dp = dst;
+
+       while (count--)
+               *dp++ = platform_inw(port);
+}
+
+static inline void
+__insl (unsigned long port, void *dst, unsigned long count)
+{
+       unsigned int *dp = dst;
+
+       while (count--)
+               *dp++ = platform_inl(port);
+}
+
+static inline void
+__outsb (unsigned long port, const void *src, unsigned long count)
+{
+       const unsigned char *sp = src;
+
+       while (count--)
+               platform_outb(*sp++, port);
+}
+
+static inline void
+__outsw (unsigned long port, const void *src, unsigned long count)
+{
+       const unsigned short *sp = src;
+
+       while (count--)
+               platform_outw(*sp++, port);
+}
+
+static inline void
+__outsl (unsigned long port, const void *src, unsigned long count)
+{
+       const unsigned int *sp = src;
+
+       while (count--)
+               platform_outl(*sp++, port);
+}
+
+/*
+ * Unfortunately, some platforms are broken and do not follow the IA-64 architecture
+ * specification regarding legacy I/O support.  Thus, we have to make these operations
+ * platform dependent...
+ */
+#define __inb          platform_inb
+#define __inw          platform_inw
+#define __inl          platform_inl
+#define __outb         platform_outb
+#define __outw         platform_outw
+#define __outl         platform_outl
+#define __mmiowb       platform_mmiowb
+
+#define inb(p)         __inb(p)
+#define inw(p)         __inw(p)
+#define inl(p)         __inl(p)
+#define insb(p,d,c)    __insb(p,d,c)
+#define insw(p,d,c)    __insw(p,d,c)
+#define insl(p,d,c)    __insl(p,d,c)
+#define outb(v,p)      __outb(v,p)
+#define outw(v,p)      __outw(v,p)
+#define outl(v,p)      __outl(v,p)
+#define outsb(p,s,c)   __outsb(p,s,c)
+#define outsw(p,s,c)   __outsw(p,s,c)
+#define outsl(p,s,c)   __outsl(p,s,c)
+#define mmiowb()       __mmiowb()
+
+/*
+ * The addresses passed to these functions are already ioremap()ed.
+ *
+ * We need these to be machine vectors since some platforms don't provide
+ * DMA coherence via PIO reads (PCI drivers and the spec imply that this is
+ * a good idea).  Writes are ok though for all existing ia64 platforms (and
+ * hopefully it'll stay that way).
+ */
+static inline unsigned char
+___ia64_readb (const volatile void __iomem *addr)
+{
+       return *(volatile unsigned char __force *)addr;
+}
+
+static inline unsigned short
+___ia64_readw (const volatile void __iomem *addr)
+{
+       return *(volatile unsigned short __force *)addr;
+}
+
+static inline unsigned int
+___ia64_readl (const volatile void __iomem *addr)
+{
+       return *(volatile unsigned int __force *) addr;
+}
+
+static inline unsigned long
+___ia64_readq (const volatile void __iomem *addr)
+{
+       return *(volatile unsigned long __force *) addr;
+}
+
+static inline void
+__writeb (unsigned char val, volatile void __iomem *addr)
+{
+       *(volatile unsigned char __force *) addr = val;
+}
+
+static inline void
+__writew (unsigned short val, volatile void __iomem *addr)
+{
+       *(volatile unsigned short __force *) addr = val;
+}
+
+static inline void
+__writel (unsigned int val, volatile void __iomem *addr)
+{
+       *(volatile unsigned int __force *) addr = val;
+}
+
+static inline void
+__writeq (unsigned long val, volatile void __iomem *addr)
+{
+       *(volatile unsigned long __force *) addr = val;
+}
+
+#define __readb                platform_readb
+#define __readw                platform_readw
+#define __readl                platform_readl
+#define __readq                platform_readq
+#define __readb_relaxed        platform_readb_relaxed
+#define __readw_relaxed        platform_readw_relaxed
+#define __readl_relaxed        platform_readl_relaxed
+#define __readq_relaxed        platform_readq_relaxed
+
+#define readb(a)       __readb((a))
+#define readw(a)       __readw((a))
+#define readl(a)       __readl((a))
+#define readq(a)       __readq((a))
+#define readb_relaxed(a)       __readb_relaxed((a))
+#define readw_relaxed(a)       __readw_relaxed((a))
+#define readl_relaxed(a)       __readl_relaxed((a))
+#define readq_relaxed(a)       __readq_relaxed((a))
+#define __raw_readb    readb
+#define __raw_readw    readw
+#define __raw_readl    readl
+#define __raw_readq    readq
+#define __raw_readb_relaxed    readb_relaxed
+#define __raw_readw_relaxed    readw_relaxed
+#define __raw_readl_relaxed    readl_relaxed
+#define __raw_readq_relaxed    readq_relaxed
+#define writeb(v,a)    __writeb((v), (a))
+#define writew(v,a)    __writew((v), (a))
+#define writel(v,a)    __writel((v), (a))
+#define writeq(v,a)    __writeq((v), (a))
+#define __raw_writeb   writeb
+#define __raw_writew   writew
+#define __raw_writel   writel
+#define __raw_writeq   writeq
+
+#ifndef inb_p
+# define inb_p         inb
+#endif
+#ifndef inw_p
+# define inw_p         inw
+#endif
+#ifndef inl_p
+# define inl_p         inl
+#endif
+
+#ifndef outb_p
+# define outb_p                outb
+#endif
+#ifndef outw_p
+# define outw_p                outw
+#endif
+#ifndef outl_p
+# define outl_p                outl
+#endif
+
+/*
+ * An "address" in IO memory space is not clearly either an integer or a 
pointer. We will
+ * accept both, thus the casts.
+ *
+ * On ia-64, we access the physical I/O memory space through the uncached 
kernel region.
+ */
+static inline void __iomem *
+ioremap (unsigned long offset, unsigned long size)
+{
+       return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset));
+}
+
+static inline void
+iounmap (volatile void __iomem *addr)
+{
+}
+
+#define ioremap_nocache(o,s)   ioremap(o,s)
+
+# ifdef __KERNEL__
+
+/*
+ * String version of IO memory access ops:
+ */
+extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
+extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
+extern void memset_io(volatile void __iomem *s, int c, long n);
+
+#define dma_cache_inv(_start,_size)             do { } while (0)
+#define dma_cache_wback(_start,_size)           do { } while (0)
+#define dma_cache_wback_inv(_start,_size)       do { } while (0)
+
+# endif /* __KERNEL__ */
+
+/*
+ * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing.  It is said that
+ * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
+ * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
+ * SPECweb-like workloads on zx1-based machines.  Thus, for now we favor I/O MMU bypassing
+ * over BIO-level virtual merging.
+ */
+extern unsigned long ia64_max_iommu_merge_mask;
+#if 1
+#define BIO_VMERGE_BOUNDARY    0
+#else
+/*
+ * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here.  Should be
+ * replaced by dma_merge_mask() or something of that sort.  Note: the only way
+ * BIO_VMERGE_BOUNDARY is used is to mask off bits.  Effectively, our definition gets
+ * expanded into:
+ *
+ *     addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_vmerge_mask)
+ *
+ * which is precisely what we want.
+ */
+#define BIO_VMERGE_BOUNDARY    (ia64_max_iommu_merge_mask + 1)
+#endif
+
+#endif /* _ASM_IA64_IO_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/kregs.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/kregs.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,199 @@
+#ifndef _ASM_IA64_KREGS_H
+#define _ASM_IA64_KREGS_H
+
+/*
+ * Copyright (C) 2001-2002 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+/*
+ * This file defines the kernel register usage convention used by Linux/ia64.
+ */
+
+/*
+ * Kernel registers:
+ */
+#define IA64_KR_IO_BASE                0       /* ar.k0: legacy I/O base address */
+#define IA64_KR_TSSD           1       /* ar.k1: IVE uses this as the TSSD */
+#define IA64_KR_PER_CPU_DATA   3       /* ar.k3: physical per-CPU base */
+#define IA64_KR_CURRENT_STACK  4       /* ar.k4: what's mapped in IA64_TR_CURRENT_STACK */
+#define IA64_KR_FPU_OWNER      5       /* ar.k5: fpu-owner (UP only, at the moment) */
+#define IA64_KR_CURRENT                6       /* ar.k6: "current" task pointer */
+#define IA64_KR_PT_BASE                7       /* ar.k7: page table base address (physical) */
+
+#define _IA64_KR_PASTE(x,y)    x##y
+#define _IA64_KR_PREFIX(n)     _IA64_KR_PASTE(ar.k, n)
+#define IA64_KR(n)             _IA64_KR_PREFIX(IA64_KR_##n)
+
+/*
+ * Translation registers:
+ */
+#define IA64_TR_KERNEL         0       /* itr0, dtr0: maps kernel image (code & data) */
+#define IA64_TR_PALCODE                1       /* itr1: maps PALcode as required by EFI */
+#ifdef CONFIG_VTI
+#define IA64_TR_XEN_IN_DOM     6       /* itr6, dtr6: Double mapping for xen image in domain space */
+#endif // CONFIG_VTI
+#define IA64_TR_PERCPU_DATA    1       /* dtr1: percpu data */
+#define IA64_TR_CURRENT_STACK  2       /* dtr2: maps kernel's memory- & register-stacks */
+#ifdef XEN
+#define IA64_TR_SHARED_INFO    3       /* dtr3: page shared with domain */
+#define        IA64_TR_VHPT            4       /* dtr4: vhpt */
+#define IA64_TR_ARCH_INFO      5
+#ifdef CONFIG_VTI
+#define IA64_TR_VHPT_IN_DOM    5       /* dtr5: Double mapping for vhpt table in domain space */
+#define IA64_TR_RR7_SWITCH_STUB        7       /* dtr7: mapping for rr7 switch stub */
+#define IA64_TEMP_PHYSICAL     8       /* itr8, dtr8: temp mapping for guest physical memory 256M */
+#endif // CONFIG_VTI
+#endif
+
+/* Processor status register bits: */
+#define IA64_PSR_BE_BIT                1
+#define IA64_PSR_UP_BIT                2
+#define IA64_PSR_AC_BIT                3
+#define IA64_PSR_MFL_BIT       4
+#define IA64_PSR_MFH_BIT       5
+#define IA64_PSR_IC_BIT                13
+#define IA64_PSR_I_BIT         14
+#define IA64_PSR_PK_BIT                15
+#define IA64_PSR_DT_BIT                17
+#define IA64_PSR_DFL_BIT       18
+#define IA64_PSR_DFH_BIT       19
+#define IA64_PSR_SP_BIT                20
+#define IA64_PSR_PP_BIT                21
+#define IA64_PSR_DI_BIT                22
+#define IA64_PSR_SI_BIT                23
+#define IA64_PSR_DB_BIT                24
+#define IA64_PSR_LP_BIT                25
+#define IA64_PSR_TB_BIT                26
+#define IA64_PSR_RT_BIT                27
+/* The following are not affected by save_flags()/restore_flags(): */
+#define IA64_PSR_CPL0_BIT      32
+#define IA64_PSR_CPL1_BIT      33
+#define IA64_PSR_IS_BIT                34
+#define IA64_PSR_MC_BIT                35
+#define IA64_PSR_IT_BIT                36
+#define IA64_PSR_ID_BIT                37
+#define IA64_PSR_DA_BIT                38
+#define IA64_PSR_DD_BIT                39
+#define IA64_PSR_SS_BIT                40
+#define IA64_PSR_RI_BIT                41
+#define IA64_PSR_ED_BIT                43
+#define IA64_PSR_BN_BIT                44
+#define IA64_PSR_IA_BIT                45
+#ifdef CONFIG_VTI
+#define IA64_PSR_VM_BIT                46
+#endif // CONFIG_VTI
+
+/* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
+   execve().  Only list flags here that need to be cleared/set for BOTH clone2() and
+   execve().  */
+#define IA64_PSR_BITS_TO_CLEAR (IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_DB | IA64_PSR_LP | \
+                                IA64_PSR_TB  | IA64_PSR_ID  | IA64_PSR_DA | IA64_PSR_DD | \
+                                IA64_PSR_SS  | IA64_PSR_ED  | IA64_PSR_IA)
+#define IA64_PSR_BITS_TO_SET   (IA64_PSR_DFH | IA64_PSR_SP)
+
+#define IA64_PSR_BE    (__IA64_UL(1) << IA64_PSR_BE_BIT)
+#define IA64_PSR_UP    (__IA64_UL(1) << IA64_PSR_UP_BIT)
+#define IA64_PSR_AC    (__IA64_UL(1) << IA64_PSR_AC_BIT)
+#define IA64_PSR_MFL   (__IA64_UL(1) << IA64_PSR_MFL_BIT)
+#define IA64_PSR_MFH   (__IA64_UL(1) << IA64_PSR_MFH_BIT)
+#define IA64_PSR_IC    (__IA64_UL(1) << IA64_PSR_IC_BIT)
+#define IA64_PSR_I     (__IA64_UL(1) << IA64_PSR_I_BIT)
+#define IA64_PSR_PK    (__IA64_UL(1) << IA64_PSR_PK_BIT)
+#define IA64_PSR_DT    (__IA64_UL(1) << IA64_PSR_DT_BIT)
+#define IA64_PSR_DFL   (__IA64_UL(1) << IA64_PSR_DFL_BIT)
+#define IA64_PSR_DFH   (__IA64_UL(1) << IA64_PSR_DFH_BIT)
+#define IA64_PSR_SP    (__IA64_UL(1) << IA64_PSR_SP_BIT)
+#define IA64_PSR_PP    (__IA64_UL(1) << IA64_PSR_PP_BIT)
+#define IA64_PSR_DI    (__IA64_UL(1) << IA64_PSR_DI_BIT)
+#define IA64_PSR_SI    (__IA64_UL(1) << IA64_PSR_SI_BIT)
+#define IA64_PSR_DB    (__IA64_UL(1) << IA64_PSR_DB_BIT)
+#define IA64_PSR_LP    (__IA64_UL(1) << IA64_PSR_LP_BIT)
+#define IA64_PSR_TB    (__IA64_UL(1) << IA64_PSR_TB_BIT)
+#define IA64_PSR_RT    (__IA64_UL(1) << IA64_PSR_RT_BIT)
+/* The following are not affected by save_flags()/restore_flags(): */
+#define IA64_PSR_CPL   (__IA64_UL(3) << IA64_PSR_CPL0_BIT)
+#define IA64_PSR_IS    (__IA64_UL(1) << IA64_PSR_IS_BIT)
+#define IA64_PSR_MC    (__IA64_UL(1) << IA64_PSR_MC_BIT)
+#define IA64_PSR_IT    (__IA64_UL(1) << IA64_PSR_IT_BIT)
+#define IA64_PSR_ID    (__IA64_UL(1) << IA64_PSR_ID_BIT)
+#define IA64_PSR_DA    (__IA64_UL(1) << IA64_PSR_DA_BIT)
+#define IA64_PSR_DD    (__IA64_UL(1) << IA64_PSR_DD_BIT)
+#define IA64_PSR_SS    (__IA64_UL(1) << IA64_PSR_SS_BIT)
+#define IA64_PSR_RI    (__IA64_UL(3) << IA64_PSR_RI_BIT)
+#define IA64_PSR_ED    (__IA64_UL(1) << IA64_PSR_ED_BIT)
+#define IA64_PSR_BN    (__IA64_UL(1) << IA64_PSR_BN_BIT)
+#define IA64_PSR_IA    (__IA64_UL(1) << IA64_PSR_IA_BIT)
+#ifdef CONFIG_VTI
+#define IA64_PSR_VM    (__IA64_UL(1) << IA64_PSR_VM_BIT)
+#endif // CONFIG_VTI
+
+/* User mask bits: */
+#define IA64_PSR_UM    (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH)
+
+/* Default Control Register */
+#define IA64_DCR_PP_BIT                 0      /* privileged performance monitor default */
+#define IA64_DCR_BE_BIT                 1      /* big-endian default */
+#define IA64_DCR_LC_BIT                 2      /* ia32 lock-check enable */
+#define IA64_DCR_DM_BIT                 8      /* defer TLB miss faults */
+#define IA64_DCR_DP_BIT                 9      /* defer page-not-present faults */
+#define IA64_DCR_DK_BIT                10      /* defer key miss faults */
+#define IA64_DCR_DX_BIT                11      /* defer key permission faults */
+#define IA64_DCR_DR_BIT                12      /* defer access right faults */
+#define IA64_DCR_DA_BIT                13      /* defer access bit faults */
+#define IA64_DCR_DD_BIT                14      /* defer debug faults */
+
+#define IA64_DCR_PP    (__IA64_UL(1) << IA64_DCR_PP_BIT)
+#define IA64_DCR_BE    (__IA64_UL(1) << IA64_DCR_BE_BIT)
+#define IA64_DCR_LC    (__IA64_UL(1) << IA64_DCR_LC_BIT)
+#define IA64_DCR_DM    (__IA64_UL(1) << IA64_DCR_DM_BIT)
+#define IA64_DCR_DP    (__IA64_UL(1) << IA64_DCR_DP_BIT)
+#define IA64_DCR_DK    (__IA64_UL(1) << IA64_DCR_DK_BIT)
+#define IA64_DCR_DX    (__IA64_UL(1) << IA64_DCR_DX_BIT)
+#define IA64_DCR_DR    (__IA64_UL(1) << IA64_DCR_DR_BIT)
+#define IA64_DCR_DA    (__IA64_UL(1) << IA64_DCR_DA_BIT)
+#define IA64_DCR_DD    (__IA64_UL(1) << IA64_DCR_DD_BIT)
+
+/* Interrupt Status Register */
+#define IA64_ISR_X_BIT         32      /* execute access */
+#define IA64_ISR_W_BIT         33      /* write access */
+#define IA64_ISR_R_BIT         34      /* read access */
+#define IA64_ISR_NA_BIT                35      /* non-access */
+#define IA64_ISR_SP_BIT                36      /* speculative load exception */
+#define IA64_ISR_RS_BIT                37      /* mandatory register-stack exception */
+#define IA64_ISR_IR_BIT                38      /* invalid register frame exception */
+#define IA64_ISR_CODE_MASK     0xf
+
+#define IA64_ISR_X     (__IA64_UL(1) << IA64_ISR_X_BIT)
+#define IA64_ISR_W     (__IA64_UL(1) << IA64_ISR_W_BIT)
+#define IA64_ISR_R     (__IA64_UL(1) << IA64_ISR_R_BIT)
+#define IA64_ISR_NA    (__IA64_UL(1) << IA64_ISR_NA_BIT)
+#define IA64_ISR_SP    (__IA64_UL(1) << IA64_ISR_SP_BIT)
+#define IA64_ISR_RS    (__IA64_UL(1) << IA64_ISR_RS_BIT)
+#define IA64_ISR_IR    (__IA64_UL(1) << IA64_ISR_IR_BIT)
+
+/* ISR code field for non-access instructions */
+#define IA64_ISR_CODE_TPA      0
+#define IA64_ISR_CODE_FC       1
+#define IA64_ISR_CODE_PROBE    2
+#define IA64_ISR_CODE_TAK      3
+#define IA64_ISR_CODE_LFETCH   4
+#define IA64_ISR_CODE_PROBEF   5
+
+#ifdef XEN
+/* Interruption Function State */
+#define IA64_IFS_V_BIT         63
+#define IA64_IFS_V     (__IA64_UL(1) << IA64_IFS_V_BIT)
+
+/* Page Table Address */
+#define IA64_PTA_VE_BIT 0
+#define IA64_PTA_SIZE_BIT 2
+#define IA64_PTA_VF_BIT 8
+#define IA64_PTA_BASE_BIT 15
+
+#define IA64_PTA_VE     (__IA64_UL(1) << IA64_PTA_VE_BIT)
+#define IA64_PTA_SIZE   (__IA64_UL(0x3f) << IA64_PTA_SIZE_BIT)
+#define IA64_PTA_VF     (__IA64_UL(1) << IA64_PTA_VF_BIT)
+#define IA64_PTA_BASE   (__IA64_UL(0) - ((__IA64_UL(1) << IA64_PTA_BASE_BIT)))
+#endif
+
+#endif /* _ASM_IA64_KREGS_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm-generic/bug.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm-generic/bug.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,34 @@
+#ifndef _ASM_GENERIC_BUG_H
+#define _ASM_GENERIC_BUG_H
+
+#include <linux/compiler.h>
+#include <linux/config.h>
+
+#ifndef HAVE_ARCH_BUG
+#define BUG() do { \
+       printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+       panic("BUG!"); \
+} while (0)
+#endif
+
+#ifndef HAVE_ARCH_PAGE_BUG
+#define PAGE_BUG(page) do { \
+       printk("page BUG for page at %p\n", page); \
+       BUG(); \
+} while (0)
+#endif
+
+#ifndef HAVE_ARCH_BUG_ON
+#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
+#endif
+
+#ifndef HAVE_ARCH_WARN_ON
+#define WARN_ON(condition) do { \
+       if (unlikely((condition)!=0)) { \
+               printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
+               dump_stack(); \
+       } \
+} while (0)
+#endif
+
+#endif
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm-generic/div64.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm-generic/div64.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,58 @@
+#ifndef _ASM_GENERIC_DIV64_H
+#define _ASM_GENERIC_DIV64_H
+/*
+ * Copyright (C) 2003 Bernardo Innocenti <bernie@xxxxxxxxxxx>
+ * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
+ *
+ * The semantics of do_div() are:
+ *
+ * uint32_t do_div(uint64_t *n, uint32_t base)
+ * {
+ *     uint32_t remainder = *n % base;
+ *     *n = *n / base;
+ *     return remainder;
+ * }
+ *
+ * NOTE: macro parameter n is evaluated multiple times,
+ *       beware of side effects!
+ */
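For illustration only (not part of this patch), a typical call site; note that n is updated in place and the remainder is the return value. The helper name is hypothetical:

    /* Hypothetical helper: split a nanosecond count into whole seconds
     * (left in *ns) and leftover nanoseconds (returned). */
    static inline uint32_t example_ns_to_sec(uint64_t *ns)
    {
            return do_div(*ns, 1000000000U);
    }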
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+#if BITS_PER_LONG == 64
+
+# define do_div(n,base) ({                                     \
+       uint32_t __base = (base);                               \
+       uint32_t __rem;                                         \
+       __rem = ((uint64_t)(n)) % __base;                       \
+       (n) = ((uint64_t)(n)) / __base;                         \
+       __rem;                                                  \
+ })
+
+#elif BITS_PER_LONG == 32
+
+extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
+
+/* The unnecessary pointer compare is there
+ * to check for type safety (n must be 64bit)
+ */
+# define do_div(n,base) ({                             \
+       uint32_t __base = (base);                       \
+       uint32_t __rem;                                 \
+       (void)(((typeof((n)) *)0) == ((uint64_t *)0));  \
+       if (likely(((n) >> 32) == 0)) {                 \
+               __rem = (uint32_t)(n) % __base;         \
+               (n) = (uint32_t)(n) / __base;           \
+       } else                                          \
+               __rem = __div64_32(&(n), __base);       \
+       __rem;                                          \
+ })
+
+#else /* BITS_PER_LONG == ?? */
+
+# error do_div() does not yet support the C64
+
+#endif /* BITS_PER_LONG */
+
+#endif /* _ASM_GENERIC_DIV64_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm-generic/errno-base.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm-generic/errno-base.h       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,39 @@
+#ifndef _ASM_GENERIC_ERRNO_BASE_H
+#define _ASM_GENERIC_ERRNO_BASE_H
+
+#define        EPERM            1      /* Operation not permitted */
+#define        ENOENT           2      /* No such file or directory */
+#define        ESRCH            3      /* No such process */
+#define        EINTR            4      /* Interrupted system call */
+#define        EIO              5      /* I/O error */
+#define        ENXIO            6      /* No such device or address */
+#define        E2BIG            7      /* Argument list too long */
+#define        ENOEXEC          8      /* Exec format error */
+#define        EBADF            9      /* Bad file number */
+#define        ECHILD          10      /* No child processes */
+#define        EAGAIN          11      /* Try again */
+#define        ENOMEM          12      /* Out of memory */
+#define        EACCES          13      /* Permission denied */
+#define        EFAULT          14      /* Bad address */
+#define        ENOTBLK         15      /* Block device required */
+#define        EBUSY           16      /* Device or resource busy */
+#define        EEXIST          17      /* File exists */
+#define        EXDEV           18      /* Cross-device link */
+#define        ENODEV          19      /* No such device */
+#define        ENOTDIR         20      /* Not a directory */
+#define        EISDIR          21      /* Is a directory */
+#define        EINVAL          22      /* Invalid argument */
+#define        ENFILE          23      /* File table overflow */
+#define        EMFILE          24      /* Too many open files */
+#define        ENOTTY          25      /* Not a typewriter */
+#define        ETXTBSY         26      /* Text file busy */
+#define        EFBIG           27      /* File too large */
+#define        ENOSPC          28      /* No space left on device */
+#define        ESPIPE          29      /* Illegal seek */
+#define        EROFS           30      /* Read-only file system */
+#define        EMLINK          31      /* Too many links */
+#define        EPIPE           32      /* Broken pipe */
+#define        EDOM            33      /* Math argument out of domain of func */
+#define        ERANGE          34      /* Math result not representable */
+
+#endif
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm-generic/errno.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm-generic/errno.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,105 @@
+#ifndef _ASM_GENERIC_ERRNO_H
+#define _ASM_GENERIC_ERRNO_H
+
+#include <asm-generic/errno-base.h>
+
+#define        EDEADLK         35      /* Resource deadlock would occur */
+#define        ENAMETOOLONG    36      /* File name too long */
+#define        ENOLCK          37      /* No record locks available */
+#define        ENOSYS          38      /* Function not implemented */
+#define        ENOTEMPTY       39      /* Directory not empty */
+#define        ELOOP           40      /* Too many symbolic links encountered */
+#define        EWOULDBLOCK     EAGAIN  /* Operation would block */
+#define        ENOMSG          42      /* No message of desired type */
+#define        EIDRM           43      /* Identifier removed */
+#define        ECHRNG          44      /* Channel number out of range */
+#define        EL2NSYNC        45      /* Level 2 not synchronized */
+#define        EL3HLT          46      /* Level 3 halted */
+#define        EL3RST          47      /* Level 3 reset */
+#define        ELNRNG          48      /* Link number out of range */
+#define        EUNATCH         49      /* Protocol driver not attached */
+#define        ENOCSI          50      /* No CSI structure available */
+#define        EL2HLT          51      /* Level 2 halted */
+#define        EBADE           52      /* Invalid exchange */
+#define        EBADR           53      /* Invalid request descriptor */
+#define        EXFULL          54      /* Exchange full */
+#define        ENOANO          55      /* No anode */
+#define        EBADRQC         56      /* Invalid request code */
+#define        EBADSLT         57      /* Invalid slot */
+
+#define        EDEADLOCK       EDEADLK
+
+#define        EBFONT          59      /* Bad font file format */
+#define        ENOSTR          60      /* Device not a stream */
+#define        ENODATA         61      /* No data available */
+#define        ETIME           62      /* Timer expired */
+#define        ENOSR           63      /* Out of streams resources */
+#define        ENONET          64      /* Machine is not on the network */
+#define        ENOPKG          65      /* Package not installed */
+#define        EREMOTE         66      /* Object is remote */
+#define        ENOLINK         67      /* Link has been severed */
+#define        EADV            68      /* Advertise error */
+#define        ESRMNT          69      /* Srmount error */
+#define        ECOMM           70      /* Communication error on send */
+#define        EPROTO          71      /* Protocol error */
+#define        EMULTIHOP       72      /* Multihop attempted */
+#define        EDOTDOT         73      /* RFS specific error */
+#define        EBADMSG         74      /* Not a data message */
+#define        EOVERFLOW       75      /* Value too large for defined data type */
+#define        ENOTUNIQ        76      /* Name not unique on network */
+#define        EBADFD          77      /* File descriptor in bad state */
+#define        EREMCHG         78      /* Remote address changed */
+#define        ELIBACC         79      /* Can not access a needed shared library */
+#define        ELIBBAD         80      /* Accessing a corrupted shared library */
+#define        ELIBSCN         81      /* .lib section in a.out corrupted */
+#define        ELIBMAX         82      /* Attempting to link in too many shared libraries */
+#define        ELIBEXEC        83      /* Cannot exec a shared library directly */
+#define        EILSEQ          84      /* Illegal byte sequence */
+#define        ERESTART        85      /* Interrupted system call should be restarted */
+#define        ESTRPIPE        86      /* Streams pipe error */
+#define        EUSERS          87      /* Too many users */
+#define        ENOTSOCK        88      /* Socket operation on non-socket */
+#define        EDESTADDRREQ    89      /* Destination address required */
+#define        EMSGSIZE        90      /* Message too long */
+#define        EPROTOTYPE      91      /* Protocol wrong type for socket */
+#define        ENOPROTOOPT     92      /* Protocol not available */
+#define        EPROTONOSUPPORT 93      /* Protocol not supported */
+#define        ESOCKTNOSUPPORT 94      /* Socket type not supported */
+#define        EOPNOTSUPP      95      /* Operation not supported on transport endpoint */
+#define        EPFNOSUPPORT    96      /* Protocol family not supported */
+#define        EAFNOSUPPORT    97      /* Address family not supported by protocol */
+#define        EADDRINUSE      98      /* Address already in use */
+#define        EADDRNOTAVAIL   99      /* Cannot assign requested address */
+#define        ENETDOWN        100     /* Network is down */
+#define        ENETUNREACH     101     /* Network is unreachable */
+#define        ENETRESET       102     /* Network dropped connection because of reset */
+#define        ECONNABORTED    103     /* Software caused connection abort */
+#define        ECONNRESET      104     /* Connection reset by peer */
+#define        ENOBUFS         105     /* No buffer space available */
+#define        EISCONN         106     /* Transport endpoint is already connected */
+#define        ENOTCONN        107     /* Transport endpoint is not connected */
+#define        ESHUTDOWN       108     /* Cannot send after transport endpoint shutdown */
+#define        ETOOMANYREFS    109     /* Too many references: cannot splice */
+#define        ETIMEDOUT       110     /* Connection timed out */
+#define        ECONNREFUSED    111     /* Connection refused */
+#define        EHOSTDOWN       112     /* Host is down */
+#define        EHOSTUNREACH    113     /* No route to host */
+#define        EALREADY        114     /* Operation already in progress */
+#define        EINPROGRESS     115     /* Operation now in progress */
+#define        ESTALE          116     /* Stale NFS file handle */
+#define        EUCLEAN         117     /* Structure needs cleaning */
+#define        ENOTNAM         118     /* Not a XENIX named type file */
+#define        ENAVAIL         119     /* No XENIX semaphores available */
+#define        EISNAM          120     /* Is a named type file */
+#define        EREMOTEIO       121     /* Remote I/O error */
+#define        EDQUOT          122     /* Quota exceeded */
+
+#define        ENOMEDIUM       123     /* No medium found */
+#define        EMEDIUMTYPE     124     /* Wrong medium type */
+#define        ECANCELED       125     /* Operation Canceled */
+#define        ENOKEY          126     /* Required key not available */
+#define        EKEYEXPIRED     127     /* Key has expired */
+#define        EKEYREVOKED     128     /* Key has been revoked */
+#define        EKEYREJECTED    129     /* Key was rejected by service */
+
+#endif
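
(A minimal user-space sketch, separate from the patch itself, showing the same numbering in action; it assumes a Linux/glibc toolchain, where <errno.h> resolves to these asm-generic values, e.g. EINVAL=22, ENOSYS=38, ETIMEDOUT=110.)

#include <stdio.h>
#include <string.h>
#include <errno.h>

int main(void)
{
        /* The constants below are the same ones defined in the hunks above. */
        int codes[] = { EINVAL, ENOSYS, ETIMEDOUT };
        unsigned int i;

        for (i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
                printf("%d -> %s\n", codes[i], strerror(codes[i]));
        return 0;
}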
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm-generic/ide_iops.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm-generic/ide_iops.h Tue Aug  2 23:59:09 2005
@@ -0,0 +1,38 @@
+/* Generic I/O and MEMIO string operations.  */
+
+#define __ide_insw     insw
+#define __ide_insl     insl
+#define __ide_outsw    outsw
+#define __ide_outsl    outsl
+
+static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
+{
+       while (count--) {
+               *(u16 *)addr = readw(port);
+               addr += 2;
+       }
+}
+
+static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
+{
+       while (count--) {
+               *(u32 *)addr = readl(port);
+               addr += 4;
+       }
+}
+
+static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
+{
+       while (count--) {
+               writew(*(u16 *)addr, port);
+               addr += 2;
+       }
+}
+
+static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
+{
+       while (count--) {
+               writel(*(u32 *)addr, port);
+               addr += 4;
+       }
+}
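
(A rough user-space sketch of the __ide_mm_insw() loop above, not the kernel code: readw() is replaced by a plain load, and the "port" is ordinary memory, since the kernel MMIO accessors are unavailable outside the kernel.)

#include <stdio.h>
#include <stdint.h>

/* Read a 16-bit "register" count times, storing to consecutive addresses. */
static void mm_insw_demo(const volatile uint16_t *port, void *addr, uint32_t count)
{
        uint8_t *dst = addr;

        while (count--) {
                *(uint16_t *)dst = *port;       /* stand-in for readw(port) */
                dst += 2;
        }
}

int main(void)
{
        volatile uint16_t fake_port = 0xBEEF;
        uint16_t buf[4];

        mm_insw_demo(&fake_port, buf, 4);
        printf("0x%04x 0x%04x\n", buf[0], buf[3]);
        return 0;
}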
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm-generic/iomap.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm-generic/iomap.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,63 @@
+#ifndef __GENERIC_IO_H
+#define __GENERIC_IO_H
+
+#include <linux/linkage.h>
+
+/*
+ * These are the "generic" interfaces for doing new-style
+ * memory-mapped or PIO accesses. Architectures may do
+ * their own arch-optimized versions, these just act as
+ * wrappers around the old-style IO register access functions:
+ * read[bwl]/write[bwl]/in[bwl]/out[bwl]
+ *
+ * Don't include this directly, include it from <asm/io.h>.
+ */
+
+/*
+ * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
+ * access or a MMIO access, these functions don't care. The info is
+ * encoded in the hardware mapping set up by the mapping functions
+ * (or the cookie itself, depending on implementation and hw).
+ *
+ * The generic routines just encode the PIO/MMIO as part of the
+ * cookie, and coldly assume that the MMIO IO mappings are not
+ * in the low address range. Architectures for which this is not
+ * true can't use this generic implementation.
+ */
+extern unsigned int fastcall ioread8(void __iomem *);
+extern unsigned int fastcall ioread16(void __iomem *);
+extern unsigned int fastcall ioread32(void __iomem *);
+
+extern void fastcall iowrite8(u8, void __iomem *);
+extern void fastcall iowrite16(u16, void __iomem *);
+extern void fastcall iowrite32(u32, void __iomem *);
+
+/*
+ * "string" versions of the above. Note that they
+ * use native byte ordering for the accesses (on
+ * the assumption that IO and memory agree on a
+ * byte order, and CPU byteorder is irrelevant).
+ *
+ * They do _not_ update the port address. If you
+ * want MMIO that copies stuff laid out in MMIO
+ * memory across multiple ports, use "memcpy_toio()"
+ * and friends.
+ */
+extern void fastcall ioread8_rep(void __iomem *port, void *buf, unsigned long count);
+extern void fastcall ioread16_rep(void __iomem *port, void *buf, unsigned long count);
+extern void fastcall ioread32_rep(void __iomem *port, void *buf, unsigned long count);
+
+extern void fastcall iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void fastcall iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void fastcall iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
+
+/* Create a virtual mapping cookie for an IO port range */
+extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+extern void ioport_unmap(void __iomem *);
+
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+struct pci_dev;
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+
+#endif
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm-generic/pci-dma-compat.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm-generic/pci-dma-compat.h   Tue Aug  2 23:59:09 2005
@@ -0,0 +1,107 @@
+/* include this file if the platform implements the dma_ DMA Mapping API
+ * and wants to provide the pci_ DMA Mapping API in terms of it */
+
+#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
+#define _ASM_GENERIC_PCI_DMA_COMPAT_H
+
+#include <linux/dma-mapping.h>
+
+/* note pci_set_dma_mask isn't here, since it's a public function
+ * exported from drivers/pci, use dma_supported instead */
+
+static inline int
+pci_dma_supported(struct pci_dev *hwdev, u64 mask)
+{
+       return dma_supported(hwdev == NULL ? NULL : &hwdev->dev, mask);
+}
+
+static inline void *
+pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+                    dma_addr_t *dma_handle)
+{
+       return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
+}
+
+static inline void
+pci_free_consistent(struct pci_dev *hwdev, size_t size,
+                   void *vaddr, dma_addr_t dma_handle)
+{
+       dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
+}
+
+static inline dma_addr_t
+pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
+{
+       return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
+                size_t size, int direction)
+{
+       dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
+}
+
+static inline dma_addr_t
+pci_map_page(struct pci_dev *hwdev, struct page *page,
+            unsigned long offset, size_t size, int direction)
+{
+       return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
+              size_t size, int direction)
+{
+       dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
+}
+
+static inline int
+pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+          int nents, int direction)
+{
+       return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+            int nents, int direction)
+{
+       dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
+                   size_t size, int direction)
+{
+       dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
+                   size_t size, int direction)
+{
+       dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
+               int nelems, int direction)
+{
+       dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
+               int nelems, int direction)
+{
+       dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
+}
+
+static inline int
+pci_dma_mapping_error(dma_addr_t dma_addr)
+{
+       return dma_mapping_error(dma_addr);
+}
+
+#endif
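
(The whole file above is one idiom: forward each pci_* call to its dma_* counterpart, translating a NULL pci_dev into a NULL struct device. A toy, self-contained illustration of that shim pattern follows; the types and dma_supported() here are stand-ins, not the kernel's.)

#include <stdio.h>
#include <stddef.h>

struct device  { const char *name; };
struct pci_dev { struct device dev; };

/* Stand-in for the generic DMA API the compat layer forwards to. */
static int dma_supported(struct device *dev, unsigned long long mask)
{
        (void)dev;
        return mask >= 0xffffffffULL;   /* pretend 32-bit DMA is the minimum */
}

static int pci_dma_supported(struct pci_dev *hwdev, unsigned long long mask)
{
        return dma_supported(hwdev == NULL ? NULL : &hwdev->dev, mask);
}

int main(void)
{
        struct pci_dev nic = { { "nic0" } };

        printf("%d %d\n", pci_dma_supported(&nic, 0xffffffffULL),
                          pci_dma_supported(NULL, 0xffffULL));
        return 0;
}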
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm-generic/pci.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm-generic/pci.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,34 @@
+/*
+ * linux/include/asm-generic/pci.h
+ *
+ *  Copyright (C) 2003 Russell King
+ */
+#ifndef _ASM_GENERIC_PCI_H
+#define _ASM_GENERIC_PCI_H
+
+/**
+ * pcibios_resource_to_bus - convert resource to PCI bus address
+ * @dev: device which owns this resource
+ * @region: converted bus-centric region (start,end)
+ * @res: resource to convert
+ *
+ * Convert a resource to a PCI device bus address or bus window.
+ */
+static inline void
+pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+                        struct resource *res)
+{
+       region->start = res->start;
+       region->end = res->end;
+}
+
+#define pcibios_scan_all_fns(a, b)     0
+
+#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+       return channel ? 15 : 14;
+}
+#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */
+
+#endif
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm-generic/pgtable-nopud.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm-generic/pgtable-nopud.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,56 @@
+#ifndef _PGTABLE_NOPUD_H
+#define _PGTABLE_NOPUD_H
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Having the pud type consist of a pgd gets the size right, and allows
+ * us to conceptually access the pgd entry that this pud is folded into
+ * without casting.
+ */
+typedef struct { pgd_t pgd; } pud_t;
+
+#define PUD_SHIFT      PGDIR_SHIFT
+#define PTRS_PER_PUD   1
+#define PUD_SIZE       (1UL << PUD_SHIFT)
+#define PUD_MASK       (~(PUD_SIZE-1))
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pud is never bad, and a pud always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd)          { return 0; }
+static inline int pgd_bad(pgd_t pgd)           { return 0; }
+static inline int pgd_present(pgd_t pgd)       { return 1; }
+static inline void pgd_clear(pgd_t *pgd)       { }
+#define pud_ERROR(pud)                         (pgd_ERROR((pud).pgd))
+
+#define pgd_populate(mm, pgd, pud)             do { } while (0)
+/*
+ * (puds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pgd(pgdptr, pgdval)                        set_pud((pud_t *)(pgdptr), (pud_t) { pgdval })
+
+static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address)
+{
+       return (pud_t *)pgd;
+}
+
+#define pud_val(x)                             (pgd_val((x).pgd))
+#define __pud(x)                               ((pud_t) { __pgd(x) } )
+
+#define pgd_page(pgd)                          (pud_page((pud_t){ pgd }))
+#define pgd_page_kernel(pgd)                   (pud_page_kernel((pud_t){ pgd }))
+
+/*
+ * allocating and freeing a pud is trivial: the 1-entry pud is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+#define pud_alloc_one(mm, address)             NULL
+#define pud_free(x)                            do { } while (0)
+#define __pud_free_tlb(tlb, x)                 do { } while (0)
+
+#endif /* __ASSEMBLY__ */
+#endif /* _PGTABLE_NOPUD_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm-generic/pgtable.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm-generic/pgtable.h  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,137 @@
+#ifndef _ASM_GENERIC_PGTABLE_H
+#define _ASM_GENERIC_PGTABLE_H
+
+#ifndef __HAVE_ARCH_PTEP_ESTABLISH
+/*
+ * Establish a new mapping:
+ *  - flush the old one
+ *  - update the page tables
+ *  - inform the TLB about the new one
+ *
+ * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock.
+ *
+ * Note: the old pte is known to not be writable, so we don't need to
+ * worry about dirty bits etc getting lost.
+ */
+#ifndef __HAVE_ARCH_SET_PTE_ATOMIC
+#define ptep_establish(__vma, __address, __ptep, __entry)              \
+do {                                                                   \
+       set_pte(__ptep, __entry);                                       \
+       flush_tlb_page(__vma, __address);                               \
+} while (0)
+#else /* __HAVE_ARCH_SET_PTE_ATOMIC */
+#define ptep_establish(__vma, __address, __ptep, __entry)              \
+do {                                                                   \
+       set_pte_atomic(__ptep, __entry);                                \
+       flush_tlb_page(__vma, __address);                               \
+} while (0)
+#endif /* __HAVE_ARCH_SET_PTE_ATOMIC */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+/*
+ * Largely same as above, but only sets the access flags (dirty,
+ * accessed, and writable). Furthermore, we know it always gets set
+ * to a "more permissive" setting, which allows most architectures
+ * to optimize this.
+ */
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+do {                                                                     \
+       set_pte(__ptep, __entry);                                         \
+       flush_tlb_page(__vma, __address);                                 \
+} while (0)
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(pte_t *ptep)
+{
+       pte_t pte = *ptep;
+       if (!pte_young(pte))
+               return 0;
+       set_pte(ptep, pte_mkold(pte));
+       return 1;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(__vma, __address, __ptep)               \
+({                                                                     \
+       int __young = ptep_test_and_clear_young(__ptep);                \
+       if (__young)                                                    \
+               flush_tlb_page(__vma, __address);                       \
+       __young;                                                        \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+static inline int ptep_test_and_clear_dirty(pte_t *ptep)
+{
+       pte_t pte = *ptep;
+       if (!pte_dirty(pte))
+               return 0;
+       set_pte(ptep, pte_mkclean(pte));
+       return 1;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
+#define ptep_clear_flush_dirty(__vma, __address, __ptep)               \
+({                                                                     \
+       int __dirty = ptep_test_and_clear_dirty(__ptep);                \
+       if (__dirty)                                                    \
+               flush_tlb_page(__vma, __address);                       \
+       __dirty;                                                        \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(pte_t *ptep)
+{
+       pte_t pte = *ptep;
+       pte_clear(ptep);
+       return pte;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
+#define ptep_clear_flush(__vma, __address, __ptep)                     \
+({                                                                     \
+       pte_t __pte = ptep_get_and_clear(__ptep);                       \
+       flush_tlb_page(__vma, __address);                               \
+       __pte;                                                          \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(pte_t *ptep)
+{
+       pte_t old_pte = *ptep;
+       set_pte(ptep, pte_wrprotect(old_pte));
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_MKDIRTY
+static inline void ptep_mkdirty(pte_t *ptep)
+{
+       pte_t old_pte = *ptep;
+       set_pte(ptep, pte_mkdirty(old_pte));
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B)  (pte_val(A) == pte_val(B))
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
+#define page_test_and_clear_dirty(page) (0)
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
+#define page_test_and_clear_young(page) (0)
+#endif
+
+#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
+#define pgd_offset_gate(mm, addr)      pgd_offset(mm, addr)
+#endif
+
+#endif /* _ASM_GENERIC_PGTABLE_H */
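
(Most of the helpers above share one shape: read the entry, bail out if the bit of interest is already clear, otherwise clear it and report that a TLB flush is needed. A standalone sketch of that test-and-clear pattern on a plain flags word, using toy names rather than the kernel's pte_t API:)

#include <stdio.h>

#define FLAG_YOUNG 0x1u

/* Mirrors the ptep_test_and_clear_young() shape: returns 1 only when the
 * flag actually changed, so callers can skip the expensive flush otherwise. */
static int test_and_clear_young_demo(unsigned int *flags)
{
        if (!(*flags & FLAG_YOUNG))
                return 0;
        *flags &= ~FLAG_YOUNG;
        return 1;
}

int main(void)
{
        unsigned int pte = FLAG_YOUNG;

        printf("%d %d\n", test_and_clear_young_demo(&pte),
                          test_and_clear_young_demo(&pte));     /* 1 then 0 */
        return 0;
}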
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm-generic/sections.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm-generic/sections.h Tue Aug  2 23:59:09 2005
@@ -0,0 +1,13 @@
+#ifndef _ASM_GENERIC_SECTIONS_H_
+#define _ASM_GENERIC_SECTIONS_H_
+
+/* References to section boundaries */
+
+extern char _text[], _stext[], _etext[];
+extern char _data[], _sdata[], _edata[];
+extern char __bss_start[], __bss_stop[];
+extern char __init_begin[], __init_end[];
+extern char _sinittext[], _einittext[];
+extern char _end[];
+
+#endif /* _ASM_GENERIC_SECTIONS_H_ */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm-generic/topology.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm-generic/topology.h Tue Aug  2 23:59:09 2005
@@ -0,0 +1,48 @@
+/*
+ * linux/include/asm-generic/topology.h
+ *
+ * Written by: Matthew Dobson, IBM Corporation
+ *
+ * Copyright (C) 2002, IBM Corp.
+ *
+ * All rights reserved.          
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <colpatch@xxxxxxxxxx>
+ */
+#ifndef _ASM_GENERIC_TOPOLOGY_H
+#define _ASM_GENERIC_TOPOLOGY_H
+
+/* Other architectures wishing to use this simple topology API should fill
+   in the below functions as appropriate in their own <asm/topology.h> file. */
+#ifndef cpu_to_node
+#define cpu_to_node(cpu)       (0)
+#endif
+#ifndef parent_node
+#define parent_node(node)      (0)
+#endif
+#ifndef node_to_cpumask
+#define node_to_cpumask(node)  (cpu_online_map)
+#endif
+#ifndef node_to_first_cpu
+#define node_to_first_cpu(node)        (0)
+#endif
+#ifndef pcibus_to_cpumask
+#define pcibus_to_cpumask(bus) (cpu_online_map)
+#endif
+
+#endif /* _ASM_GENERIC_TOPOLOGY_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm-generic/vmlinux.lds.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm-generic/vmlinux.lds.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,90 @@
+#ifndef LOAD_OFFSET
+#define LOAD_OFFSET 0
+#endif
+
+#ifndef VMLINUX_SYMBOL
+#define VMLINUX_SYMBOL(_sym_) _sym_
+#endif
+
+#define RODATA                                                         \
+       .rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {           \
+               *(.rodata) *(.rodata.*)                                 \
+               *(__vermagic)           /* Kernel version magic */      \
+       }                                                               \
+                                                                       \
+       .rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {          \
+               *(.rodata1)                                             \
+       }                                                               \
+                                                                       \
+       /* PCI quirks */                                                \
+       .pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {        \
+               VMLINUX_SYMBOL(__start_pci_fixups_early) = .;           \
+               *(.pci_fixup_early)                                     \
+               VMLINUX_SYMBOL(__end_pci_fixups_early) = .;             \
+               VMLINUX_SYMBOL(__start_pci_fixups_header) = .;          \
+               *(.pci_fixup_header)                                    \
+               VMLINUX_SYMBOL(__end_pci_fixups_header) = .;            \
+               VMLINUX_SYMBOL(__start_pci_fixups_final) = .;           \
+               *(.pci_fixup_final)                                     \
+               VMLINUX_SYMBOL(__end_pci_fixups_final) = .;             \
+               VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;          \
+               *(.pci_fixup_enable)                                    \
+               VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;            \
+       }                                                               \
+                                                                       \
+       /* Kernel symbol table: Normal symbols */                       \
+       __ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {         \
+               VMLINUX_SYMBOL(__start___ksymtab) = .;                  \
+               *(__ksymtab)                                            \
+               VMLINUX_SYMBOL(__stop___ksymtab) = .;                   \
+       }                                                               \
+                                                                       \
+       /* Kernel symbol table: GPL-only symbols */                     \
+       __ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {     \
+               VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;              \
+               *(__ksymtab_gpl)                                        \
+               VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;               \
+       }                                                               \
+                                                                       \
+       /* Kernel symbol table: Normal symbols */                       \
+       __kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {         \
+               VMLINUX_SYMBOL(__start___kcrctab) = .;                  \
+               *(__kcrctab)                                            \
+               VMLINUX_SYMBOL(__stop___kcrctab) = .;                   \
+       }                                                               \
+                                                                       \
+       /* Kernel symbol table: GPL-only symbols */                     \
+       __kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {     \
+               VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;              \
+               *(__kcrctab_gpl)                                        \
+               VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;               \
+       }                                                               \
+                                                                       \
+       /* Kernel symbol table: strings */                              \
+        __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {        \
+               *(__ksymtab_strings)                                    \
+       }                                                               \
+                                                                       \
+       /* Built-in module parameters. */                               \
+       __param : AT(ADDR(__param) - LOAD_OFFSET) {                     \
+               VMLINUX_SYMBOL(__start___param) = .;                    \
+               *(__param)                                              \
+               VMLINUX_SYMBOL(__stop___param) = .;                     \
+       }
+
+#define SECURITY_INIT                                                  \
+       .security_initcall.init : {                                     \
+               VMLINUX_SYMBOL(__security_initcall_start) = .;          \
+               *(.security_initcall.init)                              \
+               VMLINUX_SYMBOL(__security_initcall_end) = .;            \
+       }
+
+#define SCHED_TEXT                                                     \
+               VMLINUX_SYMBOL(__sched_text_start) = .;                 \
+               *(.sched.text)                                          \
+               VMLINUX_SYMBOL(__sched_text_end) = .;
+
+#define LOCK_TEXT                                                      \
+               VMLINUX_SYMBOL(__lock_text_start) = .;                  \
+               *(.spinlock.text)                                       \
+               VMLINUX_SYMBOL(__lock_text_end) = .;
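
(The __ksymtab/__param machinery above relies on the linker collecting same-named input sections and bracketing them with start/stop symbols. The same technique can be tried in a user-space sketch with a GNU toolchain on ELF, where ld synthesizes __start_<section>/__stop_<section> for referenced sections whose names are valid C identifiers; the section and entry names below are made up for the demo.)

#include <stdio.h>

struct param { const char *name; int value; };

#define REGISTER_PARAM(n, v)                                            \
        static const struct param __param_##n                           \
        __attribute__((used, section("demo_params"))) = { #n, v }

REGISTER_PARAM(debug, 1);
REGISTER_PARAM(retries, 3);

/* Provided automatically by GNU ld for the "demo_params" section. */
extern const struct param __start_demo_params[];
extern const struct param __stop_demo_params[];

int main(void)
{
        const struct param *p;

        for (p = __start_demo_params; p < __stop_demo_params; p++)
                printf("%s = %d\n", p->name, p->value);
        return 0;
}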
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/acpi.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/acpi.h     Tue Aug  2 23:59:09 2005
@@ -0,0 +1,112 @@
+/*
+ *  asm-ia64/acpi.h
+ *
+ *  Copyright (C) 1999 VA Linux Systems
+ *  Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ *  Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@xxxxxxxxx>
+ *  Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@xxxxxxxxx>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#ifndef _ASM_ACPI_H
+#define _ASM_ACPI_H
+
+#ifdef __KERNEL__
+
+#include <linux/init.h>
+#include <linux/numa.h>
+#include <asm/system.h>
+
+#define COMPILER_DEPENDENT_INT64       long
+#define COMPILER_DEPENDENT_UINT64      unsigned long
+
+/*
+ * Calling conventions:
+ *
+ * ACPI_SYSTEM_XFACE        - Interfaces to host OS (handlers, threads)
+ * ACPI_EXTERNAL_XFACE      - External ACPI interfaces
+ * ACPI_INTERNAL_XFACE      - Internal ACPI interfaces
+ * ACPI_INTERNAL_VAR_XFACE  - Internal variable-parameter list interfaces
+ */
+#define ACPI_SYSTEM_XFACE
+#define ACPI_EXTERNAL_XFACE
+#define ACPI_INTERNAL_XFACE
+#define ACPI_INTERNAL_VAR_XFACE
+
+/* Asm macros */
+
+#define ACPI_ASM_MACROS
+#define BREAKPOINT3
+#define ACPI_DISABLE_IRQS() local_irq_disable()
+#define ACPI_ENABLE_IRQS()  local_irq_enable()
+#define ACPI_FLUSH_CPU_CACHE()
+
+static inline int
+ia64_acpi_acquire_global_lock (unsigned int *lock)
+{
+       unsigned int old, new, val;
+       do {
+               old = *lock;
+               new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
+               val = ia64_cmpxchg4_acq(lock, new, old);
+       } while (unlikely (val != old));
+       return (new < 3) ? -1 : 0;
+}
+
+static inline int
+ia64_acpi_release_global_lock (unsigned int *lock)
+{
+       unsigned int old, new, val;
+       do {
+               old = *lock;
+               new = old & ~0x3;
+               val = ia64_cmpxchg4_acq(lock, new, old);
+       } while (unlikely (val != old));
+       return old & 0x1;
+}
+
+#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq)                           \
+       ((Acq) = ia64_acpi_acquire_global_lock((unsigned int *) GLptr))
+
+#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq)                           \
+       ((Acq) = ia64_acpi_release_global_lock((unsigned int *) GLptr))
+
+#define acpi_disabled 0        /* ACPI always enabled on IA64 */
+#define acpi_noirq 0   /* ACPI always enabled on IA64 */
+#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
+#define acpi_strict 1  /* no ACPI spec workarounds on IA64 */
+static inline void disable_acpi(void) { }
+
+const char *acpi_get_sysname (void);
+int acpi_request_vector (u32 int_type);
+int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
+
+#ifdef CONFIG_ACPI_NUMA
+/* Proximity bitmap length; _PXM is at most 255 (8 bit)*/
+#define MAX_PXM_DOMAINS (256)
+extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
+extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
+#endif
+
+extern u16 ia64_acpiid_to_sapicid[];
+
+#endif /*__KERNEL__*/
+
+#endif /*_ASM_ACPI_H*/
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/asmmacro.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/asmmacro.h Tue Aug  2 23:59:09 2005
@@ -0,0 +1,111 @@
+#ifndef _ASM_IA64_ASMMACRO_H
+#define _ASM_IA64_ASMMACRO_H
+
+/*
+ * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <linux/config.h>
+
+#define ENTRY(name)                            \
+       .align 32;                              \
+       .proc name;                             \
+name:
+
+#define ENTRY_MIN_ALIGN(name)                  \
+       .align 16;                              \
+       .proc name;                             \
+name:
+
+#define GLOBAL_ENTRY(name)                     \
+       .global name;                           \
+       ENTRY(name)
+
+#define END(name)                              \
+       .endp name
+
+/*
+ * Helper macros to make unwind directives more readable:
+ */
+
+/* prologue_gr: */
+#define ASM_UNW_PRLG_RP                        0x8
+#define ASM_UNW_PRLG_PFS               0x4
+#define ASM_UNW_PRLG_PSP               0x2
+#define ASM_UNW_PRLG_PR                        0x1
+#define ASM_UNW_PRLG_GRSAVE(ninputs)   (32+(ninputs))
+
+/*
+ * Helper macros for accessing user memory.
+ */
+
+       .section "__ex_table", "a"              // declare section & section attributes
+       .previous
+
+# define EX(y,x...)                            \
+       .xdata4 "__ex_table", 99f-., y-.;       \
+  [99:]        x
+# define EXCLR(y,x...)                         \
+       .xdata4 "__ex_table", 99f-., y-.+4;     \
+  [99:]        x
+
+/*
+ * Mark instructions that need a load of a virtual address patched to be
+ * a load of a physical address.  We use this either in critical performance
+ * path (ivt.S - TLB miss processing) or in places where it might not be
+ * safe to use a "tpa" instruction (mca_asm.S - error recovery).
+ */
+       .section ".data.patch.vtop", "a"        // declare section & section attributes
+       .previous
+
+#define        LOAD_PHYSICAL(pr, reg, obj)             \
+[1:](pr)movl reg = obj;                                \
+       .xdata4 ".data.patch.vtop", 1b-.
+
+/*
+ * For now, we always put in the McKinley E9 workaround.  On CPUs that don't need it,
+ * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
+ */
+#define DO_MCKINLEY_E9_WORKAROUND
+
+#ifdef DO_MCKINLEY_E9_WORKAROUND
+       .section ".data.patch.mckinley_e9", "a"
+       .previous
+/* workaround for Itanium 2 Errata 9: */
+# define FSYS_RETURN                                   \
+       .xdata4 ".data.patch.mckinley_e9", 1f-.;        \
+1:{ .mib;                                              \
+       nop.m 0;                                        \
+       mov r16=ar.pfs;                                 \
+       br.call.sptk.many b7=2f;;                       \
+  };                                                   \
+2:{ .mib;                                              \
+       nop.m 0;                                        \
+       mov ar.pfs=r16;                                 \
+       br.ret.sptk.many b6;;                           \
+  }
+#else
+# define FSYS_RETURN   br.ret.sptk.many b6
+#endif
+
+/*
+ * Up until early 2004, use of .align within a function caused bad unwind info.
+ * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
+ * otherwise.
+ */
+#ifdef HAVE_WORKING_TEXT_ALIGN
+# define TEXT_ALIGN(n) .align n
+#else
+# define TEXT_ALIGN(n)
+#endif
+
+#ifdef HAVE_SERIALIZE_DIRECTIVE
+# define dv_serialize_data             .serialize.data
+# define dv_serialize_instruction      .serialize.instruction
+#else
+# define dv_serialize_data
+# define dv_serialize_instruction
+#endif
+
+#endif /* _ASM_IA64_ASMMACRO_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/atomic.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/atomic.h   Tue Aug  2 23:59:09 2005
@@ -0,0 +1,183 @@
+#ifndef _ASM_IA64_ATOMIC_H
+#define _ASM_IA64_ATOMIC_H
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc..
+ *
+ * NOTE: don't mess with the types below!  The "unsigned long" and
+ * "int" types were carefully placed so as to ensure proper operation
+ * of the macros.
+ *
+ * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+#include <linux/types.h>
+
+#include <asm/intrinsics.h>
+
+/*
+ * On IA-64, counter must always be volatile to ensure that the
+ * memory accesses are ordered.
+ */
+typedef struct { volatile __s32 counter; } atomic_t;
+typedef struct { volatile __s64 counter; } atomic64_t;
+
+#define ATOMIC_INIT(i)         ((atomic_t) { (i) })
+#define ATOMIC64_INIT(i)       ((atomic64_t) { (i) })
+
+#define atomic_read(v)         ((v)->counter)
+#define atomic64_read(v)       ((v)->counter)
+
+#define atomic_set(v,i)                (((v)->counter) = (i))
+#define atomic64_set(v,i)      (((v)->counter) = (i))
+
+static __inline__ int
+ia64_atomic_add (int i, atomic_t *v)
+{
+       __s32 old, new;
+       CMPXCHG_BUGCHECK_DECL
+
+       do {
+               CMPXCHG_BUGCHECK(v);
+               old = atomic_read(v);
+               new = old + i;
+       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
+       return new;
+}
+
+static __inline__ int
+ia64_atomic64_add (__s64 i, atomic64_t *v)
+{
+       __s64 old, new;
+       CMPXCHG_BUGCHECK_DECL
+
+       do {
+               CMPXCHG_BUGCHECK(v);
+               old = atomic_read(v);
+               new = old + i;
+       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
+       return new;
+}
+
+static __inline__ int
+ia64_atomic_sub (int i, atomic_t *v)
+{
+       __s32 old, new;
+       CMPXCHG_BUGCHECK_DECL
+
+       do {
+               CMPXCHG_BUGCHECK(v);
+               old = atomic_read(v);
+               new = old - i;
+       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
+       return new;
+}
+
+static __inline__ int
+ia64_atomic64_sub (__s64 i, atomic64_t *v)
+{
+       __s64 old, new;
+       CMPXCHG_BUGCHECK_DECL
+
+       do {
+               CMPXCHG_BUGCHECK(v);
+               old = atomic_read(v);
+               new = old - i;
+       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
+       return new;
+}
+
+#define atomic_add_return(i,v)                                         \
+({                                                                     \
+       int __ia64_aar_i = (i);                                         \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
+            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
+            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
+            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
+               ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
+               : ia64_atomic_add(__ia64_aar_i, v);                     \
+})
+
+#define atomic64_add_return(i,v)                                       \
+({                                                                     \
+       long __ia64_aar_i = (i);                                        \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
+            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
+            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
+            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
+               ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
+               : ia64_atomic64_add(__ia64_aar_i, v);                   \
+})
+
+/*
+ * Atomically add I to V and return TRUE if the resulting value is
+ * negative.
+ */
+static __inline__ int
+atomic_add_negative (int i, atomic_t *v)
+{
+       return atomic_add_return(i, v) < 0;
+}
+
+static __inline__ int
+atomic64_add_negative (__s64 i, atomic64_t *v)
+{
+       return atomic64_add_return(i, v) < 0;
+}
+
+#define atomic_sub_return(i,v)                                         \
+({                                                                     \
+       int __ia64_asr_i = (i);                                         \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
+            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
+            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
+            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
+               ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
+               : ia64_atomic_sub(__ia64_asr_i, v);                     \
+})
+
+#define atomic64_sub_return(i,v)                                       \
+({                                                                     \
+       long __ia64_asr_i = (i);                                        \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
+            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
+            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
+            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
+               ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
+               : ia64_atomic64_sub(__ia64_asr_i, v);                   \
+})
+
+#define atomic_dec_return(v)           atomic_sub_return(1, (v))
+#define atomic_inc_return(v)           atomic_add_return(1, (v))
+#define atomic64_dec_return(v)         atomic64_sub_return(1, (v))
+#define atomic64_inc_return(v)         atomic64_add_return(1, (v))
+
+#define atomic_sub_and_test(i,v)       (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v)         (atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v)         (atomic_add_return(1, (v)) == 0)
+#define atomic64_sub_and_test(i,v)     (atomic64_sub_return((i), (v)) == 0)
+#define atomic64_dec_and_test(v)       (atomic64_sub_return(1, (v)) == 0)
+#define atomic64_inc_and_test(v)       (atomic64_add_return(1, (v)) == 0)
+
+#define atomic_add(i,v)                        atomic_add_return((i), (v))
+#define atomic_sub(i,v)                        atomic_sub_return((i), (v))
+#define atomic_inc(v)                  atomic_add(1, (v))
+#define atomic_dec(v)                  atomic_sub(1, (v))
+
+#define atomic64_add(i,v)              atomic64_add_return((i), (v))
+#define atomic64_sub(i,v)              atomic64_sub_return((i), (v))
+#define atomic64_inc(v)                        atomic64_add(1, (v))
+#define atomic64_dec(v)                        atomic64_sub(1, (v))
+
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec()    barrier()
+#define smp_mb__after_atomic_dec()     barrier()
+#define smp_mb__before_atomic_inc()    barrier()
+#define smp_mb__after_atomic_inc()     barrier()
+
+#endif /* _ASM_IA64_ATOMIC_H */
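
(ia64_atomic_add() and friends above all use the same compare-and-exchange retry loop: recompute the new value from a snapshot and retry if another CPU raced in between. A portable user-space sketch of that loop using GCC's __atomic builtins; the kernel version uses the ia64 cmpxchg intrinsics instead.)

#include <stdio.h>

static int atomic_add_return_demo(int i, int *counter)
{
        int old, new_;

        do {
                old = __atomic_load_n(counter, __ATOMIC_RELAXED);
                new_ = old + i;
                /* Succeeds only if *counter is still 'old'; otherwise retry. */
        } while (!__atomic_compare_exchange_n(counter, &old, new_, 0,
                                              __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
        return new_;
}

int main(void)
{
        int v = 40;

        printf("%d\n", atomic_add_return_demo(2, &v));  /* prints 42 */
        return 0;
}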
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/bitops.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/bitops.h   Tue Aug  2 23:59:09 2005
@@ -0,0 +1,410 @@
+#ifndef _ASM_IA64_BITOPS_H
+#define _ASM_IA64_BITOPS_H
+
+/*
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *
+ * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 O(1)
+ *         scheduler patch
+ */
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/bitops.h>
+#include <asm/intrinsics.h>
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered.  See __set_bit()
+ * if you do not require the atomic guarantees.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ *
+ * The address must be (at least) "long" aligned.
+ * Note that there are drivers (e.g., eepro100) which use these operations to operate on
+ * hw-defined data-structures, so we can't easily change these operations to force a
+ * bigger alignment.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+static __inline__ void
+set_bit (int nr, volatile void *addr)
+{
+       __u32 bit, old, new;
+       volatile __u32 *m;
+       CMPXCHG_BUGCHECK_DECL
+
+       m = (volatile __u32 *) addr + (nr >> 5);
+       bit = 1 << (nr & 31);
+       do {
+               CMPXCHG_BUGCHECK(m);
+               old = *m;
+               new = old | bit;
+       } while (cmpxchg_acq(m, old, new) != old);
+}
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void
+__set_bit (int nr, volatile void *addr)
+{
+       *((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
+}
+
+/*
+ * clear_bit() has "acquire" semantics.
+ */
+#define smp_mb__before_clear_bit()     smp_mb()
+#define smp_mb__after_clear_bit()      do { /* skip */; } while (0)
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered.  However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static __inline__ void
+clear_bit (int nr, volatile void *addr)
+{
+       __u32 mask, old, new;
+       volatile __u32 *m;
+       CMPXCHG_BUGCHECK_DECL
+
+       m = (volatile __u32 *) addr + (nr >> 5);
+       mask = ~(1 << (nr & 31));
+       do {
+               CMPXCHG_BUGCHECK(m);
+               old = *m;
+               new = old & mask;
+       } while (cmpxchg_acq(m, old, new) != old);
+}
+
+/**
+ * __clear_bit - Clears a bit in memory (non-atomic version)
+ */
+static __inline__ void
+__clear_bit (int nr, volatile void *addr)
+{
+       volatile __u32 *p = (__u32 *) addr + (nr >> 5);
+       __u32 m = 1 << (nr & 31);
+       *p &= ~m;
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static __inline__ void
+change_bit (int nr, volatile void *addr)
+{
+       __u32 bit, old, new;
+       volatile __u32 *m;
+       CMPXCHG_BUGCHECK_DECL
+
+       m = (volatile __u32 *) addr + (nr >> 5);
+       bit = (1 << (nr & 31));
+       do {
+               CMPXCHG_BUGCHECK(m);
+               old = *m;
+               new = old ^ bit;
+       } while (cmpxchg_acq(m, old, new) != old);
+}
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void
+__change_bit (int nr, volatile void *addr)
+{
+       *((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.  
+ * It also implies a memory barrier.
+ */
+static __inline__ int
+test_and_set_bit (int nr, volatile void *addr)
+{
+       __u32 bit, old, new;
+       volatile __u32 *m;
+       CMPXCHG_BUGCHECK_DECL
+
+       m = (volatile __u32 *) addr + (nr >> 5);
+       bit = 1 << (nr & 31);
+       do {
+               CMPXCHG_BUGCHECK(m);
+               old = *m;
+               new = old | bit;
+       } while (cmpxchg_acq(m, old, new) != old);
+       return (old & bit) != 0;
+}
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.  
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static __inline__ int
+__test_and_set_bit (int nr, volatile void *addr)
+{
+       __u32 *p = (__u32 *) addr + (nr >> 5);
+       __u32 m = 1 << (nr & 31);
+       int oldbitset = (*p & m) != 0;
+
+       *p |= m;
+       return oldbitset;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.  
+ * It also implies a memory barrier.
+ */
+static __inline__ int
+test_and_clear_bit (int nr, volatile void *addr)
+{
+       __u32 mask, old, new;
+       volatile __u32 *m;
+       CMPXCHG_BUGCHECK_DECL
+
+       m = (volatile __u32 *) addr + (nr >> 5);
+       mask = ~(1 << (nr & 31));
+       do {
+               CMPXCHG_BUGCHECK(m);
+               old = *m;
+               new = old & mask;
+       } while (cmpxchg_acq(m, old, new) != old);
+       return (old & ~mask) != 0;
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.  
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static __inline__ int
+__test_and_clear_bit(int nr, volatile void * addr)
+{
+       __u32 *p = (__u32 *) addr + (nr >> 5);
+       __u32 m = 1 << (nr & 31);
+       int oldbitset = *p & m;
+
+       *p &= ~m;
+       return oldbitset;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.  
+ * It also implies a memory barrier.
+ */
+static __inline__ int
+test_and_change_bit (int nr, volatile void *addr)
+{
+       __u32 bit, old, new;
+       volatile __u32 *m;
+       CMPXCHG_BUGCHECK_DECL
+
+       m = (volatile __u32 *) addr + (nr >> 5);
+       bit = (1 << (nr & 31));
+       do {
+               CMPXCHG_BUGCHECK(m);
+               old = *m;
+               new = old ^ bit;
+       } while (cmpxchg_acq(m, old, new) != old);
+       return (old & bit) != 0;
+}
+
+/*
+ * WARNING: non atomic version.
+ */
+static __inline__ int
+__test_and_change_bit (int nr, void *addr)
+{
+       __u32 old, bit = (1 << (nr & 31));
+       __u32 *m = (__u32 *) addr + (nr >> 5);
+
+       old = *m;
+       *m = old ^ bit;
+       return (old & bit) != 0;
+}
+
+static __inline__ int
+test_bit (int nr, const volatile void *addr)
+{
+       return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
+}
+
+/**
+ * ffz - find the first zero bit in a long word
+ * @x: The long word to find the bit in
+ *
+ * Returns the bit-number (0..63) of the first (least significant) zero bit.  Undefined if
+ * no zero exists, so code should check against ~0UL first...
+ */
+static inline unsigned long
+ffz (unsigned long x)
+{
+       unsigned long result;
+
+       result = ia64_popcnt(x & (~x - 1));
+       return result;
+}
+
+/**
+ * __ffs - find first bit in word.
+ * @x: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __inline__ unsigned long
+__ffs (unsigned long x)
+{
+       unsigned long result;
+
+       result = ia64_popcnt((x-1) & ~x);
+       return result;
+}
+
+#ifdef __KERNEL__
+
+/*
+ * find_last_zero_bit - find the last zero bit in a 64 bit quantity
+ * @x: The value to search
+ */
+static inline unsigned long
+ia64_fls (unsigned long x)
+{
+       long double d = x;
+       long exp;
+
+       exp = ia64_getf_exp(d);
+       return exp - 0xffff;
+}
+
+static inline int
+fls (int x)
+{
+       return ia64_fls((unsigned int) x);
+}
+
+/*
+ * ffs: find first bit set. This is defined the same way as the libc and compiler builtin
+ * ffs routines, therefore differs in spirit from the above ffz (man ffs): it operates on
+ * "int" values only and the result value is the bit number + 1.  ffs(0) is defined to
+ * return zero.
+ */
+#define ffs(x) __builtin_ffs(x)
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+static __inline__ unsigned long
+hweight64 (unsigned long x)
+{
+       unsigned long result;
+       result = ia64_popcnt(x);
+       return result;
+}
+
+#define hweight32(x) hweight64 ((x) & 0xfffffffful)
+#define hweight16(x) hweight64 ((x) & 0xfffful)
+#define hweight8(x)  hweight64 ((x) & 0xfful)
+
+#endif /* __KERNEL__ */
+
+extern int __find_next_zero_bit (const void *addr, unsigned long size,
+                       unsigned long offset);
+extern int __find_next_bit(const void *addr, unsigned long size,
+                       unsigned long offset);
+
+#define find_next_zero_bit(addr, size, offset) \
+                       __find_next_zero_bit((addr), (size), (offset))
+#define find_next_bit(addr, size, offset) \
+                       __find_next_bit((addr), (size), (offset))
+
+/*
+ * The optimizer actually does good code for this case..
+ */
+#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
+
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+
+#ifdef __KERNEL__
+
+#define __clear_bit(nr, addr)          clear_bit(nr, addr)
+
+#define ext2_set_bit                   test_and_set_bit
+#define ext2_set_bit_atomic(l,n,a)     test_and_set_bit(n,a)
+#define ext2_clear_bit                 test_and_clear_bit
+#define ext2_clear_bit_atomic(l,n,a)   test_and_clear_bit(n,a)
+#define ext2_test_bit                  test_bit
+#define ext2_find_first_zero_bit       find_first_zero_bit
+#define ext2_find_next_zero_bit                find_next_zero_bit
+
+/* Bitmap functions for the minix filesystem.  */
+#define minix_test_and_set_bit(nr,addr)                test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr)                 set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr)      test_and_clear_bit(nr,addr)
+#define minix_test_bit(nr,addr)                        test_bit(nr,addr)
+#define minix_find_first_zero_bit(addr,size)   find_first_zero_bit(addr,size)
+
+static inline int
+sched_find_first_bit (unsigned long *b)
+{
+       if (unlikely(b[0]))
+               return __ffs(b[0]);
+       if (unlikely(b[1]))
+               return 64 + __ffs(b[1]);
+       return __ffs(b[2]) + 128;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_IA64_BITOPS_H */
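
(Two details of this file are easy to check standalone: the word/mask arithmetic used by every bit operation, word index nr >> 5 and mask 1 << (nr & 31), and ffz() computing the first zero bit as the population count of x & (~x - 1). A small sketch using compiler builtins in place of the ia64 popcnt intrinsic:)

#include <stdio.h>

static void set_bit_demo(int nr, unsigned int *addr)
{
        addr[nr >> 5] |= 1u << (nr & 31);
}

static int test_bit_demo(int nr, const unsigned int *addr)
{
        return 1 & (addr[nr >> 5] >> (nr & 31));
}

/* x & (~x - 1) keeps exactly the trailing one bits; counting them gives
 * the index of the first zero bit. */
static unsigned long ffz_demo(unsigned long x)
{
        return (unsigned long)__builtin_popcountl(x & (~x - 1));
}

int main(void)
{
        unsigned int bitmap[2] = { 0, 0 };

        set_bit_demo(37, bitmap);
        printf("bit 37: %d, word1 = 0x%x\n", test_bit_demo(37, bitmap), bitmap[1]);
        printf("ffz(0x7) = %lu\n", ffz_demo(0x7));      /* first zero bit is 3 */
        return 0;
}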
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/break.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/break.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,21 @@
+#ifndef _ASM_IA64_BREAK_H
+#define _ASM_IA64_BREAK_H
+
+/*
+ * IA-64 Linux break numbers.
+ *
+ * Copyright (C) 1999 Hewlett-Packard Co
+ * Copyright (C) 1999 David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+/*
+ * OS-specific debug break numbers:
+ */
+#define __IA64_BREAK_KDB               0x80100
+
+/*
+ * OS-specific break numbers:
+ */
+#define __IA64_BREAK_SYSCALL           0x100000
+
+#endif /* _ASM_IA64_BREAK_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/bug.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/bug.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,15 @@
+#ifndef _ASM_IA64_BUG_H
+#define _ASM_IA64_BUG_H
+
+#if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
+# define ia64_abort()  __builtin_trap()
+#else
+# define ia64_abort()  (*(volatile int *) 0 = 0)
+#endif
+#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0)
+
+/* should this BUG be made generic? */
+#define HAVE_ARCH_BUG
+#include <asm-generic/bug.h>
+
+#endif
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/byteorder.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/byteorder.h        Tue Aug  2 23:59:09 2005
@@ -0,0 +1,42 @@
+#ifndef _ASM_IA64_BYTEORDER_H
+#define _ASM_IA64_BYTEORDER_H
+
+/*
+ * Modified 1998, 1999
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>, Hewlett-Packard Co.
+ */
+
+#include <asm/types.h>
+#include <asm/intrinsics.h>
+#include <linux/compiler.h>
+
+static __inline__ __attribute_const__ __u64
+__ia64_swab64 (__u64 x)
+{
+       __u64 result;
+
+       result = ia64_mux1(x, ia64_mux1_rev);
+       return result;
+}
+
+static __inline__ __attribute_const__ __u32
+__ia64_swab32 (__u32 x)
+{
+       return __ia64_swab64(x) >> 32;
+}
+
+static __inline__ __attribute_const__ __u16
+__ia64_swab16(__u16 x)
+{
+       return __ia64_swab64(x) >> 48;
+}
+
+#define __arch__swab64(x) __ia64_swab64(x)
+#define __arch__swab32(x) __ia64_swab32(x)
+#define __arch__swab16(x) __ia64_swab16(x)
+
+#define __BYTEORDER_HAS_U64__
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* _ASM_IA64_BYTEORDER_H */
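
The swab routines above get the byte reversal from the mux1 intrinsic and then derive the 32- and 16-bit variants by shifting the 64-bit result. A minimal generic sketch of the same idea (shift/mask instead of mux1; the demo_* names are illustrative only):

#include <stdint.h>

/* Reverse all eight bytes with plain shifts -- a stand-in for ia64_mux1(). */
static uint64_t demo_swab64(uint64_t x)
{
        uint64_t r = 0;
        int i;

        for (i = 0; i < 8; i++)
                r = (r << 8) | ((x >> (8 * i)) & 0xff); /* emit bytes LSB-first */
        return r;
}

/* Same trick as __ia64_swab32(): widen, reverse, keep the high half. */
static uint32_t demo_swab32(uint32_t x)
{
        return (uint32_t)(demo_swab64(x) >> 32);
}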
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/cache.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/cache.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,30 @@
+#ifndef _ASM_IA64_CACHE_H
+#define _ASM_IA64_CACHE_H
+
+#include <linux/config.h>
+
+/*
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+/* Bytes per L1 (data) cache line.  */
+#define L1_CACHE_SHIFT         CONFIG_IA64_L1_CACHE_SHIFT
+#define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
+
+#define L1_CACHE_SHIFT_MAX 7   /* largest L1 which this arch supports */
+
+#ifdef CONFIG_SMP
+# define SMP_CACHE_SHIFT       L1_CACHE_SHIFT
+# define SMP_CACHE_BYTES       L1_CACHE_BYTES
+#else
+  /*
+   * The "aligned" directive can only _increase_ alignment, so this is
+   * safe and provides an easy way to avoid wasting space on a
+   * uni-processor:
+   */
+# define SMP_CACHE_SHIFT       3
+# define SMP_CACHE_BYTES       (1 << 3)
+#endif
+
+#endif /* _ASM_IA64_CACHE_H */
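
SMP_CACHE_BYTES is what callers typically use to pad shared data out to a full cache line and avoid false sharing. A hedged usage sketch follows; the struct and field names are made up and it assumes the header above is in scope.

/* Hypothetical per-CPU counters, padded to a cache line so two CPUs
 * updating neighbouring entries never share a line. */
struct demo_stats {
        unsigned long packets;
        unsigned long bytes;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));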
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/cacheflush.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/cacheflush.h       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,50 @@
+#ifndef _ASM_IA64_CACHEFLUSH_H
+#define _ASM_IA64_CACHEFLUSH_H
+
+/*
+ * Copyright (C) 2002 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <linux/page-flags.h>
+
+#include <asm/bitops.h>
+#include <asm/page.h>
+
+/*
+ * Cache flushing routines.  This is the kind of stuff that can be very expensive, so try
+ * to avoid them whenever possible.
+ */
+
+#define flush_cache_all()                      do { } while (0)
+#define flush_cache_mm(mm)                     do { } while (0)
+#define flush_cache_range(vma, start, end)     do { } while (0)
+#define flush_cache_page(vma, vmaddr)          do { } while (0)
+#define flush_icache_page(vma,page)            do { } while (0)
+#define flush_cache_vmap(start, end)           do { } while (0)
+#define flush_cache_vunmap(start, end)         do { } while (0)
+
+#define flush_dcache_page(page)                        \
+do {                                           \
+       clear_bit(PG_arch_1, &(page)->flags);   \
+} while (0)
+
+#define flush_dcache_mmap_lock(mapping)                do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)      do { } while (0)
+
+extern void flush_icache_range (unsigned long start, unsigned long end);
+
+#define flush_icache_user_range(vma, page, user_addr, len)                                     \
+do {                                                                                           \
+       unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK);  \
+       flush_icache_range(_addr, _addr + (len));                                               \
+} while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { memcpy(dst, src, len); \
+     flush_icache_user_range(vma, page, vaddr, len); \
+} while (0)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+       memcpy(dst, src, len)
+
+#endif /* _ASM_IA64_CACHEFLUSH_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/checksum.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/checksum.h Tue Aug  2 23:59:09 2005
@@ -0,0 +1,76 @@
+#ifndef _ASM_IA64_CHECKSUM_H
+#define _ASM_IA64_CHECKSUM_H
+
+/*
+ * Modified 1998, 1999
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>, Hewlett-Packard Co
+ */
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+extern unsigned short ip_fast_csum (unsigned char * iph, unsigned int ihl);
+
+/*
+ * Computes the checksum of the TCP/UDP pseudo-header returns a 16-bit
+ * checksum, already complemented
+ */
+extern unsigned short int csum_tcpudp_magic (unsigned long saddr,
+                                            unsigned long daddr,
+                                            unsigned short len,
+                                            unsigned short proto,
+                                            unsigned int sum);
+
+extern unsigned int csum_tcpudp_nofold (unsigned long saddr,
+                                       unsigned long daddr,
+                                       unsigned short len,
+                                       unsigned short proto,
+                                       unsigned int sum);
+
+/*
+ * Computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern unsigned int csum_partial (const unsigned char * buff, int len,
+                                 unsigned int sum);
+
+/*
+ * Same as csum_partial, but copies from src while it checksums.
+ *
+ * Here it is even more important to align src and dst on a 32-bit (or
+ * even better 64-bit) boundary.
+ */
+extern unsigned int csum_partial_copy_from_user (const char *src, char *dst,
+                                                int len, unsigned int sum,
+                                                int *errp);
+
+extern unsigned int csum_partial_copy_nocheck (const char *src, char *dst,
+                                              int len, unsigned int sum);
+
+/*
+ * This routine is used for miscellaneous IP-like checksums, mainly in
+ * icmp.c
+ */
+extern unsigned short ip_compute_csum (unsigned char *buff, int len);
+
+/*
+ * Fold a partial checksum without adding pseudo headers.
+ */
+static inline unsigned short
+csum_fold (unsigned int sum)
+{
+       sum = (sum & 0xffff) + (sum >> 16);
+       sum = (sum & 0xffff) + (sum >> 16);
+       return ~sum;
+}
+
+#endif /* _ASM_IA64_CHECKSUM_H */
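
csum_fold() above collapses a 32-bit partial sum into the final 16-bit one's-complement checksum. The sketch below pairs it with a naive accumulator to show how the pieces compose; the real csum_partial() is an optimized arch routine, and the 16-bit word assembly here is an illustrative choice, not the kernel's.

#include <stddef.h>
#include <stdint.h>

/* Naive stand-in for csum_partial(): sum 16-bit words, odd trailing byte last. */
static uint32_t demo_csum_partial(const uint8_t *buf, size_t len, uint32_t sum)
{
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8);
        if (len & 1)
                sum += buf[len - 1];
        return sum;
}

/* Same folding step as csum_fold() above. */
static uint16_t demo_csum_fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}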
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/current.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/current.h  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,17 @@
+#ifndef _ASM_IA64_CURRENT_H
+#define _ASM_IA64_CURRENT_H
+
+/*
+ * Modified 1998-2000
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>, Hewlett-Packard Co
+ */
+
+#include <asm/intrinsics.h>
+
+/*
+ * In kernel mode, thread pointer (r13) is used to point to the current task
+ * structure.
+ */
+#define current        ((struct task_struct *) ia64_getreg(_IA64_REG_TP))
+
+#endif /* _ASM_IA64_CURRENT_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/delay.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/delay.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,97 @@
+#ifndef _ASM_IA64_DELAY_H
+#define _ASM_IA64_DELAY_H
+
+/*
+ * Delay routines using a pre-computed "cycles/usec" value.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@xxxxxxxxx>
+ * Copyright (C) 1999 Don Dugger <don.dugger@xxxxxxxxx>
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/compiler.h>
+
+#include <asm/intrinsics.h>
+#include <asm/processor.h>
+
+static __inline__ void
+ia64_set_itm (unsigned long val)
+{
+       ia64_setreg(_IA64_REG_CR_ITM, val);
+       ia64_srlz_d();
+}
+
+static __inline__ unsigned long
+ia64_get_itm (void)
+{
+       unsigned long result;
+
+       result = ia64_getreg(_IA64_REG_CR_ITM);
+       ia64_srlz_d();
+       return result;
+}
+
+static __inline__ void
+ia64_set_itv (unsigned long val)
+{
+       ia64_setreg(_IA64_REG_CR_ITV, val);
+       ia64_srlz_d();
+}
+
+static __inline__ unsigned long
+ia64_get_itv (void)
+{
+       return ia64_getreg(_IA64_REG_CR_ITV);
+}
+
+static __inline__ void
+ia64_set_itc (unsigned long val)
+{
+       ia64_setreg(_IA64_REG_AR_ITC, val);
+       ia64_srlz_d();
+}
+
+static __inline__ unsigned long
+ia64_get_itc (void)
+{
+       unsigned long result;
+
+       result = ia64_getreg(_IA64_REG_AR_ITC);
+       ia64_barrier();
+#ifdef CONFIG_ITANIUM
+       while (unlikely((__s32) result == -1)) {
+               result = ia64_getreg(_IA64_REG_AR_ITC);
+               ia64_barrier();
+       }
+#endif
+       return result;
+}
+
+extern void ia64_delay_loop (unsigned long loops);
+
+static __inline__ void
+__delay (unsigned long loops)
+{
+       if (unlikely(loops < 1))
+               return;
+
+       ia64_delay_loop (loops - 1);
+}
+
+static __inline__ void
+udelay (unsigned long usecs)
+{
+       unsigned long start = ia64_get_itc();
+       unsigned long cycles = usecs*local_cpu_data->cyc_per_usec;
+
+       while (ia64_get_itc() - start < cycles)
+               cpu_relax();
+}
+
+#endif /* _ASM_IA64_DELAY_H */
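
udelay() above is a busy-wait on the interval time counter: read ar.itc, compute the target cycle count, and spin until it is reached. A generic sketch of the same pattern, with a hypothetical read_cycles() callback standing in for ia64_get_itc() and an explicit cycles-per-microsecond value in place of local_cpu_data->cyc_per_usec:

static inline void demo_udelay(unsigned long usecs,
                               unsigned long (*read_cycles)(void),
                               unsigned long cyc_per_usec)
{
        unsigned long start = read_cycles();
        unsigned long cycles = usecs * cyc_per_usec;

        /* Spin until enough counter ticks have elapsed; the kernel version
         * calls cpu_relax() inside this loop. */
        while (read_cycles() - start < cycles)
                ;
}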
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/div64.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/div64.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,1 @@
+#include <asm-generic/div64.h>
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/dma-mapping.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/dma-mapping.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,70 @@
+#ifndef _ASM_IA64_DMA_MAPPING_H
+#define _ASM_IA64_DMA_MAPPING_H
+
+/*
+ * Copyright (C) 2003-2004 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+#include <linux/config.h>
+#include <asm/machvec.h>
+
+#define dma_alloc_coherent     platform_dma_alloc_coherent
+#define dma_alloc_noncoherent  platform_dma_alloc_coherent     /* coherent mem. is cheap */
+#define dma_free_coherent      platform_dma_free_coherent
+#define dma_free_noncoherent   platform_dma_free_coherent
+#define dma_map_single         platform_dma_map_single
+#define dma_map_sg             platform_dma_map_sg
+#define dma_unmap_single       platform_dma_unmap_single
+#define dma_unmap_sg           platform_dma_unmap_sg
+#define dma_sync_single_for_cpu        platform_dma_sync_single_for_cpu
+#define dma_sync_sg_for_cpu    platform_dma_sync_sg_for_cpu
+#define dma_sync_single_for_device platform_dma_sync_single_for_device
+#define dma_sync_sg_for_device platform_dma_sync_sg_for_device
+#define dma_mapping_error      platform_dma_mapping_error
+
+#define dma_map_page(dev, pg, off, size, dir)                          \
+       dma_map_single(dev, page_address(pg) + (off), (size), (dir))
+#define dma_unmap_page(dev, dma_addr, size, dir)                       \
+       dma_unmap_single(dev, dma_addr, size, dir)
+
+/*
+ * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
+ * See Documentation/DMA-API.txt for details.
+ */
+
+#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)      \
+       dma_sync_single_for_cpu(dev, dma_handle, size, dir)
+#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)   \
+       dma_sync_single_for_device(dev, dma_handle, size, dir)
+
+#define dma_supported          platform_dma_supported
+
+static inline int
+dma_set_mask (struct device *dev, u64 mask)
+{
+       if (!dev->dma_mask || !dma_supported(dev, mask))
+               return -EIO;
+       *dev->dma_mask = mask;
+       return 0;
+}
+
+static inline int
+dma_get_cache_alignment (void)
+{
+       extern int ia64_max_cacheline_size;
+       return ia64_max_cacheline_size;
+}
+
+static inline void
+dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
+{
+       /*
+        * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
+        * ensure that dma_cache_sync() enforces order, hence the mb().
+        */
+       mb();
+}
+
+#define dma_is_consistent(dma_handle)  (1)     /* all we do is coherent memory... */
+
+#endif /* _ASM_IA64_DMA_MAPPING_H */
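
dma_set_mask() above refuses to update the device mask unless platform_dma_supported() accepts it, so drivers normally try the widest mask first and fall back. A hedged usage sketch (demo_setup_dma and the literal masks are illustrative, not from the patch):

/* Assumes the header above is included and "dev" is a real struct device. */
static int demo_setup_dma(struct device *dev)
{
        if (dma_set_mask(dev, 0xffffffffffffffffULL) == 0)
                return 0;                               /* full 64-bit DMA */

        return dma_set_mask(dev, 0xffffffffULL);        /* else try 32-bit */
}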
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/dma.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/dma.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,23 @@
+#ifndef _ASM_IA64_DMA_H
+#define _ASM_IA64_DMA_H
+
+/*
+ * Copyright (C) 1998-2002 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <linux/config.h>
+
+#include <asm/io.h>            /* need byte IO */
+
+extern unsigned long MAX_DMA_ADDRESS;
+
+#ifdef CONFIG_PCI
+  extern int isa_dma_bridge_buggy;
+#else
+# define isa_dma_bridge_buggy  (0)
+#endif
+
+#define free_dma(x)
+
+#endif /* _ASM_IA64_DMA_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/errno.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/errno.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,1 @@
+#include <asm-generic/errno.h>
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/fpu.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/fpu.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,66 @@
+#ifndef _ASM_IA64_FPU_H
+#define _ASM_IA64_FPU_H
+
+/*
+ * Copyright (C) 1998, 1999, 2002, 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <asm/types.h>
+
+/* floating point status register: */
+#define FPSR_TRAP_VD   (1 << 0)        /* invalid op trap disabled */
+#define FPSR_TRAP_DD   (1 << 1)        /* denormal trap disabled */
+#define FPSR_TRAP_ZD   (1 << 2)        /* zero-divide trap disabled */
+#define FPSR_TRAP_OD   (1 << 3)        /* overflow trap disabled */
+#define FPSR_TRAP_UD   (1 << 4)        /* underflow trap disabled */
+#define FPSR_TRAP_ID   (1 << 5)        /* inexact trap disabled */
+#define FPSR_S0(x)     ((x) <<  6)
+#define FPSR_S1(x)     ((x) << 19)
+#define FPSR_S2(x)     (__IA64_UL(x) << 32)
+#define FPSR_S3(x)     (__IA64_UL(x) << 45)
+
+/* floating-point status field controls: */
+#define FPSF_FTZ       (1 << 0)                /* flush-to-zero */
+#define FPSF_WRE       (1 << 1)                /* widest-range exponent */
+#define FPSF_PC(x)     (((x) & 0x3) << 2)      /* precision control */
+#define FPSF_RC(x)     (((x) & 0x3) << 4)      /* rounding control */
+#define FPSF_TD                (1 << 6)                /* trap disabled */
+
+/* floating-point status field flags: */
+#define FPSF_V         (1 <<  7)               /* invalid operation flag */
+#define FPSF_D         (1 <<  8)               /* denormal/unnormal operand flag */
+#define FPSF_Z         (1 <<  9)               /* zero divide (IEEE) flag */
+#define FPSF_O         (1 << 10)               /* overflow (IEEE) flag */
+#define FPSF_U         (1 << 11)               /* underflow (IEEE) flag */
+#define FPSF_I         (1 << 12)               /* inexact (IEEE) flag) */
+
+/* floating-point rounding control: */
+#define FPRC_NEAREST   0x0
+#define FPRC_NEGINF    0x1
+#define FPRC_POSINF    0x2
+#define FPRC_TRUNC     0x3
+
+#define FPSF_DEFAULT   (FPSF_PC (0x3) | FPSF_RC (FPRC_NEAREST))
+
+/* This default value is the same as HP-UX uses.  Don't change it
+   without a very good reason.  */
+#define FPSR_DEFAULT   (FPSR_TRAP_VD | FPSR_TRAP_DD | FPSR_TRAP_ZD     \
+                        | FPSR_TRAP_OD | FPSR_TRAP_UD | FPSR_TRAP_ID   \
+                        | FPSR_S0 (FPSF_DEFAULT)                       \
+                        | FPSR_S1 (FPSF_DEFAULT | FPSF_TD | FPSF_WRE)  \
+                        | FPSR_S2 (FPSF_DEFAULT | FPSF_TD)             \
+                        | FPSR_S3 (FPSF_DEFAULT | FPSF_TD))
+
+# ifndef __ASSEMBLY__
+
+struct ia64_fpreg {
+       union {
+               unsigned long bits[2];
+               long double __dummy;    /* force 16-byte alignment */
+       } u;
+};
+
+# endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_FPU_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/hardirq.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/hardirq.h  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,38 @@
+#ifndef _ASM_IA64_HARDIRQ_H
+#define _ASM_IA64_HARDIRQ_H
+
+/*
+ * Modified 1998-2002, 2004 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <linux/config.h>
+
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+#include <asm/processor.h>
+
+/*
+ * No irq_cpustat_t for IA-64.  The data is held in the per-CPU data structure.
+ */
+
+#define __ARCH_IRQ_STAT        1
+
+#define local_softirq_pending()                (local_cpu_data->softirq_pending)
+
+#define HARDIRQ_BITS   14
+
+/*
+ * The hardirq mask has to be large enough to have space for potentially all IRQ sources
+ * in the system nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+
+extern void __iomem *ipi_base_addr;
+
+void ack_bad_irq(unsigned int irq);
+
+#endif /* _ASM_IA64_HARDIRQ_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/hdreg.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/hdreg.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,14 @@
+/*
+ *  linux/include/asm-ia64/hdreg.h
+ *
+ *  Copyright (C) 1994-1996  Linus Torvalds & authors
+ */
+
+#warning this file is obsolete, please do not use it
+
+#ifndef __ASM_IA64_HDREG_H
+#define __ASM_IA64_HDREG_H
+
+typedef unsigned short ide_ioreg_t;
+
+#endif /* __ASM_IA64_HDREG_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/hw_irq.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/hw_irq.h   Tue Aug  2 23:59:09 2005
@@ -0,0 +1,144 @@
+#ifndef _ASM_IA64_HW_IRQ_H
+#define _ASM_IA64_HW_IRQ_H
+
+/*
+ * Copyright (C) 2001-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/profile.h>
+
+#include <asm/machvec.h>
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+
+typedef u8 ia64_vector;
+
+/*
+ * 0 special
+ *
+ * 1,3-14 are reserved from firmware
+ *
+ * 16-255 (vectored external interrupts) are available
+ *
+ * 15 spurious interrupt (see IVR)
+ *
+ * 16 lowest priority, 255 highest priority
+ *
+ * 15 classes of 16 interrupts each.
+ */
+#define IA64_MIN_VECTORED_IRQ           16
+#define IA64_MAX_VECTORED_IRQ          255
+#define IA64_NUM_VECTORS               256
+
+#define AUTO_ASSIGN                    -1
+
+#define IA64_SPURIOUS_INT_VECTOR       0x0f
+
+/*
+ * Vectors 0x10-0x1f are used for low priority interrupts, e.g. CMCI.
+ */
+#define IA64_CPEP_VECTOR               0x1c    /* corrected platform error polling vector */
+#define IA64_CMCP_VECTOR               0x1d    /* corrected machine-check polling vector */
+#define IA64_CPE_VECTOR                        0x1e    /* corrected platform error interrupt vector */
+#define IA64_CMC_VECTOR                        0x1f    /* corrected machine-check interrupt vector */
+/*
+ * Vectors 0x20-0x2f are reserved for legacy ISA IRQs.
+ */
+#define IA64_FIRST_DEVICE_VECTOR       0x30
+#define IA64_LAST_DEVICE_VECTOR                0xe7
+#define IA64_NUM_DEVICE_VECTORS                (IA64_LAST_DEVICE_VECTOR - IA64_FIRST_DEVICE_VECTOR + 1)
+
+#define IA64_MCA_RENDEZ_VECTOR         0xe8    /* MCA rendez interrupt */
+#define IA64_PERFMON_VECTOR            0xee    /* performance monitor interrupt vector */
+#define IA64_TIMER_VECTOR              0xef    /* use highest-prio group 15 interrupt for timer */
+#define        IA64_MCA_WAKEUP_VECTOR          0xf0    /* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */
+#define IA64_IPI_RESCHEDULE            0xfd    /* SMP reschedule */
+#define IA64_IPI_VECTOR                        0xfe    /* inter-processor interrupt vector */
+
+/* Used for encoding redirected irqs */
+
+#define IA64_IRQ_REDIRECTED            (1 << 31)
+
+/* IA64 inter-cpu interrupt related definitions */
+
+#define IA64_IPI_DEFAULT_BASE_ADDR     0xfee00000
+
+/* Delivery modes for inter-cpu interrupts */
+enum {
+        IA64_IPI_DM_INT =       0x0,    /* pend an external interrupt */
+        IA64_IPI_DM_PMI =       0x2,    /* pend a PMI */
+        IA64_IPI_DM_NMI =       0x4,    /* pend an NMI (vector 2) */
+        IA64_IPI_DM_INIT =      0x5,    /* pend an INIT interrupt */
+        IA64_IPI_DM_EXTINT =    0x7,    /* pend an 8259-compatible interrupt. */
+};
+
+extern __u8 isa_irq_to_vector_map[16];
+#define isa_irq_to_vector(x)   isa_irq_to_vector_map[(x)]
+
+extern struct hw_interrupt_type irq_type_ia64_lsapic;  /* CPU-internal interrupt controller */
+
+extern int assign_irq_vector (int irq);        /* allocate a free vector */
+extern void free_irq_vector (int vector);
+extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
+extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
+
+static inline void
+hw_resend_irq (struct hw_interrupt_type *h, unsigned int vector)
+{
+       platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
+}
+
+/*
+ * Default implementations for the irq-descriptor API:
+ */
+
+extern irq_desc_t irq_desc[NR_IRQS];
+
+#ifndef CONFIG_IA64_GENERIC
+static inline unsigned int
+__ia64_local_vector_to_irq (ia64_vector vec)
+{
+       return (unsigned int) vec;
+}
+#endif
+
+/*
+ * Next follows the irq descriptor interface.  On IA-64, each CPU supports 256 interrupt
+ * vectors.  On smaller systems, there is a one-to-one correspondence between interrupt
+ * vectors and the Linux irq numbers.  However, larger systems may have multiple interrupt
+ * domains meaning that the translation from vector number to irq number depends on the
+ * interrupt domain that a CPU belongs to.  This API abstracts such platform-dependent
+ * differences and provides a uniform means to translate between vector and irq numbers
+ * and to obtain the irq descriptor for a given irq number.
+ */
+
+/* Return a pointer to the irq descriptor for IRQ.  */
+static inline irq_desc_t *
+irq_descp (int irq)
+{
+       return irq_desc + irq;
+}
+
+/* Extract the IA-64 vector that corresponds to IRQ.  */
+static inline ia64_vector
+irq_to_vector (int irq)
+{
+       return (ia64_vector) irq;
+}
+
+/*
+ * Convert the local IA-64 vector to the corresponding irq number.  This translation is
+ * done in the context of the interrupt domain that the currently executing CPU belongs
+ * to.
+ */
+static inline unsigned int
+local_vector_to_irq (ia64_vector vec)
+{
+       return platform_local_vector_to_irq(vec);
+}
+
+#endif /* _ASM_IA64_HW_IRQ_H */
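
As the comment above says, vector and irq numbers coincide on small configurations and only diverge when multiple interrupt domains exist. A minimal sketch of the round trip, assuming a non-generic build where __ia64_local_vector_to_irq() is the identity mapping; demo_vector_roundtrip is illustrative only.

/* Returns non-zero when vector -> irq -> vector is the identity. */
static int demo_vector_roundtrip(int irq)
{
        ia64_vector vec = irq_to_vector(irq);

        return __ia64_local_vector_to_irq(vec) == (unsigned int) irq;
}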
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/intrinsics.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/intrinsics.h       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,181 @@
+#ifndef _ASM_IA64_INTRINSICS_H
+#define _ASM_IA64_INTRINSICS_H
+
+/*
+ * Compiler-dependent intrinsics.
+ *
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#ifndef __ASSEMBLY__
+#include <linux/config.h>
+
+/* include compiler specific intrinsics */
+#include <asm/ia64regs.h>
+#ifdef __INTEL_COMPILER
+# include <asm/intel_intrin.h>
+#else
+# include <asm/gcc_intrin.h>
+#endif
+
+/*
+ * Force an unresolved reference if someone tries to use
+ * ia64_fetch_and_add() with a bad value.
+ */
+extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
+extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
+
+#define IA64_FETCHADD(tmp,v,n,sz,sem)                                          \
+({                                                                             \
+       switch (sz) {                                                           \
+             case 4:                                                           \
+               tmp = ia64_fetchadd4_##sem((unsigned int *) v, n);              \
+               break;                                                          \
+                                                                               \
+             case 8:                                                           \
+               tmp = ia64_fetchadd8_##sem((unsigned long *) v, n);             \
+               break;                                                          \
+                                                                               \
+             default:                                                          \
+               __bad_size_for_ia64_fetch_and_add();                            \
+       }                                                                       \
+})
+
+#define ia64_fetchadd(i,v,sem)                                                         \
+({                                                                                     \
+       __u64 _tmp;                                                                     \
+       volatile __typeof__(*(v)) *_v = (v);                                            \
+       /* Can't use a switch () here: gcc isn't always smart enough for that... */     \
+       if ((i) == -16)                                                                 \
+               IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem);                        \
+       else if ((i) == -8)                                                             \
+               IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem);                         \
+       else if ((i) == -4)                                                             \
+               IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem);                         \
+       else if ((i) == -1)                                                             \
+               IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem);                         \
+       else if ((i) == 1)                                                              \
+               IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem);                          \
+       else if ((i) == 4)                                                              \
+               IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem);                          \
+       else if ((i) == 8)                                                              \
+               IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem);                          \
+       else if ((i) == 16)                                                             \
+               IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem);                         \
+       else                                                                            \
+               _tmp = __bad_increment_for_ia64_fetch_and_add();                        \
+       (__typeof__(*(v))) (_tmp);      /* return old value */                          \
+})
+
+#define ia64_fetch_and_add(i,v)        (ia64_fetchadd(i, v, rel) + (i)) /* return new value */
+
+/*
+ * This function doesn't exist, so you'll get a linker error if
+ * something tries to do an invalid xchg().
+ */
+extern void ia64_xchg_called_with_bad_pointer (void);
+
+#define __xchg(x,ptr,size)                                             \
+({                                                                     \
+       unsigned long __xchg_result;                                    \
+                                                                       \
+       switch (size) {                                                 \
+             case 1:                                                   \
+               __xchg_result = ia64_xchg1((__u8 *)ptr, x);             \
+               break;                                                  \
+                                                                       \
+             case 2:                                                   \
+               __xchg_result = ia64_xchg2((__u16 *)ptr, x);            \
+               break;                                                  \
+                                                                       \
+             case 4:                                                   \
+               __xchg_result = ia64_xchg4((__u32 *)ptr, x);            \
+               break;                                                  \
+                                                                       \
+             case 8:                                                   \
+               __xchg_result = ia64_xchg8((__u64 *)ptr, x);            \
+               break;                                                  \
+             default:                                                  \
+               ia64_xchg_called_with_bad_pointer();                    \
+       }                                                               \
+       __xchg_result;                                                  \
+})
+
+#define xchg(ptr,x)                                                         \
+  ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg().
+ */
+extern long ia64_cmpxchg_called_with_bad_pointer (void);
+
+#define ia64_cmpxchg(sem,ptr,old,new,size)                                             \
+({                                                                                     \
+       __u64 _o_, _r_;                                                                 \
+                                                                                       \
+       switch (size) {                                                                 \
+             case 1: _o_ = (__u8 ) (long) (old); break;                                \
+             case 2: _o_ = (__u16) (long) (old); break;                                \
+             case 4: _o_ = (__u32) (long) (old); break;                                \
+             case 8: _o_ = (__u64) (long) (old); break;                                \
+             default: break;                                                           \
+       }                                                                               \
+       switch (size) {                                                                 \
+             case 1:                                                                   \
+               _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_);                      \
+               break;                                                                  \
+                                                                                       \
+             case 2:                                                                   \
+               _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_);                     \
+               break;                                                                  \
+                                                                                       \
+             case 4:                                                                   \
+               _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_);                     \
+               break;                                                                  \
+                                                                                       \
+             case 8:                                                                   \
+               _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_);                     \
+               break;                                                                  \
+                                                                                       \
+             default:                                                                  \
+               _r_ = ia64_cmpxchg_called_with_bad_pointer();                           \
+               break;                                                                  \
+       }                                                                               \
+       (__typeof__(old)) _r_;                                                          \
+})
+
+#define cmpxchg_acq(ptr,o,n)   ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
+#define cmpxchg_rel(ptr,o,n)   ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
+
+/* for compatibility with other platforms: */
+#define cmpxchg(ptr,o,n)       cmpxchg_acq(ptr,o,n)
+
+#ifdef CONFIG_IA64_DEBUG_CMPXCHG
+# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128;
+# define CMPXCHG_BUGCHECK(v)                                                   \
+  do {                                                                         \
+       if (_cmpxchg_bugcheck_count-- <= 0) {                                   \
+               void *ip;                                                       \
+               extern int printk(const char *fmt, ...);                        \
+               ip = (void *) ia64_getreg(_IA64_REG_IP);                        \
+               printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v));  \
+               break;                                                          \
+       }                                                                       \
+  } while (0)
+#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
+# define CMPXCHG_BUGCHECK_DECL
+# define CMPXCHG_BUGCHECK(v)
+#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
+
+#endif
+#endif /* _ASM_IA64_INTRINSICS_H */
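
cmpxchg_acq() above returns the value that was actually in memory, so success is detected by comparing the return with the expected old value. A sketch of the usual compare-and-swap retry loop built on it (demo_atomic_add is illustrative, not part of the patch):

static inline unsigned long
demo_atomic_add (volatile unsigned long *ctr, unsigned long delta)
{
        unsigned long o, n;

        do {
                o = *ctr;                       /* snapshot the current value */
                n = o + delta;
        } while (cmpxchg_acq(ctr, o, n) != o);  /* retry if someone raced us */

        return n;
}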
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/ioctl.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/ioctl.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,77 @@
+#ifndef _ASM_IA64_IOCTL_H
+#define _ASM_IA64_IOCTL_H
+
+/*
+ * Based on <asm-i386/ioctl.h>.
+ *
+ * Modified 1998, 1999
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>, Hewlett-Packard Co
+ */
+
+/* ioctl command encoding: 32 bits total, command in lower 16 bits,
+ * size of the parameter structure in the lower 14 bits of the
+ * upper 16 bits.
+ * Encoding the size of the parameter structure in the ioctl request
+ * is useful for catching programs compiled with old versions
+ * and to avoid overwriting user space outside the user buffer area.
+ * The highest 2 bits are reserved for indicating the ``access mode''.
+ * NOTE: This limits the max parameter size to 16kB -1 !
+ */
+
+/*
+ * The following is for compatibility across the various Linux
+ * platforms.  The ia64 ioctl numbering scheme doesn't really enforce
+ * a type field.  De facto, however, the top 8 bits of the lower 16
+ * bits are indeed used as a type field, so we might just as well make
+ * this explicit here.  Please be sure to use the decoding macros
+ * below from now on.
+ */
+#define _IOC_NRBITS    8
+#define _IOC_TYPEBITS  8
+#define _IOC_SIZEBITS  14
+#define _IOC_DIRBITS   2
+
+#define _IOC_NRMASK    ((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK  ((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK  ((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK   ((1 << _IOC_DIRBITS)-1)
+
+#define _IOC_NRSHIFT   0
+#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT  (_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+/*
+ * Direction bits.
+ */
+#define _IOC_NONE      0U
+#define _IOC_WRITE     1U
+#define _IOC_READ      2U
+
+#define _IOC(dir,type,nr,size) \
+       (((dir)  << _IOC_DIRSHIFT) | \
+        ((type) << _IOC_TYPESHIFT) | \
+        ((nr)   << _IOC_NRSHIFT) | \
+        ((size) << _IOC_SIZESHIFT))
+
+/* used to create numbers */
+#define _IO(type,nr)           _IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,size)     _IOC(_IOC_READ,(type),(nr),sizeof(size))
+#define _IOW(type,nr,size)     _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOWR(type,nr,size)    _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+
+/* used to decode ioctl numbers.. */
+#define _IOC_DIR(nr)           (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
+#define _IOC_TYPE(nr)          (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
+#define _IOC_NR(nr)            (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+#define _IOC_SIZE(nr)          (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+
+/* ...and for the drivers/sound files... */
+
+#define IOC_IN         (_IOC_WRITE << _IOC_DIRSHIFT)
+#define IOC_OUT                (_IOC_READ << _IOC_DIRSHIFT)
+#define IOC_INOUT      ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
+#define IOCSIZE_MASK   (_IOC_SIZEMASK << _IOC_SIZESHIFT)
+#define IOCSIZE_SHIFT  (_IOC_SIZESHIFT)
+
+#endif /* _ASM_IA64_IOCTL_H */
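
The macros above pack direction, type, number and argument size into one 32-bit command. A sketch of how a driver would define and decode such a number; the 'D' type letter, the command number and the struct are all hypothetical.

struct demo_args {
        int in;
        int out;
};

#define DEMO_IOC_XFER  _IOWR('D', 0x01, struct demo_args)

/* Decoding the hypothetical command above with the macros from the header:
 *   _IOC_DIR(DEMO_IOC_XFER)  == (_IOC_READ | _IOC_WRITE)
 *   _IOC_TYPE(DEMO_IOC_XFER) == 'D'
 *   _IOC_NR(DEMO_IOC_XFER)   == 0x01
 *   _IOC_SIZE(DEMO_IOC_XFER) == sizeof(struct demo_args)
 */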
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/irq.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/irq.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,43 @@
+#ifndef _ASM_IA64_IRQ_H
+#define _ASM_IA64_IRQ_H
+
+/*
+ * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *
+ * 11/24/98    S.Eranian       updated TIMER_IRQ and irq_canonicalize
+ * 01/20/99    S.Eranian       added keyboard interrupt
+ * 02/29/00     D.Mosberger    moved most things into hw_irq.h
+ */
+
+#define NR_IRQS                256
+#define NR_IRQ_VECTORS NR_IRQS
+
+static __inline__ int
+irq_canonicalize (int irq)
+{
+       /*
+        * We do the legacy thing here of pretending that irqs < 16
+        * are 8259 irqs.  This really shouldn't be necessary at all,
+        * but we keep it here as serial.c still uses it...
+        */
+       return ((irq == 2) ? 9 : irq);
+}
+
+extern void disable_irq (unsigned int);
+extern void disable_irq_nosync (unsigned int);
+extern void enable_irq (unsigned int);
+extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
+
+#ifdef CONFIG_SMP
+extern void move_irq(int irq);
+#else
+#define move_irq(irq)
+#endif
+
+struct irqaction;
+struct pt_regs;
+int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
+
+#endif /* _ASM_IA64_IRQ_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/linkage.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/linkage.h  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,6 @@
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define asmlinkage CPP_ASMLINKAGE __attribute__((syscall_linkage))
+
+#endif
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/machvec.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/machvec.h  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,390 @@
+/*
+ * Machine vector for IA-64.
+ *
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@xxxxxxxxxxxx>
+ * Copyright (C) Vijay Chander <vijay@xxxxxxxxxxxx>
+ * Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co.
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+#ifndef _ASM_IA64_MACHVEC_H
+#define _ASM_IA64_MACHVEC_H
+
+#include <linux/config.h>
+#include <linux/types.h>
+
+/* forward declarations: */
+struct device;
+struct pt_regs;
+struct scatterlist;
+struct page;
+struct mm_struct;
+struct pci_bus;
+
+typedef void ia64_mv_setup_t (char **);
+typedef void ia64_mv_cpu_init_t (void);
+typedef void ia64_mv_irq_init_t (void);
+typedef void ia64_mv_send_ipi_t (int, int, int, int);
+typedef void ia64_mv_timer_interrupt_t (int, void *, struct pt_regs *);
+typedef void ia64_mv_global_tlb_purge_t (unsigned long, unsigned long, unsigned long);
+typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
+typedef unsigned int ia64_mv_local_vector_to_irq (u8);
+typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
+typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
+                                      u8 size);
+typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
+                                       u8 size);
+
+/* DMA-mapping interface: */
+typedef void ia64_mv_dma_init (void);
+typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, int);
+typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
+typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
+typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
+typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
+typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
+typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
+typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
+typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
+typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
+typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
+typedef int ia64_mv_dma_supported (struct device *, u64);
+
+/*
+ * WARNING: The legacy I/O space is _architected_.  Platforms are
+ * expected to follow this architected model (see Section 10.7 in the
+ * IA-64 Architecture Software Developer's Manual).  Unfortunately,
+ * some broken machines do not follow that model, which is why we have
+ * to make the inX/outX operations part of the machine vector.
+ * Platform designers should follow the architected model whenever
+ * possible.
+ */
+typedef unsigned int ia64_mv_inb_t (unsigned long);
+typedef unsigned int ia64_mv_inw_t (unsigned long);
+typedef unsigned int ia64_mv_inl_t (unsigned long);
+typedef void ia64_mv_outb_t (unsigned char, unsigned long);
+typedef void ia64_mv_outw_t (unsigned short, unsigned long);
+typedef void ia64_mv_outl_t (unsigned int, unsigned long);
+typedef void ia64_mv_mmiowb_t (void);
+typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *);
+typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *);
+typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *);
+typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *);
+typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
+typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
+typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
+typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
+
+static inline void
+machvec_noop (void)
+{
+}
+
+static inline void
+machvec_noop_mm (struct mm_struct *mm)
+{
+}
+
+extern void machvec_setup (char **);
+extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
+extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
+extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
+extern void machvec_tlb_migrate_finish (struct mm_struct *);
+
+# if defined (CONFIG_IA64_HP_SIM)
+#  include <asm/machvec_hpsim.h>
+# elif defined (CONFIG_IA64_DIG)
+#  include <asm/machvec_dig.h>
+# elif defined (CONFIG_IA64_HP_ZX1)
+#  include <asm/machvec_hpzx1.h>
+# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
+#  include <asm/machvec_hpzx1_swiotlb.h>
+# elif defined (CONFIG_IA64_SGI_SN2)
+#  include <asm/machvec_sn2.h>
+# elif defined (CONFIG_IA64_GENERIC)
+
+# ifdef MACHVEC_PLATFORM_HEADER
+#  include MACHVEC_PLATFORM_HEADER
+# else
+#  define platform_name                ia64_mv.name
+#  define platform_setup       ia64_mv.setup
+#  define platform_cpu_init    ia64_mv.cpu_init
+#  define platform_irq_init    ia64_mv.irq_init
+#  define platform_send_ipi    ia64_mv.send_ipi
+#  define platform_timer_interrupt     ia64_mv.timer_interrupt
+#  define platform_global_tlb_purge    ia64_mv.global_tlb_purge
+#  define platform_tlb_migrate_finish  ia64_mv.tlb_migrate_finish
+#  define platform_dma_init            ia64_mv.dma_init
+#  define platform_dma_alloc_coherent  ia64_mv.dma_alloc_coherent
+#  define platform_dma_free_coherent   ia64_mv.dma_free_coherent
+#  define platform_dma_map_single      ia64_mv.dma_map_single
+#  define platform_dma_unmap_single    ia64_mv.dma_unmap_single
+#  define platform_dma_map_sg          ia64_mv.dma_map_sg
+#  define platform_dma_unmap_sg                ia64_mv.dma_unmap_sg
+#  define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu
+#  define platform_dma_sync_sg_for_cpu ia64_mv.dma_sync_sg_for_cpu
+#  define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device
+#  define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device
+#  define platform_dma_mapping_error           ia64_mv.dma_mapping_error
+#  define platform_dma_supported       ia64_mv.dma_supported
+#  define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
+#  define platform_pci_get_legacy_mem  ia64_mv.pci_get_legacy_mem
+#  define platform_pci_legacy_read     ia64_mv.pci_legacy_read
+#  define platform_pci_legacy_write    ia64_mv.pci_legacy_write
+#  define platform_inb         ia64_mv.inb
+#  define platform_inw         ia64_mv.inw
+#  define platform_inl         ia64_mv.inl
+#  define platform_outb                ia64_mv.outb
+#  define platform_outw                ia64_mv.outw
+#  define platform_outl                ia64_mv.outl
+#  define platform_mmiowb      ia64_mv.mmiowb
+#  define platform_readb        ia64_mv.readb
+#  define platform_readw        ia64_mv.readw
+#  define platform_readl        ia64_mv.readl
+#  define platform_readq        ia64_mv.readq
+#  define platform_readb_relaxed        ia64_mv.readb_relaxed
+#  define platform_readw_relaxed        ia64_mv.readw_relaxed
+#  define platform_readl_relaxed        ia64_mv.readl_relaxed
+#  define platform_readq_relaxed        ia64_mv.readq_relaxed
+# endif
+
+/* __attribute__((__aligned__(16))) is required to make size of the
+ * structure multiple of 16 bytes.
+ * This will fill up the holes created because of section 3.3.1 in
+ * Software Conventions guide.
+ */
+struct ia64_machine_vector {
+       const char *name;
+       ia64_mv_setup_t *setup;
+       ia64_mv_cpu_init_t *cpu_init;
+       ia64_mv_irq_init_t *irq_init;
+       ia64_mv_send_ipi_t *send_ipi;
+       ia64_mv_timer_interrupt_t *timer_interrupt;
+       ia64_mv_global_tlb_purge_t *global_tlb_purge;
+       ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
+       ia64_mv_dma_init *dma_init;
+       ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
+       ia64_mv_dma_free_coherent *dma_free_coherent;
+       ia64_mv_dma_map_single *dma_map_single;
+       ia64_mv_dma_unmap_single *dma_unmap_single;
+       ia64_mv_dma_map_sg *dma_map_sg;
+       ia64_mv_dma_unmap_sg *dma_unmap_sg;
+       ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
+       ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
+       ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
+       ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
+       ia64_mv_dma_mapping_error *dma_mapping_error;
+       ia64_mv_dma_supported *dma_supported;
+       ia64_mv_local_vector_to_irq *local_vector_to_irq;
+       ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
+       ia64_mv_pci_legacy_read_t *pci_legacy_read;
+       ia64_mv_pci_legacy_write_t *pci_legacy_write;
+       ia64_mv_inb_t *inb;
+       ia64_mv_inw_t *inw;
+       ia64_mv_inl_t *inl;
+       ia64_mv_outb_t *outb;
+       ia64_mv_outw_t *outw;
+       ia64_mv_outl_t *outl;
+       ia64_mv_mmiowb_t *mmiowb;
+       ia64_mv_readb_t *readb;
+       ia64_mv_readw_t *readw;
+       ia64_mv_readl_t *readl;
+       ia64_mv_readq_t *readq;
+       ia64_mv_readb_relaxed_t *readb_relaxed;
+       ia64_mv_readw_relaxed_t *readw_relaxed;
+       ia64_mv_readl_relaxed_t *readl_relaxed;
+       ia64_mv_readq_relaxed_t *readq_relaxed;
+} __attribute__((__aligned__(16))); /* align attrib? see above comment */
+
+#define MACHVEC_INIT(name)                     \
+{                                              \
+       #name,                                  \
+       platform_setup,                         \
+       platform_cpu_init,                      \
+       platform_irq_init,                      \
+       platform_send_ipi,                      \
+       platform_timer_interrupt,               \
+       platform_global_tlb_purge,              \
+       platform_tlb_migrate_finish,            \
+       platform_dma_init,                      \
+       platform_dma_alloc_coherent,            \
+       platform_dma_free_coherent,             \
+       platform_dma_map_single,                \
+       platform_dma_unmap_single,              \
+       platform_dma_map_sg,                    \
+       platform_dma_unmap_sg,                  \
+       platform_dma_sync_single_for_cpu,       \
+       platform_dma_sync_sg_for_cpu,           \
+       platform_dma_sync_single_for_device,    \
+       platform_dma_sync_sg_for_device,        \
+       platform_dma_mapping_error,                     \
+       platform_dma_supported,                 \
+       platform_local_vector_to_irq,           \
+       platform_pci_get_legacy_mem,            \
+       platform_pci_legacy_read,               \
+       platform_pci_legacy_write,              \
+       platform_inb,                           \
+       platform_inw,                           \
+       platform_inl,                           \
+       platform_outb,                          \
+       platform_outw,                          \
+       platform_outl,                          \
+       platform_mmiowb,                        \
+       platform_readb,                         \
+       platform_readw,                         \
+       platform_readl,                         \
+       platform_readq,                         \
+       platform_readb_relaxed,                 \
+       platform_readw_relaxed,                 \
+       platform_readl_relaxed,                 \
+       platform_readq_relaxed,                 \
+}
+
+extern struct ia64_machine_vector ia64_mv;
+extern void machvec_init (const char *name);
+
+# else
+#  error Unknown configuration.  Update asm-ia64/machvec.h.
+# endif /* CONFIG_IA64_GENERIC */
+
+/*
+ * Declare default routines which aren't declared anywhere else:
+ */
+extern ia64_mv_dma_init                        swiotlb_init;
+extern ia64_mv_dma_alloc_coherent      swiotlb_alloc_coherent;
+extern ia64_mv_dma_free_coherent       swiotlb_free_coherent;
+extern ia64_mv_dma_map_single          swiotlb_map_single;
+extern ia64_mv_dma_unmap_single                swiotlb_unmap_single;
+extern ia64_mv_dma_map_sg              swiotlb_map_sg;
+extern ia64_mv_dma_unmap_sg            swiotlb_unmap_sg;
+extern ia64_mv_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu;
+extern ia64_mv_dma_sync_sg_for_cpu     swiotlb_sync_sg_for_cpu;
+extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
+extern ia64_mv_dma_sync_sg_for_device  swiotlb_sync_sg_for_device;
+extern ia64_mv_dma_mapping_error       swiotlb_dma_mapping_error;
+extern ia64_mv_dma_supported           swiotlb_dma_supported;
+
+/*
+ * Define default versions so we can extend machvec for new platforms without having
+ * to update the machvec files for all existing platforms.
+ */
+#ifndef platform_setup
+# define platform_setup                        machvec_setup
+#endif
+#ifndef platform_cpu_init
+# define platform_cpu_init             machvec_noop
+#endif
+#ifndef platform_irq_init
+# define platform_irq_init             machvec_noop
+#endif
+
+#ifndef platform_send_ipi
+# define platform_send_ipi             ia64_send_ipi   /* default to architected version */
+#endif
+#ifndef platform_timer_interrupt
+# define platform_timer_interrupt      machvec_timer_interrupt
+#endif
+#ifndef platform_global_tlb_purge
+# define platform_global_tlb_purge     ia64_global_tlb_purge /* default to architected version */
+#endif
+#ifndef platform_tlb_migrate_finish
+# define platform_tlb_migrate_finish   machvec_noop_mm
+#endif
+#ifndef platform_dma_init
+# define platform_dma_init             swiotlb_init
+#endif
+#ifndef platform_dma_alloc_coherent
+# define platform_dma_alloc_coherent   swiotlb_alloc_coherent
+#endif
+#ifndef platform_dma_free_coherent
+# define platform_dma_free_coherent    swiotlb_free_coherent
+#endif
+#ifndef platform_dma_map_single
+# define platform_dma_map_single       swiotlb_map_single
+#endif
+#ifndef platform_dma_unmap_single
+# define platform_dma_unmap_single     swiotlb_unmap_single
+#endif
+#ifndef platform_dma_map_sg
+# define platform_dma_map_sg           swiotlb_map_sg
+#endif
+#ifndef platform_dma_unmap_sg
+# define platform_dma_unmap_sg         swiotlb_unmap_sg
+#endif
+#ifndef platform_dma_sync_single_for_cpu
+# define platform_dma_sync_single_for_cpu      swiotlb_sync_single_for_cpu
+#endif
+#ifndef platform_dma_sync_sg_for_cpu
+# define platform_dma_sync_sg_for_cpu          swiotlb_sync_sg_for_cpu
+#endif
+#ifndef platform_dma_sync_single_for_device
+# define platform_dma_sync_single_for_device   swiotlb_sync_single_for_device
+#endif
+#ifndef platform_dma_sync_sg_for_device
+# define platform_dma_sync_sg_for_device       swiotlb_sync_sg_for_device
+#endif
+#ifndef platform_dma_mapping_error
+# define platform_dma_mapping_error            swiotlb_dma_mapping_error
+#endif
+#ifndef platform_dma_supported
+# define  platform_dma_supported       swiotlb_dma_supported
+#endif
+#ifndef platform_local_vector_to_irq
+# define platform_local_vector_to_irq  __ia64_local_vector_to_irq
+#endif
+#ifndef platform_pci_get_legacy_mem
+# define platform_pci_get_legacy_mem   ia64_pci_get_legacy_mem
+#endif
+#ifndef platform_pci_legacy_read
+# define platform_pci_legacy_read      ia64_pci_legacy_read
+#endif
+#ifndef platform_pci_legacy_write
+# define platform_pci_legacy_write     ia64_pci_legacy_write
+#endif
+#ifndef platform_inb
+# define platform_inb          __ia64_inb
+#endif
+#ifndef platform_inw
+# define platform_inw          __ia64_inw
+#endif
+#ifndef platform_inl
+# define platform_inl          __ia64_inl
+#endif
+#ifndef platform_outb
+# define platform_outb         __ia64_outb
+#endif
+#ifndef platform_outw
+# define platform_outw         __ia64_outw
+#endif
+#ifndef platform_outl
+# define platform_outl         __ia64_outl
+#endif
+#ifndef platform_mmiowb
+# define platform_mmiowb       __ia64_mmiowb
+#endif
+#ifndef platform_readb
+# define platform_readb                __ia64_readb
+#endif
+#ifndef platform_readw
+# define platform_readw                __ia64_readw
+#endif
+#ifndef platform_readl
+# define platform_readl                __ia64_readl
+#endif
+#ifndef platform_readq
+# define platform_readq                __ia64_readq
+#endif
+#ifndef platform_readb_relaxed
+# define platform_readb_relaxed        __ia64_readb_relaxed
+#endif
+#ifndef platform_readw_relaxed
+# define platform_readw_relaxed        __ia64_readw_relaxed
+#endif
+#ifndef platform_readl_relaxed
+# define platform_readl_relaxed        __ia64_readl_relaxed
+#endif
+#ifndef platform_readq_relaxed
+# define platform_readq_relaxed        __ia64_readq_relaxed
+#endif
+
+#endif /* _ASM_IA64_MACHVEC_H */
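For illustration only, a minimal sketch of how the machvec defaults above are meant to be consumed by a platform header; the my_plat_* names are hypothetical and not part of this changeset (machvec_hpsim.h just below is the real in-tree example):

/* Hypothetical machvec_myplat.h: override only the hooks the platform needs;
 * every hook left undefined falls back to the defaults selected above. */
extern ia64_mv_setup_t    my_plat_setup;      /* assumed platform routine */
extern ia64_mv_irq_init_t my_plat_irq_init;   /* assumed platform routine */

#define platform_name      "myplat"
#define platform_setup     my_plat_setup
#define platform_irq_init  my_plat_irq_init
/* platform_send_ipi, platform_dma_*, platform_inb, ... keep the
 * architected/swiotlb defaults because they are not #defined here. */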
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/machvec_hpsim.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/machvec_hpsim.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,18 @@
+#ifndef _ASM_IA64_MACHVEC_HPSIM_h
+#define _ASM_IA64_MACHVEC_HPSIM_h
+
+extern ia64_mv_setup_t hpsim_setup;
+extern ia64_mv_irq_init_t hpsim_irq_init;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure.  When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name          "hpsim"
+#define platform_setup         hpsim_setup
+#define platform_irq_init      hpsim_irq_init
+
+#endif /* _ASM_IA64_MACHVEC_HPSIM_h */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/mca.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/mca.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,132 @@
+/*
+ * File:       mca.h
+ * Purpose:    Machine check handling specific defines
+ *
+ * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
+ * Copyright (C) Vijay Chander (vijay@xxxxxxxxxxxx)
+ * Copyright (C) Srinivasa Thirumalachar (sprasad@xxxxxxxxxxxx)
+ * Copyright (C) Russ Anderson (rja@xxxxxxx)
+ */
+
+#ifndef _ASM_IA64_MCA_H
+#define _ASM_IA64_MCA_H
+
+#define IA64_MCA_STACK_SIZE    8192
+
+#if !defined(__ASSEMBLY__)
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+
+#include <asm/param.h>
+#include <asm/sal.h>
+#include <asm/processor.h>
+#include <asm/mca_asm.h>
+
+#define IA64_MCA_RENDEZ_TIMEOUT                (20 * 1000)     /* value in milliseconds - 20 seconds */
+
+typedef struct ia64_fptr {
+       unsigned long fp;
+       unsigned long gp;
+} ia64_fptr_t;
+
+typedef union cmcv_reg_u {
+       u64     cmcv_regval;
+       struct  {
+               u64     cmcr_vector             : 8;
+               u64     cmcr_reserved1          : 4;
+               u64     cmcr_ignored1           : 1;
+               u64     cmcr_reserved2          : 3;
+               u64     cmcr_mask               : 1;
+               u64     cmcr_ignored2           : 47;
+       } cmcv_reg_s;
+
+} cmcv_reg_t;
+
+#define cmcv_mask              cmcv_reg_s.cmcr_mask
+#define cmcv_vector            cmcv_reg_s.cmcr_vector
+
+enum {
+       IA64_MCA_RENDEZ_CHECKIN_NOTDONE =       0x0,
+       IA64_MCA_RENDEZ_CHECKIN_DONE    =       0x1
+};
+
+/* Information maintained by the MC infrastructure */
+typedef struct ia64_mc_info_s {
+       u64             imi_mca_handler;
+       size_t          imi_mca_handler_size;
+       u64             imi_monarch_init_handler;
+       size_t          imi_monarch_init_handler_size;
+       u64             imi_slave_init_handler;
+       size_t          imi_slave_init_handler_size;
+       u8              imi_rendez_checkin[NR_CPUS];
+
+} ia64_mc_info_t;
+
+typedef struct ia64_mca_sal_to_os_state_s {
+       u64             imsto_os_gp;            /* GP of the os registered with the SAL */
+       u64             imsto_pal_proc;         /* PAL_PROC entry point - physical addr */
+       u64             imsto_sal_proc;         /* SAL_PROC entry point - physical addr */
+       u64             imsto_sal_gp;           /* GP of the SAL - physical */
+       u64             imsto_rendez_state;     /* Rendez state information */
+       u64             imsto_sal_check_ra;     /* Return address in SAL_CHECK while going
+                                                * back to SAL from OS after MCA handling.
+                                                */
+       u64             pal_min_state;          /* from PAL in r17 */
+       u64             proc_state_param;       /* from PAL in r18. See SDV 2:268 11.3.2.1 */
+} ia64_mca_sal_to_os_state_t;
+
+enum {
+       IA64_MCA_CORRECTED      =       0x0,    /* Error has been corrected by OS_MCA */
+       IA64_MCA_WARM_BOOT      =       -1,     /* Warm boot of the system needed from SAL */
+       IA64_MCA_COLD_BOOT      =       -2,     /* Cold boot of the system needed from SAL */
+       IA64_MCA_HALT           =       -3      /* System to be halted by SAL */
+};
+
+enum {
+       IA64_MCA_SAME_CONTEXT   =       0x0,    /* SAL to return to same context */
+       IA64_MCA_NEW_CONTEXT    =       -1      /* SAL to return to new context */
+};
+
+typedef struct ia64_mca_os_to_sal_state_s {
+       u64             imots_os_status;        /*   OS status to SAL as to what happened
+                                                *   with the MCA handling.
+                                                */
+       u64             imots_sal_gp;           /* GP of the SAL - physical */
+       u64             imots_context;          /* 0 if return to same context
+                                                  1 if return to new context */
+       u64             *imots_new_min_state;   /* Pointer to structure containing
+                                                * new values of registers in the min state
+                                                * save area.
+                                                */
+       u64             imots_sal_check_ra;     /* Return address in SAL_CHECK while going
+                                                * back to SAL from OS after MCA handling.
+                                                */
+} ia64_mca_os_to_sal_state_t;
+
+/* Per-CPU MCA state that is too big for normal per-CPU variables.  */
+
+struct ia64_mca_cpu {
+       u64 stack[IA64_MCA_STACK_SIZE/8];       /* MCA memory-stack */
+       u64 proc_state_dump[512];
+       u64 stackframe[32];
+       u64 rbstore[IA64_MCA_STACK_SIZE/8];     /* MCA reg.-backing store */
+       u64 init_stack[KERNEL_STACK_SIZE/8];
+} __attribute__ ((aligned(16)));
+
+/* Array of physical addresses of each CPU's MCA area.  */
+extern unsigned long __per_cpu_mca[NR_CPUS];
+
+extern void ia64_mca_init(void);
+extern void ia64_mca_cpu_init(void *);
+extern void ia64_os_mca_dispatch(void);
+extern void ia64_os_mca_dispatch_end(void);
+extern void ia64_mca_ucmc_handler(void);
+extern void ia64_monarch_init_handler(void);
+extern void ia64_slave_init_handler(void);
+extern void ia64_mca_cmc_vector_setup(void);
+extern int  ia64_reg_MCA_extension(void*);
+extern void ia64_unreg_MCA_extension(void);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_IA64_MCA_H */
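As a hedged aside (not part of the patch), this sketch shows how the cmcv_reg_t union and its cmcv_vector/cmcv_mask accessor macros defined above are intended to be used; the vector value 0xd0 is an arbitrary example:

static inline u64 cmcv_example(void)
{
        cmcv_reg_t cmcv;

        cmcv.cmcv_regval = 0;        /* clear the whole 64-bit register image  */
        cmcv.cmcv_vector = 0xd0;     /* expands to cmcv_reg_s.cmcr_vector      */
        cmcv.cmcv_mask   = 0;        /* 0 = CMC interrupt not masked           */
        return cmcv.cmcv_regval;     /* raw value of the register image        */
}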
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/meminit.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/meminit.h  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,60 @@
+#ifndef meminit_h
+#define meminit_h
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/config.h>
+
+/*
+ * Entries defined so far:
+ *     - boot param structure itself
+ *     - memory map
+ *     - initrd (optional)
+ *     - command line string
+ *     - kernel code & data
+ *
+ * More could be added if necessary
+ */
+#define IA64_MAX_RSVD_REGIONS 5
+
+struct rsvd_region {
+       unsigned long start;    /* virtual address of beginning of element */
+       unsigned long end;      /* virtual address of end of element + 1 */
+};
+
+extern struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
+extern int num_rsvd_regions;
+
+extern void find_memory (void);
+extern void reserve_memory (void);
+extern void find_initrd (void);
+extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
+
+/*
+ * For rounding an address to the next IA64_GRANULE_SIZE or order
+ */
+#define GRANULEROUNDDOWN(n)    ((n) & ~(IA64_GRANULE_SIZE-1))
+#define GRANULEROUNDUP(n)      (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
+#define ORDERROUNDDOWN(n)      ((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))
+
+#ifdef CONFIG_DISCONTIGMEM
+  extern void call_pernode_memory (unsigned long start, unsigned long len, void *func);
+#else
+# define call_pernode_memory(start, len, func) (*func)(start, len, 0)
+#endif
+
+#define IGNORE_PFN0    1       /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+# define LARGE_GAP     0x40000000 /* Use virtual mem map if hole is > than this */
+  extern unsigned long vmalloc_end;
+  extern struct page *vmem_map;
+  extern int find_largest_hole (u64 start, u64 end, void *arg);
+  extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
+#endif
+
+#endif /* meminit_h */
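A quick worked example of the granule-rounding macros above, assuming a 16 MB granule (IA64_GRANULE_SIZE == 0x1000000); this is illustrative only:

static inline unsigned long granule_span(unsigned long addr)
{
        /* e.g. addr 0x04123456 gives down = 0x04000000, up = 0x05000000 */
        unsigned long down = GRANULEROUNDDOWN(addr);
        unsigned long up   = GRANULEROUNDUP(addr);

        /* 0 if addr is already granule-aligned, otherwise IA64_GRANULE_SIZE */
        return up - down;
}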
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/mman.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/mman.h     Tue Aug  2 23:59:09 2005
@@ -0,0 +1,51 @@
+#ifndef _ASM_IA64_MMAN_H
+#define _ASM_IA64_MMAN_H
+
+/*
+ * Based on <asm-i386/mman.h>.
+ *
+ * Modified 1998-2000, 2002
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>, Hewlett-Packard Co
+ */
+
+#define PROT_READ      0x1             /* page can be read */
+#define PROT_WRITE     0x2             /* page can be written */
+#define PROT_EXEC      0x4             /* page can be executed */
+#define PROT_SEM       0x8             /* page may be used for atomic ops */
+#define PROT_NONE      0x0             /* page can not be accessed */
+#define PROT_GROWSDOWN 0x01000000      /* mprotect flag: extend change to start of growsdown vma */
+#define PROT_GROWSUP   0x02000000      /* mprotect flag: extend change to end of growsup vma */
+
+#define MAP_SHARED     0x01            /* Share changes */
+#define MAP_PRIVATE    0x02            /* Changes are private */
+#define MAP_TYPE       0x0f            /* Mask for type of mapping */
+#define MAP_FIXED      0x10            /* Interpret addr exactly */
+#define MAP_ANONYMOUS  0x20            /* don't use a file */
+
+#define MAP_GROWSDOWN  0x00100         /* stack-like segment */
+#define MAP_GROWSUP    0x00200         /* register stack-like segment */
+#define MAP_DENYWRITE  0x00800         /* ETXTBSY */
+#define MAP_EXECUTABLE 0x01000         /* mark it as an executable */
+#define MAP_LOCKED     0x02000         /* pages are locked */
+#define MAP_NORESERVE  0x04000         /* don't check for reservations */
+#define MAP_POPULATE   0x08000         /* populate (prefault) pagetables */
+#define MAP_NONBLOCK   0x10000         /* do not block on IO */
+
+#define MS_ASYNC       1               /* sync memory asynchronously */
+#define MS_INVALIDATE  2               /* invalidate the caches */
+#define MS_SYNC                4               /* synchronous memory sync */
+
+#define MCL_CURRENT    1               /* lock all current mappings */
+#define MCL_FUTURE     2               /* lock all future mappings */
+
+#define MADV_NORMAL    0x0             /* default page-in behavior */
+#define MADV_RANDOM    0x1             /* page-in minimum required */
+#define MADV_SEQUENTIAL        0x2             /* read-ahead aggressively */
+#define MADV_WILLNEED  0x3             /* pre-fault pages */
+#define MADV_DONTNEED  0x4             /* discard these pages */
+
+/* compatibility flags */
+#define MAP_ANON       MAP_ANONYMOUS
+#define MAP_FILE       0
+
+#endif /* _ASM_IA64_MMAN_H */
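For context, a minimal user-space sketch (not part of the patch) using the PROT_* and MAP_* constants defined above for an anonymous private mapping:

#include <sys/mman.h>

static void *map_scratch(size_t len)
{
        /* MAP_ANONYMOUS | MAP_PRIVATE: fd is ignored (-1), offset is 0 */
        return mmap(NULL, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}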
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/numa.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/numa.h     Tue Aug  2 23:59:09 2005
@@ -0,0 +1,74 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * This file contains NUMA specific prototypes and definitions.
+ *
+ * 2002/08/05 Erich Focht <efocht@xxxxxxxxxx>
+ *
+ */
+#ifndef _ASM_IA64_NUMA_H
+#define _ASM_IA64_NUMA_H
+
+#include <linux/config.h>
+
+#ifdef CONFIG_NUMA
+
+#include <linux/cache.h>
+#include <linux/cpumask.h>
+#include <linux/numa.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+
+#include <asm/mmzone.h>
+
+extern u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
+extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
+
+/* Stuff below this line could be architecture independent */
+
+extern int num_node_memblks;           /* total number of memory chunks */
+
+/*
+ * List of node memory chunks. Filled when parsing SRAT table to
+ * obtain information about memory nodes.
+*/
+
+struct node_memblk_s {
+       unsigned long start_paddr;
+       unsigned long size;
+       int nid;                /* which logical node contains this chunk? */
+       int bank;               /* which mem bank on this node */
+};
+
+struct node_cpuid_s {
+       u16     phys_id;        /* id << 8 | eid */
+       int     nid;            /* logical node containing this CPU */
+};
+
+extern struct node_memblk_s node_memblk[NR_NODE_MEMBLKS];
+extern struct node_cpuid_s node_cpuid[NR_CPUS];
+
+/*
+ * ACPI 2.0 SLIT (System Locality Information Table)
+ * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
+ *
+ * This is a matrix with "distances" between nodes; they should be
+ * proportional to the memory access latency ratios.
+ */
+
+extern u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
+#define node_distance(from,to) (numa_slit[(from) * num_online_nodes() + (to)])
+
+extern int paddr_to_nid(unsigned long paddr);
+
+#define local_nodeid (cpu_to_node_map[smp_processor_id()])
+
+#else /* !CONFIG_NUMA */
+
+#define paddr_to_nid(addr)     0
+
+#endif /* CONFIG_NUMA */
+
+#endif /* _ASM_IA64_NUMA_H */
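A small sketch (assumes CONFIG_NUMA; the helper name is hypothetical) showing how cpu_to_node_map[] and node_distance() above fit together:

/* Distance from the node owning "cpu" to node "nid", via the SLIT-derived
 * numa_slit[] matrix; by ACPI convention the local distance is typically 10. */
static inline int cpu_to_node_distance(int cpu, int nid)
{
        int from = cpu_to_node_map[cpu];        /* logical node of the CPU */

        return node_distance(from, nid);
}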
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/param.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/param.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,42 @@
+#ifndef _ASM_IA64_PARAM_H
+#define _ASM_IA64_PARAM_H
+
+/*
+ * Fundamental kernel parameters.
+ *
+ * Based on <asm-i386/param.h>.
+ *
+ * Modified 1998, 1999, 2002-2003
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>, Hewlett-Packard Co
+ */
+
+#define EXEC_PAGESIZE  65536
+
+#ifndef NOGROUP
+# define NOGROUP       (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64      /* max length of hostname */
+
+#ifdef __KERNEL__
+# include <linux/config.h>     /* mustn't include <linux/config.h> outside of #ifdef __KERNEL__ */
+# ifdef CONFIG_IA64_HP_SIM
+  /*
+   * Yeah, simulating stuff is slow, so let us catch some breath between
+   * timer interrupts...
+   */
+#  define HZ     32
+# else
+#  define HZ   1024
+# endif
+# define USER_HZ       HZ
+# define CLOCKS_PER_SEC        HZ      /* frequency at which times() counts */
+#else
+   /*
+    * Technically, this is wrong, but some old apps still refer to it.  The proper way to
+    * get the HZ value is via sysconf(_SC_CLK_TCK).
+    */
+# define HZ 1024
+#endif
+
+#endif /* _ASM_IA64_PARAM_H */
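To make the HZ choice above concrete, a small illustrative helper (not part of the patch) converting milliseconds to timer ticks, which works for both HZ=1024 and the simulator's HZ=32:

static inline unsigned long ms_to_ticks(unsigned long ms)
{
        /* round up so a non-zero interval is never less than one tick */
        return (ms * HZ + 999) / 1000;
}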
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/patch.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/patch.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,25 @@
+#ifndef _ASM_IA64_PATCH_H
+#define _ASM_IA64_PATCH_H
+
+/*
+ * Copyright (C) 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *
+ * There are a number of reasons for patching instructions.  Rather than duplicating code
+ * all over the place, we put the common stuff here.  Reasons for patching: in-kernel
+ * module-loader, virtual-to-physical patch-list, McKinley Errata 9 workaround, and gate
+ * shared library.  Undoubtedly, some of these reasons will disappear and others will
+ * be added over time.
+ */
+#include <linux/elf.h>
+#include <linux/types.h>
+
+extern void ia64_patch (u64 insn_addr, u64 mask, u64 val);     /* patch any insn slot */
+extern void ia64_patch_imm64 (u64 insn_addr, u64 val);         /* patch "movl" w/abs. value*/
+extern void ia64_patch_imm60 (u64 insn_addr, u64 val);         /* patch "brl" w/ip-rel value */
+
+extern void ia64_patch_mckinley_e9 (unsigned long start, unsigned long end);
+extern void ia64_patch_vtop (unsigned long start, unsigned long end);
+extern void ia64_patch_gate (void);
+
+#endif /* _ASM_IA64_PATCH_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/pci.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/pci.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,146 @@
+#ifndef _ASM_IA64_PCI_H
+#define _ASM_IA64_PCI_H
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+#include <asm/scatterlist.h>
+
+/*
+ * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
+ * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
+ * loader.
+ */
+#define pcibios_assign_all_busses()     0
+#define pcibios_scan_all_fns(a, b)     0
+
+#define PCIBIOS_MIN_IO         0x1000
+#define PCIBIOS_MIN_MEM                0x10000000
+
+void pcibios_config_init(void);
+
+struct pci_dev;
+
+/*
+ * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct correspondence
+ * between device bus addresses and CPU physical addresses.  Platforms with a hardware I/O
+ * MMU _must_ turn this off to suppress the bounce buffer handling code in the block and
+ * network device layers.  Platforms with separate bus address spaces _must_ turn this off
+ * and provide a device DMA mapping implementation that takes care of the necessary
+ * address translation.
+ *
+ * For now, the ia64 platforms which may have separate/multiple bus address spaces all
+ * have I/O MMUs which support the merging of physically discontiguous buffers, so we can
+ * use that as the sole factor to determine the setting of PCI_DMA_BUS_IS_PHYS.
+ */
+extern unsigned long ia64_max_iommu_merge_mask;
+#define PCI_DMA_BUS_IS_PHYS    (ia64_max_iommu_merge_mask == ~0UL)
+
+static inline void
+pcibios_set_master (struct pci_dev *dev)
+{
+       /* No special bus mastering setup handling */
+}
+
+static inline void
+pcibios_penalize_isa_irq (int irq)
+{
+       /* We don't do dynamic PCI IRQ allocation */
+}
+
+#define HAVE_ARCH_PCI_MWI 1
+extern int pcibios_prep_mwi (struct pci_dev *);
+
+#include <asm-generic/pci-dma-compat.h>
+
+/* pci_unmap_{single,page} is not a nop, thus... */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)      \
+       dma_addr_t ADDR_NAME;
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)                \
+       __u32 LEN_NAME;
+#define pci_unmap_addr(PTR, ADDR_NAME)                 \
+       ((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)                \
+       (((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME)                   \
+       ((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)          \
+       (((PTR)->LEN_NAME) = (VAL))
+
+/* The ia64 platform always supports 64-bit addressing. */
+#define pci_dac_dma_supported(pci_dev, mask)           (1)
+#define pci_dac_page_to_dma(dev,pg,off,dir)            ((dma_addr_t) page_to_bus(pg) + (off))
+#define pci_dac_dma_to_page(dev,dma_addr)              (virt_to_page(bus_to_virt(dma_addr)))
+#define pci_dac_dma_to_offset(dev,dma_addr)            offset_in_page(dma_addr)
+#define pci_dac_dma_sync_single_for_cpu(dev,dma_addr,len,dir)  do { } while (0)
+#define pci_dac_dma_sync_single_for_device(dev,dma_addr,len,dir)       do { mb(); } while (0)
+
+#define sg_dma_len(sg)         ((sg)->dma_length)
+#define sg_dma_address(sg)     ((sg)->dma_address)
+
+#define HAVE_PCI_MMAP
+extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
+                               enum pci_mmap_state mmap_state, int write_combine);
+#define HAVE_PCI_LEGACY
+extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
+                                     struct vm_area_struct *vma);
+extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off,
+                                 size_t count);
+extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off,
+                                  size_t count);
+extern int pci_mmap_legacy_mem(struct kobject *kobj,
+                              struct bin_attribute *attr,
+                              struct vm_area_struct *vma);
+
+#define pci_get_legacy_mem platform_pci_get_legacy_mem
+#define pci_legacy_read platform_pci_legacy_read
+#define pci_legacy_write platform_pci_legacy_write
+
+struct pci_window {
+       struct resource resource;
+       u64 offset;
+};
+
+struct pci_controller {
+       void *acpi_handle;
+       void *iommu;
+       int segment;
+
+       unsigned int windows;
+       struct pci_window *window;
+
+       void *platform_data;
+};
+
+#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
+#define pci_domain_nr(busdev)    (PCI_CONTROLLER(busdev)->segment)
+
+extern struct pci_ops pci_root_ops;
+
+static inline int pci_name_bus(char *name, struct pci_bus *bus)
+{
+       if (pci_domain_nr(bus) == 0) {
+               sprintf(name, "%02x", bus->number);
+       } else {
+               sprintf(name, "%04x:%02x", pci_domain_nr(bus), bus->number);
+       }
+       return 0;
+}
+
+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
+{
+}
+
+extern void pcibios_resource_to_bus(struct pci_dev *dev,
+               struct pci_bus_region *region, struct resource *res);
+
+extern void pcibios_bus_to_resource(struct pci_dev *dev,
+               struct resource *res, struct pci_bus_region *region);
+
+#define pcibios_scan_all_fns(a, b)     0
+
+#endif /* _ASM_IA64_PCI_H */
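As a hedged sketch of how the pci_unmap_*() bookkeeping macros above are typically used by a driver (the struct and field names here are hypothetical, not part of this changeset); on ia64 the macros expand to real fields because the DMA address really is needed again at unmap time:

struct my_rx_desc {
        struct sk_buff *skb;
        DECLARE_PCI_UNMAP_ADDR(mapping)
        DECLARE_PCI_UNMAP_LEN(len)
};

/* at map time:
 *     pci_unmap_addr_set(desc, mapping, dma);
 *     pci_unmap_len_set(desc, len, size);
 * at unmap time:
 *     pci_unmap_single(pdev, pci_unmap_addr(desc, mapping),
 *                      pci_unmap_len(desc, len), PCI_DMA_FROMDEVICE);
 */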
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/percpu.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/percpu.h   Tue Aug  2 23:59:09 2005
@@ -0,0 +1,72 @@
+#ifndef _ASM_IA64_PERCPU_H
+#define _ASM_IA64_PERCPU_H
+
+/*
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE
+
+#ifdef __ASSEMBLY__
+# define THIS_CPU(var) (per_cpu__##var)  /* use this to mark accesses to per-CPU variables... */
+#else /* !__ASSEMBLY__ */
+
+#include <linux/config.h>
+
+#include <linux/threads.h>
+
+#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
+# define __SMALL_ADDR_AREA     __attribute__((__model__ (__small__)))
+#else
+# define __SMALL_ADDR_AREA
+#endif
+
+#define DECLARE_PER_CPU(type, name)                            \
+       extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
+
+/* Separate out the type, so (int[3], foo) works. */
+#define DEFINE_PER_CPU(type, name)                             \
+       __attribute__((__section__(".data.percpu")))            \
+       __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
+
+/*
+ * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
+ * external routine, to avoid include-hell.
+ */
+#ifdef CONFIG_SMP
+
+extern unsigned long __per_cpu_offset[NR_CPUS];
+
+/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
+DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
+
+#define per_cpu(var, cpu)  (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
+#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
+
+extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
+extern void setup_per_cpu_areas (void);
+extern void *per_cpu_init(void);
+
+#else /* ! SMP */
+
+#define per_cpu(var, cpu)                      (*((void)cpu, &per_cpu__##var))
+#define __get_cpu_var(var)                     per_cpu__##var
+#define per_cpu_init()                         (__phys_per_cpu_start)
+
+#endif /* SMP */
+
+#define EXPORT_PER_CPU_SYMBOL(var)             EXPORT_SYMBOL(per_cpu__##var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var)         EXPORT_SYMBOL_GPL(per_cpu__##var)
+
+/*
+ * Be extremely careful when taking the address of this variable!  Due to virtual
+ * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
+ * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
+ * more efficient.
+ */
+#define __ia64_per_cpu_var(var)        (per_cpu__##var)
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_PERCPU_H */
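For illustration, a minimal sketch of the DEFINE_PER_CPU / per_cpu() / __get_cpu_var() pattern declared above; the counter name is hypothetical:

DEFINE_PER_CPU(unsigned long, my_irq_count);

static void count_irq_on_this_cpu(void)
{
        __get_cpu_var(my_irq_count)++;          /* this CPU's copy      */
}

static unsigned long read_irq_count(int cpu)
{
        return per_cpu(my_irq_count, cpu);      /* a given CPU's copy   */
}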
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/pgtable.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/pgtable.h  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,567 @@
+#ifndef _ASM_IA64_PGTABLE_H
+#define _ASM_IA64_PGTABLE_H
+
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the IA-64 page table tree.
+ *
+ * This hopefully works with any (fixed) IA-64 page-size, as defined
+ * in <asm/page.h>.
+ *
+ * Copyright (C) 1998-2004 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <linux/config.h>
+
+#include <asm/mman.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/types.h>
+
+#define IA64_MAX_PHYS_BITS     50      /* max. number of physical address bits (architected) */
+
+/*
+ * First, define the various bits in a PTE.  Note that the PTE format
+ * matches the VHPT short format, the first doubleword of the VHPT long
+ * format, and the first doubleword of the TLB insertion format.
+ */
+#define _PAGE_P_BIT            0
+#define _PAGE_A_BIT            5
+#define _PAGE_D_BIT            6
+
+#define _PAGE_P                        (1 << _PAGE_P_BIT)      /* page present bit */
+#define _PAGE_MA_WB            (0x0 <<  2)     /* write back memory attribute */
+#define _PAGE_MA_UC            (0x4 <<  2)     /* uncacheable memory attribute */
+#define _PAGE_MA_UCE           (0x5 <<  2)     /* UC exported attribute */
+#define _PAGE_MA_WC            (0x6 <<  2)     /* write coalescing memory attribute */
+#define _PAGE_MA_NAT           (0x7 <<  2)     /* not-a-thing attribute */
+#define _PAGE_MA_MASK          (0x7 <<  2)
+#define _PAGE_PL_0             (0 <<  7)       /* privilege level 0 (kernel) */
+#define _PAGE_PL_1             (1 <<  7)       /* privilege level 1 (unused) */
+#define _PAGE_PL_2             (2 <<  7)       /* privilege level 2 (unused) */
+#define _PAGE_PL_3             (3 <<  7)       /* privilege level 3 (user) */
+#define _PAGE_PL_MASK          (3 <<  7)
+#define _PAGE_AR_R             (0 <<  9)       /* read only */
+#define _PAGE_AR_RX            (1 <<  9)       /* read & execute */
+#define _PAGE_AR_RW            (2 <<  9)       /* read & write */
+#define _PAGE_AR_RWX           (3 <<  9)       /* read, write & execute */
+#define _PAGE_AR_R_RW          (4 <<  9)       /* read / read & write */
+#define _PAGE_AR_RX_RWX                (5 <<  9)       /* read & exec / read, write & exec */
+#define _PAGE_AR_RWX_RW                (6 <<  9)       /* read, write & exec / read & write */
+#define _PAGE_AR_X_RX          (7 <<  9)       /* exec & promote / read & exec */
+#define _PAGE_AR_MASK          (7 <<  9)
+#define _PAGE_AR_SHIFT         9
+#define _PAGE_A                        (1 << _PAGE_A_BIT)      /* page accessed bit */
+#define _PAGE_D                        (1 << _PAGE_D_BIT)      /* page dirty bit */
+#define _PAGE_PPN_MASK         (((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
+#define _PAGE_ED               (__IA64_UL(1) << 52)    /* exception deferral */
+#define _PAGE_PROTNONE         (__IA64_UL(1) << 63)
+
+/* Valid only for a PTE with the present bit cleared: */
+#define _PAGE_FILE             (1 << 1)                /* see swap & file pte remarks below */
+
+#define _PFN_MASK              _PAGE_PPN_MASK
+/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
+#define _PAGE_CHG_MASK (_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)
+
+#define _PAGE_SIZE_4K  12
+#define _PAGE_SIZE_8K  13
+#define _PAGE_SIZE_16K 14
+#define _PAGE_SIZE_64K 16
+#define _PAGE_SIZE_256K        18
+#define _PAGE_SIZE_1M  20
+#define _PAGE_SIZE_4M  22
+#define _PAGE_SIZE_16M 24
+#define _PAGE_SIZE_64M 26
+#define _PAGE_SIZE_256M        28
+#define _PAGE_SIZE_1G  30
+#define _PAGE_SIZE_4G  32
+
+#define __ACCESS_BITS          _PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
+#define __DIRTY_BITS_NO_ED     _PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
+#define __DIRTY_BITS           _PAGE_ED | __DIRTY_BITS_NO_ED
+
+/*
+ * Definitions for first level:
+ *
+ * PGDIR_SHIFT determines what a first-level page table entry can map.
+ */
+#define PGDIR_SHIFT            (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
+#define PGDIR_SIZE             (__IA64_UL(1) << PGDIR_SHIFT)
+#define PGDIR_MASK             (~(PGDIR_SIZE-1))
+#define PTRS_PER_PGD           (1UL << (PAGE_SHIFT-3))
+#define USER_PTRS_PER_PGD      (5*PTRS_PER_PGD/8)      /* regions 0-4 are user regions */
+#define FIRST_USER_PGD_NR      0
+
+/*
+ * Definitions for second level:
+ *
+ * PMD_SHIFT determines the size of the area a second-level page table
+ * can map.
+ */
+#define PMD_SHIFT      (PAGE_SHIFT + (PAGE_SHIFT-3))
+#define PMD_SIZE       (1UL << PMD_SHIFT)
+#define PMD_MASK       (~(PMD_SIZE-1))
+#define PTRS_PER_PMD   (1UL << (PAGE_SHIFT-3))
+
+/*
+ * Definitions for third level:
+ */
+#define PTRS_PER_PTE   (__IA64_UL(1) << (PAGE_SHIFT-3))
+
+/*
+ * All the normal masks have the "page accessed" bits on, as any time
+ * they are used, the page is accessed. They are cleared only by the
+ * page-out routines.
+ */
+#define PAGE_NONE      __pgprot(_PAGE_PROTNONE | _PAGE_A)
+#define PAGE_SHARED    __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
+#define PAGE_READONLY  __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+#define PAGE_COPY      __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+#define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+#define PAGE_GATE      __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+#define PAGE_KERNEL    __pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
+#define PAGE_KERNELRX  __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+
+# ifndef __ASSEMBLY__
+
+#include <asm/bitops.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+#include <asm/processor.h>
+
+/*
+ * Next come the mappings that determine how mmap() protection bits
+ * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented.  The
+ * _P version gets used for a private shared memory segment, the _S
+ * version gets used for a shared memory segment with MAP_SHARED on.
+ * In a private shared memory segment, we do a copy-on-write if a task
+ * attempts to write to the page.
+ */
+       /* xwr */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_READONLY   /* write to priv pg -> copy & make writable */
+#define __P011 PAGE_READONLY   /* ditto */
+#define __P100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
+#define __P101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_EXEC
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED     /* we don't have (and don't need) write-only */
+#define __S011 PAGE_SHARED
+#define __S100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
+#define __S101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+#define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
+#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
+
+#define pgd_ERROR(e)   printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+#define pmd_ERROR(e)   printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pte_ERROR(e)   printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
+
+
+/*
+ * Some definitions to translate between mem_map, PTEs, and page addresses:
+ */
+
+
+/* Quick test to see if ADDR is a (potentially) valid physical address. */
+static inline long
+ia64_phys_addr_valid (unsigned long addr)
+{
+       return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
+}
+
+/*
+ * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
+ * memory.  For the return value to be meaningful, ADDR must be >=
+ * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
+ * require a hash-, or multi-level tree-lookup or something of that
+ * sort) but it guarantees to return TRUE only if accessing the page
+ * at that address does not cause an error.  Note that there may be
+ * addresses for which kern_addr_valid() returns FALSE even though an
+ * access would not cause an error (e.g., this is typically true for
+ * memory mapped I/O regions).
+ *
+ * XXX Need to implement this for IA-64.
+ */
+#define kern_addr_valid(addr)  (1)
+
+
+/*
+ * Now come the defines and routines to manage and access the three-level
+ * page table.
+ */
+
+/*
+ * On some architectures, special things need to be done when setting
+ * the PTE in a page table.  Nothing special needs to be done on IA-64.
+ */
+#define set_pte(ptep, pteval)  (*(ptep) = (pteval))
+
+#define RGN_SIZE       (1UL << 61)
+#define RGN_KERNEL     7
+
+#define VMALLOC_START          0xa000000200000000UL
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+# define VMALLOC_END_INIT      (0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
+# define VMALLOC_END           vmalloc_end
+  extern unsigned long vmalloc_end;
+#else
+# define VMALLOC_END           (0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
+#endif
+
+/* fs/proc/kcore.c */
+#define        kc_vaddr_to_offset(v) ((v) - 0xa000000000000000UL)
+#define        kc_offset_to_vaddr(o) ((o) + 0xa000000000000000UL)
+
+/*
+ * Conversion functions: convert page frame number (pfn) and a protection value to a page
+ * table entry (pte).
+ */
+#define pfn_pte(pfn, pgprot) \
+({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })
+
+/* Extract pfn from pte.  */
+#define pte_pfn(_pte)          ((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)
+
+#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
+
+/* This takes a physical page address that is used by the remapping functions */
+#define mk_pte_phys(physpage, pgprot) \
+({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
+
+#define pte_modify(_pte, newprot) \
+       (__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))
+
+#define page_pte_prot(page,prot)       mk_pte(page, prot)
+#define page_pte(page)                 page_pte_prot(page, __pgprot(0))
+
+#define pte_none(pte)                  (!pte_val(pte))
+#define pte_present(pte)               (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
+#define pte_clear(pte)                 (pte_val(*(pte)) = 0UL)
+/* pte_page() returns the "struct page *" corresponding to the PTE: */
+#define pte_page(pte)                  virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET))
+
+#define pmd_none(pmd)                  (!pmd_val(pmd))
+#define pmd_bad(pmd)                   (!ia64_phys_addr_valid(pmd_val(pmd)))
+#define pmd_present(pmd)               (pmd_val(pmd) != 0UL)
+#define pmd_clear(pmdp)                        (pmd_val(*(pmdp)) = 0UL)
+#define pmd_page_kernel(pmd)           ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
+#define pmd_page(pmd)                  virt_to_page((pmd_val(pmd) + PAGE_OFFSET))
+
+#define pud_none(pud)                  (!pud_val(pud))
+#define pud_bad(pud)                   (!ia64_phys_addr_valid(pud_val(pud)))
+#define pud_present(pud)               (pud_val(pud) != 0UL)
+#define pud_clear(pudp)                        (pud_val(*(pudp)) = 0UL)
+
+#define pud_page(pud)                  ((unsigned long) __va(pud_val(pud) & _PFN_MASK))
+
+/*
+ * The following have defined behavior only if pte_present() is true.
+ */
+#define pte_user(pte)          ((pte_val(pte) & _PAGE_PL_MASK) == _PAGE_PL_3)
+#define pte_read(pte)          (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) < 6)
+#define pte_write(pte) ((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
+#define pte_exec(pte)          ((pte_val(pte) & _PAGE_AR_RX) != 0)
+#define pte_dirty(pte)         ((pte_val(pte) & _PAGE_D) != 0)
+#define pte_young(pte)         ((pte_val(pte) & _PAGE_A) != 0)
+#define pte_file(pte)          ((pte_val(pte) & _PAGE_FILE) != 0)
+/*
+ * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
+ * access rights:
+ */
+#define pte_wrprotect(pte)     (__pte(pte_val(pte) & ~_PAGE_AR_RW))
+#define pte_mkwrite(pte)       (__pte(pte_val(pte) | _PAGE_AR_RW))
+#define pte_mkexec(pte)                (__pte(pte_val(pte) | _PAGE_AR_RX))
+#define pte_mkold(pte)         (__pte(pte_val(pte) & ~_PAGE_A))
+#define pte_mkyoung(pte)       (__pte(pte_val(pte) | _PAGE_A))
+#define pte_mkclean(pte)       (__pte(pte_val(pte) & ~_PAGE_D))
+#define pte_mkdirty(pte)       (__pte(pte_val(pte) | _PAGE_D))
+
+/*
+ * Macro to mark a page protection value as "uncacheable".  Note that "protection" is really a
+ * misnomer here as the protection value contains the memory attribute bits, dirty bits,
+ * and various other bits as well.
+ */
+#define pgprot_noncached(prot)         __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
+
+/*
+ * Macro to mark a page protection value as "write-combining".
+ * Note that "protection" is really a misnomer here as the protection
+ * value contains the memory attribute bits, dirty bits, and various
+ * other bits as well.  Accesses through a write-combining translation
+ * bypass the caches, but do allow for consecutive writes to
+ * be combined into single (but larger) write transactions.
+ */
+#define pgprot_writecombine(prot)      __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
+
+static inline unsigned long
+pgd_index (unsigned long address)
+{
+       unsigned long region = address >> 61;
+       unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);
+
+       return (region << (PAGE_SHIFT - 6)) | l1index;
+}
+
+/* The offset in the 1-level directory is given by the 3 region bits
+   (61..63) and the level-1 bits.  */
+static inline pgd_t*
+pgd_offset (struct mm_struct *mm, unsigned long address)
+{
+       return mm->pgd + pgd_index(address);
+}
+
+/* In the kernel's mapped region we completely ignore the region number
+   (since we know it's in region number 5). */
+#define pgd_offset_k(addr) \
+       (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
+
+/* Look up a pgd entry in the gate area.  On IA-64, the gate-area
+   resides in the kernel-mapped segment, hence we use pgd_offset_k()
+   here.  */
+#define pgd_offset_gate(mm, addr)      pgd_offset_k(addr)
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(dir,addr) \
+       ((pmd_t *) pud_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+
+/*
+ * Find an entry in the third-level page table.  This looks more complicated than it
+ * should be because some platforms place page tables in high memory.
+ */
+#define pte_index(addr)                (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir,addr)    ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
+#define pte_offset_map(dir,addr)       pte_offset_kernel(dir, addr)
+#define pte_offset_map_nested(dir,addr)        pte_offset_map(dir, addr)
+#define pte_unmap(pte)                 do { } while (0)
+#define pte_unmap_nested(pte)          do { } while (0)
+
+/* atomic versions of some of the PTE manipulations: */
+
+static inline int
+ptep_test_and_clear_young (pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+       if (!pte_young(*ptep))
+               return 0;
+       return test_and_clear_bit(_PAGE_A_BIT, ptep);
+#else
+       pte_t pte = *ptep;
+       if (!pte_young(pte))
+               return 0;
+       set_pte(ptep, pte_mkold(pte));
+       return 1;
+#endif
+}
+
+static inline int
+ptep_test_and_clear_dirty (pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+       if (!pte_dirty(*ptep))
+               return 0;
+       return test_and_clear_bit(_PAGE_D_BIT, ptep);
+#else
+       pte_t pte = *ptep;
+       if (!pte_dirty(pte))
+               return 0;
+       set_pte(ptep, pte_mkclean(pte));
+       return 1;
+#endif
+}
+
+static inline pte_t
+ptep_get_and_clear (pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+       return __pte(xchg((long *) ptep, 0));
+#else
+       pte_t pte = *ptep;
+       pte_clear(ptep);
+       return pte;
+#endif
+}
+
+static inline void
+ptep_set_wrprotect (pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+       unsigned long new, old;
+
+       do {
+               old = pte_val(*ptep);
+               new = pte_val(pte_wrprotect(__pte (old)));
+       } while (cmpxchg((unsigned long *) ptep, old, new) != old);
+#else
+       pte_t old_pte = *ptep;
+       set_pte(ptep, pte_wrprotect(old_pte));
+#endif
+}
+
+static inline void
+ptep_mkdirty (pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+       set_bit(_PAGE_D_BIT, ptep);
+#else
+       pte_t old_pte = *ptep;
+       set_pte(ptep, pte_mkdirty(old_pte));
+#endif
+}
+
+static inline int
+pte_same (pte_t a, pte_t b)
+{
+       return pte_val(a) == pte_val(b);
+}
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern void paging_init (void);
+
+/*
+ * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
+ *      bits in the swap-type field of the swap pte.  It would be nice to
+ *      enforce that, but we can't easily include <linux/swap.h> here.
+ *      (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
+ *
+ * Format of swap pte:
+ *     bit   0   : present bit (must be zero)
+ *     bit   1   : _PAGE_FILE (must be zero)
+ *     bits  2- 8: swap-type
+ *     bits  9-62: swap offset
+ *     bit  63   : _PAGE_PROTNONE bit
+ *
+ * Format of file pte:
+ *     bit   0   : present bit (must be zero)
+ *     bit   1   : _PAGE_FILE (must be one)
+ *     bits  2-62: file_offset/PAGE_SIZE
+ *     bit  63   : _PAGE_PROTNONE bit
+ */
+#define __swp_type(entry)              (((entry).val >> 2) & 0x7f)
+#define __swp_offset(entry)            (((entry).val << 1) >> 10)
+#define __swp_entry(type,offset)       ((swp_entry_t) { ((type) << 2) | ((long) (offset) << 9) })
+#define __pte_to_swp_entry(pte)                ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)          ((pte_t) { (x).val })
+
+#define PTE_FILE_MAX_BITS              61
+#define pte_to_pgoff(pte)              ((pte_val(pte) << 1) >> 3)
+#define pgoff_to_pte(off)              ((pte_t) { ((off) << 2) | _PAGE_FILE })
+
+/* XXX is this right? */
+#define io_remap_page_range(vma, vaddr, paddr, size, prot)             \
+               remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+extern struct page *zero_page_memmap_ptr;
+#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)
+
+/* We provide our own get_unmapped_area to cope with VA holes for userland */
+#define HAVE_ARCH_UNMAPPED_AREA
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HUGETLB_PGDIR_SHIFT    (HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
+#define HUGETLB_PGDIR_SIZE     (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
+#define HUGETLB_PGDIR_MASK     (~(HUGETLB_PGDIR_SIZE-1))
+struct mmu_gather;
+extern void hugetlb_free_pgtables(struct mmu_gather *tlb,
+       struct vm_area_struct * prev, unsigned long start, unsigned long end);
+#endif
+
+/*
+ * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
+ * information.  However, we use this routine to take care of any (delayed) i-cache
+ * flushing that may be necessary.
+ */
+extern void update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte);
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+/*
+ * Update PTEP with ENTRY, which is guaranteed to be a less
+ * restrictive PTE.  That is, ENTRY may have the ACCESSED, DIRTY, and
+ * WRITABLE bits turned on, when the value at PTEP did not.  The
+ * WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
+ *
+ * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
+ * having to worry about races.  On SMP machines, there are only two
+ * cases where this is true:
+ *
+ *     (1) *PTEP has the PRESENT bit turned OFF
+ *     (2) ENTRY has the DIRTY bit turned ON
+ *
+ * On ia64, we could implement this routine with a cmpxchg()-loop
+ * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
+ * However, like on x86, we can get a more streamlined version by
+ * observing that it is OK to drop ACCESSED bit updates when
+ * SAFELY_WRITABLE is FALSE.  Besides being rare, all that would do is
+ * result in an extra Access-bit fault, which would then turn on the
+ * ACCESSED bit in the low-level fault handler (iaccess_bit or
+ * daccess_bit in ivt.S).
+ */
+#ifdef CONFIG_SMP
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable)      \
+do {                                                                                   \
+       if (__safely_writable) {                                                        \
+               set_pte(__ptep, __entry);                                               \
+               flush_tlb_page(__vma, __addr);                                          \
+       }                                                                               \
+} while (0)
+#else
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable)      \
+       ptep_establish(__vma, __addr, __ptep, __entry)
+#endif
+
+#  ifdef CONFIG_VIRTUAL_MEM_MAP
+  /* arch mem_map init routine is needed due to holes in a virtual mem_map */
+#   define __HAVE_ARCH_MEMMAP_INIT
+    extern void memmap_init (unsigned long size, int nid, unsigned long zone,
+                            unsigned long start_pfn);
+#  endif /* CONFIG_VIRTUAL_MEM_MAP */
+# endif /* !__ASSEMBLY__ */
+
+/*
+ * Identity-mapped regions use a large page size.  We'll call such large pages
+ * "granules".  If you can think of a better name that's unambiguous, let me
+ * know...
+ */
+#if defined(CONFIG_IA64_GRANULE_64MB)
+# define IA64_GRANULE_SHIFT    _PAGE_SIZE_64M
+#elif defined(CONFIG_IA64_GRANULE_16MB)
+# define IA64_GRANULE_SHIFT    _PAGE_SIZE_16M
+#endif
+#define IA64_GRANULE_SIZE      (1 << IA64_GRANULE_SHIFT)
+/*
+ * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
+ */
+#define KERNEL_TR_PAGE_SHIFT   _PAGE_SIZE_64M
+#define KERNEL_TR_PAGE_SIZE    (1 << KERNEL_TR_PAGE_SHIFT)
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()   do { } while (0)
+
+/* These tell get_user_pages() that the first gate page is accessible from user-level.  */
+#define FIXADDR_USER_START     GATE_ADDR
+#define FIXADDR_USER_END       (GATE_ADDR + 2*PERCPU_PAGE_SIZE)
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTEP_MKDIRTY
+#define __HAVE_ARCH_PTE_SAME
+#define __HAVE_ARCH_PGD_OFFSET_GATE
+#include <asm-generic/pgtable.h>
+#include <asm-generic/pgtable-nopud.h>
+
+#endif /* _ASM_IA64_PGTABLE_H */
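To tie the three-level helpers above together, a hedged sketch of a kernel-address page-table walk (the function is hypothetical; the pud level is folded away by <asm-generic/pgtable-nopud.h>, which is included at the end of the header):

static pte_t *lookup_kernel_pte(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);        /* region 5, kernel mapping */
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, addr);            /* no-op with the folded pud */
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return NULL;
        return pte_offset_kernel(pmd, addr);
}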
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/ptrace_offsets.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/ptrace_offsets.h   Tue Aug  2 23:59:09 2005
@@ -0,0 +1,268 @@
+#ifndef _ASM_IA64_PTRACE_OFFSETS_H
+#define _ASM_IA64_PTRACE_OFFSETS_H
+
+/*
+ * Copyright (C) 1999, 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+/*
+ * The "uarea" that can be accessed via PEEKUSER and POKEUSER is a
+ * virtual structure that would have the following definition:
+ *
+ *     struct uarea {
+ *             struct ia64_fpreg fph[96];              // f32-f127
+ *             unsigned long nat_bits;
+ *             unsigned long empty1;
+ *             struct ia64_fpreg f2;                   // f2-f5
+ *                     :
+ *             struct ia64_fpreg f5;
+ *             struct ia64_fpreg f10;                  // f10-f31
+ *                     :
+ *             struct ia64_fpreg f31;
+ *             unsigned long r4;                       // r4-r7
+ *                     :
+ *             unsigned long r7;
+ *             unsigned long b1;                       // b1-b5
+ *                     :
+ *             unsigned long b5;
+ *             unsigned long ar_ec;
+ *             unsigned long ar_lc;
+ *             unsigned long empty2[5];
+ *             unsigned long cr_ipsr;
+ *             unsigned long cr_iip;
+ *             unsigned long cfm;
+ *             unsigned long ar_unat;
+ *             unsigned long ar_pfs;
+ *             unsigned long ar_rsc;
+ *             unsigned long ar_rnat;
+ *             unsigned long ar_bspstore;
+ *             unsigned long pr;
+ *             unsigned long b6;
+ *             unsigned long ar_bsp;
+ *             unsigned long r1;
+ *             unsigned long r2;
+ *             unsigned long r3;
+ *             unsigned long r12;
+ *             unsigned long r13;
+ *             unsigned long r14;
+ *             unsigned long r15;
+ *             unsigned long r8;
+ *             unsigned long r9;
+ *             unsigned long r10;
+ *             unsigned long r11;
+ *             unsigned long r16;
+ *                     :
+ *             unsigned long r31;
+ *             unsigned long ar_ccv;
+ *             unsigned long ar_fpsr;
+ *             unsigned long b0;
+ *             unsigned long b7;
+ *             unsigned long f6;
+ *             unsigned long f7;
+ *             unsigned long f8;
+ *             unsigned long f9;
+ *             unsigned long ar_csd;
+ *             unsigned long ar_ssd;
+ *             unsigned long rsvd1[710];
+ *             unsigned long dbr[8];
+ *             unsigned long rsvd2[504];
+ *             unsigned long ibr[8];
+ *             unsigned long rsvd3[504];
+ *             unsigned long pmd[4];
+ *     }
+ */
+
+/* fph: */
+#define PT_F32                 0x0000
+#define PT_F33                 0x0010
+#define PT_F34                 0x0020
+#define PT_F35                 0x0030
+#define PT_F36                 0x0040
+#define PT_F37                 0x0050
+#define PT_F38                 0x0060
+#define PT_F39                 0x0070
+#define PT_F40                 0x0080
+#define PT_F41                 0x0090
+#define PT_F42                 0x00a0
+#define PT_F43                 0x00b0
+#define PT_F44                 0x00c0
+#define PT_F45                 0x00d0
+#define PT_F46                 0x00e0
+#define PT_F47                 0x00f0
+#define PT_F48                 0x0100
+#define PT_F49                 0x0110
+#define PT_F50                 0x0120
+#define PT_F51                 0x0130
+#define PT_F52                 0x0140
+#define PT_F53                 0x0150
+#define PT_F54                 0x0160
+#define PT_F55                 0x0170
+#define PT_F56                 0x0180
+#define PT_F57                 0x0190
+#define PT_F58                 0x01a0
+#define PT_F59                 0x01b0
+#define PT_F60                 0x01c0
+#define PT_F61                 0x01d0
+#define PT_F62                 0x01e0
+#define PT_F63                 0x01f0
+#define PT_F64                 0x0200
+#define PT_F65                 0x0210
+#define PT_F66                 0x0220
+#define PT_F67                 0x0230
+#define PT_F68                 0x0240
+#define PT_F69                 0x0250
+#define PT_F70                 0x0260
+#define PT_F71                 0x0270
+#define PT_F72                 0x0280
+#define PT_F73                 0x0290
+#define PT_F74                 0x02a0
+#define PT_F75                 0x02b0
+#define PT_F76                 0x02c0
+#define PT_F77                 0x02d0
+#define PT_F78                 0x02e0
+#define PT_F79                 0x02f0
+#define PT_F80                 0x0300
+#define PT_F81                 0x0310
+#define PT_F82                 0x0320
+#define PT_F83                 0x0330
+#define PT_F84                 0x0340
+#define PT_F85                 0x0350
+#define PT_F86                 0x0360
+#define PT_F87                 0x0370
+#define PT_F88                 0x0380
+#define PT_F89                 0x0390
+#define PT_F90                 0x03a0
+#define PT_F91                 0x03b0
+#define PT_F92                 0x03c0
+#define PT_F93                 0x03d0
+#define PT_F94                 0x03e0
+#define PT_F95                 0x03f0
+#define PT_F96                 0x0400
+#define PT_F97                 0x0410
+#define PT_F98                 0x0420
+#define PT_F99                 0x0430
+#define PT_F100                        0x0440
+#define PT_F101                        0x0450
+#define PT_F102                        0x0460
+#define PT_F103                        0x0470
+#define PT_F104                        0x0480
+#define PT_F105                        0x0490
+#define PT_F106                        0x04a0
+#define PT_F107                        0x04b0
+#define PT_F108                        0x04c0
+#define PT_F109                        0x04d0
+#define PT_F110                        0x04e0
+#define PT_F111                        0x04f0
+#define PT_F112                        0x0500
+#define PT_F113                        0x0510
+#define PT_F114                        0x0520
+#define PT_F115                        0x0530
+#define PT_F116                        0x0540
+#define PT_F117                        0x0550
+#define PT_F118                        0x0560
+#define PT_F119                        0x0570
+#define PT_F120                        0x0580
+#define PT_F121                        0x0590
+#define PT_F122                        0x05a0
+#define PT_F123                        0x05b0
+#define PT_F124                        0x05c0
+#define PT_F125                        0x05d0
+#define PT_F126                        0x05e0
+#define PT_F127                        0x05f0
+
+#define PT_NAT_BITS            0x0600
+
+#define PT_F2                  0x0610
+#define PT_F3                  0x0620
+#define PT_F4                  0x0630
+#define PT_F5                  0x0640
+#define PT_F10                 0x0650
+#define PT_F11                 0x0660
+#define PT_F12                 0x0670
+#define PT_F13                 0x0680
+#define PT_F14                 0x0690
+#define PT_F15                 0x06a0
+#define PT_F16                 0x06b0
+#define PT_F17                 0x06c0
+#define PT_F18                 0x06d0
+#define PT_F19                 0x06e0
+#define PT_F20                 0x06f0
+#define PT_F21                 0x0700
+#define PT_F22                 0x0710
+#define PT_F23                 0x0720
+#define PT_F24                 0x0730
+#define PT_F25                 0x0740
+#define PT_F26                 0x0750
+#define PT_F27                 0x0760
+#define PT_F28                 0x0770
+#define PT_F29                 0x0780
+#define PT_F30                 0x0790
+#define PT_F31                 0x07a0
+#define PT_R4                  0x07b0
+#define PT_R5                  0x07b8
+#define PT_R6                  0x07c0
+#define PT_R7                  0x07c8
+
+#define PT_B1                  0x07d8
+#define PT_B2                  0x07e0
+#define PT_B3                  0x07e8
+#define PT_B4                  0x07f0
+#define PT_B5                  0x07f8
+
+#define PT_AR_EC               0x0800
+#define PT_AR_LC               0x0808
+
+#define PT_CR_IPSR             0x0830
+#define PT_CR_IIP              0x0838
+#define PT_CFM                 0x0840
+#define PT_AR_UNAT             0x0848
+#define PT_AR_PFS              0x0850
+#define PT_AR_RSC              0x0858
+#define PT_AR_RNAT             0x0860
+#define PT_AR_BSPSTORE         0x0868
+#define PT_PR                  0x0870
+#define PT_B6                  0x0878
+#define PT_AR_BSP              0x0880  /* note: this points to the *end* of the backing store! */
+#define PT_R1                  0x0888
+#define PT_R2                  0x0890
+#define PT_R3                  0x0898
+#define PT_R12                 0x08a0
+#define PT_R13                 0x08a8
+#define PT_R14                 0x08b0
+#define PT_R15                 0x08b8
+#define PT_R8                  0x08c0
+#define PT_R9                  0x08c8
+#define PT_R10                 0x08d0
+#define PT_R11                 0x08d8
+#define PT_R16                 0x08e0
+#define PT_R17                 0x08e8
+#define PT_R18                 0x08f0
+#define PT_R19                 0x08f8
+#define PT_R20                 0x0900
+#define PT_R21                 0x0908
+#define PT_R22                 0x0910
+#define PT_R23                 0x0918
+#define PT_R24                 0x0920
+#define PT_R25                 0x0928
+#define PT_R26                 0x0930
+#define PT_R27                 0x0938
+#define PT_R28                 0x0940
+#define PT_R29                 0x0948
+#define PT_R30                 0x0950
+#define PT_R31                 0x0958
+#define PT_AR_CCV              0x0960
+#define PT_AR_FPSR             0x0968
+#define PT_B0                  0x0970
+#define PT_B7                  0x0978
+#define PT_F6                  0x0980
+#define PT_F7                  0x0990
+#define PT_F8                  0x09a0
+#define PT_F9                  0x09b0
+#define PT_AR_CSD              0x09c0
+#define PT_AR_SSD              0x09c8
+
+#define PT_DBR                 0x2000  /* data breakpoint registers */
+#define PT_IBR                 0x3000  /* instruction breakpoint registers */
+#define PT_PMD                 0x4000  /* performance monitoring counters */
+
+#endif /* _ASM_IA64_PTRACE_OFFSETS_H */
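
For reference, the PT_* values above are byte offsets into the ia64 user area, which is
what ptrace(PTRACE_PEEKUSER) indexes. A minimal user-space sketch (ia64 only; the helper
name, the pid handling and the choice of PT_R8 are illustrative, not part of this patch):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <asm/ptrace_offsets.h>

    /* Read r8 (the syscall return value register) of a traced, stopped child. */
    long read_child_r8(pid_t child)
    {
            long val;

            errno = 0;
            val = ptrace(PTRACE_PEEKUSER, child, (void *) PT_R8, NULL);
            if (val == -1 && errno != 0)
                    perror("ptrace(PTRACE_PEEKUSER)");
            return val;
    }
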
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/rse.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/rse.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,66 @@
+#ifndef _ASM_IA64_RSE_H
+#define _ASM_IA64_RSE_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *
+ * Register stack engine related helper functions.  This file may be
+ * used in applications, so be careful about the name-space and give
+ * some consideration to non-GNU C compilers (though __inline__ is
+ * fine).
+ */
+
+static __inline__ unsigned long
+ia64_rse_slot_num (unsigned long *addr)
+{
+       return (((unsigned long) addr) >> 3) & 0x3f;
+}
+
+/*
+ * Return TRUE if ADDR is the address of an RNAT slot.
+ */
+static __inline__ unsigned long
+ia64_rse_is_rnat_slot (unsigned long *addr)
+{
+       return ia64_rse_slot_num(addr) == 0x3f;
+}
+
+/*
+ * Returns the address of the RNAT slot that covers the slot at
+ * address SLOT_ADDR.
+ */
+static __inline__ unsigned long *
+ia64_rse_rnat_addr (unsigned long *slot_addr)
+{
+       return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3));
+}
+
+/*
+ * Calculate the number of registers in the dirty partition starting at BSPSTORE and
+ * ending at BSP.  This isn't simply (BSP-BSPSTORE)/8 because every 64th slot stores
+ * ar.rnat.
+ */
+static __inline__ unsigned long
+ia64_rse_num_regs (unsigned long *bspstore, unsigned long *bsp)
+{
+       unsigned long slots = (bsp - bspstore);
+
+       return slots - (ia64_rse_slot_num(bspstore) + slots)/0x40;
+}
+
+/*
+ * The inverse of the above: given bspstore and the number of
+ * registers, calculate ar.bsp.
+ */
+static __inline__ unsigned long *
+ia64_rse_skip_regs (unsigned long *addr, long num_regs)
+{
+       long delta = ia64_rse_slot_num(addr) + num_regs;
+
+       if (num_regs < 0)
+               delta -= 0x3e;
+       return addr + num_regs + delta/0x3f;
+}
+
+#endif /* _ASM_IA64_RSE_H */
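
The helpers above encode the rule that every 64th backing-store slot (slot number 0x3f)
holds ar.rnat rather than a stacked register, so register counts and slot counts differ.
A standalone sketch with made-up addresses (the include path and the choice of 96
registers are illustrative):

    #include <stdio.h>
    #include "rse.h"        /* the header added above; local path for illustration */

    int main(void)
    {
            /* Backing store assumed to start on slot 0 of an RNAT group. */
            unsigned long *bspstore = (unsigned long *) 0x9fffff000UL;
            unsigned long *bsp = ia64_rse_skip_regs(bspstore, 96);

            /* 96 stacked registers occupy 96 slots plus 1 RNAT slot here. */
            printf("slots used: %ld\n", (long) (bsp - bspstore));          /* 97 */
            printf("registers : %lu\n", ia64_rse_num_regs(bspstore, bsp)); /* 96 */
            return 0;
    }
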
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/rwsem.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/rwsem.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,188 @@
+/*
+ * asm-ia64/rwsem.h: R/W semaphores for ia64
+ *
+ * Copyright (C) 2003 Ken Chen <kenneth.w.chen@xxxxxxxxx>
+ * Copyright (C) 2003 Asit Mallick <asit.k.mallick@xxxxxxxxx>
+ *
+ * Based on asm-i386/rwsem.h and other architecture implementation.
+ *
+ * The MSW of the count is the negated number of active writers and
+ * waiting lockers, and the LSW is the total number of active locks.
+ *
+ * The lock count is initialized to 0 (no active and no waiting lockers).
+ *
+ * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case
+ * of an uncontended lock. Readers increment by 1 and see a positive value
+ * when uncontended, negative if there are writers (and possibly readers)
+ * waiting (in which case the reader goes to sleep).
+ */
+
+#ifndef _ASM_IA64_RWSEM_H
+#define _ASM_IA64_RWSEM_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include <asm/intrinsics.h>
+
+/*
+ * the semaphore definition
+ */
+struct rw_semaphore {
+       signed int              count;
+       spinlock_t              wait_lock;
+       struct list_head        wait_list;
+#if RWSEM_DEBUG
+       int                     debug;
+#endif
+};
+
+#define RWSEM_UNLOCKED_VALUE           0x00000000
+#define RWSEM_ACTIVE_BIAS              0x00000001
+#define RWSEM_ACTIVE_MASK              0x0000ffff
+#define RWSEM_WAITING_BIAS             (-0x00010000)
+#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
+/*
+ * initialization
+ */
+#if RWSEM_DEBUG
+#define __RWSEM_DEBUG_INIT      , 0
+#else
+#define __RWSEM_DEBUG_INIT     /* */
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+       { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+         LIST_HEAD_INIT((name).wait_list) \
+         __RWSEM_DEBUG_INIT }
+
+#define DECLARE_RWSEM(name) \
+       struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+static inline void
+init_rwsem (struct rw_semaphore *sem)
+{
+       sem->count = RWSEM_UNLOCKED_VALUE;
+       spin_lock_init(&sem->wait_lock);
+       INIT_LIST_HEAD(&sem->wait_list);
+#if RWSEM_DEBUG
+       sem->debug = 0;
+#endif
+}
+
+/*
+ * lock for reading
+ */
+static inline void
+__down_read (struct rw_semaphore *sem)
+{
+       int result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1);
+
+       if (result < 0)
+               rwsem_down_read_failed(sem);
+}
+
+/*
+ * lock for writing
+ */
+static inline void
+__down_write (struct rw_semaphore *sem)
+{
+       int old, new;
+
+       do {
+               old = sem->count;
+               new = old + RWSEM_ACTIVE_WRITE_BIAS;
+       } while (cmpxchg_acq(&sem->count, old, new) != old);
+
+       if (old != 0)
+               rwsem_down_write_failed(sem);
+}
+
+/*
+ * unlock after reading
+ */
+static inline void
+__up_read (struct rw_semaphore *sem)
+{
+       int result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1);
+
+       if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
+               rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void
+__up_write (struct rw_semaphore *sem)
+{
+       int old, new;
+
+       do {
+               old = sem->count;
+               new = old - RWSEM_ACTIVE_WRITE_BIAS;
+       } while (cmpxchg_rel(&sem->count, old, new) != old);
+
+       if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
+               rwsem_wake(sem);
+}
+
+/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int
+__down_read_trylock (struct rw_semaphore *sem)
+{
+       int tmp;
+       while ((tmp = sem->count) >= 0) {
+               if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
+                       return 1;
+               }
+       }
+       return 0;
+}
+
+/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int
+__down_write_trylock (struct rw_semaphore *sem)
+{
+       int tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
+                             RWSEM_ACTIVE_WRITE_BIAS);
+       return tmp == RWSEM_UNLOCKED_VALUE;
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void
+__downgrade_write (struct rw_semaphore *sem)
+{
+       int old, new;
+
+       do {
+               old = sem->count;
+               new = old - RWSEM_WAITING_BIAS;
+       } while (cmpxchg_rel(&sem->count, old, new) != old);
+
+       if (old < 0)
+               rwsem_downgrade_wake(sem);
+}
+
+/*
+ * Implement atomic add functionality.  These used to be "inline" functions, but GCC v3.1
+ * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
+ */
+#define rwsem_atomic_add(delta, sem)   atomic_add(delta, (atomic_t *)(&(sem)->count))
+#define rwsem_atomic_update(delta, sem)        atomic_add_return(delta, (atomic_t *)(&(sem)->count))
+
+#endif /* _ASM_IA64_RWSEM_H */
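
The count encoding described above can be sanity-checked with plain arithmetic: a lone
reader leaves the count positive, an uncontended writer leaves it at 0xffff0001 (viewed
as a 32-bit value), and a writer arriving while a reader holds the lock drives it
negative, which is what sends __down_write() to the slow path. A sketch using only the
constants from this header (no atomics, so it illustrates the encoding rather than the
real locking sequence):

    #include <stdio.h>

    #define RWSEM_UNLOCKED_VALUE    0x00000000
    #define RWSEM_ACTIVE_BIAS       0x00000001
    #define RWSEM_ACTIVE_MASK       0x0000ffff
    #define RWSEM_WAITING_BIAS      (-0x00010000)
    #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

    int main(void)
    {
            int count = RWSEM_UNLOCKED_VALUE;

            count += RWSEM_ACTIVE_BIAS;        /* one reader in: 0x00000001 */
            printf("reader held  : %#010x\n", (unsigned) count);

            count += RWSEM_ACTIVE_WRITE_BIAS;  /* writer arrives: negative */
            printf("writer queued: %#010x (negative => slow path)\n", (unsigned) count);

            count -= RWSEM_ACTIVE_BIAS;        /* reader releases */
            printf("active locks : %#x\n", (unsigned) (count & RWSEM_ACTIVE_MASK));
            return 0;
    }
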
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/sal.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/sal.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,840 @@
+#ifndef _ASM_IA64_SAL_H
+#define _ASM_IA64_SAL_H
+
+/*
+ * System Abstraction Layer definitions.
+ *
+ * This is based on version 2.5 of the manual "IA-64 System
+ * Abstraction Layer".
+ *
+ * Copyright (C) 2001 Intel
+ * Copyright (C) 2002 Jenna Hall <jenna.s.hall@xxxxxxxxx>
+ * Copyright (C) 2001 Fred Lewis <frederick.v.lewis@xxxxxxxxx>
+ * Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@xxxxxxxxxxxxxxxxxxxx>
+ *
+ * 02/01/04 J. Hall Updated Error Record Structures to conform to July 2001
+ *                 revision of the SAL spec.
+ * 01/01/03 fvlewis Updated Error Record Structures to conform with Nov. 2000
+ *                  revision of the SAL spec.
+ * 99/09/29 davidm     Updated for SAL 2.6.
+ * 00/03/29 cfleck      Updated SAL Error Logging info for processor (SAL 2.6)
+ *                      (plus examples of platform error info structures from smariset @ Intel)
+ */
+
+#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT         0
+#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT   1
+#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT   2
+#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT                3
+
+#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK       (1<<IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT)
+#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT)
+#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT)
+#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT      (1<<IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bcd.h>
+#include <linux/spinlock.h>
+#include <linux/efi.h>
+
+#include <asm/pal.h>
+#include <asm/system.h>
+#include <asm/fpu.h>
+
+extern spinlock_t sal_lock;
+
+/* SAL spec _requires_ eight args for each call. */
+#define __SAL_CALL(result,a0,a1,a2,a3,a4,a5,a6,a7)     \
+       result = (*ia64_sal)(a0,a1,a2,a3,a4,a5,a6,a7)
+
+# define SAL_CALL(result,args...) do {                         \
+       unsigned long __ia64_sc_flags;                          \
+       struct ia64_fpreg __ia64_sc_fr[6];                      \
+       ia64_save_scratch_fpregs(__ia64_sc_fr);                 \
+       spin_lock_irqsave(&sal_lock, __ia64_sc_flags);          \
+       __SAL_CALL(result, args);                               \
+       spin_unlock_irqrestore(&sal_lock, __ia64_sc_flags);     \
+       ia64_load_scratch_fpregs(__ia64_sc_fr);                 \
+} while (0)
+
+# define SAL_CALL_NOLOCK(result,args...) do {          \
+       unsigned long __ia64_scn_flags;                 \
+       struct ia64_fpreg __ia64_scn_fr[6];             \
+       ia64_save_scratch_fpregs(__ia64_scn_fr);        \
+       local_irq_save(__ia64_scn_flags);               \
+       __SAL_CALL(result, args);                       \
+       local_irq_restore(__ia64_scn_flags);            \
+       ia64_load_scratch_fpregs(__ia64_scn_fr);        \
+} while (0)
+
+# define SAL_CALL_REENTRANT(result,args...) do {       \
+       struct ia64_fpreg __ia64_scs_fr[6];             \
+       ia64_save_scratch_fpregs(__ia64_scs_fr);        \
+       preempt_disable();                              \
+       __SAL_CALL(result, args);                       \
+       preempt_enable();                               \
+       ia64_load_scratch_fpregs(__ia64_scs_fr);        \
+} while (0)
+
+#define SAL_SET_VECTORS                        0x01000000
+#define SAL_GET_STATE_INFO             0x01000001
+#define SAL_GET_STATE_INFO_SIZE                0x01000002
+#define SAL_CLEAR_STATE_INFO           0x01000003
+#define SAL_MC_RENDEZ                  0x01000004
+#define SAL_MC_SET_PARAMS              0x01000005
+#define SAL_REGISTER_PHYSICAL_ADDR     0x01000006
+
+#define SAL_CACHE_FLUSH                        0x01000008
+#define SAL_CACHE_INIT                 0x01000009
+#define SAL_PCI_CONFIG_READ            0x01000010
+#define SAL_PCI_CONFIG_WRITE           0x01000011
+#define SAL_FREQ_BASE                  0x01000012
+
+#define SAL_UPDATE_PAL                 0x01000020
+
+struct ia64_sal_retval {
+       /*
+        * A zero status value indicates call completed without error.
+        * A negative status value indicates reason of call failure.
+        * A positive status value indicates success but an
+        * informational value should be printed (e.g., "reboot for
+        * change to take effect").
+        */
+       s64 status;
+       u64 v0;
+       u64 v1;
+       u64 v2;
+};
+
+typedef struct ia64_sal_retval (*ia64_sal_handler) (u64, ...);
+
+enum {
+       SAL_FREQ_BASE_PLATFORM = 0,
+       SAL_FREQ_BASE_INTERVAL_TIMER = 1,
+       SAL_FREQ_BASE_REALTIME_CLOCK = 2
+};
+
+/*
+ * The SAL system table is followed by a variable number of variable
+ * length descriptors.  The structure of these descriptors follows
+ * below.
+ * The definition follows SAL specs from July 2000
+ */
+struct ia64_sal_systab {
+       u8 signature[4];        /* should be "SST_" */
+       u32 size;               /* size of this table in bytes */
+       u8 sal_rev_minor;
+       u8 sal_rev_major;
+       u16 entry_count;        /* # of entries in variable portion */
+       u8 checksum;
+       u8 reserved1[7];
+       u8 sal_a_rev_minor;
+       u8 sal_a_rev_major;
+       u8 sal_b_rev_minor;
+       u8 sal_b_rev_major;
+       /* oem_id & product_id: terminating NUL is missing if string is exactly 32 bytes long. */
+       u8 oem_id[32];
+       u8 product_id[32];      /* ASCII product id  */
+       u8 reserved2[8];
+};
+
+enum sal_systab_entry_type {
+       SAL_DESC_ENTRY_POINT = 0,
+       SAL_DESC_MEMORY = 1,
+       SAL_DESC_PLATFORM_FEATURE = 2,
+       SAL_DESC_TR = 3,
+       SAL_DESC_PTC = 4,
+       SAL_DESC_AP_WAKEUP = 5
+};
+
+/*
+ * Entry type: Size:
+ *     0       48
+ *     1       32
+ *     2       16
+ *     3       32
+ *     4       16
+ *     5       16
+ */
+#define SAL_DESC_SIZE(type)    "\060\040\020\040\020\020"[(unsigned) type]
+
+typedef struct ia64_sal_desc_entry_point {
+       u8 type;
+       u8 reserved1[7];
+       u64 pal_proc;
+       u64 sal_proc;
+       u64 gp;
+       u8 reserved2[16];
+}ia64_sal_desc_entry_point_t;
+
+typedef struct ia64_sal_desc_memory {
+       u8 type;
+       u8 used_by_sal; /* needs to be mapped for SAL? */
+       u8 mem_attr;            /* current memory attribute setting */
+       u8 access_rights;       /* access rights set up by SAL */
+       u8 mem_attr_mask;       /* mask of supported memory attributes */
+       u8 reserved1;
+       u8 mem_type;            /* memory type */
+       u8 mem_usage;           /* memory usage */
+       u64 addr;               /* physical address of memory */
+       u32 length;     /* length (multiple of 4KB pages) */
+       u32 reserved2;
+       u8 oem_reserved[8];
+} ia64_sal_desc_memory_t;
+
+typedef struct ia64_sal_desc_platform_feature {
+       u8 type;
+       u8 feature_mask;
+       u8 reserved1[14];
+} ia64_sal_desc_platform_feature_t;
+
+typedef struct ia64_sal_desc_tr {
+       u8 type;
+       u8 tr_type;             /* 0 == instruction, 1 == data */
+       u8 regnum;              /* translation register number */
+       u8 reserved1[5];
+       u64 addr;               /* virtual address of area covered */
+       u64 page_size;          /* encoded page size */
+       u8 reserved2[8];
+} ia64_sal_desc_tr_t;
+
+typedef struct ia64_sal_desc_ptc {
+       u8 type;
+       u8 reserved1[3];
+       u32 num_domains;        /* # of coherence domains */
+       u64 domain_info;        /* physical address of domain info table */
+} ia64_sal_desc_ptc_t;
+
+typedef struct ia64_sal_ptc_domain_info {
+       u64 proc_count;         /* number of processors in domain */
+       u64 proc_list;          /* physical address of LID array */
+} ia64_sal_ptc_domain_info_t;
+
+typedef struct ia64_sal_ptc_domain_proc_entry {
+       u64 id  : 8;            /* id of processor */
+       u64 eid : 8;            /* eid of processor */
+} ia64_sal_ptc_domain_proc_entry_t;
+
+
+#define IA64_SAL_AP_EXTERNAL_INT 0
+
+typedef struct ia64_sal_desc_ap_wakeup {
+       u8 type;
+       u8 mechanism;           /* 0 == external interrupt */
+       u8 reserved1[6];
+       u64 vector;             /* interrupt vector in range 0x10-0xff */
+} ia64_sal_desc_ap_wakeup_t ;
+
+extern ia64_sal_handler ia64_sal;
+extern struct ia64_sal_desc_ptc *ia64_ptc_domain_info;
+
+extern unsigned short sal_revision;    /* supported SAL spec revision */
+extern unsigned short sal_version;     /* SAL version; OEM dependent */
+#define SAL_VERSION_CODE(major, minor) ((BIN2BCD(major) << 8) | BIN2BCD(minor))
+
+extern const char *ia64_sal_strerror (long status);
+extern void ia64_sal_init (struct ia64_sal_systab *sal_systab);
+
+/* SAL information type encodings */
+enum {
+       SAL_INFO_TYPE_MCA  = 0,         /* Machine check abort information */
+        SAL_INFO_TYPE_INIT = 1,                /* Init information */
+        SAL_INFO_TYPE_CMC  = 2,                /* Corrected machine check information */
+        SAL_INFO_TYPE_CPE  = 3         /* Corrected platform error information */
+};
+
+/* Encodings for machine check parameter types */
+enum {
+       SAL_MC_PARAM_RENDEZ_INT    = 1, /* Rendezvous interrupt */
+       SAL_MC_PARAM_RENDEZ_WAKEUP = 2, /* Wakeup */
+       SAL_MC_PARAM_CPE_INT       = 3  /* Corrected Platform Error Int */
+};
+
+/* Encodings for rendezvous mechanisms */
+enum {
+       SAL_MC_PARAM_MECHANISM_INT = 1, /* Use interrupt */
+       SAL_MC_PARAM_MECHANISM_MEM = 2  /* Use memory synchronization variable*/
+};
+
+/* Encodings for vectors which can be registered by the OS with SAL */
+enum {
+       SAL_VECTOR_OS_MCA         = 0,
+       SAL_VECTOR_OS_INIT        = 1,
+       SAL_VECTOR_OS_BOOT_RENDEZ = 2
+};
+
+/* Encodings for mca_opt parameter sent to SAL_MC_SET_PARAMS */
+#define        SAL_MC_PARAM_RZ_ALWAYS          0x1
+#define        SAL_MC_PARAM_BINIT_ESCALATE     0x10
+
+/*
+ * Definition of the SAL Error Log from the SAL spec
+ */
+
+/* SAL Error Record Section GUID Definitions */
+#define SAL_PROC_DEV_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf1, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_MEM_DEV_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf2, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_SEL_DEV_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf3, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_PCI_BUS_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf4, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf5, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_PCI_COMP_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf6, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_SPECIFIC_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf7, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_HOST_CTLR_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_BUS_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+
+#define MAX_CACHE_ERRORS       6
+#define MAX_TLB_ERRORS         6
+#define MAX_BUS_ERRORS         1
+
+/* Definition of version  according to SAL spec for logging purposes */
+typedef struct sal_log_revision {
+       u8 minor;               /* BCD (0..99) */
+       u8 major;               /* BCD (0..99) */
+} sal_log_revision_t;
+
+/* Definition of timestamp according to SAL spec for logging purposes */
+typedef struct sal_log_timestamp {
+       u8 slh_second;          /* Second (0..59) */
+       u8 slh_minute;          /* Minute (0..59) */
+       u8 slh_hour;            /* Hour (0..23) */
+       u8 slh_reserved;
+       u8 slh_day;             /* Day (1..31) */
+       u8 slh_month;           /* Month (1..12) */
+       u8 slh_year;            /* Year (00..99) */
+       u8 slh_century;         /* Century (19, 20, 21, ...) */
+} sal_log_timestamp_t;
+
+/* Definition of log record  header structures */
+typedef struct sal_log_record_header {
+       u64 id;                         /* Unique monotonically increasing ID */
+       sal_log_revision_t revision;    /* Major and Minor revision of header */
+       u16 severity;                   /* Error Severity */
+       u32 len;                        /* Length of this error log in bytes */
+       sal_log_timestamp_t timestamp;  /* Timestamp */
+       efi_guid_t platform_guid;       /* Unique OEM Platform ID */
+} sal_log_record_header_t;
+
+#define sal_log_severity_recoverable   0
+#define sal_log_severity_fatal         1
+#define sal_log_severity_corrected     2
+
+/* Definition of log section header structures */
+typedef struct sal_log_sec_header {
+    efi_guid_t guid;                   /* Unique Section ID */
+    sal_log_revision_t revision;       /* Major and Minor revision of Section */
+    u16 reserved;
+    u32 len;                           /* Section length */
+} sal_log_section_hdr_t;
+
+typedef struct sal_log_mod_error_info {
+       struct {
+               u64 check_info              : 1,
+                   requestor_identifier    : 1,
+                   responder_identifier    : 1,
+                   target_identifier       : 1,
+                   precise_ip              : 1,
+                   reserved                : 59;
+       } valid;
+       u64 check_info;
+       u64 requestor_identifier;
+       u64 responder_identifier;
+       u64 target_identifier;
+       u64 precise_ip;
+} sal_log_mod_error_info_t;
+
+typedef struct sal_processor_static_info {
+       struct {
+               u64 minstate        : 1,
+                   br              : 1,
+                   cr              : 1,
+                   ar              : 1,
+                   rr              : 1,
+                   fr              : 1,
+                   reserved        : 58;
+       } valid;
+       pal_min_state_area_t min_state_area;
+       u64 br[8];
+       u64 cr[128];
+       u64 ar[128];
+       u64 rr[8];
+       struct ia64_fpreg __attribute__ ((packed)) fr[128];
+} sal_processor_static_info_t;
+
+struct sal_cpuid_info {
+       u64 regs[5];
+       u64 reserved;
+};
+
+typedef struct sal_log_processor_info {
+       sal_log_section_hdr_t header;
+       struct {
+               u64 proc_error_map      : 1,
+                   proc_state_param    : 1,
+                   proc_cr_lid         : 1,
+                   psi_static_struct   : 1,
+                   num_cache_check     : 4,
+                   num_tlb_check       : 4,
+                   num_bus_check       : 4,
+                   num_reg_file_check  : 4,
+                   num_ms_check        : 4,
+                   cpuid_info          : 1,
+                   reserved1           : 39;
+       } valid;
+       u64 proc_error_map;
+       u64 proc_state_parameter;
+       u64 proc_cr_lid;
+       /*
+        * The rest of this structure consists of variable-length arrays, which can't be
+        * expressed in C.
+        */
+       sal_log_mod_error_info_t info[0];
+       /*
+        * This is what the rest looked like if C supported variable-length arrays:
+        *
+        * sal_log_mod_error_info_t cache_check_info[.valid.num_cache_check];
+        * sal_log_mod_error_info_t tlb_check_info[.valid.num_tlb_check];
+        * sal_log_mod_error_info_t bus_check_info[.valid.num_bus_check];
+        * sal_log_mod_error_info_t reg_file_check_info[.valid.num_reg_file_check];
+        * sal_log_mod_error_info_t ms_check_info[.valid.num_ms_check];
+        * struct sal_cpuid_info cpuid_info;
+        * sal_processor_static_info_t processor_static_info;
+        */
+} sal_log_processor_info_t;
+
+/* Given a sal_log_processor_info_t pointer, return a pointer to the processor_static_info: */
+#define SAL_LPI_PSI_INFO(l)                                                                    \
+({     sal_log_processor_info_t *_l = (l);                                                     \
+       ((sal_processor_static_info_t *)                                                        \
+        ((char *) _l->info + ((_l->valid.num_cache_check + _l->valid.num_tlb_check             \
+                               + _l->valid.num_bus_check + _l->valid.num_reg_file_check        \
+                               + _l->valid.num_ms_check) * sizeof(sal_log_mod_error_info_t)    \
+                              + sizeof(struct sal_cpuid_info))));                              \
+})
+
+/* platform error log structures */
+
+typedef struct sal_log_mem_dev_err_info {
+       sal_log_section_hdr_t header;
+       struct {
+               u64 error_status    : 1,
+                   physical_addr   : 1,
+                   addr_mask       : 1,
+                   node            : 1,
+                   card            : 1,
+                   module          : 1,
+                   bank            : 1,
+                   device          : 1,
+                   row             : 1,
+                   column          : 1,
+                   bit_position    : 1,
+                   requestor_id    : 1,
+                   responder_id    : 1,
+                   target_id       : 1,
+                   bus_spec_data   : 1,
+                   oem_id          : 1,
+                   oem_data        : 1,
+                   reserved        : 47;
+       } valid;
+       u64 error_status;
+       u64 physical_addr;
+       u64 addr_mask;
+       u16 node;
+       u16 card;
+       u16 module;
+       u16 bank;
+       u16 device;
+       u16 row;
+       u16 column;
+       u16 bit_position;
+       u64 requestor_id;
+       u64 responder_id;
+       u64 target_id;
+       u64 bus_spec_data;
+       u8 oem_id[16];
+       u8 oem_data[1];                 /* Variable length data */
+} sal_log_mem_dev_err_info_t;
+
+typedef struct sal_log_sel_dev_err_info {
+       sal_log_section_hdr_t header;
+       struct {
+               u64 record_id       : 1,
+                   record_type     : 1,
+                   generator_id    : 1,
+                   evm_rev         : 1,
+                   sensor_type     : 1,
+                   sensor_num      : 1,
+                   event_dir       : 1,
+                   event_data1     : 1,
+                   event_data2     : 1,
+                   event_data3     : 1,
+                   reserved        : 54;
+       } valid;
+       u16 record_id;
+       u8 record_type;
+       u8 timestamp[4];
+       u16 generator_id;
+       u8 evm_rev;
+       u8 sensor_type;
+       u8 sensor_num;
+       u8 event_dir;
+       u8 event_data1;
+       u8 event_data2;
+       u8 event_data3;
+} sal_log_sel_dev_err_info_t;
+
+typedef struct sal_log_pci_bus_err_info {
+       sal_log_section_hdr_t header;
+       struct {
+               u64 err_status      : 1,
+                   err_type        : 1,
+                   bus_id          : 1,
+                   bus_address     : 1,
+                   bus_data        : 1,
+                   bus_cmd         : 1,
+                   requestor_id    : 1,
+                   responder_id    : 1,
+                   target_id       : 1,
+                   oem_data        : 1,
+                   reserved        : 54;
+       } valid;
+       u64 err_status;
+       u16 err_type;
+       u16 bus_id;
+       u32 reserved;
+       u64 bus_address;
+       u64 bus_data;
+       u64 bus_cmd;
+       u64 requestor_id;
+       u64 responder_id;
+       u64 target_id;
+       u8 oem_data[1];                 /* Variable length data */
+} sal_log_pci_bus_err_info_t;
+
+typedef struct sal_log_smbios_dev_err_info {
+       sal_log_section_hdr_t header;
+       struct {
+               u64 event_type      : 1,
+                   length          : 1,
+                   time_stamp      : 1,
+                   data            : 1,
+                   reserved1       : 60;
+       } valid;
+       u8 event_type;
+       u8 length;
+       u8 time_stamp[6];
+       u8 data[1];                     /* data of variable length, length == slsmb_length */
+} sal_log_smbios_dev_err_info_t;
+
+typedef struct sal_log_pci_comp_err_info {
+       sal_log_section_hdr_t header;
+       struct {
+               u64 err_status      : 1,
+                   comp_info       : 1,
+                   num_mem_regs    : 1,
+                   num_io_regs     : 1,
+                   reg_data_pairs  : 1,
+                   oem_data        : 1,
+                   reserved        : 58;
+       } valid;
+       u64 err_status;
+       struct {
+               u16 vendor_id;
+               u16 device_id;
+               u8 class_code[3];
+               u8 func_num;
+               u8 dev_num;
+               u8 bus_num;
+               u8 seg_num;
+               u8 reserved[5];
+       } comp_info;
+       u32 num_mem_regs;
+       u32 num_io_regs;
+       u64 reg_data_pairs[1];
+       /*
+        * array of address/data register pairs is num_mem_regs + num_io_regs elements
+        * long.  Each array element consists of a u64 address followed by a u64 data
+        * value.  The oem_data array immediately follows the reg_data_pairs array
+        */
+       u8 oem_data[1];                 /* Variable length data */
+} sal_log_pci_comp_err_info_t;
+
+typedef struct sal_log_plat_specific_err_info {
+       sal_log_section_hdr_t header;
+       struct {
+               u64 err_status      : 1,
+                   guid            : 1,
+                   oem_data        : 1,
+                   reserved        : 61;
+       } valid;
+       u64 err_status;
+       efi_guid_t guid;
+       u8 oem_data[1];                 /* platform specific variable length data */
+} sal_log_plat_specific_err_info_t;
+
+typedef struct sal_log_host_ctlr_err_info {
+       sal_log_section_hdr_t header;
+       struct {
+               u64 err_status      : 1,
+                   requestor_id    : 1,
+                   responder_id    : 1,
+                   target_id       : 1,
+                   bus_spec_data   : 1,
+                   oem_data        : 1,
+                   reserved        : 58;
+       } valid;
+       u64 err_status;
+       u64 requestor_id;
+       u64 responder_id;
+       u64 target_id;
+       u64 bus_spec_data;
+       u8 oem_data[1];                 /* Variable length OEM data */
+} sal_log_host_ctlr_err_info_t;
+
+typedef struct sal_log_plat_bus_err_info {
+       sal_log_section_hdr_t header;
+       struct {
+               u64 err_status      : 1,
+                   requestor_id    : 1,
+                   responder_id    : 1,
+                   target_id       : 1,
+                   bus_spec_data   : 1,
+                   oem_data        : 1,
+                   reserved        : 58;
+       } valid;
+       u64 err_status;
+       u64 requestor_id;
+       u64 responder_id;
+       u64 target_id;
+       u64 bus_spec_data;
+       u8 oem_data[1];                 /* Variable length OEM data */
+} sal_log_plat_bus_err_info_t;
+
+/* Overall platform error section structure */
+typedef union sal_log_platform_err_info {
+       sal_log_mem_dev_err_info_t mem_dev_err;
+       sal_log_sel_dev_err_info_t sel_dev_err;
+       sal_log_pci_bus_err_info_t pci_bus_err;
+       sal_log_smbios_dev_err_info_t smbios_dev_err;
+       sal_log_pci_comp_err_info_t pci_comp_err;
+       sal_log_plat_specific_err_info_t plat_specific_err;
+       sal_log_host_ctlr_err_info_t host_ctlr_err;
+       sal_log_plat_bus_err_info_t plat_bus_err;
+} sal_log_platform_err_info_t;
+
+/* SAL log over-all, multi-section error record structure (processor+platform) */
+typedef struct err_rec {
+       sal_log_record_header_t sal_elog_header;
+       sal_log_processor_info_t proc_err;
+       sal_log_platform_err_info_t plat_err;
+       u8 oem_data_pad[1024];
+} ia64_err_rec_t;
+
+/*
+ * Now define a couple of inline functions for improved type checking
+ * and convenience.
+ */
+static inline long
+ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
+                   unsigned long *drift_info)
+{
+       struct ia64_sal_retval isrv;
+
+       SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
+       *ticks_per_second = isrv.v0;
+       *drift_info = isrv.v1;
+       return isrv.status;
+}
+
+/* Flush all the processor and platform level instruction and/or data caches */
+static inline s64
+ia64_sal_cache_flush (u64 cache_type)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
+       return isrv.status;
+}
+
+
+/* Initialize all the processor and platform level instruction and data caches */
+static inline s64
+ia64_sal_cache_init (void)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SAL_CACHE_INIT, 0, 0, 0, 0, 0, 0, 0);
+       return isrv.status;
+}
+
+/*
+ * Clear the processor and platform information logged by SAL with respect to the machine
+ * state at the time of MCA's, INITs, CMCs, or CPEs.
+ */
+static inline s64
+ia64_sal_clear_state_info (u64 sal_info_type)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL_REENTRANT(isrv, SAL_CLEAR_STATE_INFO, sal_info_type, 0,
+                     0, 0, 0, 0, 0);
+       return isrv.status;
+}
+
+
+/* Get the processor and platform information logged by SAL with respect to the machine
+ * state at the time of the MCAs, INITs, CMCs, or CPEs.
+ */
+static inline u64
+ia64_sal_get_state_info (u64 sal_info_type, u64 *sal_info)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
+                     sal_info, 0, 0, 0, 0);
+       if (isrv.status)
+               return 0;
+
+       return isrv.v0;
+}
+
+/*
+ * Get the maximum size of the information logged by SAL with respect to the machine state
+ * at the time of MCAs, INITs, CMCs, or CPEs.
+ */
+static inline u64
+ia64_sal_get_state_info_size (u64 sal_info_type)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO_SIZE, sal_info_type, 0,
+                     0, 0, 0, 0, 0);
+       if (isrv.status)
+               return 0;
+       return isrv.v0;
+}
+
+/*
+ * Causes the processor to go into a spin loop within SAL where SAL awaits a wakeup from
+ * the monarch processor.  Must not lock, because it will not return on any cpu until the
+ * monarch processor sends a wake up.
+ */
+static inline s64
+ia64_sal_mc_rendez (void)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL_NOLOCK(isrv, SAL_MC_RENDEZ, 0, 0, 0, 0, 0, 0, 0);
+       return isrv.status;
+}
+
+/*
+ * Allow the OS to specify the interrupt number to be used by SAL to interrupt the OS during
+ * the machine check rendezvous sequence as well as the mechanism to wake up the
+ * non-monarch processor at the end of machine check processing.
+ * Returns the complete ia64_sal_retval because some calls return more than just a status
+ * value.
+ */
+static inline struct ia64_sal_retval
+ia64_sal_mc_set_params (u64 param_type, u64 i_or_m, u64 i_or_m_val, u64 timeout, u64 rz_always)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SAL_MC_SET_PARAMS, param_type, i_or_m, i_or_m_val,
+                timeout, rz_always, 0, 0);
+       return isrv;
+}
+
+/* Read from PCI configuration space */
+static inline s64
+ia64_sal_pci_config_read (u64 pci_config_addr, int type, u64 size, u64 *value)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SAL_PCI_CONFIG_READ, pci_config_addr, size, type, 0, 0, 0, 0);
+       if (value)
+               *value = isrv.v0;
+       return isrv.status;
+}
+
+/* Write to PCI configuration space */
+static inline s64
+ia64_sal_pci_config_write (u64 pci_config_addr, int type, u64 size, u64 value)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SAL_PCI_CONFIG_WRITE, pci_config_addr, size, value,
+                type, 0, 0, 0);
+       return isrv.status;
+}
+
+/*
+ * Register physical addresses of locations needed by SAL when SAL procedures are invoked
+ * in virtual mode.
+ */
+static inline s64
+ia64_sal_register_physical_addr (u64 phys_entry, u64 phys_addr)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SAL_REGISTER_PHYSICAL_ADDR, phys_entry, phys_addr,
+                0, 0, 0, 0, 0);
+       return isrv.status;
+}
+
+/*
+ * Register software dependent code locations within SAL. These locations are handlers or
+ * entry points where SAL will pass control for the specified event. These event handlers
+ * are for the boot rendezvous, MCAs and INIT scenarios.
+ */
+static inline s64
+ia64_sal_set_vectors (u64 vector_type,
+                     u64 handler_addr1, u64 gp1, u64 handler_len1,
+                     u64 handler_addr2, u64 gp2, u64 handler_len2)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SAL_SET_VECTORS, vector_type,
+                       handler_addr1, gp1, handler_len1,
+                       handler_addr2, gp2, handler_len2);
+
+       return isrv.status;
+}
+
+/* Update the contents of PAL block in the non-volatile storage device */
+static inline s64
+ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size,
+                    u64 *error_code, u64 *scratch_buf_size_needed)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SAL_UPDATE_PAL, param_buf, scratch_buf, scratch_buf_size,
+                0, 0, 0, 0);
+       if (error_code)
+               *error_code = isrv.v0;
+       if (scratch_buf_size_needed)
+               *scratch_buf_size_needed = isrv.v1;
+       return isrv.status;
+}
+
+extern unsigned long sal_platform_features;
+
+extern int (*salinfo_platform_oemdata)(const u8 *, u8 **, u64 *);
+
+struct sal_ret_values {
+       long r8; long r9; long r10; long r11;
+};
+
+#define IA64_SAL_OEMFUNC_MIN           0x02000000
+#define IA64_SAL_OEMFUNC_MAX           0x03ffffff
+
+extern int ia64_sal_oemcall(struct ia64_sal_retval *, u64, u64, u64, u64, u64,
+                           u64, u64, u64);
+extern int ia64_sal_oemcall_nolock(struct ia64_sal_retval *, u64, u64, u64,
+                                  u64, u64, u64, u64, u64);
+extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64,
+                                     u64, u64, u64, u64, u64);
+
+extern void ia64_sal_handler_init(void *entry_point, void *gpval);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_SAL_H */
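
All of the inline wrappers above funnel through SAL_CALL() and report back via struct
ia64_sal_retval, with status == 0 meaning success. A sketch of how a caller typically
consumes one of them, here ia64_sal_freq_base(); the function name and message text are
illustrative, everything else comes from this header:

    /* Kernel-side fragment; assumes <asm/sal.h> and that SAL is initialized. */
    static void report_platform_freq(void)
    {
            unsigned long ticks_per_sec, drift;
            long status;

            status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
                                        &ticks_per_sec, &drift);
            if (status != 0) {
                    printk("SAL_FREQ_BASE failed: %s\n", ia64_sal_strerror(status));
                    return;
            }
            printk("platform base frequency: %lu ticks/s (drift info %lu)\n",
                   ticks_per_sec, drift);
    }
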
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/scatterlist.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/scatterlist.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,28 @@
+#ifndef _ASM_IA64_SCATTERLIST_H
+#define _ASM_IA64_SCATTERLIST_H
+
+/*
+ * Modified 1998-1999, 2001-2002, 2004
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>, Hewlett-Packard Co
+ */
+
+struct scatterlist {
+       struct page *page;
+       unsigned int offset;
+       unsigned int length;    /* buffer length */
+
+       dma_addr_t dma_address;
+       unsigned int dma_length;
+};
+
+/*
+ * It used to be that ISA_DMA_THRESHOLD had something to do with the
+ * DMA-limits of ISA-devices.  Nowadays, its only remaining use (apart
+ * from the aha1542.c driver, which isn't 64-bit clean anyhow) is to
+ * tell the block-layer (via BLK_BOUNCE_ISA) what the max. physical
+ * address of a page is that is allocated with GFP_DMA.  On IA-64,
+ * that's 4GB - 1.
+ */
+#define ISA_DMA_THRESHOLD      0xffffffff
+
+#endif /* _ASM_IA64_SCATTERLIST_H */
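
A scatterlist entry of this vintage is filled in directly by the driver; the
dma_address/dma_length pair is written back by the DMA mapping layer. A sketch (the
function name and parameters are illustrative; assumes kernel context with this header
and <linux/mm.h>):

    static void fill_one_sg_entry(struct scatterlist *sg, struct page *pg,
                                  unsigned int off, unsigned int len)
    {
            sg->page   = pg;    /* buffer lives in this page */
            sg->offset = off;   /* starting at this byte offset */
            sg->length = len;   /* and runs for this many bytes */
            /* dma_address/dma_length are filled in later by dma_map_sg(). */
    }
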
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/sections.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/sections.h Tue Aug  2 23:59:09 2005
@@ -0,0 +1,22 @@
+#ifndef _ASM_IA64_SECTIONS_H
+#define _ASM_IA64_SECTIONS_H
+
+/*
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <asm-generic/sections.h>
+
+extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
+extern char __start___vtop_patchlist[], __end___vtop_patchlist[];
+extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];
+extern char __start_gate_section[];
+extern char __start_gate_mckinley_e9_patchlist[], __end_gate_mckinley_e9_patchlist[];
+extern char __start_gate_vtop_patchlist[], __end_gate_vtop_patchlist[];
+extern char __start_gate_fsyscall_patchlist[], __end_gate_fsyscall_patchlist[];
+extern char __start_gate_brl_fsys_bubble_down_patchlist[], __end_gate_brl_fsys_bubble_down_patchlist[];
+extern char __start_unwind[], __end_unwind[];
+
+#endif /* _ASM_IA64_SECTIONS_H */
+
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/semaphore.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/semaphore.h        Tue Aug  2 23:59:09 2005
@@ -0,0 +1,102 @@
+#ifndef _ASM_IA64_SEMAPHORE_H
+#define _ASM_IA64_SEMAPHORE_H
+
+/*
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <linux/wait.h>
+#include <linux/rwsem.h>
+
+#include <asm/atomic.h>
+
+struct semaphore {
+       atomic_t count;
+       int sleepers;
+       wait_queue_head_t wait;
+};
+
+#define __SEMAPHORE_INITIALIZER(name, n)                               \
+{                                                                      \
+       .count          = ATOMIC_INIT(n),                               \
+       .sleepers       = 0,                                            \
+       .wait           = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)    \
+}
+
+#define __MUTEX_INITIALIZER(name)      __SEMAPHORE_INITIALIZER(name,1)
+
+#define __DECLARE_SEMAPHORE_GENERIC(name,count)                                        \
+       struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
+
+#define DECLARE_MUTEX(name)            __DECLARE_SEMAPHORE_GENERIC(name, 1)
+#define DECLARE_MUTEX_LOCKED(name)     __DECLARE_SEMAPHORE_GENERIC(name, 0)
+
+static inline void
+sema_init (struct semaphore *sem, int val)
+{
+       *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
+}
+
+static inline void
+init_MUTEX (struct semaphore *sem)
+{
+       sema_init(sem, 1);
+}
+
+static inline void
+init_MUTEX_LOCKED (struct semaphore *sem)
+{
+       sema_init(sem, 0);
+}
+
+extern void __down (struct semaphore * sem);
+extern int  __down_interruptible (struct semaphore * sem);
+extern int  __down_trylock (struct semaphore * sem);
+extern void __up (struct semaphore * sem);
+
+/*
+ * Atomically decrement the semaphore's count.  If it goes negative,
+ * block the calling thread in the TASK_UNINTERRUPTIBLE state.
+ */
+static inline void
+down (struct semaphore *sem)
+{
+       might_sleep();
+       if (atomic_dec_return(&sem->count) < 0)
+               __down(sem);
+}
+
+/*
+ * Atomically decrement the semaphore's count.  If it goes negative,
+ * block the calling thread in the TASK_INTERRUPTIBLE state.
+ */
+static inline int
+down_interruptible (struct semaphore * sem)
+{
+       int ret = 0;
+
+       might_sleep();
+       if (atomic_dec_return(&sem->count) < 0)
+               ret = __down_interruptible(sem);
+       return ret;
+}
+
+static inline int
+down_trylock (struct semaphore *sem)
+{
+       int ret = 0;
+
+       if (atomic_dec_return(&sem->count) < 0)
+               ret = __down_trylock(sem);
+       return ret;
+}
+
+static inline void
+up (struct semaphore * sem)
+{
+       if (atomic_inc_return(&sem->count) <= 0)
+               __up(sem);
+}
+
+#endif /* _ASM_IA64_SEMAPHORE_H */
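
The common pattern for the API above is a statically declared mutex-style semaphore
bracketed by down_interruptible()/up(). A sketch (the protected variable and function
name are illustrative; assumes kernel context with this header):

    static DECLARE_MUTEX(config_sem);   /* binary semaphore, count starts at 1 */
    static int config_value;

    static int set_config(int v)
    {
            int ret = down_interruptible(&config_sem);
            if (ret)
                    return ret;         /* interrupted by a signal */
            config_value = v;           /* critical section */
            up(&config_sem);
            return 0;
    }
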
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/setup.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/setup.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,6 @@
+#ifndef __IA64_SETUP_H
+#define __IA64_SETUP_H
+
+#define COMMAND_LINE_SIZE      512
+
+#endif
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/sigcontext.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/sigcontext.h       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,70 @@
+#ifndef _ASM_IA64_SIGCONTEXT_H
+#define _ASM_IA64_SIGCONTEXT_H
+
+/*
+ * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999, 2001 David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <asm/fpu.h>
+
+#define IA64_SC_FLAG_ONSTACK_BIT               0       /* is handler running on signal stack? */
+#define IA64_SC_FLAG_IN_SYSCALL_BIT            1       /* did signal interrupt a syscall? */
+#define IA64_SC_FLAG_FPH_VALID_BIT             2       /* is state in f[32]-f[127] valid? */
+
+#define IA64_SC_FLAG_ONSTACK           (1 << IA64_SC_FLAG_ONSTACK_BIT)
+#define IA64_SC_FLAG_IN_SYSCALL                (1 << IA64_SC_FLAG_IN_SYSCALL_BIT)
+#define IA64_SC_FLAG_FPH_VALID         (1 << IA64_SC_FLAG_FPH_VALID_BIT)
+
+# ifndef __ASSEMBLY__
+
+/*
+ * Note on handling of register backing store: sc_ar_bsp contains the address that would
+ * be found in ar.bsp after executing a "cover" instruction in the context in which the
+ * signal was raised.  If signal delivery required switching to an alternate signal stack
+ * (sc_rbs_base is not NULL), the "dirty" partition (as it would exist after executing the
+ * imaginary "cover" instruction) is backed by the *alternate* signal stack, not the
+ * original one.  In this case, sc_rbs_base contains the base address of the new register
+ * backing store.  The number of registers in the dirty partition can be calculated as:
+ *
+ *   ndirty = ia64_rse_num_regs(sc_rbs_base, sc_rbs_base + (sc_loadrs >> 16))
+ *
+ */
+
+struct sigcontext {
+       unsigned long           sc_flags;       /* see manifest constants above */
+       unsigned long           sc_nat;         /* bit i == 1 iff scratch reg gr[i] is a NaT */
+       stack_t                 sc_stack;       /* previously active stack */
+
+       unsigned long           sc_ip;          /* instruction pointer */
+       unsigned long           sc_cfm;         /* current frame marker */
+       unsigned long           sc_um;          /* user mask bits */
+       unsigned long           sc_ar_rsc;      /* register stack configuration register */
+       unsigned long           sc_ar_bsp;      /* backing store pointer */
+       unsigned long           sc_ar_rnat;     /* RSE NaT collection register */
+       unsigned long           sc_ar_ccv;      /* compare and exchange compare value register */
+       unsigned long           sc_ar_unat;     /* ar.unat of interrupted context */
+       unsigned long           sc_ar_fpsr;     /* floating-point status register */
+       unsigned long           sc_ar_pfs;      /* previous function state */
+       unsigned long           sc_ar_lc;       /* loop count register */
+       unsigned long           sc_pr;          /* predicate registers */
+       unsigned long           sc_br[8];       /* branch registers */
+       /* Note: sc_gr[0] is used as the "uc_link" member of ucontext_t */
+       unsigned long           sc_gr[32];      /* general registers (static partition) */
+       struct ia64_fpreg       sc_fr[128];     /* floating-point registers */
+
+       unsigned long           sc_rbs_base;    /* NULL or new base of sighandler's rbs */
+       unsigned long           sc_loadrs;      /* see description above */
+
+       unsigned long           sc_ar25;        /* cmp8xchg16 uses this */
+       unsigned long           sc_ar26;        /* rsvd for scratch use */
+       unsigned long           sc_rsvd[12];    /* reserved for future use */
+       /*
+        * The mask must come last so we can increase _NSIG_WORDS
+        * without breaking binary compatibility.
+        */
+       sigset_t                sc_mask;        /* signal mask to restore after handler returns */
+};
+
+# endif /* __ASSEMBLY__ */
+#endif /* _ASM_IA64_SIGCONTEXT_H */
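
The ndirty formula in the comment above combines sc_loadrs (whose payload sits in bits 16
and up, mirroring ar.rsc.loadrs) with the RSE helpers from asm/rse.h. A sketch of a helper
computing it for a given sigcontext (the function name is illustrative; the body simply
restates the formula from the comment):

    #include <asm/rse.h>
    #include <asm/sigcontext.h>

    /* Number of stacked registers in the dirty partition described by SC. */
    static unsigned long ndirty_regs(struct sigcontext *sc)
    {
            unsigned long *base = (unsigned long *) sc->sc_rbs_base;
            unsigned long *top  = (unsigned long *)
                    (sc->sc_rbs_base + (sc->sc_loadrs >> 16));

            return ia64_rse_num_regs(base, top);
    }
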
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/signal.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/signal.h   Tue Aug  2 23:59:09 2005
@@ -0,0 +1,185 @@
+#ifndef _ASM_IA64_SIGNAL_H
+#define _ASM_IA64_SIGNAL_H
+
+/*
+ * Modified 1998-2001, 2003
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>, Hewlett-Packard Co
+ *
+ * Unfortunately, this file is being included by bits/signal.h in
+ * glibc-2.x.  Hence the #ifdef __KERNEL__ ugliness.
+ */
+
+#define SIGHUP          1
+#define SIGINT          2
+#define SIGQUIT                 3
+#define SIGILL          4
+#define SIGTRAP                 5
+#define SIGABRT                 6
+#define SIGIOT          6
+#define SIGBUS          7
+#define SIGFPE          8
+#define SIGKILL                 9
+#define SIGUSR1                10
+#define SIGSEGV                11
+#define SIGUSR2                12
+#define SIGPIPE                13
+#define SIGALRM                14
+#define SIGTERM                15
+#define SIGSTKFLT      16
+#define SIGCHLD                17
+#define SIGCONT                18
+#define SIGSTOP                19
+#define SIGTSTP                20
+#define SIGTTIN                21
+#define SIGTTOU                22
+#define SIGURG         23
+#define SIGXCPU                24
+#define SIGXFSZ                25
+#define SIGVTALRM      26
+#define SIGPROF                27
+#define SIGWINCH       28
+#define SIGIO          29
+#define SIGPOLL                SIGIO
+/*
+#define SIGLOST                29
+*/
+#define SIGPWR         30
+#define SIGSYS         31
+/* signal 31 is no longer "unused", but the SIGUNUSED macro remains for backwards compatibility */
+#define        SIGUNUSED       31
+
+/* These should not be considered constants from userland.  */
+#define SIGRTMIN       32
+#define SIGRTMAX       _NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_INTERRUPT is a no-op, but left due to historical reasons.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP   0x00000001
+#define SA_NOCLDWAIT   0x00000002
+#define SA_SIGINFO     0x00000004
+#define SA_ONSTACK     0x08000000
+#define SA_RESTART     0x10000000
+#define SA_NODEFER     0x40000000
+#define SA_RESETHAND   0x80000000
+
+#define SA_NOMASK      SA_NODEFER
+#define SA_ONESHOT     SA_RESETHAND
+#define SA_INTERRUPT   0x20000000 /* dummy -- ignored */
+
+#define SA_RESTORER    0x04000000
+
+/*
+ * sigaltstack controls
+ */
+#define SS_ONSTACK     1
+#define SS_DISABLE     2
+
+/*
+ * The minimum stack size needs to be fairly large because we want to
+ * be sure that an app compiled for today's CPUs will continue to run
+ * on all future CPU models.  The CPU model matters because the signal
+ * frame needs to have space for the complete machine state, including
+ * all physical stacked registers.  The number of physical stacked
+ * registers is CPU model dependent, but given that the width of
+ * ar.rsc.loadrs is 14 bits, we can assume that they'll never take up
+ * more than 16KB of space.
+ */
+#if 1
+  /*
+   * This is a stupid typo: the value was _meant_ to be 131072 (0x20000), but I typed it
+   * in wrong. ;-(  To preserve backwards compatibility, we leave the kernel at the
+   * incorrect value and fix libc only.
+   */
+# define MINSIGSTKSZ   131027  /* min. stack size for sigaltstack() */
+#else
+# define MINSIGSTKSZ   131072  /* min. stack size for sigaltstack() */
+#endif
+#define SIGSTKSZ       262144  /* default stack size for sigaltstack() */
+
+#ifdef __KERNEL__
+
+#define _NSIG          64
+#define _NSIG_BPW      64
+#define _NSIG_WORDS    (_NSIG / _NSIG_BPW)
+
+/*
+ * These values of sa_flags are used only by the kernel as part of the
+ * irq handling routines.
+ *
+ * SA_INTERRUPT is also used by the irq handling routines.
+ * SA_SHIRQ is for shared interrupt support on PCI and EISA.
+ */
+#define SA_PROBE               SA_ONESHOT
+#define SA_SAMPLE_RANDOM       SA_RESTART
+#define SA_SHIRQ               0x04000000
+#define SA_PERCPU_IRQ          0x02000000
+
+#endif /* __KERNEL__ */
+
+#define SIG_BLOCK          0   /* for blocking signals */
+#define SIG_UNBLOCK        1   /* for unblocking signals */
+#define SIG_SETMASK        2   /* for setting the signal mask */
+
+#define SIG_DFL        ((__sighandler_t)0)     /* default signal handling */
+#define SIG_IGN        ((__sighandler_t)1)     /* ignore signal */
+#define SIG_ERR        ((__sighandler_t)-1)    /* error return from signal */
+
+# ifndef __ASSEMBLY__
+
+#  include <linux/types.h>
+
+/* Avoid too many header ordering problems.  */
+struct siginfo;
+
+/* Type of a signal handler.  */
+typedef void __user (*__sighandler_t)(int);
+
+typedef struct sigaltstack {
+       void __user *ss_sp;
+       int ss_flags;
+       size_t ss_size;
+} stack_t;
+
+#ifdef __KERNEL__
+
+/* Most things should be clean enough to redefine this at will, if care
+   is taken to make libc match.  */
+
+typedef unsigned long old_sigset_t;
+
+typedef struct {
+       unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+struct sigaction {
+       __sighandler_t sa_handler;
+       unsigned long sa_flags;
+       sigset_t sa_mask;               /* mask last for extensibility */
+};
+
+struct k_sigaction {
+       struct sigaction sa;
+};
+
+#  include <asm/sigcontext.h>
+
+#define ptrace_signal_deliver(regs, cookie) do { } while (0)
+
+void set_sigdelayed(pid_t pid, int signo, int code, void __user *addr);
+
+#endif /* __KERNEL__ */
+
+# endif /* !__ASSEMBLY__ */
+#endif /* _ASM_IA64_SIGNAL_H */
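
User space sees the same SA_* flag values through glibc's <signal.h>. A minimal sketch of
installing a handler with SA_RESTART so that interrupted system calls are restarted (the
handler body and signal choice are illustrative):

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static void on_int(int sig)
    {
            (void) sig;                 /* async-signal-safe work only */
    }

    int main(void)
    {
            struct sigaction sa;

            memset(&sa, 0, sizeof(sa));
            sa.sa_handler = on_int;
            sa.sa_flags = SA_RESTART;   /* restart interrupted syscalls */
            sigemptyset(&sa.sa_mask);
            if (sigaction(SIGINT, &sa, NULL) < 0)
                    perror("sigaction");
            pause();                    /* wait for the signal */
            return 0;
    }
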
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/smp.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/smp.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,131 @@
+/*
+ * SMP Support
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ * Copyright (C) 2001-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+#ifndef _ASM_IA64_SMP_H
+#define _ASM_IA64_SMP_H
+
+#include <linux/config.h>
+
+#ifdef CONFIG_SMP
+
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/cpumask.h>
+
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/param.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+
+#define XTP_OFFSET             0x1e0008
+
+#define SMP_IRQ_REDIRECTION    (1 << 0)
+#define SMP_IPI_REDIRECTION    (1 << 1)
+
+#define smp_processor_id()     (current_thread_info()->cpu)
+
+extern struct smp_boot_data {
+       int cpu_count;
+       int cpu_phys_id[NR_CPUS];
+} smp_boot_data __initdata;
+
+extern char no_int_routing __devinitdata;
+
+extern cpumask_t cpu_online_map;
+extern void __iomem *ipi_base_addr;
+extern unsigned char smp_int_redirect;
+
+extern volatile int ia64_cpu_to_sapicid[];
+#define cpu_physical_id(i)     ia64_cpu_to_sapicid[i]
+
+extern unsigned long ap_wakeup_vector;
+
+/*
+ * Function to map hard smp processor id to logical id.  Slow, so don't use this in
+ * performance-critical code.
+ */
+static inline int
+cpu_logical_id (int cpuid)
+{
+       int i;
+
+       for (i = 0; i < NR_CPUS; ++i)
+               if (cpu_physical_id(i) == cpuid)
+                       break;
+       return i;
+}
+
+/*
+ * XTP control functions:
+ *     min_xtp   : route all interrupts to this CPU
+ *     normal_xtp: nominal XTP value
+ *     max_xtp   : never deliver interrupts to this CPU.
+ */
+
+static inline void
+min_xtp (void)
+{
+       if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+               writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */
+}
+
+static inline void
+normal_xtp (void)
+{
+       if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+               writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */
+}
+
+static inline void
+max_xtp (void)
+{
+       if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+               writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
+}
+
+static inline unsigned int
+hard_smp_processor_id (void)
+{
+       union {
+               struct {
+                       unsigned long reserved : 16;
+                       unsigned long eid : 8;
+                       unsigned long id : 8;
+                       unsigned long ignored : 32;
+               } f;
+               unsigned long bits;
+       } lid;
+
+       lid.bits = ia64_getreg(_IA64_REG_CR_LID);
+       return lid.f.id << 8 | lid.f.eid;
+}
+
+/* Upping and downing of CPUs */
+extern int __cpu_disable (void);
+extern void __cpu_die (unsigned int cpu);
+extern void cpu_die (void) __attribute__ ((noreturn));
+extern int __cpu_up (unsigned int cpu);
+extern void __init smp_build_cpu_map(void);
+
+extern void __init init_smp_config (void);
+extern void smp_do_timer (struct pt_regs *regs);
+
+extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
+                                    int retry, int wait);
+extern void smp_send_reschedule (int cpu);
+extern void lock_ipi_calllock(void);
+extern void unlock_ipi_calllock(void);
+
+#else
+
+#define cpu_logical_id(cpuid)          0
+
+#endif /* CONFIG_SMP */
+#endif /* _ASM_IA64_SMP_H */
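
The cpu_logical_id() helper above is the reverse of the cpu_physical_id() table lookup: a linear scan over NR_CPUS entries, returning NR_CPUS when the SAPIC id is unknown, which is why the comment warns against using it in performance-critical code. A stand-alone sketch of the same idea (illustrative only, not part of this changeset; NR_CPUS value and table contents are made up):

/* Sketch only: reverse-map a hardware id to a logical CPU number by scanning. */
#include <stdio.h>

#define NR_CPUS 4

static const int cpu_to_sapicid[NR_CPUS] = { 0x10, 0x11, 0x30, 0x31 }; /* hypothetical */

static int sketch_cpu_logical_id(int sapicid)
{
        int i;

        for (i = 0; i < NR_CPUS; ++i)
                if (cpu_to_sapicid[i] == sapicid)
                        break;
        return i;       /* == NR_CPUS when not found */
}

int main(void)
{
        printf("sapicid 0x30 -> cpu %d\n", sketch_cpu_logical_id(0x30));
        printf("sapicid 0x99 -> cpu %d (not found)\n", sketch_cpu_logical_id(0x99));
        return 0;
}
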
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/sn/sn_sal.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/sn/sn_sal.h        Tue Aug  2 23:59:09 2005
@@ -0,0 +1,994 @@
+#ifndef _ASM_IA64_SN_SN_SAL_H
+#define _ASM_IA64_SN_SN_SAL_H
+
+/*
+ * System Abstraction Layer definitions for IA64
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All rights reserved.
+ */
+
+
+#include <linux/config.h>
+#include <asm/sal.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/geo.h>
+#include <asm/sn/nodepda.h>
+
+// SGI Specific Calls
+#define  SN_SAL_POD_MODE                           0x02000001
+#define  SN_SAL_SYSTEM_RESET                       0x02000002
+#define  SN_SAL_PROBE                              0x02000003
+#define  SN_SAL_GET_MASTER_NASID                   0x02000004
+#define         SN_SAL_GET_KLCONFIG_ADDR                  0x02000005
+#define  SN_SAL_LOG_CE                            0x02000006
+#define  SN_SAL_REGISTER_CE                       0x02000007
+#define  SN_SAL_GET_PARTITION_ADDR                0x02000009
+#define  SN_SAL_XP_ADDR_REGION                    0x0200000f
+#define  SN_SAL_NO_FAULT_ZONE_VIRTUAL             0x02000010
+#define  SN_SAL_NO_FAULT_ZONE_PHYSICAL            0x02000011
+#define  SN_SAL_PRINT_ERROR                       0x02000012
+#define  SN_SAL_SET_ERROR_HANDLING_FEATURES       0x0200001a   // reentrant
+#define  SN_SAL_GET_FIT_COMPT                     0x0200001b   // reentrant
+#define  SN_SAL_GET_HUB_INFO                       0x0200001c
+#define  SN_SAL_GET_SAPIC_INFO                     0x0200001d
+#define  SN_SAL_CONSOLE_PUTC                       0x02000021
+#define  SN_SAL_CONSOLE_GETC                       0x02000022
+#define  SN_SAL_CONSOLE_PUTS                       0x02000023
+#define  SN_SAL_CONSOLE_GETS                       0x02000024
+#define  SN_SAL_CONSOLE_GETS_TIMEOUT               0x02000025
+#define  SN_SAL_CONSOLE_POLL                       0x02000026
+#define  SN_SAL_CONSOLE_INTR                       0x02000027
+#define  SN_SAL_CONSOLE_PUTB                      0x02000028
+#define  SN_SAL_CONSOLE_XMIT_CHARS                0x0200002a
+#define  SN_SAL_CONSOLE_READC                     0x0200002b
+#define  SN_SAL_SYSCTL_MODID_GET                  0x02000031
+#define  SN_SAL_SYSCTL_GET                         0x02000032
+#define  SN_SAL_SYSCTL_IOBRICK_MODULE_GET          0x02000033
+#define  SN_SAL_SYSCTL_IO_PORTSPEED_GET            0x02000035
+#define  SN_SAL_SYSCTL_SLAB_GET                    0x02000036
+#define  SN_SAL_BUS_CONFIG                        0x02000037
+#define  SN_SAL_SYS_SERIAL_GET                    0x02000038
+#define  SN_SAL_PARTITION_SERIAL_GET              0x02000039
+#define  SN_SAL_SYSCTL_PARTITION_GET              0x0200003a
+#define  SN_SAL_SYSTEM_POWER_DOWN                 0x0200003b
+#define  SN_SAL_GET_MASTER_BASEIO_NASID                   0x0200003c
+#define  SN_SAL_COHERENCE                          0x0200003d
+#define  SN_SAL_MEMPROTECT                         0x0200003e
+#define  SN_SAL_SYSCTL_FRU_CAPTURE                0x0200003f
+
+#define  SN_SAL_SYSCTL_IOBRICK_PCI_OP             0x02000042   // reentrant
+#define         SN_SAL_IROUTER_OP                         0x02000043
+#define  SN_SAL_IOIF_INTERRUPT                    0x0200004a
+#define  SN_SAL_HWPERF_OP                         0x02000050   // lock
+#define  SN_SAL_IOIF_ERROR_INTERRUPT              0x02000051
+
+#define  SN_SAL_IOIF_SLOT_ENABLE                  0x02000053
+#define  SN_SAL_IOIF_SLOT_DISABLE                 0x02000054
+#define  SN_SAL_IOIF_GET_HUBDEV_INFO              0x02000055
+#define  SN_SAL_IOIF_GET_PCIBUS_INFO              0x02000056
+#define  SN_SAL_IOIF_GET_PCIDEV_INFO              0x02000057
+#define  SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST     0x02000058
+
+#define SN_SAL_HUB_ERROR_INTERRUPT                0x02000060
+
+
+/*
+ * Service-specific constants
+ */
+
+/* Console interrupt manipulation */
+       /* action codes */
+#define SAL_CONSOLE_INTR_OFF    0       /* turn the interrupt off */
+#define SAL_CONSOLE_INTR_ON     1       /* turn the interrupt on */
+#define SAL_CONSOLE_INTR_STATUS 2      /* retrieve the interrupt status */
+       /* interrupt specification & status return codes */
+#define SAL_CONSOLE_INTR_XMIT  1       /* output interrupt */
+#define SAL_CONSOLE_INTR_RECV  2       /* input interrupt */
+
+/* interrupt handling */
+#define SAL_INTR_ALLOC         1
+#define SAL_INTR_FREE          2
+
+/*
+ * IRouter (i.e. generalized system controller) operations
+ */
+#define SAL_IROUTER_OPEN       0       /* open a subchannel */
+#define SAL_IROUTER_CLOSE      1       /* close a subchannel */
+#define SAL_IROUTER_SEND       2       /* send part of an IRouter packet */
+#define SAL_IROUTER_RECV       3       /* receive part of an IRouter packet */
+#define SAL_IROUTER_INTR_STATUS        4       /* check the interrupt status for
+                                        * an open subchannel
+                                        */
+#define SAL_IROUTER_INTR_ON    5       /* enable an interrupt */
+#define SAL_IROUTER_INTR_OFF   6       /* disable an interrupt */
+#define SAL_IROUTER_INIT       7       /* initialize IRouter driver */
+
+/* IRouter interrupt mask bits */
+#define SAL_IROUTER_INTR_XMIT  SAL_CONSOLE_INTR_XMIT
+#define SAL_IROUTER_INTR_RECV  SAL_CONSOLE_INTR_RECV
+
+
+/*
+ * SAL Error Codes
+ */
+#define SALRET_MORE_PASSES     1
+#define SALRET_OK              0
+#define SALRET_NOT_IMPLEMENTED (-1)
+#define SALRET_INVALID_ARG     (-2)
+#define SALRET_ERROR           (-3)
+
+
+#ifndef XEN
+/**
+ * sn_sal_rev_major - get the major SGI SAL revision number
+ *
+ * The SGI PROM stores its version in sal_[ab]_rev_(major|minor).
+ * This routine simply extracts the major value from the
+ * @ia64_sal_systab structure constructed by ia64_sal_init().
+ */
+static inline int
+sn_sal_rev_major(void)
+{
+       struct ia64_sal_systab *systab = efi.sal_systab;
+
+       return (int)systab->sal_b_rev_major;
+}
+
+/**
+ * sn_sal_rev_minor - get the minor SGI SAL revision number
+ *
+ * The SGI PROM stores its version in sal_[ab]_rev_(major|minor).
+ * This routine simply extracts the minor value from the
+ * @ia64_sal_systab structure constructed by ia64_sal_init().
+ */
+static inline int
+sn_sal_rev_minor(void)
+{
+       struct ia64_sal_systab *systab = efi.sal_systab;
+       
+       return (int)systab->sal_b_rev_minor;
+}
+
+/*
+ * Specify the minimum PROM revsion required for this kernel.
+ * Note that they're stored in hex format...
+ */
+#define SN_SAL_MIN_MAJOR       0x4  /* SN2 kernels need at least PROM 4.0 */
+#define SN_SAL_MIN_MINOR       0x0
+
+/*
+ * Returns the master console nasid, if the call fails, return an illegal
+ * value.
+ */
+static inline u64
+ia64_sn_get_console_nasid(void)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_NASID, 0, 0, 0, 0, 0, 0, 0);
+
+       if (ret_stuff.status < 0)
+               return ret_stuff.status;
+
+       /* Master console nasid is in 'v0' */
+       return ret_stuff.v0;
+}
+
+/*
+ * Returns the master baseio nasid, if the call fails, return an illegal
+ * value.
+ */
+static inline u64
+ia64_sn_get_master_baseio_nasid(void)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_BASEIO_NASID, 0, 0, 0, 0, 0, 0, 0);
+
+       if (ret_stuff.status < 0)
+               return ret_stuff.status;
+
+       /* Master baseio nasid is in 'v0' */
+       return ret_stuff.v0;
+}
+
+static inline char *
+ia64_sn_get_klconfig_addr(nasid_t nasid)
+{
+       struct ia64_sal_retval ret_stuff;
+       int cnodeid;
+
+       cnodeid = nasid_to_cnodeid(nasid);
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR, (u64)nasid, 0, 0, 0, 0, 0, 0);
+
+       /*
+        * We should panic if a valid cnode nasid does not produce
+        * a klconfig address.
+        */
+       if (ret_stuff.status != 0) {
+               panic("ia64_sn_get_klconfig_addr: Returned error %lx\n", ret_stuff.status);
+       }
+       return ret_stuff.v0 ? __va(ret_stuff.v0) : NULL;
+}
+#endif /* !XEN */
+
+/*
+ * Returns the next console character.
+ */
+static inline u64
+ia64_sn_console_getc(int *ch)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_GETC, 0, 0, 0, 0, 0, 0, 0);
+
+       /* character is in 'v0' */
+       *ch = (int)ret_stuff.v0;
+
+       return ret_stuff.status;
+}
+
+/*
+ * Read a character from the SAL console device, after a previous interrupt
+ * or poll operation has given us to know that a character is available
+ * to be read.
+ */
+static inline u64
+ia64_sn_console_readc(void)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_READC, 0, 0, 0, 0, 0, 0, 0);
+
+       /* character is in 'v0' */
+       return ret_stuff.v0;
+}
+
+/*
+ * Sends the given character to the console.
+ */
+static inline u64
+ia64_sn_console_putc(char ch)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTC, (uint64_t)ch, 0, 0, 0, 0, 0, 0);
+
+       return ret_stuff.status;
+}
+
+/*
+ * Sends the given buffer to the console.
+ */
+static inline u64
+ia64_sn_console_putb(const char *buf, int len)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0; 
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTB, (uint64_t)buf, (uint64_t)len, 0, 0, 0, 0, 0);
+
+       if ( ret_stuff.status == 0 ) {
+               return ret_stuff.v0;
+       }
+       return (u64)0;
+}
+
+#ifndef XEN
+/*
+ * Print a platform error record
+ */
+static inline u64
+ia64_sn_plat_specific_err_print(int (*hook)(const char*, ...), char *rec)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_REENTRANT(ret_stuff, SN_SAL_PRINT_ERROR, (uint64_t)hook, (uint64_t)rec, 0, 0, 0, 0, 0);
+
+       return ret_stuff.status;
+}
+
+/*
+ * Check for Platform errors
+ */
+static inline u64
+ia64_sn_plat_cpei_handler(void)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_LOG_CE, 0, 0, 0, 0, 0, 0, 0);
+
+       return ret_stuff.status;
+}
+
+/*
+ * Checks for console input.
+ */
+static inline u64
+ia64_sn_console_check(int *result)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_POLL, 0, 0, 0, 0, 0, 0, 0);
+
+       /* result is in 'v0' */
+       *result = (int)ret_stuff.v0;
+
+       return ret_stuff.status;
+}
+
+/*
+ * Checks console interrupt status
+ */
+static inline u64
+ia64_sn_console_intr_status(void)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, 
+                0, SAL_CONSOLE_INTR_STATUS,
+                0, 0, 0, 0, 0);
+
+       if (ret_stuff.status == 0) {
+           return ret_stuff.v0;
+       }
+       
+       return 0;
+}
+
+/*
+ * Enable an interrupt on the SAL console device.
+ */
+static inline void
+ia64_sn_console_intr_enable(uint64_t intr)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, 
+                intr, SAL_CONSOLE_INTR_ON,
+                0, 0, 0, 0, 0);
+}
+
+/*
+ * Disable an interrupt on the SAL console device.
+ */
+static inline void
+ia64_sn_console_intr_disable(uint64_t intr)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, 
+                intr, SAL_CONSOLE_INTR_OFF,
+                0, 0, 0, 0, 0);
+}
+
+/*
+ * Sends a character buffer to the console asynchronously.
+ */
+static inline u64
+ia64_sn_console_xmit_chars(char *buf, int len)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_XMIT_CHARS,
+                (uint64_t)buf, (uint64_t)len,
+                0, 0, 0, 0, 0);
+
+       if (ret_stuff.status == 0) {
+           return ret_stuff.v0;
+       }
+
+       return 0;
+}
+
+/*
+ * Returns the iobrick module Id
+ */
+static inline u64
+ia64_sn_sysctl_iobrick_module_get(nasid_t nasid, int *result)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYSCTL_IOBRICK_MODULE_GET, nasid, 0, 0, 0, 0, 0, 0);
+
+       /* result is in 'v0' */
+       *result = (int)ret_stuff.v0;
+
+       return ret_stuff.status;
+}
+
+/**
+ * ia64_sn_pod_mode - call the SN_SAL_POD_MODE function
+ *
+ * SN_SAL_POD_MODE actually takes an argument, but it's always
+ * 0 when we call it from the kernel, so we don't have to expose
+ * it to the caller.
+ */
+static inline u64
+ia64_sn_pod_mode(void)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SN_SAL_POD_MODE, 0, 0, 0, 0, 0, 0, 0);
+       if (isrv.status)
+               return 0;
+       return isrv.v0;
+}
+
+/**
+ * ia64_sn_probe_mem - read from memory safely
+ * @addr: address to probe
+ * @size: number bytes to read (1,2,4,8)
+ * @data_ptr: address to store value read by probe (-1 returned if probe fails)
+ *
+ * Call into the SAL to do a memory read.  If the read generates a machine
+ * check, this routine will recover gracefully and return -1 to the caller.
+ * @addr is usually a kernel virtual address in uncached space (i.e. the
+ * address starts with 0xc), but if called in physical mode, @addr should
+ * be a physical address.
+ *
+ * Return values:
+ *  0 - probe successful
+ *  1 - probe failed (generated MCA)
+ *  2 - Bad arg
+ * <0 - PAL error
+ */
+static inline u64
+ia64_sn_probe_mem(long addr, long size, void *data_ptr)
+{
+       struct ia64_sal_retval isrv;
+
+       SAL_CALL(isrv, SN_SAL_PROBE, addr, size, 0, 0, 0, 0, 0);
+
+       if (data_ptr) {
+               switch (size) {
+               case 1:
+                       *((u8*)data_ptr) = (u8)isrv.v0;
+                       break;
+               case 2:
+                       *((u16*)data_ptr) = (u16)isrv.v0;
+                       break;
+               case 4:
+                       *((u32*)data_ptr) = (u32)isrv.v0;
+                       break;
+               case 8:
+                       *((u64*)data_ptr) = (u64)isrv.v0;
+                       break;
+               default:
+                       isrv.status = 2;
+               }
+       }
+       return isrv.status;
+}
+
+/*
+ * Retrieve the system serial number as an ASCII string.
+ */
+static inline u64
+ia64_sn_sys_serial_get(char *buf)
+{
+       struct ia64_sal_retval ret_stuff;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYS_SERIAL_GET, buf, 0, 0, 0, 0, 0, 0);
+       return ret_stuff.status;
+}
+
+extern char sn_system_serial_number_string[];
+extern u64 sn_partition_serial_number;
+
+static inline char *
+sn_system_serial_number(void) {
+       if (sn_system_serial_number_string[0]) {
+               return(sn_system_serial_number_string);
+       } else {
+               ia64_sn_sys_serial_get(sn_system_serial_number_string);
+               return(sn_system_serial_number_string);
+       }
+}
+       
+
+/*
+ * Returns a unique id number for this system and partition (suitable for
+ * use with license managers), based in part on the system serial number.
+ */
+static inline u64
+ia64_sn_partition_serial_get(void)
+{
+       struct ia64_sal_retval ret_stuff;
+       SAL_CALL(ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0, 0, 0, 0, 0, 0, 0);
+       if (ret_stuff.status != 0)
+           return 0;
+       return ret_stuff.v0;
+}
+
+static inline u64
+sn_partition_serial_number_val(void) {
+       if (sn_partition_serial_number) {
+               return(sn_partition_serial_number);
+       } else {
+               return(sn_partition_serial_number = ia64_sn_partition_serial_get());
+       }
+}
+
+/*
+ * Returns the partition id of the nasid passed in as an argument,
+ * or INVALID_PARTID if the partition id cannot be retrieved.
+ */
+static inline partid_t
+ia64_sn_sysctl_partition_get(nasid_t nasid)
+{
+       struct ia64_sal_retval ret_stuff;
+       SAL_CALL(ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid,
+                0, 0, 0, 0, 0, 0);
+       if (ret_stuff.status != 0)
+           return INVALID_PARTID;
+       return ((partid_t)ret_stuff.v0);
+}
+
+/*
+ * Returns the partition id of the current processor.
+ */
+
+extern partid_t sn_partid;
+
+static inline partid_t
+sn_local_partid(void) {
+       if (sn_partid < 0) {
+               return (sn_partid = ia64_sn_sysctl_partition_get(cpuid_to_nasid(smp_processor_id())));
+       } else {
+               return sn_partid;
+       }
+}
+
+/*
+ * Register or unregister a physical address range being referenced across
+ * a partition boundary for which certain SAL errors should be scanned for,
+ * cleaned up and ignored.  This is of value for kernel partitioning code only.
+ * Values for the operation argument:
+ *     1 = register this address range with SAL
+ *     0 = unregister this address range with SAL
+ * 
+ * SAL maintains a reference count on an address range in case it is registered
+ * multiple times.
+ * 
+ * On success, returns the reference count of the address range after the SAL
+ * call has performed the current registration/unregistration.  Returns a
+ * negative value if an error occurred.
+ */
+static inline int
+sn_register_xp_addr_region(u64 paddr, u64 len, int operation)
+{
+       struct ia64_sal_retval ret_stuff;
+       SAL_CALL(ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len, (u64)operation,
+                0, 0, 0, 0);
+       return ret_stuff.status;
+}
+
+/*
+ * Register or unregister an instruction range for which SAL errors should
+ * be ignored.  If an error occurs while in the registered range, SAL jumps
+ * to return_addr after ignoring the error.  Values for the operation argument:
+ *     1 = register this instruction range with SAL
+ *     0 = unregister this instruction range with SAL
+ *
+ * Returns 0 on success, or a negative value if an error occurred.
+ */
+static inline int
+sn_register_nofault_code(u64 start_addr, u64 end_addr, u64 return_addr,
+                        int virtual, int operation)
+{
+       struct ia64_sal_retval ret_stuff;
+       u64 call;
+       if (virtual) {
+               call = SN_SAL_NO_FAULT_ZONE_VIRTUAL;
+       } else {
+               call = SN_SAL_NO_FAULT_ZONE_PHYSICAL;
+       }
+       SAL_CALL(ret_stuff, call, start_addr, end_addr, return_addr, (u64)1,
+                0, 0, 0);
+       return ret_stuff.status;
+}
+
+/*
+ * Change or query the coherence domain for this partition. Each cpu-based
+ * nasid is represented by a bit in an array of 64-bit words:
+ *      0 = not in this partition's coherency domain
+ *      1 = in this partition's coherency domain
+ *
+ * It is not possible for the local system's nasids to be removed from
+ * the coherency domain.  Purpose of the domain arguments:
+ *      new_domain = set the coherence domain to the given nasids
+ *      old_domain = return the current coherence domain
+ *
+ * Returns 0 on success, or a negative value if an error occurred.
+ */
+static inline int
+sn_change_coherence(u64 *new_domain, u64 *old_domain)
+{
+       struct ia64_sal_retval ret_stuff;
+       SAL_CALL(ret_stuff, SN_SAL_COHERENCE, new_domain, old_domain, 0, 0,
+                0, 0, 0);
+       return ret_stuff.status;
+}
+
+/*
+ * Change memory access protections for a physical address range.
+ * nasid_array is not used on Altix, but may be in future architectures.
+ * Available memory protection access classes are defined after the function.
+ */
+static inline int
+sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array)
+{
+       struct ia64_sal_retval ret_stuff;
+       int cnodeid;
+       unsigned long irq_flags;
+
+       cnodeid = nasid_to_cnodeid(get_node_number(paddr));
+       // spin_lock(&NODEPDA(cnodeid)->bist_lock);
+       local_irq_save(irq_flags);
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_MEMPROTECT, paddr, len, nasid_array,
+                perms, 0, 0, 0);
+       local_irq_restore(irq_flags);
+       // spin_unlock(&NODEPDA(cnodeid)->bist_lock);
+       return ret_stuff.status;
+}
+#define SN_MEMPROT_ACCESS_CLASS_0              0x14a080
+#define SN_MEMPROT_ACCESS_CLASS_1              0x2520c2
+#define SN_MEMPROT_ACCESS_CLASS_2              0x14a1ca
+#define SN_MEMPROT_ACCESS_CLASS_3              0x14a290
+#define SN_MEMPROT_ACCESS_CLASS_6              0x084080
+#define SN_MEMPROT_ACCESS_CLASS_7              0x021080
+
+/*
+ * Turns off system power.
+ */
+static inline void
+ia64_sn_power_down(void)
+{
+       struct ia64_sal_retval ret_stuff;
+       SAL_CALL(ret_stuff, SN_SAL_SYSTEM_POWER_DOWN, 0, 0, 0, 0, 0, 0, 0);
+       while(1);
+       /* never returns */
+}
+
+/**
+ * ia64_sn_fru_capture - tell the system controller to capture hw state
+ *
+ * This routine will call the SAL which will tell the system controller(s)
+ * to capture hw mmr information from each SHub in the system.
+ */
+static inline u64
+ia64_sn_fru_capture(void)
+{
+        struct ia64_sal_retval isrv;
+        SAL_CALL(isrv, SN_SAL_SYSCTL_FRU_CAPTURE, 0, 0, 0, 0, 0, 0, 0);
+        if (isrv.status)
+                return 0;
+        return isrv.v0;
+}
+
+/*
+ * Performs an operation on a PCI bus or slot -- power up, power down
+ * or reset.
+ */
+static inline u64
+ia64_sn_sysctl_iobrick_pci_op(nasid_t n, u64 connection_type, 
+                             u64 bus, char slot, 
+                             u64 action)
+{
+       struct ia64_sal_retval rv = {0, 0, 0, 0};
+
+       SAL_CALL_NOLOCK(rv, SN_SAL_SYSCTL_IOBRICK_PCI_OP, connection_type, n, action,
+                bus, (u64) slot, 0, 0);
+       if (rv.status)
+               return rv.v0;
+       return 0;
+}
+
+
+/*
+ * Open a subchannel for sending arbitrary data to the system
+ * controller network via the system controller device associated with
+ * 'nasid'.  Return the subchannel number or a negative error code.
+ */
+static inline int
+ia64_sn_irtr_open(nasid_t nasid)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_OPEN, nasid,
+                          0, 0, 0, 0, 0);
+       return (int) rv.v0;
+}
+
+/*
+ * Close system controller subchannel 'subch' previously opened on 'nasid'.
+ */
+static inline int
+ia64_sn_irtr_close(nasid_t nasid, int subch)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_CLOSE,
+                          (u64) nasid, (u64) subch, 0, 0, 0, 0);
+       return (int) rv.status;
+}
+
+/*
+ * Read data from system controller associated with 'nasid' on
+ * subchannel 'subch'.  The buffer to be filled is pointed to by
+ * 'buf', and its capacity is in the integer pointed to by 'len'.  The
+ * referent of 'len' is set to the number of bytes read by the SAL
+ * call.  The return value is either SALRET_OK (for bytes read) or
+ * SALRET_ERROR (for error or "no data available").
+ */
+static inline int
+ia64_sn_irtr_recv(nasid_t nasid, int subch, char *buf, int *len)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_RECV,
+                          (u64) nasid, (u64) subch, (u64) buf, (u64) len,
+                          0, 0);
+       return (int) rv.status;
+}
+
+/*
+ * Write data to the system controller network via the system
+ * controller associated with 'nasid' on suchannel 'subch'.  The
+ * buffer to be written out is pointed to by 'buf', and 'len' is the
+ * number of bytes to be written.  The return value is either the
+ * number of bytes written (which could be zero) or a negative error
+ * code.
+ */
+static inline int
+ia64_sn_irtr_send(nasid_t nasid, int subch, char *buf, int len)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_SEND,
+                          (u64) nasid, (u64) subch, (u64) buf, (u64) len,
+                          0, 0);
+       return (int) rv.v0;
+}
+
+/*
+ * Check whether any interrupts are pending for the system controller
+ * associated with 'nasid' and its subchannel 'subch'.  The return
+ * value is a mask of pending interrupts (SAL_IROUTER_INTR_XMIT and/or
+ * SAL_IROUTER_INTR_RECV).
+ */
+static inline int
+ia64_sn_irtr_intr(nasid_t nasid, int subch)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_STATUS,
+                          (u64) nasid, (u64) subch, 0, 0, 0, 0);
+       return (int) rv.v0;
+}
+
+/*
+ * Enable the interrupt indicated by the intr parameter (either
+ * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
+ */
+static inline int
+ia64_sn_irtr_intr_enable(nasid_t nasid, int subch, u64 intr)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_ON,
+                          (u64) nasid, (u64) subch, intr, 0, 0, 0);
+       return (int) rv.v0;
+}
+
+/*
+ * Disable the interrupt indicated by the intr parameter (either
+ * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
+ */
+static inline int
+ia64_sn_irtr_intr_disable(nasid_t nasid, int subch, u64 intr)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_OFF,
+                          (u64) nasid, (u64) subch, intr, 0, 0, 0);
+       return (int) rv.v0;
+}
+
+/**
+ * ia64_sn_get_fit_compt - read a FIT entry from the PROM header
+ * @nasid: NASID of node to read
+ * @index: FIT entry index to be retrieved (0..n)
+ * @fitentry: 16 byte buffer where FIT entry will be stored.
+ * @banbuf: optional buffer for retrieving banner
+ * @banlen: length of banner buffer
+ *
+ * Access to the physical PROM chips needs to be serialized since reads and
+ * writes can't occur at the same time, so we need to call into the SAL when
+ * we want to look at the FIT entries on the chips.
+ *
+ * Returns:
+ *     %SALRET_OK if ok
+ *     %SALRET_INVALID_ARG if index too big
+ *     %SALRET_NOT_IMPLEMENTED if running on older PROM
+ *     ??? if nasid invalid OR banner buffer not large enough
+ */
+static inline int
+ia64_sn_get_fit_compt(u64 nasid, u64 index, void *fitentry, void *banbuf,
+                     u64 banlen)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_NOLOCK(rv, SN_SAL_GET_FIT_COMPT, nasid, index, fitentry,
+                       banbuf, banlen, 0, 0);
+       return (int) rv.status;
+}
+
+/*
+ * Initialize the SAL components of the system controller
+ * communication driver; specifically pass in a sizable buffer that
+ * can be used for allocation of subchannel queues as new subchannels
+ * are opened.  "buf" points to the buffer, and "len" specifies its
+ * length.
+ */
+static inline int
+ia64_sn_irtr_init(nasid_t nasid, void *buf, int len)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INIT,
+                          (u64) nasid, (u64) buf, (u64) len, 0, 0, 0);
+       return (int) rv.status;
+}
+
+/*
+ * Returns the nasid, subnode & slice corresponding to a SAPIC ID
+ *
+ *  In:
+ *     arg0 - SN_SAL_GET_SAPIC_INFO
+ *     arg1 - sapicid (lid >> 16) 
+ *  Out:
+ *     v0 - nasid
+ *     v1 - subnode
+ *     v2 - slice
+ */
+static inline u64
+ia64_sn_get_sapic_info(int sapicid, int *nasid, int *subnode, int *slice)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SAPIC_INFO, sapicid, 0, 0, 0, 0, 0, 0);
+
+/***** BEGIN HACK - temp til old proms no longer supported ********/
+       if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
+               if (nasid) *nasid = sapicid & 0xfff;
+               if (subnode) *subnode = (sapicid >> 13) & 1;
+               if (slice) *slice = (sapicid >> 12) & 3;
+               return 0;
+       }
+/***** END HACK *******/
+
+       if (ret_stuff.status < 0)
+               return ret_stuff.status;
+
+       if (nasid) *nasid = (int) ret_stuff.v0;
+       if (subnode) *subnode = (int) ret_stuff.v1;
+       if (slice) *slice = (int) ret_stuff.v2;
+       return 0;
+}
+ 
+/*
+ * Returns information about the HUB/SHUB.
+ *  In:
+ *     arg0 - SN_SAL_GET_HUB_INFO
+ *     arg1 - 0 (other values reserved for future use)
+ *  Out:
+ *     v0 - shub type (0=shub1, 1=shub2)
+ *     v1 - masid mask (ex., 0x7ff for 11 bit nasid)
+ *     v2 - bit position of low nasid bit
+ */
+static inline u64
+ia64_sn_get_hub_info(int fc, u64 *arg1, u64 *arg2, u64 *arg3)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_HUB_INFO, fc, 0, 0, 0, 0, 0, 0);
+
+/***** BEGIN HACK - temp til old proms no longer supported ********/
+       if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
+               if (arg1) *arg1 = 0;
+               if (arg2) *arg2 = 0x7ff;
+               if (arg3) *arg3 = 38;
+               return 0;
+       }
+/***** END HACK *******/
+
+       if (ret_stuff.status < 0)
+               return ret_stuff.status;
+
+       if (arg1) *arg1 = ret_stuff.v0;
+       if (arg2) *arg2 = ret_stuff.v1;
+       if (arg3) *arg3 = ret_stuff.v2;
+       return 0;
+}
+ 
+/*
+ * This is the access point to the Altix PROM hardware performance
+ * and status monitoring interface. For info on using this, see
+ * include/asm-ia64/sn/sn2/sn_hwperf.h
+ */
+static inline int
+ia64_sn_hwperf_op(nasid_t nasid, u64 opcode, u64 a0, u64 a1, u64 a2,
+                  u64 a3, u64 a4, int *v0)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_NOLOCK(rv, SN_SAL_HWPERF_OP, (u64)nasid,
+               opcode, a0, a1, a2, a3, a4);
+       if (v0)
+               *v0 = (int) rv.v0;
+       return (int) rv.status;
+}
+#endif /* !XEN */
+#endif /* _ASM_IA64_SN_SN_SAL_H */
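
All of the ia64_sn_* wrappers above follow one convention: zero an ia64_sal_retval, issue SAL_CALL/SAL_CALL_NOLOCK/SAL_CALL_REENTRANT with an SN_SAL_* function code plus up to seven arguments, then hand back either .status or .v0. The SAL_CALL macros are real and come from <asm/sal.h>; the sketch below (illustrative only, not part of this changeset) replaces them with a made-up stub so the shape of the convention can be shown stand-alone:

/* Sketch only: the zero-retval / call / return-status-or-v0 pattern. */
#include <stdio.h>

struct sal_retval_sketch {
        long status;                    /* < 0 on error, 0 on success */
        unsigned long v0, v1, v2;
};

#define SKETCH_SN_SAL_CONSOLE_GETC 0x02000022UL

/* Stand-in for the firmware entry point; always "returns" a character. */
static void sketch_sal_call(struct sal_retval_sketch *rv, unsigned long fn)
{
        rv->status = 0;
        rv->v0 = (fn == SKETCH_SN_SAL_CONSOLE_GETC) ? 'x' : 0;
}

static long sketch_console_getc(int *ch)
{
        struct sal_retval_sketch rv = { 0, 0, 0, 0 };

        sketch_sal_call(&rv, SKETCH_SN_SAL_CONSOLE_GETC);
        *ch = (int)rv.v0;               /* payload comes back in v0 */
        return rv.status;               /* caller checks status separately */
}

int main(void)
{
        int ch;

        if (sketch_console_getc(&ch) == 0)
                printf("got '%c'\n", ch);
        return 0;
}
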
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/spinlock.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/spinlock.h Tue Aug  2 23:59:09 2005
@@ -0,0 +1,208 @@
+#ifndef _ASM_IA64_SPINLOCK_H
+#define _ASM_IA64_SPINLOCK_H
+
+/*
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ *
+ * This file is used for SMP configurations only.
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+
+#include <asm/atomic.h>
+#include <asm/bitops.h>
+#include <asm/intrinsics.h>
+#include <asm/system.h>
+
+typedef struct {
+       volatile unsigned int lock;
+#ifdef CONFIG_PREEMPT
+       unsigned int break_lock;
+#endif
+} spinlock_t;
+
+#define SPIN_LOCK_UNLOCKED                     (spinlock_t) { 0 }
+#define spin_lock_init(x)                      ((x)->lock = 0)
+
+#ifdef ASM_SUPPORTED
+/*
+ * Try to get the lock.  If we fail to get the lock, make a non-standard call to
+ * ia64_spinlock_contention().  We do not use a normal call because that would force all
+ * callers of spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
+ * carefully coded to touch only those registers that spin_lock() marks "clobbered".
+ */
+
+#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
+
+static inline void
+_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+{
+       register volatile unsigned int *ptr asm ("r31") = &lock->lock;
+
+#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+# ifdef CONFIG_ITANIUM
+       /* don't use brl on Itanium... */
+       asm volatile ("{\n\t"
+                     "  mov ar.ccv = r0\n\t"
+                     "  mov r28 = ip\n\t"
+                     "  mov r30 = 1;;\n\t"
+                     "}\n\t"
+                     "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
+                     "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
+                     "cmp4.ne p14, p0 = r30, r0\n\t"
+                     "mov b6 = r29;;\n\t"
+                     "mov r27=%2\n\t"
+                     "(p14) br.cond.spnt.many b6"
+                     : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
+# else
+       asm volatile ("{\n\t"
+                     "  mov ar.ccv = r0\n\t"
+                     "  mov r28 = ip\n\t"
+                     "  mov r30 = 1;;\n\t"
+                     "}\n\t"
+                     "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
+                     "cmp4.ne p14, p0 = r30, r0\n\t"
+                     "mov r27=%2\n\t"
+                     "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
+                     : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
+# endif /* CONFIG_MCKINLEY */
+#else
+# ifdef CONFIG_ITANIUM
+       /* don't use brl on Itanium... */
+       /* mis-declare, so we get the entry-point, not it's function descriptor: */
+       asm volatile ("mov r30 = 1\n\t"
+                     "mov r27=%2\n\t"
+                     "mov ar.ccv = r0;;\n\t"
+                     "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
+                     "movl r29 = ia64_spinlock_contention;;\n\t"
+                     "cmp4.ne p14, p0 = r30, r0\n\t"
+                     "mov b6 = r29;;\n\t"
+                     "(p14) br.call.spnt.many b6 = b6"
+                     : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
+# else
+       asm volatile ("mov r30 = 1\n\t"
+                     "mov r27=%2\n\t"
+                     "mov ar.ccv = r0;;\n\t"
+                     "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
+                     "cmp4.ne p14, p0 = r30, r0\n\t"
+                     "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
+                     : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
+# endif /* CONFIG_MCKINLEY */
+#endif
+}
+#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
+#else /* !ASM_SUPPORTED */
+#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+# define _raw_spin_lock(x)                                                             \
+do {                                                                                   \
+       __u32 *ia64_spinlock_ptr = (__u32 *) (x);                                       \
+       __u64 ia64_spinlock_val;                                                        \
+       ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);                 \
+       if (unlikely(ia64_spinlock_val)) {                                              \
+               do {                                                                    \
+                       while (*ia64_spinlock_ptr)                                      \
+                               ia64_barrier();                                         \
+                       ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
+               } while (ia64_spinlock_val);                                            \
+       }                                                                               \
+} while (0)
+#endif /* !ASM_SUPPORTED */
+
+#define spin_is_locked(x)      ((x)->lock != 0)
+#define _raw_spin_unlock(x)    do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
+#define _raw_spin_trylock(x)   (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
+#define spin_unlock_wait(x)    do { barrier(); } while ((x)->lock)
+
+typedef struct {
+       volatile unsigned int read_counter      : 31;
+       volatile unsigned int write_lock        :  1;
+#ifdef CONFIG_PREEMPT
+       unsigned int break_lock;
+#endif
+} rwlock_t;
+#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
+
+#define rwlock_init(x)         do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+#define read_can_lock(rw)      (*(volatile int *)(rw) >= 0)
+#define write_can_lock(rw)     (*(volatile int *)(rw) == 0)
+
+#define _raw_read_lock(rw)                                                             \
+do {                                                                                   \
+       rwlock_t *__read_lock_ptr = (rw);                                               \
+                                                                                       \
+       while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {          \
+               ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);                        \
+               while (*(volatile int *)__read_lock_ptr < 0)                            \
+                       cpu_relax();                                                    \
+       }                                                                               \
+} while (0)
+
+#define _raw_read_unlock(rw)                                   \
+do {                                                           \
+       rwlock_t *__read_lock_ptr = (rw);                       \
+       ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);        \
+} while (0)
+
+#ifdef ASM_SUPPORTED
+#define _raw_write_lock(rw)                                                    \
+do {                                                                           \
+       __asm__ __volatile__ (                                                  \
+               "mov ar.ccv = r0\n"                                             \
+               "dep r29 = -1, r0, 31, 1;;\n"                                   \
+               "1:\n"                                                          \
+               "ld4 r2 = [%0];;\n"                                             \
+               "cmp4.eq p0,p7 = r0,r2\n"                                       \
+               "(p7) br.cond.spnt.few 1b \n"                                   \
+               "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"                       \
+               "cmp4.eq p0,p7 = r0, r2\n"                                      \
+               "(p7) br.cond.spnt.few 1b;;\n"                                  \
+               :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");            \
+} while(0)
+
+#define _raw_write_trylock(rw)                                                 \
+({                                                                             \
+       register long result;                                                   \
+                                                                               \
+       __asm__ __volatile__ (                                                  \
+               "mov ar.ccv = r0\n"                                             \
+               "dep r29 = -1, r0, 31, 1;;\n"                                   \
+               "cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"                         \
+               : "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");          \
+       (result == 0);                                                          \
+})
+
+#else /* !ASM_SUPPORTED */
+
+#define _raw_write_lock(l)                                                             \
+({                                                                                     \
+       __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);                       \
+       __u32 *ia64_write_lock_ptr = (__u32 *) (l);                                     \
+       do {                                                                            \
+               while (*ia64_write_lock_ptr)                                            \
+                       ia64_barrier();                                                 \
+               ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);     \
+       } while (ia64_val);                                                             \
+})
+
+#define _raw_write_trylock(rw)                                         \
+({                                                                     \
+       __u64 ia64_val;                                                 \
+       __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1);                  \
+       ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);   \
+       (ia64_val == 0);                                                \
+})
+
+#endif /* !ASM_SUPPORTED */
+
+#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+
+#define _raw_write_unlock(x)                                                           \
+({                                                                                     \
+       smp_mb__before_clear_bit();     /* need barrier before releasing lock... */     \
+       clear_bit(31, (x));                                                             \
+})
+
+#endif /*  _ASM_IA64_SPINLOCK_H */
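
The C fallback path above (the !ASM_SUPPORTED _raw_spin_lock) is a compare-exchange with acquire semantics; on failure it spins with plain loads until the lock looks free, then retries the compare-exchange, and unlock is a barrier plus a store of zero. The portable sketch below (illustrative only, not part of this changeset) expresses the same shape with GCC __atomic builtins standing in for ia64_cmpxchg4_acq and ia64_barrier:

/* Sketch only: cmpxchg-acquire spin lock with a read-only contention loop. */
#include <stdio.h>

typedef struct { volatile unsigned int lock; } sketch_spinlock_t;

static void sketch_spin_lock(sketch_spinlock_t *l)
{
        unsigned int expected;

        for (;;) {
                expected = 0;
                /* try to swing 0 -> 1 with acquire semantics */
                if (__atomic_compare_exchange_n(&l->lock, &expected, 1, 0,
                                                __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
                        return;
                /* lock was held: spin on plain loads until it looks free */
                while (__atomic_load_n(&l->lock, __ATOMIC_RELAXED))
                        ;       /* cpu_relax() would go here */
        }
}

static void sketch_spin_unlock(sketch_spinlock_t *l)
{
        __atomic_store_n(&l->lock, 0, __ATOMIC_RELEASE);        /* barrier + store 0 */
}

int main(void)
{
        sketch_spinlock_t l = { 0 };

        sketch_spin_lock(&l);
        printf("locked: %u\n", l.lock);
        sketch_spin_unlock(&l);
        printf("unlocked: %u\n", l.lock);
        return 0;
}
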
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/string.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/string.h   Tue Aug  2 23:59:09 2005
@@ -0,0 +1,22 @@
+#ifndef _ASM_IA64_STRING_H
+#define _ASM_IA64_STRING_H
+
+/*
+ * Here is where we want to put optimized versions of the string
+ * routines.
+ *
+ * Copyright (C) 1998-2000, 2002 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <linux/config.h>      /* remove this once we remove the A-step workaround... */
+
+#define __HAVE_ARCH_STRLEN     1 /* see arch/ia64/lib/strlen.S */
+#define __HAVE_ARCH_MEMSET     1 /* see arch/ia64/lib/memset.S */
+#define __HAVE_ARCH_MEMCPY     1 /* see arch/ia64/lib/memcpy.S */
+
+extern __kernel_size_t strlen (const char *);
+extern void *memcpy (void *, const void *, __kernel_size_t);
+extern void *memset (void *, int, __kernel_size_t);
+
+#endif /* _ASM_IA64_STRING_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/thread_info.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/thread_info.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+#ifndef _ASM_IA64_THREAD_INFO_H
+#define _ASM_IA64_THREAD_INFO_H
+
+#include <asm/offsets.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+
+#define PREEMPT_ACTIVE_BIT 30
+#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * On IA-64, we want to keep the task structure and kernel stack together, so they can be
+ * mapped by a single TLB entry and so they can be addressed by the "current" pointer
+ * without having to do pointer masking.
+ */
+struct thread_info {
+       struct task_struct *task;       /* XXX not really needed, except for dup_task_struct() */
+       struct exec_domain *exec_domain;/* execution domain */
+       __u32 flags;                    /* thread_info flags (see TIF_*) */
+       __u32 cpu;                      /* current CPU */
+       mm_segment_t addr_limit;        /* user-level address space limit */
+       __s32 preempt_count;            /* 0=premptable, <0=BUG; will also serve as bh-counter */
+       struct restart_block restart_block;
+       struct {
+               int signo;
+               int code;
+               void __user *addr;
+               unsigned long start_time;
+               pid_t pid;
+       } sigdelayed;                   /* Saved information for TIF_SIGDELAYED */
+};
+
+#define THREAD_SIZE                    KERNEL_STACK_SIZE
+
+#define INIT_THREAD_INFO(tsk)                  \
+{                                              \
+       .task           = &tsk,                 \
+       .exec_domain    = &default_exec_domain, \
+       .flags          = 0,                    \
+       .cpu            = 0,                    \
+       .addr_limit     = KERNEL_DS,            \
+       .preempt_count  = 0,                    \
+       .restart_block = {                      \
+               .fn = do_no_restart_syscall,    \
+       },                                      \
+}
+
+/* how to get the thread information struct from C */
+#define current_thread_info()  ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
+#define alloc_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define free_thread_info(ti)   /* nothing */
+
+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
+#define alloc_task_struct()    ((task_t *)__get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER))
+#define free_task_struct(tsk)  free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
+
+#endif /* !__ASSEMBLY */
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to access
+ * - pending work-to-be-done flags are in least-significant 16 bits, other flags
+ *   in top 16 bits
+ */
+#define TIF_NOTIFY_RESUME      0       /* resumption notification requested */
+#define TIF_SIGPENDING         1       /* signal pending */
+#define TIF_NEED_RESCHED       2       /* rescheduling necessary */
+#define TIF_SYSCALL_TRACE      3       /* syscall trace active */
+#define TIF_SYSCALL_AUDIT      4       /* syscall auditing active */
+#define TIF_SIGDELAYED         5       /* signal delayed from MCA/INIT/NMI/PMI context */
+#define TIF_POLLING_NRFLAG     16      /* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_MEMDIE             17
+
+#define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_TRACEAUDIT        (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+#define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
+#define _TIF_SIGDELAYED        (1 << TIF_SIGDELAYED)
+#define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
+
+/* "work to do on user-return" bits */
+#define TIF_ALLWORK_MASK       (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED)
+/* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
+#define TIF_WORK_MASK          (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
+
+#endif /* _ASM_IA64_THREAD_INFO_H */
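
The current_thread_info()/alloc_thread_info() macros above are pure pointer arithmetic: the thread_info lives IA64_TASK_SIZE bytes past the task struct inside one combined stack allocation, which is what lets a single TLB entry map both. A stand-alone sketch of that layout (illustrative only, not part of this changeset; the sizes and field names are made up):

/* Sketch only: thread_info located at a fixed offset inside the task allocation. */
#include <stdio.h>
#include <stdlib.h>

#define TASK_SIZE_SKETCH   256          /* stands in for IA64_TASK_SIZE */
#define STACK_SIZE_SKETCH  4096         /* stands in for KERNEL_STACK_SIZE */

struct sketch_thread_info { unsigned int flags, cpu; };

static struct sketch_thread_info *sketch_thread_info(void *task)
{
        /* same shape as: (struct thread_info *)((char *)current + IA64_TASK_SIZE) */
        return (struct sketch_thread_info *)((char *)task + TASK_SIZE_SKETCH);
}

int main(void)
{
        void *task = malloc(STACK_SIZE_SKETCH);         /* one combined allocation */
        struct sketch_thread_info *ti = sketch_thread_info(task);

        ti->cpu = 3;
        printf("task %p, thread_info %p, cpu %u\n", task, (void *)ti, ti->cpu);
        free(task);
        return 0;
}
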
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/timex.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/timex.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,40 @@
+#ifndef _ASM_IA64_TIMEX_H
+#define _ASM_IA64_TIMEX_H
+
+/*
+ * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+/*
+ * 2001/01/18 davidm   Removed CLOCK_TICK_RATE.  It makes no sense on IA-64.
+ *                     Also removed cacheflush_time as it's entirely unused.
+ */
+
+#include <asm/intrinsics.h>
+#include <asm/processor.h>
+
+typedef unsigned long cycles_t;
+
+/*
+ * For performance reasons, we don't want to define CLOCK_TICK_TRATE as
+ * local_cpu_data->itc_rate.  Fortunately, we don't have to, either: according to George
+ * Anzinger, 1/CLOCK_TICK_RATE is taken as the resolution of the timer clock.  The time
+ * calculation assumes that you will use enough of these so that your tick size <= 1/HZ.
+ * If the calculation shows that your CLOCK_TICK_RATE can not supply exactly 1/HZ ticks,
+ * the actual value is calculated and used to update the wall clock each jiffie.  Setting
+ * the CLOCK_TICK_RATE to x*HZ insures that the calculation will find no errors.  Hence we
+ * pick a multiple of HZ which gives us a (totally virtual) CLOCK_TICK_RATE of about
+ * 100MHz.
+ */
+#define CLOCK_TICK_RATE                (HZ * 100000UL)
+
+static inline cycles_t
+get_cycles (void)
+{
+       cycles_t ret;
+
+       ret = ia64_getreg(_IA64_REG_AR_ITC);
+       return ret;
+}
+
+#endif /* _ASM_IA64_TIMEX_H */
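
get_cycles() above simply reads the Interval Time Counter register, so timing an interval means subtracting two samples and scaling by the ITC frequency. The sketch below (illustrative only, not part of this changeset; the tick rate and sample values are made up, and on real hardware the rate comes from firmware, not CLOCK_TICK_RATE) shows that conversion:

/* Sketch only: turning a cycles_t delta into nanoseconds. */
#include <stdio.h>

typedef unsigned long cycles_t;

#define SKETCH_ITC_TICKS_PER_SEC 1000000000UL   /* pretend 1 GHz ITC */

static unsigned long sketch_cycles_to_ns(cycles_t start, cycles_t end)
{
        cycles_t delta = end - start;   /* unsigned wrap-around handled for free */

        /* overflow for very long intervals ignored in this sketch */
        return (unsigned long)(delta * 1000000000UL / SKETCH_ITC_TICKS_PER_SEC);
}

int main(void)
{
        cycles_t t0 = 1000, t1 = 1000 + 2500;   /* made-up ITC samples */

        printf("elapsed: %lu ns\n", sketch_cycles_to_ns(t0, t1));
        return 0;
}
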
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/topology.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/topology.h Tue Aug  2 23:59:09 2005
@@ -0,0 +1,90 @@
+/*
+ * linux/include/asm-ia64/topology.h
+ *
+ * Copyright (C) 2002, Erich Focht, NEC
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _ASM_IA64_TOPOLOGY_H
+#define _ASM_IA64_TOPOLOGY_H
+
+#include <asm/acpi.h>
+#include <asm/numa.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_NUMA
+/*
+ * Returns the number of the node containing CPU 'cpu'
+ */
+#define cpu_to_node(cpu) (int)(cpu_to_node_map[cpu])
+
+/*
+ * Returns a bitmask of CPUs on Node 'node'.
+ */
+#define node_to_cpumask(node) (node_to_cpu_mask[node])
+
+/*
+ * Returns the number of the node containing Node 'nid'.
+ * Not implemented here. Multi-level hierarchies detected with
+ * the help of node_distance().
+ */
+#define parent_node(nid) (nid)
+
+/*
+ * Returns the number of the first CPU on Node 'node'.
+ */
+#define node_to_first_cpu(node) (__ffs(node_to_cpumask(node)))
+
+void build_cpu_to_node_map(void);
+
+/* sched_domains SD_NODE_INIT for IA64 NUMA machines */
+#define SD_NODE_INIT (struct sched_domain) {           \
+       .span                   = CPU_MASK_NONE,        \
+       .parent                 = NULL,                 \
+       .groups                 = NULL,                 \
+       .min_interval           = 80,                   \
+       .max_interval           = 320,                  \
+       .busy_factor            = 320,                  \
+       .imbalance_pct          = 125,                  \
+       .cache_hot_time         = (10*1000000),         \
+       .cache_nice_tries       = 1,                    \
+       .per_cpu_gain           = 100,                  \
+       .flags                  = SD_LOAD_BALANCE       \
+                               | SD_BALANCE_EXEC       \
+                               | SD_BALANCE_NEWIDLE    \
+                               | SD_WAKE_IDLE          \
+                               | SD_WAKE_BALANCE,      \
+       .last_balance           = jiffies,              \
+       .balance_interval       = 1,                    \
+       .nr_balance_failed      = 0,                    \
+}
+
+/* sched_domains SD_ALLNODES_INIT for IA64 NUMA machines */
+#define SD_ALLNODES_INIT (struct sched_domain) {       \
+       .span                   = CPU_MASK_NONE,        \
+       .parent                 = NULL,                 \
+       .groups                 = NULL,                 \
+       .min_interval           = 80,                   \
+       .max_interval           = 320,                  \
+       .busy_factor            = 320,                  \
+       .imbalance_pct          = 125,                  \
+       .cache_hot_time         = (10*1000000),         \
+       .cache_nice_tries       = 1,                    \
+       .per_cpu_gain           = 100,                  \
+       .flags                  = SD_LOAD_BALANCE       \
+                               | SD_BALANCE_EXEC,      \
+       .last_balance           = jiffies,              \
+       .balance_interval       = 100*(63+num_online_cpus())/64,   \
+       .nr_balance_failed      = 0,                    \
+}
+
+#endif /* CONFIG_NUMA */
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_IA64_TOPOLOGY_H */
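
The NUMA macros above expose two views of the same topology: cpu_to_node() maps a CPU to its node, and node_to_cpumask() gives the set of CPUs on a node. The sketch below (illustrative only, not part of this changeset; a plain unsigned long stands in for cpumask_t and the topology data is made up) shows roughly the kind of derivation build_cpu_to_node_map() performs, building one map from the other:

/* Sketch only: deriving per-node CPU masks from a cpu-to-node table. */
#include <stdio.h>

#define SKETCH_NR_CPUS  4
#define SKETCH_NR_NODES 2

static const int cpu_to_node_sketch[SKETCH_NR_CPUS] = { 0, 0, 1, 1 };   /* hypothetical */

int main(void)
{
        unsigned long node_to_cpumask_sketch[SKETCH_NR_NODES] = { 0 };
        int cpu, node;

        for (cpu = 0; cpu < SKETCH_NR_CPUS; cpu++)
                node_to_cpumask_sketch[cpu_to_node_sketch[cpu]] |= 1UL << cpu;

        for (node = 0; node < SKETCH_NR_NODES; node++)
                printf("node %d: cpumask %#lx\n", node, node_to_cpumask_sketch[node]);
        return 0;
}
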
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/unaligned.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/unaligned.h        Tue Aug  2 23:59:09 2005
@@ -0,0 +1,121 @@
+#ifndef _ASM_IA64_UNALIGNED_H
+#define _ASM_IA64_UNALIGNED_H
+
+#include <linux/types.h>
+
+/*
+ * The main single-value unaligned transfer routines.
+ *
+ * Based on <asm-alpha/unaligned.h>.
+ *
+ * Copyright (C) 1998, 1999, 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+#define get_unaligned(ptr) \
+       ((__typeof__(*(ptr)))ia64_get_unaligned((ptr), sizeof(*(ptr))))
+
+#define put_unaligned(x,ptr) \
+       ia64_put_unaligned((unsigned long)(x), (ptr), sizeof(*(ptr)))
+
+struct __una_u64 { __u64 x __attribute__((packed)); };
+struct __una_u32 { __u32 x __attribute__((packed)); };
+struct __una_u16 { __u16 x __attribute__((packed)); };
+
+static inline unsigned long
+__uld8 (const unsigned long * addr)
+{
+       const struct __una_u64 *ptr = (const struct __una_u64 *) addr;
+       return ptr->x;
+}
+
+static inline unsigned long
+__uld4 (const unsigned int * addr)
+{
+       const struct __una_u32 *ptr = (const struct __una_u32 *) addr;
+       return ptr->x;
+}
+
+static inline unsigned long
+__uld2 (const unsigned short * addr)
+{
+       const struct __una_u16 *ptr = (const struct __una_u16 *) addr;
+       return ptr->x;
+}
+
+static inline void
+__ust8 (unsigned long val, unsigned long * addr)
+{
+       struct __una_u64 *ptr = (struct __una_u64 *) addr;
+       ptr->x = val;
+}
+
+static inline void
+__ust4 (unsigned long val, unsigned int * addr)
+{
+       struct __una_u32 *ptr = (struct __una_u32 *) addr;
+       ptr->x = val;
+}
+
+static inline void
+__ust2 (unsigned long val, unsigned short * addr)
+{
+       struct __una_u16 *ptr = (struct __una_u16 *) addr;
+       ptr->x = val;
+}
+
+
+/*
+ * This function doesn't actually exist.  The idea is that when someone uses the macros
+ * below with an unsupported size (datatype), the linker will alert us to the problem via
+ * an unresolved reference error.
+ */
+extern unsigned long ia64_bad_unaligned_access_length (void);
+
+#define ia64_get_unaligned(_ptr,size)						\
+({										\
+	const void *__ia64_ptr = (_ptr);					\
+	unsigned long __ia64_val;						\
+										\
+	switch (size) {								\
+	      case 1:								\
+		__ia64_val = *(const unsigned char *) __ia64_ptr;		\
+		break;								\
+	      case 2:								\
+		__ia64_val = __uld2((const unsigned short *)__ia64_ptr);	\
+		break;								\
+	      case 4:								\
+		__ia64_val = __uld4((const unsigned int *)__ia64_ptr);		\
+		break;								\
+	      case 8:								\
+		__ia64_val = __uld8((const unsigned long *)__ia64_ptr);	\
+		break;								\
+	      default:								\
+		__ia64_val = ia64_bad_unaligned_access_length();		\
+	}									\
+	__ia64_val;								\
+})
+
+#define ia64_put_unaligned(_val,_ptr,size)                             \
+do {                                                                   \
+       const void *__ia64_ptr = (_ptr);                                \
+       unsigned long __ia64_val = (_val);                              \
+                                                                       \
+       switch (size) {                                                 \
+             case 1:                                                   \
+               *(unsigned char *)__ia64_ptr = (__ia64_val);            \
+               break;                                                  \
+             case 2:                                                   \
+               __ust2(__ia64_val, (unsigned short *)__ia64_ptr);       \
+               break;                                                  \
+             case 4:                                                   \
+               __ust4(__ia64_val, (unsigned int *)__ia64_ptr);         \
+               break;                                                  \
+             case 8:                                                   \
+               __ust8(__ia64_val, (unsigned long *)__ia64_ptr);        \
+               break;                                                  \
+             default:                                                  \
+               ia64_bad_unaligned_access_length();                     \
+       }                                                               \
+} while (0)
+
+#endif /* _ASM_IA64_UNALIGNED_H */
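
A usage sketch of the interface above (illustrative only; the function name and buffer layout are made up, not part of the header):

    #include <asm/unaligned.h>

    /* Hypothetical example: read and update a 32-bit counter that sits at an
     * arbitrary byte offset inside a buffer and may therefore be misaligned. */
    static __u32 bump_unaligned_counter(void *buf, int offset)
    {
            __u32 *field = (__u32 *)((char *)buf + offset); /* possibly unaligned */
            __u32 val = get_unaligned(field);               /* safe unaligned load */

            put_unaligned(val + 1, field);                  /* safe unaligned store */
            return val;
    }
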
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/unistd.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/unistd.h   Tue Aug  2 23:59:09 2005
@@ -0,0 +1,399 @@
+#ifndef _ASM_IA64_UNISTD_H
+#define _ASM_IA64_UNISTD_H
+
+/*
+ * IA-64 Linux syscall numbers and inline-functions.
+ *
+ * Copyright (C) 1998-2005 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <asm/break.h>
+
+#define __BREAK_SYSCALL                        __IA64_BREAK_SYSCALL
+
+#define __NR_ni_syscall                        1024
+#define __NR_exit                      1025
+#define __NR_read                      1026
+#define __NR_write                     1027
+#define __NR_open                      1028
+#define __NR_close                     1029
+#define __NR_creat                     1030
+#define __NR_link                      1031
+#define __NR_unlink                    1032
+#define __NR_execve                    1033
+#define __NR_chdir                     1034
+#define __NR_fchdir                    1035
+#define __NR_utimes                    1036
+#define __NR_mknod                     1037
+#define __NR_chmod                     1038
+#define __NR_chown                     1039
+#define __NR_lseek                     1040
+#define __NR_getpid                    1041
+#define __NR_getppid                   1042
+#define __NR_mount                     1043
+#define __NR_umount                    1044
+#define __NR_setuid                    1045
+#define __NR_getuid                    1046
+#define __NR_geteuid                   1047
+#define __NR_ptrace                    1048
+#define __NR_access                    1049
+#define __NR_sync                      1050
+#define __NR_fsync                     1051
+#define __NR_fdatasync                 1052
+#define __NR_kill                      1053
+#define __NR_rename                    1054
+#define __NR_mkdir                     1055
+#define __NR_rmdir                     1056
+#define __NR_dup                       1057
+#define __NR_pipe                      1058
+#define __NR_times                     1059
+#define __NR_brk                       1060
+#define __NR_setgid                    1061
+#define __NR_getgid                    1062
+#define __NR_getegid                   1063
+#define __NR_acct                      1064
+#define __NR_ioctl                     1065
+#define __NR_fcntl                     1066
+#define __NR_umask                     1067
+#define __NR_chroot                    1068
+#define __NR_ustat                     1069
+#define __NR_dup2                      1070
+#define __NR_setreuid                  1071
+#define __NR_setregid                  1072
+#define __NR_getresuid                 1073
+#define __NR_setresuid                 1074
+#define __NR_getresgid                 1075
+#define __NR_setresgid                 1076
+#define __NR_getgroups                 1077
+#define __NR_setgroups                 1078
+#define __NR_getpgid                   1079
+#define __NR_setpgid                   1080
+#define __NR_setsid                    1081
+#define __NR_getsid                    1082
+#define __NR_sethostname               1083
+#define __NR_setrlimit                 1084
+#define __NR_getrlimit                 1085
+#define __NR_getrusage                 1086
+#define __NR_gettimeofday              1087
+#define __NR_settimeofday              1088
+#define __NR_select                    1089
+#define __NR_poll                      1090
+#define __NR_symlink                   1091
+#define __NR_readlink                  1092
+#define __NR_uselib                    1093
+#define __NR_swapon                    1094
+#define __NR_swapoff                   1095
+#define __NR_reboot                    1096
+#define __NR_truncate                  1097
+#define __NR_ftruncate                 1098
+#define __NR_fchmod                    1099
+#define __NR_fchown                    1100
+#define __NR_getpriority               1101
+#define __NR_setpriority               1102
+#define __NR_statfs                    1103
+#define __NR_fstatfs                   1104
+#define __NR_gettid                    1105
+#define __NR_semget                    1106
+#define __NR_semop                     1107
+#define __NR_semctl                    1108
+#define __NR_msgget                    1109
+#define __NR_msgsnd                    1110
+#define __NR_msgrcv                    1111
+#define __NR_msgctl                    1112
+#define __NR_shmget                    1113
+#define __NR_shmat                     1114
+#define __NR_shmdt                     1115
+#define __NR_shmctl                    1116
+/* also known as klogctl() in GNU libc: */
+#define __NR_syslog                    1117
+#define __NR_setitimer                 1118
+#define __NR_getitimer                 1119
+/* 1120 was __NR_old_stat */
+/* 1121 was __NR_old_lstat */
+/* 1122 was __NR_old_fstat */
+#define __NR_vhangup                   1123
+#define __NR_lchown                    1124
+#define __NR_remap_file_pages          1125
+#define __NR_wait4                     1126
+#define __NR_sysinfo                   1127
+#define __NR_clone                     1128
+#define __NR_setdomainname             1129
+#define __NR_uname                     1130
+#define __NR_adjtimex                  1131
+/* 1132 was __NR_create_module */
+#define __NR_init_module               1133
+#define __NR_delete_module             1134
+/* 1135 was __NR_get_kernel_syms */
+/* 1136 was __NR_query_module */
+#define __NR_quotactl                  1137
+#define __NR_bdflush                   1138
+#define __NR_sysfs                     1139
+#define __NR_personality               1140
+#define __NR_afs_syscall               1141
+#define __NR_setfsuid                  1142
+#define __NR_setfsgid                  1143
+#define __NR_getdents                  1144
+#define __NR_flock                     1145
+#define __NR_readv                     1146
+#define __NR_writev                    1147
+#define __NR_pread64                   1148
+#define __NR_pwrite64                  1149
+#define __NR__sysctl                   1150
+#define __NR_mmap                      1151
+#define __NR_munmap                    1152
+#define __NR_mlock                     1153
+#define __NR_mlockall                  1154
+#define __NR_mprotect                  1155
+#define __NR_mremap                    1156
+#define __NR_msync                     1157
+#define __NR_munlock                   1158
+#define __NR_munlockall                        1159
+#define __NR_sched_getparam            1160
+#define __NR_sched_setparam            1161
+#define __NR_sched_getscheduler                1162
+#define __NR_sched_setscheduler                1163
+#define __NR_sched_yield               1164
+#define __NR_sched_get_priority_max    1165
+#define __NR_sched_get_priority_min    1166
+#define __NR_sched_rr_get_interval     1167
+#define __NR_nanosleep                 1168
+#define __NR_nfsservctl                        1169
+#define __NR_prctl                     1170
+/* 1171 is reserved for backwards compatibility with old __NR_getpagesize */
+#define __NR_mmap2                     1172
+#define __NR_pciconfig_read            1173
+#define __NR_pciconfig_write           1174
+#define __NR_perfmonctl                        1175
+#define __NR_sigaltstack               1176
+#define __NR_rt_sigaction              1177
+#define __NR_rt_sigpending             1178
+#define __NR_rt_sigprocmask            1179
+#define __NR_rt_sigqueueinfo           1180
+#define __NR_rt_sigreturn              1181
+#define __NR_rt_sigsuspend             1182
+#define __NR_rt_sigtimedwait           1183
+#define __NR_getcwd                    1184
+#define __NR_capget                    1185
+#define __NR_capset                    1186
+#define __NR_sendfile                  1187
+#define __NR_getpmsg                   1188
+#define __NR_putpmsg                   1189
+#define __NR_socket                    1190
+#define __NR_bind                      1191
+#define __NR_connect                   1192
+#define __NR_listen                    1193
+#define __NR_accept                    1194
+#define __NR_getsockname               1195
+#define __NR_getpeername               1196
+#define __NR_socketpair                        1197
+#define __NR_send                      1198
+#define __NR_sendto                    1199
+#define __NR_recv                      1200
+#define __NR_recvfrom                  1201
+#define __NR_shutdown                  1202
+#define __NR_setsockopt                        1203
+#define __NR_getsockopt                        1204
+#define __NR_sendmsg                   1205
+#define __NR_recvmsg                   1206
+#define __NR_pivot_root                        1207
+#define __NR_mincore                   1208
+#define __NR_madvise                   1209
+#define __NR_stat                      1210
+#define __NR_lstat                     1211
+#define __NR_fstat                     1212
+#define __NR_clone2                    1213
+#define __NR_getdents64                        1214
+#define __NR_getunwind                 1215
+#define __NR_readahead                 1216
+#define __NR_setxattr                  1217
+#define __NR_lsetxattr                 1218
+#define __NR_fsetxattr                 1219
+#define __NR_getxattr                  1220
+#define __NR_lgetxattr                 1221
+#define __NR_fgetxattr                 1222
+#define __NR_listxattr                 1223
+#define __NR_llistxattr                        1224
+#define __NR_flistxattr                        1225
+#define __NR_removexattr               1226
+#define __NR_lremovexattr              1227
+#define __NR_fremovexattr              1228
+#define __NR_tkill                     1229
+#define __NR_futex                     1230
+#define __NR_sched_setaffinity         1231
+#define __NR_sched_getaffinity         1232
+#define __NR_set_tid_address           1233
+#define __NR_fadvise64                 1234
+#define __NR_tgkill                    1235
+#define __NR_exit_group                        1236
+#define __NR_lookup_dcookie            1237
+#define __NR_io_setup                  1238
+#define __NR_io_destroy                        1239
+#define __NR_io_getevents              1240
+#define __NR_io_submit                 1241
+#define __NR_io_cancel                 1242
+#define __NR_epoll_create              1243
+#define __NR_epoll_ctl                 1244
+#define __NR_epoll_wait                        1245
+#define __NR_restart_syscall           1246
+#define __NR_semtimedop                        1247
+#define __NR_timer_create              1248
+#define __NR_timer_settime             1249
+#define __NR_timer_gettime             1250
+#define __NR_timer_getoverrun          1251
+#define __NR_timer_delete              1252
+#define __NR_clock_settime             1253
+#define __NR_clock_gettime             1254
+#define __NR_clock_getres              1255
+#define __NR_clock_nanosleep           1256
+#define __NR_fstatfs64                 1257
+#define __NR_statfs64                  1258
+#define __NR_mbind                     1259
+#define __NR_get_mempolicy             1260
+#define __NR_set_mempolicy             1261
+#define __NR_mq_open                   1262
+#define __NR_mq_unlink                 1263
+#define __NR_mq_timedsend              1264
+#define __NR_mq_timedreceive           1265
+#define __NR_mq_notify                 1266
+#define __NR_mq_getsetattr             1267
+#define __NR_kexec_load                        1268
+#define __NR_vserver                   1269
+#define __NR_waitid                    1270
+#define __NR_add_key                   1271
+#define __NR_request_key               1272
+#define __NR_keyctl                    1273
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+#define NR_syscalls                    256 /* length of syscall table */
+
+#define __ARCH_WANT_SYS_RT_SIGACTION
+
+#ifdef CONFIG_IA32_SUPPORT
+# define __ARCH_WANT_SYS_FADVISE64
+# define __ARCH_WANT_SYS_GETPGRP
+# define __ARCH_WANT_SYS_LLSEEK
+# define __ARCH_WANT_SYS_NICE
+# define __ARCH_WANT_SYS_OLD_GETRLIMIT
+# define __ARCH_WANT_SYS_OLDUMOUNT
+# define __ARCH_WANT_SYS_SIGPENDING
+# define __ARCH_WANT_SYS_SIGPROCMASK
+# define __ARCH_WANT_COMPAT_SYS_TIME
+#endif
+
+#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
+
+#include <linux/types.h>
+#include <linux/linkage.h>
+#include <linux/compiler.h>
+
+extern long __ia64_syscall (long a0, long a1, long a2, long a3, long a4, long nr);
+
+#ifdef __KERNEL_SYSCALLS__
+
+#include <linux/compiler.h>
+#include <linux/string.h>
+#include <linux/signal.h>
+#include <asm/ptrace.h>
+#include <linux/stringify.h>
+#include <linux/syscalls.h>
+
+static inline long
+open (const char * name, int mode, int flags)
+{
+       return sys_open(name, mode, flags);
+}
+
+static inline long
+dup (int fd)
+{
+       return sys_dup(fd);
+}
+
+static inline long
+close (int fd)
+{
+       return sys_close(fd);
+}
+
+static inline off_t
+lseek (int fd, off_t off, int whence)
+{
+       return sys_lseek(fd, off, whence);
+}
+
+static inline void
+_exit (int value)
+{
+       sys_exit(value);
+}
+
+#define exit(x) _exit(x)
+
+static inline long
+write (int fd, const char * buf, size_t nr)
+{
+       return sys_write(fd, buf, nr);
+}
+
+static inline long
+read (int fd, char * buf, size_t nr)
+{
+       return sys_read(fd, buf, nr);
+}
+
+
+static inline long
+setsid (void)
+{
+       return sys_setsid();
+}
+
+static inline pid_t
+waitpid (int pid, int * wait_stat, int flags)
+{
+       return sys_wait4(pid, wait_stat, flags, NULL);
+}
+
+
+extern int execve (const char *filename, char *const av[], char *const ep[]);
+extern pid_t clone (unsigned long flags, void *sp);
+
+#endif /* __KERNEL_SYSCALLS__ */
+
+asmlinkage unsigned long sys_mmap(
+                               unsigned long addr, unsigned long len,
+                               int prot, int flags,
+                               int fd, long off);
+asmlinkage unsigned long sys_mmap2(
+                               unsigned long addr, unsigned long len,
+                               int prot, int flags,
+                               int fd, long pgoff);
+struct pt_regs;
+struct sigaction;
+long sys_execve(char __user *filename, char __user * __user *argv,
+                          char __user * __user *envp, struct pt_regs *regs);
+asmlinkage long sys_pipe(void);
+asmlinkage long sys_ptrace(long request, pid_t pid,
+                          unsigned long addr, unsigned long data);
+asmlinkage long sys_rt_sigaction(int sig,
+                                const struct sigaction __user *act,
+                                struct sigaction __user *oact,
+                                size_t sigsetsize);
+
+/*
+ * "Conditional" syscalls
+ *
+ * Note, this macro can only be used in the file which defines sys_ni_syscall, i.e., in
+ * kernel/sys_ni.c.  This version causes warnings because the declaration isn't a
+ * proper prototype, but we can't use __typeof__ either, because not all cond_syscall()
+ * declarations have prototypes at the moment.
+ */
+#define cond_syscall(x) asmlinkage long x (void) __attribute__((weak,alias("sys_ni_syscall")));
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_IA64_UNISTD_H */
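
A small sketch of how these numbers line up with the NR_syscalls-sized table mentioned above; the base constant and helper names below are assumptions for illustration, not part of the header:

    #include <asm/unistd.h>

    /* IA-64 syscall numbers start at 1024 (__NR_ni_syscall above), so a
     * dispatcher indexing a NR_syscalls-sized table subtracts that base.
     * IA64_FIRST_SYSCALL and syscall_table_index() are illustrative names. */
    #define IA64_FIRST_SYSCALL      1024

    static inline int syscall_table_index(long nr)
    {
            return (int)(nr - IA64_FIRST_SYSCALL);  /* e.g. __NR_write (1027) -> 3 */
    }
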
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/unwind.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/unwind.h   Tue Aug  2 23:59:09 2005
@@ -0,0 +1,240 @@
+#ifndef _ASM_IA64_UNWIND_H
+#define _ASM_IA64_UNWIND_H
+
+/*
+ * Copyright (C) 1999-2000, 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *
+ * A simple API for unwinding kernel stacks.  This is used for
+ * debugging and error reporting purposes.  The kernel doesn't need
+ * full-blown stack unwinding with all the bells and whistles, so there
+ * is not much point in implementing the full IA-64 unwind API (though
+ * it would of course be possible to implement the kernel API on top
+ * of it).
+ */
+
+struct task_struct;    /* forward declaration */
+struct switch_stack;   /* forward declaration */
+
+enum unw_application_register {
+       UNW_AR_BSP,
+       UNW_AR_BSPSTORE,
+       UNW_AR_PFS,
+       UNW_AR_RNAT,
+       UNW_AR_UNAT,
+       UNW_AR_LC,
+       UNW_AR_EC,
+       UNW_AR_FPSR,
+       UNW_AR_RSC,
+       UNW_AR_CCV,
+       UNW_AR_CSD,
+       UNW_AR_SSD
+};
+
+/*
+ * The following declarations are private to the unwind
+ * implementation:
+ */
+
+struct unw_stack {
+       unsigned long limit;
+       unsigned long top;
+};
+
+#define UNW_FLAG_INTERRUPT_FRAME       (1UL << 0)
+
+/*
+ * No user of this module should ever access this structure directly
+ * as it is subject to change.  It is declared here solely so we can
+ * use automatic variables.
+ */
+struct unw_frame_info {
+       struct unw_stack regstk;
+       struct unw_stack memstk;
+       unsigned int flags;
+       short hint;
+       short prev_script;
+
+       /* current frame info: */
+       unsigned long bsp;              /* backing store pointer value */
+       unsigned long sp;               /* stack pointer value */
+       unsigned long psp;              /* previous sp value */
+       unsigned long ip;               /* instruction pointer value */
+       unsigned long pr;               /* current predicate values */
+       unsigned long *cfm_loc;         /* cfm save location (or NULL) */
+       unsigned long pt;               /* struct pt_regs location */
+
+       struct task_struct *task;
+       struct switch_stack *sw;
+
+       /* preserved state: */
+       unsigned long *bsp_loc;         /* previous bsp save location */
+       unsigned long *bspstore_loc;
+       unsigned long *pfs_loc;
+       unsigned long *rnat_loc;
+       unsigned long *rp_loc;
+       unsigned long *pri_unat_loc;
+       unsigned long *unat_loc;
+       unsigned long *pr_loc;
+       unsigned long *lc_loc;
+       unsigned long *fpsr_loc;
+       struct unw_ireg {
+               unsigned long *loc;
+               struct unw_ireg_nat {
+                       long type : 3;                  /* enum unw_nat_type */
+                       signed long off : 61;           /* NaT word is at loc+nat.off */
+               } nat;
+       } r4, r5, r6, r7;
+       unsigned long *b1_loc, *b2_loc, *b3_loc, *b4_loc, *b5_loc;
+       struct ia64_fpreg *f2_loc, *f3_loc, *f4_loc, *f5_loc, *fr_loc[16];
+};
+
+/*
+ * The official API follows below:
+ */
+
+struct unw_table_entry {
+       u64 start_offset;
+       u64 end_offset;
+       u64 info_offset;
+};
+
+/*
+ * Initialize unwind support.
+ */
+extern void unw_init (void);
+
+extern void *unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
+                                  const void *table_start, const void *table_end);
+
+extern void unw_remove_unwind_table (void *handle);
+
+/*
+ * Prepare to unwind blocked task t.
+ */
+extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t);
+
+/*
+ * Prepare to unwind from interruption.  The pt-regs and switch-stack structures must
+ * be "adjacent" (no state modifications between pt-regs and switch-stack).
+ */
+extern void unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
+                                       struct pt_regs *pt, struct switch_stack *sw);
+
+extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t,
+                                struct switch_stack *sw);
+
+/*
+ * Prepare to unwind the currently running thread.
+ */
+extern void unw_init_running (void (*callback)(struct unw_frame_info *info, void *arg), void *arg);
+
+/*
+ * Unwind to the previous frame.  Returns 0 if successful, negative
+ * number in case of an error.
+ */
+extern int unw_unwind (struct unw_frame_info *info);
+
+/*
+ * Unwind until the return pointer is in user-land (or until an error
+ * occurs).  Returns 0 if successful, negative number in case of
+ * error.
+ */
+extern int unw_unwind_to_user (struct unw_frame_info *info);
+
+#define unw_is_intr_frame(info)        (((info)->flags & UNW_FLAG_INTERRUPT_FRAME) != 0)
+
+static inline int
+unw_get_ip (struct unw_frame_info *info, unsigned long *valp)
+{
+       *valp = (info)->ip;
+       return 0;
+}
+
+static inline int
+unw_get_sp (struct unw_frame_info *info, unsigned long *valp)
+{
+       *valp = (info)->sp;
+       return 0;
+}
+
+static inline int
+unw_get_psp (struct unw_frame_info *info, unsigned long *valp)
+{
+       *valp = (info)->psp;
+       return 0;
+}
+
+static inline int
+unw_get_bsp (struct unw_frame_info *info, unsigned long *valp)
+{
+       *valp = (info)->bsp;
+       return 0;
+}
+
+static inline int
+unw_get_cfm (struct unw_frame_info *info, unsigned long *valp)
+{
+       *valp = *(info)->cfm_loc;
+       return 0;
+}
+
+static inline int
+unw_set_cfm (struct unw_frame_info *info, unsigned long val)
+{
+       *(info)->cfm_loc = val;
+       return 0;
+}
+
+static inline int
+unw_get_rp (struct unw_frame_info *info, unsigned long *val)
+{
+       if (!info->rp_loc)
+               return -1;
+       *val = *info->rp_loc;
+       return 0;
+}
+
+extern int unw_access_gr (struct unw_frame_info *, int, unsigned long *, char *, int);
+extern int unw_access_br (struct unw_frame_info *, int, unsigned long *, int);
+extern int unw_access_fr (struct unw_frame_info *, int, struct ia64_fpreg *, int);
+extern int unw_access_ar (struct unw_frame_info *, int, unsigned long *, int);
+extern int unw_access_pr (struct unw_frame_info *, unsigned long *, int);
+
+static inline int
+unw_set_gr (struct unw_frame_info *i, int n, unsigned long v, char nat)
+{
+       return unw_access_gr(i, n, &v, &nat, 1);
+}
+
+static inline int
+unw_set_br (struct unw_frame_info *i, int n, unsigned long v)
+{
+       return unw_access_br(i, n, &v, 1);
+}
+
+static inline int
+unw_set_fr (struct unw_frame_info *i, int n, struct ia64_fpreg v)
+{
+       return unw_access_fr(i, n, &v, 1);
+}
+
+static inline int
+unw_set_ar (struct unw_frame_info *i, int n, unsigned long v)
+{
+       return unw_access_ar(i, n, &v, 1);
+}
+
+static inline int
+unw_set_pr (struct unw_frame_info *i, unsigned long v)
+{
+       return unw_access_pr(i, &v, 1);
+}
+
+#define unw_get_gr(i,n,v,nat)  unw_access_gr(i,n,v,nat,0)
+#define unw_get_br(i,n,v)      unw_access_br(i,n,v,0)
+#define unw_get_fr(i,n,v)      unw_access_fr(i,n,v,0)
+#define unw_get_ar(i,n,v)      unw_access_ar(i,n,v,0)
+#define unw_get_pr(i,v)                unw_access_pr(i,v,0)
+
+#endif /* _ASM_IA64_UNWIND_H */
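
A minimal sketch of the documented call pattern: walk the current kernel stack with unw_init_running() and log each return address. The callback name and printk format are illustrative only:

    #include <linux/kernel.h>
    #include <asm/unwind.h>

    /* Hypothetical callback: step outward one frame at a time. */
    static void dump_frames(struct unw_frame_info *info, void *arg)
    {
            unsigned long ip;

            do {
                    unw_get_ip(info, &ip);
                    if (ip == 0)
                            break;
                    printk(KERN_DEBUG "frame ip=0x%lx\n", ip);
            } while (unw_unwind(info) >= 0);        /* negative result: error or done */
    }

    /* unw_init_running() builds the frame info for the current thread and
     * invokes the callback with it. */
    static void dump_current_stack(void)
    {
            unw_init_running(dump_frames, NULL);
    }
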
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/asm/ustack.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/asm/ustack.h   Tue Aug  2 23:59:09 2005
@@ -0,0 +1,16 @@
+#ifndef _ASM_IA64_USTACK_H
+#define _ASM_IA64_USTACK_H
+
+/*
+ * Constants for the user stack size
+ */
+
+#include <asm/page.h>
+
+/* The absolute hard limit for stack size is 1/2 of the mappable space in the region */
+#define MAX_USER_STACK_SIZE    (RGN_MAP_LIMIT/2)
+/* Make a default stack size of 2GB */
+#define DEFAULT_USER_STACK_SIZE        (1UL << 31)
+#define STACK_TOP              (0x6000000000000000UL + RGN_MAP_LIMIT)
+
+#endif /* _ASM_IA64_USTACK_H */
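
A quick illustration of how these constants combine; the helper name is hypothetical:

    #include <asm/ustack.h>

    /* Hypothetical helper: the lowest address a default-sized user stack can
     * grow down to, given that it is placed just below STACK_TOP. */
    static inline unsigned long default_stack_floor(void)
    {
            return STACK_TOP - DEFAULT_USER_STACK_SIZE;
    }
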
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/bcd.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/bcd.h  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,20 @@
+/* Permission is hereby granted to copy, modify and redistribute this code
+ * in terms of the GNU Library General Public License, Version 2 or later,
+ * at your option.
+ */
+
+/* macros to translate to/from binary and binary-coded decimal (frequently
+ * found in RTC chips).
+ */
+
+#ifndef _BCD_H
+#define _BCD_H
+
+#define BCD2BIN(val)   (((val) & 0x0f) + ((val)>>4)*10)
+#define BIN2BCD(val)   ((((val)/10)<<4) + (val)%10)
+
+/* backwards compat */
+#define BCD_TO_BIN(val) ((val)=BCD2BIN(val))
+#define BIN_TO_BCD(val) ((val)=BIN2BCD(val))
+
+#endif /* _BCD_H */
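
For example, an RTC driver reading a seconds register stored in BCD might use the macros above roughly like this (the function name and value are made up):

    #include <linux/bcd.h>

    /* Illustrative only: a register holding BCD 0x59 decodes to decimal 59,
     * and BIN2BCD() reproduces the register encoding when writing back. */
    static unsigned int rtc_seconds_from_reg(unsigned int reg_val)
    {
            return BCD2BIN(reg_val);        /* e.g. 0x59 -> 59 */
    }
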
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/bitmap.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/bitmap.h       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,255 @@
+#ifndef __LINUX_BITMAP_H
+#define __LINUX_BITMAP_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+
+/*
+ * bitmaps provide bit arrays that consume one or more unsigned
+ * longs.  The bitmap interface and available operations are listed
+ * here, in bitmap.h
+ *
+ * Function implementations generic to all architectures are in
+ * lib/bitmap.c.  Functions implementations that are architecture
+ * specific are in various include/asm-<arch>/bitops.h headers
+ * and other arch/<arch> specific files.
+ *
+ * See lib/bitmap.c for more details.
+ */
+
+/*
+ * The available bitmap operations and their rough meaning in the
+ * case that the bitmap is a single unsigned long are thus:
+ *
+ * bitmap_zero(dst, nbits)                     *dst = 0UL
+ * bitmap_fill(dst, nbits)                     *dst = ~0UL
+ * bitmap_copy(dst, src, nbits)                        *dst = *src
+ * bitmap_and(dst, src1, src2, nbits)          *dst = *src1 & *src2
+ * bitmap_or(dst, src1, src2, nbits)           *dst = *src1 | *src2
+ * bitmap_xor(dst, src1, src2, nbits)          *dst = *src1 ^ *src2
+ * bitmap_andnot(dst, src1, src2, nbits)       *dst = *src1 & ~(*src2)
+ * bitmap_complement(dst, src, nbits)          *dst = ~(*src)
+ * bitmap_equal(src1, src2, nbits)             Are *src1 and *src2 equal?
+ * bitmap_intersects(src1, src2, nbits)        Do *src1 and *src2 overlap?
+ * bitmap_subset(src1, src2, nbits)            Is *src1 a subset of *src2?
+ * bitmap_empty(src, nbits)                    Are all bits zero in *src?
+ * bitmap_full(src, nbits)                     Are all bits set in *src?
+ * bitmap_weight(src, nbits)                   Hamming Weight: number set bits
+ * bitmap_shift_right(dst, src, n, nbits)      *dst = *src >> n
+ * bitmap_shift_left(dst, src, n, nbits)       *dst = *src << n
+ * bitmap_scnprintf(buf, len, src, nbits)      Print bitmap src to buf
+ * bitmap_parse(ubuf, ulen, dst, nbits)                Parse bitmap dst from buf
+ */
+
+/*
+ * Also the following operations in asm/bitops.h apply to bitmaps.
+ *
+ * set_bit(bit, addr)                  *addr |= bit
+ * clear_bit(bit, addr)                        *addr &= ~bit
+ * change_bit(bit, addr)               *addr ^= bit
+ * test_bit(bit, addr)                 Is bit set in *addr?
+ * test_and_set_bit(bit, addr)         Set bit and return old value
+ * test_and_clear_bit(bit, addr)       Clear bit and return old value
+ * test_and_change_bit(bit, addr)      Change bit and return old value
+ * find_first_zero_bit(addr, nbits)    Position first zero bit in *addr
+ * find_first_bit(addr, nbits)         Position first set bit in *addr
+ * find_next_zero_bit(addr, nbits, bit)        Position next zero bit in *addr >= bit
+ * find_next_bit(addr, nbits, bit)     Position next set bit in *addr >= bit
+ */
+
+/*
+ * The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used
+ * to declare an array named 'name' of just enough unsigned longs to
+ * contain all bit positions from 0 to 'bits' - 1.
+ */
+
+/*
+ * lib/bitmap.c provides these functions:
+ */
+
+extern int __bitmap_empty(const unsigned long *bitmap, int bits);
+extern int __bitmap_full(const unsigned long *bitmap, int bits);
+extern int __bitmap_equal(const unsigned long *bitmap1,
+                       const unsigned long *bitmap2, int bits);
+extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
+                       int bits);
+extern void __bitmap_shift_right(unsigned long *dst,
+                        const unsigned long *src, int shift, int bits);
+extern void __bitmap_shift_left(unsigned long *dst,
+                        const unsigned long *src, int shift, int bits);
+extern void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+                       const unsigned long *bitmap2, int bits);
+extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+                       const unsigned long *bitmap2, int bits);
+extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+                       const unsigned long *bitmap2, int bits);
+extern void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+                       const unsigned long *bitmap2, int bits);
+extern int __bitmap_intersects(const unsigned long *bitmap1,
+                       const unsigned long *bitmap2, int bits);
+extern int __bitmap_subset(const unsigned long *bitmap1,
+                       const unsigned long *bitmap2, int bits);
+extern int __bitmap_weight(const unsigned long *bitmap, int bits);
+
+extern int bitmap_scnprintf(char *buf, unsigned int len,
+                       const unsigned long *src, int nbits);
+extern int bitmap_parse(const char __user *ubuf, unsigned int ulen,
+                       unsigned long *dst, int nbits);
+extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
+extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
+extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
+
+#define BITMAP_LAST_WORD_MASK(nbits)                                   \
+(                                                                      \
+       ((nbits) % BITS_PER_LONG) ?                                     \
+               (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL               \
+)
+
+static inline void bitmap_zero(unsigned long *dst, int nbits)
+{
+       if (nbits <= BITS_PER_LONG)
+               *dst = 0UL;
+       else {
+               int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+               memset(dst, 0, len);
+       }
+}
+
+static inline void bitmap_fill(unsigned long *dst, int nbits)
+{
+       size_t nlongs = BITS_TO_LONGS(nbits);
+       if (nlongs > 1) {
+               int len = (nlongs - 1) * sizeof(unsigned long);
+               memset(dst, 0xff,  len);
+       }
+       dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
+}
+
+static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
+                       int nbits)
+{
+       if (nbits <= BITS_PER_LONG)
+               *dst = *src;
+       else {
+               int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+               memcpy(dst, src, len);
+       }
+}
+
+static inline void bitmap_and(unsigned long *dst, const unsigned long *src1,
+                       const unsigned long *src2, int nbits)
+{
+       if (nbits <= BITS_PER_LONG)
+               *dst = *src1 & *src2;
+       else
+               __bitmap_and(dst, src1, src2, nbits);
+}
+
+static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
+                       const unsigned long *src2, int nbits)
+{
+       if (nbits <= BITS_PER_LONG)
+               *dst = *src1 | *src2;
+       else
+               __bitmap_or(dst, src1, src2, nbits);
+}
+
+static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
+                       const unsigned long *src2, int nbits)
+{
+       if (nbits <= BITS_PER_LONG)
+               *dst = *src1 ^ *src2;
+       else
+               __bitmap_xor(dst, src1, src2, nbits);
+}
+
+static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+                       const unsigned long *src2, int nbits)
+{
+       if (nbits <= BITS_PER_LONG)
+               *dst = *src1 & ~(*src2);
+       else
+               __bitmap_andnot(dst, src1, src2, nbits);
+}
+
+static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
+                       int nbits)
+{
+       if (nbits <= BITS_PER_LONG)
+               *dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
+       else
+               __bitmap_complement(dst, src, nbits);
+}
+
+static inline int bitmap_equal(const unsigned long *src1,
+                       const unsigned long *src2, int nbits)
+{
+       if (nbits <= BITS_PER_LONG)
+               return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
+       else
+               return __bitmap_equal(src1, src2, nbits);
+}
+
+static inline int bitmap_intersects(const unsigned long *src1,
+                       const unsigned long *src2, int nbits)
+{
+       if (nbits <= BITS_PER_LONG)
+               return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
+       else
+               return __bitmap_intersects(src1, src2, nbits);
+}
+
+static inline int bitmap_subset(const unsigned long *src1,
+                       const unsigned long *src2, int nbits)
+{
+       if (nbits <= BITS_PER_LONG)
+               return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
+       else
+               return __bitmap_subset(src1, src2, nbits);
+}
+
+static inline int bitmap_empty(const unsigned long *src, int nbits)
+{
+       if (nbits <= BITS_PER_LONG)
+               return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
+       else
+               return __bitmap_empty(src, nbits);
+}
+
+static inline int bitmap_full(const unsigned long *src, int nbits)
+{
+       if (nbits <= BITS_PER_LONG)
+               return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
+       else
+               return __bitmap_full(src, nbits);
+}
+
+static inline int bitmap_weight(const unsigned long *src, int nbits)
+{
+       return __bitmap_weight(src, nbits);
+}
+
+static inline void bitmap_shift_right(unsigned long *dst,
+                       const unsigned long *src, int n, int nbits)
+{
+       if (nbits <= BITS_PER_LONG)
+               *dst = *src >> n;
+       else
+               __bitmap_shift_right(dst, src, n, nbits);
+}
+
+static inline void bitmap_shift_left(unsigned long *dst,
+                       const unsigned long *src, int n, int nbits)
+{
+       if (nbits <= BITS_PER_LONG)
+               *dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits);
+       else
+               __bitmap_shift_left(dst, src, n, nbits);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __LINUX_BITMAP_H */
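
A short sketch of the interface described in the comments above, using DECLARE_BITMAP() from linux/types.h; the mask names and sizes are invented for the example:

    #include <linux/bitmap.h>

    #define DEMO_NBITS      128     /* spans two 64-bit words on ia64 */

    /* Hypothetical masks, for illustration only. */
    static DECLARE_BITMAP(mask_a, DEMO_NBITS);
    static DECLARE_BITMAP(mask_b, DEMO_NBITS);
    static DECLARE_BITMAP(common, DEMO_NBITS);

    static int count_common_bits(void)
    {
            bitmap_zero(mask_a, DEMO_NBITS);
            bitmap_zero(mask_b, DEMO_NBITS);
            set_bit(3, mask_a);     /* asm/bitops.h operation, per the comment above */
            set_bit(3, mask_b);
            set_bit(70, mask_a);

            bitmap_and(common, mask_a, mask_b, DEMO_NBITS); /* common = a & b */
            return bitmap_weight(common, DEMO_NBITS);       /* -> 1: only bit 3 is shared */
    }
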
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/bitops.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/bitops.h       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,137 @@
+#ifndef _LINUX_BITOPS_H
+#define _LINUX_BITOPS_H
+#include <asm/types.h>
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+
+static inline int generic_ffs(int x)
+{
+       int r = 1;
+
+       if (!x)
+               return 0;
+       if (!(x & 0xffff)) {
+               x >>= 16;
+               r += 16;
+       }
+       if (!(x & 0xff)) {
+               x >>= 8;
+               r += 8;
+       }
+       if (!(x & 0xf)) {
+               x >>= 4;
+               r += 4;
+       }
+       if (!(x & 3)) {
+               x >>= 2;
+               r += 2;
+       }
+       if (!(x & 1)) {
+               x >>= 1;
+               r += 1;
+       }
+       return r;
+}
+
+/*
+ * fls: find last bit set.
+ */
+
+static __inline__ int generic_fls(int x)
+{
+       int r = 32;
+
+       if (!x)
+               return 0;
+       if (!(x & 0xffff0000u)) {
+               x <<= 16;
+               r -= 16;
+       }
+       if (!(x & 0xff000000u)) {
+               x <<= 8;
+               r -= 8;
+       }
+       if (!(x & 0xf0000000u)) {
+               x <<= 4;
+               r -= 4;
+       }
+       if (!(x & 0xc0000000u)) {
+               x <<= 2;
+               r -= 2;
+       }
+       if (!(x & 0x80000000u)) {
+               x <<= 1;
+               r -= 1;
+       }
+       return r;
+}
+
+/*
+ * Include this here because some architectures need generic_ffs/fls in
+ * scope
+ */
+#include <asm/bitops.h>
+
+static __inline__ int get_bitmask_order(unsigned int count)
+{
+       int order;
+       
+       order = fls(count);
+       return order;   /* We could be slightly more clever with -1 here... */
+}
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+
+static inline unsigned int generic_hweight32(unsigned int w)
+{
+        unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555);
+        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+        res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F);
+        res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF);
+        return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF);
+}
+
+static inline unsigned int generic_hweight16(unsigned int w)
+{
+        unsigned int res = (w & 0x5555) + ((w >> 1) & 0x5555);
+        res = (res & 0x3333) + ((res >> 2) & 0x3333);
+        res = (res & 0x0F0F) + ((res >> 4) & 0x0F0F);
+        return (res & 0x00FF) + ((res >> 8) & 0x00FF);
+}
+
+static inline unsigned int generic_hweight8(unsigned int w)
+{
+        unsigned int res = (w & 0x55) + ((w >> 1) & 0x55);
+        res = (res & 0x33) + ((res >> 2) & 0x33);
+        return (res & 0x0F) + ((res >> 4) & 0x0F);
+}
+
+static inline unsigned long generic_hweight64(__u64 w)
+{
+#if BITS_PER_LONG < 64
+       return generic_hweight32((unsigned int)(w >> 32)) +
+                               generic_hweight32((unsigned int)w);
+#else
+       u64 res;
+       res = (w & 0x5555555555555555ul) + ((w >> 1) & 0x5555555555555555ul);
+       res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+       res = (res & 0x0F0F0F0F0F0F0F0Ful) + ((res >> 4) & 0x0F0F0F0F0F0F0F0Ful);
+       res = (res & 0x00FF00FF00FF00FFul) + ((res >> 8) & 0x00FF00FF00FF00FFul);
+       res = (res & 0x0000FFFF0000FFFFul) + ((res >> 16) & 0x0000FFFF0000FFFFul);
+       return (res & 0x00000000FFFFFFFFul) + ((res >> 32) & 0x00000000FFFFFFFFul);
+#endif
+}
+
+static inline unsigned long hweight_long(unsigned long w)
+{
+       return sizeof(w) == 4 ? generic_hweight32(w) : generic_hweight64(w);
+}
+
+#endif
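
The generic helpers above can be exercised directly; a small sketch with values chosen only to show the conventions (ffs()/fls() are 1-based and return 0 for 0):

    #include <linux/bitops.h>

    /* Illustrative checks of the generic fallbacks defined above. */
    static void bitops_examples(void)
    {
            int first = generic_ffs(0x50);                  /* lowest set bit is bit 4 -> 5 */
            int last  = generic_fls(0x50);                  /* highest set bit is bit 6 -> 7 */
            unsigned int ones = generic_hweight32(0xF0F0);  /* 8 bits set */

            (void)first; (void)last; (void)ones;
    }
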
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/byteorder/generic.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/byteorder/generic.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,172 @@
+#ifndef _LINUX_BYTEORDER_GENERIC_H
+#define _LINUX_BYTEORDER_GENERIC_H
+
+/*
+ * linux/byteorder_generic.h
+ * Generic Byte-reordering support
+ *
+ * Francois-Rene Rideau <fare@xxxxxxxxx> 19970707
+ *    gathered all the good ideas from all asm-foo/byteorder.h into one file,
+ *    cleaned them up.
+ *    I hope it is compliant with non-GCC compilers.
+ *    I decided to put __BYTEORDER_HAS_U64__ in byteorder.h,
+ *    because I wasn't sure it would be ok to put it in types.h
+ *    Upgraded it to 2.1.43
+ * Francois-Rene Rideau <fare@xxxxxxxxx> 19971012
+ *    Upgraded it to 2.1.57
+ *    to please Linus T., replaced huge #ifdef's between little/big endian
+ *    by nestedly #include'd files.
+ * Francois-Rene Rideau <fare@xxxxxxxxx> 19971205
+ *    Made it to 2.1.71; now a facelift:
+ *    Put files under include/linux/byteorder/
+ *    Split swab from generic support.
+ *
+ * TODO:
+ *   = Regular kernel maintainers could also replace all these manual
+ *    byteswap macros that remain, disseminated among drivers,
+ *    after some grep or the sources...
+ *   = Linus might want to rename all these macros and files to fit his taste,
+ *    to fit his personal naming scheme.
+ *   = it seems that a few drivers would also appreciate
+ *    nybble swapping support...
+ *   = every architecture could add their byteswap macro in asm/byteorder.h
+ *    see how some architectures already do (i386, alpha, ppc, etc)
+ *   = cpu_to_beXX and beXX_to_cpu might some day need to be well
+ *    distinguished throughout the kernel. This is not the case currently,
+ *    since little endian, big endian, and pdp endian machines don't need it.
+ *    But this might be the case for, say, a port of Linux to 20/21 bit
+ *    architectures (and F21 Linux addict around?).
+ */
+
+/*
+ * The following macros are to be defined by <asm/byteorder.h>:
+ *
+ * Conversion of long and short int between network and host format
+ *     ntohl(__u32 x)
+ *     ntohs(__u16 x)
+ *     htonl(__u32 x)
+ *     htons(__u16 x)
+ * It seems that some programs (which? where? or perhaps a standard? POSIX?)
+ * might like the above to be functions, not macros (why?).
+ * if that's true, then detect them, and take measures.
+ * Anyway, the measure is: define only ___ntohl as a macro instead,
+ * and in a separate file, have
+ * unsigned long inline ntohl(x){return ___ntohl(x);}
+ *
+ * The same for constant arguments
+ *     __constant_ntohl(__u32 x)
+ *     __constant_ntohs(__u16 x)
+ *     __constant_htonl(__u32 x)
+ *     __constant_htons(__u16 x)
+ *
+ * Conversion of XX-bit integers (16- 32- or 64-)
+ * between native CPU format and little/big endian format
+ * 64-bit stuff only defined for proper architectures
+ *     cpu_to_[bl]eXX(__uXX x)
+ *     [bl]eXX_to_cpu(__uXX x)
+ *
+ * The same, but takes a pointer to the value to convert
+ *     cpu_to_[bl]eXXp(__uXX x)
+ *     [bl]eXX_to_cpup(__uXX x)
+ *
+ * The same, but change in situ
+ *     cpu_to_[bl]eXXs(__uXX x)
+ *     [bl]eXX_to_cpus(__uXX x)
+ *
+ * See asm-foo/byteorder.h for examples of how to provide
+ * architecture-optimized versions
+ *
+ */
+
+
+#if defined(__KERNEL__)
+/*
+ * inside the kernel, we can use nicknames;
+ * outside of it, we must avoid POSIX namespace pollution...
+ */
+#define cpu_to_le64 __cpu_to_le64
+#define le64_to_cpu __le64_to_cpu
+#define cpu_to_le32 __cpu_to_le32
+#define le32_to_cpu __le32_to_cpu
+#define cpu_to_le16 __cpu_to_le16
+#define le16_to_cpu __le16_to_cpu
+#define cpu_to_be64 __cpu_to_be64
+#define be64_to_cpu __be64_to_cpu
+#define cpu_to_be32 __cpu_to_be32
+#define be32_to_cpu __be32_to_cpu
+#define cpu_to_be16 __cpu_to_be16
+#define be16_to_cpu __be16_to_cpu
+#define cpu_to_le64p __cpu_to_le64p
+#define le64_to_cpup __le64_to_cpup
+#define cpu_to_le32p __cpu_to_le32p
+#define le32_to_cpup __le32_to_cpup
+#define cpu_to_le16p __cpu_to_le16p
+#define le16_to_cpup __le16_to_cpup
+#define cpu_to_be64p __cpu_to_be64p
+#define be64_to_cpup __be64_to_cpup
+#define cpu_to_be32p __cpu_to_be32p
+#define be32_to_cpup __be32_to_cpup
+#define cpu_to_be16p __cpu_to_be16p
+#define be16_to_cpup __be16_to_cpup
+#define cpu_to_le64s __cpu_to_le64s
+#define le64_to_cpus __le64_to_cpus
+#define cpu_to_le32s __cpu_to_le32s
+#define le32_to_cpus __le32_to_cpus
+#define cpu_to_le16s __cpu_to_le16s
+#define le16_to_cpus __le16_to_cpus
+#define cpu_to_be64s __cpu_to_be64s
+#define be64_to_cpus __be64_to_cpus
+#define cpu_to_be32s __cpu_to_be32s
+#define be32_to_cpus __be32_to_cpus
+#define cpu_to_be16s __cpu_to_be16s
+#define be16_to_cpus __be16_to_cpus
+#endif
+
+
+#if defined(__KERNEL__)
+/*
+ * Handle ntohl and suches. These have various compatibility
+ * issues - like we want to give the prototype even though we
+ * also have a macro for them in case some strange program
+ * wants to take the address of the thing or something..
+ *
+ * Note that these used to return a "long" in libc5, even though
+ * long is often 64-bit these days.. Thus the casts.
+ *
+ * They have to be macros in order to do the constant folding
+ * correctly - if the argument is passed into an inline function
+ * it is no longer constant according to gcc..
+ */
+
+#undef ntohl
+#undef ntohs
+#undef htonl
+#undef htons
+
+/*
+ * Do the prototypes. Somebody might want to take the
+ * address or some such sick thing..
+ */
+extern __u32                   ntohl(__be32);
+extern __be32                  htonl(__u32);
+extern __u16                   ntohs(__be16);
+extern __be16                  htons(__u16);
+
+#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
+
+#define ___htonl(x) __cpu_to_be32(x)
+#define ___htons(x) __cpu_to_be16(x)
+#define ___ntohl(x) __be32_to_cpu(x)
+#define ___ntohs(x) __be16_to_cpu(x)
+
+#define htonl(x) ___htonl(x)
+#define ntohl(x) ___ntohl(x)
+#define htons(x) ___htons(x)
+#define ntohs(x) ___ntohs(x)
+
+#endif /* OPTIMIZE */
+
+#endif /* KERNEL */
+
+
+#endif /* _LINUX_BYTEORDER_GENERIC_H */
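
A usage sketch of the nicknames defined above; the struct and field names are made up, and <asm/byteorder.h> is assumed to pull in these definitions as it does in the Linux tree these files come from:

    #include <asm/byteorder.h>

    /* Hypothetical record layout. */
    struct demo_record {
            __le32  length;         /* little-endian on disk */
            __be16  port;           /* network (big-endian) byte order */
    };

    static void fill_record(struct demo_record *r, __u32 len, __u16 port)
    {
            r->length = cpu_to_le32(len);   /* CPU -> little-endian */
            r->port   = htons(port);        /* CPU -> network order */
    }

    static __u32 record_length(const struct demo_record *r)
    {
            return le32_to_cpu(r->length);  /* little-endian -> CPU */
    }
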
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/byteorder/little_endian.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/byteorder/little_endian.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,106 @@
+#ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H
+#define _LINUX_BYTEORDER_LITTLE_ENDIAN_H
+
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN 1234
+#endif
+#ifndef __LITTLE_ENDIAN_BITFIELD
+#define __LITTLE_ENDIAN_BITFIELD
+#endif
+
+#include <linux/types.h>
+#include <linux/byteorder/swab.h>
+
+#define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
+#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
+#define __constant_htons(x) ((__force __be16)___constant_swab16((x)))
+#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x))
+#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
+#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
+#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
+#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x))
+#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x))
+#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x)))
+#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x))
+#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x)))
+#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x))
+#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x)))
+#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x))
+#define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
+#define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
+#define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
+#define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#define __cpu_to_le16(x) ((__force __le16)(__u16)(x))
+#define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
+#define __cpu_to_be64(x) ((__force __be64)__swab64((x)))
+#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
+#define __cpu_to_be32(x) ((__force __be32)__swab32((x)))
+#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
+#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
+#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
+
+static inline __le64 __cpu_to_le64p(const __u64 *p)
+{
+       return (__force __le64)*p;
+}
+static inline __u64 __le64_to_cpup(const __le64 *p)
+{
+       return (__force __u64)*p;
+}
+static inline __le32 __cpu_to_le32p(const __u32 *p)
+{
+       return (__force __le32)*p;
+}
+static inline __u32 __le32_to_cpup(const __le32 *p)
+{
+       return (__force __u32)*p;
+}
+static inline __le16 __cpu_to_le16p(const __u16 *p)
+{
+       return (__force __le16)*p;
+}
+static inline __u16 __le16_to_cpup(const __le16 *p)
+{
+       return (__force __u16)*p;
+}
+static inline __be64 __cpu_to_be64p(const __u64 *p)
+{
+       return (__force __be64)__swab64p(p);
+}
+static inline __u64 __be64_to_cpup(const __be64 *p)
+{
+       return __swab64p((__u64 *)p);
+}
+static inline __be32 __cpu_to_be32p(const __u32 *p)
+{
+       return (__force __be32)__swab32p(p);
+}
+static inline __u32 __be32_to_cpup(const __be32 *p)
+{
+       return __swab32p((__u32 *)p);
+}
+static inline __be16 __cpu_to_be16p(const __u16 *p)
+{
+       return (__force __be16)__swab16p(p);
+}
+static inline __u16 __be16_to_cpup(const __be16 *p)
+{
+       return __swab16p((__u16 *)p);
+}
+#define __cpu_to_le64s(x) do {} while (0)
+#define __le64_to_cpus(x) do {} while (0)
+#define __cpu_to_le32s(x) do {} while (0)
+#define __le32_to_cpus(x) do {} while (0)
+#define __cpu_to_le16s(x) do {} while (0)
+#define __le16_to_cpus(x) do {} while (0)
+#define __cpu_to_be64s(x) __swab64s((x))
+#define __be64_to_cpus(x) __swab64s((x))
+#define __cpu_to_be32s(x) __swab32s((x))
+#define __be32_to_cpus(x) __swab32s((x))
+#define __cpu_to_be16s(x) __swab16s((x))
+#define __be16_to_cpus(x) __swab16s((x))
+
+#include <linux/byteorder/generic.h>
+
+#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */
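
On a little-endian CPU such as IA-64, the definitions above make the le*() conversions identity casts and the be*() conversions byte swaps; a small sketch (function name and values are illustrative):

    #include <asm/byteorder.h>      /* on ia64 this selects the little-endian flavour above */

    static void endian_examples(void)
    {
            __le32 le = __cpu_to_le32(0x11223344);  /* unchanged: 0x11223344 */
            __be32 be = __cpu_to_be32(0x11223344);  /* swapped: raw value 0x44332211,
                                                       i.e. bytes 11 22 33 44 in memory */
            (void)le; (void)be;
    }
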
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/byteorder/swab.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/byteorder/swab.h       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,192 @@
+#ifndef _LINUX_BYTEORDER_SWAB_H
+#define _LINUX_BYTEORDER_SWAB_H
+
+/*
+ * linux/byteorder/swab.h
+ * Byte-swapping, independently from CPU endianness
+ *     swabXX[ps]?(foo)
+ *
+ * Francois-Rene Rideau <fare@xxxxxxxxx> 19971205
+ *    separated swab functions from cpu_to_XX,
+ *    to clean up support for bizarre-endian architectures.
+ *
+ * See asm-i386/byteorder.h and suches for examples of how to provide
+ * architecture-dependent optimized versions
+ *
+ */
+
+#include <linux/compiler.h>
+
+/* casts are necessary for constants, because we never know for sure
+ * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
+ */
+#define ___swab16(x) \
+({ \
+       __u16 __x = (x); \
+       ((__u16)( \
+               (((__u16)(__x) & (__u16)0x00ffU) << 8) | \
+               (((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
+})
+
+#define ___swab32(x) \
+({ \
+       __u32 __x = (x); \
+       ((__u32)( \
+               (((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
+               (((__u32)(__x) & (__u32)0x0000ff00UL) <<  8) | \
+               (((__u32)(__x) & (__u32)0x00ff0000UL) >>  8) | \
+               (((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
+})
+
+#define ___swab64(x) \
+({ \
+       __u64 __x = (x); \
+       ((__u64)( \
+               (__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
+               (__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
+               (__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
+               (__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) <<  8) | \
+               (__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >>  8) | \
+               (__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
+               (__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
+               (__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
+})
+
+#define ___constant_swab16(x) \
+       ((__u16)( \
+               (((__u16)(x) & (__u16)0x00ffU) << 8) | \
+               (((__u16)(x) & (__u16)0xff00U) >> 8) ))
+#define ___constant_swab32(x) \
+       ((__u32)( \
+               (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
+               (((__u32)(x) & (__u32)0x0000ff00UL) <<  8) | \
+               (((__u32)(x) & (__u32)0x00ff0000UL) >>  8) | \
+               (((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
+#define ___constant_swab64(x) \
+       ((__u64)( \
+               (__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
+               (__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
+               (__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
+               (__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) <<  8) | \
+               (__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >>  8) | \
+               (__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
+               (__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
+               (__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
+
+/*
+ * provide defaults when no architecture-specific optimization is detected
+ */
+#ifndef __arch__swab16
+#  define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
+#endif
+#ifndef __arch__swab32
+#  define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
+#endif
+#ifndef __arch__swab64
+#  define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
+#endif
+
+#ifndef __arch__swab16p
+#  define __arch__swab16p(x) __arch__swab16(*(x))
+#endif
+#ifndef __arch__swab32p
+#  define __arch__swab32p(x) __arch__swab32(*(x))
+#endif
+#ifndef __arch__swab64p
+#  define __arch__swab64p(x) __arch__swab64(*(x))
+#endif
+
+#ifndef __arch__swab16s
+#  define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
+#endif
+#ifndef __arch__swab32s
+#  define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
+#endif
+#ifndef __arch__swab64s
+#  define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
+#endif
+
+
+/*
+ * Allow constant folding
+ */
+#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
+#  define __swab16(x) \
+(__builtin_constant_p((__u16)(x)) ? \
+ ___swab16((x)) : \
+ __fswab16((x)))
+#  define __swab32(x) \
+(__builtin_constant_p((__u32)(x)) ? \
+ ___swab32((x)) : \
+ __fswab32((x)))
+#  define __swab64(x) \
+(__builtin_constant_p((__u64)(x)) ? \
+ ___swab64((x)) : \
+ __fswab64((x)))
+#else
+#  define __swab16(x) __fswab16(x)
+#  define __swab32(x) __fswab32(x)
+#  define __swab64(x) __fswab64(x)
+#endif /* OPTIMIZE */
+
+
+static __inline__ __attribute_const__ __u16 __fswab16(__u16 x)
+{
+       return __arch__swab16(x);
+}
+static __inline__ __u16 __swab16p(const __u16 *x)
+{
+       return __arch__swab16p(x);
+}
+static __inline__ void __swab16s(__u16 *addr)
+{
+       __arch__swab16s(addr);
+}
+
+static __inline__ __attribute_const__ __u32 __fswab32(__u32 x)
+{
+       return __arch__swab32(x);
+}
+static __inline__ __u32 __swab32p(const __u32 *x)
+{
+       return __arch__swab32p(x);
+}
+static __inline__ void __swab32s(__u32 *addr)
+{
+       __arch__swab32s(addr);
+}
+
+#ifdef __BYTEORDER_HAS_U64__
+static __inline__ __attribute_const__ __u64 __fswab64(__u64 x)
+{
+#  ifdef __SWAB_64_THRU_32__
+       __u32 h = x >> 32;
+        __u32 l = x & ((1ULL<<32)-1);
+        return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
+#  else
+       return __arch__swab64(x);
+#  endif
+}
+static __inline__ __u64 __swab64p(const __u64 *x)
+{
+       return __arch__swab64p(x);
+}
+static __inline__ void __swab64s(__u64 *addr)
+{
+       __arch__swab64s(addr);
+}
+#endif /* __BYTEORDER_HAS_U64__ */
+
+#if defined(__KERNEL__)
+#define swab16 __swab16
+#define swab32 __swab32
+#define swab64 __swab64
+#define swab16p __swab16p
+#define swab32p __swab32p
+#define swab64p __swab64p
+#define swab16s __swab16s
+#define swab32s __swab32s
+#define swab64s __swab64s
+#endif
+
+#endif /* _LINUX_BYTEORDER_SWAB_H */
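[Editorial note: the following is an illustrative sketch, not part of the changeset. It shows how the constant-folding split above behaves when building with gcc and optimization: a literal argument is reduced through ___swab16()/___swab32() at compile time, while a run-time value is routed to __fswab32() and hence __arch__swab32(). The name swab_selftest is made up; the header above is assumed to be included.]

static __inline__ int swab_selftest(__u32 v)
{
	__u16 a = __swab16(0x1234);	/* constant: folds to 0x3412 at compile time */
	__u32 b = __swab32(v);		/* run-time value: goes through __fswab32() */

	/* swapping twice must give the original value back */
	return (a == 0x3412) && (__swab32(b) == v);
}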
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/cpumask.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/cpumask.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,379 @@
+#ifndef __LINUX_CPUMASK_H
+#define __LINUX_CPUMASK_H
+
+/*
+ * Cpumasks provide a bitmap suitable for representing the
+ * set of CPU's in a system, one bit position per CPU number.
+ *
+ * See detailed comments in the file linux/bitmap.h describing the
+ * data type on which these cpumasks are based.
+ *
+ * For details of cpumask_scnprintf() and cpumask_parse(),
+ * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
+ *
+ * The available cpumask operations are:
+ *
+ * void cpu_set(cpu, mask)             turn on bit 'cpu' in mask
+ * void cpu_clear(cpu, mask)           turn off bit 'cpu' in mask
+ * void cpus_setall(mask)              set all bits
+ * void cpus_clear(mask)               clear all bits
+ * int cpu_isset(cpu, mask)            true iff bit 'cpu' set in mask
+ * int cpu_test_and_set(cpu, mask)     test and set bit 'cpu' in mask
+ *
+ * void cpus_and(dst, src1, src2)      dst = src1 & src2  [intersection]
+ * void cpus_or(dst, src1, src2)       dst = src1 | src2  [union]
+ * void cpus_xor(dst, src1, src2)      dst = src1 ^ src2
+ * void cpus_andnot(dst, src1, src2)   dst = src1 & ~src2
+ * void cpus_complement(dst, src)      dst = ~src
+ *
+ * int cpus_equal(mask1, mask2)                Does mask1 == mask2?
+ * int cpus_intersects(mask1, mask2)   Do mask1 and mask2 intersect?
+ * int cpus_subset(mask1, mask2)       Is mask1 a subset of mask2?
+ * int cpus_empty(mask)                        Is mask empty (no bits set)?
+ * int cpus_full(mask)                 Is mask full (all bits set)?
+ * int cpus_weight(mask)               Hamming weight - number of set bits
+ *
+ * void cpus_shift_right(dst, src, n)  Shift right
+ * void cpus_shift_left(dst, src, n)   Shift left
+ *
+ * int first_cpu(mask)                 Number lowest set bit, or NR_CPUS
+ * int next_cpu(cpu, mask)             Next cpu past 'cpu', or NR_CPUS
+ *
+ * cpumask_t cpumask_of_cpu(cpu)       Return cpumask with bit 'cpu' set
+ * CPU_MASK_ALL                                Initializer - all bits set
+ * CPU_MASK_NONE                       Initializer - no bits set
+ * unsigned long *cpus_addr(mask)      Array of unsigned long's in mask
+ *
+ * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
+ * int cpumask_parse(ubuf, ulen, mask) Parse ascii string as cpumask
+ *
+ * for_each_cpu_mask(cpu, mask)                for-loop cpu over mask
+ *
+ * int num_online_cpus()               Number of online CPUs
+ * int num_possible_cpus()             Number of all possible CPUs
+ * int num_present_cpus()              Number of present CPUs
+ *
+ * int cpu_online(cpu)                 Is some cpu online?
+ * int cpu_possible(cpu)               Is some cpu possible?
+ * int cpu_present(cpu)                        Is some cpu present (can schedule)?
+ *
+ * int any_online_cpu(mask)            First online cpu in mask
+ *
+ * for_each_cpu(cpu)                   for-loop cpu over cpu_possible_map
+ * for_each_online_cpu(cpu)            for-loop cpu over cpu_online_map
+ * for_each_present_cpu(cpu)           for-loop cpu over cpu_present_map
+ *
+ * Subtlety:
+ * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
+ *    to generate slightly worse code.  Note for example the additional
+ *    40 lines of assembly code compiling the "for each possible cpu"
+ *    loops buried in the disk_stat_read() macros calls when compiling
+ *    drivers/block/genhd.c (arch i386, CONFIG_SMP=y).  So use a simple
+ *    one-line #define for cpu_isset(), instead of wrapping an inline
+ *    inside a macro, the way we do the other calls.
+ */
+
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/bitmap.h>
+#include <asm/bug.h>
+
+typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
+extern cpumask_t _unused_cpumask_arg_;
+
+#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
+static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
+{
+       set_bit(cpu, dstp->bits);
+}
+
+#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
+static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
+{
+       clear_bit(cpu, dstp->bits);
+}
+
+#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
+static inline void __cpus_setall(cpumask_t *dstp, int nbits)
+{
+       bitmap_fill(dstp->bits, nbits);
+}
+
+#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
+static inline void __cpus_clear(cpumask_t *dstp, int nbits)
+{
+       bitmap_zero(dstp->bits, nbits);
+}
+
+/* No static inline type checking - see Subtlety (1) above. */
+#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
+
+#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
+static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
+{
+       return test_and_set_bit(cpu, addr->bits);
+}
+
+#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
+                                       const cpumask_t *src2p, int nbits)
+{
+       bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
+                                       const cpumask_t *src2p, int nbits)
+{
+       bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
+                                       const cpumask_t *src2p, int nbits)
+{
+       bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_andnot(dst, src1, src2) \
+                               __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
+                                       const cpumask_t *src2p, int nbits)
+{
+       bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
+static inline void __cpus_complement(cpumask_t *dstp,
+                                       const cpumask_t *srcp, int nbits)
+{
+       bitmap_complement(dstp->bits, srcp->bits, nbits);
+}
+
+#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
+static inline int __cpus_equal(const cpumask_t *src1p,
+                                       const cpumask_t *src2p, int nbits)
+{
+       return bitmap_equal(src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
+static inline int __cpus_intersects(const cpumask_t *src1p,
+                                       const cpumask_t *src2p, int nbits)
+{
+       return bitmap_intersects(src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
+static inline int __cpus_subset(const cpumask_t *src1p,
+                                       const cpumask_t *src2p, int nbits)
+{
+       return bitmap_subset(src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
+static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
+{
+       return bitmap_empty(srcp->bits, nbits);
+}
+
+#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
+static inline int __cpus_full(const cpumask_t *srcp, int nbits)
+{
+       return bitmap_full(srcp->bits, nbits);
+}
+
+#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
+static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
+{
+       return bitmap_weight(srcp->bits, nbits);
+}
+
+#define cpus_shift_right(dst, src, n) \
+                       __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
+static inline void __cpus_shift_right(cpumask_t *dstp,
+                                       const cpumask_t *srcp, int n, int nbits)
+{
+       bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
+}
+
+#define cpus_shift_left(dst, src, n) \
+                       __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
+static inline void __cpus_shift_left(cpumask_t *dstp,
+                                       const cpumask_t *srcp, int n, int nbits)
+{
+       bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
+}
+
+#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
+static inline int __first_cpu(const cpumask_t *srcp, int nbits)
+{
+       return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
+}
+
+#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
+static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
+{
+       return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
+}
+
+#define cpumask_of_cpu(cpu)                                            \
+({                                                                     \
+       typeof(_unused_cpumask_arg_) m;                                 \
+       if (sizeof(m) == sizeof(unsigned long)) {                       \
+               m.bits[0] = 1UL<<(cpu);                                 \
+       } else {                                                        \
+               cpus_clear(m);                                          \
+               cpu_set((cpu), m);                                      \
+       }                                                               \
+       m;                                                              \
+})
+
+#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
+
+#if NR_CPUS <= BITS_PER_LONG
+
+#define CPU_MASK_ALL                                                   \
+(cpumask_t) { {                                                        \
+       [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
+} }
+
+#else
+
+#define CPU_MASK_ALL                                                   \
+(cpumask_t) { {                                                        \
+       [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,                        \
+       [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
+} }
+
+#endif
+
+#define CPU_MASK_NONE                                                  \
+(cpumask_t) { {                                                        \
+       [0 ... BITS_TO_LONGS(NR_CPUS)-1] =  0UL                         \
+} }
+
+#define CPU_MASK_CPU0                                                  \
+(cpumask_t) { {                                                        \
+       [0] =  1UL                                                      \
+} }
+
+#define cpus_addr(src) ((src).bits)
+
+#define cpumask_scnprintf(buf, len, src) \
+                       __cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
+static inline int __cpumask_scnprintf(char *buf, int len,
+                                       const cpumask_t *srcp, int nbits)
+{
+       return bitmap_scnprintf(buf, len, srcp->bits, nbits);
+}
+
+#define cpumask_parse(ubuf, ulen, src) \
+                       __cpumask_parse((ubuf), (ulen), &(src), NR_CPUS)
+static inline int __cpumask_parse(const char __user *buf, int len,
+                                       cpumask_t *dstp, int nbits)
+{
+       return bitmap_parse(buf, len, dstp->bits, nbits);
+}
+
+#if NR_CPUS > 1
+#define for_each_cpu_mask(cpu, mask)           \
+       for ((cpu) = first_cpu(mask);           \
+               (cpu) < NR_CPUS;                \
+               (cpu) = next_cpu((cpu), (mask)))
+#else /* NR_CPUS == 1 */
+#define for_each_cpu_mask(cpu, mask) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#endif /* NR_CPUS */
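[Editorial note: a small usage sketch, illustrative only and not part of the patch, for the accessors defined above; count_marked_cpus is a made-up name.]

static int count_marked_cpus(void)
{
	cpumask_t mask;
	int cpu, n = 0;

	cpus_clear(mask);
	cpu_set(0, mask);
	if (NR_CPUS > 2)
		cpu_set(2, mask);		/* mark a second CPU when it exists */
	for_each_cpu_mask(cpu, mask)		/* visits only the bits set above */
		n++;
	return n;
}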
+
+/*
+ * The following particular system cpumasks and operations manage
+ * possible, present and online cpus.  Each of them is a fixed size
+ * bitmap of size NR_CPUS.
+ *
+ *  #ifdef CONFIG_HOTPLUG_CPU
+ *     cpu_possible_map - all NR_CPUS bits set
+ *     cpu_present_map  - has bit 'cpu' set iff cpu is populated
+ *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
+ *  #else
+ *     cpu_possible_map - has bit 'cpu' set iff cpu is populated
+ *     cpu_present_map  - copy of cpu_possible_map
+ *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
+ *  #endif
+ *
+ *  In either case, NR_CPUS is fixed at compile time, as the static
+ *  size of these bitmaps.  The cpu_possible_map is fixed at boot
+ *  time, as the set of CPU id's that it is possible might ever
+ *  be plugged in at anytime during the life of that system boot.
+ *  The cpu_present_map is dynamic(*), representing which CPUs
+ *  are currently plugged in.  And cpu_online_map is the dynamic
+ *  subset of cpu_present_map, indicating those CPUs available
+ *  for scheduling.
+ *
+ *  If HOTPLUG is enabled, then cpu_possible_map is forced to have
+ *  all NR_CPUS bits set, otherwise it is just the set of CPUs that
+ *  ACPI reports present at boot.
+ *
+ *  If HOTPLUG is enabled, then cpu_present_map varies dynamically,
+ *  depending on what ACPI reports as currently plugged in, otherwise
+ *  cpu_present_map is just a copy of cpu_possible_map.
+ *
+ *  (*) Well, cpu_present_map is dynamic in the hotplug case.  If not
+ *      hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
+ *
+ * Subtleties:
+ * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
+ *    assumption that their single CPU is online.  The UP
+ *    cpu_{online,possible,present}_maps are placebos.  Changing them
+ *    will have no useful effect on the following num_*_cpus()
+ *    and cpu_*() macros in the UP case.  This ugliness is a UP
+ *    optimization - don't waste any instructions or memory references
+ *    asking if you're online or how many CPUs there are if there is
+ *    only one CPU.
+ * 2) Most SMP arch's #define some of these maps to be some
+ *    other map specific to that arch.  Therefore, the following
+ *    must be #define macros, not inlines.  To see why, examine
+ *    the assembly code produced by the following.  Note that
+ *    set1() writes phys_x_map, but set2() writes x_map:
+ *        int x_map, phys_x_map;
+ *        #define set1(a) x_map = a
+ *        inline void set2(int a) { x_map = a; }
+ *        #define x_map phys_x_map
+ *        main(){ set1(3); set2(5); }
+ */
+
+extern cpumask_t cpu_possible_map;
+#ifndef XEN
+extern cpumask_t cpu_online_map;
+#endif
+extern cpumask_t cpu_present_map;
+
+#if NR_CPUS > 1
+#define num_online_cpus()      cpus_weight(cpu_online_map)
+#define num_possible_cpus()    cpus_weight(cpu_possible_map)
+#define num_present_cpus()     cpus_weight(cpu_present_map)
+#define cpu_online(cpu)                cpu_isset((cpu), cpu_online_map)
+#define cpu_possible(cpu)      cpu_isset((cpu), cpu_possible_map)
+#define cpu_present(cpu)       cpu_isset((cpu), cpu_present_map)
+#else
+#define num_online_cpus()      1
+#define num_possible_cpus()    1
+#define num_present_cpus()     1
+#define cpu_online(cpu)                ((cpu) == 0)
+#define cpu_possible(cpu)      ((cpu) == 0)
+#define cpu_present(cpu)       ((cpu) == 0)
+#endif
+
+#define any_online_cpu(mask)                   \
+({                                             \
+       int cpu;                                \
+       for_each_cpu_mask(cpu, (mask))          \
+               if (cpu_online(cpu))            \
+                       break;                  \
+       cpu;                                    \
+})
+
+#define for_each_cpu(cpu)        for_each_cpu_mask((cpu), cpu_possible_map)
+#define for_each_online_cpu(cpu)  for_each_cpu_mask((cpu), cpu_online_map)
+#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
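[Editorial note: an illustrative sketch of the system-wide maps, not part of the changeset. Under XEN the cpu_online_map extern above is compiled out, so this assumes an equivalent declaration is visible; pick_target_cpu is a made-up name.]

static int pick_target_cpu(cpumask_t candidates)
{
	int cpu = any_online_cpu(candidates);	/* first online candidate, or NR_CPUS */

	if (cpu >= NR_CPUS)			/* none of the candidates is online */
		cpu = first_cpu(cpu_online_map);
	return cpu;
}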
+
+#endif /* __LINUX_CPUMASK_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/dma-mapping.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/dma-mapping.h  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,56 @@
+#ifndef _ASM_LINUX_DMA_MAPPING_H
+#define _ASM_LINUX_DMA_MAPPING_H
+
+#include <linux/device.h>
+#include <linux/err.h>
+
+/* These definitions mirror those in pci.h, so they can be used
+ * interchangeably with their PCI_ counterparts */
+enum dma_data_direction {
+       DMA_BIDIRECTIONAL = 0,
+       DMA_TO_DEVICE = 1,
+       DMA_FROM_DEVICE = 2,
+       DMA_NONE = 3,
+};
+
+#define DMA_64BIT_MASK 0xffffffffffffffffULL
+#define DMA_32BIT_MASK 0x00000000ffffffffULL
+
+#include <asm/dma-mapping.h>
+
+/* Backwards compat, remove in 2.7.x */
+#define dma_sync_single                dma_sync_single_for_cpu
+#define dma_sync_sg            dma_sync_sg_for_cpu
+
+extern u64 dma_get_required_mask(struct device *dev);
+
+/* flags for the coherent memory api */
+#define        DMA_MEMORY_MAP                  0x01
+#define DMA_MEMORY_IO                  0x02
+#define DMA_MEMORY_INCLUDES_CHILDREN   0x04
+#define DMA_MEMORY_EXCLUSIVE           0x08
+
+#ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+static inline int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+                           dma_addr_t device_addr, size_t size, int flags)
+{
+       return 0;
+}
+
+static inline void
+dma_release_declared_memory(struct device *dev)
+{
+}
+
+static inline void *
+dma_mark_declared_memory_occupied(struct device *dev,
+                                 dma_addr_t device_addr, size_t size)
+{
+       return ERR_PTR(-EBUSY);
+}
+#endif
+
+#endif
+
+
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/efi.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/efi.h  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,399 @@
+#ifndef _LINUX_EFI_H
+#define _LINUX_EFI_H
+
+/*
+ * Extensible Firmware Interface
+ * Based on 'Extensible Firmware Interface Specification' version 0.9, April 30, 1999
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ * Copyright (C) 1999, 2002-2003 Hewlett-Packard Co.
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ */
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/rtc.h>
+#include <linux/ioport.h>
+
+#include <asm/page.h>
+#include <asm/system.h>
+
+#define EFI_SUCCESS            0
+#define EFI_LOAD_ERROR          ( 1 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_INVALID_PARAMETER  ( 2 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_UNSUPPORTED                ( 3 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_BAD_BUFFER_SIZE     ( 4 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_BUFFER_TOO_SMALL   ( 5 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_NOT_FOUND          (14 | (1UL << (BITS_PER_LONG-1)))
+
+typedef unsigned long efi_status_t;
+typedef u8 efi_bool_t;
+typedef u16 efi_char16_t;              /* UNICODE character */
+
+
+typedef struct {
+       u8 b[16];
+} efi_guid_t;
+
+#define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
+((efi_guid_t) \
+{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
+  (b) & 0xff, ((b) >> 8) & 0xff, \
+  (c) & 0xff, ((c) >> 8) & 0xff, \
+  (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
+
+/*
+ * Generic EFI table header
+ */
+typedef        struct {
+       u64 signature;
+       u32 revision;
+       u32 headersize;
+       u32 crc32;
+       u32 reserved;
+} efi_table_hdr_t;
+
+/*
+ * Memory map descriptor:
+ */
+
+/* Memory types: */
+#define EFI_RESERVED_TYPE               0
+#define EFI_LOADER_CODE                         1
+#define EFI_LOADER_DATA                         2
+#define EFI_BOOT_SERVICES_CODE          3
+#define EFI_BOOT_SERVICES_DATA          4
+#define EFI_RUNTIME_SERVICES_CODE       5
+#define EFI_RUNTIME_SERVICES_DATA       6
+#define EFI_CONVENTIONAL_MEMORY                 7
+#define EFI_UNUSABLE_MEMORY             8
+#define EFI_ACPI_RECLAIM_MEMORY                 9
+#define EFI_ACPI_MEMORY_NVS            10
+#define EFI_MEMORY_MAPPED_IO           11
+#define EFI_MEMORY_MAPPED_IO_PORT_SPACE        12
+#define EFI_PAL_CODE                   13
+#define EFI_MAX_MEMORY_TYPE            14
+
+/* Attribute values: */
+#define EFI_MEMORY_UC          ((u64)0x0000000000000001ULL)    /* uncached */
+#define EFI_MEMORY_WC          ((u64)0x0000000000000002ULL)    /* write-coalescing */
+#define EFI_MEMORY_WT          ((u64)0x0000000000000004ULL)    /* write-through */
+#define EFI_MEMORY_WB          ((u64)0x0000000000000008ULL)    /* write-back */
+#define EFI_MEMORY_WP          ((u64)0x0000000000001000ULL)    /* write-protect */
+#define EFI_MEMORY_RP          ((u64)0x0000000000002000ULL)    /* read-protect */
+#define EFI_MEMORY_XP          ((u64)0x0000000000004000ULL)    /* execute-protect */
+#define EFI_MEMORY_RUNTIME     ((u64)0x8000000000000000ULL)    /* range requires runtime mapping */
+#define EFI_MEMORY_DESCRIPTOR_VERSION  1
+
+#define EFI_PAGE_SHIFT         12
+
+/*
+ * For current x86 implementations of EFI, there is
+ * additional padding in the mem descriptors.  This is not
+ * the case in ia64.  Need to have this fixed in the f/w.
+ */
+typedef struct {
+       u32 type;
+       u32 pad;
+       u64 phys_addr;
+       u64 virt_addr;
+       u64 num_pages;
+       u64 attribute;
+#if defined (__i386__)
+       u64 pad1;
+#endif
+} efi_memory_desc_t;
+
+typedef int (*efi_freemem_callback_t) (unsigned long start, unsigned long end, void *arg);
+
+/*
+ * Types and defines for Time Services
+ */
+#define EFI_TIME_ADJUST_DAYLIGHT 0x1
+#define EFI_TIME_IN_DAYLIGHT     0x2
+#define EFI_UNSPECIFIED_TIMEZONE 0x07ff
+
+typedef struct {
+       u16 year;
+       u8 month;
+       u8 day;
+       u8 hour;
+       u8 minute;
+       u8 second;
+       u8 pad1;
+       u32 nanosecond;
+       s16 timezone;
+       u8 daylight;
+       u8 pad2;
+} efi_time_t;
+
+typedef struct {
+       u32 resolution;
+       u32 accuracy;
+       u8 sets_to_zero;
+} efi_time_cap_t;
+
+/*
+ * Types and defines for EFI ResetSystem
+ */
+#define EFI_RESET_COLD 0
+#define EFI_RESET_WARM 1
+#define EFI_RESET_SHUTDOWN 2
+
+/*
+ * EFI Runtime Services table
+ */
+#define EFI_RUNTIME_SERVICES_SIGNATURE ((u64)0x5652453544e5552ULL)
+#define EFI_RUNTIME_SERVICES_REVISION  0x00010000
+
+typedef struct {
+       efi_table_hdr_t hdr;
+       unsigned long get_time;
+       unsigned long set_time;
+       unsigned long get_wakeup_time;
+       unsigned long set_wakeup_time;
+       unsigned long set_virtual_address_map;
+       unsigned long convert_pointer;
+       unsigned long get_variable;
+       unsigned long get_next_variable;
+       unsigned long set_variable;
+       unsigned long get_next_high_mono_count;
+       unsigned long reset_system;
+} efi_runtime_services_t;
+
+typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc);
+typedef efi_status_t efi_set_time_t (efi_time_t *tm);
+typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending,
+                                           efi_time_t *tm);
+typedef efi_status_t efi_set_wakeup_time_t (efi_bool_t enabled, efi_time_t *tm);
+typedef efi_status_t efi_get_variable_t (efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
+                                        unsigned long *data_size, void *data);
+typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char16_t *name,
+                                             efi_guid_t *vendor);
+typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor,
+                                        unsigned long attr, unsigned long data_size,
+                                        void *data);
+typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count);
+typedef void efi_reset_system_t (int reset_type, efi_status_t status,
+                                unsigned long data_size, efi_char16_t *data);
+typedef efi_status_t efi_set_virtual_address_map_t (unsigned long memory_map_size,
+                                               unsigned long descriptor_size,
+                                               u32 descriptor_version,
+                                               efi_memory_desc_t *virtual_map);
+
+/*
+ *  EFI Configuration Table and GUID definitions
+ */
+#define NULL_GUID \
+    EFI_GUID(  0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 )
+
+#define MPS_TABLE_GUID    \
+    EFI_GUID(  0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
+
+#define ACPI_TABLE_GUID    \
+    EFI_GUID(  0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
+
+#define ACPI_20_TABLE_GUID    \
+    EFI_GUID(  0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81 )
+
+#define SMBIOS_TABLE_GUID    \
+    EFI_GUID(  0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
+
+#define SAL_SYSTEM_TABLE_GUID    \
+    EFI_GUID(  0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
+
+#define HCDP_TABLE_GUID        \
+    EFI_GUID(  0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98 )
+
+#define UGA_IO_PROTOCOL_GUID \
+    EFI_GUID(  0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0xb, 0x7, 0xa2 )
+
+#define EFI_GLOBAL_VARIABLE_GUID \
+    EFI_GUID(  0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c )
+
+typedef struct {
+       efi_guid_t guid;
+       unsigned long table;
+} efi_config_table_t;
+
+#define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL)
+#define EFI_SYSTEM_TABLE_REVISION  ((1 << 16) | 00)
+
+typedef struct {
+       efi_table_hdr_t hdr;
+       unsigned long fw_vendor;        /* physical addr of CHAR16 vendor string */
+       u32 fw_revision;
+       unsigned long con_in_handle;
+       unsigned long con_in;
+       unsigned long con_out_handle;
+       unsigned long con_out;
+       unsigned long stderr_handle;
+       unsigned long stderr;
+       efi_runtime_services_t *runtime;
+       unsigned long boottime;
+       unsigned long nr_tables;
+       unsigned long tables;
+} efi_system_table_t;
+
+struct efi_memory_map {
+       efi_memory_desc_t *phys_map;
+       efi_memory_desc_t *map;
+       int nr_map;
+       unsigned long desc_version;
+};
+
+/*
+ * All runtime access to EFI goes through this structure:
+ */
+extern struct efi {
+       efi_system_table_t *systab;     /* EFI system table */
+       void *mps;                      /* MPS table */
+       void *acpi;                     /* ACPI table  (IA64 ext 0.71) */
+       void *acpi20;                   /* ACPI table  (ACPI 2.0) */
+       void *smbios;                   /* SM BIOS table */
+       void *sal_systab;               /* SAL system table */
+       void *boot_info;                /* boot info table */
+       void *hcdp;                     /* HCDP table */
+       void *uga;                      /* UGA table */
+       efi_get_time_t *get_time;
+       efi_set_time_t *set_time;
+       efi_get_wakeup_time_t *get_wakeup_time;
+       efi_set_wakeup_time_t *set_wakeup_time;
+       efi_get_variable_t *get_variable;
+       efi_get_next_variable_t *get_next_variable;
+       efi_set_variable_t *set_variable;
+       efi_get_next_high_mono_count_t *get_next_high_mono_count;
+       efi_reset_system_t *reset_system;
+       efi_set_virtual_address_map_t *set_virtual_address_map;
+} efi;
+
+static inline int
+efi_guidcmp (efi_guid_t left, efi_guid_t right)
+{
+       return memcmp(&left, &right, sizeof (efi_guid_t));
+}
+
+static inline char *
+efi_guid_unparse(efi_guid_t *guid, char *out)
+{
+       sprintf(out, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+               guid->b[3], guid->b[2], guid->b[1], guid->b[0],
+               guid->b[5], guid->b[4], guid->b[7], guid->b[6],
+               guid->b[8], guid->b[9], guid->b[10], guid->b[11],
+               guid->b[12], guid->b[13], guid->b[14], guid->b[15]);
+        return out;
+}
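[Editorial note: an illustrative sketch, not part of the changeset, of how the GUID constants and efi_guidcmp() above are typically combined to scan an EFI configuration table array. find_acpi20_table and its arguments are made up, and the caller is assumed to have mapped the table array to a usable address already.]

static void *find_acpi20_table(efi_config_table_t *tables, unsigned long nr_tables)
{
	efi_guid_t acpi20 = ACPI_20_TABLE_GUID;
	unsigned long i;

	for (i = 0; i < nr_tables; i++)
		if (!efi_guidcmp(tables[i].guid, acpi20))
			return (void *) tables[i].table;	/* address recorded by the firmware */
	return NULL;
}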
+
+extern void efi_init (void);
+extern void *efi_get_pal_addr (void);
+extern void efi_map_pal_code (void);
+extern void efi_map_memmap(void);
+extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
+extern void efi_gettimeofday (struct timespec *ts);
+extern void efi_enter_virtual_mode (void);     /* switch EFI to virtual mode, if possible */
+extern u64 efi_get_iobase (void);
+extern u32 efi_mem_type (unsigned long phys_addr);
+extern u64 efi_mem_attributes (unsigned long phys_addr);
+extern int __init efi_uart_console_only (void);
+extern void efi_initialize_iomem_resources(struct resource *code_resource,
+                                       struct resource *data_resource);
+extern efi_status_t phys_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc);
+extern unsigned long __init efi_get_time(void);
+extern int __init efi_set_rtc_mmss(unsigned long nowtime);
+extern struct efi_memory_map memmap;
+
+/**
+ * efi_range_is_wc - check the WC bit on an address range
+ * @start: starting kvirt address
+ * @len: length of range
+ *
+ * Consult the EFI memory map and make sure it's ok to set this range WC.
+ * Returns true or false.
+ */
+static inline int efi_range_is_wc(unsigned long start, unsigned long len)
+{
+       int i;
+
+       for (i = 0; i < len; i += (1UL << EFI_PAGE_SHIFT)) {
+               unsigned long paddr = __pa(start + i);
+               if (!(efi_mem_attributes(paddr) & EFI_MEMORY_WC))
+                       return 0;
+       }
+       /* The range checked out */
+       return 1;
+}
+
+#ifdef CONFIG_EFI_PCDP
+extern int __init efi_setup_pcdp_console(char *);
+#endif
+
+/*
+ * We play games with efi_enabled so that the compiler will, if possible, remove
+ * EFI-related code altogether.
+ */
+#ifdef CONFIG_EFI
+# ifdef CONFIG_X86
+   extern int efi_enabled;
+# else
+#  define efi_enabled 1
+# endif
+#else
+# define efi_enabled 0
+#endif
+
+/*
+ * Variable Attributes
+ */
+#define EFI_VARIABLE_NON_VOLATILE       0x0000000000000001
+#define EFI_VARIABLE_BOOTSERVICE_ACCESS 0x0000000000000002
+#define EFI_VARIABLE_RUNTIME_ACCESS     0x0000000000000004
+
+/*
+ * EFI Device Path information
+ */
+#define EFI_DEV_HW                     0x01
+#define  EFI_DEV_PCI                            1
+#define  EFI_DEV_PCCARD                                 2
+#define  EFI_DEV_MEM_MAPPED                     3
+#define  EFI_DEV_VENDOR                                 4
+#define  EFI_DEV_CONTROLLER                     5
+#define EFI_DEV_ACPI                   0x02
+#define   EFI_DEV_BASIC_ACPI                    1
+#define   EFI_DEV_EXPANDED_ACPI                         2
+#define EFI_DEV_MSG                    0x03
+#define   EFI_DEV_MSG_ATAPI                     1
+#define   EFI_DEV_MSG_SCSI                      2
+#define   EFI_DEV_MSG_FC                        3
+#define   EFI_DEV_MSG_1394                      4
+#define   EFI_DEV_MSG_USB                       5
+#define   EFI_DEV_MSG_USB_CLASS                        15
+#define   EFI_DEV_MSG_I20                       6
+#define   EFI_DEV_MSG_MAC                      11
+#define   EFI_DEV_MSG_IPV4                     12
+#define   EFI_DEV_MSG_IPV6                     13
+#define   EFI_DEV_MSG_INFINIBAND                9
+#define   EFI_DEV_MSG_UART                     14
+#define   EFI_DEV_MSG_VENDOR                   10
+#define EFI_DEV_MEDIA                  0x04
+#define   EFI_DEV_MEDIA_HARD_DRIVE              1
+#define   EFI_DEV_MEDIA_CDROM                   2
+#define   EFI_DEV_MEDIA_VENDOR                  3
+#define   EFI_DEV_MEDIA_FILE                    4
+#define   EFI_DEV_MEDIA_PROTOCOL                5
+#define EFI_DEV_BIOS_BOOT              0x05
+#define EFI_DEV_END_PATH               0x7F
+#define EFI_DEV_END_PATH2              0xFF
+#define   EFI_DEV_END_INSTANCE                 0x01
+#define   EFI_DEV_END_ENTIRE                   0xFF
+
+struct efi_generic_dev_path {
+       u8 type;
+       u8 sub_type;
+       u16 length;
+} __attribute ((packed));
+
+#endif /* _LINUX_EFI_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/err.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/err.h  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,31 @@
+#ifndef _LINUX_ERR_H
+#define _LINUX_ERR_H
+
+#include <linux/compiler.h>
+
+#include <asm/errno.h>
+
+/*
+ * Kernel pointers have redundant information, so we can use a
+ * scheme where we can return either an error code or a dentry
+ * pointer with the same return value.
+ *
+ * This should be a per-architecture thing, to allow different
+ * error and pointer decisions.
+ */
+static inline void *ERR_PTR(long error)
+{
+       return (void *) error;
+}
+
+static inline long PTR_ERR(const void *ptr)
+{
+       return (long) ptr;
+}
+
+static inline long IS_ERR(const void *ptr)
+{
+       return unlikely((unsigned long)ptr > (unsigned long)-1000L);
+}
+
+#endif /* _LINUX_ERR_H */
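[Editorial note: the usual calling convention for these helpers, as an illustrative sketch that is not part of the changeset; the lookup_widget callback is a made-up producer that encodes failures with ERR_PTR().]

static long use_widget(void *(*lookup_widget)(int id), int id)
{
	void *w = lookup_widget(id);	/* returns a real pointer or ERR_PTR(-Exxx) */

	if (IS_ERR(w))
		return PTR_ERR(w);	/* propagate the negative errno value */
	/* ... use w ... */
	return 0;
}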
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/gfp.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/gfp.h  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,134 @@
+#ifndef __LINUX_GFP_H
+#define __LINUX_GFP_H
+
+#include <linux/mmzone.h>
+#include <linux/stddef.h>
+#include <linux/linkage.h>
+#include <linux/config.h>
+
+struct vm_area_struct;
+
+/*
+ * GFP bitmasks..
+ */
+/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */
+#define __GFP_DMA      0x01
+#define __GFP_HIGHMEM  0x02
+
+/*
+ * Action modifiers - doesn't change the zoning
+ *
+ * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
+ * _might_ fail.  This depends upon the particular VM implementation.
+ *
+ * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
+ * cannot handle allocation failures.
+ *
+ * __GFP_NORETRY: The VM implementation must not retry indefinitely.
+ */
+#define __GFP_WAIT     0x10    /* Can wait and reschedule? */
+#define __GFP_HIGH     0x20    /* Should access emergency pools? */
+#define __GFP_IO       0x40    /* Can start physical IO? */
+#define __GFP_FS       0x80    /* Can call down to low-level FS? */
+#define __GFP_COLD     0x100   /* Cache-cold page required */
+#define __GFP_NOWARN   0x200   /* Suppress page allocation failure warning */
+#define __GFP_REPEAT   0x400   /* Retry the allocation.  Might fail */
+#define __GFP_NOFAIL   0x800   /* Retry for ever.  Cannot fail */
+#define __GFP_NORETRY  0x1000  /* Do not retry.  Might fail */
+#define __GFP_NO_GROW  0x2000  /* Slab internal usage */
+#define __GFP_COMP     0x4000  /* Add compound page metadata */
+#define __GFP_ZERO     0x8000  /* Return zeroed page on success */
+
+#define __GFP_BITS_SHIFT 16    /* Room for 16 __GFP_FOO bits */
+#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
+
+/* if you forget to add the bitmask here kernel will crash, period */
+#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
+                       __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
+                       __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP)
+
+#define GFP_ATOMIC     (__GFP_HIGH)
+#define GFP_NOIO       (__GFP_WAIT)
+#define GFP_NOFS       (__GFP_WAIT | __GFP_IO)
+#define GFP_KERNEL     (__GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_USER       (__GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_HIGHUSER   (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM)
+
+/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
+   platforms, used as appropriate on others */
+
+#define GFP_DMA                __GFP_DMA
+
+
+/*
+ * There is only one page-allocator function, and two main namespaces to
+ * it. The alloc_page*() variants return 'struct page *' and as such
+ * can allocate highmem pages, the *get*page*() variants return
+ * virtual kernel addresses to the allocated page(s).
+ */
+
+/*
+ * We get the zone list from the current node and the gfp_mask.
+ * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
+ *
+ * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
+ * optimized to &contig_page_data at compile-time.
+ */
+
+#ifndef HAVE_ARCH_FREE_PAGE
+static inline void arch_free_page(struct page *page, int order) { }
+#endif
+
+extern struct page *
+FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *));
+
+static inline struct page *alloc_pages_node(int nid, unsigned int gfp_mask,
+                                               unsigned int order)
+{
+       if (unlikely(order >= MAX_ORDER))
+               return NULL;
+
+       return __alloc_pages(gfp_mask, order,
+               NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
+}
+
+#ifdef CONFIG_NUMA
+extern struct page *alloc_pages_current(unsigned gfp_mask, unsigned order);
+
+static inline struct page *
+alloc_pages(unsigned int gfp_mask, unsigned int order)
+{
+       if (unlikely(order >= MAX_ORDER))
+               return NULL;
+
+       return alloc_pages_current(gfp_mask, order);
+}
+extern struct page *alloc_page_vma(unsigned gfp_mask,
+                       struct vm_area_struct *vma, unsigned long addr);
+#else
+#define alloc_pages(gfp_mask, order) \
+               alloc_pages_node(numa_node_id(), gfp_mask, order)
+#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
+#endif
+#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
+
+extern unsigned long FASTCALL(__get_free_pages(unsigned int gfp_mask, unsigned int order));
+extern unsigned long FASTCALL(get_zeroed_page(unsigned int gfp_mask));
+
+#define __get_free_page(gfp_mask) \
+               __get_free_pages((gfp_mask),0)
+
+#define __get_dma_pages(gfp_mask, order) \
+               __get_free_pages((gfp_mask) | GFP_DMA,(order))
+
+extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
+extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
+extern void FASTCALL(free_hot_page(struct page *page));
+extern void FASTCALL(free_cold_page(struct page *page));
+
+#define __free_page(page) __free_pages((page), 0)
+#define free_page(addr) free_pages((addr),0)
+
+void page_alloc_init(void);
+
+#endif /* __LINUX_GFP_H */
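[Editorial note: an illustrative sketch, not part of the changeset, of the two allocator namespaces described in the header above; grab_scratch_page is a made-up name.]

static unsigned long grab_scratch_page(void)
{
	struct page *pg = alloc_page(GFP_KERNEL);	/* struct page based, may sleep */
	unsigned long va;

	if (pg)
		__free_page(pg);			/* release the struct page again */

	va = __get_free_page(GFP_ATOMIC);		/* virtual-address based, never sleeps */
	return va;	/* 0 on failure; otherwise the caller frees it with free_page(va) */
}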
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/hardirq.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/hardirq.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,110 @@
+#ifndef LINUX_HARDIRQ_H
+#define LINUX_HARDIRQ_H
+
+#include <linux/config.h>
+#include <linux/smp_lock.h>
+#include <asm/hardirq.h>
+#include <asm/system.h>
+
+/*
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ *
+ * The hardirq count can be overridden per architecture, the default is:
+ *
+ * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
+ * - ( bit 28 is the PREEMPT_ACTIVE flag. )
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0x0fff0000
+ */
+#define PREEMPT_BITS   8
+#define SOFTIRQ_BITS   8
+
+#ifndef HARDIRQ_BITS
+#define HARDIRQ_BITS   12
+/*
+ * The hardirq mask has to be large enough to have space for potentially
+ * all IRQ sources in the system nesting on a single CPU.
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+#endif
+
+#define PREEMPT_SHIFT  0
+#define SOFTIRQ_SHIFT  (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT  (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+
+#define __IRQ_MASK(x)  ((1UL << (x))-1)
+
+#define PREEMPT_MASK   (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define HARDIRQ_MASK   (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define SOFTIRQ_MASK   (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+
+#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+
+#define hardirq_count()        (preempt_count() & HARDIRQ_MASK)
+#define softirq_count()        (preempt_count() & SOFTIRQ_MASK)
+#define irq_count()    (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
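[Editorial note: to make the bit layout above concrete, an illustrative decomposition that is not part of the changeset; it assumes preempt_count() is visible, as the macros above already require, and current_context is a made-up name.]

static inline int current_context(void)
{
	unsigned long pc = preempt_count();

	if (pc & HARDIRQ_MASK)		/* the same test in_irq() performs */
		return 2;		/* hard interrupt context */
	if (pc & SOFTIRQ_MASK)		/* the same test in_softirq() performs */
		return 1;		/* softirq context */
	return 0;			/* process context */
}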
+
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ */
+#define in_irq()               (hardirq_count())
+#define in_softirq()           (softirq_count())
+#ifndef XEN
+#define in_interrupt()         (irq_count())
+#else
+#define in_interrupt()         0               // FIXME LATER
+#endif
+
+#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
+# define in_atomic()   ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
+#else
+# define in_atomic()   ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
+#endif
+
+#ifdef CONFIG_PREEMPT
+# define preemptible() (preempt_count() == 0 && !irqs_disabled())
+# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
+#else
+# define preemptible() 0
+# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
+#endif
+
+#ifdef CONFIG_SMP
+extern void synchronize_irq(unsigned int irq);
+#else
+# define synchronize_irq(irq)  barrier()
+#endif
+
+#define nmi_enter()            irq_enter()
+#define nmi_exit()             sub_preempt_count(HARDIRQ_OFFSET)
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+static inline void account_user_vtime(struct task_struct *tsk)
+{
+}
+
+static inline void account_system_vtime(struct task_struct *tsk)
+{
+}
+#endif
+
+#define irq_enter()                                    \
+       do {                                            \
+               account_system_vtime(current);          \
+               add_preempt_count(HARDIRQ_OFFSET);      \
+       } while (0)
+
+extern void irq_exit(void);
+
+#endif /* LINUX_HARDIRQ_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/initrd.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/initrd.h       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,20 @@
+
+#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
+
+/* 1 = load ramdisk, 0 = don't load */
+extern int rd_doload;
+
+/* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_prompt;
+
+/* starting block # of image */
+extern int rd_image_start;
+
+/* 1 if it is not an error if initrd_start < memory_start */
+extern int initrd_below_start_ok;
+
+/* free_initrd_mem always gets called with the next two as arguments.. */
+extern unsigned long initrd_start, initrd_end;
+extern void free_initrd_mem(unsigned long, unsigned long);
+
+extern unsigned int real_root_dev;
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/interrupt.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/interrupt.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,291 @@
+/* interrupt.h */
+#ifndef _LINUX_INTERRUPT_H
+#define _LINUX_INTERRUPT_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/bitops.h>
+#include <linux/preempt.h>
+#include <linux/cpumask.h>
+#include <linux/hardirq.h>
+#include <asm/atomic.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+
+/*
+ * For 2.4.x compatibility, 2.4.x can use
+ *
+ *     typedef void irqreturn_t;
+ *     #define IRQ_NONE
+ *     #define IRQ_HANDLED
+ *     #define IRQ_RETVAL(x)
+ *
+ * To mix old-style and new-style irq handler returns.
+ *
+ * IRQ_NONE means we didn't handle it.
+ * IRQ_HANDLED means that we did have a valid interrupt and handled it.
+ * IRQ_RETVAL(x) selects on the two depending on x being non-zero (for handled)
+ */
+typedef int irqreturn_t;
+
+#define IRQ_NONE       (0)
+#define IRQ_HANDLED    (1)
+#define IRQ_RETVAL(x)  ((x) != 0)
+
+#ifndef XEN
+struct irqaction {
+       irqreturn_t (*handler)(int, void *, struct pt_regs *);
+       unsigned long flags;
+       cpumask_t mask;
+       const char *name;
+       void *dev_id;
+       struct irqaction *next;
+       int irq;
+       struct proc_dir_entry *dir;
+};
+
+extern irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs);
+extern int request_irq(unsigned int,
+                      irqreturn_t (*handler)(int, void *, struct pt_regs *),
+                      unsigned long, const char *, void *);
+extern void free_irq(unsigned int, void *);
+#endif
+
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+extern void disable_irq_nosync(unsigned int irq);
+extern void disable_irq(unsigned int irq);
+extern void enable_irq(unsigned int irq);
+#endif
+
+/*
+ * Temporary defines for UP kernels, until all code gets fixed.
+ */
+#ifndef CONFIG_SMP
+static inline void __deprecated cli(void)
+{
+       local_irq_disable();
+}
+static inline void __deprecated sti(void)
+{
+       local_irq_enable();
+}
+static inline void __deprecated save_flags(unsigned long *x)
+{
+       local_save_flags(*x);
+}
+#define save_flags(x) save_flags(&x);
+static inline void __deprecated restore_flags(unsigned long x)
+{
+       local_irq_restore(x);
+}
+
+static inline void __deprecated save_and_cli(unsigned long *x)
+{
+       local_irq_save(*x);
+}
+#define save_and_cli(x)        save_and_cli(&x)
+#endif /* CONFIG_SMP */
+
+/* SoftIRQ primitives.  */
+#define local_bh_disable() \
+               do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
+#define __local_bh_enable() \
+               do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
+
+extern void local_bh_enable(void);
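[Editorial note: a minimal illustrative pairing of the primitives above, not part of the changeset.]

static void update_softirq_shared_state(void)
{
	local_bh_disable();	/* softirqs cannot run on this CPU from here ... */
	/* ... touch per-CPU data also used by a softirq handler ... */
	local_bh_enable();	/* ... until they are re-enabled here */
}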
+
+/* PLEASE avoid allocating new softirqs unless you _really_ need high-
+   frequency threaded job scheduling. For almost all purposes
+   tasklets are more than enough. E.g. all serial device BHs et
+   al. should be converted to tasklets, not to softirqs.
+ */
+
+enum
+{
+       HI_SOFTIRQ=0,
+       TIMER_SOFTIRQ,
+       NET_TX_SOFTIRQ,
+       NET_RX_SOFTIRQ,
+       SCSI_SOFTIRQ,
+       TASKLET_SOFTIRQ
+};
+
+/* softirq mask and active fields moved to irq_cpustat_t in
+ * asm/hardirq.h to get better cache usage.  KAO
+ */
+
+struct softirq_action
+{
+       void    (*action)(struct softirq_action *);
+       void    *data;
+};
+
+asmlinkage void do_softirq(void);
+//extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
+extern void softirq_init(void);
+#define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
+extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
+extern void FASTCALL(raise_softirq(unsigned int nr));
+
+
+/* Tasklets --- multithreaded analogue of BHs.
+
+   Main feature differing them of generic softirqs: tasklet
+   is running only on one CPU simultaneously.
+
+   Main feature differing them of BHs: different tasklets
+   may be run simultaneously on different CPUs.
+
+   Properties:
+   * If tasklet_schedule() is called, then tasklet is guaranteed
+     to be executed on some cpu at least once after this.
+   * If the tasklet is already scheduled, but its execution is still not
+     started, it will be executed only once.
+   * If this tasklet is already running on another CPU (or schedule is called
+     from tasklet itself), it is rescheduled for later.
+   * Tasklet is strictly serialized wrt itself, but not
+     wrt another tasklets. If client needs some intertask synchronization,
+     he makes it with spinlocks.
+ */
+
+struct tasklet_struct
+{
+       struct tasklet_struct *next;
+       unsigned long state;
+       atomic_t count;
+       void (*func)(unsigned long);
+       unsigned long data;
+};
+
+#define DECLARE_TASKLET(name, func, data) \
+struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
+
+#define DECLARE_TASKLET_DISABLED(name, func, data) \
+struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+
+
+enum
+{
+       TASKLET_STATE_SCHED,    /* Tasklet is scheduled for execution */
+       TASKLET_STATE_RUN       /* Tasklet is running (SMP only) */
+};
+
+#ifdef CONFIG_SMP
+static inline int tasklet_trylock(struct tasklet_struct *t)
+{
+       return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
+}
+
+static inline void tasklet_unlock(struct tasklet_struct *t)
+{
+       smp_mb__before_clear_bit(); 
+       clear_bit(TASKLET_STATE_RUN, &(t)->state);
+}
+
+static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+{
+       while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
+}
+#else
+#define tasklet_trylock(t) 1
+#define tasklet_unlock_wait(t) do { } while (0)
+#define tasklet_unlock(t) do { } while (0)
+#endif
+
+extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));
+
+static inline void tasklet_schedule(struct tasklet_struct *t)
+{
+       if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+               __tasklet_schedule(t);
+}
+
+extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));
+
+static inline void tasklet_hi_schedule(struct tasklet_struct *t)
+{
+       if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+               __tasklet_hi_schedule(t);
+}
+
+
+static inline void tasklet_disable_nosync(struct tasklet_struct *t)
+{
+       atomic_inc(&t->count);
+       smp_mb__after_atomic_inc();
+}
+
+static inline void tasklet_disable(struct tasklet_struct *t)
+{
+       tasklet_disable_nosync(t);
+       tasklet_unlock_wait(t);
+       smp_mb();
+}
+
+static inline void tasklet_enable(struct tasklet_struct *t)
+{
+       smp_mb__before_atomic_dec();
+       atomic_dec(&t->count);
+}
+
+static inline void tasklet_hi_enable(struct tasklet_struct *t)
+{
+       smp_mb__before_atomic_dec();
+       atomic_dec(&t->count);
+}
+
+extern void tasklet_kill(struct tasklet_struct *t);
+extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
+extern void tasklet_init(struct tasklet_struct *t,
+                        void (*func)(unsigned long), unsigned long data);
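[Editorial note: an illustrative sketch, not part of the changeset, of the declare/schedule pattern described in the comment above; my_tasklet_fn and kick_bottom_half are made-up names.]

static void my_tasklet_fn(unsigned long data)
{
	/* runs later in softirq context, never concurrently with itself */
}

static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

static void kick_bottom_half(void)
{
	tasklet_schedule(&my_tasklet);	/* a no-op if it is already scheduled but not yet run */
}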
+
+/*
+ * Autoprobing for irqs:
+ *
+ * probe_irq_on() and probe_irq_off() provide robust primitives
+ * for accurate IRQ probing during kernel initialization.  They are
+ * reasonably simple to use, are not "fooled" by spurious interrupts,
+ * and, unlike other attempts at IRQ probing, they do not get hung on
+ * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
+ *
+ * For reasonably foolproof probing, use them as follows:
+ *
+ * 1. clear and/or mask the device's internal interrupt.
+ * 2. sti();
+ * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
+ * 4. enable the device and cause it to trigger an interrupt.
+ * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
+ * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
+ * 7. service the device to clear its pending interrupt.
+ * 8. loop again if paranoia is required.
+ *
+ * probe_irq_on() returns a mask of allocated irq's.
+ *
+ * probe_irq_off() takes the mask as a parameter,
+ * and returns the irq number which occurred,
+ * or zero if none occurred, or a negative irq number
+ * if more than one irq occurred.
+ */
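[Editorial note: the numbered recipe above written out as an illustrative sketch, not part of the changeset. The mydev_*() helpers and the delay length are made up; interrupts are assumed to be enabled already, so the sti() step is omitted.]

static int autoprobe_mydev_irq(void)
{
	unsigned long irqs;
	int irq;

	mydev_mask_irq();		/* 1. quiesce the device */
	irqs = probe_irq_on();		/* 3. take over unassigned idle IRQs */
	mydev_trigger_irq();		/* 4. make the device raise an interrupt */
	mdelay(10);			/* 5. give it time to fire */
	irq = probe_irq_off(irqs);	/* 6. 0 = none, negative = multiple */
	mydev_ack_irq();		/* 7. clear the pending interrupt */
	return irq;
}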
+
+#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE) 
+static inline unsigned long probe_irq_on(void)
+{
+       return 0;
+}
+static inline int probe_irq_off(unsigned long val)
+{
+       return 0;
+}
+static inline unsigned int probe_irq_mask(unsigned long val)
+{
+       return 0;
+}
+#else
+extern unsigned long probe_irq_on(void);       /* returns 0 on failure */
+extern int probe_irq_off(unsigned long);       /* returns 0 or negative on failure */
+extern unsigned int probe_irq_mask(unsigned long);     /* returns mask of ISA interrupts */
+#endif
+
+#endif
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/jiffies.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/jiffies.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,450 @@
+#ifndef _LINUX_JIFFIES_H
+#define _LINUX_JIFFIES_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <asm/param.h>                 /* for HZ */
+#include <asm/div64.h>
+
+#ifndef div_long_long_rem
+#define div_long_long_rem(dividend,divisor,remainder) \
+({                                                     \
+       u64 result = dividend;                          \
+       *remainder = do_div(result,divisor);            \
+       result;                                         \
+})
+#endif
+
+/*
+ * The following defines establish the engineering parameters of the PLL
+ * model. The HZ variable establishes the timer interrupt frequency, 100 Hz
+ * for the SunOS kernel, 256 Hz for the Ultrix kernel and 1024 Hz for the
+ * OSF/1 kernel. The SHIFT_HZ define expresses the same value as the
+ * nearest power of two in order to avoid hardware multiply operations.
+ */
+#if HZ >= 12 && HZ < 24
+# define SHIFT_HZ      4
+#elif HZ >= 24 && HZ < 48
+# define SHIFT_HZ      5
+#elif HZ >= 48 && HZ < 96
+# define SHIFT_HZ      6
+#elif HZ >= 96 && HZ < 192
+# define SHIFT_HZ      7
+#elif HZ >= 192 && HZ < 384
+# define SHIFT_HZ      8
+#elif HZ >= 384 && HZ < 768
+# define SHIFT_HZ      9
+#elif HZ >= 768 && HZ < 1536
+# define SHIFT_HZ      10
+#else
+# error You lose.
+#endif
+
+/* LATCH is used in the interval timer and ftape setup. */
+#define LATCH  ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
+
+/* Suppose we want to divide two numbers NOM and DEN: NOM/DEN; then we can
+ * improve accuracy by shifting LSH bits, hence calculating:
+ *     (NOM << LSH) / DEN
+ * This however means trouble for large NOM, because (NOM << LSH) may no
+ * longer fit in 32 bits. The following way of calculating this gives us
+ * some slack, under the following conditions:
+ *   - (NOM / DEN) fits in (32 - LSH) bits.
+ *   - (NOM % DEN) fits in (32 - LSH) bits.
+ */
+#define SH_DIV(NOM,DEN,LSH) (   ((NOM / DEN) << LSH)                    \
+                             + (((NOM % DEN) << LSH) + DEN / 2) / DEN)
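[Editorial note: a worked instance of the macro, illustrative only and not part of the changeset, with NOM=1000000, DEN=1024, LSH=8.]

SH_DIV(1000000, 1024, 8)
  = ((1000000 / 1024) << 8) + (((1000000 % 1024) << 8) + 1024 / 2) / 1024
  = (976 << 8) + ((576 << 8) + 512) / 1024
  = 249856 + 144
  = 250000	/* equals (1000000 << 8) / 1024 exactly; the largest intermediate
		   is 249856 rather than the 256000000 of the naive form */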
+
+/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */
+#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8))
+
+/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */
+#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8))
+
+/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
+#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
+
+/* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming real ACTHZ and */
+/* a value TUSEC for TICK_USEC (can be set by adjtimex)                */
+#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8))
+
+/* some arch's have a small-data section that can be accessed register-relative
+ * but that can only take up to, say, 4-byte variables. jiffies being part of
+ * an 8-byte variable may not be correctly accessed unless we force the issue
+ */
+#define __jiffy_data  __attribute__((section(".data")))
+
+/*
+ * The 64-bit value is not volatile - you MUST NOT read it
+ * without sampling the sequence number in xtime_lock.
+ * get_jiffies_64() will do this for you as appropriate.
+ */
+extern u64 __jiffy_data jiffies_64;
+extern unsigned long volatile __jiffy_data jiffies;
+
+#if (BITS_PER_LONG < 64)
+u64 get_jiffies_64(void);
+#else
+static inline u64 get_jiffies_64(void)
+{
+       return (u64)jiffies;
+}
+#endif
+
+/*
+ *     These inlines deal with timer wrapping correctly. You are 
+ *     strongly encouraged to use them
+ *     1. Because people otherwise forget
+ *     2. Because if the timer wrap changes in future you won't have to
+ *        alter your driver code.
+ *
+ * time_after(a,b) returns true if the time a is after time b.
+ *
+ * Do this with "<0" and ">=0" to only test the sign of the result. A
+ * good compiler would generate better code (and a really good compiler
+ * wouldn't care). Gcc is currently neither.
+ */
+#define time_after(a,b)                \
+       (typecheck(unsigned long, a) && \
+        typecheck(unsigned long, b) && \
+        ((long)(b) - (long)(a) < 0))
+#define time_before(a,b)       time_after(b,a)
+
+#define time_after_eq(a,b)     \
+       (typecheck(unsigned long, a) && \
+        typecheck(unsigned long, b) && \
+        ((long)(a) - (long)(b) >= 0))
+#define time_before_eq(a,b)    time_after_eq(b,a)
+
+/*
+ * Have the 32 bit jiffies value wrap 5 minutes after boot
+ * so jiffies wrap bugs show up earlier.
+ */
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+
+/*
+ * Change timeval to jiffies, trying to avoid the
+ * most obvious overflows..
+ *
+ * And some not so obvious.
+ *
+ * Note that we don't want to return MAX_LONG, because
+ * for various timeout reasons we often end up having
+ * to wait "jiffies+1" in order to guarantee that we wait
+ * at _least_ "jiffies" - so "jiffies+1" had better still
+ * be positive.
+ */
+#define MAX_JIFFY_OFFSET ((~0UL >> 1)-1)
+
+/*
+ * We want to do realistic conversions of time so we need to use the same
+ * values the update wall clock code uses as the jiffies size.  This value
+ * is: TICK_NSEC (which is defined in timex.h).  This
+ * is a constant and is in nanoseconds.  We will use scaled math
+ * with a set of scales defined here as SEC_JIFFIE_SC,  USEC_JIFFIE_SC and
+ * NSEC_JIFFIE_SC.  Note that these defines contain nothing but
+ * constants and so are computed at compile time.  SHIFT_HZ (computed in
+ * timex.h) adjusts the scaling for different HZ values.
+
+ * Scaled math???  What is that?
+ *
+ * Scaled math is a way to do integer math on values that would,
+ * otherwise, either overflow, underflow, or cause undesired div
+ * instructions to appear in the execution path.  In short, we "scale"
+ * up the operands so they take more bits (more precision, less
+ * underflow), do the desired operation and then "scale" the result back
+ * by the same amount.  If we do the scaling by shifting we avoid the
+ * costly mpy and the dastardly div instructions.
+
+ * Suppose, for example, we want to convert from seconds to jiffies
+ * where jiffies is defined in nanoseconds as NSEC_PER_JIFFIE.  The
+ * simple math is: jiff = (sec * NSEC_PER_SEC) / NSEC_PER_JIFFIE; We
+ * observe that (NSEC_PER_SEC / NSEC_PER_JIFFIE) is a constant which we
+ * might calculate at compile time, however, the result will only have
+ * about 3-4 bits of precision (less for smaller values of HZ).
+ *
+ * So, we scale as follows:
+ * jiff = (sec) * (NSEC_PER_SEC / NSEC_PER_JIFFIE);
+ * jiff = ((sec) * ((NSEC_PER_SEC * SCALE)/ NSEC_PER_JIFFIE)) / SCALE;
+ * Then we make SCALE a power of two so:
+ * jiff = ((sec) * ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)) >> SCALE;
+ * Now we define:
+ * #define SEC_CONV = ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE))
+ * jiff = (sec * SEC_CONV) >> SCALE;
+ *
+ * Often the math we use will expand beyond 32-bits so we tell C how to
+ * do this and pass the 64-bit result of the mpy through the ">> SCALE"
+ * which should take the result back to 32-bits.  We want this expansion
+ * to capture as much precision as possible.  At the same time we don't
+ * want to overflow so we pick the SCALE to avoid this.  In this file,
+ * that means using a different scale for each range of HZ values (as
+ * defined in timex.h).
+ *
+ * For those who want to know, gcc will give a 64-bit result from a "*"
+ * operator if the result is a long long AND at least one of the
+ * operands is cast to long long (usually just prior to the "*" so as
+ * not to confuse it into thinking it really has a 64-bit operand,
+ * which, by the way, it can do, but it takes more code and at least 2
+ * mpys).
+
+ * We also need to be aware that one second in nanoseconds is only a
+ * couple of bits away from overflowing a 32-bit word, so we MUST use
+ * 64-bits to get the full range time in nanoseconds.
+
+ */
+
+/*
+ * Here are the scales we will use.  One for seconds, nanoseconds and
+ * microseconds.
+ *
+ * Within the limits of cpp we do a rough cut at the SEC_JIFFIE_SC and
+ * check if the sign bit is set.  If not, we bump the shift count by 1.
+ * (Gets an extra bit of precision where we can use it.)
+ * We know it is set for HZ = 1024 and HZ = 100 not for 1000.
+ * Haven't tested others.
+
+ * The limits of cpp (for #if expressions) allow only long (no long long), but
+ * then we only need the most significant bit.
+ */
+
+#define SEC_JIFFIE_SC (31 - SHIFT_HZ)
+#if !((((NSEC_PER_SEC << 2) / TICK_NSEC) << (SEC_JIFFIE_SC - 2)) & 0x80000000)
+#undef SEC_JIFFIE_SC
+#define SEC_JIFFIE_SC (32 - SHIFT_HZ)
+#endif
+#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
+#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
+#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
+                                TICK_NSEC -1) / (u64)TICK_NSEC))
+
+#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
+                                        TICK_NSEC -1) / (u64)TICK_NSEC))
+#define USEC_CONVERSION  \
+                    ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
+                                        TICK_NSEC -1) / (u64)TICK_NSEC))
+/*
+ * USEC_ROUND is used in the timeval to jiffie conversion.  See there
+ * for more details.  It is the scaled resolution rounding value.  Note
+ * that it is a 64-bit value.  Since, when it is applied, we are already
+ * in jiffies (albeit scaled), it is nothing but the bits we will shift
+ * off.
+ */
+#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
+/*
+ * The maximum jiffie value is (MAX_INT >> 1).  Here we translate that
+ * into seconds.  The 64-bit case will overflow if we are not careful,
+ * so use the messy SH_DIV macro to do it.  Still all constants.
+ */
+#if BITS_PER_LONG < 64
+# define MAX_SEC_IN_JIFFIES \
+       (long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC)
+#else  /* take care of overflow on 64 bits machines */
+# define MAX_SEC_IN_JIFFIES \
+       (SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1)
+
+#endif
+
+/*
+ * Convert jiffies to milliseconds and back.
+ *
+ * Avoid unnecessary multiplications/divisions in the
+ * two most common HZ cases:
+ */
+static inline unsigned int jiffies_to_msecs(const unsigned long j)
+{
+#if HZ <= 1000 && !(1000 % HZ)
+       return (1000 / HZ) * j;
+#elif HZ > 1000 && !(HZ % 1000)
+       return (j + (HZ / 1000) - 1)/(HZ / 1000);
+#else
+       return (j * 1000) / HZ;
+#endif
+}
+
+static inline unsigned int jiffies_to_usecs(const unsigned long j)
+{
+#if HZ <= 1000000 && !(1000000 % HZ)
+       return (1000000 / HZ) * j;
+#elif HZ > 1000000 && !(HZ % 1000000)
+       return (j + (HZ / 1000000) - 1)/(HZ / 1000000);
+#else
+       return (j * 1000000) / HZ;
+#endif
+}
+
+static inline unsigned long msecs_to_jiffies(const unsigned int m)
+{
+       if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+               return MAX_JIFFY_OFFSET;
+#if HZ <= 1000 && !(1000 % HZ)
+       return (m + (1000 / HZ) - 1) / (1000 / HZ);
+#elif HZ > 1000 && !(HZ % 1000)
+       return m * (HZ / 1000);
+#else
+       return (m * HZ + 999) / 1000;
+#endif
+}
+
+static inline unsigned long usecs_to_jiffies(const unsigned int u)
+{
+       if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
+               return MAX_JIFFY_OFFSET;
+#if HZ <= 1000000 && !(1000000 % HZ)
+       return (u + (1000000 / HZ) - 1) / (1000000 / HZ);
+#elif HZ > 1000000 && !(HZ % 1000000)
+       return u * (HZ / 1000000);
+#else
+       return (u * HZ + 999999) / 1000000;
+#endif
+}
+
+/*
+ * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
+ * that a remainder subtract here would not do the right thing as the
+ * resolution values don't fall on second boundaries.  I.e. the line:
+ * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
+ *
+ * Rather, we just shift the bits off the right.
+ *
+ * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
+ * value to a scaled second value.
+ */
+static __inline__ unsigned long
+timespec_to_jiffies(const struct timespec *value)
+{
+       unsigned long sec = value->tv_sec;
+       long nsec = value->tv_nsec + TICK_NSEC - 1;
+
+       if (sec >= MAX_SEC_IN_JIFFIES){
+               sec = MAX_SEC_IN_JIFFIES;
+               nsec = 0;
+       }
+       return (((u64)sec * SEC_CONVERSION) +
+               (((u64)nsec * NSEC_CONVERSION) >>
+                (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
+
+}
+
+static __inline__ void
+jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
+{
+       /*
+        * Convert jiffies to nanoseconds and separate with
+        * one divide.
+        */
+       u64 nsec = (u64)jiffies * TICK_NSEC;
+       value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
+}
+
+/* Same for "timeval"
+ *
+ * Well, almost.  The problem here is that the real system resolution is
+ * in nanoseconds and the value being converted is in micro seconds.
+ * Also for some machines (those that use HZ = 1024, in-particular),
+ * there is a LARGE error in the tick size in microseconds.
+
+ * The solution we use is to do the rounding AFTER we convert the
+ * microsecond part.  Thus the USEC_ROUND, the bits to be shifted off.
+ * Instruction wise, this should cost only an additional add-with-carry
+ * instruction compared with the timespec conversion above.
+ */
+static __inline__ unsigned long
+timeval_to_jiffies(const struct timeval *value)
+{
+       unsigned long sec = value->tv_sec;
+       long usec = value->tv_usec;
+
+       if (sec >= MAX_SEC_IN_JIFFIES){
+               sec = MAX_SEC_IN_JIFFIES;
+               usec = 0;
+       }
+       return (((u64)sec * SEC_CONVERSION) +
+               (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
+                (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
+}
+
+static __inline__ void
+jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
+{
+       /*
+        * Convert jiffies to nanoseconds and separate with
+        * one divide.
+        */
+       u64 nsec = (u64)jiffies * TICK_NSEC;
+       value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_usec);
+       value->tv_usec /= NSEC_PER_USEC;
+}
+
+/*
+ * Convert jiffies/jiffies_64 to clock_t and back.
+ */
+static inline clock_t jiffies_to_clock_t(long x)
+{
+#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
+       return x / (HZ / USER_HZ);
+#else
+       u64 tmp = (u64)x * TICK_NSEC;
+       do_div(tmp, (NSEC_PER_SEC / USER_HZ));
+       return (long)tmp;
+#endif
+}
+
+static inline unsigned long clock_t_to_jiffies(unsigned long x)
+{
+#if (HZ % USER_HZ)==0
+       if (x >= ~0UL / (HZ / USER_HZ))
+               return ~0UL;
+       return x * (HZ / USER_HZ);
+#else
+       u64 jif;
+
+       /* Don't worry about loss of precision here .. */
+       if (x >= ~0UL / HZ * USER_HZ)
+               return ~0UL;
+
+       /* .. but do try to contain it here */
+       jif = x * (u64) HZ;
+       do_div(jif, USER_HZ);
+       return jif;
+#endif
+}
+
+static inline u64 jiffies_64_to_clock_t(u64 x)
+{
+#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
+       do_div(x, HZ / USER_HZ);
+#else
+       /*
+        * There are better ways that don't overflow early,
+        * but even this doesn't overflow in hundreds of years
+        * in 64 bits, so..
+        */
+       x *= TICK_NSEC;
+       do_div(x, (NSEC_PER_SEC / USER_HZ));
+#endif
+       return x;
+}
+
+static inline u64 nsec_to_clock_t(u64 x)
+{
+#if (NSEC_PER_SEC % USER_HZ) == 0
+       do_div(x, (NSEC_PER_SEC / USER_HZ));
+#elif (USER_HZ % 512) == 0
+       x *= USER_HZ/512;
+       do_div(x, (NSEC_PER_SEC / 512));
+#else
+       /*
+         * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
+         * overflow after 64.99 years.
+         * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
+         */
+       x *= 9;
+       do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (USER_HZ/2))
+                                 / USER_HZ));
+#endif
+       return x;
+}
+
+#endif
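
The SH_DIV and time_after() comments above describe two arithmetic tricks: fixed-point division that keeps LSH extra bits of precision, and wrap-safe tick comparison via signed subtraction. A minimal standalone C sketch of both follows; the PIT rate of 1193182 Hz and HZ = 1024 are assumed example values, and ticks_after() is a hypothetical stand-in for time_after() so the demo compiles outside a kernel tree.

#include <stdio.h>
#include <limits.h>

/* Same shape as the SH_DIV macro above: divide NOM by DEN while keeping
 * LSH extra bits of fractional precision. */
#define SH_DIV(NOM,DEN,LSH) (   ((NOM / DEN) << LSH)                    \
                             + (((NOM % DEN) << LSH) + DEN / 2) / DEN)

/* Wrap-safe "is a after b?" for an unsigned tick counter -- the same
 * signed-difference trick used by time_after(). */
static int ticks_after(unsigned long a, unsigned long b)
{
        return (long)(b) - (long)(a) < 0;
}

int main(void)
{
        /* Assumed example values: PC-style PIT clock, HZ = 1024. */
        unsigned long clock_tick_rate = 1193182;
        unsigned long hz = 1024;
        unsigned long latch = (clock_tick_rate + hz/2) / hz;
        unsigned long acthz = SH_DIV(clock_tick_rate, latch, 8);

        /* ACTHZ carries 8 fractional bits: 262193 here, i.e. ~1024.19 Hz. */
        printf("LATCH = %lu, ACTHZ = %lu (~%lu.%02lu Hz)\n",
               latch, acthz, acthz >> 8, (acthz & 0xff) * 100 / 256);

        /* A deadline just before the counter wraps, sampled again just after. */
        unsigned long deadline = ULONG_MAX - 16;
        unsigned long now = 15;
        printf("ticks_after(now, deadline) = %d\n", ticks_after(now, deadline));
        return 0;
}
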
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/kmalloc_sizes.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/kmalloc_sizes.h        Tue Aug  2 23:59:09 2005
@@ -0,0 +1,33 @@
+#if (PAGE_SIZE == 4096)
+       CACHE(32)
+#endif
+       CACHE(64)
+#if L1_CACHE_BYTES < 64
+       CACHE(96)
+#endif
+       CACHE(128)
+#if L1_CACHE_BYTES < 128
+       CACHE(192)
+#endif
+       CACHE(256)
+       CACHE(512)
+       CACHE(1024)
+       CACHE(2048)
+       CACHE(4096)
+       CACHE(8192)
+       CACHE(16384)
+       CACHE(32768)
+       CACHE(65536)
+       CACHE(131072)
+#ifndef CONFIG_MMU
+       CACHE(262144)
+       CACHE(524288)
+       CACHE(1048576)
+#ifdef CONFIG_LARGE_ALLOCS
+       CACHE(2097152)
+       CACHE(4194304)
+       CACHE(8388608)
+       CACHE(16777216)
+       CACHE(33554432)
+#endif /* CONFIG_LARGE_ALLOCS */
+#endif /* CONFIG_MMU */
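
kmalloc_sizes.h above is an "X-macro" header: it contains only CACHE(size) invocations and deliberately never defines CACHE() itself, so the including code (the slab allocator) can expand the same size list several times with different definitions. A standalone sketch of the pattern, using a hypothetical MY_SIZES list in place of the real header:

#include <stdio.h>
#include <stddef.h>

/* Stand-in for the header: a bare list of CACHE(size) entries. */
#define MY_SIZES   \
        CACHE(32)  \
        CACHE(64)  \
        CACHE(128) \
        CACHE(256)

/* First expansion: an array of the sizes themselves. */
static const size_t cache_sizes[] = {
#define CACHE(x) x,
        MY_SIZES
#undef CACHE
};

/* Second expansion of the same list: printable cache names. */
static const char *cache_names[] = {
#define CACHE(x) "size-" #x,
        MY_SIZES
#undef CACHE
};

int main(void)
{
        size_t i;
        for (i = 0; i < sizeof(cache_sizes) / sizeof(cache_sizes[0]); i++)
                printf("%-10s %4zu bytes\n", cache_names[i], cache_sizes[i]);
        return 0;
}
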
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/linkage.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/linkage.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,47 @@
+#ifndef _LINUX_LINKAGE_H
+#define _LINUX_LINKAGE_H
+
+#include <linux/config.h>
+#include <asm/linkage.h>
+
+#ifdef __cplusplus
+#define CPP_ASMLINKAGE extern "C"
+#else
+#define CPP_ASMLINKAGE
+#endif
+
+#ifndef asmlinkage
+#define asmlinkage CPP_ASMLINKAGE
+#endif
+
+#ifndef prevent_tail_call
+# define prevent_tail_call(ret) do { } while (0)
+#endif
+
+#ifndef __ALIGN
+#define __ALIGN                .align 4,0x90
+#define __ALIGN_STR    ".align 4,0x90"
+#endif
+
+#ifdef __ASSEMBLY__
+
+#define ALIGN __ALIGN
+#define ALIGN_STR __ALIGN_STR
+
+#define ENTRY(name) \
+  .globl name; \
+  ALIGN; \
+  name:
+
+#endif
+
+#define NORET_TYPE    /**/
+#define ATTRIB_NORET  __attribute__((noreturn))
+#define NORET_AND     noreturn,
+
+#ifndef FASTCALL
+#define FASTCALL(x)    x
+#define fastcall
+#endif
+
+#endif
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/linuxtime.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/linuxtime.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,181 @@
+#ifndef _LINUX_TIME_H
+#define _LINUX_TIME_H
+
+#include <linux/types.h>
+
+#ifdef __KERNEL__
+#include <linux/seqlock.h>
+#endif
+
+#ifndef _STRUCT_TIMESPEC
+#define _STRUCT_TIMESPEC
+struct timespec {
+       time_t  tv_sec;         /* seconds */
+       long    tv_nsec;        /* nanoseconds */
+};
+#endif /* _STRUCT_TIMESPEC */
+
+struct timeval {
+       time_t          tv_sec;         /* seconds */
+       suseconds_t     tv_usec;        /* microseconds */
+};
+
+struct timezone {
+       int     tz_minuteswest; /* minutes west of Greenwich */
+       int     tz_dsttime;     /* type of dst correction */
+};
+
+#ifdef __KERNEL__
+
+/* Parameters used to convert the timespec values */
+#ifndef USEC_PER_SEC
+#define USEC_PER_SEC (1000000L)
+#endif
+
+#ifndef NSEC_PER_SEC
+#define NSEC_PER_SEC (1000000000L)
+#endif
+
+#ifndef NSEC_PER_USEC
+#define NSEC_PER_USEC (1000L)
+#endif
+
+static __inline__ int timespec_equal(struct timespec *a, struct timespec *b) 
+{ 
+       return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
+} 
+
+/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
+ * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
+ * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
+ *
+ * [For the Julian calendar (which was used in Russia before 1917,
+ * Britain & colonies before 1752, anywhere else before 1582,
+ * and is still in use by some communities) leave out the
+ * -year/100+year/400 terms, and add 10.]
+ *
+ * This algorithm was first published by Gauss (I think).
+ *
+ * WARNING: this function will overflow on 2106-02-07 06:28:16 on
+ * machines where long is 32-bit! (However, as time_t is signed, we
+ * will already get problems at other places on 2038-01-19 03:14:08)
+ */
+static inline unsigned long
+mktime (unsigned int year, unsigned int mon,
+       unsigned int day, unsigned int hour,
+       unsigned int min, unsigned int sec)
+{
+       if (0 >= (int) (mon -= 2)) {    /* 1..12 -> 11,12,1..10 */
+               mon += 12;              /* Puts Feb last since it has leap day */
+               year -= 1;
+       }
+
+       return (((
+               (unsigned long) (year/4 - year/100 + year/400 + 367*mon/12 + day) +
+                       year*365 - 719499
+           )*24 + hour /* now have hours */
+         )*60 + min /* now have minutes */
+       )*60 + sec; /* finally seconds */
+}
+
+extern struct timespec xtime;
+extern struct timespec wall_to_monotonic;
+extern seqlock_t xtime_lock;
+
+static inline unsigned long get_seconds(void)
+{ 
+       return xtime.tv_sec;
+}
+
+struct timespec current_kernel_time(void);
+
+#define CURRENT_TIME (current_kernel_time())
+#define CURRENT_TIME_SEC ((struct timespec) { xtime.tv_sec, 0 })
+
+extern void do_gettimeofday(struct timeval *tv);
+extern int do_settimeofday(struct timespec *tv);
+extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
+extern void clock_was_set(void); // call whenever the clock is set
+extern int do_posix_clock_monotonic_gettime(struct timespec *tp);
+extern long do_nanosleep(struct timespec *t);
+extern long do_utimes(char __user * filename, struct timeval * times);
+struct itimerval;
+extern int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue);
+extern int do_getitimer(int which, struct itimerval *value);
+extern void getnstimeofday (struct timespec *tv);
+
+extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
+
+static inline void
+set_normalized_timespec (struct timespec *ts, time_t sec, long nsec)
+{
+       while (nsec > NSEC_PER_SEC) {
+               nsec -= NSEC_PER_SEC;
+               ++sec;
+       }
+       while (nsec < 0) {
+               nsec += NSEC_PER_SEC;
+               --sec;
+       }
+       ts->tv_sec = sec;
+       ts->tv_nsec = nsec;
+}
+
+#endif /* __KERNEL__ */
+
+#define NFDBITS                        __NFDBITS
+
+#define FD_SETSIZE             __FD_SETSIZE
+#define FD_SET(fd,fdsetp)      __FD_SET(fd,fdsetp)
+#define FD_CLR(fd,fdsetp)      __FD_CLR(fd,fdsetp)
+#define FD_ISSET(fd,fdsetp)    __FD_ISSET(fd,fdsetp)
+#define FD_ZERO(fdsetp)                __FD_ZERO(fdsetp)
+
+/*
+ * Names of the interval timers, and structure
+ * defining a timer setting.
+ */
+#define        ITIMER_REAL     0
+#define        ITIMER_VIRTUAL  1
+#define        ITIMER_PROF     2
+
+struct  itimerspec {
+        struct  timespec it_interval;    /* timer period */
+        struct  timespec it_value;       /* timer expiration */
+};
+
+struct itimerval {
+       struct  timeval it_interval;    /* timer interval */
+       struct  timeval it_value;       /* current value */
+};
+
+
+/*
+ * The IDs of the various system clocks (for POSIX.1b interval timers).
+ */
+#define CLOCK_REALTIME           0
+#define CLOCK_MONOTONIC          1
+#define CLOCK_PROCESS_CPUTIME_ID 2
+#define CLOCK_THREAD_CPUTIME_ID         3
+#define CLOCK_REALTIME_HR       4
+#define CLOCK_MONOTONIC_HR       5
+
+/*
+ * The IDs of various hardware clocks
+ */
+
+
+#define CLOCK_SGI_CYCLE 10
+#define MAX_CLOCKS 16
+#define CLOCKS_MASK  (CLOCK_REALTIME | CLOCK_MONOTONIC | \
+                     CLOCK_REALTIME_HR | CLOCK_MONOTONIC_HR)
+#define CLOCKS_MONO (CLOCK_MONOTONIC & CLOCK_MONOTONIC_HR)
+
+/*
+ * The various flags for setting POSIX.1b interval timers.
+ */
+
+#define TIMER_ABSTIME 0x01
+
+
+#endif
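
The mktime() inline above converts a Gregorian calendar date directly to seconds since 1970-01-01 using the formula the comment attributes to Gauss. A quick standalone check of the arithmetic (the function is copied here under the hypothetical name gregorian_to_epoch() so it does not clash with libc's mktime()):

#include <stdio.h>

static unsigned long
gregorian_to_epoch(unsigned int year, unsigned int mon,
                   unsigned int day, unsigned int hour,
                   unsigned int min, unsigned int sec)
{
        if (0 >= (int) (mon -= 2)) {    /* 1..12 -> 11,12,1..10 */
                mon += 12;              /* puts Feb last since it has the leap day */
                year -= 1;
        }
        return (((
                (unsigned long) (year/4 - year/100 + year/400 + 367*mon/12 + day) +
                        year*365 - 719499
            )*24 + hour /* now have hours */
          )*60 + min /* now have minutes */
        )*60 + sec; /* finally seconds */
}

int main(void)
{
        /* 2000-01-01 00:00:00 UTC is 946684800 seconds after the epoch. */
        printf("%lu\n", gregorian_to_epoch(2000, 1, 1, 0, 0, 0));
        /* The epoch itself comes out as 0. */
        printf("%lu\n", gregorian_to_epoch(1970, 1, 1, 0, 0, 0));
        return 0;
}
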
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/mmzone.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/mmzone.h       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,415 @@
+#ifndef _LINUX_MMZONE_H
+#define _LINUX_MMZONE_H
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/cache.h>
+#include <linux/threads.h>
+#include <linux/numa.h>
+#include <asm/atomic.h>
+
+/* Free memory management - zoned buddy allocator.  */
+#ifndef CONFIG_FORCE_MAX_ZONEORDER
+#define MAX_ORDER 11
+#else
+#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
+#endif
+
+struct free_area {
+       struct list_head        free_list;
+       unsigned long           nr_free;
+};
+
+struct pglist_data;
+
+/*
+ * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
+ * So add a wild amount of padding here to ensure that they fall into separate
+ * cachelines.  There are very few zone structures in the machine, so space
+ * consumption is not a concern here.
+ */
+#if defined(CONFIG_SMP)
+struct zone_padding {
+       char x[0];
+} ____cacheline_maxaligned_in_smp;
+#define ZONE_PADDING(name)     struct zone_padding name;
+#else
+#define ZONE_PADDING(name)
+#endif
+
+struct per_cpu_pages {
+       int count;              /* number of pages in the list */
+       int low;                /* low watermark, refill needed */
+       int high;               /* high watermark, emptying needed */
+       int batch;              /* chunk size for buddy add/remove */
+       struct list_head list;  /* the list of pages */
+};
+
+struct per_cpu_pageset {
+       struct per_cpu_pages pcp[2];    /* 0: hot.  1: cold */
+#ifdef CONFIG_NUMA
+       unsigned long numa_hit;         /* allocated in intended node */
+       unsigned long numa_miss;        /* allocated in non intended node */
+       unsigned long numa_foreign;     /* was intended here, hit elsewhere */
+       unsigned long interleave_hit;   /* interleaver preferred this zone */
+       unsigned long local_node;       /* allocation from local node */
+       unsigned long other_node;       /* allocation from other node */
+#endif
+} ____cacheline_aligned_in_smp;
+
+#define ZONE_DMA               0
+#define ZONE_NORMAL            1
+#define ZONE_HIGHMEM           2
+
+#define MAX_NR_ZONES           3       /* Sync this with ZONES_SHIFT */
+#define ZONES_SHIFT            2       /* ceil(log2(MAX_NR_ZONES)) */
+
+
+/*
+ * When a memory allocation must conform to specific limitations (such
+ * as being suitable for DMA) the caller will pass in hints to the
+ * allocator in the gfp_mask, in the zone modifier bits.  These bits
+ * are used to select a priority ordered list of memory zones which
+ * match the requested limits.  GFP_ZONEMASK defines which bits within
+ * the gfp_mask should be considered as zone modifiers.  Each valid
+ * combination of the zone modifier bits has a corresponding list
+ * of zones (in node_zonelists).  Thus for two zone modifiers there
+ * will be a maximum of 4 (2 ** 2) zonelists, for 3 modifiers there will
+ * be 8 (2 ** 3) zonelists.  GFP_ZONETYPES defines the number of possible
+ * combinations of zone modifiers in "zone modifier space".
+ */
+#define GFP_ZONEMASK   0x03
+/*
+ * As an optimisation any zone modifier bits which are only valid when
+ * no other zone modifier bits are set (loners) should be placed in
+ * the highest order bits of this field.  This allows us to reduce the
+ * extent of the zonelists thus saving space.  For example in the case
+ * of three zone modifier bits, we could require up to eight zonelists.
+ * If the left most zone modifier is a "loner" then the highest valid
+ * zonelist would be four allowing us to allocate only five zonelists.
+ * Use the first form when the left most bit is not a "loner", otherwise
+ * use the second.
+ */
+/* #define GFP_ZONETYPES       (GFP_ZONEMASK + 1) */           /* Non-loner */
+#define GFP_ZONETYPES  ((GFP_ZONEMASK + 1) / 2 + 1)            /* Loner */
+
+/*
+ * On machines where it is needed (eg PCs) we divide physical memory
+ * into multiple physical zones. On a PC we have 3 zones:
+ *
+ * ZONE_DMA      < 16 MB       ISA DMA capable memory
+ * ZONE_NORMAL 16-896 MB       direct mapped by the kernel
+ * ZONE_HIGHMEM         > 896 MB       only page cache and user processes
+ */
+
+struct zone {
+       /* Fields commonly accessed by the page allocator */
+       unsigned long           free_pages;
+       unsigned long           pages_min, pages_low, pages_high;
+       /*
+        * We don't know if the memory that we're going to allocate will be freeable
+        * or/and it will be released eventually, so to avoid totally wasting several
+        * GB of ram we must reserve some of the lower zone memory (otherwise we risk
+        * to run OOM on the lower zones despite there's tons of freeable ram
+        * on the higher zones). This array is recalculated at runtime if the
+        * sysctl_lowmem_reserve_ratio sysctl changes.
+        */
+       unsigned long           lowmem_reserve[MAX_NR_ZONES];
+
+       struct per_cpu_pageset  pageset[NR_CPUS];
+
+       /*
+        * free areas of different sizes
+        */
+       spinlock_t              lock;
+       struct free_area        free_area[MAX_ORDER];
+
+
+       ZONE_PADDING(_pad1_)
+
+       /* Fields commonly accessed by the page reclaim scanner */
+       spinlock_t              lru_lock;       
+       struct list_head        active_list;
+       struct list_head        inactive_list;
+       unsigned long           nr_scan_active;
+       unsigned long           nr_scan_inactive;
+       unsigned long           nr_active;
+       unsigned long           nr_inactive;
+       unsigned long           pages_scanned;     /* since last reclaim */
+       int                     all_unreclaimable; /* All pages pinned */
+
+       /*
+        * prev_priority holds the scanning priority for this zone.  It is
+        * defined as the scanning priority at which we achieved our reclaim
+        * target at the previous try_to_free_pages() or balance_pgdat()
+        * invocation.
+        *
+        * We use prev_priority as a measure of how much stress page reclaim is
+        * under - it drives the swappiness decision: whether to unmap mapped
+        * pages.
+        *
+        * temp_priority is used to remember the scanning priority at which
+        * this zone was successfully refilled to free_pages == pages_high.
+        *
+        * Access to both these fields is quite racy even on uniprocessor.  But
+        * it is expected to average out OK.
+        */
+       int temp_priority;
+       int prev_priority;
+
+
+       ZONE_PADDING(_pad2_)
+       /* Rarely used or read-mostly fields */
+
+       /*
+        * wait_table           -- the array holding the hash table
+        * wait_table_size      -- the size of the hash table array
+        * wait_table_bits      -- wait_table_size == (1 << wait_table_bits)
+        *
+        * The purpose of all these is to keep track of the people
+        * waiting for a page to become available and make them
+        * runnable again when possible. The trouble is that this
+        * consumes a lot of space, especially when so few things
+        * wait on pages at a given time. So instead of using
+        * per-page waitqueues, we use a waitqueue hash table.
+        *
+        * The bucket discipline is to sleep on the same queue when
+        * colliding and wake all in that wait queue when removing.
+        * When something wakes, it must check to be sure its page is
+        * truly available, a la thundering herd. The cost of a
+        * collision is great, but given the expected load of the
+        * table, they should be so rare as to be outweighed by the
+        * benefits from the saved space.
+        *
+        * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
+        * primary users of these fields, and in mm/page_alloc.c
+        * free_area_init_core() performs the initialization of them.
+        */
+       wait_queue_head_t       * wait_table;
+       unsigned long           wait_table_size;
+       unsigned long           wait_table_bits;
+
+       /*
+        * Discontig memory support fields.
+        */
+       struct pglist_data      *zone_pgdat;
+       struct page             *zone_mem_map;
+       /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
+       unsigned long           zone_start_pfn;
+
+       unsigned long           spanned_pages;  /* total size, including holes */
+       unsigned long           present_pages;  /* amount of memory (excluding holes) */
+
+       /*
+        * rarely used fields:
+        */
+       char                    *name;
+} ____cacheline_maxaligned_in_smp;
+
+
+/*
+ * The "priority" of VM scanning is how much of the queues we will scan in one
+ * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
+ * queues ("queue_length >> 12") during an aging round.
+ */
+#define DEF_PRIORITY 12
+
+/*
+ * One allocation request operates on a zonelist. A zonelist
+ * is a list of zones, the first one is the 'goal' of the
+ * allocation, the other zones are fallback zones, in decreasing
+ * priority.
+ *
+ * Right now a zonelist takes up less than a cacheline. We never
+ * modify it apart from boot-up, and only a few indices are used,
+ * so despite the zonelist table being relatively big, the cache
+ * footprint of this construct is very small.
+ */
+struct zonelist {
+       struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1]; // NULL delimited
+};
+
+
+/*
+ * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
+ * (mostly NUMA machines?) to denote a higher-level memory zone than the
+ * zone denotes.
+ *
+ * On NUMA machines, each NUMA node would have a pg_data_t to describe
+ * its memory layout.
+ *
+ * Memory statistics and page replacement data structures are maintained on a
+ * per-zone basis.
+ */
+struct bootmem_data;
+typedef struct pglist_data {
+       struct zone node_zones[MAX_NR_ZONES];
+       struct zonelist node_zonelists[GFP_ZONETYPES];
+       int nr_zones;
+       struct page *node_mem_map;
+       struct bootmem_data *bdata;
+       unsigned long node_start_pfn;
+       unsigned long node_present_pages; /* total number of physical pages */
+       unsigned long node_spanned_pages; /* total size of physical page
+                                            range, including holes */
+       int node_id;
+       struct pglist_data *pgdat_next;
+       wait_queue_head_t kswapd_wait;
+       struct task_struct *kswapd;
+       int kswapd_max_order;
+} pg_data_t;
+
+#define node_present_pages(nid)        (NODE_DATA(nid)->node_present_pages)
+#define node_spanned_pages(nid)        (NODE_DATA(nid)->node_spanned_pages)
+
+extern struct pglist_data *pgdat_list;
+
+void __get_zone_counts(unsigned long *active, unsigned long *inactive,
+                       unsigned long *free, struct pglist_data *pgdat);
+void get_zone_counts(unsigned long *active, unsigned long *inactive,
+                       unsigned long *free);
+void build_all_zonelists(void);
+void wakeup_kswapd(struct zone *zone, int order);
+int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+               int alloc_type, int can_try_harder, int gfp_high);
+
+/*
+ * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
+ */
+#define zone_idx(zone)         ((zone) - (zone)->zone_pgdat->node_zones)
+
+/**
+ * for_each_pgdat - helper macro to iterate over all nodes
+ * @pgdat - pointer to a pg_data_t variable
+ *
+ * Meant to help with common loops of the form
+ * pgdat = pgdat_list;
+ * while(pgdat) {
+ *     ...
+ *     pgdat = pgdat->pgdat_next;
+ * }
+ */
+#define for_each_pgdat(pgdat) \
+       for (pgdat = pgdat_list; pgdat; pgdat = pgdat->pgdat_next)
+
+/*
+ * next_zone - helper magic for for_each_zone()
+ * Thanks to William Lee Irwin III for this piece of ingenuity.
+ */
+static inline struct zone *next_zone(struct zone *zone)
+{
+       pg_data_t *pgdat = zone->zone_pgdat;
+
+       if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
+               zone++;
+       else if (pgdat->pgdat_next) {
+               pgdat = pgdat->pgdat_next;
+               zone = pgdat->node_zones;
+       } else
+               zone = NULL;
+
+       return zone;
+}
+
+/**
+ * for_each_zone - helper macro to iterate over all memory zones
+ * @zone - pointer to struct zone variable
+ *
+ * The user only needs to declare the zone variable, for_each_zone
+ * fills it in. This basically means for_each_zone() is an
+ * easier to read version of this piece of code:
+ *
+ * for (pgdat = pgdat_list; pgdat; pgdat = pgdat->node_next)
+ *     for (i = 0; i < MAX_NR_ZONES; ++i) {
+ *             struct zone * z = pgdat->node_zones + i;
+ *             ...
+ *     }
+ * }
+ */
+#define for_each_zone(zone) \
+       for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone))
+
+static inline int is_highmem_idx(int idx)
+{
+       return (idx == ZONE_HIGHMEM);
+}
+
+static inline int is_normal_idx(int idx)
+{
+       return (idx == ZONE_NORMAL);
+}
+/**
+ * is_highmem - helper function to quickly check if a struct zone is a 
+ *              highmem zone or not.  This is an attempt to keep references
+ *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
+ * @zone - pointer to struct zone variable
+ */
+static inline int is_highmem(struct zone *zone)
+{
+       return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
+}
+
+static inline int is_normal(struct zone *zone)
+{
+       return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
+}
+
+/* These two functions are used to setup the per zone pages min values */
+struct ctl_table;
+struct file;
+int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *, 
+                                       void __user *, size_t *, loff_t *);
+extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
+int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
+                                       void __user *, size_t *, loff_t *);
+
+#include <linux/topology.h>
+/* Returns the number of the current Node. */
+#define numa_node_id()         (cpu_to_node(_smp_processor_id()))
+
+#ifndef CONFIG_DISCONTIGMEM
+
+extern struct pglist_data contig_page_data;
+#define NODE_DATA(nid)         (&contig_page_data)
+#define NODE_MEM_MAP(nid)      mem_map
+#define MAX_NODES_SHIFT                1
+#define pfn_to_nid(pfn)                (0)
+
+#else /* CONFIG_DISCONTIGMEM */
+
+#include <asm/mmzone.h>
+
+#if BITS_PER_LONG == 32 || defined(ARCH_HAS_ATOMIC_UNSIGNED)
+/*
+ * with 32 bit page->flags field, we reserve 8 bits for node/zone info.
+ * there are 3 zones (2 bits) and this leaves 8-2=6 bits for nodes.
+ */
+#define MAX_NODES_SHIFT                6
+#elif BITS_PER_LONG == 64
+/*
+ * with 64 bit flags field, there's plenty of room.
+ */
+#define MAX_NODES_SHIFT                10
+#endif
+
+#endif /* !CONFIG_DISCONTIGMEM */
+
+#if NODES_SHIFT > MAX_NODES_SHIFT
+#error NODES_SHIFT > MAX_NODES_SHIFT
+#endif
+
+/* There are currently 3 zones: DMA, Normal & Highmem, thus we need 2 bits */
+#define MAX_ZONES_SHIFT                2
+
+#if ZONES_SHIFT > MAX_ZONES_SHIFT
+#error ZONES_SHIFT > MAX_ZONES_SHIFT
+#endif
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _LINUX_MMZONE_H */
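
The for_each_pgdat()/for_each_zone() comments above already show the loop they replace; as a usage illustration, the following kernel-context sketch (it only uses names declared in this header and is not compilable stand-alone) sums a racy snapshot of free pages across every zone of every node:

#include <linux/mmzone.h>

static unsigned long count_free_pages(void)
{
        struct zone *zone;
        unsigned long free = 0;

        /* for_each_zone() walks all zones of all nodes via next_zone();
         * reading free_pages without the zone lock is only a rough snapshot. */
        for_each_zone(zone)
                free += zone->free_pages;

        return free;
}
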
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/numa.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/numa.h Tue Aug  2 23:59:09 2005
@@ -0,0 +1,16 @@
+#ifndef _LINUX_NUMA_H
+#define _LINUX_NUMA_H
+
+#include <linux/config.h>
+
+#ifdef CONFIG_DISCONTIGMEM
+#include <asm/numnodes.h>
+#endif
+
+#ifndef NODES_SHIFT
+#define NODES_SHIFT     0
+#endif
+
+#define MAX_NUMNODES    (1 << NODES_SHIFT)
+
+#endif /* _LINUX_NUMA_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/page-flags.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/page-flags.h   Tue Aug  2 23:59:09 2005
@@ -0,0 +1,321 @@
+/*
+ * Macros for manipulating and testing page->flags
+ */
+
+#ifndef PAGE_FLAGS_H
+#define PAGE_FLAGS_H
+
+#include <linux/percpu.h>
+#include <linux/cache.h>
+#include <asm/pgtable.h>
+
+/*
+ * Various page->flags bits:
+ *
+ * PG_reserved is set for special pages, which can never be swapped out. Some
+ * of them might not even exist (eg empty_bad_page)...
+ *
+ * The PG_private bitflag is set if page->private contains a valid value.
+ *
+ * During disk I/O, PG_locked is used. This bit is set before I/O and
+ * reset when I/O completes. page_waitqueue(page) is a wait queue of all tasks
+ * waiting for the I/O on this page to complete.
+ *
+ * PG_uptodate tells whether the page's contents is valid.  When a read
+ * completes, the page becomes uptodate, unless a disk I/O error happened.
+ *
+ * For choosing which pages to swap out, inode pages carry a PG_referenced bit,
+ * which is set any time the system accesses that page through the (mapping,
+ * index) hash table.  This referenced bit, together with the referenced bit
+ * in the page tables, is used to manipulate page->age and move the page across
+ * the active, inactive_dirty and inactive_clean lists.
+ *
+ * Note that the referenced bit, the page->lru list_head and the active,
+ * inactive_dirty and inactive_clean lists are protected by the
+ * zone->lru_lock, and *NOT* by the usual PG_locked bit!
+ *
+ * PG_error is set to indicate that an I/O error occurred on this page.
+ *
+ * PG_arch_1 is an architecture specific page state bit.  The generic code
+ * guarantees that this bit is cleared for a page when it first is entered into
+ * the page cache.
+ *
+ * PG_highmem pages are not permanently mapped into the kernel virtual address
+ * space, they need to be kmapped separately for doing IO on the pages.  The
+ * struct page (these bits with information) are always mapped into kernel
+ * address space...
+ */
+
+/*
+ * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
+ * locked- and dirty-page accounting.  The top eight bits of page->flags are
+ * used for page->zone, so putting flag bits there doesn't work.
+ */
+#define PG_locked               0      /* Page is locked. Don't touch. */
+#define PG_error                1
+#define PG_referenced           2
+#define PG_uptodate             3
+
+#define PG_dirty                4
+#define PG_lru                  5
+#define PG_active               6
+#define PG_slab                         7      /* slab debug (Suparna wants this) */
+
+#define PG_highmem              8
+#define PG_checked              9      /* kill me in 2.5.<early>. */
+#define PG_arch_1              10
+#define PG_reserved            11
+
+#define PG_private             12      /* Has something at ->private */
+#define PG_writeback           13      /* Page is under writeback */
+#define PG_nosave              14      /* Used for system suspend/resume */
+#define PG_compound            15      /* Part of a compound page */
+
+#define PG_swapcache           16      /* Swap page: swp_entry_t in private */
+#define PG_mappedtodisk                17      /* Has blocks allocated on-disk */
+#define PG_reclaim             18      /* To be reclaimed asap */
+#define PG_nosave_free         19      /* Free, should not be written */
+
+
+/*
+ * Global page accounting.  One instance per CPU.  Only unsigned longs are
+ * allowed.
+ */
+struct page_state {
+       unsigned long nr_dirty;         /* Dirty writeable pages */
+       unsigned long nr_writeback;     /* Pages under writeback */
+       unsigned long nr_unstable;      /* NFS unstable pages */
+       unsigned long nr_page_table_pages;/* Pages used for pagetables */
+       unsigned long nr_mapped;        /* mapped into pagetables */
+       unsigned long nr_slab;          /* In slab */
+#define GET_PAGE_STATE_LAST nr_slab
+
+       /*
+        * The below are zeroed by get_page_state().  Use get_full_page_state()
+        * to add up all these.
+        */
+       unsigned long pgpgin;           /* Disk reads */
+       unsigned long pgpgout;          /* Disk writes */
+       unsigned long pswpin;           /* swap reads */
+       unsigned long pswpout;          /* swap writes */
+       unsigned long pgalloc_high;     /* page allocations */
+
+       unsigned long pgalloc_normal;
+       unsigned long pgalloc_dma;
+       unsigned long pgfree;           /* page freeings */
+       unsigned long pgactivate;       /* pages moved inactive->active */
+       unsigned long pgdeactivate;     /* pages moved active->inactive */
+
+       unsigned long pgfault;          /* faults (major+minor) */
+       unsigned long pgmajfault;       /* faults (major only) */
+       unsigned long pgrefill_high;    /* inspected in refill_inactive_zone */
+       unsigned long pgrefill_normal;
+       unsigned long pgrefill_dma;
+
+       unsigned long pgsteal_high;     /* total highmem pages reclaimed */
+       unsigned long pgsteal_normal;
+       unsigned long pgsteal_dma;
+       unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
+       unsigned long pgscan_kswapd_normal;
+
+       unsigned long pgscan_kswapd_dma;
+       unsigned long pgscan_direct_high;/* total highmem pages scanned */
+       unsigned long pgscan_direct_normal;
+       unsigned long pgscan_direct_dma;
+       unsigned long pginodesteal;     /* pages reclaimed via inode freeing */
+
+       unsigned long slabs_scanned;    /* slab objects scanned */
+       unsigned long kswapd_steal;     /* pages reclaimed by kswapd */
+       unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
+       unsigned long pageoutrun;       /* kswapd's calls to page reclaim */
+       unsigned long allocstall;       /* direct reclaim calls */
+
+       unsigned long pgrotated;        /* pages rotated to tail of the LRU */
+};
+
+extern void get_page_state(struct page_state *ret);
+extern void get_full_page_state(struct page_state *ret);
+extern unsigned long __read_page_state(unsigned offset);
+extern void __mod_page_state(unsigned offset, unsigned long delta);
+
+#define read_page_state(member) \
+       __read_page_state(offsetof(struct page_state, member))
+
+#define mod_page_state(member, delta)  \
+       __mod_page_state(offsetof(struct page_state, member), (delta))
+
+#define inc_page_state(member) mod_page_state(member, 1UL)
+#define dec_page_state(member) mod_page_state(member, 0UL - 1)
+#define add_page_state(member,delta) mod_page_state(member, (delta))
+#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))
+
+#define mod_page_state_zone(zone, member, delta)                               \
+       do {                                                                    \
+               unsigned offset;                                                \
+               if (is_highmem(zone))                                           \
+                       offset = offsetof(struct page_state, member##_high);    \
+               else if (is_normal(zone))                                       \
+                       offset = offsetof(struct page_state, member##_normal);  \
+               else                                                            \
+                       offset = offsetof(struct page_state, member##_dma);     \
+               __mod_page_state(offset, (delta));                              \
+       } while (0)
+
+/*
+ * Manipulation of page state flags
+ */
+#define PageLocked(page)               \
+               test_bit(PG_locked, &(page)->flags)
+#define SetPageLocked(page)            \
+               set_bit(PG_locked, &(page)->flags)
+#define TestSetPageLocked(page)                \
+               test_and_set_bit(PG_locked, &(page)->flags)
+#define ClearPageLocked(page)          \
+               clear_bit(PG_locked, &(page)->flags)
+#define TestClearPageLocked(page)      \
+               test_and_clear_bit(PG_locked, &(page)->flags)
+
+#define PageError(page)                test_bit(PG_error, &(page)->flags)
+#define SetPageError(page)     set_bit(PG_error, &(page)->flags)
+#define ClearPageError(page)   clear_bit(PG_error, &(page)->flags)
+
+#define PageReferenced(page)   test_bit(PG_referenced, &(page)->flags)
+#define SetPageReferenced(page)        set_bit(PG_referenced, &(page)->flags)
+#define ClearPageReferenced(page)      clear_bit(PG_referenced, &(page)->flags)
+#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
+
+#define PageUptodate(page)     test_bit(PG_uptodate, &(page)->flags)
+#ifndef SetPageUptodate
+#define SetPageUptodate(page)  set_bit(PG_uptodate, &(page)->flags)
+#endif
+#define ClearPageUptodate(page)        clear_bit(PG_uptodate, &(page)->flags)
+
+#define PageDirty(page)                test_bit(PG_dirty, &(page)->flags)
+#define SetPageDirty(page)     set_bit(PG_dirty, &(page)->flags)
+#define TestSetPageDirty(page) test_and_set_bit(PG_dirty, &(page)->flags)
+#define ClearPageDirty(page)   clear_bit(PG_dirty, &(page)->flags)
+#define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags)
+
+#define SetPageLRU(page)       set_bit(PG_lru, &(page)->flags)
+#define PageLRU(page)          test_bit(PG_lru, &(page)->flags)
+#define TestSetPageLRU(page)   test_and_set_bit(PG_lru, &(page)->flags)
+#define TestClearPageLRU(page) test_and_clear_bit(PG_lru, &(page)->flags)
+
+#define PageActive(page)       test_bit(PG_active, &(page)->flags)
+#define SetPageActive(page)    set_bit(PG_active, &(page)->flags)
+#define ClearPageActive(page)  clear_bit(PG_active, &(page)->flags)
+#define TestClearPageActive(page) test_and_clear_bit(PG_active, &(page)->flags)
+#define TestSetPageActive(page) test_and_set_bit(PG_active, &(page)->flags)
+
+#define PageSlab(page)         test_bit(PG_slab, &(page)->flags)
+#define SetPageSlab(page)      set_bit(PG_slab, &(page)->flags)
+#define ClearPageSlab(page)    clear_bit(PG_slab, &(page)->flags)
+#define TestClearPageSlab(page)        test_and_clear_bit(PG_slab, &(page)->flags)
+#define TestSetPageSlab(page)  test_and_set_bit(PG_slab, &(page)->flags)
+
+#ifdef CONFIG_HIGHMEM
+#define PageHighMem(page)      test_bit(PG_highmem, &(page)->flags)
+#else
+#define PageHighMem(page)      0 /* needed to optimize away at compile time */
+#endif
+
+#define PageChecked(page)      test_bit(PG_checked, &(page)->flags)
+#define SetPageChecked(page)   set_bit(PG_checked, &(page)->flags)
+#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
+
+#define PageReserved(page)     test_bit(PG_reserved, &(page)->flags)
+#define SetPageReserved(page)  set_bit(PG_reserved, &(page)->flags)
+#define ClearPageReserved(page)        clear_bit(PG_reserved, &(page)->flags)
+#define __ClearPageReserved(page)      __clear_bit(PG_reserved, &(page)->flags)
+
+#define SetPagePrivate(page)   set_bit(PG_private, &(page)->flags)
+#define ClearPagePrivate(page) clear_bit(PG_private, &(page)->flags)
+#define PagePrivate(page)      test_bit(PG_private, &(page)->flags)
+#define __SetPagePrivate(page)  __set_bit(PG_private, &(page)->flags)
+#define __ClearPagePrivate(page) __clear_bit(PG_private, &(page)->flags)
+
+#define PageWriteback(page)    test_bit(PG_writeback, &(page)->flags)
+#define SetPageWriteback(page)                                         \
+       do {                                                            \
+               if (!test_and_set_bit(PG_writeback,                     \
+                               &(page)->flags))                        \
+                       inc_page_state(nr_writeback);                   \
+       } while (0)
+#define TestSetPageWriteback(page)                                     \
+       ({                                                              \
+               int ret;                                                \
+               ret = test_and_set_bit(PG_writeback,                    \
+                                       &(page)->flags);                \
+               if (!ret)                                               \
+                       inc_page_state(nr_writeback);                   \
+               ret;                                                    \
+       })
+#define ClearPageWriteback(page)                                       \
+       do {                                                            \
+               if (test_and_clear_bit(PG_writeback,                    \
+                               &(page)->flags))                        \
+                       dec_page_state(nr_writeback);                   \
+       } while (0)
+#define TestClearPageWriteback(page)                                   \
+       ({                                                              \
+               int ret;                                                \
+               ret = test_and_clear_bit(PG_writeback,                  \
+                               &(page)->flags);                        \
+               if (ret)                                                \
+                       dec_page_state(nr_writeback);                   \
+               ret;                                                    \
+       })
+
+#define PageNosave(page)       test_bit(PG_nosave, &(page)->flags)
+#define SetPageNosave(page)    set_bit(PG_nosave, &(page)->flags)
+#define TestSetPageNosave(page)        test_and_set_bit(PG_nosave, &(page)->flags)
+#define ClearPageNosave(page)          clear_bit(PG_nosave, &(page)->flags)
+#define TestClearPageNosave(page)      test_and_clear_bit(PG_nosave, &(page)->flags)
+
+#define PageNosaveFree(page)   test_bit(PG_nosave_free, &(page)->flags)
+#define SetPageNosaveFree(page)        set_bit(PG_nosave_free, &(page)->flags)
+#define ClearPageNosaveFree(page)              clear_bit(PG_nosave_free, &(page)->flags)
+
+#define PageMappedToDisk(page) test_bit(PG_mappedtodisk, &(page)->flags)
+#define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags)
+#define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags)
+
+#define PageReclaim(page)      test_bit(PG_reclaim, &(page)->flags)
+#define SetPageReclaim(page)   set_bit(PG_reclaim, &(page)->flags)
+#define ClearPageReclaim(page) clear_bit(PG_reclaim, &(page)->flags)
+#define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define PageCompound(page)     test_bit(PG_compound, &(page)->flags)
+#else
+#define PageCompound(page)     0
+#endif
+#define SetPageCompound(page)  set_bit(PG_compound, &(page)->flags)
+#define ClearPageCompound(page)        clear_bit(PG_compound, &(page)->flags)
+
+#ifdef CONFIG_SWAP
+#define PageSwapCache(page)    test_bit(PG_swapcache, &(page)->flags)
+#define SetPageSwapCache(page) set_bit(PG_swapcache, &(page)->flags)
+#define ClearPageSwapCache(page) clear_bit(PG_swapcache, &(page)->flags)
+#else
+#define PageSwapCache(page)    0
+#endif
+
+struct page;   /* forward declaration */
+
+int test_clear_page_dirty(struct page *page);
+int __clear_page_dirty(struct page *page);
+int test_clear_page_writeback(struct page *page);
+int test_set_page_writeback(struct page *page);
+
+static inline void clear_page_dirty(struct page *page)
+{
+       test_clear_page_dirty(page);
+}
+
+static inline void set_page_writeback(struct page *page)
+{
+       test_set_page_writeback(page);
+}
+
+#endif /* PAGE_FLAGS_H */
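
The accessors above are thin wrappers around atomic bitops on page->flags, and a few of them (the PG_writeback ones) also maintain the per-CPU page_state counters. A kernel-context sketch of the intended usage (hypothetical helper names, not compilable stand-alone):

#include <linux/page-flags.h>

/* Record that a page has been dirtied, bumping nr_dirty only on the
 * first clean->dirty transition. */
static void note_page_dirtied(struct page *page)
{
        if (!TestSetPageDirty(page))
                inc_page_state(nr_dirty);
}

/* Mark a page as under I/O; SetPageWriteback() already increments
 * nr_writeback, and ClearPageWriteback() decrements it when I/O completes. */
static void note_io_start(struct page *page)
{
        SetPageWriteback(page);
}

static void note_io_done(struct page *page, int error)
{
        if (error)
                SetPageError(page);
        else
                SetPageUptodate(page);
        ClearPageWriteback(page);
}
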
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/percpu.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/percpu.h       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,61 @@
+#ifndef __LINUX_PERCPU_H
+#define __LINUX_PERCPU_H
+#include <linux/spinlock.h> /* For preempt_disable() */
+#include <linux/slab.h> /* For kmalloc() */
+#include <linux/smp.h>
+#include <linux/string.h> /* For memset() */
+#include <asm/percpu.h>
+
+/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
+#ifndef PERCPU_ENOUGH_ROOM
+#define PERCPU_ENOUGH_ROOM 32768
+#endif
+
+/* Must be an lvalue. */
+#define get_cpu_var(var) (*({ preempt_disable(); &__get_cpu_var(var); }))
+#define put_cpu_var(var) preempt_enable()
+
+#ifdef CONFIG_SMP
+
+struct percpu_data {
+       void *ptrs[NR_CPUS];
+       void *blkp;
+};
+
+/* 
+ * Use this to get to a cpu's version of the per-cpu object allocated using
+ * alloc_percpu.  Non-atomic access to the current CPU's version should
+ * probably be combined with get_cpu()/put_cpu().
+ */ 
+#define per_cpu_ptr(ptr, cpu)                   \
+({                                              \
+        struct percpu_data *__p = (struct percpu_data *)~(unsigned long)(ptr); \
+        (__typeof__(ptr))__p->ptrs[(cpu)];     \
+})
+
+extern void *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(const void *);
+
+#else /* CONFIG_SMP */
+
+#define per_cpu_ptr(ptr, cpu) (ptr)
+
+static inline void *__alloc_percpu(size_t size, size_t align)
+{
+       void *ret = kmalloc(size, GFP_KERNEL);
+       if (ret)
+               memset(ret, 0, size);
+       return ret;
+}
+static inline void free_percpu(const void *ptr)
+{      
+       kfree(ptr);
+}
+
+#endif /* CONFIG_SMP */
+
+/* Simple wrapper for the common case: zeros memory. */
+#define alloc_percpu(type) \
+       ((type *)(__alloc_percpu(sizeof(type), __alignof__(type))))
+
+#endif /* __LINUX_PERCPU_H */
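
The per_cpu_ptr() comment above says that non-atomic access to the current CPU's copy should be combined with get_cpu()/put_cpu(); those come from <linux/smp.h>, which this header already includes. A kernel-context sketch of a dynamically allocated per-CPU counter (hypothetical names, not compilable stand-alone):

#include <linux/percpu.h>
#include <linux/smp.h>

static unsigned long *hit_counters;     /* one counter per CPU, zero-filled */

static int hit_counters_init(void)
{
        hit_counters = alloc_percpu(unsigned long);
        return hit_counters ? 0 : -1;
}

static void count_hit(void)
{
        int cpu = get_cpu();            /* disables preemption, pins us here */

        (*per_cpu_ptr(hit_counters, cpu))++;
        put_cpu();
}

static void hit_counters_exit(void)
{
        free_percpu(hit_counters);
}
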
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/preempt.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/preempt.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,62 @@
+#ifndef __LINUX_PREEMPT_H
+#define __LINUX_PREEMPT_H
+
+/*
+ * include/linux/preempt.h - macros for accessing and manipulating
+ * preempt_count (used for kernel preemption, interrupt count, etc.)
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+
+#ifdef CONFIG_DEBUG_PREEMPT
+  extern void fastcall add_preempt_count(int val);
+  extern void fastcall sub_preempt_count(int val);
+#else
+# define add_preempt_count(val)        do { preempt_count() += (val); } while (0)
+# define sub_preempt_count(val)        do { preempt_count() -= (val); } while (0)
+#endif
+
+#define inc_preempt_count() add_preempt_count(1)
+#define dec_preempt_count() sub_preempt_count(1)
+
+#define preempt_count()        (current_thread_info()->preempt_count)
+
+#ifdef CONFIG_PREEMPT
+
+asmlinkage void preempt_schedule(void);
+
+#define preempt_disable() \
+do { \
+       inc_preempt_count(); \
+       barrier(); \
+} while (0)
+
+#define preempt_enable_no_resched() \
+do { \
+       barrier(); \
+       dec_preempt_count(); \
+} while (0)
+
+#define preempt_check_resched() \
+do { \
+       if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+               preempt_schedule(); \
+} while (0)
+
+#define preempt_enable() \
+do { \
+       preempt_enable_no_resched(); \
+       preempt_check_resched(); \
+} while (0)
+
+#else
+
+#define preempt_disable()              do { } while (0)
+#define preempt_enable_no_resched()    do { } while (0)
+#define preempt_enable()               do { } while (0)
+#define preempt_check_resched()                do { } while (0)
+
+#endif
+
+#endif /* __LINUX_PREEMPT_H */
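
preempt_disable()/preempt_enable() bracket a section in which the current task must not be preempted (and therefore cannot migrate to another CPU), which is exactly what touching CPU-local state requires. A kernel-context sketch (hypothetical scratch array, not compilable stand-alone):

#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/threads.h>

static int scratch[NR_CPUS];            /* hypothetical CPU-local scratch slots */

static void touch_local_scratch(int v)
{
        preempt_disable();              /* from here on we stay on this CPU */
        scratch[smp_processor_id()] = v;
        preempt_enable();               /* may reschedule if preemption was pending */
}
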
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/rbtree.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/rbtree.h       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,141 @@
+/*
+  Red Black Trees
+  (C) 1999  Andrea Arcangeli <andrea@xxxxxxx>
+  
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+  linux/include/linux/rbtree.h
+
+  To use rbtrees you'll have to implement your own insert and search cores.
+  This avoids callbacks and the dramatic performance penalty they would cost.
+  I know it's not the cleanest way, but this is what it takes in C (as opposed
+  to C++) to get both performance and genericity...
+
+  Some examples of insert and search follow. The search is a plain,
+  normal search over an ordered tree. The insert, however, must be done
+  in two steps: first the code must insert the element in order as a
+  red leaf in the tree, then the support library function
+  rb_insert_color() must be called. That function does the non-trivial
+  work of rebalancing the rbtree if necessary.
+
+-----------------------------------------------------------------------
+static inline struct page * rb_search_page_cache(struct inode * inode,
+                                                unsigned long offset)
+{
+       struct rb_node * n = inode->i_rb_page_cache.rb_node;
+       struct page * page;
+
+       while (n)
+       {
+               page = rb_entry(n, struct page, rb_page_cache);
+
+               if (offset < page->offset)
+                       n = n->rb_left;
+               else if (offset > page->offset)
+                       n = n->rb_right;
+               else
+                       return page;
+       }
+       return NULL;
+}
+
+static inline struct page * __rb_insert_page_cache(struct inode * inode,
+                                                  unsigned long offset,
+                                                  struct rb_node * node)
+{
+       struct rb_node ** p = &inode->i_rb_page_cache.rb_node;
+       struct rb_node * parent = NULL;
+       struct page * page;
+
+       while (*p)
+       {
+               parent = *p;
+               page = rb_entry(parent, struct page, rb_page_cache);
+
+               if (offset < page->offset)
+                       p = &(*p)->rb_left;
+               else if (offset > page->offset)
+                       p = &(*p)->rb_right;
+               else
+                       return page;
+       }
+
+       rb_link_node(node, parent, p);
+
+       return NULL;
+}
+
+static inline struct page * rb_insert_page_cache(struct inode * inode,
+                                                unsigned long offset,
+                                                struct rb_node * node)
+{
+       struct page * ret;
+       if ((ret = __rb_insert_page_cache(inode, offset, node)))
+               goto out;
+       rb_insert_color(node, &inode->i_rb_page_cache);
+ out:
+       return ret;
+}
+-----------------------------------------------------------------------
+*/
+
+#ifndef        _LINUX_RBTREE_H
+#define        _LINUX_RBTREE_H
+
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+
+struct rb_node
+{
+       struct rb_node *rb_parent;
+       int rb_color;
+#define        RB_RED          0
+#define        RB_BLACK        1
+       struct rb_node *rb_right;
+       struct rb_node *rb_left;
+};
+
+struct rb_root
+{
+       struct rb_node *rb_node;
+};
+
+#define RB_ROOT        (struct rb_root) { NULL, }
+#define        rb_entry(ptr, type, member) container_of(ptr, type, member)
+
+extern void rb_insert_color(struct rb_node *, struct rb_root *);
+extern void rb_erase(struct rb_node *, struct rb_root *);
+
+/* Find logical next and previous nodes in a tree */
+extern struct rb_node *rb_next(struct rb_node *);
+extern struct rb_node *rb_prev(struct rb_node *);
+extern struct rb_node *rb_first(struct rb_root *);
+extern struct rb_node *rb_last(struct rb_root *);
+
+/* Fast replacement of a single node without remove/rebalance/add/rebalance */
+extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, 
+                           struct rb_root *root);
+
+static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
+                               struct rb_node ** rb_link)
+{
+       node->rb_parent = parent;
+       node->rb_color = RB_RED;
+       node->rb_left = node->rb_right = NULL;
+
+       *rb_link = node;
+}
+
+#endif /* _LINUX_RBTREE_H */
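
The header's own comment shows the insert and search cores; as a complement, here is a minimal sketch of an in-order walk with rb_first()/rb_next() over a hypothetical node type:

#include <linux/rbtree.h>

/* Hypothetical node type embedding an rb_node, keyed by 'key'. */
struct my_item {
        struct rb_node node;
        unsigned long key;
};

/* Sum all keys by walking the tree in sorted order. */
static unsigned long my_tree_sum(struct rb_root *root)
{
        struct rb_node *n;
        unsigned long sum = 0;

        for (n = rb_first(root); n; n = rb_next(n))
                sum += rb_entry(n, struct my_item, node)->key;
        return sum;
}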
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/rwsem.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/rwsem.h        Tue Aug  2 23:59:09 2005
@@ -0,0 +1,115 @@
+/* rwsem.h: R/W semaphores, public interface
+ *
+ * Written by David Howells (dhowells@xxxxxxxxxx).
+ * Derived from asm-i386/semaphore.h
+ */
+
+#ifndef _LINUX_RWSEM_H
+#define _LINUX_RWSEM_H
+
+#include <linux/linkage.h>
+
+#define RWSEM_DEBUG 0
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+
+struct rw_semaphore;
+
+#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
+#include <linux/rwsem-spinlock.h> /* use a generic implementation */
+#else
+#include <asm/rwsem.h> /* use an arch-specific implementation */
+#endif
+
+#ifndef rwsemtrace
+#if RWSEM_DEBUG
+extern void FASTCALL(rwsemtrace(struct rw_semaphore *sem, const char *str));
+#else
+#define rwsemtrace(SEM,FMT)
+#endif
+#endif
+
+/*
+ * lock for reading
+ */
+static inline void down_read(struct rw_semaphore *sem)
+{
+       might_sleep();
+       rwsemtrace(sem,"Entering down_read");
+       __down_read(sem);
+       rwsemtrace(sem,"Leaving down_read");
+}
+
+/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int down_read_trylock(struct rw_semaphore *sem)
+{
+       int ret;
+       rwsemtrace(sem,"Entering down_read_trylock");
+       ret = __down_read_trylock(sem);
+       rwsemtrace(sem,"Leaving down_read_trylock");
+       return ret;
+}
+
+/*
+ * lock for writing
+ */
+static inline void down_write(struct rw_semaphore *sem)
+{
+       might_sleep();
+       rwsemtrace(sem,"Entering down_write");
+       __down_write(sem);
+       rwsemtrace(sem,"Leaving down_write");
+}
+
+/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int down_write_trylock(struct rw_semaphore *sem)
+{
+       int ret;
+       rwsemtrace(sem,"Entering down_write_trylock");
+       ret = __down_write_trylock(sem);
+       rwsemtrace(sem,"Leaving down_write_trylock");
+       return ret;
+}
+
+/*
+ * release a read lock
+ */
+static inline void up_read(struct rw_semaphore *sem)
+{
+       rwsemtrace(sem,"Entering up_read");
+       __up_read(sem);
+       rwsemtrace(sem,"Leaving up_read");
+}
+
+/*
+ * release a write lock
+ */
+static inline void up_write(struct rw_semaphore *sem)
+{
+       rwsemtrace(sem,"Entering up_write");
+       __up_write(sem);
+       rwsemtrace(sem,"Leaving up_write");
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void downgrade_write(struct rw_semaphore *sem)
+{
+       rwsemtrace(sem,"Entering downgrade_write");
+       __downgrade_write(sem);
+       rwsemtrace(sem,"Leaving downgrade_write");
+}
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_RWSEM_H */
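
A minimal reader/writer sketch of the interface above (hypothetical names; assumes the DECLARE_RWSEM() static initializer provided by the underlying spinlock or arch implementation header):

#include <linux/rwsem.h>

/* Hypothetical shared configuration value:
 * many concurrent readers, exclusive writers. */
static DECLARE_RWSEM(cfg_sem);
static int cfg_value;

static int cfg_get(void)
{
        int v;

        down_read(&cfg_sem);            /* shared: other readers may enter */
        v = cfg_value;
        up_read(&cfg_sem);
        return v;
}

static void cfg_set(int v)
{
        down_write(&cfg_sem);           /* exclusive: blocks readers and writers */
        cfg_value = v;
        up_write(&cfg_sem);
}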
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/seqlock.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/seqlock.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,175 @@
+#ifndef __LINUX_SEQLOCK_H
+#define __LINUX_SEQLOCK_H
+/*
+ * Reader/writer consistent mechanism without starving writers. This type of
+ * lock is for data where the reader wants a consistent set of information
+ * and is willing to retry if the information changes.  Readers never
+ * block, but they may have to retry if a writer is in
+ * progress. Writers do not wait for readers.
+ *
+ * This is not as cache friendly as brlock. Also, this will not work
+ * for data that contains pointers, because any writer could
+ * invalidate a pointer that a reader was following.
+ *
+ * Expected reader usage:
+ *     do {
+ *         seq = read_seqbegin(&foo);
+ *     ...
+ *      } while (read_seqretry(&foo, seq));
+ *
+ *
+ * On non-SMP the spin locks disappear but the writer still needs
+ * to increment the sequence variables because an interrupt routine could
+ * change the state of the data.
+ *
+ * Based on x86_64 vsyscall gettimeofday 
+ * by Keith Owens and Andrea Arcangeli
+ */
+
+#include <linux/config.h>
+#include <linux/spinlock.h>
+#include <linux/preempt.h>
+
+typedef struct {
+       unsigned sequence;
+       spinlock_t lock;
+} seqlock_t;
+
+/*
+ * These macros triggered gcc-3.x compile-time problems.  We think these are
+ * OK now.  Be cautious.
+ */
+#define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED }
+#define seqlock_init(x)        do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0)
+
+
+/* Lock out other writers and update the count.
+ * Acts like a normal spin_lock/unlock.
+ * Don't need preempt_disable() because that is in the spin_lock already.
+ */
+static inline void write_seqlock(seqlock_t *sl)
+{
+       spin_lock(&sl->lock);
+       ++sl->sequence;
+       smp_wmb();                      
+}      
+
+static inline void write_sequnlock(seqlock_t *sl) 
+{
+       smp_wmb();
+       sl->sequence++;
+       spin_unlock(&sl->lock);
+}
+
+static inline int write_tryseqlock(seqlock_t *sl)
+{
+       int ret = spin_trylock(&sl->lock);
+
+       if (ret) {
+               ++sl->sequence;
+               smp_wmb();                      
+       }
+       return ret;
+}
+
+/* Start of read calculation -- fetch last complete writer token */
+static inline unsigned read_seqbegin(const seqlock_t *sl)
+{
+       unsigned ret = sl->sequence;
+       smp_rmb();
+       return ret;
+}
+
+/* Test if reader processed invalid data.
+ * If the initial value is odd,
+ *     then a writer had already started when the section was entered.
+ * If the sequence value changed,
+ *     then a writer changed the data while in the section.
+ *    
+ * Using xor saves one conditional branch.
+ */
+static inline int read_seqretry(const seqlock_t *sl, unsigned iv)
+{
+       smp_rmb();
+       return (iv & 1) | (sl->sequence ^ iv);
+}
+
+
+/*
+ * Version using sequence counter only.
+ * This can be used when the code has its own mutex protecting the
+ * update, starting before write_seqcount_begin() and ending
+ * after write_seqcount_end().
+ */
+
+typedef struct seqcount {
+       unsigned sequence;
+} seqcount_t;
+
+#define SEQCNT_ZERO { 0 }
+#define seqcount_init(x)       do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
+
+/* Start of read using pointer to a sequence counter only.  */
+static inline unsigned read_seqcount_begin(const seqcount_t *s)
+{
+       unsigned ret = s->sequence;
+       smp_rmb();
+       return ret;
+}
+
+/* Test if reader processed invalid data.
+ * Equivalent to: iv is odd or sequence number has changed.
+ *                (iv & 1) || (*s != iv)
+ * Using xor saves one conditional branch.
+ */
+static inline int read_seqcount_retry(const seqcount_t *s, unsigned iv)
+{
+       smp_rmb();
+       return (iv & 1) | (s->sequence ^ iv);
+}
+
+
+/*
+ * Sequence counter only version assumes that callers are using their
+ * own mutexing.
+ */
+static inline void write_seqcount_begin(seqcount_t *s)
+{
+       s->sequence++;
+       smp_wmb();
+}
+
+static inline void write_seqcount_end(seqcount_t *s)
+{
+       smp_wmb();
+       s->sequence++;
+}
+
+/*
+ * Possible sw/hw IRQ protected versions of the interfaces.
+ */
+#define write_seqlock_irqsave(lock, flags)                             \
+       do { local_irq_save(flags); write_seqlock(lock); } while (0)
+#define write_seqlock_irq(lock)                                                \
+       do { local_irq_disable();   write_seqlock(lock); } while (0)
+#define write_seqlock_bh(lock)                                         \
+        do { local_bh_disable();    write_seqlock(lock); } while (0)
+
+#define write_sequnlock_irqrestore(lock, flags)                                \
+       do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
+#define write_sequnlock_irq(lock)                                      \
+       do { write_sequnlock(lock); local_irq_enable(); } while(0)
+#define write_sequnlock_bh(lock)                                       \
+       do { write_sequnlock(lock); local_bh_enable(); } while(0)
+
+#define read_seqbegin_irqsave(lock, flags)                             \
+       ({ local_irq_save(flags);   read_seqbegin(lock); })
+
+#define read_seqretry_irqrestore(lock, iv, flags)                      \
+       ({                                                              \
+               int ret = read_seqretry(lock, iv);                      \
+               local_irq_restore(flags);                               \
+               ret;                                                    \
+       })
+
+#endif /* __LINUX_SEQLOCK_H */
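
A minimal sketch of the writer/reader pattern the header comment describes, using only the interfaces declared above (hypothetical timestamp variables):

#include <linux/seqlock.h>

/* Hypothetical two-word timestamp published by a writer (e.g. the timer
 * tick) and read locklessly by many readers. */
static seqlock_t ts_lock = SEQLOCK_UNLOCKED;
static unsigned long ts_high, ts_low;

static void ts_publish(unsigned long hi, unsigned long lo)
{
        write_seqlock(&ts_lock);        /* excludes other writers */
        ts_high = hi;
        ts_low = lo;
        write_sequnlock(&ts_lock);
}

static void ts_read(unsigned long *hi, unsigned long *lo)
{
        unsigned seq;

        do {
                seq = read_seqbegin(&ts_lock);
                *hi = ts_high;
                *lo = ts_low;
        } while (read_seqretry(&ts_lock, seq)); /* retry if a writer intervened */
}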
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/slab.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/slab.h Tue Aug  2 23:59:09 2005
@@ -0,0 +1,3 @@
+#include <xen/xmalloc.h>
+#include <linux/gfp.h>
+#include <asm/delay.h>
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/stddef.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/stddef.h       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,20 @@
+#ifndef _LINUX_STDDEF_H
+#define _LINUX_STDDEF_H
+
+#include <linux/compiler.h>
+
+#undef NULL
+#if defined(__cplusplus)
+#define NULL 0
+#else
+#define NULL ((void *)0)
+#endif
+
+#undef offsetof
+#ifdef __compiler_offsetof
+#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER)
+#else
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#endif
+
+#endif
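
As a small illustration of offsetof() from the header above (hypothetical structure; this is essentially the container_of idiom):

#include <linux/stddef.h>

struct frame {
        char tag;
        int  payload;
};

/* offsetof() yields the byte offset of a member within its structure;
 * here it recovers the enclosing frame from a pointer to its payload. */
static struct frame *frame_of(int *payload_ptr)
{
        return (struct frame *)((char *)payload_ptr -
                                offsetof(struct frame, payload));
}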
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/thread_info.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/thread_info.h  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,92 @@
+/* thread_info.h: common low-level thread information accessors
+ *
+ * Copyright (C) 2002  David Howells (dhowells@xxxxxxxxxx)
+ * - Incorporating suggestions made by Linus Torvalds
+ */
+
+#ifndef _LINUX_THREAD_INFO_H
+#define _LINUX_THREAD_INFO_H
+
+/*
+ * System call restart block. 
+ */
+struct restart_block {
+       long (*fn)(struct restart_block *);
+       unsigned long arg0, arg1, arg2, arg3;
+};
+
+extern long do_no_restart_syscall(struct restart_block *parm);
+
+#include <linux/bitops.h>
+#include <asm/thread_info.h>
+
+#ifdef __KERNEL__
+
+/*
+ * flag set/clear/test wrappers
+ * - pass TIF_xxxx constants to these functions
+ */
+
+static inline void set_thread_flag(int flag)
+{
+       set_bit(flag,&current_thread_info()->flags);
+}
+
+static inline void clear_thread_flag(int flag)
+{
+       clear_bit(flag,&current_thread_info()->flags);
+}
+
+static inline int test_and_set_thread_flag(int flag)
+{
+       return test_and_set_bit(flag,&current_thread_info()->flags);
+}
+
+static inline int test_and_clear_thread_flag(int flag)
+{
+       return test_and_clear_bit(flag,&current_thread_info()->flags);
+}
+
+static inline int test_thread_flag(int flag)
+{
+       return test_bit(flag,&current_thread_info()->flags);
+}
+
+static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
+{
+       set_bit(flag,&ti->flags);
+}
+
+static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
+{
+       clear_bit(flag,&ti->flags);
+}
+
+static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
+{
+       return test_and_set_bit(flag,&ti->flags);
+}
+
+static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
+{
+       return test_and_clear_bit(flag,&ti->flags);
+}
+
+static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
+{
+       return test_bit(flag,&ti->flags);
+}
+
+static inline void set_need_resched(void)
+{
+       set_thread_flag(TIF_NEED_RESCHED);
+}
+
+static inline void clear_need_resched(void)
+{
+       clear_thread_flag(TIF_NEED_RESCHED);
+}
+
+#endif
+
+#endif /* _LINUX_THREAD_INFO_H */
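
A minimal sketch of the flag accessors above in their most common role, a long-running loop that yields when the scheduler has flagged the current task (hypothetical work loop; TIF_NEED_RESCHED comes from <asm/thread_info.h>, schedule() from <linux/sched.h>):

#include <linux/thread_info.h>
#include <linux/sched.h>

static void process_many_items(int nitems)
{
        int i;

        for (i = 0; i < nitems; i++) {
                /* ... do one unit of work ... */

                if (test_thread_flag(TIF_NEED_RESCHED))
                        schedule();     /* give up the CPU; the scheduler clears the flag */
        }
}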
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/threads.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/threads.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,35 @@
+#ifndef _LINUX_THREADS_H
+#define _LINUX_THREADS_H
+
+#include <linux/config.h>
+
+/*
+ * The default limit for the nr of threads is now in
+ * /proc/sys/kernel/threads-max.
+ */
+ 
+/*
+ * Maximum supported processors that can run under SMP.  This value is
+ * set via configure setting.  The maximum is equal to the size of the
+ * bitmasks used on that platform, i.e. 32 or 64.  Setting this smaller
+ * saves quite a bit of memory.
+ */
+#ifdef CONFIG_SMP
+#define NR_CPUS                CONFIG_NR_CPUS
+#else
+#define NR_CPUS                1
+#endif
+
+#define MIN_THREADS_LEFT_FOR_ROOT 4
+
+/*
+ * This controls the default maximum pid allocated to a process
+ */
+#define PID_MAX_DEFAULT 0x8000
+
+/*
+ * A maximum of 4 million PIDs should be enough for a while:
+ */
+#define PID_MAX_LIMIT (sizeof(long) > 4 ? 4*1024*1024 : PID_MAX_DEFAULT)
+
+#endif
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/timex.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/timex.h        Tue Aug  2 23:59:09 2005
@@ -0,0 +1,322 @@
+/*****************************************************************************
+ *                                                                           *
+ * Copyright (c) David L. Mills 1993                                         *
+ *                                                                           *
+ * Permission to use, copy, modify, and distribute this software and its     *
+ * documentation for any purpose and without fee is hereby granted, provided *
+ * that the above copyright notice appears in all copies and that both the   *
+ * copyright notice and this permission notice appear in supporting          *
+ * documentation, and that the name University of Delaware not be used in    *
+ * advertising or publicity pertaining to distribution of the software       *
+ * without specific, written prior permission.  The University of Delaware   *
+ * makes no representations about the suitability this software for any      *
+ * purpose.  It is provided "as is" without express or implied warranty.     *
+ *                                                                           *
+ *****************************************************************************/
+
+/*
+ * Modification history timex.h
+ *
+ * 29 Dec 97   Russell King
+ *     Moved CLOCK_TICK_RATE, CLOCK_TICK_FACTOR and FINETUNE to asm/timex.h
+ *     for ARM machines
+ *
+ *  9 Jan 97    Adrian Sun
+ *      Shifted LATCH define to allow access to alpha machines.
+ *
+ * 26 Sep 94   David L. Mills
+ *     Added defines for hybrid phase/frequency-lock loop.
+ *
+ * 19 Mar 94   David L. Mills
+ *     Moved defines from kernel routines to header file and added new
+ *     defines for PPS phase-lock loop.
+ *
+ * 20 Feb 94   David L. Mills
+ *     Revised status codes and structures for external clock and PPS
+ *     signal discipline.
+ *
+ * 28 Nov 93   David L. Mills
+ *     Adjusted parameters to improve stability and increase poll
+ *     interval.
+ *
+ * 17 Sep 93    David L. Mills
+ *      Created file $NTP/include/sys/timex.h
+ * 07 Oct 93    Torsten Duwe
+ *      Derived linux/timex.h
+ * 1995-08-13    Torsten Duwe
+ *      kernel PLL updated to 1994-12-13 specs (rfc-1589)
+ * 1997-08-30    Ulrich Windl
+ *      Added new constant NTP_PHASE_LIMIT
+ * 2004-08-12    Christoph Lameter
+ *      Reworked time interpolation logic
+ */
+#ifndef _LINUX_TIMEX_H
+#define _LINUX_TIMEX_H
+
+#include <linux/config.h>
+#include <linux/compiler.h>
+#include <linux/time.h>
+
+#include <asm/param.h>
+#include <asm/timex.h>
+
+/*
+ * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
+ * for a slightly underdamped convergence characteristic. SHIFT_KH
+ * establishes the damping of the FLL and is chosen by wisdom and black
+ * art.
+ *
+ * MAXTC establishes the maximum time constant of the PLL. With the
+ * SHIFT_KG and SHIFT_KF values given and a time constant range from
+ * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
+ * respectively.
+ */
+#define SHIFT_KG 6             /* phase factor (shift) */
+#define SHIFT_KF 16            /* PLL frequency factor (shift) */
+#define SHIFT_KH 2             /* FLL frequency factor (shift) */
+#define MAXTC 6                        /* maximum time constant (shift) */
+
+/*
+ * The SHIFT_SCALE define establishes the decimal point of the time_phase
+ * variable which serves as an extension to the low-order bits of the
+ * system clock variable. The SHIFT_UPDATE define establishes the decimal
+ * point of the time_offset variable which represents the current offset
+ * with respect to standard time. The FINENSEC define represents 1 nsec in
+ * scaled units.
+ *
+ * SHIFT_USEC defines the scaling (shift) of the time_freq and
+ * time_tolerance variables, which represent the current frequency
+ * offset and maximum frequency tolerance.
+ *
+ * FINENSEC is 1 ns in SHIFT_UPDATE units of the time_phase variable.
+ */
+#define SHIFT_SCALE 22         /* phase scale (shift) */
+#define SHIFT_UPDATE (SHIFT_KG + MAXTC) /* time offset scale (shift) */
+#define SHIFT_USEC 16          /* frequency offset scale (shift) */
+#define FINENSEC (1L << (SHIFT_SCALE - 10)) /* ~1 ns in phase units */
+
+#define MAXPHASE 512000L        /* max phase error (us) */
+#define MAXFREQ (512L << SHIFT_USEC)  /* max frequency error (ppm) */
+#define MAXTIME (200L << PPS_AVG) /* max PPS error (jitter) (200 us) */
+#define MINSEC 16L              /* min interval between updates (s) */
+#define MAXSEC 1200L            /* max interval between updates (s) */
+#define        NTP_PHASE_LIMIT (MAXPHASE << 5) /* beyond max. dispersion */
+
+/*
+ * The following defines are used only if a pulse-per-second (PPS)
+ * signal is available and connected via a modem control lead, such as
+ * produced by the optional ppsclock feature incorporated in the Sun
+ * asynch driver. They establish the design parameters of the frequency-
+ * lock loop used to discipline the CPU clock oscillator to the PPS
+ * signal.
+ *
+ * PPS_AVG is the averaging factor for the frequency loop, as well as
+ * the time and frequency dispersion.
+ *
+ * PPS_SHIFT and PPS_SHIFTMAX specify the minimum and maximum
+ * calibration intervals, respectively, in seconds as a power of two.
+ *
+ * PPS_VALID is the maximum interval before the PPS signal is considered
+ * invalid and protocol updates used directly instead.
+ *
+ * MAXGLITCH is the maximum interval before a time offset of more than
+ * MAXTIME is believed.
+ */
+#define PPS_AVG 2              /* pps averaging constant (shift) */
+#define PPS_SHIFT 2            /* min interval duration (s) (shift) */
+#define PPS_SHIFTMAX 8         /* max interval duration (s) (shift) */
+#define PPS_VALID 120          /* pps signal watchdog max (s) */
+#define MAXGLITCH 30           /* pps signal glitch max (s) */
+
+/*
+ * syscall interface - used (mainly by NTP daemon)
+ * to discipline kernel clock oscillator
+ */
+struct timex {
+       unsigned int modes;     /* mode selector */
+       long offset;            /* time offset (usec) */
+       long freq;              /* frequency offset (scaled ppm) */
+       long maxerror;          /* maximum error (usec) */
+       long esterror;          /* estimated error (usec) */
+       int status;             /* clock command/status */
+       long constant;          /* pll time constant */
+       long precision;         /* clock precision (usec) (read only) */
+       long tolerance;         /* clock frequency tolerance (ppm)
+                                * (read only)
+                                */
+       struct timeval time;    /* (read only) */
+       long tick;              /* (modified) usecs between clock ticks */
+
+       long ppsfreq;           /* pps frequency (scaled ppm) (ro) */
+       long jitter;            /* pps jitter (us) (ro) */
+       int shift;              /* interval duration (s) (shift) (ro) */
+       long stabil;            /* pps stability (scaled ppm) (ro) */
+       long jitcnt;            /* jitter limit exceeded (ro) */
+       long calcnt;            /* calibration intervals (ro) */
+       long errcnt;            /* calibration errors (ro) */
+       long stbcnt;            /* stability limit exceeded (ro) */
+
+       int  :32; int  :32; int  :32; int  :32;
+       int  :32; int  :32; int  :32; int  :32;
+       int  :32; int  :32; int  :32; int  :32;
+};
+
+/*
+ * Mode codes (timex.mode)
+ */
+#define ADJ_OFFSET             0x0001  /* time offset */
+#define ADJ_FREQUENCY          0x0002  /* frequency offset */
+#define ADJ_MAXERROR           0x0004  /* maximum time error */
+#define ADJ_ESTERROR           0x0008  /* estimated time error */
+#define ADJ_STATUS             0x0010  /* clock status */
+#define ADJ_TIMECONST          0x0020  /* pll time constant */
+#define ADJ_TICK               0x4000  /* tick value */
+#define ADJ_OFFSET_SINGLESHOT  0x8001  /* old-fashioned adjtime */
+
+/* xntp 3.4 compatibility names */
+#define MOD_OFFSET     ADJ_OFFSET
+#define MOD_FREQUENCY  ADJ_FREQUENCY
+#define MOD_MAXERROR   ADJ_MAXERROR
+#define MOD_ESTERROR   ADJ_ESTERROR
+#define MOD_STATUS     ADJ_STATUS
+#define MOD_TIMECONST  ADJ_TIMECONST
+#define MOD_CLKB       ADJ_TICK
+#define MOD_CLKA       ADJ_OFFSET_SINGLESHOT /* 0x8000 in original */
+
+
+/*
+ * Status codes (timex.status)
+ */
+#define STA_PLL                0x0001  /* enable PLL updates (rw) */
+#define STA_PPSFREQ    0x0002  /* enable PPS freq discipline (rw) */
+#define STA_PPSTIME    0x0004  /* enable PPS time discipline (rw) */
+#define STA_FLL                0x0008  /* select frequency-lock mode (rw) */
+
+#define STA_INS                0x0010  /* insert leap (rw) */
+#define STA_DEL                0x0020  /* delete leap (rw) */
+#define STA_UNSYNC     0x0040  /* clock unsynchronized (rw) */
+#define STA_FREQHOLD   0x0080  /* hold frequency (rw) */
+
+#define STA_PPSSIGNAL  0x0100  /* PPS signal present (ro) */
+#define STA_PPSJITTER  0x0200  /* PPS signal jitter exceeded (ro) */
+#define STA_PPSWANDER  0x0400  /* PPS signal wander exceeded (ro) */
+#define STA_PPSERROR   0x0800  /* PPS signal calibration error (ro) */
+
+#define STA_CLOCKERR   0x1000  /* clock hardware fault (ro) */
+
+#define STA_RONLY (STA_PPSSIGNAL | STA_PPSJITTER | STA_PPSWANDER | \
+    STA_PPSERROR | STA_CLOCKERR) /* read-only bits */
+
+/*
+ * Clock states (time_state)
+ */
+#define TIME_OK                0       /* clock synchronized, no leap second */
+#define TIME_INS       1       /* insert leap second */
+#define TIME_DEL       2       /* delete leap second */
+#define TIME_OOP       3       /* leap second in progress */
+#define TIME_WAIT      4       /* leap second has occurred */
+#define TIME_ERROR     5       /* clock not synchronized */
+#define TIME_BAD       TIME_ERROR /* bw compat */
+
+#ifdef __KERNEL__
+/*
+ * kernel variables
+ * Note: maximum error = NTP synch distance = dispersion + delay / 2;
+ * estimated error = NTP dispersion.
+ */
+extern unsigned long tick_usec;                /* USER_HZ period (usec) */
+extern unsigned long tick_nsec;                /* ACTHZ          period (nsec) */
+extern int tickadj;                    /* amount of adjustment per tick */
+
+/*
+ * phase-lock loop variables
+ */
+extern int time_state;         /* clock status */
+extern int time_status;                /* clock synchronization status bits */
+extern long time_offset;       /* time adjustment (us) */
+extern long time_constant;     /* pll time constant */
+extern long time_tolerance;    /* frequency tolerance (ppm) */
+extern long time_precision;    /* clock precision (us) */
+extern long time_maxerror;     /* maximum error */
+extern long time_esterror;     /* estimated error */
+
+extern long time_phase;                /* phase offset (scaled us) */
+extern long time_freq;         /* frequency offset (scaled ppm) */
+extern long time_adj;          /* tick adjust (scaled 1 / HZ) */
+extern long time_reftime;      /* time at last adjustment (s) */
+
+extern long time_adjust;       /* The amount of adjtime left */
+extern long time_next_adjust;  /* Value for time_adjust at next tick */
+
+/* interface variables pps->timer interrupt */
+extern long pps_offset;                /* pps time offset (us) */
+extern long pps_jitter;                /* time dispersion (jitter) (us) */
+extern long pps_freq;          /* frequency offset (scaled ppm) */
+extern long pps_stabil;                /* frequency dispersion (scaled ppm) */
+extern long pps_valid;         /* pps signal watchdog counter */
+
+/* interface variables pps->adjtimex */
+extern int pps_shift;          /* interval duration (s) (shift) */
+extern long pps_jitcnt;                /* jitter limit exceeded */
+extern long pps_calcnt;                /* calibration intervals */
+extern long pps_errcnt;                /* calibration errors */
+extern long pps_stbcnt;                /* stability limit exceeded */
+
+#ifdef CONFIG_TIME_INTERPOLATION
+
+#define TIME_SOURCE_CPU 0
+#define TIME_SOURCE_MMIO64 1
+#define TIME_SOURCE_MMIO32 2
+#define TIME_SOURCE_FUNCTION 3
+
+/* For proper operations time_interpolator clocks must run slightly slower
+ * than the standard clock since the interpolator may only correct by having
+ * time jump forward during a tick. A slower clock is usually a side effect
+ * of the integer divide of the nanoseconds in a second by the frequency.
+ * The accuracy of the division can be increased by specifying a shift.
+ * However, this may cause the clock not to be slow enough.
+ * The interpolator will self-tune the clock by slowing down if no
+ * resets occur or speeding up if the time jumps per analysis cycle
+ * become too high.
+ *
+ * Setting jitter compensates for a fluctuating timesource by comparing
+ * to the last value read from the timesource to ensure that an earlier value
+ * is not returned by a later call. The price to pay
+ * for the compensation is that the timer routines are not as scalable anymore.
+ */
+
+struct time_interpolator {
+       u16 source;                     /* time source flags */
+       u8 shift;                       /* increases accuracy of multiply by shifting. */
+                               /* Note that bits may be lost if shift is set too high */
+       u8 jitter;                      /* if set compensate for fluctuations */
+       u32 nsec_per_cyc;               /* set by register_time_interpolator() */
+       void *addr;                     /* address of counter or function */
+       u64 mask;                       /* mask the valid bits of the counter */
+       unsigned long offset;           /* nsec offset at last update of interpolator */
+       u64 last_counter;               /* counter value in units of the counter at last update */
+       u64 last_cycle;                 /* Last timer value if TIME_SOURCE_JITTER is set */
+       u64 frequency;                  /* frequency in counts/second */
+       long drift;                     /* drift in parts-per-million (or -1) */
+       unsigned long skips;            /* skips forward */
+       unsigned long ns_skipped;       /* nanoseconds skipped */
+       struct time_interpolator *next;
+};
+
+extern void register_time_interpolator(struct time_interpolator *);
+extern void unregister_time_interpolator(struct time_interpolator *);
+extern void time_interpolator_reset(void);
+extern unsigned long time_interpolator_get_offset(void);
+
+#else /* !CONFIG_TIME_INTERPOLATION */
+
+static inline void
+time_interpolator_reset(void)
+{
+}
+
+#endif /* !CONFIG_TIME_INTERPOLATION */
+
+#endif /* KERNEL */
+
+#endif /* LINUX_TIMEX_H */
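
For context, struct timex above is the same structure user space passes to adjtimex(2); a minimal read-only query sketch from user space (assumes glibc's <sys/timex.h> wrapper, not part of this header):

#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
        struct timex tx = { .modes = 0 };       /* modes == 0: query only, no adjustment */
        int state = adjtimex(&tx);              /* returns a TIME_* clock state */

        printf("clock state %d, freq offset %ld (scaled ppm), est. error %ld us\n",
               state, tx.freq, tx.esterror);
        return 0;
}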
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/topology.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/topology.h     Tue Aug  2 23:59:09 2005
@@ -0,0 +1,133 @@
+/*
+ * include/linux/topology.h
+ *
+ * Written by: Matthew Dobson, IBM Corporation
+ *
+ * Copyright (C) 2002, IBM Corp.
+ *
+ * All rights reserved.          
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <colpatch@xxxxxxxxxx>
+ */
+#ifndef _LINUX_TOPOLOGY_H
+#define _LINUX_TOPOLOGY_H
+
+#include <linux/cpumask.h>
+#include <linux/bitops.h>
+#include <linux/mmzone.h>
+#include <linux/smp.h>
+
+#include <asm/topology.h>
+
+#ifndef nr_cpus_node
+#define nr_cpus_node(node)                                                     \
+       ({                                                                      \
+               cpumask_t __tmp__;                                              \
+               __tmp__ = node_to_cpumask(node);                                \
+               cpus_weight(__tmp__);                                           \
+       })
+#endif
+
+#define for_each_node_with_cpus(node)                                          \
+       for_each_online_node(node)                                              \
+               if (nr_cpus_node(node))
+
+#ifndef node_distance
+/* Conform to ACPI 2.0 SLIT distance definitions */
+#define LOCAL_DISTANCE         10
+#define REMOTE_DISTANCE                20
+#define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
+#endif
+#ifndef PENALTY_FOR_NODE_WITH_CPUS
+#define PENALTY_FOR_NODE_WITH_CPUS     (1)
+#endif
+
+/*
+ * Below are the 3 major initializers used in building sched_domains:
+ * SD_SIBLING_INIT, for SMT domains
+ * SD_CPU_INIT, for SMP domains
+ * SD_NODE_INIT, for NUMA domains
+ *
+ * Any architecture that cares to do any tuning to these values should do so
+ * by defining their own arch-specific initializer in include/asm/topology.h.
+ * A definition there will automagically override these default initializers
+ * and allow arch-specific performance tuning of sched_domains.
+ */
+#ifdef CONFIG_SCHED_SMT
+/* MCD - Do we really need this?  It is always on if CONFIG_SCHED_SMT is,
+ * so can't we drop this in favor of CONFIG_SCHED_SMT?
+ */
+#define ARCH_HAS_SCHED_WAKE_IDLE
+/* Common values for SMT siblings */
+#ifndef SD_SIBLING_INIT
+#define SD_SIBLING_INIT (struct sched_domain) {                \
+       .span                   = CPU_MASK_NONE,        \
+       .parent                 = NULL,                 \
+       .groups                 = NULL,                 \
+       .min_interval           = 1,                    \
+       .max_interval           = 2,                    \
+       .busy_factor            = 8,                    \
+       .imbalance_pct          = 110,                  \
+       .cache_hot_time         = 0,                    \
+       .cache_nice_tries       = 0,                    \
+       .per_cpu_gain           = 25,                   \
+       .flags                  = SD_LOAD_BALANCE       \
+                               | SD_BALANCE_NEWIDLE    \
+                               | SD_BALANCE_EXEC       \
+                               | SD_WAKE_AFFINE        \
+                               | SD_WAKE_IDLE          \
+                               | SD_SHARE_CPUPOWER,    \
+       .last_balance           = jiffies,              \
+       .balance_interval       = 1,                    \
+       .nr_balance_failed      = 0,                    \
+}
+#endif
+#endif /* CONFIG_SCHED_SMT */
+
+/* Common values for CPUs */
+#ifndef SD_CPU_INIT
+#define SD_CPU_INIT (struct sched_domain) {            \
+       .span                   = CPU_MASK_NONE,        \
+       .parent                 = NULL,                 \
+       .groups                 = NULL,                 \
+       .min_interval           = 1,                    \
+       .max_interval           = 4,                    \
+       .busy_factor            = 64,                   \
+       .imbalance_pct          = 125,                  \
+       .cache_hot_time         = (5*1000000/2),        \
+       .cache_nice_tries       = 1,                    \
+       .per_cpu_gain           = 100,                  \
+       .flags                  = SD_LOAD_BALANCE       \
+                               | SD_BALANCE_NEWIDLE    \
+                               | SD_BALANCE_EXEC       \
+                               | SD_WAKE_AFFINE        \
+                               | SD_WAKE_IDLE          \
+                               | SD_WAKE_BALANCE,      \
+       .last_balance           = jiffies,              \
+       .balance_interval       = 1,                    \
+       .nr_balance_failed      = 0,                    \
+}
+#endif
+
+#ifdef CONFIG_NUMA
+#ifndef SD_NODE_INIT
+#error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!!
+#endif
+#endif /* CONFIG_NUMA */
+
+#endif /* _LINUX_TOPOLOGY_H */
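
A minimal sketch of the node iteration and distance helpers above (hypothetical function; assumes the generic LOCAL_DISTANCE fallback, i.e. an arch that does not override node_distance()):

#include <linux/topology.h>

/* Count how many online nodes actually have CPUs, and how many of those
 * are "remote" relative to node 0 by the SLIT-style node_distance(). */
static void count_nodes(int *with_cpus, int *remote)
{
        int node;

        *with_cpus = 0;
        *remote = 0;
        for_each_node_with_cpus(node) {
                (*with_cpus)++;
                if (node_distance(0, node) > LOCAL_DISTANCE)
                        (*remote)++;
        }
}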
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/linux/wait.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/linux/wait.h Tue Aug  2 23:59:09 2005
@@ -0,0 +1,400 @@
+#ifndef _LINUX_WAIT_H
+#define _LINUX_WAIT_H
+
+#define WNOHANG                0x00000001
+#define WUNTRACED      0x00000002
+#define WSTOPPED       WUNTRACED
+#define WEXITED                0x00000004
+#define WCONTINUED     0x00000008
+#define WNOWAIT                0x01000000      /* Don't reap, just poll status.  */
+
+#define __WNOTHREAD    0x20000000      /* Don't wait on children of other threads in this group */
+#define __WALL         0x40000000      /* Wait on all children, regardless of type */
+#define __WCLONE       0x80000000      /* Wait only on non-SIGCHLD children */
+
+/* First argument to waitid: */
+#define P_ALL          0
+#define P_PID          1
+#define P_PGID         2
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/list.h>
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <asm/system.h>
+#include <asm/current.h>
+
+typedef struct __wait_queue wait_queue_t;
+typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync, void *key);
+int default_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+
+struct __wait_queue {
+       unsigned int flags;
+#define WQ_FLAG_EXCLUSIVE      0x01
+       struct task_struct * task;
+       wait_queue_func_t func;
+       struct list_head task_list;
+};
+
+struct wait_bit_key {
+       void *flags;
+       int bit_nr;
+};
+
+struct wait_bit_queue {
+       struct wait_bit_key key;
+       wait_queue_t wait;
+};
+
+struct __wait_queue_head {
+       spinlock_t lock;
+       struct list_head task_list;
+};
+typedef struct __wait_queue_head wait_queue_head_t;
+
+
+/*
+ * Macros for declaration and initialisation of the datatypes
+ */
+
+#define __WAITQUEUE_INITIALIZER(name, tsk) {                           \
+       .task           = tsk,                                          \
+       .func           = default_wake_function,                        \
+       .task_list      = { NULL, NULL } }
+
+#define DECLARE_WAITQUEUE(name, tsk)                                   \
+       wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
+
+#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {                          \
+       .lock           = SPIN_LOCK_UNLOCKED,                           \
+       .task_list      = { &(name).task_list, &(name).task_list } }
+
+#define DECLARE_WAIT_QUEUE_HEAD(name) \
+       wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
+
+#define __WAIT_BIT_KEY_INITIALIZER(word, bit)                          \
+       { .flags = word, .bit_nr = bit, }
+
+static inline void init_waitqueue_head(wait_queue_head_t *q)
+{
+       q->lock = SPIN_LOCK_UNLOCKED;
+       INIT_LIST_HEAD(&q->task_list);
+}
+
+static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
+{
+       q->flags = 0;
+       q->task = p;
+       q->func = default_wake_function;
+}
+
+static inline void init_waitqueue_func_entry(wait_queue_t *q,
+                                       wait_queue_func_t func)
+{
+       q->flags = 0;
+       q->task = NULL;
+       q->func = func;
+}
+
+static inline int waitqueue_active(wait_queue_head_t *q)
+{
+       return !list_empty(&q->task_list);
+}
+
+/*
+ * Used to distinguish between sync and async io wait context:
+ * sync i/o typically specifies a NULL wait queue entry or a wait
+ * queue entry bound to a task (current task) to wake up.
+ * aio specifies a wait queue entry with an async notification
+ * callback routine, not associated with any task.
+ */
+#define is_sync_wait(wait)     (!(wait) || ((wait)->task))
+
+extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
+extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
+extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
+
+static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
+{
+       list_add(&new->task_list, &head->task_list);
+}
+
+/*
+ * Used for wake-one threads:
+ */
+static inline void __add_wait_queue_tail(wait_queue_head_t *head,
+                                               wait_queue_t *new)
+{
+       list_add_tail(&new->task_list, &head->task_list);
+}
+
+static inline void __remove_wait_queue(wait_queue_head_t *head,
+                                                       wait_queue_t *old)
+{
+       list_del(&old->task_list);
+}
+
+void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key));
+extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
+extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
+void FASTCALL(__wake_up_bit(wait_queue_head_t *, void *, int));
+int FASTCALL(__wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
+int FASTCALL(__wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
+void FASTCALL(wake_up_bit(void *, int));
+int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
+int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
+wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
+
+#define wake_up(x)                     __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
+#define wake_up_nr(x, nr)              __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
+#define wake_up_all(x)                 __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
+#define wake_up_interruptible(x)       __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
+#define wake_up_interruptible_nr(x, nr)        __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
+#define wake_up_interruptible_all(x)   __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
+#define        wake_up_locked(x)               __wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
+#define wake_up_interruptible_sync(x)   __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+
+#define __wait_event(wq, condition)                                    \
+do {                                                                   \
+       DEFINE_WAIT(__wait);                                            \
+                                                                       \
+       for (;;) {                                                      \
+               prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);    \
+               if (condition)                                          \
+                       break;                                          \
+               schedule();                                             \
+       }                                                               \
+       finish_wait(&wq, &__wait);                                      \
+} while (0)
+
+#define wait_event(wq, condition)                                      \
+do {                                                                   \
+       if (condition)                                                  \
+               break;                                                  \
+       __wait_event(wq, condition);                                    \
+} while (0)
+
+#define __wait_event_timeout(wq, condition, ret)                       \
+do {                                                                   \
+       DEFINE_WAIT(__wait);                                            \
+                                                                       \
+       for (;;) {                                                      \
+               prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);    \
+               if (condition)                                          \
+                       break;                                          \
+               ret = schedule_timeout(ret);                            \
+               if (!ret)                                               \
+                       break;                                          \
+       }                                                               \
+       finish_wait(&wq, &__wait);                                      \
+} while (0)
+
+#define wait_event_timeout(wq, condition, timeout)                     \
+({                                                                     \
+       long __ret = timeout;                                           \
+       if (!(condition))                                               \
+               __wait_event_timeout(wq, condition, __ret);             \
+       __ret;                                                          \
+})
+
+#define __wait_event_interruptible(wq, condition, ret)                 \
+do {                                                                   \
+       DEFINE_WAIT(__wait);                                            \
+                                                                       \
+       for (;;) {                                                      \
+               prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);      \
+               if (condition)                                          \
+                       break;                                          \
+               if (!signal_pending(current)) {                         \
+                       schedule();                                     \
+                       continue;                                       \
+               }                                                       \
+               ret = -ERESTARTSYS;                                     \
+               break;                                                  \
+       }                                                               \
+       finish_wait(&wq, &__wait);                                      \
+} while (0)
+
+#define wait_event_interruptible(wq, condition)                                \
+({                                                                     \
+       int __ret = 0;                                                  \
+       if (!(condition))                                               \
+               __wait_event_interruptible(wq, condition, __ret);       \
+       __ret;                                                          \
+})
+
+#define __wait_event_interruptible_timeout(wq, condition, ret)         \
+do {                                                                   \
+       DEFINE_WAIT(__wait);                                            \
+                                                                       \
+       for (;;) {                                                      \
+               prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);      \
+               if (condition)                                          \
+                       break;                                          \
+               if (!signal_pending(current)) {                         \
+                       ret = schedule_timeout(ret);                    \
+                       if (!ret)                                       \
+                               break;                                  \
+                       continue;                                       \
+               }                                                       \
+               ret = -ERESTARTSYS;                                     \
+               break;                                                  \
+       }                                                               \
+       finish_wait(&wq, &__wait);                                      \
+} while (0)
+
+#define wait_event_interruptible_timeout(wq, condition, timeout)       \
+({                                                                     \
+       long __ret = timeout;                                           \
+       if (!(condition))                                               \
+               __wait_event_interruptible_timeout(wq, condition, __ret); \
+       __ret;                                                          \
+})
+
+#define __wait_event_interruptible_exclusive(wq, condition, ret)       \
+do {                                                                   \
+       DEFINE_WAIT(__wait);                                            \
+                                                                       \
+       for (;;) {                                                      \
+               prepare_to_wait_exclusive(&wq, &__wait,                 \
+                                       TASK_INTERRUPTIBLE);            \
+               if (condition)                                          \
+                       break;                                          \
+               if (!signal_pending(current)) {                         \
+                       schedule();                                     \
+                       continue;                                       \
+               }                                                       \
+               ret = -ERESTARTSYS;                                     \
+               break;                                                  \
+       }                                                               \
+       finish_wait(&wq, &__wait);                                      \
+} while (0)
+
+#define wait_event_interruptible_exclusive(wq, condition)              \
+({                                                                     \
+       int __ret = 0;                                                  \
+       if (!(condition))                                               \
+               __wait_event_interruptible_exclusive(wq, condition, __ret);\
+       __ret;                                                          \
+})
+
+/*
+ * Must be called with the spinlock in the wait_queue_head_t held.
+ */
+static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
+                                                  wait_queue_t * wait)
+{
+       wait->flags |= WQ_FLAG_EXCLUSIVE;
+       __add_wait_queue_tail(q,  wait);
+}
+
+/*
+ * Must be called with the spinlock in the wait_queue_head_t held.
+ */
+static inline void remove_wait_queue_locked(wait_queue_head_t *q,
+                                           wait_queue_t * wait)
+{
+       __remove_wait_queue(q,  wait);
+}
+
+/*
+ * These are the old interfaces to sleep waiting for an event.
+ * They are racy.  DO NOT use them, use the wait_event* interfaces above.  
+ * We plan to remove these interfaces during 2.7.
+ */
+extern void FASTCALL(sleep_on(wait_queue_head_t *q));
+extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
+                                     signed long timeout));
+extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
+extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
+                                                   signed long timeout));
+
+/*
+ * Waitqueues which are removed from the waitqueue_head at wakeup time
+ */
+void FASTCALL(prepare_to_wait(wait_queue_head_t *q,
+                               wait_queue_t *wait, int state));
+void FASTCALL(prepare_to_wait_exclusive(wait_queue_head_t *q,
+                               wait_queue_t *wait, int state));
+void FASTCALL(finish_wait(wait_queue_head_t *q, wait_queue_t *wait));
+int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+
+#define DEFINE_WAIT(name)                                              \
+       wait_queue_t name = {                                           \
+               .task           = current,                              \
+               .func           = autoremove_wake_function,             \
+               .task_list      = {     .next = &(name).task_list,      \
+                                       .prev = &(name).task_list,      \
+                               },                                      \
+       }
+
+#define DEFINE_WAIT_BIT(name, word, bit)                               \
+       struct wait_bit_queue name = {                                  \
+               .key = __WAIT_BIT_KEY_INITIALIZER(word, bit),           \
+               .wait   = {                                             \
+                       .task           = current,                      \
+                       .func           = wake_bit_function,            \
+                       .task_list      =                               \
+                               LIST_HEAD_INIT((name).wait.task_list),  \
+               },                                                      \
+       }
+
+#define init_wait(wait)                                                        \
+       do {                                                            \
+               (wait)->task = current;                                 \
+               (wait)->func = autoremove_wake_function;                \
+               INIT_LIST_HEAD(&(wait)->task_list);                     \
+       } while (0)
+
+/**
+ * wait_on_bit - wait for a bit to be cleared
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * There is a standard hashed waitqueue table for generic use. This
+ * is the part of the hashtable's accessor API that waits on a bit.
+ * For instance, if one were to have waiters on a bitflag, one would
+ * call wait_on_bit() in threads waiting for the bit to clear.
+ * One uses wait_on_bit() where one is waiting for the bit to clear,
+ * but has no intention of setting it.
+ */
+static inline int wait_on_bit(void *word, int bit,
+                               int (*action)(void *), unsigned mode)
+{
+       if (!test_bit(bit, word))
+               return 0;
+       return out_of_line_wait_on_bit(word, bit, action, mode);
+}
+
+/**
+ * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * There is a standard hashed waitqueue table for generic use. This
+ * is the part of the hashtable's accessor API that waits on a bit
+ * when one intends to set it, for instance, trying to lock bitflags.
+ * For instance, if one were to have waiters trying to set bitflag
+ * and waiting for it to clear before setting it, one would call
+ * wait_on_bit() in threads waiting to be able to set the bit.
+ * One uses wait_on_bit_lock() where one is waiting for the bit to
+ * clear with the intention of setting it, and when done, clearing it.
+ */
+static inline int wait_on_bit_lock(void *word, int bit,
+                               int (*action)(void *), unsigned mode)
+{
+       if (!test_and_set_bit(bit, word))
+               return 0;
+       return out_of_line_wait_on_bit_lock(word, bit, action, mode);
+}
+       
+#endif /* __KERNEL__ */
+
+#endif
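
As a minimal usage sketch (illustration only, not part of this changeset; the my_* names and the flag word are assumptions), the bit-waitqueue helpers above can serialize on a single flag bit like this:

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bitops.h>

static unsigned long my_flags;                  /* assumed flag word, bit 0 = "busy" */

static int my_wait_action(void *word)           /* sleep callback passed as 'action' */
{
        schedule();
        return 0;
}

static void my_lock(void)
{
        /* sleeps until test_and_set_bit() wins the race for bit 0 */
        wait_on_bit_lock(&my_flags, 0, my_wait_action, TASK_UNINTERRUPTIBLE);
}

static void my_unlock(void)
{
        clear_bit(0, &my_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&my_flags, 0);              /* wake waiters in wait_on_bit_lock() */
}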
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/mca_asm.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/mca_asm.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,323 @@
+/*
+ * File:       mca_asm.h
+ *
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) Vijay Chander (vijay@xxxxxxxxxxxx)
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@xxxxxxxxxxxx>
+ * Copyright (C) 2000 Hewlett-Packard Co.
+ * Copyright (C) 2000 David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 2002 Intel Corp.
+ * Copyright (C) 2002 Jenna Hall <jenna.s.hall@xxxxxxxxx>
+ */
+#ifndef _ASM_IA64_MCA_ASM_H
+#define _ASM_IA64_MCA_ASM_H
+
+#define PSR_IC         13
+#define PSR_I          14
+#define        PSR_DT          17
+#define PSR_RT         27
+#define PSR_MC         35
+#define PSR_IT         36
+#define PSR_BN         44
+
+/*
+ * This macro converts an instruction virtual address to a physical address
+ * Right now for simulation purposes the virtual addresses are
+ * direct mapped to physical addresses.
+ *     1. Lop off bits 61 thru 63 in the virtual address
+ */
+#ifdef XEN
+#define INST_VA_TO_PA(addr)                                                    \
+       dep     addr    = 0, addr, 60, 4
+#else // XEN
+#define INST_VA_TO_PA(addr)                                                    \
+       dep     addr    = 0, addr, 61, 3
+#endif // XEN
+/*
+ * This macro converts a data virtual address to a physical address
+ * Right now for simulation purposes the virtual addresses are
+ * direct mapped to physical addresses.
+ *     1. Lop off bits 61 thru 63 in the virtual address
+ */
+#define DATA_VA_TO_PA(addr)                                                    \
+       tpa     addr    = addr
+/*
+ * This macro converts a data physical address to a virtual address
+ * Right now for simulation purposes the virtual addresses are
+ * direct mapped to physical addresses.
+ *     1. Put 0x7 in bits 61 thru 63.
+ */
+#ifdef XEN
+#define DATA_PA_TO_VA(addr,temp)                                               \
+       mov     temp    = 0xf   ;;                                              \
+       dep     addr    = temp, addr, 60, 4
+#else // XEN
+#define DATA_PA_TO_VA(addr,temp)                                               \
+       mov     temp    = 0x7   ;;                                              \
+       dep     addr    = temp, addr, 61, 3
+#endif // XEN
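
For reference, a plain C sketch (illustration only, not in the patch) of what the XEN variants of the dep-based conversions above compute, given Xen's identity mapping with a 4-bit region field:

#include <stdint.h>

/* INST_VA_TO_PA: dep addr = 0, addr, 60, 4   -> clear bits 63..60 */
static inline uint64_t inst_va_to_pa(uint64_t va)
{
        return va & ~(0xfUL << 60);
}

/* DATA_PA_TO_VA: dep addr = 0xf, addr, 60, 4 -> force bits 63..60 to 0xf */
static inline uint64_t data_pa_to_va(uint64_t pa)
{
        return pa | (0xfUL << 60);
}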
+
+#define GET_THIS_PADDR(reg, var)               \
+       mov     reg = IA64_KR(PER_CPU_DATA);;   \
+        addl   reg = THIS_CPU(var), reg
+
+/*
+ * This macro jumps to the instruction at the given virtual address
+ * and starts execution in physical mode with all the address
+ * translations turned off.
+ *     1.      Save the current psr
+ *     2.      Make sure that all the upper 32 bits are off
+ *
+ *     3.      Clear the interrupt enable and interrupt state collection bits
+ *             in the psr before updating the ipsr and iip.
+ *
+ *     4.      Turn off the instruction, data and rse translation bits of the psr
+ *             and store the new value into ipsr
+ *             Also make sure that the interrupts are disabled.
+ *             Ensure that we are in little endian mode.
+ *             [psr.{rt, it, dt, i, be} = 0]
+ *
+ *     5.      Get the physical address corresponding to the virtual address
+ *             of the next instruction bundle and put it in iip.
+ *             (Using magic numbers 24 and 40 in the deposit instruction since
+ *              the IA64_SDK code directly maps to lower 24 bits as the physical
+ *              address from a virtual address).
+ *
+ *     6.      Do an rfi to move the values from ipsr to psr and iip to ip.
+ */
+#define  PHYSICAL_MODE_ENTER(temp1, temp2, start_addr, old_psr)                \
+       mov     old_psr = psr;                                                  \
+       ;;                                                                      \
+       dep     old_psr = 0, old_psr, 32, 32;                                   \
+                                                                               \
+       mov     ar.rsc = 0 ;                                                    \
+       ;;                                                                      \
+       srlz.d;                                                                 \
+       mov     temp2 = ar.bspstore;                                            \
+       ;;                                                                      \
+       DATA_VA_TO_PA(temp2);                                                   \
+       ;;                                                                      \
+       mov     temp1 = ar.rnat;                                                \
+       ;;                                                                      \
+       mov     ar.bspstore = temp2;                                            \
+       ;;                                                                      \
+       mov     ar.rnat = temp1;                                                \
+       mov     temp1 = psr;                                                    \
+       mov     temp2 = psr;                                                    \
+       ;;                                                                      \
+                                                                               \
+       dep     temp2 = 0, temp2, PSR_IC, 2;                                    \
+       ;;                                                                      \
+       mov     psr.l = temp2;                                                  \
+       ;;                                                                      \
+       srlz.d;                                                                 \
+       dep     temp1 = 0, temp1, 32, 32;                                       \
+       ;;                                                                      \
+       dep     temp1 = 0, temp1, PSR_IT, 1;                                    \
+       ;;                                                                      \
+       dep     temp1 = 0, temp1, PSR_DT, 1;                                    \
+       ;;                                                                      \
+       dep     temp1 = 0, temp1, PSR_RT, 1;                                    \
+       ;;                                                                      \
+       dep     temp1 = 0, temp1, PSR_I, 1;                                     \
+       ;;                                                                      \
+       dep     temp1 = 0, temp1, PSR_IC, 1;                                    \
+       ;;                                                                      \
+       dep     temp1 = -1, temp1, PSR_MC, 1;                                   \
+       ;;                                                                      \
+       mov     cr.ipsr = temp1;                                                \
+       ;;                                                                      \
+       LOAD_PHYSICAL(p0, temp2, start_addr);                                   \
+       ;;                                                                      \
+       mov     cr.iip = temp2;                                                 \
+       mov     cr.ifs = r0;                                                    \
+       DATA_VA_TO_PA(sp);                                                      \
+       DATA_VA_TO_PA(gp);                                                      \
+       ;;                                                                      \
+       srlz.i;                                                                 \
+       ;;                                                                      \
+       nop     1;                                                              \
+       nop     2;                                                              \
+       nop     1;                                                              \
+       nop     2;                                                              \
+       rfi;                                                                    \
+       ;;
+
+/*
+ * This macro jumps to the instruction at the given virtual address
+ * and starts execution in virtual mode with all the address
+ * translations turned on.
+ *     1.      Get the old saved psr
+ *
+ *     2.      Clear the interrupt state collection bit in the current psr.
+ *
+ *     3.      Set the instruction translation bit back in the old psr
+ *             Note we have to do this since we are right now saving only the
+ *             lower 32-bits of old psr.(Also the old psr has the data and
+ *             rse translation bits on)
+ *
+ *     4.      Set ipsr to this old_psr with "it" bit set and "bn" = 1.
+ *
+ *     5.      Reset the current thread pointer (r13).
+ *
+ *     6.      Set iip to the virtual address of the next instruction bundle.
+ *
+ *     7.      Do an rfi to move ipsr to psr and iip to ip.
+ */
+
+#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr)  \
+       mov     temp2 = psr;                                    \
+       ;;                                                      \
+       mov     old_psr = temp2;                                \
+       ;;                                                      \
+       dep     temp2 = 0, temp2, PSR_IC, 2;                    \
+       ;;                                                      \
+       mov     psr.l = temp2;                                  \
+       mov     ar.rsc = 0;                                     \
+       ;;                                                      \
+       srlz.d;                                                 \
+       mov     r13 = ar.k6;                                    \
+       mov     temp2 = ar.bspstore;                            \
+       ;;                                                      \
+       DATA_PA_TO_VA(temp2,temp1);                             \
+       ;;                                                      \
+       mov     temp1 = ar.rnat;                                \
+       ;;                                                      \
+       mov     ar.bspstore = temp2;                            \
+       ;;                                                      \
+       mov     ar.rnat = temp1;                                \
+       ;;                                                      \
+       mov     temp1 = old_psr;                                \
+       ;;                                                      \
+       mov     temp2 = 1;                                      \
+       ;;                                                      \
+       dep     temp1 = temp2, temp1, PSR_IC, 1;                \
+       ;;                                                      \
+       dep     temp1 = temp2, temp1, PSR_IT, 1;                \
+       ;;                                                      \
+       dep     temp1 = temp2, temp1, PSR_DT, 1;                \
+       ;;                                                      \
+       dep     temp1 = temp2, temp1, PSR_RT, 1;                \
+       ;;                                                      \
+       dep     temp1 = temp2, temp1, PSR_BN, 1;                \
+       ;;                                                      \
+                                                               \
+       mov     cr.ipsr = temp1;                                \
+       movl    temp2 = start_addr;                             \
+       ;;                                                      \
+       mov     cr.iip = temp2;                                 \
+       ;;                                                      \
+       DATA_PA_TO_VA(sp, temp1);                               \
+       DATA_PA_TO_VA(gp, temp2);                               \
+       srlz.i;                                                 \
+       ;;                                                      \
+       nop     1;                                              \
+       nop     2;                                              \
+       nop     1;                                              \
+       rfi                                                     \
+       ;;
+
+/*
+ * The following offsets capture the order in which the
+ * RSE related registers from the old context are
+ * saved onto the new stack frame.
+ *
+ *     +-----------------------+
+ *     |NDIRTY [BSP - BSPSTORE]|
+ *     +-----------------------+
+ *     |       RNAT            |
+ *     +-----------------------+
+ *     |       BSPSTORE        |
+ *     +-----------------------+
+ *     |       IFS             |
+ *     +-----------------------+
+ *     |       PFS             |
+ *     +-----------------------+
+ *     |       RSC             |
+ *     +-----------------------+ <-------- Bottom of new stack frame
+ */
+#define  rse_rsc_offset                0
+#define  rse_pfs_offset                (rse_rsc_offset+0x08)
+#define  rse_ifs_offset                (rse_pfs_offset+0x08)
+#define  rse_bspstore_offset   (rse_ifs_offset+0x08)
+#define  rse_rnat_offset       (rse_bspstore_offset+0x08)
+#define  rse_ndirty_offset     (rse_rnat_offset+0x08)
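
The same save-area layout, written as a C struct purely for illustration (the assembly below works with the raw byte offsets):

/* Field order matches the rse_*_offset constants above, 8 bytes each. */
struct rse_save_area {
        unsigned long rsc;              /* rse_rsc_offset      == 0x00 */
        unsigned long pfs;              /* rse_pfs_offset      == 0x08 */
        unsigned long ifs;              /* rse_ifs_offset      == 0x10 */
        unsigned long bspstore;         /* rse_bspstore_offset == 0x18 */
        unsigned long rnat;             /* rse_rnat_offset     == 0x20 */
        unsigned long ndirty;           /* rse_ndirty_offset   == 0x28 (BSP - BSPSTORE) */
};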
+
+/*
+ * rse_switch_context
+ *
+ *     1. Save old RSC onto the new stack frame
+ *     2. Save PFS onto new stack frame
+ *     3. Cover the old frame and start a new frame.
+ *     4. Save IFS onto new stack frame
+ *     5. Save the old BSPSTORE on the new stack frame
+ *     6. Save the old RNAT on the new stack frame
+ *     7. Write BSPSTORE with the new backing store pointer
+ *     8. Read and save the new BSP to calculate the #dirty registers
+ * NOTE: Look at pages 11-10, 11-11 in PRM Vol 2
+ */
+#define rse_switch_context(temp,p_stackframe,p_bspstore)                       \
+       ;;                                                                      \
+       mov     temp=ar.rsc;;                                                   \
+       st8     [p_stackframe]=temp,8;;                                 \
+       mov     temp=ar.pfs;;                                                   \
+       st8     [p_stackframe]=temp,8;                                          \
+       cover ;;                                                                \
+       mov     temp=cr.ifs;;                                                   \
+       st8     [p_stackframe]=temp,8;;                                         \
+       mov     temp=ar.bspstore;;                                              \
+       st8     [p_stackframe]=temp,8;;                                 \
+       mov     temp=ar.rnat;;                                                  \
+       st8     [p_stackframe]=temp,8;                                          \
+       mov     ar.bspstore=p_bspstore;;                                        \
+       mov     temp=ar.bsp;;                                                   \
+       sub     temp=temp,p_bspstore;;                                          \
+       st8     [p_stackframe]=temp,8;;
+
+/*
+ * rse_return_context
+ *     1. Allocate a zero-sized frame
+ *     2. Store the number of dirty registers in the RSC.loadrs field
+ *     3. Issue a loadrs to ensure that any registers from the interrupted
+ *        context which were saved on the new stack frame have been loaded
+ *        back into the stacked registers
+ *     4. Restore BSPSTORE
+ *     5. Restore RNAT
+ *     6. Restore PFS
+ *     7. Restore IFS
+ *     8. Restore RSC
+ *     9. Issue an RFI
+ */
+#define rse_return_context(psr_mask_reg,temp,p_stackframe)                     \
+       ;;                                                                      \
+       alloc   temp=ar.pfs,0,0,0,0;                                            \
+       add     p_stackframe=rse_ndirty_offset,p_stackframe;;                   \
+       ld8     temp=[p_stackframe];;                                           \
+       shl     temp=temp,16;;                                                  \
+       mov     ar.rsc=temp;;                                                   \
+       loadrs;;                                                                \
+       add     p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\
+       ld8     temp=[p_stackframe];;                                           \
+       mov     ar.bspstore=temp;;                                              \
+       add     p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\
+       ld8     temp=[p_stackframe];;                                           \
+       mov     ar.rnat=temp;;                                                  \
+       add     p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;;     \
+       ld8     temp=[p_stackframe];;                                           \
+       mov     ar.pfs=temp;;                                                   \
+       add     p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;;      \
+       ld8     temp=[p_stackframe];;                                           \
+       mov     cr.ifs=temp;;                                                   \
+       add     p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;;      \
+       ld8     temp=[p_stackframe];;                                           \
+       mov     ar.rsc=temp ;                                                   \
+       mov     temp=psr;;                                                      \
+       or      temp=temp,psr_mask_reg;;                                        \
+       mov     cr.ipsr=temp;;                                                  \
+       mov     temp=ip;;                                                       \
+       add     temp=0x30,temp;;                                                \
+       mov     cr.iip=temp;;                                                   \
+       srlz.i;;                                                                \
+       rfi;;
+
+#endif /* _ASM_IA64_MCA_ASM_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/page.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/page.h       Tue Aug  2 23:59:09 2005
@@ -0,0 +1,238 @@
+#ifndef _ASM_IA64_PAGE_H
+#define _ASM_IA64_PAGE_H
+/*
+ * Pagetable related stuff.
+ *
+ * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <linux/config.h>
+
+#include <asm/intrinsics.h>
+#include <asm/types.h>
+
+/*
+ * PAGE_SHIFT determines the actual kernel page size.
+ */
+#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
+# define PAGE_SHIFT    12
+#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
+# define PAGE_SHIFT    13
+#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
+# define PAGE_SHIFT    14
+#elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
+# define PAGE_SHIFT    16
+#else
+# error Unsupported page size!
+#endif
+
+#define PAGE_SIZE              (__IA64_UL_CONST(1) << PAGE_SHIFT)
+#define PAGE_MASK              (~(PAGE_SIZE - 1))
+#define PAGE_ALIGN(addr)       (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
+
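
A quick worked example of the macros above (illustration only), assuming the 16KB page configuration so PAGE_SHIFT == 14:

/* PAGE_SIZE == 0x4000, PAGE_MASK == ~0x3fff
 *   PAGE_ALIGN(0x4000) == 0x4000      (already page aligned)
 *   PAGE_ALIGN(0x4001) == 0x8000      (rounded up to the next page)
 *   0x4001 & PAGE_MASK == 0x4000      (rounded down)
 */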
+#define PERCPU_PAGE_SHIFT      16      /* log2() of max. size of per-CPU area */
+
+#define PERCPU_PAGE_SIZE       (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
+
+#define RGN_MAP_LIMIT  ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE)      /* per region addr limit */
+
+#ifdef CONFIG_HUGETLB_PAGE
+# define REGION_HPAGE          (4UL)   /* note: this is hardcoded in reload_context()!*/
+# define REGION_SHIFT          61
+# define HPAGE_REGION_BASE     (REGION_HPAGE << REGION_SHIFT)
+# define HPAGE_SHIFT           hpage_shift
+# define HPAGE_SHIFT_DEFAULT   28      /* check ia64 SDM for architecture supported size */
+# define HPAGE_SIZE            (__IA64_UL_CONST(1) << HPAGE_SHIFT)
+# define HPAGE_MASK            (~(HPAGE_SIZE - 1))
+
+# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+# define ARCH_HAS_HUGEPAGE_ONLY_RANGE
+#endif /* CONFIG_HUGETLB_PAGE */
+
+#ifdef __ASSEMBLY__
+# define __pa(x)               ((x) - PAGE_OFFSET)
+# define __va(x)               ((x) + PAGE_OFFSET)
+#else /* !__ASSEMBLY */
+# ifdef __KERNEL__
+#  define STRICT_MM_TYPECHECKS
+
+extern void clear_page (void *page);
+extern void copy_page (void *to, void *from);
+
+/*
+ * clear_user_page() and copy_user_page() can't be inline functions because
+ * flush_dcache_page() can't be defined until later...
+ */
+#define clear_user_page(addr, vaddr, page)     \
+do {                                           \
+       clear_page(addr);                       \
+       flush_dcache_page(page);                \
+} while (0)
+
+#define copy_user_page(to, from, vaddr, page)  \
+do {                                           \
+       copy_page((to), (from));                \
+       flush_dcache_page(page);                \
+} while (0)
+
+
+#define alloc_zeroed_user_highpage(vma, vaddr) \
+({                                             \
+       struct page *page = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr); \
+       if (page)                               \
+               flush_dcache_page(page);        \
+       page;                                   \
+})
+
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+extern int ia64_pfn_valid (unsigned long pfn);
+#else
+# define ia64_pfn_valid(pfn) 1
+#endif
+
+#ifndef CONFIG_DISCONTIGMEM
+#ifdef XEN
+# define pfn_valid(pfn)                (0)
+# define page_to_pfn(_page)    ((unsigned long)((_page) - frame_table))
+# define pfn_to_page(_pfn)     (frame_table + (_pfn))
+#else
+# define pfn_valid(pfn)                (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
+# define page_to_pfn(page)     ((unsigned long) (page - mem_map))
+# define pfn_to_page(pfn)      (mem_map + (pfn))
+#endif
+#else
+extern struct page *vmem_map;
+extern unsigned long max_low_pfn;
+# define pfn_valid(pfn)                (((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
+# define page_to_pfn(page)     ((unsigned long) (page - vmem_map))
+# define pfn_to_page(pfn)      (vmem_map + (pfn))
+#endif
+
+#define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
+#define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
+#ifdef XEN
+#define page_to_virt(_page)    phys_to_virt(page_to_phys(_page))
+#define phys_to_page(kaddr)    pfn_to_page(((kaddr) >> PAGE_SHIFT))
+#endif
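
Illustration only: under XEN the definitions above chain a struct page pointer to a virtual address roughly as follows (frame_table is the frame table array these macros assume):

/*   page_to_pfn(pg)   ->  pg - frame_table               (frame number)
 *   page_to_phys(pg)  ->  page_to_pfn(pg) << PAGE_SHIFT  (physical address)
 *   page_to_virt(pg)  ->  phys_to_virt(page_to_phys(pg)) (virtual address)
 */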
+
+typedef union ia64_va {
+       struct {
+               unsigned long off : 61;         /* intra-region offset */
+               unsigned long reg :  3;         /* region number */
+       } f;
+       unsigned long l;
+       void *p;
+} ia64_va;
+
+/*
+ * Note: These macros depend on the fact that PAGE_OFFSET has all
+ * region bits set to 1 and all other bits set to zero.  They are
+ * expressed in this way to ensure they result in a single "dep"
+ * instruction.
+ */
+#ifdef XEN
+typedef union xen_va {
+       struct {
+               unsigned long off : 60;
+               unsigned long reg : 4;
+       } f;
+       unsigned long l;
+       void *p;
+} xen_va;
+
+// xen/drivers/console.c uses __va in a declaration (should be fixed!)
+#define __pa(x)                ({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
+#define __va(x)                ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
+#else
+#define __pa(x)                ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
+#define __va(x)                ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
+#endif
+
+#define REGION_NUMBER(x)       ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
+#define REGION_OFFSET(x)       ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
+
+#define REGION_SIZE            REGION_NUMBER(1)
+#define REGION_KERNEL          7
+
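
A few example values (illustration only) for the region arithmetic implemented by the unions and macros above:

/* Xen __pa/__va use the 4-bit xen_va.reg field:
 *   __va(0x0000000001000000UL) == (void *)0xf000000001000000  (reg forced to 0xf)
 *   __pa(0xf000000001000000UL) == 0x0000000001000000          (reg cleared to 0)
 * REGION_NUMBER() uses the 3-bit ia64_va.reg field:
 *   REGION_NUMBER(0xe000000000000000UL) == 7
 */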
+#ifdef CONFIG_HUGETLB_PAGE
+# define htlbpage_to_page(x)   (((unsigned long) REGION_NUMBER(x) << 61)                       \
+                                | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
+# define HUGETLB_PAGE_ORDER    (HPAGE_SHIFT - PAGE_SHIFT)
+# define is_hugepage_only_range(addr, len)             \
+        (REGION_NUMBER(addr) == REGION_HPAGE &&        \
+         REGION_NUMBER((addr)+(len)) == REGION_HPAGE)
+extern unsigned int hpage_shift;
+#endif
+
+static __inline__ int
+get_order (unsigned long size)
+{
+       long double d = size - 1;
+       long order;
+
+       order = ia64_getf_exp(d);
+       order = order - PAGE_SHIFT - 0xffff + 1;
+       if (order < 0)
+               order = 0;
+       return order;
+}
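
get_order() above reads the exponent of (size - 1) with getf.exp; a portable C sketch (illustration only, not in the patch) that computes the same result:

/* Number of PAGE_SHIFT-sized doublings needed to cover 'size' bytes, e.g.
 * get_order(1) == 0, get_order(PAGE_SIZE) == 0, get_order(PAGE_SIZE + 1) == 1,
 * get_order(4 * PAGE_SIZE) == 2.
 */
static inline int get_order_portable(unsigned long size)
{
        int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
                order++;
                size >>= 1;
        }
        return order;
}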
+
+# endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
+
+#ifdef STRICT_MM_TYPECHECKS
+  /*
+   * These are used to make use of C type-checking..
+   */
+  typedef struct { unsigned long pte; } pte_t;
+  typedef struct { unsigned long pmd; } pmd_t;
+  typedef struct { unsigned long pgd; } pgd_t;
+  typedef struct { unsigned long pgprot; } pgprot_t;
+
+# define pte_val(x)    ((x).pte)
+# define pmd_val(x)    ((x).pmd)
+# define pgd_val(x)    ((x).pgd)
+# define pgprot_val(x) ((x).pgprot)
+
+# define __pte(x)      ((pte_t) { (x) } )
+# define __pgprot(x)   ((pgprot_t) { (x) } )
+
+#else /* !STRICT_MM_TYPECHECKS */
+  /*
+   * .. while these make it easier on the compiler
+   */
+# ifndef __ASSEMBLY__
+    typedef unsigned long pte_t;
+    typedef unsigned long pmd_t;
+    typedef unsigned long pgd_t;
+    typedef unsigned long pgprot_t;
+# endif
+
+# define pte_val(x)    (x)
+# define pmd_val(x)    (x)
+# define pgd_val(x)    (x)
+# define pgprot_val(x) (x)
+
+# define __pte(x)      (x)
+# define __pgd(x)      (x)
+# define __pgprot(x)   (x)
+#endif /* !STRICT_MM_TYPECHECKS */
+
+#ifdef XEN
+#define PAGE_OFFSET                    __IA64_UL_CONST(0xf000000000000000)
+#else
+#define PAGE_OFFSET                    __IA64_UL_CONST(0xe000000000000000)
+#endif
+
+#define VM_DATA_DEFAULT_FLAGS          (VM_READ | VM_WRITE |                                   \
+                                        VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |                \
+                                        (((current->personality & READ_IMPLIES_EXEC) != 0)     \
+                                         ? VM_EXEC : 0))
+
+#endif /* _ASM_IA64_PAGE_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/pal.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/pal.h        Tue Aug  2 23:59:09 2005
@@ -0,0 +1,1567 @@
+#ifndef _ASM_IA64_PAL_H
+#define _ASM_IA64_PAL_H
+
+/*
+ * Processor Abstraction Layer definitions.
+ *
+ * This is based on Intel IA-64 Architecture Software Developer's Manual rev 1.0
+ * chapter 11 IA-64 Processor Abstraction Layer
+ *
+ * Copyright (C) 1998-2001 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@xxxxxxxxxxxxxxxxxxxx>
+ *
+ * 99/10/01    davidm  Make sure we pass zero for reserved parameters.
+ * 00/03/07    davidm  Updated pal_cache_flush() to be in sync with PAL v2.6.
+ * 00/03/23     cfleck  Modified processor min-state save area to match updated PAL & SAL info
+ * 00/05/24     eranian Updated to latest PAL spec, fix structures bugs, added
+ * 00/05/25    eranian Support for stack calls, and static physical calls
+ * 00/06/18    eranian Support for stacked physical calls
+ */
+
+/*
+ * Note that some of these calls use a static-register only calling
+ * convention which has nothing to do with the regular calling
+ * convention.
+ */
+#define PAL_CACHE_FLUSH                1       /* flush i/d cache */
+#define PAL_CACHE_INFO         2       /* get detailed i/d cache info */
+#define PAL_CACHE_INIT         3       /* initialize i/d cache */
+#define PAL_CACHE_SUMMARY      4       /* get summary of cache hierarchy */
+#define PAL_MEM_ATTRIB         5       /* list supported memory attributes */
+#define PAL_PTCE_INFO          6       /* purge TLB info */
+#define PAL_VM_INFO            7       /* return supported virtual memory features */
+#define PAL_VM_SUMMARY         8       /* return summary on supported vm features */
+#define PAL_BUS_GET_FEATURES   9       /* return processor bus interface features settings */
+#define PAL_BUS_SET_FEATURES   10      /* set processor bus features */
+#define PAL_DEBUG_INFO         11      /* get number of debug registers */
+#define PAL_FIXED_ADDR         12      /* get fixed component of processor's directed address */
+#define PAL_FREQ_BASE          13      /* base frequency of the platform */
+#define PAL_FREQ_RATIOS                14      /* ratio of processor, bus and ITC frequency */
+#define PAL_PERF_MON_INFO      15      /* return performance monitor info */
+#define PAL_PLATFORM_ADDR      16      /* set processor interrupt block and IO port space addr */
+#define PAL_PROC_GET_FEATURES  17      /* get configurable processor features & settings */
+#define PAL_PROC_SET_FEATURES  18      /* enable/disable configurable processor features */
+#define PAL_RSE_INFO           19      /* return rse information */
+#define PAL_VERSION            20      /* return version of PAL code */
+#define PAL_MC_CLEAR_LOG       21      /* clear all processor log info */
+#define PAL_MC_DRAIN           22      /* drain operations which could result in an MCA */
+#define PAL_MC_EXPECTED                23      /* set/reset expected MCA indicator */
+#define PAL_MC_DYNAMIC_STATE   24      /* get processor dynamic state */
+#define PAL_MC_ERROR_INFO      25      /* get processor MCA info and static state */
+#define PAL_MC_RESUME          26      /* Return to interrupted process */
+#define PAL_MC_REGISTER_MEM    27      /* Register memory for PAL to use during MCAs and inits */
+#define PAL_HALT               28      /* enter the low power HALT state */
+#define PAL_HALT_LIGHT         29      /* enter the low power light halt state*/
+#define PAL_COPY_INFO          30      /* returns info needed to relocate PAL */
+#define PAL_CACHE_LINE_INIT    31      /* init tags & data of cache line */
+#define PAL_PMI_ENTRYPOINT     32      /* register PMI memory entry points with the processor */
+#define PAL_ENTER_IA_32_ENV    33      /* enter IA-32 system environment */
+#define PAL_VM_PAGE_SIZE       34      /* return vm TC and page walker page sizes */
+
+#define PAL_MEM_FOR_TEST       37      /* get amount of memory needed for late processor test */
+#define PAL_CACHE_PROT_INFO    38      /* get i/d cache protection info */
+#define PAL_REGISTER_INFO      39      /* return AR and CR register information*/
+#define PAL_SHUTDOWN           40      /* enter processor shutdown state */
+#define PAL_PREFETCH_VISIBILITY        41      /* Make Processor Prefetches Visible */
+
+#define PAL_COPY_PAL           256     /* relocate PAL procedures and PAL PMI */
+#define PAL_HALT_INFO          257     /* return the low power capabilities of processor */
+#define PAL_TEST_PROC          258     /* perform late processor self-test */
+#define PAL_CACHE_READ         259     /* read tag & data of cacheline for diagnostic testing */
+#define PAL_CACHE_WRITE                260     /* write tag & data of cacheline for diagnostic testing */
+#define PAL_VM_TR_READ         261     /* read contents of translation register */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/fpu.h>
+
+/*
+ * Data types needed to pass information into PAL procedures and
+ * interpret information returned by them.
+ */
+
+/* Return status from the PAL procedure */
+typedef s64                            pal_status_t;
+
+#define PAL_STATUS_SUCCESS             0       /* No error */
+#define PAL_STATUS_UNIMPLEMENTED       (-1)    /* Unimplemented procedure */
+#define PAL_STATUS_EINVAL              (-2)    /* Invalid argument */
+#define PAL_STATUS_ERROR               (-3)    /* Error */
+#define PAL_STATUS_CACHE_INIT_FAIL     (-4)    /* Could not initialize the
+                                                * specified level and type of
+                                                * cache without sideeffects
+                                                * and "restrict" was 1
+                                                */
+
+/* Processor cache level in the hierarchy */
+typedef u64                            pal_cache_level_t;
+#define PAL_CACHE_LEVEL_L0             0       /* L0 */
+#define PAL_CACHE_LEVEL_L1             1       /* L1 */
+#define PAL_CACHE_LEVEL_L2             2       /* L2 */
+
+
+/* Processor cache type at a particular level in the hierarchy */
+
+typedef u64                            pal_cache_type_t;
+#define PAL_CACHE_TYPE_INSTRUCTION     1       /* Instruction cache */
+#define PAL_CACHE_TYPE_DATA            2       /* Data or unified cache */
+#define PAL_CACHE_TYPE_INSTRUCTION_DATA        3       /* Both Data & Instruction */
+
+
+#define PAL_CACHE_FLUSH_INVALIDATE     1       /* Invalidate clean lines */
+#define PAL_CACHE_FLUSH_CHK_INTRS      2       /* check for interrupts/mc while flushing */
+
+/* Processor cache line size in bytes  */
+typedef int                            pal_cache_line_size_t;
+
+/* Processor cache line state */
+typedef u64                            pal_cache_line_state_t;
+#define PAL_CACHE_LINE_STATE_INVALID   0       /* Invalid */
+#define PAL_CACHE_LINE_STATE_SHARED    1       /* Shared */
+#define PAL_CACHE_LINE_STATE_EXCLUSIVE 2       /* Exclusive */
+#define PAL_CACHE_LINE_STATE_MODIFIED  3       /* Modified */
+
+typedef struct pal_freq_ratio {
+       u64 den : 32, num : 32; /* numerator & denominator */
+} itc_ratio, proc_ratio;
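
pal_freq_ratio packs a 32-bit numerator/denominator pair; a small helper (an assumption for illustration, not part of the header) shows how such a ratio would scale a base frequency reported by PAL_FREQ_BASE:

static inline u64 scale_by_ratio(u64 base_hz, struct pal_freq_ratio r)
{
        return base_hz * r.num / r.den;         /* e.g. ITC frequency = base * num / den */
}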
+
+typedef        union  pal_cache_config_info_1_s {
+       struct {
+               u64             u               : 1,    /* 0 Unified cache ? */
+                               at              : 2,    /* 2-1 Cache mem attr*/
+                               reserved        : 5,    /* 7-3 Reserved */
+                               associativity   : 8,    /* 16-8 Associativity*/
+                               line_size       : 8,    /* 23-17 Line size */
+                               stride          : 8,    /* 31-24 Stride */
+                               store_latency   : 8,    /*39-32 Store latency*/
+                               load_latency    : 8,    /* 47-40 Load latency*/
+                               store_hints     : 8,    /* 55-48 Store hints*/
+                               load_hints      : 8;    /* 63-56 Load hints */
+       } pcci1_bits;
+       u64                     pcci1_data;
+} pal_cache_config_info_1_t;
+
+typedef        union  pal_cache_config_info_2_s {
+       struct {
+               u64             cache_size      : 32,   /*cache size in bytes*/
+
+
+                               alias_boundary  : 8,    /* 39-32 aliased addr
+                                                        * separation for max
+                                                        * performance.
+                                                        */
+                               tag_ls_bit      : 8,    /* 47-40 LSb of addr*/
+                               tag_ms_bit      : 8,    /* 55-48 MSb of addr*/
+                               reserved        : 8;    /* 63-56 Reserved */
+       } pcci2_bits;
+       u64                     pcci2_data;
+} pal_cache_config_info_2_t;
+
+
+typedef struct pal_cache_config_info_s {
+       pal_status_t                    pcci_status;
+       pal_cache_config_info_1_t       pcci_info_1;
+       pal_cache_config_info_2_t       pcci_info_2;
+       u64                             pcci_reserved;
+} pal_cache_config_info_t;
+
+#define pcci_ld_hints          pcci_info_1.pcci1_bits.load_hints
+#define pcci_st_hints          pcci_info_1.pcci1_bits.store_hints
+#define pcci_ld_latency                pcci_info_1.pcci1_bits.load_latency
+#define pcci_st_latency                pcci_info_1.pcci1_bits.store_latency
+#define pcci_stride            pcci_info_1.pcci1_bits.stride
+#define pcci_line_size         pcci_info_1.pcci1_bits.line_size
+#define pcci_assoc             pcci_info_1.pcci1_bits.associativity
+#define pcci_cache_attr                pcci_info_1.pcci1_bits.at
+#define pcci_unified           pcci_info_1.pcci1_bits.u
+#define pcci_tag_msb           pcci_info_2.pcci2_bits.tag_ms_bit
+#define pcci_tag_lsb           pcci_info_2.pcci2_bits.tag_ls_bit
+#define pcci_alias_boundary    pcci_info_2.pcci2_bits.alias_boundary
+#define pcci_cache_size                pcci_info_2.pcci2_bits.cache_size
+
+
+
+/* Possible values for cache attributes */
+
+#define PAL_CACHE_ATTR_WT              0       /* Write through cache */
+#define PAL_CACHE_ATTR_WB              1       /* Write back cache */
+#define PAL_CACHE_ATTR_WT_OR_WB                2       /* Either write thru or write
+                                                * back depending on TLB
+                                                * memory attributes
+                                                */
+
+
+/* Possible values for cache hints */
+
+#define PAL_CACHE_HINT_TEMP_1          0       /* Temporal level 1 */
+#define PAL_CACHE_HINT_NTEMP_1         1       /* Non-temporal level 1 */
+#define PAL_CACHE_HINT_NTEMP_ALL       3       /* Non-temporal all levels */
+
+/* Processor cache protection  information */
+typedef union pal_cache_protection_element_u {
+       u32                     pcpi_data;
+       struct {
+               u32             data_bits       : 8, /* # data bits covered by
+                                                     * each unit of protection
+                                                     */
+
+                               tagprot_lsb     : 6, /* Least -do- */
+                               tagprot_msb     : 6, /* Most Sig. tag address
+                                                     * bit that this
+                                                     * protection covers.
+                                                     */
+                               prot_bits       : 6, /* # of protection bits */
+                               method          : 4, /* Protection method */
+                               t_d             : 2; /* Indicates which part
+                                                     * of the cache this
+                                                     * protection encoding
+                                                     * applies to.
+                                                     */
+       } pcp_info;
+} pal_cache_protection_element_t;
+
+#define pcpi_cache_prot_part   pcp_info.t_d
+#define pcpi_prot_method       pcp_info.method
+#define pcpi_prot_bits         pcp_info.prot_bits
+#define pcpi_tagprot_msb       pcp_info.tagprot_msb
+#define pcpi_tagprot_lsb       pcp_info.tagprot_lsb
+#define pcpi_data_bits         pcp_info.data_bits
+
+/* Processor cache part encodings */
+#define PAL_CACHE_PROT_PART_DATA       0       /* Data protection  */
+#define PAL_CACHE_PROT_PART_TAG                1       /* Tag  protection */
+#define PAL_CACHE_PROT_PART_TAG_DATA   2       /* Tag+data protection (tag is
+                                                * more significant )
+                                                */
+#define PAL_CACHE_PROT_PART_DATA_TAG   3       /* Data+tag protection (data is
+                                                * more significant )
+                                                */
+#define PAL_CACHE_PROT_PART_MAX                6
+
+
+typedef struct pal_cache_protection_info_s {
+       pal_status_t                    pcpi_status;
+       pal_cache_protection_element_t  pcp_info[PAL_CACHE_PROT_PART_MAX];
+} pal_cache_protection_info_t;
+
+
+/* Processor cache protection method encodings */
+#define PAL_CACHE_PROT_METHOD_NONE             0       /* No protection */
+#define PAL_CACHE_PROT_METHOD_ODD_PARITY       1       /* Odd parity */
+#define PAL_CACHE_PROT_METHOD_EVEN_PARITY      2       /* Even parity */
+#define PAL_CACHE_PROT_METHOD_ECC              3       /* ECC protection */
+
+
+/* Processor cache line identification in the hierarchy */
+typedef union pal_cache_line_id_u {
+       u64                     pclid_data;
+       struct {
+               u64             cache_type      : 8,    /* 7-0 cache type */
+                               level           : 8,    /* 15-8 level of the
+                                                        * cache in the
+                                                        * hierarchy.
+                                                        */
+                               way             : 8,    /* 23-16 way in the set
+                                                        */
+                               part            : 8,    /* 31-24 part of the
+                                                        * cache
+                                                        */
+                               reserved        : 32;   /* 63-32 is reserved*/
+       } pclid_info_read;
+       struct {
+               u64             cache_type      : 8,    /* 7-0 cache type */
+                               level           : 8,    /* 15-8 level of the
+                                                        * cache in the
+                                                        * hierarchy.
+                                                        */
+                               way             : 8,    /* 23-16 way in the set
+                                                        */
+                               part            : 8,    /* 31-24 part of the
+                                                        * cache
+                                                        */
+                               mesi            : 8,    /* 39-32 cache line
+                                                        * state
+                                                        */
+                               start           : 8,    /* 47-40 lsb of data to
+                                                        * invert
+                                                        */
+                               length          : 8,    /* 55-48 #bits to
+                                                        * invert
+                                                        */
+                               trigger         : 8;    /* 63-56 Trigger error
+                                                        * by doing a load
+                                                        * after the write
+                                                        */
+
+       } pclid_info_write;
+} pal_cache_line_id_u_t;
+
+#define pclid_read_part                pclid_info_read.part
+#define pclid_read_way         pclid_info_read.way
+#define pclid_read_level       pclid_info_read.level
+#define pclid_read_cache_type  pclid_info_read.cache_type
+
+#define pclid_write_trigger    pclid_info_write.trigger
+#define pclid_write_length     pclid_info_write.length
+#define pclid_write_start      pclid_info_write.start
+#define pclid_write_mesi       pclid_info_write.mesi
+#define pclid_write_part       pclid_info_write.part
+#define pclid_write_way                pclid_info_write.way
+#define pclid_write_level      pclid_info_write.level
+#define pclid_write_cache_type pclid_info_write.cache_type
+
+/* Processor cache line part encodings */
+#define PAL_CACHE_LINE_ID_PART_DATA            0       /* Data */
+#define PAL_CACHE_LINE_ID_PART_TAG             1       /* Tag */
+#define PAL_CACHE_LINE_ID_PART_DATA_PROT       2       /* Data protection */
+#define PAL_CACHE_LINE_ID_PART_TAG_PROT                3       /* Tag protection */
+#define PAL_CACHE_LINE_ID_PART_DATA_TAG_PROT   4       /* Data+tag
+                                                        * protection
+                                                        */
+typedef struct pal_cache_line_info_s {
+       pal_status_t            pcli_status;            /* Return status of the read cache line
+                                                        * info call.
+                                                        */
+       u64                     pcli_data;              /* 64-bit data, tag, protection bits .. */
+       u64                     pcli_data_len;          /* data length in bits */
+       pal_cache_line_state_t  pcli_cache_line_state;  /* mesi state */
+
+} pal_cache_line_info_t;
+
+
+/* Machine Check related crap */
+
+/* Pending event status bits  */
+typedef u64                                    pal_mc_pending_events_t;
+
+#define PAL_MC_PENDING_MCA                     (1 << 0)
+#define PAL_MC_PENDING_INIT                    (1 << 1)
+
+/* Error information type */
+typedef u64                                    pal_mc_info_index_t;
+
+#define PAL_MC_INFO_PROCESSOR                  0       /* Processor */
+#define PAL_MC_INFO_CACHE_CHECK                        1       /* Cache check */
+#define PAL_MC_INFO_TLB_CHECK                  2       /* Tlb check */
+#define PAL_MC_INFO_BUS_CHECK                  3       /* Bus check */
+#define PAL_MC_INFO_REQ_ADDR                   4       /* Requestor address */
+#define PAL_MC_INFO_RESP_ADDR                  5       /* Responder address */
+#define PAL_MC_INFO_TARGET_ADDR                        6       /* Target address */
+#define PAL_MC_INFO_IMPL_DEP                   7       /* Implementation
+                                                        * dependent
+                                                        */
+
+
+typedef struct pal_process_state_info_s {
+       u64             reserved1       : 2,
+                       rz              : 1,    /* PAL_CHECK processor
+                                                * rendezvous
+                                                * successful.
+                                                */
+
+                       ra              : 1,    /* PAL_CHECK attempted
+                                                * a rendezvous.
+                                                */
+                       me              : 1,    /* Distinct multiple
+                                                * errors occurred
+                                                */
+
+                       mn              : 1,    /* Min. state save
+                                                * area has been
+                                                * registered with PAL
+                                                */
+
+                       sy              : 1,    /* Storage integrity
+                                                * synched
+                                                */
+
+
+                       co              : 1,    /* Continuable */
+                       ci              : 1,    /* MC isolated */
+                       us              : 1,    /* Uncontained storage
+                                                * damage.
+                                                */
+
+
+                       hd              : 1,    /* Non-essential hw
+                                                * lost (no loss of
+                                                * functionality)
+                                                * causing the
+                                                * processor to run in
+                                                * degraded mode.
+                                                */
+
+                       tl              : 1,    /* 1 => MC occurred
+                                                * after an instr was
+                                                * executed but before
+                                                * the trap that
+                                                * resulted from instr
+                                                * execution was
+                                                * generated.
+                                                * (Trap Lost )
+                                                */
+                       mi              : 1,    /* More information available
+                                                * call PAL_MC_ERROR_INFO
+                                                */
+                       pi              : 1,    /* Precise instruction pointer */
+                       pm              : 1,    /* Precise min-state save area */
+
+                       dy              : 1,    /* Processor dynamic
+                                                * state valid
+                                                */
+
+
+                       in              : 1,    /* 0 = MC, 1 = INIT */
+                       rs              : 1,    /* RSE valid */
+                       cm              : 1,    /* MC corrected */
+                       ex              : 1,    /* MC is expected */
+                       cr              : 1,    /* Control regs valid*/
+                       pc              : 1,    /* Perf cntrs valid */
+                       dr              : 1,    /* Debug regs valid */
+                       tr              : 1,    /* Translation regs
+                                                * valid
+                                                */
+                       rr              : 1,    /* Region regs valid */
+                       ar              : 1,    /* App regs valid */
+                       br              : 1,    /* Branch regs valid */
+                       pr              : 1,    /* Predicate registers
+                                                * valid
+                                                */
+
+                       fp              : 1,    /* fp registers valid*/
+                       b1              : 1,    /* Preserved bank one
+                                                * general registers
+                                                * are valid
+                                                */
+                       b0              : 1,    /* Preserved bank zero
+                                                * general registers
+                                                * are valid
+                                                */
+                       gr              : 1,    /* General registers
+                                                * are valid
+                                                * (excl. banked regs)
+                                                */
+                       dsize           : 16,   /* size of dynamic
+                                                * state returned
+                                                * by the processor
+                                                */
+
+                       reserved2       : 11,
+                       cc              : 1,    /* Cache check */
+                       tc              : 1,    /* TLB check */
+                       bc              : 1,    /* Bus check */
+                       rc              : 1,    /* Register file check */
+                       uc              : 1;    /* Uarch check */
+
+} pal_processor_state_info_t;
+
+typedef struct pal_cache_check_info_s {
+       u64             op              : 4,    /* Type of cache
+                                                * operation that
+                                                * caused the machine
+                                                * check.
+                                                */
+                       level           : 2,    /* Cache level */
+                       reserved1       : 2,
+                       dl              : 1,    /* Failure in data part
+                                                * of cache line
+                                                */
+                       tl              : 1,    /* Failure in tag part
+                                                * of cache line
+                                                */
+                       dc              : 1,    /* Failure in dcache */
+                       ic              : 1,    /* Failure in icache */
+                       mesi            : 3,    /* Cache line state */
+                       mv              : 1,    /* mesi valid */
+                       way             : 5,    /* Way in which the
+                                                * error occurred
+                                                */
+                       wiv             : 1,    /* Way field valid */
+                       reserved2       : 10,
+
+                       index           : 20,   /* Cache line index */
+                       reserved3       : 2,
+
+                       is              : 1,    /* instruction set (1 == ia32) */
+                       iv              : 1,    /* instruction set field valid */
+                       pl              : 2,    /* privilege level */
+                       pv              : 1,    /* privilege level field valid */
+                       mcc             : 1,    /* Machine check corrected */
+                       tv              : 1,    /* Target address
+                                                * structure is valid
+                                                */
+                       rq              : 1,    /* Requester identifier
+                                                * structure is valid
+                                                */
+                       rp              : 1,    /* Responder identifier
+                                                * structure is valid
+                                                */
+                       pi              : 1;    /* Precise instruction pointer
+                                                * structure is valid
+                                                */
+} pal_cache_check_info_t;
+
+typedef struct pal_tlb_check_info_s {
+
+       u64             tr_slot         : 8,    /* Slot# of TR where
+                                                * error occurred
+                                                */
+                       trv             : 1,    /* tr_slot field is valid */
+                       reserved1       : 1,
+                       level           : 2,    /* TLB level where failure occurred */
+                       reserved2       : 4,
+                       dtr             : 1,    /* Fail in data TR */
+                       itr             : 1,    /* Fail in inst TR */
+                       dtc             : 1,    /* Fail in data TC */
+                       itc             : 1,    /* Fail in inst. TC */
+                       op              : 4,    /* Cache operation */
+                       reserved3       : 30,
+
+                       is              : 1,    /* instruction set (1 == ia32) */
+                       iv              : 1,    /* instruction set field valid */
+                       pl              : 2,    /* privilege level */
+                       pv              : 1,    /* privilege level field valid */
+                       mcc             : 1,    /* Machine check corrected */
+                       tv              : 1,    /* Target address
+                                                * structure is valid
+                                                */
+                       rq              : 1,    /* Requester identifier
+                                                * structure is valid
+                                                */
+                       rp              : 1,    /* Responder identifier
+                                                * structure is valid
+                                                */
+                       pi              : 1;    /* Precise instruction pointer
+                                                * structure is valid
+                                                */
+} pal_tlb_check_info_t;
+
+typedef struct pal_bus_check_info_s {
+       u64             size            : 5,    /* Xaction size */
+                       ib              : 1,    /* Internal bus error */
+                       eb              : 1,    /* External bus error */
+                       cc              : 1,    /* Error occurred
+                                                * during cache-cache
+                                                * transfer.
+                                                */
+                       type            : 8,    /* Bus xaction type*/
+                       sev             : 5,    /* Bus error severity*/
+                       hier            : 2,    /* Bus hierarchy level */
+                       reserved1       : 1,
+                       bsi             : 8,    /* Bus error status
+                                                * info
+                                                */
+                       reserved2       : 22,
+
+                       is              : 1,    /* instruction set (1 == ia32) */
+                       iv              : 1,    /* instruction set field valid */
+                       pl              : 2,    /* privilege level */
+                       pv              : 1,    /* privilege level field valid */
+                       mcc             : 1,    /* Machine check corrected */
+                       tv              : 1,    /* Target address
+                                                * structure is valid
+                                                */
+                       rq              : 1,    /* Requester identifier
+                                                * structure is valid
+                                                */
+                       rp              : 1,    /* Responder identifier
+                                                * structure is valid
+                                                */
+                       pi              : 1;    /* Precise instruction pointer
+                                                * structure is valid
+                                                */
+} pal_bus_check_info_t;
+
+typedef struct pal_reg_file_check_info_s {
+       u64             id              : 4,    /* Register file identifier */
+                       op              : 4,    /* Type of register
+                                                * operation that
+                                                * caused the machine
+                                                * check.
+                                                */
+                       reg_num         : 7,    /* Register number */
+                       rnv             : 1,    /* reg_num valid */
+                       reserved2       : 38,
+
+                       is              : 1,    /* instruction set (1 == ia32) */
+                       iv              : 1,    /* instruction set field valid */
+                       pl              : 2,    /* privilege level */
+                       pv              : 1,    /* privilege level field valid */
+                       mcc             : 1,    /* Machine check corrected */
+                       reserved3       : 3,
+                       pi              : 1;    /* Precise instruction pointer
+                                                * structure is valid
+                                                */
+} pal_reg_file_check_info_t;
+
+typedef struct pal_uarch_check_info_s {
+       u64             sid             : 5,    /* Structure identification */
+                       level           : 3,    /* Level of failure */
+                       array_id        : 4,    /* Array identification */
+                       op              : 4,    /* Type of
+                                                * operation that
+                                                * caused the machine
+                                                * check.
+                                                */
+                       way             : 6,    /* Way of structure */
+                       wv              : 1,    /* way valid */
+                       xv              : 1,    /* index valid */
+                       reserved1       : 8,
+                       index           : 8,    /* Index or set of the uarch
+                                                * structure that failed.
+                                                */
+                       reserved2       : 24,
+
+                       is              : 1,    /* instruction set (1 == ia32) */
+                       iv              : 1,    /* instruction set field valid */
+                       pl              : 2,    /* privilege level */
+                       pv              : 1,    /* privilege level field valid */
+                       mcc             : 1,    /* Machine check corrected */
+                       tv              : 1,    /* Target address
+                                                * structure is valid
+                                                */
+                       rq              : 1,    /* Requester identifier
+                                                * structure is valid
+                                                */
+                       rp              : 1,    /* Responder identifier
+                                                * structure is valid
+                                                */
+                       pi              : 1;    /* Precise instruction pointer
+                                                * structure is valid
+                                                */
+} pal_uarch_check_info_t;
+
+typedef union pal_mc_error_info_u {
+       u64                             pmei_data;
+       pal_processor_state_info_t      pme_processor;
+       pal_cache_check_info_t          pme_cache;
+       pal_tlb_check_info_t            pme_tlb;
+       pal_bus_check_info_t            pme_bus;
+       pal_reg_file_check_info_t       pme_reg_file;
+       pal_uarch_check_info_t          pme_uarch;
+} pal_mc_error_info_t;
+
+#define pmci_proc_unknown_check                        pme_processor.uc
+#define pmci_proc_bus_check                    pme_processor.bc
+#define pmci_proc_tlb_check                    pme_processor.tc
+#define pmci_proc_cache_check                  pme_processor.cc
+#define pmci_proc_dynamic_state_size           pme_processor.dsize
+#define pmci_proc_gpr_valid                    pme_processor.gr
+#define pmci_proc_preserved_bank0_gpr_valid    pme_processor.b0
+#define pmci_proc_preserved_bank1_gpr_valid    pme_processor.b1
+#define pmci_proc_fp_valid                     pme_processor.fp
+#define pmci_proc_predicate_regs_valid         pme_processor.pr
+#define pmci_proc_branch_regs_valid            pme_processor.br
+#define pmci_proc_app_regs_valid               pme_processor.ar
+#define pmci_proc_region_regs_valid            pme_processor.rr
+#define pmci_proc_translation_regs_valid       pme_processor.tr
+#define pmci_proc_debug_regs_valid             pme_processor.dr
+#define pmci_proc_perf_counters_valid          pme_processor.pc
+#define pmci_proc_control_regs_valid           pme_processor.cr
+#define pmci_proc_machine_check_expected       pme_processor.ex
+#define pmci_proc_machine_check_corrected      pme_processor.cm
+#define pmci_proc_rse_valid                    pme_processor.rs
+#define pmci_proc_machine_check_or_init                pme_processor.in
+#define pmci_proc_dynamic_state_valid          pme_processor.dy
+#define pmci_proc_operation                    pme_processor.op
+#define pmci_proc_trap_lost                    pme_processor.tl
+#define pmci_proc_hardware_damage              pme_processor.hd
+#define pmci_proc_uncontained_storage_damage   pme_processor.us
+#define pmci_proc_machine_check_isolated       pme_processor.ci
+#define pmci_proc_continuable                  pme_processor.co
+#define pmci_proc_storage_intergrity_synced    pme_processor.sy
+#define pmci_proc_min_state_save_area_regd     pme_processor.mn
+#define        pmci_proc_distinct_multiple_errors      pme_processor.me
+#define pmci_proc_pal_attempted_rendezvous     pme_processor.ra
+#define pmci_proc_pal_rendezvous_complete      pme_processor.rz
+
+
+#define pmci_cache_level                       pme_cache.level
+#define pmci_cache_line_state                  pme_cache.mesi
+#define pmci_cache_line_state_valid            pme_cache.mv
+#define pmci_cache_line_index                  pme_cache.index
+#define pmci_cache_instr_cache_fail            pme_cache.ic
+#define pmci_cache_data_cache_fail             pme_cache.dc
+#define pmci_cache_line_tag_fail               pme_cache.tl
+#define pmci_cache_line_data_fail              pme_cache.dl
+#define pmci_cache_operation                   pme_cache.op
+#define pmci_cache_way_valid                   pme_cache.wv
+#define pmci_cache_target_address_valid                pme_cache.tv
+#define pmci_cache_way                         pme_cache.way
+#define pmci_cache_mc                          pme_cache.mc
+
+#define pmci_tlb_instr_translation_cache_fail  pme_tlb.itc
+#define pmci_tlb_data_translation_cache_fail   pme_tlb.dtc
+#define pmci_tlb_instr_translation_reg_fail    pme_tlb.itr
+#define pmci_tlb_data_translation_reg_fail     pme_tlb.dtr
+#define pmci_tlb_translation_reg_slot          pme_tlb.tr_slot
+#define pmci_tlb_mc                            pme_tlb.mc
+
+#define pmci_bus_status_info                   pme_bus.bsi
+#define pmci_bus_req_address_valid             pme_bus.rq
+#define pmci_bus_resp_address_valid            pme_bus.rp
+#define pmci_bus_target_address_valid          pme_bus.tv
+#define pmci_bus_error_severity                        pme_bus.sev
+#define pmci_bus_transaction_type              pme_bus.type
+#define pmci_bus_cache_cache_transfer          pme_bus.cc
+#define pmci_bus_transaction_size              pme_bus.size
+#define pmci_bus_internal_error                        pme_bus.ib
+#define pmci_bus_external_error                        pme_bus.eb
+#define pmci_bus_mc                            pme_bus.mc
+
+/*
+ * NOTE: this min_state_save area struct only includes the 1KB
+ * architectural state save area.  The other 3 KB is scratch space
+ * for PAL.
+ */
+
+typedef struct pal_min_state_area_s {
+       u64     pmsa_nat_bits;          /* nat bits for saved GRs  */
+       u64     pmsa_gr[15];            /* GR1  - GR15             */
+       u64     pmsa_bank0_gr[16];      /* GR16 - GR31             */
+       u64     pmsa_bank1_gr[16];      /* GR16 - GR31             */
+       u64     pmsa_pr;                /* predicate registers     */
+       u64     pmsa_br0;               /* branch register 0       */
+       u64     pmsa_rsc;               /* ar.rsc                  */
+       u64     pmsa_iip;               /* cr.iip                  */
+       u64     pmsa_ipsr;              /* cr.ipsr                 */
+       u64     pmsa_ifs;               /* cr.ifs                  */
+       u64     pmsa_xip;               /* previous iip            */
+       u64     pmsa_xpsr;              /* previous psr            */
+       u64     pmsa_xfs;               /* previous ifs            */
+       u64     pmsa_br1;               /* branch register 1       */
+       u64     pmsa_reserved[70];      /* pal_min_state_area should total to 1KB */
+} pal_min_state_area_t;
+
+
+struct ia64_pal_retval {
+       /*
+        * A zero status value indicates call completed without error.
+        * A negative status value indicates reason of call failure.
+        * A positive status value indicates success but an
+        * informational value should be printed (e.g., "reboot for
+        * change to take effect").
+        */
+       s64 status;
+       u64 v0;
+       u64 v1;
+       u64 v2;
+};
+
+/*
+ * Note: Currently unused PAL arguments are generally labeled
+ * "reserved" so the value specified in the PAL documentation
+ * (generally 0) MUST be passed.  Reserved parameters are not optional
+ * parameters.
+ */
+extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_stacked (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_phys_static (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_phys_stacked (u64, u64, u64, u64);
+extern void ia64_save_scratch_fpregs (struct ia64_fpreg *);
+extern void ia64_load_scratch_fpregs (struct ia64_fpreg *);
+
+#define PAL_CALL(iprv,a0,a1,a2,a3) do {                        \
+       struct ia64_fpreg fr[6];                        \
+       ia64_save_scratch_fpregs(fr);                   \
+       iprv = ia64_pal_call_static(a0, a1, a2, a3, 0); \
+       ia64_load_scratch_fpregs(fr);                   \
+} while (0)
+
+#define PAL_CALL_IC_OFF(iprv,a0,a1,a2,a3) do {         \
+       struct ia64_fpreg fr[6];                        \
+       ia64_save_scratch_fpregs(fr);                   \
+       iprv = ia64_pal_call_static(a0, a1, a2, a3, 1); \
+       ia64_load_scratch_fpregs(fr);                   \
+} while (0)
+
+#define PAL_CALL_STK(iprv,a0,a1,a2,a3) do {            \
+       struct ia64_fpreg fr[6];                        \
+       ia64_save_scratch_fpregs(fr);                   \
+       iprv = ia64_pal_call_stacked(a0, a1, a2, a3);   \
+       ia64_load_scratch_fpregs(fr);                   \
+} while (0)
+
+#define PAL_CALL_PHYS(iprv,a0,a1,a2,a3) do {                   \
+       struct ia64_fpreg fr[6];                                \
+       ia64_save_scratch_fpregs(fr);                           \
+       iprv = ia64_pal_call_phys_static(a0, a1, a2, a3);       \
+       ia64_load_scratch_fpregs(fr);                           \
+} while (0)
+
+#define PAL_CALL_PHYS_STK(iprv,a0,a1,a2,a3) do {               \
+       struct ia64_fpreg fr[6];                                \
+       ia64_save_scratch_fpregs(fr);                           \
+       iprv = ia64_pal_call_phys_stacked(a0, a1, a2, a3);      \
+       ia64_load_scratch_fpregs(fr);                           \
+} while (0)
+
+typedef int (*ia64_pal_handler) (u64, ...);
+extern ia64_pal_handler ia64_pal;
+extern void ia64_pal_handler_init (void *);
+
+extern ia64_pal_handler ia64_pal;
+
+extern pal_cache_config_info_t         l0d_cache_config_info;
+extern pal_cache_config_info_t         l0i_cache_config_info;
+extern pal_cache_config_info_t         l1_cache_config_info;
+extern pal_cache_config_info_t         l2_cache_config_info;
+
+extern pal_cache_protection_info_t     l0d_cache_protection_info;
+extern pal_cache_protection_info_t     l0i_cache_protection_info;
+extern pal_cache_protection_info_t     l1_cache_protection_info;
+extern pal_cache_protection_info_t     l2_cache_protection_info;
+
+extern pal_cache_config_info_t         pal_cache_config_info_get(pal_cache_level_t,
+                                                                 pal_cache_type_t);
+
+extern pal_cache_protection_info_t     pal_cache_protection_info_get(pal_cache_level_t,
+                                                                     pal_cache_type_t);
+
+
+extern void                            pal_error(int);
+
+
+/* Useful wrappers for the current list of pal procedures */
+
+typedef union pal_bus_features_u {
+       u64     pal_bus_features_val;
+       struct {
+               u64     pbf_reserved1                           :       29;
+               u64     pbf_req_bus_parking                     :       1;
+               u64     pbf_bus_lock_mask                       :       1;
+               u64     pbf_enable_half_xfer_rate               :       1;
+               u64     pbf_reserved2                           :       22;
+               u64     pbf_disable_xaction_queueing            :       1;
+               u64     pbf_disable_resp_err_check              :       1;
+               u64     pbf_disable_berr_check                  :       1;
+               u64     pbf_disable_bus_req_internal_err_signal :       1;
+               u64     pbf_disable_bus_req_berr_signal         :       1;
+               u64     pbf_disable_bus_init_event_check        :       1;
+               u64     pbf_disable_bus_init_event_signal       :       1;
+               u64     pbf_disable_bus_addr_err_check          :       1;
+               u64     pbf_disable_bus_addr_err_signal         :       1;
+               u64     pbf_disable_bus_data_err_check          :       1;
+       } pal_bus_features_s;
+} pal_bus_features_u_t;
+
+extern void pal_bus_features_print (u64);
+
+/* Provide information about configurable processor bus features */
+static inline s64
+ia64_pal_bus_get_features (pal_bus_features_u_t *features_avail,
+                          pal_bus_features_u_t *features_status,
+                          pal_bus_features_u_t *features_control)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_PHYS(iprv, PAL_BUS_GET_FEATURES, 0, 0, 0);
+       if (features_avail)
+               features_avail->pal_bus_features_val = iprv.v0;
+       if (features_status)
+               features_status->pal_bus_features_val = iprv.v1;
+       if (features_control)
+               features_control->pal_bus_features_val = iprv.v2;
+       return iprv.status;
+}
+
+/* Enables/disables specific processor bus features */
+static inline s64
+ia64_pal_bus_set_features (pal_bus_features_u_t feature_select)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_PHYS(iprv, PAL_BUS_SET_FEATURES, feature_select.pal_bus_features_val, 0, 0);
+       return iprv.status;
+}
+
+/* Get detailed cache information */
+static inline s64
+ia64_pal_cache_config_info (u64 cache_level, u64 cache_type, pal_cache_config_info_t *conf)
+{
+       struct ia64_pal_retval iprv;
+
+       PAL_CALL(iprv, PAL_CACHE_INFO, cache_level, cache_type, 0);
+
+       if (iprv.status == 0) {
+               conf->pcci_status                 = iprv.status;
+               conf->pcci_info_1.pcci1_data      = iprv.v0;
+               conf->pcci_info_2.pcci2_data      = iprv.v1;
+               conf->pcci_reserved               = iprv.v2;
+       }
+       return iprv.status;
+
+}
+
+/* Get detailed cache protection information */
+static inline s64
+ia64_pal_cache_prot_info (u64 cache_level, u64 cache_type, pal_cache_protection_info_t *prot)
+{
+       struct ia64_pal_retval iprv;
+
+       PAL_CALL(iprv, PAL_CACHE_PROT_INFO, cache_level, cache_type, 0);
+
+       if (iprv.status == 0) {
+               prot->pcpi_status           = iprv.status;
+               prot->pcp_info[0].pcpi_data = iprv.v0 & 0xffffffff;
+               prot->pcp_info[1].pcpi_data = iprv.v0 >> 32;
+               prot->pcp_info[2].pcpi_data = iprv.v1 & 0xffffffff;
+               prot->pcp_info[3].pcpi_data = iprv.v1 >> 32;
+               prot->pcp_info[4].pcpi_data = iprv.v2 & 0xffffffff;
+               prot->pcp_info[5].pcpi_data = iprv.v2 >> 32;
+       }
+       return iprv.status;
+}
+
+/*
+ * Flush the processor instruction or data caches.  *PROGRESS must be
+ * initialized to zero before calling this for the first time..
+ */
+static inline s64
+ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_IC_OFF(iprv, PAL_CACHE_FLUSH, cache_type, invalidate, *progress);
+       if (vector)
+               *vector = iprv.v0;
+       *progress = iprv.v1;
+       return iprv.status;
+}
+
+
+/* Initialize the processor controlled caches */
+static inline s64
+ia64_pal_cache_init (u64 level, u64 cache_type, u64 rest)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, rest);
+       return iprv.status;
+}
+
+/* Initialize the tags and data of a data or unified cache line of
+ * processor controlled cache to known values without the availability
+ * of backing memory.
+ */
+static inline s64
+ia64_pal_cache_line_init (u64 physical_addr, u64 data_value)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_CACHE_LINE_INIT, physical_addr, data_value, 0);
+       return iprv.status;
+}
+
+
+/* Read the data and tag of a processor controlled cache line for diags */
+static inline s64
+ia64_pal_cache_read (pal_cache_line_id_u_t line_id, u64 physical_addr)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_CACHE_READ, line_id.pclid_data, physical_addr, 0);
+       return iprv.status;
+}
+
+/* Return summary information about the hierarchy of caches controlled by the processor */
+static inline s64
+ia64_pal_cache_summary (u64 *cache_levels, u64 *unique_caches)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_CACHE_SUMMARY, 0, 0, 0);
+       if (cache_levels)
+               *cache_levels = iprv.v0;
+       if (unique_caches)
+               *unique_caches = iprv.v1;
+       return iprv.status;
+}
+
+/* Write the data and tag of a processor-controlled cache line for diags */
+static inline s64
+ia64_pal_cache_write (pal_cache_line_id_u_t line_id, u64 physical_addr, u64 data)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_CACHE_WRITE, line_id.pclid_data, physical_addr, data);
+       return iprv.status;
+}
+
+
+/* Return the parameters needed to copy relocatable PAL procedures from ROM to memory */
+static inline s64
+ia64_pal_copy_info (u64 copy_type, u64 num_procs, u64 num_iopics,
+                   u64 *buffer_size, u64 *buffer_align)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_COPY_INFO, copy_type, num_procs, num_iopics);
+       if (buffer_size)
+               *buffer_size = iprv.v0;
+       if (buffer_align)
+               *buffer_align = iprv.v1;
+       return iprv.status;
+}
+
+/* Copy relocatable PAL procedures from ROM to memory */
+static inline s64
+ia64_pal_copy_pal (u64 target_addr, u64 alloc_size, u64 processor, u64 *pal_proc_offset)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_COPY_PAL, target_addr, alloc_size, processor);
+       if (pal_proc_offset)
+               *pal_proc_offset = iprv.v0;
+       return iprv.status;
+}
+
+/* Return the number of instruction and data debug register pairs */
+static inline s64
+ia64_pal_debug_info (u64 *inst_regs,  u64 *data_regs)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_DEBUG_INFO, 0, 0, 0);
+       if (inst_regs)
+               *inst_regs = iprv.v0;
+       if (data_regs)
+               *data_regs = iprv.v1;
+
+       return iprv.status;
+}
+
+#ifdef TBD
+/* Switch from IA64-system environment to IA-32 system environment */
+static inline s64
+ia64_pal_enter_ia32_env (ia32_env1, ia32_env2, ia32_env3)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_ENTER_IA_32_ENV, ia32_env1, ia32_env2, ia32_env3);
+       return iprv.status;
+}
+#endif
+
+/* Get unique geographical address of this processor on its bus */
+static inline s64
+ia64_pal_fixed_addr (u64 *global_unique_addr)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_FIXED_ADDR, 0, 0, 0);
+       if (global_unique_addr)
+               *global_unique_addr = iprv.v0;
+       return iprv.status;
+}
+
+/* Get base frequency of the platform if generated by the processor */
+static inline s64
+ia64_pal_freq_base (u64 *platform_base_freq)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_FREQ_BASE, 0, 0, 0);
+       if (platform_base_freq)
+               *platform_base_freq = iprv.v0;
+       return iprv.status;
+}
+
+/*
+ * Get the ratios for processor frequency, bus frequency and interval timer to
+ * the base frequency of the platform
+ */
+static inline s64
+ia64_pal_freq_ratios (struct pal_freq_ratio *proc_ratio, struct pal_freq_ratio *bus_ratio,
+                     struct pal_freq_ratio *itc_ratio)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_FREQ_RATIOS, 0, 0, 0);
+       if (proc_ratio)
+               *(u64 *)proc_ratio = iprv.v0;
+       if (bus_ratio)
+               *(u64 *)bus_ratio = iprv.v1;
+       if (itc_ratio)
+               *(u64 *)itc_ratio = iprv.v2;
+       return iprv.status;
+}
+
+/* Make the processor enter HALT or one of the implementation dependent low
+ * power states where prefetching and execution are suspended and cache and
+ * TLB coherency is not maintained.
+ */
+static inline s64
+ia64_pal_halt (u64 halt_state)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_HALT, halt_state, 0, 0);
+       return iprv.status;
+}
+
+typedef union pal_power_mgmt_info_u {
+       u64                     ppmi_data;
+       struct {
+              u64              exit_latency            : 16,
+                               entry_latency           : 16,
+                               power_consumption       : 28,
+                               im                      : 1,
+                               co                      : 1,
+                               reserved                : 2;
+       } pal_power_mgmt_info_s;
+} pal_power_mgmt_info_u_t;
+
+/* Return information about processor's optional power management capabilities. */
+static inline s64
+ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_STK(iprv, PAL_HALT_INFO, (unsigned long) power_buf, 0, 0);
+       return iprv.status;
+}
+
+/* Cause the processor to enter LIGHT HALT state, where prefetching and execution are
+ * suspended, but cache and TLB coherency is maintained.
+ */
+static inline s64
+ia64_pal_halt_light (void)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_HALT_LIGHT, 0, 0, 0);
+       return iprv.status;
+}
+
+/* Clear all the processor error logging registers and reset the indicator that allows
+ * the error logging registers to be written. This procedure also checks the pending
+ * machine check bit and pending INIT bit and reports their states.
+ */
+static inline s64
+ia64_pal_mc_clear_log (u64 *pending_vector)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_MC_CLEAR_LOG, 0, 0, 0);
+       if (pending_vector)
+               *pending_vector = iprv.v0;
+       return iprv.status;
+}
+
+/* Ensure that all outstanding transactions in a processor are completed or that any
+ * MCA due to these outstanding transactions is taken.
+ */
+static inline s64
+ia64_pal_mc_drain (void)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_MC_DRAIN, 0, 0, 0);
+       return iprv.status;
+}
+
+/* Return the machine check dynamic processor state */
+static inline s64
+ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, offset, 0, 0);
+       if (size)
+               *size = iprv.v0;
+       if (pds)
+               *pds = iprv.v1;
+       return iprv.status;
+}
+
+/* Return processor machine check information */
+static inline s64
+ia64_pal_mc_error_info (u64 info_index, u64 type_index, u64 *size, u64 *error_info)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_MC_ERROR_INFO, info_index, type_index, 0);
+       if (size)
+               *size = iprv.v0;
+       if (error_info)
+               *error_info = iprv.v1;
+       return iprv.status;
+}
+
+/* Inform PALE_CHECK whether a machine check is expected so that PALE_CHECK will not
+ * attempt to correct any expected machine checks.
+ */
+static inline s64
+ia64_pal_mc_expected (u64 expected, u64 *previous)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_MC_EXPECTED, expected, 0, 0);
+       if (previous)
+               *previous = iprv.v0;
+       return iprv.status;
+}
+
+/* Register a platform dependent location with PAL to which it can save
+ * minimal processor state in the event of a machine check or initialization
+ * event.
+ */
+static inline s64
+ia64_pal_mc_register_mem (u64 physical_addr)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, 0, 0);
+       return iprv.status;
+}
+
+/* Restore minimal architectural processor state, set CMC interrupt if necessary
+ * and resume execution
+ */
+static inline s64
+ia64_pal_mc_resume (u64 set_cmci, u64 save_ptr)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_MC_RESUME, set_cmci, save_ptr, 0);
+       return iprv.status;
+}
+
+/* Return the memory attributes implemented by the processor */
+static inline s64
+ia64_pal_mem_attrib (u64 *mem_attrib)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_MEM_ATTRIB, 0, 0, 0);
+       if (mem_attrib)
+               *mem_attrib = iprv.v0 & 0xff;
+       return iprv.status;
+}
+
+/* Return the amount of memory needed for second phase of processor
+ * self-test and the required alignment of memory.
+ */
+static inline s64
+ia64_pal_mem_for_test (u64 *bytes_needed, u64 *alignment)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_MEM_FOR_TEST, 0, 0, 0);
+       if (bytes_needed)
+               *bytes_needed = iprv.v0;
+       if (alignment)
+               *alignment = iprv.v1;
+       return iprv.status;
+}
+
+typedef union pal_perf_mon_info_u {
+       u64                       ppmi_data;
+       struct {
+              u64              generic         : 8,
+                               width           : 8,
+                               cycles          : 8,
+                               retired         : 8,
+                               reserved        : 32;
+       } pal_perf_mon_info_s;
+} pal_perf_mon_info_u_t;
+
+/* Return the performance monitor information about what can be counted
+ * and how to configure the monitors to count the desired events.
+ */
+static inline s64
+ia64_pal_perf_mon_info (u64 *pm_buffer, pal_perf_mon_info_u_t *pm_info)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_PERF_MON_INFO, (unsigned long) pm_buffer, 0, 0);
+       if (pm_info)
+               pm_info->ppmi_data = iprv.v0;
+       return iprv.status;
+}
+
+/* Specifies the physical address of the processor interrupt block
+ * and I/O port space.
+ */
+static inline s64
+ia64_pal_platform_addr (u64 type, u64 physical_addr)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_PLATFORM_ADDR, type, physical_addr, 0);
+       return iprv.status;
+}
+
+/* Set the SAL PMI entrypoint in memory */
+static inline s64
+ia64_pal_pmi_entrypoint (u64 sal_pmi_entry_addr)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_PMI_ENTRYPOINT, sal_pmi_entry_addr, 0, 0);
+       return iprv.status;
+}
+
+struct pal_features_s;
+/* Provide information about configurable processor features */
+static inline s64
+ia64_pal_proc_get_features (u64 *features_avail,
+                           u64 *features_status,
+                           u64 *features_control)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, 0, 0);
+       if (iprv.status == 0) {
+               *features_avail   = iprv.v0;
+               *features_status  = iprv.v1;
+               *features_control = iprv.v2;
+       }
+       return iprv.status;
+}
+
+/* Enable/disable processor dependent features */
+static inline s64
+ia64_pal_proc_set_features (u64 feature_select)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, feature_select, 0, 0);
+       return iprv.status;
+}
+
+/*
+ * Put everything in a struct so we avoid the global offset table whenever
+ * possible.
+ */
+typedef struct ia64_ptce_info_s {
+       u64             base;
+       u32             count[2];
+       u32             stride[2];
+} ia64_ptce_info_t;
+
+/* Return the information required for the architected loop used to purge
+ * (initialize) the entire TC
+ */
+static inline s64
+ia64_get_ptce (ia64_ptce_info_t *ptce)
+{
+       struct ia64_pal_retval iprv;
+
+       if (!ptce)
+               return -1;
+
+       PAL_CALL(iprv, PAL_PTCE_INFO, 0, 0, 0);
+       if (iprv.status == 0) {
+               ptce->base = iprv.v0;
+               ptce->count[0] = iprv.v1 >> 32;
+               ptce->count[1] = iprv.v1 & 0xffffffff;
+               ptce->stride[0] = iprv.v2 >> 32;
+               ptce->stride[1] = iprv.v2 & 0xffffffff;
+       }
+       return iprv.status;
+}
+
+/* Return info about implemented application and control registers. */
+static inline s64
+ia64_pal_register_info (u64 info_request, u64 *reg_info_1, u64 *reg_info_2)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_REGISTER_INFO, info_request, 0, 0);
+       if (reg_info_1)
+               *reg_info_1 = iprv.v0;
+       if (reg_info_2)
+               *reg_info_2 = iprv.v1;
+       return iprv.status;
+}
+
+typedef union pal_hints_u {
+       u64                     ph_data;
+       struct {
+              u64              si              : 1,
+                               li              : 1,
+                               reserved        : 62;
+       } pal_hints_s;
+} pal_hints_u_t;
+
+/* Return information about the register stack and RSE for this processor
+ * implementation.
+ */
+static inline s64
+ia64_pal_rse_info (u64 *num_phys_stacked, pal_hints_u_t *hints)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_RSE_INFO, 0, 0, 0);
+       if (num_phys_stacked)
+               *num_phys_stacked = iprv.v0;
+       if (hints)
+               hints->ph_data = iprv.v1;
+       return iprv.status;
+}
+
+/* Cause the processor to enter SHUTDOWN state, where prefetching and execution are
+ * suspended, but cause cache and TLB coherency to be maintained.
+ * This is usually called in IA-32 mode.
+ */
+static inline s64
+ia64_pal_shutdown (void)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_SHUTDOWN, 0, 0, 0);
+       return iprv.status;
+}
+
+/* Perform the second phase of processor self-test. */
+static inline s64
+ia64_pal_test_proc (u64 test_addr, u64 test_size, u64 attributes, u64 *self_test_state)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_TEST_PROC, test_addr, test_size, attributes);
+       if (self_test_state)
+               *self_test_state = iprv.v0;
+       return iprv.status;
+}
+
+typedef union  pal_version_u {
+       u64     pal_version_val;
+       struct {
+               u64     pv_pal_b_rev            :       8;
+               u64     pv_pal_b_model          :       8;
+               u64     pv_reserved1            :       8;
+               u64     pv_pal_vendor           :       8;
+               u64     pv_pal_a_rev            :       8;
+               u64     pv_pal_a_model          :       8;
+               u64     pv_reserved2            :       16;
+       } pal_version_s;
+} pal_version_u_t;
+
+
+/* Return PAL version information */
+static inline s64
+ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_PHYS(iprv, PAL_VERSION, 0, 0, 0);
+       if (pal_min_version)
+               pal_min_version->pal_version_val = iprv.v0;
+
+       if (pal_cur_version)
+               pal_cur_version->pal_version_val = iprv.v1;
+
+       return iprv.status;
+}
+
+typedef union pal_tc_info_u {
+       u64                     pti_val;
+       struct {
+              u64              num_sets        :       8,
+                               associativity   :       8,
+                               num_entries     :       16,
+                               pf              :       1,
+                               unified         :       1,
+                               reduce_tr       :       1,
+                               reserved        :       29;
+       } pal_tc_info_s;
+} pal_tc_info_u_t;
+
+#define tc_reduce_tr           pal_tc_info_s.reduce_tr
+#define tc_unified             pal_tc_info_s.unified
+#define tc_pf                  pal_tc_info_s.pf
+#define tc_num_entries         pal_tc_info_s.num_entries
+#define tc_associativity       pal_tc_info_s.associativity
+#define tc_num_sets            pal_tc_info_s.num_sets
+
+
+/* Return information about the virtual memory characteristics of the processor
+ * implementation.
+ */
+static inline s64
+ia64_pal_vm_info (u64 tc_level, u64 tc_type,  pal_tc_info_u_t *tc_info, u64 *tc_pages)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_VM_INFO, tc_level, tc_type, 0);
+       if (tc_info)
+               tc_info->pti_val = iprv.v0;
+       if (tc_pages)
+               *tc_pages = iprv.v1;
+       return iprv.status;
+}
+
+/* Get page size information about the virtual memory characteristics of the processor
+ * implementation.
+ */
+static inline s64
+ia64_pal_vm_page_size (u64 *tr_pages, u64 *vw_pages)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_VM_PAGE_SIZE, 0, 0, 0);
+       if (tr_pages)
+               *tr_pages = iprv.v0;
+       if (vw_pages)
+               *vw_pages = iprv.v1;
+       return iprv.status;
+}
+
+typedef union pal_vm_info_1_u {
+       u64                     pvi1_val;
+       struct {
+               u64             vw              : 1,
+                               phys_add_size   : 7,
+                               key_size        : 8,
+                               max_pkr         : 8,
+                               hash_tag_id     : 8,
+                               max_dtr_entry   : 8,
+                               max_itr_entry   : 8,
+                               max_unique_tcs  : 8,
+                               num_tc_levels   : 8;
+       } pal_vm_info_1_s;
+} pal_vm_info_1_u_t;
+
+typedef union pal_vm_info_2_u {
+       u64                     pvi2_val;
+       struct {
+               u64             impl_va_msb     : 8,
+                               rid_size        : 8,
+                               reserved        : 48;
+       } pal_vm_info_2_s;
+} pal_vm_info_2_u_t;
+
+/* Get summary information about the virtual memory characteristics of the processor
+ * implementation.
+ */
+static inline s64
+ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_VM_SUMMARY, 0, 0, 0);
+       if (vm_info_1)
+               vm_info_1->pvi1_val = iprv.v0;
+       if (vm_info_2)
+               vm_info_2->pvi2_val = iprv.v1;
+       return iprv.status;
+}
+
+typedef union pal_itr_valid_u {
+       u64                     piv_val;
+       struct {
+              u64              access_rights_valid     : 1,
+                               priv_level_valid        : 1,
+                               dirty_bit_valid         : 1,
+                               mem_attr_valid          : 1,
+                               reserved                : 60;
+       } pal_tr_valid_s;
+} pal_tr_valid_u_t;
+
+/* Read a translation register */
+static inline s64
+ia64_pal_tr_read (u64 reg_num, u64 tr_type, u64 *tr_buffer, pal_tr_valid_u_t *tr_valid)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_PHYS_STK(iprv, PAL_VM_TR_READ, reg_num, tr_type,(u64)ia64_tpa(tr_buffer));
+       if (tr_valid)
+               tr_valid->piv_val = iprv.v0;
+       return iprv.status;
+}
+
+/*
+ * PAL_PREFETCH_VISIBILITY transaction types
+ */
+#define PAL_VISIBILITY_VIRTUAL         0
+#define PAL_VISIBILITY_PHYSICAL                1
+
+/*
+ * PAL_PREFETCH_VISIBILITY return codes
+ */
+#define PAL_VISIBILITY_OK              1
+#define PAL_VISIBILITY_OK_REMOTE_NEEDED        0
+#define PAL_VISIBILITY_INVAL_ARG       -2
+#define PAL_VISIBILITY_ERROR           -3
+
+static inline s64
+ia64_pal_prefetch_visibility (s64 trans_type)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_PREFETCH_VISIBILITY, trans_type, 0, 0);
+       return iprv.status;
+}
+
+#ifdef CONFIG_VTI
+#include <asm/vmx_pal.h>
+#endif // CONFIG_VTI
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_PAL_H */
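As an aside, all of the inline wrappers in this header follow the same pattern: invoke one of the PAL_CALL* macros, copy any output values out of iprv, and return iprv.status, which uses the ia64_pal_retval convention (zero on success, negative on failure, positive for informational results). A minimal usage sketch follows; it is not part of the changeset, the function name pal_probe_example is illustrative only, and it assumes printk() is available in the calling context:

	/* Illustrative only; not part of this patch. */
	static void pal_probe_example(void)
	{
		u64 cache_levels, unique_caches;
		pal_version_u_t min_ver, cur_ver;

		/* status 0 means the PAL call completed without error */
		if (ia64_pal_cache_summary(&cache_levels, &unique_caches) == 0)
			printk("PAL: %lu cache levels, %lu unique caches\n",
			       cache_levels, unique_caches);

		if (ia64_pal_version(&min_ver, &cur_ver) == 0)
			printk("PAL version words: min=0x%lx current=0x%lx\n",
			       min_ver.pal_version_val, cur_ver.pal_version_val);
	}
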
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/pgalloc.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/pgalloc.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,196 @@
+#ifndef _ASM_IA64_PGALLOC_H
+#define _ASM_IA64_PGALLOC_H
+
+/*
+ * This file contains the functions and defines necessary to allocate
+ * page tables.
+ *
+ * This hopefully works with any (fixed) ia-64 page-size, as defined
+ * in <asm/page.h> (currently 8192).
+ *
+ * Copyright (C) 1998-2001 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 2000, Goutham Rao <goutham.rao@xxxxxxxxx>
+ */
+
+#include <linux/config.h>
+
+#include <linux/compiler.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/threads.h>
+
+#include <asm/mmu_context.h>
+#include <asm/processor.h>
+
+/*
+ * Very stupidly, we used to get new pgd's and pmd's, init their contents
+ * to point to the NULL versions of the next level page table, later on
+ * completely re-init them the same way, then free them up.  This wasted
+ * a lot of work and caused unnecessary memory traffic.  How broken...
+ * We fix this by caching them.
+ */
+#define pgd_quicklist          (local_cpu_data->pgd_quick)
+#define pmd_quicklist          (local_cpu_data->pmd_quick)
+#define pgtable_cache_size     (local_cpu_data->pgtable_cache_sz)
+
+static inline pgd_t*
+pgd_alloc_one_fast (struct mm_struct *mm)
+{
+       unsigned long *ret = NULL;
+
+       preempt_disable();
+
+       ret = pgd_quicklist;
+       if (likely(ret != NULL)) {
+               pgd_quicklist = (unsigned long *)(*ret);
+               ret[0] = 0;
+               --pgtable_cache_size;
+       } else
+               ret = NULL;
+
+       preempt_enable();
+
+       return (pgd_t *) ret;
+}
+
+static inline pgd_t*
+pgd_alloc (struct mm_struct *mm)
+{
+       /* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
+       pgd_t *pgd = pgd_alloc_one_fast(mm);
+
+       if (unlikely(pgd == NULL)) {
+#ifdef XEN
+               pgd = (pgd_t *)alloc_xenheap_page();
+               memset(pgd,0,PAGE_SIZE);
+#else
+               pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
+#endif
+       }
+       return pgd;
+}
+
+static inline void
+pgd_free (pgd_t *pgd)
+{
+       preempt_disable();
+       *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+       pgd_quicklist = (unsigned long *) pgd;
+       ++pgtable_cache_size;
+       preempt_enable();
+}
+
+static inline void
+pud_populate (struct mm_struct *mm, pud_t *pud_entry, pmd_t *pmd)
+{
+       pud_val(*pud_entry) = __pa(pmd);
+}
+
+static inline pmd_t*
+pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
+{
+       unsigned long *ret = NULL;
+
+       preempt_disable();
+
+       ret = (unsigned long *)pmd_quicklist;
+       if (likely(ret != NULL)) {
+               pmd_quicklist = (unsigned long *)(*ret);
+               ret[0] = 0;
+               --pgtable_cache_size;
+       }
+
+       preempt_enable();
+
+       return (pmd_t *)ret;
+}
+
+static inline pmd_t*
+pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
+{
+#ifdef XEN
+       pmd_t *pmd = (pmd_t *)alloc_xenheap_page();
+       memset(pmd,0,PAGE_SIZE);
+#else
+       pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+#endif
+
+       return pmd;
+}
+
+static inline void
+pmd_free (pmd_t *pmd)
+{
+       preempt_disable();
+       *(unsigned long *)pmd = (unsigned long) pmd_quicklist;
+       pmd_quicklist = (unsigned long *) pmd;
+       ++pgtable_cache_size;
+       preempt_enable();
+}
+
+#define __pmd_free_tlb(tlb, pmd)       pmd_free(pmd)
+
+static inline void
+pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, struct page *pte)
+{
+       pmd_val(*pmd_entry) = page_to_phys(pte);
+}
+
+static inline void
+pmd_populate_kernel (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
+{
+       pmd_val(*pmd_entry) = __pa(pte);
+}
+
+static inline struct page *
+pte_alloc_one (struct mm_struct *mm, unsigned long addr)
+{
+#ifdef XEN
+       struct page *pte = alloc_xenheap_page();
+       memset(pte,0,PAGE_SIZE);
+#else
+       struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+#endif
+
+       return pte;
+}
+
+static inline pte_t *
+pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
+{
+#ifdef XEN
+       pte_t *pte = (pte_t *)alloc_xenheap_page();
+       memset(pte,0,PAGE_SIZE);
+#else
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+#endif
+
+       return pte;
+}
+
+static inline void
+pte_free (struct page *pte)
+{
+#ifdef XEN
+       free_xenheap_page(pte);
+#else
+       __free_page(pte);
+#endif
+}
+
+static inline void
+pte_free_kernel (pte_t *pte)
+{
+#ifdef XEN
+       free_xenheap_page((unsigned long) pte);
+#else
+       free_page((unsigned long) pte);
+#endif
+}
+
+#define __pte_free_tlb(tlb, pte)       tlb_remove_page((tlb), (pte))
+
+extern void check_pgt_cache (void);
+
+#endif /* _ASM_IA64_PGALLOC_H */
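As an aside, the quicklist scheme in this header means a freed pgd is pushed onto the per-CPU pgd_quicklist rather than handed back to the page allocator, so the next pgd_alloc() on the same CPU is satisfied from the cache via pgd_alloc_one_fast(). A minimal sketch of the allocate/free round trip; it is not part of the changeset and the function name pgalloc_example is illustrative only:

	/* Illustrative only; not part of this patch. */
	static void pgalloc_example(struct mm_struct *mm)
	{
		pgd_t *pgd = pgd_alloc(mm);	/* fast path: reuse from pgd_quicklist */

		if (pgd == NULL)
			return;
		/* ... populate the page table here ... */
		pgd_free(pgd);			/* pushed back onto pgd_quicklist */
	}
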
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/processor.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/processor.h  Tue Aug  2 23:59:09 2005
@@ -0,0 +1,705 @@
+#ifndef _ASM_IA64_PROCESSOR_H
+#define _ASM_IA64_PROCESSOR_H
+
+/*
+ * Copyright (C) 1998-2004 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@xxxxxxxxx>
+ * Copyright (C) 1999 Don Dugger <don.dugger@xxxxxxxxx>
+ *
+ * 11/24/98    S.Eranian       added ia64_set_iva()
+ * 12/03/99    D. Mosberger    implement thread_saved_pc() via kernel unwind API
+ * 06/16/00    A. Mallick      added csd/ssd/tssd for ia32 support
+ */
+
+#include <linux/config.h>
+
+#include <asm/intrinsics.h>
+#include <asm/kregs.h>
+#include <asm/ptrace.h>
+#include <asm/ustack.h>
+
+/* Our arch specific arch_init_sched_domain is in arch/ia64/kernel/domain.c */
+#define ARCH_HAS_SCHED_DOMAIN
+
+#define IA64_NUM_DBG_REGS      8
+/*
+ * Limits for PMC and PMD are set to less than maximum architected values
+ * but should be sufficient for a while
+ */
+#define IA64_NUM_PMC_REGS      32
+#define IA64_NUM_PMD_REGS      32
+
+#define DEFAULT_MAP_BASE       __IA64_UL_CONST(0x2000000000000000)
+#define DEFAULT_TASK_SIZE      __IA64_UL_CONST(0xa000000000000000)
+
+/*
+ * TASK_SIZE really is a misnomer.  It really is the maximum user
+ * space address (plus one).  On IA-64, there are five regions of 2TB
+ * each (assuming 8KB page size), for a total of 8TB of user virtual
+ * address space.
+ */
+#define TASK_SIZE              (current->thread.task_size)
+
+/*
+ * MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a mapping for
+ * address-space MM.  Note that with 32-bit tasks, this is still DEFAULT_TASK_SIZE,
+ * because the kernel may have installed helper-mappings above TASK_SIZE.  For example,
+ * for x86 emulation, the LDT and GDT are mapped above TASK_SIZE.
+ */
+#define MM_VM_SIZE(mm)         DEFAULT_TASK_SIZE
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE     (current->thread.map_base)
+
+#define IA64_THREAD_FPH_VALID  (__IA64_UL(1) << 0)     /* floating-point high state valid? */
+#define IA64_THREAD_DBG_VALID  (__IA64_UL(1) << 1)     /* debug registers valid? */
+#define IA64_THREAD_PM_VALID   (__IA64_UL(1) << 2)     /* performance registers valid? */
+#define IA64_THREAD_UAC_NOPRINT        (__IA64_UL(1) << 3)     /* don't log unaligned accesses */
+#define IA64_THREAD_UAC_SIGBUS (__IA64_UL(1) << 4)     /* generate SIGBUS on unaligned acc. */
+                                                       /* bit 5 is currently unused */
+#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)  /* don't log any fpswa faults */
+#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)  /* send a SIGFPE for fpswa faults */
+
+#define IA64_THREAD_UAC_SHIFT  3
+#define IA64_THREAD_UAC_MASK   (IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
+#define IA64_THREAD_FPEMU_SHIFT        6
+#define IA64_THREAD_FPEMU_MASK (IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)
+
+
+/*
+ * This shift should be large enough to be able to represent 1000000000/itc_freq with good
+ * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
+ * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
+ */
+#define IA64_NSEC_PER_CYC_SHIFT        30
+
+#ifndef __ASSEMBLY__
+
+#include <linux/cache.h>
+#include <linux/compiler.h>
+#include <linux/threads.h>
+#include <linux/types.h>
+
+#include <asm/fpu.h>
+#include <asm/page.h>
+#include <asm/percpu.h>
+#include <asm/rse.h>
+#include <asm/unwind.h>
+#include <asm/atomic.h>
+#ifdef CONFIG_NUMA
+#include <asm/nodedata.h>
+#endif
+#ifdef XEN
+#include <asm/xenprocessor.h>
+#endif
+
+#ifndef XEN
+/* like above but expressed as bitfields for more efficient access: */
+struct ia64_psr {
+       __u64 reserved0 : 1;
+       __u64 be : 1;
+       __u64 up : 1;
+       __u64 ac : 1;
+       __u64 mfl : 1;
+       __u64 mfh : 1;
+       __u64 reserved1 : 7;
+       __u64 ic : 1;
+       __u64 i : 1;
+       __u64 pk : 1;
+       __u64 reserved2 : 1;
+       __u64 dt : 1;
+       __u64 dfl : 1;
+       __u64 dfh : 1;
+       __u64 sp : 1;
+       __u64 pp : 1;
+       __u64 di : 1;
+       __u64 si : 1;
+       __u64 db : 1;
+       __u64 lp : 1;
+       __u64 tb : 1;
+       __u64 rt : 1;
+       __u64 reserved3 : 4;
+       __u64 cpl : 2;
+       __u64 is : 1;
+       __u64 mc : 1;
+       __u64 it : 1;
+       __u64 id : 1;
+       __u64 da : 1;
+       __u64 dd : 1;
+       __u64 ss : 1;
+       __u64 ri : 2;
+       __u64 ed : 1;
+       __u64 bn : 1;
+       __u64 reserved4 : 19;
+};
+#endif
+
+/*
+ * CPU type, hardware bug flags, and per-CPU state.  Frequently used
+ * state comes earlier:
+ */
+struct cpuinfo_ia64 {
+       __u32 softirq_pending;
+       __u64 itm_delta;        /* # of clock cycles between clock ticks */
+       __u64 itm_next;         /* interval timer mask value to use for next clock tick */
+       __u64 nsec_per_cyc;     /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
+       __u64 unimpl_va_mask;   /* mask of unimplemented virtual address bits (from PAL) */
+       __u64 unimpl_pa_mask;   /* mask of unimplemented physical address bits (from PAL) */
+       __u64 *pgd_quick;
+       __u64 *pmd_quick;
+       __u64 pgtable_cache_sz;
+       __u64 itc_freq;         /* frequency of ITC counter */
+       __u64 proc_freq;        /* frequency of processor */
+       __u64 cyc_per_usec;     /* itc_freq/1000000 */
+       __u64 ptce_base;
+       __u32 ptce_count[2];
+       __u32 ptce_stride[2];
+       struct task_struct *ksoftirqd;  /* kernel softirq daemon for this CPU */
+
+#ifdef CONFIG_SMP
+       __u64 loops_per_jiffy;
+       int cpu;
+#endif
+
+       /* CPUID-derived information: */
+       __u64 ppn;
+       __u64 features;
+       __u8 number;
+       __u8 revision;
+       __u8 model;
+       __u8 family;
+       __u8 archrev;
+       char vendor[16];
+
+#ifdef CONFIG_NUMA
+       struct ia64_node_data *node_data;
+#endif
+};
+
+DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+
+/*
+ * The "local" data variable.  It refers to the per-CPU data of the currently executing
+ * CPU, much like "current" points to the per-task data of the currently executing task.
+ * Do not use the address of local_cpu_data, since it will be different from
+ * cpu_data(smp_processor_id())!
+ */
+#define local_cpu_data         (&__ia64_per_cpu_var(cpu_info))
+#define cpu_data(cpu)          (&per_cpu(cpu_info, cpu))
+
+extern void identify_cpu (struct cpuinfo_ia64 *);
+extern void print_cpu_info (struct cpuinfo_ia64 *);
+
+typedef struct {
+       unsigned long seg;
+} mm_segment_t;
+
+#define SET_UNALIGN_CTL(task,value)                                                             \
+({                                                                                              \
+       (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)                   \
+                               | (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));  \
+       0;                                                                                       \
+})
+#define GET_UNALIGN_CTL(task,addr)                                                              \
+({                                                                                              \
+       put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,         \
+                (int __user *) (addr));                                                         \
+})
+
+#define SET_FPEMU_CTL(task,value)                                                               \
+({                                                                                              \
+       (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)                 \
+                         | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));    \
+       0;                                                                                       \
+})
+#define GET_FPEMU_CTL(task,addr)                                                                \
+({                                                                                              \
+       put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,     \
+                (int __user *) (addr));                                                         \
+})
+
+#ifdef CONFIG_IA32_SUPPORT
+struct desc_struct {
+       unsigned int a, b;
+};
+
+#define desc_empty(desc)               (!((desc)->a + (desc)->b))
+#define desc_equal(desc1, desc2)       (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
+
+#define GDT_ENTRY_TLS_ENTRIES  3
+#define GDT_ENTRY_TLS_MIN      6
+#define GDT_ENTRY_TLS_MAX      (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
+
+struct partial_page_list;
+#endif
+
+struct thread_struct {
+       __u32 flags;                    /* various thread flags (see IA64_THREAD_*) */
+       /* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
+       __u8 on_ustack;                 /* executing on user-stacks? */
+       __u8 pad[3];
+       __u64 ksp;                      /* kernel stack pointer */
+       __u64 map_base;                 /* base address for get_unmapped_area() */
+       __u64 task_size;                /* limit for task size */
+       __u64 rbs_bot;                  /* the base address for the RBS */
+       int last_fph_cpu;               /* CPU that may hold the contents of f32-f127 */
+
+#ifdef CONFIG_IA32_SUPPORT
+       __u64 eflag;                    /* IA32 EFLAGS reg */
+       __u64 fsr;                      /* IA32 floating pt status reg */
+       __u64 fcr;                      /* IA32 floating pt control reg */
+       __u64 fir;                      /* IA32 fp except. instr. reg */
+       __u64 fdr;                      /* IA32 fp except. data reg */
+       __u64 old_k1;                   /* old value of ar.k1 */
+       __u64 old_iob;                  /* old IOBase value */
+       struct partial_page_list *ppl;  /* partial page list for 4K page size issue */
+        /* cached TLS descriptors. */
+       struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
+
+# define INIT_THREAD_IA32      .eflag =        0,                      \
+                               .fsr =          0,                      \
+                               .fcr =          0x17800000037fULL,      \
+                               .fir =          0,                      \
+                               .fdr =          0,                      \
+                               .old_k1 =       0,                      \
+                               .old_iob =      0,                      \
+                               .ppl =          NULL,
+#else
+# define INIT_THREAD_IA32
+#endif /* CONFIG_IA32_SUPPORT */
+#ifdef CONFIG_PERFMON
+       __u64 pmcs[IA64_NUM_PMC_REGS];
+       __u64 pmds[IA64_NUM_PMD_REGS];
+       void *pfm_context;                   /* pointer to detailed PMU context */
+       unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
+# define INIT_THREAD_PM                .pmcs =                 {0UL, },  \
+                               .pmds =                 {0UL, },  \
+                               .pfm_context =          NULL,     \
+                               .pfm_needs_checking =   0UL,
+#else
+# define INIT_THREAD_PM
+#endif
+       __u64 dbr[IA64_NUM_DBG_REGS];
+       __u64 ibr[IA64_NUM_DBG_REGS];
+       struct ia64_fpreg fph[96];      /* saved/loaded on demand */
+};
+
+#define INIT_THREAD {                                          \
+       .flags =        0,                                      \
+       .on_ustack =    0,                                      \
+       .ksp =          0,                                      \
+       .map_base =     DEFAULT_MAP_BASE,                       \
+       .rbs_bot =      STACK_TOP - DEFAULT_USER_STACK_SIZE,    \
+       .task_size =    DEFAULT_TASK_SIZE,                      \
+       .last_fph_cpu =  -1,                                    \
+       INIT_THREAD_IA32                                        \
+       INIT_THREAD_PM                                          \
+       .dbr =          {0, },                                  \
+       .ibr =          {0, },                                  \
+       .fph =          {{{{0}}}, }                             \
+}
+
+#define start_thread(regs,new_ip,new_sp) do {                                                  \
+       set_fs(USER_DS);                                                                        \
+       regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))                \
+                        & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));              \
+       regs->cr_iip = new_ip;                                                                  \
+       regs->ar_rsc = 0xf;             /* eager mode, privilege level 3 */                     \
+       regs->ar_rnat = 0;                                                                      \
+       regs->ar_bspstore = current->thread.rbs_bot;                                            \
+       regs->ar_fpsr = FPSR_DEFAULT;                                                           \
+       regs->loadrs = 0;                                                                       \
+       regs->r8 = current->mm->dumpable;       /* set "don't zap registers" flag */            \
+       regs->r12 = new_sp - 16;        /* allocate 16 byte scratch area */                     \
+       if (unlikely(!current->mm->dumpable)) {                                                 \
+               /*                                                                              \
+                * Zap scratch regs to avoid leaking bits between processes with different      \
+                * uid/privileges.                                                              \
+                */                                                                             \
+               regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;                                   \
+               regs->r1 = 0; regs->r9  = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;       \
+       }                                                                                       \
+} while (0)
+
+/* Forward declarations, a strange C thing... */
+struct mm_struct;
+struct task_struct;
+
+/*
+ * Free all resources held by a thread. This is called after the
+ * parent of DEAD_TASK has collected the exit status of the task via
+ * wait().
+ */
+#define release_thread(dead_task)
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk)   do { } while (0)
+
+/*
+ * This is the mechanism for creating a new kernel thread.
+ *
+ * NOTE 1: Only a kernel-only process (ie the swapper or direct
+ * descendants who haven't done an "execve()") should use this: it
+ * will work within a system call from a "real" process, but the
+ * process memory space will not be free'd until both the parent and
+ * the child have exited.
+ *
+ * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
+ * into trouble in init/main.c when the child thread returns to
+ * do_basic_setup() and the timing is such that free_initmem() has
+ * been called already.
+ */
+extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);
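The comment above describes the old kernel_thread() interface. A minimal sketch of how a caller might use it, assuming kernel context; the worker function name and the choice of CLONE_* flags are illustrative only, not taken from this patch:

/* Hypothetical illustration only -- assumes kernel context. */
static int my_worker(void *arg)
{
        /* ... do kernel-only work, never return to user space ... */
        return 0;
}

static void spawn_worker(void)
{
        /* Share filesystem and file-descriptor state with the parent. */
        kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);
}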
+
+/* Get wait channel for task P.  */
+extern unsigned long get_wchan (struct task_struct *p);
+
+/* Return instruction pointer of blocked task TSK.  */
+#define KSTK_EIP(tsk)                                  \
+  ({                                                   \
+       struct pt_regs *_regs = ia64_task_regs(tsk);    \
+       _regs->cr_iip + ia64_psr(_regs)->ri;            \
+  })
+
+/* Return stack pointer of blocked task TSK.  */
+#define KSTK_ESP(tsk)  ((tsk)->thread.ksp)
+
+extern void ia64_getreg_unknown_kr (void);
+extern void ia64_setreg_unknown_kr (void);
+
+#define ia64_get_kr(regnum)                                    \
+({                                                             \
+       unsigned long r = 0;                                    \
+                                                               \
+       switch (regnum) {                                       \
+           case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;   \
+           case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;   \
+           case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;   \
+           case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;   \
+           case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;   \
+           case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;   \
+           case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;   \
+           case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;   \
+           default: ia64_getreg_unknown_kr(); break;           \
+       }                                                       \
+       r;                                                      \
+})
+
+#define ia64_set_kr(regnum, r)                                         \
+({                                                             \
+       switch (regnum) {                                       \
+           case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;    \
+           case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;    \
+           case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;    \
+           case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;    \
+           case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;    \
+           case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;    \
+           case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;    \
+           case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;    \
+           default: ia64_setreg_unknown_kr(); break;           \
+       }                                                       \
+})
+
+/*
+ * The following three macros can't be inline functions because we don't have struct
+ * task_struct at this point.
+ */
+
+/* Return TRUE if task T owns the fph partition of the CPU we're running on. */
+#ifndef XEN
+#define ia64_is_local_fpu_owner(t)                                                             \
+({                                                                                             \
+       struct task_struct *__ia64_islfo_task = (t);                                            \
+       (__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()                           \
+        && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));        \
+})
+#endif
+
+/* Mark task T as owning the fph partition of the CPU we're running on. */
+#define ia64_set_local_fpu_owner(t) do {                                               \
+       struct task_struct *__ia64_slfo_task = (t);                                     \
+       __ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();                     \
+       ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);               \
+} while (0)
+
+/* Mark the fph partition of task T as being invalid on all CPUs.  */
+#define ia64_drop_fpu(t)       ((t)->thread.last_fph_cpu = -1)
+
+extern void __ia64_init_fpu (void);
+extern void __ia64_save_fpu (struct ia64_fpreg *fph);
+extern void __ia64_load_fpu (struct ia64_fpreg *fph);
+extern void ia64_save_debug_regs (unsigned long *save_area);
+extern void ia64_load_debug_regs (unsigned long *save_area);
+
+#ifdef CONFIG_IA32_SUPPORT
+extern void ia32_save_state (struct task_struct *task);
+extern void ia32_load_state (struct task_struct *task);
+#endif
+
+#define ia64_fph_enable()      do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
+#define ia64_fph_disable()     do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
+
+/* load fp 0.0 into fph */
+static inline void
+ia64_init_fpu (void) {
+       ia64_fph_enable();
+       __ia64_init_fpu();
+       ia64_fph_disable();
+}
+
+/* save f32-f127 at FPH */
+static inline void
+ia64_save_fpu (struct ia64_fpreg *fph) {
+       ia64_fph_enable();
+       __ia64_save_fpu(fph);
+       ia64_fph_disable();
+}
+
+/* load f32-f127 from FPH */
+static inline void
+ia64_load_fpu (struct ia64_fpreg *fph) {
+       ia64_fph_enable();
+       __ia64_load_fpu(fph);
+       ia64_fph_disable();
+}
+
+static inline __u64
+ia64_clear_ic (void)
+{
+       __u64 psr;
+       psr = ia64_getreg(_IA64_REG_PSR);
+       ia64_stop();
+       ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
+       ia64_srlz_i();
+       return psr;
+}
+
+/*
+ * Restore the psr.
+ */
+static inline void
+ia64_set_psr (__u64 psr)
+{
+       ia64_stop();
+       ia64_setreg(_IA64_REG_PSR_L, psr);
+       ia64_srlz_d();
+}
+
+/*
+ * Insert a translation into an instruction and/or data translation
+ * register.
+ */
+static inline void
+ia64_itr (__u64 target_mask, __u64 tr_num,
+         __u64 vmaddr, __u64 pte,
+         __u64 log_page_size)
+{
+       ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
+       ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+       ia64_stop();
+       if (target_mask & 0x1)
+               ia64_itri(tr_num, pte);
+       if (target_mask & 0x2)
+               ia64_itrd(tr_num, pte);
+}
+
+/*
+ * Insert a translation into the instruction and/or data translation
+ * cache.
+ */
+static inline void
+ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
+         __u64 log_page_size)
+{
+       ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
+       ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+       ia64_stop();
+       /* as per EAS2.6, itc must be the last instruction in an instruction group */
+       if (target_mask & 0x1)
+               ia64_itci(pte);
+       if (target_mask & 0x2)
+               ia64_itcd(pte);
+}
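To make the target_mask convention above concrete: per the bodies of ia64_itr()/ia64_itc(), bit 0 selects the instruction TLB and bit 1 the data TLB, while log_page_size is log2 of the mapping size programmed into ITIR. A hedged sketch of how a caller might pin one data translation; the register number and page-size value are illustrative, not taken from this patch:

/* Illustrative only: insert PTE `pte' for virtual address `vaddr' into
 * data translation register 2, mapping a 16KB (2^14-byte) page. */
static inline void pin_data_mapping(__u64 vaddr, __u64 pte)
{
        ia64_itr(0x2 /* data TLB only */, 2 /* tr_num */, vaddr, pte, 14 /* log2(16KB) */);
}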
+
+/*
+ * Purge a range of addresses from instruction and/or data translation
+ * register(s).
+ */
+static inline void
+ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
+{
+       if (target_mask & 0x1)
+               ia64_ptri(vmaddr, (log_size << 2));
+       if (target_mask & 0x2)
+               ia64_ptrd(vmaddr, (log_size << 2));
+}
+
+/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
+static inline void
+ia64_set_iva (void *ivt_addr)
+{
+       ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
+       ia64_srlz_i();
+}
+
+/* Set the page table address and control bits.  */
+static inline void
+ia64_set_pta (__u64 pta)
+{
+       /* Note: srlz.i implies srlz.d */
+       ia64_setreg(_IA64_REG_CR_PTA, pta);
+       ia64_srlz_i();
+}
+
+static inline void
+ia64_eoi (void)
+{
+       ia64_setreg(_IA64_REG_CR_EOI, 0);
+       ia64_srlz_d();
+}
+
+#define cpu_relax()    ia64_hint(ia64_hint_pause)
+
+static inline void
+ia64_set_lrr0 (unsigned long val)
+{
+       ia64_setreg(_IA64_REG_CR_LRR0, val);
+       ia64_srlz_d();
+}
+
+static inline void
+ia64_set_lrr1 (unsigned long val)
+{
+       ia64_setreg(_IA64_REG_CR_LRR1, val);
+       ia64_srlz_d();
+}
+
+
+/*
+ * Given the address to which a spill occurred, return the unat bit
+ * number that corresponds to this address.
+ */
+static inline __u64
+ia64_unat_pos (void *spill_addr)
+{
+       return ((__u64) spill_addr >> 3) & 0x3f;
+}
+
+/*
+ * Set the NaT bit of an integer register which was spilled at address
+ * SPILL_ADDR.  UNAT is the mask to be updated.
+ */
+static inline void
+ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
+{
+       __u64 bit = ia64_unat_pos(spill_addr);
+       __u64 mask = 1UL << bit;
+
+       *unat = (*unat & ~mask) | (nat << bit);
+}
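The two helpers above encode a simple rule: the UNAT bit index for a spill is taken from address bits 8..3, i.e. which of the 64 eight-byte slots in a 512-byte window the store landed in. A small self-contained user-space check of that arithmetic (names here are illustrative only):

#include <assert.h>
#include <stdio.h>

/* Same arithmetic as ia64_unat_pos()/ia64_set_unat(), in plain C. */
static unsigned long unat_pos(unsigned long spill_addr)
{
        return (spill_addr >> 3) & 0x3f;
}

int main(void)
{
        unsigned long unat = 0;
        unsigned long addr = 0x1000 + 5 * 8;    /* sixth 8-byte slot */

        assert(unat_pos(addr) == 5);
        unat |= 1UL << unat_pos(addr);          /* mark that slot's NaT bit */
        printf("unat = %#lx\n", unat);          /* prints unat = 0x20 */
        return 0;
}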
+
+/*
+ * Return saved PC of a blocked thread.
+ * Note that the only way T can block is through a call to schedule() -> switch_to().
+ */
+static inline unsigned long
+thread_saved_pc (struct task_struct *t)
+{
+       struct unw_frame_info info;
+       unsigned long ip;
+
+       unw_init_from_blocked_task(&info, t);
+       if (unw_unwind(&info) < 0)
+               return 0;
+       unw_get_ip(&info, &ip);
+       return ip;
+}
+
+/*
+ * Get the current instruction/program counter value.
+ */
+#define current_text_addr() \
+       ({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })
+
+static inline __u64
+ia64_get_ivr (void)
+{
+       __u64 r;
+       ia64_srlz_d();
+       r = ia64_getreg(_IA64_REG_CR_IVR);
+       ia64_srlz_d();
+       return r;
+}
+
+static inline void
+ia64_set_dbr (__u64 regnum, __u64 value)
+{
+       __ia64_set_dbr(regnum, value);
+#ifdef CONFIG_ITANIUM
+       ia64_srlz_d();
+#endif
+}
+
+static inline __u64
+ia64_get_dbr (__u64 regnum)
+{
+       __u64 retval;
+
+       retval = __ia64_get_dbr(regnum);
+#ifdef CONFIG_ITANIUM
+       ia64_srlz_d();
+#endif
+       return retval;
+}
+
+static inline __u64
+ia64_rotr (__u64 w, __u64 n)
+{
+       return (w >> n) | (w << (64 - n));
+}
+
+#define ia64_rotl(w,n) ia64_rotr((w), (64) - (n))
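A quick self-contained check of the rotate helpers above (user-space C, illustrative only; assumes a 64-bit unsigned long as on ia64): rotating right by n and then left by n must return the original word, and, like the kernel macros, the helpers are only meant for 0 < n < 64.

#include <assert.h>

typedef unsigned long u64_t;    /* assumption: 64-bit unsigned long */

static u64_t rotr(u64_t w, unsigned n) { return (w >> n) | (w << (64 - n)); }
static u64_t rotl(u64_t w, unsigned n) { return rotr(w, 64 - n); }

int main(void)
{
        u64_t w = 0x0123456789abcdefUL;

        assert(rotr(w, 8) == 0xef0123456789abcdUL);     /* low byte wraps to the top */
        assert(rotl(rotr(w, 13), 13) == w);             /* the two rotations are inverses */
        return 0;
}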
+
+/*
+ * Take a mapped kernel address and return the equivalent address
+ * in the region 7 identity mapped virtual area.
+ */
+static inline void *
+ia64_imva (void *addr)
+{
+       void *result;
+       result = (void *) ia64_tpa(addr);
+       return __va(result);
+}
+
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+#define PREFETCH_STRIDE                        L1_CACHE_BYTES
+
+static inline void
+prefetch (const void *x)
+{
+        ia64_lfetch(ia64_lfhint_none, x);
+}
+
+static inline void
+prefetchw (const void *x)
+{
+       ia64_lfetch_excl(ia64_lfhint_none, x);
+}
+
+#define spin_lock_prefetch(x)  prefetchw(x)
+
+extern unsigned long boot_option_idle_override;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_PROCESSOR_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/ptrace.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/ptrace.h     Tue Aug  2 23:59:09 2005
@@ -0,0 +1,341 @@
+#ifndef _ASM_IA64_PTRACE_H
+#define _ASM_IA64_PTRACE_H
+
+/*
+ * Copyright (C) 1998-2004 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ * Copyright (C) 2003 Intel Co
+ *     Suresh Siddha <suresh.b.siddha@xxxxxxxxx>
+ *     Fenghua Yu <fenghua.yu@xxxxxxxxx>
+ *     Arun Sharma <arun.sharma@xxxxxxxxx>
+ *
+ * 12/07/98    S. Eranian      added pt_regs & switch_stack
+ * 12/21/98    D. Mosberger    updated to match latest code
+ *  6/17/99    D. Mosberger    added second unat member to "struct switch_stack"
+ *
+ */
+/*
+ * When a user process is blocked, its state looks as follows:
+ *
+ *            +----------------------+ ------- IA64_STK_OFFSET
+ *            |                      |   ^
+ *            | struct pt_regs       |   |
+ *            |                      |   |
+ *            +----------------------+   |
+ *            |                      |   |
+ *            |    memory stack      |   |
+ *            | (growing downwards)  |   |
+ *            //.....................//  |
+ *                                       |
+ *            //.....................//  |
+ *            |                      |   |
+ *            +----------------------+   |
+ *            | struct switch_stack  |   |
+ *            |                      |   |
+ *            +----------------------+   |
+ *            |                      |   |
+ *            //.....................//  |
+ *                                       |
+ *            //.....................//  |
+ *            |                      |   |
+ *            |  register stack      |   |
+ *            | (growing upwards)    |   |
+ *            |                      |   |
+ *            +----------------------+   |  --- IA64_RBS_OFFSET
+ *            |  struct thread_info  |   |  ^
+ *            +----------------------+   |  |
+ *            |                      |   |  |
+ *            |  struct task_struct  |   |  |
+ * current -> |                      |   |  |
+ *            +----------------------+ -------
+ *
+ * Note that ar.ec is not saved explicitly in pt_reg or switch_stack.
+ * This is because ar.ec is saved as part of ar.pfs.
+ */
+
+#include <linux/config.h>
+
+#include <asm/fpu.h>
+#include <asm/offsets.h>
+
+/*
+ * Base-2 logarithm of number of pages to allocate per task structure
+ * (including register backing store and memory stack):
+ */
+#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
+# define KERNEL_STACK_SIZE_ORDER               3
+#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
+# define KERNEL_STACK_SIZE_ORDER               2
+#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
+# define KERNEL_STACK_SIZE_ORDER               1
+#else
+# define KERNEL_STACK_SIZE_ORDER               0
+#endif
+
+#define IA64_RBS_OFFSET                        ((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 15) & ~15)
+#define IA64_STK_OFFSET                        ((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)
+
+#define KERNEL_STACK_SIZE              IA64_STK_OFFSET
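The order values in the #if ladder above keep the per-task allocation at 32KB for 4KB, 8KB and 16KB base pages (2^order pages), and a single page otherwise. A small self-contained illustration of the resulting IA64_STK_OFFSET / KERNEL_STACK_SIZE values (user-space C; the page sizes are the ones named in the ladder):

#include <stdio.h>

/* Reproduces the KERNEL_STACK_SIZE_ORDER selection above for each page size. */
static unsigned long stack_size(unsigned long page_size)
{
        int order;

        if (page_size == 4096)
                order = 3;
        else if (page_size == 8192)
                order = 2;
        else if (page_size == 16384)
                order = 1;
        else
                order = 0;
        return (1UL << order) * page_size;      /* == IA64_STK_OFFSET */
}

int main(void)
{
        unsigned long sizes[] = { 4096, 8192, 16384, 65536 };
        int i;

        for (i = 0; i < 4; i++)
                printf("page %6luB -> kernel stack %luKB\n",
                       sizes[i], stack_size(sizes[i]) / 1024);
        /* prints 32KB for 4K/8K/16K pages and 64KB for 64K pages */
        return 0;
}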
+
+#ifndef __ASSEMBLY__
+
+#include <asm/current.h>
+#include <asm/page.h>
+
+/*
+ * This struct defines the way the registers are saved on system
+ * calls.
+ *
+ * We don't save all floating point register because the kernel
+ * is compiled to use only a very small subset, so the other are
+ * untouched.
+ *
+ * THIS STRUCTURE MUST BE A MULTIPLE 16-BYTE IN SIZE
+ * (because the memory stack pointer MUST ALWAYS be aligned this way)
+ *
+ */
+#ifdef XEN
+#include <public/arch-ia64.h>
+#else
+struct pt_regs {
+       /* The following registers are saved by SAVE_MIN: */
+       unsigned long b6;               /* scratch */
+       unsigned long b7;               /* scratch */
+
+       unsigned long ar_csd;           /* used by cmp8xchg16 (scratch) */
+       unsigned long ar_ssd;           /* reserved for future use (scratch) */
+
+       unsigned long r8;               /* scratch (return value register 0) */
+       unsigned long r9;               /* scratch (return value register 1) */
+       unsigned long r10;              /* scratch (return value register 2) */
+       unsigned long r11;              /* scratch (return value register 3) */
+
+       unsigned long cr_ipsr;          /* interrupted task's psr */
+       unsigned long cr_iip;           /* interrupted task's instruction pointer */
+       /*
+        * interrupted task's function state; if bit 63 is cleared, it
+        * contains syscall's ar.pfs.pfm:
+        */
+       unsigned long cr_ifs;
+
+       unsigned long ar_unat;          /* interrupted task's NaT register (preserved) */
+       unsigned long ar_pfs;           /* prev function state  */
+       unsigned long ar_rsc;           /* RSE configuration */
+       /* The following two are valid only if cr_ipsr.cpl > 0: */
+       unsigned long ar_rnat;          /* RSE NaT */
+       unsigned long ar_bspstore;      /* RSE bspstore */
+
+       unsigned long pr;               /* 64 predicate registers (1 bit each) */
+       unsigned long b0;               /* return pointer (bp) */
+       unsigned long loadrs;           /* size of dirty partition << 16 */
+
+       unsigned long r1;               /* the gp pointer */
+       unsigned long r12;              /* interrupted task's memory stack pointer */
+       unsigned long r13;              /* thread pointer */
+
+       unsigned long ar_fpsr;          /* floating point status (preserved) */
+       unsigned long r15;              /* scratch */
+
+       /* The remaining registers are NOT saved for system calls.  */
+
+       unsigned long r14;              /* scratch */
+       unsigned long r2;               /* scratch */
+       unsigned long r3;               /* scratch */
+
+       /* The following registers are saved by SAVE_REST: */
+       unsigned long r16;              /* scratch */
+       unsigned long r17;              /* scratch */
+       unsigned long r18;              /* scratch */
+       unsigned long r19;              /* scratch */
+       unsigned long r20;              /* scratch */
+       unsigned long r21;              /* scratch */
+       unsigned long r22;              /* scratch */
+       unsigned long r23;              /* scratch */
+       unsigned long r24;              /* scratch */
+       unsigned long r25;              /* scratch */
+       unsigned long r26;              /* scratch */
+       unsigned long r27;              /* scratch */
+       unsigned long r28;              /* scratch */
+       unsigned long r29;              /* scratch */
+       unsigned long r30;              /* scratch */
+       unsigned long r31;              /* scratch */
+
+       unsigned long ar_ccv;           /* compare/exchange value (scratch) */
+
+       /*
+        * Floating point registers that the kernel considers scratch:
+        */
+       struct ia64_fpreg f6;           /* scratch */
+       struct ia64_fpreg f7;           /* scratch */
+       struct ia64_fpreg f8;           /* scratch */
+       struct ia64_fpreg f9;           /* scratch */
+       struct ia64_fpreg f10;          /* scratch */
+       struct ia64_fpreg f11;          /* scratch */
+};
+#endif
+
+/*
+ * This structure contains the addition registers that need to
+ * preserved across a context switch.  This generally consists of
+ * "preserved" registers.
+ */
+struct switch_stack {
+       unsigned long caller_unat;      /* user NaT collection register (preserved) */
+       unsigned long ar_fpsr;          /* floating-point status register */
+
+       struct ia64_fpreg f2;           /* preserved */
+       struct ia64_fpreg f3;           /* preserved */
+       struct ia64_fpreg f4;           /* preserved */
+       struct ia64_fpreg f5;           /* preserved */
+
+       struct ia64_fpreg f12;          /* scratch, but untouched by kernel */
+       struct ia64_fpreg f13;          /* scratch, but untouched by kernel */
+       struct ia64_fpreg f14;          /* scratch, but untouched by kernel */
+       struct ia64_fpreg f15;          /* scratch, but untouched by kernel */
+       struct ia64_fpreg f16;          /* preserved */
+       struct ia64_fpreg f17;          /* preserved */
+       struct ia64_fpreg f18;          /* preserved */
+       struct ia64_fpreg f19;          /* preserved */
+       struct ia64_fpreg f20;          /* preserved */
+       struct ia64_fpreg f21;          /* preserved */
+       struct ia64_fpreg f22;          /* preserved */
+       struct ia64_fpreg f23;          /* preserved */
+       struct ia64_fpreg f24;          /* preserved */
+       struct ia64_fpreg f25;          /* preserved */
+       struct ia64_fpreg f26;          /* preserved */
+       struct ia64_fpreg f27;          /* preserved */
+       struct ia64_fpreg f28;          /* preserved */
+       struct ia64_fpreg f29;          /* preserved */
+       struct ia64_fpreg f30;          /* preserved */
+       struct ia64_fpreg f31;          /* preserved */
+
+       unsigned long r4;               /* preserved */
+       unsigned long r5;               /* preserved */
+       unsigned long r6;               /* preserved */
+       unsigned long r7;               /* preserved */
+
+       unsigned long b0;               /* so we can force a direct return in copy_thread */
+       unsigned long b1;
+       unsigned long b2;
+       unsigned long b3;
+       unsigned long b4;
+       unsigned long b5;
+
+       unsigned long ar_pfs;           /* previous function state */
+       unsigned long ar_lc;            /* loop counter (preserved) */
+       unsigned long ar_unat;          /* NaT bits for r4-r7 */
+       unsigned long ar_rnat;          /* RSE NaT collection register */
+       unsigned long ar_bspstore;      /* RSE dirty base (preserved) */
+       unsigned long pr;               /* 64 predicate registers (1 bit each) */
+};
+
+#ifdef __KERNEL__
+/*
+ * We use the ia64_psr(regs)->ri to determine which of the three
+ * instructions in bundle (16 bytes) took the sample. Generate
+ * the canonical representation by adding to instruction pointer.
+ */
+# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
+/* Conserve space in histogram by encoding slot bits in address
+ * bits 2 and 3 rather than bits 0 and 1.
+ */
+#define profile_pc(regs)                                               \
+({                                                                     \
+       unsigned long __ip = instruction_pointer(regs);                 \
+       (__ip & ~3UL) + ((__ip & 3UL) << 2);                            \
+})
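In other words: cr_iip points at a 16-byte bundle, psr.ri (0..2) selects the slot within it, and profile_pc() folds the slot number into bits 2-3 so every sample still lands on a distinct, 4-byte-aligned address. A self-contained check of that encoding (user-space C, illustrative only):

#include <assert.h>

/* Same arithmetic as instruction_pointer()/profile_pc() above. */
static unsigned long sample_pc(unsigned long bundle_addr, unsigned long slot)
{
        unsigned long ip = bundle_addr + slot;          /* instruction_pointer() */
        return (ip & ~3UL) + ((ip & 3UL) << 2);         /* profile_pc() */
}

int main(void)
{
        unsigned long bundle = 0xa000000000010020UL;    /* 16-byte aligned bundle */

        assert(sample_pc(bundle, 0) == bundle);         /* slot 0 -> +0 */
        assert(sample_pc(bundle, 1) == bundle + 4);     /* slot 1 -> +4 */
        assert(sample_pc(bundle, 2) == bundle + 8);     /* slot 2 -> +8 */
        return 0;
}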
+
+  /* given a pointer to a task_struct, return the user's pt_regs */
+# define ia64_task_regs(t)             (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
+# define ia64_psr(regs)                        ((struct ia64_psr *) &(regs)->cr_ipsr)
+# define user_mode(regs)               (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
+# define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
+# define fsys_mode(task,regs)                                  \
+  ({                                                           \
+         struct task_struct *_task = (task);                   \
+         struct pt_regs *_regs = (regs);                       \
+         !user_mode(_regs) && user_stack(_task, _regs);        \
+  })
+
+  /*
+   * System call handlers that, upon successful completion, need to return a negative value
+   * should call force_successful_syscall_return() right before returning.  On architectures
+   * where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
+   * ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
+   * flag will not get set.  On architectures which do not support a separate error flag,
+   * the macro is a no-op and the spurious error condition needs to be filtered out by some
+   * other means (e.g., in user-level, by passing an extra argument to the syscall handler,
+   * or something along those lines).
+   *
+   * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
+   */
+# define force_successful_syscall_return()     (ia64_task_regs(current)->r8 = 0)
+
+  struct task_struct;                  /* forward decl */
+  struct unw_frame_info;               /* forward decl */
+
+  extern void show_regs (struct pt_regs *);
+  extern void ia64_do_show_stack (struct unw_frame_info *, void *);
+  extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
+                                             unsigned long *);
+  extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
+                        unsigned long, long *);
+  extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
+                        unsigned long, long);
+  extern void ia64_flush_fph (struct task_struct *);
+  extern void ia64_sync_fph (struct task_struct *);
+  extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
+                                 unsigned long, unsigned long);
+
+  /* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
+  extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
+  /* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
+  extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);
+
+  extern void ia64_increment_ip (struct pt_regs *pt);
+  extern void ia64_decrement_ip (struct pt_regs *pt);
+
+#endif /* !__KERNEL__ */
+
+/* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */
+struct pt_all_user_regs {
+       unsigned long nat;
+       unsigned long cr_iip;
+       unsigned long cfm;
+       unsigned long cr_ipsr;
+       unsigned long pr;
+
+       unsigned long gr[32];
+       unsigned long br[8];
+       unsigned long ar[128];
+       struct ia64_fpreg fr[128];
+};
+
+#endif /* !__ASSEMBLY__ */
+
+/* indices to application-registers array in pt_all_user_regs */
+#define PT_AUR_RSC     16
+#define PT_AUR_BSP     17
+#define PT_AUR_BSPSTORE        18
+#define PT_AUR_RNAT    19
+#define PT_AUR_CCV     32
+#define PT_AUR_UNAT    36
+#define PT_AUR_FPSR    40
+#define PT_AUR_PFS     64
+#define PT_AUR_LC      65
+#define PT_AUR_EC      66
+
+/*
+ * The numbers chosen here are somewhat arbitrary but absolutely MUST
+ * not overlap with any of the number assigned in <linux/ptrace.h>.
+ */
+#define PTRACE_SINGLEBLOCK     12      /* resume execution until next branch */
+#define PTRACE_OLD_GETSIGINFO  13      /* (replaced by PTRACE_GETSIGINFO in <linux/ptrace.h>)  */
+#define PTRACE_OLD_SETSIGINFO  14      /* (replaced by PTRACE_SETSIGINFO in <linux/ptrace.h>)  */
+#define PTRACE_GETREGS         18      /* get all registers (pt_all_user_regs) in one shot */
+#define PTRACE_SETREGS         19      /* set all registers (pt_all_user_regs) in one shot */
+
+#define PTRACE_OLDSETOPTIONS   21
+
+#endif /* _ASM_IA64_PTRACE_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/system.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/system.h     Tue Aug  2 23:59:09 2005
@@ -0,0 +1,299 @@
+#ifndef _ASM_IA64_SYSTEM_H
+#define _ASM_IA64_SYSTEM_H
+
+/*
+ * System defines. Note that this is included both from .c and .S
+ * files, so it does only defines, not any C code.  This is based
+ * on information published in the Processor Abstraction Layer
+ * and the System Abstraction Layer manual.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@xxxxxxxxx>
+ * Copyright (C) 1999 Don Dugger <don.dugger@xxxxxxxxx>
+ */
+#include <linux/config.h>
+
+#include <asm/kregs.h>
+#include <asm/page.h>
+#include <asm/pal.h>
+#include <asm/percpu.h>
+#ifdef XEN
+#include <asm/xensystem.h>
+#endif
+
+#define GATE_ADDR              __IA64_UL_CONST(0xa000000000000000)
+/*
+ * 0xa000000000000000+2*PERCPU_PAGE_SIZE
+ * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
+ */
+#ifndef XEN
+#define KERNEL_START            __IA64_UL_CONST(0xa000000100000000)
+#define PERCPU_ADDR            (-PERCPU_PAGE_SIZE)
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+struct pci_vector_struct {
+       __u16 segment;  /* PCI Segment number */
+       __u16 bus;      /* PCI Bus number */
+       __u32 pci_id;   /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
+       __u8 pin;       /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
+       __u32 irq;      /* IRQ assigned */
+};
+
+extern struct ia64_boot_param {
+       __u64 command_line;             /* physical address of command line arguments */
+       __u64 efi_systab;               /* physical address of EFI system table */
+       __u64 efi_memmap;               /* physical address of EFI memory map */
+       __u64 efi_memmap_size;          /* size of EFI memory map */
+       __u64 efi_memdesc_size;         /* size of an EFI memory map descriptor */
+       __u32 efi_memdesc_version;      /* memory descriptor version */
+       struct {
+               __u16 num_cols; /* number of columns on console output device */
+               __u16 num_rows; /* number of rows on console output device */
+               __u16 orig_x;   /* cursor's x position */
+               __u16 orig_y;   /* cursor's y position */
+       } console_info;
+       __u64 fpswa;            /* physical address of the fpswa interface */
+       __u64 initrd_start;
+       __u64 initrd_size;
+} *ia64_boot_param;
+
+/*
+ * Macros to force memory ordering.  In these descriptions, "previous"
+ * and "subsequent" refer to program order; "visible" means that all
+ * architecturally visible effects of a memory access have occurred
+ * (at a minimum, this means the memory has been read or written).
+ *
+ *   wmb():    Guarantees that all preceding stores to memory-
+ *             like regions are visible before any subsequent
+ *             stores and that all following stores will be
+ *             visible only after all previous stores.
+ *   rmb():    Like wmb(), but for reads.
+ *   mb():     wmb()/rmb() combo, i.e., all previous memory
+ *             accesses are visible before all subsequent
+ *             accesses and vice versa.  This is also known as
+ *             a "fence."
+ *
+ * Note: "mb()" and its variants cannot be used as a fence to order
+ * accesses to memory mapped I/O registers.  For that, mf.a needs to
+ * be used.  However, we don't want to always use mf.a because (a)
+ * it's (presumably) much slower than mf and (b) mf.a is supported for
+ * sequential memory pages only.
+ */
+#define mb()   ia64_mf()
+#define rmb()  mb()
+#define wmb()  mb()
+#define read_barrier_depends() do { } while(0)
+
+#ifdef CONFIG_SMP
+# define smp_mb()      mb()
+# define smp_rmb()     rmb()
+# define smp_wmb()     wmb()
+# define smp_read_barrier_depends()    read_barrier_depends()
+#else
+# define smp_mb()      barrier()
+# define smp_rmb()     barrier()
+# define smp_wmb()     barrier()
+# define smp_read_barrier_depends()    do { } while(0)
+#endif
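As a usage sketch of the ordering macros above (kernel context assumed; the struct and function names are illustrative only, not part of this patch): the classic pattern is to publish data with smp_wmb() before setting a ready flag, paired with smp_rmb() on the reader side.

/* Illustrative sketch only (kernel context; `struct msg' is hypothetical). */
struct msg { int payload; int ready; };

static void publish(struct msg *m, int value)
{
        m->payload = value;
        smp_wmb();              /* payload store must be visible before the flag store */
        m->ready = 1;
}

static int consume(struct msg *m)
{
        if (!m->ready)
                return -1;
        smp_rmb();              /* order the flag load before the payload load */
        return m->payload;
}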
+
+/*
+ * XXX check on these---I suspect what Linus really wants here is
+ * acquire vs release semantics but we can't discuss this stuff with
+ * Linus just yet.  Grrr...
+ */
+#define set_mb(var, value)     do { (var) = (value); mb(); } while (0)
+#define set_wmb(var, value)    do { (var) = (value); mb(); } while (0)
+
+#define safe_halt()         ia64_pal_halt_light()    /* PAL_HALT_LIGHT */
+
+/*
+ * The group barrier in front of the rsm & ssm are necessary to ensure
+ * that none of the previous instructions in the same group are
+ * affected by the rsm/ssm.
+ */
+/* For spinlocks etc */
+
+/*
+ * - clearing psr.i is implicitly serialized (visible by next insn)
+ * - setting psr.i requires data serialization
+ * - we need a stop-bit before reading PSR because we sometimes
+ *   write a floating-point register right before reading the PSR
+ *   and that writes to PSR.mfl
+ */
+#define __local_irq_save(x)                    \
+do {                                           \
+       ia64_stop();                            \
+       (x) = ia64_getreg(_IA64_REG_PSR);       \
+       ia64_stop();                            \
+       ia64_rsm(IA64_PSR_I);                   \
+} while (0)
+
+#define __local_irq_disable()                  \
+do {                                           \
+       ia64_stop();                            \
+       ia64_rsm(IA64_PSR_I);                   \
+} while (0)
+
+#define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I)
+
+#ifdef CONFIG_IA64_DEBUG_IRQ
+
+  extern unsigned long last_cli_ip;
+
+# define __save_ip()           last_cli_ip = ia64_getreg(_IA64_REG_IP)
+
+# define local_irq_save(x)                                     \
+do {                                                           \
+       unsigned long psr;                                      \
+                                                               \
+       __local_irq_save(psr);                                  \
+       if (psr & IA64_PSR_I)                                   \
+               __save_ip();                                    \
+       (x) = psr;                                              \
+} while (0)
+
+# define local_irq_disable()   do { unsigned long x; local_irq_save(x); } while (0)
+
+# define local_irq_restore(x)                                  \
+do {                                                           \
+       unsigned long old_psr, psr = (x);                       \
+                                                               \
+       local_save_flags(old_psr);                              \
+       __local_irq_restore(psr);                               \
+       if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I))      \
+               __save_ip();                                    \
+} while (0)
+
+#else /* !CONFIG_IA64_DEBUG_IRQ */
+# define local_irq_save(x)     __local_irq_save(x)
+# define local_irq_disable()   __local_irq_disable()
+# define local_irq_restore(x)  __local_irq_restore(x)
+#endif /* !CONFIG_IA64_DEBUG_IRQ */
+
+#define local_irq_enable()     ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
+#define local_save_flags(flags)        ({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); })
+
+#define irqs_disabled()                                \
+({                                             \
+       unsigned long __ia64_id_flags;          \
+       local_save_flags(__ia64_id_flags);      \
+       (__ia64_id_flags & IA64_PSR_I) == 0;    \
+})
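A short usage sketch of the flag-based interfaces above (kernel context assumed; the critical-section body is illustrative only). local_irq_save()/local_irq_restore() nest safely because the old PSR value travels in the caller's flags variable:

/* Illustrative sketch only (kernel context). */
static void touch_percpu_state(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* remember psr.i, then mask interrupts */
        /* ... manipulate state that an interrupt handler also touches ... */
        local_irq_restore(flags);       /* re-enable only if they were enabled before */
}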
+
+#ifdef __KERNEL__
+
+#define prepare_to_switch()    do { } while(0)
+
+#ifdef CONFIG_IA32_SUPPORT
+# define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0)
+#else
+# define IS_IA32_PROCESS(regs)         0
+struct task_struct;
+static inline void ia32_save_state(struct task_struct *t __attribute__((unused))){}
+static inline void ia32_load_state(struct task_struct *t __attribute__((unused))){}
+#endif
+
+/*
+ * Context switch from one thread to another.  If the two threads have
+ * different address spaces, schedule() has already taken care of
+ * switching to the new address space by calling switch_mm().
+ *
+ * Disabling access to the fph partition and the debug-register
+ * context switch MUST be done before calling ia64_switch_to() since a
+ * newly created thread returns directly to
+ * ia64_ret_from_syscall_clear_r8.
+ */
+extern struct task_struct *ia64_switch_to (void *next_task);
+
+struct task_struct;
+
+extern void ia64_save_extra (struct task_struct *task);
+extern void ia64_load_extra (struct task_struct *task);
+
+#ifdef CONFIG_PERFMON
+  DECLARE_PER_CPU(unsigned long, pfm_syst_info);
+# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
+#else
+# define PERFMON_IS_SYSWIDE() (0)
+#endif
+
+#ifndef XEN
+#define IA64_HAS_EXTRA_STATE(t)                                                        \
+       ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)       \
+        || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
+
+#define __switch_to(prev,next,last) do {                                                        \
+       if (IA64_HAS_EXTRA_STATE(prev))                                                          \
+               ia64_save_extra(prev);                                                           \
+       if (IA64_HAS_EXTRA_STATE(next))                                                          \
+               ia64_load_extra(next);                                                           \
+       ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);                    \
+       (last) = ia64_switch_to((next));                                                         \
+} while (0)
+#endif 
+
+#ifdef CONFIG_SMP
+/*
+ * In the SMP case, we save the fph state when context-switching away from a thread that
+ * modified fph.  This way, when the thread gets scheduled on another CPU, the CPU can
+ * pick up the state from task->thread.fph, avoiding the complication of having to fetch
+ * the latest fph state from another CPU.  In other words: eager save, lazy restore.
+ */
+# define switch_to(prev,next,last) do {                                                         \
+       if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {              \
+               ia64_psr(ia64_task_regs(prev))->mfh = 0;                                         \
+               (prev)->thread.flags |= IA64_THREAD_FPH_VALID;                                   \
+               __ia64_save_fpu((prev)->thread.fph);                                             \
+       }                                                                                        \
+       __switch_to(prev, next, last);                                                           \
+} while (0)
+#else
+# define switch_to(prev,next,last)     __switch_to(prev, next, last)
+#endif
+
+/*
+ * On IA-64, we don't want to hold the runqueue's lock during the low-level context-switch,
+ * because that could cause a deadlock.  Here is an example by Erich Focht:
+ *
+ * Example:
+ * CPU#0:
+ * schedule()
+ *    -> spin_lock_irq(&rq->lock)
+ *    -> context_switch()
+ *       -> wrap_mmu_context()
+ *          -> read_lock(&tasklist_lock)
+ *
+ * CPU#1:
+ * sys_wait4() or release_task() or forget_original_parent()
+ *    -> write_lock(&tasklist_lock)
+ *    -> do_notify_parent()
+ *       -> wake_up_parent()
+ *          -> try_to_wake_up()
+ *             -> spin_lock_irq(&parent_rq->lock)
+ *
+ * If the parent's rq happens to be on CPU#0, we'll wait for the rq->lock
+ * of that CPU which will not be released, because there we wait for the
+ * tasklist_lock to become available.
+ */
+#define prepare_arch_switch(rq, next)          \
+do {                                           \
+       spin_lock(&(next)->switch_lock);        \
+       spin_unlock(&(rq)->lock);               \
+} while (0)
+#define finish_arch_switch(rq, prev)   spin_unlock_irq(&(prev)->switch_lock)
+#define task_running(rq, p)            ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
+
+#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
+
+void cpu_idle_wait(void);
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_SYSTEM_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/types.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/types.h      Tue Aug  2 23:59:09 2005
@@ -0,0 +1,104 @@
+#ifndef _ASM_IA64_TYPES_H
+#define _ASM_IA64_TYPES_H
+#ifdef XEN
+#ifndef __ASSEMBLY__
+typedef unsigned long ssize_t;
+typedef unsigned long size_t;
+typedef long long loff_t;
+#endif
+#endif
+
+/*
+ * This file is never included by application software unless explicitly requested (e.g.,
+ * via linux/types.h) in which case the application is Linux specific so (user-) name
+ * space pollution is not a major issue.  However, for interoperability, libraries still
+ * need to be careful to avoid name clashes.
+ *
+ * Based on <asm-alpha/types.h>.
+ *
+ * Modified 1998-2000, 2002
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>, Hewlett-Packard Co
+ */
+
+#ifdef __ASSEMBLY__
+# define __IA64_UL(x)          (x)
+# define __IA64_UL_CONST(x)    x
+
+# ifdef __KERNEL__
+#  define BITS_PER_LONG 64
+# endif
+
+#else
+# define __IA64_UL(x)          ((unsigned long)(x))
+# define __IA64_UL_CONST(x)    x##UL
+
+typedef unsigned int umode_t;
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+typedef __signed__ long __s64;
+typedef unsigned long __u64;
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+# ifdef __KERNEL__
+
+typedef __s8 s8;
+typedef __u8 u8;
+
+typedef __s16 s16;
+typedef __u16 u16;
+
+typedef __s32 s32;
+typedef __u32 u32;
+
+typedef __s64 s64;
+typedef __u64 u64;
+
+#ifdef XEN
+/*
+ * Below are truly Linux-specific types that should never collide with
+ * any application/library that wants linux/types.h.
+ */
+
+#ifdef __CHECKER__
+#define __bitwise __attribute__((bitwise))
+#else
+#define __bitwise
+#endif
+
+typedef __u16 __bitwise __le16;
+typedef __u16 __bitwise __be16;
+typedef __u32 __bitwise __le32;
+typedef __u32 __bitwise __be32;
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __u64 __bitwise __le64;
+typedef __u64 __bitwise __be64;
+#endif
+#endif
+
+#define BITS_PER_LONG 64
+
+/* DMA addresses are 64-bits wide, in general.  */
+
+typedef u64 dma_addr_t;
+
+typedef unsigned short kmem_bufctl_t;
+
+# endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_TYPES_H */
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/uaccess.h
--- /dev/null   Tue Aug  2 17:20:46 2005
+++ b/xen/include/asm-ia64/uaccess.h    Tue Aug  2 23:59:09 2005
@@ -0,0 +1,381 @@
+#ifndef _ASM_IA64_UACCESS_H
+#define _ASM_IA64_UACCESS_H
+
+/*
+ * This file defines various macros to transfer memory areas across
+ * the user/kernel boundary.  This needs to be done carefully because
+ * this code is executed in kernel mode and uses user-specified
+ * addresses.  Thus, we need to be careful not to let the user
+ * trick us into accessing kernel memory that would normally be
+ * inaccessible.  This code is also fairly performance sensitive,
+ * so we want to spend as little time doing safety checks as
+ * possible.
+ *
+ * To make matters a bit more interesting, these macros sometimes also
+ * called from within the kernel itself, in which case the address
+ * validity check must be skipped.  The get_fs() macro tells us what
+ * to do: if get_fs()==USER_DS, checking is performed, if
+ * get_fs()==KERNEL_DS, checking is bypassed.
+ *
+ * Note that even if the memory area specified by the user is in a
+ * valid address range, it is still possible that we'll get a page
+ * fault while accessing it.  This is handled by filling out an
+ * exception handler fixup entry for each instruction that has the
+ * potential to fault.  When such a fault occurs, the page fault
+ * handler checks to see whether the faulting instruction has a fixup
+ * associated and, if so, sets r8 to -EFAULT and clears r9 to 0 and
+ * then resumes execution at the continuation point.
+ *
+ * Based on <asm-alpha/uaccess.h>.
+ *
+ * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#ifdef CONFIG_VTI
+#include <asm/vmx_uaccess.h>
+#else // CONFIG_VTI
+
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+
+#include <asm/intrinsics.h>
+#include <asm/pgtable.h>
+
+/*
+ * For historical reasons, the following macros are grossly misnamed:
+ */
+#define KERNEL_DS      ((mm_segment_t) { ~0UL })               /* cf. access_ok() */
+#define USER_DS                ((mm_segment_t) { TASK_SIZE-1 })        /* cf. access_ok() */
+
+#define VERIFY_READ    0
+#define VERIFY_WRITE   1
+
+#define get_ds()  (KERNEL_DS)
+#define get_fs()  (current_thread_info()->addr_limit)
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a, b)       ((a).seg == (b).seg)
+
+/*
+ * When accessing user memory, we need to make sure the entire area really is in
+ * user-level space.  In order to do this efficiently, we make sure that the page at
+ * address TASK_SIZE is never valid.  We also need to make sure that the address doesn't
+ * point inside the virtually mapped linear page table.
+ */
+#ifdef XEN
+/* VT-i reserves bit 60 for the VMM; guest addresses have bit 60 = bit 59 */
+#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
+#define __access_ok(addr, size, segment) (!IS_VMM_ADDRESS((unsigned long)(addr)))
+#else
+#define __access_ok(addr, size, segment)                                               \
+({                                                                                     \
+       __chk_user_ptr(addr);                                                           \
+       (likely((unsigned long) (addr) <= (segment).seg)                                \
+        && ((segment).seg == KERNEL_DS.seg                                             \
+            || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT)));        \
+})
+#endif
+#define access_ok(type, addr, size)    __access_ok((addr), (size), get_fs())
+
+static inline int
+verify_area (int type, const void __user *addr, unsigned long size)
+{
+       return access_ok(type, addr, size) ? 0 : -EFAULT;
+}
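The XEN variant of __access_ok() above only rejects addresses in the half of the address space reserved for the VMM: it compares bit 60 against bit 59 and fails when they differ. A small self-contained check of that predicate (user-space C; the sample addresses are illustrative only):

#include <assert.h>

/* Same test as IS_VMM_ADDRESS() above. */
static int is_vmm_address(unsigned long addr)
{
        return (int)(((addr >> 60) ^ (addr >> 59)) & 1);
}

int main(void)
{
        /* bit 60 == bit 59: treated as a guest address */
        assert(!is_vmm_address(0x0000000000001000UL));  /* bits 60,59 = 0,0 */
        assert(!is_vmm_address(0xf800000000000000UL));  /* bits 60,59 = 1,1 */

        /* bit 60 != bit 59: reserved for the VMM, so __access_ok() fails */
        assert(is_vmm_address(0x1000000000000000UL));   /* bits 60,59 = 1,0 */
        assert(is_vmm_address(0x0800000000000000UL));   /* bits 60,59 = 0,1 */
        return 0;
}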
+
+/*
+ * These are the main single-value transfer routines.  They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * Careful to not
+ * (a) re-use the arguments for side effects (sizeof/typeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x, ptr)       __put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs())
+#define get_user(x, ptr)       __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
+
+/*
+ * The "__xxx" versions do not do address space checking, useful when
+ * doing multiple accesses to the same area (the programmer has to do the
+ * checks by hand with "access_ok()")
+ */
+#define __put_user(x, ptr)     __put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr)     __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+extern long __put_user_unaligned_unknown (void);
+
+#define __put_user_unaligned(x, ptr)                                                           \
+({                                                                                             \
+       long __ret;                                                                             \
+       switch (sizeof(*(ptr))) {                                                               \
+               case 1: __ret = __put_user((x), (ptr)); break;                                  \
+               case 2: __ret = (__put_user((x), (u8 __user *)(ptr)))                           \
+                       | (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break;              \
+               case 4: __ret = (__put_user((x), (u16 __user *)(ptr)))                          \
+                       | (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break;            \
+               case 8: __ret = (__put_user((x), (u32 __user *)(ptr)))                          \
+                       | (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break;            \
+               default: __ret = __put_user_unaligned_unknown();                                \
+       }                                                                                       \
+       __ret;                                                                                  \
+})
+
+extern long __get_user_unaligned_unknown (void);
+
+#define __get_user_unaligned(x, ptr)                                                           \
+({                                                                                             \
+       long __ret;                                                                             \
+       switch (sizeof(*(ptr))) {                                                               \
+               case 1: __ret = __get_user((x), (ptr)); break;                                  \
+               case 2: __ret = (__get_user((x), (u8 __user *)(ptr)))                           \
+                       | (__get_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break;              \
+               case 4: __ret = (__get_user((x), (u16 __user *)(ptr)))                          \
+                       | (__get_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break;            \
+               case 8: __ret = (__get_user((x), (u32 __user *)(ptr)))                          \
+                       | (__get_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break;            \
+               default: __ret = __get_user_unaligned_unknown();                                \
+       }                                                                                       \
+       __ret;                                                                                  \
+})
+
+#ifdef ASM_SUPPORTED
+  struct __large_struct { unsigned long buf[100]; };
+# define __m(x) (*(struct __large_struct __user *)(x))
+
+/* We need to declare the __ex_table section before we can use it in .xdata.  */
+asm (".section \"__ex_table\", \"a\"\n\t.previous");
+
+# define __get_user_size(val, addr, n, err)                                                    \
+do {                                                                                           \
+       register long __gu_r8 asm ("r8") = 0;                                                   \
+       register long __gu_r9 asm ("r9");                                                       \
+       asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"     \
+            "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n"                                         \
+            "[1:]"                                                                             \
+            : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8));                    \
+       (err) = __gu_r8;                                                                        \
+       (val) = __gu_r9;                                                                        \
+} while (0)
+
+/*
+ * The "__put_user_size()" macro tells gcc it reads from memory instead of writing it.  This
+ * is because they do not write to any memory gcc knows about, so there are no aliasing
+ * issues.
+ */
+# define __put_user_size(val, addr, n, err)                                                    \
+do {                                                                                           \
+       register long __pu_r8 asm ("r8") = 0;                                                   \
+       asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \
+                     "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n"                                  \
+                     "[1:]"                                                                    \
+                     : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8));               \
+       (err) = __pu_r8;                                                                        \
+} while (0)
+
+#else /* !ASM_SUPPORTED */
+# define RELOC_TYPE    2       /* ip-rel */
+# define __get_user_size(val, addr, n, err)                            \
+do {                                                                   \
+       __ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE);   \
+       (err) = ia64_getreg(_IA64_REG_R8);                              \
+       (val) = ia64_getreg(_IA64_REG_R9);                              \
+} while (0)
+# define __put_user_size(val, addr, n, err)                                                    \
+do {                                                                                           \
+       __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val));    \
+       (err) = ia64_getreg(_IA64_REG_R8);                                                      \
+} while (0)
+#endif /* !ASM_SUPPORTED */
+
+extern void __get_user_unknown (void);
+
+/*
+ * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
+ * could clobber r8 and r9 (among others).  Thus, be careful not to evaluate it while
+ * using r8/r9.
+ */
+#define __do_get_user(check, x, ptr, size, segment)                                    \
+({                                                                                     \
+       const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);                              \
+       __typeof__ (size) __gu_size = (size);                                           \
+       long __gu_err = -EFAULT, __gu_val = 0;                                          \
+                                                                                       \
+       if (!check || __access_ok(__gu_ptr, size, segment))                             \
+               switch (__gu_size) {                                                    \
+                     case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break;  \
+                     case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break;  \
+                     case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break;  \
+                     case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break;  \
+                     default: __get_user_unknown(); break;                             \
+               }                                                                       \
+       (x) = (__typeof__(*(__gu_ptr))) __gu_val;                                       \
+       __gu_err;                                                                       \
+})
+
+#define __get_user_nocheck(x, ptr, size)       __do_get_user(0, x, ptr, size, 
KERNEL_DS)
+#define __get_user_check(x, ptr, size, segment)        __do_get_user(1, x, 
ptr, size, segment)
+
+extern void __put_user_unknown (void);
+
+/*
+ * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve 
subroutine-calls, which
+ * could clobber r8 (among others).  Thus, be careful not to evaluate them 
while using r8.
+ */
+#define __do_put_user(check, x, ptr, size, segment)                            
        \
+({                                                                             
        \
+       __typeof__ (x) __pu_x = (x);                                            
        \
+       __typeof__ (*(ptr)) __user *__pu_ptr = (ptr);                           
        \
+       __typeof__ (size) __pu_size = (size);                                   
        \
+       long __pu_err = -EFAULT;                                                
        \
+                                                                               
        \
+       if (!check || __access_ok(__pu_ptr, __pu_size, segment))                
        \
+               switch (__pu_size) {                                            
        \
+                     case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); 
break;    \
+                     case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); 
break;    \
+                     case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); 
break;    \
+                     case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); 
break;    \
+                     default: __put_user_unknown(); break;                     
        \
+               }                                                               
        \
+       __pu_err;                                                               
        \
+})
+
+#define __put_user_nocheck(x, ptr, size)       __do_put_user(0, x, ptr, size, 
KERNEL_DS)
+#define __put_user_check(x, ptr, size, segment)        __do_put_user(1, x, 
ptr, size, segment)
+
+/*
+ * Complex access routines
+ */
+extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
+                                              unsigned long count);
+
+static inline unsigned long
+__copy_to_user (void __user *to, const void *from, unsigned long count)
+{
+       return __copy_user(to, (void __user *) from, count);
+}
+
+static inline unsigned long
+__copy_from_user (void *to, const void __user *from, unsigned long count)
+{
+       return __copy_user((void __user *) to, from, count);
+}
+
+#define __copy_to_user_inatomic                __copy_to_user
+#define __copy_from_user_inatomic      __copy_from_user
+#define copy_to_user(to, from, n)                                                      \
+({                                                                                     \
+       void __user *__cu_to = (to);                                                    \
+       const void *__cu_from = (from);                                                 \
+       long __cu_len = (n);                                                            \
+                                                                                       \
+       if (__access_ok(__cu_to, __cu_len, get_fs()))                                   \
+               __cu_len = __copy_user(__cu_to, (void __user *) __cu_from, __cu_len);   \
+       __cu_len;                                                                       \
+})
+
+#define copy_from_user(to, from, n)                                                    \
+({                                                                                     \
+       void *__cu_to = (to);                                                           \
+       const void __user *__cu_from = (from);                                          \
+       long __cu_len = (n);                                                            \
+                                                                                       \
+       __chk_user_ptr(__cu_from);                                                      \
+       if (__access_ok(__cu_from, __cu_len, get_fs()))                                 \
+               __cu_len = __copy_user((void __user *) __cu_to, __cu_from, __cu_len);   \
+       __cu_len;                                                                       \
+})
+
+#define __copy_in_user(to, from, size) __copy_user((to), (from), (size))
+
+static inline unsigned long
+copy_in_user (void __user *to, const void __user *from, unsigned long n)
+{
+       if (likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)))
+               n = __copy_user(to, from, n);
+       return n;
+}
+
+extern unsigned long __do_clear_user (void __user *, unsigned long);
+
+#define __clear_user(to, n)            __do_clear_user(to, n)
+
+#define clear_user(to, n)                                      \
+({                                                             \
+       unsigned long __cu_len = (n);                           \
+       if (__access_ok(to, __cu_len, get_fs()))                \
+               __cu_len = __do_clear_user(to, __cu_len);       \
+       __cu_len;                                               \
+})
+
+
+/*
+ * Returns: -EFAULT if exception before terminator, N if the entire buffer filled, else
+ * strlen.
+ */
+extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);
+
+#define strncpy_from_user(to, from, n)                                 \
+({                                                                     \
+       const char __user * __sfu_from = (from);                        \
+       long __sfu_ret = -EFAULT;                                       \
+       if (__access_ok(__sfu_from, 0, get_fs()))                       \
+               __sfu_ret = __strncpy_from_user((to), __sfu_from, (n)); \
+       __sfu_ret;                                                      \
+})
+
+/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
+extern unsigned long __strlen_user (const char __user *);
+
+#define strlen_user(str)                               \
+({                                                     \
+       const char __user *__su_str = (str);            \
+       unsigned long __su_ret = 0;                     \
+       if (__access_ok(__su_str, 0, get_fs()))         \
+               __su_ret = __strlen_user(__su_str);     \
+       __su_ret;                                       \
+})
+
+/*
+ * Returns: 0 if exception before NUL or reaching the supplied limit
+ * (N), a value greater than N if the limit would be exceeded, else
+ * strlen.
+ */
+extern unsigned long __strnlen_user (const char __user *, long);
+
+#define strnlen_user(str, len)                                 \
+({                                                             \
+       const char __user *__su_str = (str);                    \
+       unsigned long __su_ret = 0;                             \
+       if (__access_ok(__su_str, 0, get_fs()))                 \
+               __su_ret = __strnlen_user(__su_str, len);       \
+       __su_ret;                                               \
+})
+
+#endif // CONFIG_VTI
+/* Generic code can't deal with the location-relative format that we use for compactness.  */
+#define ARCH_HAS_SORT_EXTABLE
+#define ARCH_HAS_SEARCH_EXTABLE
+
+struct exception_table_entry {
+       int addr;       /* location-relative address of insn this fixup is for */
+       int cont;       /* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
+};
+
+extern void ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e);
+extern const struct exception_table_entry *search_exception_tables (unsigned long addr);
+
+static inline int
+ia64_done_with_exception (struct pt_regs *regs)
+{
+       const struct exception_table_entry *e;
+       e = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
+       if (e) {
+               ia64_handle_exception(regs, e);
+               return 1;
+       }
+       return 0;
+}
+
+#endif /* _ASM_IA64_UACCESS_H */
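
[Editor's note, not part of the changeset: a minimal call-site sketch for the accessors defined above. The example_* names are hypothetical, and passing get_fs() as the segment argument assumes the usual __access_ok/get_fs definitions earlier in this header.]

static int example_fetch_ulong(unsigned long __user *uptr, unsigned long *out)
{
	unsigned long val;

	/* __get_user_check() expands to __do_get_user(1, ...): it validates the
	 * pointer, loads through the exception table, and yields 0 or -EFAULT. */
	if (__get_user_check(val, uptr, sizeof(val), get_fs()))
		return -EFAULT;
	*out = val;
	return 0;
}

static int example_fetch_buffer(void *dst, const void __user *src, unsigned long n)
{
	/* copy_from_user() returns the number of bytes it could NOT copy. */
	if (copy_from_user(dst, src, n) != 0)
		return -EFAULT;
	return 0;
}
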
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/lib/Makefile
--- a/xen/arch/ia64/lib/Makefile        Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,44 +0,0 @@
-#
-# Makefile for ia64-specific library routines..
-#
-
-include $(BASEDIR)/Rules.mk
-
-OBJS := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o                  \
-       __divdi3.o __udivdi3.o __moddi3.o __umoddi3.o                   \
-       bitop.o checksum.o clear_page.o csum_partial_copy.o copy_page.o \
-       clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o   \
-       flush.o ip_fast_csum.o do_csum.o copy_user.o                    \
-       memset.o strlen.o memcpy.o 
-
-default: $(OBJS)
-       $(LD) -r -o ia64lib.o $(OBJS)
-
-AFLAGS += -I$(BASEDIR)/include -D__ASSEMBLY__
-
-__divdi3.o: idiv64.S
-       $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -o $@ $<
-
-__udivdi3.o: idiv64.S
-       $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -DUNSIGNED -c -o $@ $<
-
-__moddi3.o: idiv64.S
-       $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -DMODULO -c -o $@ $<
-
-__umoddi3.o: idiv64.S
-       $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -DMODULO -DUNSIGNED -c -o $@ $<
-
-__divsi3.o: idiv32.S
-       $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -o $@ $<
-
-__udivsi3.o: idiv32.S
-       $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -DUNSIGNED -c -o $@ $<
-
-__modsi3.o: idiv32.S
-       $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -DMODULO -c -o $@ $<
-
-__umodsi3.o: idiv32.S
-       $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -DMODULO -DUNSIGNED -c -o $@ $<
-
-clean:
-       rm -f *.o *~
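
[Editor's note, not part of the changeset: the Makefile deleted above builds idiv64.S and idiv32.S several times, selecting the variant with -DUNSIGNED and -DMODULO. A reference sketch of the semantics those flag combinations provide; the real helpers are hand-written ia64 assembly, not this C.]

long long ref_divdi3(long long a, long long b)                   /* default build       */
{
	return a / b;
}

unsigned long long ref_udivdi3(unsigned long long a,
                               unsigned long long b)             /* -DUNSIGNED          */
{
	return a / b;
}

long long ref_moddi3(long long a, long long b)                   /* -DMODULO            */
{
	return a % b;
}

unsigned long long ref_umoddi3(unsigned long long a,
                               unsigned long long b)             /* -DMODULO -DUNSIGNED */
{
	return a % b;
}
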
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/cpumask.h
--- a/xen/arch/ia64/patch/linux-2.6.11/cpumask.h        Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,12 +0,0 @@
---- ../../linux-2.6.11/include/linux/cpumask.h 2005-03-02 00:38:00.000000000 -0700
-+++ include/asm-ia64/linux/cpumask.h   2005-04-28 13:21:20.000000000 -0600
-@@ -342,7 +342,9 @@
-  */
- 
- extern cpumask_t cpu_possible_map;
-+#ifndef XEN
- extern cpumask_t cpu_online_map;
-+#endif
- extern cpumask_t cpu_present_map;
- 
- #if NR_CPUS > 1
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/efi.c
--- a/xen/arch/ia64/patch/linux-2.6.11/efi.c    Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,50 +0,0 @@
---- ../../linux-2.6.11/arch/ia64/kernel/efi.c  2005-03-02 00:37:47.000000000 -0700
-+++ arch/ia64/efi.c    2005-06-09 06:15:36.000000000 -0600
-@@ -320,6 +320,16 @@
-               if (!(md->attribute & EFI_MEMORY_WB))
-                       continue;
- 
-+#ifdef XEN
-+// this works around a problem in the ski bootloader
-+{
-+              extern long running_on_sim;
-+              if (running_on_sim && md->type != EFI_CONVENTIONAL_MEMORY)
-+                      continue;
-+}
-+// this is a temporary hack to avoid CONFIG_VIRTUAL_MEM_MAP
-+              if (md->phys_addr >= 0x100000000) continue;
-+#endif
-               /*
-                * granule_addr is the base of md's first granule.
-                * [granule_addr - first_non_wb_addr) is guaranteed to
-@@ -719,6 +729,30 @@
-       return 0;
- }
- 
-+#ifdef XEN
-+// variation of efi_get_iobase which returns entire memory descriptor
-+efi_memory_desc_t *
-+efi_get_io_md (void)
-+{
-+      void *efi_map_start, *efi_map_end, *p;
-+      efi_memory_desc_t *md;
-+      u64 efi_desc_size;
-+
-+      efi_map_start = __va(ia64_boot_param->efi_memmap);
-+      efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
-+      efi_desc_size = ia64_boot_param->efi_memdesc_size;
-+
-+      for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-+              md = p;
-+              if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
-+                      if (md->attribute & EFI_MEMORY_UC)
-+                              return md;
-+              }
-+      }
-+      return 0;
-+}
-+#endif
-+
- u32
- efi_mem_type (unsigned long phys_addr)
- {
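
[Editor's note, not part of the changeset: a hypothetical caller of the efi_get_io_md() helper shown in the removed patch above, sketching how the returned descriptor would typically be consumed. The io_port_phys_base name is illustrative only.]

unsigned long io_port_phys_base(void)
{
	efi_memory_desc_t *md = efi_get_io_md();

	if (md == NULL)
		return 0;               /* no uncacheable I/O port space descriptor */
	return md->phys_addr;           /* base of EFI_MEMORY_MAPPED_IO_PORT_SPACE */
}
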
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/entry.S
--- a/xen/arch/ia64/patch/linux-2.6.11/entry.S  Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,237 +0,0 @@
---- ../../linux-2.6.11/arch/ia64/kernel/entry.S        2005-03-02 00:37:50.000000000 -0700
-+++ arch/ia64/entry.S  2005-05-23 16:49:23.000000000 -0600
-@@ -46,6 +46,7 @@
- 
- #include "minstate.h"
- 
-+#ifndef XEN
-       /*
-        * execve() is special because in case of success, we need to
-        * setup a null register window frame.
-@@ -174,6 +175,7 @@
-       mov rp=loc0
-       br.ret.sptk.many rp
- END(sys_clone)
-+#endif /* !XEN */
- 
- /*
-  * prev_task <- ia64_switch_to(struct task_struct *next)
-@@ -191,7 +193,11 @@
-       movl r25=init_task
-       mov r27=IA64_KR(CURRENT_STACK)
-       adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
-+#ifdef XEN
-+      dep r20=0,in0,60,4              // physical address of "next"
-+#else
-       dep r20=0,in0,61,3              // physical address of "next"
-+#endif
-       ;;
-       st8 [r22]=sp                    // save kernel stack pointer of old task
-       shr.u r26=r20,IA64_GRANULE_SHIFT
-@@ -220,6 +226,16 @@
-       br.ret.sptk.many rp             // boogie on out in new context
- 
- .map:
-+#ifdef XEN
-+      // avoid overlapping with kernel TR
-+      movl r25=KERNEL_START
-+      dep  r23=0,in0,0,KERNEL_TR_PAGE_SHIFT
-+      ;;
-+      cmp.eq p7,p0=r25,r23
-+      ;;
-+(p7)  mov IA64_KR(CURRENT_STACK)=r26  // remember last page we mapped...
-+(p7)  br.cond.sptk .done
-+#endif
-       rsm psr.ic                      // interrupts (psr.i) are already 
disabled here
-       movl r25=PAGE_KERNEL
-       ;;
-@@ -376,7 +392,11 @@
-  *    - b7 holds address to return to
-  *    - must not touch r8-r11
-  */
-+#ifdef XEN
-+GLOBAL_ENTRY(load_switch_stack)
-+#else
- ENTRY(load_switch_stack)
-+#endif
-       .prologue
-       .altrp b7
- 
-@@ -470,6 +490,7 @@
-       br.cond.sptk.many b7
- END(load_switch_stack)
- 
-+#ifndef XEN
- GLOBAL_ENTRY(__ia64_syscall)
-       .regstk 6,0,0,0
-       mov r15=in5                             // put syscall number in place
-@@ -588,6 +609,7 @@
- }
- .ret4:        br.cond.sptk ia64_leave_kernel
- END(ia64_strace_leave_kernel)
-+#endif
- 
- GLOBAL_ENTRY(ia64_ret_from_clone)
-       PT_REGS_UNWIND_INFO(0)
-@@ -604,6 +626,15 @@
-        */
-       br.call.sptk.many rp=ia64_invoke_schedule_tail
- }
-+#ifdef XEN
-+      // new domains are cloned but not exec'ed so switch to user mode here
-+      cmp.ne pKStk,pUStk=r0,r0
-+#ifdef CONFIG_VTI
-+      br.cond.spnt ia64_leave_hypervisor
-+#else // CONFIG_VTI
-+      br.cond.spnt ia64_leave_kernel
-+#endif // CONFIG_VTI
-+#else
- .ret8:
-       adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
-       ;;
-@@ -614,6 +645,7 @@
-       ;;
-       cmp.ne p6,p0=r2,r0
- (p6)  br.cond.spnt .strace_check_retval
-+#endif
-       ;;                                      // added stop bits to prevent 
r8 dependency
- END(ia64_ret_from_clone)
-       // fall through
-@@ -700,19 +732,27 @@
- .work_processed_syscall:
-       adds r2=PT(LOADRS)+16,r12
-       adds r3=PT(AR_BSPSTORE)+16,r12
-+#ifdef XEN
-+      ;;
-+#else
-       adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
-       ;;
- (p6)  ld4 r31=[r18]                           // load 
current_thread_info()->flags
-+#endif
-       ld8 r19=[r2],PT(B6)-PT(LOADRS)          // load ar.rsc value for 
"loadrs"
-       mov b7=r0               // clear b7
-       ;;
-       ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)    // load ar.bspstore (may be 
garbage)
-       ld8 r18=[r2],PT(R9)-PT(B6)              // load b6
-+#ifndef XEN
- (p6)  and r15=TIF_WORK_MASK,r31               // any work other than 
TIF_SYSCALL_TRACE?
-+#endif
-       ;;
-       mov r16=ar.bsp                          // M2  get existing backing 
store pointer
-+#ifndef XEN
- (p6)  cmp4.ne.unc p6,p0=r15, r0               // any special work pending?
- (p6)  br.cond.spnt .work_pending_syscall
-+#endif
-       ;;
-       // start restoring the state saved on the kernel stack (struct pt_regs):
-       ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
-@@ -757,7 +797,11 @@
-       ;;
-       ld8.fill r12=[r2]       // restore r12 (sp)
-       ld8.fill r15=[r3]       // restore r15
-+#ifdef XEN
-+      movl r3=THIS_CPU(ia64_phys_stacked_size_p8)
-+#else
-       addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0
-+#endif
-       ;;
- (pUStk)       ld4 r3=[r3]             // r3 = cpu_data->phys_stacked_size_p8
- (pUStk) st1 [r14]=r17
-@@ -814,9 +858,18 @@
- (pUStk)       cmp.eq.unc p6,p0=r0,r0          // p6 <- pUStk
- #endif
- .work_processed_kernel:
-+#ifdef XEN
-+      alloc loc0=ar.pfs,0,1,1,0
-+      adds out0=16,r12
-+      ;;
-+(p6)  br.call.sptk.many b0=deliver_pending_interrupt
-+      mov ar.pfs=loc0
-+      mov r31=r0
-+#else
-       adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
-       ;;
- (p6)  ld4 r31=[r17]                           // load 
current_thread_info()->flags
-+#endif
-       adds r21=PT(PR)+16,r12
-       ;;
- 
-@@ -828,17 +881,20 @@
-       ld8 r28=[r2],8          // load b6
-       adds r29=PT(R24)+16,r12
- 
--      ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
-+      ld8.fill r16=[r3]
-       adds r30=PT(AR_CCV)+16,r12
- (p6)  and r19=TIF_WORK_MASK,r31               // any work other than 
TIF_SYSCALL_TRACE?
-       ;;
-+      adds r3=PT(AR_CSD)-PT(R16),r3
-       ld8.fill r24=[r29]
-       ld8 r15=[r30]           // load ar.ccv
- (p6)  cmp4.ne.unc p6,p0=r19, r0               // any special work pending?
-       ;;
-       ld8 r29=[r2],16         // load b7
-       ld8 r30=[r3],16         // load ar.csd
-+#ifndef XEN
- (p6)  br.cond.spnt .work_pending
-+#endif
-       ;;
-       ld8 r31=[r2],16         // load ar.ssd
-       ld8.fill r8=[r3],16
-@@ -934,7 +990,11 @@
-       shr.u r18=r19,16        // get byte size of existing "dirty" partition
-       ;;
-       mov r16=ar.bsp          // get existing backing store pointer
-+#ifdef XEN
-+      movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
-+#else
-       addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
-+#endif
-       ;;
-       ld4 r17=[r17]           // r17 = cpu_data->phys_stacked_size_p8
- (pKStk)       br.cond.dpnt skip_rbs_switch
-@@ -1069,6 +1129,7 @@
-       mov pr=r31,-1           // I0
-       rfi                     // B
- 
-+#ifndef XEN
-       /*
-        * On entry:
-        *      r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
-@@ -1130,6 +1191,7 @@
-       ld8 r8=[r2]
-       ld8 r10=[r3]
-       br.cond.sptk.many .work_processed_syscall       // re-check
-+#endif
- 
- END(ia64_leave_kernel)
- 
-@@ -1166,6 +1228,7 @@
-       br.ret.sptk.many rp
- END(ia64_invoke_schedule_tail)
- 
-+#ifndef XEN
-       /*
-        * Setup stack and call do_notify_resume_user().  Note that pSys and 
pNonSys need to
-        * be set up by the caller.  We declare 8 input registers so the system 
call
-@@ -1264,6 +1327,7 @@
-       mov ar.unat=r9
-       br.many b7
- END(sys_rt_sigreturn)
-+#endif
- 
- GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
-       .prologue
-@@ -1278,6 +1342,7 @@
-       br.cond.sptk.many rp                            // goes to 
ia64_leave_kernel
- END(ia64_prepare_handle_unaligned)
- 
-+#ifndef XEN
-       //
-       // unw_init_running(void (*callback)(info, arg), void *arg)
-       //
-@@ -1585,3 +1650,4 @@
-       data8 sys_ni_syscall
- 
-       .org sys_call_table + 8*NR_syscalls     // guard against failures to 
increase NR_syscalls
-+#endif
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/entry.h
--- a/xen/arch/ia64/patch/linux-2.6.11/entry.h  Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,37 +0,0 @@
---- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/arch/ia64/kernel/entry.h      2005-03-01 23:38:07.000000000 -0800
-+++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/arch/ia64/entry.h       2005-05-18 14:00:53.000000000 -0700
-@@ -7,6 +7,12 @@
- #define PRED_LEAVE_SYSCALL    1 /* TRUE iff leave from syscall */
- #define PRED_KERNEL_STACK     2 /* returning to kernel-stacks? */
- #define PRED_USER_STACK               3 /* returning to user-stacks? */
-+#ifdef CONFIG_VTI
-+#define PRED_EMUL             2 /* Need to save r4-r7 for inst emulation */
-+#define PRED_NON_EMUL         3 /* No need to save r4-r7 for normal path */
-+#define PRED_BN0              6 /* Guest is in bank 0 */
-+#define PRED_BN1              7 /* Guest is in bank 1 */
-+#endif // CONFIG_VTI
- #define PRED_SYSCALL          4 /* inside a system call? */
- #define PRED_NON_SYSCALL      5 /* complement of PRED_SYSCALL */
- 
-@@ -17,12 +23,21 @@
- # define pLvSys               PASTE(p,PRED_LEAVE_SYSCALL)
- # define pKStk                PASTE(p,PRED_KERNEL_STACK)
- # define pUStk                PASTE(p,PRED_USER_STACK)
-+#ifdef CONFIG_VTI
-+# define pEml         PASTE(p,PRED_EMUL)
-+# define pNonEml      PASTE(p,PRED_NON_EMUL)
-+# define pBN0         PASTE(p,PRED_BN0)
-+# define pBN1         PASTE(p,PRED_BN1)
-+#endif // CONFIG_VTI
- # define pSys         PASTE(p,PRED_SYSCALL)
- # define pNonSys      PASTE(p,PRED_NON_SYSCALL)
- #endif
- 
- #define PT(f)         (IA64_PT_REGS_##f##_OFFSET)
- #define SW(f)         (IA64_SWITCH_STACK_##f##_OFFSET)
-+#ifdef CONFIG_VTI
-+#define VPD(f)      (VPD_##f##_START_OFFSET)
-+#endif // CONFIG_VTI
- 
- #define PT_REGS_SAVES(off)                    \
-       .unwabi 3, 'i';                         \
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/gcc_intrin.h
--- a/xen/arch/ia64/patch/linux-2.6.11/gcc_intrin.h     Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,69 +0,0 @@
---- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/include/asm-ia64/gcc_intrin.h         2005-03-01 23:38:08.000000000 -0800
-+++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/include/asm-ia64/gcc_intrin.h   2005-05-18 14:00:53.000000000 -0700
-@@ -368,6 +368,66 @@
- #define ia64_mf()     asm volatile ("mf" ::: "memory")
- #define ia64_mfa()    asm volatile ("mf.a" ::: "memory")
- 
-+#ifdef CONFIG_VTI
-+/*
-+ * Flushrs instruction stream.
-+ */
-+#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
-+
-+#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
-+
-+#define ia64_get_rsc()                          \
-+({                                  \
-+    unsigned long val;                     \
-+    asm volatile ("mov %0=ar.rsc;;" : "=r"(val) :: "memory");  \
-+    val;                               \
-+})
-+
-+#define ia64_set_rsc(val)                       \
-+    asm volatile ("mov ar.rsc=%0;;" :: "r"(val) : "memory")
-+
-+#define ia64_get_bspstore()     \
-+({                                  \
-+    unsigned long val;                     \
-+    asm volatile ("mov %0=ar.bspstore;;" : "=r"(val) :: "memory");  \
-+    val;                               \
-+})
-+
-+#define ia64_set_bspstore(val)                       \
-+    asm volatile ("mov ar.bspstore=%0;;" :: "r"(val) : "memory")
-+
-+#define ia64_get_rnat()     \
-+({                                  \
-+    unsigned long val;                     \
-+    asm volatile ("mov %0=ar.rnat;" : "=r"(val) :: "memory");  \
-+    val;                               \
-+})
-+
-+#define ia64_set_rnat(val)                       \
-+    asm volatile ("mov ar.rnat=%0;;" :: "r"(val) : "memory")
-+
-+#define ia64_ttag(addr)                                                       
\
-+({                                                                            
\
-+      __u64 ia64_intri_res;                                                   
\
-+      asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr));        
\
-+      ia64_intri_res;                                                         
\
-+})
-+
-+#define ia64_get_dcr()                          \
-+({                                      \
-+    __u64 result;                               \
-+    asm volatile ("mov %0=cr.dcr" : "=r"(result) : );           \
-+    result;                                 \
-+})
-+
-+#define ia64_set_dcr(val)                           \
-+({                                      \
-+    asm volatile ("mov cr.dcr=%0" :: "r"(val) );            \
-+})
-+
-+#endif // CONFIG_VTI
-+
-+
- #define ia64_invala() asm volatile ("invala" ::: "memory")
- 
- #define ia64_thash(addr)                                                      
\
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/hardirq.h
--- a/xen/arch/ia64/patch/linux-2.6.11/hardirq.h        Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,14 +0,0 @@
---- ../../linux-2.6.11/include/linux/hardirq.h 2005-03-02 00:38:00.000000000 -0700
-+++ include/asm-ia64/linux/hardirq.h   2005-04-28 16:34:39.000000000 -0600
-@@ -60,7 +60,11 @@
-  */
- #define in_irq()              (hardirq_count())
- #define in_softirq()          (softirq_count())
-+#ifndef XEN
- #define in_interrupt()                (irq_count())
-+#else
-+#define in_interrupt()                0               // FIXME LATER
-+#endif
- 
- #if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
- # define in_atomic()  ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/head.S
--- a/xen/arch/ia64/patch/linux-2.6.11/head.S   Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,120 +0,0 @@
---- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/head.S        2005-03-01 23:38:13.000000000 -0800
-+++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/head.S 2005-05-18 12:40:50.000000000 -0700
-@@ -76,21 +76,21 @@
-        * We initialize all of them to prevent inadvertently assuming
-        * something about the state of address translation early in boot.
-        */
--      mov r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT 
<< 2) | 1)
-+      movl r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT 
<< 2) | 1)
-       movl r7=(0<<61)
--      mov r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT 
<< 2) | 1)
-+      movl r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT 
<< 2) | 1)
-       movl r9=(1<<61)
--      mov r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT 
<< 2) | 1)
-+      movl r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT 
<< 2) | 1)
-       movl r11=(2<<61)
--      mov r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT 
<< 2) | 1)
-+      movl r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT 
<< 2) | 1)
-       movl r13=(3<<61)
--      mov r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT 
<< 2) | 1)
-+      movl r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT 
<< 2) | 1)
-       movl r15=(4<<61)
--      mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT 
<< 2) | 1)
-+      movl r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT 
<< 2) | 1)
-       movl r17=(5<<61)
--      mov r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | 
(IA64_GRANULE_SHIFT << 2))
-+      movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | 
(IA64_GRANULE_SHIFT << 2))
-       movl r19=(6<<61)
--      mov r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | 
(IA64_GRANULE_SHIFT << 2))
-+      movl r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | 
(IA64_GRANULE_SHIFT << 2))
-       movl r21=(7<<61)
-       ;;
-       mov rr[r7]=r6
-@@ -129,8 +129,13 @@
-       /*
-        * Switch into virtual mode:
-        */
-+#ifdef CONFIG_VTI
-+      movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH \
-+                |IA64_PSR_DI)
-+#else // CONFIG_VTI
-       movl 
r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
-                 |IA64_PSR_DI)
-+#endif // CONFIG_VTI
-       ;;
-       mov cr.ipsr=r16
-       movl r17=1f
-@@ -143,7 +148,11 @@
- 1:    // now we are in virtual mode
- 
-       // set IVT entry point---can't access I/O ports without it
-+#ifdef CONFIG_VTI
-+    movl r3=vmx_ia64_ivt
-+#else // CONFIG_VTI
-       movl r3=ia64_ivt
-+#endif // CONFIG_VTI
-       ;;
-       mov cr.iva=r3
-       movl r2=FPSR_DEFAULT
-@@ -187,7 +196,11 @@
-       dep r18=0,r3,0,12
-       ;;
-       or r18=r17,r18
-+#ifdef XEN
-+      dep r2=-1,r3,60,4       // IMVA of task
-+#else
-       dep r2=-1,r3,61,3       // IMVA of task
-+#endif
-       ;;
-       mov r17=rr[r2]
-       shr.u r16=r3,IA64_GRANULE_SHIFT
-@@ -207,8 +220,15 @@
- 
- .load_current:
-       // load the "current" pointer (r13) and ar.k6 with the current task
-+#ifdef CONFIG_VTI
-+      mov r21=r2              // virtual address
-+      ;;
-+      bsw.1
-+      ;;
-+#else // CONFIG_VTI
-       mov IA64_KR(CURRENT)=r2         // virtual address
-       mov IA64_KR(CURRENT_STACK)=r16
-+#endif // CONFIG_VTI
-       mov r13=r2
-       /*
-        * Reserve space at the top of the stack for "struct pt_regs".  Kernel 
threads
-@@ -227,7 +247,11 @@
-       ;;
-       mov ar.rsc=0x3          // place RSE in eager mode
- 
-+#ifdef XEN
-+(isBP)        dep r28=-1,r28,60,4     // make address virtual
-+#else
- (isBP)        dep r28=-1,r28,61,3     // make address virtual
-+#endif
- (isBP)        movl r2=ia64_boot_param
-       ;;
- (isBP)        st8 [r2]=r28            // save the address of the boot param 
area passed by the bootloader
-@@ -254,7 +278,9 @@
-       br.call.sptk.many b0=console_print
- 
- self: hint @pause
-+      ;;
-       br.sptk.many self               // endless loop
-+      ;;
- END(_start)
- 
- GLOBAL_ENTRY(ia64_save_debug_regs)
-@@ -850,7 +876,11 @@
-  * intermediate precision so that we can produce a full 64-bit result.
-  */
- GLOBAL_ENTRY(sched_clock)
-+#ifdef XEN
-+      movl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET
-+#else
-       addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
-+#endif
-       mov.m r9=ar.itc         // fetch cycle-counter                          
(35 cyc)
-       ;;
-       ldf8 f8=[r8]
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/hpsim_ssc.h
--- a/xen/arch/ia64/patch/linux-2.6.11/hpsim_ssc.h      Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,26 +0,0 @@
---- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/hp/sim/hpsim_ssc.h   2005-03-01 23:38:17.000000000 -0800
-+++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/hpsim_ssc.h      2005-05-18 12:40:19.000000000 -0700
-@@ -33,4 +33,23 @@
-  */
- extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
- 
-+#ifdef XEN
-+/* Note: These are declared in linux/arch/ia64/hp/sim/simscsi.c but belong
-+ * in linux/include/asm-ia64/hpsim_ssc.h, hence their addition here */
-+#define SSC_OPEN                      50
-+#define SSC_CLOSE                     51
-+#define SSC_READ                      52
-+#define SSC_WRITE                     53
-+#define SSC_GET_COMPLETION            54
-+#define SSC_WAIT_COMPLETION           55
-+
-+#define SSC_WRITE_ACCESS              2
-+#define SSC_READ_ACCESS                       1
-+
-+struct ssc_disk_req {
-+      unsigned long addr;
-+      unsigned long len;
-+};
-+#endif
-+
- #endif /* _IA64_PLATFORM_HPSIM_SSC_H */
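
[Editor's note, not part of the changeset: the removed patch above adds the Ski simulator disk SSC opcodes and struct ssc_disk_req for use with the quoted ia64_ssc() prototype. A sketch of how they combine; the placement of fd and offset among the ia64_ssc() arguments is an assumption for illustration, not taken from this changeset.]

static long example_sim_disk_read(long fd, void *buf,
                                  unsigned long len, long offset)
{
	struct ssc_disk_req req;

	req.addr = __pa(buf);   /* simulator expects a physical buffer address */
	req.len  = len;
	return ia64_ssc(fd, 1 /* one request */, __pa(&req), offset, SSC_READ);
}
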
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/ia64regs.h
--- a/xen/arch/ia64/patch/linux-2.6.11/ia64regs.h       Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,38 +0,0 @@
---- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/include/asm-ia64/ia64regs.h   2005-03-01 23:38:07.000000000 -0800
-+++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/include/asm-ia64/ia64regs.h     2005-05-18 14:00:53.000000000 -0700
-@@ -87,6 +87,35 @@
- #define _IA64_REG_CR_LRR0     4176
- #define _IA64_REG_CR_LRR1     4177
- 
-+#ifdef  CONFIG_VTI
-+#define IA64_REG_CR_DCR   0
-+#define IA64_REG_CR_ITM   1
-+#define IA64_REG_CR_IVA   2
-+#define IA64_REG_CR_PTA   8
-+#define IA64_REG_CR_IPSR  16
-+#define IA64_REG_CR_ISR   17
-+#define IA64_REG_CR_IIP   19
-+#define IA64_REG_CR_IFA   20
-+#define IA64_REG_CR_ITIR  21
-+#define IA64_REG_CR_IIPA  22
-+#define IA64_REG_CR_IFS   23
-+#define IA64_REG_CR_IIM   24
-+#define IA64_REG_CR_IHA   25
-+#define IA64_REG_CR_LID   64
-+#define IA64_REG_CR_IVR   65
-+#define IA64_REG_CR_TPR   66
-+#define IA64_REG_CR_EOI   67
-+#define IA64_REG_CR_IRR0  68
-+#define IA64_REG_CR_IRR1  69
-+#define IA64_REG_CR_IRR2  70
-+#define IA64_REG_CR_IRR3  71
-+#define IA64_REG_CR_ITV   72
-+#define IA64_REG_CR_PMV   73
-+#define IA64_REG_CR_CMCV  74
-+#define IA64_REG_CR_LRR0  80
-+#define IA64_REG_CR_LRR1  81
-+#endif  //  CONFIG_VTI
-+
- /* Indirect Registers for getindreg() and setindreg() */
- 
- #define _IA64_REG_INDR_CPUID  9000    /* getindreg only */
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/interrupt.h
--- a/xen/arch/ia64/patch/linux-2.6.11/interrupt.h      Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,27 +0,0 @@
---- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/linux/interrupt.h      2005-03-01 23:38:09.000000000 -0800
-+++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/linux/interrupt.h        2005-05-18 12:40:50.000000000 -0700
-@@ -33,6 +33,7 @@
- #define IRQ_HANDLED   (1)
- #define IRQ_RETVAL(x) ((x) != 0)
- 
-+#ifndef XEN
- struct irqaction {
-       irqreturn_t (*handler)(int, void *, struct pt_regs *);
-       unsigned long flags;
-@@ -49,6 +50,7 @@
-                      irqreturn_t (*handler)(int, void *, struct pt_regs *),
-                      unsigned long, const char *, void *);
- extern void free_irq(unsigned int, void *);
-+#endif
- 
- 
- #ifdef CONFIG_GENERIC_HARDIRQS
-@@ -121,7 +123,7 @@
- };
- 
- asmlinkage void do_softirq(void);
--extern void open_softirq(int nr, void (*action)(struct softirq_action*), void 
*data);
-+//extern void open_softirq(int nr, void (*action)(struct softirq_action*), 
void *data);
- extern void softirq_init(void);
- #define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << 
(nr); } while (0)
- extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/io.h
--- a/xen/arch/ia64/patch/linux-2.6.11/io.h     Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,14 +0,0 @@
---- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/asm-ia64/io.h  2005-03-01 23:38:34.000000000 -0800
-+++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/io.h     2005-05-18 12:40:50.000000000 -0700
-@@ -23,7 +23,11 @@
- #define __SLOW_DOWN_IO        do { } while (0)
- #define SLOW_DOWN_IO  do { } while (0)
- 
-+#ifdef XEN
-+#define __IA64_UNCACHED_OFFSET        0xe800000000000000UL
-+#else
- #define __IA64_UNCACHED_OFFSET        0xc000000000000000UL    /* region 6 */
-+#endif
- 
- /*
-  * The legacy I/O space defined by the ia64 architecture supports only 65536 
ports, but
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c
--- a/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c       Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,126 +0,0 @@
---- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/irq_ia64.c    2005-03-01 23:38:07.000000000 -0800
-+++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/irq_ia64.c      2005-05-18 12:40:51.000000000 -0700
-@@ -106,6 +106,9 @@
-       unsigned long saved_tpr;
- 
- #if IRQ_DEBUG
-+#ifdef XEN
-+      xen_debug_irq(vector, regs);
-+#endif
-       {
-               unsigned long bsp, sp;
- 
-@@ -148,6 +151,9 @@
-                       ia64_setreg(_IA64_REG_CR_TPR, vector);
-                       ia64_srlz_d();
- 
-+#ifdef XEN
-+                      if (!xen_do_IRQ(vector))
-+#endif
-                       __do_IRQ(local_vector_to_irq(vector), regs);
- 
-                       /*
-@@ -167,6 +173,103 @@
-       irq_exit();
- }
- 
-+#ifdef  CONFIG_VTI
-+#define vmx_irq_enter()               \
-+      add_preempt_count(HARDIRQ_OFFSET);
-+
-+/* Now softirq will be checked when leaving hypervisor, or else
-+ * scheduler irq will be executed too early.
-+ */
-+#define vmx_irq_exit(void)    \
-+      sub_preempt_count(HARDIRQ_OFFSET);
-+/*
-+ * That's where the IVT branches when we get an external
-+ * interrupt. This branches to the correct hardware IRQ handler via
-+ * function ptr.
-+ */
-+void
-+vmx_ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
-+{
-+      unsigned long saved_tpr;
-+      int     wake_dom0 = 0;
-+
-+
-+#if IRQ_DEBUG
-+      {
-+              unsigned long bsp, sp;
-+
-+              /*
-+               * Note: if the interrupt happened while executing in
-+               * the context switch routine (ia64_switch_to), we may
-+               * get a spurious stack overflow here.  This is
-+               * because the register and the memory stack are not
-+               * switched atomically.
-+               */
-+              bsp = ia64_getreg(_IA64_REG_AR_BSP);
-+              sp = ia64_getreg(_IA64_REG_AR_SP);
-+
-+              if ((sp - bsp) < 1024) {
-+                      static unsigned char count;
-+                      static long last_time;
-+
-+                      if (jiffies - last_time > 5*HZ)
-+                              count = 0;
-+                      if (++count < 5) {
-+                              last_time = jiffies;
-+                              printk("ia64_handle_irq: DANGER: less than "
-+                                     "1KB of free stack space!!\n"
-+                                     "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
-+                      }
-+              }
-+      }
-+#endif /* IRQ_DEBUG */
-+
-+      /*
-+       * Always set TPR to limit maximum interrupt nesting depth to
-+       * 16 (without this, it would be ~240, which could easily lead
-+       * to kernel stack overflows).
-+       */
-+      vmx_irq_enter();
-+      saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
-+      ia64_srlz_d();
-+      while (vector != IA64_SPURIOUS_INT_VECTOR) {
-+          if (!IS_RESCHEDULE(vector)) {
-+              ia64_setreg(_IA64_REG_CR_TPR, vector);
-+              ia64_srlz_d();
-+
-+              if (vector != IA64_TIMER_VECTOR) {
-+                      /* FIXME: Leave IRQ re-route later */
-+                      vmx_vcpu_pend_interrupt(dom0->vcpu[0],vector);
-+                      wake_dom0 = 1;
-+              }
-+              else {  // FIXME: Handle Timer only now
-+                      __do_IRQ(local_vector_to_irq(vector), regs);
-+              }
-+              
-+              /*
-+               * Disable interrupts and send EOI:
-+               */
-+              local_irq_disable();
-+              ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
-+          }
-+          else {
-+                printf("Oops: RESCHEDULE IPI absorbed by HV\n");
-+            }
-+          ia64_eoi();
-+          vector = ia64_get_ivr();
-+      }
-+      /*
-+       * This must be done *after* the ia64_eoi().  For example, the keyboard 
softirq
-+       * handler needs to be able to wait for further keyboard interrupts, 
which can't
-+       * come through until ia64_eoi() has been done.
-+       */
-+      vmx_irq_exit();
-+      if ( wake_dom0 && current != dom0 ) 
-+              domain_wake(dom0->vcpu[0]);
-+}
-+#endif
-+
-+
- #ifdef CONFIG_HOTPLUG_CPU
- /*
-  * This function emulates a interrupt processing when a cpu is about to be
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/kregs.h
--- a/xen/arch/ia64/patch/linux-2.6.11/kregs.h  Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,66 +0,0 @@
---- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/asm-ia64/kregs.h       2005-03-01 23:37:49.000000000 -0800
-+++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/kregs.h  2005-05-18 12:40:50.000000000 -0700
-@@ -29,8 +29,21 @@
-  */
- #define IA64_TR_KERNEL                0       /* itr0, dtr0: maps kernel 
image (code & data) */
- #define IA64_TR_PALCODE               1       /* itr1: maps PALcode as 
required by EFI */
-+#ifdef CONFIG_VTI
-+#define IA64_TR_XEN_IN_DOM    6       /* itr6, dtr6: Double mapping for xen 
image in domain space */
-+#endif // CONFIG_VTI
- #define IA64_TR_PERCPU_DATA   1       /* dtr1: percpu data */
- #define IA64_TR_CURRENT_STACK 2       /* dtr2: maps kernel's memory- & 
register-stacks */
-+#ifdef XEN
-+#define IA64_TR_SHARED_INFO   3       /* dtr3: page shared with domain */
-+#define       IA64_TR_VHPT            4       /* dtr4: vhpt */
-+#define IA64_TR_ARCH_INFO      5
-+#ifdef CONFIG_VTI
-+#define IA64_TR_VHPT_IN_DOM   5       /* dtr5: Double mapping for vhpt table 
in domain space */
-+#define IA64_TR_RR7_SWITCH_STUB       7       /* dtr7: mapping for rr7 switch 
stub */
-+#define IA64_TEMP_PHYSICAL    8       /* itr8, dtr8: temp mapping for guest 
physical memory 256M */
-+#endif // CONFIG_VTI
-+#endif
- 
- /* Processor status register bits: */
- #define IA64_PSR_BE_BIT               1
-@@ -66,6 +78,9 @@
- #define IA64_PSR_ED_BIT               43
- #define IA64_PSR_BN_BIT               44
- #define IA64_PSR_IA_BIT               45
-+#ifdef CONFIG_VTI
-+#define IA64_PSR_VM_BIT               46
-+#endif // CONFIG_VTI
- 
- /* A mask of PSR bits that we generally don't want to inherit across a 
clone2() or an
-    execve().  Only list flags here that need to be cleared/set for BOTH 
clone2() and
-@@ -107,6 +122,9 @@
- #define IA64_PSR_ED   (__IA64_UL(1) << IA64_PSR_ED_BIT)
- #define IA64_PSR_BN   (__IA64_UL(1) << IA64_PSR_BN_BIT)
- #define IA64_PSR_IA   (__IA64_UL(1) << IA64_PSR_IA_BIT)
-+#ifdef CONFIG_VTI
-+#define IA64_PSR_VM   (__IA64_UL(1) << IA64_PSR_VM_BIT)
-+#endif // CONFIG_VTI
- 
- /* User mask bits: */
- #define IA64_PSR_UM   (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL 
| IA64_PSR_MFH)
-@@ -160,4 +178,21 @@
- #define IA64_ISR_CODE_LFETCH  4
- #define IA64_ISR_CODE_PROBEF  5
- 
-+#ifdef XEN
-+/* Interruption Function State */
-+#define IA64_IFS_V_BIT                63
-+#define IA64_IFS_V    (__IA64_UL(1) << IA64_IFS_V_BIT)
-+
-+/* Page Table Address */
-+#define IA64_PTA_VE_BIT 0
-+#define IA64_PTA_SIZE_BIT 2
-+#define IA64_PTA_VF_BIT 8
-+#define IA64_PTA_BASE_BIT 15
-+
-+#define IA64_PTA_VE     (__IA64_UL(1) << IA64_PTA_VE_BIT)
-+#define IA64_PTA_SIZE   (__IA64_UL(0x3f) << IA64_PTA_SIZE_BIT)
-+#define IA64_PTA_VF     (__IA64_UL(1) << IA64_PTA_VF_BIT)
-+#define IA64_PTA_BASE   (__IA64_UL(0) - ((__IA64_UL(1) << IA64_PTA_BASE_BIT)))
-+#endif
-+
- #endif /* _ASM_IA64_kREGS_H */
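
[Editor's note, not part of the changeset: the removed kregs.h patch above defines the Xen-only PTA (Page Table Address control register) bit fields. A sketch of composing a value from those masks; the make_pta name and its use are assumed for illustration, the actual VHPT setup lives elsewhere in the tree.]

static inline unsigned long make_pta(unsigned long vhpt_base,
                                     unsigned long log2_size)
{
	return (vhpt_base & IA64_PTA_BASE)                            /* bits 15..: table base */
	     | ((log2_size << IA64_PTA_SIZE_BIT) & IA64_PTA_SIZE)     /* bits 2..7: log2 size  */
	     | IA64_PTA_VE;                                           /* bit 0: walker enable  */
}
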
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/mca_asm.h
--- a/xen/arch/ia64/patch/linux-2.6.11/mca_asm.h        Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,32 +0,0 @@
---- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/asm-ia64/mca_asm.h     2005-03-01 23:38:38.000000000 -0800
-+++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/mca_asm.h        2005-05-18 12:40:19.000000000 -0700
-@@ -26,8 +26,13 @@
-  * direct mapped to physical addresses.
-  *    1. Lop off bits 61 thru 63 in the virtual address
-  */
-+#ifdef XEN
-+#define INST_VA_TO_PA(addr)                                                   
\
-+      dep     addr    = 0, addr, 60, 4
-+#else // XEN
- #define INST_VA_TO_PA(addr)                                                   
\
-       dep     addr    = 0, addr, 61, 3
-+#endif // XEN
- /*
-  * This macro converts a data virtual address to a physical address
-  * Right now for simulation purposes the virtual addresses are
-@@ -42,9 +47,15 @@
-  * direct mapped to physical addresses.
-  *    1. Put 0x7 in bits 61 thru 63.
-  */
-+#ifdef XEN
-+#define DATA_PA_TO_VA(addr,temp)                                              
        \
-+      mov     temp    = 0xf   ;;                                              
        \
-+      dep     addr    = temp, addr, 60, 4
-+#else // XEN
- #define DATA_PA_TO_VA(addr,temp)                                              
        \
-       mov     temp    = 0x7   ;;                                              
        \
-       dep     addr    = temp, addr, 61, 3
-+#endif // XEN
- 
- #define GET_THIS_PADDR(reg, var)              \
-       mov     reg = IA64_KR(PER_CPU_DATA);;   \
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/minstate.h
--- a/xen/arch/ia64/patch/linux-2.6.11/minstate.h       Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,25 +0,0 @@
- minstate.h |    4 ++--
- 1 files changed, 2 insertions(+), 2 deletions(-)
-
-Index: linux-2.6.11-xendiffs/arch/ia64/kernel/minstate.h
-===================================================================
---- linux-2.6.11-xendiffs.orig/arch/ia64/kernel/minstate.h     2005-04-06 22:51:31.170261541 -0500
-+++ linux-2.6.11-xendiffs/arch/ia64/kernel/minstate.h  2005-04-06 22:54:03.210575034 -0500
-@@ -48,7 +48,7 @@
- (pUStk)       mov r24=ar.rnat;                                                
                        \
- (pUStk)       addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;   /* compute base 
of memory stack */      \
- (pUStk)       mov r23=ar.bspstore;                            /* save 
ar.bspstore */                  \
--(pUStk)       dep r22=-1,r22,61,3;                    /* compute kernel 
virtual addr of RBS */        \
-+(pUStk)       dep r22=-1,r22,60,4;                    /* compute kernel 
virtual addr of RBS */        \
-       ;;                                                                      
                \
- (pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;                /* if in kernel mode, 
use sp (r12) */           \
- (pUStk)       mov ar.bspstore=r22;                    /* switch to kernel RBS 
*/                      \
-@@ -57,7 +57,7 @@
- (pUStk)       mov ar.rsc=0x3;         /* set eager mode, pl 0, little-endian, 
loadrs=0 */             \
- 
- #define MINSTATE_END_SAVE_MIN_PHYS                                            
                \
--      dep r12=-1,r12,61,3;            /* make sp a kernel virtual address */  
                \
-+      dep r12=-1,r12,60,4;            /* make sp a kernel virtual address */  
                \
-       ;;
- 
- #ifdef MINSTATE_VIRT
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/mm_contig.c
--- a/xen/arch/ia64/patch/linux-2.6.11/mm_contig.c      Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,47 +0,0 @@
---- ../../linux-2.6.11/arch/ia64/mm/contig.c   2005-03-02 00:37:55.000000000 -0700
-+++ arch/ia64/mm_contig.c      2005-04-28 16:13:52.000000000 -0600
-@@ -35,6 +35,7 @@
-  *
-  * Just walks the pages in the system and describes where they're allocated.
-  */
-+#ifndef XEN
- void
- show_mem (void)
- {
-@@ -63,6 +64,7 @@
-       printk("%d pages swap cached\n", cached);
-       printk("%ld pages in page table cache\n", pgtable_cache_size);
- }
-+#endif
- 
- /* physical address where the bootmem map is located */
- unsigned long bootmap_start;
-@@ -140,6 +142,7 @@
-  * Walk the EFI memory map and find usable memory for the system, taking
-  * into account reserved areas.
-  */
-+#ifndef XEN
- void
- find_memory (void)
- {
-@@ -168,6 +171,7 @@
- 
-       find_initrd();
- }
-+#endif
- 
- #ifdef CONFIG_SMP
- /**
-@@ -225,6 +229,7 @@
-  * Set up the page tables.
-  */
- 
-+#ifndef XEN
- void
- paging_init (void)
- {
-@@ -297,3 +302,4 @@
- #endif /* !CONFIG_VIRTUAL_MEM_MAP */
-       zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
- }
-+#endif /* !CONFIG_XEN */
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/page.h
--- a/xen/arch/ia64/patch/linux-2.6.11/page.h   Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,74 +0,0 @@
---- /home/adsharma/xeno-unstable-ia64-staging.bk/xen/../../linux-2.6.11/include/asm-ia64/page.h         2005-03-01 23:37:48.000000000 -0800
-+++ /home/adsharma/xeno-unstable-ia64-staging.bk/xen/include/asm-ia64/page.h   2005-05-20 09:36:02.000000000 -0700
-@@ -32,6 +32,7 @@
- #define PAGE_ALIGN(addr)      (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
- 
- #define PERCPU_PAGE_SHIFT     16      /* log2() of max. size of per-CPU area 
*/
-+
- #define PERCPU_PAGE_SIZE      (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
- 
- #define RGN_MAP_LIMIT ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE)      /* per 
region addr limit */
-@@ -95,9 +96,15 @@
- #endif
- 
- #ifndef CONFIG_DISCONTIGMEM
-+#ifdef XEN
-+# define pfn_valid(pfn)               (0)
-+# define page_to_pfn(_page)   ((unsigned long)((_page) - frame_table))
-+# define pfn_to_page(_pfn)    (frame_table + (_pfn))
-+#else
- # define pfn_valid(pfn)               (((pfn) < max_mapnr) && 
ia64_pfn_valid(pfn))
- # define page_to_pfn(page)    ((unsigned long) (page - mem_map))
- # define pfn_to_page(pfn)     (mem_map + (pfn))
-+#endif
- #else
- extern struct page *vmem_map;
- extern unsigned long max_low_pfn;
-@@ -109,6 +116,11 @@
- #define page_to_phys(page)    (page_to_pfn(page) << PAGE_SHIFT)
- #define virt_to_page(kaddr)   pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
- 
-+#ifdef XEN
-+#define page_to_virt(_page)   phys_to_virt(page_to_phys(_page))
-+#define phys_to_page(kaddr)   pfn_to_page(((kaddr) >> PAGE_SHIFT))
-+#endif
-+
- typedef union ia64_va {
-       struct {
-               unsigned long off : 61;         /* intra-region offset */
-@@ -124,8 +136,23 @@
-  * expressed in this way to ensure they result in a single "dep"
-  * instruction.
-  */
-+#ifdef XEN
-+typedef union xen_va {
-+      struct {
-+              unsigned long off : 60;
-+              unsigned long reg : 4;
-+      } f;
-+      unsigned long l;
-+      void *p;
-+} xen_va;
-+
-+// xen/drivers/console.c uses __va in a declaration (should be fixed!)
-+#define __pa(x)               ({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; 
_v.l;})
-+#define __va(x)               ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; 
_v.p;})
-+#else
- #define __pa(x)               ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; 
_v.l;})
- #define __va(x)               ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; 
_v.p;})
-+#endif
- 
- #define REGION_NUMBER(x)      ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
- #define REGION_OFFSET(x)      ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
-@@ -197,7 +224,11 @@
- # define __pgprot(x)  (x)
- #endif /* !STRICT_MM_TYPECHECKS */
- 
-+#ifdef XEN
-+#define PAGE_OFFSET                   __IA64_UL_CONST(0xf000000000000000)
-+#else
- #define PAGE_OFFSET                   __IA64_UL_CONST(0xe000000000000000)
-+#endif
- 
- #define VM_DATA_DEFAULT_FLAGS         (VM_READ | VM_WRITE |                   
                \
-                                        VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |                \
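
[Editor's note, not part of the changeset: the removed page.h patch above redefines __pa()/__va() through a 60+4 bit xen_va union instead of ia64_va's 61+3 split. A standalone sketch of that conversion trick; the demo_* names are illustrative.]

/* Xen-virtual and physical addresses differ only in the top four "region"
 * bits, so __pa() clears them and __va() sets them all (region 0xf, matching
 * the Xen PAGE_OFFSET of 0xf000000000000000). */
typedef union {
	struct {
		unsigned long off : 60;  /* intra-region offset */
		unsigned long reg : 4;   /* region number */
	} f;
	unsigned long l;
	void *p;
} demo_va;

static inline unsigned long demo_pa(void *x)
{
	demo_va v = { .p = x };
	v.f.reg = 0;                     /* physical: region bits cleared */
	return v.l;
}

static inline void *demo_va_ptr(unsigned long x)
{
	demo_va v = { .l = x };
	v.f.reg = 0xf;                   /* virtual: all region bits set (reg = -1 in the macro) */
	return v.p;
}
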
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/pal.S
--- a/xen/arch/ia64/patch/linux-2.6.11/pal.S    Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,26 +0,0 @@
---- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/pal.S         2005-03-01 23:38:33.000000000 -0800
-+++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/pal.S  2005-05-18 12:40:19.000000000 -0700
-@@ -166,7 +166,11 @@
-       adds r8  = 1f-1b,r8             // calculate return address for call
-       ;;
-       mov loc4=ar.rsc                 // save RSE configuration
-+#ifdef XEN
-+      dep.z loc2=loc2,0,60            // convert pal entry point to physical
-+#else // XEN
-       dep.z loc2=loc2,0,61            // convert pal entry point to physical
-+#endif // XEN
-       tpa r8=r8                       // convert rp to physical
-       ;;
-       mov b7 = loc2                   // install target to branch reg
-@@ -225,7 +229,11 @@
-       mov loc3 = psr          // save psr
-       ;;
-       mov loc4=ar.rsc                 // save RSE configuration
-+#ifdef XEN
-+      dep.z loc2=loc2,0,60            // convert pal entry point to physical
-+#else // XEN
-       dep.z loc2=loc2,0,61            // convert pal entry point to physical
-+#endif // XEN
-       ;;
-       mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
-       movl r16=PAL_PSR_BITS_TO_CLEAR
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/pal.h
--- a/xen/arch/ia64/patch/linux-2.6.11/pal.h    Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,12 +0,0 @@
---- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/include/asm-ia64/pal.h        2005-03-01 23:38:13.000000000 -0800
-+++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/include/asm-ia64/pal.h  2005-05-18 14:00:53.000000000 -0700
-@@ -1559,6 +1559,9 @@
-       return iprv.status;
- }
- 
-+#ifdef CONFIG_VTI
-+#include <asm/vmx_pal.h>
-+#endif // CONFIG_VTI
- #endif /* __ASSEMBLY__ */
- 
- #endif /* _ASM_IA64_PAL_H */
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/pgalloc.h
--- a/xen/arch/ia64/patch/linux-2.6.11/pgalloc.h        Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,76 +0,0 @@
---- ../../linux-2.6.11/include/asm-ia64/pgalloc.h      2005-03-02 00:37:31.000000000 -0700
-+++ include/asm-ia64/pgalloc.h 2005-06-09 13:40:48.000000000 -0600
-@@ -61,7 +61,12 @@
-       pgd_t *pgd = pgd_alloc_one_fast(mm);
- 
-       if (unlikely(pgd == NULL)) {
-+#ifdef XEN
-+              pgd = (pgd_t *)alloc_xenheap_page();
-+              memset(pgd,0,PAGE_SIZE);
-+#else
-               pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-+#endif
-       }
-       return pgd;
- }
-@@ -104,7 +109,12 @@
- static inline pmd_t*
- pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
- {
-+#ifdef XEN
-+      pmd_t *pmd = (pmd_t *)alloc_xenheap_page();
-+      memset(pmd,0,PAGE_SIZE);
-+#else
-       pmd_t *pmd = (pmd_t 
*)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-+#endif
- 
-       return pmd;
- }
-@@ -136,7 +146,12 @@
- static inline struct page *
- pte_alloc_one (struct mm_struct *mm, unsigned long addr)
- {
-+#ifdef XEN
-+      struct page *pte = alloc_xenheap_page();
-+      memset(pte,0,PAGE_SIZE);
-+#else
-       struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-+#endif
- 
-       return pte;
- }
-@@ -144,7 +159,12 @@
- static inline pte_t *
- pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
- {
-+#ifdef XEN
-+      pte_t *pte = (pte_t *)alloc_xenheap_page();
-+      memset(pte,0,PAGE_SIZE);
-+#else
-       pte_t *pte = (pte_t 
*)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-+#endif
- 
-       return pte;
- }
-@@ -152,13 +172,21 @@
- static inline void
- pte_free (struct page *pte)
- {
-+#ifdef XEN
-+      free_xenheap_page(pte);
-+#else
-       __free_page(pte);
-+#endif
- }
- 
- static inline void
- pte_free_kernel (pte_t *pte)
- {
-+#ifdef XEN
-+      free_xenheap_page((unsigned long) pte);
-+#else
-       free_page((unsigned long) pte);
-+#endif
- }
- 
- #define __pte_free_tlb(tlb, pte)      tlb_remove_page((tlb), (pte))
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/processor.h
--- a/xen/arch/ia64/patch/linux-2.6.11/processor.h      Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,37 +0,0 @@
---- /home/adsharma/xeno-unstable-ia64-staging.bk/xen/../../linux-2.6.11/include/asm-ia64/processor.h    2005-03-01 23:37:58.000000000 -0800
-+++ /home/adsharma/xeno-unstable-ia64-staging.bk/xen/include/asm-ia64/processor.h       2005-05-20 09:36:02.000000000 -0700
-@@ -94,7 +94,11 @@
- #ifdef CONFIG_NUMA
- #include <asm/nodedata.h>
- #endif
-+#ifdef XEN
-+#include <asm/xenprocessor.h>
-+#endif
- 
-+#ifndef XEN
- /* like above but expressed as bitfields for more efficient access: */
- struct ia64_psr {
-       __u64 reserved0 : 1;
-@@ -133,6 +137,7 @@
-       __u64 bn : 1;
-       __u64 reserved4 : 19;
- };
-+#endif
- 
- /*
-  * CPU type, hardware bug flags, and per-CPU state.  Frequently used
-@@ -408,12 +413,14 @@
-  */
- 
- /* Return TRUE if task T owns the fph partition of the CPU we're running on. 
*/
-+#ifndef XEN
- #define ia64_is_local_fpu_owner(t)                                            
                \
- ({                                                                            
                \
-       struct task_struct *__ia64_islfo_task = (t);                            
                \
-       (__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()           
                \
-        && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));        \
- })
-+#endif
- 
- /* Mark task T as owning the fph partition of the CPU we're running on. */
- #define ia64_set_local_fpu_owner(t) do {                                      
        \
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/ptrace.h
--- a/xen/arch/ia64/patch/linux-2.6.11/ptrace.h Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,20 +0,0 @@
---- 
/home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/include/asm-ia64/ptrace.h
    2005-03-01 23:38:38.000000000 -0800
-+++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/include/asm-ia64/ptrace.h       
2005-05-18 14:00:53.000000000 -0700
-@@ -95,6 +95,9 @@
-  * (because the memory stack pointer MUST ALWAYS be aligned this way)
-  *
-  */
-+#ifdef XEN
-+#include <public/arch-ia64.h>
-+#else
- struct pt_regs {
-       /* The following registers are saved by SAVE_MIN: */
-       unsigned long b6;               /* scratch */
-@@ -170,6 +173,7 @@
-       struct ia64_fpreg f10;          /* scratch */
-       struct ia64_fpreg f11;          /* scratch */
- };
-+#endif
- 
- /*
-  * This structure contains the addition registers that need to
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/series
--- a/xen/arch/ia64/patch/linux-2.6.11/series   Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,40 +0,0 @@
-bootmem.h
-current.h
-efi.c
-efi.h
-entry.S
-gcc_intrin.h
-hardirq.h
-head.S
-hpsim_irq.c
-hpsim_ssc.h
-hw_irq.h
-ide.h
-init_task.c
-init_task.h
-interrupt.h
-io.h
-irq.h
-irq_ia64.c
-ivt.S
-kregs.h
-lds.S
-linuxtime.h
-minstate.h
-mm_bootmem.c
-mm_contig.c
-mmzone.h
-page_alloc.c
-page.h
-processor.h
-sal.h
-setup.c
-slab.c
-slab.h
-system.h
-time.c
-kernel-time.c
-tlb.c
-types.h
-unaligned.c
-wait.h
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/setup.c
--- a/xen/arch/ia64/patch/linux-2.6.11/setup.c  Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,151 +0,0 @@
---- ../../linux-2.6.11/arch/ia64/kernel/setup.c        2005-03-02 
00:37:49.000000000 -0700
-+++ arch/ia64/setup.c  2005-06-03 10:14:24.000000000 -0600
-@@ -51,6 +51,10 @@
- #include <asm/smp.h>
- #include <asm/system.h>
- #include <asm/unistd.h>
-+#ifdef CONFIG_VTI
-+#include <asm/vmx.h>
-+#endif // CONFIG_VTI
-+#include <asm/io.h>
- 
- #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
- # error "struct cpuinfo_ia64 too big!"
-@@ -127,7 +131,16 @@
-               range_end   = min(end, rsvd_region[i].start);
- 
-               if (range_start < range_end)
-+#ifdef XEN
-+              {
-+              /* init_boot_pages requires "ps, pe" */
-+                      printk("Init boot pages: 0x%lx -> 0x%lx.\n",
-+                              __pa(range_start), __pa(range_end));
-+                      (*func)(__pa(range_start), __pa(range_end), 0);
-+              }
-+#else
-                       call_pernode_memory(__pa(range_start), range_end - range_start, func);
-+#endif
- 
-               /* nothing more available in this segment */
-               if (range_end == end) return 0;
-@@ -185,7 +198,12 @@
-       n++;
- 
-       rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
-+#ifdef XEN
-+      /* Reserve xen image/bitmap/xen-heap */
-+      rsvd_region[n].end   = rsvd_region[n].start + xenheap_size;
-+#else
-       rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
-+#endif
-       n++;
- 
- #ifdef CONFIG_BLK_DEV_INITRD
-@@ -299,17 +317,25 @@
- }
- 
- void __init
-+#ifdef XEN
-+early_setup_arch (char **cmdline_p)
-+#else
- setup_arch (char **cmdline_p)
-+#endif
- {
-       unw_init();
- 
-       ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
- 
-       *cmdline_p = __va(ia64_boot_param->command_line);
-+#ifdef XEN
-+      efi_init();
-+#else
-       strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
- 
-       efi_init();
-       io_port_init();
-+#endif
- 
- #ifdef CONFIG_IA64_GENERIC
-       {
-@@ -336,6 +362,11 @@
-       }
- #endif
- 
-+#ifdef XEN
-+      early_cmdline_parse(cmdline_p);
-+      cmdline_parse(*cmdline_p);
-+#undef CONFIG_ACPI_BOOT
-+#endif
-       if (early_console_setup(*cmdline_p) == 0)
-               mark_bsp_online();
- 
-@@ -351,8 +382,18 @@
- # endif
- #endif /* CONFIG_APCI_BOOT */
- 
-+#ifndef XEN
-       find_memory();
-+#else
-+      io_port_init();
-+}
- 
-+void __init
-+late_setup_arch (char **cmdline_p)
-+{
-+#undef CONFIG_ACPI_BOOT
-+      acpi_table_init();
-+#endif
-       /* process SAL system table: */
-       ia64_sal_init(efi.sal_systab);
- 
-@@ -360,6 +401,10 @@
-       cpu_physical_id(0) = hard_smp_processor_id();
- #endif
- 
-+#ifdef CONFIG_VTI
-+      identify_vmx_feature();
-+#endif // CONFIG_VTI
-+
-       cpu_init();     /* initialize the bootstrap CPU */
- 
- #ifdef CONFIG_ACPI_BOOT
-@@ -492,12 +537,14 @@
- {
- }
- 
-+#ifndef XEN
- struct seq_operations cpuinfo_op = {
-       .start =        c_start,
-       .next =         c_next,
-       .stop =         c_stop,
-       .show =         show_cpuinfo
- };
-+#endif
- 
- void
- identify_cpu (struct cpuinfo_ia64 *c)
-@@ -551,6 +598,12 @@
-       }
-       c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
-       c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
-+
-+#ifdef CONFIG_VTI
-+      /* If vmx feature is on, do necessary initialization for vmx */
-+      if (vmx_enabled)
-+              vmx_init_env();
-+#endif
- }
- 
- void
-@@ -659,7 +712,11 @@
-                                       | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
-       atomic_inc(&init_mm.mm_count);
-       current->active_mm = &init_mm;
-+#ifdef XEN
-+      if (current->domain->arch.mm)
-+#else
-       if (current->mm)
-+#endif
-               BUG();
- 
-       ia64_mmu_init(ia64_imva(cpu_data));
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/sn_sal.h
--- a/xen/arch/ia64/patch/linux-2.6.11/sn_sal.h Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,33 +0,0 @@
---- /data/lwork/attica1/edwardsg/linux-2.6.11/include/asm-ia64/sn/sn_sal.h     
2005-03-02 01:38:33 -06:00
-+++ include/asm-ia64/sn/sn_sal.h       2005-06-01 14:31:47 -05:00
-@@ -123,6 +123,7 @@
- #define SALRET_ERROR          (-3)
-
-
-+#ifndef XEN
- /**
-  * sn_sal_rev_major - get the major SGI SAL revision number
-  *
-@@ -226,6 +227,7 @@ ia64_sn_get_klconfig_addr(nasid_t nasid)
-       }
-       return ret_stuff.v0 ? __va(ret_stuff.v0) : NULL;
- }
-+#endif /* !XEN */
-
- /*
-  * Returns the next console character.
-@@ -304,6 +306,7 @@ ia64_sn_console_putb(const char *buf, in
-       return (u64)0;
- }
-
-+#ifndef XEN
- /*
-  * Print a platform error record
-  */
-@@ -987,5 +990,5 @@ ia64_sn_hwperf_op(nasid_t nasid, u64 opc
-               *v0 = (int) rv.v0;
-       return (int) rv.status;
- }
--
-+#endif /* !XEN */
- #endif /* _ASM_IA64_SN_SN_SAL_H */
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/system.h
--- a/xen/arch/ia64/patch/linux-2.6.11/system.h Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,38 +0,0 @@
---- 
/home/adsharma/xeno-unstable-ia64-staging.bk/xen/../../linux-2.6.11/include/asm-ia64/system.h
      2005-03-01 23:38:07.000000000 -0800
-+++ /home/adsharma/xeno-unstable-ia64-staging.bk/xen/include/asm-ia64/system.h 
2005-05-20 09:36:02.000000000 -0700
-@@ -18,14 +18,19 @@
- #include <asm/page.h>
- #include <asm/pal.h>
- #include <asm/percpu.h>
-+#ifdef XEN
-+#include <asm/xensystem.h>
-+#endif
- 
- #define GATE_ADDR             __IA64_UL_CONST(0xa000000000000000)
- /*
-  * 0xa000000000000000+2*PERCPU_PAGE_SIZE
-  * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
-  */
-+#ifndef XEN
- #define KERNEL_START           __IA64_UL_CONST(0xa000000100000000)
- #define PERCPU_ADDR           (-PERCPU_PAGE_SIZE)
-+#endif
- 
- #ifndef __ASSEMBLY__
- 
-@@ -218,6 +223,7 @@
- # define PERFMON_IS_SYSWIDE() (0)
- #endif
- 
-+#ifndef XEN
- #define IA64_HAS_EXTRA_STATE(t)                                               
        \
-       ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)       
\
-        || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
-@@ -230,6 +236,7 @@
-       ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);   
                 \
-       (last) = ia64_switch_to((next));                                        
                 \
- } while (0)
-+#endif 
- 
- #ifdef CONFIG_SMP
- /*
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/time.c
--- a/xen/arch/ia64/patch/linux-2.6.11/time.c   Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,56 +0,0 @@
---- ../../linux-2.6.11/arch/ia64/kernel/time.c 2005-03-02 00:37:50.000000000 
-0700
-+++ arch/ia64/time.c   2005-05-02 11:19:29.000000000 -0600
-@@ -29,6 +29,9 @@
- #include <asm/sal.h>
- #include <asm/sections.h>
- #include <asm/system.h>
-+#ifdef XEN
-+#include <linux/jiffies.h>    // not included by xen/sched.h
-+#endif
- 
- extern unsigned long wall_jiffies;
- 
-@@ -45,6 +48,7 @@
- 
- #endif
- 
-+#ifndef XEN
- static struct time_interpolator itc_interpolator = {
-       .shift = 16,
-       .mask = 0xffffffffffffffffLL,
-@@ -110,6 +114,7 @@
-       } while (time_after_eq(ia64_get_itc(), new_itm));
-       return IRQ_HANDLED;
- }
-+#endif
- 
- /*
-  * Encapsulate access to the itm structure for SMP.
-@@ -212,6 +217,7 @@
-                                       + itc_freq/2)/itc_freq;
- 
-       if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
-+#ifndef XEN
-               itc_interpolator.frequency = local_cpu_data->itc_freq;
-               itc_interpolator.drift = itc_drift;
- #ifdef CONFIG_SMP
-@@ -228,12 +234,14 @@
-               if (!nojitter) itc_interpolator.jitter = 1;
- #endif
-               register_time_interpolator(&itc_interpolator);
-+#endif
-       }
- 
-       /* Setup the CPU local timer tick */
-       ia64_cpu_local_tick();
- }
- 
-+#ifndef XEN
- static struct irqaction timer_irqaction = {
-       .handler =      timer_interrupt,
-       .flags =        SA_INTERRUPT,
-@@ -253,3 +261,4 @@
-        */
-       set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
- }
-+#endif
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/tlb.c
--- a/xen/arch/ia64/patch/linux-2.6.11/tlb.c    Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,38 +0,0 @@
---- ../../linux-2.6.11/arch/ia64/mm/tlb.c      2005-03-02 00:38:38.000000000 
-0700
-+++ arch/ia64/tlb.c    2005-05-02 10:23:09.000000000 -0600
-@@ -43,6 +43,9 @@
- void
- wrap_mmu_context (struct mm_struct *mm)
- {
-+#ifdef XEN
-+printf("wrap_mmu_context: called, not implemented\n");
-+#else
-       unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
-       struct task_struct *tsk;
-       int i;
-@@ -83,6 +86,7 @@
-               put_cpu();
-       }
-       local_flush_tlb_all();
-+#endif
- }
- 
- void
-@@ -132,6 +136,9 @@
- void
- flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end)
- {
-+#ifdef XEN
-+printf("flush_tlb_range: called, not implemented\n");
-+#else
-       struct mm_struct *mm = vma->vm_mm;
-       unsigned long size = end - start;
-       unsigned long nbits;
-@@ -163,6 +170,7 @@
- # endif
- 
-       ia64_srlz_i();                  /* srlz.i implies srlz.d */
-+#endif
- }
- EXPORT_SYMBOL(flush_tlb_range);
- 
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/types.h
--- a/xen/arch/ia64/patch/linux-2.6.11/types.h  Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,44 +0,0 @@
---- ../../linux-2.6.11/include/asm-ia64/types.h        2005-03-04 
10:26:30.000000000 -0700
-+++ include/asm-ia64/types.h   2005-04-11 15:23:49.000000000 -0600
-@@ -1,5 +1,12 @@
- #ifndef _ASM_IA64_TYPES_H
- #define _ASM_IA64_TYPES_H
-+#ifdef XEN
-+#ifndef __ASSEMBLY__
-+typedef unsigned long ssize_t;
-+typedef unsigned long size_t;
-+typedef long long loff_t;
-+#endif
-+#endif
- 
- /*
-  * This file is never included by application software unless explicitly 
requested (e.g.,
-@@ -61,6 +68,28 @@
- typedef __s64 s64;
- typedef __u64 u64;
- 
-+#ifdef XEN
-+/*
-+ * Below are truly Linux-specific types that should never collide with
-+ * any application/library that wants linux/types.h.
-+ */
-+
-+#ifdef __CHECKER__
-+#define __bitwise __attribute__((bitwise))
-+#else
-+#define __bitwise
-+#endif
-+
-+typedef __u16 __bitwise __le16;
-+typedef __u16 __bitwise __be16;
-+typedef __u32 __bitwise __le32;
-+typedef __u32 __bitwise __be32;
-+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
-+typedef __u64 __bitwise __le64;
-+typedef __u64 __bitwise __be64;
-+#endif
-+#endif
-+
- #define BITS_PER_LONG 64
- 
- /* DMA addresses are 64-bits wide, in general.  */
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/uaccess.h
--- a/xen/arch/ia64/patch/linux-2.6.11/uaccess.h        Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,41 +0,0 @@
---- ../../linux-2.6.11/include/asm-ia64/uaccess.h      2005-03-02 
00:37:53.000000000 -0700
-+++ include/asm-ia64/uaccess.h 2005-06-21 21:53:20.000000000 -0600
-@@ -32,6 +32,10 @@
-  *    David Mosberger-Tang <davidm@xxxxxxxxxx>
-  */
- 
-+#ifdef CONFIG_VTI
-+#include <asm/vmx_uaccess.h>
-+#else // CONFIG_VTI
-+
- #include <linux/compiler.h>
- #include <linux/errno.h>
- #include <linux/sched.h>
-@@ -60,6 +64,11 @@
-  * address TASK_SIZE is never valid.  We also need to make sure that the 
address doesn't
-  * point inside the virtually mapped linear page table.
-  */
-+#ifdef XEN
-+/* VT-i reserves bit 60 for the VMM; guest addresses have bit 60 = bit 59 */
-+#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
-+#define __access_ok(addr, size, segment) (!IS_VMM_ADDRESS((unsigned long)(addr)))
-+#else
- #define __access_ok(addr, size, segment)                                      
        \
- ({                                                                            
        \
-       __chk_user_ptr(addr);                                                   
        \
-@@ -67,6 +76,7 @@
-        && ((segment).seg == KERNEL_DS.seg                                     
        \
-            || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT)));        \
- })
-+#endif
- #define access_ok(type, addr, size)   __access_ok((addr), (size), get_fs())
- 
- static inline int
-@@ -343,6 +353,7 @@
-       __su_ret;                                               \
- })
- 
-+#endif // CONFIG_VTI
- /* Generic code can't deal with the location-relative format that we use for 
compactness.  */
- #define ARCH_HAS_SORT_EXTABLE
- #define ARCH_HAS_SEARCH_EXTABLE
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.11/unaligned.c
--- a/xen/arch/ia64/patch/linux-2.6.11/unaligned.c      Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,227 +0,0 @@
---- 
/home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/unaligned.c
  2005-03-01 23:38:25.000000000 -0800
-+++ 
/home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/unaligned.c 
   2005-05-18 12:40:50.000000000 -0700
-@@ -201,7 +201,11 @@
- 
-       RPT(r1), RPT(r2), RPT(r3),
- 
-+#ifdef  CONFIG_VTI
-+      RPT(r4), RPT(r5), RPT(r6), RPT(r7),
-+#else   //CONFIG_VTI
-       RSW(r4), RSW(r5), RSW(r6), RSW(r7),
-+#endif  //CONFIG_VTI
- 
-       RPT(r8), RPT(r9), RPT(r10), RPT(r11),
-       RPT(r12), RPT(r13), RPT(r14), RPT(r15),
-@@ -291,6 +295,121 @@
-       return reg;
- }
- 
-+#ifdef CONFIG_VTI
-+static void
-+set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
-+{
-+      struct switch_stack *sw = (struct switch_stack *) regs - 1;
-+      unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end;
-+      unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
-+      unsigned long rnats, nat_mask;
-+    unsigned long old_rsc,new_rsc;
-+      unsigned long on_kbs,rnat;
-+      long sof = (regs->cr_ifs) & 0x7f;
-+      long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
-+      long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
-+      long ridx = r1 - 32;
-+
-+      if (ridx >= sof) {
-+              /* this should never happen, as the "rsvd register fault" has 
higher priority */
-+              DPRINT("ignoring write to r%lu; only %lu registers are allocated!\n", r1, sof);
-+              return;
-+      }
-+
-+      if (ridx < sor)
-+              ridx = rotate_reg(sor, rrb_gr, ridx);
-+
-+    old_rsc=ia64_get_rsc();
-+    new_rsc=old_rsc&(~0x3);
-+    ia64_set_rsc(new_rsc);
-+
-+    bspstore = ia64_get_bspstore();
-+    bsp =kbs + (regs->loadrs >> 19);//16+3
-+
-+      addr = ia64_rse_skip_regs(bsp, -sof + ridx);
-+    nat_mask = 1UL << ia64_rse_slot_num(addr);
-+      rnat_addr = ia64_rse_rnat_addr(addr);
-+
-+    if(addr >= bspstore){
-+
-+        ia64_flushrs ();
-+        ia64_mf ();
-+              *addr = val;
-+        bspstore = ia64_get_bspstore();
-+      rnat = ia64_get_rnat ();
-+        if(bspstore < rnat_addr){
-+            rnat=rnat&(~nat_mask);
-+        }else{
-+            *rnat_addr = (*rnat_addr)&(~nat_mask);
-+        }
-+        ia64_mf();
-+        ia64_loadrs();
-+        ia64_set_rnat(rnat);
-+    }else{
-+
-+      rnat = ia64_get_rnat ();
-+              *addr = val;
-+        if(bspstore < rnat_addr){
-+            rnat=rnat&(~nat_mask);
-+        }else{
-+            *rnat_addr = (*rnat_addr)&(~nat_mask);
-+        }
-+        ia64_set_bspstore (bspstore);
-+        ia64_set_rnat(rnat);
-+    }
-+    ia64_set_rsc(old_rsc);
-+}
-+
-+
-+static void
-+get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, unsigned long *nat)
-+{
-+      struct switch_stack *sw = (struct switch_stack *) regs - 1;
-+      unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
-+      unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
-+      unsigned long rnats, nat_mask;
-+      unsigned long on_kbs;
-+    unsigned long old_rsc, new_rsc;
-+      long sof = (regs->cr_ifs) & 0x7f;
-+      long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
-+      long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
-+      long ridx = r1 - 32;
-+
-+      if (ridx >= sof) {
-+              /* read of out-of-frame register returns an undefined value; 0 
in our case.  */
-+              DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
-+              panic("wrong stack register number");
-+      }
-+
-+      if (ridx < sor)
-+              ridx = rotate_reg(sor, rrb_gr, ridx);
-+
-+    old_rsc=ia64_get_rsc();
-+    new_rsc=old_rsc&(~(0x3));
-+    ia64_set_rsc(new_rsc);
-+
-+    bspstore = ia64_get_bspstore();
-+    bsp =kbs + (regs->loadrs >> 19); //16+3;
-+
-+      addr = ia64_rse_skip_regs(bsp, -sof + ridx);
-+    nat_mask = 1UL << ia64_rse_slot_num(addr);
-+      rnat_addr = ia64_rse_rnat_addr(addr);
-+
-+    if(addr >= bspstore){
-+
-+        ia64_flushrs ();
-+        ia64_mf ();
-+        bspstore = ia64_get_bspstore();
-+    }
-+      *val=*addr;
-+    if(bspstore < rnat_addr){
-+        *nat=!!(ia64_get_rnat()&nat_mask);
-+    }else{
-+        *nat = !!((*rnat_addr)&nat_mask);
-+    }
-+    ia64_set_rsc(old_rsc);
-+}
-+#else // CONFIG_VTI
- static void
- set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
- {
-@@ -435,9 +554,14 @@
-               *nat = 0;
-       return;
- }
-+#endif // CONFIG_VTI
- 
- 
-+#ifdef XEN
-+void
-+#else
- static void
-+#endif
- setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
- {
-       struct switch_stack *sw = (struct switch_stack *) regs - 1;
-@@ -466,7 +590,11 @@
-               unat = &sw->ar_unat;
-       } else {
-               addr = (unsigned long)regs;
-+#ifdef CONFIG_VTI
-+              unat = &regs->eml_unat;
-+#else //CONFIG_VTI
-               unat = &sw->caller_unat;
-+#endif  //CONFIG_VTI
-       }
-       DPRINT("tmp_base=%lx switch_stack=%s offset=%d\n",
-              addr, unat==&sw->ar_unat ? "yes":"no", GR_OFFS(regnum));
-@@ -522,7 +650,11 @@
-        */
-       if (regnum >= IA64_FIRST_ROTATING_FR) {
-               ia64_sync_fph(current);
-+#ifdef XEN
-+              current->arch._thread.fph[fph_index(regs, regnum)] = *fpval;
-+#else
-               current->thread.fph[fph_index(regs, regnum)] = *fpval;
-+#endif
-       } else {
-               /*
-                * pt_regs or switch_stack ?
-@@ -581,7 +713,11 @@
-        */
-       if (regnum >= IA64_FIRST_ROTATING_FR) {
-               ia64_flush_fph(current);
-+#ifdef XEN
-+              *fpval = current->arch._thread.fph[fph_index(regs, regnum)];
-+#else
-               *fpval = current->thread.fph[fph_index(regs, regnum)];
-+#endif
-       } else {
-               /*
-                * f0 = 0.0, f1= 1.0. Those registers are constant and are thus
-@@ -611,7 +747,11 @@
- }
- 
- 
-+#ifdef XEN
-+void
-+#else
- static void
-+#endif
- getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
- {
-       struct switch_stack *sw = (struct switch_stack *) regs - 1;
-@@ -640,7 +780,11 @@
-               unat = &sw->ar_unat;
-       } else {
-               addr = (unsigned long)regs;
-+#ifdef  CONFIG_VTI
-+              unat = &regs->eml_unat;;
-+#else   //CONFIG_VTI
-               unat = &sw->caller_unat;
-+#endif  //CONFIG_VTI
-       }
- 
-       DPRINT("addr_base=%lx offset=0x%x\n", addr,  GR_OFFS(regnum));
-@@ -1294,6 +1438,9 @@
- void
- ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
- {
-+#ifdef XEN
-+printk("ia64_handle_unaligned: called, not working yet\n");
-+#else
-       struct ia64_psr *ipsr = ia64_psr(regs);
-       mm_segment_t old_fs = get_fs();
-       unsigned long bundle[2];
-@@ -1502,4 +1649,5 @@
-       si.si_imm = 0;
-       force_sig_info(SIGBUS, &si, current);
-       goto done;
-+#endif
- }
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/bootmem.h
--- a/xen/arch/ia64/patch/linux-2.6.7/bootmem.h Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,12 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/bootmem.h     
2004-06-15 23:19:52.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/linux/bootmem.h        
2004-08-25 19:28:13.000000000 -0600
-@@ -41,7 +41,9 @@
- extern void __init free_bootmem (unsigned long addr, unsigned long size);
- extern void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal);
- #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-+#ifndef XEN
- extern void __init reserve_bootmem (unsigned long addr, unsigned long size);
-+#endif
- #define alloc_bootmem(x) \
-       __alloc_bootmem((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
- #define alloc_bootmem_low(x) \
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/current.h
--- a/xen/arch/ia64/patch/linux-2.6.7/current.h Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,17 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/current.h  
2004-06-15 23:19:52.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/current.h      
2004-08-25 19:28:12.000000000 -0600
-@@ -12,6 +12,14 @@
-  * In kernel mode, thread pointer (r13) is used to point to the current task
-  * structure.
-  */
-+#ifdef XEN
-+struct domain;
-+#define get_current() ((struct vcpu *) ia64_getreg(_IA64_REG_TP))
-+#define current get_current()
-+//#define set_current(d)      ia64_setreg(_IA64_REG_TP,(void *)d);
-+#define set_current(d)                (ia64_r13 = (void *)d)
-+#else
- #define current       ((struct task_struct *) ia64_getreg(_IA64_REG_TP))
-+#endif
- 
- #endif /* _ASM_IA64_CURRENT_H */
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/efi.c
--- a/xen/arch/ia64/patch/linux-2.6.7/efi.c     Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,85 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/efi.c      
2004-06-15 23:18:55.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/efi.c 2004-12-17 
13:47:03.000000000 -0700
-@@ -25,6 +25,9 @@
- #include <linux/types.h>
- #include <linux/time.h>
- #include <linux/efi.h>
-+#ifdef XEN
-+#include <xen/sched.h>
-+#endif
- 
- #include <asm/io.h>
- #include <asm/kregs.h>
-@@ -49,7 +52,10 @@
- {                                                                             
                \
-       struct ia64_fpreg fr[6];                                                
                \
-       efi_status_t ret;                                                       
                \
-+      efi_time_cap_t *atc = NULL;                                             
                \
-                                                                               
                \
-+      if (tc)                                                                 
                \
-+              atc = adjust_arg(tc);                                           
                \
-       ia64_save_scratch_fpregs(fr);                                           
                \
-       ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm),     \
-                               adjust_arg(tc));                                
                \
-@@ -201,6 +207,7 @@
-       if ((*efi.get_time)(&tm, 0) != EFI_SUCCESS)
-               return;
- 
-+      dummy();
-       ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
-       ts->tv_nsec = tm.nanosecond;
- }
-@@ -303,6 +310,10 @@
-               if (!(md->attribute & EFI_MEMORY_WB))
-                       continue;
- 
-+#ifdef XEN
-+// this is a temporary hack to avoid CONFIG_VIRTUAL_MEM_MAP
-+              if (md->phys_addr >= 0x100000000) continue;
-+#endif
-               /*
-                * granule_addr is the base of md's first granule.
-                * [granule_addr - first_non_wb_addr) is guaranteed to
-@@ -456,9 +467,11 @@
- 
-               cpu = smp_processor_id();
- 
-+#ifndef XEN
-               /* insert this TR into our list for MCA recovery purposes */
-               ia64_mca_tlb_list[cpu].pal_base = vaddr & mask;
-               ia64_mca_tlb_list[cpu].pal_paddr = pte_val(mk_pte_phys(md->phys_addr, PAGE_KERNEL));
-+#endif
-       }
- }
- 
-@@ -680,6 +693,30 @@
-       return 0;
- }
- 
-+#ifdef XEN
-+// variation of efi_get_iobase which returns entire memory descriptor
-+efi_memory_desc_t *
-+efi_get_io_md (void)
-+{
-+      void *efi_map_start, *efi_map_end, *p;
-+      efi_memory_desc_t *md;
-+      u64 efi_desc_size;
-+
-+      efi_map_start = __va(ia64_boot_param->efi_memmap);
-+      efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
-+      efi_desc_size = ia64_boot_param->efi_memdesc_size;
-+
-+      for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-+              md = p;
-+              if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
-+                      if (md->attribute & EFI_MEMORY_UC)
-+                              return md;
-+              }
-+      }
-+      return 0;
-+}
-+#endif
-+
- u32
- efi_mem_type (unsigned long phys_addr)
- {
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/efi.h
--- a/xen/arch/ia64/patch/linux-2.6.7/efi.h     Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,13 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/efi.h 
2004-06-15 23:20:03.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/linux/efi.h    
2004-08-25 19:28:13.000000000 -0600
-@@ -15,8 +15,10 @@
- #include <linux/string.h>
- #include <linux/time.h>
- #include <linux/types.h>
-+#ifndef XEN
- #include <linux/proc_fs.h>
- #include <linux/rtc.h>
-+#endif
- #include <linux/ioport.h>
- 
- #include <asm/page.h>
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/entry.S
--- a/xen/arch/ia64/patch/linux-2.6.7/entry.S   Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,195 +0,0 @@
---- ../../linux-2.6.7/arch/ia64/kernel/entry.S 2005-03-24 19:39:56.000000000 
-0700
-+++ arch/ia64/entry.S  2005-04-01 12:56:01.000000000 -0700
-@@ -35,7 +35,9 @@
- 
- #include <asm/asmmacro.h>
- #include <asm/cache.h>
-+#ifndef XEN
- #include <asm/errno.h>
-+#endif
- #include <asm/kregs.h>
- #include <asm/offsets.h>
- #include <asm/pgtable.h>
-@@ -46,6 +48,23 @@
- 
- #include "minstate.h"
- 
-+#ifdef XEN
-+#define       sys_execve 0
-+#define do_fork 0
-+#define       syscall_trace 0
-+#define schedule 0
-+#define do_notify_resume_user 0
-+#define ia64_rt_sigsuspend 0
-+#define ia64_rt_sigreturn 0
-+#define       ia64_handle_unaligned 0
-+#define       errno 0
-+#define       sys_ni_syscall 0
-+#define unw_init_frame_info 0
-+#define sys_call_table 0
-+#endif
-+
-+      /*
-+
-       /*
-        * execve() is special because in case of success, we need to
-        * setup a null register window frame.
-@@ -178,11 +197,14 @@
-       DO_SAVE_SWITCH_STACK
-       .body
- 
-+#ifdef XEN
-+//#undef IA64_TASK_THREAD_KSP_OFFSET
-+//#define     IA64_TASK_THREAD_KSP_OFFSET     0x38
-       adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
-       movl r25=init_task
-       mov r27=IA64_KR(CURRENT_STACK)
-       adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
--      dep r20=0,in0,61,3              // physical address of "current"
-+      dep r20=0,in0,60,4              // physical address of "current"
-       ;;
-       st8 [r22]=sp                    // save kernel stack pointer of old task
-       shr.u r26=r20,IA64_GRANULE_SHIFT
-@@ -194,6 +216,22 @@
- (p6)  cmp.eq p7,p6=r26,r27
- (p6)  br.cond.dpnt .map
-       ;;
-+#else
-+      adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
-+      mov r27=IA64_KR(CURRENT_STACK)
-+      dep r20=0,in0,61,3              // physical address of "current"
-+      ;;
-+      st8 [r22]=sp                    // save kernel stack pointer of old task
-+      shr.u r26=r20,IA64_GRANULE_SHIFT
-+      adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
-+      ;;
-+      /*
-+       * If we've already mapped this task's page, we can skip doing it again.
-+       */
-+      cmp.eq p7,p6=r26,r27
-+(p6)  br.cond.dpnt .map
-+      ;;
-+#endif
- .done:
- (p6)  ssm psr.ic                      // if we we had to map, renable the 
psr.ic bit FIRST!!!
-       ;;
-@@ -211,6 +249,16 @@
-       br.ret.sptk.many rp             // boogie on out in new context
- 
- .map:
-+#ifdef XEN
-+      // avoid overlapping with kernel TR
-+      movl r25=KERNEL_START
-+      dep  r23=0,in0,0,KERNEL_TR_PAGE_SHIFT
-+      ;;
-+      cmp.eq p7,p0=r25,r23
-+      ;;
-+(p7)  mov IA64_KR(CURRENT_STACK)=r26  // remember last page we mapped...
-+(p7)  br.cond.sptk .done
-+#endif
-       rsm psr.ic                      // interrupts (psr.i) are already 
disabled here
-       movl r25=PAGE_KERNEL
-       ;;
-@@ -367,7 +415,11 @@
-  *    - b7 holds address to return to
-  *    - must not touch r8-r11
-  */
-+#ifdef XEN
-+GLOBAL_ENTRY(load_switch_stack)
-+#else
- ENTRY(load_switch_stack)
-+#endif
-       .prologue
-       .altrp b7
- 
-@@ -595,6 +647,11 @@
-        */
-       br.call.sptk.many rp=ia64_invoke_schedule_tail
- }
-+#ifdef XEN
-+      // new domains are cloned but not exec'ed so switch to user mode here
-+      cmp.ne pKStk,pUStk=r0,r0
-+      br.cond.spnt ia64_leave_kernel
-+#else
- .ret8:
-       adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
-       ;;
-@@ -603,6 +660,7 @@
-       mov r8=0
-       tbit.nz p6,p0=r2,TIF_SYSCALL_TRACE
- (p6)  br.cond.spnt .strace_check_retval
-+#endif
-       ;;                                      // added stop bits to prevent 
r8 dependency
- END(ia64_ret_from_clone)
-       // fall through
-@@ -684,9 +742,14 @@
- #endif /* CONFIG_PREEMPT */
-       adds r16=PT(LOADRS)+16,r12
-       adds r17=PT(AR_BSPSTORE)+16,r12
-+#ifdef XEN
-+      mov r31=r0
-+      ;;
-+#else
-       adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
-       ;;
- (p6)  ld4 r31=[r18]                           // load 
current_thread_info()->flags
-+#endif
-       ld8 r19=[r16],PT(B6)-PT(LOADRS)         // load ar.rsc value for 
"loadrs"
-       nop.i 0
-       ;;
-@@ -745,7 +808,11 @@
-       mov b7=r0               // clear b7
-       ;;
- (pUStk) st1 [r14]=r3
-+#ifdef XEN
-+      movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
-+#else
-       addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
-+#endif
-       ;;
-       mov r16=ar.bsp          // get existing backing store pointer
-       srlz.i                  // ensure interruption collection is off
-@@ -796,9 +863,18 @@
-       ;;
- (p6)  cmp.eq.unc p6,p0=r21,r0         // p6 <- p6 && (r21 == 0)
- #endif /* CONFIG_PREEMPT */
-+#ifdef XEN
-+      alloc loc0=ar.pfs,0,1,1,0
-+      adds out0=16,r12
-+      ;;
-+(p6)  br.call.sptk.many b0=deliver_pending_interrupt
-+      mov ar.pfs=loc0
-+      mov r31=r0
-+#else
-       adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
-       ;;
- (p6)  ld4 r31=[r17]                           // load 
current_thread_info()->flags
-+#endif
-       adds r21=PT(PR)+16,r12
-       ;;
- 
-@@ -912,7 +988,11 @@
-       shr.u r18=r19,16        // get byte size of existing "dirty" partition
-       ;;
-       mov r16=ar.bsp          // get existing backing store pointer
-+#ifdef XEN
-+      movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
-+#else
-       addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
-+#endif
-       ;;
-       ld4 r17=[r17]           // r17 = cpu_data->phys_stacked_size_p8
- (pKStk)       br.cond.dpnt skip_rbs_switch
-@@ -1264,6 +1344,7 @@
-       br.ret.sptk.many rp
- END(unw_init_running)
- 
-+#ifndef XEN
-       .rodata
-       .align 8
-       .globl sys_call_table
-@@ -1526,3 +1607,4 @@
-       data8 sys_ni_syscall
- 
-       .org sys_call_table + 8*NR_syscalls     // guard against failures to 
increase NR_syscalls
-+#endif
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/gcc_intrin.h
--- a/xen/arch/ia64/patch/linux-2.6.7/gcc_intrin.h      Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,20 +0,0 @@
---- 
/home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/gcc_intrin.h    
   2005-01-23 13:23:36.000000000 -0700
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/gcc_intrin.h   
2004-08-25 19:28:13.000000000 -0600
-@@ -92,6 +92,9 @@
- 
- #define ia64_hint_pause 0
- 
-+#ifdef XEN
-+#define ia64_hint(mode)       0
-+#else
- #define ia64_hint(mode)                                               \
- ({                                                            \
-       switch (mode) {                                         \
-@@ -100,6 +103,7 @@
-               break;                                          \
-       }                                                       \
- })
-+#endif
- 
- 
- /* Integer values for mux1 instruction */
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/hardirq.h
--- a/xen/arch/ia64/patch/linux-2.6.7/hardirq.h Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,22 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/hardirq.h  
2004-06-15 23:19:02.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/hardirq.h      
2004-12-17 13:47:03.000000000 -0700
-@@ -81,10 +81,19 @@
-  */
- #define in_irq()              (hardirq_count())
- #define in_softirq()          (softirq_count())
-+#ifdef XEN
- #define in_interrupt()                (irq_count())
-+#else
-+#define in_interrupt()                0               // FIXME LATER
-+#endif
- 
-+#ifdef XEN
-+#define hardirq_trylock(cpu)  (!in_interrupt())
-+#define hardirq_endlock(cpu)  do { } while (0)
-+#else
- #define hardirq_trylock()     (!in_interrupt())
- #define hardirq_endlock()     do { } while (0)
-+#endif
- 
- #ifdef CONFIG_PREEMPT
- # include <linux/smp_lock.h>
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/head.S
--- a/xen/arch/ia64/patch/linux-2.6.7/head.S    Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,93 +0,0 @@
---- ../../linux-2.6.7/arch/ia64/kernel/head.S  2005-03-24 19:39:56.000000000 
-0700
-+++ arch/ia64/head.S   2005-04-01 12:56:01.000000000 -0700
-@@ -1,3 +1,8 @@
-+#ifdef XEN
-+#define       console_print   printf
-+#define kernel_thread_helper 0
-+#define sys_exit 0
-+#endif
- /*
-  * Here is where the ball gets rolling as far as the kernel is concerned.
-  * When control is transferred to _start, the bootload has already
-@@ -166,7 +171,11 @@
-       dep r18=0,r3,0,12
-       ;;
-       or r18=r17,r18
-+#ifdef XEN
-+      dep r2=-1,r3,60,4       // IMVA of task
-+#else
-       dep r2=-1,r3,61,3       // IMVA of task
-+#endif
-       ;;
-       mov r17=rr[r2]
-       ;;
-@@ -205,7 +214,11 @@
-       ;;
-       mov ar.rsc=0x3          // place RSE in eager mode
- 
-+#ifdef XEN
-+(isBP)        dep r28=-1,r28,60,4     // make address virtual
-+#else
- (isBP)        dep r28=-1,r28,61,3     // make address virtual
-+#endif
- (isBP)        movl r2=ia64_boot_param
-       ;;
- (isBP)        st8 [r2]=r28            // save the address of the boot param 
area passed by the bootloader
-@@ -238,14 +251,30 @@
-       br.call.sptk.many rp=sys_fw_init
- .ret1:
- #endif
-+#ifdef XEN
-+      alloc r2=ar.pfs,8,0,2,0
-+      ;;
-+#define fake_mbi_magic 0
-+#define MULTIBOOT_INFO_SIZE   1024
-+      .rodata
-+fake_mbi:
-+      .skip MULTIBOOT_INFO_SIZE
-+      .previous
-+      movl out0=fake_mbi
-+      ;;
-+      br.call.sptk.many rp=cmain
-+#else
-       br.call.sptk.many rp=start_kernel
-+#endif
- .ret2:        addl r3=@ltoff(halt_msg),gp
-       ;;
-       alloc r2=ar.pfs,8,0,2,0
-       ;;
-       ld8 out0=[r3]
-       br.call.sptk.many b0=console_print
-+      ;;
- self: br.sptk.many self               // endless loop
-+      ;;
- END(_start)
- 
- GLOBAL_ENTRY(ia64_save_debug_regs)
-@@ -781,8 +810,13 @@
-       movl r18=KERNEL_START
-       dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
-       dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
-+#ifdef XEN
-+      dep r17=-1,r17,60,4
-+      dep sp=-1,sp,60,4
-+#else
-       dep r17=-1,r17,61,3
-       dep sp=-1,sp,61,3
-+#endif
-       ;;
-       or r3=r3,r18
-       or r14=r14,r18
-@@ -838,7 +872,12 @@
-  * intermediate precision so that we can produce a full 64-bit result.
-  */
- GLOBAL_ENTRY(sched_clock)
-+#ifdef XEN
-+      break 0;;       // FIX IA64_CPUINFO_NSEC_PER_CYC_OFFSET
-+      //movl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET
-+#else
-       addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
-+#endif
-       mov.m r9=ar.itc         // fetch cycle-counter                          
(35 cyc)
-       ;;
-       ldf8 f8=[r8]
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/hpsim_irq.c
--- a/xen/arch/ia64/patch/linux-2.6.7/hpsim_irq.c       Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,36 +0,0 @@
---- 
/home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/hp/sim/hpsim_irq.c     
   2004-06-15 23:20:26.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/hpsim_irq.c   2004-11-01 
17:54:15.000000000 -0700
-@@ -9,7 +9,17 @@
- #include <linux/kernel.h>
- #include <linux/sched.h>
- #include <linux/irq.h>
-+#ifdef XEN
-+#include <asm/hw_irq.h>
-+#endif
- 
-+#if 1
-+void __init
-+hpsim_irq_init (void)
-+{
-+      printf("*** hpsim_irq_init called: NOT NEEDED?!?!?\n");
-+}
-+#else
- static unsigned int
- hpsim_irq_startup (unsigned int irq)
- {
-@@ -19,6 +29,10 @@
- static void
- hpsim_irq_noop (unsigned int irq)
- {
-+#if 1
-+printf("hpsim_irq_noop: irq=%d\n",irq);
-+while(irq);
-+#endif
- }
- 
- static struct hw_interrupt_type irq_type_hp_sim = {
-@@ -44,3 +58,4 @@
-                       idesc->handler = &irq_type_hp_sim;
-       }
- }
-+#endif
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/hpsim_ssc.h
--- a/xen/arch/ia64/patch/linux-2.6.7/hpsim_ssc.h       Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,26 +0,0 @@
---- 
/home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/hp/sim/hpsim_ssc.h     
   2004-06-15 23:19:43.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/hpsim_ssc.h    
2004-08-29 01:04:23.000000000 -0600
-@@ -33,4 +33,23 @@
-  */
- extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
- 
-+#ifdef XEN
-+/* Note: These are declared in linux/arch/ia64/hp/sim/simscsi.c but belong
-+ * in linux/include/asm-ia64/hpsim_ssc.h, hence their addition here */
-+#define SSC_OPEN                      50
-+#define SSC_CLOSE                     51
-+#define SSC_READ                      52
-+#define SSC_WRITE                     53
-+#define SSC_GET_COMPLETION            54
-+#define SSC_WAIT_COMPLETION           55
-+
-+#define SSC_WRITE_ACCESS              2
-+#define SSC_READ_ACCESS                       1
-+
-+struct ssc_disk_req {
-+      unsigned long addr;
-+      unsigned long len;
-+};
-+#endif
-+
- #endif /* _IA64_PLATFORM_HPSIM_SSC_H */
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/hw_irq.h
--- a/xen/arch/ia64/patch/linux-2.6.7/hw_irq.h  Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,24 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/hw_irq.h   
2004-06-15 23:19:22.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/hw_irq.h       
2004-08-27 09:07:38.000000000 -0600
-@@ -9,7 +9,9 @@
- #include <linux/interrupt.h>
- #include <linux/sched.h>
- #include <linux/types.h>
-+#ifndef XEN
- #include <linux/profile.h>
-+#endif
- 
- #include <asm/machvec.h>
- #include <asm/ptrace.h>
-@@ -96,7 +98,11 @@
-  * Default implementations for the irq-descriptor API:
-  */
- 
-+#ifdef XEN
-+#define _irq_desc irq_desc
-+#else
- extern irq_desc_t _irq_desc[NR_IRQS];
-+#endif
- 
- #ifndef CONFIG_IA64_GENERIC
- static inline irq_desc_t *
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/ide.h
--- a/xen/arch/ia64/patch/linux-2.6.7/ide.h     Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,35 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/ide.h      
2004-06-15 23:19:36.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/ide.h  2004-08-25 
19:28:13.000000000 -0600
-@@ -64,6 +64,32 @@
- #define ide_init_default_irq(base)    ide_default_irq(base)
- #endif
- 
-+#ifdef XEN
-+// this is moved to linux/ide.h in newer versions of linux
-+typedef union {
-+      unsigned all                    : 8;    /* all of the bits together */
-+      struct {
-+              unsigned head           : 4;    /* always zeros here */
-+              unsigned unit           : 1;    /* drive select number, 0 or 1 
*/
-+              unsigned bit5           : 1;    /* always 1 */
-+              unsigned lba            : 1;    /* using LBA instead of CHS */
-+              unsigned bit7           : 1;    /* always 1 */
-+      } b;
-+} select_t;
-+
-+typedef union {
-+      unsigned all                    : 8;    /* all of the bits together */
-+      struct {
-+              unsigned bit0           : 1;
-+              unsigned nIEN           : 1;    /* device INTRQ to host */
-+              unsigned SRST           : 1;    /* host soft reset bit */
-+              unsigned bit3           : 1;    /* ATA-2 thingy */
-+              unsigned reserved456    : 3;
-+              unsigned HOB            : 1;    /* 48-bit address ordering */
-+      } b;
-+} control_t;
-+#endif
-+
- #include <asm-generic/ide_iops.h>
- 
- #endif /* __KERNEL__ */
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/init_task.c
--- a/xen/arch/ia64/patch/linux-2.6.7/init_task.c       Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,35 +0,0 @@
---- 
/home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/init_task.c     
   2004-06-15 23:20:26.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/init_task.c   2004-08-27 
00:06:35.000000000 -0600
-@@ -15,10 +15,12 @@
- #include <asm/uaccess.h>
- #include <asm/pgtable.h>
- 
-+#ifndef XEN
- static struct fs_struct init_fs = INIT_FS;
- static struct files_struct init_files = INIT_FILES;
- static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
- static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-+#endif
- struct mm_struct init_mm = INIT_MM(init_mm);
- 
- EXPORT_SYMBOL(init_mm);
-@@ -33,13 +35,19 @@
- 
- union {
-       struct {
-+#ifdef XEN
-+              struct domain task;
-+#else
-               struct task_struct task;
-               struct thread_info thread_info;
-+#endif
-       } s;
-       unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
- } init_task_mem asm ("init_task") __attribute__((section(".data.init_task"))) = {{
-       .task =         INIT_TASK(init_task_mem.s.task),
-+#ifndef XEN
-       .thread_info =  INIT_THREAD_INFO(init_task_mem.s.task)
-+#endif
- }};
- 
- EXPORT_SYMBOL(init_task);
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/init_task.h
--- a/xen/arch/ia64/patch/linux-2.6.7/init_task.h       Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,53 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/init_task.h   
2004-06-15 23:18:57.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/linux/init_task.h      
2004-11-15 17:06:20.000000000 -0700
-@@ -31,6 +31,18 @@
-       .max_reqs       = ~0U,                          \
- }
- 
-+#ifdef XEN
-+#define INIT_MM(name) \
-+{                                                             \
-+      .mm_rb          = RB_ROOT,                              \
-+      .pgd            = swapper_pg_dir,                       \
-+      .mm_users       = ATOMIC_INIT(2),                       \
-+      .mm_count       = ATOMIC_INIT(1),                       \
-+      .page_table_lock =  SPIN_LOCK_UNLOCKED,                 \
-+      .mmlist         = LIST_HEAD_INIT(name.mmlist),          \
-+      .cpu_vm_mask    = CPU_MASK_ALL,                         \
-+}
-+#else
- #define INIT_MM(name) \
- {                                                             \
-       .mm_rb          = RB_ROOT,                              \
-@@ -43,6 +55,7 @@
-       .cpu_vm_mask    = CPU_MASK_ALL,                         \
-       .default_kioctx = INIT_KIOCTX(name.default_kioctx, name),       \
- }
-+#endif
- 
- #define INIT_SIGNALS(sig) {   \
-       .count          = ATOMIC_INIT(1),               \
-@@ -64,6 +77,15 @@
-  *  INIT_TASK is used to set up the first task table, touch at
-  * your own risk!. Base=0, limit=0x1fffff (=2MB)
-  */
-+#ifdef XEN
-+#define INIT_TASK(tsk) \
-+{                                                     \
-+      /*processor:    0,*/                            \
-+      /*domain_id:    IDLE_DOMAIN_ID,*/               \
-+      /*domain_flags: DOMF_idle_domain,*/             \
-+      refcnt:         ATOMIC_INIT(1)                  \
-+}
-+#else
- #define INIT_TASK(tsk)        \
- {                                                                     \
-       .state          = 0,                                            \
-@@ -113,6 +135,7 @@
-       .switch_lock    = SPIN_LOCK_UNLOCKED,                           \
-       .journal_info   = NULL,                                         \
- }
-+#endif
- 
- 
- 
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/interrupt.h
--- a/xen/arch/ia64/patch/linux-2.6.7/interrupt.h       Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,18 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/interrupt.h   
2004-06-15 23:19:29.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/linux/interrupt.h      
2004-08-25 19:28:13.000000000 -0600
-@@ -32,6 +32,7 @@
- #define IRQ_HANDLED   (1)
- #define IRQ_RETVAL(x) ((x) != 0)
- 
-+#ifndef XEN
- struct irqaction {
-       irqreturn_t (*handler)(int, void *, struct pt_regs *);
-       unsigned long flags;
-@@ -46,6 +47,7 @@
-                      irqreturn_t (*handler)(int, void *, struct pt_regs *),
-                      unsigned long, const char *, void *);
- extern void free_irq(unsigned int, void *);
-+#endif
- 
- /*
-  * Temporary defines for UP kernels, until all code gets fixed.
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/io.h
--- a/xen/arch/ia64/patch/linux-2.6.7/io.h      Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,14 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/io.h       
2004-06-15 23:18:57.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/io.h   2004-11-05 
16:53:36.000000000 -0700
-@@ -23,7 +23,11 @@
- #define __SLOW_DOWN_IO        do { } while (0)
- #define SLOW_DOWN_IO  do { } while (0)
- 
-+#ifdef XEN
-+#define __IA64_UNCACHED_OFFSET        0xdffc000000000000      /* region 6 */
-+#else
- #define __IA64_UNCACHED_OFFSET        0xc000000000000000      /* region 6 */
-+#endif
- 
- /*
-  * The legacy I/O space defined by the ia64 architecture supports only 65536 
ports, but
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/irq.h
--- a/xen/arch/ia64/patch/linux-2.6.7/irq.h     Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,18 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/irq.h      
2005-01-23 13:23:36.000000000 -0700
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/irq.h  2004-08-25 
19:28:13.000000000 -0600
-@@ -30,6 +30,15 @@
- extern void enable_irq (unsigned int);
- extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
- 
-+#ifdef XEN
-+// dup'ed from signal.h to avoid changes to includes
-+#define       SA_NOPROFILE    0x02000000
-+#define       SA_SHIRQ        0x04000000
-+#define       SA_RESTART      0x10000000
-+#define       SA_INTERRUPT    0x20000000
-+#define       SA_SAMPLE_RANDOM        SA_RESTART
-+#endif
-+
- #ifdef CONFIG_SMP
- extern void move_irq(int irq);
- #else
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c
--- a/xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c        Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,82 +0,0 @@
---- /home/djm/linux-2.6.7/arch/ia64/kernel/irq_ia64.c  2004-06-15 
23:19:13.000000000 -0600
-+++ arch/ia64/irq_ia64.c       2005-02-17 13:17:16.000000000 -0700
-@@ -17,18 +17,26 @@
- #include <linux/config.h>
- #include <linux/module.h>
- 
-+#ifndef XEN
- #include <linux/jiffies.h>
-+#endif
- #include <linux/errno.h>
- #include <linux/init.h>
- #include <linux/interrupt.h>
- #include <linux/ioport.h>
-+#ifndef XEN
- #include <linux/kernel_stat.h>
-+#endif
- #include <linux/slab.h>
-+#ifndef XEN
- #include <linux/ptrace.h>
- #include <linux/random.h>     /* for rand_initialize_irq() */
- #include <linux/signal.h>
-+#endif
- #include <linux/smp.h>
-+#ifndef XEN
- #include <linux/smp_lock.h>
-+#endif
- #include <linux/threads.h>
- 
- #include <asm/bitops.h>
-@@ -101,6 +109,24 @@
- ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
- {
-       unsigned long saved_tpr;
-+#if 0
-+//FIXME: For debug only, can be removed
-+      static char firstirq = 1;
-+      static char firsttime[256];
-+      static char firstpend[256];
-+      if (firstirq) {
-+              int i;
-+              for (i=0;i<256;i++) firsttime[i] = 1;
-+              for (i=0;i<256;i++) firstpend[i] = 1;
-+              firstirq = 0;
-+      }
-+      if (firsttime[vector]) {
-+              printf("**** (entry) First received int on vector=%d,itc=%lx\n",
-+                      (unsigned long) vector, ia64_get_itc());
-+              firsttime[vector] = 0;
-+      }
-+#endif
-+
- 
- #if IRQ_DEBUG
-       {
-@@ -145,6 +171,27 @@
-                       ia64_setreg(_IA64_REG_CR_TPR, vector);
-                       ia64_srlz_d();
- 
-+#ifdef XEN
-+      if (vector != 0xef) {
-+              extern void vcpu_pend_interrupt(void *, int);
-+#if 0
-+              if (firsttime[vector]) {
-+                      printf("**** (iterate) First received int on vector=%d,itc=%lx\n",
-+                      (unsigned long) vector, ia64_get_itc());
-+                      firsttime[vector] = 0;
-+              }
-+              if (firstpend[vector]) {
-+                      printf("**** First pended int on vector=%d,itc=%lx\n",
-+                              (unsigned long) vector,ia64_get_itc());
-+                      firstpend[vector] = 0;
-+              }
-+#endif
-+              //FIXME: TEMPORARY HACK!!!!
-+              vcpu_pend_interrupt(dom0->vcpu[0],vector);
-+              domain_wake(dom0->vcpu[0]);
-+      }
-+      else
-+#endif
-                       do_IRQ(local_vector_to_irq(vector), regs);
- 
-                       /*
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/ivt.S
--- a/xen/arch/ia64/patch/linux-2.6.7/ivt.S     Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,528 +0,0 @@
---- ../../linux-2.6.7/arch/ia64/kernel/ivt.S   2004-06-15 23:18:59.000000000 
-0600
-+++ arch/ia64/ivt.S    2005-04-01 12:56:01.000000000 -0700
-@@ -1,3 +1,21 @@
-+
-+#ifdef XEN
-+//#define CONFIG_DISABLE_VHPT // FIXME: change when VHPT is enabled??
-+// these are all hacked out for now as the entire IVT
-+// will eventually be replaced... just want to use it
-+// for startup code to handle TLB misses
-+//#define ia64_leave_kernel 0
-+//#define ia64_ret_from_syscall 0
-+//#define ia64_handle_irq 0
-+//#define ia64_fault 0
-+#define ia64_illegal_op_fault 0
-+#define ia64_prepare_handle_unaligned 0
-+#define ia64_bad_break 0
-+#define ia64_trace_syscall 0
-+#define sys_call_table 0
-+#define sys_ni_syscall 0
-+#include <asm/vhpt.h>
-+#endif
- /*
-  * arch/ia64/kernel/ivt.S
-  *
-@@ -76,6 +94,13 @@
-       mov r19=n;;                     /* prepare to save predicates */        
        \
-       br.sptk.many dispatch_to_fault_handler
- 
-+#ifdef XEN
-+#define REFLECT(n)                                                            
        \
-+      mov r31=pr;                                                             
        \
-+      mov r19=n;;                     /* prepare to save predicates */        
        \
-+      br.sptk.many dispatch_reflection
-+#endif
-+
-       .section .text.ivt,"ax"
- 
-       .align 32768    // align on 32KB boundary
-@@ -213,6 +238,13 @@
- // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
- ENTRY(itlb_miss)
-       DBG_FAULT(1)
-+#ifdef XEN
-+      VHPT_CCHAIN_LOOKUP(itlb_miss,i)
-+#ifdef VHPT_GLOBAL
-+      br.cond.sptk page_fault
-+      ;;
-+#endif
-+#endif
-       /*
-        * The ITLB handler accesses the L3 PTE via the virtually mapped linear
-        * page table.  If a nested TLB miss occurs, we switch into physical
-@@ -257,6 +289,13 @@
- // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
- ENTRY(dtlb_miss)
-       DBG_FAULT(2)
-+#ifdef XEN
-+      VHPT_CCHAIN_LOOKUP(dtlb_miss,d)
-+#ifdef VHPT_GLOBAL
-+      br.cond.sptk page_fault
-+      ;;
-+#endif
-+#endif
-       /*
-        * The DTLB handler accesses the L3 PTE via the virtually mapped linear
-        * page table.  If a nested TLB miss occurs, we switch into physical
-@@ -301,6 +340,13 @@
- // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
- ENTRY(alt_itlb_miss)
-       DBG_FAULT(3)
-+#ifdef XEN
-+//#ifdef VHPT_GLOBAL
-+//    VHPT_CCHAIN_LOOKUP(alt_itlb_miss,i)
-+//    br.cond.sptk page_fault
-+//    ;;
-+//#endif
-+#endif
-       mov r16=cr.ifa          // get address that caused the TLB miss
-       movl r17=PAGE_KERNEL
-       mov r21=cr.ipsr
-@@ -339,6 +385,13 @@
- // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
- ENTRY(alt_dtlb_miss)
-       DBG_FAULT(4)
-+#ifdef XEN
-+//#ifdef VHPT_GLOBAL
-+//    VHPT_CCHAIN_LOOKUP(alt_dtlb_miss,d)
-+//    br.cond.sptk page_fault
-+//    ;;
-+//#endif
-+#endif
-       mov r16=cr.ifa          // get address that caused the TLB miss
-       movl r17=PAGE_KERNEL
-       mov r20=cr.isr
-@@ -368,6 +421,17 @@
-       cmp.ne p8,p0=r0,r23
- (p9)  cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22  // check isr.code field
- (p8)  br.cond.spnt page_fault
-+#ifdef XEN
-+      ;;
-+      // FIXME: inadequate test, this is where we test for Xen address
-+      // note that 0xf000 (cached) and 0xd000 (uncached) addresses
-+      // should be OK.  (Though no I/O is done in Xen, EFI needs uncached
-+      // addresses and some domain EFI calls are passed through)
-+      tbit.nz p0,p8=r16,60
-+(p8)  br.cond.spnt page_fault
-+//(p8)        br.cond.spnt 0
-+      ;;
-+#endif
- 
-       dep r21=-1,r21,IA64_PSR_ED_BIT,1
-       or r19=r19,r17          // insert PTE control bits into r19
-@@ -448,6 +512,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
- ENTRY(ikey_miss)
-+#ifdef XEN
-+      REFLECT(6)
-+#endif
-       DBG_FAULT(6)
-       FAULT(6)
- END(ikey_miss)
-@@ -460,9 +527,16 @@
-       srlz.i
-       ;;
-       SAVE_MIN_WITH_COVER
-+#ifdef XEN
-+      alloc r15=ar.pfs,0,0,4,0
-+      mov out0=cr.ifa
-+      mov out1=cr.isr
-+      mov out3=cr.itir
-+#else
-       alloc r15=ar.pfs,0,0,3,0
-       mov out0=cr.ifa
-       mov out1=cr.isr
-+#endif
-       adds r3=8,r2                            // set up second base pointer
-       ;;
-       ssm psr.ic | PSR_DEFAULT_BITS
-@@ -483,6 +557,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
- ENTRY(dkey_miss)
-+#ifdef XEN
-+      REFLECT(7)
-+#endif
-       DBG_FAULT(7)
-       FAULT(7)
- END(dkey_miss)
-@@ -491,6 +568,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
- ENTRY(dirty_bit)
-+#ifdef XEN
-+      REFLECT(8)
-+#endif
-       DBG_FAULT(8)
-       /*
-        * What we do here is to simply turn on the dirty bit in the PTE.  We 
need to
-@@ -553,6 +633,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
- ENTRY(iaccess_bit)
-+#ifdef XEN
-+      REFLECT(9)
-+#endif
-       DBG_FAULT(9)
-       // Like Entry 8, except for instruction access
-       mov r16=cr.ifa                          // get the address that caused 
the fault
-@@ -618,6 +701,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
- ENTRY(daccess_bit)
-+#ifdef XEN
-+      REFLECT(10)
-+#endif
-       DBG_FAULT(10)
-       // Like Entry 8, except for data access
-       mov r16=cr.ifa                          // get the address that caused 
the fault
-@@ -686,6 +772,16 @@
-        * to prevent leaking bits from kernel to user level.
-        */
-       DBG_FAULT(11)
-+#ifdef XEN
-+      mov r16=cr.isr
-+      mov r17=cr.iim
-+      mov r31=pr
-+      ;;
-+      cmp.eq p7,p0=r0,r17                     // is this a pseudo-cover?
-+      // FIXME: may also need to check slot==2?
-+(p7)  br.sptk.many dispatch_privop_fault
-+      br.sptk.many dispatch_break_fault
-+#endif
-       mov r16=IA64_KR(CURRENT)                // r16 = current task; 12 cycle 
read lat.
-       mov r17=cr.iim
-       mov r18=__IA64_BREAK_SYSCALL
-@@ -696,7 +792,9 @@
-       mov r27=ar.rsc
-       mov r26=ar.pfs
-       mov r28=cr.iip
-+#ifndef XEN
-       mov r31=pr                              // prepare to save predicates
-+#endif
-       mov r20=r1
-       ;;
-       adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
-@@ -792,6 +890,36 @@
-       DBG_FAULT(13)
-       FAULT(13)
- 
-+#ifdef XEN
-+      // There is no particular reason for this code to be here, other than 
that
-+      // there happens to be space here that would go unused otherwise.  If 
this
-+      // fault ever gets "unreserved", simply move the following code to a 
more
-+      // suitable spot...
-+
-+ENTRY(dispatch_break_fault)
-+      SAVE_MIN_WITH_COVER
-+      ;;
-+      alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
-+      mov out0=cr.ifa
-+      adds out1=16,sp
-+      mov out2=cr.isr         // FIXME: pity to make this slow access twice
-+      mov out3=cr.iim         // FIXME: pity to make this slow access twice
-+
-+      ssm psr.ic | PSR_DEFAULT_BITS
-+      ;;
-+      srlz.i                                  // guarantee that interruption 
collection is on
-+      ;;
-+(p15) ssm psr.i                               // restore psr.i
-+      adds r3=8,r2                            // set up second base pointer
-+      ;;
-+      SAVE_REST
-+      movl r14=ia64_leave_kernel
-+      ;;
-+      mov rp=r14
-+      br.sptk.many ia64_prepare_handle_break
-+END(dispatch_break_fault)
-+#endif
-+
-       .org ia64_ivt+0x3800
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x3800 Entry 14 (size 64 bundles) Reserved
-@@ -842,9 +970,11 @@
-        *      - ar.fpsr: set to kernel settings
-        */
- GLOBAL_ENTRY(ia64_syscall_setup)
-+#ifndef XEN
- #if PT(B6) != 0
- # error This code assumes that b6 is the first field in pt_regs.
- #endif
-+#endif
-       st8 [r1]=r19                            // save b6
-       add r16=PT(CR_IPSR),r1                  // initialize first base pointer
-       add r17=PT(R11),r1                      // initialize second base 
pointer
-@@ -974,6 +1104,37 @@
-       DBG_FAULT(16)
-       FAULT(16)
- 
-+#ifdef XEN
-+      // There is no particular reason for this code to be here, other than 
that
-+      // there happens to be space here that would go unused otherwise.  If 
this
-+      // fault ever gets "unreserved", simply move the following code to a 
more
-+      // suitable spot...
-+
-+ENTRY(dispatch_privop_fault)
-+      SAVE_MIN_WITH_COVER
-+      ;;
-+      alloc r14=ar.pfs,0,0,4,0                // now it's safe (must be first 
in insn group!)
-+      mov out0=cr.ifa
-+      adds out1=16,sp
-+      mov out2=cr.isr         // FIXME: pity to make this slow access twice
-+      mov out3=cr.itir
-+
-+      ssm psr.ic | PSR_DEFAULT_BITS
-+      ;;
-+      srlz.i                                  // guarantee that interruption 
collection is on
-+      ;;
-+(p15) ssm psr.i                               // restore psr.i
-+      adds r3=8,r2                            // set up second base pointer
-+      ;;
-+      SAVE_REST
-+      movl r14=ia64_leave_kernel
-+      ;;
-+      mov rp=r14
-+      br.sptk.many ia64_prepare_handle_privop
-+END(dispatch_privop_fault)
-+#endif
-+
-+
-       .org ia64_ivt+0x4400
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x4400 Entry 17 (size 64 bundles) Reserved
-@@ -1090,6 +1251,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
- ENTRY(page_not_present)
-+#ifdef XEN
-+      REFLECT(20)
-+#endif
-       DBG_FAULT(20)
-       mov r16=cr.ifa
-       rsm psr.dt
-@@ -1110,6 +1274,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
- ENTRY(key_permission)
-+#ifdef XEN
-+      REFLECT(21)
-+#endif
-       DBG_FAULT(21)
-       mov r16=cr.ifa
-       rsm psr.dt
-@@ -1123,6 +1290,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
- ENTRY(iaccess_rights)
-+#ifdef XEN
-+      REFLECT(22)
-+#endif
-       DBG_FAULT(22)
-       mov r16=cr.ifa
-       rsm psr.dt
-@@ -1136,6 +1306,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
- ENTRY(daccess_rights)
-+#ifdef XEN
-+      REFLECT(23)
-+#endif
-       DBG_FAULT(23)
-       mov r16=cr.ifa
-       rsm psr.dt
-@@ -1153,8 +1326,13 @@
-       mov r16=cr.isr
-       mov r31=pr
-       ;;
-+#ifdef XEN
-+      cmp4.ge p6,p0=0x20,r16
-+(p6)  br.sptk.many dispatch_privop_fault
-+#else
-       cmp4.eq p6,p0=0,r16
- (p6)  br.sptk.many dispatch_illegal_op_fault
-+#endif
-       ;;
-       mov r19=24              // fault number
-       br.sptk.many dispatch_to_fault_handler
-@@ -1164,6 +1342,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
- ENTRY(disabled_fp_reg)
-+#ifdef XEN
-+      REFLECT(25)
-+#endif
-       DBG_FAULT(25)
-       rsm psr.dfh             // ensure we can access fph
-       ;;
-@@ -1177,6 +1358,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
- ENTRY(nat_consumption)
-+#ifdef XEN
-+      REFLECT(26)
-+#endif
-       DBG_FAULT(26)
-       FAULT(26)
- END(nat_consumption)
-@@ -1185,6 +1369,10 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
- ENTRY(speculation_vector)
-+#ifdef XEN
-+      // this probably need not reflect...
-+      REFLECT(27)
-+#endif
-       DBG_FAULT(27)
-       /*
-        * A [f]chk.[as] instruction needs to take the branch to the recovery 
code but
-@@ -1228,6 +1416,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
- ENTRY(debug_vector)
-+#ifdef XEN
-+      REFLECT(29)
-+#endif
-       DBG_FAULT(29)
-       FAULT(29)
- END(debug_vector)
-@@ -1236,6 +1427,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
- ENTRY(unaligned_access)
-+#ifdef XEN
-+      REFLECT(30)
-+#endif
-       DBG_FAULT(30)
-       mov r16=cr.ipsr
-       mov r31=pr              // prepare to save predicates
-@@ -1247,6 +1441,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
- ENTRY(unsupported_data_reference)
-+#ifdef XEN
-+      REFLECT(31)
-+#endif
-       DBG_FAULT(31)
-       FAULT(31)
- END(unsupported_data_reference)
-@@ -1255,6 +1452,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
- ENTRY(floating_point_fault)
-+#ifdef XEN
-+      REFLECT(32)
-+#endif
-       DBG_FAULT(32)
-       FAULT(32)
- END(floating_point_fault)
-@@ -1263,6 +1463,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
- ENTRY(floating_point_trap)
-+#ifdef XEN
-+      REFLECT(33)
-+#endif
-       DBG_FAULT(33)
-       FAULT(33)
- END(floating_point_trap)
-@@ -1271,6 +1474,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
- ENTRY(lower_privilege_trap)
-+#ifdef XEN
-+      REFLECT(34)
-+#endif
-       DBG_FAULT(34)
-       FAULT(34)
- END(lower_privilege_trap)
-@@ -1279,6 +1485,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
- ENTRY(taken_branch_trap)
-+#ifdef XEN
-+      REFLECT(35)
-+#endif
-       DBG_FAULT(35)
-       FAULT(35)
- END(taken_branch_trap)
-@@ -1287,6 +1496,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
- ENTRY(single_step_trap)
-+#ifdef XEN
-+      REFLECT(36)
-+#endif
-       DBG_FAULT(36)
-       FAULT(36)
- END(single_step_trap)
-@@ -1343,6 +1555,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception 
(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
- ENTRY(ia32_exception)
-+#ifdef XEN
-+      REFLECT(45)
-+#endif
-       DBG_FAULT(45)
-       FAULT(45)
- END(ia32_exception)
-@@ -1351,6 +1566,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept  (30,31,59,70,71)
- ENTRY(ia32_intercept)
-+#ifdef XEN
-+      REFLECT(46)
-+#endif
-       DBG_FAULT(46)
- #ifdef        CONFIG_IA32_SUPPORT
-       mov r31=pr
-@@ -1381,6 +1599,9 @@
- 
/////////////////////////////////////////////////////////////////////////////////////////
- // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt  (74)
- ENTRY(ia32_interrupt)
-+#ifdef XEN
-+      REFLECT(47)
-+#endif
-       DBG_FAULT(47)
- #ifdef CONFIG_IA32_SUPPORT
-       mov r31=pr
-@@ -1510,6 +1731,39 @@
-       DBG_FAULT(67)
-       FAULT(67)
- 
-+#ifdef XEN
-+      .org ia64_ivt+0x8000
-+ENTRY(dispatch_reflection)
-+      /*
-+       * Input:
-+       *      psr.ic: off
-+       *      r19:    intr type (offset into ivt, see ia64_int.h)
-+       *      r31:    contains saved predicates (pr)
-+       */
-+      SAVE_MIN_WITH_COVER_R19
-+      alloc r14=ar.pfs,0,0,5,0
-+      mov out4=r15
-+      mov out0=cr.ifa
-+      adds out1=16,sp
-+      mov out2=cr.isr
-+      mov out3=cr.iim
-+//    mov out3=cr.itir
-+
-+      ssm psr.ic | PSR_DEFAULT_BITS
-+      ;;
-+      srlz.i                                  // guarantee that interruption 
collection is on
-+      ;;
-+(p15) ssm psr.i                               // restore psr.i
-+      adds r3=8,r2                            // set up second base pointer
-+      ;;
-+      SAVE_REST
-+      movl r14=ia64_leave_kernel
-+      ;;
-+      mov rp=r14
-+      br.sptk.many ia64_prepare_handle_reflection
-+END(dispatch_reflection)
-+#endif
-+
- #ifdef CONFIG_IA32_SUPPORT
- 
-       /*
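
One detail worth calling out from the alt_dtlb_miss hunk above: under XEN, the faulting address is screened by its region bits (tbit.nz p0,p8=r16,60), so that 0xf... (cached) and 0xd... (uncached) Xen/EFI addresses fall through to the identity handling while everything else is sent to page_fault. The patch's own comment marks the test as inadequate (FIXME). A small C rendering of the check, for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative rendering of the "tbit.nz p0,p8=r16,60" screen in
     * alt_dtlb_miss: bit 60 is set for the 0xd... and 0xf... regions that
     * Xen and EFI use, and clear for addresses that should take page_fault. */
    static int is_xen_or_efi_region(uint64_t va)
    {
        return (int)((va >> 60) & 1);
    }

    int main(void)
    {
        printf("%d\n", is_xen_or_efi_region(0xf000000000001000UL));  /* 1: cached Xen   */
        printf("%d\n", is_xen_or_efi_region(0xd000000000001000UL));  /* 1: uncached/EFI */
        printf("%d\n", is_xen_or_efi_region(0xe000000000001000UL));  /* 0: page_fault   */
        return 0;
    }
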
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/kregs.h
--- a/xen/arch/ia64/patch/linux-2.6.7/kregs.h   Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,13 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/kregs.h    2004-06-15 23:19:01.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/kregs.h        2004-09-17 18:27:22.000000000 -0600
-@@ -30,6 +30,10 @@
- #define IA64_TR_PALCODE               1       /* itr1: maps PALcode as 
required by EFI */
- #define IA64_TR_PERCPU_DATA   1       /* dtr1: percpu data */
- #define IA64_TR_CURRENT_STACK 2       /* dtr2: maps kernel's memory- & 
register-stacks */
-+#ifdef XEN
-+#define IA64_TR_SHARED_INFO   3       /* dtr3: page shared with domain */
-+#define       IA64_TR_VHPT            4       /* dtr4: vhpt */
-+#endif
- 
- /* Processor status register bits: */
- #define IA64_PSR_BE_BIT               1
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/lds.S
--- a/xen/arch/ia64/patch/linux-2.6.7/lds.S     Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,17 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/vmlinux.lds.S    2004-06-15 23:19:52.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/xen.lds.S     2004-08-25 19:28:12.000000000 -0600
-@@ -11,12 +11,14 @@
- OUTPUT_FORMAT("elf64-ia64-little")
- OUTPUT_ARCH(ia64)
- ENTRY(phys_start)
-+#ifndef XEN
- jiffies = jiffies_64;
- PHDRS {
-   code   PT_LOAD;
-   percpu PT_LOAD;
-   data   PT_LOAD;
- }
-+#endif
- SECTIONS
- {
-   /* Sections to be discarded */
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/linuxtime.h
--- a/xen/arch/ia64/patch/linux-2.6.7/linuxtime.h       Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,34 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/time.h        2004-06-15 23:19:37.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/xen/linuxtime.h 2004-11-15 17:42:04.000000000 -0700
-@@ -1,6 +1,11 @@
- #ifndef _LINUX_TIME_H
- #define _LINUX_TIME_H
- 
-+#ifdef XEN
-+typedef       s64 time_t;
-+typedef       s64 suseconds_t;
-+#endif
-+
- #include <asm/param.h>
- #include <linux/types.h>
- 
-@@ -25,7 +30,9 @@
- #ifdef __KERNEL__
- 
- #include <linux/spinlock.h>
-+#ifndef XEN
- #include <linux/seqlock.h>
-+#endif
- #include <linux/timex.h>
- #include <asm/div64.h>
- #ifndef div_long_long_rem
-@@ -322,7 +329,9 @@
- 
- extern struct timespec xtime;
- extern struct timespec wall_to_monotonic;
-+#ifndef XEN
- extern seqlock_t xtime_lock;
-+#endif
- 
- static inline unsigned long get_seconds(void)
- { 
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/mca_asm.h
--- a/xen/arch/ia64/patch/linux-2.6.7/mca_asm.h Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,32 +0,0 @@
---- ../../linux-2.6.7/include/asm-ia64/mca_asm.h       2004-06-15 23:20:03.000000000 -0600
-+++ include/asm-ia64/mca_asm.h 2005-04-01 12:56:37.000000000 -0700
-@@ -26,8 +26,13 @@
-  * direct mapped to physical addresses.
-  *    1. Lop off bits 61 thru 63 in the virtual address
-  */
-+#ifdef XEN
-+#define INST_VA_TO_PA(addr)                                                   
\
-+      dep     addr    = 0, addr, 60, 4
-+#else // XEN
- #define INST_VA_TO_PA(addr)                                                   
\
-       dep     addr    = 0, addr, 61, 3
-+#endif // XEN
- /*
-  * This macro converts a data virtual address to a physical address
-  * Right now for simulation purposes the virtual addresses are
-@@ -42,9 +47,15 @@
-  * direct mapped to physical addresses.
-  *    1. Put 0x7 in bits 61 thru 63.
-  */
-+#ifdef XEN
-+#define DATA_PA_TO_VA(addr,temp)                                              
        \
-+      mov     temp    = 0xf   ;;                                              
        \
-+      dep     addr    = temp, addr, 60, 4
-+#else // XEN
- #define DATA_PA_TO_VA(addr,temp)                                              
        \
-       mov     temp    = 0x7   ;;                                              
        \
-       dep     addr    = temp, addr, 61, 3
-+#endif // XEN
- 
- /*
-  * This macro jumps to the instruction at the given virtual address
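
The mca_asm.h hunk above is the same region-width change seen elsewhere in this patch: Xen treats bits 60..63 as the region field (region 0xf) where stock Linux uses bits 61..63 (region 0x7), so the dep immediates change from 61,3 to 60,4. A C sketch of the two macros as patched, assuming the physical address already fits below bit 60:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the patched INST_VA_TO_PA / DATA_PA_TO_VA macros above. */
    static uint64_t inst_va_to_pa(uint64_t va)
    {
        return va & ~(0xfUL << 60);     /* dep addr=0,addr,60,4: clear bits 60..63 */
    }

    static uint64_t data_pa_to_va(uint64_t pa)
    {
        return pa | (0xfUL << 60);      /* dep addr=0xf,addr,60,4: set region 0xf */
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)data_pa_to_va(0x1000));
        printf("%#llx\n", (unsigned long long)inst_va_to_pa(0xf000000000001000UL));
        return 0;
    }
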
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/minstate.h
--- a/xen/arch/ia64/patch/linux-2.6.7/minstate.h        Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,29 +0,0 @@
---- ../../linux-2.6.7/arch/ia64/kernel/minstate.h      2004-06-15 23:19:52.000000000 -0600
-+++ arch/ia64/minstate.h       2005-04-01 12:56:01.000000000 -0700
-@@ -45,7 +45,7 @@
- (pKStk) tpa r1=sp;                            /* compute physical addr of sp  
*/              \
- (pUStk)       addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;   /* compute base 
of memory stack */      \
- (pUStk)       mov r23=ar.bspstore;                            /* save 
ar.bspstore */                  \
--(pUStk)       dep r22=-1,r22,61,3;                    /* compute kernel 
virtual addr of RBS */        \
-+(pUStk)       dep r22=-1,r22,60,4;                    /* compute kernel 
virtual addr of RBS */        \
-       ;;                                                                      
                \
- (pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;                /* if in kernel mode, 
use sp (r12) */           \
- (pUStk)       mov ar.bspstore=r22;                    /* switch to kernel RBS 
*/                      \
-@@ -65,7 +65,7 @@
- #endif
- 
- #ifdef MINSTATE_PHYS
--# define MINSTATE_GET_CURRENT(reg)    mov reg=IA64_KR(CURRENT);; dep 
reg=0,reg,61,3
-+# define MINSTATE_GET_CURRENT(reg)    mov reg=IA64_KR(CURRENT);; dep 
reg=0,reg,60,4
- # define MINSTATE_START_SAVE_MIN      MINSTATE_START_SAVE_MIN_PHYS
- # define MINSTATE_END_SAVE_MIN                MINSTATE_END_SAVE_MIN_PHYS
- #endif
-@@ -172,7 +172,7 @@
-       ;;                                                                      
                \
- .mem.offset 0,0; st8.spill [r16]=r15,16;                                      
                \
- .mem.offset 8,0; st8.spill [r17]=r14,16;                                      
                \
--      dep r14=-1,r0,61,3;                                                     
                \
-+      dep r14=-1,r0,60,4;                                                     
                \
-       ;;                                                                      
                \
- .mem.offset 0,0; st8.spill [r16]=r2,16;                                       
                        \
- .mem.offset 8,0; st8.spill [r17]=r3,16;                                       
                        \
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/mm_bootmem.c
--- a/xen/arch/ia64/patch/linux-2.6.7/mm_bootmem.c      Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,92 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/mm/bootmem.c        2004-06-15 23:19:09.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/mm_bootmem.c  2004-12-17 13:47:03.000000000 -0700
-@@ -10,7 +10,9 @@
-  */
- 
- #include <linux/mm.h>
-+#ifndef XEN
- #include <linux/kernel_stat.h>
-+#endif
- #include <linux/swap.h>
- #include <linux/interrupt.h>
- #include <linux/init.h>
-@@ -55,6 +57,9 @@
-       bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT);
-       bdata->node_boot_start = (start << PAGE_SHIFT);
-       bdata->node_low_pfn = end;
-+#ifdef XEN
-+//printk("init_bootmem_core: 
mapstart=%lx,start=%lx,end=%lx,bdata->node_bootmem_map=%lx,bdata->node_boot_start=%lx,bdata->node_low_pfn=%lx\n",mapstart,start,end,bdata->node_bootmem_map,bdata->node_boot_start,bdata->node_low_pfn);
-+#endif
- 
-       /*
-        * Initially all pages are reserved - setup_arch() has to
-@@ -146,6 +151,9 @@
-       unsigned long i, start = 0, incr, eidx;
-       void *ret;
- 
-+#ifdef XEN
-+//printf("__alloc_bootmem_core(%lx,%lx,%lx,%lx) 
called\n",bdata,size,align,goal);
-+#endif
-       if(!size) {
-               printk("__alloc_bootmem_core(): zero-sized request\n");
-               BUG();
-@@ -153,6 +161,9 @@
-       BUG_ON(align & (align-1));
- 
-       eidx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
-+#ifdef XEN
-+//printf("__alloc_bootmem_core: eidx=%lx\n",eidx);
-+#endif
-       offset = 0;
-       if (align &&
-           (bdata->node_boot_start & (align - 1UL)) != 0)
-@@ -182,6 +193,9 @@
-               unsigned long j;
-               i = find_next_zero_bit(bdata->node_bootmem_map, eidx, i);
-               i = ALIGN(i, incr);
-+#ifdef XEN
-+//if (i >= eidx) goto fail_block;
-+#endif
-               if (test_bit(i, bdata->node_bootmem_map))
-                       continue;
-               for (j = i + 1; j < i + areasize; ++j) {
-@@ -203,6 +217,9 @@
-       return NULL;
- 
- found:
-+#ifdef XEN
-+//printf("__alloc_bootmem_core: start=%lx\n",start);
-+#endif
-       bdata->last_success = start << PAGE_SHIFT;
-       BUG_ON(start >= eidx);
- 
-@@ -262,6 +279,9 @@
-       page = virt_to_page(phys_to_virt(bdata->node_boot_start));
-       idx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
-       map = bdata->node_bootmem_map;
-+#ifdef XEN
-+//printk("free_all_bootmem_core: bdata=%lx, bdata->node_boot_start=%lx, 
bdata->node_low_pfn=%lx, 
bdata->node_bootmem_map=%lx\n",bdata,bdata->node_boot_start,bdata->node_low_pfn,bdata->node_bootmem_map);
-+#endif
-       for (i = 0; i < idx; ) {
-               unsigned long v = ~map[i / BITS_PER_LONG];
-               if (v) {
-@@ -285,6 +305,9 @@
-        * Now free the allocator bitmap itself, it's not
-        * needed anymore:
-        */
-+#ifdef XEN
-+//printk("About to free the allocator bitmap itself\n");
-+#endif
-       page = virt_to_page(bdata->node_bootmem_map);
-       count = 0;
-       for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> 
PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) {
-@@ -327,6 +350,9 @@
-       return(init_bootmem_core(&contig_page_data, start, 0, pages));
- }
- 
-+#ifdef XEN
-+#undef reserve_bootmem
-+#endif
- #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
- void __init reserve_bootmem (unsigned long addr, unsigned long size)
- {
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/mm_contig.c
--- a/xen/arch/ia64/patch/linux-2.6.7/mm_contig.c       Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,216 +0,0 @@
---- ../../linux-2.6.7/arch/ia64/mm/contig.c    2004-06-15 23:19:12.000000000 -0600
-+++ arch/ia64/mm_contig.c      2005-03-23 14:54:06.000000000 -0700
-@@ -15,11 +15,21 @@
-  * memory.
-  */
- #include <linux/config.h>
-+#ifdef XEN
-+#include <xen/sched.h>
-+#endif
- #include <linux/bootmem.h>
- #include <linux/efi.h>
- #include <linux/mm.h>
- #include <linux/swap.h>
- 
-+#ifdef XEN
-+#undef reserve_bootmem
-+extern struct page *zero_page_memmap_ptr;
-+struct page *mem_map;
-+#define MAX_DMA_ADDRESS ~0UL  // FIXME???
-+#endif
-+
- #include <asm/meminit.h>
- #include <asm/pgalloc.h>
- #include <asm/pgtable.h>
-@@ -37,30 +47,7 @@
- void
- show_mem (void)
- {
--      int i, total = 0, reserved = 0;
--      int shared = 0, cached = 0;
--
--      printk("Mem-info:\n");
--      show_free_areas();
--
--      printk("Free swap:       %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
--      i = max_mapnr;
--      while (i-- > 0) {
--              if (!pfn_valid(i))
--                      continue;
--              total++;
--              if (PageReserved(mem_map+i))
--                      reserved++;
--              else if (PageSwapCache(mem_map+i))
--                      cached++;
--              else if (page_count(mem_map + i))
--                      shared += page_count(mem_map + i) - 1;
--      }
--      printk("%d pages of RAM\n", total);
--      printk("%d reserved pages\n", reserved);
--      printk("%d pages shared\n", shared);
--      printk("%d pages swap cached\n", cached);
--      printk("%ld pages in page table cache\n", pgtable_cache_size);
-+      printk("Dummy show_mem\n");
- }
- 
- /* physical address where the bootmem map is located */
-@@ -80,6 +67,9 @@
- {
-       unsigned long *max_pfnp = arg, pfn;
- 
-+#ifdef XEN
-+//printf("find_max_pfn: start=%lx, end=%lx, *arg=%lx\n",start,end,*(unsigned 
long *)arg);
-+#endif
-       pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
-       if (pfn > *max_pfnp)
-               *max_pfnp = pfn;
-@@ -133,41 +123,6 @@
-       return 0;
- }
- 
--/**
-- * find_memory - setup memory map
-- *
-- * Walk the EFI memory map and find usable memory for the system, taking
-- * into account reserved areas.
-- */
--void
--find_memory (void)
--{
--      unsigned long bootmap_size;
--
--      reserve_memory();
--
--      /* first find highest page frame number */
--      max_pfn = 0;
--      efi_memmap_walk(find_max_pfn, &max_pfn);
--
--      /* how many bytes to cover all the pages */
--      bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
--
--      /* look for a location to hold the bootmap */
--      bootmap_start = ~0UL;
--      efi_memmap_walk(find_bootmap_location, &bootmap_size);
--      if (bootmap_start == ~0UL)
--              panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
--
--      bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);
--
--      /* Free all available memory, then mark bootmem-map as being in use. */
--      efi_memmap_walk(filter_rsvd_memory, free_bootmem);
--      reserve_bootmem(bootmap_start, bootmap_size);
--
--      find_initrd();
--}
--
- #ifdef CONFIG_SMP
- /**
-  * per_cpu_init - setup per-cpu variables
-@@ -227,73 +182,42 @@
- void
- paging_init (void)
- {
--      unsigned long max_dma;
--      unsigned long zones_size[MAX_NR_ZONES];
--#ifdef CONFIG_VIRTUAL_MEM_MAP
--      unsigned long zholes_size[MAX_NR_ZONES];
--      unsigned long max_gap;
--#endif
--
--      /* initialize mem_map[] */
-+      struct pfn_info *pg;
-+      /* Allocate and map the machine-to-phys table */
-+      if ((pg = alloc_domheap_pages(NULL, 10)) == NULL)
-+              panic("Not enough memory to bootstrap Xen.\n");
-+      memset(page_to_virt(pg), 0x55, 16UL << 20);
- 
--      memset(zones_size, 0, sizeof(zones_size));
-+      /* Other mapping setup */
- 
--      num_physpages = 0;
--      efi_memmap_walk(count_pages, &num_physpages);
- 
--      max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
--
--#ifdef CONFIG_VIRTUAL_MEM_MAP
--      memset(zholes_size, 0, sizeof(zholes_size));
--
--      num_dma_physpages = 0;
--      efi_memmap_walk(count_dma_pages, &num_dma_physpages);
--
--      if (max_low_pfn < max_dma) {
--              zones_size[ZONE_DMA] = max_low_pfn;
--              zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
--      } else {
--              zones_size[ZONE_DMA] = max_dma;
--              zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
--              if (num_physpages > num_dma_physpages) {
--                      zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
--                      zholes_size[ZONE_NORMAL] =
--                              ((max_low_pfn - max_dma) -
--                               (num_physpages - num_dma_physpages));
--              }
--      }
--
--      max_gap = 0;
--      efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
--      if (max_gap < LARGE_GAP) {
--              vmem_map = (struct page *) 0;
--              free_area_init_node(0, &contig_page_data, NULL, zones_size, 0,
--                                  zholes_size);
--              mem_map = contig_page_data.node_mem_map;
--      } else {
--              unsigned long map_size;
--
--              /* allocate virtual_mem_map */
--
--              map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
--              vmalloc_end -= map_size;
--              vmem_map = (struct page *) vmalloc_end;
--              efi_memmap_walk(create_mem_map_page_table, 0);
--
--              free_area_init_node(0, &contig_page_data, vmem_map, zones_size,
--                                  0, zholes_size);
--
--              mem_map = contig_page_data.node_mem_map;
--              printk("Virtual mem_map starts at 0x%p\n", mem_map);
--      }
--#else /* !CONFIG_VIRTUAL_MEM_MAP */
--      if (max_low_pfn < max_dma)
--              zones_size[ZONE_DMA] = max_low_pfn;
--      else {
--              zones_size[ZONE_DMA] = max_dma;
--              zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
--      }
--      free_area_init(zones_size);
--#endif /* !CONFIG_VIRTUAL_MEM_MAP */
-       zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
- }
-+
-+struct pfn_info *frame_table;
-+unsigned long frame_table_size;
-+unsigned long max_page;
-+
-+/* FIXME: postpone support to machines with big holes between physical memory.
-+ * Current hack allows only efi memdesc up to 4G place. (See efi.c)
-+ */ 
-+#ifndef CONFIG_VIRTUAL_MEM_MAP
-+#define FT_ALIGN_SIZE (16UL << 20)
-+void __init init_frametable(void)
-+{
-+      unsigned long i, p;
-+      frame_table_size = max_page * sizeof(struct pfn_info);
-+      frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
-+
-+      /* Request continuous trunk from boot allocator, since HV
-+       * address is identity mapped */
-+      p = alloc_boot_pages(frame_table_size>>PAGE_SHIFT, 
FT_ALIGN_SIZE>>PAGE_SHIFT) << PAGE_SHIFT;
-+      if (p == 0)
-+              panic("Not enough memory for frame table.\n");
-+
-+      frame_table = __va(p);
-+      memset(frame_table, 0, frame_table_size);
-+      printk("size of frame_table: %lukB\n",
-+              frame_table_size >> 10);
-+}
-+#endif
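
For reference, the frame-table sizing done by init_frametable() above is just max_page entries of sizeof(struct pfn_info), rounded up to a whole page before asking the boot allocator for a contiguous run. A small arithmetic sketch, with entry_size standing in for sizeof(struct pfn_info) and a 16KB page size assumed for this ia64 configuration:

    #include <stdio.h>

    /* Arithmetic sketch of init_frametable() above: one entry per machine
     * frame, rounded up to a page boundary (the "& PAGE_MASK" step). */
    static unsigned long frame_table_bytes(unsigned long max_page,
                                           unsigned long entry_size,
                                           unsigned long page_size)
    {
        unsigned long size = max_page * entry_size;
        return (size + page_size - 1) & ~(page_size - 1);
    }

    int main(void)
    {
        /* e.g. 4GB of RAM in 16KB frames with a 64-byte pfn_info entry */
        printf("%lu bytes\n", frame_table_bytes((4UL << 30) / 16384, 64, 16384));
        return 0;
    }
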
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/mmzone.h
--- a/xen/arch/ia64/patch/linux-2.6.7/mmzone.h  Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,14 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/mmzone.h      2004-06-15 23:19:36.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/linux/mmzone.h 2004-08-25 19:28:13.000000000 -0600
-@@ -185,7 +185,11 @@
-       char                    *name;
-       unsigned long           spanned_pages;  /* total size, including holes 
*/
-       unsigned long           present_pages;  /* amount of memory (excluding 
holes) */
-+#ifdef XEN
-+};
-+#else
- } ____cacheline_maxaligned_in_smp;
-+#endif
- 
- 
- /*
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/page.h
--- a/xen/arch/ia64/patch/linux-2.6.7/page.h    Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,84 +0,0 @@
---- ../../linux-2.6.7/include/asm-ia64/page.h  2004-06-15 23:18:58.000000000 -0600
-+++ include/asm-ia64/page.h    2005-04-01 12:56:37.000000000 -0700
-@@ -12,6 +12,9 @@
- #include <asm/intrinsics.h>
- #include <asm/types.h>
- 
-+#ifndef __ASSEMBLY__
-+#include <asm/flushtlb.h>
-+#endif
- /*
-  * PAGE_SHIFT determines the actual kernel page size.
-  */
-@@ -84,14 +87,22 @@
- #endif
- 
- #ifndef CONFIG_DISCONTIGMEM
-+#ifdef XEN
-+#define pfn_valid(pfn)                (0)
-+#else
- #define pfn_valid(pfn)                (((pfn) < max_mapnr) && 
ia64_pfn_valid(pfn))
--#define page_to_pfn(page)     ((unsigned long) (page - mem_map))
--#define pfn_to_page(pfn)      (mem_map + (pfn))
-+#endif
- #endif /* CONFIG_DISCONTIGMEM */
- 
--#define page_to_phys(page)    (page_to_pfn(page) << PAGE_SHIFT)
-+#define page_to_pfn(_page)  ((unsigned long)((_page) - frame_table))
-+#define page_to_virt(_page) phys_to_virt(page_to_phys(_page))
-+
-+#define page_to_phys(_page)   (page_to_pfn(_page) << PAGE_SHIFT)
- #define virt_to_page(kaddr)   pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
- 
-+#define pfn_to_page(_pfn)     (frame_table + (_pfn))
-+#define phys_to_page(kaddr)   pfn_to_page(((kaddr) >> PAGE_SHIFT))
-+
- typedef union ia64_va {
-       struct {
-               unsigned long off : 61;         /* intra-region offset */
-@@ -107,8 +118,25 @@
-  * expressed in this way to ensure they result in a single "dep"
-  * instruction.
-  */
-+#ifdef XEN
-+typedef union xen_va {
-+      struct {
-+              unsigned long off : 60;
-+              unsigned long reg : 4;
-+      } f;
-+      unsigned long l;
-+      void *p;
-+} xen_va;
-+
-+// xen/drivers/console.c uses __va in a declaration (should be fixed!)
-+#define __pa(x)               ({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; 
_v.l;})
-+#define __va(x)               ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; 
_v.p;})
-+//# define __pa(x)    ((unsigned long)(((unsigned long)x) - PAGE_OFFSET))
-+//# define __va(x)    ((void *)((char *)(x) + PAGE_OFFSET))
-+#else
- #define __pa(x)               ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; 
_v.l;})
- #define __va(x)               ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; 
_v.p;})
-+#endif
- 
- #define REGION_NUMBER(x)      ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
- #define REGION_OFFSET(x)      ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
-@@ -180,11 +208,19 @@
- # define __pgprot(x)  (x)
- #endif /* !STRICT_MM_TYPECHECKS */
- 
-+#ifdef XEN
-+#define PAGE_OFFSET                   0xf000000000000000
-+#else
- #define PAGE_OFFSET                   0xe000000000000000
-+#endif
- 
- #define VM_DATA_DEFAULT_FLAGS         (VM_READ | VM_WRITE |                   
                \
-                                        VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC 
|                \
-                                        (((current->thread.flags & 
IA64_THREAD_XSTACK) != 0)   \
-                                         ? VM_EXEC : 0))
- 
-+#ifdef XEN
-+#define __flush_tlb() do {} while(0);
-+#endif
-+
- #endif /* _ASM_IA64_PAGE_H */
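
The xen_va union introduced above is the core of the new __pa()/__va(): an address is a 60-bit intra-region offset plus a 4-bit region number, so __pa() zeroes the region field and __va() forces it to all ones, which lands in the Xen PAGE_OFFSET region 0xf000000000000000. A compilable sketch of the same trick, assuming GCC-style bit-field layout as the original header does:

    #include <stdio.h>

    /* Sketch of the xen_va union and the __pa/__va expressions above. */
    typedef union {
        struct {
            unsigned long off : 60;   /* intra-region offset */
            unsigned long reg : 4;    /* region number */
        } f;
        unsigned long l;
        void *p;
    } xen_va;

    static unsigned long xen_pa(unsigned long x)
    {
        xen_va v; v.l = x; v.f.reg = 0; return v.l;
    }

    static void *xen_va_of(unsigned long x)
    {
        xen_va v; v.l = x; v.f.reg = -1; return v.p;   /* -1 in 4 bits == 0xf */
    }

    int main(void)
    {
        printf("%#lx\n", xen_pa(0xf000000000001000UL));   /* 0x1000 */
        printf("%p\n", xen_va_of(0x1000UL));              /* 0xf000000000001000 */
        return 0;
    }

Writing -1 into the 4-bit region field stores 0xf, which is why the Xen __va() resolves into the 0xf... region rather than Linux's 0xe... one.
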
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/page_alloc.c
--- a/xen/arch/ia64/patch/linux-2.6.7/page_alloc.c      Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,305 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/mm/page_alloc.c     2004-06-15 23:18:57.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/page_alloc.c  2004-12-17 13:47:03.000000000 -0700
-@@ -19,20 +19,28 @@
- #include <linux/mm.h>
- #include <linux/swap.h>
- #include <linux/interrupt.h>
-+#ifndef XEN
- #include <linux/pagemap.h>
-+#endif
- #include <linux/bootmem.h>
- #include <linux/compiler.h>
- #include <linux/module.h>
-+#ifndef XEN
- #include <linux/suspend.h>
- #include <linux/pagevec.h>
- #include <linux/blkdev.h>
-+#endif
- #include <linux/slab.h>
-+#ifndef XEN
- #include <linux/notifier.h>
-+#endif
- #include <linux/topology.h>
-+#ifndef XEN
- #include <linux/sysctl.h>
- #include <linux/cpu.h>
- 
- #include <asm/tlbflush.h>
-+#endif
- 
- DECLARE_BITMAP(node_online_map, MAX_NUMNODES);
- struct pglist_data *pgdat_list;
-@@ -71,6 +79,9 @@
- 
- static void bad_page(const char *function, struct page *page)
- {
-+#ifdef XEN
-+printk("bad_page: called but disabled\n");
-+#else
-       printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
-               function, current->comm, page);
-       printk(KERN_EMERG "flags:0x%08lx mapping:%p mapcount:%d count:%d\n",
-@@ -91,6 +102,7 @@
-       set_page_count(page, 0);
-       page->mapping = NULL;
-       page->mapcount = 0;
-+#endif
- }
- 
- #ifndef CONFIG_HUGETLB_PAGE
-@@ -218,6 +230,7 @@
- 
- static inline void free_pages_check(const char *function, struct page *page)
- {
-+#ifndef XEN
-       if (    page_mapped(page) ||
-               page->mapping != NULL ||
-               page_count(page) != 0 ||
-@@ -233,6 +246,7 @@
-                       1 << PG_swapcache |
-                       1 << PG_writeback )))
-               bad_page(function, page);
-+#endif
-       if (PageDirty(page))
-               ClearPageDirty(page);
- }
-@@ -276,6 +290,9 @@
- 
- void __free_pages_ok(struct page *page, unsigned int order)
- {
-+#ifdef XEN
-+printk("__free_pages_ok: called but disabled\n");
-+#else
-       LIST_HEAD(list);
-       int i;
- 
-@@ -285,6 +302,7 @@
-       list_add(&page->lru, &list);
-       kernel_map_pages(page, 1<<order, 0);
-       free_pages_bulk(page_zone(page), 1, &list, order);
-+#endif
- }
- 
- #define MARK_USED(index, order, area) \
-@@ -330,6 +348,7 @@
-  */
- static void prep_new_page(struct page *page, int order)
- {
-+#ifndef XEN
-       if (page->mapping || page_mapped(page) ||
-           (page->flags & (
-                       1 << PG_private |
-@@ -343,11 +362,14 @@
-                       1 << PG_swapcache |
-                       1 << PG_writeback )))
-               bad_page(__FUNCTION__, page);
-+#endif
- 
-       page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
-                       1 << PG_referenced | 1 << PG_arch_1 |
-                       1 << PG_checked | 1 << PG_mappedtodisk);
-+#ifndef XEN
-       page->private = 0;
-+#endif
-       set_page_refs(page, order);
- }
- 
-@@ -590,13 +612,17 @@
-       unsigned long min;
-       struct zone **zones;
-       struct page *page;
-+#ifndef XEN
-       struct reclaim_state reclaim_state;
-+#endif
-       struct task_struct *p = current;
-       int i;
-       int alloc_type;
-       int do_retry;
- 
-+#ifndef XEN
-       might_sleep_if(wait);
-+#endif
- 
-       zones = zonelist->zones;  /* the list of zones suitable for gfp_mask */
-       if (zones[0] == NULL)     /* no zones in the zonelist */
-@@ -610,12 +636,14 @@
- 
-               min = (1<<order) + z->protection[alloc_type];
- 
-+#ifndef XEN
-               /*
-                * We let real-time tasks dip their real-time paws a little
-                * deeper into reserves.
-                */
-               if (rt_task(p))
-                       min -= z->pages_low >> 1;
-+#endif
- 
-               if (z->free_pages >= min ||
-                               (!wait && z->free_pages >= z->pages_high)) {
-@@ -627,9 +655,11 @@
-               }
-       }
- 
-+#ifndef XEN
-       /* we're somewhat low on memory, failed to find what we needed */
-       for (i = 0; zones[i] != NULL; i++)
-               wakeup_kswapd(zones[i]);
-+#endif
- 
-       /* Go through the zonelist again, taking __GFP_HIGH into account */
-       for (i = 0; zones[i] != NULL; i++) {
-@@ -639,8 +669,10 @@
- 
-               if (gfp_mask & __GFP_HIGH)
-                       min -= z->pages_low >> 2;
-+#ifndef XEN
-               if (rt_task(p))
-                       min -= z->pages_low >> 1;
-+#endif
- 
-               if (z->free_pages >= min ||
-                               (!wait && z->free_pages >= z->pages_high)) {
-@@ -654,6 +686,7 @@
- 
-       /* here we're in the low on memory slow path */
- 
-+#ifndef XEN
- rebalance:
-       if ((p->flags & (PF_MEMALLOC | PF_MEMDIE)) && !in_interrupt()) {
-               /* go through the zonelist yet again, ignoring mins */
-@@ -681,6 +714,7 @@
- 
-       p->reclaim_state = NULL;
-       p->flags &= ~PF_MEMALLOC;
-+#endif
- 
-       /* go through the zonelist yet one more time */
-       for (i = 0; zones[i] != NULL; i++) {
-@@ -698,6 +732,11 @@
-               }
-       }
- 
-+#ifdef XEN
-+printk(KERN_WARNING "%s: page allocation failure."
-+                      " order:%d, mode:0x%x\n",
-+                      "(xen tasks have no comm)", order, gfp_mask);
-+#else
-       /*
-        * Don't let big-order allocations loop unless the caller explicitly
-        * requests that.  Wait for some write requests to complete then retry.
-@@ -724,6 +763,7 @@
-                       p->comm, order, gfp_mask);
-               dump_stack();
-       }
-+#endif
-       return NULL;
- got_pg:
-       kernel_map_pages(page, 1 << order, 1);
-@@ -808,6 +848,7 @@
- 
- EXPORT_SYMBOL(get_zeroed_page);
- 
-+#ifndef XEN
- void __pagevec_free(struct pagevec *pvec)
- {
-       int i = pagevec_count(pvec);
-@@ -815,10 +856,15 @@
-       while (--i >= 0)
-               free_hot_cold_page(pvec->pages[i], pvec->cold);
- }
-+#endif
- 
- fastcall void __free_pages(struct page *page, unsigned int order)
- {
-+#ifdef XEN
-+      if (!PageReserved(page)) {
-+#else
-       if (!PageReserved(page) && put_page_testzero(page)) {
-+#endif
-               if (order == 0)
-                       free_hot_page(page);
-               else
-@@ -914,6 +960,13 @@
-       return nr_free_zone_pages(GFP_HIGHUSER & GFP_ZONEMASK);
- }
- 
-+#ifdef XEN
-+unsigned int nr_free_highpages (void)
-+{
-+printf("nr_free_highpages: called but not implemented\n");
-+}
-+#endif
-+
- #ifdef CONFIG_HIGHMEM
- unsigned int nr_free_highpages (void)
- {
-@@ -1022,6 +1075,7 @@
- 
- void si_meminfo(struct sysinfo *val)
- {
-+#ifndef XEN
-       val->totalram = totalram_pages;
-       val->sharedram = 0;
-       val->freeram = nr_free_pages();
-@@ -1034,6 +1088,7 @@
-       val->freehigh = 0;
- #endif
-       val->mem_unit = PAGE_SIZE;
-+#endif
- }
- 
- EXPORT_SYMBOL(si_meminfo);
-@@ -1165,7 +1220,9 @@
-               printk("= %lukB\n", K(total));
-       }
- 
-+#ifndef XEN
-       show_swap_cache_info();
-+#endif
- }
- 
- /*
-@@ -1530,6 +1587,9 @@
-               zone->wait_table_size = wait_table_size(size);
-               zone->wait_table_bits =
-                       wait_table_bits(zone->wait_table_size);
-+#ifdef XEN
-+//printf("free_area_init_core-1: calling 
alloc_bootmem_node(%lx,%lx)\n",pgdat,zone->wait_table_size * 
sizeof(wait_queue_head_t));
-+#endif
-               zone->wait_table = (wait_queue_head_t *)
-                       alloc_bootmem_node(pgdat, zone->wait_table_size
-                                               * sizeof(wait_queue_head_t));
-@@ -1584,6 +1644,9 @@
-                        */
-                       bitmap_size = (size-1) >> (i+4);
-                       bitmap_size = LONG_ALIGN(bitmap_size+1);
-+#ifdef XEN
-+//printf("free_area_init_core-2: calling 
alloc_bootmem_node(%lx,%lx)\n",pgdat, bitmap_size);
-+#endif
-                       zone->free_area[i].map = 
-                         (unsigned long *) alloc_bootmem_node(pgdat, 
bitmap_size);
-               }
-@@ -1601,6 +1664,9 @@
-       calculate_zone_totalpages(pgdat, zones_size, zholes_size);
-       if (!node_mem_map) {
-               size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
-+#ifdef XEN
-+//printf("free_area_init_node: calling 
alloc_bootmem_node(%lx,%lx)\n",pgdat,size);
-+#endif
-               node_mem_map = alloc_bootmem_node(pgdat, size);
-       }
-       pgdat->node_mem_map = node_mem_map;
-@@ -1784,6 +1850,7 @@
- 
- #endif /* CONFIG_PROC_FS */
- 
-+#ifndef XEN
- #ifdef CONFIG_HOTPLUG_CPU
- static int page_alloc_cpu_notify(struct notifier_block *self,
-                                unsigned long action, void *hcpu)
-@@ -2011,3 +2078,4 @@
-       setup_per_zone_protection();
-       return 0;
- }
-+#endif
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/pal.S
--- a/xen/arch/ia64/patch/linux-2.6.7/pal.S     Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,26 +0,0 @@
---- ../../linux-2.6.7/arch/ia64/kernel/pal.S   2004-06-15 23:20:25.000000000 -0600
-+++ arch/ia64/pal.S    2005-04-01 12:56:01.000000000 -0700
-@@ -163,7 +163,11 @@
-       adds r8  = 1f-1b,r8             // calculate return address for call
-       ;;
-       mov loc4=ar.rsc                 // save RSE configuration
-+#ifdef XEN
-+      dep.z loc2=loc2,0,60            // convert pal entry point to physical
-+#else // XEN
-       dep.z loc2=loc2,0,61            // convert pal entry point to physical
-+#endif // XEN
-       tpa r8=r8                       // convert rp to physical
-       ;;
-       mov b7 = loc2                   // install target to branch reg
-@@ -218,7 +222,11 @@
-       mov loc3 = psr          // save psr
-       ;;
-       mov loc4=ar.rsc                 // save RSE configuration
-+#ifdef XEN
-+      dep.z loc2=loc2,0,60            // convert pal entry point to physical
-+#else // XEN
-       dep.z loc2=loc2,0,61            // convert pal entry point to physical
-+#endif // XEN
-       ;;
-       mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
-       movl r16=PAL_PSR_BITS_TO_CLEAR
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/pgalloc.h
--- a/xen/arch/ia64/patch/linux-2.6.7/pgalloc.h Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,65 +0,0 @@
---- ../../linux-2.6.7/include/asm-ia64/pgalloc.h       2004-06-15 23:18:54.000000000 -0600
-+++ include/asm-ia64/pgalloc.h 2005-03-23 14:54:11.000000000 -0700
-@@ -34,6 +34,10 @@
- #define pmd_quicklist         (local_cpu_data->pmd_quick)
- #define pgtable_cache_size    (local_cpu_data->pgtable_cache_sz)
- 
-+/* FIXME: Later 3 level page table should be over, to create 
-+ * new interface upon xen memory allocator. To simplify first
-+ * effort moving to xen allocator, use xenheap pages temporarily. 
-+ */
- static inline pgd_t*
- pgd_alloc_one_fast (struct mm_struct *mm)
- {
-@@ -55,7 +59,7 @@
-       pgd_t *pgd = pgd_alloc_one_fast(mm);
- 
-       if (unlikely(pgd == NULL)) {
--              pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
-+              pgd = (pgd_t *)alloc_xenheap_page();
-               if (likely(pgd != NULL))
-                       clear_page(pgd);
-       }
-@@ -93,7 +97,7 @@
- static inline pmd_t*
- pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
- {
--      pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-+      pmd_t *pmd = (pmd_t *)alloc_xenheap_page();
- 
-       if (likely(pmd != NULL))
-               clear_page(pmd);
-@@ -125,7 +129,7 @@
- static inline struct page *
- pte_alloc_one (struct mm_struct *mm, unsigned long addr)
- {
--      struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
-+      struct page *pte = alloc_xenheap_page();
- 
-       if (likely(pte != NULL))
-               clear_page(page_address(pte));
-@@ -135,7 +139,7 @@
- static inline pte_t *
- pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
- {
--      pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-+      pte_t *pte = (pte_t *)alloc_xenheap_page();
- 
-       if (likely(pte != NULL))
-               clear_page(pte);
-@@ -145,13 +149,13 @@
- static inline void
- pte_free (struct page *pte)
- {
--      __free_page(pte);
-+      free_xenheap_page(pte);
- }
- 
- static inline void
- pte_free_kernel (pte_t *pte)
- {
--      free_page((unsigned long) pte);
-+      free_xenheap_page((unsigned long) pte);
- }
- 
- #define __pte_free_tlb(tlb, pte)      tlb_remove_page((tlb), (pte))
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/processor.h
--- a/xen/arch/ia64/patch/linux-2.6.7/processor.h       Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,19 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/processor.h    2005-01-23 13:23:36.000000000 -0700
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/processor.h    2004-08-25 19:28:13.000000000 -0600
-@@ -406,12 +406,16 @@
-  */
- 
- /* Return TRUE if task T owns the fph partition of the CPU we're running on. 
*/
-+#ifdef XEN
-+#define ia64_is_local_fpu_owner(t) 0
-+#else
- #define ia64_is_local_fpu_owner(t)                                            
                \
- ({                                                                            
                \
-       struct task_struct *__ia64_islfo_task = (t);                            
                \
-       (__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()           
                \
-        && __ia64_islfo_task == (struct task_struct *) 
ia64_get_kr(IA64_KR_FPU_OWNER));        \
- })
-+#endif
- 
- /* Mark task T as owning the fph partition of the CPU we're running on. */
- #define ia64_set_local_fpu_owner(t) do {                                      
        \
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/sal.h
--- a/xen/arch/ia64/patch/linux-2.6.7/sal.h     Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,26 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/sal.h      2004-06-15 23:20:04.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/sal.h  2004-10-27 13:55:23.000000000 -0600
-@@ -646,7 +646,23 @@
- {
-       struct ia64_sal_retval isrv;
- 
-+//#ifdef XEN
-+#if 0
-+      unsigned long *x = (unsigned long *)ia64_sal;
-+      unsigned long *inst = (unsigned long *)*x;
-+      unsigned long __ia64_sc_flags;
-+      struct ia64_fpreg __ia64_sc_fr[6];
-+printf("ia64_sal_freq_base: about to save_scratch_fpregs\n");
-+      ia64_save_scratch_fpregs(__ia64_sc_fr);
-+      spin_lock_irqsave(&sal_lock, __ia64_sc_flags);
-+printf("ia64_sal_freq_base: about to call, ia64_sal=%p, ia64_sal[0]=%p, 
ia64_sal[1]=%p\n",x,x[0],x[1]);
-+printf("first inst=%p,%p\n",inst[0],inst[1]);
-+      isrv = (*ia64_sal)(SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
-+      spin_unlock_irqrestore(&sal_lock, __ia64_sc_flags);
-+      ia64_load_scratch_fpregs(__ia64_sc_fr);
-+#else
-       SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
-+#endif
-       *ticks_per_second = isrv.v0;
-       *drift_info = isrv.v1;
-       return isrv.status;
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/setup.c
--- a/xen/arch/ia64/patch/linux-2.6.7/setup.c   Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,203 +0,0 @@
---- ../../linux-2.6.7/arch/ia64/kernel/setup.c 2004-06-15 23:18:58.000000000 -0600
-+++ arch/ia64/setup.c  2005-04-04 22:31:09.000000000 -0600
-@@ -21,6 +21,9 @@
- #include <linux/init.h>
- 
- #include <linux/acpi.h>
-+#ifdef XEN
-+#include <xen/sched.h>
-+#endif
- #include <linux/bootmem.h>
- #include <linux/console.h>
- #include <linux/delay.h>
-@@ -30,13 +33,17 @@
- #include <linux/seq_file.h>
- #include <linux/string.h>
- #include <linux/threads.h>
-+#ifndef XEN
- #include <linux/tty.h>
- #include <linux/serial.h>
- #include <linux/serial_core.h>
-+#endif
- #include <linux/efi.h>
- #include <linux/initrd.h>
- 
-+#ifndef XEN
- #include <asm/ia32.h>
-+#endif
- #include <asm/machvec.h>
- #include <asm/mca.h>
- #include <asm/meminit.h>
-@@ -50,6 +57,11 @@
- #include <asm/smp.h>
- #include <asm/system.h>
- #include <asm/unistd.h>
-+#ifdef XEN
-+#include <linux/mm.h>
-+#include <asm/mmu_context.h>
-+extern unsigned long loops_per_jiffy; // from linux/init/main.c
-+#endif
- 
- #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
- # error "struct cpuinfo_ia64 too big!"
-@@ -65,7 +77,9 @@
- DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
- unsigned long ia64_cycles_per_usec;
- struct ia64_boot_param *ia64_boot_param;
-+#ifndef XEN
- struct screen_info screen_info;
-+#endif
- 
- unsigned long ia64_max_cacheline_size;
- unsigned long ia64_iobase;    /* virtual address for I/O accesses */
-@@ -98,7 +112,6 @@
- struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
- int num_rsvd_regions;
- 
--
- /*
-  * Filter incoming memory segments based on the primitive map created from 
the boot
-  * parameters. Segments contained in the map are removed from the memory 
ranges. A
-@@ -128,9 +141,12 @@
-       for (i = 0; i < num_rsvd_regions; ++i) {
-               range_start = max(start, prev_start);
-               range_end   = min(end, rsvd_region[i].start);
--
--              if (range_start < range_end)
--                      call_pernode_memory(__pa(range_start), range_end - 
range_start, func);
-+              /* init_boot_pages requires "ps, pe" */
-+              if (range_start < range_end) {
-+                      printk("Init boot pages: 0x%lx -> 0x%lx.\n",
-+                              __pa(range_start), __pa(range_end));
-+                      (*func)(__pa(range_start), __pa(range_end), 0);
-+              }
- 
-               /* nothing more available in this segment */
-               if (range_end == end) return 0;
-@@ -187,17 +203,17 @@
-                               + strlen(__va(ia64_boot_param->command_line)) + 
1);
-       n++;
- 
-+      /* Reserve xen image/bitmap/xen-heap */
-       rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
--      rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
-+      rsvd_region[n].end   = rsvd_region[n].start + xenheap_size;
-       n++;
- 
--#ifdef CONFIG_BLK_DEV_INITRD
-+      /* This is actually dom0 image */
-       if (ia64_boot_param->initrd_start) {
-               rsvd_region[n].start = (unsigned 
long)__va(ia64_boot_param->initrd_start);
-               rsvd_region[n].end   = rsvd_region[n].start + 
ia64_boot_param->initrd_size;
-               n++;
-       }
--#endif
- 
-       /* end of memory marker */
-       rsvd_region[n].start = ~0UL;
-@@ -207,6 +223,16 @@
-       num_rsvd_regions = n;
- 
-       sort_regions(rsvd_region, num_rsvd_regions);
-+
-+      {
-+              int i;
-+              printk("Reserved regions: \n");
-+              for (i = 0; i < num_rsvd_regions; i++)
-+                      printk("  [%d] -> [0x%lx, 0x%lx]\n",
-+                              i,
-+                              rsvd_region[i].start,
-+                              rsvd_region[i].end);
-+      }
- }
- 
- /**
-@@ -280,23 +306,26 @@
- }
- #endif
- 
-+#ifdef XEN
- void __init
--setup_arch (char **cmdline_p)
-+early_setup_arch(char **cmdline_p)
- {
-       unw_init();
--
--      ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) 
__end___vtop_patchlist);
--
-+      
-       *cmdline_p = __va(ia64_boot_param->command_line);
-       strlcpy(saved_command_line, *cmdline_p, sizeof(saved_command_line));
--
-+      cmdline_parse(*cmdline_p);
-+      
-       efi_init();
--      io_port_init();
--
-+      
- #ifdef CONFIG_IA64_GENERIC
-       machvec_init(acpi_get_sysname());
- #endif
- 
-+#ifdef XEN
-+#undef CONFIG_ACPI_BOOT
-+#endif
-+
- #ifdef CONFIG_ACPI_BOOT
-       /* Initialize the ACPI boot-time table parser */
-       acpi_table_init();
-@@ -308,9 +337,13 @@
-       smp_build_cpu_map();    /* happens, e.g., with the Ski simulator */
- # endif
- #endif /* CONFIG_APCI_BOOT */
-+      io_port_init();
-+}
-+#endif
- 
--      find_memory();
--
-+void __init
-+setup_arch (void)
-+{
-       /* process SAL system table: */
-       ia64_sal_init(efi.sal_systab);
- 
-@@ -353,7 +386,6 @@
-       /* enable IA-64 Machine Check Abort Handling */
-       ia64_mca_init();
- 
--      platform_setup(cmdline_p);
-       paging_init();
- }
- 
-@@ -413,6 +445,9 @@
-               sprintf(cp, " 0x%lx", mask);
-       }
- 
-+#ifdef XEN
-+#define seq_printf(a,b...) printf(b)
-+#endif
-       seq_printf(m,
-                  "processor  : %d\n"
-                  "vendor     : %s\n"
-@@ -616,7 +651,11 @@
-                                       | IA64_DCR_DA | IA64_DCR_DD | 
IA64_DCR_LC));
-       atomic_inc(&init_mm.mm_count);
-       current->active_mm = &init_mm;
-+#ifdef XEN
-+      if (current->domain->arch.mm)
-+#else
-       if (current->mm)
-+#endif
-               BUG();
- 
-       ia64_mmu_init(ia64_imva(cpu_data));
-@@ -667,6 +706,8 @@
- void
- check_bugs (void)
- {
-+#ifndef XEN
-       ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
-                              (unsigned long) __end___mckinley_e9_bundles);
-+#endif
- }
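
For reference, the setup.c hunks above replace the call_pernode_memory() path with a direct callback because init_boot_pages() wants a physical (start, end) pair. Below is a rough, self-contained illustration of that callback shape; none of it is taken from the changeset, and the range struct, walker, and stub callback are invented for the example.

#include <stdio.h>

/* Stand-in for Xen's init_boot_pages(): takes a physical start/end pair. */
static int init_boot_pages_stub(unsigned long ps, unsigned long pe, int node)
{
        printf("Init boot pages: 0x%lx -> 0x%lx (node %d)\n", ps, pe, node);
        return 0;
}

struct mem_range { unsigned long start, end; };  /* physical addresses */

/* Hand every non-empty range to a (ps, pe) callback, as the patched
 * EFI walker now does instead of going through call_pernode_memory(). */
static void walk_ranges(const struct mem_range *r, int n,
                        int (*func)(unsigned long, unsigned long, int))
{
        int i;
        for (i = 0; i < n; i++)
                if (r[i].start < r[i].end)
                        (*func)(r[i].start, r[i].end, 0);
}

int main(void)
{
        struct mem_range map[] = { { 0x4000000, 0x8000000 },
                                   { 0x9000000, 0x9000000 } };  /* empty range is skipped */
        walk_ranges(map, 2, init_boot_pages_stub);
        return 0;
}
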
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/slab.c
--- a/xen/arch/ia64/patch/linux-2.6.7/slab.c    Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,139 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/mm/slab.c   2004-06-15 23:19:44.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/slab.c        2004-12-17 13:47:03.000000000 -0700
-@@ -86,15 +86,30 @@
- #include      <linux/init.h>
- #include      <linux/compiler.h>
- #include      <linux/seq_file.h>
-+#ifndef XEN
- #include      <linux/notifier.h>
- #include      <linux/kallsyms.h>
- #include      <linux/cpu.h>
- #include      <linux/sysctl.h>
- #include      <linux/module.h>
-+#endif
- 
- #include      <asm/uaccess.h>
- #include      <asm/cacheflush.h>
-+#ifndef XEN
- #include      <asm/tlbflush.h>
-+#endif
-+
-+#ifdef XEN
-+#define lock_cpu_hotplug()    do { } while (0)
-+#define unlock_cpu_hotplug()  do { } while (0)
-+#define might_sleep_if(x)     do { } while (0)
-+#define       dump_stack()            do { } while (0)
-+#define start_cpu_timer(cpu)  do { } while (0)
-+static inline void __down(struct semaphore *sem) { }
-+static inline void __up(struct semaphore *sem) { }
-+static inline void might_sleep(void) { }
-+#endif
- 
- /*
-  * DEBUG      - 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
-@@ -530,7 +545,9 @@
-       FULL
- } g_cpucache_up;
- 
-+#ifndef XEN
- static DEFINE_PER_CPU(struct timer_list, reap_timers);
-+#endif
- 
- static void reap_timer_fnc(unsigned long data);
- static void free_block(kmem_cache_t* cachep, void** objpp, int len);
-@@ -588,6 +605,7 @@
-  * Add the CPU number into the expiry time to minimize the possibility of the
-  * CPUs getting into lockstep and contending for the global cache chain lock.
-  */
-+#ifndef XEN
- static void __devinit start_cpu_timer(int cpu)
- {
-       struct timer_list *rt = &per_cpu(reap_timers, cpu);
-@@ -600,6 +618,7 @@
-               add_timer_on(rt, cpu);
-       }
- }
-+#endif
- 
- #ifdef CONFIG_HOTPLUG_CPU
- static void stop_cpu_timer(int cpu)
-@@ -634,6 +653,7 @@
-       return nc;
- }
- 
-+#ifndef XEN
- static int __devinit cpuup_callback(struct notifier_block *nfb,
-                                 unsigned long action,
-                                 void *hcpu)
-@@ -693,6 +713,7 @@
- }
- 
- static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
-+#endif
- 
- /* Initialisation.
-  * Called after the gfp() functions have been enabled, and before smp_init().
-@@ -805,10 +826,14 @@
-       /* Done! */
-       g_cpucache_up = FULL;
- 
-+#ifdef XEN
-+printk("kmem_cache_init: some parts commented out, ignored\n");
-+#else
-       /* Register a cpu startup notifier callback
-        * that initializes ac_data for all new cpus
-        */
-       register_cpu_notifier(&cpucache_notifier);
-+#endif
-       
- 
-       /* The reap timers are started later, with a module init call:
-@@ -886,8 +911,10 @@
-               page++;
-       }
-       sub_page_state(nr_slab, nr_freed);
-+#ifndef XEN
-       if (current->reclaim_state)
-               current->reclaim_state->reclaimed_slab += nr_freed;
-+#endif
-       free_pages((unsigned long)addr, cachep->gfporder);
-       if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 
-               atomic_sub(1<<cachep->gfporder, &slab_reclaim_pages);
-@@ -1363,8 +1390,10 @@
-                                       + cachep->num;
-       } 
- 
-+#ifndef XEN
-       cachep->lists.next_reap = jiffies + REAPTIMEOUT_LIST3 +
-                                       ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
-+#endif
- 
-       /* Need the semaphore to access the chain. */
-       down(&cache_chain_sem);
-@@ -2237,8 +2266,10 @@
- 
-       if (unlikely(addr < min_addr))
-               goto out;
-+#ifndef XEN
-       if (unlikely(addr > (unsigned long)high_memory - size))
-               goto out;
-+#endif
-       if (unlikely(addr & align_mask))
-               goto out;
-       if (unlikely(!kern_addr_valid(addr)))
-@@ -2769,6 +2800,7 @@
-  */
- static void reap_timer_fnc(unsigned long cpu)
- {
-+#ifndef XEN
-       struct timer_list *rt = &__get_cpu_var(reap_timers);
- 
-       /* CPU hotplug can drag us off cpu: don't run on wrong CPU */
-@@ -2776,6 +2808,7 @@
-               cache_reap();
-               mod_timer(rt, jiffies + REAPTIMEOUT_CPUC + cpu);
-       }
-+#endif
- }
- 
- #ifdef CONFIG_PROC_FS
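
The slab.c patch above mostly compiles Linux-only facilities (CPU hotplug locking, per-CPU reap timers, notifier registration) out of the way behind XEN guards. A tiny self-contained sketch of that stub-out pattern follows; the names and function body are illustrative only, not the real slab code.

#include <stdio.h>

#define XEN 1

#ifdef XEN
/* No Xen counterpart: compile the calls away so the file builds unchanged. */
#define lock_cpu_hotplug()     do { } while (0)
#define unlock_cpu_hotplug()   do { } while (0)
#else
void lock_cpu_hotplug(void);
void unlock_cpu_hotplug(void);
#endif

static void resize_cache(void)
{
        lock_cpu_hotplug();          /* no-op under XEN */
        printf("resizing cache\n");  /* the surrounding logic still runs */
        unlock_cpu_hotplug();
}

int main(void)
{
        resize_cache();
        return 0;
}
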
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/slab.h
--- a/xen/arch/ia64/patch/linux-2.6.7/slab.h    Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,14 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/slab.h        2004-06-15 23:20:26.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/slab.h 2004-08-25 19:28:13.000000000 -0600
-@@ -83,7 +83,11 @@
-                       goto found; \
-               else \
-                       i++;
-+#ifdef XEN
-+#include <linux/kmalloc_sizes.h>
-+#else
- #include "kmalloc_sizes.h"
-+#endif
- #undef CACHE
-               {
-                       extern void __you_cannot_kmalloc_that_much(void);
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/swiotlb.c
--- a/xen/arch/ia64/patch/linux-2.6.7/swiotlb.c Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,47 +0,0 @@
---- ../../linux-2.6.7/arch/ia64/lib/swiotlb.c  2004-06-15 23:19:43.000000000 -0600
-+++ arch/ia64/lib/swiotlb.c    2005-03-23 14:54:05.000000000 -0700
-@@ -100,7 +100,11 @@
-       /*
-        * Get IO TLB memory from the low pages
-        */
--      io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
-+      /* FIXME: Do we really need swiotlb in HV? If all memory trunks
-+       * presented to guest as <4G, are actually <4G in machine range,
-+       * no DMA intevention from HV...
-+       */
-+      io_tlb_start = alloc_xenheap_pages(get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT)));
-       if (!io_tlb_start)
-               BUG();
-       io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
-@@ -110,11 +114,11 @@
-        * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
-        * between io_tlb_start and io_tlb_end.
-        */
--      io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
-+      io_tlb_list = alloc_xenheap_pages(get_order(io_tlb_nslabs * sizeof(int)));
-       for (i = 0; i < io_tlb_nslabs; i++)
-               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-       io_tlb_index = 0;
--      io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
-+      io_tlb_orig_addr = alloc_xenheap_pages(get_order(io_tlb_nslabs * sizeof(char *)));
- 
-       printk(KERN_INFO "Placing software IO TLB between 0x%p - 0x%p\n",
-              (void *) io_tlb_start, (void *) io_tlb_end);
-@@ -279,7 +283,7 @@
-       /* XXX fix me: the DMA API should pass us an explicit DMA mask instead: */
-       flags |= GFP_DMA;
- 
--      ret = (void *)__get_free_pages(flags, get_order(size));
-+      ret = (void *)alloc_xenheap_pages(get_order(size));
-       if (!ret)
-               return NULL;
- 
-@@ -294,7 +298,7 @@
- void
- swiotlb_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
- {
--      free_pages((unsigned long) vaddr, get_order(size));
-+      free_xenheap_pages((unsigned long) vaddr, get_order(size));
- }
- 
- /*
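
The swiotlb.c patch above swaps the boot-time allocators for Xen's heap allocator and wraps each byte count in get_order(), because alloc_xenheap_pages() takes a page order rather than a size. Here is a rough standalone illustration of that size-to-order step; it is not the kernel's get_order(), and PAGE_SHIFT of 14 (16KB pages, a common ia64 configuration) is an assumption for the demo.

#include <stdio.h>

#define PAGE_SHIFT 14
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Simplified stand-in: smallest order such that 2^order pages cover `size` bytes. */
static int get_order(unsigned long size)
{
        int order = 0;
        unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        while ((1UL << order) < pages)
                order++;
        return order;
}

int main(void)
{
        unsigned long io_tlb_nslabs = 1024;
        unsigned long slab_bytes = io_tlb_nslabs * sizeof(int);

        printf("%lu bytes -> order %d (%lu pages)\n",
               slab_bytes, get_order(slab_bytes),
               1UL << get_order(slab_bytes));
        return 0;
}
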
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/system.h
--- a/xen/arch/ia64/patch/linux-2.6.7/system.h  Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,43 +0,0 @@
---- ../../linux-2.6.7/include/asm-ia64/system.h        2005-03-24 19:39:56.000000000 -0700
-+++ include/asm-ia64/system.h  2005-04-01 12:56:37.000000000 -0700
-@@ -24,8 +24,16 @@
-  * 0xa000000000000000+2*PERCPU_PAGE_SIZE
-  * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
-  */
-+#ifdef XEN
-+//#define KERNEL_START                 0xf000000100000000
-+#define KERNEL_START           0xf000000004000000
-+#define PERCPU_ADDR            0xf100000000000000-PERCPU_PAGE_SIZE
-+#define SHAREDINFO_ADDR                0xf100000000000000
-+#define VHPT_ADDR              0xf200000000000000
-+#else
- #define KERNEL_START           0xa000000100000000
- #define PERCPU_ADDR           (-PERCPU_PAGE_SIZE)
-+#endif
- 
- #ifndef __ASSEMBLY__
- 
-@@ -218,9 +226,13 @@
- # define PERFMON_IS_SYSWIDE() (0)
- #endif
- 
-+#ifdef XEN
-+#define IA64_HAS_EXTRA_STATE(t) 0
-+#else
- #define IA64_HAS_EXTRA_STATE(t)                                                       \
-       ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)       \
-        || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
-+#endif
- 
- #define __switch_to(prev,next,last) do {                                                      \
-       if (IA64_HAS_EXTRA_STATE(prev))                                                         \
-@@ -249,6 +261,9 @@
- #else
- # define switch_to(prev,next,last)    __switch_to(prev, next, last)
- #endif
-+//#ifdef XEN
-+//#undef switch_to
-+//#endif
- 
- /*
-  * On IA-64, we don't want to hold the runqueue's lock during the low-level context-switch,
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/time.c
--- a/xen/arch/ia64/patch/linux-2.6.7/time.c    Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,338 +0,0 @@
---- ../../linux-2.6.7/arch/ia64/kernel/time.c  2004-06-15 23:19:01.000000000 -0600
-+++ arch/ia64/time.c   2005-03-14 17:27:11.000000000 -0700
-@@ -10,16 +10,22 @@
-  */
- #include <linux/config.h>
- 
-+#ifndef XEN
- #include <linux/cpu.h>
-+#endif
- #include <linux/init.h>
- #include <linux/kernel.h>
- #include <linux/module.h>
-+#ifndef XEN
- #include <linux/profile.h>
-+#endif
- #include <linux/sched.h>
- #include <linux/time.h>
- #include <linux/interrupt.h>
- #include <linux/efi.h>
-+#ifndef XEN
- #include <linux/profile.h>
-+#endif
- #include <linux/timex.h>
- 
- #include <asm/machvec.h>
-@@ -29,6 +35,9 @@
- #include <asm/sal.h>
- #include <asm/sections.h>
- #include <asm/system.h>
-+#ifdef XEN
-+#include <asm/ia64_int.h>
-+#endif
- 
- extern unsigned long wall_jiffies;
- 
-@@ -45,6 +54,59 @@
- 
- #endif
- 
-+#ifdef XEN
-+volatile unsigned long last_nsec_offset;
-+extern rwlock_t xtime_lock;
-+unsigned long cpu_khz;  /* Detected as we calibrate the TSC */
-+static s_time_t        stime_irq;       /* System time at last 'time update' */
-+
-+static inline u64 get_time_delta(void)
-+{
-+      return ia64_get_itc();
-+}
-+
-+s_time_t get_s_time(void)
-+{
-+    s_time_t now;
-+    unsigned long flags;
-+
-+    read_lock_irqsave(&xtime_lock, flags);
-+
-+    now = stime_irq + get_time_delta();
-+
-+    /* Ensure that the returned system time is monotonically increasing. */
-+    {
-+        static s_time_t prev_now = 0;
-+        if ( unlikely(now < prev_now) )
-+            now = prev_now;
-+        prev_now = now;
-+    }
-+
-+    read_unlock_irqrestore(&xtime_lock, flags);
-+
-+    return now; 
-+}
-+
-+void update_dom_time(struct vcpu *v)
-+{
-+// FIXME: implement this?
-+//    printf("update_dom_time: called, not implemented, skipping\n");
-+      return;
-+}
-+
-+/* Set clock to <secs,usecs> after 00:00:00 UTC, 1 January, 1970. */
-+void do_settime(unsigned long secs, unsigned long usecs, u64 system_time_base)
-+{
-+// FIXME: Should this be do_settimeofday (from linux)???
-+      printf("do_settime: called, not implemented, stopping\n");
-+      dummy();
-+}
-+#endif
-+
-+#if 0 /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
-+#endif        /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
-+
-+#ifndef XEN
- static void
- itc_reset (void)
- {
-@@ -80,12 +142,15 @@
-       return (elapsed_cycles*local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
- }
- 
-+#ifndef XEN
- static struct time_interpolator itc_interpolator = {
-       .get_offset =   itc_get_offset,
-       .update =       itc_update,
-       .reset =        itc_reset
- };
-+#endif
- 
-+#ifndef XEN
- int
- do_settimeofday (struct timespec *tv)
- {
-@@ -95,7 +160,9 @@
-       if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-               return -EINVAL;
- 
-+#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
-       write_seqlock_irq(&xtime_lock);
-+#endif
-       {
-               /*
-                * This is revolting. We need to set "xtime" correctly. However, the value
-@@ -117,12 +184,15 @@
-               time_esterror = NTP_PHASE_LIMIT;
-               time_interpolator_reset();
-       }
-+#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
-       write_sequnlock_irq(&xtime_lock);
-+#endif
-       clock_was_set();
-       return 0;
- }
- 
- EXPORT_SYMBOL(do_settimeofday);
-+#endif
- 
- void
- do_gettimeofday (struct timeval *tv)
-@@ -185,6 +255,7 @@
- }
- 
- EXPORT_SYMBOL(do_gettimeofday);
-+#endif
- 
- /*
-  * The profiling function is SMP safe. (nothing can mess
-@@ -195,6 +266,9 @@
- static inline void
- ia64_do_profile (struct pt_regs * regs)
- {
-+#ifdef XEN
-+}
-+#else
-       unsigned long ip, slot;
-       extern cpumask_t prof_cpu_mask;
- 
-@@ -231,24 +305,89 @@
-               ip = prof_len-1;
-       atomic_inc((atomic_t *)&prof_buffer[ip]);
- }
-+#endif
-+
-+#ifdef XEN
-+unsigned long domain0_ready = 0;      // FIXME (see below)
-+#define typecheck(a,b)        1
-+/* FROM linux/include/linux/jiffies.h */
-+/*
-+ *    These inlines deal with timer wrapping correctly. You are 
-+ *    strongly encouraged to use them
-+ *    1. Because people otherwise forget
-+ *    2. Because if the timer wrap changes in future you won't have to
-+ *       alter your driver code.
-+ *
-+ * time_after(a,b) returns true if the time a is after time b.
-+ *
-+ * Do this with "<0" and ">=0" to only test the sign of the result. A
-+ * good compiler would generate better code (and a really good compiler
-+ * wouldn't care). Gcc is currently neither.
-+ */
-+#define time_after(a,b)               \
-+      (typecheck(unsigned long, a) && \
-+       typecheck(unsigned long, b) && \
-+       ((long)(b) - (long)(a) < 0))
-+#define time_before(a,b)      time_after(b,a)
-+
-+#define time_after_eq(a,b)    \
-+      (typecheck(unsigned long, a) && \
-+       typecheck(unsigned long, b) && \
-+       ((long)(a) - (long)(b) >= 0))
-+#define time_before_eq(a,b)   time_after_eq(b,a)
-+#endif
- 
- static irqreturn_t
- timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
- {
-       unsigned long new_itm;
- 
-+#ifndef XEN
-       if (unlikely(cpu_is_offline(smp_processor_id()))) {
-               return IRQ_HANDLED;
-       }
-+#endif
-+#ifdef XEN
-+      if (current->domain == dom0) {
-+              // FIXME: there's gotta be a better way of doing this...
-+              // We have to ensure that domain0 is launched before we
-+              // call vcpu_timer_expired on it
-+              //domain0_ready = 1; // moved to xensetup.c
-+      }
-+      if (domain0_ready && vcpu_timer_expired(dom0->vcpu[0])) {
-+              vcpu_pend_timer(dom0->vcpu[0]);
-+              //vcpu_set_next_timer(dom0->vcpu[0]);
-+              domain_wake(dom0->vcpu[0]);
-+      }
-+      if (!is_idle_task(current->domain) && current->domain != dom0) {
-+              if (vcpu_timer_expired(current)) {
-+                      vcpu_pend_timer(current);
-+                      // ensure another timer interrupt happens even if domain doesn't
-+                      vcpu_set_next_timer(current);
-+                      domain_wake(current);
-+              }
-+      }
-+      raise_actimer_softirq();
-+#endif
- 
-+#ifndef XEN
-       platform_timer_interrupt(irq, dev_id, regs);
-+#endif
- 
-       new_itm = local_cpu_data->itm_next;
- 
-       if (!time_after(ia64_get_itc(), new_itm))
-+#ifdef XEN
-+              return;
-+#else
-               printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
-                      ia64_get_itc(), new_itm);
-+#endif
- 
-+#ifdef XEN
-+//    printf("GOT TO HERE!!!!!!!!!!!\n");
-+      //while(1);
-+#endif
-       ia64_do_profile(regs);
- 
-       while (1) {
-@@ -269,10 +408,16 @@
-                        * another CPU. We need to avoid to SMP race by acquiring the
-                        * xtime_lock.
-                        */
-+#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
-                       write_seqlock(&xtime_lock);
-+#endif
-+#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
-                       do_timer(regs);
-+#endif
-                       local_cpu_data->itm_next = new_itm;
-+#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
-                       write_sequnlock(&xtime_lock);
-+#endif
-               } else
-                       local_cpu_data->itm_next = new_itm;
- 
-@@ -292,7 +437,12 @@
-                */
-               while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
-                       new_itm += local_cpu_data->itm_delta;
-+//#ifdef XEN
-+//            vcpu_set_next_timer(current);
-+//#else
-+//printf("***** timer_interrupt: Setting itm to %lx\n",new_itm);
-               ia64_set_itm(new_itm);
-+//#endif
-               /* double check, in case we got hit by a (slow) PMI: */
-       } while (time_after_eq(ia64_get_itc(), new_itm));
-       return IRQ_HANDLED;
-@@ -307,6 +457,7 @@
-       int cpu = smp_processor_id();
-       unsigned long shift = 0, delta;
- 
-+printf("ia64_cpu_local_tick: about to call ia64_set_itv\n");
-       /* arrange for the cycle counter to generate a timer interrupt: */
-       ia64_set_itv(IA64_TIMER_VECTOR);
- 
-@@ -320,6 +471,7 @@
-               shift = (2*(cpu - hi) + 1) * delta/hi/2;
-       }
-       local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
-+printf("***** ia64_cpu_local_tick: Setting itm to %lx\n",local_cpu_data->itm_next);
-       ia64_set_itm(local_cpu_data->itm_next);
- }
- 
-@@ -335,6 +487,7 @@
-        * frequency and then a PAL call to determine the frequency ratio between the ITC
-        * and the base frequency.
-        */
-+
-       status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
-                                   &platform_base_freq, &platform_base_drift);
-       if (status != 0) {
-@@ -384,9 +537,11 @@
-                                       + itc_freq/2)/itc_freq;
- 
-       if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
-+#ifndef XEN
-               itc_interpolator.frequency = local_cpu_data->itc_freq;
-               itc_interpolator.drift = itc_drift;
-               register_time_interpolator(&itc_interpolator);
-+#endif
-       }
- 
-       /* Setup the CPU local timer tick */
-@@ -395,7 +550,9 @@
- 
- static struct irqaction timer_irqaction = {
-       .handler =      timer_interrupt,
-+#ifndef XEN
-       .flags =        SA_INTERRUPT,
-+#endif
-       .name =         "timer"
- };
- 
-@@ -403,12 +560,16 @@
- time_init (void)
- {
-       register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
-+#ifndef XEN
-       efi_gettimeofday(&xtime);
-+#endif
-       ia64_init_itm();
- 
-+#ifndef XEN
-       /*
-        * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
-        * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
-        */
-       set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
-+#endif
- }
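
The time.c patch above copies the jiffies.h time_after()/time_before() helpers so the timer code can compare ITC/ITM values safely across counter wrap. The following small standalone demo shows why the signed-difference form keeps working after a wrap; 32-bit values are used here purely to make the wrap easy to show, whereas the macros in the hunk operate on unsigned long.

#include <stdio.h>
#include <stdint.h>

/* Same idea as the kernel macro: true if `a` is after `b`, even across wrap. */
#define time_after(a,b)  ((int32_t)(b) - (int32_t)(a) < 0)

int main(void)
{
        uint32_t before_wrap = 0xfffffff0u;   /* just below the wrap point */
        uint32_t after_wrap  = 0x00000010u;   /* counter has wrapped */

        /* Naive comparison gets this wrong: the wrapped value looks "smaller". */
        printf("naive:      %d\n", after_wrap > before_wrap);
        /* Signed-difference comparison still reports that after_wrap is later. */
        printf("time_after: %d\n", time_after(after_wrap, before_wrap));
        return 0;
}
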
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/tlb.c
--- a/xen/arch/ia64/patch/linux-2.6.7/tlb.c     Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,48 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/mm/tlb.c  2004-06-15 23:19:43.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/tlb.c 2004-08-25 19:28:12.000000000 -0600
-@@ -21,7 +21,9 @@
- #include <asm/mmu_context.h>
- #include <asm/pgalloc.h>
- #include <asm/pal.h>
-+#ifndef XEN
- #include <asm/tlbflush.h>
-+#endif
- 
- static struct {
-       unsigned long mask;     /* mask of supported purge page-sizes */
-@@ -43,6 +45,9 @@
- void
- wrap_mmu_context (struct mm_struct *mm)
- {
-+#ifdef XEN
-+printf("wrap_mmu_context: called, not implemented\n");
-+#else
-       unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
-       struct task_struct *tsk;
-       int i;
-@@ -83,6 +88,7 @@
-               put_cpu();
-       }
-       local_flush_tlb_all();
-+#endif
- }
- 
- void
-@@ -132,6 +138,9 @@
- void
- flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end)
- {
-+#ifdef XEN
-+printf("flush_tlb_range: called, not implemented\n");
-+#else
-       struct mm_struct *mm = vma->vm_mm;
-       unsigned long size = end - start;
-       unsigned long nbits;
-@@ -163,6 +172,7 @@
- # endif
- 
-       ia64_srlz_i();                  /* srlz.i implies srlz.d */
-+#endif
- }
- EXPORT_SYMBOL(flush_tlb_range);
- 
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/types.h
--- a/xen/arch/ia64/patch/linux-2.6.7/types.h   Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,15 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/types.h    2004-06-15 23:19:01.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/types.h        2004-11-11 17:08:30.000000000 -0700
-@@ -1,5 +1,12 @@
- #ifndef _ASM_IA64_TYPES_H
- #define _ASM_IA64_TYPES_H
-+#ifdef XEN
-+#ifndef __ASSEMBLY__
-+typedef unsigned long ssize_t;
-+typedef unsigned long size_t;
-+typedef long long loff_t;
-+#endif
-+#endif
- 
- /*
-  * This file is never included by application software unless explicitly requested (e.g.,
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/unaligned.c
--- a/xen/arch/ia64/patch/linux-2.6.7/unaligned.c       Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,97 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/unaligned.c    2004-06-15 23:20:03.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/unaligned.c   2004-08-25 19:28:12.000000000 -0600
-@@ -15,8 +15,10 @@
-  */
- #include <linux/kernel.h>
- #include <linux/sched.h>
-+#ifndef XEN
- #include <linux/smp_lock.h>
- #include <linux/tty.h>
-+#endif
- 
- #include <asm/intrinsics.h>
- #include <asm/processor.h>
-@@ -24,7 +26,16 @@
- #include <asm/uaccess.h>
- #include <asm/unaligned.h>
- 
-+#ifdef XEN
-+#define       ia64_peek(x...) printk("ia64_peek: called, not implemented\n")
-+#define       ia64_poke(x...) printk("ia64_poke: called, not implemented\n")
-+#define       ia64_sync_fph(x...) printk("ia64_sync_fph: called, not implemented\n")
-+#define       ia64_flush_fph(x...) printk("ia64_flush_fph: called, not implemented\n")
-+#define       die_if_kernel(x...) printk("die_if_kernel: called, not implemented\n")
-+#define jiffies 0
-+#else
- extern void die_if_kernel(char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
-+#endif
- 
- #undef DEBUG_UNALIGNED_TRAP
- 
-@@ -437,7 +448,11 @@
- }
- 
- 
-+#ifdef XEN
-+void
-+#else
- static void
-+#endif
- setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
- {
-       struct switch_stack *sw = (struct switch_stack *) regs - 1;
-@@ -611,7 +626,11 @@
- }
- 
- 
-+#ifdef XEN
-+void
-+#else
- static void
-+#endif
- getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
- {
-       struct switch_stack *sw = (struct switch_stack *) regs - 1;
-@@ -1298,7 +1317,9 @@
-       mm_segment_t old_fs = get_fs();
-       unsigned long bundle[2];
-       unsigned long opcode;
-+#ifndef XEN
-       struct siginfo si;
-+#endif
-       const struct exception_table_entry *eh = NULL;
-       union {
-               unsigned long l;
-@@ -1317,6 +1338,9 @@
-        * user-level unaligned accesses.  Otherwise, a clever program could trick this
-        * handler into reading an arbitrary kernel addresses...
-        */
-+#ifdef XEN
-+printk("ia64_handle_unaligned: called, not working yet\n");
-+#else
-       if (!user_mode(regs))
-               eh = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
-       if (user_mode(regs) || eh) {
-@@ -1353,6 +1377,7 @@
- 
-       if (__copy_from_user(bundle, (void *) regs->cr_iip, 16))
-               goto failure;
-+#endif
- 
-       /*
-        * extract the instruction from the bundle given the slot number
-@@ -1493,6 +1518,7 @@
-               /* NOT_REACHED */
-       }
-   force_sigbus:
-+#ifndef XEN
-       si.si_signo = SIGBUS;
-       si.si_errno = 0;
-       si.si_code = BUS_ADRALN;
-@@ -1501,5 +1527,6 @@
-       si.si_isr = 0;
-       si.si_imm = 0;
-       force_sig_info(SIGBUS, &si, current);
-+#endif
-       goto done;
- }
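
Beyond stubbing out the signal/ptrace paths, the unaligned.c patch above removes `static` from getreg()/setreg() under XEN so that other parts of the hypervisor (for example, instruction emulation code) can call them. A minimal sketch of that export pattern follows; the function body and the caller are invented for illustration and are not the real ia64 prototypes.

#include <stdio.h>

#define XEN 1

/* Under XEN the helper gets external linkage; otherwise it stays file-local. */
#ifdef XEN
void
#else
static void
#endif
setreg_demo(unsigned long regnum, unsigned long val)
{
        printf("r%lu <- 0x%lx\n", regnum, val);
}

int main(void)
{
        /* An external caller such as an emulator could now invoke it directly. */
        setreg_demo(8, 0xdeadbeefUL);
        return 0;
}
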
diff -r e173a853dc46 -r e2127f19861b xen/arch/ia64/patch/linux-2.6.7/wait.h
--- a/xen/arch/ia64/patch/linux-2.6.7/wait.h    Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,26 +0,0 @@
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/wait.h        2004-06-15 23:19:31.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/linux/wait.h   2004-08-25 19:28:13.000000000 -0600
-@@ -104,10 +104,15 @@
-       list_del(&old->task_list);
- }
- 
-+#ifdef XEN
-+void FASTCALL(__wake_up(struct task_struct *p));
-+#else
- void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key));
-+#endif
- extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
- extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
- 
-+#ifndef XEN
- #define wake_up(x)                    __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
- #define wake_up_nr(x, nr)             __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
- #define wake_up_all(x)                        __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
-@@ -117,6 +122,7 @@
- #define wake_up_interruptible_all(x)  __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
- #define       wake_up_locked(x)               __wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
- #define wake_up_interruptible_sync(x)   __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
-+#endif
- 
- #define __wait_event(wq, condition)                                   \
- do {                                                                  \
diff -r e173a853dc46 -r e2127f19861b xen/include/asm-ia64/slab.h
--- a/xen/include/asm-ia64/slab.h       Tue Aug  2 17:20:46 2005
+++ /dev/null   Tue Aug  2 23:59:09 2005
@@ -1,3 +0,0 @@
-#include <xen/xmalloc.h>
-#include <linux/gfp.h>
-#include <asm/delay.h>

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
