WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-ia64-devel

RE: [Xen-ia64-devel] Paravirtualized xenlinux/ia64 available

To: <xen-ia64-devel@xxxxxxxxxxxxxxxxxxx>
Subject: RE: [Xen-ia64-devel] Paravirtualized xenlinux/ia64 available
From: "Magenheimer, Dan (HP Labs Fort Collins)" <dan.magenheimer@xxxxxx>
Date: Wed, 4 May 2005 15:01:51 -0700
Delivery-date: Wed, 04 May 2005 22:01:23 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-ia64-devel-request@lists.xensource.com?subject=help>
List-id: Discussion of the ia64 port of Xen <xen-ia64-devel.lists.xensource.com>
List-post: <mailto:xen-ia64-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ia64-devel>, <mailto:xen-ia64-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ia64-devel>, <mailto:xen-ia64-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-ia64-devel-bounces@xxxxxxxxxxxxxxxxxxx
Thread-index: AcVHlVmMb8/hDflETjKrZWojU5vuVgJUlTQAAAMSAYA=
Thread-topic: [Xen-ia64-devel] Paravirtualized xenlinux/ia64 available
For those interested -- but not interested enough to go through
the effort to pull the bits -- here's the diffstat and diff -u.

 arch/ia64/Kconfig             |    4 
 arch/ia64/Makefile            |    1 
 arch/ia64/hp/sim/Makefile     |    2 
 arch/ia64/ia32/ia32_signal.c  |   12 ++
 arch/ia64/ia32/ia32_support.c |    8 +
 arch/ia64/kernel/entry.S      |  185
++++++++++++++++++++++++++++++++++++++++++
 arch/ia64/kernel/ivt.S        |  160
++++++++++++++++++++++++++++++++++++
 arch/ia64/kernel/minstate.h   |  117 ++++++++++++++++++++++++++
 arch/ia64/kernel/pal.S        |   16 +++
 arch/ia64/kernel/setup.c      |    6 +
 drivers/acpi/motherboard.c    |    3 
 include/asm-ia64/ia32.h       |    4 
 include/asm-ia64/processor.h  |   76 +++++++++++++++++
 include/asm-ia64/system.h     |   58 +++++++++++++
 14 files changed, 652 insertions(+)

--- 1.85/arch/ia64/Kconfig      Fri Jan 28 16:32:25 2005
+++ 1.86/arch/ia64/Kconfig      Fri Apr 22 16:52:50 2005
@@ -46,6 +46,10 @@
        bool
        default y
 
+config XEN
+       bool
+       default y
+
 choice
        prompt "System type"
        default IA64_GENERIC
===== arch/ia64/Makefile 1.74 vs 1.75 =====
--- 1.74/arch/ia64/Makefile     Fri Jan 28 16:32:45 2005
+++ 1.75/arch/ia64/Makefile     Fri Apr 22 16:52:50 2005
@@ -60,6 +60,7 @@
 
 drivers-$(CONFIG_PCI)          += arch/ia64/pci/
 drivers-$(CONFIG_IA64_HP_SIM)  += arch/ia64/hp/sim/
+drivers-$(CONFIG_XEN)          += arch/ia64/hp/sim/
 drivers-$(CONFIG_IA64_HP_ZX1)  += arch/ia64/hp/common/
arch/ia64/hp/zx1/
 drivers-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/hp/common/
arch/ia64/hp/zx1/
 drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/
arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/
===== arch/ia64/hp/sim/Makefile 1.8 vs 1.9 =====
--- 1.8/arch/ia64/hp/sim/Makefile       Sat Aug 16 16:58:46 2003
+++ 1.9/arch/ia64/hp/sim/Makefile       Fri Apr 22 16:52:50 2005
@@ -13,4 +13,6 @@
 obj-$(CONFIG_HP_SIMETH)        += simeth.o
 obj-$(CONFIG_HP_SIMSERIAL) += simserial.o
 obj-$(CONFIG_HP_SIMSERIAL_CONSOLE) += hpsim_console.o
+obj-$(CONFIG_XEN) += simserial.o
+obj-$(CONFIG_XEN) += hpsim_console.o
 obj-$(CONFIG_HP_SIMSCSI) += simscsi.o
===== arch/ia64/ia32/ia32_signal.c 1.35 vs 1.36 =====
--- 1.35/arch/ia64/ia32/ia32_signal.c   Tue Jan 25 13:23:45 2005
+++ 1.36/arch/ia64/ia32/ia32_signal.c   Wed May  4 14:16:59 2005
@@ -667,7 +667,11 @@
        /*
         *  `eflags' is in an ar register for this context
         */
+#ifdef CONFIG_XEN
+       flag = xen_get_eflag();
+#else
        flag = ia64_getreg(_IA64_REG_AR_EFLAG);
+#endif
        err |= __put_user((unsigned int)flag, &sc->eflags);
        err |= __put_user(regs->r12, &sc->esp_at_signal);
        err |= __put_user((regs->r17 >> 16) & 0xffff, (unsigned int
__user *)&sc->ss);
@@ -755,10 +759,18 @@
                 *  IA32 process's context.
                 */
                err |= __get_user(tmpflags, &sc->eflags);
+#ifdef CONFIG_XEN
+               flag = xen_get_eflag();
+#else
                flag = ia64_getreg(_IA64_REG_AR_EFLAG);
+#endif
                flag &= ~0x40DD5;
                flag |= (tmpflags & 0x40DD5);
+#ifdef CONFIG_XEN
+               xen_set_eflag(flag);
+#else
                ia64_setreg(_IA64_REG_AR_EFLAG, flag);
+#endif
 
                regs->r1 = -1;  /* disable syscall checks, r1 is
orig_eax */
        }
===== arch/ia64/ia32/ia32_support.c 1.20 vs 1.21 =====
--- 1.20/arch/ia64/ia32/ia32_support.c  Wed Oct  6 23:55:23 2004
+++ 1.21/arch/ia64/ia32/ia32_support.c  Wed May  4 14:16:59 2005
@@ -100,7 +100,11 @@
 void
 ia32_save_state (struct task_struct *t)
 {
+#ifdef CONFIG_XEN
+       t->thread.eflag = xen_get_eflag();
+#else
        t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG);
+#endif
        t->thread.fsr   = ia64_getreg(_IA64_REG_AR_FSR);
        t->thread.fcr   = ia64_getreg(_IA64_REG_AR_FCR);
        t->thread.fir   = ia64_getreg(_IA64_REG_AR_FIR);
@@ -122,7 +126,11 @@
        fdr = t->thread.fdr;
        tssd = load_desc(_TSS);                                 /* TSSD
*/
 
+#ifdef CONFIG_XEN
+       xen_set_eflag(eflag);
+#else
        ia64_setreg(_IA64_REG_AR_EFLAG, eflag);
+#endif
        ia64_setreg(_IA64_REG_AR_FSR, fsr);
        ia64_setreg(_IA64_REG_AR_FCR, fcr);
        ia64_setreg(_IA64_REG_AR_FIR, fir);
===== arch/ia64/kernel/entry.S 1.80 vs 1.83 =====
--- 1.80/arch/ia64/kernel/entry.S       Fri Jan 28 16:15:49 2005
+++ 1.83/arch/ia64/kernel/entry.S       Wed May  4 14:16:59 2005
@@ -189,7 +189,13 @@
 
        adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
        movl r25=init_task
+#ifdef CONFIG_XEN
+       movl r27=XSI_KR0+(IA64_KR_CURRENT_STACK*8)
+       ;;
+       ld8 r27=[r27]
+#else
        mov r27=IA64_KR(CURRENT_STACK)
+#endif
        adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
        dep r20=0,in0,61,3              // physical address of "next"
        ;;
@@ -204,11 +210,25 @@
 (p6)   br.cond.dpnt .map
        ;;
 .done:
+#ifdef CONFIG_XEN
+       movl r27=XSI_PSR_IC
+       mov r8=1
+       ;;
+(p6)   st4 [r27]=r8
+       ;;
+#else
 (p6)   ssm psr.ic                      // if we had to map, reenable
the psr.ic bit FIRST!!!
        ;;
 (p6)   srlz.d
+#endif
        ld8 sp=[r21]                    // load kernel stack pointer of
new task
+#ifdef CONFIG_XEN
+       movl r8=XSI_KR0+(IA64_KR_CURRENT*8)
+       ;;
+       st8 [r8]=in0
+#else
        mov IA64_KR(CURRENT)=in0        // update "current" application
register
+#endif
        mov r8=r13                      // return pointer to previously
running task
        mov r13=in0                     // set "current" pointer
        ;;
@@ -220,7 +240,13 @@
        br.ret.sptk.many rp             // boogie on out in new context
 
 .map:
+#ifdef CONFIG_XEN
+       movl r27=XSI_PSR_IC
+       ;;
+       st4 [r27]=r0
+#else
        rsm psr.ic                      // interrupts (psr.i) are
already disabled here
+#endif
        movl r25=PAGE_KERNEL
        ;;
        srlz.d
@@ -231,7 +257,13 @@
        mov cr.ifa=in0                  // VA of next task...
        ;;
        mov r25=IA64_TR_CURRENT_STACK
+#ifdef CONFIG_XEN
+       movl r8=XSI_KR0+(IA64_KR_CURRENT_STACK*8)
+       ;;
+       st8 [r8]=r26
+#else
        mov IA64_KR(CURRENT_STACK)=r26  // remember last page we
mapped...
+#endif
        ;;
        itr.d dtr[r25]=r23              // wire in new mapping...
        br.cond.sptk .done
@@ -693,7 +725,13 @@
        ;;
        cmp.eq p6,p0=r21,r0             // p6 <- pUStk || (preempt_count
== 0)
 #else /* !CONFIG_PREEMPT */
+#ifdef CONFIG_XEN
+       movl r2=XSI_PSR_I
+       ;;
+(pUStk)        st4 [r2]=r0
+#else
 (pUStk)        rsm psr.i
+#endif
        cmp.eq pLvSys,p0=r0,r0          // pLvSys=1: leave from syscall
 (pUStk)        cmp.eq.unc p6,p0=r0,r0          // p6 <- pUStk
 #endif
@@ -720,7 +758,14 @@
        mov f6=f0               // clear f6
        ;;
        invala                  // M0|1 invalidate ALAT
+#ifdef CONFIG_XEN
+       movl r29=XSI_PSR_IC
+       ;;
+       st8     [r29]=r0        // note: clears both vpsr.i and vpsr.ic!
+       ;;
+#else
        rsm psr.i | psr.ic      // M2 initiate turning off of interrupt
and interruption collection
+#endif
        mov f9=f0               // clear f9
 
        ld8 r29=[r2],16         // load cr.ipsr
@@ -809,7 +854,14 @@
        ;;
        cmp.eq p6,p0=r21,r0             // p6 <- pUStk || (preempt_count
== 0)
 #else
+#ifdef CONFIG_XEN
+(pUStk)        movl r17=XSI_PSR_I
+       ;;
+(pUStk)        st4 [r17]=r0
+       ;;
+#else
 (pUStk)        rsm psr.i
+#endif
        cmp.eq p0,pLvSys=r0,r0          // pLvSys=0: leave from kernel
 (pUStk)        cmp.eq.unc p6,p0=r0,r0          // p6 <- pUStk
 #endif
@@ -857,7 +909,14 @@
        mov ar.csd=r30
        mov ar.ssd=r31
        ;;
+#ifdef CONFIG_XEN
+       movl r22=XSI_PSR_IC
+       ;;
+       st8 [r22]=r0            // note: clears both vpsr.i and vpsr.ic!
+       ;;
+#else
        rsm psr.i | psr.ic      // initiate turning off of interrupt and
interruption collection
+#endif
        invala                  // invalidate ALAT
        ;;
        ld8.fill r22=[r2],24
@@ -889,9 +948,48 @@
        mov ar.ccv=r15
        ;;
        ldf.fill f11=[r2]
+#ifdef CONFIG_XEN
+       ;;
+       // r16-r31 all now hold bank1 values
+       movl r2=XSI_BANK1_R16
+       movl r3=XSI_BANK1_R16+8
+       ;;
+       st8 [r2]=r16,16
+       st8 [r3]=r17,16
+       ;;
+       st8 [r2]=r18,16
+       st8 [r3]=r19,16
+       ;;
+       st8 [r2]=r20,16
+       st8 [r3]=r21,16
+       ;;
+       st8 [r2]=r22,16
+       st8 [r3]=r23,16
+       ;;
+       st8 [r2]=r24,16
+       st8 [r3]=r25,16
+       ;;
+       st8 [r2]=r26,16
+       st8 [r3]=r27,16
+       ;;
+       st8 [r2]=r28,16
+       st8 [r3]=r29,16
+       ;;
+       st8 [r2]=r30,16
+       st8 [r3]=r31,16
+       ;;
+#else
        bsw.0                   // switch back to bank 0 (no stop bit
required beforehand...)
+#endif
        ;;
+#ifdef CONFIG_XEN
+(pUStk)        movl r18=XSI_KR0+(IA64_KR_CURRENT*8)
+       ;;
+(pUStk)        ld8 r18=[r18]
+       ;;
+#else
 (pUStk)        mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
+#endif
        adds r16=PT(CR_IPSR)+16,r12
        adds r17=PT(CR_IIP)+16,r12
 
@@ -947,7 +1045,11 @@
 (pNonSys) br.cond.dpnt dont_preserve_current_frame
 
 rbs_switch:
+#ifdef CONFIG_XEN
+       break.b 0x1fffff                // this is a "privified" cover
+#else
        cover                           // add current frame into dirty
partition and set cr.ifs
+#endif
        ;;
        mov r19=ar.bsp                  // get new backing store pointer
        sub r16=r16,r18                 // krbs = old bsp - size of
dirty partition
@@ -1049,16 +1151,34 @@
 (pKStk)        dep r29=r22,r29,21,1    // I0 update ipsr.pp with psr.pp
 (pLvSys)mov r16=r0             // A  clear r16 for leave_syscall, no-op
otherwise
        ;;
+#ifdef CONFIG_XEN
+       movl r25=XSI_IPSR
+       ;;
+       st8[r25]=r29,XSI_IFS-XSI_IPSR
+       ;;
+#else
        mov cr.ipsr=r29         // M2
+#endif
        mov ar.pfs=r26          // I0
 (pLvSys)mov r17=r0             // A  clear r17 for leave_syscall, no-op
otherwise
 
+#ifdef CONFIG_XEN
+(p9)   st8 [r25]=r30
+       ;;
+       adds r25=XSI_IIP-XSI_IFS,r25
+       ;;
+#else
 (p9)   mov cr.ifs=r30          // M2
+#endif
        mov b0=r21              // I0
 (pLvSys)mov r18=r0             // A  clear r18 for leave_syscall, no-op
otherwise
 
        mov ar.fpsr=r20         // M2
+#ifdef CONFIG_XEN
+       st8     [r25]=r28
+#else
        mov cr.iip=r28          // M2
+#endif
        nop 0
        ;;
 (pUStk)        mov ar.rnat=r24         // M2 must happen with RSE in
lazy mode
@@ -1067,6 +1187,23 @@
 
        mov ar.rsc=r27          // M2
        mov pr=r31,-1           // I0
+#ifdef CONFIG_XEN
+       ;;
+       /* FIXME: THIS CODE IS NOT NaT SAFE! */
+       movl r30=XSI_BANKNUM;
+       mov r31=1;;
+       st4 [r30]=r31;
+       movl r30=XSI_BANK1_R16;
+       movl r31=XSI_BANK1_R16+8;;
+       ld8 r16=[r30],16; ld8 r17=[r31],16;;
+       ld8 r18=[r30],16; ld8 r19=[r31],16;;
+       ld8 r20=[r30],16; ld8 r21=[r31],16;;
+       ld8 r22=[r30],16; ld8 r23=[r31],16;;
+       ld8 r24=[r30],16; ld8 r25=[r31],16;;
+       ld8 r26=[r30],16; ld8 r27=[r31],16;;
+       ld8 r28=[r30],16; ld8 r29=[r31],16;;
+       ld8 r30=[r30]; ld8 r31=[r31];;
+#endif
        rfi                     // B
 
        /*
@@ -1096,7 +1233,13 @@
 #endif
        br.call.spnt.many rp=schedule
 .ret9: cmp.eq p6,p0=r0,r0                              // p6 <- 1
+#ifdef CONFIG_XEN
+       movl r2=XSI_PSR_I
+       ;;
+       st4 [r2]=r0
+#else
        rsm psr.i               // disable interrupts
+#endif
        ;;
 #ifdef CONFIG_PREEMPT
 (pKStk)        adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
@@ -1322,6 +1465,48 @@
        mov rp=loc0
        br.ret.sptk.many rp
 END(unw_init_running)
+
+#ifdef CONFIG_XEN
+GLOBAL_ENTRY(xen_thash)
+       tak r8=r96              // this is a "privified" thash r8=r32
+       ;;
+       br.ret.sptk.many rp
+END(xen_thash)
+
+GLOBAL_ENTRY(xen_fc)
+       ptc.e r96               // this is a "privified" fc r32
+       ;;
+       br.ret.sptk.many rp
+END(xen_fc)
+
+GLOBAL_ENTRY(xen_get_cpuid)
+       mov r72=rr[r32]         // this is a "privified" mov
r8=cpuid[r32]
+       ;;
+       br.ret.sptk.many rp
+END(xen_get_cpuid)
+
+GLOBAL_ENTRY(xen_get_pmd)
+       mov r72=pmc[r32]        // this is a "privified" mov r8=pmd[r32]
+       ;;
+       br.ret.sptk.many rp
+END(xen_get_pmd)
+
+#ifdef CONFIG_IA32_SUPPORT
+GLOBAL_ENTRY(xen_get_eflag)
+       mov r72=ar24            // this is a "privified" mov r8=ar.eflg
+       ;;
+       br.ret.sptk.many rp
+END(xen_get_eflag)
+// some bits aren't set if pl!=0, see SDM vol1 3.1.8
+GLOBAL_ENTRY(xen_set_eflag)
+       mov ar24=r32
+       ;;
+       br.ret.sptk.many rp
+END(xen_set_eflag)
+#endif
+
+
+#endif
 
        .rodata
        .align 8
===== arch/ia64/kernel/ivt.S 1.31 vs 1.34 =====
--- 1.31/arch/ia64/kernel/ivt.S Wed Feb  2 16:06:25 2005
+++ 1.34/arch/ia64/kernel/ivt.S Wed May  4 14:16:59 2005
@@ -224,7 +224,13 @@
        mov r29=b0                              // save b0
        mov r31=pr                              // save predicates
 .itlb_fault:
+#ifdef CONFIG_XEN
+       movl r17=XSI_IHA
+       ;;
+       ld8 r17=[r17]                           // get virtual address
of L3 PTE
+#else
        mov r17=cr.iha                          // get virtual address
of L3 PTE
+#endif
        movl r30=1f                             // load nested fault
continuation point
        ;;
 1:     ld8 r18=[r17]                           // read L3 PTE
@@ -268,7 +274,13 @@
        mov r29=b0                              // save b0
        mov r31=pr                              // save predicates
 dtlb_fault:
+#ifdef CONFIG_XEN
+       movl r17=XSI_IHA
+       ;;
+       ld8 r17=[r17]                           // get virtual address
of L3 PTE
+#else
        mov r17=cr.iha                          // get virtual address
of L3 PTE
+#endif
        movl r30=1f                             // load nested fault
continuation point
        ;;
 1:     ld8 r18=[r17]                           // read L3 PTE
@@ -302,20 +314,33 @@
 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
 ENTRY(alt_itlb_miss)
        DBG_FAULT(3)
+#ifdef CONFIG_XEN
+       ld8 r21=[r31],XSI_IFA-XSI_IPSR  // get ipsr, point to ifa
+       movl r17=PAGE_KERNEL
+       ;;
+       movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
+       ;;
+       ld8 r16=[r31]           // get ifa
+       mov r31=pr
+       ;;
+#else
        mov r16=cr.ifa          // get address that caused the TLB miss
        movl r17=PAGE_KERNEL
        mov r21=cr.ipsr
        movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
        mov r31=pr
        ;;
+#endif
 #ifdef CONFIG_DISABLE_VHPT
        shr.u r22=r16,61                        // get the region number
into r21
        ;;
        cmp.gt p8,p0=6,r22                      // user mode
        ;;
+#ifndef CONFIG_XEN
 (p8)   thash r17=r16
        ;;
 (p8)   mov cr.iha=r17
+#endif
 (p8)   mov r29=b0                              // save b0
 (p8)   br.cond.dptk .itlb_fault
 #endif
@@ -340,6 +365,17 @@
 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
 ENTRY(alt_dtlb_miss)
        DBG_FAULT(4)
+#ifdef CONFIG_XEN
+       ld8 r21=[r31],XSI_ISR-XSI_IPSR  // get ipsr, point to isr
+       movl r17=PAGE_KERNEL
+       ;;
+       ld8 r20=[r31],XSI_IFA-XSI_ISR   // get isr, point to ifa
+       movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
+       ;;
+       ld8 r16=[r31]           // get ifa
+       mov r31=pr
+       ;;
+#else
        mov r16=cr.ifa          // get address that caused the TLB miss
        movl r17=PAGE_KERNEL
        mov r20=cr.isr
@@ -347,14 +383,17 @@
        mov r21=cr.ipsr
        mov r31=pr
        ;;
+#endif
 #ifdef CONFIG_DISABLE_VHPT
        shr.u r22=r16,61                        // get the region number
into r21
        ;;
        cmp.gt p8,p0=6,r22                      // access to region 0-5
        ;;
+#ifndef CONFIG_XEN
 (p8)   thash r17=r16
        ;;
 (p8)   mov cr.iha=r17
+#endif
 (p8)   mov r29=b0                              // save b0
 (p8)   br.cond.dptk dtlb_fault
 #endif
@@ -408,7 +447,13 @@
         * Clobbered:   b0, r18, r19, r21, psr.dt (cleared)
         */
        rsm psr.dt                              // switch to using
physical data addressing
+#ifdef CONFIG_XEN
+       movl r19=XSI_KR0+(IA64_KR_PT_BASE*8)    // get the page table
base address
+       ;;
+       ld8 r19=[r19]
+#else
        mov r19=IA64_KR(PT_BASE)                // get the page table
base address
+#endif
        shl r21=r16,3                           // shift bit 60 into
sign bit
        ;;
        shr.u r17=r16,61                        // get the region number
into r17
@@ -462,6 +507,19 @@
        ;;
        SAVE_MIN_WITH_COVER
        alloc r15=ar.pfs,0,0,3,0
+#ifdef CONFIG_XEN
+       movl r3=XSI_ISR
+       ;;
+       ld8 out1=[r3],XSI_IFA-XSI_ISR           // get vcr.isr, point to
ifa
+       ;;
+       ld8 out0=[r3],XSI_PSR_IC-XSI_IFA        // get vcr.ifa, point to
vpsr.ic
+       ;;
+       mov r14=1
+       ;;
+       st4 [r3]=r14                            // vpsr.ic = 1
+       adds r3=8,r2                            // set up second base
pointer
+       ;;
+#else
        mov out0=cr.ifa
        mov out1=cr.isr
        adds r3=8,r2                            // set up second base
pointer
@@ -470,7 +528,23 @@
        ;;
        srlz.i                                  // guarantee that
interruption collectin is on
        ;;
+#endif
+#ifdef CONFIG_XEN_XXX  /* untested; overflows this IVT section */
+(p15)  movl r3=XSI_PSR_I
+       ;;
+(p15)  st4 [r3]=r14,XSI_PEND-XSI_PSR_I         // if (p15) vpsr.i = 1
+       mov r14=r0
+       ;;
+(p15)  ld4 r14=[r3]                            // if
(pending_interrupts)
+       ;;
+(p15)  cmp.ne  p15,p0=r14,r0
+       ;;
+(p15)  ssm     psr.i                           //   do a real ssm psr.i
+       ;;
+       adds r3=8,r2                            // set up second base
pointer
+#else
 (p15)  ssm psr.i                               // restore psr.i
+#endif
        movl r14=ia64_leave_kernel
        ;;
        SAVE_REST
@@ -505,7 +579,11 @@
        mov r16=cr.ifa                          // get the address that
caused the fault
        movl r30=1f                             // load continuation
point in case of nested fault
        ;;
+#ifdef CONFIG_XEN
+       tak r17=r80                             // "privified" thash
+#else
        thash r17=r16                           // compute virtual
address of L3 PTE
+#endif
        mov r29=b0                              // save b0 in case of
nested fault
        mov r31=pr                              // save pr
 #ifdef CONFIG_SMP
@@ -571,7 +649,11 @@
 (p6)   mov r16=r18                             // if so, use cr.iip
instead of cr.ifa
 #endif /* CONFIG_ITANIUM */
        ;;
+#ifdef CONFIG_XEN
+       tak r17=r80                             // "privified" thash
+#else
        thash r17=r16                           // compute virtual
address of L3 PTE
+#endif
        mov r29=b0                              // save b0 in case of
nested fault)
 #ifdef CONFIG_SMP
        mov r28=ar.ccv                          // save ar.ccv
@@ -624,7 +706,11 @@
        mov r16=cr.ifa                          // get the address that
caused the fault
        movl r30=1f                             // load continuation
point in case of nested fault
        ;;
+#ifdef CONFIG_XEN
+       tak r17=r80                             // "privified" thash
+#else
        thash r17=r16                           // compute virtual
address of L3 PTE
+#endif
        mov r31=pr
        mov r29=b0                              // save b0 in case of
nested fault)
 #ifdef CONFIG_SMP
@@ -687,6 +773,23 @@
         * to prevent leaking bits from kernel to user level.
         */
        DBG_FAULT(11)
+#ifdef CONFIG_XEN
+       ld8 r29=[r31],XSI_IIP-XSI_IPSR          // get ipsr, point to
iip
+       mov r18=__IA64_BREAK_SYSCALL
+       mov r21=ar.fpsr
+       ;;
+       ld8 r28=[r31],XSI_IIM-XSI_IIP           // get iip, point to iim
+       mov r19=b6
+       mov r25=ar.unat
+       ;;
+       ld8 r17=[r31]                           // get iim
+       mov r27=ar.rsc
+       mov r26=ar.pfs
+       ;;
+       adds r31=(XSI_KR0+(IA64_KR_CURRENT*8))-XSI_IIM,r31
+       ;;
+       ld8 r16=[r31]                           // r16 = current task
+#else
        mov r16=IA64_KR(CURRENT)                // r16 = current task;
12 cycle read lat.
        mov r17=cr.iim
        mov r18=__IA64_BREAK_SYSCALL
@@ -697,6 +800,7 @@
        mov r27=ar.rsc
        mov r26=ar.pfs
        mov r28=cr.iip
+#endif
        mov r31=pr                              // prepare to save
predicates
        mov r20=r1
        ;;
@@ -729,13 +833,36 @@
        MINSTATE_START_SAVE_MIN_VIRT
        br.call.sptk.many b7=ia64_syscall_setup
        ;;
+#ifdef CONFIG_XEN
+       mov r2=b0; br.call.sptk b0=xen_bsw1;; mov b0=r2;;
+#else
        MINSTATE_END_SAVE_MIN_VIRT              // switch to bank 1
+#endif
+#ifdef CONFIG_XEN
+       movl r3=XSI_PSR_IC
+       mov r16=1
+       ;;
+       st4 [r3]=r16,XSI_PSR_I-XSI_PSR_IC       // vpsr.ic = 1
+       ;;
+(p15)  st4 [r3]=r16,XSI_PEND-XSI_PSR_I         // if (p15) vpsr.i = 1
+       mov r16=r0
+       ;;
+(p15)  ld4 r16=[r3]                            // if
(pending_interrupts)
+       ;;
+       cmp.ne  p6,p0=r16,r0
+       ;;
+(p6)   ssm     psr.i                           //   do a real ssm psr.i
+       ;;
+       mov r3=NR_syscalls - 1
+       ;;
+#else
        ssm psr.ic | PSR_DEFAULT_BITS
        ;;
        srlz.i                                  // guarantee that
interruption collection is on
        mov r3=NR_syscalls - 1
        ;;
 (p15)  ssm psr.i                               // restore psr.i
+#endif
        // p10==true means out registers are more than 8 or r15's Nat is
true
 (p10)  br.cond.spnt.many ia64_ret_from_syscall
        ;;
@@ -773,7 +900,14 @@
        mov r31=pr              // prepare to save predicates
        ;;
        SAVE_MIN_WITH_COVER     // uses r31; defines r2 and r3
+#ifdef CONFIG_XEN
+       movl r3=XSI_PSR_IC
+       mov r14=1
+       ;;
+       st4 [r3]=r14
+#else
        ssm psr.ic | PSR_DEFAULT_BITS
+#endif
        ;;
        adds r3=8,r2            // set up second base pointer for
SAVE_REST
        srlz.i                  // ensure everybody knows psr.ic is back
on
@@ -1521,6 +1655,32 @@
 // 0x7e00 Entry 66 (size 16 bundles) Reserved
        DBG_FAULT(66)
        FAULT(66)
+
+#ifdef CONFIG_XEN
+       /*
+        * There is no particular reason for this code to be here, other
than that
+        * there happens to be space here that would go unused
otherwise.  If this
+        * fault ever gets "unreserved", simply moved the following code
to a more
+        * suitable spot...
+        */
+
+GLOBAL_ENTRY(xen_bsw1)
+       /* FIXME: THIS CODE IS NOT NaT SAFE! */
+       movl r30=XSI_BANKNUM;
+       mov r31=1;;
+       st4 [r30]=r31;
+       movl r30=XSI_BANK1_R16;
+       movl r31=XSI_BANK1_R16+8;;
+       ld8 r16=[r30],16; ld8 r17=[r31],16;;
+       ld8 r18=[r30],16; ld8 r19=[r31],16;;
+       ld8 r20=[r30],16; ld8 r21=[r31],16;;
+       ld8 r22=[r30],16; ld8 r23=[r31],16;;
+       ld8 r24=[r30],16; ld8 r25=[r31],16;;
+       ld8 r26=[r30],16; ld8 r27=[r31],16;;
+       ld8 r28=[r30],16; ld8 r29=[r31],16;;
+       ld8 r30=[r30]; ld8 r31=[r31];;
+       br.ret.sptk.many b0
+#endif
 
        .org ia64_ivt+0x7f00
 
////////////////////////////////////////////////////////////////////////
/////////////////
===== arch/ia64/kernel/minstate.h 1.19 vs 1.22 =====
--- 1.19/arch/ia64/kernel/minstate.h    Wed Jan 26 11:01:31 2005
+++ 1.22/arch/ia64/kernel/minstate.h    Wed May  4 14:16:59 2005
@@ -93,10 +93,122 @@
  *     p15 = TRUE if psr.i is set in cr.ipsr
  *     predicate registers (other than p2, p3, and p15), b6, r3, r14,
r15:
  *             preserved
+ * CONFIG_XEN note: p6/p7 are not preserved
  *
  * Note that psr.ic is NOT turned on by this macro.  This is so that
  * we can pass interruption state as arguments to a handler.
  */
+#ifdef CONFIG_XEN
+#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)
\
+       /*MINSTATE_GET_CURRENT(r16);    /* M (or M;;I) */
\
+       movl r16=XSI_KR0+(IA64_KR_CURRENT*8);;
\
+       ld8 r16=[r16];;
\
+       mov r27=ar.rsc;                 /* M */
\
+       mov r20=r1;                     /* A */
\
+       mov r25=ar.unat;                /* M */
\
+       /* mov r29=cr.ipsr;             /* M */
\
+       movl r29=XSI_IPSR;;
\
+       ld8 r29=[r29];;
\
+       mov r26=ar.pfs;                 /* I */
\
+       /* mov r28=cr.iip;              /* M */
\
+       movl r28=XSI_IIP;;
\
+       ld8 r28=[r28];;
\
+       mov r21=ar.fpsr;                /* M */
\
+       COVER;                  /* B;; (or nothing) */
\
+/*     break.b 0x1fffff;               /* "privified" cover instr */
\
+       ;;
\
+       adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16;
\
+       ;;
\
+       ld1 r17=[r16];                          /* load
current->thread.on_ustack flag */       \
+       st1 [r16]=r0;                           /* clear
current->thread.on_ustack flag */       \
+       adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
\
+       /* switch from user to kernel RBS: */
\
+       ;;
\
+       invala;                         /* M */
\
+       /* SAVE_IFS; /* see xen special handling below */
\
+       cmp.eq pKStk,pUStk=r0,r17;              /* are we in kernel mode
already? */             \
+       ;;
\
+       MINSTATE_START_SAVE_MIN
\
+       adds r17=2*L1_CACHE_BYTES,r1;           /* really: biggest
cache-line size */              \
+       adds r16=PT(CR_IPSR),r1;
\
+       ;;
\
+       lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES;
\
+       st8 [r16]=r29;          /* save cr.ipsr */
\
+       ;;
\
+       lfetch.fault.excl.nt1 [r17];
\
+       tbit.nz p15,p0=r29,IA64_PSR_I_BIT;
\
+       mov r29=b0
\
+       ;;
\
+       adds r16=PT(R8),r1;     /* initialize first base pointer */
\
+       adds r17=PT(R9),r1;     /* initialize second base pointer */
\
+(pKStk)        mov r18=r0;             /* make sure r18 isn't NaT */
\
+       ;;
\
+.mem.offset 0,0; st8.spill [r16]=r8,16;
\
+.mem.offset 8,0; st8.spill [r17]=r9,16;
\
+        ;;
\
+.mem.offset 0,0; st8.spill [r16]=r10,24;
\
+.mem.offset 8,0; st8.spill [r17]=r11,24;
\
+        ;;
\
+       /* xen special handling for possibly lazy cover */
\
+       movl r8=XSI_INCOMPL_REGFR;
\
+       ;;
\
+       ld4 r30=[r8];
\
+       ;;
\
+       cmp.eq  p6,p7=r30,r0;
\
+       ;; /* not sure if this stop bit is necessary */
\
+(p6)   adds r8=XSI_PRECOVER_IFS-XSI_INCOMPL_REGFR,r8;
\
+(p7)   adds r8=XSI_IFS-XSI_INCOMPL_REGFR,r8;
\
+       ;;
\
+       ld8 r30=[r8];
\
+       ;;
\
+       st8 [r16]=r28,16;       /* save cr.iip */
\
+       st8 [r17]=r30,16;       /* save cr.ifs */
\
+(pUStk)        sub r18=r18,r22;        /* r18=RSE.ndirty*8 */
\
+       mov r8=ar.ccv;
\
+       mov r9=ar.csd;
\
+       mov r10=ar.ssd;
\
+       movl r11=FPSR_DEFAULT;   /* L-unit */
\
+       ;;
\
+       st8 [r16]=r25,16;       /* save ar.unat */
\
+       st8 [r17]=r26,16;       /* save ar.pfs */
\
+       shl r18=r18,16;         /* compute ar.rsc to be used for
"loadrs" */                     \
+       ;;
\
+       st8 [r16]=r27,16;       /* save ar.rsc */
\
+(pUStk)        st8 [r17]=r24,16;       /* save ar.rnat */
\
+(pKStk)        adds r17=16,r17;        /* skip over ar_rnat field */
\
+       ;;                      /* avoid RAW on r16 & r17 */
\
+(pUStk)        st8 [r16]=r23,16;       /* save ar.bspstore */
\
+       st8 [r17]=r31,16;       /* save predicates */
\
+(pKStk)        adds r16=16,r16;        /* skip over ar_bspstore field
*/                              \
+       ;;
\
+       st8 [r16]=r29,16;       /* save b0 */
\
+       st8 [r17]=r18,16;       /* save ar.rsc value for "loadrs" */
\
+       cmp.eq pNonSys,pSys=r0,r0       /* initialize pSys=0, pNonSys=1
*/                      \
+       ;;
\
+.mem.offset 0,0; st8.spill [r16]=r20,16;       /* save original r1 */
\
+.mem.offset 8,0; st8.spill [r17]=r12,16;
\
+       adds r12=-16,r1;        /* switch to kernel memory stack (with
16 bytes of scratch) */ \
+       ;;
\
+.mem.offset 0,0; st8.spill [r16]=r13,16;
\
+.mem.offset 8,0; st8.spill [r17]=r21,16;       /* save ar.fpsr */
\
+       /* mov r13=IA64_KR(CURRENT);    /* establish `current' */
\
+       movl r21=XSI_KR0+(IA64_KR_CURRENT*8);;
\
+       ld8 r13=[r21];;
\
+       ;;
\
+.mem.offset 0,0; st8.spill [r16]=r15,16;
\
+.mem.offset 8,0; st8.spill [r17]=r14,16;
\
+       ;;
\
+.mem.offset 0,0; st8.spill [r16]=r2,16;
\
+.mem.offset 8,0; st8.spill [r17]=r3,16;
\
+       ;;
\
+       EXTRA;
\
+       mov r2=b0; br.call.sptk b0=xen_bsw1;; mov b0=r2;
\
+       adds r2=IA64_PT_REGS_R16_OFFSET,r1;
\
+       ;;
\
+       movl r1=__gp;           /* establish kernel global pointer */
\
+       ;;
\
+       /* MINSTATE_END_SAVE_MIN */
+#else
 #define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)
\
        MINSTATE_GET_CURRENT(r16);      /* M (or M;;I) */
\
        mov r27=ar.rsc;                 /* M */
\
@@ -183,6 +295,7 @@
        movl r1=__gp;           /* establish kernel global pointer */
\
        ;;
\
        MINSTATE_END_SAVE_MIN
+#endif
 
 /*
  * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
@@ -248,4 +361,8 @@
 
 #define SAVE_MIN_WITH_COVER    DO_SAVE_MIN(cover, mov r30=cr.ifs,)
 #define SAVE_MIN_WITH_COVER_R19        DO_SAVE_MIN(cover, mov
r30=cr.ifs, mov r15=r19)
+#ifdef CONFIG_XEN
+#define SAVE_MIN               break 0;; /* FIXME: non-cover version
only for ia32 support? */
+#else
 #define SAVE_MIN               DO_SAVE_MIN(     , mov r30=r0, )
+#endif
===== arch/ia64/kernel/pal.S 1.10 vs 1.12 =====
--- 1.10/arch/ia64/kernel/pal.S Fri Jul  2 05:27:05 2004
+++ 1.12/arch/ia64/kernel/pal.S Tue Apr 26 17:11:17 2005
@@ -74,6 +74,21 @@
        .body
        mov r30 = in2
 
+#ifdef CONFIG_XEN
+       // this is low priority for paravirtualization, but is called
+       // from the idle loop so confuses privop counting
+       movl r31=XSI_PSR_IC
+       ;;
+(p6)   st8 [r31]=r0
+       ;;
+(p7)   adds r31=XSI_PSR_I-XSI_PSR_IC,r31
+       ;;
+(p7)   st4 [r31]=r0
+       ;;
+       mov r31 = in3
+       mov b7 = loc2
+       ;;
+#else
 (p6)   rsm psr.i | psr.ic
        mov r31 = in3
        mov b7 = loc2
@@ -81,6 +96,7 @@
 (p7)   rsm psr.i
        ;;
 (p6)   srlz.i
+#endif
        mov rp = r8
        br.cond.sptk.many b7
 1:     mov psr.l = loc3
===== arch/ia64/kernel/setup.c 1.86 vs edited =====
--- 1.86/arch/ia64/kernel/setup.c       Wed Feb  2 13:21:27 2005
+++ edited/arch/ia64/kernel/setup.c     Wed May  4 11:16:45 2005
@@ -270,6 +270,11 @@
 static inline int __init
 early_console_setup (char *cmdline)
 {
+#ifdef CONFIG_XEN
+       extern struct console hpsim_cons;
+       register_console(&hpsim_cons);
+       return 0;
+#else
 #ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
        {
                extern int sn_serial_console_early_setup(void);
@@ -287,6 +292,7 @@
 #endif
 
        return -1;
+#endif
 }
 
 static inline void
===== drivers/acpi/motherboard.c 1.6 vs 1.7 =====
--- 1.6/drivers/acpi/motherboard.c      Wed Nov 10 15:57:35 2004
+++ 1.7/drivers/acpi/motherboard.c      Fri Apr 22 16:47:41 2005
@@ -120,6 +120,9 @@
 static void __init
 acpi_reserve_resources (void)
 {
+#ifdef CONFIG_XEN
+       if (!acpi_gbl_FADT) return;
+#endif
        if (acpi_gbl_FADT->xpm1a_evt_blk.address && acpi_gbl_FADT->pm1_evt_len)
                request_region(acpi_gbl_FADT->xpm1a_evt_blk.address, 
                        acpi_gbl_FADT->pm1_evt_len, "PM1a_EVT_BLK");
===== include/asm-ia64/ia32.h 1.31 vs 1.32 =====
--- 1.31/include/asm-ia64/ia32.h        Wed Oct  6 23:59:48 2004
+++ 1.32/include/asm-ia64/ia32.h        Wed May  4 14:16:59 2005
@@ -19,6 +19,10 @@
 extern int ia32_exception (struct pt_regs *regs, unsigned long isr);
 extern int ia32_intercept (struct pt_regs *regs, unsigned long isr);
 extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs);
+#ifdef CONFIG_XEN
+extern unsigned long xen_get_eflag(void);
+extern void xen_set_eflag(unsigned long);
+#endif
 
 # endif /* !CONFIG_IA32_SUPPORT */
 
===== include/asm-ia64/processor.h 1.72 vs 1.75 =====
--- 1.72/include/asm-ia64/processor.h   Wed Jan 26 11:01:41 2005
+++ 1.75/include/asm-ia64/processor.h   Wed May  4 14:16:59 2005
@@ -695,4 +695,80 @@
 
 #endif /* !__ASSEMBLY__ */
 
+#ifdef CONFIG_XEN
+
+/* Xen shared-info base and offsets */
+#define        XSI_BASE                        0xf100000000000000
+/* derived from xen/include/public/arch-ia64.h arch_vcpu_info_t */
+#define        XSI_IPSR_OFS                    0x08
+#define        XSI_IIP_OFS                     0x10
+#define        XSI_IFS_OFS                     0x18
+#define        XSI_PRECOVER_IFS_OFS            0x20
+#define        XSI_ISR_OFS                     0x28
+#define        XSI_IFA_OFS                     0x30
+#define        XSI_IIPA_OFS                    0x38
+#define        XSI_IIM_OFS                     0x40
+#define        XSI_TPR_OFS                     0x50
+#define        XSI_IHA_OFS                     0x58
+#define        XSI_PSR_IC_OFS                  0x88
+#define        XSI_PSR_I_OFS                   0x8c
+#define        XSI_PEND_OFS                    0x90
+#define        XSI_INCOMPL_REGFR_OFS           0x94
+#define        XSI_DELIV_MASK0_OFS             0x98
+#define        XSI_METAPHYS_OFS                0xb8
+#define        XSI_BANKNUM_OFS                 0xbc
+#define        XSI_BANK0_R16_OFS               0xc0
+#define        XSI_BANK1_R16_OFS               0x140
+#define        XSI_RR0_OFS                     0x1c0
+#define        XSI_KR0_OFS                     0x200
+
+#define        XSI_IPSR                        (XSI_BASE+XSI_IPSR_OFS)
+#define        XSI_IIP                         (XSI_BASE+XSI_IIP_OFS)
+#define        XSI_IFS                         (XSI_BASE+XSI_IFS_OFS)
+#define        XSI_PRECOVER_IFS                (XSI_BASE+XSI_PRECOVER_IFS_OFS)
+#define        XSI_ISR                         (XSI_BASE+XSI_ISR_OFS)
+#define        XSI_IFA                         (XSI_BASE+XSI_IFA_OFS)
+#define        XSI_IIPA                        (XSI_BASE+XSI_IIPA_OFS)
+#define        XSI_IIM                         (XSI_BASE+XSI_IIM_OFS)
+#define        XSI_TPR                         (XSI_BASE+XSI_TPR_OFS)
+#define        XSI_IHA                         (XSI_BASE+XSI_IHA_OFS)
+#define        XSI_PSR_IC                      (XSI_BASE+XSI_PSR_IC_OFS)
+#define        XSI_PSR_I                       (XSI_BASE+XSI_PSR_I_OFS)
+#define        XSI_PEND                        (XSI_BASE+XSI_PEND_OFS)
+#define        XSI_INCOMPL_REGFR               (XSI_BASE+XSI_INCOMPL_REGFR_OFS)
+#define        XSI_DELIV_MASK0                 (XSI_BASE+XSI_DELIV_MASK0_OFS)
+#define        XSI_METAPHYS                    (XSI_BASE+XSI_METAPHYS_OFS)
+#define        XSI_BANKNUM                     (XSI_BASE+XSI_BANKNUM_OFS)
+#define        XSI_BANK0_R16                   (XSI_BASE+XSI_BANK0_R16_OFS)
+#define        XSI_BANK1_R16                   (XSI_BASE+XSI_BANK1_R16_OFS)
+#define        XSI_RR0                         (XSI_BASE+XSI_RR0_OFS)
+#define        XSI_KR0                         (XSI_BASE+XSI_KR0_OFS)
+
+#ifndef __ASSEMBLY__
+#undef ia64_get_kr
+#define ia64_get_kr(regnum)                                    \
+({                                                             \
+       ((unsigned long *)(XSI_KR0))[regnum];                   \
+})
+
+#undef ia64_thash
+extern unsigned long xen_thash(unsigned long addr);
+#define        ia64_thash(addr)                xen_thash(addr)
+
+#undef ia64_fc
+extern void xen_fc(unsigned long addr);
+#define        ia64_fc(addr)                   xen_fc(addr)
+
+#undef ia64_get_cpuid
+extern unsigned long xen_get_cpuid(int index);
+#define        ia64_get_cpuid(i)               xen_get_cpuid(i)
+
+#undef ia64_get_pmd
+extern unsigned long xen_get_pmd(int index);
+#define        ia64_get_pmd(i)                 xen_get_pmd(i)
+
+/* FIXME: may need to redefine ia64_set/is_local_fpu_owner(t) */
+#endif /* !__ASSEMBLY__ */
+#endif /* CONFIG_XEN */
+
 #endif /* _ASM_IA64_PROCESSOR_H */
===== include/asm-ia64/system.h 1.48 vs 1.49 =====
--- 1.48/include/asm-ia64/system.h      Tue Jan  4 19:48:18 2005
+++ 1.49/include/asm-ia64/system.h      Fri Apr 22 16:52:51 2005
@@ -181,6 +181,64 @@
        (__ia64_id_flags & IA64_PSR_I) == 0;    \
 })
 
+
+#ifdef CONFIG_XEN
+
+#include <asm/processor.h>
+
+#define ia64_rsm_psr_i()                       \
+do {                                           \
+       *(int *)(XSI_PSR_I) = 0;                \
+} while (0)
+
+#define ia64_ssm_psr_i()                       \
+do {                                           \
+       int old = *(int *)(XSI_PSR_I);          \
+       *(int *)(XSI_PSR_I) = 1;                \
+       if (!old && *(int *)(XSI_PEND))         \
+               ia64_ssm(IA64_PSR_I);           \
+} while (0)
+
+#undef local_irq_enable
+#define local_irq_enable() ia64_ssm_psr_i()
+
+#      undef __local_irq_save
+#define __local_irq_save(x)                    \
+do {                                           \
+       (x) = *(int *)(XSI_PSR_I) ?             \
+                       IA64_PSR_I : 0;         \
+       ia64_rsm_psr_i();                       \
+} while (0)
+
+#undef __local_irq_disable
+#define __local_irq_disable()                  \
+do {                                           \
+       ia64_rsm_psr_i();                       \
+} while (0)
+
+#undef __local_irq_restore
+#define __local_irq_restore(x)                 \
+do {                                           \
+       if (x & IA64_PSR_I) ia64_ssm_psr_i();   \
+       else ia64_rsm_psr_i();                  \
+} while (0)
+
+#undef local_save_flags
+#define local_save_flags(flags)                        \
+do {                                           \
+       (flags) = *(int *)(XSI_PSR_I) ?         \
+                       IA64_PSR_I : 0;         \
+} while (0)
+
+#undef local_irq_save
+#undef local_irq_disable
+#undef local_irq_restore
+# define local_irq_save(x)     __local_irq_save(x)
+# define local_irq_disable()   __local_irq_disable()
+# define local_irq_restore(x)  __local_irq_restore(x)
+
+#endif
+
 #ifdef __KERNEL__
 
 #define prepare_to_switch()    do { } while(0)

_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel

<Prev in Thread] Current Thread [Next in Thread>