WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [IA64] Clean up warnings related to VTi code. (C files)

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [IA64] Clean up warnings related to VTi code. (C files)
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 02 Mar 2006 12:16:27 +0000
Delivery-date: Thu, 02 Mar 2006 12:20:17 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 0f59ace5442c9e8365e974cdbfb6e0783e74ea1b
# Parent  d44e8ace51a33fe966f037fe04a5446096402b28
[IA64] Clean up warnings related to VTi code. (C files)

This patch removes most of the warnings, such as incompatible assignments,
unused variables, incorrect return value types of some functions, and so on.

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Signed-off-by: Kevin Tian    <kevin.tian@xxxxxxxxx>

diff -r d44e8ace51a3 -r 0f59ace5442c xen/arch/ia64/vmx/mm.c
--- a/xen/arch/ia64/vmx/mm.c    Fri Feb 24 18:08:51 2006
+++ b/xen/arch/ia64/vmx/mm.c    Fri Feb 24 20:29:18 2006
@@ -133,7 +133,7 @@
             if (ovl) {
                   // generate MCA.
                 panic("Tlb conflict!!");
-                return;
+                return -1;
             }
             thash_purge_and_insert(hcb, &entry);
         }else if(cmd == MMU_MACHPHYS_UPDATE){
diff -r d44e8ace51a3 -r 0f59ace5442c xen/arch/ia64/vmx/mmio.c
--- a/xen/arch/ia64/vmx/mmio.c  Fri Feb 24 18:08:51 2006
+++ b/xen/arch/ia64/vmx/mmio.c  Fri Feb 24 20:29:18 2006
@@ -32,6 +32,7 @@
 #include <public/hvm/ioreq.h>
 #include <asm/mm.h>
 #include <asm/vmx.h>
+#include <public/event_channel.h>
 
 /*
 struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
@@ -135,7 +136,6 @@
     struct vcpu *v = current;
     vcpu_iodata_t *vio;
     ioreq_t *p;
-    unsigned long addr;
 
     vio = get_vio(v->domain, v->vcpu_id);
     if (vio == 0) {
@@ -168,7 +168,6 @@
     struct vcpu *v = current;
     vcpu_iodata_t *vio;
     ioreq_t *p;
-    unsigned long addr;
 
     vio = get_vio(v->domain, v->vcpu_id);
     if (vio == 0) {
@@ -406,7 +405,7 @@
 {
     REGS *regs;
     IA64_BUNDLE bundle;
-    int slot, dir, inst_type;
+    int slot, dir=0, inst_type;
     size_t size;
     u64 data, value,post_update, slot1a, slot1b, temp;
     INST64 inst;
diff -r d44e8ace51a3 -r 0f59ace5442c xen/arch/ia64/vmx/pal_emul.c
--- a/xen/arch/ia64/vmx/pal_emul.c      Fri Feb 24 18:08:51 2006
+++ b/xen/arch/ia64/vmx/pal_emul.c      Fri Feb 24 20:29:18 2006
@@ -238,7 +238,6 @@
 static struct ia64_pal_retval
 pal_vm_page_size(VCPU *vcpu){
 }
-
 void
 pal_emul( VCPU *vcpu) {
        UINT64 gr28;
diff -r d44e8ace51a3 -r 0f59ace5442c xen/arch/ia64/vmx/vlsapic.c
--- a/xen/arch/ia64/vmx/vlsapic.c       Fri Feb 24 18:08:51 2006
+++ b/xen/arch/ia64/vmx/vlsapic.c       Fri Feb 24 20:29:18 2006
@@ -47,6 +47,9 @@
 /*
  * Update the checked last_itc.
  */
+
+extern void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
+     UINT64 vector,REGS *regs);
 static void update_last_itc(vtime_t *vtm, uint64_t cur_itc)
 {
     vtm->last_itc = cur_itc;
@@ -483,7 +486,7 @@
 
     if (vector & ~0xff) {
         DPRINTK("vmx_vcpu_pend_interrupt: bad vector\n");
-        return;
+        return -1;
     }
     local_irq_save(spsr);
     ret = test_and_set_bit(vector, &VCPU(vcpu, irr[0]));
@@ -577,7 +580,7 @@
 
 uint64_t guest_read_vivr(VCPU *vcpu)
 {
-    int vec, next, h_inservice;
+    int vec, h_inservice;
     uint64_t  spsr;
 
     local_irq_save(spsr);
@@ -609,7 +612,7 @@
     vmx_reflect_interruption(0,isr,0, 12, regs); // EXT IRQ
 }
 
-vhpi_detection(VCPU *vcpu)
+void vhpi_detection(VCPU *vcpu)
 {
     uint64_t    threshold,vhpi;
     tpr_t       vtpr;
@@ -626,7 +629,7 @@
     }
 }
 
-vmx_vexirq(VCPU *vcpu)
+void vmx_vexirq(VCPU *vcpu)
 {
     static  uint64_t  vexirq_count=0;
 
diff -r d44e8ace51a3 -r 0f59ace5442c xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Fri Feb 24 18:08:51 2006
+++ b/xen/arch/ia64/vmx/vmmu.c  Fri Feb 24 20:29:18 2006
@@ -31,6 +31,7 @@
 #include <asm/hw_irq.h>
 #include <asm/vmx_pal_vsa.h>
 #include <asm/kregs.h>
+#include <xen/irq.h>
 
 /*
  * Architecture ppn is in 4KB unit while XEN
@@ -55,7 +56,7 @@
 u64 get_mfn(domid_t domid, u64 gpfn, u64 pages)
 {
     struct domain *d;
-    u64    i, xen_gppn, xen_mppn, mpfn;
+    u64    xen_gppn, xen_mppn, mpfn;
     
     if ( domid == DOMID_SELF ) {
         d = current->domain;
@@ -178,7 +179,7 @@
     vhpt->vs->tag_func = machine_ttag;
     vhpt->hash = vbase;
     vhpt->hash_sz = VCPU_TLB_SIZE/2;
-    vhpt->cch_buf = (u64)vbase + vhpt->hash_sz;
+    vhpt->cch_buf = (void *)(vbase + vhpt->hash_sz);
     vhpt->cch_sz = (u64)vcur - (u64)vhpt->cch_buf;
     vhpt->recycle_notifier = recycle_message;
     thash_init(vhpt,VCPU_TLB_SHIFT-1);
@@ -212,7 +213,7 @@
     tlb->hash_func = machine_thash;
     tlb->hash = vbase;
     tlb->hash_sz = VCPU_TLB_SIZE/2;
-    tlb->cch_buf = (u64)vbase + tlb->hash_sz;
+    tlb->cch_buf = (void *)((u64)vbase + tlb->hash_sz);
     tlb->cch_sz = (u64)vcur - (u64)tlb->cch_buf;
     tlb->recycle_notifier = recycle_message;
     thash_init(tlb,VCPU_TLB_SHIFT-1);
@@ -249,13 +250,14 @@
     u64     psr;
     thash_data_t    mtlb;
     unsigned int    cl = tlb->cl;
-
+    unsigned long mtlb_ppn; 
     mtlb.ifa = tlb->vadr;
     mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
     //vmx_vcpu_get_rr(d, mtlb.ifa, &vrr.value);
     mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
-    mtlb.ppn = get_mfn(DOMID_SELF,tlb->ppn, 1);
-    if (mtlb.ppn == INVALID_MFN)
+    mtlb.ppn = (unsigned long)get_mfn(DOMID_SELF,tlb->ppn, 1);
+    mtlb_ppn=mtlb.ppn;
+    if (mtlb_ppn == INVALID_MFN)
     panic("Machine tlb insert with invalid mfn number.\n");
 
     psr = ia64_clear_ic();
@@ -291,10 +293,8 @@
 u64 machine_thash(PTA pta, u64 va)
 {
     u64     saved_pta;
-    u64     hash_addr, tag;
+    u64     hash_addr;
     unsigned long psr;
-    struct vcpu *v = current;
-    ia64_rr vrr;
 
     saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
     psr = ia64_clear_ic();
@@ -414,7 +414,7 @@
     data.vadr=PAGEALIGN(ifa,data.ps);
     data.tc = 1;
     data.cl=ISIDE_TLB;
-    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
+    vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
     data.rid = vrr.rid;
     
     sections.tr = 1;
@@ -424,7 +424,7 @@
     while (ovl) {
         // generate MCA.
         panic("Tlb conflict!!");
-        return;
+        return IA64_FAULT;
     }
     thash_purge_and_insert(hcb, &data);
     return IA64_NO_FAULT;
@@ -447,7 +447,7 @@
     data.vadr=PAGEALIGN(ifa,data.ps);
     data.tc = 1;
     data.cl=DSIDE_TLB;
-    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
+    vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
     data.rid = vrr.rid;
     sections.tr = 1;
     sections.tc = 0;
@@ -456,7 +456,7 @@
     if (ovl) {
           // generate MCA.
         panic("Tlb conflict!!");
-        return;
+        return IA64_FAULT;
     }
     thash_purge_and_insert(hcb, &data);
     return IA64_NO_FAULT;
@@ -472,7 +472,7 @@
     ia64_rr vrr;
     u64          preferred_size;
 
-    vmx_vcpu_get_rr(vcpu, va, &vrr);
+    vmx_vcpu_get_rr(vcpu, va, (UINT64 *)&vrr);
     hcb = vmx_vcpu_get_vtlb(vcpu);
     va = PAGEALIGN(va,vrr.ps);
     preferred_size = PSIZE(vrr.ps);
@@ -493,7 +493,7 @@
     data.vadr=PAGEALIGN(ifa,data.ps);
     data.tc = 0;
     data.cl=ISIDE_TLB;
-    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
+    vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
     data.rid = vrr.rid;
     sections.tr = 1;
     sections.tc = 0;
@@ -502,7 +502,7 @@
     if (ovl) {
         // generate MCA.
         panic("Tlb conflict!!");
-        return;
+        return IA64_FAULT;
     }
     sections.tr = 0;
     sections.tc = 1;
@@ -526,7 +526,7 @@
     data.vadr=PAGEALIGN(ifa,data.ps);
     data.tc = 0;
     data.cl=DSIDE_TLB;
-    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
+    vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
     data.rid = vrr.rid;
     sections.tr = 1;
     sections.tc = 0;
@@ -535,7 +535,7 @@
     while (ovl) {
         // generate MCA.
         panic("Tlb conflict!!");
-        return;
+        return IA64_FAULT;
     }
     sections.tr = 0;
     sections.tc = 1;
@@ -578,7 +578,6 @@
     thash_cb_t  *hcb;
     ia64_rr vrr;
     search_section_t sections;
-    thash_data_t data, *ovl;
     hcb = vmx_vcpu_get_vtlb(vcpu);
     vrr=vmx_vcpu_rr(vcpu,vadr);
     sections.tr = 0;
@@ -616,7 +615,7 @@
 {
     PTA vpta;
     ia64_rr vrr;
-    u64 vhpt_offset,tmp;
+    u64 vhpt_offset;
     vmx_vcpu_get_pta(vcpu, &vpta.val);
     vrr=vmx_vcpu_rr(vcpu, vadr);
     if(vpta.vf){
diff -r d44e8ace51a3 -r 0f59ace5442c xen/arch/ia64/vmx/vmx_hypercall.c
--- a/xen/arch/ia64/vmx/vmx_hypercall.c Fri Feb 24 18:08:51 2006
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Fri Feb 24 20:29:18 2006
@@ -31,6 +31,11 @@
 #include <xen/mm.h>
 #include <xen/multicall.h>
 #include <xen/hypercall.h>
+#include <public/version.h>
+#include <asm/dom_fw.h>
+#include <xen/domain.h>
+
+extern long do_sched_op(int cmd, unsigned long arg);
 
 
 void hyper_not_support(void)
@@ -48,7 +53,7 @@
     vcpu_get_gr_nat(vcpu,17,&r33);
     vcpu_get_gr_nat(vcpu,18,&r34);
     vcpu_get_gr_nat(vcpu,19,&r35);
-    ret=vmx_do_mmu_update((mmu_update_t*)r32,r33,r34,r35);
+    ret=vmx_do_mmu_update((mmu_update_t*)r32,r33,(u64 *)r34,r35);
     vcpu_set_gr(vcpu, 8, ret, 0);
     vmx_vcpu_increment_iip(vcpu);
 }
@@ -162,7 +167,6 @@
 
 static int do_lock_page(VCPU *vcpu, u64 va, u64 lock)
 {
-    int i;
     ia64_rr rr;
     thash_cb_t *hcb;
     hcb = vmx_vcpu_get_vtlb(vcpu);
@@ -207,7 +211,7 @@
         * to xen heap. Or else, leave to domain itself to decide.
         */
        if (likely(IS_XEN_HEAP_FRAME(virt_to_page(o_info))))
-               free_xenheap_page(o_info);
+               free_xenheap_page((void *)o_info);
     } else
         memset(d->shared_info, 0, PAGE_SIZE);
     return 0;
diff -r d44e8ace51a3 -r 0f59ace5442c xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Fri Feb 24 18:08:51 2006
+++ b/xen/arch/ia64/vmx/vmx_init.c      Fri Feb 24 20:29:18 2006
@@ -95,7 +95,7 @@
        if (!(vp_env_info & VP_OPCODE))
                printk("WARNING: no opcode provided from hardware(%lx)!!!\n", 
vp_env_info);
        vm_order = get_order(buffer_size);
-       printk("vm buffer size: %d, order: %d\n", buffer_size, vm_order);
+       printk("vm buffer size: %ld, order: %ld\n", buffer_size, vm_order);
 
        vmx_enabled = 1;
 no_vti:
@@ -113,7 +113,7 @@
        u64 status, tmp_base;
 
        if (!vm_buffer) {
-               vm_buffer = alloc_xenheap_pages(vm_order);
+               vm_buffer = (unsigned long)alloc_xenheap_pages(vm_order);
                ASSERT(vm_buffer);
                printk("vm_buffer: 0x%lx\n", vm_buffer);
        }
@@ -125,7 +125,7 @@
 
        if (status != PAL_STATUS_SUCCESS) {
                printk("ia64_pal_vp_init_env failed.\n");
-               return -1;
+               return ;
        }
 
        if (!__vsa_base)
@@ -189,7 +189,7 @@
        /* ia64_ivt is function pointer, so need this tranlation */
        ivt_base = (u64) &vmx_ia64_ivt;
        printk("ivt_base: 0x%lx\n", ivt_base);
-       ret = ia64_pal_vp_create(vpd, ivt_base, 0);
+       ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)ivt_base, 0);
        if (ret != PAL_STATUS_SUCCESS)
                panic("ia64_pal_vp_create failed. \n");
 }
@@ -198,11 +198,10 @@
 void
 vmx_save_state(struct vcpu *v)
 {
-       u64 status, psr;
-       u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
+       u64 status;
 
        /* FIXME: about setting of pal_proc_vector... time consuming */
-       status = ia64_pal_vp_save(v->arch.privregs, 0);
+       status = ia64_pal_vp_save((u64 *)v->arch.privregs, 0);
        if (status != PAL_STATUS_SUCCESS)
                panic("Save vp status failed\n");
 
@@ -224,10 +223,7 @@
 void
 vmx_load_state(struct vcpu *v)
 {
-       u64 status, psr;
-       u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
-       u64 pte_xen, pte_vhpt;
-       int i;
+       u64 status;
 
        status = ia64_pal_vp_restore(v->arch.privregs, 0);
        if (status != PAL_STATUS_SUCCESS)
@@ -379,7 +375,7 @@
 
        ASSERT(d != dom0); /* only for non-privileged vti domain */
        d->arch.vmx_platform.shared_page_va =
-               __va(__gpa_to_mpa(d, IO_PAGE_START));
+               (unsigned long)__va(__gpa_to_mpa(d, IO_PAGE_START));
        sp = get_sp(d);
        //memset((char *)sp,0,PAGE_SIZE);
        /* TEMP */
diff -r d44e8ace51a3 -r 0f59ace5442c xen/arch/ia64/vmx/vmx_interrupt.c
--- a/xen/arch/ia64/vmx/vmx_interrupt.c Fri Feb 24 18:08:51 2006
+++ b/xen/arch/ia64/vmx/vmx_interrupt.c Fri Feb 24 20:29:18 2006
@@ -86,7 +86,7 @@
 
 }
 
-int
+void
 inject_guest_interruption(VCPU *vcpu, u64 vec)
 {
     u64 viva;
@@ -334,6 +334,7 @@
  *  @ Nat Consumption Vector
  * Refer to SDM Vol2 Table 5-6 & 8-1
  */
+
 static void
 ir_nat_page_consumption (VCPU *vcpu, u64 vadr)
 {
diff -r d44e8ace51a3 -r 0f59ace5442c xen/arch/ia64/vmx/vmx_irq_ia64.c
--- a/xen/arch/ia64/vmx/vmx_irq_ia64.c  Fri Feb 24 18:08:51 2006
+++ b/xen/arch/ia64/vmx/vmx_irq_ia64.c  Fri Feb 24 20:29:18 2006
@@ -24,6 +24,8 @@
 #include <asm/pgtable.h>
 #include <asm/system.h>
 
+#include <asm/vcpu.h>
+#include <xen/irq.h>
 #ifdef CONFIG_SMP
 #   define IS_RESCHEDULE(vec)   (vec == IA64_IPI_RESCHEDULE)
 #else
@@ -126,6 +128,6 @@
         * come through until ia64_eoi() has been done.
         */
        vmx_irq_exit();
-       if ( wake_dom0 && current != dom0 ) 
+       if (current && wake_dom0 != dom0 ) 
                vcpu_wake(dom0->vcpu[0]);
 }
diff -r d44e8ace51a3 -r 0f59ace5442c xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c  Fri Feb 24 18:08:51 2006
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c  Fri Feb 24 20:29:18 2006
@@ -61,9 +61,9 @@
      *  data access can be satisfied though itlb entry for physical
      *  emulation is hit.
          */
-    SW_SELF,0,  0,  SW_NOP, 0,  0,  0,  SW_P2V,
-    0,  0,  0,  0,  0,  0,  0,  0,
-    0,  0,  0,  0,  0,  0,  0,  0,
+    {SW_SELF,0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
+    {0,  0,  0,  0,  0,  0,  0,  0},
+    {0,  0,  0,  0,  0,  0,  0,  0},
     /*
      *  (it,dt,rt): (0,1,1) -> (1,1,1)
      *  This kind of transition is found in OSYa.
@@ -71,17 +71,17 @@
      *  (it,dt,rt): (0,1,1) -> (0,0,0)
      *  This kind of transition is found in OSYa
      */
-    SW_NOP, 0,  0,  SW_SELF,0,  0,  0,  SW_P2V,
+    {SW_NOP, 0,  0,  SW_SELF,0,  0,  0,  SW_P2V},
     /* (1,0,0)->(1,1,1) */
-    0,  0,  0,  0,  0,  0,  0,  SW_P2V,
+    {0,  0,  0,  0,  0,  0,  0,  SW_P2V},
     /*
          *  (it,dt,rt): (1,0,1) -> (1,1,1)
          *  This kind of transition usually occurs when Linux returns
      *  from the low level TLB miss handlers.
          *  (see "arch/ia64/kernel/ivt.S")
          */
-    0,  0,  0,  0,  0,  SW_SELF,0,  SW_P2V,
-    0,  0,  0,  0,  0,  0,  0,  0,
+    {0,  0,  0,  0,  0,  SW_SELF,0,  SW_P2V},
+    {0,  0,  0,  0,  0,  0,  0,  0},
     /*
          *  (it,dt,rt): (1,1,1) -> (1,0,1)
          *  This kind of transition usually occurs in Linux low level
@@ -94,67 +94,18 @@
      *  (1,1,1)->(1,0,0)
      */
 
-    SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF,
+    {SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF}
 };
 
 void
 physical_mode_init(VCPU *vcpu)
 {
-    UINT64 psr;
-    struct domain * d = vcpu->domain;
-
     vcpu->arch.old_rsc = 0;
     vcpu->arch.mode_flags = GUEST_IN_PHY;
 }
 
 extern u64 get_mfn(domid_t domid, u64 gpfn, u64 pages);
-#if 0
-void
-physical_itlb_miss_domn(VCPU *vcpu, u64 vadr)
-{
-    u64 psr;
-    IA64_PSR vpsr;
-    u64 mppn,gppn,mpp1,gpp1;
-    struct domain *d;
-    static u64 test=0;
-    d=vcpu->domain;
-    if(test)
-        panic("domn physical itlb miss happen\n");
-    else
-        test=1;
-    vpsr.val=vmx_vcpu_get_psr(vcpu);
-    gppn=(vadr<<1)>>13;
-    mppn = get_mfn(DOMID_SELF,gppn,1);
-    mppn=(mppn<<12)|(vpsr.cpl<<7);
-    gpp1=0;
-    mpp1 = get_mfn(DOMID_SELF,gpp1,1);
-    mpp1=(mpp1<<12)|(vpsr.cpl<<7);
-//    if(vadr>>63)
-//        mppn |= PHY_PAGE_UC;
-//    else
-//        mppn |= PHY_PAGE_WB;
-    mpp1 |= PHY_PAGE_WB;
-    psr=ia64_clear_ic();
-    ia64_itr(0x1, IA64_TEMP_PHYSICAL, vadr&(~0xfff), (mppn|PHY_PAGE_WB), 24);
-    ia64_srlz_i();
-    ia64_itr(0x2, IA64_TEMP_PHYSICAL, vadr&(~0xfff), (mppn|PHY_PAGE_WB), 24);
-    ia64_stop();
-    ia64_srlz_i();
-    ia64_itr(0x1, IA64_TEMP_PHYSICAL+1, vadr&(~0x8000000000000fffUL), 
(mppn|PHY_PAGE_WB), 24);
-    ia64_srlz_i();
-    ia64_itr(0x2, IA64_TEMP_PHYSICAL+1, vadr&(~0x8000000000000fffUL), 
(mppn|PHY_PAGE_WB), 24);
-    ia64_stop();
-    ia64_srlz_i();
-    ia64_itr(0x1, IA64_TEMP_PHYSICAL+2, gpp1&(~0xfff), mpp1, 28);
-    ia64_srlz_i();
-    ia64_itr(0x2, IA64_TEMP_PHYSICAL+2, gpp1&(~0xfff), mpp1, 28);
-    ia64_stop();
-    ia64_srlz_i();
-    ia64_set_psr(psr);
-    ia64_srlz_i();
-    return;
-}
-#endif
+extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *);
 
 void
 physical_itlb_miss_dom0(VCPU *vcpu, u64 vadr)
@@ -404,7 +355,7 @@
         switch_mm_mode (vcpu, old_psr, new_psr);
     }
 
-    return 0;
+    return;
 }
 
 
diff -r d44e8ace51a3 -r 0f59ace5442c xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c   Fri Feb 24 18:08:51 2006
+++ b/xen/arch/ia64/vmx/vmx_process.c   Fri Feb 24 20:29:18 2006
@@ -50,6 +50,7 @@
 #include <asm/vmx_mm_def.h>
 #include <asm/vmx_phy_mode.h>
 #include <xen/mm.h>
+#include <asm/vmx_pal.h>
 /* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
 #define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
 
@@ -65,7 +66,7 @@
     0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,0x6a00,
     0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,0x7300,0x7400,
     0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,0x7c00,0x7d00,0x7e00,
-    0x7f00,
+    0x7f00
 };
 
 
@@ -74,7 +75,7 @@
      UINT64 vector,REGS *regs)
 {
     VCPU *vcpu = current;
-    UINT64 viha,vpsr = vmx_vcpu_get_psr(vcpu);
+    UINT64 vpsr = vmx_vcpu_get_psr(vcpu);
     if(!(vpsr&IA64_PSR_IC)&&(vector!=5)){
         panic("Guest nested fault!");
     }
@@ -92,10 +93,8 @@
 IA64FAULT
 vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long 
isr, unsigned long iim)
 {
-       static int first_time = 1;
        struct domain *d = (struct domain *) current->domain;
-       struct vcpu *v = (struct domain *) current;
-       extern unsigned long running_on_sim;
+       struct vcpu *v = (struct vcpu *) current;
        unsigned long i, sal_param[8];
 
 #if 0
@@ -160,12 +159,12 @@
                    case FW_HYPERCALL_EFI_GET_TIME:
                        {
                        unsigned long *tv, *tc;
-                       vcpu_get_gr_nat(v, 32, &tv);
-                       vcpu_get_gr_nat(v, 33, &tc);
+                       vcpu_get_gr_nat(v, 32, (u64 *)&tv);
+                       vcpu_get_gr_nat(v, 33, (u64 *)&tc);
                        printf("efi_get_time(%p,%p) called...",tv,tc);
-                       tv = __va(translate_domain_mpaddr(tv));
-                       if (tc) tc = __va(translate_domain_mpaddr(tc));
-                       regs->r8 = (*efi.get_time)(tv,tc);
+                       tv = __va(translate_domain_mpaddr((unsigned long)tv));
+                       if (tc) tc = __va(translate_domain_mpaddr((unsigned 
long)tc));
+                       regs->r8 = (*efi.get_time)((efi_time_t 
*)tv,(efi_time_cap_t *)tc);
                        printf("and returns %lx\n",regs->r8);
                        }
                        break;
@@ -200,12 +199,13 @@
                        die_if_kernel("bug check", regs, iim);
                vmx_reflect_interruption(ifa,isr,iim,11,regs);
     }
+    return IA64_NO_FAULT;
 }
 
 
 void save_banked_regs_to_vpd(VCPU *v, REGS *regs)
 {
-    unsigned long i, * src,* dst, *sunat, *dunat;
+    unsigned long i=0UL, * src,* dst, *sunat, *dunat;
     IA64_PSR vpsr;
     src=&regs->r16;
     sunat=&regs->eml_unat;
@@ -287,16 +287,17 @@
 }
 
 /* We came here because the H/W VHPT walker failed to find an entry */
-void vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs)
+IA64FAULT
+vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs)
 {
     IA64_PSR vpsr;
-    CACHE_LINE_TYPE type;
+    CACHE_LINE_TYPE type=ISIDE_TLB;
     u64 vhpt_adr, gppa;
     ISR misr;
     ia64_rr vrr;
 //    REGS *regs;
-    thash_cb_t *vtlb, *vhpt;
-    thash_data_t *data, me;
+    thash_cb_t *vtlb;
+    thash_data_t *data;
     VCPU *v = current;
     vtlb=vmx_vcpu_get_vtlb(v);
 #ifdef  VTLB_DEBUG
@@ -316,7 +317,7 @@
     if(is_physical_mode(v)&&(!(vadr<<1>>62))){
         if(vec==1){
             physical_itlb_miss(v, vadr);
-            return;
+            return IA64_FAULT;
         }
         if(vec==2){
             
if(v->domain!=dom0&&__gpfn_is_io(v->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
@@ -324,7 +325,7 @@
             }else{
                 physical_dtlb_miss(v, vadr);
             }
-            return;
+            return IA64_FAULT;
         }
     }
     vrr = vmx_vcpu_rr(v, vadr);
@@ -334,7 +335,7 @@
 
 //    prepare_if_physical_mode(v);
 
-    if(data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type)){
+    if((data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type))!=0){
        gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
         if(v->domain!=dom0&&type==DSIDE_TLB && 
__gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
             emulate_io_inst(v, gppa, data->ma);
@@ -428,6 +429,5 @@
             }
         }
     }
-}
-
-
+    return IA64_NO_FAULT;
+}
diff -r d44e8ace51a3 -r 0f59ace5442c xen/arch/ia64/vmx/vmx_utility.c
--- a/xen/arch/ia64/vmx/vmx_utility.c   Fri Feb 24 18:08:51 2006
+++ b/xen/arch/ia64/vmx/vmx_utility.c   Fri Feb 24 20:29:18 2006
@@ -307,9 +307,8 @@
             }
             return 0;
     }
-
-
     panic ("Unsupported CR");
+    return 0;
 }
 
 
@@ -600,7 +599,6 @@
 
 void set_isr_for_priv_fault(VCPU *vcpu, u64 non_access)
 {
-    u64 value;
     ISR isr;
 
     isr.val = set_isr_ei_ni(vcpu);
diff -r d44e8ace51a3 -r 0f59ace5442c xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c      Fri Feb 24 18:08:51 2006
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c      Fri Feb 24 20:29:18 2006
@@ -35,7 +35,7 @@
 #include <asm/gcc_intrin.h>
 #include <asm/vmx_mm_def.h>
 #include <asm/vmx.h>
-
+#include <asm/vmx_phy_mode.h>
 //u64  fire_itc;
 //u64  fire_itc2;
 //u64  fire_itm;
@@ -66,7 +66,6 @@
 #include <asm/hw_irq.h>
 #include <asm/vmx_pal_vsa.h>
 #include <asm/kregs.h>
-
 //unsigned long last_guest_rsm = 0x0;
 struct guest_psr_bundle{
     unsigned long ip;
@@ -138,7 +137,7 @@
     regs->cr_ipsr = (regs->cr_ipsr & mask ) | ( value & (~mask) );
 
     check_mm_mode_switch(vcpu, old_psr, new_psr);
-    return IA64_NO_FAULT;
+    return ;
 }
 
 /* Adjust slot both in pt_regs and vpd, upon vpsr.ri which
diff -r d44e8ace51a3 -r 0f59ace5442c xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c      Fri Feb 24 18:08:51 2006
+++ b/xen/arch/ia64/vmx/vmx_virt.c      Fri Feb 24 20:29:18 2006
@@ -30,8 +30,9 @@
 #include <asm/vmmu.h>
 #include <asm/vmx_mm_def.h>
 #include <asm/smp.h>
-
+#include <asm/vmx.h>
 #include <asm/virt_event.h>
+#include <asm/vmx_phy_mode.h>
 extern UINT64 privop_trace;
 
 void
@@ -137,6 +138,11 @@
                 *cause=EVENT_BSW_1;
             }
         }
+        case I:
+        case F:
+        case L:
+        case ILLEGAL:
+        break;
     }
 }
 
@@ -157,7 +163,6 @@
 {
     UINT64 tgt = inst.M33.r1;
     UINT64 val;
-    IA64FAULT fault;
 
 /*
     if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
@@ -176,7 +181,6 @@
 IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
 {
     UINT64 val;
-    IA64FAULT fault;
     if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
        panic(" get_psr nat bit fault\n");
 
@@ -255,7 +259,6 @@
 IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
 {
     u64 r2,r3;
-    ISR isr;
     IA64_PSR  vpsr;
 
     vpsr.val=vmx_vcpu_get_psr(vcpu);
@@ -267,6 +270,7 @@
     }
     
if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
 #ifdef  VMAL_NO_FAULT_CHECK
+        ISR isr;
         set_isr_reg_nat_consumption(vcpu,0,0);
         rnat_comsumption(vcpu);
         return IA64_FAULT;
@@ -287,11 +291,11 @@
 IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
 {
     u64 r3;
+    IA64_PSR  vpsr;
+
+    vpsr.val=vmx_vcpu_get_psr(vcpu);
+#ifdef  VMAL_NO_FAULT_CHECK
     ISR isr;
-    IA64_PSR  vpsr;
-
-    vpsr.val=vmx_vcpu_get_psr(vcpu);
-#ifdef  VMAL_NO_FAULT_CHECK
     if ( vpsr.cpl != 0) {
         /* Inject Privileged Operation fault into guest */
         set_privileged_operation_isr (vcpu, 0);
@@ -321,10 +325,10 @@
 
 IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
 {
+    IA64FAULT  ret1, ret2;
+
+#ifdef  VMAL_NO_FAULT_CHECK
     ISR isr;
-    IA64FAULT  ret1, ret2;
-
-#ifdef  VMAL_NO_FAULT_CHECK
     IA64_PSR  vpsr;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
     if ( vpsr.cpl != 0) {
@@ -373,9 +377,9 @@
 IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
 {
     u64 r1,r3;
+#ifdef  CHECK_FAULT
     ISR visr;
     IA64_PSR vpsr;
-#ifdef  CHECK_FAULT
     if(check_target_register(vcpu, inst.M46.r1)){
         set_illegal_op_isr(vcpu);
         illegal_op(vcpu);
@@ -403,9 +407,11 @@
 IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
 {
     u64 r1,r3;
+#ifdef  CHECK_FAULT
     ISR visr;
     IA64_PSR vpsr;
- #ifdef  CHECK_FAULT
+#endif
+#ifdef  CHECK_FAULT
     if(check_target_register(vcpu, inst.M46.r1)){
         set_illegal_op_isr(vcpu);
         illegal_op(vcpu);
@@ -433,8 +439,8 @@
 IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
 {
     u64 r1,r3;
+#ifdef  CHECK_FAULT
     ISR visr;
-#ifdef  CHECK_FAULT
     if(check_target_register(vcpu, inst.M46.r1)){
         set_illegal_op_isr(vcpu);
         illegal_op(vcpu);
@@ -477,10 +483,10 @@
 IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
 {
     u64 r1,r3;
+#ifdef  CHECK_FAULT
     ISR visr;
     IA64_PSR vpsr;
     int fault=IA64_NO_FAULT;
-#ifdef  CHECK_FAULT
     visr.val=0;
     if(check_target_register(vcpu, inst.M46.r1)){
         set_illegal_op_isr(vcpu);
@@ -514,16 +520,16 @@
 
 IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
 {
-    UINT64 fault, itir, ifa, pte, slot;
+    UINT64 itir, ifa, pte, slot;
+    IA64_PSR  vpsr;
+    vpsr.val=vmx_vcpu_get_psr(vcpu);
+    if ( vpsr.ic ) {
+        set_illegal_op_isr(vcpu);
+        illegal_op(vcpu);
+        return IA64_FAULT;
+    }
+#ifdef  VMAL_NO_FAULT_CHECK
     ISR isr;
-    IA64_PSR  vpsr;
-    vpsr.val=vmx_vcpu_get_psr(vcpu);
-    if ( vpsr.ic ) {
-        set_illegal_op_isr(vcpu);
-        illegal_op(vcpu);
-        return IA64_FAULT;
-    }
-#ifdef  VMAL_NO_FAULT_CHECK
     if ( vpsr.cpl != 0) {
         /* Inject Privileged Operation fault into guest */
         set_privileged_operation_isr (vcpu, 0);
@@ -571,8 +577,10 @@
 
 IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
 {
-    UINT64 fault, itir, ifa, pte, slot;
+    UINT64 itir, ifa, pte, slot;
+#ifdef  VMAL_NO_FAULT_CHECK
     ISR isr;
+#endif
     IA64_PSR  vpsr;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
     if ( vpsr.ic ) {
@@ -628,19 +636,19 @@
 
 IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 
*pte)
 {
+    IA64_PSR  vpsr;
+    IA64FAULT  ret1;
+
+    vpsr.val=vmx_vcpu_get_psr(vcpu);
+    if ( vpsr.ic ) {
+        set_illegal_op_isr(vcpu);
+        illegal_op(vcpu);
+        return IA64_FAULT;
+    }
+
+#ifdef  VMAL_NO_FAULT_CHECK
     UINT64 fault;
     ISR isr;
-    IA64_PSR  vpsr;
-    IA64FAULT  ret1;
-
-    vpsr.val=vmx_vcpu_get_psr(vcpu);
-    if ( vpsr.ic ) {
-        set_illegal_op_isr(vcpu);
-        illegal_op(vcpu);
-        return IA64_FAULT;
-    }
-
-#ifdef  VMAL_NO_FAULT_CHECK
     if ( vpsr.cpl != 0) {
         /* Inject Privileged Operation fault into guest */
         set_privileged_operation_isr (vcpu, 0);
@@ -1146,7 +1154,7 @@
 
 IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
 {
-    u64 r2,cr3;
+    u64 r2;
 #ifdef  CHECK_FAULT
     IA64_PSR  vpsr;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
@@ -1309,14 +1317,10 @@
 void
 vmx_emulate(VCPU *vcpu, REGS *regs)
 {
-    IA64_BUNDLE bundle;
-    int slot;
-    IA64_SLOT_TYPE slot_type;
     IA64FAULT status;
     INST64 inst;
     UINT64 iip, cause, opcode;
     iip = regs->cr_iip;
-    IA64_PSR vpsr;
     cause = VMX(vcpu,cause);
     opcode = VMX(vcpu,opcode);
 
@@ -1342,6 +1346,10 @@
 #endif
 #ifdef BYPASS_VMAL_OPCODE
     // make a local copy of the bundle containing the privop
+    IA64_BUNDLE bundle;
+    int slot;
+    IA64_SLOT_TYPE slot_type;
+    IA64_PSR vpsr;
     bundle = __vmx_get_domain_bundle(iip);
     slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
     if (!slot) inst.inst = bundle.slot0;
@@ -1483,11 +1491,11 @@
         status=vmx_emul_mov_from_cpuid(vcpu, inst);
         break;
     case EVENT_VMSW:
-        printf ("Unimplemented instruction %d\n", cause);
+        printf ("Unimplemented instruction %ld\n", cause);
        status=IA64_FAULT;
         break;
     default:
-        printf("unknown cause %d, iip: %lx, ipsr: %lx\n", 
cause,regs->cr_iip,regs->cr_ipsr);
+        printf("unknown cause %ld, iip: %lx, ipsr: %lx\n", 
cause,regs->cr_iip,regs->cr_ipsr);
         while(1);
        /* For unknown cause, let hardware to re-execute */
        status=IA64_RETRY;
diff -r d44e8ace51a3 -r 0f59ace5442c xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c  Fri Feb 24 18:08:51 2006
+++ b/xen/arch/ia64/vmx/vtlb.c  Fri Feb 24 20:29:18 2006
@@ -86,7 +86,7 @@
 static int
 __is_tlb_overlap(thash_cb_t *hcb,thash_data_t *entry,int rid, char cl, u64 
sva, u64 eva)
 {
-    uint64_t size1,size2,sa1,ea1,ea2;
+    uint64_t size1,sa1,ea1;
 
     if ( entry->invalid || entry->rid != rid || (!entry->tc && entry->cl != cl 
) ) {
         return 0;
@@ -287,11 +287,11 @@
     ASSERT ( hcb->ht == THASH_VHPT );
     vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
     pages = PSIZE(vrr.ps) >> PAGE_SHIFT;
-    mfn = (hcb->vs->get_mfn)(DOMID_SELF,tlb->ppn, pages);
+    mfn = (unsigned long)(hcb->vs->get_mfn)(DOMID_SELF,tlb->ppn, pages);
     if ( mfn == INVALID_MFN ) return 0;
 
     // TODO with machine discontinuous address space issue.
-    vhpt->etag = (hcb->vs->tag_func)( hcb->pta, tlb->vadr);
+    vhpt->etag =(unsigned long) (hcb->vs->tag_func)( hcb->pta, tlb->vadr);
     //vhpt->ti = 0;
     vhpt->itir = tlb->itir & ~ITIR_RV_MASK;
     vhpt->page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
@@ -374,7 +374,7 @@
     u64 gppn;
     u64 ppns, ppne;
 
-    hash_table = (hcb->hash_func)(hcb->pta, va);
+    hash_table = (thash_data_t *)(hcb->hash_func)(hcb->pta, va);
     if( INVALID_ENTRY(hcb, hash_table) ) {
         *hash_table = *entry;
         hash_table->next = 0;
@@ -419,11 +419,11 @@
 static void vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
 {
     thash_data_t   vhpt_entry, *hash_table, *cch;
-    ia64_rr vrr;
+
     if ( !__tlb_to_vhpt(hcb, entry, va, &vhpt_entry) ) {
         panic("Can't convert to machine VHPT entry\n");
     }
-    hash_table = (hcb->hash_func)(hcb->pta, va);
+    hash_table = (thash_data_t *)(hcb->hash_func)(hcb->pta, va);
     if( INVALID_ENTRY(hcb, hash_table) ) {
         *hash_table = vhpt_entry;
         hash_table->next = 0;
@@ -449,7 +449,7 @@
 
 void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
 {
-    thash_data_t    *hash_table;
+    //thash_data_t    *hash_table;
     ia64_rr vrr;
     
     vrr = (hcb->get_rr_fn)(hcb->vcpu,entry->vadr);
@@ -466,7 +466,6 @@
 {
     thash_data_t    *hash_table, *p, *q;
     thash_internal_t *priv = &hcb->priv;
-    int idx;
 
     hash_table = priv->hash_base;
     if ( hash_table == entry ) {
@@ -492,9 +491,6 @@
 
 static void rem_vtlb(thash_cb_t *hcb, thash_data_t *entry)
 {
-    thash_data_t    *hash_table, *p, *q;
-    thash_internal_t *priv = &hcb->priv;
-    int idx;
     
     if ( !entry->tc ) {
         return rem_tr(hcb, entry->cl, entry->tr_idx);
@@ -553,7 +549,6 @@
         __rem_hash_head(hcb, hash);
     }
 }
-
 
 /*
  * Find an overlap entry in hash table and its collision chain.
@@ -580,7 +575,6 @@
 {
     thash_data_t    *hash_table;
     thash_internal_t *priv = &hcb->priv;
-    u64     tag;
     ia64_rr vrr;
 
     priv->_curva = va & ~(size-1);
@@ -588,7 +582,7 @@
     priv->rid = rid;
     vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
     priv->ps = vrr.ps;
-    hash_table = (hcb->hash_func)(hcb->pta, priv->_curva);
+    hash_table =(thash_data_t *)(hcb->hash_func)(hcb->pta, priv->_curva);
     priv->s_sect = s_sect;
     priv->cl = cl;
     priv->_tr_idx = 0;
@@ -610,8 +604,8 @@
     priv->rid = rid;
     vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
     priv->ps = vrr.ps;
-    hash_table = (hcb->hash_func)( hcb->pta, priv->_curva);
-    tag = (hcb->vs->tag_func)( hcb->pta, priv->_curva);
+    hash_table = (thash_data_t *)(hcb->hash_func)( hcb->pta, priv->_curva);
+    tag = (unsigned long)(hcb->vs->tag_func)( hcb->pta, priv->_curva);
     priv->tag = tag;
     priv->hash_base = hash_table;
     priv->cur_cch = hash_table;
@@ -634,10 +628,10 @@
         tr = &DTR(hcb,0);
     }
     for (; priv->_tr_idx < num; priv->_tr_idx ++ ) {
-        if ( __is_tlb_overlap(hcb, &tr[priv->_tr_idx],
+        if ( __is_tlb_overlap(hcb, &tr[(unsigned)priv->_tr_idx],
                 priv->rid, priv->cl,
                 priv->_curva, priv->_eva) ) {
-            return &tr[priv->_tr_idx++];
+            return &tr[(unsigned)priv->_tr_idx++];
         }
     }
     return NULL;
@@ -652,7 +646,7 @@
 {
     thash_data_t    *ovl;
     thash_internal_t *priv = &hcb->priv;
-    u64 addr,rr_psize;
+    u64 rr_psize;
     ia64_rr vrr;
 
     if ( priv->s_sect.tr ) {
@@ -673,7 +667,7 @@
             }
         }
         priv->_curva += rr_psize;
-        priv->hash_base = (hcb->hash_func)( hcb->pta, priv->_curva);
+        priv->hash_base = (thash_data_t *)(hcb->hash_func)( hcb->pta, 
priv->_curva);
         priv->cur_cch = priv->hash_base;
     }
     return NULL;
@@ -683,7 +677,7 @@
 {
     thash_data_t    *ovl;
     thash_internal_t *priv = &hcb->priv;
-    u64 addr,rr_psize;
+    u64 rr_psize;
     ia64_rr vrr;
 
     vrr = (hcb->get_rr_fn)(hcb->vcpu,priv->_curva);
@@ -698,8 +692,8 @@
             }
         }
         priv->_curva += rr_psize;
-        priv->hash_base = (hcb->hash_func)( hcb->pta, priv->_curva);
-        priv->tag = (hcb->vs->tag_func)( hcb->pta, priv->_curva);
+        priv->hash_base =(thash_data_t *)(hcb->hash_func)( hcb->pta, 
priv->_curva);
+        priv->tag = (unsigned long)(hcb->vs->tag_func)( hcb->pta, 
priv->_curva);
         priv->cur_cch = priv->hash_base;
     }
     return NULL;
@@ -842,7 +836,6 @@
             CACHE_LINE_TYPE cl)
 {
     thash_data_t    *hash_table, *cch;
-    u64     tag;
     ia64_rr vrr;
    
     ASSERT ( hcb->ht == THASH_VTLB );
@@ -851,7 +844,7 @@
     if ( cch ) return cch;
 
     vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
-    hash_table = (hcb->hash_func)( hcb->pta, va);
+    hash_table = (thash_data_t *)(hcb->hash_func)( hcb->pta, va);
 
     if ( INVALID_ENTRY(hcb, hash_table ) )
         return NULL;
@@ -913,12 +906,13 @@
  */
 void tlb_remove_notifier(thash_cb_t *hcb, thash_data_t *entry)
 {
-    thash_cb_t  *vhpt;
+//    thash_cb_t  *vhpt;
     search_section_t    s_sect;
     
     s_sect.v = 0;
     thash_purge_entries(hcb->ts->vhpt, entry, s_sect);
     machine_tlb_purge(entry->vadr, entry->ps);
+    return;
 }
 
 /*
@@ -930,7 +924,7 @@
 
     cch_mem_init (hcb);
     hcb->magic = THASH_CB_MAGIC;
-    hcb->pta.val = hcb->hash;
+    hcb->pta.val = (unsigned long)hcb->hash;
     hcb->pta.vf = 1;
     hcb->pta.ve = 1;
     hcb->pta.size = sz;
@@ -1010,7 +1004,7 @@
 //    vb2 = vb1 + vtlb->hash_sz;
     hash_num = vhpt->hash_sz / sizeof(thash_data_t);
 //    printf("vb2=%lp, size=%lx hash_num=%lx\n", vb2, vhpt->hash_sz, hash_num);
-    printf("vtlb=%lp, hash=%lp size=0x%lx; vhpt=%lp, hash=%lp size=0x%lx\n", 
+    printf("vtlb=%p, hash=%p size=0x%lx; vhpt=%p, hash=%p size=0x%lx\n", 
                 vtlb, vtlb->hash,vtlb->hash_sz,
                 vhpt, vhpt->hash, vhpt->hash_sz);
     //memcpy(vb1, vtlb->hash, vtlb->hash_sz);
@@ -1043,7 +1037,7 @@
         }
         hash ++;
     }
-    printf("Done vtlb entry check, hash=%lp\n", hash);
+    printf("Done vtlb entry check, hash=%p\n", hash);
     printf("check_ok_num = 0x%lx check_invalid=0x%lx\n", 
check_ok_num,check_invalid);
     invalid_ratio = 1000*check_invalid / hash_num;
     printf("%02ld.%01ld%% entries are invalid\n", 
@@ -1072,7 +1066,7 @@
         if ( !INVALID_ENTRY(vhpt, hash) ) {
             for ( cch= hash; cch; cch=cch->next) {
                 if ( !cch->checked ) {
-                    printf ("!!!Hash=%lp cch=%lp not within vtlb\n", hash, 
cch);
+                    printf ("!!!Hash=%p cch=%p not within vtlb\n", hash, cch);
                     check_fail_num ++;
                 }
                 else {
@@ -1112,9 +1106,9 @@
     printf("Dump vTC\n");
     for ( i = 0; i < hash_num; i++ ) {
         if ( !INVALID_ENTRY(vtlb, hash) ) {
-            printf("VTLB at hash=%lp\n", hash);
+            printf("VTLB at hash=%p\n", hash);
             for (cch=hash; cch; cch=cch->next) {
-                printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
+                printf("Entry %p va=%lx ps=%d rid=%d\n",
                     cch, cch->vadr, cch->ps, cch->rid);
             }
         }
@@ -1123,13 +1117,13 @@
     printf("Dump vDTR\n");
     for (i=0; i<NDTRS; i++) {
         tr = &DTR(vtlb,i);
-        printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
+        printf("Entry %p va=%lx ps=%d rid=%d\n",
                     tr, tr->vadr, tr->ps, tr->rid);
     }
     printf("Dump vITR\n");
     for (i=0; i<NITRS; i++) {
         tr = &ITR(vtlb,i);
-        printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
+        printf("Entry %p va=%lx ps=%d rid=%d\n",
                     tr, tr->vadr, tr->ps, tr->rid);
     }
     printf("End of vTLB dump\n");

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-changelog] [IA64] Clean up warnings related to VTi code. (C files), Xen patchbot -unstable <=