[Xen-changelog] [xen-unstable] [IA64] Support multiple page sizes in VHPT

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [IA64] Support multiple page sizes in VHPT
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 26 Oct 2006 12:11:34 +0000
Delivery-date: Thu, 26 Oct 2006 05:19:14 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 9e9d8696fb553784750237d77119d4785d057a68
# Parent  901083dace1dee72b5e207b844a00076312c3b9d
[IA64] Support multiple page sizes in VHPT

Enable VHPT support for multiple page sizes. VHPT entries now carry their
own itir, so each entry records its actual page size: a translation is
inserted into the VHPT only when its page size is at least the region's
preferred page size, while smaller translations are installed directly
into the TLB via itc. vmx_vcpu_tpa() likewise computes the physical
address from the entry's own page size, using the new __mpa_to_gpa()
macro to convert back to a guest physical address.
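
For illustration only, not part of the patch: a minimal, self-contained C
sketch of the dispatch added to thash_vhpt_insert(). choose_insert() and
the sample values below are hypothetical stand-ins for the Xen internals
(ia64_get_rr(), vmx_vhpt_insert(), ia64_itc()); itir_ps() matches the
helper the real code uses.

    #include <stdint.h>
    #include <stdio.h>

    /* The page-size field occupies bits 2..7 of an itir value. */
    static uint64_t itir_ps(uint64_t itir)
    {
        return (itir >> 2) & 0x3f;
    }

    /*
     * Model of the new policy: a translation goes into the VHPT only if
     * its page size covers at least the region's preferred page size
     * (rr.ps); smaller pages are pinned straight into the TLB with itc
     * (the real code clears PSR.ic around the insert).
     */
    static const char *choose_insert(uint64_t itir, uint64_t region_ps)
    {
        return itir_ps(itir) >= region_ps ? "VHPT" : "direct TLB (itc)";
    }

    int main(void)
    {
        uint64_t region_ps = 14;  /* region uses 16KB pages (ps = 14) */

        printf("16KB mapping -> %s\n", choose_insert(14 << 2, region_ps));
        printf("4KB mapping  -> %s\n", choose_insert(12 << 2, region_ps));
        return 0;
    }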

Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>
---
 xen/arch/ia64/vmx/vmmu.c            |    8 +++++---
 xen/arch/ia64/vmx/vmx_ivt.S         |   16 ++++++++++++----
 xen/arch/ia64/vmx/vmx_phy_mode.c    |    4 ++--
 xen/arch/ia64/vmx/vmx_process.c     |    4 ++--
 xen/arch/ia64/vmx/vtlb.c            |   36 +++++++++++++++++++++++++++---------
 xen/include/asm-ia64/mm.h           |    4 ++++
 xen/include/asm-ia64/vmmu.h         |    3 ++-
 xen/include/asm-ia64/vmx_phy_mode.h |    2 +-
 8 files changed, 55 insertions(+), 22 deletions(-)

diff -r 901083dace1d -r 9e9d8696fb55 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Tue Oct 24 09:22:56 2006 -0600
+++ b/xen/arch/ia64/vmx/vmmu.c  Tue Oct 24 09:49:31 2006 -0600
@@ -341,9 +341,9 @@ fetch_code(VCPU *vcpu, u64 gip, IA64_BUN
             ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
             return IA64_RETRY;
         }
-        mfn = tlb->ppn >> (PAGE_SHIFT - ARCH_PAGE_SHIFT);
         maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
                 (gip & (PSIZE(tlb->ps) - 1));
+        mfn = maddr >> PAGE_SHIFT;
     }
 
     page = mfn_to_page(mfn);
@@ -637,7 +637,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 v
     thash_data_t *data;
     ISR visr,pt_isr;
     REGS *regs;
-    u64 vhpt_adr;
+    u64 vhpt_adr, madr;
     IA64_PSR vpsr;
     regs=vcpu_regs(vcpu);
     pt_isr.val=VMX(vcpu,cr_isr);
@@ -673,7 +673,9 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 v
             dnat_page_consumption(vcpu, vadr);
             return IA64_FAULT;
         }else{
-            *padr = (get_gpfn_from_mfn(arch_to_xen_ppn(data->ppn)) << PAGE_SHIFT) | (vadr & (PAGE_SIZE - 1));
+            madr = (data->ppn >> (data->ps - 12) << data->ps) |
+                   (vadr & (PSIZE(data->ps) - 1));
+            *padr = __mpa_to_gpa(madr);
             return IA64_NO_FAULT;
         }
     }
diff -r 901083dace1d -r 9e9d8696fb55 xen/arch/ia64/vmx/vmx_ivt.S
--- a/xen/arch/ia64/vmx/vmx_ivt.S       Tue Oct 24 09:22:56 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_ivt.S       Tue Oct 24 09:49:31 2006 -0600
@@ -172,13 +172,17 @@ vmx_itlb_loop:
     ld8 r27 = [r18]
     ld8 r29 = [r28]
     ;;
-    st8 [r16] = r29
-    st8 [r28] = r22
+    st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
+    st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
     extr.u r19 = r27, 56, 4
     ;;
+    ld8 r29 = [r16]
+    ld8 r22 = [r28]
     dep r27 = r0, r27, 56, 4
     dep r25 = r19, r25, 56, 4
     ;;
+    st8 [r16] = r22
+    st8 [r28] = r29
     st8 [r18] = r25
     st8 [r17] = r27
     ;;
@@ -246,13 +250,17 @@ vmx_dtlb_loop:
     ld8 r27 = [r18]
     ld8 r29 = [r28]
     ;;
-    st8 [r16] = r29
-    st8 [r28] = r22
+    st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
+    st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
     extr.u r19 = r27, 56, 4
     ;;
+    ld8 r29 = [r16]
+    ld8 r22 = [r28]
     dep r27 = r0, r27, 56, 4
     dep r25 = r19, r25, 56, 4
     ;;
+    st8 [r16] = r22
+    st8 [r28] = r29
     st8 [r18] = r25
     st8 [r17] = r27
     ;;    
diff -r 901083dace1d -r 9e9d8696fb55 xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c  Tue Oct 24 09:22:56 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c  Tue Oct 24 09:49:31 2006 -0600
@@ -107,7 +107,7 @@ extern void vmx_switch_rr7(unsigned long
 extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *);
 
 void
-physical_tlb_miss(VCPU *vcpu, u64 vadr)
+physical_tlb_miss(VCPU *vcpu, u64 vadr, int type)
 {
     u64 pte;
     ia64_rr rr;
@@ -117,7 +117,7 @@ physical_tlb_miss(VCPU *vcpu, u64 vadr)
         pte = pte | PHY_PAGE_UC;
     else
         pte = pte | PHY_PAGE_WB;
-    thash_vhpt_insert(vcpu, pte, (rr.ps << 2), vadr);
+    thash_vhpt_insert(vcpu, pte, (rr.ps << 2), vadr, type);
     return;
 }
 
diff -r 901083dace1d -r 9e9d8696fb55 xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c   Tue Oct 24 09:22:56 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_process.c   Tue Oct 24 09:49:31 2006 -0600
@@ -288,7 +288,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
                 return IA64_FAULT;
             }
         }
-        physical_tlb_miss(v, vadr);
+        physical_tlb_miss(v, vadr, type);
         return IA64_FAULT;
     }
 
@@ -306,7 +306,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
                 return IA64_FAULT;
             }
         }
-        thash_vhpt_insert(v,data->page_flags, data->itir ,vadr);
+        thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);
 
     }else if(type == DSIDE_TLB){
     
diff -r 901083dace1d -r 9e9d8696fb55 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c  Tue Oct 24 09:22:56 2006 -0600
+++ b/xen/arch/ia64/vmx/vtlb.c  Tue Oct 24 09:49:31 2006 -0600
@@ -178,11 +178,23 @@ static void vmx_vhpt_insert(thash_cb_t *
     return;
 }
 
-void thash_vhpt_insert(VCPU *v, u64 pte, u64 itir, u64 va)
-{
-    u64 phy_pte;
+void thash_vhpt_insert(VCPU *v, u64 pte, u64 itir, u64 va, int type)
+{
+    u64 phy_pte, psr;
+    ia64_rr mrr;
+
+    mrr.rrval = ia64_get_rr(va);
     phy_pte=translate_phy_pte(v, &pte, itir, va);
-    vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va);
+
+    if (itir_ps(itir) >= mrr.ps) {
+        vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va);
+    } else {
+        phy_pte  &= ~PAGE_FLAGS_RV_MASK;
+        psr = ia64_clear_ic();
+        ia64_itc(type + 1, va, phy_pte, itir_ps(itir));
+        ia64_set_psr(psr);
+        ia64_srlz_i();
+    }
 }
 /*
  *   vhpt lookup
@@ -191,7 +203,7 @@ thash_data_t * vhpt_lookup(u64 va)
 thash_data_t * vhpt_lookup(u64 va)
 {
     thash_data_t *hash, *head;
-    u64 tag, pte;
+    u64 tag, pte, itir;
     head = (thash_data_t *)ia64_thash(va);
     hash=head;
     tag = ia64_ttag(va);
@@ -207,6 +219,9 @@ thash_data_t * vhpt_lookup(u64 va)
         tag = hash->etag;
         hash->etag = head->etag;
         head->etag = tag;
+        itir = hash->itir;
+        hash->itir = head->itir;
+        head->itir = itir;
         head->len = hash->len;
         hash->len=0;
         return head;
@@ -223,7 +238,8 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
     if (data == NULL) {
         data = vtlb_lookup(current, iha, DSIDE_TLB);
         if (data != NULL)
-            thash_vhpt_insert(current, data->page_flags, data->itir ,iha);
+            thash_vhpt_insert(current, data->page_flags, data->itir,
+                              iha, DSIDE_TLB);
     }
 
     asm volatile ("rsm psr.ic|psr.i;;"
@@ -607,7 +623,8 @@ void thash_init(thash_cb_t *hcb, u64 sz)
     head=hcb->hash;
     num = (hcb->hash_sz/sizeof(thash_data_t));
     do{
-        head->itir = PAGE_SHIFT<<2;
+        head->page_flags = 0;
+        head->itir = 0;
         head->etag = 1UL<<63;
         head->next = 0;
         head++;
@@ -617,11 +634,12 @@ void thash_init(thash_cb_t *hcb, u64 sz)
     hcb->cch_freelist = p = hcb->cch_buf;
     num = (hcb->cch_sz/sizeof(thash_data_t))-1;
     do{
-        p->itir = PAGE_SHIFT<<2;
+        p->page_flags = 0;
+        p->itir = 0;
         p->next =p+1;
         p++;
         num--;
     }while(num);
-    p->itir = PAGE_SHIFT<<2;
+    p->itir = 0;
     p->next = NULL;
 }
diff -r 901083dace1d -r 9e9d8696fb55 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Tue Oct 24 09:22:56 2006 -0600
+++ b/xen/include/asm-ia64/mm.h Tue Oct 24 09:49:31 2006 -0600
@@ -497,6 +497,10 @@ extern u64 translate_domain_pte(u64 ptev
 #define __gpa_to_mpa(_d, gpa)   \
     ((gmfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
 
+#define __mpa_to_gpa(madr) \
+    ((get_gpfn_from_mfn((madr) >> PAGE_SHIFT) << PAGE_SHIFT) | \
+    ((madr) & ~PAGE_MASK))
+
 /* Arch-specific portion of memory_op hypercall. */
 long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);
 
diff -r 901083dace1d -r 9e9d8696fb55 xen/include/asm-ia64/vmmu.h
--- a/xen/include/asm-ia64/vmmu.h       Tue Oct 24 09:22:56 2006 -0600
+++ b/xen/include/asm-ia64/vmmu.h       Tue Oct 24 09:49:31 2006 -0600
@@ -305,7 +305,8 @@ extern int vhpt_enabled(struct vcpu *vcp
 extern int vhpt_enabled(struct vcpu *vcpu, uint64_t vadr, vhpt_ref_t ref);
 extern void vtlb_insert(struct vcpu *vcpu, u64 pte, u64 itir, u64 va);
 extern u64 translate_phy_pte(struct vcpu *v, u64 *pte, u64 itir, u64 va);
-extern void thash_vhpt_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa);
+extern void thash_vhpt_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa,
+                              int type);
 extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
 
 static inline void vmx_vcpu_set_tr (thash_data_t *trp, u64 pte, u64 itir, u64 va, u64 rid)
diff -r 901083dace1d -r 9e9d8696fb55 xen/include/asm-ia64/vmx_phy_mode.h
--- a/xen/include/asm-ia64/vmx_phy_mode.h       Tue Oct 24 09:22:56 2006 -0600
+++ b/xen/include/asm-ia64/vmx_phy_mode.h       Tue Oct 24 09:49:31 2006 -0600
@@ -96,7 +96,7 @@ extern void recover_if_physical_mode(VCP
 extern void recover_if_physical_mode(VCPU *vcpu);
 extern void vmx_init_all_rr(VCPU *vcpu);
 extern void vmx_load_all_rr(VCPU *vcpu);
-extern void physical_tlb_miss(VCPU *vcpu, u64 vadr);
+extern void physical_tlb_miss(VCPU *vcpu, u64 vadr, int type);
 /*
  * No sanity check here, since all psr changes have been
  * checked in switch_mm_mode().

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
