To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [IA64] VTI: Use 16K page size to emulate guest physical mode
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Sat, 18 Mar 2006 11:18:07 +0000
Delivery-date: Sat, 18 Mar 2006 11:19:30 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 983311b895bebff38f5a4c10cd84dff64764a787
# Parent  333db05b8bbb1e07583608c8efe4fa97a6b64cc4
[IA64] VTI: Use 16K page size to emulate guest physical mode

Previously the VMM used a 4K page size to emulate guest physical mode on a
VTI domain, in order to satisfy the speculation attribute requirement in
physical mode; see section 4.4.6, "Speculation Attributes", of the Itanium
SDM, volume 2.

It seems the guest does not need to conform to this requirement, so the
Xen page size (16K) can be used instead.

Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>
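
A minimal standalone sketch, not part of the patch, of the address
arithmetic the new physical_tlb_miss() performs once the Xen page size
replaces the old 4K emulation granule. It assumes PAGE_SHIFT is 14 (16K
pages); the sample address and the main() wrapper are invented for
illustration, and in Xen the frame number would go through gmfn_to_mfn()
and be inserted with ia64_itc().

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT 14                        /* 16K pages */
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
    uint64_t vadr = 0x800000000001ea18ULL;   /* made-up metaphysical address */

    /* Bit 63 selects the memory attribute: UC if set, WB if clear. */
    int uncached = (int)(vadr >> 63);

    /* Shift left then right by one to drop bit 63 before extracting the
       guest physical page number, as physical_tlb_miss() does below. */
    uint64_t gppn = (vadr << 1) >> (PAGE_SHIFT + 1);

    printf("gppn=0x%" PRIx64 " page_base=0x%" PRIx64 " attr=%s\n",
           gppn, (uint64_t)(vadr & PAGE_MASK), uncached ? "UC" : "WB");
    return 0;
}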

diff -r 333db05b8bbb -r 983311b895be xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c  Fri Mar  3 17:11:33 2006
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c  Fri Mar  3 20:03:39 2006
@@ -104,57 +104,51 @@
     vcpu->arch.mode_flags = GUEST_IN_PHY;
 }
 
-extern u64 get_mfn(struct domain *d, u64 gpfn);
 extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *);
-void
-physical_itlb_miss_dom0(VCPU *vcpu, u64 vadr)
+/*void
+physical_itlb_miss(VCPU *vcpu, u64 vadr)
 {
     u64 psr;
     IA64_PSR vpsr;
-    u64 mppn,gppn;
+    u64 xen_mppn,xen_gppn;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
-    gppn=(vadr<<1)>>13;
-    mppn = get_mfn(vcpu->domain,gppn);
-    mppn=(mppn<<12)|(vpsr.cpl<<7); 
-//    if(vadr>>63)
-//       mppn |= PHY_PAGE_UC;
-//    else
-    mppn |= PHY_PAGE_WB;
+    xen_gppn=(vadr<<1)>>(PAGE_SHIFT+1);
+    xen_mppn = gmfn_to_mfn(vcpu->domain, xen_gppn);
+    xen_mppn=(xen_mppn<<PAGE_SHIFT)|(vpsr.cpl<<7);
+    if(vadr>>63)
+        xen_mppn |= PHY_PAGE_UC;
+    else
+        xen_mppn |= PHY_PAGE_WB;
 
     psr=ia64_clear_ic();
-    ia64_itc(1,vadr&(~0xfff),mppn,EMUL_PHY_PAGE_SHIFT);
+    ia64_itc(1,vadr&PAGE_MASK,xen_mppn,PAGE_SHIFT);
     ia64_set_psr(psr);
     ia64_srlz_i();
     return;
 }
 
-
-void
-physical_itlb_miss(VCPU *vcpu, u64 vadr)
-{
-        physical_itlb_miss_dom0(vcpu, vadr);
-}
-
-
-void
-physical_dtlb_miss(VCPU *vcpu, u64 vadr)
+*/
+/* 
+ *      vec=1, itlb miss
+ *      vec=2, dtlb miss
+ */
+void
+physical_tlb_miss(VCPU *vcpu, u64 vadr, u64 vec)
 {
     u64 psr;
     IA64_PSR vpsr;
-    u64 mppn,gppn;
-//    if(vcpu->domain!=dom0)
-//        panic("dom n physical dtlb miss happen\n");
+    u64 xen_mppn,xen_gppn;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
-    gppn=(vadr<<1)>>13;
-    mppn = get_mfn(vcpu->domain, gppn);
-    mppn=(mppn<<12)|(vpsr.cpl<<7);
+    xen_gppn=(vadr<<1)>>(PAGE_SHIFT+1);
+    xen_mppn = gmfn_to_mfn(vcpu->domain, xen_gppn);
+    xen_mppn=(xen_mppn<<PAGE_SHIFT)|(vpsr.cpl<<7);
     if(vadr>>63)
-        mppn |= PHY_PAGE_UC;
+        xen_mppn |= PHY_PAGE_UC;
     else
-        mppn |= PHY_PAGE_WB;
+        xen_mppn |= PHY_PAGE_WB;
 
     psr=ia64_clear_ic();
-    ia64_itc(2,vadr&(~0xfff),mppn,EMUL_PHY_PAGE_SHIFT);
+    ia64_itc(vec,vadr&PAGE_MASK,xen_mppn,PAGE_SHIFT);
     ia64_set_psr(psr);
     ia64_srlz_i();
     return;
@@ -193,13 +187,13 @@
        if (is_physical_mode(vcpu)) {
                if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
                        panic("Unexpected domain switch in phy emul\n");
-               phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
-       phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
+               phy_rr.rrval = vcpu->arch.metaphysical_rr0;
+ //    phy_rr.ps = PAGE_SHIFT;
        phy_rr.ve = 1;
 
                ia64_set_rr((VRN0 << VRN_SHIFT), phy_rr.rrval);
-               phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
-       phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
+               phy_rr.rrval = vcpu->arch.metaphysical_rr4;
+//     phy_rr.ps = PAGE_SHIFT;
            phy_rr.ve = 1;
 
                ia64_set_rr((VRN4 << VRN_SHIFT), phy_rr.rrval);
@@ -242,12 +236,12 @@
     /* Save original virtual mode rr[0] and rr[4] */
     psr=ia64_clear_ic();
     phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
-    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
+//    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
     phy_rr.ve = 1;
     ia64_set_rr(VRN0<<VRN_SHIFT, phy_rr.rrval);
     ia64_srlz_d();
     phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
-    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
+//    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
     phy_rr.ve = 1;
     ia64_set_rr(VRN4<<VRN_SHIFT, phy_rr.rrval);
     ia64_srlz_d();
diff -r 333db05b8bbb -r 983311b895be xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c   Fri Mar  3 17:11:33 2006
+++ b/xen/arch/ia64/vmx/vmx_process.c   Fri Mar  3 20:03:39 2006
@@ -315,23 +315,20 @@
         return;
     }
 */
-    if(vadr == 0x1ea18c00 ){
+/*    if(vadr == 0x1ea18c00 ){
         ia64_clear_ic();
         while(1);
     }
+ */
     if(is_physical_mode(v)&&(!(vadr<<1>>62))){
-        if(vec==1){
-            physical_itlb_miss(v, vadr);
-            return IA64_FAULT;
-        }
         if(vec==2){
             if(v->domain!=dom0&&__gpfn_is_io(v->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
                 emulate_io_inst(v,((vadr<<1)>>1),4);   //  UC
-            }else{
-                physical_dtlb_miss(v, vadr);
+                return IA64_FAULT;
             }
-            return IA64_FAULT;
         }
+        physical_tlb_miss(v, vadr, vec);
+        return IA64_FAULT;
     }
     vrr = vmx_vcpu_rr(v, vadr);
     if(vec == 1) type = ISIDE_TLB;
diff -r 333db05b8bbb -r 983311b895be xen/include/asm-ia64/vmx_phy_mode.h
--- a/xen/include/asm-ia64/vmx_phy_mode.h       Fri Mar  3 17:11:33 2006
+++ b/xen/include/asm-ia64/vmx_phy_mode.h       Fri Mar  3 20:03:39 2006
@@ -75,11 +75,11 @@
 #define PHY_PAGE_UC (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_UC|_PAGE_AR_RWX)
 #define PHY_PAGE_WB (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_WB|_PAGE_AR_RWX)
 
-#ifdef PHY_16M  /* 16M: large granule for test*/
-#define EMUL_PHY_PAGE_SHIFT 24
-#else   /* 4K: emulated physical page granule */
-#define EMUL_PHY_PAGE_SHIFT 12
-#endif
+//#ifdef PHY_16M  /* 16M: large granule for test*/
+//#define EMUL_PHY_PAGE_SHIFT 24
+//#else   /* 4K: emulated physical page granule */
+//#define EMUL_PHY_PAGE_SHIFT 12
+//#endif
 #define IA64_RSC_MODE       0x0000000000000003
 #define XEN_RR7_RID    (0xf00010)
 #define GUEST_IN_PHY    0x1
@@ -96,8 +96,7 @@
 extern void recover_if_physical_mode(VCPU *vcpu);
 extern void vmx_init_all_rr(VCPU *vcpu);
 extern void vmx_load_all_rr(VCPU *vcpu);
-extern void physical_itlb_miss(VCPU *vcpu, u64 vadr);
-extern void physical_dtlb_miss(VCPU *vcpu, u64 vadr);
+extern void physical_tlb_miss(VCPU *vcpu, u64 vadr, u64 vec);
 /*
  * No sanity check here, since all psr changes have been
  * checked in switch_mm_mode().

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
