[Xen-changelog] [xen-unstable] [IA64] Replace mode_flags by mmu_mode

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [IA64] Replace mode_flags by mmu_mode
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 04 Oct 2007 17:41:12 -0700
Delivery-date: Thu, 04 Oct 2007 17:44:11 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1191253934 21600
# Node ID 5c56ce7b989241e112d97c77e4a1a65b348e2019
# Parent  83239b2890723e0c06bad507bb273a970784b18e
[IA64] Replace mode_flags by mmu_mode

Replace mode_flags with mmu_mode and move it into the arch_vmx structure.
Clean up vmx_phy_mode.c to prepare for half-physical mode (dt=0, it=1).

Signed-off-by: Tristan Gingold <tgingold@xxxxxxx>
---
 xen/arch/ia64/asm-offsets.c         |    4 -
 xen/arch/ia64/vmx/optvfault.S       |   64 +++++++++---------
 xen/arch/ia64/vmx/vmx_fault.c       |   17 +++-
 xen/arch/ia64/vmx/vmx_phy_mode.c    |  127 +++++++++++++++++-------------------
 xen/arch/ia64/vmx/vmx_vcpu.c        |    4 -
 xen/include/asm-ia64/domain.h       |    2 
 xen/include/asm-ia64/vmx_phy_mode.h |   17 +---
 xen/include/asm-ia64/vmx_vpd.h      |   33 ---------
 8 files changed, 112 insertions(+), 156 deletions(-)
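
For reference, the change collapses two independent flag bits into a single
enumerated state.  A side-by-side sketch of the two encodings (illustrative
C only; the new constants are the ones this patch adds to vmx_phy_mode.h):

/* Old scheme: independent bits OR'ed into vcpu->arch.mode_flags.  */
#define GUEST_IN_PHY   (1 << 0)
#define GUEST_PHY_EMUL (1 << 1)

/* New scheme: one state byte in vcpu->arch.arch_vmx.mmu_mode.  */
#define VMX_MMU_VIRTUAL 0    /* fully virtual:  it=1, dt=1 */
#define VMX_MMU_PHY_D   1    /* half physical:  it=1, dt=0 */
#define VMX_MMU_PHY_DT  3    /* fully physical: it=0, dt=0 */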

diff -r 83239b289072 -r 5c56ce7b9892 xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c       Thu Sep 27 16:29:43 2007 -0600
+++ b/xen/arch/ia64/asm-offsets.c       Mon Oct 01 09:52:14 2007 -0600
@@ -59,6 +59,7 @@ void foo(void)
        DEFINE(IA64_VCPU_HYPERCALL_CONTINUATION_OFS, offsetof (struct vcpu, arch.hypercall_continuation));
        DEFINE(IA64_VCPU_FP_PSR_OFFSET, offsetof (struct vcpu, arch.fp_psr));
        DEFINE(IA64_VCPU_META_RID_DT_OFFSET, offsetof (struct vcpu, arch.metaphysical_rid_dt));
+       DEFINE(IA64_VCPU_META_RID_D_OFFSET, offsetof (struct vcpu, arch.metaphysical_rid_d));
        DEFINE(IA64_VCPU_META_SAVED_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_saved_rr0));
        DEFINE(IA64_VCPU_BREAKIMM_OFFSET, offsetof (struct vcpu, arch.breakimm));
        DEFINE(IA64_VCPU_IVA_OFFSET, offsetof (struct vcpu, arch.iva));
@@ -149,7 +150,7 @@ void foo(void)
        DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mpta));
        DEFINE(IA64_PT_REGS_R16_SLOT, (((offsetof(struct pt_regs, r16)-sizeof(struct pt_regs))>>3)&0x3f));
        DEFINE(IA64_VCPU_FLAGS_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.flags));
-       DEFINE(IA64_VCPU_MODE_FLAGS_OFFSET,offsetof(struct vcpu, arch.mode_flags));
+       DEFINE(IA64_VCPU_MMU_MODE_OFFSET,offsetof(struct vcpu, arch.arch_vmx.mmu_mode));
 
        BLANK();
 
@@ -202,7 +203,6 @@ void foo(void)
        DEFINE(IA64_VPD_VIFS_OFFSET, offsetof (mapped_regs_t, ifs));
        DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct vcpu, arch.insvc[0]));
        DEFINE(IA64_VPD_VPTA_OFFSET, offsetof (struct mapped_regs, pta));
-       DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
        DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
 
        BLANK();
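
The asm-offsets.c hunk above renames the constant that optvfault.S uses to
locate the mode byte.  A minimal, self-contained sketch of the asm-offsets
pattern, with toy stand-ins for the Xen types (the real DEFINE macro differs
slightly; the build turns its output into a generated header):

#include <stddef.h>

struct toy_arch_vmx { unsigned char mmu_mode; };
struct toy_arch     { struct toy_arch_vmx arch_vmx; };
struct toy_vcpu     { struct toy_arch arch; };

#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0" : : "i" (val))

void foo(void)
{
        /* Emits a marker that becomes IA64_VCPU_MMU_MODE_OFFSET,
           consumed by the assembly in optvfault.S.  */
        DEFINE(IA64_VCPU_MMU_MODE_OFFSET,
               offsetof(struct toy_vcpu, arch.arch_vmx.mmu_mode));
}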
diff -r 83239b289072 -r 5c56ce7b9892 xen/arch/ia64/vmx/optvfault.S
--- a/xen/arch/ia64/vmx/optvfault.S     Thu Sep 27 16:29:43 2007 -0600
+++ b/xen/arch/ia64/vmx/optvfault.S     Mon Oct 01 09:52:14 2007 -0600
@@ -149,15 +149,15 @@ vmx_asm_mov_to_rr_back_2:
     ;;
     cmp.eq.or p6,p0=4,r23
     ;;
-    adds r16=IA64_VCPU_MODE_FLAGS_OFFSET,r21
+    adds r16=IA64_VCPU_MMU_MODE_OFFSET,r21
     (p6) adds r17=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
     ;;
-    ld4 r16=[r16]
+    ld1 r16=[r16]
     cmp.eq p7,p0=r0,r0
     (p6) shladd r17=r23,1,r17
     ;;
     (p6) st8 [r17]=r19
-    (p6) tbit.nz p6,p7=r16,GUEST_IN_PHY_BIT // Set physical rr if in virt mode
+    (p6) cmp.eq p7,p0=VMX_MMU_VIRTUAL,r16 // Set physical rr if in virt mode
     ;;
     (p7) mov rr[r28]=r19
     mov r24=r22
@@ -179,13 +179,13 @@ GLOBAL_ENTRY(vmx_asm_rsm)
     dep r26=r27,r26,21,2
     ;;
     add r17=VPD_VPSR_START_OFFSET,r16
-    add r22=IA64_VCPU_MODE_FLAGS_OFFSET,r21
+    add r22=IA64_VCPU_MMU_MODE_OFFSET,r21
     //r26 is imm24
     dep r26=r28,r26,23,1
     ;;
     ld8 r18=[r17]
     movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
-    ld4 r23=[r22]
+    ld1 r23=[r22]
     sub r27=-1,r26 // ~r26
     mov r24=b0
     ;;
@@ -199,22 +199,22 @@ GLOBAL_ENTRY(vmx_asm_rsm)
     ;;
     ld8 r27=[r27]
     ;;
-    tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT
+    tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
     ;;
     (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1  // Keep dfh
     ;;
     mov cr.ipsr=r20
-    tbit.nz p6,p0=r23,GUEST_IN_PHY_BIT
+    cmp.ne p6,p0=VMX_MMU_VIRTUAL,r23
     ;;
     tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
-    (p6) br.dptk vmx_resume_to_guest  // (DT set or already in phy mode)
+    (p6) br.dptk vmx_resume_to_guest  // DT not cleared or already in phy mode
     ;;
     // Switch to meta physical mode.
     add r26=IA64_VCPU_META_RID_DT_OFFSET,r21
-    dep r23=-1,r23,GUEST_IN_PHY_BIT,1 // Set GUEST_IN_PHY
+    mov r23=VMX_MMU_PHY_DT
     ;;
     ld8 r26=[r26]
-    st4 [r22]=r23 
+    st1 [r22]=r23 
     dep.z r28=4,61,3
     ;;
     mov rr[r0]=r26
@@ -245,30 +245,30 @@ GLOBAL_ENTRY(vmx_asm_ssm)
     ld8 r29=[r27]
     mov r24=b0
     ;;
-    add r22=IA64_VCPU_MODE_FLAGS_OFFSET,r21
+    add r22=IA64_VCPU_MMU_MODE_OFFSET,r21
     mov r20=cr.ipsr
     or r19=r29,r26
     ;;
-    ld4 r23=[r22]
-    st8 [r27]=r19
+    ld1 r23=[r22] // mmu_mode
+    st8 [r27]=r19 // vpsr
     or r20=r20,r26
     ;;
     mov cr.ipsr=r20
     movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
     ;;
     and r19=r28,r19
-    tbit.z p6,p0=r23,GUEST_IN_PHY_BIT
-    ;;
-    cmp.ne.or p6,p0=r28,r19
+    cmp.eq p6,p0=VMX_MMU_VIRTUAL,r23
+    ;;
+    cmp.ne.or p6,p0=r28,r19 // (vpsr & (it+dt+rt)) /= (it+dt+rt) ie stay in phy
     (p6) br.dptk vmx_asm_ssm_1
     ;;
     add r26=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
     add r27=IA64_VCPU_META_SAVED_RR0_OFFSET+8,r21
-    dep r23=0,r23,GUEST_IN_PHY_BIT,1  // Clear GUEST_IN_PHY
+    mov r23=VMX_MMU_VIRTUAL
     ;;
     ld8 r26=[r26]
     ld8 r27=[r27]
-    st4 [r22]=r23
+    st1 [r22]=r23
     dep.z r28=4,61,3
     ;;
     mov rr[r0]=r26
@@ -320,37 +320,37 @@ GLOBAL_ENTRY(vmx_asm_mov_to_psr)
     br.many b0
     ;;   
 vmx_asm_mov_to_psr_back:
-    ld8 r17=[r27]
-    add r22=IA64_VCPU_MODE_FLAGS_OFFSET,r21
-    dep r19=0,r19,32,32
+    ld8 r17=[r27] // vpsr
+    add r22=IA64_VCPU_MMU_MODE_OFFSET,r21
+    dep r19=0,r19,32,32 // Clear bits 32-63
     ;;   
-    ld4 r23=[r22]
+    ld1 r23=[r22] // mmu_mode
     dep r18=0,r17,0,32
     ;; 
-    add r30=r18,r19
+    or r30=r18,r19
     movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
     ;;
-    st8 [r27]=r30
+    st8 [r27]=r30 // set vpsr
     and r27=r28,r30
     and r29=r28,r17
     ;;
-    cmp.eq p5,p0=r29,r27
-    cmp.eq p6,p7=r28,r27
-    (p5) br.many vmx_asm_mov_to_psr_1
+    cmp.eq p5,p0=r29,r27 // (old_vpsr & (dt+rt+it)) == (new_vpsr & (dt+rt+it))
+    cmp.eq p6,p7=r28,r27 // (new_vpsr & (dt+rt+it)) == (dt+rt+it)
+    (p5) br.many vmx_asm_mov_to_psr_1 // no change
     ;;
     //virtual to physical
     (p7) add r26=IA64_VCPU_META_RID_DT_OFFSET,r21
     (p7) add r27=IA64_VCPU_META_RID_DT_OFFSET,r21
-    (p7) dep r23=-1,r23,GUEST_IN_PHY_BIT,1
+    (p7) mov r23=VMX_MMU_PHY_DT
     ;;
     //physical to virtual
     (p6) add r26=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
     (p6) add r27=IA64_VCPU_META_SAVED_RR0_OFFSET+8,r21
-    (p6) dep r23=0,r23,GUEST_IN_PHY_BIT,1
+    (p6) mov r23=VMX_MMU_VIRTUAL
     ;;
     ld8 r26=[r26]
     ld8 r27=[r27]
-    st4 [r22]=r23
+    st1 [r22]=r23
     dep.z r28=4,61,3
     ;;
     mov rr[r0]=r26
@@ -443,10 +443,10 @@ vmx_asm_thash_back1:
 vmx_asm_thash_back1:
     shr.u r23=r19,61           // get RR number
     adds r25=VCPU_VRR0_OFS,r21 // get vcpu->arch.arch_vmx.vrr[0]'s addr
-    adds r16=IA64_VPD_VPTA_OFFSET,r16  // get vpta 
+    adds r16=IA64_VPD_VPTA_OFFSET,r16  // get virtual pta 
     ;;
     shladd r27=r23,3,r25       // get vcpu->arch.arch_vmx.vrr[r23]'s addr
-    ld8 r17=[r16]              // get PTA
+    ld8 r17=[r16]              // get virtual PTA
     mov r26=1
     ;;
     extr.u r29=r17,2,6         // get pta.size
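
Throughout optvfault.S the ld4/st4 accesses become ld1/st1 (the mode is now
a single byte) and the GUEST_IN_PHY bit tests become compares against the
mode values.  A hypothetical C rendering of the rewritten vmx_asm_rsm fast
path (names illustrative; the assembly above is authoritative):

#define VMX_MMU_VIRTUAL 0
#define VMX_MMU_PHY_DT  3
#define IA64_PSR_DT     (1UL << 17)    /* psr.dt */

static void rsm_fast_path(unsigned char *mmu_mode, unsigned long rsm_mask)
{
        /* ld1 instead of ld4: the mode is one byte now.  */
        if (*mmu_mode != VMX_MMU_VIRTUAL || !(rsm_mask & IA64_PSR_DT))
                return;    /* DT not cleared, or already in phy mode */

        /* Switch to metaphysical mode (st1 instead of st4); the real
           code also loads the metaphysical RID into rr0 and rr4.  */
        *mmu_mode = VMX_MMU_PHY_DT;
}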
diff -r 83239b289072 -r 5c56ce7b9892 xen/arch/ia64/vmx/vmx_fault.c
--- a/xen/arch/ia64/vmx/vmx_fault.c     Thu Sep 27 16:29:43 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_fault.c     Mon Oct 01 09:52:14 2007 -0600
@@ -335,7 +335,7 @@ vmx_hpw_miss(u64 vadr, u64 vec, REGS* re
         panic_domain(regs, "wrong vec:%lx\n", vec);
 
     /* Physical mode and region is 0 or 4.  */
-    if (is_physical_mode(v) && (!((vadr<<1)>>62))) {
+    if (!is_virtual_mode(v) && (!((vadr << 1) >> 62))) {
         if (vec == 2) {
             /* DTLB miss.  */
             if (misr.sp) /* Refer to SDM Vol2 Table 4-11,4-12 */
@@ -351,7 +351,10 @@ vmx_hpw_miss(u64 vadr, u64 vec, REGS* re
     }
     
 try_again:
-    if ((data=vtlb_lookup(v, vadr,type)) != 0) {
+    /* Search in VTLB.  */
+    data = vtlb_lookup(v, vadr, type);
+    if (data != 0) {
+        /* Found.  */
         if (v->domain != dom0 && type == DSIDE_TLB) {
             if (misr.sp) { /* Refer to SDM Vol2 Table 4-10,4-12 */
                 if ((data->ma == VA_MATTR_UC) || (data->ma == VA_MATTR_UCE))
@@ -373,8 +376,10 @@ try_again:
             }
         }
         thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);
-
-    } else if (type == DSIDE_TLB) {
+        return IA64_NO_FAULT;
+    }
+
+    if (type == DSIDE_TLB) {
         struct opt_feature* optf = &(v->domain->arch.opt_feature);
 
         if (misr.sp)
@@ -385,7 +390,7 @@ try_again:
 
         if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
             /* windows use region 4 and 5 for identity mapping */
-            if (optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG4 &&
+            if ((optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG4) &&
                 REGION_NUMBER(vadr) == 4 && !(regs->cr_ipsr & IA64_PSR_CPL) &&
                 REGION_OFFSET(vadr) <= _PAGE_PPN_MASK) {
 
@@ -395,7 +400,7 @@ try_again:
                     goto try_again;
                 return IA64_NO_FAULT;
             }
-            if (optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG5 &&
+            if ((optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG5) &&
                 REGION_NUMBER(vadr) == 5 && !(regs->cr_ipsr & IA64_PSR_CPL) &&
                 REGION_OFFSET(vadr) <= _PAGE_PPN_MASK) {
 
diff -r 83239b289072 -r 5c56ce7b9892 xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c  Thu Sep 27 16:29:43 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c  Mon Oct 01 09:52:14 2007 -0600
@@ -25,7 +25,6 @@
 #include <asm/processor.h>
 #include <asm/gcc_intrin.h>
 #include <asm/vmx_phy_mode.h>
-#include <xen/sched.h>
 #include <asm/pgtable.h>
 #include <asm/vmmu.h>
 #include <asm/debugger.h>
@@ -44,11 +43,10 @@
  * Special notes:
  * - Index by it/dt/rt sequence
  * - Only existing mode transitions are allowed in this table
- * - RSE is placed at lazy mode when emulating guest partial mode
  * - If gva happens to be rr0 and rr4, only allowed case is identity
  *   mapping (gva=gpa), or panic! (How?)
  */
-static const int mm_switch_table[8][8] = {
+static const unsigned char mm_switch_table[8][8] = {
     /*  2004/09/12(Kevin): Allow switch to self */
     /*
      *  (it,dt,rt): (0,0,0) -> (1,1,1)
@@ -94,41 +92,36 @@ static const int mm_switch_table[8][8] =
      *  (see "arch/ia64/kernel/head.S")
      *  (1,1,1)->(1,0,0)
      */
-
     {SW_V2P_DT, 0,  0,  0,  SW_V2P_D, SW_V2P_D, 0,  SW_SELF},
 };
 
 void
 physical_mode_init(VCPU *vcpu)
 {
-    vcpu->arch.mode_flags = GUEST_IN_PHY;
+    vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_DT;
 }
 
 void
 physical_tlb_miss(VCPU *vcpu, u64 vadr, int type)
 {
     u64 pte;
-    ia64_rr rr;
-    rr.rrval = ia64_get_rr(vadr);
-    pte = vadr & _PAGE_PPN_MASK;
-    pte = pte | PHY_PAGE_WB;
-    thash_vhpt_insert(vcpu, pte, (rr.ps << 2), vadr, type);
-    return;
+
+    pte = (vadr & _PAGE_PPN_MASK) | PHY_PAGE_WB;
+    thash_vhpt_insert(vcpu, pte, (PAGE_SHIFT << 2), vadr, type);
 }
 
 void
 vmx_init_all_rr(VCPU *vcpu)
 {
-       VMX(vcpu, vrr[VRN0]) = 0x38;
        // enable vhpt in guest physical mode
        vcpu->arch.metaphysical_rid_dt |= 1;
+
+       VMX(vcpu, vrr[VRN0]) = 0x38;
        vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(vcpu, 0x38);
        VMX(vcpu, vrr[VRN1]) = 0x38;
        VMX(vcpu, vrr[VRN2]) = 0x38;
        VMX(vcpu, vrr[VRN3]) = 0x38;
        VMX(vcpu, vrr[VRN4]) = 0x38;
-       // enable vhpt in guest physical mode
-       vcpu->arch.metaphysical_rid_d |= 0; /* VHPT not enabled! */
        vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(vcpu, 0x38);
        VMX(vcpu, vrr[VRN5]) = 0x38;
        VMX(vcpu, vrr[VRN6]) = 0x38;
@@ -141,31 +134,31 @@ vmx_load_all_rr(VCPU *vcpu)
 vmx_load_all_rr(VCPU *vcpu)
 {
        unsigned long psr;
-
-       local_irq_save(psr);
-
-       /* WARNING: not allow co-exist of both virtual mode and physical
-        * mode in same region
-        */
-       if (is_physical_mode(vcpu)) {
-               if (vcpu->arch.mode_flags & GUEST_PHY_EMUL){
-                       panic_domain(vcpu_regs(vcpu),
-                                    "Unexpected domain switch in phy emul\n");
-               }
-               ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rid_dt);
-               ia64_dv_serialize_data();
-               ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rid_dt);
-               ia64_dv_serialize_data();
-       } else {
-               ia64_set_rr((VRN0 << VRN_SHIFT),
-                           vcpu->arch.metaphysical_saved_rr0);
-               ia64_dv_serialize_data();
-               ia64_set_rr((VRN4 << VRN_SHIFT),
-                           vcpu->arch.metaphysical_saved_rr4);
-               ia64_dv_serialize_data();
+       unsigned long rr0, rr4;
+
+       switch (vcpu->arch.arch_vmx.mmu_mode) {
+       case VMX_MMU_VIRTUAL:
+               rr0 = vcpu->arch.metaphysical_saved_rr0;
+               rr4 = vcpu->arch.metaphysical_saved_rr4;
+               break;
+       case VMX_MMU_PHY_DT:
+               rr0 = vcpu->arch.metaphysical_rid_dt;
+               rr4 = vcpu->arch.metaphysical_rid_dt;
+               break;
+       case VMX_MMU_PHY_D:
+               rr0 = vcpu->arch.metaphysical_rid_d;
+               rr4 = vcpu->arch.metaphysical_rid_d;
+               break;
+       default:
+               panic_domain(NULL, "bad mmu mode value");
        }
 
-       /* rr567 will be postponed to last point when resuming back to guest */
+       psr = ia64_clear_ic();
+
+       ia64_set_rr((VRN0 << VRN_SHIFT), rr0);
+       ia64_dv_serialize_data();
+       ia64_set_rr((VRN4 << VRN_SHIFT), rr4);
+       ia64_dv_serialize_data();
        ia64_set_rr((VRN1 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
        ia64_dv_serialize_data();
        ia64_set_rr((VRN2 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
@@ -190,13 +183,25 @@ switch_to_physical_rid(VCPU *vcpu)
 switch_to_physical_rid(VCPU *vcpu)
 {
     u64 psr;
-
+    u64 rr;
+
+    switch (vcpu->arch.arch_vmx.mmu_mode) {
+    case VMX_MMU_PHY_DT:
+        rr = vcpu->arch.metaphysical_rid_dt;
+        break;
+    case VMX_MMU_PHY_D:
+        rr = vcpu->arch.metaphysical_rid_d;
+        break;
+    default:
+        panic_domain(NULL, "bad mmu mode value");
+    }
+    
     psr = ia64_clear_ic();
-    ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_rid_dt);
+    ia64_set_rr(VRN0<<VRN_SHIFT, rr);
+    ia64_dv_serialize_data();
+    ia64_set_rr(VRN4<<VRN_SHIFT, rr);
     ia64_srlz_d();
-    ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_rid_dt);
-    ia64_srlz_d();
-
+    
     ia64_set_psr(psr);
     ia64_srlz_i();
     return;
@@ -206,9 +211,10 @@ switch_to_virtual_rid(VCPU *vcpu)
 switch_to_virtual_rid(VCPU *vcpu)
 {
     u64 psr;
-    psr=ia64_clear_ic();
+
+    psr = ia64_clear_ic();
     ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
-    ia64_srlz_d();
+    ia64_dv_serialize_data();
     ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
     ia64_srlz_d();
     ia64_set_psr(psr);
@@ -232,22 +238,14 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
     case SW_V2P_D:
 //        printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
 //               old_psr.val, new_psr.val);
+        vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_DT;
         switch_to_physical_rid(vcpu);
-        /*
-         * Set rse to enforced lazy, to prevent active rse save/restor when
-         * guest physical mode.
-         */
-        vcpu->arch.mode_flags |= GUEST_IN_PHY;
         break;
     case SW_P2V:
 //        printk("P -> V mode transition: (0x%lx -> 0x%lx)\n",
 //               old_psr.val, new_psr.val);
+        vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_VIRTUAL;
         switch_to_virtual_rid(vcpu);
-        /*
-         * recover old mode which is saved when entering
-         * guest physical mode
-         */
-        vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
         break;
     case SW_SELF:
         printk("Switch to self-0x%lx!!! MM mode doesn't change...\n",
@@ -259,7 +257,9 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
         break;
     default:
         /* Sanity check */
-        panic_domain(vcpu_regs(vcpu),"Unexpected virtual <--> physical mode transition,old:%lx,new:%lx\n",old_psr.val,new_psr.val);
+        panic_domain(vcpu_regs(vcpu),
+                     "Unexpected virtual <--> physical mode transition, "
+                     "old:%lx, new:%lx\n", old_psr.val, new_psr.val);
         break;
     }
     return;
@@ -268,16 +268,12 @@ void
 void
 check_mm_mode_switch (VCPU *vcpu,  IA64_PSR old_psr, IA64_PSR new_psr)
 {
-
     if (old_psr.dt != new_psr.dt ||
         old_psr.it != new_psr.it ||
         old_psr.rt != new_psr.rt) {
-
         switch_mm_mode(vcpu, old_psr, new_psr);
         debugger_event(XEN_IA64_DEBUG_ON_MMU);
     }
-
-    return;
 }
 
 
@@ -300,10 +296,8 @@ void
 void
 prepare_if_physical_mode(VCPU *vcpu)
 {
-    if (is_physical_mode(vcpu)) {
-        vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
+    if (!is_virtual_mode(vcpu))
         switch_to_virtual_rid(vcpu);
-    }
     return;
 }
 
@@ -311,9 +305,8 @@ void
 void
 recover_if_physical_mode(VCPU *vcpu)
 {
-    if (is_physical_mode(vcpu))
+    if (!is_virtual_mode(vcpu))
         switch_to_physical_rid(vcpu);
-    vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
-    return;
-}
-
+    return;
+}
+
diff -r 83239b289072 -r 5c56ce7b9892 xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c      Thu Sep 27 16:29:43 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c      Mon Oct 01 09:52:14 2007 -0600
@@ -178,13 +178,13 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u6
     case VRN4:
         rrval = vrrtomrr(vcpu,val);
         vcpu->arch.metaphysical_saved_rr4 = rrval;
-        if (!is_physical_mode(vcpu))
+        if (is_virtual_mode(vcpu))
             ia64_set_rr(reg,rrval);
         break;
     case VRN0:
         rrval = vrrtomrr(vcpu,val);
         vcpu->arch.metaphysical_saved_rr0 = rrval;
-        if (!is_physical_mode(vcpu))
+        if (is_virtual_mode(vcpu))
             ia64_set_rr(reg,rrval);
         break;
     default:
diff -r 83239b289072 -r 5c56ce7b9892 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Thu Sep 27 16:29:43 2007 -0600
+++ b/xen/include/asm-ia64/domain.h     Mon Oct 01 09:52:14 2007 -0600
@@ -254,8 +254,6 @@ struct arch_vcpu {
     char irq_new_condition;    // vpsr.i/vtpr change, check for pending VHPI
     char hypercall_continuation;
 
-    // for physical emulation
-    int mode_flags;
     fpswa_ret_t fpswa_ret;     /* save return values of FPSWA emulation */
     struct timer hlt_timer;
     struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */
diff -r 83239b289072 -r 5c56ce7b9892 xen/include/asm-ia64/vmx_phy_mode.h
--- a/xen/include/asm-ia64/vmx_phy_mode.h       Thu Sep 27 16:29:43 2007 -0600
+++ b/xen/include/asm-ia64/vmx_phy_mode.h       Mon Oct 01 09:52:14 2007 -0600
@@ -85,22 +85,13 @@ extern void vmx_init_all_rr(VCPU *vcpu);
 extern void vmx_init_all_rr(VCPU *vcpu);
 extern void vmx_load_all_rr(VCPU *vcpu);
 extern void physical_tlb_miss(VCPU *vcpu, u64 vadr, int type);
-/*
- * No sanity check here, since all psr changes have been
- * checked in switch_mm_mode().
- */
-#define is_physical_mode(v) \
-    ((v->arch.mode_flags) & GUEST_IN_PHY)
 
-#define is_virtual_mode(v) \
-    (!is_physical_mode(v))
+#define is_virtual_mode(v)     ((v)->arch.arch_vmx.mmu_mode == VMX_MMU_VIRTUAL)
 
 #endif /* __ASSEMBLY__ */
 
-#define GUEST_IN_PHY_BIT   0
-#define GUEST_PHY_EMUL_BIT 1
-
-#define GUEST_IN_PHY   (1 << GUEST_IN_PHY_BIT)
-#define GUEST_PHY_EMUL (1 << GUEST_PHY_EMUL_BIT)
+#define VMX_MMU_VIRTUAL    0    /* Full virtual mode: it=dt=1  */
+#define VMX_MMU_PHY_D      1    /* Half physical: it=1,dt=0  */
+#define VMX_MMU_PHY_DT     3    /* Full physical mode: it=0,dt=0  */
 
 #endif /* _PHY_MODE_H_ */
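
Read as a two-bit field, the new constants appear to encode which guest
translations are off: bit 0 for data translation, bit 1 for instruction
translation (an observation from the comments above, not something the
patch states).  Illustrative helpers under that assumption:

static inline int guest_dt_off(unsigned char mmu_mode)
{
        return mmu_mode & 1;           /* PHY_D and PHY_DT */
}

static inline int guest_it_off(unsigned char mmu_mode)
{
        return (mmu_mode >> 1) & 1;    /* PHY_DT only */
}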
diff -r 83239b289072 -r 5c56ce7b9892 xen/include/asm-ia64/vmx_vpd.h
--- a/xen/include/asm-ia64/vmx_vpd.h    Thu Sep 27 16:29:43 2007 -0600
+++ b/xen/include/asm-ia64/vmx_vpd.h    Mon Oct 01 09:52:14 2007 -0600
@@ -32,38 +32,6 @@
 #define VPD_SHIFT      16
 #define VPD_SIZE       (1 << VPD_SHIFT)
 
-typedef struct {
-       unsigned long   dcr;            // CR0
-       unsigned long   itm;
-       unsigned long   iva;
-       unsigned long   rsv1[5];
-       unsigned long   pta;            // CR8
-       unsigned long   rsv2[7];
-       unsigned long   ipsr;           // CR16
-       unsigned long   isr;
-       unsigned long   rsv3;
-       unsigned long   iip;
-       unsigned long   ifa;
-       unsigned long   itir;
-       unsigned long   iipa;
-       unsigned long   ifs;
-       unsigned long   iim;            // CR24
-       unsigned long   iha;
-       unsigned long   rsv4[38];
-       unsigned long   lid;            // CR64
-       unsigned long   ivr;
-       unsigned long   tpr;
-       unsigned long   eoi;
-       unsigned long   irr[4];
-       unsigned long   itv;            // CR72
-       unsigned long   pmv;
-       unsigned long   cmcv;
-       unsigned long   rsv5[5];
-       unsigned long   lrr0;           // CR80
-       unsigned long   lrr1;
-       unsigned long   rsv6[46];
-} cr_t;
-
 #ifdef VTI_DEBUG
 struct ivt_debug{
     unsigned long iip;
@@ -91,6 +59,7 @@ struct arch_vmx_struct {
     unsigned long   xen_port;
     unsigned char   xtp;
     unsigned char   pal_init_pending;
+    unsigned char   mmu_mode; /* Current mmu mode.  See vmx_phy_mode.h  */
 #ifdef VTI_DEBUG
     unsigned long  ivt_current;
     struct ivt_debug ivt_debug[IVT_DEBUG_MAX];
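
The new mmu_mode byte is placed next to the existing unsigned char members
(xtp, pal_init_pending), which keeps arch_vmx_struct free of extra padding.
A toy demonstration of why such grouping matters (illustrative, not Xen
code; sizes assume a typical LP64 ABI):

#include <stdio.h>

struct grouped {
        unsigned long port;
        unsigned char xtp, pal_init_pending, mmu_mode;
};

struct split {
        unsigned char xtp;
        unsigned long port;
        unsigned char pal_init_pending, mmu_mode;
};

int main(void)
{
        /* Expected on LP64: grouped = 16 bytes, split = 24 bytes.  */
        printf("grouped=%zu split=%zu\n",
               sizeof(struct grouped), sizeof(struct split));
        return 0;
}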

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
