# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1190834537 21600
# Node ID 788c39a0b9059ea01342e4602de2479277764b03
# Parent c7140b8f56ac3fcf979a6e76a6d9497d4829a97b
[IA64] Use same RID for rr0 and rr4 in metaphysical mode.
Rename metaphysical_rr0 to metaphysical_rid_dt.
Rename metaphysical_rr4 to metaphysical_rid_d.
Add comments in optvfault.S.
Clean up and update vmx_phy_mode.[ch].
Signed-off-by: Tristan Gingold <tgingold@xxxxxxx>
---
xen/arch/ia64/asm-offsets.c | 2
xen/arch/ia64/vmx/optvfault.S | 54 ++++++-------
xen/arch/ia64/vmx/vmx_ivt.S | 6 -
xen/arch/ia64/vmx/vmx_phy_mode.c | 145 ++++++++++++++----------------------
xen/arch/ia64/vmx/vtlb.c | 2
xen/arch/ia64/xen/domain.c | 9 +-
xen/arch/ia64/xen/hyperprivop.S | 2
xen/arch/ia64/xen/regionreg.c | 14 +--
xen/include/asm-ia64/domain.h | 10 +-
xen/include/asm-ia64/vmx_phy_mode.h | 29 +------
10 files changed, 118 insertions(+), 155 deletions(-)
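Background for the flag rework below: the old GUEST_IN_PHY/GUEST_PHY_EMUL
constants were plain masks, which the assembly fast paths cannot use with
tbit (tbit takes a bit position, not a mask). vmx_phy_mode.h now defines the
bit numbers first and derives the masks from them. A minimal C sketch of the
resulting scheme; the guest_in_phy() helper is hypothetical, shown only for
illustration:

    #define GUEST_IN_PHY_BIT   0                /* guest in metaphysical mode */
    #define GUEST_PHY_EMUL_BIT 1
    #define GUEST_IN_PHY       (1 << GUEST_IN_PHY_BIT)
    #define GUEST_PHY_EMUL     (1 << GUEST_PHY_EMUL_BIT)

    /* hypothetical helper mirroring the tbit tests in optvfault.S */
    static inline int guest_in_phy(int mode_flags)
    {
        return (mode_flags & GUEST_IN_PHY) != 0;
    }

Assembly can then test the same bit directly, as in
"tbit.nz p6,p0=r23,GUEST_IN_PHY_BIT" below.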
diff -r c7140b8f56ac -r 788c39a0b905 xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c Wed Sep 26 12:43:41 2007 -0600
+++ b/xen/arch/ia64/asm-offsets.c Wed Sep 26 13:22:17 2007 -0600
@@ -58,7 +58,7 @@ void foo(void)
DEFINE(IA64_VCPU_DOMAIN_OFFSET, offsetof (struct vcpu, domain));
DEFINE(IA64_VCPU_HYPERCALL_CONTINUATION_OFS, offsetof (struct vcpu, arch.hypercall_continuation));
DEFINE(IA64_VCPU_FP_PSR_OFFSET, offsetof (struct vcpu, arch.fp_psr));
- DEFINE(IA64_VCPU_META_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_rr0));
+ DEFINE(IA64_VCPU_META_RID_DT_OFFSET, offsetof (struct vcpu, arch.metaphysical_rid_dt));
DEFINE(IA64_VCPU_META_SAVED_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_saved_rr0));
DEFINE(IA64_VCPU_BREAKIMM_OFFSET, offsetof (struct vcpu, arch.breakimm));
DEFINE(IA64_VCPU_IVA_OFFSET, offsetof (struct vcpu, arch.iva));
diff -r c7140b8f56ac -r 788c39a0b905 xen/arch/ia64/vmx/optvfault.S
--- a/xen/arch/ia64/vmx/optvfault.S Wed Sep 26 12:43:41 2007 -0600
+++ b/xen/arch/ia64/vmx/optvfault.S Wed Sep 26 13:22:17 2007 -0600
@@ -16,6 +16,7 @@
#include <asm/vmx_pal_vsa.h>
#include <asm/asm-offsets.h>
#include <asm-ia64/vmx_mm_def.h>
+#include <asm-ia64/vmx_phy_mode.h>
#define ACCE_MOV_FROM_AR
#define ACCE_MOV_FROM_RR
@@ -25,7 +26,7 @@
#define ACCE_MOV_TO_PSR
#define ACCE_THASH
-//mov r1=ar3
+//mov r1=ar3 (only itc is virtualized)
GLOBAL_ENTRY(vmx_asm_mov_from_ar)
#ifndef ACCE_MOV_FROM_AR
br.many vmx_virtualization_fault_back
@@ -131,7 +132,7 @@ vmx_asm_mov_to_rr_back_2:
;; //mangling rid 1 and 3
extr.u r16=r19,8,8
extr.u r17=r19,24,8
- extr.u r18=r19,2,6
+ extr.u r18=r19,2,6 // page size
;;
dep r19=r16,r19,24,8
;;
@@ -156,7 +157,7 @@ vmx_asm_mov_to_rr_back_2:
(p6) shladd r17=r23,1,r17
;;
(p6) st8 [r17]=r19
- (p6) tbit.nz p6,p7=r16,0
+ (p6) tbit.nz p6,p7=r16,GUEST_IN_PHY_BIT // Set physical rr if in virt mode
;;
(p7) mov rr[r28]=r19
mov r24=r22
@@ -170,11 +171,11 @@ GLOBAL_ENTRY(vmx_asm_rsm)
br.many vmx_virtualization_fault_back
#endif
add r16=IA64_VPD_BASE_OFFSET,r21
- extr.u r26=r25,6,21
- extr.u r27=r25,31,2
+ extr.u r26=r25,6,21 // Imm21
+ extr.u r27=r25,31,2 // I2d
;;
ld8 r16=[r16]
- extr.u r28=r25,36,1
+ extr.u r28=r25,36,1 // I
dep r26=r27,r26,21,2
;;
add r17=VPD_VPSR_START_OFFSET,r16
@@ -185,41 +186,40 @@ GLOBAL_ENTRY(vmx_asm_rsm)
ld8 r18=[r17]
movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
ld4 r23=[r22]
- sub r27=-1,r26
+ sub r27=-1,r26 // ~r26
mov r24=b0
;;
mov r20=cr.ipsr
- or r28=r27,r28
- and r19=r18,r27
+ or r28=r27,r28 // Keep IC,I,DT,SI
+ and r19=r18,r27 // Update vpsr
;;
st8 [r17]=r19
- and r20=r20,r28
+ and r20=r20,r28 // Update ipsr
adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
;;
ld8 r27=[r27]
;;
tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT
;;
- (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
+ (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1 // Keep dfh
;;
mov cr.ipsr=r20
- tbit.nz p6,p0=r23,0
+ tbit.nz p6,p0=r23,GUEST_IN_PHY_BIT
;;
tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
- (p6) br.dptk vmx_resume_to_guest
- ;;
- add r26=IA64_VCPU_META_RR0_OFFSET,r21
- add r27=IA64_VCPU_META_RR0_OFFSET+8,r21
- dep r23=-1,r23,0,1
+ (p6) br.dptk vmx_resume_to_guest // (DT set or already in phy mode)
+ ;;
+ // Switch to metaphysical mode.
+ add r26=IA64_VCPU_META_RID_DT_OFFSET,r21
+ dep r23=-1,r23,GUEST_IN_PHY_BIT,1 // Set GUEST_IN_PHY
;;
ld8 r26=[r26]
- ld8 r27=[r27]
- st4 [r22]=r23
+ st4 [r22]=r23
dep.z r28=4,61,3
;;
mov rr[r0]=r26
;;
- mov rr[r28]=r27
+ mov rr[r28]=r26
;;
srlz.d
br.many vmx_resume_to_guest
@@ -257,14 +257,14 @@ GLOBAL_ENTRY(vmx_asm_ssm)
movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
;;
and r19=r28,r19
- tbit.z p6,p0=r23,0
+ tbit.z p6,p0=r23,GUEST_IN_PHY_BIT
;;
cmp.ne.or p6,p0=r28,r19
(p6) br.dptk vmx_asm_ssm_1
;;
add r26=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
add r27=IA64_VCPU_META_SAVED_RR0_OFFSET+8,r21
- dep r23=0,r23,0,1
+ dep r23=0,r23,GUEST_IN_PHY_BIT,1 // Clear GUEST_IN_PHY
;;
ld8 r26=[r26]
ld8 r27=[r27]
@@ -339,14 +339,14 @@ vmx_asm_mov_to_psr_back:
(p5) br.many vmx_asm_mov_to_psr_1
;;
//virtual to physical
- (p7) add r26=IA64_VCPU_META_RR0_OFFSET,r21
- (p7) add r27=IA64_VCPU_META_RR0_OFFSET+8,r21
- (p7) dep r23=-1,r23,0,1
+ (p7) add r26=IA64_VCPU_META_RID_DT_OFFSET,r21
+ (p7) add r27=IA64_VCPU_META_RID_DT_OFFSET,r21
+ (p7) dep r23=-1,r23,GUEST_IN_PHY_BIT,1
;;
//physical to virtual
(p6) add r26=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
(p6) add r27=IA64_VCPU_META_SAVED_RR0_OFFSET+8,r21
- (p6) dep r23=0,r23,0,1
+ (p6) dep r23=0,r23,GUEST_IN_PHY_BIT,1
;;
ld8 r26=[r26]
ld8 r27=[r27]
@@ -594,6 +594,7 @@ MOV_FROM_BANK0_REG(31)
// mov from reg table
+// r19: value, r30: return address
ENTRY(asm_mov_from_reg)
MOV_FROM_REG(0)
MOV_FROM_REG(1)
@@ -789,6 +790,7 @@ MOV_TO_BANK0_REG(31)
// mov to reg table
+// r19: value, r30: return address
ENTRY(asm_mov_to_reg)
MOV_TO_REG0
MOV_TO_REG(1)
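The functional change in vmx_asm_rsm above: when entering metaphysical mode,
rr0 and rr4 now receive the same RID, so the second ld8 and the
metaphysical_rr4 load disappear and r26 feeds both mov-to-rr instructions.
In C terms the fast path is roughly equivalent to this sketch (using the
ia64_set_rr/VRN* names that appear elsewhere in this patch):

    /* enter metaphysical mode: one RID serves both region 0 and region 4 */
    u64 rid = vcpu->arch.metaphysical_rid_dt;
    vcpu->arch.mode_flags |= GUEST_IN_PHY;   /* dep r23=-1,...; st4 [r22]=r23 */
    ia64_set_rr(VRN0 << VRN_SHIFT, rid);     /* mov rr[r0]=r26  */
    ia64_set_rr(VRN4 << VRN_SHIFT, rid);     /* mov rr[r28]=r26 */
    ia64_srlz_d();                           /* srlz.d          */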
diff -r c7140b8f56ac -r 788c39a0b905 xen/arch/ia64/vmx/vmx_ivt.S
--- a/xen/arch/ia64/vmx/vmx_ivt.S Wed Sep 26 12:43:41 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_ivt.S Wed Sep 26 13:22:17 2007 -0600
@@ -308,9 +308,9 @@ ENTRY(vmx_alt_itlb_miss)
ENTRY(vmx_alt_itlb_miss)
VMX_DBG_FAULT(3)
mov r31 = pr
- mov r29=cr.ipsr;
- ;;
- tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
+ mov r29=cr.ipsr
+ ;;
+ tbit.z p6,p7=r29,IA64_PSR_VM_BIT
(p7)br.spnt vmx_fault_3
vmx_alt_itlb_miss_1:
mov r16=cr.ifa // get address that caused the TLB miss
diff -r c7140b8f56ac -r 788c39a0b905 xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c Wed Sep 26 12:43:41 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c Wed Sep 26 13:22:17 2007 -0600
@@ -30,16 +30,15 @@
#include <asm/vmmu.h>
#include <asm/debugger.h>
-static const int valid_mm_mode[8] = {
- GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */
- INV_MODE,
- INV_MODE,
- GUEST_PHYS, /* (it, dt, rt) -> (0, 1, 1) */
- INV_MODE,
- GUEST_PHYS, /* (it, dt, rt) -> (1, 0, 1) */
- INV_MODE,
- GUEST_VIRT, /* (it, dt, rt) -> (1, 1, 1).*/
-};
+#define MODE_IND(psr) \
+ (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
+
+#define SW_BAD 0 /* Bad mode transition */
+#define SW_V2P_DT 1 /* Physical emulation is activated */
+#define SW_V2P_D 2 /* Physical emulation is activated (only for data) */
+#define SW_P2V 3 /* Exit physical mode emulation */
+#define SW_SELF 4 /* No mode transition */
+#define SW_NOP 5 /* Mode transition, but without action required */
/*
* Special notes:
@@ -51,9 +50,9 @@ static const int valid_mm_mode[8] = {
*/
static const int mm_switch_table[8][8] = {
/* 2004/09/12(Kevin): Allow switch to self */
- /*
- * (it,dt,rt): (0,0,0) -> (1,1,1)
- * This kind of transition usually occurs in the very early
+ /*
+ * (it,dt,rt): (0,0,0) -> (1,1,1)
+ * This kind of transition usually occurs in the very early
* stage of Linux boot up procedure. Another case is in efi
* and pal calls. (see "arch/ia64/kernel/head.S")
*
@@ -62,7 +61,7 @@ static const int mm_switch_table[8][8] =
* service. Due to gva = gpa in this case (Same region),
* data access can be satisfied though itlb entry for physical
* emulation is hit.
- */
+ */
{SW_SELF,0, 0, SW_NOP, 0, 0, 0, SW_P2V},
{0, 0, 0, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, 0, 0},
@@ -77,16 +76,16 @@ static const int mm_switch_table[8][8] =
/* (1,0,0)->(1,1,1) */
{0, 0, 0, 0, 0, 0, 0, SW_P2V},
/*
- * (it,dt,rt): (1,0,1) -> (1,1,1)
- * This kind of transition usually occurs when Linux returns
+ * (it,dt,rt): (1,0,1) -> (1,1,1)
+ * This kind of transition usually occurs when Linux returns
* from the low level TLB miss handlers.
- * (see "arch/ia64/kernel/ivt.S")
- */
+ * (see "arch/ia64/kernel/ivt.S")
+ */
{0, 0, 0, 0, 0, SW_SELF,0, SW_P2V},
{0, 0, 0, 0, 0, 0, 0, 0},
/*
- * (it,dt,rt): (1,1,1) -> (1,0,1)
- * This kind of transition usually occurs in Linux low level
+ * (it,dt,rt): (1,1,1) -> (1,0,1)
+ * This kind of transition usually occurs in Linux low level
* TLB miss handler. (see "arch/ia64/kernel/ivt.S")
*
* (it,dt,rt): (1,1,1) -> (0,0,0)
@@ -96,7 +95,7 @@ static const int mm_switch_table[8][8] =
* (1,1,1)->(1,0,0)
*/
- {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF},
+ {SW_V2P_DT, 0, 0, 0, SW_V2P_D, SW_V2P_D, 0, SW_SELF},
};
void
@@ -111,26 +110,25 @@ physical_tlb_miss(VCPU *vcpu, u64 vadr,
u64 pte;
ia64_rr rr;
rr.rrval = ia64_get_rr(vadr);
- pte = vadr& _PAGE_PPN_MASK;
+ pte = vadr & _PAGE_PPN_MASK;
pte = pte | PHY_PAGE_WB;
thash_vhpt_insert(vcpu, pte, (rr.ps << 2), vadr, type);
return;
}
-
void
vmx_init_all_rr(VCPU *vcpu)
{
VMX(vcpu, vrr[VRN0]) = 0x38;
// enable vhpt in guest physical mode
- vcpu->arch.metaphysical_rr0 |= 1;
+ vcpu->arch.metaphysical_rid_dt |= 1;
vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(vcpu, 0x38);
VMX(vcpu, vrr[VRN1]) = 0x38;
VMX(vcpu, vrr[VRN2]) = 0x38;
VMX(vcpu, vrr[VRN3]) = 0x38;
VMX(vcpu, vrr[VRN4]) = 0x38;
// enable vhpt in guest physical mode
- vcpu->arch.metaphysical_rr4 |= 1;
+ vcpu->arch.metaphysical_rid_d |= 0; /* VHPT not enabled! */
vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(vcpu, 0x38);
VMX(vcpu, vrr[VRN5]) = 0x38;
VMX(vcpu, vrr[VRN6]) = 0x38;
@@ -154,37 +152,32 @@ vmx_load_all_rr(VCPU *vcpu)
panic_domain(vcpu_regs(vcpu),
"Unexpected domain switch in phy emul\n");
}
- ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
- ia64_dv_serialize_data();
- ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
+ ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rid_dt);
+ ia64_dv_serialize_data();
+ ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rid_dt);
ia64_dv_serialize_data();
} else {
ia64_set_rr((VRN0 << VRN_SHIFT),
- vcpu->arch.metaphysical_saved_rr0);
+ vcpu->arch.metaphysical_saved_rr0);
ia64_dv_serialize_data();
ia64_set_rr((VRN4 << VRN_SHIFT),
- vcpu->arch.metaphysical_saved_rr4);
+ vcpu->arch.metaphysical_saved_rr4);
ia64_dv_serialize_data();
}
/* rr567 will be postponed to last point when resuming back to guest */
- ia64_set_rr((VRN1 << VRN_SHIFT),
- vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
- ia64_dv_serialize_data();
- ia64_set_rr((VRN2 << VRN_SHIFT),
- vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
- ia64_dv_serialize_data();
- ia64_set_rr((VRN3 << VRN_SHIFT),
- vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
- ia64_dv_serialize_data();
- ia64_set_rr((VRN5 << VRN_SHIFT),
- vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
- ia64_dv_serialize_data();
- ia64_set_rr((VRN6 << VRN_SHIFT),
- vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
+ ia64_set_rr((VRN1 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
+ ia64_dv_serialize_data();
+ ia64_set_rr((VRN2 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
+ ia64_dv_serialize_data();
+ ia64_set_rr((VRN3 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
+ ia64_dv_serialize_data();
+ ia64_set_rr((VRN5 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
+ ia64_dv_serialize_data();
+ ia64_set_rr((VRN6 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
ia64_dv_serialize_data();
vmx_switch_rr7(vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
- (void *)vcpu->arch.vhpt.hash, pal_vaddr );
+ (void *)vcpu->arch.vhpt.hash, pal_vaddr);
ia64_set_pta(VMX(vcpu, mpta));
vmx_ia64_set_dcr(vcpu);
@@ -193,24 +186,21 @@ vmx_load_all_rr(VCPU *vcpu)
ia64_srlz_i();
}
-
-
void
switch_to_physical_rid(VCPU *vcpu)
{
u64 psr;
- /* Save original virtual mode rr[0] and rr[4] */
- psr=ia64_clear_ic();
- ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
- ia64_srlz_d();
- ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
+
+ psr = ia64_clear_ic();
+ ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_rid_dt);
+ ia64_srlz_d();
+ ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_rid_dt);
ia64_srlz_d();
ia64_set_psr(psr);
ia64_srlz_i();
return;
}
-
void
switch_to_virtual_rid(VCPU *vcpu)
@@ -238,7 +228,8 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
act = mm_switch_action(old_psr, new_psr);
perfc_incra(vmx_switch_mm_mode, act);
switch (act) {
- case SW_V2P:
+ case SW_V2P_DT:
+ case SW_V2P_D:
// printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
// old_psr.val, new_psr.val);
switch_to_physical_rid(vcpu);
@@ -274,6 +265,20 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
return;
}
+void
+check_mm_mode_switch (VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
+{
+
+ if (old_psr.dt != new_psr.dt ||
+ old_psr.it != new_psr.it ||
+ old_psr.rt != new_psr.rt) {
+
+ switch_mm_mode(vcpu, old_psr, new_psr);
+ debugger_event(XEN_IA64_DEBUG_ON_MMU);
+ }
+
+ return;
+}
/*
@@ -293,38 +298,6 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
*/
void
-check_mm_mode_switch (VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
-{
-
- if ( (old_psr.dt != new_psr.dt ) ||
- (old_psr.it != new_psr.it ) ||
- (old_psr.rt != new_psr.rt )
- ) {
- switch_mm_mode (vcpu, old_psr, new_psr);
- debugger_event(XEN_IA64_DEBUG_ON_MMU);
- }
-
- return;
-}
-
-
-/*
- * In physical mode, insert tc/tr for region 0 and 4 uses
- * RID[0] and RID[4] which is for physical mode emulation.
- * However what those inserted tc/tr wants is rid for
- * virtual mode. So original virtual rid needs to be restored
- * before insert.
- *
- * Operations which required such switch include:
- * - insertions (itc.*, itr.*)
- * - purges (ptc.* and ptr.*)
- * - tpa
- * - tak
- * - thash?, ttag?
- * All above needs actual virtual rid for destination entry.
- */
-
-void
prepare_if_physical_mode(VCPU *vcpu)
{
if (is_physical_mode(vcpu)) {
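For reference, MODE_IND() packs (it, dt, rt) into a 3-bit index, so the table
above is consulted as mm_switch_table[MODE_IND(old_psr)][MODE_IND(new_psr)].
The dispatch in switch_mm_mode() then reduces to roughly the following sketch
(assuming the IA64_PSR bitfields used throughout this file):

    int act = mm_switch_table[MODE_IND(old_psr)][MODE_IND(new_psr)];
    switch (act) {
    case SW_V2P_DT:                  /* e.g. (1,1,1) -> (0,0,0): 7 -> 0 */
    case SW_V2P_D:                   /* e.g. (1,1,1) -> (1,0,1): 7 -> 5 */
        switch_to_physical_rid(vcpu);   /* rr0, rr4 <- metaphysical_rid_dt */
        break;
    case SW_P2V:
        switch_to_virtual_rid(vcpu);    /* restore saved_rr0/saved_rr4 */
        break;
    case SW_SELF:
    case SW_NOP:
        break;                       /* nothing to reload */
    }

Splitting the old SW_V2P into SW_V2P_DT (dt=it=0) and SW_V2P_D (dt=0, it=1)
records which translation bits were cleared; with this patch both cases still
load the same metaphysical_rid_dt.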
diff -r c7140b8f56ac -r 788c39a0b905 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c Wed Sep 26 12:43:41 2007 -0600
+++ b/xen/arch/ia64/vmx/vtlb.c Wed Sep 26 13:22:17 2007 -0600
@@ -187,7 +187,7 @@ void thash_vhpt_insert(VCPU *v, u64 pte,
if (itir_ps(itir) >= mrr.ps) {
vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va);
} else {
- phy_pte &= ~PAGE_FLAGS_RV_MASK;
+ phy_pte &= ~PAGE_FLAGS_RV_MASK;
psr = ia64_clear_ic();
ia64_itc(type + 1, va, phy_pte, itir);
ia64_set_psr(psr);
diff -r c7140b8f56ac -r 788c39a0b905 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Wed Sep 26 12:43:41 2007 -0600
+++ b/xen/arch/ia64/xen/domain.c Wed Sep 26 13:22:17 2007 -0600
@@ -426,10 +426,11 @@ int vcpu_initialise(struct vcpu *v)
struct domain *d = v->domain;
if (!is_idle_domain(d)) {
- v->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
- v->arch.metaphysical_rr4 = d->arch.metaphysical_rr4;
- v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
- v->arch.metaphysical_saved_rr4 = d->arch.metaphysical_rr4;
+ v->arch.metaphysical_rid_dt = d->arch.metaphysical_rid_dt;
+ v->arch.metaphysical_rid_d = d->arch.metaphysical_rid_d;
+ /* Set default values to saved_rr. */
+ v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rid_dt;
+ v->arch.metaphysical_saved_rr4 = d->arch.metaphysical_rid_dt;
/* Is it correct ?
It depends on the domain rid usage.
diff -r c7140b8f56ac -r 788c39a0b905 xen/arch/ia64/xen/hyperprivop.S
--- a/xen/arch/ia64/xen/hyperprivop.S Wed Sep 26 12:43:41 2007 -0600
+++ b/xen/arch/ia64/xen/hyperprivop.S Wed Sep 26 13:22:17 2007 -0600
@@ -1423,7 +1423,7 @@ ENTRY(hyper_rsm_dt)
(p7) br.spnt.many 1f ;; // already in metaphysical mode
movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
ld8 r22=[r22];;
- adds r22=IA64_VCPU_META_RR0_OFFSET,r22;;
+ adds r22=IA64_VCPU_META_RID_DT_OFFSET,r22;;
ld8 r23=[r22];;
mov rr[r0]=r23;;
srlz.i;;
diff -r c7140b8f56ac -r 788c39a0b905 xen/arch/ia64/xen/regionreg.c
--- a/xen/arch/ia64/xen/regionreg.c Wed Sep 26 12:43:41 2007 -0600
+++ b/xen/arch/ia64/xen/regionreg.c Wed Sep 26 13:22:17 2007 -0600
@@ -185,8 +185,8 @@ int allocate_rid_range(struct domain *d,
d->arch.starting_mp_rid = i << mp_rid_shift;
d->arch.ending_mp_rid = (i + 1) << mp_rid_shift;
- d->arch.metaphysical_rr0 = allocate_metaphysical_rr(d, 0);
- d->arch.metaphysical_rr4 = allocate_metaphysical_rr(d, 1);
+ d->arch.metaphysical_rid_dt = allocate_metaphysical_rr(d, 0);
+ d->arch.metaphysical_rid_d = allocate_metaphysical_rr(d, 1);
dprintk(XENLOG_DEBUG, "### domain %p: rid=%x-%x mp_rid=%x\n",
d, d->arch.starting_rid, d->arch.ending_rid,
@@ -238,7 +238,8 @@ int set_one_rr(unsigned long rr, unsigne
ia64_rr rrv, newrrv, memrrv;
unsigned long newrid;
- if (val == -1) return 1;
+ if (val == -1)
+ return 1;
rrv.rrval = val;
newrrv.rrval = 0;
@@ -277,7 +278,7 @@ int set_metaphysical_rr0(void)
// ia64_rr rrv;
// rrv.ve = 1; FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
- ia64_set_rr(0,v->arch.metaphysical_rr0);
+ ia64_set_rr(0, v->arch.metaphysical_rid_dt);
ia64_srlz_d();
return 1;
}
@@ -290,7 +291,8 @@ void init_all_rr(struct vcpu *v)
//rrv.rrval = v->domain->arch.metaphysical_rr0;
rrv.ps = v->arch.vhpt_pg_shift;
rrv.ve = 1;
-if (!v->vcpu_info) { panic("Stopping in init_all_rr\n"); }
+ if (!v->vcpu_info)
+ panic("Stopping in init_all_rr\n");
VCPU(v,rrs[0]) = -1;
VCPU(v,rrs[1]) = rrv.rrval;
VCPU(v,rrs[2]) = rrv.rrval;
@@ -319,7 +321,7 @@ void load_region_regs(struct vcpu *v)
unsigned long bad = 0;
if (VCPU(v,metaphysical_mode)) {
- rr0 = v->domain->arch.metaphysical_rr0;
+ rr0 = v->domain->arch.metaphysical_rid_dt;
ia64_set_rr(0x0000000000000000L, rr0);
ia64_srlz_d();
}
diff -r c7140b8f56ac -r 788c39a0b905 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h Wed Sep 26 12:43:41 2007 -0600
+++ b/xen/include/asm-ia64/domain.h Wed Sep 26 13:22:17 2007 -0600
@@ -143,8 +143,8 @@ struct arch_domain {
int starting_mp_rid;
int ending_mp_rid;
/* RID for metaphysical mode. */
- unsigned long metaphysical_rr0;
- unsigned long metaphysical_rr4;
+ unsigned long metaphysical_rid_dt; /* dt=it=0 */
+ unsigned long metaphysical_rid_d; /* dt=0, it=1 */
int rid_bits; /* number of virtual rid bits (default: 18) */
int breakimm; /* The imm value for hypercalls. */
@@ -232,8 +232,8 @@ struct arch_vcpu {
/* These fields are copied from arch_domain to make access easier/faster
in assembly code. */
- unsigned long metaphysical_rr0; // from arch_domain (so is pinned)
- unsigned long metaphysical_rr4; // from arch_domain (so is pinned)
+ unsigned long metaphysical_rid_dt; // from arch_domain (so is pinned)
+ unsigned long metaphysical_rid_d; // from arch_domain (so is pinned)
unsigned long metaphysical_saved_rr0; // from arch_domain (so is pinned)
unsigned long metaphysical_saved_rr4; // from arch_domain (so is pinned)
unsigned long fp_psr; // used for lazy float register
@@ -254,7 +254,7 @@ struct arch_vcpu {
char irq_new_condition; // vpsr.i/vtpr change, check for pending VHPI
char hypercall_continuation;
- //for phycial emulation
+ // for physical emulation
int mode_flags;
fpswa_ret_t fpswa_ret; /* save return values of FPSWA emulation */
struct timer hlt_timer;
diff -r c7140b8f56ac -r 788c39a0b905 xen/include/asm-ia64/vmx_phy_mode.h
--- a/xen/include/asm-ia64/vmx_phy_mode.h Wed Sep 26 12:43:41 2007 -0600
+++ b/xen/include/asm-ia64/vmx_phy_mode.h Wed Sep 26 13:22:17 2007 -0600
@@ -66,29 +66,19 @@
*/
+#ifndef __ASSEMBLY__
+
#include <asm/vmx_vcpu.h>
#include <asm/regionreg.h>
#include <asm/gcc_intrin.h>
#include <asm/pgtable.h>
-/* Due to change of ia64_set_rr interface */
-#define PHY_PAGE_UC (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_UC|_PAGE_AR_RWX)
#define PHY_PAGE_WB (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_WB|_PAGE_AR_RWX)
-//#ifdef PHY_16M /* 16M: large granule for test*/
-//#define EMUL_PHY_PAGE_SHIFT 24
-//#else /* 4K: emulated physical page granule */
-//#define EMUL_PHY_PAGE_SHIFT 12
-//#endif
-#define IA64_RSC_MODE 0x0000000000000003
-#define XEN_RR7_RID (0xf00010)
-#define GUEST_IN_PHY 0x1
-#define GUEST_PHY_EMUL 0x2
extern void physical_mode_init(VCPU *);
extern void switch_to_physical_rid(VCPU *);
extern void switch_to_virtual_rid(VCPU *vcpu);
extern void switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr);
-extern void stlb_phys_lookup(VCPU *vcpu, u64 paddr, u64 type);
extern void check_mm_mode_switch (VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr);
extern void prepare_if_physical_mode(VCPU *vcpu);
extern void recover_if_physical_mode(VCPU *vcpu);
@@ -105,17 +95,12 @@ extern void physical_tlb_miss(VCPU *vcpu
#define is_virtual_mode(v) \
(!is_physical_mode(v))
-#define MODE_IND(psr) \
- (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
+#endif /* __ASSEMBLY__ */
-#define SW_BAD 0 /* Bad mode transitition */
-#define SW_V2P 1 /* Physical emulatino is activated */
-#define SW_P2V 2 /* Exit physical mode emulation */
-#define SW_SELF 3 /* No mode transition */
-#define SW_NOP 4 /* Mode transition, but without action required */
+#define GUEST_IN_PHY_BIT 0
+#define GUEST_PHY_EMUL_BIT 1
-#define INV_MODE 0 /* Invalid mode */
-#define GUEST_VIRT 1 /* Guest in virtual mode */
-#define GUEST_PHYS 2 /* Guest in physical mode, requiring emulation */
+#define GUEST_IN_PHY (1 << GUEST_IN_PHY_BIT)
+#define GUEST_PHY_EMUL (1 << GUEST_PHY_EMUL_BIT)
#endif /* _PHY_MODE_H_ */
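Note on the header reorganization: the C declarations are now wrapped in
#ifndef __ASSEMBLY__ so that optvfault.S can include vmx_phy_mode.h just for
the GUEST_*_BIT constants. Abridged shape of the header after this patch:

    #ifndef __ASSEMBLY__
    #include <asm/vmx_vcpu.h>
    /* ... C-only prototypes: switch_mm_mode(), prepare_if_physical_mode(), ... */
    #endif /* __ASSEMBLY__ */

    #define GUEST_IN_PHY_BIT   0    /* shared between C and assembly */
    #define GUEST_PHY_EMUL_BIT 1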