# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 53ec7e3d3a8a48dff051dcbba7feb2edfb94f85b
# Parent a947ca5d473160fe5d98019808e4b653cdf0d4c7
[IA64] Fix a VTi physical mode bug
When the guest writes rr0 or rr4 while in physical mode, Xen must not
write the value into the machine rr immediately, since in physical mode
those machine registers hold the metaphysical mappings. Cache the
machine-format value instead and install it when the vcpu switches back
to virtual mode (a sketch of the flow follows the diffstat below).
Signed-off-by: Xuefei Xu <anthony.xu@xxxxxxxxx>
---
xen/arch/ia64/vmx/vmx_phy_mode.c | 53 ++++++++++++---------------------------
xen/arch/ia64/vmx/vmx_vcpu.c | 17 +++++++++++-
2 files changed, 32 insertions(+), 38 deletions(-)
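For illustration, here is a minimal stand-alone sketch of the pattern this
changeset introduces: cache the machine-format value of a guest rr0/rr4
write, and defer the hardware write while the vcpu is in metaphysical
(guest physical) mode. This is not the Xen code itself: vrrtomrr and
ia64_set_rr are stubbed, and the vcpu struct is reduced to the fields
involved.

/*
 * Illustrative sketch only -- not the actual Xen code.  Helpers are
 * stubbed so the deferred-write pattern can be run stand-alone.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

struct vcpu {
    bool physical_mode;          /* vcpu currently in metaphysical mode */
    u64  metaphysical_rr0;       /* machine rr0 used while in phys mode */
    u64  metaphysical_saved_rr0; /* guest rr0 in machine format, cached */
};

/* Stand-in for vrrtomrr(): map a virtual rr value to machine format. */
static u64 vrrtomrr(struct vcpu *v, u64 val) { (void)v; return val | 1; }

/* Stand-in for ia64_set_rr(): write a machine region register. */
static void ia64_set_rr(unsigned long reg, u64 val)
{
    printf("rr[%lu] <- 0x%llx\n", reg, (unsigned long long)val);
}

/* Guest write to rr0: always cache the machine-format value, but only
 * touch the hardware register when the vcpu is in virtual mode. */
static void set_rr0(struct vcpu *v, u64 val)
{
    u64 rrval = vrrtomrr(v, val);
    v->metaphysical_saved_rr0 = rrval;   /* remembered for mode switches */
    if (!v->physical_mode)
        ia64_set_rr(0, rrval);           /* machine rr0 is the guest's   */
}

/* Entering metaphysical mode: machine rr0 gets the metaphysical value. */
static void switch_to_physical(struct vcpu *v)
{
    v->physical_mode = true;
    ia64_set_rr(0, v->metaphysical_rr0);
}

/* Leaving metaphysical mode: install the cached guest value. */
static void switch_to_virtual(struct vcpu *v)
{
    v->physical_mode = false;
    ia64_set_rr(0, v->metaphysical_saved_rr0);
}

int main(void)
{
    struct vcpu v = { .physical_mode = true, .metaphysical_rr0 = 0x39 };
    set_rr0(&v, 0x38);      /* deferred: no hardware write happens here */
    switch_to_virtual(&v);  /* cached guest value reaches machine rr0   */
    switch_to_physical(&v); /* back to the metaphysical mapping         */
    return 0;
}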
diff -r a947ca5d4731 -r 53ec7e3d3a8a xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c Sun Oct 01 11:14:00 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c Sun Oct 01 11:19:45 2006 -0600
@@ -126,10 +126,16 @@ vmx_init_all_rr(VCPU *vcpu)
vmx_init_all_rr(VCPU *vcpu)
{
VMX(vcpu, vrr[VRN0]) = 0x38;
+ // enable vhpt in guest physical mode
+ vcpu->arch.metaphysical_rr0 |= 1;
+ vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(vcpu, 0x38);
VMX(vcpu, vrr[VRN1]) = 0x38;
VMX(vcpu, vrr[VRN2]) = 0x38;
VMX(vcpu, vrr[VRN3]) = 0x38;
VMX(vcpu, vrr[VRN4]) = 0x38;
+ // enable vhpt in guest physical mode
+ vcpu->arch.metaphysical_rr4 |= 1;
+ vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(vcpu, 0x38);
VMX(vcpu, vrr[VRN5]) = 0x38;
VMX(vcpu, vrr[VRN6]) = 0x38;
VMX(vcpu, vrr[VRN7]) = 0x738;
@@ -141,10 +147,8 @@ vmx_load_all_rr(VCPU *vcpu)
vmx_load_all_rr(VCPU *vcpu)
{
unsigned long psr;
- ia64_rr phy_rr;
local_irq_save(psr);
-
/* WARNING: not allow co-exist of both virtual mode and physical
* mode in same region
@@ -154,24 +158,16 @@ vmx_load_all_rr(VCPU *vcpu)
panic_domain(vcpu_regs(vcpu),
"Unexpected domain switch in phy emul\n");
}
- phy_rr.rrval = vcpu->arch.metaphysical_rr0;
- //phy_rr.ps = PAGE_SHIFT;
- phy_rr.ve = 1;
-
- ia64_set_rr((VRN0 << VRN_SHIFT), phy_rr.rrval);
- ia64_dv_serialize_data();
- phy_rr.rrval = vcpu->arch.metaphysical_rr4;
- //phy_rr.ps = PAGE_SHIFT;
- phy_rr.ve = 1;
-
- ia64_set_rr((VRN4 << VRN_SHIFT), phy_rr.rrval);
+ ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
+ ia64_dv_serialize_data();
+ ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
ia64_dv_serialize_data();
} else {
ia64_set_rr((VRN0 << VRN_SHIFT),
- vrrtomrr(vcpu, VMX(vcpu, vrr[VRN0])));
+ vcpu->arch.metaphysical_saved_rr0);
ia64_dv_serialize_data();
ia64_set_rr((VRN4 << VRN_SHIFT),
- vrrtomrr(vcpu, VMX(vcpu, vrr[VRN4])));
+ vcpu->arch.metaphysical_saved_rr4);
ia64_dv_serialize_data();
}
@@ -209,21 +205,11 @@ switch_to_physical_rid(VCPU *vcpu)
switch_to_physical_rid(VCPU *vcpu)
{
UINT64 psr;
- ia64_rr phy_rr, mrr;
-
/* Save original virtual mode rr[0] and rr[4] */
psr=ia64_clear_ic();
- phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
- mrr.rrval = ia64_get_rr(VRN0 << VRN_SHIFT);
- phy_rr.ps = mrr.ps;
- phy_rr.ve = 1;
- ia64_set_rr(VRN0<<VRN_SHIFT, phy_rr.rrval);
- ia64_srlz_d();
- phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
- mrr.rrval = ia64_get_rr(VRN4 << VRN_SHIFT);
- phy_rr.ps = mrr.ps;
- phy_rr.ve = 1;
- ia64_set_rr(VRN4<<VRN_SHIFT, phy_rr.rrval);
+ ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
+ ia64_srlz_d();
+ ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
ia64_srlz_d();
ia64_set_psr(psr);
@@ -236,15 +222,10 @@ switch_to_virtual_rid(VCPU *vcpu)
switch_to_virtual_rid(VCPU *vcpu)
{
UINT64 psr;
- ia64_rr mrr;
-
psr=ia64_clear_ic();
-
- vcpu_get_rr(vcpu,VRN0<<VRN_SHIFT,&mrr.rrval);
- ia64_set_rr(VRN0<<VRN_SHIFT, vrrtomrr(vcpu, mrr.rrval));
- ia64_srlz_d();
- vcpu_get_rr(vcpu,VRN4<<VRN_SHIFT,&mrr.rrval);
- ia64_set_rr(VRN4<<VRN_SHIFT, vrrtomrr(vcpu, mrr.rrval));
+ ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
+ ia64_srlz_d();
+ ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
ia64_srlz_d();
ia64_set_psr(psr);
ia64_srlz_i();
diff -r a947ca5d4731 -r 53ec7e3d3a8a xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c Sun Oct 01 11:14:00 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c Sun Oct 01 11:19:45 2006 -0600
@@ -212,19 +212,32 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UI
{
ia64_rr oldrr,newrr;
extern void * pal_vaddr;
+ u64 rrval;
vcpu_get_rr(vcpu, reg, &oldrr.rrval);
newrr.rrval=val;
if (newrr.rid >= (1 << vcpu->domain->arch.rid_bits))
panic_domain (NULL, "use of invalid rid %x\n", newrr.rid);
- VMX(vcpu,vrr[reg>>61]) = val;
- switch((u64)(reg>>61)) {
+ VMX(vcpu,vrr[reg>>VRN_SHIFT]) = val;
+ switch((u64)(reg>>VRN_SHIFT)) {
case VRN7:
vmx_switch_rr7(vrrtomrr(vcpu,val),vcpu->domain->shared_info,
(void *)vcpu->arch.privregs,
(void *)vcpu->arch.vhpt.hash, pal_vaddr );
break;
+ case VRN4:
+ rrval = vrrtomrr(vcpu,val);
+ vcpu->arch.metaphysical_saved_rr4 = rrval;
+ if (!is_physical_mode(vcpu))
+ ia64_set_rr(reg,rrval);
+ break;
+ case VRN0:
+ rrval = vrrtomrr(vcpu,val);
+ vcpu->arch.metaphysical_saved_rr0 = rrval;
+ if (!is_physical_mode(vcpu))
+ ia64_set_rr(reg,rrval);
+ break;
default:
ia64_set_rr(reg,vrrtomrr(vcpu,val));
break;
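Two notes on the change. First, the `|= 1` added in vmx_init_all_rr sets
bit 0 of the region register value, which in the architected IA-64 rr
layout is the VHPT walker enable (ve) bit; this replaces the
`phy_rr.ve = 1` assignments the patch removes. A sketch of that layout
(field packing per the architecture; not the exact Xen ia64_rr typedef):

#include <stdio.h>

/* Architected IA-64 region register layout (illustrative). */
typedef union {
    unsigned long rrval;
    struct {
        unsigned long ve  : 1;   /* bit 0: VHPT walker enable      */
        unsigned long rv1 : 1;   /* bit 1: reserved                */
        unsigned long ps  : 6;   /* bits 2-7: preferred page size  */
        unsigned long rid : 24;  /* bits 8-31: region identifier   */
    };
} rr_layout;

int main(void)
{
    rr_layout rr = { .rrval = 0x38 };  /* ps = 14 -> 16KB pages, ve = 0 */
    rr.rrval |= 1;                     /* the patch's "|= 1": set ve    */
    printf("ve=%lu ps=%lu rid=%lu\n",
           (unsigned long)rr.ve, (unsigned long)rr.ps,
           (unsigned long)rr.rid);
    return 0;
}

Second, with the machine-format values cached in
metaphysical_saved_rr0/rr4, the mode-switch paths (vmx_load_all_rr,
switch_to_physical_rid, switch_to_virtual_rid) no longer recompute them
with vrrtomrr on every switch, and switch_to_physical_rid no longer needs
to read back the current rr just to preserve the ps field.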