# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1205528865 21600
# Node ID 8c921adf4833a0359775c8f20f9549f6cb11df7b
# Parent 82fa2e6cb592b0d8be6251ea8849250b3f960b2d
[IA64] Raise a fault with unimplemented physical address
An Unimplemented Data Address fault or an Unimplemented Instruction Address
trap should be raised when the guest uses an unimplemented physical address.
Also some cleanups: the ISR setup duplicated at the unimpl_daddr() call sites
in vmx_virt.c moves into the helper itself, which now reports
IA64_UNIMPL_DADDR_FAULT instead of IA64_RESERVED_REG_FAULT.
Signed-off-by: Kouya Shimura <kouya@xxxxxxxxxxxxxx>
---
xen/arch/ia64/vmx/vmx_fault.c | 15 ++++++++++++++-
xen/arch/ia64/vmx/vmx_virt.c | 21 ---------------------
xen/include/asm-ia64/vmx_vcpu.h | 22 ++++++++++++++++++++++
3 files changed, 36 insertions(+), 22 deletions(-)
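
The core of the change is the new unimpl_phys_addr() test in vmx_fault.c: clear the
uncacheable attribute bit from the guest physical address and check whether anything
remains above the implemented address width. A minimal standalone sketch of that idea
follows; the bit positions are illustrative stand-ins for Xen's pa_clear_uc() and
MAX_PHYS_ADDR_BITS, not their actual definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative limits only; the real values come from the Xen ia64 headers. */
    #define UC_BIT          63   /* uncacheable attribute bit of a physical address */
    #define PHYS_ADDR_BITS  48   /* assumed implemented physical address width */

    /* Non-zero if any address bit above the implemented range is still set
     * once the UC attribute bit has been cleared. */
    static int is_unimpl_phys_addr(uint64_t paddr)
    {
        uint64_t masked = paddr & ~(1ULL << UC_BIT);
        return (masked >> PHYS_ADDR_BITS) != 0;
    }

    int main(void)
    {
        printf("%d\n", is_unimpl_phys_addr(0x00000000f0000000ULL)); /* 0: implemented */
        printf("%d\n", is_unimpl_phys_addr(0x80000000f0000000ULL)); /* 0: only the UC bit is set */
        printf("%d\n", is_unimpl_phys_addr(0x0001000000000000ULL)); /* 1: bit 48 set, above the range */
        return 0;
    }

Addresses that trip this test are no longer fed to lookup_domain_mpa(); they take the
new fault and trap paths added below.
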
diff -r 82fa2e6cb592 -r 8c921adf4833 xen/arch/ia64/vmx/vmx_fault.c
--- a/xen/arch/ia64/vmx/vmx_fault.c Fri Mar 14 15:02:12 2008 -0600
+++ b/xen/arch/ia64/vmx/vmx_fault.c Fri Mar 14 15:07:45 2008 -0600
@@ -328,6 +328,11 @@ static int vmx_handle_lds(REGS* regs)
     return IA64_FAULT;
 }
 
+static inline int unimpl_phys_addr (u64 paddr)
+{
+    return (pa_clear_uc(paddr) >> MAX_PHYS_ADDR_BITS) != 0;
+}
+
 /* We came here because the H/W VHPT walker failed to find an entry */
 IA64FAULT
 vmx_hpw_miss(u64 vadr, u64 vec, REGS* regs)
@@ -361,10 +366,18 @@ vmx_hpw_miss(u64 vadr, u64 vec, REGS* re
             /* DTLB miss. */
             if (misr.sp) /* Refer to SDM Vol2 Table 4-11,4-12 */
                 return vmx_handle_lds(regs);
+            if (unlikely(unimpl_phys_addr(vadr))) {
+                unimpl_daddr(v);
+                return IA64_FAULT;
+            }
             pte = lookup_domain_mpa(v->domain, pa_clear_uc(vadr), NULL);
-            /* Clear UC bit in vadr with the shifts. */
             if (v->domain != dom0 && (pte & GPFN_IO_MASK)) {
                 emulate_io_inst(v, pa_clear_uc(vadr), 4, pte);
+                return IA64_FAULT;
+            }
+        } else {
+            if (unlikely(unimpl_phys_addr(vadr))) {
+                unimpl_iaddr_trap(v, vadr);
                 return IA64_FAULT;
             }
         }
diff -r 82fa2e6cb592 -r 8c921adf4833 xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c Fri Mar 14 15:02:12 2008 -0600
+++ b/xen/arch/ia64/vmx/vmx_virt.c Fri Mar 14 15:07:45 2008 -0600
@@ -277,9 +277,6 @@ static IA64FAULT vmx_emul_ptc_l(VCPU *vc
     }
 #ifdef VMAL_NO_FAULT_CHECK
     if (unimplemented_gva(vcpu,r3) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
@@ -338,9 +335,6 @@ static IA64FAULT vmx_emul_ptc_g(VCPU *vc
     }
 #ifdef VMAL_NO_FAULT_CHECK
     if (unimplemented_gva(vcpu,r3) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
@@ -374,9 +368,6 @@ static IA64FAULT vmx_emul_ptc_ga(VCPU *v
     }
 #ifdef VMAL_NO_FAULT_CHECK
     if (unimplemented_gva(vcpu,r3) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
@@ -411,9 +402,6 @@ static IA64FAULT ptr_fault_check(VCPU *v
         return IA64_FAULT;
     }
     if (unimplemented_gva(vcpu,r3) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
@@ -635,9 +623,6 @@ static IA64FAULT vmx_emul_itr_d(VCPU *vc
         return IA64_FAULT;
     }
     if (unimplemented_gva(vcpu, ifa)) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
@@ -703,9 +688,6 @@ static IA64FAULT vmx_emul_itr_i(VCPU *vc
         return IA64_FAULT;
     }
     if (unimplemented_gva(vcpu, ifa)) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
@@ -764,9 +746,6 @@ static IA64FAULT itc_fault_check(VCPU *v
     }
 #ifdef VMAL_NO_FAULT_CHECK
     if (unimplemented_gva(vcpu,ifa) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
diff -r 82fa2e6cb592 -r 8c921adf4833 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h Fri Mar 14 15:02:12 2008 -0600
+++ b/xen/include/asm-ia64/vmx_vcpu.h Fri Mar 14 15:07:45 2008 -0600
@@ -582,6 +582,11 @@ static inline void
 static inline void
 unimpl_daddr (VCPU *vcpu)
 {
+    ISR isr;
+
+    isr.val = set_isr_ei_ni(vcpu);
+    isr.code = IA64_UNIMPL_DADDR_FAULT;
+    vcpu_set_isr(vcpu, isr.val);
     _general_exception(vcpu);
 }
 
@@ -695,4 +700,21 @@ data_access_rights(VCPU *vcpu, u64 vadr)
     set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
     inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
 }
+
+/*
+ * Unimplemented Instruction Address Trap
+ * @ Lower-Privilege Transfer Trap Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+static inline void
+unimpl_iaddr_trap (VCPU *vcpu, u64 vadr)
+{
+    ISR isr;
+
+    isr.val = set_isr_ei_ni(vcpu);
+    isr.code = IA64_UNIMPL_IADDR_TRAP;
+    vcpu_set_isr(vcpu, isr.val);
+    vcpu_set_ifa(vcpu, vadr);
+    inject_guest_interruption(vcpu, IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR);
+}
 #endif
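
Taken together, the two new helpers give the guest different events for the two miss
flavors: a data-side miss on an unimplemented physical address raises an Unimplemented
Data Address fault via unimpl_daddr() (delivered as a general exception), while an
instruction-side miss raises the Unimplemented Instruction Address trap through the
Lower-Privilege Transfer Trap vector, per the SDM tables cited in the new comment.
A toy model of that dispatch, with prints standing in for the real ISR setup and
interruption injection:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for unimpl_daddr(): the real code sets ISR.code = IA64_UNIMPL_DADDR_FAULT
     * and raises a general exception against the guest. */
    static void unimpl_daddr(void)
    {
        puts("inject Unimplemented Data Address fault");
    }

    /* Stand-in for unimpl_iaddr_trap(): the real code sets ISR.code = IA64_UNIMPL_IADDR_TRAP,
     * records the address in IFA and injects at the Lower-Privilege Transfer Trap vector. */
    static void unimpl_iaddr_trap(uint64_t vadr)
    {
        printf("inject Unimplemented Instruction Address trap, IFA=%#llx\n",
               (unsigned long long)vadr);
    }

    /* Simplified dispatch mirroring the new physical-mode path in vmx_hpw_miss(). */
    static void handle_phys_mode_miss(int is_data_access, int addr_unimplemented,
                                      uint64_t vadr)
    {
        if (!addr_unimplemented) {
            puts("implemented address: continue with normal TLB/MPA handling");
            return;
        }
        if (is_data_access)
            unimpl_daddr();
        else
            unimpl_iaddr_trap(vadr);
    }

    int main(void)
    {
        handle_phys_mode_miss(1, 1, 0x0001000000000000ULL); /* data access above the range */
        handle_phys_mode_miss(0, 1, 0x0001000000000000ULL); /* instruction fetch above the range */
        handle_phys_mode_miss(1, 0, 0x00000000f0000000ULL); /* implemented address */
        return 0;
    }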