# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1205169777 21600
# Node ID 85d25d01d93ff6a84fe4415b64f292b516a96e38
# Parent 16f6435a9d079b5bab8d4dc0c908e01ac9aca768
[IA64] Avoid multiple calls to lookup_domain_mpa for io emulation
The __gpfn_is_io macro hides a call to lookup_domain_mpa. This patch avoids
multiple calls to lookup_domain_mpa during io emulation.
Remove __gpfn_is_io and __gpfn_is_mem.
Signed-off-by: Tristan Gingold <tgingold@xxxxxxx>
---
xen/arch/ia64/vmx/mmio.c | 15 +++++++--------
xen/arch/ia64/vmx/vmmu.c | 8 +++++---
xen/arch/ia64/vmx/vmx_fault.c | 15 +++++++++------
xen/include/asm-ia64/mm.h | 18 ------------------
xen/include/asm-ia64/vmmu.h | 2 +-
xen/include/asm-ia64/xenpage.h | 5 +++++
6 files changed, 27 insertions(+), 36 deletions(-)
diff -r 16f6435a9d07 -r 85d25d01d93f xen/arch/ia64/vmx/mmio.c
--- a/xen/arch/ia64/vmx/mmio.c Mon Mar 10 11:10:46 2008 -0600
+++ b/xen/arch/ia64/vmx/mmio.c Mon Mar 10 11:22:57 2008 -0600
@@ -352,10 +352,9 @@ static void legacy_io_access(VCPU *vcpu,
return;
}
-static void mmio_access(VCPU *vcpu, u64 src_pa, u64 *dest, size_t s, int ma,
int dir)
-{
- unsigned long iot;
- iot = __gpfn_is_io(vcpu->domain, src_pa >> PAGE_SHIFT);
+static void mmio_access(VCPU *vcpu, u64 src_pa, u64 *dest, size_t s, int ma,
int dir, u64 pte)
+{
+ unsigned long iot = pte & GPFN_IO_MASK;
perfc_incra(vmx_mmio_access, iot >> 56);
switch (iot) {
@@ -395,7 +394,7 @@ enum inst_type_en { SL_INTEGER, SL_FLOAT
/*
dir 1: read 0:write
*/
-void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
+void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma, u64 pte)
{
REGS *regs;
IA64_BUNDLE bundle;
@@ -537,7 +536,7 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
}
if (vcpu->domain->arch.is_sioemu) {
- unsigned long iot = __gpfn_is_io(vcpu->domain, padr >> PAGE_SHIFT);
+ unsigned long iot = pte & GPFN_IO_MASK;
if (iot != GPFN_PIB && iot != GPFN_IOSAPIC) {
sioemu_io_emulate(padr, data, data1, update_word);
@@ -546,10 +545,10 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
}
if (size == 4) {
- mmio_access(vcpu, padr + 8, &data1, 1 << 3, ma, dir);
+ mmio_access(vcpu, padr + 8, &data1, 1 << 3, ma, dir, pte);
size = 3;
}
- mmio_access(vcpu, padr, &data, 1 << size, ma, dir);
+ mmio_access(vcpu, padr, &data, 1 << size, ma, dir, pte);
emulate_io_update(vcpu, update_word, data, data1);
}
diff -r 16f6435a9d07 -r 85d25d01d93f xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c Mon Mar 10 11:10:46 2008 -0600
+++ b/xen/arch/ia64/vmx/vmmu.c Mon Mar 10 11:22:57 2008 -0600
@@ -283,9 +283,10 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
#ifdef VTLB_DEBUG
int index;
#endif
- u64 gpfn;
+ u64 gpfn, gpte;
u64 ps, va, rid;
thash_data_t * p_dtr;
+
ps = itir_ps(itir);
va = PAGEALIGN(ifa, ps);
#ifdef VTLB_DEBUG
@@ -313,10 +314,11 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
if (ps != _PAGE_SIZE_16M)
thash_purge_entries(vcpu, va, ps);
gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
- if (VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain, gpfn))
+ gpte = lookup_domain_mpa(vcpu->domain, gpfn, NULL);
+ if (gpte & GPFN_IO_MASK)
pte |= VTLB_PTE_IO;
vcpu_get_rr(vcpu, va, &rid);
- rid = rid& RR_RID_MASK;
+ rid &= RR_RID_MASK;
p_dtr = (thash_data_t *)&vcpu->arch.dtrs[slot];
vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.dtrs[slot], pte, itir, va,
rid);
vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),va);
diff -r 16f6435a9d07 -r 85d25d01d93f xen/arch/ia64/vmx/vmx_fault.c
--- a/xen/arch/ia64/vmx/vmx_fault.c Mon Mar 10 11:10:46 2008 -0600
+++ b/xen/arch/ia64/vmx/vmx_fault.c Mon Mar 10 11:22:57 2008 -0600
@@ -355,15 +355,16 @@ vmx_hpw_miss(u64 vadr, u64 vec, REGS* re
mmu_mode = VMX_MMU_MODE(v);
if ((mmu_mode == VMX_MMU_PHY_DT
|| (mmu_mode == VMX_MMU_PHY_D && type == DSIDE_TLB))
- && !((vadr<<1)>>62)) {
+ && (REGION_NUMBER(vadr) & 3) == 0) {
if (type == DSIDE_TLB) {
+ u64 pte;
/* DTLB miss. */
if (misr.sp) /* Refer to SDM Vol2 Table 4-11,4-12 */
return vmx_handle_lds(regs);
+ pte = lookup_domain_mpa(v->domain, pa_clear_uc(vadr), NULL);
/* Clear UC bit in vadr with the shifts. */
- if (v->domain != dom0
- && __gpfn_is_io(v->domain, (vadr << 1) >> (PAGE_SHIFT + 1))) {
- emulate_io_inst(v, ((vadr << 1) >> 1), 4);
+ if (v->domain != dom0 && (pte & GPFN_IO_MASK)) {
+ emulate_io_inst(v, pa_clear_uc(vadr), 4, pte);
return IA64_FAULT;
}
}
@@ -377,18 +378,20 @@ try_again:
if (data != 0) {
/* Found. */
if (v->domain != dom0 && type == DSIDE_TLB) {
+ u64 pte;
if (misr.sp) { /* Refer to SDM Vol2 Table 4-10,4-12 */
if ((data->ma == VA_MATTR_UC) || (data->ma == VA_MATTR_UCE))
return vmx_handle_lds(regs);
}
gppa = (vadr & ((1UL << data->ps) - 1)) +
(data->ppn >> (data->ps - 12) << data->ps);
- if (__gpfn_is_io(v->domain, gppa >> PAGE_SHIFT)) {
+ pte = lookup_domain_mpa(v->domain, gppa, NULL);
+ if (pte & GPFN_IO_MASK) {
if (misr.sp)
panic_domain(NULL, "ld.s on I/O page not with UC attr."
" pte=0x%lx\n", data->page_flags);
if (data->pl >= ((regs->cr_ipsr >> IA64_PSR_CPL0_BIT) & 3))
- emulate_io_inst(v, gppa, data->ma);
+ emulate_io_inst(v, gppa, data->ma, pte);
else {
vcpu_set_isr(v, misr.val);
data_access_rights(v, vadr);
diff -r 16f6435a9d07 -r 85d25d01d93f xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Mon Mar 10 11:10:46 2008 -0600
+++ b/xen/include/asm-ia64/mm.h Mon Mar 10 11:22:57 2008 -0600
@@ -483,24 +483,6 @@ extern u64 translate_domain_pte(u64 ptev
#define __gmfn_valid(_d, gpfn) !__gpfn_invalid(_d, gpfn)
-/* Return I/O type if trye */
-#define __gpfn_is_io(_d, gpfn) \
-({ \
- u64 pte, ret=0; \
- pte = lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL); \
- ret = (pte != INVALID_MFN) ? pte & GPFN_IO_MASK : 0; \
- ret; \
-})
-
-#define __gpfn_is_mem(_d, gpfn) \
-({ \
- u64 pte, ret=0; \
- pte = lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL); \
- ret = (pte != INVALID_MFN) && (pte & GPFN_IO_MASK) == GPFN_MEM; \
- ret; \
-})
-
-
#define __gpa_to_mpa(_d, gpa) \
((gmfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
diff -r 16f6435a9d07 -r 85d25d01d93f xen/include/asm-ia64/vmmu.h
--- a/xen/include/asm-ia64/vmmu.h Mon Mar 10 11:10:46 2008 -0600
+++ b/xen/include/asm-ia64/vmmu.h Mon Mar 10 11:22:57 2008 -0600
@@ -185,7 +185,7 @@ extern void free_domain_tlb(struct vcpu
extern void free_domain_tlb(struct vcpu *v);
extern thash_data_t * vhpt_lookup(u64 va);
extern unsigned long fetch_code(struct vcpu *vcpu, u64 gip, IA64_BUNDLE
*pbundle);
-extern void emulate_io_inst(struct vcpu *vcpu, u64 padr, u64 ma);
+extern void emulate_io_inst(struct vcpu *vcpu, u64 padr, u64 ma, u64 pte);
extern void emulate_io_update(struct vcpu *vcpu, u64 word, u64 d, u64 d1);
extern int vhpt_enabled(struct vcpu *vcpu, uint64_t vadr, vhpt_ref_t ref);
extern void thash_vhpt_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa,
diff -r 16f6435a9d07 -r 85d25d01d93f xen/include/asm-ia64/xenpage.h
--- a/xen/include/asm-ia64/xenpage.h Mon Mar 10 11:10:46 2008 -0600
+++ b/xen/include/asm-ia64/xenpage.h Mon Mar 10 11:22:57 2008 -0600
@@ -83,6 +83,11 @@ static inline int get_order_from_shift(u
#define virt_to_xenva(va) ((unsigned long)va - PAGE_OFFSET - \
xen_pstart + KERNEL_START)
+/* Clear bit 63 (UC bit in physical addresses). */
+static inline u64 pa_clear_uc(u64 paddr)
+{
+ return (paddr << 1) >> 1;
+}
#undef __pa
#undef __va
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|