[IA64] use pte_pfn() where possible
Use pte_pfn() to get the mfn from a pte.
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
diff --git a/xen/arch/ia64/vmx/sioemu.c b/xen/arch/ia64/vmx/sioemu.c
--- a/xen/arch/ia64/vmx/sioemu.c
+++ b/xen/arch/ia64/vmx/sioemu.c
@@ -148,7 +148,7 @@ sioemu_set_callback (struct vcpu *v, uns
pte = *lookup_noalloc_domain_pte(v->domain, paddr);
if (!pte_present(pte) || !pte_mem(pte))
return -EINVAL;
- mfn = (pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT;
+ mfn = pte_pfn(pte);
ASSERT(mfn_valid(mfn));
page = mfn_to_page(mfn);
diff --git a/xen/arch/ia64/vmx/vmmu.c b/xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c
+++ b/xen/arch/ia64/vmx/vmmu.c
@@ -312,7 +312,7 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
*/
if (ps != _PAGE_SIZE_16M)
thash_purge_entries(vcpu, va, ps);
- gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
+ gpfn = pte_pfn(__pte(pte));
vcpu_get_rr(vcpu, va, &rid);
rid &= RR_RID_MASK;
p_dtr = (thash_data_t *)&vcpu->arch.dtrs[slot];
diff --git a/xen/arch/ia64/vmx/vmx_fault.c b/xen/arch/ia64/vmx/vmx_fault.c
--- a/xen/arch/ia64/vmx/vmx_fault.c
+++ b/xen/arch/ia64/vmx/vmx_fault.c
@@ -376,7 +376,7 @@ vmx_hpw_miss(u64 vadr, u64 vec, REGS* re
pte = lookup_domain_mpa(v->domain, pa_clear_uc(vadr), NULL);
if (v->domain != dom0 && (pte & _PAGE_IO)) {
emulate_io_inst(v, pa_clear_uc(vadr), 4,
- (pte & _PFN_MASK) >> PAGE_SHIFT);
+ pte_pfn(__pte(pte)));
return IA64_FAULT;
}
physical_tlb_miss(v, vadr, type);
@@ -413,7 +413,7 @@ try_again:
" pte=0x%lx\n", data->page_flags);
if (data->pl >= ((regs->cr_ipsr >> IA64_PSR_CPL0_BIT) & 3))
emulate_io_inst(v, gppa, data->ma,
- (pte & _PFN_MASK) >> PAGE_SHIFT);
+ pte_pfn(__pte(pte)));
else {
vcpu_set_isr(v, misr.val);
data_access_rights(v, vadr);
diff --git a/xen/arch/ia64/vmx/vmx_init.c b/xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c
+++ b/xen/arch/ia64/vmx/vmx_init.c
@@ -457,7 +457,7 @@ int vmx_set_ioreq_page(
pte = *lookup_noalloc_domain_pte(d, gpfn << PAGE_SHIFT);
if (!pte_present(pte) || !pte_mem(pte))
return -EINVAL;
- mfn = (pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT;
+ mfn = pte_pfn(pte);
ASSERT(mfn_valid(mfn));
page = mfn_to_page(mfn);
diff --git a/xen/arch/ia64/vmx/vtlb.c b/xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c
+++ b/xen/arch/ia64/vmx/vtlb.c
@@ -522,8 +522,7 @@ static u64 translate_phy_pte(VCPU *v, u6
* which is required by vga acceleration since qemu maps shared
* vram buffer with WB.
*/
- if (mfn_valid((maddr & _PAGE_PPN_MASK) >> PAGE_SHIFT)
- && phy_pte.ma != VA_MATTR_NATPAGE)
+ if (mfn_valid(pte_pfn(__pte(maddr))) && phy_pte.ma != VA_MATTR_NATPAGE)
phy_pte.ma = VA_MATTR_WB;
maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | (paddr & ~PAGE_MASK);
diff --git a/xen/arch/ia64/xen/dom0_ops.c b/xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c
+++ b/xen/arch/ia64/xen/dom0_ops.c
@@ -800,7 +800,7 @@ do_dom0vp_op(unsigned long cmd,
dprintk(XENLOG_INFO, "%s: INVALID_MFN ret: 0x%lx\n",
__func__, ret);
} else {
- ret = (ret & _PFN_MASK) >> PAGE_SHIFT;//XXX pte_pfn()
+ ret = pte_pfn(__pte(ret));
}
perfc_incr(dom0vp_phystomach);
break;
--
yamahata
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel