# HG changeset patch
# User dietmar.hahn@xxxxxxxxxxxxxxxxxxx
# Node ID 294100ad2b044f9cf4bff74f5121d8c14cedb9b2
# Parent  834ac63f4894443dd825ef0d9ef478fb48416d43
Changed some interfaces to use cr.itir instead of only logps in the
handling of itc_i/itc_d.
Added XEN_IA64_NPKRS - the number of pkrs available to PV domains.
Added IA64_INST_KEY_MISS_VECTOR and IA64_DATA_KEY_MISS_VECTOR to
ia64_handle_reflection.
The protection key is now used when handling XEN_IA64_OPTF_IDENT_MAP_REG7.

Signed-off-by: Dietmar Hahn
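The rewrite relies on ia64_itir_t (defined elsewhere in the tree, not in
this patch), which lets the same 64-bit value be handled either as the raw
cr.itir image or through its fields. A minimal sketch of the layout the
code assumes, following the architected cr.itir format (the reserved-field
names here are illustrative):

	/* Sketch only - ia64_itir_t is not introduced by this patch. */
	typedef union {
		__u64 itir;		/* raw cr.itir image */
		struct {
			__u64 rv0 :  2;	/* bits  0..1 : reserved */
			__u64 ps  :  6;	/* bits  2..7 : log2 page size */
			__u64 key : 24;	/* bits  8..31: protection key */
			__u64 rv1 : 32;	/* bits 32..63: reserved */
		};
	} ia64_itir_t;

With a zero key, IA64_ITIR_PS_KEY(ps, 0) equals the old logps << 2
encoding, so the converted call sites below are bit-for-bit compatible.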
diff -r 834ac63f4894 -r 294100ad2b04 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c	Sun Jul 15 13:40:47 2007 -0600
+++ b/xen/arch/ia64/vmx/vmmu.c	Thu Jul 19 15:39:01 2007 +0200
@@ -232,10 +232,10 @@ void machine_tlb_insert(struct vcpu *v, 
     psr = ia64_clear_ic();
     if ( cl == ISIDE_TLB ) {
-        ia64_itc(1, mtlb.ifa, mtlb.page_flags, mtlb.ps);
+        ia64_itc(1, mtlb.ifa, mtlb.page_flags, IA64_ITIR_PS_KEY(mtlb.ps, 0));
     }
     else {
-        ia64_itc(2, mtlb.ifa, mtlb.page_flags, mtlb.ps);
+        ia64_itc(2, mtlb.ifa, mtlb.page_flags, IA64_ITIR_PS_KEY(mtlb.ps, 0));
     }
     ia64_set_psr(psr);
     ia64_srlz_i();
diff -r 834ac63f4894 -r 294100ad2b04 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c	Sun Jul 15 13:40:47 2007 -0600
+++ b/xen/arch/ia64/vmx/vtlb.c	Thu Jul 19 15:39:01 2007 +0200
@@ -199,7 +199,7 @@ void thash_vhpt_insert(VCPU *v, u64 pte,
     } else {
         phy_pte &= ~PAGE_FLAGS_RV_MASK;
         psr = ia64_clear_ic();
-        ia64_itc(type + 1, va, phy_pte, itir_ps(itir));
+        ia64_itc(type + 1, va, phy_pte, itir);
         ia64_set_psr(psr);
         ia64_srlz_i();
     }
@@ -562,7 +562,7 @@ int thash_purge_and_insert(VCPU *v, u64 
         u64 psr;
         phy_pte &= ~PAGE_FLAGS_RV_MASK;
         psr = ia64_clear_ic();
-        ia64_itc(type + 1, ifa, phy_pte, ps);
+        ia64_itc(type + 1, ifa, phy_pte, IA64_ITIR_PS_KEY(ps, 0));
         ia64_set_psr(psr);
         ia64_srlz_i();
     // ps < mrr.ps, this is not supported
diff -r 834ac63f4894 -r 294100ad2b04 xen/arch/ia64/xen/faults.c
--- a/xen/arch/ia64/xen/faults.c	Sun Jul 15 13:40:47 2007 -0600
+++ b/xen/arch/ia64/xen/faults.c	Thu Jul 19 15:39:01 2007 +0200
@@ -168,7 +168,7 @@ void ia64_do_page_fault(unsigned long ad
 	unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
 	IA64FAULT fault;
 	int is_ptc_l_needed = 0;
-	u64 logps;
+	ia64_itir_t _itir = {.itir = itir};
 
 	if ((isr & IA64_ISR_SP)
 	    || ((isr & IA64_ISR_NA)
@@ -190,14 +190,14 @@ void ia64_do_page_fault(unsigned long ad
 		struct p2m_entry entry;
 		unsigned long m_pteval;
 		m_pteval = translate_domain_pte(pteval, address, itir,
-		                                &logps, &entry);
+		                                &(_itir.itir), &entry);
 		vcpu_itc_no_srlz(current, is_data ? 2 : 1, address,
-		                 m_pteval, pteval, logps, &entry);
+		                 m_pteval, pteval, _itir.itir, &entry);
 		if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
 		    p2m_entry_retry(&entry)) {
 			/* dtlb has been purged in-between.  This dtlb was
 			   matching.  Undo the work.  */
-			vcpu_flush_tlb_vhpt_range(address, logps);
+			vcpu_flush_tlb_vhpt_range(address, _itir.ps);
 
 			// the stale entry which we inserted above
 			// may remains in tlb cache.
@@ -209,7 +209,7 @@ void ia64_do_page_fault(unsigned long ad
 	}
 
 	if (is_ptc_l_needed)
-		vcpu_ptc_l(current, address, logps);
+		vcpu_ptc_l(current, address, _itir.ps);
 	if (!guest_mode(regs)) {
 		/* The fault occurs inside Xen.  */
 		if (!ia64_done_with_exception(regs)) {
@@ -572,6 +572,12 @@ ia64_handle_reflection(unsigned long ifa
 	BUG_ON(!(psr & IA64_PSR_CPL));
 
 	switch (vector) {
+	case 6:
+		vector = IA64_INST_KEY_MISS_VECTOR;
+		break;
+	case 7:
+		vector = IA64_DATA_KEY_MISS_VECTOR;
+		break;
 	case 8:
 		vector = IA64_DIRTY_BIT_VECTOR;
 		break;
diff -r 834ac63f4894 -r 294100ad2b04 xen/arch/ia64/xen/fw_emul.c
--- a/xen/arch/ia64/xen/fw_emul.c	Sun Jul 15 13:40:47 2007 -0600
+++ b/xen/arch/ia64/xen/fw_emul.c	Thu Jul 19 15:39:01 2007 +0200
@@ -669,7 +669,7 @@ xen_pal_emulator(unsigned long index, u6
 			{
 				.vw = 1,
 				.phys_add_size = 44,
 				.key_size = 16,
-				.max_pkr = 15,
+				.max_pkr = XEN_IA64_NPKRS,
 				.hash_tag_id = 0x30,
 				.max_dtr_entry = NDTRS - 1,
 				.max_itr_entry = NITRS - 1,
diff -r 834ac63f4894 -r 294100ad2b04 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c	Sun Jul 15 13:40:47 2007 -0600
+++ b/xen/arch/ia64/xen/mm.c	Thu Jul 19 15:39:01 2007 +0200
@@ -448,7 +448,7 @@ gmfn_to_mfn_foreign(struct domain *d, un
 // address, convert the pte for a physical address for (possibly different)
 // Xen PAGE_SIZE and return modified pte.  (NOTE: TLB insert should use
 // PAGE_SIZE!)
-u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps,
+u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* _itir,
                          struct p2m_entry* entry)
 {
 	struct domain *d = current->domain;
@@ -467,7 +467,8 @@ u64 translate_domain_pte(u64 pteval, u64
 	if (itir.ps > PAGE_SHIFT)
 		itir.ps = PAGE_SHIFT;
 
-	*logps = itir.ps;
+	((ia64_itir_t*)_itir)->itir = itir.itir;
+	((ia64_itir_t*)_itir)->ps = itir.ps;
 
 	pteval2 = lookup_domain_mpa(d, mpaddr, entry);
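Note the changed contract of translate_domain_pte(): the out parameter
keeps its u64* type (see the mm.h prototype below), but it now receives a
complete cr.itir image - the function casts it to ia64_itir_t* internally -
instead of a bare log2 page size. A caller-side sketch, mirroring
ia64_do_page_fault() above (not additional code in the patch):

	ia64_itir_t _itir = {.itir = itir};	/* seed with the guest itir */
	m_pteval = translate_domain_pte(pteval, address, itir,
	                                &(_itir.itir), &entry);
	/* From here on, _itir.ps is the (possibly clamped) page size used
	 * for purges, while the whole _itir.itir image - page size plus
	 * protection key - is handed to the TLB/VHPT insert paths. */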
diff -r 834ac63f4894 -r 294100ad2b04 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c	Sun Jul 15 13:40:47 2007 -0600
+++ b/xen/arch/ia64/xen/vcpu.c	Thu Jul 19 15:39:01 2007 +0200
@@ -1623,7 +1623,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
                          u64 * pteval, u64 * itir, u64 * iha)
 {
 	unsigned long region = address >> 61;
-	unsigned long pta, rid, rr;
+	unsigned long pta, rid, rr, key = 0;
 	union pte_flags pte;
 	TR_ENTRY *trp;
 
@@ -1716,6 +1716,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
 		    region == 7 && ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL) {
 			pte.val = address & _PAGE_PPN_MASK;
 			pte.val = pte.val | optf->im_reg7.pgprot;
+			key = optf->im_reg7.key;
 			goto out;
 		}
 		return is_data ? IA64_ALT_DATA_TLB_VECTOR :
@@ -1741,7 +1742,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
 
 	/* found mapping in guest VHPT! */
 out:
-	*itir = rr & RR_PS_MASK;
+	*itir = (rr & RR_PS_MASK) | (key << IA64_ITIR_KEY);
 	*pteval = pte.val;
 	perfc_incr(vhpt_translate);
 	return IA64_NO_FAULT;
@@ -2213,23 +2214,23 @@ IA64FAULT vcpu_set_dtr(VCPU * vcpu, u64 
 
 void
 vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, u64 vaddr, u64 pte,
-                 u64 mp_pte, u64 logps, struct p2m_entry *entry)
-{
+                 u64 mp_pte, u64 itir__, struct p2m_entry *entry)
+{
+	ia64_itir_t _itir = {.itir = itir__};
 	unsigned long psr;
-	unsigned long ps = (vcpu->domain == dom0) ? logps : PAGE_SHIFT;
-
-	check_xen_space_overlap("itc", vaddr, 1UL << logps);
+
+	check_xen_space_overlap("itc", vaddr, 1UL << _itir.ps);
 
 	// FIXME, must be inlined or potential for nested fault here!
-	if ((vcpu->domain == dom0) && (logps < PAGE_SHIFT))
+	if ((vcpu->domain == dom0) && (_itir.ps < PAGE_SHIFT))
 		panic_domain(NULL, "vcpu_itc_no_srlz: domain trying to use "
 		             "smaller page size!\n");
 
-	BUG_ON(logps > PAGE_SHIFT);
+	BUG_ON(_itir.ps > PAGE_SHIFT);
 	vcpu_tlb_track_insert_or_dirty(vcpu, vaddr, entry);
 	psr = ia64_clear_ic();
 	pte &= ~(_PAGE_RV2 | _PAGE_RV1);	// Mask out the reserved bits.
-	ia64_itc(IorD, vaddr, pte, ps);	// FIXME: look for bigger mappings
+	ia64_itc(IorD, vaddr, pte, _itir.itir);	// FIXME: look for bigger mappings
 	ia64_set_psr(psr);
 	// ia64_srlz_i();  // no srls req'd, will rfi later
 	if (vcpu->domain == dom0 && ((vaddr >> 61) == 7)) {
@@ -2237,69 +2238,73 @@ vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, 
 		// addresses never get flushed.  More work needed if this
 		// ever happens.
 		//printk("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
-		if (logps > PAGE_SHIFT)
-			vhpt_multiple_insert(vaddr, pte, logps);
+		if (_itir.ps > PAGE_SHIFT)
+			vhpt_multiple_insert(vaddr, pte, _itir.itir);
 		else
-			vhpt_insert(vaddr, pte, logps << 2);
+			vhpt_insert(vaddr, pte, _itir.itir);
 	}
 	// even if domain pagesize is larger than PAGE_SIZE, just put
 	// PAGE_SIZE mapping in the vhpt for now, else purging is complicated
-	else
-		vhpt_insert(vaddr, pte, PAGE_SHIFT << 2);
+	else {
+		_itir.ps = PAGE_SHIFT;
+		vhpt_insert(vaddr, pte, _itir.itir);
+	}
 }
 
 IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
 {
-	unsigned long pteval, logps = itir_ps(itir);
+	unsigned long pteval;
+	ia64_itir_t _itir = {.itir = itir};
 	BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
 	struct p2m_entry entry;
 
-	if (logps < PAGE_SHIFT)
+	if (_itir.ps < PAGE_SHIFT)
 		panic_domain(NULL, "vcpu_itc_d: domain trying to use "
 		             "smaller page size!\n");
 
  again:
 	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
-	pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry);
+	pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
 	if (!pteval)
 		return IA64_ILLOP_FAULT;
 	if (swap_rr0)
 		set_one_rr(0x0, PSCB(vcpu, rrs[0]));
-	vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, logps, &entry);
+	vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, _itir.itir, &entry);
 	if (swap_rr0)
 		set_metaphysical_rr0();
 	if (p2m_entry_retry(&entry)) {
-		vcpu_flush_tlb_vhpt_range(ifa, logps);
+		vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
 		goto again;
 	}
-	vcpu_set_tr_entry(&PSCBX(vcpu, dtlb), pte, itir, ifa);
+	vcpu_set_tr_entry(&PSCBX(vcpu, dtlb), pte, _itir.itir, ifa);
 	return IA64_NO_FAULT;
 }
 
 IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
 {
-	unsigned long pteval, logps = itir_ps(itir);
+	unsigned long pteval;
+	ia64_itir_t _itir = {.itir = itir};
 	BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
 	struct p2m_entry entry;
 
-	if (logps < PAGE_SHIFT)
+	if (_itir.ps < PAGE_SHIFT)
 		panic_domain(NULL, "vcpu_itc_i: domain trying to use "
 		             "smaller page size!\n");
 
  again:
 	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
-	pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry);
+	pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
 	if (!pteval)
 		return IA64_ILLOP_FAULT;
 	if (swap_rr0)
 		set_one_rr(0x0, PSCB(vcpu, rrs[0]));
-	vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, logps, &entry);
+	vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, _itir.itir, &entry);
 	if (swap_rr0)
 		set_metaphysical_rr0();
 	if (p2m_entry_retry(&entry)) {
-		vcpu_flush_tlb_vhpt_range(ifa, logps);
+		vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
 		goto again;
 	}
-	vcpu_set_tr_entry(&PSCBX(vcpu, itlb), pte, itir, ifa);
+	vcpu_set_tr_entry(&PSCBX(vcpu, itlb), pte, _itir.itir, ifa);
 	return IA64_NO_FAULT;
 }
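Several call sites above open-code the pack/unpack arithmetic on the itir
image, and vhpt_multiple_insert() below does the same to recover logps. A
self-contained sketch of that arithmetic using the xenkregs.h definitions
(main() and the local variables are illustrative, not part of the tree):

	#include <stdio.h>
	#define __IA64_UL(x)	((unsigned long)(x))
	#define IA64_ITIR_PS		2
	#define IA64_ITIR_PS_LEN	6
	#define IA64_ITIR_PS_MASK	(((__IA64_UL(1) << IA64_ITIR_PS_LEN) - 1) \
					 << IA64_ITIR_PS)
	#define IA64_ITIR_KEY		8
	#define IA64_ITIR_KEY_LEN	24
	#define IA64_ITIR_KEY_MASK	(((__IA64_UL(1) << IA64_ITIR_KEY_LEN) - 1) \
					 << IA64_ITIR_KEY)
	#define IA64_ITIR_PS_KEY(_ps, _key)	(((_ps) << IA64_ITIR_PS) | \
						 (((_key) << IA64_ITIR_KEY)))

	int main(void)
	{
		unsigned long itir = IA64_ITIR_PS_KEY(14, 3);	/* 16KB page, key 3 */
		unsigned long ps  = (itir & IA64_ITIR_PS_MASK) >> IA64_ITIR_PS;
		unsigned long key = (itir & IA64_ITIR_KEY_MASK) >> IA64_ITIR_KEY;
		/* ps == 14, key == 3; with key 0 the image is 14 << 2 == 0x38,
		 * identical to the old logps << 2 encoding. */
		printf("itir=%#lx ps=%lu key=%lu\n", itir, ps, key);
		return 0;
	}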
diff -r 834ac63f4894 -r 294100ad2b04 xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c	Sun Jul 15 13:40:47 2007 -0600
+++ b/xen/arch/ia64/xen/vhpt.c	Thu Jul 19 15:39:01 2007 +0200
@@ -71,7 +71,7 @@ vhpt_erase(unsigned long vhpt_maddr)
 	// initialize cache too???
 }
 
-void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long logps)
+void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long itir)
 {
 	struct vhpt_lf_entry *vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr);
 	unsigned long tag = ia64_ttag (vadr);
@@ -80,13 +80,15 @@ void vhpt_insert (unsigned long vadr, un
 	 * because the processor may support speculative VHPT walk.  */
 	vlfe->ti_tag = INVALID_TI_TAG;
 	wmb();
-	vlfe->itir = logps;
+	vlfe->itir = itir;
 	vlfe->page_flags = pte | _PAGE_P;
 	*(volatile unsigned long*)&vlfe->ti_tag = tag;
 }
 
-void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte, unsigned long logps)
-{
+void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
+                          unsigned long itir)
+{
+	unsigned long logps = (itir & IA64_ITIR_PS_MASK) >> IA64_ITIR_PS;
 	unsigned long mask = (1L << logps) - 1;
 	int i;
 
@@ -110,7 +112,7 @@ void vhpt_multiple_insert(unsigned long 
 	vaddr &= ~mask;
 	pte = ((pte & _PFN_MASK) & ~mask) | (pte & ~_PFN_MASK);
 	for (i = 1L << (logps-PAGE_SHIFT); i > 0; i--) {
-		vhpt_insert(vaddr,pte,logps<<2);
+		vhpt_insert(vaddr,pte,itir);
 		vaddr += PAGE_SIZE;
 	}
 }
diff -r 834ac63f4894 -r 294100ad2b04 xen/include/asm-ia64/linux-xen/asm/processor.h
--- a/xen/include/asm-ia64/linux-xen/asm/processor.h	Sun Jul 15 13:40:47 2007 -0600
+++ b/xen/include/asm-ia64/linux-xen/asm/processor.h	Thu Jul 19 15:39:01 2007 +0200
@@ -533,9 +533,22 @@ ia64_itr (__u64 target_mask, __u64 tr_nu
  * Insert a translation into the instruction and/or data translation
  * cache.
  */
-static inline void
-ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
-	  __u64 log_page_size)
+#ifdef XEN
+static inline void
+ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte, __u64 itir)
+{
+	ia64_setreg(_IA64_REG_CR_ITIR, itir);
+	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+	ia64_stop();
+	/* as per EAS2.6, itc must be the last instruction in an instruction group */
+	if (target_mask & 0x1)
+		ia64_itci(pte);
+	if (target_mask & 0x2)
+		ia64_itcd(pte);
+}
+#else
+static inline void
+ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte, __u64 log_page_size)
 {
 	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
 	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
@@ -546,6 +559,7 @@ ia64_itc (__u64 target_mask, __u64 vmadd
 	if (target_mask & 0x2)
 		ia64_itcd(pte);
 }
+#endif
 
 /*
  * Purge a range of addresses from instruction and/or data translation
diff -r 834ac63f4894 -r 294100ad2b04 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h	Sun Jul 15 13:40:47 2007 -0600
+++ b/xen/include/asm-ia64/mm.h	Thu Jul 19 15:39:01 2007 +0200
@@ -447,7 +447,7 @@ extern unsigned long dom0vp_expose_p2m(s
 extern volatile unsigned long *mpt_table;
 extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
-extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps, struct p2m_entry* entry);
+extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* _itir, struct p2m_entry* entry);
 
 #define machine_to_phys_mapping	mpt_table
 
 #define INVALID_M2P_ENTRY	(~0UL)
diff -r 834ac63f4894 -r 294100ad2b04 xen/include/asm-ia64/vhpt.h
--- a/xen/include/asm-ia64/vhpt.h	Sun Jul 15 13:40:47 2007 -0600
+++ b/xen/include/asm-ia64/vhpt.h	Thu Jul 19 15:39:01 2007 +0200
@@ -38,9 +38,9 @@ extern void vhpt_init (void);
 extern void vhpt_init (void);
 extern void gather_vhpt_stats(void);
 extern void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
-				 unsigned long logps);
+				 unsigned long itir);
 extern void vhpt_insert (unsigned long vadr, unsigned long pte,
-			 unsigned long logps);
+			 unsigned long itir);
 void local_vhpt_flush(void);
 extern void vcpu_vhpt_flush(struct vcpu* v);
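The #ifdef XEN variant of ia64_itc() above moves the ITIR encoding to the
caller: the native Linux inline computes cr.itir itself from
log_page_size << 2, whereas the Xen variant writes the supplied itir image
to cr.itir verbatim, key included. The conversion pattern for call sites
that only have a page size, as applied in vmmu.c and vtlb.c at the top of
this patch:

	/* old: the inline shifted the log2 page size into cr.itir */
	ia64_itc(1, mtlb.ifa, mtlb.page_flags, mtlb.ps);
	/* new: pass a full itir image; key 0 keeps the old behaviour */
	ia64_itc(1, mtlb.ifa, mtlb.page_flags, IA64_ITIR_PS_KEY(mtlb.ps, 0));

Callers that already track a full itir (thash_vhpt_insert,
vcpu_itc_no_srlz) simply pass it through unchanged.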
diff -r 834ac63f4894 -r 294100ad2b04 xen/include/asm-ia64/xenkregs.h
--- a/xen/include/asm-ia64/xenkregs.h	Sun Jul 15 13:40:47 2007 -0600
+++ b/xen/include/asm-ia64/xenkregs.h	Thu Jul 19 15:39:01 2007 +0200
@@ -38,13 +38,30 @@
 /* Some cr.itir declarations. */
 #define IA64_ITIR_PS		2
 #define IA64_ITIR_PS_LEN	6
-#define IA64_ITIR_PS_MASK	(((__IA64_UL(1) << IA64_ITIR_PS_LEN) - 1) \
+#define IA64_ITIR_PS_MASK	(((__IA64_UL(1) << IA64_ITIR_PS_LEN) - 1) \
 				 << IA64_ITIR_PS)
 #define IA64_ITIR_KEY		8
 #define IA64_ITIR_KEY_LEN	24
 #define IA64_ITIR_KEY_MASK	(((__IA64_UL(1) << IA64_ITIR_KEY_LEN) - 1) \
 				 << IA64_ITIR_KEY)
-#define IA64_ITIR_PS_KEY(_ps, _key)	(((_ps) << IA64_ITIR_PS) | \
+#define IA64_ITIR_PS_KEY(_ps, _key)	(((_ps) << IA64_ITIR_PS) | \
 					 (((_key) << IA64_ITIR_KEY)))
+
+/* Define Protection Key Register (PKR) */
+
+#define XEN_IA64_NPKRS	15	/* Number of pkr's in PV */
+
+#define IA64_PKR_V		0
+#define IA64_PKR_WD		1
+#define IA64_PKR_RD		2
+#define IA64_PKR_XD		3
+#define IA64_PKR_MBZ0		4
+#define IA64_PKR_KEY		8
+#define IA64_PKR_KEY_LEN	24
+#define IA64_PKR_MBZ1		32
+
+#define IA64_PKR_VALID		(1 << IA64_PKR_V)
+#define IA64_PKR_KEY_MASK	(((__IA64_UL(1) << IA64_PKR_KEY_LEN) - 1) \
+				 << IA64_PKR_KEY)
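Apart from XEN_IA64_NPKRS, the PKR field definitions are not consumed
elsewhere in this patch; they describe the architected protection key
register layout for later use. A hedged sketch of composing a pkr value
from them, assuming the usual valid-bit-plus-key encoding with the
wd/rd/xd disable bits left clear (make_pkr is hypothetical, not part of
the tree):

	/* Sketch only - illustrative use of the new defines. */
	static unsigned long make_pkr(unsigned long key)
	{
		return IA64_PKR_VALID |
		       ((key << IA64_PKR_KEY) & IA64_PKR_KEY_MASK);
	}

XEN_IA64_NPKRS (15) is the pkr count advertised to PV domains; the
fw_emul.c hunk above now reports the same value in max_pkr instead of a
bare 15.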