# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Node ID 6e352c348fdb1baedbb35563d4e4e9579749c652
# Parent 68916203eb9717d6fb663fb89eab7378dba9944c
add volatile to the pte entries of the p2m table.
The p2m table is shared between CPUs, so volatile is added as a compiler
barrier: each access must re-read the entry instead of reusing a possibly
stale cached value.
PATCHNAME: volatile_pte_t

Signed-off-by: Isaku Yamahata
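As a minimal illustration of what the qualifier buys (a sketch, not part of
the patch; my_pte_t, MY_PTE_P, my_pte_present and my_lookup are hypothetical
stand-ins for pte_t, the present bit, pte_present() and lookup_domain_mpa()):
without volatile the compiler may issue a separate load of *pte for each use,
so the presence test and the returned value could observe two different
entries written concurrently by another CPU.

    #include <stdint.h>

    typedef struct { uint64_t val; } my_pte_t;   /* stand-in for pte_t */
    #define MY_PTE_P (1UL << 0)                  /* hypothetical present bit */

    static inline int my_pte_present(my_pte_t pte)
    {
        return (pte.val & MY_PTE_P) != 0;
    }

    /* Another CPU may rewrite *pte at any time via xchg/cmpxchg. */
    uint64_t my_lookup(volatile my_pte_t *pte)
    {
        my_pte_t tmp = *pte;  /* volatile forces exactly one real load;
                                 tmp is a private, consistent snapshot */
        if (my_pte_present(tmp))
            return tmp.val;   /* test and result use the same snapshot */
        return 0;
    }

The tmp_pte copy introduced in lookup_domain_mpa() below follows this pattern.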
diff -r 68916203eb97 -r 6e352c348fdb xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c    Fri Jun 09 16:22:24 2006 +0900
+++ b/xen/arch/ia64/xen/mm.c    Fri Jun 09 17:50:53 2006 +0900
@@ -349,7 +349,7 @@ unsigned long translate_domain_mpaddr(un
 // pud, pmd, pte page is zero cleared when they are allocated.
 // Their area must be visible before population so that
 // cmpxchg must have release semantics.
-static pte_t*
+static volatile pte_t*
 lookup_alloc_domain_pte(struct domain* d, unsigned long mpaddr)
 {
     struct mm_struct *mm = &d->arch.mm;
@@ -392,11 +392,11 @@ lookup_alloc_domain_pte(struct domain* d
         }
     }
 
-    return pte_offset_map(pmd, mpaddr);
+    return (volatile pte_t*)pte_offset_map(pmd, mpaddr);
 }
 
 //XXX xxx_none() should be used instread of !xxx_present()?
-static pte_t*
+static volatile pte_t*
 lookup_noalloc_domain_pte(struct domain* d, unsigned long mpaddr)
 {
     struct mm_struct *mm = &d->arch.mm;
@@ -417,11 +417,11 @@ lookup_noalloc_domain_pte(struct domain*
     if (unlikely(!pmd_present(*pmd)))
         return NULL;
 
-    return pte_offset_map(pmd, mpaddr);
+    return (volatile pte_t*)pte_offset_map(pmd, mpaddr);
 }
 
 #ifdef CONFIG_XEN_IA64_DOM0_VP
-static pte_t*
+static volatile pte_t*
 lookup_noalloc_domain_pte_none(struct domain* d, unsigned long mpaddr)
 {
     struct mm_struct *mm = &d->arch.mm;
@@ -442,13 +442,13 @@ lookup_noalloc_domain_pte_none(struct do
     if (unlikely(pmd_none(*pmd)))
         return NULL;
 
-    return pte_offset_map(pmd, mpaddr);
+    return (volatile pte_t*)pte_offset_map(pmd, mpaddr);
 }
 
 unsigned long
 ____lookup_domain_mpa(struct domain *d, unsigned long mpaddr)
 {
-    pte_t *pte;
+    volatile pte_t *pte;
 
     pte = lookup_noalloc_domain_pte(d, mpaddr);
     if (pte == NULL)
@@ -515,7 +515,7 @@ __tr_entry_print_all(const char* func, i
 
 unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr)
 {
-    pte_t *pte;
+    volatile pte_t *pte;
 
 #ifdef CONFIG_DOMAIN0_CONTIGUOUS
     if (d == dom0) {
@@ -531,9 +531,10 @@ unsigned long lookup_domain_mpa(struct d
 #endif
     pte = lookup_noalloc_domain_pte(d, mpaddr);
     if (pte != NULL) {
-        if (pte_present(*pte)) {
+        pte_t tmp_pte = *pte;// pte is volatile. copy the value.
+        if (pte_present(tmp_pte)) {
 //printk("lookup_domain_page: found mapping for %lx, pte=%lx\n",mpaddr,pte_val(*pte));
-            return pte_val(*pte);
+            return pte_val(tmp_pte);
         } else if (VMX_DOMAIN(d->vcpu[0]))
             return GPFN_INV_MASK;
     }
@@ -582,7 +583,8 @@ void *domain_mpa_to_imva(struct domain *
 /* Allocate a new page for domain and map it to the specified metaphysical
    address. */
 struct page_info *
-__assign_new_domain_page(struct domain *d, unsigned long mpaddr, pte_t* pte)
+__assign_new_domain_page(struct domain *d, unsigned long mpaddr,
+                         volatile pte_t* pte)
 {
     struct page_info *p = NULL;
     unsigned long maddr;
@@ -644,7 +646,7 @@ assign_new_domain_page(struct domain *d,
     return __assign_new_domain_page(d, mpaddr, &dummy_pte);
 #else
     struct page_info *p = NULL;
-    pte_t *pte;
+    volatile pte_t *pte;
 
     pte = lookup_alloc_domain_pte(d, mpaddr);
     if (pte_none(*pte)) {
@@ -662,7 +664,7 @@ assign_new_domain0_page(struct domain *d
 assign_new_domain0_page(struct domain *d, unsigned long mpaddr)
 {
 #ifndef CONFIG_DOMAIN0_CONTIGUOUS
-    pte_t *pte;
+    volatile pte_t *pte;
 
     BUG_ON(d != dom0);
     pte = lookup_alloc_domain_pte(d, mpaddr);
@@ -682,7 +684,7 @@ __assign_domain_page(struct domain *d,
                      unsigned long mpaddr, unsigned long physaddr,
                      unsigned long flags)
 {
-    pte_t *pte;
+    volatile pte_t *pte;
     unsigned long arflags = (flags & ASSIGN_readonly)? _PAGE_AR_R: _PAGE_AR_RWX;
 
     pte = lookup_alloc_domain_pte(d, mpaddr);
@@ -811,7 +813,7 @@ assign_domain_page_replace(struct domain
                            unsigned long mfn, unsigned long flags)
 {
     struct mm_struct *mm = &d->arch.mm;
-    pte_t* pte;
+    volatile pte_t* pte;
     pte_t old_pte;
     pte_t npte;
     unsigned long arflags = (flags & ASSIGN_readonly)? _PAGE_AR_R: _PAGE_AR_RWX;
@@ -854,7 +856,7 @@ assign_domain_page_cmpxchg_rel(struct do
                                unsigned long flags)
 {
     struct mm_struct *mm = &d->arch.mm;
-    pte_t* pte;
+    volatile pte_t* pte;
     unsigned long old_mfn;
     unsigned long old_arflags;
     pte_t old_pte;
@@ -866,9 +868,14 @@ assign_domain_page_cmpxchg_rel(struct do
     pte = lookup_alloc_domain_pte(d, mpaddr);
 
  again:
-    old_arflags = pte_val(*pte) & ~_PAGE_PPN_MASK;//XXX
+    old_arflags = pte_val(*pte) & ~_PAGE_PPN_MASK;
     old_mfn = page_to_mfn(old_page);
     old_pte = pfn_pte(old_mfn, __pgprot(old_arflags));
+    if (!pte_present(old_pte)) {
+        DPRINTK("%s: old_pte 0x%lx old_arflags 0x%lx old_mfn 0x%lx\n",
+                __func__, pte_val(old_pte), old_arflags, old_mfn);
+        return -EINVAL;
+    }
 
     new_arflags = (flags & ASSIGN_readonly)?
         _PAGE_AR_R: _PAGE_AR_RWX;
     new_mfn = page_to_mfn(new_page);
@@ -906,7 +913,7 @@ zap_domain_page_one(struct domain *d, un
 zap_domain_page_one(struct domain *d, unsigned long mpaddr)
 {
     struct mm_struct *mm = &d->arch.mm;
-    pte_t *pte;
+    volatile pte_t *pte;
     pte_t old_pte;
     unsigned long mfn;
     struct page_info *page;
@@ -1026,10 +1033,12 @@ destroy_grant_host_mapping(unsigned long
                            unsigned long mfn, unsigned int flags)
 {
     struct domain* d = current->domain;
-    pte_t* pte;
+    volatile pte_t* pte;
+    unsigned long cur_arflags;
+    pte_t cur_pte;
+    pte_t new_pte;
     pte_t old_pte;
-    unsigned long old_mfn = INVALID_MFN;
-    struct page_info* old_page;
+    struct page_info* page;
 
     if (flags & (GNTMAP_application_map | GNTMAP_contains_pte)) {
         DPRINTK("%s: flags 0x%x\n", __func__, flags);
@@ -1037,21 +1046,42 @@ destroy_grant_host_mapping(unsigned long
     }
 
     pte = lookup_noalloc_domain_pte(d, gpaddr);
-    if (pte == NULL || !pte_present(*pte) || pte_pfn(*pte) != mfn)
+    if (pte == NULL) {
+        DPRINTK("%s: gpaddr 0x%lx mfn 0x%lx\n", __func__, gpaddr, mfn);
         return GNTST_general_error;
-
-    // update pte
-    old_pte = ptep_get_and_clear(&d->arch.mm, gpaddr, pte);
-    if (pte_present(old_pte)) {
-        old_mfn = pte_pfn(old_pte);
-    } else {
+    }
+
+ again:
+    cur_arflags = pte_val(*pte) & ~_PAGE_PPN_MASK;
+    cur_pte = pfn_pte(mfn, __pgprot(cur_arflags));
+    if (!pte_present(cur_pte)) {
+        DPRINTK("%s: gpaddr 0x%lx mfn 0x%lx cur_pte 0x%lx\n",
+                __func__, gpaddr, mfn, pte_val(cur_pte));
         return GNTST_general_error;
     }
-    domain_page_flush(d, gpaddr, old_mfn, INVALID_MFN);
-
-    old_page = mfn_to_page(old_mfn);
-    BUG_ON(page_get_owner(old_page) == d);//try_to_clear_PGC_allocate(d, page) is not needed.
-    put_page(old_page);
+    new_pte = __pte(0);
+
+    old_pte = ptep_cmpxchg_rel(&d->arch.mm, gpaddr, pte, cur_pte, new_pte);
+    if (unlikely(!pte_present(old_pte))) {
+        DPRINTK("%s: gpaddr 0x%lx mfn 0x%lx cur_pte 0x%lx old_pte 0x%lx\n",
+                __func__, gpaddr, mfn, pte_val(cur_pte), pte_val(old_pte));
+        return GNTST_general_error;
+    }
+    if (unlikely(pte_val(cur_pte) != pte_val(old_pte))) {
+        if (pte_pfn(old_pte) == mfn) {
+            goto again;
+        }
+        DPRINTK("%s: gpaddr 0x%lx mfn 0x%lx cur_pte 0x%lx old_pte 0x%lx\n",
+                __func__, gpaddr, mfn, pte_val(cur_pte), pte_val(old_pte));
+        return GNTST_general_error;
+    }
+    BUG_ON(pte_pfn(old_pte) != mfn);
+
+    domain_page_flush(d, gpaddr, mfn, INVALID_MFN);
+
+    page = mfn_to_page(mfn);
+    BUG_ON(page_get_owner(page) == d);//try_to_clear_PGC_allocate(d, page) is not needed.
+    put_page(page);
 
     return GNTST_okay;
 }
@@ -1196,7 +1226,7 @@ int
 int
 domain_page_mapped(struct domain* d, unsigned long mpaddr)
 {
-    pte_t * pte;
+    volatile pte_t * pte;
 
     pte = lookup_noalloc_domain_pte(d, mpaddr);
     if(pte != NULL && !pte_none(*pte))
diff -r 68916203eb97 -r 6e352c348fdb xen/include/asm-ia64/linux-xen/asm/pgtable.h
--- a/xen/include/asm-ia64/linux-xen/asm/pgtable.h    Fri Jun 09 16:22:24 2006 +0900
+++ b/xen/include/asm-ia64/linux-xen/asm/pgtable.h    Fri Jun 09 17:50:53 2006 +0900
@@ -210,7 +210,7 @@ ia64_phys_addr_valid (unsigned long addr
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 #ifdef XEN
 static inline void
-set_pte_rel(pte_t* ptep, pte_t pteval)
+set_pte_rel(volatile pte_t* ptep, pte_t pteval)
 {
 #if CONFIG_SMP
     asm volatile ("st8.rel [%0]=%1" ::
@@ -402,8 +402,14 @@ ptep_test_and_clear_dirty (struct vm_are
 }
 #endif
 
+#ifdef XEN
+static inline pte_t
+ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                   volatile pte_t *ptep)
+#else
 static inline pte_t
 ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+#endif
 {
 #ifdef CONFIG_SMP
     return __pte(xchg((long *) ptep, 0));
@@ -416,7 +422,8 @@ ptep_get_and_clear(struct mm_struct *mm,
 
 #ifdef XEN
 static inline pte_t
-ptep_xchg(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t npte)
+ptep_xchg(struct mm_struct *mm, unsigned long addr,
+          volatile pte_t *ptep, pte_t npte)
 {
 #ifdef CONFIG_SMP
     return __pte(xchg((long *) ptep, pte_val(npte)));
@@ -428,8 +435,8 @@ ptep_xchg(struct mm_struct *mm, unsigned
 }
 
 static inline pte_t
-ptep_cmpxchg_rel(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
-                 pte_t old_pte, pte_t new_pte)
+ptep_cmpxchg_rel(struct mm_struct *mm, unsigned long addr,
+                 volatile pte_t *ptep, pte_t old_pte, pte_t new_pte)
 {
 #ifdef CONFIG_SMP
     return __pte(cmpxchg_rel(&pte_val(*ptep),
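
For reference, a sketch of the compare-and-exchange retry loop that the new
destroy_grant_host_mapping() implements above, under the assumption of 4K
pages; MY_PTE_P, MY_PPN_MASK, MY_PAGE_SHIFT and my_zap_entry are hypothetical,
and the GCC builtin __sync_val_compare_and_swap stands in for
ptep_cmpxchg_rel() (which additionally has release semantics on IA64):

    #include <stdint.h>

    #define MY_PTE_P      (1UL << 0)             /* hypothetical present bit */
    #define MY_PPN_MASK   0x0000fffffffff000UL   /* hypothetical pfn field */
    #define MY_PAGE_SHIFT 12                     /* assumes 4K pages */

    /* Clear *ptep, but only while it still maps expected_pfn. */
    int my_zap_entry(volatile uint64_t *ptep, uint64_t expected_pfn)
    {
        uint64_t flags, cur, old;

    again:
        flags = *ptep & ~MY_PPN_MASK;            /* volatile: a fresh load */
        cur = flags | (expected_pfn << MY_PAGE_SHIFT);
        if (!(cur & MY_PTE_P))
            return -1;                           /* entry already gone */

        /* Atomically install the empty entry iff *ptep still equals cur. */
        old = __sync_val_compare_and_swap(ptep, cur, (uint64_t)0);
        if (old != cur) {
            if (((old & MY_PPN_MASK) >> MY_PAGE_SHIFT) == expected_pfn)
                goto again;                      /* only flag bits raced: retry */
            return -1;                           /* pfn changed underneath us */
        }
        return 0;                                /* caller flushes TLB, drops ref */
    }

If the swap fails only because access-right bits changed concurrently, the pfn
is still the expected one and the loop retries; any other difference means the
entry was replaced, so the unmap fails instead of clearing someone else's
mapping.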