[Xen-changelog] [xen-unstable] [IA64] remove some races between the p2m table and the m2p table

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [IA64] remove some races between the p2m table and the m2p table
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 16 Jun 2006 18:40:52 +0000
Delivery-date: Fri, 16 Jun 2006 11:45:39 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID bc76ad9d6270194101f3dd088b2b039532f5654f
# Parent  6fdafeeb88bb70674704ebf63bb2b983106a96a9
[IA64] remove some races between the p2m table and the m2p table

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
---
 xen/arch/ia64/xen/domain.c                   |    6 
 xen/arch/ia64/xen/mm.c                       |  278 ++++++++++++++++++---------
 xen/include/asm-ia64/linux-xen/asm/pgalloc.h |   47 ++++
 xen/include/asm-ia64/linux-xen/asm/pgtable.h |   31 +++
 xen/include/asm-ia64/mm.h                    |    4 
 5 files changed, 270 insertions(+), 96 deletions(-)
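
For readers unfamiliar with the idiom, the fixes below all follow one
ordering discipline: the m2p entry is written first and the pte is then
published with a release store (or release cmpxchg), so any CPU that
observes the new pte also observes a current m2p entry. Here is a minimal
user-space sketch of that discipline using GCC atomic builtins; all names
are hypothetical, and this models the idea, not the Xen code itself:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t m2p_entry;   /* models one m2p table slot */
    static uint64_t pte;         /* models the corresponding pte word */

    /* Writer: update the m2p table, then publish the pte with release
     * semantics (the analogue of set_pte_rel()/st8.rel on IA64). */
    static void publish_mapping(uint64_t gpfn, uint64_t pteval)
    {
        m2p_entry = gpfn;                                  /* plain store */
        __atomic_store_n(&pte, pteval, __ATOMIC_RELEASE);  /* ordered after it */
    }

    /* Reader: an acquire load of the pte guarantees that the m2p entry
     * read afterwards is at least as new as the observed pte. */
    static uint64_t read_mapping(uint64_t *gpfn)
    {
        uint64_t v = __atomic_load_n(&pte, __ATOMIC_ACQUIRE);
        *gpfn = m2p_entry;
        return v;
    }

    int main(void)
    {
        uint64_t gpfn;
        publish_mapping(42, 0x1000);
        printf("pte=%#llx gpfn=%llu\n",
               (unsigned long long)read_mapping(&gpfn),
               (unsigned long long)gpfn);
        return 0;
    }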

diff -r 6fdafeeb88bb -r bc76ad9d6270 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Fri Jun 09 10:35:41 2006 -0600
+++ b/xen/arch/ia64/xen/domain.c        Fri Jun 09 10:35:42 2006 -0600
@@ -472,13 +472,7 @@ static void relinquish_memory(struct dom
         /* Follow the list chain and /then/ potentially free the page. */
         ent = ent->next;
 #ifdef CONFIG_XEN_IA64_DOM0_VP
-#if 1
         BUG_ON(get_gpfn_from_mfn(page_to_mfn(page)) != INVALID_M2P_ENTRY);
-#else
-        //XXX this should be done at traversing the P2M table.
-        if (page_get_owner(page) == d)
-            set_gpfn_from_mfn(page_to_mfn(page), INVALID_M2P_ENTRY);
-#endif
 #endif
         put_page(page);
     }
diff -r 6fdafeeb88bb -r bc76ad9d6270 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c    Fri Jun 09 10:35:41 2006 -0600
+++ b/xen/arch/ia64/xen/mm.c    Fri Jun 09 10:35:42 2006 -0600
@@ -212,16 +212,6 @@ share_xen_page_with_guest(struct page_in
 
     // alloc_xenheap_pages() doesn't initialize page owner.
     //BUG_ON(page_get_owner(page) != NULL);
-#if 0
-    if (get_gpfn_from_mfn(page_to_mfn(page)) != INVALID_M2P_ENTRY) {
-        printk("%s:%d page 0x%p mfn 0x%lx gpfn 0x%lx\n", __func__, __LINE__,
-               page, page_to_mfn(page), get_gpfn_from_mfn(page_to_mfn(page)));
-    }
-#endif
-    // grant_table_destroy() release these pages.
-    // but it doesn't clear m2p entry. So there might remain stale entry.
-    // We clear such a stale entry here.
-    set_gpfn_from_mfn(page_to_mfn(page), INVALID_M2P_ENTRY);
 
     spin_lock(&d->page_alloc_lock);
 
@@ -239,6 +229,11 @@ share_xen_page_with_guest(struct page_in
     if ( unlikely(d->xenheap_pages++ == 0) )
         get_knownalive_domain(d);
     list_add_tail(&page->list, &d->xenpage_list);
+
+    // grant_table_destroy() releases these pages, but it doesn't clear
+    // their m2p entries, so stale entries may remain. Such a stale
+    // entry is cleared here.
+    set_gpfn_from_mfn(page_to_mfn(page), INVALID_M2P_ENTRY);
 
     spin_unlock(&d->page_alloc_lock);
 }
@@ -351,6 +346,9 @@ unsigned long translate_domain_mpaddr(un
 }
 
 //XXX !xxx_present() should be used instead of !xxx_none()?
+// pud, pmd, and pte pages are zero-cleared when they are allocated.
+// Their contents must be visible before they are populated, so the
+// cmpxchg that publishes them must have release semantics.
 static pte_t*
 lookup_alloc_domain_pte(struct domain* d, unsigned long mpaddr)
 {
@@ -360,19 +358,38 @@ lookup_alloc_domain_pte(struct domain* d
     pmd_t *pmd;
 
     BUG_ON(mm->pgd == NULL);
+
     pgd = pgd_offset(mm, mpaddr);
-    if (pgd_none(*pgd)) {
-        pgd_populate(mm, pgd, pud_alloc_one(mm,mpaddr));
+ again_pgd:
+    if (unlikely(pgd_none(*pgd))) {
+        pud_t *old_pud = NULL;
+        pud = pud_alloc_one(mm, mpaddr);
+        if (unlikely(!pgd_cmpxchg_rel(mm, pgd, old_pud, pud))) {
+            pud_free(pud);
+            goto again_pgd;
+        }
     }
 
     pud = pud_offset(pgd, mpaddr);
-    if (pud_none(*pud)) {
-        pud_populate(mm, pud, pmd_alloc_one(mm,mpaddr));
+ again_pud:
+    if (unlikely(pud_none(*pud))) {
+        pmd_t* old_pmd = NULL;
+        pmd = pmd_alloc_one(mm, mpaddr);
+        if (unlikely(!pud_cmpxchg_rel(mm, pud, old_pmd, pmd))) {
+            pmd_free(pmd);
+            goto again_pud;
+        }
     }
 
     pmd = pmd_offset(pud, mpaddr);
-    if (pmd_none(*pmd)) {
-        pmd_populate_kernel(mm, pmd, pte_alloc_one_kernel(mm, mpaddr));
+ again_pmd:
+    if (unlikely(pmd_none(*pmd))) {
+        pte_t* old_pte = NULL;
+        pte_t* pte = pte_alloc_one_kernel(mm, mpaddr);
+        if (unlikely(!pmd_cmpxchg_kernel_rel(mm, pmd, old_pte, pte))) {
+            pte_free_kernel(pte);
+            goto again_pmd;
+        }
     }
 
     return pte_offset_map(pmd, mpaddr);
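
The retry loops added above in lookup_alloc_domain_pte() populate each
pagetable level without a lock: allocate a zeroed page, try to install it
with a release cmpxchg, and free it and retry if another CPU won the race.
A user-space sketch of the same idiom (hypothetical names, GCC atomic
builtins; a model, not the Xen code):

    #include <stdio.h>
    #include <stdlib.h>

    static void *level;   /* models one pgd/pud/pmd slot; NULL == empty */

    static void *lookup_alloc_level(void)
    {
        void *cur;

    again:
        cur = __atomic_load_n(&level, __ATOMIC_ACQUIRE);
        if (cur == NULL) {
            void *expected = NULL;
            void *fresh = calloc(1, 4096);  /* zero-cleared, like pud_alloc_one() */
            if (fresh == NULL)
                abort();
            /* Release: the zero-clearing must be visible to any CPU that
             * sees the pointer we publish (cf. pgd_cmpxchg_rel()). */
            if (__atomic_compare_exchange_n(&level, &expected, fresh, 0,
                                            __ATOMIC_RELEASE, __ATOMIC_ACQUIRE))
                return fresh;
            free(fresh);  /* another CPU populated the slot first; retry */
            goto again;
        }
        return cur;
    }

    int main(void)
    {
        printf("level at %p\n", lookup_alloc_level());
        free(level);
        return 0;
    }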
@@ -389,15 +406,15 @@ lookup_noalloc_domain_pte(struct domain*
 
     BUG_ON(mm->pgd == NULL);
     pgd = pgd_offset(mm, mpaddr);
-    if (!pgd_present(*pgd))
+    if (unlikely(!pgd_present(*pgd)))
         return NULL;
 
     pud = pud_offset(pgd, mpaddr);
-    if (!pud_present(*pud))
+    if (unlikely(!pud_present(*pud)))
         return NULL;
 
     pmd = pmd_offset(pud, mpaddr);
-    if (!pmd_present(*pmd))
+    if (unlikely(!pmd_present(*pmd)))
         return NULL;
 
     return pte_offset_map(pmd, mpaddr);
@@ -414,15 +431,15 @@ lookup_noalloc_domain_pte_none(struct do
 
     BUG_ON(mm->pgd == NULL);
     pgd = pgd_offset(mm, mpaddr);
-    if (pgd_none(*pgd))
+    if (unlikely(pgd_none(*pgd)))
         return NULL;
 
     pud = pud_offset(pgd, mpaddr);
-    if (pud_none(*pud))
+    if (unlikely(pud_none(*pud)))
         return NULL;
 
     pmd = pmd_offset(pud, mpaddr);
-    if (pmd_none(*pmd))
+    if (unlikely(pmd_none(*pmd)))
         return NULL;
 
     return pte_offset_map(pmd, mpaddr);
@@ -565,13 +582,14 @@ __assign_new_domain_page(struct domain *
 
     ret = get_page(p, d);
     BUG_ON(ret == 0);
-    set_pte(pte, pfn_pte(maddr >> PAGE_SHIFT,
-                         __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
-
-    mb ();
-    //XXX CONFIG_XEN_IA64_DOM0_VP
-    //    TODO racy
     set_gpfn_from_mfn(page_to_mfn(p), mpaddr >> PAGE_SHIFT);
+    // clear_page() and set_gpfn_from_mfn() become visible before the
+    // pte update because set_pte_rel() has release semantics.
+    set_pte_rel(pte,
+                pfn_pte(maddr >> PAGE_SHIFT,
+                        __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
+
+    smp_mb();
     return p;
 }
 
@@ -626,9 +644,10 @@ __assign_domain_page(struct domain *d,
 
     pte = lookup_alloc_domain_pte(d, mpaddr);
     if (pte_none(*pte)) {
-        set_pte(pte, pfn_pte(physaddr >> PAGE_SHIFT,
-                             __pgprot(__DIRTY_BITS | _PAGE_PL_2 | arflags)));
-        mb ();
+        set_pte_rel(pte,
+                    pfn_pte(physaddr >> PAGE_SHIFT,
+                            __pgprot(__DIRTY_BITS | _PAGE_PL_2 | arflags)));
+        smp_mb();
     } else
         printk("%s: mpaddr %lx already mapped!\n", __func__, mpaddr);
 }
@@ -644,11 +663,10 @@ assign_domain_page(struct domain *d,
     BUG_ON((physaddr & GPFN_IO_MASK) != GPFN_MEM);
     ret = get_page(page, d);
     BUG_ON(ret == 0);
+    set_gpfn_from_mfn(physaddr >> PAGE_SHIFT, mpaddr >> PAGE_SHIFT);
+    // Because __assign_domain_page() uses set_pte_rel(), which has
+    // release semantics, smp_mb() isn't needed here.
     __assign_domain_page(d, mpaddr, physaddr, ASSIGN_writable);
-
-    //XXX CONFIG_XEN_IA64_DOM0_VP
-    //    TODO racy
-    set_gpfn_from_mfn(physaddr >> PAGE_SHIFT, mpaddr >> PAGE_SHIFT);
 }
 
 #ifdef CONFIG_XEN_IA64_DOM0_VP
@@ -740,8 +758,10 @@ assign_domain_mach_page(struct domain *d
     return mpaddr;
 }
 
-// caller must get_page(mfn_to_page(mfn)) before
-// caller must call set_gpfn_from_mfn().
+// The caller must get_page(mfn_to_page(mfn)) before calling this function.
+// The caller must call set_gpfn_from_mfn() beforehand if necessary.
+// Because the set_gpfn_from_mfn() result must be visible before the pte
+// xchg, the caller must use a memory barrier. NOTE: xchg has acquire semantics.
 // flags: currently only ASSIGN_readonly
 static void
 assign_domain_page_replace(struct domain *d, unsigned long mpaddr,
@@ -752,39 +772,95 @@ assign_domain_page_replace(struct domain
     pte_t old_pte;
     pte_t npte;
     unsigned long arflags = (flags & ASSIGN_readonly)? _PAGE_AR_R: _PAGE_AR_RWX;
-
     pte = lookup_alloc_domain_pte(d, mpaddr);
 
     // update pte
     npte = pfn_pte(mfn, __pgprot(__DIRTY_BITS | _PAGE_PL_2 | arflags));
     old_pte = ptep_xchg(mm, mpaddr, pte, npte);
     if (pte_mem(old_pte)) {
-        unsigned long old_mfn;
-        struct page_info* old_page;
-
-        // XXX should previous underlying page be removed?
-        //  or should error be returned because it is a due to a domain?
-        old_mfn = pte_pfn(old_pte);//XXX
-        old_page = mfn_to_page(old_mfn);
-
-        if (page_get_owner(old_page) == d) {
-            BUG_ON(get_gpfn_from_mfn(old_mfn) != (mpaddr >> PAGE_SHIFT));
-            set_gpfn_from_mfn(old_mfn, INVALID_M2P_ENTRY);
-        }
-
-        domain_page_flush(d, mpaddr, old_mfn, mfn);
-
-        try_to_clear_PGC_allocate(d, old_page);
-        put_page(old_page);
-    } else {
-        BUG_ON(!mfn_valid(mfn));
-        BUG_ON(page_get_owner(mfn_to_page(mfn)) == d &&
-               get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY);
-    }
+        unsigned long old_mfn = pte_pfn(old_pte);
+
+        // The mfn == old_mfn case can happen when a domain maps a granted
+        // page twice with the same pseudo-physical address.
+        // It's nonsensical, but allowed.
+        // __gnttab_map_grant_ref()
+        //   => create_host_mapping()
+        //      => assign_domain_page_replace()
+        if (mfn != old_mfn) {
+            struct page_info* old_page = mfn_to_page(old_mfn);
+
+            if (page_get_owner(old_page) == d) {
+                BUG_ON(get_gpfn_from_mfn(old_mfn) != (mpaddr >> PAGE_SHIFT));
+                set_gpfn_from_mfn(old_mfn, INVALID_M2P_ENTRY);
+            }
+
+            domain_page_flush(d, mpaddr, old_mfn, mfn);
+
+            try_to_clear_PGC_allocate(d, old_page);
+            put_page(old_page);
+        }
+    }
+}
+
+// caller must get_page(new_page) before
+// Only steal_page_for_grant_transfer() calls this function.
+static int
+assign_domain_page_cmpxchg_rel(struct domain* d, unsigned long mpaddr,
+                               struct page_info* old_page,
+                               struct page_info* new_page,
+                               unsigned long flags)
+{
+    struct mm_struct *mm = &d->arch.mm;
+    pte_t* pte;
+    unsigned long old_mfn;
+    unsigned long old_arflags;
+    pte_t old_pte;
+    unsigned long new_mfn;
+    unsigned long new_arflags;
+    pte_t new_pte;
+    pte_t ret_pte;
+
+    pte = lookup_alloc_domain_pte(d, mpaddr);
+
+ again:
+    old_arflags = pte_val(*pte) & ~_PAGE_PPN_MASK;//XXX
+    old_mfn = page_to_mfn(old_page);
+    old_pte = pfn_pte(old_mfn, __pgprot(old_arflags));
+
+    new_arflags = (flags & ASSIGN_readonly)? _PAGE_AR_R: _PAGE_AR_RWX;
+    new_mfn = page_to_mfn(new_page);
+    new_pte = pfn_pte(new_mfn,
+                      __pgprot(__DIRTY_BITS | _PAGE_PL_2 | new_arflags));
+
+    // update pte
+    ret_pte = ptep_cmpxchg_rel(mm, mpaddr, pte, old_pte, new_pte);
+    if (unlikely(pte_val(old_pte) != pte_val(ret_pte))) {
+        if (pte_pfn(old_pte) == pte_pfn(ret_pte)) {
+            goto again;
+        }
+
+        DPRINTK("%s: old_pte 0x%lx old_arflags 0x%lx old_mfn 0x%lx "
+                "ret_pte 0x%lx ret_mfn 0x%lx\n",
+                __func__,
+                pte_val(old_pte), old_arflags, old_mfn,
+                pte_val(ret_pte), pte_pfn(ret_pte));
+        return -EINVAL;
+    }
+
+    BUG_ON(!pte_mem(old_pte));
+    BUG_ON(page_get_owner(old_page) != d);
+    BUG_ON(get_gpfn_from_mfn(old_mfn) != (mpaddr >> PAGE_SHIFT));
+    BUG_ON(old_mfn == new_mfn);
+
+    set_gpfn_from_mfn(old_mfn, INVALID_M2P_ENTRY);
+
+    domain_page_flush(d, mpaddr, old_mfn, new_mfn);
+    put_page(old_page);
+    return 0;
 }
 
 static void
-zap_domain_page_one(struct domain *d, unsigned long mpaddr, int do_put_page)
+zap_domain_page_one(struct domain *d, unsigned long mpaddr)
 {
     struct mm_struct *mm = &d->arch.mm;
     pte_t *pte;
@@ -811,13 +887,10 @@ zap_domain_page_one(struct domain *d, un
 
     domain_page_flush(d, mpaddr, mfn, INVALID_MFN);
 
-    if (do_put_page) {
-        try_to_clear_PGC_allocate(d, page);
-        put_page(page);
-    }
-}
-
-//XXX SMP
+    try_to_clear_PGC_allocate(d, page);
+    put_page(page);
+}
+
 unsigned long
 dom0vp_zap_physmap(struct domain *d, unsigned long gpfn,
                    unsigned int extent_order)
@@ -827,7 +900,7 @@ dom0vp_zap_physmap(struct domain *d, uns
         return -ENOSYS;
     }
 
-    zap_domain_page_one(d, gpfn << PAGE_SHIFT, 1);
+    zap_domain_page_one(d, gpfn << PAGE_SHIFT);
     return 0;
 }
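
zap_domain_page_one() tears a mapping down in the reverse order: the pte
is removed first, with an exchange that has acquire semantics (cf.
ptep_xchg; the comments above note that IA64 xchg has acquire semantics),
and only then is the m2p entry invalidated. A user-space model of that
teardown ordering (hypothetical names; not the Xen code):

    #include <stdint.h>
    #include <stdio.h>

    static uintptr_t pte = 0x1000;    /* models a live pte */
    static uint64_t m2p_entry = 42;   /* models the mfn's m2p slot */

    /* Teardown mirrors publication: the pte is removed first with an
     * acquire exchange, so the m2p clear below cannot be reordered
     * before it. */
    static void zap_mapping(void)
    {
        uintptr_t old = __atomic_exchange_n(&pte, 0, __ATOMIC_ACQUIRE);
        if (old == 0)
            return;                  /* nothing was mapped */
        m2p_entry = UINT64_MAX;      /* INVALID_M2P_ENTRY stand-in */
        /* a real implementation would flush TLBs and drop the page ref here */
    }

    int main(void)
    {
        zap_mapping();
        printf("pte=%#llx m2p=%#llx\n", (unsigned long long)pte,
               (unsigned long long)m2p_entry);
        return 0;
    }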
 
@@ -861,11 +934,13 @@ dom0vp_add_physmap(struct domain* d, uns
         error = -EINVAL;
         goto out1;
     }
+    BUG_ON(!mfn_valid(mfn));
     if (unlikely(get_page(mfn_to_page(mfn), rd) == 0)) {
         error = -EINVAL;
         goto out1;
     }
-
+    BUG_ON(page_get_owner(mfn_to_page(mfn)) == d &&
+           get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY);
     assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, flags);
     //don't update p2m table because this page belongs to rd, not d.
 out1:
@@ -891,10 +966,12 @@ create_grant_host_mapping(unsigned long 
         return GNTST_general_error;
     }
 
+    BUG_ON(!mfn_valid(mfn));
     page = mfn_to_page(mfn);
     ret = get_page(page, page_get_owner(page));
     BUG_ON(ret == 0);
-
+    BUG_ON(page_get_owner(mfn_to_page(mfn)) == d &&
+           get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY);
     assign_domain_page_replace(d, gpaddr, mfn, (flags & GNTMAP_readonly)?
                                               ASSIGN_readonly: ASSIGN_writable);
     return GNTST_okay;
@@ -937,7 +1014,6 @@ destroy_grant_host_mapping(unsigned long
 }
 
 // heavily depends on the struct page layout.
-//XXX SMP
 int
 steal_page_for_grant_transfer(struct domain *d, struct page_info *page)
 {
@@ -946,11 +1022,41 @@ steal_page_for_grant_transfer(struct dom
 #endif
     u32 _d, _nd;
     u64 x, nx, y;
-    unsigned long mpaddr = get_gpfn_from_mfn(page_to_mfn(page)) << PAGE_SHIFT;
+    unsigned long gpfn;
     struct page_info *new;
-
-    zap_domain_page_one(d, mpaddr, 0);
-    put_page(page);
+    unsigned long new_mfn;
+    int ret;
+    new = alloc_domheap_page(d);
+    if (new == NULL) {
+        DPRINTK("alloc_domheap_page() failed\n");
+        return -1;
+    }
+    // zero out pages for security reasons
+    clear_page(page_to_virt(new));
+    // assign_domain_page_cmpxchg_rel() has release semantics
+    // so smp_mb() isn't needed.
+
+    ret = get_page(new, d);
+    BUG_ON(ret == 0);
+
+    gpfn = get_gpfn_from_mfn(page_to_mfn(page));
+    if (gpfn == INVALID_M2P_ENTRY) {
+        free_domheap_page(new);
+        return -1;
+    }
+    new_mfn = page_to_mfn(new);
+    set_gpfn_from_mfn(new_mfn, gpfn);
+    // smp_mb() isn't needed because assign_domain_page_cmpxchg_rel()
+    // has release semantics.
+
+    ret = assign_domain_page_cmpxchg_rel(d, gpfn << PAGE_SHIFT, page, new,
+                                         ASSIGN_writable);
+    if (ret < 0) {
+        DPRINTK("assign_domain_page_cmpxchg_rel failed %d\n", ret);
+        set_gpfn_from_mfn(new_mfn, INVALID_M2P_ENTRY);
+        free_domheap_page(new);
+        return -1;
+    }
 
     spin_lock(&d->page_alloc_lock);
 
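The rewritten steal_page_for_grant_transfer() above swaps in a freshly
allocated page: the new page inherits the gpfn via set_gpfn_from_mfn(),
then the pte is replaced atomically with assign_domain_page_cmpxchg_rel();
if the compare-exchange loses, the m2p entry is rolled back and the page
freed. A compact user-space sketch of that compare-exchange-with-rollback
shape (hypothetical names, GCC atomics; a model, not the Xen code):

    #include <stdint.h>
    #include <stdio.h>

    #define INVALID_M2P  UINT64_MAX   /* INVALID_M2P_ENTRY stand-in */

    static uintptr_t pte;             /* models the pte word */
    static uint64_t m2p[16];          /* tiny m2p table, indexed by mfn */

    /* Try to replace old_pte_val with new_pte_val; new_mfn takes over gpfn.
     * Returns 0 on success, -1 after rolling back. */
    static int steal_replace(uint64_t gpfn, uint64_t new_mfn,
                             uintptr_t old_pte_val, uintptr_t new_pte_val)
    {
        uintptr_t expected = old_pte_val;

        m2p[new_mfn] = gpfn;   /* like set_gpfn_from_mfn(new_mfn, gpfn) */
        /* Release cmpxchg: the m2p update above becomes visible before
         * any CPU can observe the new pte. */
        if (__atomic_compare_exchange_n(&pte, &expected, new_pte_val, 0,
                                        __ATOMIC_RELEASE, __ATOMIC_ACQUIRE))
            return 0;

        m2p[new_mfn] = INVALID_M2P;   /* lost the race: undo the m2p entry */
        return -1;
    }

    int main(void)
    {
        pte = 0x1000;
        printf("replace %s\n",
               steal_replace(3, 5, 0x1000, 0x2000) ? "failed" : "ok");
        return 0;
    }
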
@@ -1006,14 +1112,6 @@ steal_page_for_grant_transfer(struct dom
     list_del(&page->list);
 
     spin_unlock(&d->page_alloc_lock);
-
-#if 1
-    //XXX Until net_rx_action() fix
-    // assign new page for this mpaddr
-    new = assign_new_domain_page(d, mpaddr);
-    BUG_ON(new == NULL);//XXX
-#endif
-
     return 0;
 }
 
@@ -1023,10 +1121,14 @@ guest_physmap_add_page(struct domain *d,
 {
     int ret;
 
+    BUG_ON(!mfn_valid(mfn));
     ret = get_page(mfn_to_page(mfn), d);
     BUG_ON(ret == 0);
+    BUG_ON(page_get_owner(mfn_to_page(mfn)) == d &&
+           get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY);
+    set_gpfn_from_mfn(mfn, gpfn);
+    smp_mb();
     assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, ASSIGN_writable);
-    set_gpfn_from_mfn(mfn, gpfn);//XXX SMP
 
     //BUG_ON(mfn != ((lookup_domain_mpa(d, gpfn << PAGE_SHIFT) & _PFN_MASK) >> PAGE_SHIFT));
 }
@@ -1036,7 +1138,7 @@ guest_physmap_remove_page(struct domain 
                           unsigned long mfn)
 {
     BUG_ON(mfn == 0);//XXX
-    zap_domain_page_one(d, gpfn << PAGE_SHIFT, 1);
+    zap_domain_page_one(d, gpfn << PAGE_SHIFT);
 }
 
 //XXX sledgehammer.
@@ -1216,7 +1318,7 @@ void put_page_type(struct page_info *pag
             nx |= PGT_va_mutable;
         }
     }
-    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
+    while ( unlikely((y = cmpxchg_rel(&page->u.inuse.type_info, x, nx)) != x) );
 }
 
 
@@ -1324,7 +1426,7 @@ int get_page_type(struct page_info *page
             }
         }
     }
-    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
+    while ( unlikely((y = cmpxchg_acq(&page->u.inuse.type_info, x, nx)) != x) );
 
     if ( unlikely(!(nx & PGT_validated)) )
     {
diff -r 6fdafeeb88bb -r bc76ad9d6270 xen/include/asm-ia64/linux-xen/asm/pgalloc.h
--- a/xen/include/asm-ia64/linux-xen/asm/pgalloc.h      Fri Jun 09 10:35:41 2006 -0600
+++ b/xen/include/asm-ia64/linux-xen/asm/pgalloc.h      Fri Jun 09 10:35:42 2006 -0600
@@ -78,6 +78,15 @@ static inline void pgtable_quicklist_fre
 }
 #endif
 
+#ifdef XEN
+#include <asm/pgtable.h>
+#ifdef __PAGETABLE_PUD_FOLDED
+# define pgd_cmpxchg_rel(mm, pgd, old_pud, new_pud)    ({(void)old_pud;1;})
+#else
+# error "implement pgd_cmpxchg_rel()!"
+#endif
+#endif
+
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
        return pgtable_quicklist_alloc();
@@ -94,6 +103,25 @@ pud_populate(struct mm_struct *mm, pud_t
        pud_val(*pud_entry) = __pa(pmd);
 }
 
+#ifdef XEN
+static inline int
+pud_cmpxchg_rel(struct mm_struct *mm, pud_t * pud_entry,
+               pmd_t * old_pmd, pmd_t * new_pmd)
+{
+#ifdef CONFIG_SMP
+       unsigned long r;
+       r = cmpxchg_rel(&pud_val(*pud_entry), __pa(old_pmd), __pa(new_pmd));
+       return (r == __pa(old_pmd));
+#else
+       if (pud_val(*pud_entry) == __pa(old_pmd)) {
+               pud_val(*pud_entry) = __pa(new_pmd);
+               return 1;
+       }
+       return 0;
+#endif
+}
+#endif
+
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
        return pgtable_quicklist_alloc();
@@ -119,6 +147,25 @@ pmd_populate_kernel(struct mm_struct *mm
 {
        pmd_val(*pmd_entry) = __pa(pte);
 }
+
+#ifdef XEN
+static inline int
+pmd_cmpxchg_kernel_rel(struct mm_struct *mm, pmd_t * pmd_entry,
+                      pte_t * old_pte, pte_t * new_pte)
+{
+#ifdef CONFIG_SMP
+       unsigned long r;
+       r = cmpxchg_rel(&pmd_val(*pmd_entry), __pa(old_pte), __pa(new_pte));
+       return (r == __pa(old_pte));
+#else
+       if (pmd_val(*pmd_entry) == __pa(old_pte)) {
+               pmd_val(*pmd_entry) = __pa(new_pte);
+               return 1;
+       }
+       return 0;
+#endif
+}
+#endif
 
 #ifndef XEN
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
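
With only three pagetable levels (__PAGETABLE_PUD_FOLDED), the pgd entry
never really changes, so pgd_cmpxchg_rel() above degenerates into a GCC
statement expression that discards old_pud and always reports success. A
standalone illustration of how such a ({ ...; 1; }) macro behaves (a
hypothetical example, not the Xen macro itself):

    #include <stdio.h>

    /* A statement expression evaluates its statements in order and
     * yields the value of the last one; (void)(old) silences "unused
     * variable" warnings. */
    #define folded_cmpxchg(old)   ({ (void)(old); 1; })

    int main(void)
    {
        int old = 7;
        if (folded_cmpxchg(old))
            printf("always succeeds: this level doesn't really exist\n");
        return 0;
    }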
diff -r 6fdafeeb88bb -r bc76ad9d6270 xen/include/asm-ia64/linux-xen/asm/pgtable.h
--- a/xen/include/asm-ia64/linux-xen/asm/pgtable.h      Fri Jun 09 10:35:41 2006 -0600
+++ b/xen/include/asm-ia64/linux-xen/asm/pgtable.h      Fri Jun 09 10:35:42 2006 -0600
@@ -208,6 +208,19 @@ ia64_phys_addr_valid (unsigned long addr
  */
 #define set_pte(ptep, pteval)  (*(ptep) = (pteval))
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+#ifdef XEN
+static inline void
+set_pte_rel(pte_t* ptep, pte_t pteval)
+{
#ifdef CONFIG_SMP
+       asm volatile ("st8.rel [%0]=%1" ::
+                     "r"(&pte_val(*ptep)), "r"(pte_val(pteval)) :
+                     "memory");
+#else
+       set_pte(ptep, pteval);
+#endif
+}
+#endif
 
 #define RGN_SIZE       (1UL << 61)
 #define RGN_KERNEL     7
@@ -401,6 +414,7 @@ ptep_get_and_clear(struct mm_struct *mm,
 #endif
 }
 
+#ifdef XEN
 static inline pte_t
 ptep_xchg(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t npte)
 {
@@ -412,6 +426,23 @@ ptep_xchg(struct mm_struct *mm, unsigned
        return pte;
 #endif
 }
+
+static inline pte_t
+ptep_cmpxchg_rel(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+                pte_t old_pte, pte_t new_pte)
+{
+#ifdef CONFIG_SMP
+       return __pte(cmpxchg_rel(&pte_val(*ptep),
+                                pte_val(old_pte), pte_val(new_pte)));
+#else
+       pte_t pte = *ptep;
+       if (pte_val(pte) == pte_val(old_pte)) {
		set_pte(ptep, new_pte);
+       }
+       return pte;
+#endif
+}
+#endif
 
 #ifndef XEN
 static inline void
diff -r 6fdafeeb88bb -r bc76ad9d6270 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Fri Jun 09 10:35:41 2006 -0600
+++ b/xen/include/asm-ia64/mm.h Fri Jun 09 10:35:42 2006 -0600
@@ -160,7 +160,7 @@ static inline void put_page(struct page_
        x = y;
        nx = x - 1;
     }
-    while (unlikely((y = cmpxchg(&page->count_info, x, nx)) != x));
+    while (unlikely((y = cmpxchg_rel(&page->count_info, x, nx)) != x));
 
     if (unlikely((nx & PGC_count_mask) == 0))
        free_domheap_page(page);
@@ -186,7 +186,7 @@ static inline int get_page(struct page_i
            return 0;
        }
     }
-    while(unlikely((y = cmpxchg((u64*)&page->count_info, x, nx)) != x));
+    while(unlikely((y = cmpxchg_acq((u64*)&page->count_info, x, nx)) != x));
     return 1;
 }
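
The count_info changes above give the reference count the conventional
pairing: taking a reference uses cmpxchg_acq, so subsequent accesses to
the page cannot be reordered before the count is grabbed, while dropping
one uses cmpxchg_rel, so prior accesses complete before the decrement that
may free the page. A user-space model with GCC atomics (hypothetical
names; a sketch of the pairing, not the Xen code):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t count_info = 1;

    static int get_page_model(void)
    {
        uint64_t x, nx, y = __atomic_load_n(&count_info, __ATOMIC_RELAXED);
        do {
            x = y;
            if (x == 0)          /* page already freed: refuse */
                return 0;
            nx = x + 1;
            /* acquire: accesses to the page stay after the count grab */
        } while (!__atomic_compare_exchange_n(&count_info, &y, nx, 0,
                                              __ATOMIC_ACQUIRE,
                                              __ATOMIC_RELAXED));
        return 1;
    }

    static void put_page_model(void)
    {
        uint64_t x, nx, y = __atomic_load_n(&count_info, __ATOMIC_RELAXED);
        do {
            x = y;
            nx = x - 1;
            /* release: accesses to the page complete before the drop */
        } while (!__atomic_compare_exchange_n(&count_info, &y, nx, 0,
                                              __ATOMIC_RELEASE,
                                              __ATOMIC_RELAXED));
        if (nx == 0)
            printf("last reference dropped; would free the page here\n");
    }

    int main(void)
    {
        if (get_page_model())
            put_page_model();
        put_page_model();   /* drops the initial reference */
        return 0;
    }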
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
