Pasi, to validate the theory that you are seeing races between unpinning
and kmap_atomic_pte, can you give this big-ugly-stick approach a go?
It just serializes every unpin against every kmap_atomic_pte with a
single global spinlock, so if the problem goes away with it applied,
the race theory is confirmed.
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 1729178..beeb8e8 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1145,9 +1145,12 @@ static int xen_unpin_page(struct mm_struct *mm, struct page *page,
 	return 0;		/* never need to flush on unpin */
 }
 
+static DEFINE_SPINLOCK(hack_lock); /* Hack to sync unpin against kmap_atomic_pte */
+
 /* Release a pagetables pages back as normal RW */
 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
 {
+	spin_lock(&hack_lock);
 	xen_mc_batch();
 
 	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
@@ -1173,6 +1176,7 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
 	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
 
 	xen_mc_issue(0);
+	spin_unlock(&hack_lock);
 }
 
 static void xen_pgd_unpin(struct mm_struct *mm)
@@ -1521,6 +1525,9 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
 {
 	pgprot_t prot = PAGE_KERNEL;
+	void *ret;
+
+	spin_lock(&hack_lock);
 
 	if (PagePinned(page))
 		prot = PAGE_KERNEL_RO;
@@ -1530,7 +1537,11 @@ static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
 		       page_to_pfn(page), type,
 		       (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
 
-	return kmap_atomic_prot(page, type, prot);
+	ret = kmap_atomic_prot(page, type, prot);
+
+	spin_unlock(&hack_lock);
+
+	return ret;
 }
 #endif
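
For anyone following along, here is a minimal userspace sketch of the
check-then-act race the hack_lock is meant to close (my own illustration,
not kernel code; unpin_thread, map_thread and the "pinned" flag are
made-up stand-ins): one thread samples the pinned state and acts on it
while the other is part-way through the unpin transition, and taking one
coarse lock in both paths, as the patch does, makes the sample-and-act
atomic with respect to the transition.

/*
 * Userspace analogue of the suspected race.  Without hack_lock,
 * map_thread could sample "pinned" while unpin_thread is mid-way
 * through clearing it and pick the wrong protection.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t hack_lock = PTHREAD_MUTEX_INITIALIZER;
static bool pinned = true;		/* stand-in for PagePinned(page) */

/* analogue of __xen_pgd_unpin: a multi-step transition out of "pinned" */
static void *unpin_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&hack_lock);
	/* ... walk the pagetable, issue the unpin hypercalls ... */
	pinned = false;
	pthread_mutex_unlock(&hack_lock);
	return NULL;
}

/* analogue of xen_kmap_atomic_pte: pick a protection based on the flag */
static void *map_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&hack_lock);
	const char *prot = pinned ? "RO" : "RW";
	/* ... establish the mapping with the chosen protection ... */
	printf("mapped %s\n", prot);
	pthread_mutex_unlock(&hack_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, unpin_thread, NULL);
	pthread_create(&b, NULL, map_thread, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

In the kernel the lock has to be a spinlock rather than a mutex, since
kmap_atomic runs with preemption disabled and cannot sleep; the single
global lock serializing all unpins against all kmaps is of course why
this is a big ugly stick and not a real fix.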