On Wed, Jun 06, 2007 at 06:37:36PM +1000, Herbert Xu wrote:
>
> > While DMA might be going on, the grant-table-mapped page is unmapped here.
> > Since the page isn't referenced by this backend domain from the Xen VMM's
> > point of view, the frontend domain can be destroyed (e.g. by xm destroy).
> > So the page can be freed and reused for another purpose even though
> > DMA is still in progress.
> > The new user of the page (probably another domain) can be upset.
> > Is this true?
>
> Good catch. If the frontend freed the page we'd be in trouble. I suppose
> we need a better solution.
Here's how we can resolve this race condition using a seqlock. Please
be warned that I haven't even compiled it yet. I'll post this again
once I've tested it tomorrow.
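
In case the seqlock idiom isn't familiar, the shape of it is roughly the
following (a standalone sketch with made-up names, not code from the tree):

	#include <linux/seqlock.h>

	static DEFINE_SEQLOCK(example_lock);
	static unsigned long example_state;	/* hypothetical shared state */

	/* Writer side: mutate the shared state under the seqlock. */
	static void example_update(unsigned long new_val)
	{
		write_seqlock(&example_lock);
		example_state = new_val;
		write_sequnlock(&example_lock);
	}

	/* Reader side: lockless read, retried if a writer intervened. */
	static unsigned long example_read(void)
	{
		unsigned long val;
		unsigned int seq;

		do {
			seq = read_seqbegin(&example_lock);
			val = example_state;
		} while (read_seqretry(&example_lock, seq));

		return val;
	}

In the patch below, the writer is gnttab_copy_grant_page and the reader is
gnttab_dma_map_page, except that the reader also publishes _mapcount inside
the retry loop so the two sides cannot miss each other.
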
Cheers,
--
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
diff -r bd3d6b4c52ec linux-2.6-xen-sparse/drivers/xen/core/gnttab.c
--- a/linux-2.6-xen-sparse/drivers/xen/core/gnttab.c	Fri Jun 01 14:50:52 2007 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/core/gnttab.c	Wed Jun 06 22:32:55 2007 +1000
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/seqlock.h>
#include <xen/interface/xen.h>
#include <xen/gnttab.h>
#include <asm/pgtable.h>
@@ -62,6 +63,8 @@ static struct grant_entry *shared;
static struct grant_entry *shared;
static struct gnttab_free_callback *gnttab_free_callback_list;
+
+static DEFINE_SEQLOCK(gnttab_dma_lock);
static int gnttab_expand(unsigned int req_entries);
@@ -492,11 +495,6 @@ static int gnttab_map(unsigned int start
 static void gnttab_page_free(struct page *page)
 {
-	if (page->mapping) {
-		put_page((struct page *)page->mapping);
-		page->mapping = NULL;
-	}
-
 	ClearPageForeign(page);
 	gnttab_reset_grant_page(page);
 	put_page(page);
@@ -538,8 +536,30 @@ int gnttab_copy_grant_page(grant_ref_t r
 	mfn = pfn_to_mfn(pfn);
 	new_mfn = virt_to_mfn(new_addr);
+	write_seqlock(&gnttab_dma_lock);
+
+	/* Has the page been DMA-mapped? */
+	if (unlikely(page_mapped(page))) {
+		write_sequnlock(&gnttab_dma_lock);
+		put_page(new_page);
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (!xen_feature(XENFEAT_auto_translated_physmap))
+		set_phys_to_machine(pfn, new_mfn);
+
+	gnttab_set_replace_op(&unmap, (unsigned long)addr,
+			      (unsigned long)new_addr, ref);
+
+	err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
+					&unmap, 1);
+	BUG_ON(err);
+	BUG_ON(unmap.status);
+
+	write_sequnlock(&gnttab_dma_lock);
+
 	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-		set_phys_to_machine(pfn, new_mfn);
 		set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);
 		mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
@@ -548,14 +568,6 @@ int gnttab_copy_grant_page(grant_ref_t r
 		BUG_ON(err);
 	}
-	gnttab_set_replace_op(&unmap, (unsigned long)addr,
-			      (unsigned long)new_addr, ref);
-
-	err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
-					&unmap, 1);
-	BUG_ON(err);
-	BUG_ON(unmap.status);
-
 	new_page->mapping = page->mapping;
 	new_page->index = page->index;
 	set_bit(PG_foreign, &new_page->flags);
@@ -564,22 +576,9 @@ int gnttab_copy_grant_page(grant_ref_t r
 	SetPageForeign(page, gnttab_page_free);
 	page->mapping = NULL;
-	/*
-	 * Ensure that there is a barrier between setting the p2m entry
-	 * and checking the map count. See gnttab_dma_map_page.
-	 */
-	smp_mb();
-
-	/* Has the page been DMA-mapped? */
-	if (unlikely(page_mapped(page))) {
-		err = -EBUSY;
-		page->mapping = (void *)new_page;
-	}
-
 out:
 	put_page(page);
 	return err;
-
 }
EXPORT_SYMBOL(gnttab_copy_grant_page);
@@ -593,23 +592,27 @@ EXPORT_SYMBOL(gnttab_copy_grant_page);
*/
 maddr_t gnttab_dma_map_page(struct page *page)
 {
-	maddr_t mfn = pfn_to_mfn(page_to_pfn(page)), mfn2;
+	maddr_t maddr = page_to_bus(page);
+	unsigned int seq;
 	if (!PageForeign(page))
-		return mfn << PAGE_SHIFT;
-
-	if (mfn_to_local_pfn(mfn) < max_mapnr)
-		return mfn << PAGE_SHIFT;
-
-	atomic_set(&page->_mapcount, 0);
-
-	/* This barrier corresponds to the one in gnttab_copy_grant_page. */
-	smp_mb();
-
-	/* Has this page been copied in the mean time? */
-	mfn2 = pfn_to_mfn(page_to_pfn(page));
-
-	return mfn2 << PAGE_SHIFT;
+		return maddr;
+
+	do {
+		seq = read_seqbegin(&gnttab_dma_lock);
+		maddr = page_to_bus(page);
+
+		/* Has it become a local MFN? */
+		if (pfn_valid(mfn_to_local_pfn(maddr >> PAGE_SHIFT)))
+			break;
+
+		atomic_set(&page->_mapcount, 0);
+
+		/* Make _mapcount visible before read_seqretry. */
+		smp_mb();
+	} while (unlikely(read_seqretry(&gnttab_dma_lock, seq)));
+
+	return maddr;
 }
int gnttab_resume(void)
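
For reference, the reason atomic_set(&page->_mapcount, 0) is enough to mark
the page as DMA-mapped is that page_mapped() in kernels of this vintage is
just a sign check on _mapcount, roughly (paraphrased from include/linux/mm.h;
the exact wording may differ between trees):

	/*
	 * _mapcount starts at -1; a page counts as "mapped" once it is
	 * 0 or above, which is exactly what gnttab_dma_map_page sets
	 * before re-checking the sequence count.
	 */
	static inline int page_mapped(struct page *page)
	{
		return atomic_read(&page->_mapcount) >= 0;
	}

The idea is that when the two paths race, either gnttab_copy_grant_page sees
page_mapped() and backs off with -EBUSY, or gnttab_dma_map_page notices the
sequence change, retries, and picks up the new machine address.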