On Tue, May 29, 2007 at 02:55:28PM +1000, Herbert Xu wrote:
> Hi:
Hi Herbert.
I'm now trying to adopt unmap_and_replace on ia64, and I've hit some
issues related to dma tracking.

This patch treats maddr_t the same as dma_addr_t, but that doesn't
hold in auto-translated physmap mode (i.e. on IA64). The grant table
api shouldn't be involved in dma address conversions, I think; the
conversion should be done in the dma api implementation. That way the
dma api implementation can minimize the number of conversions. On
ia64 a dma address conversion requires a hypercall, so we want to
avoid it as much as possible.

How about introducing something like the following and moving
gnttab_dma_map_page() and gnttab_dma_unmap_page() into an x86
(or dma api implementation) specific file?
void __gnttab_dma_map_page(struct page *page)
{
	atomic_set(&page->_mapcount, 0);
	/* This barrier corresponds to the one in gnttab_copy_grant_page(). */
	smp_mb();
}

void __gnttab_dma_unmap_page(struct page *page)
{
	/* Nothing to do in the generic part. */
}
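
For concreteness, the x86 (dma api implementation) side could then
keep the address conversion next to its dma code. Just a rough,
untested sketch of what I have in mind; the location is hypothetical
and the helpers (pfn_to_mfn(), bus_to_virt()) are whatever the x86
implementation already has at hand:

/* x86-specific, e.g. somewhere under arch/i386 (hypothetical location) */
static inline maddr_t gnttab_dma_map_page(struct page *page)
{
	__gnttab_dma_map_page(page);
	/* on x86 the pfn -> mfn lookup is a table access, no hypercall */
	return (maddr_t)pfn_to_mfn(page_to_pfn(page)) << PAGE_SHIFT;
}

static inline void gnttab_dma_unmap_page(maddr_t maddr)
{
	__gnttab_dma_unmap_page(virt_to_page(bus_to_virt(maddr)));
}

The corresponding ia64 wrappers could then do the hypercall-based
conversion only where it is really needed.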
There are some comments below.
> diff -r 3ef0510e44d0 linux-2.6-xen-sparse/arch/i386/kernel/pci-dma-xen.c
> --- a/linux-2.6-xen-sparse/arch/i386/kernel/pci-dma-xen.c	Tue May 08 10:21:23 2007 +0100
> +++ b/linux-2.6-xen-sparse/arch/i386/kernel/pci-dma-xen.c	Wed May 16 22:31:20 2007 +1000
[snip]
> @@ -326,7 +335,8 @@ dma_map_single(struct device *dev, void
> 	if (swiotlb) {
> 		dma = swiotlb_map_single(dev, ptr, size, direction);
> 	} else {
> -		dma = virt_to_bus(ptr);
> +		dma = gnttab_dma_map_page(virt_to_page(ptr)) +
> +		      offset_in_page(ptr);
> 		IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
> 		IOMMU_BUG_ON(address_needs_mapping(dev, dma));
> 	}
> @@ -344,6 +354,8 @@ dma_unmap_single(struct device *dev, dma
> 		BUG();
> 	if (swiotlb)
> 		swiotlb_unmap_single(dev, dma_addr, size, direction);
> +	else
> +		gnttab_dma_unmap_page(dma_addr);
> }
> EXPORT_SYMBOL(dma_unmap_single);
Is it assumed that size <= PAGE_SIZE, or should we iterate over the
pointer in PAGE_SIZE steps? Am I missing anything else?
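Just to make the second option concrete, this purely hypothetical,
untested loop (in the context of dma_map_single()'s else branch) is
what I mean by iterating:

	/* hypothetical: mark every page covered by [ptr, ptr + size) */
	void *va;

	dma = gnttab_dma_map_page(virt_to_page(ptr)) + offset_in_page(ptr);
	for (va = ptr + PAGE_SIZE - offset_in_page(ptr); va < ptr + size;
	     va += PAGE_SIZE)
		gnttab_dma_map_page(virt_to_page(va));
		/* return value ignored for the tail pages */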
> diff -r 3ef0510e44d0 linux-2.6-xen-sparse/arch/i386/kernel/swiotlb.c
> --- a/linux-2.6-xen-sparse/arch/i386/kernel/swiotlb.c	Tue May 08 10:21:23 2007 +0100
> +++ b/linux-2.6-xen-sparse/arch/i386/kernel/swiotlb.c	Wed May 16 22:31:20 2007 +1000
...
> @@ -468,7 +467,8 @@ dma_addr_t
> dma_addr_t
> swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
> {
> -	dma_addr_t dev_addr = virt_to_bus(ptr);
> +	dma_addr_t dev_addr = gnttab_dma_map_page(virt_to_page(ptr)) +
> +			      offset_in_page(ptr);
> 	void *map;
> 	struct phys_addr buffer;
>
> @@ -486,6 +486,7 @@ swiotlb_map_single(struct device *hwdev,
> 	/*
> 	 * Oh well, have to allocate and map a bounce buffer.
> 	 */
> +	gnttab_dma_unmap_page(dev_addr);
> 	buffer.page = virt_to_page(ptr);
> 	buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
> 	map = map_single(hwdev, buffer, size, dir);
> @@ -513,6 +514,8 @@ swiotlb_unmap_single(struct device *hwde
> 	BUG_ON(dir == DMA_NONE);
> 	if (in_swiotlb_aperture(dev_addr))
> 		unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
> +	else
> +		gnttab_dma_unmap_page(dev_addr);
> }
Ditto.
> +/*
> + * Must not be called with IRQs off. This should only be used on the
> + * slow path.
> + *
> + * Copy a foreign granted page to local memory.
> + */
> +int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep)
> +{
> +	struct gnttab_unmap_and_replace unmap;
> +	mmu_update_t mmu;
> +	struct page *page;
> +	struct page *new_page;
> +	void *new_addr;
> +	void *addr;
> +	paddr_t pfn;
> +	maddr_t mfn;
> +	maddr_t new_mfn;
> +	int err;
> +
> +	page = *pagep;
> +	if (!get_page_unless_zero(page))
> +		return -ENOENT;
> +
> +	err = -ENOMEM;
> +	new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
> +	if (!new_page)
> +		goto out;
> +
> +	new_addr = page_address(new_page);
> +	addr = page_address(page);
> +	memcpy(new_addr, addr, PAGE_SIZE);
> +
> +	pfn = page_to_pfn(page);
> +	mfn = pfn_to_mfn(pfn);
> +	new_mfn = virt_to_mfn(new_addr);
> +
> +	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
> +		set_phys_to_machine(pfn, new_mfn);
> +		set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);
> +
> +		mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
> +		mmu.val = pfn;
> +		err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF);
> +		BUG_ON(err);
> +	}
> +
> +	gnttab_set_replace_op(&unmap, (unsigned long)addr,
> +			      (unsigned long)new_addr, ref);
> +
> +	err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
> +					&unmap, 1);
> +	BUG_ON(err);
> +	BUG_ON(unmap.status);
> +
> +	new_page->mapping = page->mapping;
> +	new_page->index = page->index;
> +	set_bit(PG_foreign, &new_page->flags);
> +	*pagep = new_page;
> +
> +	SetPageForeign(page, gnttab_page_free);
> +	page->mapping = NULL;
> +
> +	/*
> +	 * Ensure that there is a barrier between setting the p2m entry
> +	 * and checking the map count. See gnttab_dma_map_page.
> +	 */
> +	smp_mb();
> +
> +	/* Has the page been DMA-mapped? */
> +	if (unlikely(page_mapped(page))) {
> +		err = -EBUSY;
> +		page->mapping = (void *)new_page;
The grant-table-mapped page is unmapped here while DMA might still be
in progress. Since, from the xen VMM's point of view, the page is no
longer referenced by this backend domain, the frontend domain can be
destroyed (e.g. by xm destroy). So the page can be freed and reused
for another purpose even though the DMA is still being processed, and
the new user of the page (probably another domain) can be upset.
Is this true? Such a case is very rare and it won't be an issue in
practice; I just want to confirm my understanding.
> +	}
> +
> +out:
> +	put_page(page);
> +	return err;
> +
> +}
> +EXPORT_SYMBOL(gnttab_copy_grant_page);
--
yamahata