# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1181035451 -32400
# Node ID cb024947fa0186c5e08a71ecd4c0550967c5509a
# Parent  3d3656528c8fa62896b744d2441033ff7d72a122
support dma tracking for swiotlb and xen_dma
PATCHNAME: dma_tracking_swiotlb_xen_dma

Signed-off-by: Isaku Yamahata

diff -r 3d3656528c8f -r cb024947fa01 linux-2.6-xen-sparse/arch/ia64/xen/swiotlb.c
--- a/linux-2.6-xen-sparse/arch/ia64/xen/swiotlb.c	Tue Jun 05 22:43:38 2007 +0900
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/swiotlb.c	Tue Jun 05 18:24:11 2007 +0900
@@ -33,6 +33,7 @@
 #include
 
 #ifdef CONFIG_XEN
+#include
 /*
  * What DMA mask should Xen use to remap the bounce buffer pool?  Most
  * reports seem to indicate 30 bits is sufficient, except maybe for old
@@ -597,7 +598,8 @@ dma_addr_t
 dma_addr_t
 swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 {
-	unsigned long dev_addr = virt_to_bus(ptr);
+	unsigned long dev_addr = gnttab_dma_map_page(virt_to_page(ptr)) +
+				 offset_in_page(ptr);
 	void *map;
 
 	BUG_ON(dir == DMA_NONE);
@@ -613,6 +615,7 @@ swiotlb_map_single(struct device *hwdev,
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
 	 */
+	gnttab_dma_unmap_page(dev_addr);
 	map = map_single(hwdev, ptr, size, dir);
 	if (!map) {
 		swiotlb_full(hwdev, size, dir, 1);
@@ -672,8 +675,11 @@ swiotlb_unmap_single(struct device *hwde
 	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		unmap_single(hwdev, dma_addr, size, dir);
-	else if (dir == DMA_FROM_DEVICE)
-		mark_clean(dma_addr, size);
+	else {
+		gnttab_dma_unmap_page(dev_addr);
+		if (dir == DMA_FROM_DEVICE)
+			mark_clean(dma_addr, size);
+	}
 }
 
 /*
@@ -774,9 +780,12 @@ swiotlb_map_sg(struct device *hwdev, str
 
 	for (i = 0; i < nelems; i++, sg++) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
-		dev_addr = virt_to_bus(addr);
+		dev_addr = gnttab_dma_map_page(virt_to_page(addr)) +
+			   offset_in_page(addr);
 		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
-			void *map = map_single(hwdev, addr, sg->length, dir);
+			void *map;
+			gnttab_dma_unmap_page(dev_addr);
+			map = map_single(hwdev, addr, sg->length, dir);
 			sg->dma_address = virt_to_bus(map);
 			if (!map) {
 				/* Don't panic here, we expect map_sg users
@@ -808,8 +817,12 @@ swiotlb_unmap_sg(struct device *hwdev, s
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
 			unmap_single(hwdev, (void *) bus_to_virt(sg->dma_address), sg->dma_length, dir);
-		else if (dir == DMA_FROM_DEVICE)
-			mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+		else {
+			gnttab_dma_unmap_page(sg->dma_address);
+			if (dir == DMA_FROM_DEVICE)
+				mark_clean(SG_ENT_VIRT_ADDRESS(sg),
+					   sg->dma_length);
+		}
 }
 
 /*
diff -r 3d3656528c8f -r cb024947fa01 linux-2.6-xen-sparse/arch/ia64/xen/xen_dma.c
--- a/linux-2.6-xen-sparse/arch/ia64/xen/xen_dma.c	Tue Jun 05 22:43:38 2007 +0900
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/xen_dma.c	Tue Jun 05 18:24:11 2007 +0900
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include
 
 #define IOMMU_BUG_ON(test)	\
 do {	\
@@ -57,7 +58,7 @@ xen_map_sg(struct device *hwdev, struct
 	int i;
 
 	for (i = 0 ; i < nents ; i++) {
-		sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset;
+		sg[i].dma_address = gnttab_dma_map_page(sg[i].page) + sg[i].offset;
 		sg[i].dma_length  = sg[i].length;
 
 		IOMMU_BUG_ON(address_needs_mapping(hwdev, sg[i].dma_address));
@@ -71,6 +72,9 @@ xen_unmap_sg(struct device *hwdev, struc
 xen_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
 {
+	int i;
+	for (i = 0; i < nents; i++)
+		gnttab_dma_unmap_page(page_to_bus(sg[i].page));
 }
 EXPORT_SYMBOL(xen_unmap_sg);
@@ -128,7 +132,7 @@ xen_map_single(struct device *dev, void
 xen_map_single(struct device *dev, void *ptr, size_t size, int direction)
 {
-	dma_addr_t dma_addr = virt_to_bus(ptr);
+	dma_addr_t dma_addr = gnttab_dma_map_page(virt_to_page(ptr));
 
 	IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
 	IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
@@ -141,5 +145,6 @@ xen_unmap_single(struct device *dev, dma
 xen_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 int direction)
 {
+	gnttab_dma_unmap_page(dma_addr);
 }
 EXPORT_SYMBOL(xen_unmap_single);
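
For reviewers, the pattern applied throughout is: every place that previously derived a bus address with virt_to_bus()/page_to_bus() now goes through gnttab_dma_map_page(), and each such mapping is balanced by gnttab_dma_unmap_page() on the unmap path (or immediately, when the buffer ends up being bounced through the swiotlb pool instead). The sketch below is illustrative only and not part of the changeset; the header name is an assumption, and my_map_single()/my_unmap_single() are hypothetical wrappers.

/*
 * Illustrative sketch only -- not part of the changeset.  Shows the
 * gnttab_dma_map_page()/gnttab_dma_unmap_page() pairing the patch
 * introduces on the map/unmap paths.
 */
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <xen/gnttab.h>		/* assumed header for gnttab_dma_*_page() */

/* Hypothetical wrapper: produce a bus address the grant-table code tracks. */
static dma_addr_t my_map_single(void *ptr)
{
	/* gnttab_dma_map_page() replaces virt_to_bus(); the page offset
	 * still has to be added by hand, as in swiotlb_map_single(). */
	return gnttab_dma_map_page(virt_to_page(ptr)) + offset_in_page(ptr);
}

/* Hypothetical wrapper: every successful map above must be balanced here. */
static void my_unmap_single(dma_addr_t dma_addr)
{
	gnttab_dma_unmap_page(dma_addr);
}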