# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID f5c64bb5ed7433e2b0ee698d982effb7119866b3
# Parent ae9fd9d9628fc9e76e3b7681fa75c1280408bc66
Fix dma_map_single to work correctly with multi-page buffers.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
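
Background: the old inline dma_map_single simply returned virt_to_bus(ptr),
which is only valid while the buffer stays within one machine page, since
under Xen virtually contiguous kernel memory is not guaranteed to be
machine-contiguous across page boundaries. The new implementation bounces
any buffer that crosses a page boundary through a coherent allocation. As an
illustration (not part of the changeset), a minimal userspace sketch of the
page-straddle test the mapping path relies on, assuming the i386 4KB page
size:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Nonzero iff [ptr, ptr + size) lies within a single page. */
static int fits_in_one_page(unsigned long ptr, unsigned long size)
{
	return ((ptr & ~PAGE_MASK) + size) <= PAGE_SIZE;
}

int main(void)
{
	/* 256 bytes starting 64 bytes before a page boundary straddle
	 * two pages, so dma_map_single must bounce them. */
	printf("%d\n", fits_in_one_page(PAGE_SIZE - 64, 256)); /* 0 */
	/* A whole page starting on a page boundary fits exactly. */
	printf("%d\n", fits_in_one_page(0, PAGE_SIZE));        /* 1 */
	return 0;
}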
diff -r ae9fd9d9628f -r f5c64bb5ed74 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c	Fri Jul 1 15:42:58 2005
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c	Fri Jul 8 12:55:56 2005
@@ -232,4 +232,121 @@
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+static LIST_HEAD(dma_map_head);
+static DEFINE_SPINLOCK(dma_map_lock);
+struct dma_map_entry {
+	struct list_head list;
+	dma_addr_t dma;
+	void *bounce, *host;
+};
+
+dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+	       enum dma_data_direction direction)
+{
+	struct dma_map_entry *ent;
+	void *bnc;
+	dma_addr_t dma;
+	unsigned long flags;
+
+	BUG_ON(direction == DMA_NONE);
+
+	/*
+	 * Even if the buffer is smaller than a page it may still straddle a
+	 * page boundary, so take the buffer's start offset into account. All
+	 * other entry points are conservative and always search the dma_map
+	 * list when it is non-empty.
+	 */
+	if ((((unsigned int)ptr & ~PAGE_MASK) + size) <= PAGE_SIZE) {
+		/* Contiguous within one page: map in place. */
+		dma = virt_to_bus(ptr);
+	} else {
+		/* Straddles a page boundary: bounce via a coherent buffer. */
+		BUG_ON((bnc = dma_alloc_coherent(dev, size, &dma, 0)) == NULL);
+		BUG_ON((ent = kmalloc(sizeof(*ent), GFP_KERNEL)) == NULL);
+		if (direction != DMA_FROM_DEVICE)
+			memcpy(bnc, ptr, size);
+		ent->dma    = dma;
+		ent->bounce = bnc;
+		ent->host   = ptr;
+		spin_lock_irqsave(&dma_map_lock, flags);
+		list_add(&ent->list, &dma_map_head);
+		spin_unlock_irqrestore(&dma_map_lock, flags);
+	}
+
+	flush_write_buffers();
+	return dma;
+}
+EXPORT_SYMBOL(dma_map_single);
+
+void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		 enum dma_data_direction direction)
+{
+	struct dma_map_entry *ent;
+	unsigned long flags;
+
+	BUG_ON(direction == DMA_NONE);
+
+	/* Fast-path check: are there any multi-page DMA mappings? */
+	if (!list_empty(&dma_map_head)) {
+		spin_lock_irqsave(&dma_map_lock, flags);
+		list_for_each_entry ( ent, &dma_map_head, list ) {
+			if (ent->dma == dma_addr) {
+				list_del(&ent->list);
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&dma_map_lock, flags);
+		if (&ent->list != &dma_map_head) {
+			if (direction != DMA_TO_DEVICE)
+				memcpy(ent->host, ent->bounce, size);
+			dma_free_coherent(dev, size, ent->bounce, ent->dma);
+			kfree(ent);
+		}
+	}
+}
+EXPORT_SYMBOL(dma_unmap_single);
+
+void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction direction)
+{
+	struct dma_map_entry *ent;
+	unsigned long flags;
+
+	/* Fast-path check: are there any multi-page DMA mappings? */
+	if (!list_empty(&dma_map_head)) {
+		spin_lock_irqsave(&dma_map_lock, flags);
+		list_for_each_entry ( ent, &dma_map_head, list )
+			if (ent->dma == dma_handle)
+				break;
+		spin_unlock_irqrestore(&dma_map_lock, flags);
+		if (&ent->list != &dma_map_head)
+			if (direction != DMA_TO_DEVICE)
+				memcpy(ent->host, ent->bounce, size);
+	}
+}
+EXPORT_SYMBOL(dma_sync_single_for_cpu);
+
+void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+			   enum dma_data_direction direction)
+{
+	struct dma_map_entry *ent;
+	unsigned long flags;
+
+	/* Fast-path check: are there any multi-page DMA mappings? */
+	if (!list_empty(&dma_map_head)) {
+		spin_lock_irqsave(&dma_map_lock, flags);
+		list_for_each_entry ( ent, &dma_map_head, list )
+			if (ent->dma == dma_handle)
+				break;
+		spin_unlock_irqrestore(&dma_map_lock, flags);
+		if (&ent->list != &dma_map_head)
+			if (direction != DMA_FROM_DEVICE)
+				memcpy(ent->bounce, ent->host, size);
+	}
+
+	flush_write_buffers();
+}
+EXPORT_SYMBOL(dma_sync_single_for_device);
+
#endif
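
The bounce path is transparent to callers of the DMA API. A hypothetical
driver fragment (illustrative only; example_tx is not from this changeset)
showing the intended calling pattern:

/* Transmit a buffer that may cross a page boundary. */
void example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/* May silently allocate a bounce buffer and copy 'buf' into it. */
	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... program the device with 'dma', wait for completion ... */

	/* Looks up and frees the bounce buffer, if one was created. For
	 * DMA_FROM_DEVICE mappings this also copies data back to 'buf'. */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
}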
diff -r ae9fd9d9628f -r f5c64bb5ed74 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h	Fri Jul 1 15:42:58 2005
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h	Fri Jul 8 12:55:56 2005
@@ -16,21 +16,13 @@
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
-static inline dma_addr_t
+extern dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- flush_write_buffers();
- return virt_to_bus(ptr);
-}
+ enum dma_data_direction direction);
-static inline void
+extern void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
+ enum dma_data_direction direction);
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
@@ -73,24 +65,20 @@
BUG_ON(direction == DMA_NONE);
}
-static inline void
+extern void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
-}
+ enum dma_data_direction direction);
-static inline void
+extern void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- flush_write_buffers();
-}
+ enum dma_data_direction direction);
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
+ dma_sync_single_for_cpu(dev, dma_handle, size, direction);
}
static inline void
@@ -98,7 +86,7 @@
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
- flush_write_buffers();
+ dma_sync_single_for_device(dev, dma_handle, size, direction);
}
static inline void
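
Note that the range variants above delegate to the plain sync routines with
the range size but ignore the offset, so a bounced mapping has its leading
'size' bytes synced rather than the exact requested window. A hypothetical
fragment (not from the changeset; example_rx_cycle is illustrative) showing
the overall sync pattern for a long-lived receive mapping:

void example_rx_cycle(struct device *dev, void *buf, size_t len)
{
	/* Device repeatedly writes into 'buf' through one mapping. */
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* ... first device transfer completes ... */

	/* Copies bounce -> 'buf' for a bounced DMA_FROM_DEVICE mapping,
	 * so the CPU may now read the received data. */
	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

	/* Hand the mapping back to the device for the next transfer. */
	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);

	/* ... more transfers ... */

	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
}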