| author | Magnus Damm <magnus.damm@gmail.com> | 2008-01-25 12:42:48 +0900 |
|---|---|---|
| committer | Paul Mundt <lethal@linux-sh.org> | 2008-02-14 14:22:07 +0900 |
| commit | 2a3eeba88f935b200245d1536b99cd4b7eec1d4a (patch) | |
| tree | ac201b3def115a6c695b317a968743a28bf86681 /arch | |
| parent | e760e716d47b48caf98da348368fd41b4a9b9e7e (diff) | |
sh: declared coherent memory support V2 fix
This patch fixes the recently introduced declared coherent memory support.
Without this fix, dma_alloc_coherent() returns a cached memory area unless
dma_declare_coherent_memory() has set up a separate area.
This patch makes sure an uncached memory area is returned. With this patch
it is now possible to ping through an rtl8139 interface on r2d-plus.
Signed-off-by: Magnus Damm <damm@igel.co.jp>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
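
For context, here is a minimal driver-side sketch of the API this patch fixes. It is not part of the patch; the helper names example_setup_ring()/example_free_ring() and the RING_BYTES size are hypothetical, and the rtl8139-style descriptor-ring usage is only illustrative. The point is that the CPU pointer handed back by dma_alloc_coherent() must reference uncached memory, which is what the fix guarantees when no separate area has been declared.

/*
 * Illustrative sketch only (not from this patch): a driver allocating a
 * small DMA-coherent descriptor ring, the way rtl8139-style drivers do.
 * example_setup_ring(), example_free_ring() and RING_BYTES are hypothetical.
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#define RING_BYTES	4096	/* hypothetical descriptor-ring size */

static void *example_setup_ring(struct device *dev, dma_addr_t *ring_dma)
{
	/*
	 * The CPU writes descriptors through the returned pointer while the
	 * device reads them via *ring_dma, so the CPU mapping must bypass
	 * the cache; this is what the fix below ensures on sh.
	 */
	return dma_alloc_coherent(dev, RING_BYTES, ring_dma, GFP_KERNEL);
}

static void example_free_ring(struct device *dev, void *ring,
			      dma_addr_t ring_dma)
{
	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
}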
Diffstat (limited to 'arch')
| -rw-r--r-- | arch/sh/mm/consistent.c | 32 |

1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 7b2131c9eeda..d3c33fc5b1c2 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -26,7 +26,7 @@ struct dma_coherent_mem {
 void *dma_alloc_coherent(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp)
 {
-	void *ret;
+	void *ret, *ret_nocache;
 	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
 	int order = get_order(size);
 
@@ -44,17 +44,24 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 	}
 
 	ret = (void *)__get_free_pages(gfp, order);
-
-	if (ret != NULL) {
-		memset(ret, 0, size);
-		/*
-		 * Pages from the page allocator may have data present in
-		 * cache. So flush the cache before using uncached memory.
-		 */
-		dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);
-		*dma_handle = virt_to_phys(ret);
+	if (!ret)
+		return NULL;
+
+	memset(ret, 0, size);
+	/*
+	 * Pages from the page allocator may have data present in
+	 * cache. So flush the cache before using uncached memory.
+	 */
+	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);
+
+	ret_nocache = ioremap_nocache(virt_to_phys(ret), size);
+	if (!ret_nocache) {
+		free_pages((unsigned long)ret, order);
+		return NULL;
 	}
-	return ret;
+
+	*dma_handle = virt_to_phys(ret);
+	return ret_nocache;
 }
 EXPORT_SYMBOL(dma_alloc_coherent);
 
@@ -71,7 +78,8 @@ void dma_free_coherent(struct device *dev, size_t size,
 	} else {
 		WARN_ON(irqs_disabled());	/* for portability */
 		BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
-		free_pages((unsigned long)vaddr, order);
+		free_pages((unsigned long)phys_to_virt(dma_handle), order);
+		iounmap(vaddr);
 	}
 }
 EXPORT_SYMBOL(dma_free_coherent);
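
After this change, the pointer returned to the caller is an ioremap_nocache() alias of the allocated pages, while *dma_handle still carries the physical address of the underlying (cached) allocation. The free path therefore has to undo both mappings, which is why it now recovers the page-allocator virtual address from the handle and unmaps the alias. Below is a hedged sketch of that pairing, not code from the kernel tree; the wrapper name example_teardown() is hypothetical.

/*
 * Sketch of the post-patch teardown pairing; example_teardown() is a
 * hypothetical name, the two calls mirror the fallback path in the diff.
 */
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/mm.h>

static void example_teardown(size_t size, void *vaddr, dma_addr_t dma_handle)
{
	int order = get_order(size);

	/* the cached page-allocator address is recovered from the DMA handle */
	free_pages((unsigned long)phys_to_virt(dma_handle), order);

	/* vaddr is the uncached ioremap_nocache() alias handed to the driver */
	iounmap(vaddr);
}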