path: root/include/asm-sh64
author     Ralf Baechle <ralf@linux-mips.org>                    2007-10-16 23:29:42 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-17 08:42:57 -0700
commit     622a9edd919de98ef59571ae6c40c7458244e3f2 (patch)
tree       c25684f90ed52c459c39d97d6ab641fe3bb6b09d /include/asm-sh64
parent     bc154b1efb7f8430ea9faabd5953ebc411f8ead5 (diff)
Remove dma_cache_(wback|inv|wback_inv) functions
dma_cache_(wback|inv|wback_inv) were the earliest attempt at a generalized cache management API for I/O purposes. Originally it was basically the raw MIPS low-level cache API exported to the entire world. The API has suffered from a lack of documentation, was not very widely used unlike its more modern siblings, and can easily be replaced by dma_cache_sync. So remove it, turning the surviving bits back into an arch-private API, as discussed on linux-arch.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Acked-by: Paul Mackerras <paulus@samba.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Kyle McMartin <kyle@parisc-linux.org>
Acked-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
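As a rough sketch of the replacement the message describes, a driver that used the old helpers could be converted as follows. The driver function and buffer here are hypothetical, and the correspondence between the old calls and DMA directions is approximate, not a documented one-to-one mapping:

#include <linux/dma-mapping.h>

/*
 * Hypothetical driver fragment: the old helpers took an unsigned long
 * address; dma_cache_sync() takes the device, a void *, and the
 * direction of the upcoming transfer.
 */
static void mydev_sync_buffers(struct device *dev, void *buf, size_t len)
{
	/* was: dma_cache_wback((unsigned long)buf, len) - flush before the device reads */
	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);

	/* was: dma_cache_inv((unsigned long)buf, len) - invalidate before the device writes */
	dma_cache_sync(dev, buf, len, DMA_FROM_DEVICE);

	/* was: dma_cache_wback_inv((unsigned long)buf, len) - both */
	dma_cache_sync(dev, buf, len, DMA_BIDIRECTIONAL);
}

Note that dma_cache_sync() is only meaningful on memory obtained from dma_alloc_noncoherent(), per the DMA-API documentation of this era.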
Diffstat (limited to 'include/asm-sh64')
-rw-r--r--  include/asm-sh64/dma-mapping.h |  6
-rw-r--r--  include/asm-sh64/io.h          | 48
2 files changed, 5 insertions(+), 49 deletions(-)
diff --git a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h
index de4309960207..e661857f98dc 100644
--- a/include/asm-sh64/dma-mapping.h
+++ b/include/asm-sh64/dma-mapping.h
@@ -42,7 +42,11 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                   enum dma_data_direction dir)
 {
-        dma_cache_wback_inv((unsigned long)vaddr, size);
+        unsigned long s = (unsigned long) vaddr & L1_CACHE_ALIGN_MASK;
+        unsigned long e = ((unsigned long) vaddr + size) & L1_CACHE_ALIGN_MASK;
+
+        for (; s <= e; s += L1_CACHE_BYTES)
+                asm volatile ("ocbp %0, 0" : : "r" (s));
 }
 
 static inline dma_addr_t dma_map_single(struct device *dev,
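The new body walks the buffer one cache line at a time: both endpoints are rounded down to a line boundary and the loop bound is inclusive, so partially covered first and last lines are purged too (at the cost of touching one extra line when the buffer ends exactly on a boundary, the "overzealous invalidation" caveat noted in the removed io.h comment below). A standalone user-space sketch of the same arithmetic, assuming SH-5's 32-byte L1 line and the L1_CACHE_ALIGN_MASK definition from asm-sh64/cache.h:

#include <stdio.h>

#define L1_CACHE_BYTES      32UL                     /* SH-5 L1 line size (assumed) */
#define L1_CACHE_ALIGN_MASK (~(L1_CACHE_BYTES - 1))

int main(void)
{
	unsigned long vaddr = 0x1005;  /* buffer start, not line-aligned */
	unsigned long size  = 100;     /* last byte lives at 0x1068 */

	unsigned long s = vaddr & L1_CACHE_ALIGN_MASK;          /* 0x1000 */
	unsigned long e = (vaddr + size) & L1_CACHE_ALIGN_MASK; /* 0x1060 */

	/* Inclusive bound: the line holding the last byte is covered
	 * even when the buffer ends mid-line. */
	for (; s <= e; s += L1_CACHE_BYTES)
		printf("flush line at %#lx\n", s);
	return 0;
}

Run against the values above, this prints the four line addresses 0x1000, 0x1020, 0x1040, 0x1060, which together cover every byte of the unaligned 100-byte buffer.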
diff --git a/include/asm-sh64/io.h b/include/asm-sh64/io.h
index 3de3ad99f457..7bd7314d38c2 100644
--- a/include/asm-sh64/io.h
+++ b/include/asm-sh64/io.h
@@ -182,54 +182,6 @@ unsigned long onchip_remap(unsigned long addr, unsigned long size, const char* n
 extern void onchip_unmap(unsigned long vaddr);
 
 /*
- * The caches on some architectures aren't dma-coherent and have need to
- * handle this in software.  There are three types of operations that
- * can be applied to dma buffers.
- *
- * - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
- *   writing the content of the caches back to memory, if necessary.
- *   The function also invalidates the affected part of the caches as
- *   necessary before DMA transfers from outside to memory.
- * - dma_cache_inv(start, size) invalidates the affected parts of the
- *   caches.  Dirty lines of the caches may be written back or simply
- *   be discarded.  This operation is necessary before dma operations
- *   to the memory.
- * - dma_cache_wback(start, size) writes back any dirty lines but does
- *   not invalidate the cache.  This can be used before DMA reads from
- *   memory,
- */
-
-static __inline__ void dma_cache_wback_inv (unsigned long start, unsigned long size)
-{
-        unsigned long s = start & L1_CACHE_ALIGN_MASK;
-        unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
-
-        for (; s <= e; s += L1_CACHE_BYTES)
-                asm volatile ("ocbp %0, 0" : : "r" (s));
-}
-
-static __inline__ void dma_cache_inv (unsigned long start, unsigned long size)
-{
-        // Note that caller has to be careful with overzealous
-        // invalidation should there be partial cache lines at the extremities
-        // of the specified range
-        unsigned long s = start & L1_CACHE_ALIGN_MASK;
-        unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
-
-        for (; s <= e; s += L1_CACHE_BYTES)
-                asm volatile ("ocbi %0, 0" : : "r" (s));
-}
-
-static __inline__ void dma_cache_wback (unsigned long start, unsigned long size)
-{
-        unsigned long s = start & L1_CACHE_ALIGN_MASK;
-        unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
-
-        for (; s <= e; s += L1_CACHE_BYTES)
-                asm volatile ("ocbwb %0, 0" : : "r" (s));
-}
-
-/*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
  */
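Each of the three removed helpers wrapped one of the SH-5 operand-cache block instructions; only the ocbp variant survives, inlined into dma_cache_sync() above. A sketch of how the other two could be kept as an arch-private API, as the commit message suggests. The mnemonics are real SH-5 cache ops; the helper names below are invented for illustration, and L1_CACHE_BYTES / L1_CACHE_ALIGN_MASK are assumed to come from asm-sh64/cache.h:

/*
 * SH-5 operand-cache block ops, one L1 line per instruction:
 *   ocbp  - purge: write the line back if dirty, then invalidate it
 *   ocbi  - invalidate the line without writing it back
 *   ocbwb - write the line back if dirty, leaving it valid
 */
static inline void sh64_dcache_inv_range(unsigned long start, unsigned long size)
{
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbi %0, 0" : : "r" (s));
}

static inline void sh64_dcache_wback_range(unsigned long start, unsigned long size)
{
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbwb %0, 0" : : "r" (s));
}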