From ccaf5f05b218e5eb41e2f5cdfd26b18dce4a0218 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Fri, 11 Dec 2009 02:21:57 +0100
Subject: ARM: 5848/1: kill flush_ioremap_region()

There are not enough users to warrant its existence, and it is actually
an obstacle to progress with the new DMA API which cannot cover this
case properly.

To keep backward compatibility, let's perform the necessary custom
cache maintenance locally in the only driver affected.

Signed-off-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/include/asm/cacheflush.h | 7 -------
 1 file changed, 7 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 73eceb87e588..3db7acd39a62 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -465,13 +465,6 @@ static inline void flush_kernel_dcache_page(struct page *page)
  */
 #define flush_icache_page(vma,page)	do { } while (0)
 
-static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
-	unsigned offset, size_t size)
-{
-	const void *start = (void __force *)virt + offset;
-	dmac_inv_range(start, start + size);
-}
-
 /*
  * flush_cache_vmap() is used when creating mappings (eg, via vmap,
  * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
--
cgit v1.2.1
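The driver-side change the commit message refers to is not shown in this path-limited diff. As a rough, hypothetical sketch of what such open-coded cache maintenance looks like once the helper is gone (the function name and driver context below are illustrative, not taken from the patch; only the pointer arithmetic and the dmac_inv_range() call mirror the removed inline):

    #include <linux/io.h>
    #include <asm/cacheflush.h>

    /*
     * Hypothetical driver-local stand-in for the removed
     * flush_ioremap_region() helper: compute the CPU-visible start of
     * the region inside the ioremapped window, then invalidate the
     * D-cache over it, just as the old inline did.
     */
    static void example_inval_ioremapped(void __iomem *virt,
    				     unsigned offset, size_t size)
    {
    	const void *start = (void __force *)virt + offset;

    	dmac_inv_range(start, start + size);
    }
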
From 2c9b9c8490b60428fa2d1c64042f7c7caed93940 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 26 Nov 2009 12:56:21 +0000
Subject: ARM: add size argument to __cpuc_flush_dcache_page

... and rename the function since it no longer operates on just pages.

Signed-off-by: Russell King
---
 arch/arm/include/asm/cacheflush.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 3db7acd39a62..730aefcfbee3 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -211,7 +211,7 @@ struct cpu_cache_fns {
 
 	void (*coherent_kern_range)(unsigned long, unsigned long);
 	void (*coherent_user_range)(unsigned long, unsigned long);
-	void (*flush_kern_dcache_page)(void *);
+	void (*flush_kern_dcache_area)(void *, size_t);
 
 	void (*dma_inv_range)(const void *, const void *);
 	void (*dma_clean_range)(const void *, const void *);
@@ -236,7 +236,7 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
 #define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
 #define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
-#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page
+#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area
 
 /*
  * These are private to the dma-mapping API.  Do not use directly.
@@ -255,14 +255,14 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
-#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)
+#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
 extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
-extern void __cpuc_flush_dcache_page(void *);
+extern void __cpuc_flush_dcache_area(void *, size_t);
 
 /*
  * These are private to the dma-mapping API.  Do not use directly.
@@ -448,7 +448,7 @@ static inline void flush_kernel_dcache_page(struct page *page)
 {
 	/* highmem pages are always flushed upon kunmap already */
 	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
-		__cpuc_flush_dcache_page(page_address(page));
+		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
 
 #define flush_dcache_mmap_lock(mapping) \
--
cgit v1.2.1
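For callers, the practical effect of the rename is one extra size argument, as the flush_kernel_dcache_page() hunk above already shows for the PAGE_SIZE case. A minimal, hypothetical sketch of a caller flushing a buffer that is not page sized (the function name and buffer are illustrative only, not part of the patch):

    #include <linux/types.h>
    #include <asm/cacheflush.h>

    /* Hypothetical caller, shown only to illustrate the interface change. */
    static void example_flush_kernel_buffer(void *buf, size_t len)
    {
    	/*
    	 * Old interface: the size was implicit, so only a whole page
    	 * could be covered per call:
    	 *	__cpuc_flush_dcache_page(buf);
    	 *
    	 * New interface: the length is explicit, so an arbitrarily
    	 * sized kernel-mapped region is handled in one call.
    	 */
    	__cpuc_flush_dcache_area(buf, len);
    }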