author      Russell King <rmk+kernel@arm.linux.org.uk>    2011-07-18 23:00:42 +0100
committer   Russell King <rmk+kernel@arm.linux.org.uk>    2011-07-18 23:00:42 +0100
commit      07f1c295de593ec0b0dca3092299c048c03374da (patch)
tree        ad8f291e550b3315f84c07e9f543e25adcf95dc3 /arch/arm/include
parent      4aa96ccf9ee35cdbd0d423e87a4d551019570218 (diff)
parent      fb89fcfb151698776be6c900aec8161b01990e92 (diff)
Merge branch 'dma' of http://git.linaro.org/git/people/nico/linux into devel-stable
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 88
-rw-r--r--  arch/arm/include/asm/dma.h         | 11
-rw-r--r--  arch/arm/include/asm/mach/arch.h   |  4
-rw-r--r--  arch/arm/include/asm/memory.h      | 12
4 files changed, 23 insertions, 92 deletions
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 4fff837363ed..7a21d0bf7134 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -115,39 +115,8 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	___dma_page_dev_to_cpu(page, off, size, dir);
 }
 
-/*
- * Return whether the given device DMA address mask can be supported
- * properly. For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- *
- * FIXME: This should really be a platform specific issue - we should
- * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
- */
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	if (mask < ISA_DMA_THRESHOLD)
-		return 0;
-	return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-#ifdef CONFIG_DMABOUNCE
-	if (dev->archdata.dmabounce) {
-		if (dma_mask >= ISA_DMA_THRESHOLD)
-			return 0;
-		else
-			return -EIO;
-	}
-#endif
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
+extern int dma_supported(struct device *, u64);
+extern int dma_set_mask(struct device *, u64);
 
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
@@ -256,14 +225,14 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
  * @dev: valid struct device pointer
  * @small_buf_size: size of buffers to use with small buffer pool
  * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ * @needs_bounce_fn: called to determine whether buffer needs bouncing
  *
  * This function should be called by low-level platform code to register
  * a device as requireing DMA buffer bouncing. The function will allocate
  * appropriate DMA pools for the device.
- *
  */
 extern int dmabounce_register_dev(struct device *, unsigned long,
-		unsigned long);
+		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
 
 /**
  * dmabounce_unregister_dev
@@ -277,31 +246,9 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
  */
 extern void dmabounce_unregister_dev(struct device *);
 
-/**
- * dma_needs_bounce
- *
- * @dev: valid struct device pointer
- * @dma_handle: dma_handle of unbounced buffer
- * @size: size of region being mapped
- *
- * Platforms that utilize the dmabounce mechanism must implement
- * this function.
- *
- * The dmabounce routines call this function whenever a dma-mapping
- * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the buffer is OK for
- * DMA access and 1 if the buffer needs to be bounced.
- *
- */
-extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-
 /*
  * The DMA API, implemented by dmabounce.c. See below for descriptions.
  */
-extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
-		enum dma_data_direction);
-extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
-		enum dma_data_direction);
 extern dma_addr_t __dma_map_page(struct device *, struct page *,
 		unsigned long, size_t, enum dma_data_direction);
 extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
@@ -328,13 +275,6 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 }
 
 
-static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_single_cpu_to_dev(cpu_addr, size, dir);
-	return virt_to_dma(dev, cpu_addr);
-}
-
 static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir)
 {
@@ -342,12 +282,6 @@ static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
 
-static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
-}
-
 static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
@@ -373,14 +307,18 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 		size_t size, enum dma_data_direction dir)
 {
+	unsigned long offset;
+	struct page *page;
 	dma_addr_t addr;
 
+	BUG_ON(!virt_addr_valid(cpu_addr));
+	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
 	BUG_ON(!valid_dma_direction(dir));
 
-	addr = __dma_map_single(dev, cpu_addr, size, dir);
-	debug_dma_map_page(dev, virt_to_page(cpu_addr),
-			(unsigned long)cpu_addr & ~PAGE_MASK, size,
-			dir, addr, true);
+	page = virt_to_page(cpu_addr);
+	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
+	addr = __dma_map_page(dev, page, offset, size, dir);
+	debug_dma_map_page(dev, page, offset, size, dir, addr, true);
 
 	return addr;
 }
@@ -430,7 +368,7 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
 	debug_dma_unmap_page(dev, handle, size, dir, true);
-	__dma_unmap_single(dev, handle, size, dir);
+	__dma_unmap_page(dev, handle, size, dir);
 }
 
 /**
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 42005542932b..628670e9d7c9 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -1,15 +1,16 @@
 #ifndef __ASM_ARM_DMA_H
 #define __ASM_ARM_DMA_H
 
-#include <asm/memory.h>
-
 /*
  * This is the maximum virtual address which can be DMA'd from.
  */
-#ifndef ARM_DMA_ZONE_SIZE
-#define MAX_DMA_ADDRESS	0xffffffff
+#ifndef CONFIG_ZONE_DMA
+#define MAX_DMA_ADDRESS	0xffffffffUL
 #else
-#define MAX_DMA_ADDRESS	(PAGE_OFFSET + ARM_DMA_ZONE_SIZE)
+#define MAX_DMA_ADDRESS	({ \
+	extern unsigned long arm_dma_zone_size; \
+	arm_dma_zone_size ? \
+	(PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
 #endif
 
 #ifdef CONFIG_ISA_DMA_API
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 946f4d778f71..3281fb4b12e3 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -23,6 +23,10 @@ struct machine_desc {
 
 	unsigned int		nr_irqs;	/* number of IRQs */
 
+#ifdef CONFIG_ZONE_DMA
+	unsigned long		dma_zone_size;	/* size of DMA-able area */
+#endif
+
 	unsigned int		video_start;	/* start of video RAM */
 	unsigned int		video_end;	/* end of video RAM */
 
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index af44a8fb3480..b8de516e600e 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -204,18 +204,6 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #endif
 
 /*
- * The DMA mask corresponding to the maximum bus address allocatable
- * using GFP_DMA. The default here places no restriction on DMA
- * allocations. This must be the smallest DMA mask in the system,
- * so a successful GFP_DMA allocation will always satisfy this.
- */
-#ifndef ARM_DMA_ZONE_SIZE
-#define ISA_DMA_THRESHOLD	(0xffffffffULL)
-#else
-#define ISA_DMA_THRESHOLD	(PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1)
-#endif
-
-/*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
  *
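
The most visible interface change above is that dmabounce_register_dev() now takes the bounce-decision callback directly, replacing the global dma_needs_bounce() hook each platform previously had to provide. A minimal sketch of how platform code might use the new prototype follows; the pool sizes, the 64MB limit, and the example_* names are illustrative placeholders, not taken from this commit:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical limit: buffers ending above 64MB of bus address space get bounced. */
#define EXAMPLE_DMA_LIMIT	(64UL * 1024 * 1024)

/*
 * Per-mapping decision, matching the new callback type
 * int (*)(struct device *, dma_addr_t, size_t):
 * return 0 if the buffer is usable for DMA as-is, nonzero to bounce it.
 */
static int example_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size) > EXAMPLE_DMA_LIMIT;
}

static int example_platform_dma_init(struct device *dev)
{
	/* 2KB small-buffer pool, no large-buffer pool, plus the bounce callback. */
	return dmabounce_register_dev(dev, 2048, 0, example_needs_bounce);
}

Passing the callback at registration time keeps the bounce policy with the device that needs it, rather than requiring one global dma_needs_bounce() definition per platform.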