Diffstat (limited to 'arch/microblaze/kernel/dma.c')
-rw-r--r--   arch/microblaze/kernel/dma.c | 78
1 file changed, 22 insertions(+), 56 deletions(-)
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 990bf9ea0ec6..c91e8cef98dd 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -15,42 +15,18 @@
 #include <linux/bug.h>
 #include <asm/cacheflush.h>
 
-#define NOT_COHERENT_CACHE
-
-static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
 				       dma_addr_t *dma_handle, gfp_t flag,
 				       unsigned long attrs)
 {
-#ifdef NOT_COHERENT_CACHE
 	return consistent_alloc(flag, size, dma_handle);
-#else
-	void *ret;
-	struct page *page;
-	int node = dev_to_node(dev);
-
-	/* ignore region specifiers */
-	flag &= ~(__GFP_HIGHMEM);
-
-	page = alloc_pages_node(node, flag, get_order(size));
-	if (page == NULL)
-		return NULL;
-	ret = page_address(page);
-	memset(ret, 0, size);
-	*dma_handle = virt_to_phys(ret);
-
-	return ret;
-#endif
 }
 
-static void dma_direct_free_coherent(struct device *dev, size_t size,
+static void dma_nommu_free_coherent(struct device *dev, size_t size,
 				     void *vaddr, dma_addr_t dma_handle,
 				     unsigned long attrs)
 {
-#ifdef NOT_COHERENT_CACHE
 	consistent_free(size, vaddr);
-#else
-	free_pages((unsigned long)vaddr, get_order(size));
-#endif
 }
 
 static inline void __dma_sync(unsigned long paddr,
@@ -69,7 +45,7 @@ static inline void __dma_sync(unsigned long paddr,
 	}
 }
 
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
 			     int nents, enum dma_data_direction direction,
 			     unsigned long attrs)
 {
@@ -89,12 +65,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 	return nents;
 }
 
-static int dma_direct_dma_supported(struct device *dev, u64 mask)
-{
-	return 1;
-}
-
-static inline dma_addr_t dma_direct_map_page(struct device *dev,
+static inline dma_addr_t dma_nommu_map_page(struct device *dev,
 					     struct page *page,
 					     unsigned long offset,
 					     size_t size,
@@ -106,7 +77,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
 	return page_to_phys(page) + offset;
 }
 
-static inline void dma_direct_unmap_page(struct device *dev,
+static inline void dma_nommu_unmap_page(struct device *dev,
 					 dma_addr_t dma_address,
 					 size_t size,
 					 enum dma_data_direction direction,
@@ -122,7 +93,7 @@ static inline void dma_direct_unmap_page(struct device *dev,
 }
 
 static inline void
-dma_direct_sync_single_for_cpu(struct device *dev,
+dma_nommu_sync_single_for_cpu(struct device *dev,
 			       dma_addr_t dma_handle, size_t size,
 			       enum dma_data_direction direction)
 {
@@ -136,7 +107,7 @@ dma_direct_sync_single_for_cpu(struct device *dev,
 }
 
 static inline void
-dma_direct_sync_single_for_device(struct device *dev,
+dma_nommu_sync_single_for_device(struct device *dev,
 				  dma_addr_t dma_handle, size_t size,
 				  enum dma_data_direction direction)
 {
@@ -150,7 +121,7 @@ dma_direct_sync_single_for_device(struct device *dev,
 }
 
 static inline void
-dma_direct_sync_sg_for_cpu(struct device *dev,
+dma_nommu_sync_sg_for_cpu(struct device *dev,
 			   struct scatterlist *sgl, int nents,
 			   enum dma_data_direction direction)
 {
@@ -164,7 +135,7 @@ dma_direct_sync_sg_for_cpu(struct device *dev,
 }
 
 static inline void
-dma_direct_sync_sg_for_device(struct device *dev,
+dma_nommu_sync_sg_for_device(struct device *dev,
 			      struct scatterlist *sgl, int nents,
 			      enum dma_data_direction direction)
 {
@@ -178,7 +149,7 @@ dma_direct_sync_sg_for_device(struct device *dev,
 }
 
 static
-int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
 			     void *cpu_addr, dma_addr_t handle, size_t size,
 			     unsigned long attrs)
 {
@@ -191,12 +162,8 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
 	if (off >= count || user_count > (count - off))
 		return -ENXIO;
 
-#ifdef NOT_COHERENT_CACHE
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	pfn = consistent_virt_to_pfn(cpu_addr);
-#else
-	pfn = virt_to_pfn(cpu_addr);
-#endif
 	return remap_pfn_range(vma, vma->vm_start, pfn + off,
 			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
 #else
@@ -204,20 +171,19 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
 #endif
 }
 
-const struct dma_map_ops dma_direct_ops = {
-	.alloc = dma_direct_alloc_coherent,
-	.free = dma_direct_free_coherent,
-	.mmap = dma_direct_mmap_coherent,
-	.map_sg = dma_direct_map_sg,
-	.dma_supported = dma_direct_dma_supported,
-	.map_page = dma_direct_map_page,
-	.unmap_page = dma_direct_unmap_page,
-	.sync_single_for_cpu = dma_direct_sync_single_for_cpu,
-	.sync_single_for_device = dma_direct_sync_single_for_device,
-	.sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
-	.sync_sg_for_device = dma_direct_sync_sg_for_device,
+const struct dma_map_ops dma_nommu_ops = {
+	.alloc = dma_nommu_alloc_coherent,
+	.free = dma_nommu_free_coherent,
+	.mmap = dma_nommu_mmap_coherent,
+	.map_sg = dma_nommu_map_sg,
+	.map_page = dma_nommu_map_page,
+	.unmap_page = dma_nommu_unmap_page,
+	.sync_single_for_cpu = dma_nommu_sync_single_for_cpu,
+	.sync_single_for_device = dma_nommu_sync_single_for_device,
+	.sync_sg_for_cpu = dma_nommu_sync_sg_for_cpu,
+	.sync_sg_for_device = dma_nommu_sync_sg_for_device,
 };
-EXPORT_SYMBOL(dma_direct_ops);
+EXPORT_SYMBOL(dma_nommu_ops);
 
 /* Number of entries preallocated for DMA-API debugging */
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
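For context, the sketch below (not part of the patch) shows how a driver on this platform would reach the renamed ops: consumers never call dma_nommu_* directly, they go through the generic DMA API, which dispatches through the dma_map_ops table exported here. The device pointer mydev, the buffer sizes, and example_dma_usage are hypothetical names for illustration; the API calls themselves (dma_alloc_coherent, dma_map_single, and friends) are the standard kernel interfaces.

/*
 * Illustrative sketch, not part of the patch: a driver-side view of the
 * renamed ops.  "mydev" and the sizes below are hypothetical.
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int example_dma_usage(struct device *mydev)
{
	dma_addr_t coherent_handle, stream_handle;
	void *coherent_buf, *stream_buf;
	int ret = 0;

	/* Lands in dma_nommu_alloc_coherent() -> consistent_alloc(). */
	coherent_buf = dma_alloc_coherent(mydev, PAGE_SIZE,
					  &coherent_handle, GFP_KERNEL);
	if (!coherent_buf)
		return -ENOMEM;

	/* Streaming mapping: dma_nommu_map_page() runs __dma_sync() on
	 * the buffer before handing back the bus address. */
	stream_buf = kmalloc(512, GFP_KERNEL);
	if (!stream_buf) {
		ret = -ENOMEM;
		goto free_coherent;
	}
	stream_handle = dma_map_single(mydev, stream_buf, 512, DMA_TO_DEVICE);
	if (dma_mapping_error(mydev, stream_handle)) {
		ret = -EIO;
		goto free_stream;
	}

	/* ... program the device with coherent_handle / stream_handle ... */

	dma_unmap_single(mydev, stream_handle, 512, DMA_TO_DEVICE);
free_stream:
	kfree(stream_buf);
free_coherent:
	dma_free_coherent(mydev, PAGE_SIZE, coherent_buf, coherent_handle);
	return ret;
}

Note that dropping .dma_supported from the ops table is behavior-neutral: the dma-mapping core treats a NULL .dma_supported callback as "any mask is supported", which is exactly what the deleted dma_direct_dma_supported() returned unconditionally.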