Diffstat (limited to 'arch/arc')
-rw-r--r-- | arch/arc/Kconfig                   |   3
-rw-r--r-- | arch/arc/Makefile                  |   2
-rw-r--r-- | arch/arc/include/asm/cache.h       |   2
-rw-r--r-- | arch/arc/include/asm/dma-mapping.h | 187
-rw-r--r-- | arch/arc/kernel/unwind.c           |  32
-rw-r--r-- | arch/arc/mm/cache.c                |   4
-rw-r--r-- | arch/arc/mm/dma.c                  | 152
-rw-r--r-- | arch/arc/mm/highmem.c              |   4
8 files changed, 126 insertions, 260 deletions
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 6312f607932f..76dde9db7934 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -73,9 +73,6 @@ config STACKTRACE_SUPPORT def_bool y select STACKTRACE -config HAVE_LATENCYTOP_SUPPORT - def_bool y - config HAVE_ARCH_TRANSPARENT_HUGEPAGE def_bool y depends on ARC_MMU_V4 diff --git a/arch/arc/Makefile b/arch/arc/Makefile index cf0cf34eeb24..aeb19021099e 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -81,7 +81,7 @@ endif LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name) # Modules with short calls might break for calls into builtin-kernel -KBUILD_CFLAGS_MODULE += -mlong-calls +KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode # Finally dump eveything into kernel build system KBUILD_CFLAGS += $(cflags-y) diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index abf06e81c929..210ef3e72332 100644 --- a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h @@ -62,9 +62,7 @@ extern int ioc_exists; #define ARC_REG_IC_IVIC 0x10 #define ARC_REG_IC_CTRL 0x11 #define ARC_REG_IC_IVIL 0x19 -#if defined(CONFIG_ARC_MMU_V3) || defined(CONFIG_ARC_MMU_V4) #define ARC_REG_IC_PTAG 0x1E -#endif #define ARC_REG_IC_PTAG_HI 0x1F /* Bit val in IC_CTRL */ diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h index 2d28ba939d8e..660205414f1d 100644 --- a/arch/arc/include/asm/dma-mapping.h +++ b/arch/arc/include/asm/dma-mapping.h @@ -11,192 +11,11 @@ #ifndef ASM_ARC_DMA_MAPPING_H #define ASM_ARC_DMA_MAPPING_H -#include <asm-generic/dma-coherent.h> -#include <asm/cacheflush.h> +extern struct dma_map_ops arc_dma_ops; -void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp); - -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle); - -void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp); - -void dma_free_coherent(struct device *dev, size_t size, void *kvaddr, - dma_addr_t dma_handle); - -/* drivers/base/dma-mapping.c */ -extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size); -extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, - size_t size); - -#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) -#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) - -/* - * streaming DMA Mapping API... 
- * CPU accesses page via normal paddr, thus needs to explicitly made - * consistent before each use - */ - -static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size, - enum dma_data_direction dir) -{ - switch (dir) { - case DMA_FROM_DEVICE: - dma_cache_inv(paddr, size); - break; - case DMA_TO_DEVICE: - dma_cache_wback(paddr, size); - break; - case DMA_BIDIRECTIONAL: - dma_cache_wback_inv(paddr, size); - break; - default: - pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr); - } -} - -void __arc_dma_cache_sync(unsigned long paddr, size_t size, - enum dma_data_direction dir); - -#define _dma_cache_sync(addr, sz, dir) \ -do { \ - if (__builtin_constant_p(dir)) \ - __inline_dma_cache_sync(addr, sz, dir); \ - else \ - __arc_dma_cache_sync(addr, sz, dir); \ -} \ -while (0); - -static inline dma_addr_t -dma_map_single(struct device *dev, void *cpu_addr, size_t size, - enum dma_data_direction dir) -{ - _dma_cache_sync((unsigned long)cpu_addr, size, dir); - return (dma_addr_t)cpu_addr; -} - -static inline void -dma_unmap_single(struct device *dev, dma_addr_t dma_addr, - size_t size, enum dma_data_direction dir) -{ -} - -static inline dma_addr_t -dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir) -{ - unsigned long paddr = page_to_phys(page) + offset; - return dma_map_single(dev, (void *)paddr, size, dir); -} - -static inline void -dma_unmap_page(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction dir) -{ -} - -static inline int -dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir) -{ - struct scatterlist *s; - int i; - - for_each_sg(sg, s, nents, i) - s->dma_address = dma_map_page(dev, sg_page(s), s->offset, - s->length, dir); - - return nents; -} - -static inline void -dma_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir) +static inline struct dma_map_ops *get_dma_ops(struct device *dev) { - struct scatterlist *s; - int i; - - for_each_sg(sg, s, nents, i) - dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); -} - -static inline void -dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction dir) -{ - _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE); -} - -static inline void -dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction dir) -{ - _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE); -} - -static inline void -dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - _dma_cache_sync(dma_handle + offset, size, DMA_FROM_DEVICE); -} - -static inline void -dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - _dma_cache_sync(dma_handle + offset, size, DMA_TO_DEVICE); -} - -static inline void -dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems, - enum dma_data_direction dir) -{ - int i; - struct scatterlist *sg; - - for_each_sg(sglist, sg, nelems, i) - _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); -} - -static inline void -dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction dir) -{ - int i; - struct scatterlist *sg; - - for_each_sg(sglist, sg, nelems, i) - _dma_cache_sync((unsigned 
int)sg_virt(sg), sg->length, dir); -} - -static inline int dma_supported(struct device *dev, u64 dma_mask) -{ - /* Support 32 bit DMA mask exclusively */ - return dma_mask == DMA_BIT_MASK(32); -} - -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return 0; -} - -static inline int dma_set_mask(struct device *dev, u64 dma_mask) -{ - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) - return -EIO; - - *dev->dma_mask = dma_mask; - - return 0; + return &arc_dma_ops; } #endif diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index cf2828ab0905..0587bf121d11 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c @@ -293,13 +293,13 @@ static void init_unwind_hdr(struct unwind_table *table, const u32 *cie = cie_for_fde(fde, table); signed ptrType; - if (cie == ¬_fde) /* only process FDE here */ + if (cie == ¬_fde) continue; if (cie == NULL || cie == &bad_cie) - continue; /* say FDE->CIE.version != 1 */ + goto ret_err; ptrType = fde_pointer_type(cie); if (ptrType < 0) - continue; + goto ret_err; ptr = (const u8 *)(fde + 2); if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde, @@ -315,14 +315,14 @@ static void init_unwind_hdr(struct unwind_table *table, } if (tableSize || !n) - return; + goto ret_err; hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int) + 2 * n * sizeof(unsigned long); header = alloc(hdrSize); if (!header) - return; + goto ret_err; header->version = 1; header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native; @@ -343,10 +343,6 @@ static void init_unwind_hdr(struct unwind_table *table, if (fde[1] == 0xffffffff) continue; /* this is a CIE */ - - if (*(u8 *)(cie + 2) != 1) - continue; /* FDE->CIE.version not supported */ - ptr = (const u8 *)(fde + 2); header->table[n].start = read_pointer(&ptr, (const u8 *)(fde + 1) + @@ -365,6 +361,10 @@ static void init_unwind_hdr(struct unwind_table *table, table->hdrsz = hdrSize; smp_wmb(); table->header = (const void *)header; + return; + +ret_err: + panic("Attention !!! 
Dwarf FDE parsing errors\n");; } #ifdef CONFIG_MODULES @@ -385,8 +385,8 @@ void *unwind_add_table(struct module *module, const void *table_start, return NULL; init_unwind_table(table, module->name, - module->module_core, module->core_size, - module->module_init, module->init_size, + module->core_layout.base, module->core_layout.size, + module->init_layout.base, module->init_layout.size, table_start, table_size, NULL, 0); @@ -523,8 +523,7 @@ static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table) if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde) || (*cie & (sizeof(*cie) - 1)) - || (cie[1] != 0xffffffff) - || ( *(u8 *)(cie + 2) != 1)) /* version 1 supported */ + || (cie[1] != 0xffffffff)) return NULL; /* this is not a (valid) CIE */ return cie; } @@ -605,9 +604,6 @@ static signed fde_pointer_type(const u32 *cie) const u8 *ptr = (const u8 *)(cie + 2); unsigned version = *ptr; - if (version != 1) - return -1; /* unsupported */ - if (*++ptr) { const char *aug; const u8 *end = (const u8 *)(cie + 1) + *cie; @@ -1019,9 +1015,7 @@ int arc_unwind(struct unwind_frame_info *frame) ptr = (const u8 *)(cie + 2); end = (const u8 *)(cie + 1) + *cie; frame->call_frame = 1; - if ((state.version = *ptr) != 1) - cie = NULL; /* unsupported version */ - else if (*++ptr) { + if (*++ptr) { /* check if augmentation size is first (thus present) */ if (*ptr == 'z') { while (++ptr < end && *ptr) { diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index ff7ff6cbb811..b65f797e9ad6 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c @@ -617,7 +617,7 @@ void flush_dcache_page(struct page *page) */ if (!mapping_mapped(mapping)) { clear_bit(PG_dc_clean, &page->flags); - } else if (page_mapped(page)) { + } else if (page_mapcount(page)) { /* kernel reading from page with U-mapping */ phys_addr_t paddr = (unsigned long)page_address(page); @@ -857,7 +857,7 @@ void copy_user_highpage(struct page *to, struct page *from, * For !VIPT cache, all of this gets compiled out as * addr_not_cache_congruent() is 0 */ - if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) { + if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) { __flush_dcache_page((unsigned long)kfrom, u_vaddr); clean_src_k_mappings = 1; } diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index 29a46bb198cc..01eaf88bf821 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -17,18 +17,14 @@ */ #include <linux/dma-mapping.h> -#include <linux/dma-debug.h> -#include <linux/export.h> #include <asm/cache.h> #include <asm/cacheflush.h> -/* - * Helpers for Coherent DMA API. 
- */ -void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) + +static void *arc_dma_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) { - void *paddr; + void *paddr, *kvaddr; /* This is linear addr (0x8000_0000 based) */ paddr = alloc_pages_exact(size, gfp); @@ -38,22 +34,6 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size, /* This is bus address, platform dependent */ *dma_handle = (dma_addr_t)paddr; - return paddr; -} -EXPORT_SYMBOL(dma_alloc_noncoherent); - -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle) -{ - free_pages_exact((void *)dma_handle, size); -} -EXPORT_SYMBOL(dma_free_noncoherent); - -void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) -{ - void *paddr, *kvaddr; - /* * IOC relies on all data (even coherent DMA data) being in cache * Thus allocate normal cached memory @@ -65,22 +45,15 @@ void *dma_alloc_coherent(struct device *dev, size_t size, * -For coherent data, Read/Write to buffers terminate early in cache * (vs. always going to memory - thus are faster) */ - if (is_isa_arcv2() && ioc_exists) - return dma_alloc_noncoherent(dev, size, dma_handle, gfp); - - /* This is linear addr (0x8000_0000 based) */ - paddr = alloc_pages_exact(size, gfp); - if (!paddr) - return NULL; + if ((is_isa_arcv2() && ioc_exists) || + dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) + return paddr; /* This is kernel Virtual address (0x7000_0000 based) */ kvaddr = ioremap_nocache((unsigned long)paddr, size); if (kvaddr == NULL) return NULL; - /* This is bus address, platform dependent */ - *dma_handle = (dma_addr_t)paddr; - /* * Evict any existing L1 and/or L2 lines for the backing page * in case it was used earlier as a normal "cached" page. @@ -95,26 +68,111 @@ void *dma_alloc_coherent(struct device *dev, size_t size, return kvaddr; } -EXPORT_SYMBOL(dma_alloc_coherent); -void dma_free_coherent(struct device *dev, size_t size, void *kvaddr, - dma_addr_t dma_handle) +static void arc_dma_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, struct dma_attrs *attrs) { - if (is_isa_arcv2() && ioc_exists) - return dma_free_noncoherent(dev, size, kvaddr, dma_handle); - - iounmap((void __force __iomem *)kvaddr); + if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) && + !(is_isa_arcv2() && ioc_exists)) + iounmap((void __force __iomem *)vaddr); free_pages_exact((void *)dma_handle, size); } -EXPORT_SYMBOL(dma_free_coherent); /* - * Helper for streaming DMA... + * streaming DMA Mapping API... 
+ * CPU accesses page via normal paddr, thus needs to explicitly made + * consistent before each use */ -void __arc_dma_cache_sync(unsigned long paddr, size_t size, - enum dma_data_direction dir) +static void _dma_cache_sync(unsigned long paddr, size_t size, + enum dma_data_direction dir) +{ + switch (dir) { + case DMA_FROM_DEVICE: + dma_cache_inv(paddr, size); + break; + case DMA_TO_DEVICE: + dma_cache_wback(paddr, size); + break; + case DMA_BIDIRECTIONAL: + dma_cache_wback_inv(paddr, size); + break; + default: + pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr); + } +} + +static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + unsigned long paddr = page_to_phys(page) + offset; + _dma_cache_sync(paddr, size, dir); + return (dma_addr_t)paddr; +} + +static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, struct dma_attrs *attrs) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) + s->dma_address = dma_map_page(dev, sg_page(s), s->offset, + s->length, dir); + + return nents; +} + +static void arc_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) +{ + _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE); +} + +static void arc_dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) { - __inline_dma_cache_sync(paddr, size, dir); + _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE); } -EXPORT_SYMBOL(__arc_dma_cache_sync); + +static void arc_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sglist, int nelems, + enum dma_data_direction dir) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sglist, sg, nelems, i) + _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); +} + +static void arc_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sglist, int nelems, + enum dma_data_direction dir) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sglist, sg, nelems, i) + _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); +} + +static int arc_dma_supported(struct device *dev, u64 dma_mask) +{ + /* Support 32 bit DMA mask exclusively */ + return dma_mask == DMA_BIT_MASK(32); +} + +struct dma_map_ops arc_dma_ops = { + .alloc = arc_dma_alloc, + .free = arc_dma_free, + .map_page = arc_dma_map_page, + .map_sg = arc_dma_map_sg, + .sync_single_for_device = arc_dma_sync_single_for_device, + .sync_single_for_cpu = arc_dma_sync_single_for_cpu, + .sync_sg_for_cpu = arc_dma_sync_sg_for_cpu, + .sync_sg_for_device = arc_dma_sync_sg_for_device, + .dma_supported = arc_dma_supported, +}; +EXPORT_SYMBOL(arc_dma_ops); diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c index 065ee6bfa82a..92dd92cad7f9 100644 --- a/arch/arc/mm/highmem.c +++ b/arch/arc/mm/highmem.c @@ -111,7 +111,7 @@ void __kunmap_atomic(void *kv) } EXPORT_SYMBOL(__kunmap_atomic); -noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr) +static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr) { pgd_t *pgd_k; pud_t *pud_k; @@ -127,7 +127,7 @@ noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr) return pte_k; } -void kmap_init(void) +void __init kmap_init(void) { /* Due to recursive include hell, we can't do this in processor.h */ BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE)); |