Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig                 |   5
-rw-r--r--  arch/arm64/include/asm/pgtable.h   |  21
-rw-r--r--  arch/arm64/include/asm/tlb.h       |  20
-rw-r--r--  arch/arm64/mm/dma-mapping.c        | 164
-rw-r--r--  arch/arm64/mm/flush.c              |  16
5 files changed, 203 insertions(+), 23 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 3f0e854d0ff4..c49ca4c738bb 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -18,6 +18,7 @@ config ARM64
 	select COMMON_CLK
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select DCACHE_WORD_ACCESS
+	select GENERIC_ALLOCATOR
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
 	select GENERIC_CPU_AUTOPROBE
@@ -56,6 +57,7 @@ config ARM64
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_RCU_TABLE_FREE
 	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_DOMAIN
 	select MODULES_USE_ELF_RELA
@@ -109,6 +111,9 @@ config GENERIC_CALIBRATE_DELAY
 config ZONE_DMA
 	def_bool y
 
+config HAVE_GENERIC_RCU_GUP
+	def_bool y
+
 config ARCH_DMA_ADDR_T_64BIT
 	def_bool y
 
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 77dbe1e6398d..cefd3e825612 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -244,6 +244,16 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #define __HAVE_ARCH_PTE_SPECIAL
 
+static inline pte_t pud_pte(pud_t pud)
+{
+	return __pte(pud_val(pud));
+}
+
+static inline pmd_t pud_pmd(pud_t pud)
+{
+	return __pmd(pud_val(pud));
+}
+
 static inline pte_t pmd_pte(pmd_t pmd)
 {
 	return __pte(pmd_val(pmd));
@@ -261,7 +271,13 @@ static inline pmd_t pte_pmd(pte_t pte)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
 #define pmd_trans_splitting(pmd)	pte_special(pmd_pte(pmd))
-#endif
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+struct vm_area_struct;
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+			  pmd_t *pmdp);
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #define pmd_young(pmd)		pte_young(pmd_pte(pmd))
 #define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
@@ -282,6 +298,7 @@ static inline pmd_t pte_pmd(pte_t pte)
 #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
 
 #define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+#define pud_write(pud)		pte_write(pud_pte(pud))
 #define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
 
 #define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
@@ -383,6 +400,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
 }
 
+#define pud_page(pud)		pmd_page(pud_pmd(pud))
+
 #endif	/* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
 
 #if CONFIG_ARM64_PGTABLE_LEVELS > 3
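Note: HAVE_GENERIC_RCU_GUP opts arm64 into the generic lockless get_user_pages_fast() in mm/gup.c, which walks page tables without taking mmap_sem; the pud_pte()/pud_pmd() helpers and the pud_write()/pud_page() macros added above supply the PUD-level accessors that walk needs. A minimal, hypothetical caller is sketched below (my_pin_user_page and its context are illustrative, not part of this patch):

    #include <linux/mm.h>

    /*
     * Hypothetical sketch: pin one writable user page without taking
     * mmap_sem.  Signature in this era:
     *   int get_user_pages_fast(unsigned long start, int nr_pages,
     *                           int write, struct page **pages);
     * It falls back to the slow path when the lockless walk fails.
     */
    static int my_pin_user_page(unsigned long uaddr, struct page **page)
    {
            int nr = get_user_pages_fast(uaddr & PAGE_MASK, 1, 1, page);

            if (nr != 1)
                    return nr < 0 ? nr : -EFAULT;

            /* ... use *page ...; drop the reference with put_page() */
            return 0;
    }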
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 62731ef9749a..a82c0c5c8b52 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -23,6 +23,20 @@
 
 #include <asm-generic/tlb.h>
 
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
+static inline void __tlb_remove_table(void *_table)
+{
+	free_page_and_swap_cache((struct page *)_table);
+}
+#else
+#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+
 /*
  * There's three ways the TLB shootdown code is used:
  *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
@@ -88,7 +102,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 {
 	pgtable_page_dtor(pte);
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, pte);
+	tlb_remove_entry(tlb, pte);
 }
 
 #if CONFIG_ARM64_PGTABLE_LEVELS > 2
@@ -96,7 +110,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, virt_to_page(pmdp));
+	tlb_remove_entry(tlb, virt_to_page(pmdp));
 }
 #endif
 
@@ -105,7 +119,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
 				  unsigned long addr)
 {
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, virt_to_page(pudp));
+	tlb_remove_entry(tlb, virt_to_page(pudp));
 }
 #endif
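Note: with HAVE_RCU_TABLE_FREE, tlb_remove_entry() routes freed page-table pages through tlb_remove_table(), which only returns them to the allocator once every CPU is known to have left any lockless walk (the real implementation in mm/memory.c batches pages and uses RCU-sched with an IPI fallback); __tlb_remove_table() above is the per-arch callback that does the actual free. A deliberately simplified sketch of the idea, assuming a hypothetical wrapper struct that is not in this patch:

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/swap.h>

    /*
     * Illustration only: defer freeing one table page past an RCU grace
     * period so a concurrent lockless walker never dereferences freed
     * memory.  Allocation-failure handling is elided.
     */
    struct deferred_table {
            struct rcu_head rcu;
            struct page *page;
    };

    static void deferred_table_free(struct rcu_head *head)
    {
            struct deferred_table *d =
                    container_of(head, struct deferred_table, rcu);

            free_page_and_swap_cache(d->page); /* as __tlb_remove_table() */
            kfree(d);
    }

    static void defer_table_free(struct page *page)
    {
            struct deferred_table *d = kmalloc(sizeof(*d), GFP_ATOMIC);

            d->page = page;
            call_rcu(&d->rcu, deferred_table_free);
    }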
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 2c71077cacfd..d92094203913 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -20,6 +20,7 @@
 #include <linux/gfp.h>
 #include <linux/export.h>
 #include <linux/slab.h>
+#include <linux/genalloc.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
 #include <linux/vmalloc.h>
@@ -38,6 +39,54 @@ static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
 	return prot;
 }
 
+static struct gen_pool *atomic_pool;
+
+#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
+static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
+
+static int __init early_coherent_pool(char *p)
+{
+	atomic_pool_size = memparse(p, &p);
+	return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
+
+static void *__alloc_from_pool(size_t size, struct page **ret_page)
+{
+	unsigned long val;
+	void *ptr = NULL;
+
+	if (!atomic_pool) {
+		WARN(1, "coherent pool not initialised!\n");
+		return NULL;
+	}
+
+	val = gen_pool_alloc(atomic_pool, size);
+	if (val) {
+		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
+
+		*ret_page = phys_to_page(phys);
+		ptr = (void *)val;
+	}
+
+	return ptr;
+}
+
+static bool __in_atomic_pool(void *start, size_t size)
+{
+	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
+}
+
+static int __free_from_pool(void *start, size_t size)
+{
+	if (!__in_atomic_pool(start, size))
+		return 0;
+
+	gen_pool_free(atomic_pool, (unsigned long)start, size);
+
+	return 1;
+}
+
 static void *__dma_alloc_coherent(struct device *dev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  struct dma_attrs *attrs)
@@ -50,7 +99,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
 	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
 		flags |= GFP_DMA;
-	if (IS_ENABLED(CONFIG_DMA_CMA)) {
+	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
 		struct page *page;
 
 		size = PAGE_ALIGN(size);
@@ -70,50 +119,54 @@ static void __dma_free_coherent(struct device *dev, size_t size,
 				void *vaddr, dma_addr_t dma_handle,
 				struct dma_attrs *attrs)
 {
+	bool freed;
+	phys_addr_t paddr = dma_to_phys(dev, dma_handle);
+
 	if (dev == NULL) {
 		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
 		return;
 	}
 
-	if (IS_ENABLED(CONFIG_DMA_CMA)) {
-		phys_addr_t paddr = dma_to_phys(dev, dma_handle);
-
-		dma_release_from_contiguous(dev,
+	freed = dma_release_from_contiguous(dev,
 					phys_to_page(paddr),
 					size >> PAGE_SHIFT);
-	} else {
+	if (!freed)
 		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-	}
 }
 
 static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
 				     dma_addr_t *dma_handle, gfp_t flags,
 				     struct dma_attrs *attrs)
 {
-	struct page *page, **map;
+	struct page *page;
 	void *ptr, *coherent_ptr;
-	int order, i;
 
 	size = PAGE_ALIGN(size);
-	order = get_order(size);
+
+	if (!(flags & __GFP_WAIT)) {
+		struct page *page = NULL;
+		void *addr = __alloc_from_pool(size, &page);
+
+		if (addr)
+			*dma_handle = phys_to_dma(dev, page_to_phys(page));
+
+		return addr;
+
+	}
 
 	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
 	if (!ptr)
 		goto no_mem;
-	map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
-	if (!map)
-		goto no_map;
 
 	/* remove any dirty cache lines on the kernel alias */
 	__dma_flush_range(ptr, ptr + size);
 
 	/* create a coherent mapping */
 	page = virt_to_page(ptr);
-	for (i = 0; i < (size >> PAGE_SHIFT); i++)
-		map[i] = page + i;
-	coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
-			    __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false));
-	kfree(map);
+	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
+				__get_dma_pgprot(attrs,
+					__pgprot(PROT_NORMAL_NC), false),
+				NULL);
 	if (!coherent_ptr)
 		goto no_map;
 
@@ -132,6 +185,8 @@ static void __dma_free_noncoherent(struct device *dev, size_t size,
 {
 	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
 
+	if (__free_from_pool(vaddr, size))
+		return;
 	vunmap(vaddr);
 	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
 }
@@ -307,6 +362,67 @@ EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
 
 extern int swiotlb_late_init_with_default_size(size_t default_size);
 
+static int __init atomic_pool_init(void)
+{
+	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
+	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
+	struct page *page;
+	void *addr;
+	unsigned int pool_size_order = get_order(atomic_pool_size);
+
+	if (dev_get_cma_area(NULL))
+		page = dma_alloc_from_contiguous(NULL, nr_pages,
+							pool_size_order);
+	else
+		page = alloc_pages(GFP_DMA, pool_size_order);
+
+	if (page) {
+		int ret;
+		void *page_addr = page_address(page);
+
+		memset(page_addr, 0, atomic_pool_size);
+		__dma_flush_range(page_addr, page_addr + atomic_pool_size);
+
+		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
+		if (!atomic_pool)
+			goto free_page;
+
+		addr = dma_common_contiguous_remap(page, atomic_pool_size,
+					VM_USERMAP, prot, atomic_pool_init);
+
+		if (!addr)
+			goto destroy_genpool;
+
+		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
+					page_to_phys(page),
+					atomic_pool_size, -1);
+		if (ret)
+			goto remove_mapping;
+
+		gen_pool_set_algo(atomic_pool,
+				  gen_pool_first_fit_order_align,
+				  (void *)PAGE_SHIFT);
+
+		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
+			atomic_pool_size / 1024);
+		return 0;
+	}
+	goto out;
+
+remove_mapping:
+	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
+destroy_genpool:
+	gen_pool_destroy(atomic_pool);
+	atomic_pool = NULL;
+free_page:
+	if (!dma_release_from_contiguous(NULL, page, nr_pages))
+		__free_pages(page, pool_size_order);
+out:
+	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
+	       atomic_pool_size / 1024);
+	return -ENOMEM;
+}
+
 static int __init swiotlb_late_init(void)
 {
 	size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
@@ -315,7 +431,17 @@ static int __init swiotlb_late_init(void)
 
 	return swiotlb_late_init_with_default_size(swiotlb_size);
 }
-arch_initcall(swiotlb_late_init);
+
+static int __init arm64_dma_init(void)
+{
+	int ret = 0;
+
+	ret |= swiotlb_late_init();
+	ret |= atomic_pool_init();
+
+	return ret;
+}
+arch_initcall(arm64_dma_init);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES	4096
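Note: the pool exists because __dma_alloc_noncoherent() must remap its pages Normal Non-Cacheable, and neither that remap nor CMA can be used from atomic context; !(flags & __GFP_WAIT) requests are therefore carved out of the pre-mapped gen_pool built in atomic_pool_init() (256 KiB by default, tunable with the coherent_pool= boot parameter, e.g. coherent_pool=2M). A hypothetical driver-side caller (the function names and context are illustrative, not part of this patch):

    #include <linux/dma-mapping.h>

    /*
     * Hypothetical sketch: allocate a descriptor ring from IRQ context
     * on a non-coherent arm64 device.  With this patch the GFP_ATOMIC
     * request is served from the pre-remapped atomic pool instead of
     * failing in the remap path.
     */
    static void *ring_alloc_atomic(struct device *dev, size_t size,
                                   dma_addr_t *dma)
    {
            return dma_alloc_coherent(dev, size, dma, GFP_ATOMIC);
    }

    static void ring_free(struct device *dev, size_t size, void *cpu,
                          dma_addr_t dma)
    {
            /* __dma_free_noncoherent() tries __free_from_pool() first */
            dma_free_coherent(dev, size, cpu, dma);
    }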
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 0d64089d28b5..b6f14e8d2121 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -104,3 +104,19 @@ EXPORT_SYMBOL(flush_dcache_page);
  */
 EXPORT_SYMBOL(flush_cache_all);
 EXPORT_SYMBOL(flush_icache_range);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+			  pmd_t *pmdp)
+{
+	pmd_t pmd = pmd_mksplitting(*pmdp);
+
+	VM_BUG_ON(address & ~PMD_MASK);
+	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+
+	/* dummy IPI to serialise against fast_gup */
+	kick_all_cpus_sync();
+}
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
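Note: the "dummy IPI" above works because the generic RCU fast_gup performs its page-table walk with interrupts disabled, so kick_all_cpus_sync() cannot return until every in-flight walker has finished, after which all CPUs observe the splitting PMD. Schematically (a paraphrase of the shape of mm/gup.c's lockless walk, not the literal code; fast_gup_shape is an invented name):

    /* Illustration only: the critical section the IPI serialises with. */
    static int fast_gup_shape(unsigned long start, int nr_pages,
                              struct page **pages)
    {
            unsigned long flags;
            int nr = 0;

            local_irq_save(flags);  /* holds off the dummy IPI */
            /* ... walk pgd/pud/pmd/pte, bailing out on entries that are
             * pmd_trans_splitting(), taking speculative page refs ... */
            local_irq_restore(flags);

            return nr;      /* the slow path picks up whatever remains */
    }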