#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

#define MMU_NO_CONTEXT	~0UL	/* invalid/unallocated MMU context id */

#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>
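
/*
 * Book3S 64-bit kernels support both the hash and the radix MMU; each
 * helper below dispatches to the matching implementation based on
 * radix_enabled().
 */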

#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_pmd_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	if (radix_enabled())
		return radix__flush_hugetlb_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_kernel_range(start, end);
	return hash__flush_tlb_kernel_range(start, end);
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_tlb_mm(mm);
	return hash__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_tlb_page(vma, vmaddr);
	return hash__local_flush_tlb_page(vma, vmaddr);
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page_nohash(vma, vmaddr);
}

static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		return radix__tlb_flush(tlb);
	return hash__tlb_flush(tlb);
}
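
/*
 * On SMP, flush_tlb_mm() and flush_tlb_page() must make the invalidation
 * visible to all CPUs; UP builds simply map them to the local variants.
 */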

#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_tlb_mm(mm);
	return hash__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page(vma, vmaddr);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */

/*
 * Flush the page walk cache for the address.
 */
static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	/*
	 * Flush the page table walk cache on freeing a page table. The
	 * upper/higher level page table entry has already been marked
	 * none by now, so it is safe to flush the PWC here.
	 */
	if (!radix_enabled())
		return;

	radix__flush_tlb_pwc(tlb, address);
}
#endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */