// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CoProcessor (SPU/AFU) mm fault handler
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 * Author: Jeremy Kerr <jk@ozlabs.org>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <asm/reg.h>
#include <asm/copro.h>
#include <asm/spu.h>
#include <misc/cxl-base.h>

/*
 * This ought to be kept in sync with the powerpc specific do_page_fault
 * code. Fortunately, there are currently only a few corner cases that we
 * have not had to handle.
 */
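/*
 * Resolve a page fault taken by a coprocessor (SPU/AFU) against the given
 * mm. Returns 0 on success with the VM_FAULT_* result in *flt, -EFAULT
 * when the access is not covered by a suitable VMA, or -ENOMEM when the
 * fault could not be serviced due to memory exhaustion.
 */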
int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
			  unsigned long dsisr, vm_fault_t *flt)
{
	struct vm_area_struct *vma;
	unsigned long is_write;
	int ret;

	if (mm == NULL)
		return -EFAULT;

	if (mm->pgd == NULL)
		return -EFAULT;

	down_read(&mm->mmap_sem);
	ret = -EFAULT;
	vma = find_vma(mm, ea);
	if (!vma)
		goto out_unlock;
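
	/* The fault may sit just below a stack VMA: try to grow it downwards */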
	if (ea < vma->vm_start) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_unlock;
		if (expand_stack(vma, ea))
			goto out_unlock;
	}
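
	/* DSISR_ISSTORE is set for store faults; check the VMA permits the access */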
	is_write = dsisr & DSISR_ISSTORE;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto out_unlock;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto out_unlock;
		/*
		 * PROT_NONE is covered by the VMA check above,
		 * and hash should get a NOHPTE fault instead of
		 * a PROTFAULT in case fixup is needed for things
		 * like autonuma.
		 */
		if (!radix_enabled())
			WARN_ON_ONCE(dsisr & DSISR_PROTFAULT);
	}
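
	/* Fault the page in and hand the VM_FAULT_* result back via *flt */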
	ret = 0;
	*flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(*flt & VM_FAULT_ERROR)) {
		if (*flt & VM_FAULT_OOM) {
			ret = -ENOMEM;
			goto out_unlock;
		} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
			ret = -EFAULT;
			goto out_unlock;
		}
		BUG();
	}
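
	/* Account major/minor fault statistics against the current task */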
	if (*flt & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;

out_unlock:
	up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
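
/*
 * Build the SLB entry (ESID/VSID pair) that maps effective address @ea so
 * that a coprocessor can install it in its own SLB. Returns 0 on success
 * and fills in @slb; returns nonzero for an address we cannot translate.
 */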
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
{
	u64 vsid, vsidkey;
	int psize, ssize;
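
	/* Pick the page size, segment size and VSID for the region @ea falls in */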
	switch (get_region_id(ea)) {
	case USER_REGION_ID:
		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
		if (mm == NULL)
			return 1;
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_user_vsid(&mm->context, ea, ssize);
		vsidkey = SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
		psize = mmu_vmalloc_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case IO_REGION_ID:
		pr_devel("%s: 0x%llx -- IO_REGION_ID\n", __func__, ea);
		psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case LINEAR_MAP_REGION_ID:
		pr_devel("%s: 0x%llx -- LINEAR_MAP_REGION_ID\n", __func__, ea);
		psize = mmu_linear_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	default:
		pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
		return 1;
	}

	/* Bad address */
	if (!vsid)
		return 1;
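
	/* Fold in the key bits, page-size encoding (SLLP) and 1T segment flag */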
	vsid = (vsid << slb_vsid_shift(ssize)) | vsidkey;
	vsid |= mmu_psize_defs[psize].sllp |
		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);

	slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
	slb->vsid = vsid;

	return 0;
}
EXPORT_SYMBOL_GPL(copro_calculate_slb);
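
/*
 * Invalidate coprocessor SLB entries for @mm on every attached coprocessor:
 * all SPUs (when SPU support is built in) and all cxl devices.
 */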
void copro_flush_all_slbs(struct mm_struct *mm)
{
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
	cxl_slbia(mm);
}
EXPORT_SYMBOL_GPL(copro_flush_all_slbs);