/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Chris Dearman (chris@mips.com)
* Copyright (C) 2007 Mips Technologies, Inc.
* Copyright (C) 2014 Imagination Technologies Ltd.
*/
#ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
#define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
/*
* Prepare segments for EVA boot:
*
* This is in case the processor boots in legacy configuration
* (SI_EVAReset is de-asserted and CONFIG5.K == 0)
*
* On entry, t1 is loaded with CP0_CONFIG
*
* ========================= Mappings =============================
* Virtual memory           Physical memory          Mapping
* 0x00000000 - 0x7fffffff  0x80000000 - 0xffffffff  MUSUK (kuseg)
*                          Flat 2GB physical memory
*
* 0x80000000 - 0x9fffffff  0x00000000 - 0x1fffffff  MUSUK (kseg0)
* 0xa0000000 - 0xbfffffff  0x00000000 - 0x1fffffff  MUSUK (kseg1)
* 0xc0000000 - 0xdfffffff             -             MK    (kseg2)
* 0xe0000000 - 0xffffffff             -             MK    (kseg3)
*
*
* Lowmem is expanded to 2GB
*/
.macro eva_entry
/*
* Get Config.K0 value and use it to program
* the segmentation registers
*/
andi t1, 0x7 /* CCA */
move t2, t1
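/*
* Each SegCtl register holds two 16-bit segment configurations, so
* mirror the CCA into bits 18:16 as well; t2 can then be OR'd into
* both halves of a register at once.
*/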
ins t2, t1, 16, 3
/* SegCtl0: CFG0 (kseg3) and CFG1 (kseg2) as mapped kernel (MK) */
li t0, ((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
(0 << MIPS_SEGCFG_PA_SHIFT) | \
(1 << MIPS_SEGCFG_EU_SHIFT)) | \
(((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
(0 << MIPS_SEGCFG_PA_SHIFT) | \
(1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
or t0, t2
mtc0 t0, $5, 2
/* SegCtl1: CFG2 (kseg1, forced uncached) and CFG3 (kseg0) as MUSUK */
li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
(0 << MIPS_SEGCFG_PA_SHIFT) | \
(2 << MIPS_SEGCFG_C_SHIFT) | \
(1 << MIPS_SEGCFG_EU_SHIFT)) | \
(((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
(0 << MIPS_SEGCFG_PA_SHIFT) | \
(1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
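/*
* The kseg1 (low) half keeps its hard-coded uncached CCA, so only
* insert the boot CCA into the kseg0 (high) half.
*/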
ins t0, t1, 16, 3
mtc0 t0, $5, 3
/*
* SegCtl2: CFG4 and CFG5 (useg) as MUSUK; PA values 6 and 4 place
* their physical bases at 0xc0000000 and 0x80000000 (512MB units)
*/
li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
(6 << MIPS_SEGCFG_PA_SHIFT) | \
(1 << MIPS_SEGCFG_EU_SHIFT)) | \
(((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
(4 << MIPS_SEGCFG_PA_SHIFT) | \
(1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
or t0, t2
mtc0 t0, $5, 4
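/*
* Clear execution hazards, then set Config5.K (bit 30) so that the
* legacy Config.K0/KU/K23 cacheability fields are ignored and the
* segment CCAs programmed above take effect.
*/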
jal mips_ihb
mfc0 t0, $16, 5
li t2, 0x40000000 /* K bit */
or t0, t0, t2
mtc0 t0, $16, 5
sync
jal mips_ihb
.endm
.macro kernel_entry_setup
#ifdef CONFIG_MIPS_MT_SMTC
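/*
* Walk Config -> Config3 via the M (bit 31) continuation bits, then
* test Config3.MT; without the MT module, drop to the YAMON error
* path below.
*/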
mfc0 t0, CP0_CONFIG
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 1
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 2
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 3
and t0, 1<<2 /* Config3.MT: is the MT module implemented? */
bnez t0, 0f
9:
/* Assume we came from YAMON... */
PTR_LA v0, 0x9fc00534 /* YAMON print */
lw v0, (v0)
move a0, zero
PTR_LA a1, nonmt_processor
jal v0
PTR_LA v0, 0x9fc00520 /* YAMON exit */
lw v0, (v0)
li a0, 1
jal v0
1: b 1b
__INITDATA
nonmt_processor:
.asciz "SMTC kernel requires the MT ASE to run\n"
__FINIT
#endif
#ifdef CONFIG_EVA
sync
ehb
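/*
* Walk Config -> Config3 via the M (bit 31) continuation bits;
* Config3.SC indicates that the segmentation control registers
* required for EVA are implemented.
*/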
mfc0 t1, CP0_CONFIG
bgez t1, 9f
mfc0 t0, CP0_CONFIG, 1
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 2
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 3
sll t0, t0, 6 /* move Config3.SC (bit 25) up to bit 31 */
bgez t0, 9f
eva_entry
b 0f
9:
/* Assume we came from YAMON... */
PTR_LA v0, 0x9fc00534 /* YAMON print */
lw v0, (v0)
move a0, zero
PTR_LA a1, nonsc_processor
jal v0
PTR_LA v0, 0x9fc00520 /* YAMON exit */
lw v0, (v0)
li a0, 1
jal v0
1: b 1b
nop
__INITDATA
nonsc_processor:
.asciz "EVA kernel requires a MIPS core with Segment Control implemented\n"
__FINIT
#endif /* CONFIG_EVA */
0:
.endm
/*
* Do SMP slave processor setup necessary before we can safely execute C code.
*/
.macro smp_slave_setup
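/* No board-specific slave setup is required here */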
.endm
#endif /* __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H */