/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/asm-uaccess.h>

/*
 *	__flush_cache_range(start,end) [fixup]
 *
 *	Ensure that the I and D caches are coherent within specified region.
 *	This is typically used when code has been written to a memory region,
 *	and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *	- fixup   - optional label to branch to on user fault
 */
.macro	__flush_cache_range, fixup
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	b	.Ldc_skip_\@
alternative_else_nop_endif
	mov	x2, x0
	mov	x3, x1
	dcache_by_line_op cvau, ish, x2, x3, x4, x5, \fixup
.Ldc_skip_\@:
alternative_if ARM64_HAS_CACHE_DIC
	isb
	b	.Lic_skip_\@
alternative_else_nop_endif
	invalidate_icache_by_line x0, x1, x2, x3, \fixup
.Lic_skip_\@:
.endm
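
/*
 * For reference, the dcache_by_line_op macro (asm/assembler.h) used above
 * expands to roughly the loop below. This is an illustrative sketch, not
 * the exact expansion (the real macro also handles the
 * ARM64_WORKAROUND_CLEAN_CACHE erratum and the optional fixup label):
 *
 *	dcache_line_size x4, x5		// line size from CTR_EL0.DminLine
 *	sub	x5, x4, #1
 *	bic	x2, x2, x5		// align start down to a line
 * 1:	dc	cvau, x2		// clean one line to the PoU
 *	add	x2, x2, x4
 *	cmp	x2, x3
 *	b.lo	1b
 *	dsb	ish			// complete cleans before later IC/ISB
 */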

/*
 *	__flush_icache_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified region.
 *	This is typically used when code has been written to a memory region,
 *	and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
SYM_FUNC_START(__flush_icache_range)
	__flush_cache_range
	ret
SYM_FUNC_END(__flush_icache_range)
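
/*
 * Typical use from C (illustrative sketch; the prototype lives in
 * asm/cacheflush.h) after new instructions have been written to memory:
 *
 *	write_insns(addr, size);	// placeholder for whatever patched the code
 *	__flush_icache_range((unsigned long)addr,
 *			     (unsigned long)addr + size);
 *
 * so that the new instructions become visible to the instruction stream.
 */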

/*
 *	__flush_cache_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified region.
 *	This is typically used when code has been written to a memory region,
 *	and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
SYM_FUNC_START(__flush_cache_user_range)
	uaccess_ttbr0_enable x2, x3, x4
	__flush_cache_range 2f
	mov	x0, xzr
1:
	uaccess_ttbr0_disable x1, x2
	ret
2:
	mov	x0, #-EFAULT
	b	1b
SYM_FUNC_END(__flush_cache_user_range)
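
/*
 * Unlike __flush_icache_range(), this variant operates on user addresses
 * and returns a value: 0 on success, or -EFAULT if a fault was taken on
 * the user mapping (the 2f fixup above). A caller would check it roughly
 * as follows (illustrative sketch):
 *
 *	long ret = __flush_cache_user_range(start, end);
 *	if (ret)
 *		return ret;	// -EFAULT: user mapping went away
 */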

/*
 *	invalidate_icache_range(start,end)
 *
 *	Ensure that the I cache is invalid within specified region.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
SYM_FUNC_START(invalidate_icache_range)
alternative_if ARM64_HAS_CACHE_DIC
	isb
	ret
alternative_else_nop_endif
	invalidate_icache_by_line x0, x1, x2, x3
	ret
SYM_FUNC_END(invalidate_icache_range)
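
/*
 * Note: ARM64_HAS_CACHE_DIC corresponds to CTR_EL0.DIC == 1, meaning
 * I-cache invalidation is not required for instruction-to-data coherence;
 * an ISB to resynchronize the instruction stream is sufficient, hence the
 * early return above.
 */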

/*
 *	__flush_dcache_area(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned and invalidated to the PoC.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
SYM_FUNC_START_PI(__flush_dcache_area)
	add	x1, x0, x1
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__flush_dcache_area)
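
/*
 * The _PI variants emit an additional __pi_<name> alias (see
 * SYM_FUNC_START_PI in asm/assembler.h) so the routine can also be called
 * from position-independent code, e.g. during early boot.
 */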

/*
 *	__clean_dcache_area_pou(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoU.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
SYM_FUNC_START(__clean_dcache_area_pou)
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	ret
alternative_else_nop_endif
	add	x1, x0, x1
	dcache_by_line_op cvau, ish, x0, x1, x2, x3
	ret
SYM_FUNC_END(__clean_dcache_area_pou)
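
/*
 * Note: ARM64_HAS_CACHE_IDC corresponds to CTR_EL0.IDC == 1, meaning a
 * D-cache clean to the PoU is not required for instruction-to-data
 * coherence; the DSB alone is enough to order the prior stores.
 */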

/*
 *	__inval_dcache_area(start, end)
 *
 *	Ensure that any D-cache lines for the interval [start, end)
 *	are invalidated. Any partial lines at the ends of the interval are
 *	also cleaned to PoC to prevent data loss.
 *
 *	- start   - kernel start address of region
 *	- end     - kernel end address of region
 */
SYM_FUNC_START_LOCAL(__dma_inv_area)
SYM_FUNC_START_PI(__inval_dcache_area)
	/* FALLTHROUGH */

/*
 *	__dma_inv_area(start, end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
	dcache_line_size x2, x3
	sub	x3, x2, #1
	tst	x1, x3				// end cache line aligned?
	bic	x1, x1, x3
	b.eq	1f
	dc	civac, x1			// clean & invalidate D / U line
1:	tst	x0, x3				// start cache line aligned?
	bic	x0, x0, x3
	b.eq	2f
	dc	civac, x0			// clean & invalidate D / U line
	b	3f
2:	dc	ivac, x0			// invalidate D / U line
3:	add	x0, x0, x2
	cmp	x0, x1
	b.lo	2b
	dsb	sy
	ret
SYM_FUNC_END_PI(__inval_dcache_area)
SYM_FUNC_END(__dma_inv_area)
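
/*
 * Worked example (illustrative, assuming a 64-byte cache line): for
 * start = 0x1008 and end = 0x10f0, the partial tail line at 0x10c0 and
 * the partial head line at 0x1000 are cleaned+invalidated (dc civac) so
 * that the bytes outside [start, end) are not lost, while the fully
 * covered lines at 0x1040 and 0x1080 are simply invalidated (dc ivac).
 */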

/*
 *	__clean_dcache_area_poc(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoC.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
SYM_FUNC_START_LOCAL(__dma_clean_area)
SYM_FUNC_START_PI(__clean_dcache_area_poc)
	/* FALLTHROUGH */

/*
 *	__dma_clean_area(start, size)
 *	- start   - virtual start address of region
 *	- size    - size in question
 */
	add	x1, x0, x1
	dcache_by_line_op cvac, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__clean_dcache_area_poc)
SYM_FUNC_END(__dma_clean_area)
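
/*
 * As with __dma_inv_area/__inval_dcache_area above, two entry points share
 * one body: __dma_clean_area is the local name used by the DMA helpers at
 * the end of this file, while __clean_dcache_area_poc is the externally
 * visible (and __pi_-aliased) one.
 */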

/*
 *	__clean_dcache_area_pop(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoP.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
SYM_FUNC_START_PI(__clean_dcache_area_pop)
alternative_if_not ARM64_HAS_DCPOP
	b	__clean_dcache_area_poc
alternative_else_nop_endif
	add	x1, x0, x1
	dcache_by_line_op cvap, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__clean_dcache_area_pop)
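
/*
 * DC CVAP (clean to the Point of Persistence) comes with the ARMv8.2
 * DCPoP feature and matters for persistent memory; without the feature,
 * the code above falls back to a clean to the PoC as the best available
 * guarantee.
 */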

/*
 *	__dma_flush_area(start, size)
 *
 *	clean & invalidate D / U line
 *
 *	- start   - virtual start address of region
 *	- size    - size in question
 */
SYM_FUNC_START_PI(__dma_flush_area)
	add	x1, x0, x1
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__dma_flush_area)
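
/*
 * This is the clean+invalidate variant, used when a buffer must both be
 * pushed out to the PoC and dropped from the cache entirely, e.g. when
 * preparing a non-cacheable coherent DMA buffer.
 */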

/*
 *	__dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_FUNC_START_PI(__dma_map_area)
	add	x1, x0, x1
	cmp	w2, #DMA_FROM_DEVICE
	b.eq	__dma_inv_area
	sub	x1, x1, x0
	b	__dma_clean_area
SYM_FUNC_END_PI(__dma_map_area)
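
/*
 * Note the x1 juggling above: x1 arrives holding a size, __dma_inv_area
 * expects an end address (hence the add), while __dma_clean_area expects
 * a size again (hence the sub on the non-FROM_DEVICE path).
 */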

/*
 *	__dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_FUNC_START_PI(__dma_unmap_area)
	add	x1, x0, x1
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_area
	ret
SYM_FUNC_END_PI(__dma_unmap_area)
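
/*
 * Direction summary, as implemented above: __dma_map_area invalidates for
 * DMA_FROM_DEVICE and cleans otherwise; __dma_unmap_area is a no-op for
 * DMA_TO_DEVICE (the CPU does not write while the device owns the buffer)
 * and invalidates otherwise.
 */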