1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  *  linux/arch/arm/mm/cache-v6.S
4  *
5  *  Copyright (C) 2001 Deep Blue Solutions Ltd.
6  *
7  *  This is the "shell" of the ARMv6 processor support.
8  */
9 #include <linux/linkage.h>
10 #include <linux/init.h>
11 #include <asm/assembler.h>
12 #include <asm/errno.h>
13 #include <asm/unwind.h>
14 
15 #include "proc-macros.S"
16 
17 #define HARVARD_CACHE
18 #define CACHE_LINE_SIZE		32
19 #define D_CACHE_LINE_SIZE	32
20 #define BTB_FLUSH_SIZE		8
21 
22 .arch armv6
23 
24 /*
25  *	v6_flush_icache_all()
26  *
27  *	Flush the whole I-cache.
28  *
29  *	ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail.
30  *	This erratum is present in 1136, 1156 and 1176. It does not affect the
31  *	MPCore.
32  *
33  *	Registers:
34  *	r0 - set to 0
35  *	r1 - corrupted
36  */
37 ENTRY(v6_flush_icache_all)
38 	mov	r0, #0
39 #ifdef CONFIG_ARM_ERRATA_411920
40 	mrs	r1, cpsr
41 	cpsid	ifa				@ disable interrupts
42 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
43 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
44 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
45 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
46 	msr	cpsr_cx, r1			@ restore interrupts
47 	.rept	11				@ ARM Ltd recommends at least
48 	nop					@ 11 NOPs
49 	.endr
50 #else
51 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I-cache
52 #endif
53 	ret	lr
54 ENDPROC(v6_flush_icache_all)
55 
56 /*
57  *	v6_flush_cache_all()
58  *
59  *	Flush the entire cache.
60  *
61  *	It is assumed that:
62  */
63 ENTRY(v6_flush_kern_cache_all)
64 	mov	r0, #0
65 #ifdef HARVARD_CACHE
66 	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
67 #ifndef CONFIG_ARM_ERRATA_411920
68 	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
69 #else
70 	b	v6_flush_icache_all
71 #endif
72 #else
73 	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
74 #endif
75 	ret	lr
76 
77 /*
78  *	v6_flush_cache_all()
79  *
80  *	Flush all TLB entries in a particular address space
81  *
82  *	- mm    - mm_struct describing address space
83  */
84 ENTRY(v6_flush_user_cache_all)
85 	/*FALLTHROUGH*/
86 
87 /*
88  *	v6_flush_cache_range(start, end, flags)
89  *
90  *	Flush a range of TLB entries in the specified address space.
91  *
92  *	- start - start address (may not be aligned)
93  *	- end   - end address (exclusive, may not be aligned)
94  *	- flags	- vm_area_struct flags describing address space
95  *
96  *	It is assumed that:
97  *	- we have a VIPT cache.
98  */
99 ENTRY(v6_flush_user_cache_range)
100 	ret	lr
101 
102 /*
103  *	v6_coherent_kern_range(start,end)
104  *
105  *	Ensure that the I and D caches are coherent within specified
106  *	region.  This is typically used when code has been written to
107  *	a memory region, and will be executed.
108  *
109  *	- start   - virtual start address of region
110  *	- end     - virtual end address of region
111  *
112  *	It is assumed that:
113  *	- the Icache does not read data from the write buffer
114  */
115 ENTRY(v6_coherent_kern_range)
116 	/* FALLTHROUGH */
117 
118 /*
119  *	v6_coherent_user_range(start,end)
120  *
121  *	Ensure that the I and D caches are coherent within specified
122  *	region.  This is typically used when code has been written to
123  *	a memory region, and will be executed.
124  *
125  *	- start   - virtual start address of region
126  *	- end     - virtual end address of region
127  *
128  *	It is assumed that:
129  *	- the Icache does not read data from the write buffer
130  */
131 ENTRY(v6_coherent_user_range)
132  UNWIND(.fnstart		)
133 #ifdef HARVARD_CACHE
134 	bic	r0, r0, #CACHE_LINE_SIZE - 1
135 1:
136  USER(	mcr	p15, 0, r0, c7, c10, 1	)	@ clean D line
137 	add	r0, r0, #CACHE_LINE_SIZE
138 	cmp	r0, r1
139 	blo	1b
140 #endif
141 	mov	r0, #0
142 #ifdef HARVARD_CACHE
143 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
144 #ifndef CONFIG_ARM_ERRATA_411920
145 	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
146 #else
147 	b	v6_flush_icache_all
148 #endif
149 #else
150 	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
151 #endif
152 	ret	lr
153 
154 /*
155  * Fault handling for the cache operation above. If the virtual address in r0
156  * isn't mapped, fail with -EFAULT.
157  */
158 9001:
159 	mov	r0, #-EFAULT
160 	ret	lr
161  UNWIND(.fnend		)
162 ENDPROC(v6_coherent_user_range)
163 ENDPROC(v6_coherent_kern_range)
164 
165 /*
166  *	v6_flush_kern_dcache_area(void *addr, size_t size)
167  *
168  *	Ensure that the data held in the page kaddr is written back
169  *	to the page in question.
170  *
171  *	- addr	- kernel address
172  *	- size	- region size
173  */
174 ENTRY(v6_flush_kern_dcache_area)
175 	add	r1, r0, r1
176 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
177 1:
178 #ifdef HARVARD_CACHE
179 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
180 #else
181 	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate unified line
182 #endif
183 	add	r0, r0, #D_CACHE_LINE_SIZE
184 	cmp	r0, r1
185 	blo	1b
186 #ifdef HARVARD_CACHE
187 	mov	r0, #0
188 	mcr	p15, 0, r0, c7, c10, 4
189 #endif
190 	ret	lr
191 
192 
193 /*
194  *	v6_dma_inv_range(start,end)
195  *
196  *	Invalidate the data cache within the specified region; we will
197  *	be performing a DMA operation in this region and we want to
198  *	purge old data in the cache.
199  *
200  *	- start   - virtual start address of region
201  *	- end     - virtual end address of region
202  */
203 v6_dma_inv_range:
204 #ifdef CONFIG_DMA_CACHE_RWFO
205 	ldrb	r2, [r0]			@ read for ownership
206 	strb	r2, [r0]			@ write for ownership
207 #endif
208 	tst	r0, #D_CACHE_LINE_SIZE - 1
209 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
210 #ifdef HARVARD_CACHE
211 	mcrne	p15, 0, r0, c7, c10, 1		@ clean D line
212 #else
213 	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line
214 #endif
215 	tst	r1, #D_CACHE_LINE_SIZE - 1
216 #ifdef CONFIG_DMA_CACHE_RWFO
217 	ldrbne	r2, [r1, #-1]			@ read for ownership
218 	strbne	r2, [r1, #-1]			@ write for ownership
219 #endif
220 	bic	r1, r1, #D_CACHE_LINE_SIZE - 1
221 #ifdef HARVARD_CACHE
222 	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line
223 #else
224 	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
225 #endif
226 1:
227 #ifdef HARVARD_CACHE
228 	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
229 #else
230 	mcr	p15, 0, r0, c7, c7, 1		@ invalidate unified line
231 #endif
232 	add	r0, r0, #D_CACHE_LINE_SIZE
233 	cmp	r0, r1
234 #ifdef CONFIG_DMA_CACHE_RWFO
235 	ldrlo	r2, [r0]			@ read for ownership
236 	strlo	r2, [r0]			@ write for ownership
237 #endif
238 	blo	1b
239 	mov	r0, #0
240 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
241 	ret	lr
242 
243 /*
244  *	v6_dma_clean_range(start,end)
245  *	- start   - virtual start address of region
246  *	- end     - virtual end address of region
247  */
248 v6_dma_clean_range:
249 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
250 1:
251 #ifdef CONFIG_DMA_CACHE_RWFO
252 	ldr	r2, [r0]			@ read for ownership
253 #endif
254 #ifdef HARVARD_CACHE
255 	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
256 #else
257 	mcr	p15, 0, r0, c7, c11, 1		@ clean unified line
258 #endif
259 	add	r0, r0, #D_CACHE_LINE_SIZE
260 	cmp	r0, r1
261 	blo	1b
262 	mov	r0, #0
263 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
264 	ret	lr
265 
266 /*
267  *	v6_dma_flush_range(start,end)
268  *	- start   - virtual start address of region
269  *	- end     - virtual end address of region
270  */
271 ENTRY(v6_dma_flush_range)
272 #ifdef CONFIG_DMA_CACHE_RWFO
273 	ldrb	r2, [r0]		@ read for ownership
274 	strb	r2, [r0]		@ write for ownership
275 #endif
276 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
277 1:
278 #ifdef HARVARD_CACHE
279 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
280 #else
281 	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate line
282 #endif
283 	add	r0, r0, #D_CACHE_LINE_SIZE
284 	cmp	r0, r1
285 #ifdef CONFIG_DMA_CACHE_RWFO
286 	ldrblo	r2, [r0]			@ read for ownership
287 	strblo	r2, [r0]			@ write for ownership
288 #endif
289 	blo	1b
290 	mov	r0, #0
291 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
292 	ret	lr
293 
294 /*
295  *	dma_map_area(start, size, dir)
296  *	- start	- kernel virtual start address
297  *	- size	- size of region
298  *	- dir	- DMA direction
299  */
300 ENTRY(v6_dma_map_area)
301 	add	r1, r1, r0
302 	teq	r2, #DMA_FROM_DEVICE
303 	beq	v6_dma_inv_range
304 #ifndef CONFIG_DMA_CACHE_RWFO
305 	b	v6_dma_clean_range
306 #else
307 	teq	r2, #DMA_TO_DEVICE
308 	beq	v6_dma_clean_range
309 	b	v6_dma_flush_range
310 #endif
311 ENDPROC(v6_dma_map_area)
312 
313 /*
314  *	dma_unmap_area(start, size, dir)
315  *	- start	- kernel virtual start address
316  *	- size	- size of region
317  *	- dir	- DMA direction
318  */
319 ENTRY(v6_dma_unmap_area)
320 #ifndef CONFIG_DMA_CACHE_RWFO
321 	add	r1, r1, r0
322 	teq	r2, #DMA_TO_DEVICE
323 	bne	v6_dma_inv_range
324 #endif
325 	ret	lr
326 ENDPROC(v6_dma_unmap_area)
327 
	@ ARMv6 has no cache-level distinction, so flushing to the Level of
	@ Unification Inner Shareable is the same as flushing the whole cache.
	.globl	v6_flush_kern_cache_louis
	.equ	v6_flush_kern_cache_louis, v6_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v6
335