/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4.S
 *
 *  Copyright (C) 1997-2002 Russell King
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4_flush_icache_all)
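	/*
	 * Nothing to do: the ARMv4 parts using these ops have either a
	 * single unified cache or no cache at all, so there is no
	 * separate icache to invalidate here.
	 */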
	ret	lr
ENDPROC(v4_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v4_flush_user_cache_all)
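	/*
	 * There is no way to invalidate only the entries belonging to a
	 * single address space on these CPUs, so fall through and flush
	 * the whole cache instead.
	 */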
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4_flush_kern_cache_all)
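	/*
	 * Only the whole-cache "flush ID cache" operation is used here;
	 * the cache on these parts is write-through, so invalidating it
	 * also acts as a clean+invalidate.  Cores built without CP15
	 * have no cache to maintain, hence the bare fall-through below.
	 */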
#ifdef CONFIG_CPU_CP15
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
	ret	lr
#else
	/* FALLTHROUGH */
#endif

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4_flush_user_cache_range)
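	/*
	 * The start/end/flags arguments are ignored and the whole ID
	 * cache is invalidated instead; a full invalidate is cheap on
	 * the small write-through caches found on these parts.
	 */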
#ifdef CONFIG_CPU_CP15
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ flush ID cache
	ret	lr
#else
	/* FALLTHROUGH */
#endif

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start and end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start and end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_coherent_user_range)
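	/*
	 * The unified cache keeps the instruction and data streams
	 * coherent by itself, so just return 0 to signal success.
	 */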
	mov	r0, #0
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4_flush_kern_dcache_area)
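	/*
	 * The addr/size arguments are ignored; this falls through to
	 * v4_dma_flush_range below, which invalidates the entire ID
	 * cache (or does nothing on CP15-less, cacheless parts).
	 */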
	/* FALLTHROUGH */

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_dma_flush_range)
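	/*
	 * The range arguments are ignored and the whole ID cache is
	 * invalidated; the cache is write-through, so there is never
	 * dirty data that would need cleaning first.
	 */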
#ifdef CONFIG_CPU_CP15
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
#endif
	ret	lr

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4_dma_unmap_area)
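	/*
	 * Only buffers the device may have written to (DMA_FROM_DEVICE
	 * or DMA_BIDIRECTIONAL) need their stale cache contents
	 * discarding; DMA_TO_DEVICE falls through to the no-op below.
	 */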
	teq	r2, #DMA_TO_DEVICE
	bne	v4_dma_flush_range
	/* FALLTHROUGH */

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4_dma_map_area)
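	/*
	 * Nothing to do when mapping: a write-through cache never holds
	 * data that main memory does not, and any invalidation needed
	 * for DMA_FROM_DEVICE is done at unmap time above.
	 */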
	ret	lr
ENDPROC(v4_dma_unmap_area)
ENDPROC(v4_dma_map_area)

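/*
 *	flush_kern_cache_louis()
 *
 *	Flush the cache up to the Level of Unification Inner Shareable.
 *	These cores have a single cache level, so this is simply an
 *	alias for flush_kern_cache_all.
 */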
	.globl	v4_flush_kern_cache_louis
	.equ	v4_flush_kern_cache_louis, v4_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4
148