// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/sh/mm/cache-sh2a.c
 *
 * Copyright (C) 2008 Yoshinori Sato
 */

#include <linux/init.h>
#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

/*
 * The maximum number of pages supported when doing ranged dcache
 * flushing.  Anything exceeding this simply flushes the dcache in its
 * entirety.
 */
#define MAX_OCACHE_PAGES	32
#define MAX_ICACHE_PAGES	32

#ifdef CONFIG_CACHE_WRITEBACK
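/*
 * Write back a single operand cache line if the given way currently
 * holds the address 'v'.  The address-array entry for that way is read
 * back and, when its tag matches 'v', rewritten with SH_CACHE_UPDATED
 * cleared so that the dirty data is written out to memory while the
 * line itself stays valid.
 */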
static void sh2a_flush_oc_line(unsigned long v, int way)
{
	unsigned long addr = (v & 0x000007f0) | (way << 11);
	unsigned long data;

	data = __raw_readl(CACHE_OC_ADDRESS_ARRAY | addr);
	if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
		data &= ~SH_CACHE_UPDATED;
		__raw_writel(data, CACHE_OC_ADDRESS_ARRAY | addr);
	}
}
#endif

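/*
 * Invalidate any line caching the address 'v' in the address array at
 * 'cache_addr' (instruction or operand cache).  The write is issued
 * with SH_CACHE_ASSOC set in the array address, so the controller does
 * an associative lookup across all ways of the indexed set and drops a
 * matching line.
 */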
static void sh2a_invalidate_line(unsigned long cache_addr, unsigned long v)
{
	/* Set associative bit to hit all ways */
	unsigned long addr = (v & 0x000007f0) | SH_CACHE_ASSOC;
	__raw_writel((addr & CACHE_PHYSADDR_MASK), cache_addr | addr);
}

/*
 * Write back the dirty D-caches, but do not invalidate them.
 */
static void sh2a__flush_wback_region(void *start, int size)
{
#ifdef CONFIG_CACHE_WRITEBACK
	unsigned long v;
	unsigned long begin, end;
	unsigned long flags;
	int nr_ways;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	nr_ways = current_cpu_data.dcache.ways;

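	/*
	 * The address array is touched with interrupts disabled and while
	 * running from the uncached mapping, so that nothing is fetched
	 * through the caches while their state is being changed.
	 */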
	local_irq_save(flags);
	jump_to_uncached();

	/* If there are too many pages then flush the entire cache */
	if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
		begin = CACHE_OC_ADDRESS_ARRAY;
		end = begin + (nr_ways * current_cpu_data.dcache.way_size);

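		/*
		 * Walk every entry of the operand cache address array and
		 * clear the dirty bit on any updated line, forcing its data
		 * to be written back without invalidating the line.
		 */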
		for (v = begin; v < end; v += L1_CACHE_BYTES) {
			unsigned long data = __raw_readl(v);
			if (data & SH_CACHE_UPDATED)
				__raw_writel(data & ~SH_CACHE_UPDATED, v);
		}
	} else {
		int way;
		for (way = 0; way < nr_ways; way++) {
			for (v = begin; v < end; v += L1_CACHE_BYTES)
				sh2a_flush_oc_line(v, way);
		}
	}

	back_to_cached();
	local_irq_restore(flags);
#endif
}

/*
 * Write back the dirty D-caches and invalidate them.
 */
static void sh2a__flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;
	unsigned long flags;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	local_irq_save(flags);
	jump_to_uncached();

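	/*
	 * For each line in the range: on write-back configurations, first
	 * write back any dirty copy in every way, then issue an associative
	 * write to invalidate whichever way holds the line.
	 */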
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
#ifdef CONFIG_CACHE_WRITEBACK
		int way;
		int nr_ways = current_cpu_data.dcache.ways;
		for (way = 0; way < nr_ways; way++)
			sh2a_flush_oc_line(v, way);
#endif
		sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
	}

	back_to_cached();
	local_irq_restore(flags);
}

/*
 * Invalidate the D-caches, but do not write them back.
 */
static void sh2a__flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;
	unsigned long flags;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	local_irq_save(flags);
	jump_to_uncached();

	/* If there are too many pages then just blow the cache */
	if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
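		/*
		 * CCR_OCACHE_INVALIDATE tells the cache controller to
		 * invalidate the entire operand cache in one operation.
		 */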
		__raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE,
			     SH_CCR);
	} else {
		for (v = begin; v < end; v += L1_CACHE_BYTES)
			sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
	}

	back_to_cached();
	local_irq_restore(flags);
}

/*
 * Write back the given range of the D-cache and purge the I-cache.
 */
static void sh2a_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;
	unsigned long v;
	unsigned long flags;

	start = data->addr1 & ~(L1_CACHE_BYTES-1);
	end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);

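	/*
	 * On write-back configurations the D-cache is written back first,
	 * so that newly written instructions have reached memory before the
	 * I-cache is invalidated and refetches them.
	 */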
#ifdef CONFIG_CACHE_WRITEBACK
	sh2a__flush_wback_region((void *)start, end - start);
#endif

	local_irq_save(flags);
	jump_to_uncached();

	/*
	 * I-cache invalidate: if there are too many pages then just blow
	 * the entire cache.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		__raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE,
			     SH_CCR);
	} else {
		for (v = start; v < end; v += L1_CACHE_BYTES)
			sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
	}

	back_to_cached();
	local_irq_restore(flags);
}

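/*
 * Install the SH-2A implementations as the cache maintenance hooks used
 * by the generic SH cache handling code.
 */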
void __init sh2a_cache_init(void)
{
	local_flush_icache_range	= sh2a_flush_icache_range;

	__flush_wback_region		= sh2a__flush_wback_region;
	__flush_purge_region		= sh2a__flush_purge_region;
	__flush_invalidate_region	= sh2a__flush_invalidate_region;
}