// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "ops-common.h"

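/*
 * rmap walk callback for damon_pa_mkold(): clears the Accessed bit in every
 * PTE or PMD that maps the folio, so that a later check can tell whether the
 * folio was touched in between.  Always returns true to keep walking the
 * remaining VMAs.
 */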
static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma, addr);
	}
	return true;
}

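/*
 * Mark the folio backing @paddr as "old".  Unmapped folios cannot be aged
 * via the page table Accessed bits, so the PG_idle flag is set instead.
 * rmap_walk() on non-anon or KSM folios requires the folio lock; only try
 * to take it, so that sampling never blocks.
 */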
static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	struct rmap_walk_control rwc = {
		.rmap_one = __damon_pa_mkold,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio)
		return;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		goto out;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

out:
	folio_put(folio);
}

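/*
 * Prepare one region for the next access check: pick a random physical
 * address inside the region and clear the accessed state of the folio
 * backing it.
 */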
static void __damon_pa_prepare_access_check(struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(r);
	}
}

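/*
 * rmap walk callback for damon_pa_young(): the folio counts as accessed if
 * any mapping has a young PTE/PMD, the PG_idle flag was cleared, or an MMU
 * notifier (e.g., a KVM secondary MMU) reports an access.  Returning false
 * once an access is found terminates the rmap walk early.
 */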
static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	bool *accessed = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	*accessed = false;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			*accessed = pte_young(ptep_get(pvmw.pte)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			*accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
#else
			/* PMD mappings cannot exist without THP */
			WARN_ON_ONCE(1);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (*accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return !*accessed;
}

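/*
 * Check whether the folio backing @paddr has been accessed since the last
 * damon_pa_mkold() call on it.  Also reports the folio size through
 * @folio_sz, so that the caller can reuse the result for other sampling
 * addresses that fall in the same folio.
 */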
static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	bool accessed = false;
	struct rmap_walk_control rwc = {
		.arg = &accessed,
		.rmap_one = __damon_pa_young,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio)
		return false;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		accessed = !folio_test_idle(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		goto out;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

out:
	*folio_sz = folio_size(folio);
	folio_put(folio);
	return accessed;
}

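/*
 * Check one region's sampling address and bump its access counter if the
 * backing folio was young.  The static variables cache the last checked
 * address, folio size, and result, so consecutive regions whose sampling
 * addresses land in the same folio are not re-checked.
 */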
static void __damon_pa_check_access(struct damon_region *r)
{
	static unsigned long last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked folio, reuse the result */
	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_addr = r->sampling_addr;
}

static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

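/*
 * Return whether @folio should be filtered out by a single @filter.  A folio
 * is filtered out when its property (anonymity, or owning memcg) matches the
 * filter and the filter is a "matching" one, or when the property does not
 * match and the filter is a "non-matching" one.
 */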
static bool __damos_pa_filter_out(struct damos_filter *filter,
		struct folio *folio)
{
	bool matched = false;
	struct mem_cgroup *memcg;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_ANON:
		matched = folio_test_anon(folio);
		break;
	case DAMOS_FILTER_TYPE_MEMCG:
		rcu_read_lock();
		memcg = folio_memcg_check(folio);
		if (!memcg)
			matched = false;
		else
			matched = filter->memcg_id == mem_cgroup_id(memcg);
		rcu_read_unlock();
		break;
	default:
		break;
	}

	return matched == filter->matching;
}

/*
 * damos_pa_filter_out - Return true if the folio should be filtered out.
 */
static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
{
	struct damos_filter *filter;

	damos_for_each_filter(filter, scheme) {
		if (__damos_pa_filter_out(filter, folio))
			return true;
	}
	return false;
}

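/*
 * Apply the DAMOS_PAGEOUT action: walk the region page by page, isolate the
 * folios that pass the scheme's filters from the LRU lists, and reclaim them
 * in one batch.  Returns the number of reclaimed bytes.
 */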
static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;

		/* Clear the access hints so reclaim does not skip the folio */
		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (!folio_isolate_lru(folio))
			goto put_folio;
		if (folio_test_unevictable(folio))
			folio_putback_lru(folio);
		else
			list_add(&folio->lru, &folio_list);
put_folio:
		folio_put(folio);
	}
	applied = reclaim_pages(&folio_list);
	cond_resched();
	return applied * PAGE_SIZE;
}

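/*
 * Common body of the DAMOS_LRU_PRIO and DAMOS_LRU_DEPRIO actions: mark the
 * filtered-in folios of the region as accessed (to promote them on the LRU
 * lists) or deactivate them (to demote them), depending on @mark_accessed.
 * Returns the number of bytes the action was applied to.
 */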
static inline unsigned long damon_pa_mark_accessed_or_deactivate(
		struct damon_region *r, struct damos *s, bool mark_accessed)
{
	unsigned long addr, applied = 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;

		if (mark_accessed)
			folio_mark_accessed(folio);
		else
			folio_deactivate(folio);
		applied += folio_nr_pages(folio);
put_folio:
		folio_put(folio);
	}
	return applied * PAGE_SIZE;
}

static unsigned long damon_pa_mark_accessed(struct damon_region *r,
	struct damos *s)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, true);
}

static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
	struct damos *s)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, false);
}

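/*
 * Dispatch a DAMOS action on a region.  Returns the number of bytes the
 * action was applied to, or zero for DAMOS_STAT and for actions this
 * operations set does not implement.
 */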
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_pa_mark_accessed(r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r, scheme);
	case DAMOS_STAT:
		break;
	default:
		/* DAMOS actions that are not yet supported by 'paddr' */
		break;
	}
	return 0;
}

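/*
 * Compute a prioritization score for applying @scheme to @r.  Page-out and
 * LRU deprioritization prefer colder regions, while LRU prioritization
 * prefers hotter ones; actions without a defined preference get the
 * maximum score.
 */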
static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

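/*
 * Register the physical address space operations set.  The monitoring
 * target is the whole physical address space, so the per-target init,
 * update, validation, and cleanup callbacks are unnecessary and left NULL.
 */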
static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);