xref: /kernel/linux/linux-5.10/mm/kasan/quarantine.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0
/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 */

#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/cpuhotplug.h>

#include "../slab.h"
#include "kasan.h"

/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a singly linked list, which also stores the total size of
 * the objects in it.
 */
struct qlist_head {
	struct qlist_node *head;
	struct qlist_node *tail;
	size_t bytes;
	bool offline;
};

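/*
 * An empty queue; the offline flag is left zero (false) because members
 * omitted from an aggregate initializer are zero-initialized.
 */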
#define QLIST_INIT { NULL, NULL, 0 }

static bool qlist_empty(struct qlist_head *q)
{
	return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
	q->head = q->tail = NULL;
	q->bytes = 0;
}

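/* Append @qlink to the tail of @q and add @size to the queue's byte count. */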
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
		size_t size)
{
	if (unlikely(qlist_empty(q)))
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
	q->bytes += size;
}

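/*
 * Splice the whole of @from onto the tail of @to in O(1) and reinitialize
 * @from to empty.
 */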
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
	if (unlikely(qlist_empty(from)))
		return;

	if (qlist_empty(to)) {
		*to = *from;
		qlist_init(from);
		return;
	}

	to->tail->next = from->head;
	to->tail = from->tail;
	to->bytes += from->bytes;

	qlist_init(from);
}

#define QUARANTINE_PERCPU_SIZE (1 << 20)
#define QUARANTINE_BATCHES \
	(1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)
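/*
 * QUARANTINE_PERCPU_SIZE is 1 MB per CPU. QUARANTINE_BATCHES is
 * max(1024, 4 * CONFIG_NR_CPUS): for example, 1024 batches with
 * CONFIG_NR_CPUS=64 (since 4 * 64 = 256 < 1024), and 2048 batches
 * with CONFIG_NR_CPUS=512.
 */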

/*
 * The object quarantine consists of per-cpu queues and a global queue,
 * guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);

/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;

/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * The quarantine doesn't support the memory shrinker with the SLAB
 * allocator, so we keep the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32
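/*
 * For example, with 4 GB of installed RAM the global quarantine is capped at
 * 4 GB / 32 = 128 MB, minus the per-cpu reserves (see quarantine_reduce()).
 */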

static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
	return virt_to_head_page(qlink)->slab_cache;
}

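/*
 * The quarantine link is embedded in struct kasan_free_meta, which lives
 * free_meta_offset bytes into the freed object; walk back from the link to
 * the start of the object.
 */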
static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
	struct kasan_free_meta *free_info =
		container_of(qlink, struct kasan_free_meta,
			     quarantine_link);

	return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}

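/*
 * Actually free a quarantined object: mark its shadow memory as freed and
 * return the object to the slab allocator. With SLAB, ___cache_free() must
 * run with interrupts disabled.
 */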
static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	unsigned long flags;

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_save(flags);

	*(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREE;
	___cache_free(cache, object, _THIS_IP_);

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_restore(flags);
}

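/*
 * Free every object on @q. If @cache is NULL, each object's cache is looked
 * up individually.
 */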
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
	struct qlist_node *qlink;

	if (unlikely(qlist_empty(q)))
		return;

	qlink = q->head;
	while (qlink) {
		struct kmem_cache *obj_cache =
			cache ? cache : qlink_to_cache(qlink);
		struct qlist_node *next = qlink->next;

		qlink_free(qlink, obj_cache);
		qlink = next;
	}
	qlist_init(q);
}

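/*
 * Enqueue a just-freed object on this CPU's quarantine. Once the per-cpu
 * queue exceeds QUARANTINE_PERCPU_SIZE, its contents are flushed into the
 * tail batch of the global quarantine, and the tail advances when that batch
 * is full (unless advancing would collide with the head).
 */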
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;

	/*
	 * Note: irq must be disabled until after we move the batch to the
	 * global quarantine. Otherwise quarantine_remove_cache() can miss
	 * some objects belonging to the cache if they are in our local temp
	 * list. quarantine_remove_cache() executes on_each_cpu() at the
	 * beginning which ensures that it either sees the objects in per-cpu
	 * lists or in the global quarantine.
	 */
	local_irq_save(flags);

	q = this_cpu_ptr(&cpu_quarantine);
	if (q->offline) {
		local_irq_restore(flags);
		return;
	}
	qlist_put(q, &info->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
		qlist_move_all(q, &temp);

		raw_spin_lock(&quarantine_lock);
		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
		if (global_quarantine[quarantine_tail].bytes >=
				READ_ONCE(quarantine_batch_size)) {
			int new_tail;

			new_tail = quarantine_tail + 1;
			if (new_tail == QUARANTINE_BATCHES)
				new_tail = 0;
			if (new_tail != quarantine_head)
				quarantine_tail = new_tail;
		}
		raw_spin_unlock(&quarantine_lock);
	}

	local_irq_restore(flags);
}

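/*
 * Shrink the global quarantine once it has grown past its limit: recompute
 * the limits (the amount of installed RAM can change across memory hotplug),
 * then detach the oldest batch and free its objects outside the lock.
 */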
void quarantine_reduce(void)
{
	size_t total_size, new_quarantine_size, percpu_quarantines;
	unsigned long flags;
	int srcu_idx;
	struct qlist_head to_free = QLIST_INIT;

	if (likely(READ_ONCE(quarantine_size) <=
		   READ_ONCE(quarantine_max_size)))
		return;

	/*
	 * srcu critical section ensures that quarantine_remove_cache()
	 * will not miss objects belonging to the cache while they are in our
	 * local to_free list. srcu is chosen because (1) it gives us a
	 * private grace period domain that does not interfere with anything
	 * else, and (2) it allows synchronize_srcu() to return without
	 * waiting if there are no pending read critical sections (which is
	 * the expected case).
	 */
	srcu_idx = srcu_read_lock(&remove_cache_srcu);
	raw_spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	total_size = (totalram_pages() << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
	new_quarantine_size = (total_size < percpu_quarantines) ?
		0 : total_size - percpu_quarantines;
	WRITE_ONCE(quarantine_max_size, new_quarantine_size);
	/* Aim at consuming at most 1/2 of slots in quarantine. */
	WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
		2 * total_size / QUARANTINE_BATCHES));

	if (likely(quarantine_size > quarantine_max_size)) {
		qlist_move_all(&global_quarantine[quarantine_head], &to_free);
		WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
		quarantine_head++;
		if (quarantine_head == QUARANTINE_BATCHES)
			quarantine_head = 0;
	}

	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
	srcu_read_unlock(&remove_cache_srcu, srcu_idx);
}

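/*
 * Move all objects belonging to @cache from @from to @to, preserving the
 * relative order of the objects that stay on @from.
 */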
static void qlist_move_cache(struct qlist_head *from,
				   struct qlist_head *to,
				   struct kmem_cache *cache)
{
	struct qlist_node *curr;

	if (unlikely(qlist_empty(from)))
		return;

	curr = from->head;
	qlist_init(from);
	while (curr) {
		struct qlist_node *next = curr->next;
		struct kmem_cache *obj_cache = qlink_to_cache(curr);

		if (obj_cache == cache)
			qlist_put(to, curr, obj_cache->size);
		else
			qlist_put(from, curr, obj_cache->size);

		curr = next;
	}
}

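/*
 * on_each_cpu() callback: free every object belonging to the dying cache
 * from this CPU's quarantine.
 */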
static void per_cpu_remove_cache(void *arg)
{
	struct kmem_cache *cache = arg;
	struct qlist_head to_free = QLIST_INIT;
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/*
	 * READ_ONCE() pairs with the WRITE_ONCE() in kasan_cpu_offline().
	 * If this CPU's quarantine has already been marked offline, leave it
	 * alone so that cpu_quarantine cannot be corrupted by an interrupt
	 * racing with the offline path.
	 */
	if (READ_ONCE(q->offline))
		return;
	qlist_move_cache(q, &to_free, cache);
	qlist_free_all(&to_free, cache);
}

/* Free all quarantined objects belonging to cache. */
void quarantine_remove_cache(struct kmem_cache *cache)
{
	unsigned long flags, i;
	struct qlist_head to_free = QLIST_INIT;

	/*
	 * Must be careful to not miss any objects that are being moved from
	 * per-cpu list to the global quarantine in quarantine_put(),
	 * nor objects being freed in quarantine_reduce(). on_each_cpu()
	 * achieves the first goal, while synchronize_srcu() achieves the
	 * second.
	 */
	on_each_cpu(per_cpu_remove_cache, cache, 1);

	raw_spin_lock_irqsave(&quarantine_lock, flags);
	for (i = 0; i < QUARANTINE_BATCHES; i++) {
		if (qlist_empty(&global_quarantine[i]))
			continue;
		qlist_move_cache(&global_quarantine[i], &to_free, cache);
		/* Scanning whole quarantine can take a while. */
		raw_spin_unlock_irqrestore(&quarantine_lock, flags);
		cond_resched();
		raw_spin_lock_irqsave(&quarantine_lock, flags);
	}
	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, cache);

	synchronize_srcu(&remove_cache_srcu);
}

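/*
 * CPU hotplug callbacks: mark a CPU's quarantine usable when it comes
 * online, and drain it and mark it offline on the way down.
 */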
static int kasan_cpu_online(unsigned int cpu)
{
	this_cpu_ptr(&cpu_quarantine)->offline = false;
	return 0;
}

static int kasan_cpu_offline(unsigned int cpu)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/*
	 * Mark the quarantine offline before draining it. The barrier()
	 * orders the write to q->offline before qlist_free_all(), so an
	 * interrupt arriving while the list is drained sees the flag (see
	 * the checks in quarantine_put() and per_cpu_remove_cache()) and
	 * cannot corrupt cpu_quarantine by touching it concurrently.
	 */
	WRITE_ONCE(q->offline, true);
	barrier();
	qlist_free_all(q, NULL);
	return 0;
}

static int __init kasan_cpu_quarantine_init(void)
{
	int ret = 0;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online",
				kasan_cpu_online, kasan_cpu_offline);
	if (ret < 0)
		pr_err("kasan cpu quarantine register failed [%d]\n", ret);
	return ret;
}
late_initcall(kasan_cpu_quarantine_init);