xref: /kernel/linux/linux-5.10/security/selinux/avc.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Implementation of the kernel access vector cache (AVC).
4 *
5 * Authors:  Stephen Smalley, <sds@tycho.nsa.gov>
6 *	     James Morris <jmorris@redhat.com>
7 *
8 * Update:   KaiGai, Kohei <kaigai@ak.jp.nec.com>
9 *	Replaced the avc_lock spinlock by RCU.
10 *
11 * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
12 */
13#include <linux/types.h>
14#include <linux/stddef.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/fs.h>
18#include <linux/dcache.h>
19#include <linux/init.h>
20#include <linux/skbuff.h>
21#include <linux/percpu.h>
22#include <linux/list.h>
23#include <net/sock.h>
24#include <linux/un.h>
25#include <net/af_unix.h>
26#include <linux/ip.h>
27#include <linux/audit.h>
28#include <linux/ipv6.h>
29#include <net/ipv6.h>
30#include "avc.h"
31#include "avc_ss.h"
32#include "classmap.h"
33
34#define CREATE_TRACE_POINTS
35#include <trace/events/avc.h>
36
37#define AVC_CACHE_SLOTS			512
38#define AVC_DEF_CACHE_THRESHOLD		512
39#define AVC_CACHE_RECLAIM		16
40
41#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
42#define avc_cache_stats_incr(field)	this_cpu_inc(avc_cache_stats.field)
43#else
44#define avc_cache_stats_incr(field)	do {} while (0)
45#endif
46
47struct avc_entry {
48	u32			ssid;
49	u32			tsid;
50	u16			tclass;
51	struct av_decision	avd;
52	struct avc_xperms_node	*xp_node;
53};
54
55struct avc_node {
56	struct avc_entry	ae;
57	struct hlist_node	list; /* anchored in avc_cache->slots[i] */
58	struct rcu_head		rhead;
59};
60
61struct avc_xperms_decision_node {
62	struct extended_perms_decision xpd;
63	struct list_head xpd_list; /* list of extended_perms_decision */
64};
65
66struct avc_xperms_node {
67	struct extended_perms xp;
68	struct list_head xpd_head; /* list head of extended_perms_decision */
69};
70
71struct avc_cache {
72	struct hlist_head	slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
73	spinlock_t		slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
74	atomic_t		lru_hint;	/* LRU hint for reclaim scan */
75	atomic_t		active_nodes;
76	u32			latest_notif;	/* latest revocation notification */
77};
78
79struct avc_callback_node {
80	int (*callback) (u32 event);
81	u32 events;
82	struct avc_callback_node *next;
83};
84
85#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
86DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 };
87#endif
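
/*
 * The per-CPU counters above are bumped via avc_cache_stats_incr(), which
 * compiles away when CONFIG_SECURITY_SELINUX_AVC_STATS is disabled.  A
 * minimal sketch of how the counters could be summed for reporting; the
 * helper is hypothetical (the real reporting lives elsewhere), and the
 * field name is taken from the counters incremented later in this file:
 */
#if 0	/* illustrative sketch only, not compiled */
static unsigned int avc_total_lookups(void)
{
	unsigned int sum = 0;
	int cpu;

	/* add up each CPU's private copy of the counter */
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(&avc_cache_stats, cpu)->lookups;
	return sum;
}
#endif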
88
89struct selinux_avc {
90	unsigned int avc_cache_threshold;
91	struct avc_cache avc_cache;
92};
93
94static struct selinux_avc selinux_avc;
95
96void selinux_avc_init(struct selinux_avc **avc)
97{
98	int i;
99
100	selinux_avc.avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD;
101	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
102		INIT_HLIST_HEAD(&selinux_avc.avc_cache.slots[i]);
103		spin_lock_init(&selinux_avc.avc_cache.slots_lock[i]);
104	}
105	atomic_set(&selinux_avc.avc_cache.active_nodes, 0);
106	atomic_set(&selinux_avc.avc_cache.lru_hint, 0);
107	*avc = &selinux_avc;
108}
109
110unsigned int avc_get_cache_threshold(struct selinux_avc *avc)
111{
112	return avc->avc_cache_threshold;
113}
114
115void avc_set_cache_threshold(struct selinux_avc *avc,
116			     unsigned int cache_threshold)
117{
118	avc->avc_cache_threshold = cache_threshold;
119}
120
121static struct avc_callback_node *avc_callbacks;
122static struct kmem_cache *avc_node_cachep;
123static struct kmem_cache *avc_xperms_data_cachep;
124static struct kmem_cache *avc_xperms_decision_cachep;
125static struct kmem_cache *avc_xperms_cachep;
126
127static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
128{
129	return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1);
130}
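
/*
 * A small worked example of the slot computation above, using arbitrary,
 * purely illustrative SID and class values:
 */
#if 0	/* illustrative sketch only, not compiled */
static void avc_hash_example(void)
{
	/* (42 ^ (7 << 2) ^ (6 << 4)) & 511 == (42 ^ 28 ^ 96) & 511 == 86 */
	int slot = avc_hash(42, 7, 6);

	WARN_ON(slot != 86);
}
#endif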
131
132/**
133 * avc_init - Initialize the AVC.
134 *
135 * Initialize the access vector cache.
136 */
137void __init avc_init(void)
138{
139	avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
140					0, SLAB_PANIC, NULL);
141	avc_xperms_cachep = kmem_cache_create("avc_xperms_node",
142					sizeof(struct avc_xperms_node),
143					0, SLAB_PANIC, NULL);
144	avc_xperms_decision_cachep = kmem_cache_create(
145					"avc_xperms_decision_node",
146					sizeof(struct avc_xperms_decision_node),
147					0, SLAB_PANIC, NULL);
148	avc_xperms_data_cachep = kmem_cache_create("avc_xperms_data",
149					sizeof(struct extended_perms_data),
150					0, SLAB_PANIC, NULL);
151}
152
153int avc_get_hash_stats(struct selinux_avc *avc, char *page)
154{
155	int i, chain_len, max_chain_len, slots_used;
156	struct avc_node *node;
157	struct hlist_head *head;
158
159	rcu_read_lock();
160
161	slots_used = 0;
162	max_chain_len = 0;
163	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
164		head = &avc->avc_cache.slots[i];
165		if (!hlist_empty(head)) {
166			slots_used++;
167			chain_len = 0;
168			hlist_for_each_entry_rcu(node, head, list)
169				chain_len++;
170			if (chain_len > max_chain_len)
171				max_chain_len = chain_len;
172		}
173	}
174
175	rcu_read_unlock();
176
177	return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
178			 "longest chain: %d\n",
179			 atomic_read(&avc->avc_cache.active_nodes),
180			 slots_used, AVC_CACHE_SLOTS, max_chain_len);
181}
182
183/*
184 * a linked list is used for extended_perms_decision lookup because the list
185 * is always small, i.e. fewer than five entries and typically just one
186 */
187static struct extended_perms_decision *avc_xperms_decision_lookup(u8 driver,
188					struct avc_xperms_node *xp_node)
189{
190	struct avc_xperms_decision_node *xpd_node;
191
192	list_for_each_entry(xpd_node, &xp_node->xpd_head, xpd_list) {
193		if (xpd_node->xpd.driver == driver)
194			return &xpd_node->xpd;
195	}
196	return NULL;
197}
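
/*
 * A minimal sketch of how the xperms structures fit together: the
 * xp.drivers bitmap flags driver bytes that have extended permission
 * rules at all, and the per-driver list entry found by the lookup above
 * carries the allowed/auditallow/dontaudit bitmaps.  This mirrors the
 * checks done by avc_xperms_has_perm() and avc_has_extended_perms()
 * further down; the helper itself is hypothetical:
 */
#if 0	/* illustrative sketch only, not compiled */
static int example_xperm_allowed(struct avc_xperms_node *xp_node,
				 u8 driver, u8 perm)
{
	struct extended_perms_decision *xpd;

	if (!security_xperm_test(xp_node->xp.drivers.p, driver))
		return 0;	/* driver byte not flagged */

	xpd = avc_xperms_decision_lookup(driver, xp_node);
	if (!xpd || !(xpd->used & XPERMS_ALLOWED))
		return 0;
	return security_xperm_test(xpd->allowed->p, perm);
}
#endif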
198
199static inline unsigned int
200avc_xperms_has_perm(struct extended_perms_decision *xpd,
201					u8 perm, u8 which)
202{
203	unsigned int rc = 0;
204
205	if ((which == XPERMS_ALLOWED) &&
206			(xpd->used & XPERMS_ALLOWED))
207		rc = security_xperm_test(xpd->allowed->p, perm);
208	else if ((which == XPERMS_AUDITALLOW) &&
209			(xpd->used & XPERMS_AUDITALLOW))
210		rc = security_xperm_test(xpd->auditallow->p, perm);
211	else if ((which == XPERMS_DONTAUDIT) &&
212			(xpd->used & XPERMS_DONTAUDIT))
213		rc = security_xperm_test(xpd->dontaudit->p, perm);
214	return rc;
215}
216
217static void avc_xperms_allow_perm(struct avc_xperms_node *xp_node,
218				u8 driver, u8 perm)
219{
220	struct extended_perms_decision *xpd;
221	security_xperm_set(xp_node->xp.drivers.p, driver);
222	xpd = avc_xperms_decision_lookup(driver, xp_node);
223	if (xpd && xpd->allowed)
224		security_xperm_set(xpd->allowed->p, perm);
225}
226
227static void avc_xperms_decision_free(struct avc_xperms_decision_node *xpd_node)
228{
229	struct extended_perms_decision *xpd;
230
231	xpd = &xpd_node->xpd;
232	if (xpd->allowed)
233		kmem_cache_free(avc_xperms_data_cachep, xpd->allowed);
234	if (xpd->auditallow)
235		kmem_cache_free(avc_xperms_data_cachep, xpd->auditallow);
236	if (xpd->dontaudit)
237		kmem_cache_free(avc_xperms_data_cachep, xpd->dontaudit);
238	kmem_cache_free(avc_xperms_decision_cachep, xpd_node);
239}
240
241static void avc_xperms_free(struct avc_xperms_node *xp_node)
242{
243	struct avc_xperms_decision_node *xpd_node, *tmp;
244
245	if (!xp_node)
246		return;
247
248	list_for_each_entry_safe(xpd_node, tmp, &xp_node->xpd_head, xpd_list) {
249		list_del(&xpd_node->xpd_list);
250		avc_xperms_decision_free(xpd_node);
251	}
252	kmem_cache_free(avc_xperms_cachep, xp_node);
253}
254
255static void avc_copy_xperms_decision(struct extended_perms_decision *dest,
256					struct extended_perms_decision *src)
257{
258	dest->driver = src->driver;
259	dest->used = src->used;
260	if (dest->used & XPERMS_ALLOWED)
261		memcpy(dest->allowed->p, src->allowed->p,
262				sizeof(src->allowed->p));
263	if (dest->used & XPERMS_AUDITALLOW)
264		memcpy(dest->auditallow->p, src->auditallow->p,
265				sizeof(src->auditallow->p));
266	if (dest->used & XPERMS_DONTAUDIT)
267		memcpy(dest->dontaudit->p, src->dontaudit->p,
268				sizeof(src->dontaudit->p));
269}
270
271/*
272 * similar to avc_copy_xperms_decision, but only copy decision
273 * information relevant to this perm
274 */
275static inline void avc_quick_copy_xperms_decision(u8 perm,
276			struct extended_perms_decision *dest,
277			struct extended_perms_decision *src)
278{
279	/*
280	 * compute the index of the u32, within the 256-bit (8 u32s) bitmap,
281	 * that contains this command permission
282	 */
283	u8 i = perm >> 5;
284
285	dest->used = src->used;
286	if (dest->used & XPERMS_ALLOWED)
287		dest->allowed->p[i] = src->allowed->p[i];
288	if (dest->used & XPERMS_AUDITALLOW)
289		dest->auditallow->p[i] = src->auditallow->p[i];
290	if (dest->used & XPERMS_DONTAUDIT)
291		dest->dontaudit->p[i] = src->dontaudit->p[i];
292}
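
/*
 * Worked example for the index computation above: for xperm 0x8b (139),
 * i = 139 >> 5 = 4, so only word p[4] of the eight u32s is copied; within
 * that word the permission corresponds to bit 139 & 0x1f = 11.
 */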
293
294static struct avc_xperms_decision_node
295		*avc_xperms_decision_alloc(u8 which)
296{
297	struct avc_xperms_decision_node *xpd_node;
298	struct extended_perms_decision *xpd;
299
300	xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
301				     GFP_NOWAIT | __GFP_NOWARN);
302	if (!xpd_node)
303		return NULL;
304
305	xpd = &xpd_node->xpd;
306	if (which & XPERMS_ALLOWED) {
307		xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
308						GFP_NOWAIT | __GFP_NOWARN);
309		if (!xpd->allowed)
310			goto error;
311	}
312	if (which & XPERMS_AUDITALLOW) {
313		xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
314						GFP_NOWAIT | __GFP_NOWARN);
315		if (!xpd->auditallow)
316			goto error;
317	}
318	if (which & XPERMS_DONTAUDIT) {
319		xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
320						GFP_NOWAIT | __GFP_NOWARN);
321		if (!xpd->dontaudit)
322			goto error;
323	}
324	return xpd_node;
325error:
326	avc_xperms_decision_free(xpd_node);
327	return NULL;
328}
329
330static int avc_add_xperms_decision(struct avc_node *node,
331			struct extended_perms_decision *src)
332{
333	struct avc_xperms_decision_node *dest_xpd;
334
335	node->ae.xp_node->xp.len++;
336	dest_xpd = avc_xperms_decision_alloc(src->used);
337	if (!dest_xpd)
338		return -ENOMEM;
339	avc_copy_xperms_decision(&dest_xpd->xpd, src);
340	list_add(&dest_xpd->xpd_list, &node->ae.xp_node->xpd_head);
341	return 0;
342}
343
344static struct avc_xperms_node *avc_xperms_alloc(void)
345{
346	struct avc_xperms_node *xp_node;
347
348	xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT | __GFP_NOWARN);
349	if (!xp_node)
350		return xp_node;
351	INIT_LIST_HEAD(&xp_node->xpd_head);
352	return xp_node;
353}
354
355static int avc_xperms_populate(struct avc_node *node,
356				struct avc_xperms_node *src)
357{
358	struct avc_xperms_node *dest;
359	struct avc_xperms_decision_node *dest_xpd;
360	struct avc_xperms_decision_node *src_xpd;
361
362	if (src->xp.len == 0)
363		return 0;
364	dest = avc_xperms_alloc();
365	if (!dest)
366		return -ENOMEM;
367
368	memcpy(dest->xp.drivers.p, src->xp.drivers.p, sizeof(dest->xp.drivers.p));
369	dest->xp.len = src->xp.len;
370
371	/* for each source xpd allocate a destination xpd and copy */
372	list_for_each_entry(src_xpd, &src->xpd_head, xpd_list) {
373		dest_xpd = avc_xperms_decision_alloc(src_xpd->xpd.used);
374		if (!dest_xpd)
375			goto error;
376		avc_copy_xperms_decision(&dest_xpd->xpd, &src_xpd->xpd);
377		list_add(&dest_xpd->xpd_list, &dest->xpd_head);
378	}
379	node->ae.xp_node = dest;
380	return 0;
381error:
382	avc_xperms_free(dest);
383	return -ENOMEM;
384
385}
386
387static inline u32 avc_xperms_audit_required(u32 requested,
388					struct av_decision *avd,
389					struct extended_perms_decision *xpd,
390					u8 perm,
391					int result,
392					u32 *deniedp)
393{
394	u32 denied, audited;
395
396	denied = requested & ~avd->allowed;
397	if (unlikely(denied)) {
398		audited = denied & avd->auditdeny;
399		if (audited && xpd) {
400			if (avc_xperms_has_perm(xpd, perm, XPERMS_DONTAUDIT))
401				audited &= ~requested;
402		}
403	} else if (result) {
404		audited = denied = requested;
405	} else {
406		audited = requested & avd->auditallow;
407		if (audited && xpd) {
408			if (!avc_xperms_has_perm(xpd, perm, XPERMS_AUDITALLOW))
409				audited &= ~requested;
410		}
411	}
412
413	*deniedp = denied;
414	return audited;
415}
416
417static inline int avc_xperms_audit(struct selinux_state *state,
418				   u32 ssid, u32 tsid, u16 tclass,
419				   u32 requested, struct av_decision *avd,
420				   struct extended_perms_decision *xpd,
421				   u8 perm, int result,
422				   struct common_audit_data *ad)
423{
424	u32 audited, denied;
425
426	audited = avc_xperms_audit_required(
427			requested, avd, xpd, perm, result, &denied);
428	if (likely(!audited))
429		return 0;
430	return slow_avc_audit(state, ssid, tsid, tclass, requested,
431			audited, denied, result, ad);
432}
433
434static void avc_node_free(struct rcu_head *rhead)
435{
436	struct avc_node *node = container_of(rhead, struct avc_node, rhead);
437	avc_xperms_free(node->ae.xp_node);
438	kmem_cache_free(avc_node_cachep, node);
439	avc_cache_stats_incr(frees);
440}
441
442static void avc_node_delete(struct selinux_avc *avc, struct avc_node *node)
443{
444	hlist_del_rcu(&node->list);
445	call_rcu(&node->rhead, avc_node_free);
446	atomic_dec(&avc->avc_cache.active_nodes);
447}
448
449static void avc_node_kill(struct selinux_avc *avc, struct avc_node *node)
450{
451	avc_xperms_free(node->ae.xp_node);
452	kmem_cache_free(avc_node_cachep, node);
453	avc_cache_stats_incr(frees);
454	atomic_dec(&avc->avc_cache.active_nodes);
455}
456
457static void avc_node_replace(struct selinux_avc *avc,
458			     struct avc_node *new, struct avc_node *old)
459{
460	hlist_replace_rcu(&old->list, &new->list);
461	call_rcu(&old->rhead, avc_node_free);
462	atomic_dec(&avc->avc_cache.active_nodes);
463}
464
465static inline int avc_reclaim_node(struct selinux_avc *avc)
466{
467	struct avc_node *node;
468	int hvalue, try, ecx;
469	unsigned long flags;
470	struct hlist_head *head;
471	spinlock_t *lock;
472
473	for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
474		hvalue = atomic_inc_return(&avc->avc_cache.lru_hint) &
475			(AVC_CACHE_SLOTS - 1);
476		head = &avc->avc_cache.slots[hvalue];
477		lock = &avc->avc_cache.slots_lock[hvalue];
478
479		if (!spin_trylock_irqsave(lock, flags))
480			continue;
481
482		rcu_read_lock();
483		hlist_for_each_entry(node, head, list) {
484			avc_node_delete(avc, node);
485			avc_cache_stats_incr(reclaims);
486			ecx++;
487			if (ecx >= AVC_CACHE_RECLAIM) {
488				rcu_read_unlock();
489				spin_unlock_irqrestore(lock, flags);
490				goto out;
491			}
492		}
493		rcu_read_unlock();
494		spin_unlock_irqrestore(lock, flags);
495	}
496out:
497	return ecx;
498}
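
/*
 * Reclaim in numbers: with the defaults above, once more than
 * AVC_DEF_CACHE_THRESHOLD (512) nodes are active, each new allocation in
 * avc_alloc_node() triggers a reclaim pass that starts at the slot after
 * lru_hint and frees at most AVC_CACHE_RECLAIM (16) entries, keeping the
 * per-allocation reclaim cost bounded.
 */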
499
500static struct avc_node *avc_alloc_node(struct selinux_avc *avc)
501{
502	struct avc_node *node;
503
504	node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT | __GFP_NOWARN);
505	if (!node)
506		goto out;
507
508	INIT_HLIST_NODE(&node->list);
509	avc_cache_stats_incr(allocations);
510
511	if (atomic_inc_return(&avc->avc_cache.active_nodes) >
512	    avc->avc_cache_threshold)
513		avc_reclaim_node(avc);
514
515out:
516	return node;
517}
518
519static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
520{
521	node->ae.ssid = ssid;
522	node->ae.tsid = tsid;
523	node->ae.tclass = tclass;
524	memcpy(&node->ae.avd, avd, sizeof(node->ae.avd));
525}
526
527static inline struct avc_node *avc_search_node(struct selinux_avc *avc,
528					       u32 ssid, u32 tsid, u16 tclass)
529{
530	struct avc_node *node, *ret = NULL;
531	int hvalue;
532	struct hlist_head *head;
533
534	hvalue = avc_hash(ssid, tsid, tclass);
535	head = &avc->avc_cache.slots[hvalue];
536	hlist_for_each_entry_rcu(node, head, list) {
537		if (ssid == node->ae.ssid &&
538		    tclass == node->ae.tclass &&
539		    tsid == node->ae.tsid) {
540			ret = node;
541			break;
542		}
543	}
544
545	return ret;
546}
547
548/**
549 * avc_lookup - Look up an AVC entry.
550 * @ssid: source security identifier
551 * @tsid: target security identifier
552 * @tclass: target security class
553 *
554 * Look up an AVC entry that is valid for the SID pair
555 * (@ssid, @tsid), interpreting the permissions
556 * based on @tclass.  If a valid AVC entry exists,
557 * then this function returns the avc_node.
558 * Otherwise, this function returns NULL.
559 */
560static struct avc_node *avc_lookup(struct selinux_avc *avc,
561				   u32 ssid, u32 tsid, u16 tclass)
562{
563	struct avc_node *node;
564
565	avc_cache_stats_incr(lookups);
566	node = avc_search_node(avc, ssid, tsid, tclass);
567
568	if (node)
569		return node;
570
571	avc_cache_stats_incr(misses);
572	return NULL;
573}
574
575static int avc_latest_notif_update(struct selinux_avc *avc,
576				   int seqno, int is_insert)
577{
578	int ret = 0;
579	static DEFINE_SPINLOCK(notif_lock);
580	unsigned long flag;
581
582	spin_lock_irqsave(&notif_lock, flag);
583	if (is_insert) {
584		if (seqno < avc->avc_cache.latest_notif) {
585			pr_warn("SELinux: avc:  seqno %d < latest_notif %d\n",
586			       seqno, avc->avc_cache.latest_notif);
587			ret = -EAGAIN;
588		}
589	} else {
590		if (seqno > avc->avc_cache.latest_notif)
591			avc->avc_cache.latest_notif = seqno;
592	}
593	spin_unlock_irqrestore(&notif_lock, flag);
594
595	return ret;
596}
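
/*
 * Worked example of the sequence-number check above: suppose a policy
 * reload bumps the policy seqno from 3 to 4 and avc_ss_reset() records 4
 * as latest_notif.  A decision computed against the old policy still
 * carries seqno 3, so the is_insert branch returns -EAGAIN and
 * avc_insert() below declines to cache the stale entry.
 */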
597
598/**
599 * avc_insert - Insert an AVC entry.
600 * @ssid: source security identifier
601 * @tsid: target security identifier
602 * @tclass: target security class
603 * @avd: resulting av decision
604 * @xp_node: resulting extended permissions
605 *
606 * Insert an AVC entry for the SID pair
607 * (@ssid, @tsid) and class @tclass.
608 * The access vectors and the sequence number are
609 * normally provided by the security server in
610 * response to a security_compute_av() call.  If the
611 * sequence number @avd->seqno is not less than the latest
612 * revocation notification, then the function copies
613 * the access vectors into a cache entry and returns
614 * the inserted avc_node.  Otherwise, this function returns NULL.
615 */
616static struct avc_node *avc_insert(struct selinux_avc *avc,
617				   u32 ssid, u32 tsid, u16 tclass,
618				   struct av_decision *avd,
619				   struct avc_xperms_node *xp_node)
620{
621	struct avc_node *pos, *node = NULL;
622	int hvalue;
623	unsigned long flag;
624	spinlock_t *lock;
625	struct hlist_head *head;
626
627	if (avc_latest_notif_update(avc, avd->seqno, 1))
628		return NULL;
629
630	node = avc_alloc_node(avc);
631	if (!node)
632		return NULL;
633
634	avc_node_populate(node, ssid, tsid, tclass, avd);
635	if (avc_xperms_populate(node, xp_node)) {
636		avc_node_kill(avc, node);
637		return NULL;
638	}
639
640	hvalue = avc_hash(ssid, tsid, tclass);
641	head = &avc->avc_cache.slots[hvalue];
642	lock = &avc->avc_cache.slots_lock[hvalue];
643	spin_lock_irqsave(lock, flag);
644	hlist_for_each_entry(pos, head, list) {
645		if (pos->ae.ssid == ssid &&
646			pos->ae.tsid == tsid &&
647			pos->ae.tclass == tclass) {
648			avc_node_replace(avc, node, pos);
649			goto found;
650		}
651	}
652	hlist_add_head_rcu(&node->list, head);
653found:
654	spin_unlock_irqrestore(lock, flag);
655	return node;
656}
657
658/**
659 * avc_audit_pre_callback - SELinux-specific information;
660 * called by the generic audit code
661 * @ab: the audit buffer
662 * @a: audit_data
663 */
664static void avc_audit_pre_callback(struct audit_buffer *ab, void *a)
665{
666	struct common_audit_data *ad = a;
667	struct selinux_audit_data *sad = ad->selinux_audit_data;
668	u32 av = sad->audited;
669	const char **perms;
670	int i, perm;
671
672	audit_log_format(ab, "avc:  %s ", sad->denied ? "denied" : "granted");
673
674	if (av == 0) {
675		audit_log_format(ab, " null");
676		return;
677	}
678
679	perms = secclass_map[sad->tclass-1].perms;
680
681	audit_log_format(ab, " {");
682	i = 0;
683	perm = 1;
684	while (i < (sizeof(av) * 8)) {
685		if ((perm & av) && perms[i]) {
686			audit_log_format(ab, " %s", perms[i]);
687			av &= ~perm;
688		}
689		i++;
690		perm <<= 1;
691	}
692
693	if (av)
694		audit_log_format(ab, " 0x%x", av);
695
696	audit_log_format(ab, " } for ");
697}
698
699/**
700 * avc_audit_post_callback - SELinux-specific information;
701 * called by the generic audit code
702 * @ab: the audit buffer
703 * @a: audit_data
704 */
705static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
706{
707	struct common_audit_data *ad = a;
708	struct selinux_audit_data *sad = ad->selinux_audit_data;
709	char *scontext = NULL;
710	char *tcontext = NULL;
711	const char *tclass = NULL;
712	u32 scontext_len;
713	u32 tcontext_len;
714	int rc;
715
716	rc = security_sid_to_context(sad->state, sad->ssid, &scontext,
717				     &scontext_len);
718	if (rc)
719		audit_log_format(ab, " ssid=%d", sad->ssid);
720	else
721		audit_log_format(ab, " scontext=%s", scontext);
722
723	rc = security_sid_to_context(sad->state, sad->tsid, &tcontext,
724				     &tcontext_len);
725	if (rc)
726		audit_log_format(ab, " tsid=%d", sad->tsid);
727	else
728		audit_log_format(ab, " tcontext=%s", tcontext);
729
730	tclass = secclass_map[sad->tclass-1].name;
731	audit_log_format(ab, " tclass=%s", tclass);
732
733	if (sad->denied)
734		audit_log_format(ab, " permissive=%u", sad->result ? 0 : 1);
735
736	trace_selinux_audited(sad, scontext, tcontext, tclass);
737	kfree(tcontext);
738	kfree(scontext);
739
740	/* in case of invalid context report also the actual context string */
741	rc = security_sid_to_context_inval(sad->state, sad->ssid, &scontext,
742					   &scontext_len);
743	if (!rc && scontext) {
744		if (scontext_len && scontext[scontext_len - 1] == '\0')
745			scontext_len--;
746		audit_log_format(ab, " srawcon=");
747		audit_log_n_untrustedstring(ab, scontext, scontext_len);
748		kfree(scontext);
749	}
750
751	rc = security_sid_to_context_inval(sad->state, sad->tsid, &scontext,
752					   &scontext_len);
753	if (!rc && scontext) {
754		if (scontext_len && scontext[scontext_len - 1] == '\0')
755			scontext_len--;
756		audit_log_format(ab, " trawcon=");
757		audit_log_n_untrustedstring(ab, scontext, scontext_len);
758		kfree(scontext);
759	}
760}
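
/*
 * Assembled from the two callbacks above, a denial record looks roughly
 * like the line below.  The pid=/comm=/path= style fields in the middle
 * are emitted by the generic LSM audit code rather than by this file, and
 * the example contexts are made up:
 *
 *   avc:  denied  { read write } for  ...
 *     scontext=system_u:system_r:foo_t:s0
 *     tcontext=system_u:object_r:bar_t:s0 tclass=file permissive=0
 */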
761
762/* This is the slow part of avc audit, with a big stack footprint */
763noinline int slow_avc_audit(struct selinux_state *state,
764			    u32 ssid, u32 tsid, u16 tclass,
765			    u32 requested, u32 audited, u32 denied, int result,
766			    struct common_audit_data *a)
767{
768	struct common_audit_data stack_data;
769	struct selinux_audit_data sad;
770
771	if (WARN_ON(!tclass || tclass >= ARRAY_SIZE(secclass_map)))
772		return -EINVAL;
773
774	if (!a) {
775		a = &stack_data;
776		a->type = LSM_AUDIT_DATA_NONE;
777	}
778
779	sad.tclass = tclass;
780	sad.requested = requested;
781	sad.ssid = ssid;
782	sad.tsid = tsid;
783	sad.audited = audited;
784	sad.denied = denied;
785	sad.result = result;
786	sad.state = state;
787
788	a->selinux_audit_data = &sad;
789
790	common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback);
791	return 0;
792}
793
794/**
795 * avc_add_callback - Register a callback for security events.
796 * @callback: callback function
797 * @events: security events
798 *
799 * Register a callback function for events in the set @events.
800 * Returns %0 on success or -%ENOMEM if insufficient memory
801 * exists to add the callback.
802 */
803int __init avc_add_callback(int (*callback)(u32 event), u32 events)
804{
805	struct avc_callback_node *c;
806	int rc = 0;
807
808	c = kmalloc(sizeof(*c), GFP_KERNEL);
809	if (!c) {
810		rc = -ENOMEM;
811		goto out;
812	}
813
814	c->callback = callback;
815	c->events = events;
816	c->next = avc_callbacks;
817	avc_callbacks = c;
818out:
819	return rc;
820}
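
/*
 * A minimal sketch of a caller: subsystems that cache SID-derived state
 * register a reset callback so that state can be dropped on policy
 * (re)load, when avc_ss_reset() raises AVC_CALLBACK_RESET.  The names
 * below are hypothetical:
 */
#if 0	/* illustrative sketch only, not compiled */
static int example_avc_reset(u32 event)
{
	if (event == AVC_CALLBACK_RESET)
		pr_info("example: dropping cached SID-derived state\n");
	return 0;
}

static int __init example_avc_callback_init(void)
{
	/* avc_add_callback() is __init, so register from an initcall */
	return avc_add_callback(example_avc_reset, AVC_CALLBACK_RESET);
}
__initcall(example_avc_callback_init);
#endif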
821
822/**
823 * avc_update_node - Update an AVC entry
824 * @event: updating event
825 * @perms: permission mask bits
826 * @ssid, @tsid, @tclass: identifier of an AVC entry
827 * @seqno: sequence number when the decision was made
828 * @xpd: extended_perms_decision to be added to the node
829 * @flags: the AVC_* flags, e.g. AVC_NONBLOCKING, AVC_EXTENDED_PERMS, or 0.
830 *
831 * If a valid AVC entry doesn't exist, this function returns -ENOENT.
832 * If the internal memory allocation fails, this function returns -ENOMEM.
833 * Otherwise, this function updates the AVC entry.  The original AVC entry
834 * will be released later by RCU.
835 */
836static int avc_update_node(struct selinux_avc *avc,
837			   u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
838			   u32 tsid, u16 tclass, u32 seqno,
839			   struct extended_perms_decision *xpd,
840			   u32 flags)
841{
842	int hvalue, rc = 0;
843	unsigned long flag;
844	struct avc_node *pos, *node, *orig = NULL;
845	struct hlist_head *head;
846	spinlock_t *lock;
847
848	/*
849	 * If we are in a non-blocking code path, e.g. VFS RCU walk,
850	 * then we must not add permissions to a cache entry
851	 * because we will not audit the denial.  Otherwise,
852	 * during the subsequent blocking retry (e.g. VFS ref walk), we
853	 * will find the permissions already granted in the cache entry
854	 * and won't audit anything at all, leading to silent denials in
855	 * permissive mode that only appear when in enforcing mode.
856	 *
857	 * See the corresponding handling of MAY_NOT_BLOCK in avc_audit()
858	 * and selinux_inode_permission().
859	 */
860	if (flags & AVC_NONBLOCKING)
861		return 0;
862
863	node = avc_alloc_node(avc);
864	if (!node) {
865		rc = -ENOMEM;
866		goto out;
867	}
868
869	/* Lock the target slot */
870	hvalue = avc_hash(ssid, tsid, tclass);
871
872	head = &avc->avc_cache.slots[hvalue];
873	lock = &avc->avc_cache.slots_lock[hvalue];
874
875	spin_lock_irqsave(lock, flag);
876
877	hlist_for_each_entry(pos, head, list) {
878		if (ssid == pos->ae.ssid &&
879		    tsid == pos->ae.tsid &&
880		    tclass == pos->ae.tclass &&
881		    seqno == pos->ae.avd.seqno){
882			orig = pos;
883			break;
884		}
885	}
886
887	if (!orig) {
888		rc = -ENOENT;
889		avc_node_kill(avc, node);
890		goto out_unlock;
891	}
892
893	/*
894	 * Copy and replace original node.
895	 */
896
897	avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd);
898
899	if (orig->ae.xp_node) {
900		rc = avc_xperms_populate(node, orig->ae.xp_node);
901		if (rc) {
902			avc_node_kill(avc, node);
903			goto out_unlock;
904		}
905	}
906
907	switch (event) {
908	case AVC_CALLBACK_GRANT:
909		node->ae.avd.allowed |= perms;
910		if (node->ae.xp_node && (flags & AVC_EXTENDED_PERMS))
911			avc_xperms_allow_perm(node->ae.xp_node, driver, xperm);
912		break;
913	case AVC_CALLBACK_TRY_REVOKE:
914	case AVC_CALLBACK_REVOKE:
915		node->ae.avd.allowed &= ~perms;
916		break;
917	case AVC_CALLBACK_AUDITALLOW_ENABLE:
918		node->ae.avd.auditallow |= perms;
919		break;
920	case AVC_CALLBACK_AUDITALLOW_DISABLE:
921		node->ae.avd.auditallow &= ~perms;
922		break;
923	case AVC_CALLBACK_AUDITDENY_ENABLE:
924		node->ae.avd.auditdeny |= perms;
925		break;
926	case AVC_CALLBACK_AUDITDENY_DISABLE:
927		node->ae.avd.auditdeny &= ~perms;
928		break;
929	case AVC_CALLBACK_ADD_XPERMS:
930		avc_add_xperms_decision(node, xpd);
931		break;
932	}
933	avc_node_replace(avc, node, orig);
934out_unlock:
935	spin_unlock_irqrestore(lock, flag);
936out:
937	return rc;
938}
939
940/**
941 * avc_flush - Flush the cache
942 */
943static void avc_flush(struct selinux_avc *avc)
944{
945	struct hlist_head *head;
946	struct avc_node *node;
947	spinlock_t *lock;
948	unsigned long flag;
949	int i;
950
951	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
952		head = &avc->avc_cache.slots[i];
953		lock = &avc->avc_cache.slots_lock[i];
954
955		spin_lock_irqsave(lock, flag);
956		/*
957		 * With preemptable RCU, the outer spinlock does not
958		 * prevent RCU grace periods from ending.
959		 */
960		rcu_read_lock();
961		hlist_for_each_entry(node, head, list)
962			avc_node_delete(avc, node);
963		rcu_read_unlock();
964		spin_unlock_irqrestore(lock, flag);
965	}
966}
967
968/**
969 * avc_ss_reset - Flush the cache and revalidate migrated permissions.
970 * @seqno: policy sequence number
971 */
972int avc_ss_reset(struct selinux_avc *avc, u32 seqno)
973{
974	struct avc_callback_node *c;
975	int rc = 0, tmprc;
976
977	avc_flush(avc);
978
979	for (c = avc_callbacks; c; c = c->next) {
980		if (c->events & AVC_CALLBACK_RESET) {
981			tmprc = c->callback(AVC_CALLBACK_RESET);
982			/* save the first error encountered for the return
983			   value and continue processing the callbacks */
984			if (!rc)
985				rc = tmprc;
986		}
987	}
988
989	avc_latest_notif_update(avc, seqno, 0);
990	return rc;
991}
992
993/*
994 * Slow-path helper function for avc_has_perm_noaudit,
995 * when the avc_node lookup fails. We get called with
996 * the RCU read lock held, and need to return with it
997 * still held, but drop it for the security computation.
998 *
999 * Don't inline this, since it's the slow-path and just
1000 * results in a bigger stack frame.
1001 */
1002static noinline
1003struct avc_node *avc_compute_av(struct selinux_state *state,
1004				u32 ssid, u32 tsid,
1005				u16 tclass, struct av_decision *avd,
1006				struct avc_xperms_node *xp_node)
1007{
1008	rcu_read_unlock();
1009	INIT_LIST_HEAD(&xp_node->xpd_head);
1010	security_compute_av(state, ssid, tsid, tclass, avd, &xp_node->xp);
1011	rcu_read_lock();
1012	return avc_insert(state->avc, ssid, tsid, tclass, avd, xp_node);
1013}
1014
1015static noinline int avc_denied(struct selinux_state *state,
1016			       u32 ssid, u32 tsid,
1017			       u16 tclass, u32 requested,
1018			       u8 driver, u8 xperm, unsigned int flags,
1019			       struct av_decision *avd)
1020{
1021	if (flags & AVC_STRICT)
1022		return -EACCES;
1023
1024	if (enforcing_enabled(state) &&
1025	    !(avd->flags & AVD_FLAGS_PERMISSIVE))
1026		return -EACCES;
1027
1028	avc_update_node(state->avc, AVC_CALLBACK_GRANT, requested, driver,
1029			xperm, ssid, tsid, tclass, avd->seqno, NULL, flags);
1030	return 0;
1031}
1032
1033/*
1034 * The avc extended permissions logic adds an additional 256 bits of
1035 * permissions to an avc node when extended permissions for that node are
1036 * specified in the avtab.  If the additional 256 permissions are not adequate,
1037 * as is the case with ioctls, then multiple sets may be chained together and the
1038 * driver field is used to specify which set contains the permission.
1039 */
1040int avc_has_extended_perms(struct selinux_state *state,
1041			   u32 ssid, u32 tsid, u16 tclass, u32 requested,
1042			   u8 driver, u8 xperm, struct common_audit_data *ad)
1043{
1044	struct avc_node *node;
1045	struct av_decision avd;
1046	u32 denied;
1047	struct extended_perms_decision local_xpd;
1048	struct extended_perms_decision *xpd = NULL;
1049	struct extended_perms_data allowed;
1050	struct extended_perms_data auditallow;
1051	struct extended_perms_data dontaudit;
1052	struct avc_xperms_node local_xp_node;
1053	struct avc_xperms_node *xp_node;
1054	int rc = 0, rc2;
1055
1056	xp_node = &local_xp_node;
1057	if (WARN_ON(!requested))
1058		return -EACCES;
1059
1060	rcu_read_lock();
1061
1062	node = avc_lookup(state->avc, ssid, tsid, tclass);
1063	if (unlikely(!node)) {
1064		node = avc_compute_av(state, ssid, tsid, tclass, &avd, xp_node);
1065	} else {
1066		memcpy(&avd, &node->ae.avd, sizeof(avd));
1067		xp_node = node->ae.xp_node;
1068	}
1069	/* if extended permissions are not defined, only consider av_decision */
1070	if (!xp_node || !xp_node->xp.len)
1071		goto decision;
1072
1073	local_xpd.allowed = &allowed;
1074	local_xpd.auditallow = &auditallow;
1075	local_xpd.dontaudit = &dontaudit;
1076
1077	xpd = avc_xperms_decision_lookup(driver, xp_node);
1078	if (unlikely(!xpd)) {
1079		/*
1080		 * Compute the extended_perms_decision only if the driver
1081		 * is flagged
1082		 */
1083		if (!security_xperm_test(xp_node->xp.drivers.p, driver)) {
1084			avd.allowed &= ~requested;
1085			goto decision;
1086		}
1087		rcu_read_unlock();
1088		security_compute_xperms_decision(state, ssid, tsid, tclass,
1089						 driver, &local_xpd);
1090		rcu_read_lock();
1091		avc_update_node(state->avc, AVC_CALLBACK_ADD_XPERMS, requested,
1092				driver, xperm, ssid, tsid, tclass, avd.seqno,
1093				&local_xpd, 0);
1094	} else {
1095		avc_quick_copy_xperms_decision(xperm, &local_xpd, xpd);
1096	}
1097	xpd = &local_xpd;
1098
1099	if (!avc_xperms_has_perm(xpd, xperm, XPERMS_ALLOWED))
1100		avd.allowed &= ~requested;
1101
1102decision:
1103	denied = requested & ~(avd.allowed);
1104	if (unlikely(denied))
1105		rc = avc_denied(state, ssid, tsid, tclass, requested,
1106				driver, xperm, AVC_EXTENDED_PERMS, &avd);
1107
1108	rcu_read_unlock();
1109
1110	rc2 = avc_xperms_audit(state, ssid, tsid, tclass, requested,
1111			&avd, xpd, xperm, rc, ad);
1112	if (rc2)
1113		return rc2;
1114	return rc;
1115}
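
/*
 * A minimal sketch of a caller: the ioctl hooks split the low 16 bits of
 * an ioctl command into a driver byte (selecting one 256-bit permission
 * set) and an xperm byte (the bit within that set).  The wrapper below is
 * hypothetical and the SECCLASS_FILE/FILE__IOCTL constants are assumed to
 * come from the generated policy headers:
 */
#if 0	/* illustrative sketch only, not compiled */
static int example_ioctl_check(struct selinux_state *state,
			       u32 ssid, u32 tsid, unsigned int cmd,
			       struct common_audit_data *ad)
{
	u8 driver = cmd >> 8;	/* which 256-bit xperm set */
	u8 xperm = cmd & 0xff;	/* which bit within that set */

	return avc_has_extended_perms(state, ssid, tsid, SECCLASS_FILE,
				      FILE__IOCTL, driver, xperm, ad);
}
#endif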
1116
1117/**
1118 * avc_has_perm_noaudit - Check permissions but perform no auditing.
1119 * @ssid: source security identifier
1120 * @tsid: target security identifier
1121 * @tclass: target security class
1122 * @requested: requested permissions, interpreted based on @tclass
1123 * @flags:  AVC_STRICT, AVC_NONBLOCKING, or 0
1124 * @avd: access vector decisions
1125 *
1126 * Check the AVC to determine whether the @requested permissions are granted
1127 * for the SID pair (@ssid, @tsid), interpreting the permissions
1128 * based on @tclass, and call the security server on a cache miss to obtain
1129 * a new decision and add it to the cache.  Return a copy of the decisions
1130 * in @avd.  Return %0 if all @requested permissions are granted,
1131 * -%EACCES if any permissions are denied, or another -errno upon
1132 * other errors.  This function is typically called by avc_has_perm(),
1133 * but may also be called directly to separate permission checking from
1134 * auditing, e.g. in cases where a lock must be held for the check but
1135 * should be released for the auditing.
1136 */
1137inline int avc_has_perm_noaudit(struct selinux_state *state,
1138				u32 ssid, u32 tsid,
1139				u16 tclass, u32 requested,
1140				unsigned int flags,
1141				struct av_decision *avd)
1142{
1143	struct avc_node *node;
1144	struct avc_xperms_node xp_node;
1145	int rc = 0;
1146	u32 denied;
1147
1148	if (WARN_ON(!requested))
1149		return -EACCES;
1150
1151	rcu_read_lock();
1152
1153	node = avc_lookup(state->avc, ssid, tsid, tclass);
1154	if (unlikely(!node))
1155		node = avc_compute_av(state, ssid, tsid, tclass, avd, &xp_node);
1156	else
1157		memcpy(avd, &node->ae.avd, sizeof(*avd));
1158
1159	denied = requested & ~(avd->allowed);
1160	if (unlikely(denied))
1161		rc = avc_denied(state, ssid, tsid, tclass, requested, 0, 0,
1162				flags, avd);
1163
1164	rcu_read_unlock();
1165	return rc;
1166}
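
/*
 * A minimal sketch of the check/audit split that the comment above
 * describes: do the permission check while a lock is held, then perform
 * the potentially slow audit after dropping it.  The lock and the wrapper
 * function are hypothetical:
 */
#if 0	/* illustrative sketch only, not compiled */
static int example_locked_check(struct selinux_state *state,
				u32 ssid, u32 tsid, u16 tclass, u32 requested,
				spinlock_t *lock,
				struct common_audit_data *ad)
{
	struct av_decision avd;
	int rc, rc2;

	spin_lock(lock);
	rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested,
				  0, &avd);
	spin_unlock(lock);

	/* audit outside the lock; audit failures take precedence */
	rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc, ad, 0);
	if (rc2)
		return rc2;
	return rc;
}
#endif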
1167
1168/**
1169 * avc_has_perm - Check permissions and perform any appropriate auditing.
1170 * @ssid: source security identifier
1171 * @tsid: target security identifier
1172 * @tclass: target security class
1173 * @requested: requested permissions, interpreted based on @tclass
1174 * @auditdata: auxiliary audit data
1175 *
1176 * Check the AVC to determine whether the @requested permissions are granted
1177 * for the SID pair (@ssid, @tsid), interpreting the permissions
1178 * based on @tclass, and call the security server on a cache miss to obtain
1179 * a new decision and add it to the cache.  Audit the granting or denial of
1180 * permissions in accordance with the policy.  Return %0 if all @requested
1181 * permissions are granted, -%EACCES if any permissions are denied, or
1182 * another -errno upon other errors.
1183 */
1184int avc_has_perm(struct selinux_state *state, u32 ssid, u32 tsid, u16 tclass,
1185		 u32 requested, struct common_audit_data *auditdata)
1186{
1187	struct av_decision avd;
1188	int rc, rc2;
1189
1190	rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested, 0,
1191				  &avd);
1192
1193	rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc,
1194			auditdata, 0);
1195	if (rc2)
1196		return rc2;
1197	return rc;
1198}
1199
1200int avc_has_perm_flags(struct selinux_state *state,
1201		       u32 ssid, u32 tsid, u16 tclass, u32 requested,
1202		       struct common_audit_data *auditdata,
1203		       int flags)
1204{
1205	struct av_decision avd;
1206	int rc, rc2;
1207
1208	rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested,
1209				  (flags & MAY_NOT_BLOCK) ? AVC_NONBLOCKING : 0,
1210				  &avd);
1211
1212	rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc,
1213			auditdata, flags);
1214	if (rc2)
1215		return rc2;
1216	return rc;
1217}
1218
1219u32 avc_policy_seqno(struct selinux_state *state)
1220{
1221	return state->avc->avc_cache.latest_notif;
1222}
1223
1224void avc_disable(void)
1225{
1226	/*
1227	 * If you are looking at this because you have realized that we are
1228	 * not destroying the avc_node_cachep it might be easy to fix, but
1229	 * I don't know the memory barrier semantics well enough to know.  It's
1230	 * possible that some other task dereferenced security_ops when
1231	 * it still pointed to selinux operations.  If that is the case it's
1232	 * possible that it is about to use the avc and is about to need the
1233	 * avc_node_cachep.  I know I could wrap the security.c security_ops call
1234	 * in an rcu_lock, but seriously, it's not worth it.  Instead I just flush
1235	 * the cache and get that memory back.
1236	 */
1237	if (avc_node_cachep) {
1238		avc_flush(selinux_state.avc);
1239		/* kmem_cache_destroy(avc_node_cachep); */
1240	}
1241}
1242