xref: /kernel/linux/linux-5.10/security/keys/key.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0-or-later
2/* Basic authentication token and access key management
3 *
4 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
6 */
7
8#include <linux/export.h>
9#include <linux/init.h>
10#include <linux/poison.h>
11#include <linux/sched.h>
12#include <linux/slab.h>
13#include <linux/security.h>
14#include <linux/workqueue.h>
15#include <linux/random.h>
16#include <linux/ima.h>
17#include <linux/err.h>
18#include "internal.h"
19
20struct kmem_cache *key_jar;
21struct rb_root		key_serial_tree; /* tree of keys indexed by serial */
22DEFINE_SPINLOCK(key_serial_lock);
23
24struct rb_root	key_user_tree; /* tree of quota records indexed by UID */
25DEFINE_SPINLOCK(key_user_lock);
26
27unsigned int key_quota_root_maxkeys = 1000000;	/* root's key count quota */
28unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */
29unsigned int key_quota_maxkeys = 200;		/* general key count quota */
30unsigned int key_quota_maxbytes = 20000;	/* general key space quota */
31
32static LIST_HEAD(key_types_list);
33static DECLARE_RWSEM(key_types_sem);
34
35/* We serialise key instantiation and link */
36DEFINE_MUTEX(key_construction_mutex);
37
38#ifdef KEY_DEBUGGING
39void __key_check(const struct key *key)
40{
41	printk("__key_check: key %p {%08x} should be {%08x}\n",
42	       key, key->magic, KEY_DEBUG_MAGIC);
43	BUG();
44}
45#endif
46
47/*
48 * Get the key quota record for a user, allocating a new record if one doesn't
49 * already exist.
50 */
51struct key_user *key_user_lookup(kuid_t uid)
52{
53	struct key_user *candidate = NULL, *user;
54	struct rb_node *parent, **p;
55
56try_again:
57	parent = NULL;
58	p = &key_user_tree.rb_node;
59	spin_lock(&key_user_lock);
60
61	/* search the tree for a user record with a matching UID */
62	while (*p) {
63		parent = *p;
64		user = rb_entry(parent, struct key_user, node);
65
66		if (uid_lt(uid, user->uid))
67			p = &(*p)->rb_left;
68		else if (uid_gt(uid, user->uid))
69			p = &(*p)->rb_right;
70		else
71			goto found;
72	}
73
74	/* if we get here, we failed to find a match in the tree */
75	if (!candidate) {
76		/* allocate a candidate user record if we don't already have
77		 * one */
78		spin_unlock(&key_user_lock);
79
80		user = NULL;
81		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
82		if (unlikely(!candidate))
83			goto out;
84
85		/* the allocation may have scheduled, so we need to repeat the
86		 * search lest someone else added the record whilst we were
87		 * asleep */
88		goto try_again;
89	}
90
91	/* if we get here, then the user record still hadn't appeared on the
92	 * second pass - so we use the candidate record */
93	refcount_set(&candidate->usage, 1);
94	atomic_set(&candidate->nkeys, 0);
95	atomic_set(&candidate->nikeys, 0);
96	candidate->uid = uid;
97	candidate->qnkeys = 0;
98	candidate->qnbytes = 0;
99	spin_lock_init(&candidate->lock);
100	mutex_init(&candidate->cons_lock);
101
102	rb_link_node(&candidate->node, parent, p);
103	rb_insert_color(&candidate->node, &key_user_tree);
104	spin_unlock(&key_user_lock);
105	user = candidate;
106	goto out;
107
108	/* okay - we found a user record for this UID */
109found:
110	refcount_inc(&user->usage);
111	spin_unlock(&key_user_lock);
112	kfree(candidate);
113out:
114	return user;
115}
116
117/*
118 * Dispose of a user structure
119 */
120void key_user_put(struct key_user *user)
121{
122	if (refcount_dec_and_lock(&user->usage, &key_user_lock)) {
123		rb_erase(&user->node, &key_user_tree);
124		spin_unlock(&key_user_lock);
125
126		kfree(user);
127	}
128}
129
130/*
131 * Allocate a serial number for a key.  These are assigned randomly to avoid
132 * security issues through covert channel problems.
133 */
134static inline void key_alloc_serial(struct key *key)
135{
136	struct rb_node *parent, **p;
137	struct key *xkey;
138
139	/* propose a random serial number and look for a hole for it in the
140	 * serial number tree */
141	do {
142		get_random_bytes(&key->serial, sizeof(key->serial));
143
144		key->serial >>= 1; /* negative numbers are not permitted */
145	} while (key->serial < 3);
146
147	spin_lock(&key_serial_lock);
148
149attempt_insertion:
150	parent = NULL;
151	p = &key_serial_tree.rb_node;
152
153	while (*p) {
154		parent = *p;
155		xkey = rb_entry(parent, struct key, serial_node);
156
157		if (key->serial < xkey->serial)
158			p = &(*p)->rb_left;
159		else if (key->serial > xkey->serial)
160			p = &(*p)->rb_right;
161		else
162			goto serial_exists;
163	}
164
165	/* we've found a suitable hole - arrange for this key to occupy it */
166	rb_link_node(&key->serial_node, parent, p);
167	rb_insert_color(&key->serial_node, &key_serial_tree);
168
169	spin_unlock(&key_serial_lock);
170	return;
171
172	/* we found a key with the proposed serial number - walk the tree from
173	 * that point looking for the next unused serial number */
174serial_exists:
175	for (;;) {
176		key->serial++;
177		if (key->serial < 3) {
178			key->serial = 3;
179			goto attempt_insertion;
180		}
181
182		parent = rb_next(parent);
183		if (!parent)
184			goto attempt_insertion;
185
186		xkey = rb_entry(parent, struct key, serial_node);
187		if (key->serial < xkey->serial)
188			goto attempt_insertion;
189	}
190}
191
192/**
193 * key_alloc - Allocate a key of the specified type.
194 * @type: The type of key to allocate.
195 * @desc: The key description to allow the key to be searched out.
196 * @uid: The owner of the new key.
197 * @gid: The group ID for the new key's group permissions.
198 * @cred: The credentials specifying UID namespace.
199 * @perm: The permissions mask of the new key.
200 * @flags: Flags specifying quota properties.
201 * @restrict_link: Optional link restriction for new keyrings.
202 *
203 * Allocate a key of the specified type with the attributes given.  The key is
204 * returned in an uninstantiated state and the caller needs to instantiate the
205 * key before returning.
206 *
207 * The restrict_link structure (if not NULL) will be freed when the
208 * keyring is destroyed, so it must be dynamically allocated.
209 *
210 * The user's key count quota is updated to reflect the creation of the key and
211 * the user's key data quota has the default for the key type reserved.  The
212 * instantiation function should amend this as necessary.  If insufficient
213 * quota is available, -EDQUOT will be returned.
214 *
215 * The LSM security modules can prevent a key being created, in which case
216 * -EACCES will be returned.
217 *
218 * Returns a pointer to the new key if successful and an error code otherwise.
219 *
220 * Note that the caller needs to ensure the key type isn't uninstantiated.
221 * Internally this can be done by locking key_types_sem.  Externally, this can
222 * be done by either never unregistering the key type, or making sure
223 * key_alloc() calls don't race with module unloading.
224 */
225struct key *key_alloc(struct key_type *type, const char *desc,
226		      kuid_t uid, kgid_t gid, const struct cred *cred,
227		      key_perm_t perm, unsigned long flags,
228		      struct key_restriction *restrict_link)
229{
230	struct key_user *user = NULL;
231	struct key *key;
232	size_t desclen, quotalen;
233	int ret;
234
235	key = ERR_PTR(-EINVAL);
236	if (!desc || !*desc)
237		goto error;
238
239	if (type->vet_description) {
240		ret = type->vet_description(desc);
241		if (ret < 0) {
242			key = ERR_PTR(ret);
243			goto error;
244		}
245	}
246
247	desclen = strlen(desc);
248	quotalen = desclen + 1 + type->def_datalen;
249
250	/* get hold of the key tracking for this user */
251	user = key_user_lookup(uid);
252	if (!user)
253		goto no_memory_1;
254
255	/* check that the user's quota permits allocation of another key and
256	 * its description */
257	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
258		unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
259			key_quota_root_maxkeys : key_quota_maxkeys;
260		unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
261			key_quota_root_maxbytes : key_quota_maxbytes;
262
263		spin_lock(&user->lock);
264		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
265			if (user->qnkeys + 1 > maxkeys ||
266			    user->qnbytes + quotalen > maxbytes ||
267			    user->qnbytes + quotalen < user->qnbytes)
268				goto no_quota;
269		}
270
271		user->qnkeys++;
272		user->qnbytes += quotalen;
273		spin_unlock(&user->lock);
274	}
275
276	/* allocate and initialise the key and its description */
277	key = kmem_cache_zalloc(key_jar, GFP_KERNEL);
278	if (!key)
279		goto no_memory_2;
280
281	key->index_key.desc_len = desclen;
282	key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
283	if (!key->index_key.description)
284		goto no_memory_3;
285	key->index_key.type = type;
286	key_set_index_key(&key->index_key);
287
288	refcount_set(&key->usage, 1);
289	init_rwsem(&key->sem);
290	lockdep_set_class(&key->sem, &type->lock_class);
291	key->user = user;
292	key->quotalen = quotalen;
293	key->datalen = type->def_datalen;
294	key->uid = uid;
295	key->gid = gid;
296	key->perm = perm;
297	key->expiry = TIME64_MAX;
298	key->restrict_link = restrict_link;
299	key->last_used_at = ktime_get_real_seconds();
300
301	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
302		key->flags |= 1 << KEY_FLAG_IN_QUOTA;
303	if (flags & KEY_ALLOC_BUILT_IN)
304		key->flags |= 1 << KEY_FLAG_BUILTIN;
305	if (flags & KEY_ALLOC_UID_KEYRING)
306		key->flags |= 1 << KEY_FLAG_UID_KEYRING;
307	if (flags & KEY_ALLOC_SET_KEEP)
308		key->flags |= 1 << KEY_FLAG_KEEP;
309
310#ifdef KEY_DEBUGGING
311	key->magic = KEY_DEBUG_MAGIC;
312#endif
313
314	/* let the security module know about the key */
315	ret = security_key_alloc(key, cred, flags);
316	if (ret < 0)
317		goto security_error;
318
319	/* publish the key by giving it a serial number */
320	refcount_inc(&key->domain_tag->usage);
321	atomic_inc(&user->nkeys);
322	key_alloc_serial(key);
323
324error:
325	return key;
326
327security_error:
328	kfree(key->description);
329	kmem_cache_free(key_jar, key);
330	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
331		spin_lock(&user->lock);
332		user->qnkeys--;
333		user->qnbytes -= quotalen;
334		spin_unlock(&user->lock);
335	}
336	key_user_put(user);
337	key = ERR_PTR(ret);
338	goto error;
339
340no_memory_3:
341	kmem_cache_free(key_jar, key);
342no_memory_2:
343	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
344		spin_lock(&user->lock);
345		user->qnkeys--;
346		user->qnbytes -= quotalen;
347		spin_unlock(&user->lock);
348	}
349	key_user_put(user);
350no_memory_1:
351	key = ERR_PTR(-ENOMEM);
352	goto error;
353
354no_quota:
355	spin_unlock(&user->lock);
356	key_user_put(user);
357	key = ERR_PTR(-EDQUOT);
358	goto error;
359}
360EXPORT_SYMBOL(key_alloc);
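
/*
 * Usage sketch: allocating an uninstantiated key charged to the current
 * credentials.  The helper name, the use of the "user" key type and the
 * description string are hypothetical; the returned key still has to be
 * instantiated (e.g. with key_instantiate_and_link()) before it is usable.
 */
static struct key *example_alloc_token(const struct cred *cred)
{
	/*
	 * On failure an ERR_PTR is returned (-EDQUOT if over quota, -EACCES
	 * if the LSM refused, -ENOMEM, ...); on success the caller owns one
	 * reference, to be dropped with key_put() when no longer needed.
	 */
	return key_alloc(&key_type_user, "example:token",
			 cred->fsuid, cred->fsgid, cred,
			 KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
			 KEY_ALLOC_IN_QUOTA, NULL);
}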
361
362/**
363 * key_payload_reserve - Adjust data quota reservation for the key's payload
364 * @key: The key to make the reservation for.
365 * @datalen: The amount of data payload the caller now wants.
366 *
367 * Adjust the amount of the owning user's key data quota that a key reserves.
368 * If the amount is increased, then -EDQUOT may be returned if there isn't
369 * enough free quota available.
370 *
371 * If successful, 0 is returned.
372 */
373int key_payload_reserve(struct key *key, size_t datalen)
374{
375	int delta = (int)datalen - key->datalen;
376	int ret = 0;
377
378	key_check(key);
379
380	/* contemplate the quota adjustment */
381	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
382		unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
383			key_quota_root_maxbytes : key_quota_maxbytes;
384
385		spin_lock(&key->user->lock);
386
387		if (delta > 0 &&
388		    (key->user->qnbytes + delta > maxbytes ||
389		     key->user->qnbytes + delta < key->user->qnbytes)) {
390			ret = -EDQUOT;
391		}
392		else {
393			key->user->qnbytes += delta;
394			key->quotalen += delta;
395		}
396		spin_unlock(&key->user->lock);
397	}
398
399	/* change the recorded data length if that didn't generate an error */
400	if (ret == 0)
401		key->datalen = datalen;
402
403	return ret;
404}
405EXPORT_SYMBOL(key_payload_reserve);
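
/*
 * Usage sketch: a key type's ->update() op is a typical caller of
 * key_payload_reserve().  The example_update() name is hypothetical; the
 * point is that the quota is adjusted for the new payload size first, and
 * a failure leaves both the old payload and the old reservation intact.
 */
static int example_update(struct key *key, struct key_preparsed_payload *prep)
{
	int ret;

	/* grow or shrink the owner's data quota to fit the new payload */
	ret = key_payload_reserve(key, prep->datalen);
	if (ret < 0)
		return ret;	/* typically -EDQUOT when growing */

	/* ... then swap the payload in under key->sem, RCU-freeing the old one ... */
	return 0;
}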
406
407/*
408 * Change the key state to being instantiated.
409 */
410static void mark_key_instantiated(struct key *key, int reject_error)
411{
412	/* Commit the payload before setting the state; barrier versus
413	 * key_read_state().
414	 */
415	smp_store_release(&key->state,
416			  (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
417}
418
419/*
420 * Instantiate a key and link it into the target keyring atomically.  Must be
421 * called with the target keyring's semaphore writelocked.  The target key's
422 * semaphore need not be locked as instantiation is serialised by
423 * key_construction_mutex.
424 */
425static int __key_instantiate_and_link(struct key *key,
426				      struct key_preparsed_payload *prep,
427				      struct key *keyring,
428				      struct key *authkey,
429				      struct assoc_array_edit **_edit)
430{
431	int ret, awaken;
432
433	key_check(key);
434	key_check(keyring);
435
436	awaken = 0;
437	ret = -EBUSY;
438
439	mutex_lock(&key_construction_mutex);
440
441	/* can't instantiate twice */
442	if (key->state == KEY_IS_UNINSTANTIATED) {
443		/* instantiate the key */
444		ret = key->type->instantiate(key, prep);
445
446		if (ret == 0) {
447			/* mark the key as being instantiated */
448			atomic_inc(&key->user->nikeys);
449			mark_key_instantiated(key, 0);
450			notify_key(key, NOTIFY_KEY_INSTANTIATED, 0);
451
452			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
453				awaken = 1;
454
455			/* and link it into the destination keyring */
456			if (keyring) {
457				if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
458					set_bit(KEY_FLAG_KEEP, &key->flags);
459
460				__key_link(keyring, key, _edit);
461			}
462
463			/* disable the authorisation key */
464			if (authkey)
465				key_invalidate(authkey);
466
467			if (prep->expiry != TIME64_MAX)
468				key_set_expiry(key, prep->expiry);
469		}
470	}
471
472	mutex_unlock(&key_construction_mutex);
473
474	/* wake up anyone waiting for a key to be constructed */
475	if (awaken)
476		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
477
478	return ret;
479}
480
481/**
482 * key_instantiate_and_link - Instantiate a key and link it into the keyring.
483 * @key: The key to instantiate.
484 * @data: The data to use to instantiate the keyring.
485 * @datalen: The length of @data.
486 * @keyring: Keyring to create a link in on success (or NULL).
487 * @authkey: The authorisation token permitting instantiation.
488 *
489 * Instantiate a key that's in the uninstantiated state using the provided data
490 * and, if successful, link it in to the destination keyring if one is
491 * supplied.
492 *
493 * If successful, 0 is returned, the authorisation token is revoked and anyone
494 * waiting for the key is woken up.  If the key was already instantiated,
495 * -EBUSY will be returned.
496 */
497int key_instantiate_and_link(struct key *key,
498			     const void *data,
499			     size_t datalen,
500			     struct key *keyring,
501			     struct key *authkey)
502{
503	struct key_preparsed_payload prep;
504	struct assoc_array_edit *edit = NULL;
505	int ret;
506
507	memset(&prep, 0, sizeof(prep));
508	prep.data = data;
509	prep.datalen = datalen;
510	prep.quotalen = key->type->def_datalen;
511	prep.expiry = TIME64_MAX;
512	if (key->type->preparse) {
513		ret = key->type->preparse(&prep);
514		if (ret < 0)
515			goto error;
516	}
517
518	if (keyring) {
519		ret = __key_link_lock(keyring, &key->index_key);
520		if (ret < 0)
521			goto error;
522
523		ret = __key_link_begin(keyring, &key->index_key, &edit);
524		if (ret < 0)
525			goto error_link_end;
526
527		if (keyring->restrict_link && keyring->restrict_link->check) {
528			struct key_restriction *keyres = keyring->restrict_link;
529
530			ret = keyres->check(keyring, key->type, &prep.payload,
531					    keyres->key);
532			if (ret < 0)
533				goto error_link_end;
534		}
535	}
536
537	ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit);
538
539error_link_end:
540	if (keyring)
541		__key_link_end(keyring, &key->index_key, edit);
542
543error:
544	if (key->type->preparse)
545		key->type->free_preparse(&prep);
546	return ret;
547}
548
549EXPORT_SYMBOL(key_instantiate_and_link);
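
/*
 * Usage sketch: the common pairing of key_alloc() and
 * key_instantiate_and_link().  The keyring is assumed to be pinned by the
 * caller; the helper name, description and payload are hypothetical.
 */
static int example_add_token(struct key *keyring, const void *blob, size_t len)
{
	const struct cred *cred = current_cred();
	struct key *key;
	int ret;

	key = key_alloc(&key_type_user, "example:session-token",
			cred->fsuid, cred->fsgid, cred,
			KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
			KEY_ALLOC_IN_QUOTA, NULL);
	if (IS_ERR(key))
		return PTR_ERR(key);

	/* instantiate from the blob and link into the keyring atomically */
	ret = key_instantiate_and_link(key, blob, len, keyring, NULL);

	/* if the link succeeded, the keyring now holds its own reference */
	key_put(key);
	return ret;
}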
550
551/**
552 * key_reject_and_link - Negatively instantiate a key and link it into the keyring.
553 * @key: The key to instantiate.
554 * @timeout: The timeout on the negative key.
555 * @error: The error to return when the key is hit.
556 * @keyring: Keyring to create a link in on success (or NULL).
557 * @authkey: The authorisation token permitting instantiation.
558 *
559 * Negatively instantiate a key that's in the uninstantiated state and, if
560 * successful, set its timeout and stored error and link it in to the
561 * destination keyring if one is supplied.  The key and any links to the key
562 * will be automatically garbage collected after the timeout expires.
563 *
564 * Negative keys are used to rate limit repeated request_key() calls by causing
565 * them to return the stored error code (typically ENOKEY) until the negative
566 * key expires.
567 *
568 * If successful, 0 is returned, the authorisation token is revoked and anyone
569 * waiting for the key is woken up.  If the key was already instantiated,
570 * -EBUSY will be returned.
571 */
572int key_reject_and_link(struct key *key,
573			unsigned timeout,
574			unsigned error,
575			struct key *keyring,
576			struct key *authkey)
577{
578	struct assoc_array_edit *edit = NULL;
579	int ret, awaken, link_ret = 0;
580
581	key_check(key);
582	key_check(keyring);
583
584	awaken = 0;
585	ret = -EBUSY;
586
587	if (keyring) {
588		if (keyring->restrict_link)
589			return -EPERM;
590
591		link_ret = __key_link_lock(keyring, &key->index_key);
592		if (link_ret == 0) {
593			link_ret = __key_link_begin(keyring, &key->index_key, &edit);
594			if (link_ret < 0)
595				__key_link_end(keyring, &key->index_key, edit);
596		}
597	}
598
599	mutex_lock(&key_construction_mutex);
600
601	/* can't instantiate twice */
602	if (key->state == KEY_IS_UNINSTANTIATED) {
603		/* mark the key as being negatively instantiated */
604		atomic_inc(&key->user->nikeys);
605		mark_key_instantiated(key, -error);
606		notify_key(key, NOTIFY_KEY_INSTANTIATED, -error);
607		key_set_expiry(key, ktime_get_real_seconds() + timeout);
608
609		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
610			awaken = 1;
611
612		ret = 0;
613
614		/* and link it into the destination keyring */
615		if (keyring && link_ret == 0)
616			__key_link(keyring, key, &edit);
617
618		/* disable the authorisation key */
619		if (authkey)
620			key_invalidate(authkey);
621	}
622
623	mutex_unlock(&key_construction_mutex);
624
625	if (keyring && link_ret == 0)
626		__key_link_end(keyring, &key->index_key, edit);
627
628	/* wake up anyone waiting for a key to be constructed */
629	if (awaken)
630		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
631
632	return ret == 0 ? link_ret : ret;
633}
634EXPORT_SYMBOL(key_reject_and_link);
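
/*
 * Usage sketch: negatively instantiating a key when no payload can be
 * obtained, so that concurrent and subsequent request_key() callers see
 * -ENOKEY for the next 60 seconds rather than triggering further upcalls.
 * The key, destination keyring and authorisation key are assumed to come
 * from the construction being serviced; the helper name is hypothetical.
 */
static void example_reject(struct key *key, struct key *dest_keyring,
			   struct key *authkey)
{
	/* 0 means the negative state, error and timeout were recorded */
	if (key_reject_and_link(key, 60, ENOKEY, dest_keyring, authkey) < 0)
		pr_debug("key %d was already instantiated\n", key_serial(key));
}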
635
636/**
637 * key_put - Discard a reference to a key.
638 * @key: The key to discard a reference from.
639 *
640 * Discard a reference to a key, and when all the references are gone, we
641 * schedule the cleanup task to come and pull it out of the tree in process
642 * context at some later time.
643 */
644void key_put(struct key *key)
645{
646	if (key) {
647		key_check(key);
648
649		if (refcount_dec_and_test(&key->usage))
650			schedule_work(&key_gc_work);
651	}
652}
653EXPORT_SYMBOL(key_put);
654
655/*
656 * Find a key by its serial number.
657 */
658struct key *key_lookup(key_serial_t id)
659{
660	struct rb_node *n;
661	struct key *key;
662
663	spin_lock(&key_serial_lock);
664
665	/* search the tree for the specified key */
666	n = key_serial_tree.rb_node;
667	while (n) {
668		key = rb_entry(n, struct key, serial_node);
669
670		if (id < key->serial)
671			n = n->rb_left;
672		else if (id > key->serial)
673			n = n->rb_right;
674		else
675			goto found;
676	}
677
678not_found:
679	key = ERR_PTR(-ENOKEY);
680	goto error;
681
682found:
683	/* A key is allowed to be looked up only if someone still owns a
684	 * reference to it - otherwise it's awaiting the gc.
685	 */
686	if (!refcount_inc_not_zero(&key->usage))
687		goto not_found;
688
689error:
690	spin_unlock(&key_serial_lock);
691	return key;
692}
693
694/*
695 * Find and lock the specified key type against removal.
696 *
697 * We return with the sem read-locked if successful.  If the type wasn't
698 * available, -ENOKEY is returned instead.
699 */
700struct key_type *key_type_lookup(const char *type)
701{
702	struct key_type *ktype;
703
704	down_read(&key_types_sem);
705
706	/* look up the key type to see if it's one of the registered kernel
707	 * types */
708	list_for_each_entry(ktype, &key_types_list, link) {
709		if (strcmp(ktype->name, type) == 0)
710			goto found_kernel_type;
711	}
712
713	up_read(&key_types_sem);
714	ktype = ERR_PTR(-ENOKEY);
715
716found_kernel_type:
717	return ktype;
718}
719
720void key_set_timeout(struct key *key, unsigned timeout)
721{
722	time64_t expiry = TIME64_MAX;
723
724	/* make the changes with the locks held to prevent races */
725	down_write(&key->sem);
726
727	if (timeout > 0)
728		expiry = ktime_get_real_seconds() + timeout;
729	key_set_expiry(key, expiry);
730
731	up_write(&key->sem);
732}
733EXPORT_SYMBOL_GPL(key_set_timeout);
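
/*
 * Usage sketch: giving a key a five-minute lifetime; passing 0 instead
 * clears any previously set expiry (the expiry reverts to TIME64_MAX).
 */
static void example_set_lifetime(struct key *key)
{
	key_set_timeout(key, 5 * 60);	/* expire 300 seconds from now */
}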
734
735/*
736 * Unlock a key type locked by key_type_lookup().
737 */
738void key_type_put(struct key_type *ktype)
739{
740	up_read(&key_types_sem);
741}
742
743/*
744 * Attempt to update an existing key.
745 *
746 * The key is given to us with an incremented refcount that we need to discard
747 * if we get an error.
748 */
749static inline key_ref_t __key_update(key_ref_t key_ref,
750				     struct key_preparsed_payload *prep)
751{
752	struct key *key = key_ref_to_ptr(key_ref);
753	int ret;
754
755	/* need write permission on the key to update it */
756	ret = key_permission(key_ref, KEY_NEED_WRITE);
757	if (ret < 0)
758		goto error;
759
760	ret = -EEXIST;
761	if (!key->type->update)
762		goto error;
763
764	down_write(&key->sem);
765
766	ret = key->type->update(key, prep);
767	if (ret == 0) {
768		/* Updating a negative key positively instantiates it */
769		mark_key_instantiated(key, 0);
770		notify_key(key, NOTIFY_KEY_UPDATED, 0);
771	}
772
773	up_write(&key->sem);
774
775	if (ret < 0)
776		goto error;
777out:
778	return key_ref;
779
780error:
781	key_put(key);
782	key_ref = ERR_PTR(ret);
783	goto out;
784}
785
786/**
787 * key_create_or_update - Update or create and instantiate a key.
788 * @keyring_ref: A pointer to the destination keyring with possession flag.
789 * @type: The type of key.
790 * @description: The searchable description for the key.
791 * @payload: The data to use to instantiate or update the key.
792 * @plen: The length of @payload.
793 * @perm: The permissions mask for a new key.
794 * @flags: The quota flags for a new key.
795 *
796 * Search the destination keyring for a key of the same description and if one
797 * is found, update it, otherwise create and instantiate a new one and create a
798 * link to it from that keyring.
799 *
800 * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
801 * concocted.
802 *
803 * Returns a pointer to the new key if successful, -ENODEV if the key type
804 * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
805 * caller isn't permitted to modify the keyring or the LSM did not permit
806 * creation of the key.
807 *
808 * On success, the possession flag from the keyring ref will be tacked on to
809 * the key ref before it is returned.
810 */
811key_ref_t key_create_or_update(key_ref_t keyring_ref,
812			       const char *type,
813			       const char *description,
814			       const void *payload,
815			       size_t plen,
816			       key_perm_t perm,
817			       unsigned long flags)
818{
819	struct keyring_index_key index_key = {
820		.description	= description,
821	};
822	struct key_preparsed_payload prep;
823	struct assoc_array_edit *edit = NULL;
824	const struct cred *cred = current_cred();
825	struct key *keyring, *key = NULL;
826	key_ref_t key_ref;
827	int ret;
828	struct key_restriction *restrict_link = NULL;
829
830	/* look up the key type to see if it's one of the registered kernel
831	 * types */
832	index_key.type = key_type_lookup(type);
833	if (IS_ERR(index_key.type)) {
834		key_ref = ERR_PTR(-ENODEV);
835		goto error;
836	}
837
838	key_ref = ERR_PTR(-EINVAL);
839	if (!index_key.type->instantiate ||
840	    (!index_key.description && !index_key.type->preparse))
841		goto error_put_type;
842
843	keyring = key_ref_to_ptr(keyring_ref);
844
845	key_check(keyring);
846
847	if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION))
848		restrict_link = keyring->restrict_link;
849
850	key_ref = ERR_PTR(-ENOTDIR);
851	if (keyring->type != &key_type_keyring)
852		goto error_put_type;
853
854	memset(&prep, 0, sizeof(prep));
855	prep.data = payload;
856	prep.datalen = plen;
857	prep.quotalen = index_key.type->def_datalen;
858	prep.expiry = TIME64_MAX;
859	if (index_key.type->preparse) {
860		ret = index_key.type->preparse(&prep);
861		if (ret < 0) {
862			key_ref = ERR_PTR(ret);
863			goto error_free_prep;
864		}
865		if (!index_key.description)
866			index_key.description = prep.description;
867		key_ref = ERR_PTR(-EINVAL);
868		if (!index_key.description)
869			goto error_free_prep;
870	}
871	index_key.desc_len = strlen(index_key.description);
872	key_set_index_key(&index_key);
873
874	ret = __key_link_lock(keyring, &index_key);
875	if (ret < 0) {
876		key_ref = ERR_PTR(ret);
877		goto error_free_prep;
878	}
879
880	ret = __key_link_begin(keyring, &index_key, &edit);
881	if (ret < 0) {
882		key_ref = ERR_PTR(ret);
883		goto error_link_end;
884	}
885
886	if (restrict_link && restrict_link->check) {
887		ret = restrict_link->check(keyring, index_key.type,
888					   &prep.payload, restrict_link->key);
889		if (ret < 0) {
890			key_ref = ERR_PTR(ret);
891			goto error_link_end;
892		}
893	}
894
895	/* if we're going to allocate a new key, we're going to have
896	 * to modify the keyring */
897	ret = key_permission(keyring_ref, KEY_NEED_WRITE);
898	if (ret < 0) {
899		key_ref = ERR_PTR(ret);
900		goto error_link_end;
901	}
902
903	/* if it's possible to update this type of key, search for an existing
904	 * key of the same type and description in the destination keyring and
905	 * update that instead if possible
906	 */
907	if (index_key.type->update) {
908		key_ref = find_key_to_update(keyring_ref, &index_key);
909		if (key_ref)
910			goto found_matching_key;
911	}
912
913	/* if the client doesn't provide a permissions mask, decide on the one we want */
914	if (perm == KEY_PERM_UNDEF) {
915		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
916		perm |= KEY_USR_VIEW;
917
918		if (index_key.type->read)
919			perm |= KEY_POS_READ;
920
921		if (index_key.type == &key_type_keyring ||
922		    index_key.type->update)
923			perm |= KEY_POS_WRITE;
924	}
925
926	/* allocate a new key */
927	key = key_alloc(index_key.type, index_key.description,
928			cred->fsuid, cred->fsgid, cred, perm, flags, NULL);
929	if (IS_ERR(key)) {
930		key_ref = ERR_CAST(key);
931		goto error_link_end;
932	}
933
934	/* instantiate it and link it into the target keyring */
935	ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
936	if (ret < 0) {
937		key_put(key);
938		key_ref = ERR_PTR(ret);
939		goto error_link_end;
940	}
941
942	ima_post_key_create_or_update(keyring, key, payload, plen,
943				      flags, true);
944
945	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
946
947error_link_end:
948	__key_link_end(keyring, &index_key, edit);
949error_free_prep:
950	if (index_key.type->preparse)
951		index_key.type->free_preparse(&prep);
952error_put_type:
953	key_type_put(index_key.type);
954error:
955	return key_ref;
956
957 found_matching_key:
958	/* we found a matching key, so we're going to try to update it
959	 * - we can drop the locks first as we have the key pinned
960	 */
961	__key_link_end(keyring, &index_key, edit);
962
963	key = key_ref_to_ptr(key_ref);
964	if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
965		ret = wait_for_key_construction(key, true);
966		if (ret < 0) {
967			key_ref_put(key_ref);
968			key_ref = ERR_PTR(ret);
969			goto error_free_prep;
970		}
971	}
972
973	key_ref = __key_update(key_ref, &prep);
974
975	if (!IS_ERR(key_ref))
976		ima_post_key_create_or_update(keyring, key,
977					      payload, plen,
978					      flags, false);
979
980	goto error_free_prep;
981}
982EXPORT_SYMBOL(key_create_or_update);
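
/*
 * Usage sketch: the add-or-update pattern most callers want.  The
 * destination keyring reference is assumed to be held by the caller; the
 * "user" type, description and payload are hypothetical.  KEY_PERM_UNDEF
 * asks for the default permission mask concocted above.
 */
static int example_store_blob(struct key *keyring, const void *blob, size_t len)
{
	key_ref_t kref;

	kref = key_create_or_update(make_key_ref(keyring, true),
				    "user", "example:blob", blob, len,
				    KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA);
	if (IS_ERR(kref))
		return PTR_ERR(kref);	/* -ENODEV, -ENOTDIR, -EACCES, -EDQUOT, ... */

	key_ref_put(kref);
	return 0;
}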
983
984/**
985 * key_update - Update a key's contents.
986 * @key_ref: The pointer (plus possession flag) to the key.
987 * @payload: The data to be used to update the key.
988 * @plen: The length of @payload.
989 *
990 * Attempt to update the contents of a key with the given payload data.  The
991 * caller must be granted Write permission on the key.  Negative keys can be
992 * instantiated by this method.
993 *
994 * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
995 * type does not support updating.  The key type may return other errors.
996 */
997int key_update(key_ref_t key_ref, const void *payload, size_t plen)
998{
999	struct key_preparsed_payload prep;
1000	struct key *key = key_ref_to_ptr(key_ref);
1001	int ret;
1002
1003	key_check(key);
1004
1005	/* the key must be writable */
1006	ret = key_permission(key_ref, KEY_NEED_WRITE);
1007	if (ret < 0)
1008		return ret;
1009
1010	/* attempt to update it if supported */
1011	if (!key->type->update)
1012		return -EOPNOTSUPP;
1013
1014	memset(&prep, 0, sizeof(prep));
1015	prep.data = payload;
1016	prep.datalen = plen;
1017	prep.quotalen = key->type->def_datalen;
1018	prep.expiry = TIME64_MAX;
1019	if (key->type->preparse) {
1020		ret = key->type->preparse(&prep);
1021		if (ret < 0)
1022			goto error;
1023	}
1024
1025	down_write(&key->sem);
1026
1027	ret = key->type->update(key, &prep);
1028	if (ret == 0) {
1029		/* Updating a negative key positively instantiates it */
1030		mark_key_instantiated(key, 0);
1031		notify_key(key, NOTIFY_KEY_UPDATED, 0);
1032	}
1033
1034	up_write(&key->sem);
1035
1036error:
1037	if (key->type->preparse)
1038		key->type->free_preparse(&prep);
1039	return ret;
1040}
1041EXPORT_SYMBOL(key_update);
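
/*
 * Usage sketch: replacing the payload of a key the caller already holds a
 * reference to.  make_key_ref(..., true) marks the reference as possessed,
 * so the KEY_NEED_WRITE check also considers the possessor permission bits.
 */
static int example_refresh(struct key *key, const void *blob, size_t len)
{
	return key_update(make_key_ref(key, true), blob, len);
}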
1042
1043/**
1044 * key_revoke - Revoke a key.
1045 * @key: The key to be revoked.
1046 *
1047 * Mark a key as being revoked and ask the type to free up its resources.  The
1048 * revocation timeout is set and the key and all its links will be
1049 * automatically garbage collected after key_gc_delay amount of time if they
1050 * are not manually dealt with first.
1051 */
1052void key_revoke(struct key *key)
1053{
1054	time64_t time;
1055
1056	key_check(key);
1057
1058	/* make sure no one's trying to change or use the key when we mark it
1059	 * - we tell lockdep that we might nest because we might be revoking an
1060	 *   authorisation key whilst holding the sem on a key we've just
1061	 *   instantiated
1062	 */
1063	down_write_nested(&key->sem, 1);
1064	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags)) {
1065		notify_key(key, NOTIFY_KEY_REVOKED, 0);
1066		if (key->type->revoke)
1067			key->type->revoke(key);
1068
1069		/* set the death time to no more than the expiry time */
1070		time = ktime_get_real_seconds();
1071		if (key->revoked_at == 0 || key->revoked_at > time) {
1072			key->revoked_at = time;
1073			key_schedule_gc(key->revoked_at + key_gc_delay);
1074		}
1075	}
1076
1077	up_write(&key->sem);
1078}
1079EXPORT_SYMBOL(key_revoke);
1080
1081/**
1082 * key_invalidate - Invalidate a key.
1083 * @key: The key to be invalidated.
1084 *
1085 * Mark a key as being invalidated and have it cleaned up immediately.  The key
1086 * is ignored by all searches and other operations from this point.
1087 */
1088void key_invalidate(struct key *key)
1089{
1090	kenter("%d", key_serial(key));
1091
1092	key_check(key);
1093
1094	if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
1095		down_write_nested(&key->sem, 1);
1096		if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
1097			notify_key(key, NOTIFY_KEY_INVALIDATED, 0);
1098			key_schedule_gc_links();
1099		}
1100		up_write(&key->sem);
1101	}
1102}
1103EXPORT_SYMBOL(key_invalidate);
1104
1105/**
1106 * generic_key_instantiate - Simple instantiation of a key from preparsed data
1107 * @key: The key to be instantiated
1108 * @prep: The preparsed data to load.
1109 *
1110 * Instantiate a key from preparsed data.  We assume we can just copy the data
1111 * in directly and clear the old pointers.
1112 *
1113 * This can be pointed to directly by the key type instantiate op pointer.
1114 */
1115int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
1116{
1117	int ret;
1118
1119	pr_devel("==>%s()\n", __func__);
1120
1121	ret = key_payload_reserve(key, prep->quotalen);
1122	if (ret == 0) {
1123		rcu_assign_keypointer(key, prep->payload.data[0]);
1124		key->payload.data[1] = prep->payload.data[1];
1125		key->payload.data[2] = prep->payload.data[2];
1126		key->payload.data[3] = prep->payload.data[3];
1127		prep->payload.data[0] = NULL;
1128		prep->payload.data[1] = NULL;
1129		prep->payload.data[2] = NULL;
1130		prep->payload.data[3] = NULL;
1131	}
1132	pr_devel("<==%s() = %d\n", __func__, ret);
1133	return ret;
1134}
1135EXPORT_SYMBOL(generic_key_instantiate);
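
/*
 * Usage sketch (hypothetical "example" key type): a ->preparse() that copies
 * the caller-supplied blob into prep->payload.data[0] and sets the quota
 * charge, paired with generic_key_instantiate() as the ->instantiate() op so
 * the preparsed pointers are simply moved across into the key.
 */
static int example_preparse(struct key_preparsed_payload *prep)
{
	if (prep->datalen == 0 || prep->datalen > PAGE_SIZE)
		return -EINVAL;

	prep->payload.data[0] = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
	if (!prep->payload.data[0])
		return -ENOMEM;

	prep->quotalen = prep->datalen;	/* charged by generic_key_instantiate() */
	return 0;
}

static void example_free_preparse(struct key_preparsed_payload *prep)
{
	kfree(prep->payload.data[0]);	/* NULL, hence a no-op, if the key took the payload */
}

static void example_destroy(struct key *key)
{
	kfree(key->payload.data[0]);	/* the key is unreachable by the time this runs */
}

static struct key_type key_type_example = {
	.name		= "example",
	.preparse	= example_preparse,
	.free_preparse	= example_free_preparse,
	.instantiate	= generic_key_instantiate,
	.destroy	= example_destroy,
};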
1136
1137/**
1138 * register_key_type - Register a type of key.
1139 * @ktype: The new key type.
1140 *
1141 * Register a new key type.
1142 *
1143 * Returns 0 on success or -EEXIST if a type of this name already exists.
1144 */
1145int register_key_type(struct key_type *ktype)
1146{
1147	struct key_type *p;
1148	int ret;
1149
1150	memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));
1151
1152	ret = -EEXIST;
1153	down_write(&key_types_sem);
1154
1155	/* disallow key types with the same name */
1156	list_for_each_entry(p, &key_types_list, link) {
1157		if (strcmp(p->name, ktype->name) == 0)
1158			goto out;
1159	}
1160
1161	/* store the type */
1162	list_add(&ktype->link, &key_types_list);
1163
1164	pr_notice("Key type %s registered\n", ktype->name);
1165	ret = 0;
1166
1167out:
1168	up_write(&key_types_sem);
1169	return ret;
1170}
1171EXPORT_SYMBOL(register_key_type);
1172
1173/**
1174 * unregister_key_type - Unregister a type of key.
1175 * @ktype: The key type.
1176 *
1177 * Unregister a key type and mark all the extant keys of this type as dead.
1178 * Those keys of this type are then destroyed to get rid of their payloads and
1179 * they and their links will be garbage collected as soon as possible.
1180 */
1181void unregister_key_type(struct key_type *ktype)
1182{
1183	down_write(&key_types_sem);
1184	list_del_init(&ktype->link);
1185	downgrade_write(&key_types_sem);
1186	key_gc_keytype(ktype);
1187	pr_notice("Key type %s unregistered\n", ktype->name);
1188	up_read(&key_types_sem);
1189}
1190EXPORT_SYMBOL(unregister_key_type);
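
/*
 * Usage sketch: registering the hypothetical key type sketched above from a
 * module, and unregistering it on exit, at which point any remaining keys of
 * that type are killed off as described in the comment above.
 */
static int __init example_key_init(void)
{
	return register_key_type(&key_type_example);	/* -EEXIST if the name clashes */
}

static void __exit example_key_exit(void)
{
	unregister_key_type(&key_type_example);
}

module_init(example_key_init);
module_exit(example_key_exit);
MODULE_LICENSE("GPL");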
1191
1192/*
1193 * Initialise the key management state.
1194 */
1195void __init key_init(void)
1196{
1197	/* allocate a slab in which we can store keys */
1198	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
1199			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1200
1201	/* add the special key types */
1202	list_add_tail(&key_type_keyring.link, &key_types_list);
1203	list_add_tail(&key_type_dead.link, &key_types_list);
1204	list_add_tail(&key_type_user.link, &key_types_list);
1205	list_add_tail(&key_type_logon.link, &key_types_list);
1206
1207	/* record the root user tracking */
1208	rb_link_node(&root_key_user.node,
1209		     NULL,
1210		     &key_user_tree.rb_node);
1211
1212	rb_insert_color(&root_key_user.node,
1213			&key_user_tree);
1214}
1215