// SPDX-License-Identifier: GPL-2.0-only
/*
 * FILS AEAD for (Re)Association Request/Response frames
 * Copyright 2016, Qualcomm Atheros, Inc.
 */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>

#include "ieee80211_i.h"
#include "aes_cmac.h"
#include "fils_aead.h"

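/*
 * The FILS AEAD (IEEE 802.11ai) protects (Re)Association frames with AES-SIV
 * (RFC 5297), built here from the kernel crypto API: "cmac(aes)" provides the
 * S2V pseudo-random function and "ctr(aes)" the actual encryption, each keyed
 * with one half of the FILS KEK.
 */

/*
 * Multiply a 128-bit block by x in GF(2^128): shift left by one bit and, on
 * carry-out of the top bit, XOR the low octet with the reduction constant
 * 0x87. This is the dbl() operation used by S2V (RFC 5297).
 */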
static void gf_mulx(u8 *pad)
{
	u64 a = get_unaligned_be64(pad);
	u64 b = get_unaligned_be64(pad + 8);

	put_unaligned_be64((a << 1) | (b >> 63), pad);
	put_unaligned_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0), pad + 8);
}

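/*
 * S2V from RFC 5297: start from D = AES-CMAC(K, <zero>) and fold in every
 * associated data vector except the last as D = dbl(D) xor AES-CMAC(K, Si).
 * The last vector is xorend'ed with D when it is at least one AES block long
 * (otherwise D is doubled and the padded vector XORed in), and the result is
 * CMACed to produce the 128-bit synthetic IV V.
 */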
static int aes_s2v(struct crypto_shash *tfm,
		   size_t num_elem, const u8 *addr[], size_t len[], u8 *v)
{
	u8 d[AES_BLOCK_SIZE], tmp[AES_BLOCK_SIZE] = {};
	SHASH_DESC_ON_STACK(desc, tfm);
	size_t i;

	desc->tfm = tfm;

	/* D = AES-CMAC(K, <zero>) */
	crypto_shash_digest(desc, tmp, AES_BLOCK_SIZE, d);

	for (i = 0; i < num_elem - 1; i++) {
		/* D = dbl(D) xor AES_CMAC(K, Si) */
		gf_mulx(d); /* dbl */
		crypto_shash_digest(desc, addr[i], len[i], tmp);
		crypto_xor(d, tmp, AES_BLOCK_SIZE);
	}

	crypto_shash_init(desc);

	if (len[i] >= AES_BLOCK_SIZE) {
		/* len(Sn) >= 128 */
		/* T = Sn xorend D */
		crypto_shash_update(desc, addr[i], len[i] - AES_BLOCK_SIZE);
		crypto_xor(d, addr[i] + len[i] - AES_BLOCK_SIZE,
			   AES_BLOCK_SIZE);
	} else {
		/* len(Sn) < 128 */
		/* T = dbl(D) xor pad(Sn) */
		gf_mulx(d); /* dbl */
		crypto_xor(d, addr[i], len[i]);
		d[len[i]] ^= 0x80;
	}
	/* V = AES-CMAC(K, T) */
	crypto_shash_finup(desc, d, AES_BLOCK_SIZE, v);

	return 0;
}

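/*
 * SIV encryption (RFC 5297): V = S2V(K1, AD1, ..., ADn, plaintext) is written
 * in front of the ciphertext as the synthetic IV, and the plaintext is then
 * encrypted with AES-CTR under K2, using V (with two counter bits cleared) as
 * the initial counter block Q. key holds K1 || K2.
 */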
/* Note: addr[] and len[] need to have one extra slot at the end. */
static int aes_siv_encrypt(const u8 *key, size_t key_len,
			   const u8 *plain, size_t plain_len,
			   size_t num_elem, const u8 *addr[],
			   size_t len[], u8 *out)
{
	u8 v[AES_BLOCK_SIZE];
	struct crypto_shash *tfm;
	struct crypto_skcipher *tfm2;
	struct skcipher_request *req;
	int res;
	struct scatterlist src[1], dst[1];
	u8 *tmp;

	key_len /= 2; /* S2V key || CTR key */

	addr[num_elem] = plain;
	len[num_elem] = plain_len;
	num_elem++;

	/* S2V */

	tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* K1 for S2V */
	res = crypto_shash_setkey(tfm, key, key_len);
	if (!res)
		res = aes_s2v(tfm, num_elem, addr, len, v);
	crypto_free_shash(tfm);
	if (res)
		return res;

	/* Work on a temporary copy of the plaintext: the caller may pass the
	 * same buffer as plain and out, and the synthetic IV written below
	 * would otherwise overwrite plaintext before AES-CTR has consumed it.
	 */
	tmp = kmemdup(plain, plain_len, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* IV for CTR before encrypted data */
	memcpy(out, v, AES_BLOCK_SIZE);

	/* Synthetic IV to be used as the initial counter in CTR:
	 * Q = V bitand (1^64 || 0^1 || 1^31 || 0^1 || 1^31)
	 */
	v[8] &= 0x7f;
	v[12] &= 0x7f;

	/* CTR */

	tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm2)) {
		kfree(tmp);
		return PTR_ERR(tfm2);
	}
	/* K2 for CTR */
	res = crypto_skcipher_setkey(tfm2, key + key_len, key_len);
	if (res)
		goto fail;

	req = skcipher_request_alloc(tfm2, GFP_KERNEL);
	if (!req) {
		res = -ENOMEM;
		goto fail;
	}

	sg_init_one(src, tmp, plain_len);
	sg_init_one(dst, out + AES_BLOCK_SIZE, plain_len);
	skcipher_request_set_crypt(req, src, dst, plain_len, v);
	res = crypto_skcipher_encrypt(req);
	skcipher_request_free(req);
fail:
	kfree(tmp);
	crypto_free_skcipher(tfm2);
	return res;
}

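/*
 * SIV decryption (RFC 5297): recover the plaintext with AES-CTR under K2,
 * seeded with the received synthetic IV (again with two counter bits
 * cleared), then recompute S2V over the associated data and the plaintext
 * and compare it with the received IV; a mismatch rejects the frame.
 */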
/* Note: addr[] and len[] need to have one extra slot at the end. */
static int aes_siv_decrypt(const u8 *key, size_t key_len,
			   const u8 *iv_crypt, size_t iv_c_len,
			   size_t num_elem, const u8 *addr[], size_t len[],
			   u8 *out)
{
	struct crypto_shash *tfm;
	struct crypto_skcipher *tfm2;
	struct skcipher_request *req;
	struct scatterlist src[1], dst[1];
	size_t crypt_len;
	int res;
	u8 frame_iv[AES_BLOCK_SIZE], iv[AES_BLOCK_SIZE];
	u8 check[AES_BLOCK_SIZE];

	crypt_len = iv_c_len - AES_BLOCK_SIZE;
	key_len /= 2; /* S2V key || CTR key */
	addr[num_elem] = out;
	len[num_elem] = crypt_len;
	num_elem++;

	memcpy(iv, iv_crypt, AES_BLOCK_SIZE);
	memcpy(frame_iv, iv_crypt, AES_BLOCK_SIZE);

	/* Synthetic IV to be used as the initial counter in CTR:
	 * Q = V bitand (1^64 || 0^1 || 1^31 || 0^1 || 1^31)
	 */
	iv[8] &= 0x7f;
	iv[12] &= 0x7f;

	/* CTR */

	tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm2))
		return PTR_ERR(tfm2);
	/* K2 for CTR */
	res = crypto_skcipher_setkey(tfm2, key + key_len, key_len);
	if (res) {
		crypto_free_skcipher(tfm2);
		return res;
	}

	req = skcipher_request_alloc(tfm2, GFP_KERNEL);
	if (!req) {
		crypto_free_skcipher(tfm2);
		return -ENOMEM;
	}

	sg_init_one(src, iv_crypt + AES_BLOCK_SIZE, crypt_len);
	sg_init_one(dst, out, crypt_len);
	skcipher_request_set_crypt(req, src, dst, crypt_len, iv);
	res = crypto_skcipher_decrypt(req);
	skcipher_request_free(req);
	crypto_free_skcipher(tfm2);
	if (res)
		return res;

	/* S2V */

	tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* K1 for S2V */
	res = crypto_shash_setkey(tfm, key, key_len);
	if (!res)
		res = aes_s2v(tfm, num_elem, addr, len, check);
	crypto_free_shash(tfm);
	if (res)
		return res;
	if (memcmp(check, frame_iv, AES_BLOCK_SIZE) != 0)
		return -EINVAL;
	return 0;
}

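/*
 * Encrypt everything that follows the FILS Session element of a
 * (Re)Association Request in place. The associated data vectors are, in
 * order: the STA MAC address, the AP BSSID, the STA nonce, the AP nonce and
 * the frame body from the Capability Information field up to and including
 * the FILS Session element. The frame grows by the 16-octet synthetic IV
 * inserted between the FILS Session element and the ciphertext.
 */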
int fils_encrypt_assoc_req(struct sk_buff *skb,
			   struct ieee80211_mgd_assoc_data *assoc_data)
{
	struct ieee80211_mgmt *mgmt = (void *)skb->data;
	u8 *capab, *ies, *encr;
	const u8 *addr[5 + 1], *session;
	size_t len[5 + 1];
	size_t crypt_len;

	if (ieee80211_is_reassoc_req(mgmt->frame_control)) {
		capab = (u8 *)&mgmt->u.reassoc_req.capab_info;
		ies = mgmt->u.reassoc_req.variable;
	} else {
		capab = (u8 *)&mgmt->u.assoc_req.capab_info;
		ies = mgmt->u.assoc_req.variable;
	}

	session = cfg80211_find_ext_ie(WLAN_EID_EXT_FILS_SESSION,
				       ies, skb->data + skb->len - ies);
	if (!session || session[1] != 1 + 8)
		return -EINVAL;
	/* encrypt after FILS Session element */
	encr = (u8 *)session + 2 + 1 + 8;

	/* AES-SIV AAD vectors */

	/* The STA's MAC address */
	addr[0] = mgmt->sa;
	len[0] = ETH_ALEN;
	/* The AP's BSSID */
	addr[1] = mgmt->da;
	len[1] = ETH_ALEN;
	/* The STA's nonce */
	addr[2] = assoc_data->fils_nonces;
	len[2] = FILS_NONCE_LEN;
	/* The AP's nonce */
	addr[3] = &assoc_data->fils_nonces[FILS_NONCE_LEN];
	len[3] = FILS_NONCE_LEN;
	/* The (Re)Association Request frame from the Capability Information
	 * field to the FILS Session element (both inclusive).
	 */
	addr[4] = capab;
	len[4] = encr - capab;

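	/* Reserve room in the skb for the 16-octet synthetic IV that AES-SIV
	 * places in front of the ciphertext.
	 */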
	crypt_len = skb->data + skb->len - encr;
	skb_put(skb, AES_BLOCK_SIZE);
	return aes_siv_encrypt(assoc_data->fils_kek, assoc_data->fils_kek_len,
			       encr, crypt_len, 5, addr, len, encr);
}

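/*
 * Verify and decrypt the AES-SIV protected part of a (Re)Association
 * Response. The associated data vectors mirror the request, but with the
 * AP's address and nonce listed before the STA's. On success the frame
 * shrinks by the 16-octet synthetic IV.
 */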
int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata,
			    u8 *frame, size_t *frame_len,
			    struct ieee80211_mgd_assoc_data *assoc_data)
{
	struct ieee80211_mgmt *mgmt = (void *)frame;
	u8 *capab, *ies, *encr;
	const u8 *addr[5 + 1], *session;
	size_t len[5 + 1];
	int res;
	size_t crypt_len;

	if (*frame_len < 24 + 6)
		return -EINVAL;

	capab = (u8 *)&mgmt->u.assoc_resp.capab_info;
	ies = mgmt->u.assoc_resp.variable;
	session = cfg80211_find_ext_ie(WLAN_EID_EXT_FILS_SESSION,
				       ies, frame + *frame_len - ies);
	if (!session || session[1] != 1 + 8) {
		mlme_dbg(sdata,
			 "No (valid) FILS Session element in (Re)Association Response frame from %pM",
			 mgmt->sa);
		return -EINVAL;
	}
	/* decrypt after FILS Session element */
	encr = (u8 *)session + 2 + 1 + 8;

	/* AES-SIV AAD vectors */

	/* The AP's BSSID */
	addr[0] = mgmt->sa;
	len[0] = ETH_ALEN;
	/* The STA's MAC address */
	addr[1] = mgmt->da;
	len[1] = ETH_ALEN;
	/* The AP's nonce */
	addr[2] = &assoc_data->fils_nonces[FILS_NONCE_LEN];
	len[2] = FILS_NONCE_LEN;
	/* The STA's nonce */
	addr[3] = assoc_data->fils_nonces;
	len[3] = FILS_NONCE_LEN;
	/* The (Re)Association Response frame from the Capability Information
	 * field to the FILS Session element (both inclusive).
	 */
	addr[4] = capab;
	len[4] = encr - capab;

	crypt_len = frame + *frame_len - encr;
	if (crypt_len < AES_BLOCK_SIZE) {
		mlme_dbg(sdata,
			 "Not enough room for AES-SIV data after FILS Session element in (Re)Association Response frame from %pM",
			 mgmt->sa);
		return -EINVAL;
	}
	res = aes_siv_decrypt(assoc_data->fils_kek, assoc_data->fils_kek_len,
			      encr, crypt_len, 5, addr, len, encr);
	if (res != 0) {
		mlme_dbg(sdata,
			 "AES-SIV decryption of (Re)Association Response frame from %pM failed",
			 mgmt->sa);
		return res;
	}
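	/* The 16-octet synthetic IV has been verified and consumed; the
	 * plaintext now begins where the IV was, so the frame shrinks by one
	 * AES block.
	 */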
	*frame_len -= AES_BLOCK_SIZE;
	return 0;
}