1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright 2016 Broadcom
4 */
5
6#include <linux/err.h>
7#include <linux/module.h>
8#include <linux/init.h>
9#include <linux/errno.h>
10#include <linux/kernel.h>
11#include <linux/interrupt.h>
12#include <linux/platform_device.h>
13#include <linux/scatterlist.h>
14#include <linux/crypto.h>
15#include <linux/kthread.h>
16#include <linux/rtnetlink.h>
17#include <linux/sched.h>
18#include <linux/of_address.h>
19#include <linux/of_device.h>
20#include <linux/io.h>
21#include <linux/bitops.h>
22
23#include <crypto/algapi.h>
24#include <crypto/aead.h>
25#include <crypto/internal/aead.h>
26#include <crypto/aes.h>
27#include <crypto/internal/des.h>
28#include <crypto/hmac.h>
29#include <crypto/sha.h>
30#include <crypto/md5.h>
31#include <crypto/authenc.h>
32#include <crypto/skcipher.h>
33#include <crypto/hash.h>
34#include <crypto/sha3.h>
35
36#include "util.h"
37#include "cipher.h"
38#include "spu.h"
39#include "spum.h"
40#include "spu2.h"
41
42/* ================= Device Structure ================== */
43
44struct bcm_device_private iproc_priv;
45
46/* ==================== Parameters ===================== */
47
48int flow_debug_logging;
49module_param(flow_debug_logging, int, 0644);
50MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");
51
52int packet_debug_logging;
53module_param(packet_debug_logging, int, 0644);
54MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");
55
56int debug_logging_sleep;
57module_param(debug_logging_sleep, int, 0644);
58MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");
59
60/*
61 * The value of these module parameters is used to set the priority for each
62 * algo type when this driver registers algos with the kernel crypto API.
63 * To use a priority other than the default, set the priority in the insmod or
64 * modprobe. Changing the module priority after init time has no effect.
65 *
66 * The default priorities are chosen to be lower (less preferred) than ARMv8 CE
67 * algos, but more preferred than generic software algos.
68 */
69static int cipher_pri = 150;
70module_param(cipher_pri, int, 0644);
71MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");
72
73static int hash_pri = 100;
74module_param(hash_pri, int, 0644);
75MODULE_PARM_DESC(hash_pri, "Priority for hash algos");
76
77static int aead_pri = 150;
78module_param(aead_pri, int, 0644);
79MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
80
81/* A type 3 BCM header, expected to precede the SPU header for SPU-M.
82 * Bits 3 and 4 in the first byte encode the channel number (the dma ringset).
83 * 0x60 - ring 0
84 * 0x68 - ring 1
85 * 0x70 - ring 2
86 * 0x78 - ring 3
87 */
88static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
89/*
90 * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN
91 * is set dynamically after reading SPU type from device tree.
92 */
93#define BCM_HDR_LEN  iproc_priv.bcm_hdr_len
94
95/* min and max time to sleep before retrying when mbox queue is full. usec */
96#define MBOX_SLEEP_MIN  800
97#define MBOX_SLEEP_MAX 1000
98
99/**
100 * select_channel() - Select a SPU channel to handle a crypto request. Selects
101 * channel in round robin order.
102 *
103 * Return:  channel index
104 */
105static u8 select_channel(void)
106{
107	u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);
108
109	return chan_idx % iproc_priv.spu.num_chan;
110}
111
112/**
113 * spu_skcipher_rx_sg_create() - Build up the scatterlist of buffers used to
114 * receive a SPU response message for an skcipher request. Includes buffers to
115 * catch SPU message headers and the response data.
116 * @mssg:	mailbox message containing the receive sg
117 * @rctx:	crypto request context
118 * @rx_frag_num: number of scatterlist elements required to hold the
119 *		SPU response message
120 * @chunksize:	Number of bytes of response data expected
121 * @stat_pad_len: Number of bytes required to pad the STAT field to
122 *		a 4-byte boundary
123 *
124 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
125 * when the request completes, whether the request is handled successfully or
126 * there is an error.
127 *
128 * Returns:
129 *   0 if successful
130 *   < 0 if an error
131 */
132static int
133spu_skcipher_rx_sg_create(struct brcm_message *mssg,
134			    struct iproc_reqctx_s *rctx,
135			    u8 rx_frag_num,
136			    unsigned int chunksize, u32 stat_pad_len)
137{
138	struct spu_hw *spu = &iproc_priv.spu;
139	struct scatterlist *sg;	/* used to build sgs in mbox message */
140	struct iproc_ctx_s *ctx = rctx->ctx;
141	u32 datalen;		/* Number of bytes of response data expected */
142
143	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
144				rctx->gfp);
145	if (!mssg->spu.dst)
146		return -ENOMEM;
147
148	sg = mssg->spu.dst;
149	sg_init_table(sg, rx_frag_num);
150	/* Space for SPU message header */
151	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
152
153	/* If XTS tweak in payload, add buffer to receive encrypted tweak */
154	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
155	    spu->spu_xts_tweak_in_payload())
156		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
157			   SPU_XTS_TWEAK_SIZE);
158
159	/* Copy in each dst sg entry from request, up to chunksize */
160	datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
161				 rctx->dst_nents, chunksize);
162	if (datalen < chunksize) {
163		pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
164		       __func__, chunksize, datalen);
165		return -EFAULT;
166	}
167
168	if (stat_pad_len)
169		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
170
171	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
172	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
173
174	return 0;
175}
176
177/**
178 * spu_skcipher_tx_sg_create() - Build up the scatterlist of buffers used to
179 * send a SPU request message for an skcipher request. Includes SPU message
180 * headers and the request data.
181 * @mssg:	mailbox message containing the transmit sg
182 * @rctx:	crypto request context
183 * @tx_frag_num: number of scatterlist elements required to construct the
184 *		SPU request message
185 * @chunksize:	Number of bytes of request data
186 * @pad_len:	Number of pad bytes
187 *
188 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
189 * when the request completes, whether the request is handled successfully or
190 * there is an error.
191 *
192 * Returns:
193 *   0 if successful
194 *   < 0 if an error
195 */
196static int
197spu_skcipher_tx_sg_create(struct brcm_message *mssg,
198			    struct iproc_reqctx_s *rctx,
199			    u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
200{
201	struct spu_hw *spu = &iproc_priv.spu;
202	struct scatterlist *sg;	/* used to build sgs in mbox message */
203	struct iproc_ctx_s *ctx = rctx->ctx;
204	u32 datalen;		/* Number of bytes of response data expected */
205	u32 stat_len;
206
207	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
208				rctx->gfp);
209	if (unlikely(!mssg->spu.src))
210		return -ENOMEM;
211
212	sg = mssg->spu.src;
213	sg_init_table(sg, tx_frag_num);
214
215	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
216		   BCM_HDR_LEN + ctx->spu_req_hdr_len);
217
218	/* if XTS tweak in payload, copy from IV (where crypto API puts it) */
219	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
220	    spu->spu_xts_tweak_in_payload())
221		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);
222
223	/* Copy in each src sg entry from request, up to chunksize */
224	datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
225				 rctx->src_nents, chunksize);
226	if (unlikely(datalen < chunksize)) {
227		pr_err("%s(): failed to copy src sg to mbox msg",
228		       __func__);
229		return -EFAULT;
230	}
231
232	if (pad_len)
233		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
234
235	stat_len = spu->spu_tx_status_len();
236	if (stat_len) {
237		memset(rctx->msg_buf.tx_stat, 0, stat_len);
238		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
239	}
240	return 0;
241}
242
243static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
244				u8 chan_idx)
245{
246	int err;
247	int retry_cnt = 0;
248	struct device *dev = &(iproc_priv.pdev->dev);
249
250	err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
251	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
252		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
253			/*
254			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
255			 * not in atomic context and we can wait and try again.
256			 */
257			retry_cnt++;
258			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
259			err = mbox_send_message(iproc_priv.mbox[chan_idx],
260						mssg);
261			atomic_inc(&iproc_priv.mb_no_spc);
262		}
263	}
264	if (err < 0) {
265		atomic_inc(&iproc_priv.mb_send_fail);
266		return err;
267	}
268
269	/* Check error returned by mailbox controller */
270	err = mssg->error;
271	if (unlikely(err < 0)) {
272		dev_err(dev, "message error %d", err);
273		/* Signal txdone for mailbox channel */
274	}
275
276	/* Signal txdone for mailbox channel */
277	mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
278	return err;
279}
280
281/**
282 * handle_skcipher_req() - Submit as much of a block cipher request as fits in
283 * a single SPU request message, starting at the current position in the request
284 * data.
285 * @rctx:	Crypto request context
286 *
287 * This may be called on the crypto API thread, or, when a request is so large
288 * it must be broken into multiple SPU messages, on the thread used to invoke
289 * the response callback. When requests are broken into multiple SPU
290 * messages, we assume subsequent messages depend on previous results, and
291 * thus always wait for previous results before submitting the next message.
292 * Because requests are submitted in lock step like this, there is no need
293 * to synchronize access to request data structures.
294 *
295 * Return: -EINPROGRESS: request has been accepted and result will be returned
296 *			 asynchronously
297 *         Any other value indicates an error
298 */
299static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
300{
301	struct spu_hw *spu = &iproc_priv.spu;
302	struct crypto_async_request *areq = rctx->parent;
303	struct skcipher_request *req =
304	    container_of(areq, struct skcipher_request, base);
305	struct iproc_ctx_s *ctx = rctx->ctx;
306	struct spu_cipher_parms cipher_parms;
307	int err;
308	unsigned int chunksize;	/* Num bytes of request to submit */
309	int remaining;	/* Bytes of request still to process */
310	int chunk_start;	/* Beginning of data for current SPU msg */
311
312	/* IV or ctr value to use in this SPU msg */
313	u8 local_iv_ctr[MAX_IV_SIZE];
314	u32 stat_pad_len;	/* num bytes to align status field */
315	u32 pad_len;		/* total length of all padding */
316	struct brcm_message *mssg;	/* mailbox message */
317
318	/* number of entries in src and dst sg in mailbox message. */
319	u8 rx_frag_num = 2;	/* response header and STATUS */
320	u8 tx_frag_num = 1;	/* request header */
321
322	flow_log("%s\n", __func__);
323
324	cipher_parms.alg = ctx->cipher.alg;
325	cipher_parms.mode = ctx->cipher.mode;
326	cipher_parms.type = ctx->cipher_type;
327	cipher_parms.key_len = ctx->enckeylen;
328	cipher_parms.key_buf = ctx->enckey;
329	cipher_parms.iv_buf = local_iv_ctr;
330	cipher_parms.iv_len = rctx->iv_ctr_len;
331
332	mssg = &rctx->mb_mssg;
333	chunk_start = rctx->src_sent;
334	remaining = rctx->total_todo - chunk_start;
335
336	/* determine the chunk we are breaking off and update the indexes */
337	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
338	    (remaining > ctx->max_payload))
339		chunksize = ctx->max_payload;
340	else
341		chunksize = remaining;
342
343	rctx->src_sent += chunksize;
344	rctx->total_sent = rctx->src_sent;
345
346	/* Count number of sg entries to be included in this request */
347	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
348	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
349
350	if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
351	    rctx->is_encrypt && chunk_start)
352		/*
353		 * Encrypting non-first first chunk. Copy last block of
354		 * previous result to IV for this chunk.
355		 */
356		sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
357				    rctx->iv_ctr_len,
358				    chunk_start - rctx->iv_ctr_len);
359
360	if (rctx->iv_ctr_len) {
361		/* get our local copy of the iv */
362		__builtin_memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
363				 rctx->iv_ctr_len);
364
365		/* generate the next IV if possible */
366		if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
367		    !rctx->is_encrypt) {
368			/*
369			 * CBC Decrypt: next IV is the last ciphertext block in
370			 * this chunk
371			 */
372			sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
373					    rctx->iv_ctr_len,
374					    rctx->src_sent - rctx->iv_ctr_len);
375		} else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
376			/*
377			 * The SPU hardware increments the counter once for
378			 * each AES block of 16 bytes. So update the counter
379			 * for the next chunk, if there is one. Note that for
380			 * this chunk, the counter has already been copied to
381			 * local_iv_ctr. We can assume a block size of 16,
382			 * because we only support CTR mode for AES, not for
383			 * any other cipher alg.
384			 */
385			add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
386		}
387	}
388
389	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
390		flow_log("max_payload infinite\n");
391	else
392		flow_log("max_payload %u\n", ctx->max_payload);
393
394	flow_log("sent:%u start:%u remains:%u size:%u\n",
395		 rctx->src_sent, chunk_start, remaining, chunksize);
396
397	/* Copy SPU header template created at setkey time */
398	memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
399	       sizeof(rctx->msg_buf.bcm_spu_req_hdr));
400
401	spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
402				   ctx->spu_req_hdr_len, !(rctx->is_encrypt),
403				   &cipher_parms, chunksize);
404
405	atomic64_add(chunksize, &iproc_priv.bytes_out);
406
407	stat_pad_len = spu->spu_wordalign_padlen(chunksize);
408	if (stat_pad_len)
409		rx_frag_num++;
410	pad_len = stat_pad_len;
411	if (pad_len) {
412		tx_frag_num++;
413		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
414				     0, ctx->auth.alg, ctx->auth.mode,
415				     rctx->total_sent, stat_pad_len);
416	}
417
418	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
419			      ctx->spu_req_hdr_len);
420	packet_log("payload:\n");
421	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
422	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
423
424	/*
425	 * Build mailbox message containing SPU request msg and rx buffers
426	 * to catch response message
427	 */
428	memset(mssg, 0, sizeof(*mssg));
429	mssg->type = BRCM_MESSAGE_SPU;
430	mssg->ctx = rctx;	/* Will be returned in response */
431
432	/* Create rx scatterlist to catch result */
433	rx_frag_num += rctx->dst_nents;
434
435	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
436	    spu->spu_xts_tweak_in_payload())
437		rx_frag_num++;	/* extra sg to insert tweak */
438
439	err = spu_skcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
440					  stat_pad_len);
441	if (err)
442		return err;
443
444	/* Create tx scatterlist containing SPU request message */
445	tx_frag_num += rctx->src_nents;
446	if (spu->spu_tx_status_len())
447		tx_frag_num++;
448
449	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
450	    spu->spu_xts_tweak_in_payload())
451		tx_frag_num++;	/* extra sg to insert tweak */
452
453	err = spu_skcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
454					  pad_len);
455	if (err)
456		return err;
457
458	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
459	if (unlikely(err < 0))
460		return err;
461
462	return -EINPROGRESS;
463}
464
465/**
466 * handle_skcipher_resp() - Process a block cipher SPU response. Updates the
467 * total received count for the request and updates global stats.
468 * @rctx:	Crypto request context
469 */
470static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
471{
472	struct spu_hw *spu = &iproc_priv.spu;
473#ifdef DEBUG
474	struct crypto_async_request *areq = rctx->parent;
475	struct skcipher_request *req = skcipher_request_cast(areq);
476#endif
477	struct iproc_ctx_s *ctx = rctx->ctx;
478	u32 payload_len;
479
480	/* See how much data was returned */
481	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
482
483	/*
484	 * In XTS mode, the first SPU_XTS_TWEAK_SIZE bytes may be the
485	 * encrypted tweak ("i") value; we don't count those.
486	 */
487	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
488	    spu->spu_xts_tweak_in_payload() &&
489	    (payload_len >= SPU_XTS_TWEAK_SIZE))
490		payload_len -= SPU_XTS_TWEAK_SIZE;
491
492	atomic64_add(payload_len, &iproc_priv.bytes_in);
493
494	flow_log("%s() offset: %u, bd_len: %u BD:\n",
495		 __func__, rctx->total_received, payload_len);
496
497	dump_sg(req->dst, rctx->total_received, payload_len);
498
499	rctx->total_received += payload_len;
500	if (rctx->total_received == rctx->total_todo) {
501		atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
502		atomic_inc(
503		   &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
504	}
505}
506
507/**
508 * spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to
509 * receive a SPU response message for an ahash request.
510 * @mssg:	mailbox message containing the receive sg
511 * @rctx:	crypto request context
512 * @rx_frag_num: number of scatterlist elements required to hold the
513 *		SPU response message
514 * @digestsize: length of hash digest, in bytes
515 * @stat_pad_len: Number of bytes required to pad the STAT field to
516 *		a 4-byte boundary
517 *
518 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
519 * when the request completes, whether the request is handled successfully or
520 * there is an error.
521 *
522 * Return:
523 *   0 if successful
524 *   < 0 if an error
525 */
526static int
527spu_ahash_rx_sg_create(struct brcm_message *mssg,
528		       struct iproc_reqctx_s *rctx,
529		       u8 rx_frag_num, unsigned int digestsize,
530		       u32 stat_pad_len)
531{
532	struct spu_hw *spu = &iproc_priv.spu;
533	struct scatterlist *sg;	/* used to build sgs in mbox message */
534	struct iproc_ctx_s *ctx = rctx->ctx;
535
536	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
537				rctx->gfp);
538	if (!mssg->spu.dst)
539		return -ENOMEM;
540
541	sg = mssg->spu.dst;
542	sg_init_table(sg, rx_frag_num);
543	/* Space for SPU message header */
544	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
545
546	/* Space for digest */
547	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
548
549	if (stat_pad_len)
550		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
551
552	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
553	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
554	return 0;
555}
556
557/**
558 * spu_ahash_tx_sg_create() -  Build up the scatterlist of buffers used to send
559 * a SPU request message for an ahash request. Includes SPU message headers and
560 * the request data.
561 * @mssg:	mailbox message containing the transmit sg
562 * @rctx:	crypto request context
563 * @tx_frag_num: number of scatterlist elements required to construct the
564 *		SPU request message
565 * @spu_hdr_len: length in bytes of SPU message header
566 * @hash_carry_len: Number of bytes of data carried over from previous req
567 * @new_data_len: Number of bytes of new request data
568 * @pad_len:	Number of pad bytes
569 *
570 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
571 * when the request completes, whether the request is handled successfully or
572 * there is an error.
573 *
574 * Return:
575 *   0 if successful
576 *   < 0 if an error
577 */
578static int
579spu_ahash_tx_sg_create(struct brcm_message *mssg,
580		       struct iproc_reqctx_s *rctx,
581		       u8 tx_frag_num,
582		       u32 spu_hdr_len,
583		       unsigned int hash_carry_len,
584		       unsigned int new_data_len, u32 pad_len)
585{
586	struct spu_hw *spu = &iproc_priv.spu;
587	struct scatterlist *sg;	/* used to build sgs in mbox message */
588	u32 datalen;		/* Number of bytes of response data expected */
589	u32 stat_len;
590
591	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
592				rctx->gfp);
593	if (!mssg->spu.src)
594		return -ENOMEM;
595
596	sg = mssg->spu.src;
597	sg_init_table(sg, tx_frag_num);
598
599	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
600		   BCM_HDR_LEN + spu_hdr_len);
601
602	if (hash_carry_len)
603		sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);
604
605	if (new_data_len) {
606		/* Copy in each src sg entry from request, up to chunksize */
607		datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
608					 rctx->src_nents, new_data_len);
609		if (datalen < new_data_len) {
610			pr_err("%s(): failed to copy src sg to mbox msg",
611			       __func__);
612			return -EFAULT;
613		}
614	}
615
616	if (pad_len)
617		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
618
619	stat_len = spu->spu_tx_status_len();
620	if (stat_len) {
621		memset(rctx->msg_buf.tx_stat, 0, stat_len);
622		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
623	}
624
625	return 0;
626}
627
628/**
629 * handle_ahash_req() - Process an asynchronous hash request from the crypto
630 * API.
631 * @rctx:  Crypto request context
632 *
633 * Builds a SPU request message embedded in a mailbox message and submits the
634 * mailbox message on a selected mailbox channel. The SPU request message is
635 * constructed as a scatterlist, including entries from the crypto API's
636 * src scatterlist to avoid copying the data to be hashed. This function is
637 * called either on the thread from the crypto API, or, in the case that the
638 * crypto API request is too large to fit in a single SPU request message,
639 * on the thread that invokes the receive callback with a response message.
640 * Because some operations require the response from one chunk before the next
641 * chunk can be submitted, we always wait for the response for the previous
642 * chunk before submitting the next chunk. Because requests are submitted in
643 * lock step like this, there is no need to synchronize access to request data
644 * structures.
645 *
646 * Return:
647 *   -EINPROGRESS: request has been submitted to SPU and response will be
648 *		   returned asynchronously
649 *   -EAGAIN:      non-final request included a small amount of data, which for
650 *		   efficiency we did not submit to the SPU, but instead stored
651 *		   to be submitted to the SPU with the next part of the request
652 *   other:        an error code
653 */
654static int handle_ahash_req(struct iproc_reqctx_s *rctx)
655{
656	struct spu_hw *spu = &iproc_priv.spu;
657	struct crypto_async_request *areq = rctx->parent;
658	struct ahash_request *req = ahash_request_cast(areq);
659	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
660	struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
661	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
662	struct iproc_ctx_s *ctx = rctx->ctx;
663
664	/* number of bytes still to be hashed in this req */
665	unsigned int nbytes_to_hash = 0;
666	int err;
667	unsigned int chunksize = 0;	/* length of hash carry + new data */
668	/*
669	 * length of new data, not from hash carry, to be submitted in
670	 * this hw request
671	 */
672	unsigned int new_data_len;
673
674	unsigned int __maybe_unused chunk_start = 0;
675	u32 db_size;	 /* Length of data field, incl gcm and hash padding */
676	int pad_len = 0; /* total pad len, including gcm, hash, stat padding */
677	u32 data_pad_len = 0;	/* length of GCM/CCM padding */
678	u32 stat_pad_len = 0;	/* length of padding to align STATUS word */
679	struct brcm_message *mssg;	/* mailbox message */
680	struct spu_request_opts req_opts;
681	struct spu_cipher_parms cipher_parms;
682	struct spu_hash_parms hash_parms;
683	struct spu_aead_parms aead_parms;
684	unsigned int local_nbuf;
685	u32 spu_hdr_len;
686	unsigned int digestsize;
687	u16 rem = 0;
688
689	/*
690	 * number of entries in src and dst sg. Always includes SPU msg header.
691	 * rx always includes a buffer to catch digest and STATUS.
692	 */
693	u8 rx_frag_num = 3;
694	u8 tx_frag_num = 1;
695
696	flow_log("total_todo %u, total_sent %u\n",
697		 rctx->total_todo, rctx->total_sent);
698
699	memset(&req_opts, 0, sizeof(req_opts));
700	memset(&cipher_parms, 0, sizeof(cipher_parms));
701	memset(&hash_parms, 0, sizeof(hash_parms));
702	memset(&aead_parms, 0, sizeof(aead_parms));
703
704	req_opts.bd_suppress = true;
705	hash_parms.alg = ctx->auth.alg;
706	hash_parms.mode = ctx->auth.mode;
707	hash_parms.type = HASH_TYPE_NONE;
708	hash_parms.key_buf = (u8 *)ctx->authkey;
709	hash_parms.key_len = ctx->authkeylen;
710
711	/*
712	 * For hash algorithms below assignment looks bit odd but
713	 * it's needed for AES-XCBC and AES-CMAC hash algorithms
714	 * to differentiate between 128, 192, 256 bit key values.
715	 * Based on the key values, hash algorithm is selected.
716	 * For example for 128 bit key, hash algorithm is AES-128.
717	 */
718	cipher_parms.type = ctx->cipher_type;
719
720	mssg = &rctx->mb_mssg;
721	chunk_start = rctx->src_sent;
722
723	/*
724	 * Compute the amount remaining to hash. This may include data
725	 * carried over from previous requests.
726	 */
727	nbytes_to_hash = rctx->total_todo - rctx->total_sent;
728	chunksize = nbytes_to_hash;
729	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
730	    (chunksize > ctx->max_payload))
731		chunksize = ctx->max_payload;
732
733	/*
734	 * If this is not a final request and the request data is not a multiple
735	 * of a full block, then simply park the extra data and prefix it to the
736	 * data for the next request.
737	 */
738	if (!rctx->is_final) {
739		u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
740		u16 new_len;  /* len of data to add to hash carry */
741
742		rem = chunksize % blocksize;   /* remainder */
743		if (rem) {
744			/* chunksize not a multiple of blocksize */
745			chunksize -= rem;
746			if (chunksize == 0) {
747				/* Don't have a full block to submit to hw */
748				new_len = rem - rctx->hash_carry_len;
749				sg_copy_part_to_buf(req->src, dest, new_len,
750						    rctx->src_sent);
751				rctx->hash_carry_len = rem;
752				flow_log("Exiting with hash carry len: %u\n",
753					 rctx->hash_carry_len);
754				packet_dump("  buf: ",
755					    rctx->hash_carry,
756					    rctx->hash_carry_len);
757				return -EAGAIN;
758			}
759		}
760	}
761
762	/* if we have hash carry, then prefix it to the data in this request */
763	local_nbuf = rctx->hash_carry_len;
764	rctx->hash_carry_len = 0;
765	if (local_nbuf)
766		tx_frag_num++;
767	new_data_len = chunksize - local_nbuf;
768
769	/* Count number of sg entries to be used in this request */
770	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
771				       new_data_len);
772
773	/* AES hashing keeps key size in type field, so need to copy it here */
774	if (hash_parms.alg == HASH_ALG_AES)
775		hash_parms.type = (enum hash_type)cipher_parms.type;
776	else
777		hash_parms.type = spu->spu_hash_type(rctx->total_sent);
778
779	digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
780					  hash_parms.type);
781	hash_parms.digestsize =	digestsize;
782
783	/* update the indexes */
784	rctx->total_sent += chunksize;
785	/* if you sent a prebuf then that wasn't from this req->src */
786	rctx->src_sent += new_data_len;
787
788	if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
789		hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
790							   hash_parms.mode,
791							   chunksize,
792							   blocksize);
793
794	/*
795	 * If a non-first chunk, then include the digest returned from the
796	 * previous chunk so that hw can add to it (except for AES types).
797	 */
798	if ((hash_parms.type == HASH_TYPE_UPDT) &&
799	    (hash_parms.alg != HASH_ALG_AES)) {
800		hash_parms.key_buf = rctx->incr_hash;
801		hash_parms.key_len = digestsize;
802	}
803
804	atomic64_add(chunksize, &iproc_priv.bytes_out);
805
806	flow_log("%s() final: %u nbuf: %u ",
807		 __func__, rctx->is_final, local_nbuf);
808
809	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
810		flow_log("max_payload infinite\n");
811	else
812		flow_log("max_payload %u\n", ctx->max_payload);
813
814	flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);
815
816	/* Prepend SPU header with type 3 BCM header */
817	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
818
819	hash_parms.prebuf_len = local_nbuf;
820	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
821					      BCM_HDR_LEN,
822					      &req_opts, &cipher_parms,
823					      &hash_parms, &aead_parms,
824					      new_data_len);
825
826	if (spu_hdr_len == 0) {
827		pr_err("Failed to create SPU request header\n");
828		return -EFAULT;
829	}
830
831	/*
832	 * Determine total length of padding required. Put all padding in one
833	 * buffer.
834	 */
835	data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
836	db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
837				   0, 0, hash_parms.pad_len);
838	if (spu->spu_tx_status_len())
839		stat_pad_len = spu->spu_wordalign_padlen(db_size);
840	if (stat_pad_len)
841		rx_frag_num++;
842	pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
843	if (pad_len) {
844		tx_frag_num++;
845		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
846				     hash_parms.pad_len, ctx->auth.alg,
847				     ctx->auth.mode, rctx->total_sent,
848				     stat_pad_len);
849	}
850
851	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
852			      spu_hdr_len);
853	packet_dump("    prebuf: ", rctx->hash_carry, local_nbuf);
854	flow_log("Data:\n");
855	dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
856	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
857
858	/*
859	 * Build mailbox message containing SPU request msg and rx buffers
860	 * to catch response message
861	 */
862	memset(mssg, 0, sizeof(*mssg));
863	mssg->type = BRCM_MESSAGE_SPU;
864	mssg->ctx = rctx;	/* Will be returned in response */
865
866	/* Create rx scatterlist to catch result */
867	err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
868				     stat_pad_len);
869	if (err)
870		return err;
871
872	/* Create tx scatterlist containing SPU request message */
873	tx_frag_num += rctx->src_nents;
874	if (spu->spu_tx_status_len())
875		tx_frag_num++;
876	err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
877				     local_nbuf, new_data_len, pad_len);
878	if (err)
879		return err;
880
881	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
882	if (unlikely(err < 0))
883		return err;
884
885	return -EINPROGRESS;
886}
887
888/**
889 * spu_hmac_outer_hash() - Request synchonous software compute of the outer hash
890 * for an HMAC request.
891 * @req:  The HMAC request from the crypto API
892 * @ctx:  The session context
893 *
894 * Return: 0 if synchronous hash operation successful
895 *         -EINVAL if the hash algo is unrecognized
896 *         any other value indicates an error
897 */
898static int spu_hmac_outer_hash(struct ahash_request *req,
899			       struct iproc_ctx_s *ctx)
900{
901	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
902	unsigned int blocksize =
903		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
904	int rc;
905
906	switch (ctx->auth.alg) {
907	case HASH_ALG_MD5:
908		rc = do_shash("md5", req->result, ctx->opad, blocksize,
909			      req->result, ctx->digestsize, NULL, 0);
910		break;
911	case HASH_ALG_SHA1:
912		rc = do_shash("sha1", req->result, ctx->opad, blocksize,
913			      req->result, ctx->digestsize, NULL, 0);
914		break;
915	case HASH_ALG_SHA224:
916		rc = do_shash("sha224", req->result, ctx->opad, blocksize,
917			      req->result, ctx->digestsize, NULL, 0);
918		break;
919	case HASH_ALG_SHA256:
920		rc = do_shash("sha256", req->result, ctx->opad, blocksize,
921			      req->result, ctx->digestsize, NULL, 0);
922		break;
923	case HASH_ALG_SHA384:
924		rc = do_shash("sha384", req->result, ctx->opad, blocksize,
925			      req->result, ctx->digestsize, NULL, 0);
926		break;
927	case HASH_ALG_SHA512:
928		rc = do_shash("sha512", req->result, ctx->opad, blocksize,
929			      req->result, ctx->digestsize, NULL, 0);
930		break;
931	default:
932		pr_err("%s() Error : unknown hmac type\n", __func__);
933		rc = -EINVAL;
934	}
935	return rc;
936}
937
938/**
939 * ahash_req_done() - Process a hash result from the SPU hardware.
940 * @rctx: Crypto request context
941 *
942 * Return: 0 if successful
943 *         < 0 if an error
944 */
945static int ahash_req_done(struct iproc_reqctx_s *rctx)
946{
947	struct spu_hw *spu = &iproc_priv.spu;
948	struct crypto_async_request *areq = rctx->parent;
949	struct ahash_request *req = ahash_request_cast(areq);
950	struct iproc_ctx_s *ctx = rctx->ctx;
951	int err;
952
953	memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);
954
955	if (spu->spu_type == SPU_TYPE_SPUM) {
956		/* byte swap the output from the UPDT function to network byte
957		 * order
958		 */
959		if (ctx->auth.alg == HASH_ALG_MD5) {
960			__swab32s((u32 *)req->result);
961			__swab32s(((u32 *)req->result) + 1);
962			__swab32s(((u32 *)req->result) + 2);
963			__swab32s(((u32 *)req->result) + 3);
964			__swab32s(((u32 *)req->result) + 4);
965		}
966	}
967
968	flow_dump("  digest ", req->result, ctx->digestsize);
969
970	/* if this an HMAC then do the outer hash */
971	if (rctx->is_sw_hmac) {
972		err = spu_hmac_outer_hash(req, ctx);
973		if (err < 0)
974			return err;
975		flow_dump("  hmac: ", req->result, ctx->digestsize);
976	}
977
978	if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
979		atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
980		atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
981	} else {
982		atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
983		atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
984	}
985
986	return 0;
987}
988
989/**
990 * handle_ahash_resp() - Process a SPU response message for a hash request.
991 * Checks if the entire crypto API request has been processed, and if so,
992 * invokes post processing on the result.
993 * @rctx: Crypto request context
994 */
995static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
996{
997	struct iproc_ctx_s *ctx = rctx->ctx;
998#ifdef DEBUG
999	struct crypto_async_request *areq = rctx->parent;
1000	struct ahash_request *req = ahash_request_cast(areq);
1001	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1002	unsigned int blocksize =
1003		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
1004#endif
1005	/*
1006	 * Save hash to use as input to next op if incremental. Might be copying
1007	 * too much, but that's easier than figuring out actual digest size here
1008	 */
1009	memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);
1010
1011	flow_log("%s() blocksize:%u digestsize:%u\n",
1012		 __func__, blocksize, ctx->digestsize);
1013
1014	atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);
1015
1016	if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
1017		ahash_req_done(rctx);
1018}
1019
1020/**
1021 * spu_aead_rx_sg_create() - Build up the scatterlist of buffers used to receive
1022 * a SPU response message for an AEAD request. Includes buffers to catch SPU
1023 * message headers and the response data.
1024 * @mssg:	mailbox message containing the receive sg
1025 * @rctx:	crypto request context
1026 * @rx_frag_num: number of scatterlist elements required to hold the
1027 *		SPU response message
1028 * @assoc_len:	Length of associated data included in the crypto request
1029 * @ret_iv_len: Length of IV returned in response
1030 * @resp_len:	Number of bytes of response data expected to be written to
1031 *              dst buffer from crypto API
1032 * @digestsize: Length of hash digest, in bytes
1033 * @stat_pad_len: Number of bytes required to pad the STAT field to
1034 *		a 4-byte boundary
1035 *
1036 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
1037 * when the request completes, whether the request is handled successfully or
1038 * there is an error.
1039 *
1040 * Returns:
1041 *   0 if successful
1042 *   < 0 if an error
1043 */
1044static int spu_aead_rx_sg_create(struct brcm_message *mssg,
1045				 struct aead_request *req,
1046				 struct iproc_reqctx_s *rctx,
1047				 u8 rx_frag_num,
1048				 unsigned int assoc_len,
1049				 u32 ret_iv_len, unsigned int resp_len,
1050				 unsigned int digestsize, u32 stat_pad_len)
1051{
1052	struct spu_hw *spu = &iproc_priv.spu;
1053	struct scatterlist *sg;	/* used to build sgs in mbox message */
1054	struct iproc_ctx_s *ctx = rctx->ctx;
1055	u32 datalen;		/* Number of bytes of response data expected */
1056	u32 assoc_buf_len;
1057	u8 data_padlen = 0;
1058
1059	if (ctx->is_rfc4543) {
1060		/* RFC4543: only pad after data, not after AAD */
1061		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1062							  assoc_len + resp_len);
1063		assoc_buf_len = assoc_len;
1064	} else {
1065		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1066							  resp_len);
1067		assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
1068						assoc_len, ret_iv_len,
1069						rctx->is_encrypt);
1070	}
1071
1072	if (ctx->cipher.mode == CIPHER_MODE_CCM)
1073		/* ICV (after data) must be in the next 32-bit word for CCM */
1074		data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
1075							 resp_len +
1076							 data_padlen);
1077
1078	if (data_padlen)
1079		/* have to catch gcm pad in separate buffer */
1080		rx_frag_num++;
1081
1082	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
1083				rctx->gfp);
1084	if (!mssg->spu.dst)
1085		return -ENOMEM;
1086
1087	sg = mssg->spu.dst;
1088	sg_init_table(sg, rx_frag_num);
1089
1090	/* Space for SPU message header */
1091	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
1092
1093	if (assoc_buf_len) {
1094		/*
1095		 * Don't write directly to req->dst, because SPU may pad the
1096		 * assoc data in the response
1097		 */
1098		memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
1099		sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
1100	}
1101
1102	if (resp_len) {
1103		/*
1104		 * Copy in each dst sg entry from request, up to chunksize.
1105		 * dst sg catches just the data. digest caught in separate buf.
1106		 */
1107		datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
1108					 rctx->dst_nents, resp_len);
1109		if (datalen < (resp_len)) {
1110			pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u",
1111			       __func__, resp_len, datalen);
1112			return -EFAULT;
1113		}
1114	}
1115
1116	/* If GCM/CCM data is padded, catch padding in separate buffer */
1117	if (data_padlen) {
1118		memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
1119		sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
1120	}
1121
1122	/* Always catch ICV in separate buffer */
1123	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
1124
1125	flow_log("stat_pad_len %u\n", stat_pad_len);
1126	if (stat_pad_len) {
1127		memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
1128		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
1129	}
1130
1131	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
1132	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
1133
1134	return 0;
1135}
1136
1137/**
1138 * spu_aead_tx_sg_create() - Build up the scatterlist of buffers used to send a
1139 * SPU request message for an AEAD request. Includes SPU message headers and the
1140 * request data.
1141 * @mssg:	mailbox message containing the transmit sg
1142 * @rctx:	crypto request context
1143 * @tx_frag_num: number of scatterlist elements required to construct the
1144 *		SPU request message
1145 * @spu_hdr_len: length of SPU message header in bytes
1146 * @assoc:	crypto API associated data scatterlist
1147 * @assoc_len:	length of associated data
1148 * @assoc_nents: number of scatterlist entries containing assoc data
1149 * @aead_iv_len: length of AEAD IV, if included
1150 * @chunksize:	Number of bytes of request data
1151 * @aad_pad_len: Number of bytes of padding at end of AAD. For GCM/CCM.
1152 * @pad_len:	Number of pad bytes
1153 * @incl_icv:	If true, write separate ICV buffer after data and
1154 *              any padding
1155 *
1156 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
1157 * when the request completes, whether the request is handled successfully or
1158 * there is an error.
1159 *
1160 * Return:
1161 *   0 if successful
1162 *   < 0 if an error
1163 */
1164static int spu_aead_tx_sg_create(struct brcm_message *mssg,
1165				 struct iproc_reqctx_s *rctx,
1166				 u8 tx_frag_num,
1167				 u32 spu_hdr_len,
1168				 struct scatterlist *assoc,
1169				 unsigned int assoc_len,
1170				 int assoc_nents,
1171				 unsigned int aead_iv_len,
1172				 unsigned int chunksize,
1173				 u32 aad_pad_len, u32 pad_len, bool incl_icv)
1174{
1175	struct spu_hw *spu = &iproc_priv.spu;
1176	struct scatterlist *sg;	/* used to build sgs in mbox message */
1177	struct scatterlist *assoc_sg = assoc;
1178	struct iproc_ctx_s *ctx = rctx->ctx;
1179	u32 datalen;		/* Number of bytes of data to write */
1180	u32 written;		/* Number of bytes of data written */
1181	u32 assoc_offset = 0;
1182	u32 stat_len;
1183
1184	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
1185				rctx->gfp);
1186	if (!mssg->spu.src)
1187		return -ENOMEM;
1188
1189	sg = mssg->spu.src;
1190	sg_init_table(sg, tx_frag_num);
1191
1192	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
1193		   BCM_HDR_LEN + spu_hdr_len);
1194
1195	if (assoc_len) {
1196		/* Copy in each associated data sg entry from request */
1197		written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
1198					 assoc_nents, assoc_len);
1199		if (written < assoc_len) {
1200			pr_err("%s(): failed to copy assoc sg to mbox msg",
1201			       __func__);
1202			return -EFAULT;
1203		}
1204	}
1205
1206	if (aead_iv_len)
1207		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);
1208
1209	if (aad_pad_len) {
1210		memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
1211		sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
1212	}
1213
1214	datalen = chunksize;
1215	if ((chunksize > ctx->digestsize) && incl_icv)
1216		datalen -= ctx->digestsize;
1217	if (datalen) {
1218		/* For aead, a single msg should consume the entire src sg */
1219		written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
1220					 rctx->src_nents, datalen);
1221		if (written < datalen) {
1222			pr_err("%s(): failed to copy src sg to mbox msg",
1223			       __func__);
1224			return -EFAULT;
1225		}
1226	}
1227
1228	if (pad_len) {
1229		memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
1230		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
1231	}
1232
1233	if (incl_icv)
1234		sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);
1235
1236	stat_len = spu->spu_tx_status_len();
1237	if (stat_len) {
1238		memset(rctx->msg_buf.tx_stat, 0, stat_len);
1239		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
1240	}
1241	return 0;
1242}
1243
1244/**
1245 * handle_aead_req() - Submit a SPU request message for the next chunk of the
1246 * current AEAD request.
1247 * @rctx:  Crypto request context
1248 *
1249 * Unlike other operation types, we assume the length of the request fits in
1250 * a single SPU request message. aead_enqueue() makes sure this is true.
1251 * Comments for other op types regarding threads applies here as well.
1252 *
1253 * Unlike incremental hash ops, where the spu returns the entire hash for
1254 * truncated algs like sha-224, the SPU returns just the truncated hash in
1255 * response to aead requests. So digestsize is always ctx->digestsize here.
1256 *
1257 * Return: -EINPROGRESS: crypto request has been accepted and result will be
1258 *			 returned asynchronously
1259 *         Any other value indicates an error
1260 */
1261static int handle_aead_req(struct iproc_reqctx_s *rctx)
1262{
1263	struct spu_hw *spu = &iproc_priv.spu;
1264	struct crypto_async_request *areq = rctx->parent;
1265	struct aead_request *req = container_of(areq,
1266						struct aead_request, base);
1267	struct iproc_ctx_s *ctx = rctx->ctx;
1268	int err;
1269	unsigned int chunksize;
1270	unsigned int resp_len;
1271	u32 spu_hdr_len;
1272	u32 db_size;
1273	u32 stat_pad_len;
1274	u32 pad_len;
1275	struct brcm_message *mssg;	/* mailbox message */
1276	struct spu_request_opts req_opts;
1277	struct spu_cipher_parms cipher_parms;
1278	struct spu_hash_parms hash_parms;
1279	struct spu_aead_parms aead_parms;
1280	int assoc_nents = 0;
1281	bool incl_icv = false;
1282	unsigned int digestsize = ctx->digestsize;
1283
1284	/* number of entries in src and dst sg. Always includes SPU msg header.
1285	 */
1286	u8 rx_frag_num = 2;	/* and STATUS */
1287	u8 tx_frag_num = 1;
1288
1289	/* doing the whole thing at once */
1290	chunksize = rctx->total_todo;
1291
1292	flow_log("%s: chunksize %u\n", __func__, chunksize);
1293
1294	memset(&req_opts, 0, sizeof(req_opts));
1295	memset(&hash_parms, 0, sizeof(hash_parms));
1296	memset(&aead_parms, 0, sizeof(aead_parms));
1297
1298	req_opts.is_inbound = !(rctx->is_encrypt);
1299	req_opts.auth_first = ctx->auth_first;
1300	req_opts.is_aead = true;
1301	req_opts.is_esp = ctx->is_esp;
1302
1303	cipher_parms.alg = ctx->cipher.alg;
1304	cipher_parms.mode = ctx->cipher.mode;
1305	cipher_parms.type = ctx->cipher_type;
1306	cipher_parms.key_buf = ctx->enckey;
1307	cipher_parms.key_len = ctx->enckeylen;
1308	cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
1309	cipher_parms.iv_len = rctx->iv_ctr_len;
1310
1311	hash_parms.alg = ctx->auth.alg;
1312	hash_parms.mode = ctx->auth.mode;
1313	hash_parms.type = HASH_TYPE_NONE;
1314	hash_parms.key_buf = (u8 *)ctx->authkey;
1315	hash_parms.key_len = ctx->authkeylen;
1316	hash_parms.digestsize = digestsize;
1317
1318	if ((ctx->auth.alg == HASH_ALG_SHA224) &&
1319	    (ctx->authkeylen < SHA224_DIGEST_SIZE))
1320		hash_parms.key_len = SHA224_DIGEST_SIZE;
1321
1322	aead_parms.assoc_size = req->assoclen;
1323	if (ctx->is_esp && !ctx->is_rfc4543) {
1324		/*
1325		 * 8-byte IV is included assoc data in request. SPU2
1326		 * expects AAD to include just SPI and seqno. So
1327		 * subtract off the IV len.
1328		 */
1329		aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;
1330
1331		if (rctx->is_encrypt) {
1332			aead_parms.return_iv = true;
1333			aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
1334			aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
1335		}
1336	} else {
1337		aead_parms.ret_iv_len = 0;
1338	}
1339
1340	/*
1341	 * Count number of sg entries from the crypto API request that are to
1342	 * be included in this mailbox message. For dst sg, don't count space
1343	 * for digest. Digest gets caught in a separate buffer and copied back
1344	 * to dst sg when processing response.
1345	 */
1346	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
1347	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
1348	if (aead_parms.assoc_size)
1349		assoc_nents = spu_sg_count(rctx->assoc, 0,
1350					   aead_parms.assoc_size);
1351
1352	mssg = &rctx->mb_mssg;
1353
1354	rctx->total_sent = chunksize;
1355	rctx->src_sent = chunksize;
1356	if (spu->spu_assoc_resp_len(ctx->cipher.mode,
1357				    aead_parms.assoc_size,
1358				    aead_parms.ret_iv_len,
1359				    rctx->is_encrypt))
1360		rx_frag_num++;
1361
1362	aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
1363						rctx->iv_ctr_len);
1364
1365	if (ctx->auth.alg == HASH_ALG_AES)
1366		hash_parms.type = (enum hash_type)ctx->cipher_type;
1367
1368	/* General case AAD padding (CCM and RFC4543 special cases below) */
1369	aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1370						 aead_parms.assoc_size);
1371
1372	/* General case data padding (CCM decrypt special case below) */
1373	aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1374							   chunksize);
1375
1376	if (ctx->cipher.mode == CIPHER_MODE_CCM) {
1377		/*
1378		 * for CCM, AAD len + 2 (rather than AAD len) needs to be
1379		 * 128-bit aligned
1380		 */
1381		aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
1382					 ctx->cipher.mode,
1383					 aead_parms.assoc_size + 2);
1384
1385		/*
1386		 * And when decrypting CCM, need to pad without including
1387		 * size of ICV which is tacked on to end of chunk
1388		 */
1389		if (!rctx->is_encrypt)
1390			aead_parms.data_pad_len =
1391				spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1392							chunksize - digestsize);
1393
1394		/* CCM also requires software to rewrite portions of IV: */
1395		spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
1396				       chunksize, rctx->is_encrypt,
1397				       ctx->is_esp);
1398	}
1399
1400	if (ctx->is_rfc4543) {
1401		/*
1402		 * RFC4543: data is included in AAD, so don't pad after AAD
1403		 * and pad data based on both AAD + data size
1404		 */
1405		aead_parms.aad_pad_len = 0;
1406		if (!rctx->is_encrypt)
1407			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1408					ctx->cipher.mode,
1409					aead_parms.assoc_size + chunksize -
1410					digestsize);
1411		else
1412			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1413					ctx->cipher.mode,
1414					aead_parms.assoc_size + chunksize);
1415
1416		req_opts.is_rfc4543 = true;
1417	}
1418
1419	if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
1420		incl_icv = true;
1421		tx_frag_num++;
1422		/* Copy ICV from end of src scatterlist to digest buf */
1423		sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
1424				    req->assoclen + rctx->total_sent -
1425				    digestsize);
1426	}
1427
1428	atomic64_add(chunksize, &iproc_priv.bytes_out);
1429
1430	flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);
1431
1432	/* Prepend SPU header with type 3 BCM header */
1433	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1434
1435	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
1436					      BCM_HDR_LEN, &req_opts,
1437					      &cipher_parms, &hash_parms,
1438					      &aead_parms, chunksize);
1439
1440	/* Determine total length of padding. Put all padding in one buffer. */
1441	db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
1442				   chunksize, aead_parms.aad_pad_len,
1443				   aead_parms.data_pad_len, 0);
1444
1445	stat_pad_len = spu->spu_wordalign_padlen(db_size);
1446
1447	if (stat_pad_len)
1448		rx_frag_num++;
1449	pad_len = aead_parms.data_pad_len + stat_pad_len;
1450	if (pad_len) {
1451		tx_frag_num++;
1452		spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
1453				     aead_parms.data_pad_len, 0,
1454				     ctx->auth.alg, ctx->auth.mode,
1455				     rctx->total_sent, stat_pad_len);
1456	}
1457
1458	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
1459			      spu_hdr_len);
1460	dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
1461	packet_dump("    aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
1462	packet_log("BD:\n");
1463	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
1464	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
1465
1466	/*
1467	 * Build mailbox message containing SPU request msg and rx buffers
1468	 * to catch response message
1469	 */
1470	memset(mssg, 0, sizeof(*mssg));
1471	mssg->type = BRCM_MESSAGE_SPU;
1472	mssg->ctx = rctx;	/* Will be returned in response */
1473
1474	/* Create rx scatterlist to catch result */
1475	rx_frag_num += rctx->dst_nents;
1476	resp_len = chunksize;
1477
1478	/*
1479	 * Always catch ICV in separate buffer. Have to for GCM/CCM because of
1480	 * padding. Have to for SHA-224 and other truncated SHAs because SPU
1481	 * sends entire digest back.
1482	 */
1483	rx_frag_num++;
1484
1485	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
1486	     (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
1487		/*
1488		 * Input is ciphertxt plus ICV, but ICV not incl
1489		 * in output.
1490		 */
1491		resp_len -= ctx->digestsize;
1492		if (resp_len == 0)
1493			/* no rx frags to catch output data */
1494			rx_frag_num -= rctx->dst_nents;
1495	}
1496
1497	err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
1498				    aead_parms.assoc_size,
1499				    aead_parms.ret_iv_len, resp_len, digestsize,
1500				    stat_pad_len);
1501	if (err)
1502		return err;
1503
1504	/* Create tx scatterlist containing SPU request message */
1505	tx_frag_num += rctx->src_nents;
1506	tx_frag_num += assoc_nents;
1507	if (aead_parms.aad_pad_len)
1508		tx_frag_num++;
1509	if (aead_parms.iv_len)
1510		tx_frag_num++;
1511	if (spu->spu_tx_status_len())
1512		tx_frag_num++;
1513	err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
1514				    rctx->assoc, aead_parms.assoc_size,
1515				    assoc_nents, aead_parms.iv_len, chunksize,
1516				    aead_parms.aad_pad_len, pad_len, incl_icv);
1517	if (err)
1518		return err;
1519
1520	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
1521	if (unlikely(err < 0))
1522		return err;
1523
1524	return -EINPROGRESS;
1525}
1526
1527/**
1528 * handle_aead_resp() - Process a SPU response message for an AEAD request.
1529 * @rctx:  Crypto request context
1530 */
1531static void handle_aead_resp(struct iproc_reqctx_s *rctx)
1532{
1533	struct spu_hw *spu = &iproc_priv.spu;
1534	struct crypto_async_request *areq = rctx->parent;
1535	struct aead_request *req = container_of(areq,
1536						struct aead_request, base);
1537	struct iproc_ctx_s *ctx = rctx->ctx;
1538	u32 payload_len;
1539	unsigned int icv_offset;
1540	u32 result_len;
1541
1542	/* See how much data was returned */
1543	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
1544	flow_log("payload_len %u\n", payload_len);
1545
1546	/* only count payload */
1547	atomic64_add(payload_len, &iproc_priv.bytes_in);
1548
1549	if (req->assoclen)
1550		packet_dump("  assoc_data ", rctx->msg_buf.a.resp_aad,
1551			    req->assoclen);
1552
1553	/*
1554	 * Copy the ICV back to the destination
1555	 * buffer. In decrypt case, SPU gives us back the digest, but crypto
1556	 * API doesn't expect ICV in dst buffer.
1557	 */
1558	result_len = req->cryptlen;
1559	if (rctx->is_encrypt) {
1560		icv_offset = req->assoclen + rctx->total_sent;
1561		packet_dump("  ICV: ", rctx->msg_buf.digest, ctx->digestsize);
1562		flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
1563		sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
1564				      ctx->digestsize, icv_offset);
1565		result_len += ctx->digestsize;
1566	}
1567
1568	packet_log("response data:  ");
1569	dump_sg(req->dst, req->assoclen, result_len);
1570
1571	atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
1572	if (ctx->cipher.alg == CIPHER_ALG_AES) {
1573		if (ctx->cipher.mode == CIPHER_MODE_CCM)
1574			atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
1575		else if (ctx->cipher.mode == CIPHER_MODE_GCM)
1576			atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
1577		else
1578			atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1579	} else {
1580		atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1581	}
1582}
1583
1584/**
1585 * spu_chunk_cleanup() - Do cleanup after processing one chunk of a request
1586 * @rctx:  request context
1587 *
1588 * Mailbox scatterlists are allocated for each chunk. So free them after
1589 * processing each chunk.
1590 */
1591static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
1592{
1593	/* mailbox message used to tx request */
1594	struct brcm_message *mssg = &rctx->mb_mssg;
1595
1596	kfree(mssg->spu.src);
1597	kfree(mssg->spu.dst);
1598	memset(mssg, 0, sizeof(struct brcm_message));
1599}
1600
1601/**
1602 * finish_req() - Used to invoke the complete callback from the requester when
1603 * a request has been handled asynchronously.
1604 * @rctx:  Request context
1605 * @err:   Indicates whether the request was successful or not
1606 *
1607 * Ensures that cleanup has been done for request
1608 */
1609static void finish_req(struct iproc_reqctx_s *rctx, int err)
1610{
1611	struct crypto_async_request *areq = rctx->parent;
1612
1613	flow_log("%s() err:%d\n\n", __func__, err);
1614
1615	/* No harm done if already called */
1616	spu_chunk_cleanup(rctx);
1617
1618	if (areq)
1619		areq->complete(areq, err);
1620}
1621
1622/**
1623 * spu_rx_callback() - Callback from mailbox framework with a SPU response.
1624 * @cl:		mailbox client structure for SPU driver
1625 * @msg:	mailbox message containing SPU response
1626 */
1627static void spu_rx_callback(struct mbox_client *cl, void *msg)
1628{
1629	struct spu_hw *spu = &iproc_priv.spu;
1630	struct brcm_message *mssg = msg;
1631	struct iproc_reqctx_s *rctx;
1632	int err;
1633
1634	rctx = mssg->ctx;
1635	if (unlikely(!rctx)) {
1636		/* This is fatal */
1637		pr_err("%s(): no request context", __func__);
1638		err = -EFAULT;
1639		goto cb_finish;
1640	}
1641
1642	/* process the SPU status */
1643	err = spu->spu_status_process(rctx->msg_buf.rx_stat);
1644	if (err != 0) {
1645		if (err == SPU_INVALID_ICV)
1646			atomic_inc(&iproc_priv.bad_icv);
1647		err = -EBADMSG;
1648		goto cb_finish;
1649	}
1650
1651	/* Process the SPU response message */
1652	switch (rctx->ctx->alg->type) {
1653	case CRYPTO_ALG_TYPE_SKCIPHER:
1654		handle_skcipher_resp(rctx);
1655		break;
1656	case CRYPTO_ALG_TYPE_AHASH:
1657		handle_ahash_resp(rctx);
1658		break;
1659	case CRYPTO_ALG_TYPE_AEAD:
1660		handle_aead_resp(rctx);
1661		break;
1662	default:
1663		err = -EINVAL;
1664		goto cb_finish;
1665	}
1666
1667	/*
1668	 * If this response does not complete the request, then send the next
1669	 * request chunk.
1670	 */
1671	if (rctx->total_sent < rctx->total_todo) {
1672		/* Deallocate anything specific to previous chunk */
1673		spu_chunk_cleanup(rctx);
1674
1675		switch (rctx->ctx->alg->type) {
1676		case CRYPTO_ALG_TYPE_SKCIPHER:
1677			err = handle_skcipher_req(rctx);
1678			break;
1679		case CRYPTO_ALG_TYPE_AHASH:
1680			err = handle_ahash_req(rctx);
1681			if (err == -EAGAIN)
1682				/*
1683				 * we saved data in hash carry, but tell crypto
1684				 * API we successfully completed request.
1685				 */
1686				err = 0;
1687			break;
1688		case CRYPTO_ALG_TYPE_AEAD:
1689			err = handle_aead_req(rctx);
1690			break;
1691		default:
1692			err = -EINVAL;
1693		}
1694
1695		if (err == -EINPROGRESS)
1696			/* Successfully submitted request for next chunk */
1697			return;
1698	}
1699
1700cb_finish:
1701	finish_req(rctx, err);
1702}
1703
1704/* ==================== Kernel Cryptographic API ==================== */
1705
1706/**
1707 * skcipher_enqueue() - Handle skcipher encrypt or decrypt request.
1708 * @req:	Crypto API request
1709 * @encrypt:	true if encrypting; false if decrypting
1710 *
1711 * Return: -EINPROGRESS if request accepted and result will be returned
1712 *			asynchronously
1713 *	   < 0 if an error
1714 */
1715static int skcipher_enqueue(struct skcipher_request *req, bool encrypt)
1716{
1717	struct iproc_reqctx_s *rctx = skcipher_request_ctx(req);
1718	struct iproc_ctx_s *ctx =
1719	    crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
1720	int err;
1721
1722	flow_log("%s() enc:%u\n", __func__, encrypt);
1723
1724	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1725		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1726	rctx->parent = &req->base;
1727	rctx->is_encrypt = encrypt;
1728	rctx->bd_suppress = false;
1729	rctx->total_todo = req->cryptlen;
1730	rctx->src_sent = 0;
1731	rctx->total_sent = 0;
1732	rctx->total_received = 0;
1733	rctx->ctx = ctx;
1734
1735	/* Initialize current position in src and dst scatterlists */
1736	rctx->src_sg = req->src;
1737	rctx->src_nents = 0;
1738	rctx->src_skip = 0;
1739	rctx->dst_sg = req->dst;
1740	rctx->dst_nents = 0;
1741	rctx->dst_skip = 0;
1742
1743	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
1744	    ctx->cipher.mode == CIPHER_MODE_CTR ||
1745	    ctx->cipher.mode == CIPHER_MODE_OFB ||
1746	    ctx->cipher.mode == CIPHER_MODE_XTS ||
1747	    ctx->cipher.mode == CIPHER_MODE_GCM ||
1748	    ctx->cipher.mode == CIPHER_MODE_CCM) {
1749		rctx->iv_ctr_len =
1750		    crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
1751		memcpy(rctx->msg_buf.iv_ctr, req->iv, rctx->iv_ctr_len);
1752	} else {
1753		rctx->iv_ctr_len = 0;
1754	}
1755
1756	/* Choose a SPU to process this request */
1757	rctx->chan_idx = select_channel();
1758	err = handle_skcipher_req(rctx);
1759	if (err != -EINPROGRESS)
1760		/* synchronous result */
1761		spu_chunk_cleanup(rctx);
1762
1763	return err;
1764}
1765
1766static int des_setkey(struct crypto_skcipher *cipher, const u8 *key,
1767		      unsigned int keylen)
1768{
1769	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1770	int err;
1771
1772	err = verify_skcipher_des_key(cipher, key);
1773	if (err)
1774		return err;
1775
1776	ctx->cipher_type = CIPHER_TYPE_DES;
1777	return 0;
1778}
1779
1780static int threedes_setkey(struct crypto_skcipher *cipher, const u8 *key,
1781			   unsigned int keylen)
1782{
1783	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1784	int err;
1785
1786	err = verify_skcipher_des3_key(cipher, key);
1787	if (err)
1788		return err;
1789
1790	ctx->cipher_type = CIPHER_TYPE_3DES;
1791	return 0;
1792}
1793
1794static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
1795		      unsigned int keylen)
1796{
1797	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1798
1799	if (ctx->cipher.mode == CIPHER_MODE_XTS)
1800		/* XTS includes two keys of equal length */
1801		keylen = keylen / 2;
1802
1803	switch (keylen) {
1804	case AES_KEYSIZE_128:
1805		ctx->cipher_type = CIPHER_TYPE_AES128;
1806		break;
1807	case AES_KEYSIZE_192:
1808		ctx->cipher_type = CIPHER_TYPE_AES192;
1809		break;
1810	case AES_KEYSIZE_256:
1811		ctx->cipher_type = CIPHER_TYPE_AES256;
1812		break;
1813	default:
1814		return -EINVAL;
1815	}
1816	WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
1817		((ctx->max_payload % AES_BLOCK_SIZE) != 0));
1818	return 0;
1819}
1820
1821static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
1822			     unsigned int keylen)
1823{
1824	struct spu_hw *spu = &iproc_priv.spu;
1825	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1826	struct spu_cipher_parms cipher_parms;
1827	u32 alloc_len = 0;
1828	int err;
1829
1830	flow_log("skcipher_setkey() keylen: %d\n", keylen);
1831	flow_dump("  key: ", key, keylen);
1832
1833	switch (ctx->cipher.alg) {
1834	case CIPHER_ALG_DES:
1835		err = des_setkey(cipher, key, keylen);
1836		break;
1837	case CIPHER_ALG_3DES:
1838		err = threedes_setkey(cipher, key, keylen);
1839		break;
1840	case CIPHER_ALG_AES:
1841		err = aes_setkey(cipher, key, keylen);
1842		break;
1843	default:
1844		pr_err("%s() Error: unknown cipher alg\n", __func__);
1845		err = -EINVAL;
1846	}
1847	if (err)
1848		return err;
1849
1850	memcpy(ctx->enckey, key, keylen);
1851	ctx->enckeylen = keylen;
1852
1853	/* SPU needs XTS keys in the reverse order the crypto API presents */
1854	if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
1855	    (ctx->cipher.mode == CIPHER_MODE_XTS)) {
1856		unsigned int xts_keylen = keylen / 2;
1857
1858		memcpy(ctx->enckey, key + xts_keylen, xts_keylen);
1859		memcpy(ctx->enckey + xts_keylen, key, xts_keylen);
1860	}
1861
1862	if (spu->spu_type == SPU_TYPE_SPUM)
1863		alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN;
1864	else if (spu->spu_type == SPU_TYPE_SPU2)
1865		alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
1866	memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
1867	cipher_parms.iv_buf = NULL;
1868	cipher_parms.iv_len = crypto_skcipher_ivsize(cipher);
1869	flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);
1870
1871	cipher_parms.alg = ctx->cipher.alg;
1872	cipher_parms.mode = ctx->cipher.mode;
1873	cipher_parms.type = ctx->cipher_type;
1874	cipher_parms.key_buf = ctx->enckey;
1875	cipher_parms.key_len = ctx->enckeylen;
1876
1877	/* Prepend SPU request message with BCM header */
1878	memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1879	ctx->spu_req_hdr_len =
1880	    spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN,
1881				     &cipher_parms);
1882
1883	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
1884							  ctx->enckeylen,
1885							  false);
1886
1887	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]);
1888
1889	return 0;
1890}
1891
1892static int skcipher_encrypt(struct skcipher_request *req)
1893{
1894	flow_log("skcipher_encrypt() nbytes:%u\n", req->cryptlen);
1895
1896	return skcipher_enqueue(req, true);
1897}
1898
1899static int skcipher_decrypt(struct skcipher_request *req)
1900{
1901	flow_log("skcipher_decrypt() nbytes:%u\n", req->cryptlen);
1902	return skcipher_enqueue(req, false);
1903}
1904
1905static int ahash_enqueue(struct ahash_request *req)
1906{
1907	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
1908	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1909	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
1910	int err;
1911	const char *alg_name;
1912
1913	flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
1914
1915	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1916		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1917	rctx->parent = &req->base;
1918	rctx->ctx = ctx;
1919	rctx->bd_suppress = true;
1920	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
1921
1922	/* Initialize position in src scatterlist */
1923	rctx->src_sg = req->src;
1924	rctx->src_skip = 0;
1925	rctx->src_nents = 0;
1926	rctx->dst_sg = NULL;
1927	rctx->dst_skip = 0;
1928	rctx->dst_nents = 0;
1929
1930	/* SPU2 hardware does not compute hash of zero length data */
1931	if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
1932	    (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
1933		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
1934		flow_log("Doing %sfinal %s zero-len hash request in software\n",
1935			 rctx->is_final ? "" : "non-", alg_name);
1936		err = do_shash((unsigned char *)alg_name, req->result,
1937			       NULL, 0, NULL, 0, ctx->authkey,
1938			       ctx->authkeylen);
1939		if (err < 0)
1940			flow_log("Hash request failed with error %d\n", err);
1941		return err;
1942	}
1943	/* Choose a SPU to process this request */
1944	rctx->chan_idx = select_channel();
1945
1946	err = handle_ahash_req(rctx);
1947	if (err != -EINPROGRESS)
1948		/* synchronous result */
1949		spu_chunk_cleanup(rctx);
1950
1951	if (err == -EAGAIN)
1952		/*
1953		 * we saved data in hash carry, but tell crypto API
1954		 * we successfully completed request.
1955		 */
1956		err = 0;
1957
1958	return err;
1959}
1960
1961static int __ahash_init(struct ahash_request *req)
1962{
1963	struct spu_hw *spu = &iproc_priv.spu;
1964	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
1965	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1966	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
1967
1968	flow_log("%s()\n", __func__);
1969
1970	/* Initialize the context */
1971	rctx->hash_carry_len = 0;
1972	rctx->is_final = 0;
1973
1974	rctx->total_todo = 0;
1975	rctx->src_sent = 0;
1976	rctx->total_sent = 0;
1977	rctx->total_received = 0;
1978
1979	ctx->digestsize = crypto_ahash_digestsize(tfm);
1980	/* If we add a hash whose digest is larger, catch it here. */
1981	WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE);
1982
1983	rctx->is_sw_hmac = false;
1984
1985	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0,
1986							  true);
1987
1988	return 0;
1989}
1990
1991/**
1992 * spu_no_incr_hash() - Determine whether incremental hashing is supported.
1993 * @ctx:  Crypto session context
1994 *
1995 * SPU-2 does not support incremental hashing (we'll have to revisit and
1996 * condition based on chip revision or device tree entry if future versions do
1997 * support incremental hash)
1998 *
1999 * SPU-M also doesn't support incremental hashing of AES-XCBC
2000 *
2001 * Return: true if incremental hashing is not supported
2002 *         false otherwise
2003 */
2004static bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
2005{
2006	struct spu_hw *spu = &iproc_priv.spu;
2007
2008	if (spu->spu_type == SPU_TYPE_SPU2)
2009		return true;
2010
2011	if ((ctx->auth.alg == HASH_ALG_AES) &&
2012	    (ctx->auth.mode == HASH_MODE_XCBC))
2013		return true;
2014
2015	/* Otherwise, incremental hashing is supported */
2016	return false;
2017}
2018
2019static int ahash_init(struct ahash_request *req)
2020{
2021	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2022	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2023	const char *alg_name;
2024	struct crypto_shash *hash;
2025	int ret;
2026	gfp_t gfp;
2027
2028	if (spu_no_incr_hash(ctx)) {
2029		/*
2030		 * If we get an incremental hashing request and it's not
2031		 * supported by the hardware, we need to handle it in software
2032		 * by calling synchronous hash functions.
2033		 */
2034		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
2035		hash = crypto_alloc_shash(alg_name, 0, 0);
2036		if (IS_ERR(hash)) {
2037			ret = PTR_ERR(hash);
2038			goto err;
2039		}
2040
2041		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2042		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2043		ctx->shash = kmalloc(sizeof(*ctx->shash) +
2044				     crypto_shash_descsize(hash), gfp);
2045		if (!ctx->shash) {
2046			ret = -ENOMEM;
2047			goto err_hash;
2048		}
2049		ctx->shash->tfm = hash;
2050
2051		/* Set the key using data we already have from setkey */
2052		if (ctx->authkeylen > 0) {
2053			ret = crypto_shash_setkey(hash, ctx->authkey,
2054						  ctx->authkeylen);
2055			if (ret)
2056				goto err_shash;
2057		}
2058
2059		/* Initialize hash w/ this key and other params */
2060		ret = crypto_shash_init(ctx->shash);
2061		if (ret)
2062			goto err_shash;
2063	} else {
2064		/* Otherwise call the internal function which uses SPU hw */
2065		ret = __ahash_init(req);
2066	}
2067
2068	return ret;
2069
2070err_shash:
2071	kfree(ctx->shash);
2072err_hash:
2073	crypto_free_shash(hash);
2074err:
2075	return ret;
2076}
2077
2078static int __ahash_update(struct ahash_request *req)
2079{
2080	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2081
2082	flow_log("ahash_update() nbytes:%u\n", req->nbytes);
2083
2084	if (!req->nbytes)
2085		return 0;
2086	rctx->total_todo += req->nbytes;
2087	rctx->src_sent = 0;
2088
2089	return ahash_enqueue(req);
2090}
2091
2092static int ahash_update(struct ahash_request *req)
2093{
2094	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2095	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2096	u8 *tmpbuf;
2097	int ret;
2098	int nents;
2099	gfp_t gfp;
2100
2101	if (spu_no_incr_hash(ctx)) {
2102		/*
2103		 * If we get an incremental hashing request and it's not
2104		 * supported by the hardware, we need to handle it in software
2105		 * by calling synchronous hash functions.
2106		 */
2107		if (req->src)
2108			nents = sg_nents(req->src);
2109		else
2110			return -EINVAL;
2111
2112		/* Copy data from req scatterlist to tmp buffer */
2113		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2114		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2115		tmpbuf = kmalloc(req->nbytes, gfp);
2116		if (!tmpbuf)
2117			return -ENOMEM;
2118
2119		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2120				req->nbytes) {
2121			kfree(tmpbuf);
2122			return -EINVAL;
2123		}
2124
2125		/* Call synchronous update */
2126		ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
2127		kfree(tmpbuf);
2128	} else {
2129		/* Otherwise call the internal function which uses SPU hw */
2130		ret = __ahash_update(req);
2131	}
2132
2133	return ret;
2134}
2135
2136static int __ahash_final(struct ahash_request *req)
2137{
2138	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2139
2140	flow_log("ahash_final() nbytes:%u\n", req->nbytes);
2141
2142	rctx->is_final = 1;
2143
2144	return ahash_enqueue(req);
2145}
2146
2147static int ahash_final(struct ahash_request *req)
2148{
2149	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2150	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2151	int ret;
2152
2153	if (spu_no_incr_hash(ctx)) {
2154		/*
2155		 * If we get an incremental hashing request and it's not
2156		 * supported by the hardware, we need to handle it in software
2157		 * by calling synchronous hash functions.
2158		 */
2159		ret = crypto_shash_final(ctx->shash, req->result);
2160
2161		/* Done with hash, can deallocate it now */
2162		crypto_free_shash(ctx->shash->tfm);
2163		kfree(ctx->shash);
2164
2165	} else {
2166		/* Otherwise call the internal function which uses SPU hw */
2167		ret = __ahash_final(req);
2168	}
2169
2170	return ret;
2171}
2172
2173static int __ahash_finup(struct ahash_request *req)
2174{
2175	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2176
2177	flow_log("ahash_finup() nbytes:%u\n", req->nbytes);
2178
2179	rctx->total_todo += req->nbytes;
2180	rctx->src_sent = 0;
2181	rctx->is_final = 1;
2182
2183	return ahash_enqueue(req);
2184}
2185
2186static int ahash_finup(struct ahash_request *req)
2187{
2188	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2189	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2190	u8 *tmpbuf;
2191	int ret;
2192	int nents;
2193	gfp_t gfp;
2194
2195	if (spu_no_incr_hash(ctx)) {
2196		/*
2197		 * If we get an incremental hashing request and it's not
2198		 * supported by the hardware, we need to handle it in software
2199		 * by calling synchronous hash functions.
2200		 */
2201		if (req->src) {
2202			nents = sg_nents(req->src);
2203		} else {
2204			ret = -EINVAL;
2205			goto ahash_finup_exit;
2206		}
2207
2208		/* Copy data from req scatterlist to tmp buffer */
2209		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2210		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2211		tmpbuf = kmalloc(req->nbytes, gfp);
2212		if (!tmpbuf) {
2213			ret = -ENOMEM;
2214			goto ahash_finup_exit;
2215		}
2216
2217		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2218				req->nbytes) {
2219			ret = -EINVAL;
2220			goto ahash_finup_free;
2221		}
2222
2223		/* Call synchronous update */
2224		ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
2225					 req->result);
2226	} else {
2227		/* Otherwise call the internal function which uses SPU hw */
2228		return __ahash_finup(req);
2229	}
2230ahash_finup_free:
2231	kfree(tmpbuf);
2232
2233ahash_finup_exit:
2234	/* Done with hash, can deallocate it now */
2235	crypto_free_shash(ctx->shash->tfm);
2236	kfree(ctx->shash);
2237	return ret;
2238}
2239
2240static int ahash_digest(struct ahash_request *req)
2241{
2242	int err;
2243
2244	flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
2245
2246	/* whole thing at once */
2247	err = __ahash_init(req);
2248	if (!err)
2249		err = __ahash_finup(req);
2250
2251	return err;
2252}
2253
2254static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
2255			unsigned int keylen)
2256{
2257	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2258
2259	flow_log("%s() ahash:%p key:%p keylen:%u\n",
2260		 __func__, ahash, key, keylen);
2261	flow_dump("  key: ", key, keylen);
2262
2263	if (ctx->auth.alg == HASH_ALG_AES) {
2264		switch (keylen) {
2265		case AES_KEYSIZE_128:
2266			ctx->cipher_type = CIPHER_TYPE_AES128;
2267			break;
2268		case AES_KEYSIZE_192:
2269			ctx->cipher_type = CIPHER_TYPE_AES192;
2270			break;
2271		case AES_KEYSIZE_256:
2272			ctx->cipher_type = CIPHER_TYPE_AES256;
2273			break;
2274		default:
2275			pr_err("%s() Error: Invalid key length\n", __func__);
2276			return -EINVAL;
2277		}
2278	} else {
2279		pr_err("%s() Error: unknown hash alg\n", __func__);
2280		return -EINVAL;
2281	}
2282	memcpy(ctx->authkey, key, keylen);
2283	ctx->authkeylen = keylen;
2284
2285	return 0;
2286}
2287
2288static int ahash_export(struct ahash_request *req, void *out)
2289{
2290	const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2291	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out;
2292
2293	spu_exp->total_todo = rctx->total_todo;
2294	spu_exp->total_sent = rctx->total_sent;
2295	spu_exp->is_sw_hmac = rctx->is_sw_hmac;
2296	memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
2297	spu_exp->hash_carry_len = rctx->hash_carry_len;
2298	memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));
2299
2300	return 0;
2301}
2302
2303static int ahash_import(struct ahash_request *req, const void *in)
2304{
2305	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2306	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in;
2307
2308	rctx->total_todo = spu_exp->total_todo;
2309	rctx->total_sent = spu_exp->total_sent;
2310	rctx->is_sw_hmac = spu_exp->is_sw_hmac;
2311	memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
2312	rctx->hash_carry_len = spu_exp->hash_carry_len;
2313	memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));
2314
2315	return 0;
2316}
2317
2318static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
2319			     unsigned int keylen)
2320{
2321	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2322	unsigned int blocksize =
2323		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
2324	unsigned int digestsize = crypto_ahash_digestsize(ahash);
2325	unsigned int index;
2326	int rc;
2327
2328	flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n",
2329		 __func__, ahash, key, keylen, blocksize, digestsize);
2330	flow_dump("  key: ", key, keylen);
2331
2332	if (keylen > blocksize) {
2333		switch (ctx->auth.alg) {
2334		case HASH_ALG_MD5:
2335			rc = do_shash("md5", ctx->authkey, key, keylen, NULL,
2336				      0, NULL, 0);
2337			break;
2338		case HASH_ALG_SHA1:
2339			rc = do_shash("sha1", ctx->authkey, key, keylen, NULL,
2340				      0, NULL, 0);
2341			break;
2342		case HASH_ALG_SHA224:
2343			rc = do_shash("sha224", ctx->authkey, key, keylen, NULL,
2344				      0, NULL, 0);
2345			break;
2346		case HASH_ALG_SHA256:
2347			rc = do_shash("sha256", ctx->authkey, key, keylen, NULL,
2348				      0, NULL, 0);
2349			break;
2350		case HASH_ALG_SHA384:
2351			rc = do_shash("sha384", ctx->authkey, key, keylen, NULL,
2352				      0, NULL, 0);
2353			break;
2354		case HASH_ALG_SHA512:
2355			rc = do_shash("sha512", ctx->authkey, key, keylen, NULL,
2356				      0, NULL, 0);
2357			break;
2358		case HASH_ALG_SHA3_224:
2359			rc = do_shash("sha3-224", ctx->authkey, key, keylen,
2360				      NULL, 0, NULL, 0);
2361			break;
2362		case HASH_ALG_SHA3_256:
2363			rc = do_shash("sha3-256", ctx->authkey, key, keylen,
2364				      NULL, 0, NULL, 0);
2365			break;
2366		case HASH_ALG_SHA3_384:
2367			rc = do_shash("sha3-384", ctx->authkey, key, keylen,
2368				      NULL, 0, NULL, 0);
2369			break;
2370		case HASH_ALG_SHA3_512:
2371			rc = do_shash("sha3-512", ctx->authkey, key, keylen,
2372				      NULL, 0, NULL, 0);
2373			break;
2374		default:
2375			pr_err("%s() Error: unknown hash alg\n", __func__);
2376			return -EINVAL;
2377		}
2378		if (rc < 0) {
2379			pr_err("%s() Error %d computing shash for %s\n",
2380			       __func__, rc, hash_alg_name[ctx->auth.alg]);
2381			return rc;
2382		}
2383		ctx->authkeylen = digestsize;
2384
2385		flow_log("  keylen > digestsize... hashed\n");
2386		flow_dump("  newkey: ", ctx->authkey, ctx->authkeylen);
2387	} else {
2388		memcpy(ctx->authkey, key, keylen);
2389		ctx->authkeylen = keylen;
2390	}
2391
2392	/*
2393	 * Full HMAC operation in SPUM is not verified,
2394	 * So keeping the generation of IPAD, OPAD and
2395	 * outer hashing in software.
2396	 */
2397	if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) {
2398		memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen);
2399		memset(ctx->ipad + ctx->authkeylen, 0,
2400		       blocksize - ctx->authkeylen);
2401		ctx->authkeylen = 0;
2402		memcpy(ctx->opad, ctx->ipad, blocksize);
2403
2404		for (index = 0; index < blocksize; index++) {
2405			ctx->ipad[index] ^= HMAC_IPAD_VALUE;
2406			ctx->opad[index] ^= HMAC_OPAD_VALUE;
2407		}
2408
2409		flow_dump("  ipad: ", ctx->ipad, blocksize);
2410		flow_dump("  opad: ", ctx->opad, blocksize);
2411	}
2412	ctx->digestsize = digestsize;
2413	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]);
2414
2415	return 0;
2416}
2417
2418static int ahash_hmac_init(struct ahash_request *req)
2419{
2420	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2421	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2422	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2423	unsigned int blocksize =
2424			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2425
2426	flow_log("ahash_hmac_init()\n");
2427
2428	/* init the context as a hash */
2429	ahash_init(req);
2430
2431	if (!spu_no_incr_hash(ctx)) {
2432		/* SPU-M can do incr hashing but needs sw for outer HMAC */
2433		rctx->is_sw_hmac = true;
2434		ctx->auth.mode = HASH_MODE_HASH;
2435		/* start with a prepended ipad */
2436		memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2437		rctx->hash_carry_len = blocksize;
2438		rctx->total_todo += blocksize;
2439	}
2440
2441	return 0;
2442}
2443
2444static int ahash_hmac_update(struct ahash_request *req)
2445{
2446	flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);
2447
2448	if (!req->nbytes)
2449		return 0;
2450
2451	return ahash_update(req);
2452}
2453
2454static int ahash_hmac_final(struct ahash_request *req)
2455{
2456	flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);
2457
2458	return ahash_final(req);
2459}
2460
2461static int ahash_hmac_finup(struct ahash_request *req)
2462{
2463	flow_log("ahash_hmac_finupl() nbytes:%u\n", req->nbytes);
2464
2465	return ahash_finup(req);
2466}
2467
2468static int ahash_hmac_digest(struct ahash_request *req)
2469{
2470	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2471	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2472	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2473	unsigned int blocksize =
2474			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2475
2476	flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);
2477
2478	/* Perform initialization and then call finup */
2479	__ahash_init(req);
2480
2481	if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) {
2482		/*
2483		 * SPU2 supports full HMAC implementation in the
2484		 * hardware, need not to generate IPAD, OPAD and
2485		 * outer hash in software.
2486		 * Only for hash key len > hash block size, SPU2
2487		 * expects to perform hashing on the key, shorten
2488		 * it to digest size and feed it as hash key.
2489		 */
2490		rctx->is_sw_hmac = false;
2491		ctx->auth.mode = HASH_MODE_HMAC;
2492	} else {
2493		rctx->is_sw_hmac = true;
2494		ctx->auth.mode = HASH_MODE_HASH;
2495		/* start with a prepended ipad */
2496		memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2497		rctx->hash_carry_len = blocksize;
2498		rctx->total_todo += blocksize;
2499	}
2500
2501	return __ahash_finup(req);
2502}
2503
2504/* aead helpers */
2505
2506static int aead_need_fallback(struct aead_request *req)
2507{
2508	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2509	struct spu_hw *spu = &iproc_priv.spu;
2510	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2511	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2512	u32 payload_len;
2513
2514	/*
2515	 * SPU hardware cannot handle the AES-GCM/CCM case where plaintext
2516	 * and AAD are both 0 bytes long. So use fallback in this case.
2517	 */
2518	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
2519	     (ctx->cipher.mode == CIPHER_MODE_CCM)) &&
2520	    (req->assoclen == 0)) {
2521		if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
2522		    (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
2523			flow_log("AES GCM/CCM needs fallback for 0 len req\n");
2524			return 1;
2525		}
2526	}
2527
2528	/* SPU-M hardware only supports CCM digest size of 8, 12, or 16 bytes */
2529	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2530	    (spu->spu_type == SPU_TYPE_SPUM) &&
2531	    (ctx->digestsize != 8) && (ctx->digestsize != 12) &&
2532	    (ctx->digestsize != 16)) {
2533		flow_log("%s() AES CCM needs fallback for digest size %d\n",
2534			 __func__, ctx->digestsize);
2535		return 1;
2536	}
2537
2538	/*
2539	 * SPU-M on NSP has an issue where AES-CCM hash is not correct
2540	 * when AAD size is 0
2541	 */
2542	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2543	    (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) &&
2544	    (req->assoclen == 0)) {
2545		flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
2546			 __func__);
2547		return 1;
2548	}
2549
2550	/*
2551	 * RFC4106 and RFC4543 cannot handle the case where AAD is other than
2552	 * 16 or 20 bytes long. So use fallback in this case.
2553	 */
2554	if (ctx->cipher.mode == CIPHER_MODE_GCM &&
2555	    ctx->cipher.alg == CIPHER_ALG_AES &&
2556	    rctx->iv_ctr_len == GCM_RFC4106_IV_SIZE &&
2557	    req->assoclen != 16 && req->assoclen != 20) {
2558		flow_log("RFC4106/RFC4543 needs fallback for assoclen"
2559			 " other than 16 or 20 bytes\n");
2560		return 1;
2561	}
2562
2563	payload_len = req->cryptlen;
2564	if (spu->spu_type == SPU_TYPE_SPUM)
2565		payload_len += req->assoclen;
2566
2567	flow_log("%s() payload len: %u\n", __func__, payload_len);
2568
2569	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2570		return 0;
2571	else
2572		return payload_len > ctx->max_payload;
2573}
2574
2575static void aead_complete(struct crypto_async_request *areq, int err)
2576{
2577	struct aead_request *req =
2578	    container_of(areq, struct aead_request, base);
2579	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2580	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2581
2582	flow_log("%s() err:%d\n", __func__, err);
2583
2584	areq->tfm = crypto_aead_tfm(aead);
2585
2586	areq->complete = rctx->old_complete;
2587	areq->data = rctx->old_data;
2588
2589	areq->complete(areq, err);
2590}
2591
2592static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
2593{
2594	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2595	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
2596	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2597	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
2598	int err;
2599	u32 req_flags;
2600
2601	flow_log("%s() enc:%u\n", __func__, is_encrypt);
2602
2603	if (ctx->fallback_cipher) {
2604		/* Store the cipher tfm and then use the fallback tfm */
2605		rctx->old_tfm = tfm;
2606		aead_request_set_tfm(req, ctx->fallback_cipher);
2607		/*
2608		 * Save the callback and chain ourselves in, so we can restore
2609		 * the tfm
2610		 */
2611		rctx->old_complete = req->base.complete;
2612		rctx->old_data = req->base.data;
2613		req_flags = aead_request_flags(req);
2614		aead_request_set_callback(req, req_flags, aead_complete, req);
2615		err = is_encrypt ? crypto_aead_encrypt(req) :
2616		    crypto_aead_decrypt(req);
2617
2618		if (err == 0) {
2619			/*
2620			 * fallback was synchronous (did not return
2621			 * -EINPROGRESS). So restore request state here.
2622			 */
2623			aead_request_set_callback(req, req_flags,
2624						  rctx->old_complete, req);
2625			req->base.data = rctx->old_data;
2626			aead_request_set_tfm(req, aead);
2627			flow_log("%s() fallback completed successfully\n\n",
2628				 __func__);
2629		}
2630	} else {
2631		err = -EINVAL;
2632	}
2633
2634	return err;
2635}
2636
2637static int aead_enqueue(struct aead_request *req, bool is_encrypt)
2638{
2639	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2640	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2641	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2642	int err;
2643
2644	flow_log("%s() enc:%u\n", __func__, is_encrypt);
2645
2646	if (req->assoclen > MAX_ASSOC_SIZE) {
2647		pr_err
2648		    ("%s() Error: associated data too long. (%u > %u bytes)\n",
2649		     __func__, req->assoclen, MAX_ASSOC_SIZE);
2650		return -EINVAL;
2651	}
2652
2653	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2654		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2655	rctx->parent = &req->base;
2656	rctx->is_encrypt = is_encrypt;
2657	rctx->bd_suppress = false;
2658	rctx->total_todo = req->cryptlen;
2659	rctx->src_sent = 0;
2660	rctx->total_sent = 0;
2661	rctx->total_received = 0;
2662	rctx->is_sw_hmac = false;
2663	rctx->ctx = ctx;
2664	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2665
2666	/* assoc data is at start of src sg */
2667	rctx->assoc = req->src;
2668
2669	/*
2670	 * Init current position in src scatterlist to be after assoc data.
2671	 * src_skip set to buffer offset where data begins. (Assoc data could
2672	 * end in the middle of a buffer.)
2673	 */
2674	if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
2675			     &rctx->src_skip) < 0) {
2676		pr_err("%s() Error: Unable to find start of src data\n",
2677		       __func__);
2678		return -EINVAL;
2679	}
2680
2681	rctx->src_nents = 0;
2682	rctx->dst_nents = 0;
2683	if (req->dst == req->src) {
2684		rctx->dst_sg = rctx->src_sg;
2685		rctx->dst_skip = rctx->src_skip;
2686	} else {
2687		/*
2688		 * Expect req->dst to have room for assoc data followed by
2689		 * output data and ICV, if encrypt. So initialize dst_sg
2690		 * to point beyond assoc len offset.
2691		 */
2692		if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
2693				     &rctx->dst_skip) < 0) {
2694			pr_err("%s() Error: Unable to find start of dst data\n",
2695			       __func__);
2696			return -EINVAL;
2697		}
2698	}
2699
2700	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
2701	    ctx->cipher.mode == CIPHER_MODE_CTR ||
2702	    ctx->cipher.mode == CIPHER_MODE_OFB ||
2703	    ctx->cipher.mode == CIPHER_MODE_XTS ||
2704	    ctx->cipher.mode == CIPHER_MODE_GCM) {
2705		rctx->iv_ctr_len =
2706			ctx->salt_len +
2707			crypto_aead_ivsize(crypto_aead_reqtfm(req));
2708	} else if (ctx->cipher.mode == CIPHER_MODE_CCM) {
2709		rctx->iv_ctr_len = CCM_AES_IV_SIZE;
2710	} else {
2711		rctx->iv_ctr_len = 0;
2712	}
2713
2714	rctx->hash_carry_len = 0;
2715
2716	flow_log("  src sg: %p\n", req->src);
2717	flow_log("  rctx->src_sg: %p, src_skip %u\n",
2718		 rctx->src_sg, rctx->src_skip);
2719	flow_log("  assoc:  %p, assoclen %u\n", rctx->assoc, req->assoclen);
2720	flow_log("  dst sg: %p\n", req->dst);
2721	flow_log("  rctx->dst_sg: %p, dst_skip %u\n",
2722		 rctx->dst_sg, rctx->dst_skip);
2723	flow_log("  iv_ctr_len:%u\n", rctx->iv_ctr_len);
2724	flow_dump("  iv: ", req->iv, rctx->iv_ctr_len);
2725	flow_log("  authkeylen:%u\n", ctx->authkeylen);
2726	flow_log("  is_esp: %s\n", ctx->is_esp ? "yes" : "no");
2727
2728	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2729		flow_log("  max_payload infinite");
2730	else
2731		flow_log("  max_payload: %u\n", ctx->max_payload);
2732
2733	if (unlikely(aead_need_fallback(req)))
2734		return aead_do_fallback(req, is_encrypt);
2735
2736	/*
2737	 * Do memory allocations for request after fallback check, because if we
2738	 * do fallback, we won't call finish_req() to dealloc.
2739	 */
2740	if (rctx->iv_ctr_len) {
2741		if (ctx->salt_len)
2742			memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
2743			       ctx->salt, ctx->salt_len);
2744		memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
2745		       req->iv,
2746		       rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
2747	}
2748
2749	rctx->chan_idx = select_channel();
2750	err = handle_aead_req(rctx);
2751	if (err != -EINPROGRESS)
2752		/* synchronous result */
2753		spu_chunk_cleanup(rctx);
2754
2755	return err;
2756}
2757
2758static int aead_authenc_setkey(struct crypto_aead *cipher,
2759			       const u8 *key, unsigned int keylen)
2760{
2761	struct spu_hw *spu = &iproc_priv.spu;
2762	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2763	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2764	struct crypto_authenc_keys keys;
2765	int ret;
2766
2767	flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
2768		 keylen);
2769	flow_dump("  key: ", key, keylen);
2770
2771	ret = crypto_authenc_extractkeys(&keys, key, keylen);
2772	if (ret)
2773		goto badkey;
2774
2775	if (keys.enckeylen > MAX_KEY_SIZE ||
2776	    keys.authkeylen > MAX_KEY_SIZE)
2777		goto badkey;
2778
2779	ctx->enckeylen = keys.enckeylen;
2780	ctx->authkeylen = keys.authkeylen;
2781
2782	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
2783	/* May end up padding auth key. So make sure it's zeroed. */
2784	memset(ctx->authkey, 0, sizeof(ctx->authkey));
2785	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
2786
2787	switch (ctx->alg->cipher_info.alg) {
2788	case CIPHER_ALG_DES:
2789		if (verify_aead_des_key(cipher, keys.enckey, keys.enckeylen))
2790			return -EINVAL;
2791
2792		ctx->cipher_type = CIPHER_TYPE_DES;
2793		break;
2794	case CIPHER_ALG_3DES:
2795		if (verify_aead_des3_key(cipher, keys.enckey, keys.enckeylen))
2796			return -EINVAL;
2797
2798		ctx->cipher_type = CIPHER_TYPE_3DES;
2799		break;
2800	case CIPHER_ALG_AES:
2801		switch (ctx->enckeylen) {
2802		case AES_KEYSIZE_128:
2803			ctx->cipher_type = CIPHER_TYPE_AES128;
2804			break;
2805		case AES_KEYSIZE_192:
2806			ctx->cipher_type = CIPHER_TYPE_AES192;
2807			break;
2808		case AES_KEYSIZE_256:
2809			ctx->cipher_type = CIPHER_TYPE_AES256;
2810			break;
2811		default:
2812			goto badkey;
2813		}
2814		break;
2815	default:
2816		pr_err("%s() Error: Unknown cipher alg\n", __func__);
2817		return -EINVAL;
2818	}
2819
2820	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2821		 ctx->authkeylen);
2822	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
2823	flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);
2824
2825	/* setkey the fallback just in case we needto use it */
2826	if (ctx->fallback_cipher) {
2827		flow_log("  running fallback setkey()\n");
2828
2829		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2830		ctx->fallback_cipher->base.crt_flags |=
2831		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2832		ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
2833		if (ret)
2834			flow_log("  fallback setkey() returned:%d\n", ret);
2835	}
2836
2837	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2838							  ctx->enckeylen,
2839							  false);
2840
2841	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2842
2843	return ret;
2844
2845badkey:
2846	ctx->enckeylen = 0;
2847	ctx->authkeylen = 0;
2848	ctx->digestsize = 0;
2849
2850	return -EINVAL;
2851}
2852
2853static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
2854			       const u8 *key, unsigned int keylen)
2855{
2856	struct spu_hw *spu = &iproc_priv.spu;
2857	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2858	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2859
2860	int ret = 0;
2861
2862	flow_log("%s() keylen:%u\n", __func__, keylen);
2863	flow_dump("  key: ", key, keylen);
2864
2865	if (!ctx->is_esp)
2866		ctx->digestsize = keylen;
2867
2868	ctx->enckeylen = keylen;
2869	ctx->authkeylen = 0;
2870
2871	switch (ctx->enckeylen) {
2872	case AES_KEYSIZE_128:
2873		ctx->cipher_type = CIPHER_TYPE_AES128;
2874		break;
2875	case AES_KEYSIZE_192:
2876		ctx->cipher_type = CIPHER_TYPE_AES192;
2877		break;
2878	case AES_KEYSIZE_256:
2879		ctx->cipher_type = CIPHER_TYPE_AES256;
2880		break;
2881	default:
2882		goto badkey;
2883	}
2884
2885	memcpy(ctx->enckey, key, ctx->enckeylen);
2886
2887	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2888		 ctx->authkeylen);
2889	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
2890	flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);
2891
2892	/* setkey the fallback just in case we need to use it */
2893	if (ctx->fallback_cipher) {
2894		flow_log("  running fallback setkey()\n");
2895
2896		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2897		ctx->fallback_cipher->base.crt_flags |=
2898		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2899		ret = crypto_aead_setkey(ctx->fallback_cipher, key,
2900					 keylen + ctx->salt_len);
2901		if (ret)
2902			flow_log("  fallback setkey() returned:%d\n", ret);
2903	}
2904
2905	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2906							  ctx->enckeylen,
2907							  false);
2908
2909	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2910
2911	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2912		 ctx->authkeylen);
2913
2914	return ret;
2915
2916badkey:
2917	ctx->enckeylen = 0;
2918	ctx->authkeylen = 0;
2919	ctx->digestsize = 0;
2920
2921	return -EINVAL;
2922}
2923
2924/**
2925 * aead_gcm_esp_setkey() - setkey() operation for ESP variant of GCM AES.
2926 * @cipher: AEAD structure
2927 * @key:    Key followed by 4 bytes of salt
2928 * @keylen: Length of key plus salt, in bytes
2929 *
2930 * Extracts salt from key and stores it to be prepended to IV on each request.
2931 * Digest is always 16 bytes
2932 *
2933 * Return: Value from generic gcm setkey.
2934 */
2935static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
2936			       const u8 *key, unsigned int keylen)
2937{
2938	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2939
2940	flow_log("%s\n", __func__);
2941
2942	if (keylen < GCM_ESP_SALT_SIZE)
2943		return -EINVAL;
2944
2945	ctx->salt_len = GCM_ESP_SALT_SIZE;
2946	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
2947	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
2948	keylen -= GCM_ESP_SALT_SIZE;
2949	ctx->digestsize = GCM_ESP_DIGESTSIZE;
2950	ctx->is_esp = true;
2951	flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
2952
2953	return aead_gcm_ccm_setkey(cipher, key, keylen);
2954}
2955
2956/**
2957 * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC.
2958 * cipher: AEAD structure
2959 * key:    Key followed by 4 bytes of salt
2960 * keylen: Length of key plus salt, in bytes
2961 *
2962 * Extracts salt from key and stores it to be prepended to IV on each request.
2963 * Digest is always 16 bytes
2964 *
2965 * Return: Value from generic gcm setkey.
2966 */
2967static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
2968				  const u8 *key, unsigned int keylen)
2969{
2970	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2971
2972	flow_log("%s\n", __func__);
2973
2974	if (keylen < GCM_ESP_SALT_SIZE)
2975		return -EINVAL;
2976
2977	ctx->salt_len = GCM_ESP_SALT_SIZE;
2978	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
2979	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
2980	keylen -= GCM_ESP_SALT_SIZE;
2981	ctx->digestsize = GCM_ESP_DIGESTSIZE;
2982	ctx->is_esp = true;
2983	ctx->is_rfc4543 = true;
2984	flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
2985
2986	return aead_gcm_ccm_setkey(cipher, key, keylen);
2987}
2988
2989/**
2990 * aead_ccm_esp_setkey() - setkey() operation for ESP variant of CCM AES.
2991 * @cipher: AEAD structure
2992 * @key:    Key followed by 4 bytes of salt
2993 * @keylen: Length of key plus salt, in bytes
2994 *
2995 * Extracts salt from key and stores it to be prepended to IV on each request.
2996 * Digest is always 16 bytes
2997 *
2998 * Return: Value from generic ccm setkey.
2999 */
3000static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
3001			       const u8 *key, unsigned int keylen)
3002{
3003	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3004
3005	flow_log("%s\n", __func__);
3006
3007	if (keylen < CCM_ESP_SALT_SIZE)
3008		return -EINVAL;
3009
3010	ctx->salt_len = CCM_ESP_SALT_SIZE;
3011	ctx->salt_offset = CCM_ESP_SALT_OFFSET;
3012	memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
3013	keylen -= CCM_ESP_SALT_SIZE;
3014	ctx->is_esp = true;
3015	flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE);
3016
3017	return aead_gcm_ccm_setkey(cipher, key, keylen);
3018}
3019
3020static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize)
3021{
3022	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3023	int ret = 0;
3024
3025	flow_log("%s() authkeylen:%u authsize:%u\n",
3026		 __func__, ctx->authkeylen, authsize);
3027
3028	ctx->digestsize = authsize;
3029
3030	/* setkey the fallback just in case we needto use it */
3031	if (ctx->fallback_cipher) {
3032		flow_log("  running fallback setauth()\n");
3033
3034		ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize);
3035		if (ret)
3036			flow_log("  fallback setauth() returned:%d\n", ret);
3037	}
3038
3039	return ret;
3040}
3041
3042static int aead_encrypt(struct aead_request *req)
3043{
3044	flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen,
3045		 req->cryptlen);
3046	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3047	flow_log("  assoc_len:%u\n", req->assoclen);
3048
3049	return aead_enqueue(req, true);
3050}
3051
3052static int aead_decrypt(struct aead_request *req)
3053{
3054	flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
3055	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3056	flow_log("  assoc_len:%u\n", req->assoclen);
3057
3058	return aead_enqueue(req, false);
3059}
3060
3061/* ==================== Supported Cipher Algorithms ==================== */
3062
3063static struct iproc_alg_s driver_algs[] = {
3064	{
3065	 .type = CRYPTO_ALG_TYPE_AEAD,
3066	 .alg.aead = {
3067		 .base = {
3068			.cra_name = "gcm(aes)",
3069			.cra_driver_name = "gcm-aes-iproc",
3070			.cra_blocksize = AES_BLOCK_SIZE,
3071			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3072		 },
3073		 .setkey = aead_gcm_ccm_setkey,
3074		 .ivsize = GCM_AES_IV_SIZE,
3075		.maxauthsize = AES_BLOCK_SIZE,
3076	 },
3077	 .cipher_info = {
3078			 .alg = CIPHER_ALG_AES,
3079			 .mode = CIPHER_MODE_GCM,
3080			 },
3081	 .auth_info = {
3082		       .alg = HASH_ALG_AES,
3083		       .mode = HASH_MODE_GCM,
3084		       },
3085	 .auth_first = 0,
3086	 },
3087	{
3088	 .type = CRYPTO_ALG_TYPE_AEAD,
3089	 .alg.aead = {
3090		 .base = {
3091			.cra_name = "ccm(aes)",
3092			.cra_driver_name = "ccm-aes-iproc",
3093			.cra_blocksize = AES_BLOCK_SIZE,
3094			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3095		 },
3096		 .setkey = aead_gcm_ccm_setkey,
3097		 .ivsize = CCM_AES_IV_SIZE,
3098		.maxauthsize = AES_BLOCK_SIZE,
3099	 },
3100	 .cipher_info = {
3101			 .alg = CIPHER_ALG_AES,
3102			 .mode = CIPHER_MODE_CCM,
3103			 },
3104	 .auth_info = {
3105		       .alg = HASH_ALG_AES,
3106		       .mode = HASH_MODE_CCM,
3107		       },
3108	 .auth_first = 0,
3109	 },
3110	{
3111	 .type = CRYPTO_ALG_TYPE_AEAD,
3112	 .alg.aead = {
3113		 .base = {
3114			.cra_name = "rfc4106(gcm(aes))",
3115			.cra_driver_name = "gcm-aes-esp-iproc",
3116			.cra_blocksize = AES_BLOCK_SIZE,
3117			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3118		 },
3119		 .setkey = aead_gcm_esp_setkey,
3120		 .ivsize = GCM_RFC4106_IV_SIZE,
3121		 .maxauthsize = AES_BLOCK_SIZE,
3122	 },
3123	 .cipher_info = {
3124			 .alg = CIPHER_ALG_AES,
3125			 .mode = CIPHER_MODE_GCM,
3126			 },
3127	 .auth_info = {
3128		       .alg = HASH_ALG_AES,
3129		       .mode = HASH_MODE_GCM,
3130		       },
3131	 .auth_first = 0,
3132	 },
3133	{
3134	 .type = CRYPTO_ALG_TYPE_AEAD,
3135	 .alg.aead = {
3136		 .base = {
3137			.cra_name = "rfc4309(ccm(aes))",
3138			.cra_driver_name = "ccm-aes-esp-iproc",
3139			.cra_blocksize = AES_BLOCK_SIZE,
3140			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3141		 },
3142		 .setkey = aead_ccm_esp_setkey,
3143		 .ivsize = CCM_AES_IV_SIZE,
3144		 .maxauthsize = AES_BLOCK_SIZE,
3145	 },
3146	 .cipher_info = {
3147			 .alg = CIPHER_ALG_AES,
3148			 .mode = CIPHER_MODE_CCM,
3149			 },
3150	 .auth_info = {
3151		       .alg = HASH_ALG_AES,
3152		       .mode = HASH_MODE_CCM,
3153		       },
3154	 .auth_first = 0,
3155	 },
3156	{
3157	 .type = CRYPTO_ALG_TYPE_AEAD,
3158	 .alg.aead = {
3159		 .base = {
3160			.cra_name = "rfc4543(gcm(aes))",
3161			.cra_driver_name = "gmac-aes-esp-iproc",
3162			.cra_blocksize = AES_BLOCK_SIZE,
3163			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3164		 },
3165		 .setkey = rfc4543_gcm_esp_setkey,
3166		 .ivsize = GCM_RFC4106_IV_SIZE,
3167		 .maxauthsize = AES_BLOCK_SIZE,
3168	 },
3169	 .cipher_info = {
3170			 .alg = CIPHER_ALG_AES,
3171			 .mode = CIPHER_MODE_GCM,
3172			 },
3173	 .auth_info = {
3174		       .alg = HASH_ALG_AES,
3175		       .mode = HASH_MODE_GCM,
3176		       },
3177	 .auth_first = 0,
3178	 },
3179	{
3180	 .type = CRYPTO_ALG_TYPE_AEAD,
3181	 .alg.aead = {
3182		 .base = {
3183			.cra_name = "authenc(hmac(md5),cbc(aes))",
3184			.cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
3185			.cra_blocksize = AES_BLOCK_SIZE,
3186			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3187				     CRYPTO_ALG_ASYNC |
3188				     CRYPTO_ALG_ALLOCATES_MEMORY
3189		 },
3190		 .setkey = aead_authenc_setkey,
3191		.ivsize = AES_BLOCK_SIZE,
3192		.maxauthsize = MD5_DIGEST_SIZE,
3193	 },
3194	 .cipher_info = {
3195			 .alg = CIPHER_ALG_AES,
3196			 .mode = CIPHER_MODE_CBC,
3197			 },
3198	 .auth_info = {
3199		       .alg = HASH_ALG_MD5,
3200		       .mode = HASH_MODE_HMAC,
3201		       },
3202	 .auth_first = 0,
3203	 },
3204	{
3205	 .type = CRYPTO_ALG_TYPE_AEAD,
3206	 .alg.aead = {
3207		 .base = {
3208			.cra_name = "authenc(hmac(sha1),cbc(aes))",
3209			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
3210			.cra_blocksize = AES_BLOCK_SIZE,
3211			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3212				     CRYPTO_ALG_ASYNC |
3213				     CRYPTO_ALG_ALLOCATES_MEMORY
3214		 },
3215		 .setkey = aead_authenc_setkey,
3216		 .ivsize = AES_BLOCK_SIZE,
3217		 .maxauthsize = SHA1_DIGEST_SIZE,
3218	 },
3219	 .cipher_info = {
3220			 .alg = CIPHER_ALG_AES,
3221			 .mode = CIPHER_MODE_CBC,
3222			 },
3223	 .auth_info = {
3224		       .alg = HASH_ALG_SHA1,
3225		       .mode = HASH_MODE_HMAC,
3226		       },
3227	 .auth_first = 0,
3228	 },
3229	{
3230	 .type = CRYPTO_ALG_TYPE_AEAD,
3231	 .alg.aead = {
3232		 .base = {
3233			.cra_name = "authenc(hmac(sha256),cbc(aes))",
3234			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
3235			.cra_blocksize = AES_BLOCK_SIZE,
3236			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3237				     CRYPTO_ALG_ASYNC |
3238				     CRYPTO_ALG_ALLOCATES_MEMORY
3239		 },
3240		 .setkey = aead_authenc_setkey,
3241		 .ivsize = AES_BLOCK_SIZE,
3242		 .maxauthsize = SHA256_DIGEST_SIZE,
3243	 },
3244	 .cipher_info = {
3245			 .alg = CIPHER_ALG_AES,
3246			 .mode = CIPHER_MODE_CBC,
3247			 },
3248	 .auth_info = {
3249		       .alg = HASH_ALG_SHA256,
3250		       .mode = HASH_MODE_HMAC,
3251		       },
3252	 .auth_first = 0,
3253	 },
3254	{
3255	 .type = CRYPTO_ALG_TYPE_AEAD,
3256	 .alg.aead = {
3257		 .base = {
3258			.cra_name = "authenc(hmac(md5),cbc(des))",
3259			.cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
3260			.cra_blocksize = DES_BLOCK_SIZE,
3261			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3262				     CRYPTO_ALG_ASYNC |
3263				     CRYPTO_ALG_ALLOCATES_MEMORY
3264		 },
3265		 .setkey = aead_authenc_setkey,
3266		 .ivsize = DES_BLOCK_SIZE,
3267		 .maxauthsize = MD5_DIGEST_SIZE,
3268	 },
3269	 .cipher_info = {
3270			 .alg = CIPHER_ALG_DES,
3271			 .mode = CIPHER_MODE_CBC,
3272			 },
3273	 .auth_info = {
3274		       .alg = HASH_ALG_MD5,
3275		       .mode = HASH_MODE_HMAC,
3276		       },
3277	 .auth_first = 0,
3278	 },
3279	{
3280	 .type = CRYPTO_ALG_TYPE_AEAD,
3281	 .alg.aead = {
3282		 .base = {
3283			.cra_name = "authenc(hmac(sha1),cbc(des))",
3284			.cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
3285			.cra_blocksize = DES_BLOCK_SIZE,
3286			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3287				     CRYPTO_ALG_ASYNC |
3288				     CRYPTO_ALG_ALLOCATES_MEMORY
3289		 },
3290		 .setkey = aead_authenc_setkey,
3291		 .ivsize = DES_BLOCK_SIZE,
3292		 .maxauthsize = SHA1_DIGEST_SIZE,
3293	 },
3294	 .cipher_info = {
3295			 .alg = CIPHER_ALG_DES,
3296			 .mode = CIPHER_MODE_CBC,
3297			 },
3298	 .auth_info = {
3299		       .alg = HASH_ALG_SHA1,
3300		       .mode = HASH_MODE_HMAC,
3301		       },
3302	 .auth_first = 0,
3303	 },
3304	{
3305	 .type = CRYPTO_ALG_TYPE_AEAD,
3306	 .alg.aead = {
3307		 .base = {
3308			.cra_name = "authenc(hmac(sha224),cbc(des))",
3309			.cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
3310			.cra_blocksize = DES_BLOCK_SIZE,
3311			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3312				     CRYPTO_ALG_ASYNC |
3313				     CRYPTO_ALG_ALLOCATES_MEMORY
3314		 },
3315		 .setkey = aead_authenc_setkey,
3316		 .ivsize = DES_BLOCK_SIZE,
3317		 .maxauthsize = SHA224_DIGEST_SIZE,
3318	 },
3319	 .cipher_info = {
3320			 .alg = CIPHER_ALG_DES,
3321			 .mode = CIPHER_MODE_CBC,
3322			 },
3323	 .auth_info = {
3324		       .alg = HASH_ALG_SHA224,
3325		       .mode = HASH_MODE_HMAC,
3326		       },
3327	 .auth_first = 0,
3328	 },
3329	{
3330	 .type = CRYPTO_ALG_TYPE_AEAD,
3331	 .alg.aead = {
3332		 .base = {
3333			.cra_name = "authenc(hmac(sha256),cbc(des))",
3334			.cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
3335			.cra_blocksize = DES_BLOCK_SIZE,
3336			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3337				     CRYPTO_ALG_ASYNC |
3338				     CRYPTO_ALG_ALLOCATES_MEMORY
3339		 },
3340		 .setkey = aead_authenc_setkey,
3341		 .ivsize = DES_BLOCK_SIZE,
3342		 .maxauthsize = SHA256_DIGEST_SIZE,
3343	 },
3344	 .cipher_info = {
3345			 .alg = CIPHER_ALG_DES,
3346			 .mode = CIPHER_MODE_CBC,
3347			 },
3348	 .auth_info = {
3349		       .alg = HASH_ALG_SHA256,
3350		       .mode = HASH_MODE_HMAC,
3351		       },
3352	 .auth_first = 0,
3353	 },
3354	{
3355	 .type = CRYPTO_ALG_TYPE_AEAD,
3356	 .alg.aead = {
3357		 .base = {
3358			.cra_name = "authenc(hmac(sha384),cbc(des))",
3359			.cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
3360			.cra_blocksize = DES_BLOCK_SIZE,
3361			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3362				     CRYPTO_ALG_ASYNC |
3363				     CRYPTO_ALG_ALLOCATES_MEMORY
3364		 },
3365		 .setkey = aead_authenc_setkey,
3366		 .ivsize = DES_BLOCK_SIZE,
3367		 .maxauthsize = SHA384_DIGEST_SIZE,
3368	 },
3369	 .cipher_info = {
3370			 .alg = CIPHER_ALG_DES,
3371			 .mode = CIPHER_MODE_CBC,
3372			 },
3373	 .auth_info = {
3374		       .alg = HASH_ALG_SHA384,
3375		       .mode = HASH_MODE_HMAC,
3376		       },
3377	 .auth_first = 0,
3378	 },
3379	{
3380	 .type = CRYPTO_ALG_TYPE_AEAD,
3381	 .alg.aead = {
3382		 .base = {
3383			.cra_name = "authenc(hmac(sha512),cbc(des))",
3384			.cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
3385			.cra_blocksize = DES_BLOCK_SIZE,
3386			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3387				     CRYPTO_ALG_ASYNC |
3388				     CRYPTO_ALG_ALLOCATES_MEMORY
3389		 },
3390		 .setkey = aead_authenc_setkey,
3391		 .ivsize = DES_BLOCK_SIZE,
3392		 .maxauthsize = SHA512_DIGEST_SIZE,
3393	 },
3394	 .cipher_info = {
3395			 .alg = CIPHER_ALG_DES,
3396			 .mode = CIPHER_MODE_CBC,
3397			 },
3398	 .auth_info = {
3399		       .alg = HASH_ALG_SHA512,
3400		       .mode = HASH_MODE_HMAC,
3401		       },
3402	 .auth_first = 0,
3403	 },
3404	{
3405	 .type = CRYPTO_ALG_TYPE_AEAD,
3406	 .alg.aead = {
3407		 .base = {
3408			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3409			.cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
3410			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3411			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3412				     CRYPTO_ALG_ASYNC |
3413				     CRYPTO_ALG_ALLOCATES_MEMORY
3414		 },
3415		 .setkey = aead_authenc_setkey,
3416		 .ivsize = DES3_EDE_BLOCK_SIZE,
3417		 .maxauthsize = MD5_DIGEST_SIZE,
3418	 },
3419	 .cipher_info = {
3420			 .alg = CIPHER_ALG_3DES,
3421			 .mode = CIPHER_MODE_CBC,
3422			 },
3423	 .auth_info = {
3424		       .alg = HASH_ALG_MD5,
3425		       .mode = HASH_MODE_HMAC,
3426		       },
3427	 .auth_first = 0,
3428	 },
3429	{
3430	 .type = CRYPTO_ALG_TYPE_AEAD,
3431	 .alg.aead = {
3432		 .base = {
3433			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
3434			.cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
3435			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3436			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3437				     CRYPTO_ALG_ASYNC |
3438				     CRYPTO_ALG_ALLOCATES_MEMORY
3439		 },
3440		 .setkey = aead_authenc_setkey,
3441		 .ivsize = DES3_EDE_BLOCK_SIZE,
3442		 .maxauthsize = SHA1_DIGEST_SIZE,
3443	 },
3444	 .cipher_info = {
3445			 .alg = CIPHER_ALG_3DES,
3446			 .mode = CIPHER_MODE_CBC,
3447			 },
3448	 .auth_info = {
3449		       .alg = HASH_ALG_SHA1,
3450		       .mode = HASH_MODE_HMAC,
3451		       },
3452	 .auth_first = 0,
3453	 },
3454	{
3455	 .type = CRYPTO_ALG_TYPE_AEAD,
3456	 .alg.aead = {
3457		 .base = {
3458			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
3459			.cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
3460			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3461			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3462				     CRYPTO_ALG_ASYNC |
3463				     CRYPTO_ALG_ALLOCATES_MEMORY
3464		 },
3465		 .setkey = aead_authenc_setkey,
3466		 .ivsize = DES3_EDE_BLOCK_SIZE,
3467		 .maxauthsize = SHA224_DIGEST_SIZE,
3468	 },
3469	 .cipher_info = {
3470			 .alg = CIPHER_ALG_3DES,
3471			 .mode = CIPHER_MODE_CBC,
3472			 },
3473	 .auth_info = {
3474		       .alg = HASH_ALG_SHA224,
3475		       .mode = HASH_MODE_HMAC,
3476		       },
3477	 .auth_first = 0,
3478	 },
3479	{
3480	 .type = CRYPTO_ALG_TYPE_AEAD,
3481	 .alg.aead = {
3482		 .base = {
3483			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
3484			.cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
3485			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3486			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3487				     CRYPTO_ALG_ASYNC |
3488				     CRYPTO_ALG_ALLOCATES_MEMORY
3489		 },
3490		 .setkey = aead_authenc_setkey,
3491		 .ivsize = DES3_EDE_BLOCK_SIZE,
3492		 .maxauthsize = SHA256_DIGEST_SIZE,
3493	 },
3494	 .cipher_info = {
3495			 .alg = CIPHER_ALG_3DES,
3496			 .mode = CIPHER_MODE_CBC,
3497			 },
3498	 .auth_info = {
3499		       .alg = HASH_ALG_SHA256,
3500		       .mode = HASH_MODE_HMAC,
3501		       },
3502	 .auth_first = 0,
3503	 },
3504	{
3505	 .type = CRYPTO_ALG_TYPE_AEAD,
3506	 .alg.aead = {
3507		 .base = {
3508			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
3509			.cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
3510			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3511			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3512				     CRYPTO_ALG_ASYNC |
3513				     CRYPTO_ALG_ALLOCATES_MEMORY
3514		 },
3515		 .setkey = aead_authenc_setkey,
3516		 .ivsize = DES3_EDE_BLOCK_SIZE,
3517		 .maxauthsize = SHA384_DIGEST_SIZE,
3518	 },
3519	 .cipher_info = {
3520			 .alg = CIPHER_ALG_3DES,
3521			 .mode = CIPHER_MODE_CBC,
3522			 },
3523	 .auth_info = {
3524		       .alg = HASH_ALG_SHA384,
3525		       .mode = HASH_MODE_HMAC,
3526		       },
3527	 .auth_first = 0,
3528	 },
3529	{
3530	 .type = CRYPTO_ALG_TYPE_AEAD,
3531	 .alg.aead = {
3532		 .base = {
3533			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
3534			.cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
3535			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3536			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3537				     CRYPTO_ALG_ASYNC |
3538				     CRYPTO_ALG_ALLOCATES_MEMORY
3539		 },
3540		 .setkey = aead_authenc_setkey,
3541		 .ivsize = DES3_EDE_BLOCK_SIZE,
3542		 .maxauthsize = SHA512_DIGEST_SIZE,
3543	 },
3544	 .cipher_info = {
3545			 .alg = CIPHER_ALG_3DES,
3546			 .mode = CIPHER_MODE_CBC,
3547			 },
3548	 .auth_info = {
3549		       .alg = HASH_ALG_SHA512,
3550		       .mode = HASH_MODE_HMAC,
3551		       },
3552	 .auth_first = 0,
3553	 },
3554
3555/* SKCIPHER algorithms. */
3556	{
3557	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3558	 .alg.skcipher = {
3559			.base.cra_name = "ofb(des)",
3560			.base.cra_driver_name = "ofb-des-iproc",
3561			.base.cra_blocksize = DES_BLOCK_SIZE,
3562			.min_keysize = DES_KEY_SIZE,
3563			.max_keysize = DES_KEY_SIZE,
3564			.ivsize = DES_BLOCK_SIZE,
3565			},
3566	 .cipher_info = {
3567			 .alg = CIPHER_ALG_DES,
3568			 .mode = CIPHER_MODE_OFB,
3569			 },
3570	 .auth_info = {
3571		       .alg = HASH_ALG_NONE,
3572		       .mode = HASH_MODE_NONE,
3573		       },
3574	 },
3575	{
3576	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3577	 .alg.skcipher = {
3578			.base.cra_name = "cbc(des)",
3579			.base.cra_driver_name = "cbc-des-iproc",
3580			.base.cra_blocksize = DES_BLOCK_SIZE,
3581			.min_keysize = DES_KEY_SIZE,
3582			.max_keysize = DES_KEY_SIZE,
3583			.ivsize = DES_BLOCK_SIZE,
3584			},
3585	 .cipher_info = {
3586			 .alg = CIPHER_ALG_DES,
3587			 .mode = CIPHER_MODE_CBC,
3588			 },
3589	 .auth_info = {
3590		       .alg = HASH_ALG_NONE,
3591		       .mode = HASH_MODE_NONE,
3592		       },
3593	 },
3594	{
3595	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3596	 .alg.skcipher = {
3597			.base.cra_name = "ecb(des)",
3598			.base.cra_driver_name = "ecb-des-iproc",
3599			.base.cra_blocksize = DES_BLOCK_SIZE,
3600			.min_keysize = DES_KEY_SIZE,
3601			.max_keysize = DES_KEY_SIZE,
3602			.ivsize = 0,
3603			},
3604	 .cipher_info = {
3605			 .alg = CIPHER_ALG_DES,
3606			 .mode = CIPHER_MODE_ECB,
3607			 },
3608	 .auth_info = {
3609		       .alg = HASH_ALG_NONE,
3610		       .mode = HASH_MODE_NONE,
3611		       },
3612	 },
3613	{
3614	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3615	 .alg.skcipher = {
3616			.base.cra_name = "ofb(des3_ede)",
3617			.base.cra_driver_name = "ofb-des3-iproc",
3618			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3619			.min_keysize = DES3_EDE_KEY_SIZE,
3620			.max_keysize = DES3_EDE_KEY_SIZE,
3621			.ivsize = DES3_EDE_BLOCK_SIZE,
3622			},
3623	 .cipher_info = {
3624			 .alg = CIPHER_ALG_3DES,
3625			 .mode = CIPHER_MODE_OFB,
3626			 },
3627	 .auth_info = {
3628		       .alg = HASH_ALG_NONE,
3629		       .mode = HASH_MODE_NONE,
3630		       },
3631	 },
3632	{
3633	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3634	 .alg.skcipher = {
3635			.base.cra_name = "cbc(des3_ede)",
3636			.base.cra_driver_name = "cbc-des3-iproc",
3637			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3638			.min_keysize = DES3_EDE_KEY_SIZE,
3639			.max_keysize = DES3_EDE_KEY_SIZE,
3640			.ivsize = DES3_EDE_BLOCK_SIZE,
3641			},
3642	 .cipher_info = {
3643			 .alg = CIPHER_ALG_3DES,
3644			 .mode = CIPHER_MODE_CBC,
3645			 },
3646	 .auth_info = {
3647		       .alg = HASH_ALG_NONE,
3648		       .mode = HASH_MODE_NONE,
3649		       },
3650	 },
3651	{
3652	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3653	 .alg.skcipher = {
3654			.base.cra_name = "ecb(des3_ede)",
3655			.base.cra_driver_name = "ecb-des3-iproc",
3656			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3657			.min_keysize = DES3_EDE_KEY_SIZE,
3658			.max_keysize = DES3_EDE_KEY_SIZE,
3659			.ivsize = 0,
3660			},
3661	 .cipher_info = {
3662			 .alg = CIPHER_ALG_3DES,
3663			 .mode = CIPHER_MODE_ECB,
3664			 },
3665	 .auth_info = {
3666		       .alg = HASH_ALG_NONE,
3667		       .mode = HASH_MODE_NONE,
3668		       },
3669	 },
3670	{
3671	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3672	 .alg.skcipher = {
3673			.base.cra_name = "ofb(aes)",
3674			.base.cra_driver_name = "ofb-aes-iproc",
3675			.base.cra_blocksize = AES_BLOCK_SIZE,
3676			.min_keysize = AES_MIN_KEY_SIZE,
3677			.max_keysize = AES_MAX_KEY_SIZE,
3678			.ivsize = AES_BLOCK_SIZE,
3679			},
3680	 .cipher_info = {
3681			 .alg = CIPHER_ALG_AES,
3682			 .mode = CIPHER_MODE_OFB,
3683			 },
3684	 .auth_info = {
3685		       .alg = HASH_ALG_NONE,
3686		       .mode = HASH_MODE_NONE,
3687		       },
3688	 },
3689	{
3690	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3691	 .alg.skcipher = {
3692			.base.cra_name = "cbc(aes)",
3693			.base.cra_driver_name = "cbc-aes-iproc",
3694			.base.cra_blocksize = AES_BLOCK_SIZE,
3695			.min_keysize = AES_MIN_KEY_SIZE,
3696			.max_keysize = AES_MAX_KEY_SIZE,
3697			.ivsize = AES_BLOCK_SIZE,
3698			},
3699	 .cipher_info = {
3700			 .alg = CIPHER_ALG_AES,
3701			 .mode = CIPHER_MODE_CBC,
3702			 },
3703	 .auth_info = {
3704		       .alg = HASH_ALG_NONE,
3705		       .mode = HASH_MODE_NONE,
3706		       },
3707	 },
3708	{
3709	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3710	 .alg.skcipher = {
3711			.base.cra_name = "ecb(aes)",
3712			.base.cra_driver_name = "ecb-aes-iproc",
3713			.base.cra_blocksize = AES_BLOCK_SIZE,
3714			.min_keysize = AES_MIN_KEY_SIZE,
3715			.max_keysize = AES_MAX_KEY_SIZE,
3716			.ivsize = 0,
3717			},
3718	 .cipher_info = {
3719			 .alg = CIPHER_ALG_AES,
3720			 .mode = CIPHER_MODE_ECB,
3721			 },
3722	 .auth_info = {
3723		       .alg = HASH_ALG_NONE,
3724		       .mode = HASH_MODE_NONE,
3725		       },
3726	 },
3727	{
3728	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3729	 .alg.skcipher = {
3730			.base.cra_name = "ctr(aes)",
3731			.base.cra_driver_name = "ctr-aes-iproc",
3732			.base.cra_blocksize = AES_BLOCK_SIZE,
3733			.min_keysize = AES_MIN_KEY_SIZE,
3734			.max_keysize = AES_MAX_KEY_SIZE,
3735			.ivsize = AES_BLOCK_SIZE,
3736			},
3737	 .cipher_info = {
3738			 .alg = CIPHER_ALG_AES,
3739			 .mode = CIPHER_MODE_CTR,
3740			 },
3741	 .auth_info = {
3742		       .alg = HASH_ALG_NONE,
3743		       .mode = HASH_MODE_NONE,
3744		       },
3745	 },
3746{
3747	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3748	 .alg.skcipher = {
3749			.base.cra_name = "xts(aes)",
3750			.base.cra_driver_name = "xts-aes-iproc",
3751			.base.cra_blocksize = AES_BLOCK_SIZE,
3752			.min_keysize = 2 * AES_MIN_KEY_SIZE,
3753			.max_keysize = 2 * AES_MAX_KEY_SIZE,
3754			.ivsize = AES_BLOCK_SIZE,
3755			},
3756	 .cipher_info = {
3757			 .alg = CIPHER_ALG_AES,
3758			 .mode = CIPHER_MODE_XTS,
3759			 },
3760	 .auth_info = {
3761		       .alg = HASH_ALG_NONE,
3762		       .mode = HASH_MODE_NONE,
3763		       },
3764	 },
3765
3766/* AHASH algorithms. */
3767	{
3768	 .type = CRYPTO_ALG_TYPE_AHASH,
3769	 .alg.hash = {
3770		      .halg.digestsize = MD5_DIGEST_SIZE,
3771		      .halg.base = {
3772				    .cra_name = "md5",
3773				    .cra_driver_name = "md5-iproc",
3774				    .cra_blocksize = MD5_BLOCK_WORDS * 4,
3775				    .cra_flags = CRYPTO_ALG_ASYNC |
3776						 CRYPTO_ALG_ALLOCATES_MEMORY,
3777				}
3778		      },
3779	 .cipher_info = {
3780			 .alg = CIPHER_ALG_NONE,
3781			 .mode = CIPHER_MODE_NONE,
3782			 },
3783	 .auth_info = {
3784		       .alg = HASH_ALG_MD5,
3785		       .mode = HASH_MODE_HASH,
3786		       },
3787	 },
3788	{
3789	 .type = CRYPTO_ALG_TYPE_AHASH,
3790	 .alg.hash = {
3791		      .halg.digestsize = MD5_DIGEST_SIZE,
3792		      .halg.base = {
3793				    .cra_name = "hmac(md5)",
3794				    .cra_driver_name = "hmac-md5-iproc",
3795				    .cra_blocksize = MD5_BLOCK_WORDS * 4,
3796				}
3797		      },
3798	 .cipher_info = {
3799			 .alg = CIPHER_ALG_NONE,
3800			 .mode = CIPHER_MODE_NONE,
3801			 },
3802	 .auth_info = {
3803		       .alg = HASH_ALG_MD5,
3804		       .mode = HASH_MODE_HMAC,
3805		       },
3806	 },
3807	{.type = CRYPTO_ALG_TYPE_AHASH,
3808	 .alg.hash = {
3809		      .halg.digestsize = SHA1_DIGEST_SIZE,
3810		      .halg.base = {
3811				    .cra_name = "sha1",
3812				    .cra_driver_name = "sha1-iproc",
3813				    .cra_blocksize = SHA1_BLOCK_SIZE,
3814				}
3815		      },
3816	 .cipher_info = {
3817			 .alg = CIPHER_ALG_NONE,
3818			 .mode = CIPHER_MODE_NONE,
3819			 },
3820	 .auth_info = {
3821		       .alg = HASH_ALG_SHA1,
3822		       .mode = HASH_MODE_HASH,
3823		       },
3824	 },
3825	{.type = CRYPTO_ALG_TYPE_AHASH,
3826	 .alg.hash = {
3827		      .halg.digestsize = SHA1_DIGEST_SIZE,
3828		      .halg.base = {
3829				    .cra_name = "hmac(sha1)",
3830				    .cra_driver_name = "hmac-sha1-iproc",
3831				    .cra_blocksize = SHA1_BLOCK_SIZE,
3832				}
3833		      },
3834	 .cipher_info = {
3835			 .alg = CIPHER_ALG_NONE,
3836			 .mode = CIPHER_MODE_NONE,
3837			 },
3838	 .auth_info = {
3839		       .alg = HASH_ALG_SHA1,
3840		       .mode = HASH_MODE_HMAC,
3841		       },
3842	 },
3843	{.type = CRYPTO_ALG_TYPE_AHASH,
3844	 .alg.hash = {
3845			.halg.digestsize = SHA224_DIGEST_SIZE,
3846			.halg.base = {
3847				    .cra_name = "sha224",
3848				    .cra_driver_name = "sha224-iproc",
3849				    .cra_blocksize = SHA224_BLOCK_SIZE,
3850			}
3851		      },
3852	 .cipher_info = {
3853			 .alg = CIPHER_ALG_NONE,
3854			 .mode = CIPHER_MODE_NONE,
3855			 },
3856	 .auth_info = {
3857		       .alg = HASH_ALG_SHA224,
3858		       .mode = HASH_MODE_HASH,
3859		       },
3860	 },
3861	{.type = CRYPTO_ALG_TYPE_AHASH,
3862	 .alg.hash = {
3863		      .halg.digestsize = SHA224_DIGEST_SIZE,
3864		      .halg.base = {
3865				    .cra_name = "hmac(sha224)",
3866				    .cra_driver_name = "hmac-sha224-iproc",
3867				    .cra_blocksize = SHA224_BLOCK_SIZE,
3868				}
3869		      },
3870	 .cipher_info = {
3871			 .alg = CIPHER_ALG_NONE,
3872			 .mode = CIPHER_MODE_NONE,
3873			 },
3874	 .auth_info = {
3875		       .alg = HASH_ALG_SHA224,
3876		       .mode = HASH_MODE_HMAC,
3877		       },
3878	 },
3879	{.type = CRYPTO_ALG_TYPE_AHASH,
3880	 .alg.hash = {
3881		      .halg.digestsize = SHA256_DIGEST_SIZE,
3882		      .halg.base = {
3883				    .cra_name = "sha256",
3884				    .cra_driver_name = "sha256-iproc",
3885				    .cra_blocksize = SHA256_BLOCK_SIZE,
3886				}
3887		      },
3888	 .cipher_info = {
3889			 .alg = CIPHER_ALG_NONE,
3890			 .mode = CIPHER_MODE_NONE,
3891			 },
3892	 .auth_info = {
3893		       .alg = HASH_ALG_SHA256,
3894		       .mode = HASH_MODE_HASH,
3895		       },
3896	 },
3897	{.type = CRYPTO_ALG_TYPE_AHASH,
3898	 .alg.hash = {
3899		      .halg.digestsize = SHA256_DIGEST_SIZE,
3900		      .halg.base = {
3901				    .cra_name = "hmac(sha256)",
3902				    .cra_driver_name = "hmac-sha256-iproc",
3903				    .cra_blocksize = SHA256_BLOCK_SIZE,
3904				}
3905		      },
3906	 .cipher_info = {
3907			 .alg = CIPHER_ALG_NONE,
3908			 .mode = CIPHER_MODE_NONE,
3909			 },
3910	 .auth_info = {
3911		       .alg = HASH_ALG_SHA256,
3912		       .mode = HASH_MODE_HMAC,
3913		       },
3914	 },
3915	{
3916	.type = CRYPTO_ALG_TYPE_AHASH,
3917	 .alg.hash = {
3918		      .halg.digestsize = SHA384_DIGEST_SIZE,
3919		      .halg.base = {
3920				    .cra_name = "sha384",
3921				    .cra_driver_name = "sha384-iproc",
3922				    .cra_blocksize = SHA384_BLOCK_SIZE,
3923				}
3924		      },
3925	 .cipher_info = {
3926			 .alg = CIPHER_ALG_NONE,
3927			 .mode = CIPHER_MODE_NONE,
3928			 },
3929	 .auth_info = {
3930		       .alg = HASH_ALG_SHA384,
3931		       .mode = HASH_MODE_HASH,
3932		       },
3933	 },
3934	{
3935	 .type = CRYPTO_ALG_TYPE_AHASH,
3936	 .alg.hash = {
3937		      .halg.digestsize = SHA384_DIGEST_SIZE,
3938		      .halg.base = {
3939				    .cra_name = "hmac(sha384)",
3940				    .cra_driver_name = "hmac-sha384-iproc",
3941				    .cra_blocksize = SHA384_BLOCK_SIZE,
3942				}
3943		      },
3944	 .cipher_info = {
3945			 .alg = CIPHER_ALG_NONE,
3946			 .mode = CIPHER_MODE_NONE,
3947			 },
3948	 .auth_info = {
3949		       .alg = HASH_ALG_SHA384,
3950		       .mode = HASH_MODE_HMAC,
3951		       },
3952	 },
3953	{
3954	 .type = CRYPTO_ALG_TYPE_AHASH,
3955	 .alg.hash = {
3956		      .halg.digestsize = SHA512_DIGEST_SIZE,
3957		      .halg.base = {
3958				    .cra_name = "sha512",
3959				    .cra_driver_name = "sha512-iproc",
3960				    .cra_blocksize = SHA512_BLOCK_SIZE,
3961				}
3962		      },
3963	 .cipher_info = {
3964			 .alg = CIPHER_ALG_NONE,
3965			 .mode = CIPHER_MODE_NONE,
3966			 },
3967	 .auth_info = {
3968		       .alg = HASH_ALG_SHA512,
3969		       .mode = HASH_MODE_HASH,
3970		       },
3971	 },
3972	{
3973	 .type = CRYPTO_ALG_TYPE_AHASH,
3974	 .alg.hash = {
3975		      .halg.digestsize = SHA512_DIGEST_SIZE,
3976		      .halg.base = {
3977				    .cra_name = "hmac(sha512)",
3978				    .cra_driver_name = "hmac-sha512-iproc",
3979				    .cra_blocksize = SHA512_BLOCK_SIZE,
3980				}
3981		      },
3982	 .cipher_info = {
3983			 .alg = CIPHER_ALG_NONE,
3984			 .mode = CIPHER_MODE_NONE,
3985			 },
3986	 .auth_info = {
3987		       .alg = HASH_ALG_SHA512,
3988		       .mode = HASH_MODE_HMAC,
3989		       },
3990	 },
3991	{
3992	 .type = CRYPTO_ALG_TYPE_AHASH,
3993	 .alg.hash = {
3994		      .halg.digestsize = SHA3_224_DIGEST_SIZE,
3995		      .halg.base = {
3996				    .cra_name = "sha3-224",
3997				    .cra_driver_name = "sha3-224-iproc",
3998				    .cra_blocksize = SHA3_224_BLOCK_SIZE,
3999				}
4000		      },
4001	 .cipher_info = {
4002			 .alg = CIPHER_ALG_NONE,
4003			 .mode = CIPHER_MODE_NONE,
4004			 },
4005	 .auth_info = {
4006		       .alg = HASH_ALG_SHA3_224,
4007		       .mode = HASH_MODE_HASH,
4008		       },
4009	 },
4010	{
4011	 .type = CRYPTO_ALG_TYPE_AHASH,
4012	 .alg.hash = {
4013		      .halg.digestsize = SHA3_224_DIGEST_SIZE,
4014		      .halg.base = {
4015				    .cra_name = "hmac(sha3-224)",
4016				    .cra_driver_name = "hmac-sha3-224-iproc",
4017				    .cra_blocksize = SHA3_224_BLOCK_SIZE,
4018				}
4019		      },
4020	 .cipher_info = {
4021			 .alg = CIPHER_ALG_NONE,
4022			 .mode = CIPHER_MODE_NONE,
4023			 },
4024	 .auth_info = {
4025		       .alg = HASH_ALG_SHA3_224,
4026		       .mode = HASH_MODE_HMAC
4027		       },
4028	 },
4029	{
4030	 .type = CRYPTO_ALG_TYPE_AHASH,
4031	 .alg.hash = {
4032		      .halg.digestsize = SHA3_256_DIGEST_SIZE,
4033		      .halg.base = {
4034				    .cra_name = "sha3-256",
4035				    .cra_driver_name = "sha3-256-iproc",
4036				    .cra_blocksize = SHA3_256_BLOCK_SIZE,
4037				}
4038		      },
4039	 .cipher_info = {
4040			 .alg = CIPHER_ALG_NONE,
4041			 .mode = CIPHER_MODE_NONE,
4042			 },
4043	 .auth_info = {
4044		       .alg = HASH_ALG_SHA3_256,
4045		       .mode = HASH_MODE_HASH,
4046		       },
4047	 },
4048	{
4049	 .type = CRYPTO_ALG_TYPE_AHASH,
4050	 .alg.hash = {
4051		      .halg.digestsize = SHA3_256_DIGEST_SIZE,
4052		      .halg.base = {
4053				    .cra_name = "hmac(sha3-256)",
4054				    .cra_driver_name = "hmac-sha3-256-iproc",
4055				    .cra_blocksize = SHA3_256_BLOCK_SIZE,
4056				}
4057		      },
4058	 .cipher_info = {
4059			 .alg = CIPHER_ALG_NONE,
4060			 .mode = CIPHER_MODE_NONE,
4061			 },
4062	 .auth_info = {
4063		       .alg = HASH_ALG_SHA3_256,
4064		       .mode = HASH_MODE_HMAC,
4065		       },
4066	 },
4067	{
4068	 .type = CRYPTO_ALG_TYPE_AHASH,
4069	 .alg.hash = {
4070		      .halg.digestsize = SHA3_384_DIGEST_SIZE,
4071		      .halg.base = {
4072				    .cra_name = "sha3-384",
4073				    .cra_driver_name = "sha3-384-iproc",
4074				    .cra_blocksize = SHA3_224_BLOCK_SIZE,
4075				}
4076		      },
4077	 .cipher_info = {
4078			 .alg = CIPHER_ALG_NONE,
4079			 .mode = CIPHER_MODE_NONE,
4080			 },
4081	 .auth_info = {
4082		       .alg = HASH_ALG_SHA3_384,
4083		       .mode = HASH_MODE_HASH,
4084		       },
4085	 },
4086	{
4087	 .type = CRYPTO_ALG_TYPE_AHASH,
4088	 .alg.hash = {
4089		      .halg.digestsize = SHA3_384_DIGEST_SIZE,
4090		      .halg.base = {
4091				    .cra_name = "hmac(sha3-384)",
4092				    .cra_driver_name = "hmac-sha3-384-iproc",
4093				    .cra_blocksize = SHA3_384_BLOCK_SIZE,
4094				}
4095		      },
4096	 .cipher_info = {
4097			 .alg = CIPHER_ALG_NONE,
4098			 .mode = CIPHER_MODE_NONE,
4099			 },
4100	 .auth_info = {
4101		       .alg = HASH_ALG_SHA3_384,
4102		       .mode = HASH_MODE_HMAC,
4103		       },
4104	 },
4105	{
4106	 .type = CRYPTO_ALG_TYPE_AHASH,
4107	 .alg.hash = {
4108		      .halg.digestsize = SHA3_512_DIGEST_SIZE,
4109		      .halg.base = {
4110				    .cra_name = "sha3-512",
4111				    .cra_driver_name = "sha3-512-iproc",
4112				    .cra_blocksize = SHA3_512_BLOCK_SIZE,
4113				}
4114		      },
4115	 .cipher_info = {
4116			 .alg = CIPHER_ALG_NONE,
4117			 .mode = CIPHER_MODE_NONE,
4118			 },
4119	 .auth_info = {
4120		       .alg = HASH_ALG_SHA3_512,
4121		       .mode = HASH_MODE_HASH,
4122		       },
4123	 },
4124	{
4125	 .type = CRYPTO_ALG_TYPE_AHASH,
4126	 .alg.hash = {
4127		      .halg.digestsize = SHA3_512_DIGEST_SIZE,
4128		      .halg.base = {
4129				    .cra_name = "hmac(sha3-512)",
4130				    .cra_driver_name = "hmac-sha3-512-iproc",
4131				    .cra_blocksize = SHA3_512_BLOCK_SIZE,
4132				}
4133		      },
4134	 .cipher_info = {
4135			 .alg = CIPHER_ALG_NONE,
4136			 .mode = CIPHER_MODE_NONE,
4137			 },
4138	 .auth_info = {
4139		       .alg = HASH_ALG_SHA3_512,
4140		       .mode = HASH_MODE_HMAC,
4141		       },
4142	 },
4143	{
4144	 .type = CRYPTO_ALG_TYPE_AHASH,
4145	 .alg.hash = {
4146		      .halg.digestsize = AES_BLOCK_SIZE,
4147		      .halg.base = {
4148				    .cra_name = "xcbc(aes)",
4149				    .cra_driver_name = "xcbc-aes-iproc",
4150				    .cra_blocksize = AES_BLOCK_SIZE,
4151				}
4152		      },
4153	 .cipher_info = {
4154			 .alg = CIPHER_ALG_NONE,
4155			 .mode = CIPHER_MODE_NONE,
4156			 },
4157	 .auth_info = {
4158		       .alg = HASH_ALG_AES,
4159		       .mode = HASH_MODE_XCBC,
4160		       },
4161	 },
4162	{
4163	 .type = CRYPTO_ALG_TYPE_AHASH,
4164	 .alg.hash = {
4165		      .halg.digestsize = AES_BLOCK_SIZE,
4166		      .halg.base = {
4167				    .cra_name = "cmac(aes)",
4168				    .cra_driver_name = "cmac-aes-iproc",
4169				    .cra_blocksize = AES_BLOCK_SIZE,
4170				}
4171		      },
4172	 .cipher_info = {
4173			 .alg = CIPHER_ALG_NONE,
4174			 .mode = CIPHER_MODE_NONE,
4175			 },
4176	 .auth_info = {
4177		       .alg = HASH_ALG_AES,
4178		       .mode = HASH_MODE_CMAC,
4179		       },
4180	 },
4181};
4182
4183static int generic_cra_init(struct crypto_tfm *tfm,
4184			    struct iproc_alg_s *cipher_alg)
4185{
4186	struct spu_hw *spu = &iproc_priv.spu;
4187	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4188	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
4189
4190	flow_log("%s()\n", __func__);
4191
4192	ctx->alg = cipher_alg;
4193	ctx->cipher = cipher_alg->cipher_info;
4194	ctx->auth = cipher_alg->auth_info;
4195	ctx->auth_first = cipher_alg->auth_first;
4196	ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
4197						    ctx->cipher.mode,
4198						    blocksize);
4199	ctx->fallback_cipher = NULL;
4200
4201	ctx->enckeylen = 0;
4202	ctx->authkeylen = 0;
4203
4204	atomic_inc(&iproc_priv.stream_count);
4205	atomic_inc(&iproc_priv.session_count);
4206
4207	return 0;
4208}
4209
4210static int skcipher_init_tfm(struct crypto_skcipher *skcipher)
4211{
4212	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
4213	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
4214	struct iproc_alg_s *cipher_alg;
4215
4216	flow_log("%s()\n", __func__);
4217
4218	crypto_skcipher_set_reqsize(skcipher, sizeof(struct iproc_reqctx_s));
4219
4220	cipher_alg = container_of(alg, struct iproc_alg_s, alg.skcipher);
4221	return generic_cra_init(tfm, cipher_alg);
4222}
4223
4224static int ahash_cra_init(struct crypto_tfm *tfm)
4225{
4226	int err;
4227	struct crypto_alg *alg = tfm->__crt_alg;
4228	struct iproc_alg_s *cipher_alg;
4229
4230	cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s,
4231				  alg.hash);
4232
4233	err = generic_cra_init(tfm, cipher_alg);
4234	flow_log("%s()\n", __func__);
4235
4236	/*
4237	 * export state size has to be < 512 bytes. So don't include msg bufs
4238	 * in state size.
4239	 */
4240	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4241				 sizeof(struct iproc_reqctx_s));
4242
4243	return err;
4244}
4245
4246static int aead_cra_init(struct crypto_aead *aead)
4247{
4248	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4249	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4250	struct crypto_alg *alg = tfm->__crt_alg;
4251	struct aead_alg *aalg = container_of(alg, struct aead_alg, base);
4252	struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s,
4253						      alg.aead);
4254
4255	int err = generic_cra_init(tfm, cipher_alg);
4256
4257	flow_log("%s()\n", __func__);
4258
4259	crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
4260	ctx->is_esp = false;
4261	ctx->salt_len = 0;
4262	ctx->salt_offset = 0;
4263
4264	/* random first IV */
4265	get_random_bytes(ctx->iv, MAX_IV_SIZE);
4266	flow_dump("  iv: ", ctx->iv, MAX_IV_SIZE);
4267
4268	if (!err) {
4269		if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
4270			flow_log("%s() creating fallback cipher\n", __func__);
4271
4272			ctx->fallback_cipher =
4273			    crypto_alloc_aead(alg->cra_name, 0,
4274					      CRYPTO_ALG_ASYNC |
4275					      CRYPTO_ALG_NEED_FALLBACK);
4276			if (IS_ERR(ctx->fallback_cipher)) {
4277				pr_err("%s() Error: failed to allocate fallback for %s\n",
4278				       __func__, alg->cra_name);
4279				return PTR_ERR(ctx->fallback_cipher);
4280			}
4281		}
4282	}
4283
4284	return err;
4285}
4286
4287static void generic_cra_exit(struct crypto_tfm *tfm)
4288{
4289	atomic_dec(&iproc_priv.session_count);
4290}
4291
4292static void skcipher_exit_tfm(struct crypto_skcipher *tfm)
4293{
4294	generic_cra_exit(crypto_skcipher_tfm(tfm));
4295}
4296
4297static void aead_cra_exit(struct crypto_aead *aead)
4298{
4299	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4300	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4301
4302	generic_cra_exit(tfm);
4303
4304	if (ctx->fallback_cipher) {
4305		crypto_free_aead(ctx->fallback_cipher);
4306		ctx->fallback_cipher = NULL;
4307	}
4308}
4309
4310/**
4311 * spu_functions_register() - Specify hardware-specific SPU functions based on
4312 * SPU type read from device tree.
4313 * @dev:	device structure
4314 * @spu_type:	SPU hardware generation
4315 * @spu_subtype: SPU hardware version
4316 */
4317static void spu_functions_register(struct device *dev,
4318				   enum spu_spu_type spu_type,
4319				   enum spu_spu_subtype spu_subtype)
4320{
4321	struct spu_hw *spu = &iproc_priv.spu;
4322
4323	if (spu_type == SPU_TYPE_SPUM) {
4324		dev_dbg(dev, "Registering SPUM functions");
4325		spu->spu_dump_msg_hdr = spum_dump_msg_hdr;
4326		spu->spu_payload_length = spum_payload_length;
4327		spu->spu_response_hdr_len = spum_response_hdr_len;
4328		spu->spu_hash_pad_len = spum_hash_pad_len;
4329		spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len;
4330		spu->spu_assoc_resp_len = spum_assoc_resp_len;
4331		spu->spu_aead_ivlen = spum_aead_ivlen;
4332		spu->spu_hash_type = spum_hash_type;
4333		spu->spu_digest_size = spum_digest_size;
4334		spu->spu_create_request = spum_create_request;
4335		spu->spu_cipher_req_init = spum_cipher_req_init;
4336		spu->spu_cipher_req_finish = spum_cipher_req_finish;
4337		spu->spu_request_pad = spum_request_pad;
4338		spu->spu_tx_status_len = spum_tx_status_len;
4339		spu->spu_rx_status_len = spum_rx_status_len;
4340		spu->spu_status_process = spum_status_process;
4341		spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload;
4342		spu->spu_ccm_update_iv = spum_ccm_update_iv;
4343		spu->spu_wordalign_padlen = spum_wordalign_padlen;
4344		if (spu_subtype == SPU_SUBTYPE_SPUM_NS2)
4345			spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload;
4346		else
4347			spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload;
4348	} else {
4349		dev_dbg(dev, "Registering SPU2 functions");
4350		spu->spu_dump_msg_hdr = spu2_dump_msg_hdr;
4351		spu->spu_ctx_max_payload = spu2_ctx_max_payload;
4352		spu->spu_payload_length = spu2_payload_length;
4353		spu->spu_response_hdr_len = spu2_response_hdr_len;
4354		spu->spu_hash_pad_len = spu2_hash_pad_len;
4355		spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len;
4356		spu->spu_assoc_resp_len = spu2_assoc_resp_len;
4357		spu->spu_aead_ivlen = spu2_aead_ivlen;
4358		spu->spu_hash_type = spu2_hash_type;
4359		spu->spu_digest_size = spu2_digest_size;
4360		spu->spu_create_request = spu2_create_request;
4361		spu->spu_cipher_req_init = spu2_cipher_req_init;
4362		spu->spu_cipher_req_finish = spu2_cipher_req_finish;
4363		spu->spu_request_pad = spu2_request_pad;
4364		spu->spu_tx_status_len = spu2_tx_status_len;
4365		spu->spu_rx_status_len = spu2_rx_status_len;
4366		spu->spu_status_process = spu2_status_process;
4367		spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload;
4368		spu->spu_ccm_update_iv = spu2_ccm_update_iv;
4369		spu->spu_wordalign_padlen = spu2_wordalign_padlen;
4370	}
4371}
4372
4373/**
4374 * spu_mb_init() - Initialize mailbox client. Request ownership of a mailbox
4375 * channel for the SPU being probed.
4376 * @dev:  SPU driver device structure
4377 *
4378 * Return: 0 if successful
4379 *	   < 0 otherwise
4380 */
4381static int spu_mb_init(struct device *dev)
4382{
4383	struct mbox_client *mcl = &iproc_priv.mcl;
4384	int err, i;
4385
4386	iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
4387				  sizeof(struct mbox_chan *), GFP_KERNEL);
4388	if (!iproc_priv.mbox)
4389		return -ENOMEM;
4390
4391	mcl->dev = dev;
4392	mcl->tx_block = false;
4393	mcl->tx_tout = 0;
4394	mcl->knows_txdone = true;
4395	mcl->rx_callback = spu_rx_callback;
4396	mcl->tx_done = NULL;
4397
4398	for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4399		iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
4400		if (IS_ERR(iproc_priv.mbox[i])) {
4401			err = PTR_ERR(iproc_priv.mbox[i]);
4402			dev_err(dev,
4403				"Mbox channel %d request failed with err %d",
4404				i, err);
4405			iproc_priv.mbox[i] = NULL;
4406			goto free_channels;
4407		}
4408	}
4409
4410	return 0;
4411free_channels:
4412	for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4413		if (iproc_priv.mbox[i])
4414			mbox_free_channel(iproc_priv.mbox[i]);
4415	}
4416
4417	return err;
4418}
4419
4420static void spu_mb_release(struct platform_device *pdev)
4421{
4422	int i;
4423
4424	for (i = 0; i < iproc_priv.spu.num_chan; i++)
4425		mbox_free_channel(iproc_priv.mbox[i]);
4426}
4427
4428static void spu_counters_init(void)
4429{
4430	int i;
4431	int j;
4432
4433	atomic_set(&iproc_priv.session_count, 0);
4434	atomic_set(&iproc_priv.stream_count, 0);
4435	atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
4436	atomic64_set(&iproc_priv.bytes_in, 0);
4437	atomic64_set(&iproc_priv.bytes_out, 0);
4438	for (i = 0; i < SPU_OP_NUM; i++) {
4439		atomic_set(&iproc_priv.op_counts[i], 0);
4440		atomic_set(&iproc_priv.setkey_cnt[i], 0);
4441	}
4442	for (i = 0; i < CIPHER_ALG_LAST; i++)
4443		for (j = 0; j < CIPHER_MODE_LAST; j++)
4444			atomic_set(&iproc_priv.cipher_cnt[i][j], 0);
4445
4446	for (i = 0; i < HASH_ALG_LAST; i++) {
4447		atomic_set(&iproc_priv.hash_cnt[i], 0);
4448		atomic_set(&iproc_priv.hmac_cnt[i], 0);
4449	}
4450	for (i = 0; i < AEAD_TYPE_LAST; i++)
4451		atomic_set(&iproc_priv.aead_cnt[i], 0);
4452
4453	atomic_set(&iproc_priv.mb_no_spc, 0);
4454	atomic_set(&iproc_priv.mb_send_fail, 0);
4455	atomic_set(&iproc_priv.bad_icv, 0);
4456}
4457
4458static int spu_register_skcipher(struct iproc_alg_s *driver_alg)
4459{
4460	struct skcipher_alg *crypto = &driver_alg->alg.skcipher;
4461	int err;
4462
4463	crypto->base.cra_module = THIS_MODULE;
4464	crypto->base.cra_priority = cipher_pri;
4465	crypto->base.cra_alignmask = 0;
4466	crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4467	crypto->base.cra_flags = CRYPTO_ALG_ASYNC |
4468				 CRYPTO_ALG_ALLOCATES_MEMORY |
4469				 CRYPTO_ALG_KERN_DRIVER_ONLY;
4470
4471	crypto->init = skcipher_init_tfm;
4472	crypto->exit = skcipher_exit_tfm;
4473	crypto->setkey = skcipher_setkey;
4474	crypto->encrypt = skcipher_encrypt;
4475	crypto->decrypt = skcipher_decrypt;
4476
4477	err = crypto_register_skcipher(crypto);
4478	/* Mark alg as having been registered, if successful */
4479	if (err == 0)
4480		driver_alg->registered = true;
4481	pr_debug("  registered skcipher %s\n", crypto->base.cra_driver_name);
4482	return err;
4483}
4484
4485static int spu_register_ahash(struct iproc_alg_s *driver_alg)
4486{
4487	struct spu_hw *spu = &iproc_priv.spu;
4488	struct ahash_alg *hash = &driver_alg->alg.hash;
4489	int err;
4490
4491	/* AES-XCBC is the only AES hash type currently supported on SPU-M */
4492	if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4493	    (driver_alg->auth_info.mode != HASH_MODE_XCBC) &&
4494	    (spu->spu_type == SPU_TYPE_SPUM))
4495		return 0;
4496
4497	/* SHA3 algorithm variants are not registered for SPU-M or SPU2. */
4498	if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) &&
4499	    (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2))
4500		return 0;
4501
4502	hash->halg.base.cra_module = THIS_MODULE;
4503	hash->halg.base.cra_priority = hash_pri;
4504	hash->halg.base.cra_alignmask = 0;
4505	hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4506	hash->halg.base.cra_init = ahash_cra_init;
4507	hash->halg.base.cra_exit = generic_cra_exit;
4508	hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
4509				    CRYPTO_ALG_ALLOCATES_MEMORY;
4510	hash->halg.statesize = sizeof(struct spu_hash_export_s);
4511
4512	if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
4513		hash->init = ahash_init;
4514		hash->update = ahash_update;
4515		hash->final = ahash_final;
4516		hash->finup = ahash_finup;
4517		hash->digest = ahash_digest;
4518		if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4519		    ((driver_alg->auth_info.mode == HASH_MODE_XCBC) ||
4520		    (driver_alg->auth_info.mode == HASH_MODE_CMAC))) {
4521			hash->setkey = ahash_setkey;
4522		}
4523	} else {
4524		hash->setkey = ahash_hmac_setkey;
4525		hash->init = ahash_hmac_init;
4526		hash->update = ahash_hmac_update;
4527		hash->final = ahash_hmac_final;
4528		hash->finup = ahash_hmac_finup;
4529		hash->digest = ahash_hmac_digest;
4530	}
4531	hash->export = ahash_export;
4532	hash->import = ahash_import;
4533
4534	err = crypto_register_ahash(hash);
4535	/* Mark alg as having been registered, if successful */
4536	if (err == 0)
4537		driver_alg->registered = true;
4538	pr_debug("  registered ahash %s\n",
4539		 hash->halg.base.cra_driver_name);
4540	return err;
4541}
4542
4543static int spu_register_aead(struct iproc_alg_s *driver_alg)
4544{
4545	struct aead_alg *aead = &driver_alg->alg.aead;
4546	int err;
4547
4548	aead->base.cra_module = THIS_MODULE;
4549	aead->base.cra_priority = aead_pri;
4550	aead->base.cra_alignmask = 0;
4551	aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4552
4553	aead->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4554	/* setkey set in alg initialization */
4555	aead->setauthsize = aead_setauthsize;
4556	aead->encrypt = aead_encrypt;
4557	aead->decrypt = aead_decrypt;
4558	aead->init = aead_cra_init;
4559	aead->exit = aead_cra_exit;
4560
4561	err = crypto_register_aead(aead);
4562	/* Mark alg as having been registered, if successful */
4563	if (err == 0)
4564		driver_alg->registered = true;
4565	pr_debug("  registered aead %s\n", aead->base.cra_driver_name);
4566	return err;
4567}
4568
4569/* register crypto algorithms the device supports */
4570static int spu_algs_register(struct device *dev)
4571{
4572	int i, j;
4573	int err;
4574
4575	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4576		switch (driver_algs[i].type) {
4577		case CRYPTO_ALG_TYPE_SKCIPHER:
4578			err = spu_register_skcipher(&driver_algs[i]);
4579			break;
4580		case CRYPTO_ALG_TYPE_AHASH:
4581			err = spu_register_ahash(&driver_algs[i]);
4582			break;
4583		case CRYPTO_ALG_TYPE_AEAD:
4584			err = spu_register_aead(&driver_algs[i]);
4585			break;
4586		default:
4587			dev_err(dev,
4588				"iproc-crypto: unknown alg type: %d",
4589				driver_algs[i].type);
4590			err = -EINVAL;
4591		}
4592
4593		if (err) {
4594			dev_err(dev, "alg registration failed with error %d\n",
4595				err);
4596			goto err_algs;
4597		}
4598	}
4599
4600	return 0;
4601
4602err_algs:
4603	for (j = 0; j < i; j++) {
4604		/* Skip any algorithm not registered */
4605		if (!driver_algs[j].registered)
4606			continue;
4607		switch (driver_algs[j].type) {
4608		case CRYPTO_ALG_TYPE_SKCIPHER:
4609			crypto_unregister_skcipher(&driver_algs[j].alg.skcipher);
4610			driver_algs[j].registered = false;
4611			break;
4612		case CRYPTO_ALG_TYPE_AHASH:
4613			crypto_unregister_ahash(&driver_algs[j].alg.hash);
4614			driver_algs[j].registered = false;
4615			break;
4616		case CRYPTO_ALG_TYPE_AEAD:
4617			crypto_unregister_aead(&driver_algs[j].alg.aead);
4618			driver_algs[j].registered = false;
4619			break;
4620		}
4621	}
4622	return err;
4623}
4624
4625/* ==================== Kernel Platform API ==================== */
4626
4627static struct spu_type_subtype spum_ns2_types = {
4628	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2
4629};
4630
4631static struct spu_type_subtype spum_nsp_types = {
4632	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP
4633};
4634
4635static struct spu_type_subtype spu2_types = {
4636	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1
4637};
4638
4639static struct spu_type_subtype spu2_v2_types = {
4640	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2
4641};
4642
4643static const struct of_device_id bcm_spu_dt_ids[] = {
4644	{
4645		.compatible = "brcm,spum-crypto",
4646		.data = &spum_ns2_types,
4647	},
4648	{
4649		.compatible = "brcm,spum-nsp-crypto",
4650		.data = &spum_nsp_types,
4651	},
4652	{
4653		.compatible = "brcm,spu2-crypto",
4654		.data = &spu2_types,
4655	},
4656	{
4657		.compatible = "brcm,spu2-v2-crypto",
4658		.data = &spu2_v2_types,
4659	},
4660	{ /* sentinel */ }
4661};
4662
4663MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids);
4664
4665static int spu_dt_read(struct platform_device *pdev)
4666{
4667	struct device *dev = &pdev->dev;
4668	struct spu_hw *spu = &iproc_priv.spu;
4669	struct resource *spu_ctrl_regs;
4670	const struct spu_type_subtype *matched_spu_type;
4671	struct device_node *dn = pdev->dev.of_node;
4672	int err, i;
4673
4674	/* Count number of mailbox channels */
4675	spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
4676
4677	matched_spu_type = of_device_get_match_data(dev);
4678	if (!matched_spu_type) {
4679		dev_err(dev, "Failed to match device\n");
4680		return -ENODEV;
4681	}
4682
4683	spu->spu_type = matched_spu_type->type;
4684	spu->spu_subtype = matched_spu_type->subtype;
4685
4686	for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
4687		platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {
4688
4689		spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
4690		if (IS_ERR(spu->reg_vbase[i])) {
4691			err = PTR_ERR(spu->reg_vbase[i]);
4692			dev_err(dev, "Failed to map registers: %d\n",
4693				err);
4694			spu->reg_vbase[i] = NULL;
4695			return err;
4696		}
4697	}
4698	spu->num_spu = i;
4699	dev_dbg(dev, "Device has %d SPUs", spu->num_spu);
4700
4701	return 0;
4702}
4703
4704static int bcm_spu_probe(struct platform_device *pdev)
4705{
4706	struct device *dev = &pdev->dev;
4707	struct spu_hw *spu = &iproc_priv.spu;
4708	int err;
4709
4710	iproc_priv.pdev  = pdev;
4711	platform_set_drvdata(iproc_priv.pdev,
4712			     &iproc_priv);
4713
4714	err = spu_dt_read(pdev);
4715	if (err < 0)
4716		goto failure;
4717
4718	err = spu_mb_init(dev);
4719	if (err < 0)
4720		goto failure;
4721
4722	if (spu->spu_type == SPU_TYPE_SPUM)
4723		iproc_priv.bcm_hdr_len = 8;
4724	else if (spu->spu_type == SPU_TYPE_SPU2)
4725		iproc_priv.bcm_hdr_len = 0;
4726
4727	spu_functions_register(dev, spu->spu_type, spu->spu_subtype);
4728
4729	spu_counters_init();
4730
4731	spu_setup_debugfs();
4732
4733	err = spu_algs_register(dev);
4734	if (err < 0)
4735		goto fail_reg;
4736
4737	return 0;
4738
4739fail_reg:
4740	spu_free_debugfs();
4741failure:
4742	spu_mb_release(pdev);
4743	dev_err(dev, "%s failed with error %d.\n", __func__, err);
4744
4745	return err;
4746}
4747
4748static int bcm_spu_remove(struct platform_device *pdev)
4749{
4750	int i;
4751	struct device *dev = &pdev->dev;
4752	char *cdn;
4753
4754	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4755		/*
4756		 * Not all algorithms were registered, depending on whether
4757		 * hardware is SPU or SPU2.  So here we make sure to skip
4758		 * those algorithms that were not previously registered.
4759		 */
4760		if (!driver_algs[i].registered)
4761			continue;
4762
4763		switch (driver_algs[i].type) {
4764		case CRYPTO_ALG_TYPE_SKCIPHER:
4765			crypto_unregister_skcipher(&driver_algs[i].alg.skcipher);
4766			dev_dbg(dev, "  unregistered cipher %s\n",
4767				driver_algs[i].alg.skcipher.base.cra_driver_name);
4768			driver_algs[i].registered = false;
4769			break;
4770		case CRYPTO_ALG_TYPE_AHASH:
4771			crypto_unregister_ahash(&driver_algs[i].alg.hash);
4772			cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
4773			dev_dbg(dev, "  unregistered hash %s\n", cdn);
4774			driver_algs[i].registered = false;
4775			break;
4776		case CRYPTO_ALG_TYPE_AEAD:
4777			crypto_unregister_aead(&driver_algs[i].alg.aead);
4778			dev_dbg(dev, "  unregistered aead %s\n",
4779				driver_algs[i].alg.aead.base.cra_driver_name);
4780			driver_algs[i].registered = false;
4781			break;
4782		}
4783	}
4784	spu_free_debugfs();
4785	spu_mb_release(pdev);
4786	return 0;
4787}
4788
4789/* ===== Kernel Module API ===== */
4790
4791static struct platform_driver bcm_spu_pdriver = {
4792	.driver = {
4793		   .name = "brcm-spu-crypto",
4794		   .of_match_table = of_match_ptr(bcm_spu_dt_ids),
4795		   },
4796	.probe = bcm_spu_probe,
4797	.remove = bcm_spu_remove,
4798};
4799module_platform_driver(bcm_spu_pdriver);
4800
4801MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
4802MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver");
4803MODULE_LICENSE("GPL v2");
4804