// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior <sebastian at breakpoint dot cc>
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}

static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}
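
/*
 * Example (illustrative, values derived from the code above): with
 * creq->len = 100, index = 100 & 63 = 36 and padlen = 56 - 36 = 20, so
 * the trailer is the 0x80 marker, 19 zero bytes and the 8-byte bit count
 * (100 << 3 = 800), i.e. padlen + 8 = 28 bytes, which brings the message
 * to 128 bytes, a multiple of the 64-byte block size. The bit count is
 * stored little endian for MD5 and big endian for SHA, as selected by
 * creq->algo_le.
 */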

static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	if (!sreq->offset) {
		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
		for (i = 0; i < digsize / 4; i++)
			writel_relaxed(creq->state[i],
				       engine->regs + CESA_IVDIG(i));
	}

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				i = mv_cesa_ahash_pad_req(creq, creq->cache);
				len += i;
				memcpy_toio(engine->sram + len +
					    CESA_SA_DATA_SRAM_OFFSET,
					    creq->cache, i);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
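
/*
 * Note on the fragment handling above: a request starts as FIRST_FRAG and
 * the template is demoted to MID_FRAG once a fragment has been processed.
 * On the closing step FIRST_FRAG collapses to NOT_FRAG (single-shot hash)
 * and MID_FRAG to LAST_FRAG, unless the total length exceeds
 * CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX, in which case the padding is
 * generated in software and fed to the engine as ordinary fragment data.
 */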

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}

static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *base = &creq->base;

	/* We must explicitly set the digest state. */
	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
		struct mv_cesa_engine *engine = base->engine;
		int i;

		/* Set the hash state in the IVDIG regs. */
		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
			writel_relaxed(creq->state[i], engine->regs +
				       CESA_IVDIG(i));
	}

	mv_cesa_dma_step(base);
}
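
/*
 * Note: CESA_TDMA_SET_STATE is only set by mv_cesa_ahash_dma_req_init()
 * when the request does not start with a FIRST_FRAG operation (e.g. after
 * an import or an earlier partial update), because the engine then needs
 * the intermediate digest preloaded into IVDIG instead of the algorithm's
 * initial constants.
 */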

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_step(ahashreq);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}

static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
	     CESA_TDMA_RESULT) {
		__le32 *data = NULL;

		/*
		 * Result is already in the correct endianness when the SA is
		 * used.
		 */
		data = creq->base.chain.last->op->ctx.hash.hash;
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = le32_to_cpu(data[i]);

		memcpy(ahashreq->result, data, digsize);
	} else {
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = readl_relaxed(engine->regs +
						       CESA_IVDIG(i));
		if (creq->last_req) {
			/*
			 * Hardware's MD5 digest is in little endian format,
			 * but SHA in big endian format.
			 */
			if (creq->algo_le) {
				__le32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_le32(creq->state[i]);
			} else {
				__be32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_be32(creq->state[i]);
			}
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}
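
/*
 * Note: with a CESA_TDMA_RESULT transfer the digest already sits in the
 * op context in the engine's byte order and is copied verbatim; otherwise
 * the state words are read back from IVDIG and, on the final request,
 * byte-swapped to little endian for MD5 or big endian for SHA-1/SHA-256
 * (creq->algo_le) before being handed to the caller.
 */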

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};

static void mv_cesa_ahash_init(struct ahash_request *req,
			       struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;

	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
	    !creq->last_req) {
		cached = true;

		if (!req->nbytes)
			return cached;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return cached;
}
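
/*
 * Illustration (assuming 64-byte blocks, CESA_MAX_HASH_BLOCK_SIZE == 64):
 * three successive 20-byte updates are simply appended to creq->cache
 * (cache_ptr grows 20 -> 40 -> 60) and complete immediately; a fourth one
 * makes cache_ptr + nbytes reach 80, so the request is handed to the
 * engine instead of being cached.
 */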

static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}
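
/*
 * Note: each fragment thus contributes two TDMA descriptors, one copying
 * the operation block into SRAM and one dummy "launch" descriptor that
 * kicks the engine, and the shared template drops from FIRST_FRAG to
 * MID_FRAG after its first use.
 */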

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
						CESA_SA_DESC_CFG_NOT_FRAG :
						CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		ret = mv_cesa_dma_add_result_op(chain,
						CESA_SA_CFG_SRAM_OFFSET,
						CESA_SA_DATA_SRAM_OFFSET,
						CESA_TDMA_SRC_IN_SRAM, flags);
		if (ret)
			return ERR_PTR(-ENOMEM);

		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						CESA_SA_DATA_SRAM_OFFSET +
						frag_len,
						ahashdreq->padding_dma,
						len, CESA_TDMA_DST_IN_SRAM,
						flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
					  flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}
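
/*
 * Note on the two paths above: when the engine can finalize the hash
 * itself (total length within CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX and data
 * outstanding), the digest is fetched with a TDMA result descriptor.
 * Otherwise the software-built trailer is DMAed into SRAM, split across
 * two operations when it does not fit behind the remaining data in the
 * current SRAM payload.
 */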

static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	bool set_state = false;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
		set_state = true;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain,
						  &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends on whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	/*
	 * If results are copied via DMA, this means that this
	 * request can be directly processed by the engine,
	 * without partial updates. So we can chain it at the
	 * DMA level with other requests.
	 */
	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

	if (op && type != CESA_TDMA_RESULT) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

	if (set_state) {
		/*
		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
		 * let the step logic know that the IVDIG registers should be
		 * explicitly set before launching a TDMA chain.
		 */
		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
	}

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}
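
/*
 * Sketch of the chain built above (illustrative, for a request spanning
 * two SRAM payloads):
 *
 *   [cache -> SRAM][data -> SRAM][op][launch]     first SRAM block
 *   [data -> SRAM][op][launch][dummy end]         trailing fragment
 *
 * The final op comes from mv_cesa_ahash_dma_last_req() or
 * mv_cesa_dma_add_frag() after the copy loop. The last descriptor is
 * tagged CESA_TDMA_END_OF_REQ, plus CESA_TDMA_BREAK_CHAIN unless the
 * result is collected by DMA (CESA_TDMA_RESULT), in which case the
 * request can be chained with the next one at the TDMA level.
 */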

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}

	*cached = mv_cesa_ahash_cache_req(req);

	if (*cached)
		return 0;

	if (cesa_dev->caps->has_tdma)
		return mv_cesa_ahash_dma_req_init(req);
	else
		return 0;
}

static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_engine *engine;
	bool cached = false;
	int ret;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ahash_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->len += req->nbytes;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}
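
/*
 * Note: import restores creq->len and the intermediate digest, marks the
 * template MID_FRAG once at least one full block has been hashed, and
 * re-seeds the cache with the len % blocksize bytes of not-yet-hashed
 * data (do_div() is used because len is a 64-bit quantity).
 */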

static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl, true);

	creq->state[0] = MD5_H0;
	creq->state[1] = MD5_H1;
	creq->state[2] = MD5_H2;
	creq->state[3] = MD5_H3;

	return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA1_H0;
	creq->state[1] = SHA1_H1;
	creq->state[2] = SHA1_H2;
	creq->state[3] = SHA1_H3;
	creq->state[4] = SHA1_H4;

	return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA256_H0;
	creq->state[1] = SHA256_H1;
	creq->state[2] = SHA256_H2;
	creq->state[3] = SHA256_H3;
	creq->state[4] = SHA256_H4;
	creq->state[5] = SHA256_H5;
	creq->state[6] = SHA256_H6;
	creq->state[7] = SHA256_H7;

	return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   mv_cesa_hmac_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}

static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		kfree_sensitive(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}
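
/*
 * Note: this follows RFC 2104. A key longer than the block size is first
 * replaced by its digest; the (possibly shortened) key is then
 * zero-padded to a full block and XORed with 0x36 (HMAC_IPAD_VALUE) and
 * 0x5c (HMAC_OPAD_VALUE) to produce the inner and outer pads.
 */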

static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kcalloc(2, blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}
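
/*
 * Note: rather than storing the raw key, setkey hashes one block of ipad
 * and one block of opad through the backing "mv-*" ahash and exports the
 * two partial states. These become the per-tfm IVs, so every HMAC request
 * can start directly from H(K ^ ipad) and H(K ^ opad).
 */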

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = cpu_to_be32(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.hash[i]);

	return 0;
}
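
/*
 * Note: ctx->iv holds the inner state starting at word 0 and the outer
 * state starting at word 8; the *_init() helpers copy the whole array
 * into the op template so the engine has both HMAC IVs at hand.
 */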

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = cpu_to_be32(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = cpu_to_be32(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};