// SPDX-License-Identifier: GPL-2.0-only
/*
 *   Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
 *
 *    Copyright (C) 2014-2017  Axis Communications AB
 */
#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/xts.h>
/* Max length of a line in all cache levels for Artpec SoCs. */
#define ARTPEC_CACHE_LINE_MAX	32

#define PDMA_OUT_CFG		0x0000
#define PDMA_OUT_BUF_CFG	0x0004
#define PDMA_OUT_CMD		0x0008
#define PDMA_OUT_DESCRQ_PUSH	0x0010
#define PDMA_OUT_DESCRQ_STAT	0x0014

#define A6_PDMA_IN_CFG		0x0028
#define A6_PDMA_IN_BUF_CFG	0x002c
#define A6_PDMA_IN_CMD		0x0030
#define A6_PDMA_IN_STATQ_PUSH	0x0038
#define A6_PDMA_IN_DESCRQ_PUSH	0x0044
#define A6_PDMA_IN_DESCRQ_STAT	0x0048
#define A6_PDMA_INTR_MASK	0x0068
#define A6_PDMA_ACK_INTR	0x006c
#define A6_PDMA_MASKED_INTR	0x0074

#define A7_PDMA_IN_CFG		0x002c
#define A7_PDMA_IN_BUF_CFG	0x0030
#define A7_PDMA_IN_CMD		0x0034
#define A7_PDMA_IN_STATQ_PUSH	0x003c
#define A7_PDMA_IN_DESCRQ_PUSH	0x0048
#define A7_PDMA_IN_DESCRQ_STAT	0x004C
#define A7_PDMA_INTR_MASK	0x006c
#define A7_PDMA_ACK_INTR	0x0070
#define A7_PDMA_MASKED_INTR	0x0078

#define PDMA_OUT_CFG_EN				BIT(0)

#define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
#define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)

#define PDMA_OUT_CMD_START			BIT(0)
#define A6_PDMA_OUT_CMD_STOP			BIT(3)
#define A7_PDMA_OUT_CMD_STOP			BIT(2)

#define PDMA_OUT_DESCRQ_PUSH_LEN		GENMASK(5, 0)
#define PDMA_OUT_DESCRQ_PUSH_ADDR		GENMASK(31, 6)

#define PDMA_OUT_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
#define PDMA_OUT_DESCRQ_STAT_SIZE		GENMASK(7, 4)

#define PDMA_IN_CFG_EN				BIT(0)

#define PDMA_IN_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
#define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)
#define PDMA_IN_BUF_CFG_STAT_BUF_SIZE		GENMASK(14, 10)

#define PDMA_IN_CMD_START			BIT(0)
#define A6_PDMA_IN_CMD_FLUSH_STAT		BIT(2)
#define A6_PDMA_IN_CMD_STOP			BIT(3)
#define A7_PDMA_IN_CMD_FLUSH_STAT		BIT(1)
#define A7_PDMA_IN_CMD_STOP			BIT(2)

#define PDMA_IN_STATQ_PUSH_LEN			GENMASK(5, 0)
#define PDMA_IN_STATQ_PUSH_ADDR			GENMASK(31, 6)

#define PDMA_IN_DESCRQ_PUSH_LEN			GENMASK(5, 0)
#define PDMA_IN_DESCRQ_PUSH_ADDR		GENMASK(31, 6)

#define PDMA_IN_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
#define PDMA_IN_DESCRQ_STAT_SIZE		GENMASK(7, 4)

#define A6_PDMA_INTR_MASK_IN_DATA		BIT(2)
#define A6_PDMA_INTR_MASK_IN_EOP		BIT(3)
#define A6_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(4)

#define A7_PDMA_INTR_MASK_IN_DATA		BIT(3)
#define A7_PDMA_INTR_MASK_IN_EOP		BIT(4)
#define A7_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(5)

#define A6_CRY_MD_OPER		GENMASK(19, 16)

#define A6_CRY_MD_HASH_SEL_CTX	GENMASK(21, 20)
#define A6_CRY_MD_HASH_HMAC_FIN	BIT(23)

#define A6_CRY_MD_CIPHER_LEN	GENMASK(21, 20)
#define A6_CRY_MD_CIPHER_DECR	BIT(22)
#define A6_CRY_MD_CIPHER_TWEAK	BIT(23)
#define A6_CRY_MD_CIPHER_DSEQ	BIT(24)

#define A7_CRY_MD_OPER		GENMASK(11, 8)

#define A7_CRY_MD_HASH_SEL_CTX	GENMASK(13, 12)
#define A7_CRY_MD_HASH_HMAC_FIN	BIT(15)

#define A7_CRY_MD_CIPHER_LEN	GENMASK(13, 12)
#define A7_CRY_MD_CIPHER_DECR	BIT(14)
#define A7_CRY_MD_CIPHER_TWEAK	BIT(15)
#define A7_CRY_MD_CIPHER_DSEQ	BIT(16)

/* DMA metadata constants */
#define regk_crypto_aes_cbc     0x00000002
#define regk_crypto_aes_ctr     0x00000003
#define regk_crypto_aes_ecb     0x00000001
#define regk_crypto_aes_gcm     0x00000004
#define regk_crypto_aes_xts     0x00000005
#define regk_crypto_cache       0x00000002
#define a6_regk_crypto_dlkey    0x0000000a
#define a7_regk_crypto_dlkey    0x0000000e
#define regk_crypto_ext         0x00000001
#define regk_crypto_hmac_sha1   0x00000007
#define regk_crypto_hmac_sha256 0x00000009
#define regk_crypto_init        0x00000000
#define regk_crypto_key_128     0x00000000
#define regk_crypto_key_192     0x00000001
#define regk_crypto_key_256     0x00000002
#define regk_crypto_null        0x00000000
#define regk_crypto_sha1        0x00000006
#define regk_crypto_sha256      0x00000008

/* DMA descriptor structures */
struct pdma_descr_ctrl  {
	unsigned char short_descr : 1;
	unsigned char pad1        : 1;
	unsigned char eop         : 1;
	unsigned char intr        : 1;
	unsigned char short_len   : 3;
	unsigned char pad2        : 1;
} __packed;

struct pdma_data_descr {
	unsigned int len : 24;
	unsigned int buf : 32;
} __packed;

struct pdma_short_descr {
	unsigned char data[7];
} __packed;

struct pdma_descr {
	struct pdma_descr_ctrl ctrl;
	union {
		struct pdma_data_descr   data;
		struct pdma_short_descr  shrt;
	};
};

struct pdma_stat_descr {
	unsigned char pad1        : 1;
	unsigned char pad2        : 1;
	unsigned char eop         : 1;
	unsigned char pad3        : 5;
	unsigned int  len         : 24;
};

/* Each descriptor array can hold max 64 entries */
#define PDMA_DESCR_COUNT	64

#define MODULE_NAME   "Artpec-6 CA"

/* Hash modes (including HMAC variants) */
#define ARTPEC6_CRYPTO_HASH_SHA1	1
#define ARTPEC6_CRYPTO_HASH_SHA256	2

/* Crypto modes */
#define ARTPEC6_CRYPTO_CIPHER_AES_ECB	1
#define ARTPEC6_CRYPTO_CIPHER_AES_CBC	2
#define ARTPEC6_CRYPTO_CIPHER_AES_CTR	3
#define ARTPEC6_CRYPTO_CIPHER_AES_XTS	5
/* The PDMA is a DMA engine tightly coupled with a ciphering engine.
 * It operates on a descriptor array with up to 64 descriptor entries.
 * The arrays must be 64-byte aligned in memory.
 *
 * The ciphering unit has no registers and is completely controlled by
 * a 4-byte metadata word that is inserted at the beginning of each DMA
 * packet.
 *
 * A DMA packet is a sequence of descriptors terminated by setting the .eop
 * field in the final descriptor of the packet.
 *
 * Multiple packets are used for providing context data, key data and
 * the plain/ciphertext.
 *
 *   PDMA Descriptors (Array)
 *  +------+------+------+~~+-------+------+----
 *  |  0   |  1   |  2   |~~| 11 EOP|  12  |  ....
 *  +--+---+--+---+----+-+~~+-------+----+-+----
 *     |      |        |       |         |
 *     |      |        |       |         |
 *   __|__  +-------++-------++-------+ +----+
 *  | MD  | |Payload||Payload||Payload| | MD |
 *  +-----+ +-------++-------++-------+ +----+
 */
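
/*
 * Illustrative sketch (editorial addition, not part of the driver): how one
 * OUT packet from the model above is built with the descriptor helpers
 * defined later in this file. The 4-byte metadata word is the first
 * descriptor of the packet and eop on the last descriptor closes it. In the
 * driver proper the metadata lives in a tfm/request context so the buffer
 * stays valid until the DMA completes; the names and sizes here are made up.
 */
#if 0
static int artpec6_crypto_example_out_packet(struct artpec6_crypto_req_common *common)
{
	static u32 example_md;		/* must stay valid until the DMA completes */
	static u8 example_payload[16];
	int ret;

	/* First descriptor of the packet: the 4-byte metadata word */
	ret = artpec6_crypto_setup_out_descr(common, &example_md,
					     sizeof(example_md), false, false);
	if (ret)
		return ret;

	/* Last descriptor: the payload; eop closes the packet */
	return artpec6_crypto_setup_out_descr(common, example_payload,
					      sizeof(example_payload), true,
					      false);
}
#endif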

struct artpec6_crypto_bounce_buffer {
	struct list_head list;
	size_t length;
	struct scatterlist *sg;
	size_t offset;
	/* buf is aligned to ARTPEC_CACHE_LINE_MAX and
	 * holds up to ARTPEC_CACHE_LINE_MAX bytes data.
	 */
	void *buf;
};

struct artpec6_crypto_dma_map {
	dma_addr_t dma_addr;
	size_t size;
	enum dma_data_direction dir;
};

struct artpec6_crypto_dma_descriptors {
	struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
	struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
	u32 stat[PDMA_DESCR_COUNT] __aligned(64);
	struct list_head bounce_buffers;
	/* Enough maps for all out/in buffers, and all three descr. arrays */
	struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
	dma_addr_t out_dma_addr;
	dma_addr_t in_dma_addr;
	dma_addr_t stat_dma_addr;
	size_t out_cnt;
	size_t in_cnt;
	size_t map_count;
};

enum artpec6_crypto_variant {
	ARTPEC6_CRYPTO,
	ARTPEC7_CRYPTO,
};

struct artpec6_crypto {
	void __iomem *base;
	spinlock_t queue_lock;
	struct list_head queue; /* waiting for pdma fifo space */
	struct list_head pending; /* submitted to pdma fifo */
	struct tasklet_struct task;
	struct kmem_cache *dma_cache;
	int pending_count;
	struct timer_list timer;
	enum artpec6_crypto_variant variant;
	void *pad_buffer; /* cache-aligned block padding buffer */
	void *zero_buffer;
};

enum artpec6_crypto_hash_flags {
	HASH_FLAG_INIT_CTX = 2,
	HASH_FLAG_UPDATE = 4,
	HASH_FLAG_FINALIZE = 8,
	HASH_FLAG_HMAC = 16,
	HASH_FLAG_UPDATE_KEY = 32,
};

struct artpec6_crypto_req_common {
	struct list_head list;
	struct list_head complete_in_progress;
	struct artpec6_crypto_dma_descriptors *dma;
	struct crypto_async_request *req;
	void (*complete)(struct crypto_async_request *req);
	gfp_t gfp_flags;
};

struct artpec6_hash_request_context {
	char partial_buffer[SHA256_BLOCK_SIZE];
	char partial_buffer_out[SHA256_BLOCK_SIZE];
	char key_buffer[SHA256_BLOCK_SIZE];
	char pad_buffer[SHA256_BLOCK_SIZE + 32];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	u32 key_md;
	u32 hash_md;
	enum artpec6_crypto_hash_flags hash_flags;
	struct artpec6_crypto_req_common common;
};

struct artpec6_hash_export_state {
	char partial_buffer[SHA256_BLOCK_SIZE];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	int oper;
	unsigned int hash_flags;
};

struct artpec6_hashalg_context {
	char hmac_key[SHA256_BLOCK_SIZE];
	size_t hmac_key_length;
	struct crypto_shash *child_hash;
};

struct artpec6_crypto_request_context {
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
};

struct artpec6_cryptotfm_context {
	unsigned char aes_key[2*AES_MAX_KEY_SIZE];
	size_t key_length;
	u32 key_md;
	int crypto_type;
	struct crypto_sync_skcipher *fallback;
};

struct artpec6_crypto_aead_hw_ctx {
	__be64	aad_length_bits;
	__be64  text_length_bits;
	__u8	J0[AES_BLOCK_SIZE];
};

struct artpec6_crypto_aead_req_ctx {
	struct artpec6_crypto_aead_hw_ctx hw_ctx;
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
	__u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
};

/* The crypto framework makes it hard to avoid this global. */
static struct device *artpec6_crypto_dev;

#ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
#endif

enum {
	ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
	ARTPEC6_CRYPTO_PREPARE_HASH_START,
};

static int artpec6_crypto_prepare_aead(struct aead_request *areq);
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
static int artpec6_crypto_prepare_hash(struct ahash_request *areq);

static void
artpec6_crypto_complete_crypto(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_aead(struct crypto_async_request *req);
static void
artpec6_crypto_complete_hash(struct crypto_async_request *req);

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);

static void
artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);

struct artpec6_crypto_walk {
	struct scatterlist *sg;
	size_t offset;
};

static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
				     struct scatterlist *sg)
{
	awalk->sg = sg;
	awalk->offset = 0;
}

static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
					  size_t nbytes)
{
	while (nbytes && awalk->sg) {
		size_t piece;

		WARN_ON(awalk->offset > awalk->sg->length);

		piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
		nbytes -= piece;
		awalk->offset += piece;
		if (awalk->offset == awalk->sg->length) {
			awalk->sg = sg_next(awalk->sg);
			awalk->offset = 0;
		}
	}

	return nbytes;
}

static size_t
artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
{
	WARN_ON(awalk->sg->length == awalk->offset);

	return awalk->sg->length - awalk->offset;
}

static dma_addr_t
artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
{
	return sg_phys(awalk->sg) + awalk->offset;
}
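
/*
 * Illustrative sketch (editorial addition, not part of the driver): how the
 * walk helpers above traverse a scatterlist. Assuming a hypothetical
 * two-entry sg chain of 24 + 40 bytes, advancing by 32 consumes all of
 * entry 0 and 8 bytes of entry 1, leaving a 32-byte chunk starting at
 * offset 8 of entry 1.
 */
#if 0
static void artpec6_crypto_example_walk(struct scatterlist *sg_24_plus_40)
{
	struct artpec6_crypto_walk awalk;
	size_t left;

	artpec6_crypto_walk_init(&awalk, sg_24_plus_40);
	left = artpec6_crypto_walk_advance(&awalk, 32);	/* left == 0 */
	/* awalk.sg is now the second entry and awalk.offset == 8 */
	WARN_ON(left || artpec6_crypto_walk_chunklen(&awalk) != 32);
}
#endif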

static void
artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
			 b, b->length, b->offset, b->buf);
		sg_pcopy_from_buffer(b->sg,
				   1,
				   b->buf,
				   b->length,
				   b->offset);

		list_del(&b->list);
		kfree(b);
	}
}

static inline bool artpec6_crypto_busy(void)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int fifo_count = ac->pending_count;

	return fifo_count > 6;
}

static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int ret = -EBUSY;

	spin_lock_bh(&ac->queue_lock);

	if (!artpec6_crypto_busy()) {
		list_add_tail(&req->list, &ac->pending);
		artpec6_crypto_start_dma(req);
		ret = -EINPROGRESS;
	} else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
		list_add_tail(&req->list, &ac->queue);
	} else {
		artpec6_crypto_common_destroy(req);
	}

	spin_unlock_bh(&ac->queue_lock);

	return ret;
}

static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	u32 ind, statd, outd;

	/* Make descriptor content visible to the DMA before starting it. */
	wmb();

	ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
	      FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);

	statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
		FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);

	outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
	       FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
	} else {
		writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
	}

	writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
	writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);

	ac->pending_count++;
}
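
/*
 * Worked example (editorial addition): with in_cnt == 3 and a 64-byte
 * aligned in_dma_addr of 0x40001040, the push word above becomes
 *
 *   FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN,  3 - 1)           = 0x00000002
 * | FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, 0x40001040 >> 6) = 0x40001040
 *                                                         = 0x40001042
 *
 * The low 6 bits carry the descriptor count minus one and the upper 26 bits
 * carry the address in 64-byte units, which is why the descriptor arrays
 * are declared __aligned(64).
 */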

static void
artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;

	dma->out_cnt = 0;
	dma->in_cnt = 0;
	dma->map_count = 0;
	INIT_LIST_HEAD(&dma->bounce_buffers);
}

static bool fault_inject_dma_descr(void)
{
#ifdef CONFIG_FAULT_INJECTION
	return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
#else
	return false;
#endif
}

/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
 *                                        physical address
 *
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @eop:  True if this is the last buffer in the packet
 *
 * @return 0 on success or -ENOSPC if there are no more descriptors available
 */
static int
artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
				    dma_addr_t addr, size_t len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	}

	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 0;
	d->ctrl.eop = eop;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}
/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
 *
 * @dst: The virtual address of the data
 * @len: The length of the data, must be between 1 and 7 bytes
 * @eop: True if this is the last buffer in the packet
 *
 * @return 0 on success
 *	-ENOSPC if no more descriptors are available
 *	-EINVAL if the data length is not between 1 and 7 bytes
 */
static int
artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
				     void *dst, unsigned int len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	} else if (len > 7 || len < 1) {
		return -EINVAL;
	}
	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 1;
	d->ctrl.short_len = len;
	d->ctrl.eop = eop;
	memcpy(d->shrt.data, dst, len);
	return 0;
}

static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
				      struct page *page, size_t offset,
				      size_t size,
				      enum dma_data_direction dir,
				      dma_addr_t *dma_addr_out)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	struct artpec6_crypto_dma_map *map;
	dma_addr_t dma_addr;

	*dma_addr_out = 0;

	if (dma->map_count >= ARRAY_SIZE(dma->maps))
		return -ENOMEM;

	dma_addr = dma_map_page(dev, page, offset, size, dir);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	map = &dma->maps[dma->map_count++];
	map->size = size;
	map->dma_addr = dma_addr;
	map->dir = dir;

	*dma_addr_out = dma_addr;

	return 0;
}

static int
artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
			      void *ptr, size_t size,
			      enum dma_data_direction dir,
			      dma_addr_t *dma_addr_out)
{
	struct page *page = virt_to_page(ptr);
	size_t offset = (uintptr_t)ptr & ~PAGE_MASK;

	return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
					  dma_addr_out);
}

static int
artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, dma->in,
				sizeof(dma->in[0]) * dma->in_cnt,
				DMA_TO_DEVICE, &dma->in_dma_addr);
	if (ret)
		return ret;

	ret = artpec6_crypto_dma_map_single(common, dma->out,
				sizeof(dma->out[0]) * dma->out_cnt,
				DMA_TO_DEVICE, &dma->out_dma_addr);
	if (ret)
		return ret;

	/* We only read one stat descriptor */
	dma->stat[dma->in_cnt - 1] = 0;

	/*
	 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
	 * to be written.
	 */
	return artpec6_crypto_dma_map_single(common,
				dma->stat,
				sizeof(dma->stat[0]) * dma->in_cnt,
				DMA_BIDIRECTIONAL,
				&dma->stat_dma_addr);
}

static void
artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	int i;

	for (i = 0; i < dma->map_count; i++) {
		struct artpec6_crypto_dma_map *map = &dma->maps[i];

		dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
	}

	dma->map_count = 0;
}

/** artpec6_crypto_setup_out_descr - Setup an out descriptor
 *
 * @dst: The virtual address of the data
 * @len: The length of the data
 * @eop: True if this is the last buffer in the packet
 * @use_short: If this is true and the data length is less than 7 bytes then
 *	a short descriptor will be used
 *
 * @return 0 on success
 *	Any errors from artpec6_crypto_setup_out_descr_short() or
 *	artpec6_crypto_setup_out_descr_phys()
 */
static int
artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
			       void *dst, unsigned int len, bool eop,
			       bool use_short)
{
	if (use_short && len < 7) {
		return artpec6_crypto_setup_out_descr_short(common, dst, len,
							    eop);
	} else {
		int ret;
		dma_addr_t dma_addr;

		ret = artpec6_crypto_dma_map_single(common, dst, len,
						   DMA_TO_DEVICE,
						   &dma_addr);
		if (ret)
			return ret;

		return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
							   len, eop);
	}
}

/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
 *                                       physical address
 *
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @intr: True if an interrupt should be fired after HW processing of this
 *	  descriptor
 *
 * @return 0 on success or -ENOSPC if there are no more descriptors available
 */
static int
artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
			       dma_addr_t addr, unsigned int len, bool intr)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->in_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free IN DMA descriptors available!\n");
		return -ENOSPC;
	}
	d = &dma->in[dma->in_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.intr = intr;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}

/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
 *
 * @buffer: The virtual address of the data buffer
 * @len:    The length of the data buffer
 * @last:   If this is the last data buffer in the request (i.e. an interrupt
 *	    is needed)
 *
 * Short descriptors are not used for the in channel
 */
static int
artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
			  void *buffer, unsigned int len, bool last)
{
	dma_addr_t dma_addr;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, buffer, len,
					   DMA_FROM_DEVICE, &dma_addr);
	if (ret)
		return ret;

	return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
}

static struct artpec6_crypto_bounce_buffer *
artpec6_crypto_alloc_bounce(gfp_t flags)
{
	void *base;
	size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
			    2 * ARTPEC_CACHE_LINE_MAX;
	struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);

	if (!bbuf)
		return NULL;

	base = bbuf + 1;
	bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
	return bbuf;
}

static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk, size_t size)
{
	struct artpec6_crypto_bounce_buffer *bbuf;
	int ret;

	bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
	if (!bbuf)
		return -ENOMEM;

	bbuf->length = size;
	bbuf->sg = walk->sg;
	bbuf->offset = walk->offset;

	ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
	if (ret) {
		kfree(bbuf);
		return ret;
	}

	pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
	list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
	return 0;
}

static int
artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk,
				  size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		/* When destination buffers are not aligned to the cache line
		 * size we need bounce buffers. The DMA API requires that the
		 * entire cache line is owned by the DMA buffer, and this also
		 * holds when coherent DMA is used.
		 */
		if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
			chunk = min_t(dma_addr_t, chunk,
				      ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
				      addr);

			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else if (chunk < ARTPEC_CACHE_LINE_MAX) {
			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else {
			dma_addr_t dma_addr;

			chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);

			pr_debug("CHUNK %pad:%zu\n", &addr, chunk);

			ret = artpec6_crypto_dma_map_page(common,
							 sg_page(walk->sg),
							 walk->sg->offset +
							 walk->offset,
							 chunk,
							 DMA_FROM_DEVICE,
							 &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_in_descr_phys(common,
								 dma_addr,
								 chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}
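
/*
 * Worked example (editorial addition): for an IN mapping starting at a
 * hypothetical physical address 0x1014 with count == 100
 * (ARTPEC_CACHE_LINE_MAX == 32):
 *
 *   1. 0x1014 is not 32-byte aligned: bounce the 12 bytes up to 0x1020.
 *   2. 0x1020, 88 bytes left: map 88 & ~31 == 64 bytes directly.
 *   3. 0x1060, 24 bytes left (< 32): bounce the tail.
 *
 * 12 + 64 + 24 == 100, so only the unaligned head and the short tail pay
 * the bounce-copy cost.
 */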

static int
artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
				   struct artpec6_crypto_walk *walk,
				   size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);

		if (addr & 3) {
			char buf[3];

			chunk = min_t(size_t, chunk, (4-(addr&3)));

			sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
					   walk->offset);

			ret = artpec6_crypto_setup_out_descr_short(common, buf,
								   chunk,
								   false);
		} else {
			dma_addr_t dma_addr;

			ret = artpec6_crypto_dma_map_page(common,
							 sg_page(walk->sg),
							 walk->sg->offset +
							 walk->offset,
							 chunk,
							 DMA_TO_DEVICE,
							 &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_out_descr_phys(common,
								 dma_addr,
								 chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}

/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
 *
 * If the out descriptor list is non-empty, then the eop flag on the
 * last used out descriptor will be set.
 *
 * @return  0 on success
 *	-EINVAL if the out descriptor list is empty or has overflown
 */
static int
artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: OUT descriptor list is %s\n",
			MODULE_NAME, dma->out_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->out[dma->out_cnt-1];
	d->ctrl.eop = 1;

	return 0;
}

/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
 *                                       in descriptor
 *
 * See artpec6_crypto_terminate_out_descrs() for return values
 */
static int
artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: IN descriptor list is %s\n",
			MODULE_NAME, dma->in_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->in[dma->in_cnt-1];
	d->ctrl.intr = 1;
	return 0;
}

/** create_hash_pad - Create a Secure Hash conformant pad
 *
 * @dst:      The destination buffer to write the pad. Must be at least 64 bytes
 * @dgstlen:  The total length of the hashed data in bytes
 * @bitcount: The total length of the hashed data in bits
 *
 * @return The total number of padding bytes written to @dst
 */
static size_t
create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
{
	unsigned int mod, target, diff, pad_bytes, size_bytes;
	__be64 bits = __cpu_to_be64(bitcount);

	switch (oper) {
	case regk_crypto_sha1:
	case regk_crypto_sha256:
	case regk_crypto_hmac_sha1:
	case regk_crypto_hmac_sha256:
		target = 448 / 8;
		mod = 512 / 8;
		size_bytes = 8;
		break;
	default:
		target = 896 / 8;
		mod = 1024 / 8;
		size_bytes = 16;
		break;
	}

	target -= 1;
	diff = dgstlen & (mod - 1);
	pad_bytes = diff > target ? target + mod - diff : target - diff;

	memset(dst + 1, 0, pad_bytes);
	dst[0] = 0x80;

	if (size_bytes == 16) {
		memset(dst + 1 + pad_bytes, 0, 8);
		memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
	} else {
		memcpy(dst + 1 + pad_bytes, &bits, 8);
	}

	return pad_bytes + size_bytes + 1;
}
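
/*
 * Worked example (editorial addition): for SHA-256 (mod == 64, target == 55
 * after the decrement, size_bytes == 8) and 3 bytes hashed so far
 * (dgstlen == 3, bitcount == 24): diff == 3, pad_bytes == 55 - 3 == 52. The
 * pad is 0x80, 52 zero bytes, then the 8-byte big-endian bit count, 61
 * bytes in total; 3 + 61 == 64, exactly one SHA-256 block.
 */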

static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
		struct crypto_async_request *parent,
		void (*complete)(struct crypto_async_request *req),
		struct scatterlist *dstsg, unsigned int nbytes)
{
	gfp_t flags;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		 GFP_KERNEL : GFP_ATOMIC;

	common->gfp_flags = flags;
	common->dma = kmem_cache_alloc(ac->dma_cache, flags);
	if (!common->dma)
		return -ENOMEM;

	common->req = parent;
	common->complete = complete;
	return 0;
}

static void
artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
{
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		kfree(b);
	}
}

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	artpec6_crypto_dma_unmap_all(common);
	artpec6_crypto_bounce_destroy(common->dma);
	kmem_cache_free(ac->dma_cache, common->dma);
	common->dma = NULL;
	return 0;
}

/*
 * Ciphering functions.
 */
static int artpec6_crypto_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);
	int ret;

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 0;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_encrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common,
				  &req->base,
				  complete,
				  req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_decrypt(struct skcipher_request *req)
{
	int ret;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 1;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_decrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
				  complete,
				  req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int
artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	unsigned int counter = be32_to_cpup((__be32 *)
					    (req->iv + iv_len - 4));
	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
			     AES_BLOCK_SIZE;

	/*
	 * The hardware uses only the last 32 bits as the counter while the
	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
	 * the whole IV is a counter.  So fall back if the counter is going to
	 * overflow.
	 */
	if (counter + nblks < counter) {
		int ret;

		pr_debug("counter %x will overflow (nblks %u), falling back\n",
			 counter, nblks);

		ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key,
						  ctx->key_length);
		if (ret)
			return ret;

		{
			SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

			skcipher_request_set_sync_tfm(subreq, ctx->fallback);
			skcipher_request_set_callback(subreq, req->base.flags,
						      NULL, NULL);
			skcipher_request_set_crypt(subreq, req->src, req->dst,
						   req->cryptlen, req->iv);
			ret = encrypt ? crypto_skcipher_encrypt(subreq)
				      : crypto_skcipher_decrypt(subreq);
			skcipher_request_zero(subreq);
		}
		return ret;
	}

	return encrypt ? artpec6_crypto_encrypt(req)
		       : artpec6_crypto_decrypt(req);
}

static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, true);
}

static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, false);
}

/*
 * AEAD functions
 */
static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
{
	struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);

	memset(tfm_ctx, 0, sizeof(*tfm_ctx));

	crypto_aead_set_reqsize(tfm,
				sizeof(struct artpec6_crypto_aead_req_ctx));

	return 0;
}

static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
			       unsigned int len)
{
	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);

	if (len != 16 && len != 24 && len != 32)
		return -EINVAL;

	ctx->key_length = len;

	memcpy(ctx->aes_key, key, len);
	return 0;
}

static int artpec6_crypto_aead_encrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = false;
	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
				  artpec6_crypto_complete_aead,
				  NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_aead_decrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = true;
	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	ret = artpec6_crypto_common_init(&req_ctx->common,
				  &req->base,
				  artpec6_crypto_complete_aead,
				  NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
{
	struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
	size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	size_t contextsize = digestsize;
	size_t blocksize = crypto_tfm_alg_blocksize(
		crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
	struct artpec6_crypto_req_common *common = &req_ctx->common;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	u32 sel_ctx;
	bool ext_ctx = false;
	bool run_hw = false;
	int error = 0;

	artpec6_crypto_init_dma_operation(common);

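
/*
 * Worked example (editorial addition): with an IV whose trailing 32-bit
 * word is 0xfffffffe and cryptlen == 64 (nblks == 4), counter + nblks wraps
 * to 2 < counter, so the request takes the software fallback, which carries
 * the overflow into the upper 96 bits of the IV as the generic CTR test
 * vectors expect.
 */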
	/* Upload HMAC key, must be the first packet */
	if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
		if (variant == ARTPEC6_CRYPTO) {
			req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
						     a6_regk_crypto_dlkey);
		} else {
			req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
						     a7_regk_crypto_dlkey);
		}

		/* Copy and pad up the key */
		memcpy(req_ctx->key_buffer, ctx->hmac_key,
		       ctx->hmac_key_length);
		memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
		       blocksize - ctx->hmac_key_length);

		error = artpec6_crypto_setup_out_descr(common,
					(void *)&req_ctx->key_md,
					sizeof(req_ctx->key_md), false, false);
		if (error)
			return error;

		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->key_buffer, blocksize,
					true, false);
		if (error)
			return error;
	}

	if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
		/* Restore context */
		sel_ctx = regk_crypto_ext;
		ext_ctx = true;
	} else {
		sel_ctx = regk_crypto_init;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
	} else {
		req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
	}

	/* Set up metadata descriptors */
	error = artpec6_crypto_setup_out_descr(common,
				(void *)&req_ctx->hash_md,
				sizeof(req_ctx->hash_md), false, false);
	if (error)
		return error;

	error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (error)
		return error;

	if (ext_ctx) {
		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->digeststate,
					contextsize, false, false);

		if (error)
			return error;
	}

	if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
		size_t done_bytes = 0;
		size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
		size_t ready_bytes = round_down(total_bytes, blocksize);
		struct artpec6_crypto_walk walk;

		run_hw = ready_bytes > 0;
		if (req_ctx->partial_bytes && ready_bytes) {
			/* We have a partial buffer and will send at least
			 * some bytes to the HW. Empty this partial buffer
			 * before tackling the SG lists.
			 */
			memcpy(req_ctx->partial_buffer_out,
				req_ctx->partial_buffer,
				req_ctx->partial_bytes);

			error = artpec6_crypto_setup_out_descr(common,
						req_ctx->partial_buffer_out,
						req_ctx->partial_bytes,
						false, true);
			if (error)
				return error;

			/* Reset partial buffer */
			done_bytes += req_ctx->partial_bytes;
			req_ctx->partial_bytes = 0;
		}

		artpec6_crypto_walk_init(&walk, areq->src);

		error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
							   ready_bytes -
							   done_bytes);
		if (error)
			return error;

		if (walk.sg) {
			size_t sg_skip = ready_bytes - done_bytes;
			size_t sg_rem = areq->nbytes - sg_skip;

			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					   req_ctx->partial_buffer +
					   req_ctx->partial_bytes,
					   sg_rem, sg_skip);

			req_ctx->partial_bytes += sg_rem;
		}

		req_ctx->digcnt += ready_bytes;
		req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
	}

	/* Finalize */
	if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
		size_t hash_pad_len;
		u64 digest_bits;
		u32 oper;

		if (variant == ARTPEC6_CRYPTO)
			oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
		else
			oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);

		/* Write out the partial buffer if present */
		if (req_ctx->partial_bytes) {
			memcpy(req_ctx->partial_buffer_out,
			       req_ctx->partial_buffer,
			       req_ctx->partial_bytes);
			error = artpec6_crypto_setup_out_descr(common,
						req_ctx->partial_buffer_out,
						req_ctx->partial_bytes,
						false, true);
			if (error)
				return error;

			req_ctx->digcnt += req_ctx->partial_bytes;
			req_ctx->partial_bytes = 0;
		}

		if (req_ctx->hash_flags & HASH_FLAG_HMAC)
			digest_bits = 8 * (req_ctx->digcnt + blocksize);
		else
			digest_bits = 8 * req_ctx->digcnt;

		/* Add the hash pad */
		hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
					       req_ctx->digcnt, digest_bits);
		error = artpec6_crypto_setup_out_descr(common,
						      req_ctx->pad_buffer,
						      hash_pad_len, false,
						      true);
		req_ctx->digcnt = 0;

		if (error)
			return error;

		/* Descriptor for the final result */
		error = artpec6_crypto_setup_in_descr(common, areq->result,
						      digestsize,
						      true);
		if (error)
			return error;

	} else { /* This is not the final operation for this request */
		if (!run_hw)
			return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;

		/* Save the result to the context */
		error = artpec6_crypto_setup_in_descr(common,
						      req_ctx->digeststate,
						      contextsize, false);
		if (error)
			return error;
		/* fall through */
	}

	req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
				 HASH_FLAG_FINALIZE);

	error = artpec6_crypto_terminate_in_descrs(common);
	if (error)
		return error;

	error = artpec6_crypto_terminate_out_descrs(common);
	if (error)
		return error;

	error = artpec6_crypto_dma_map_descs(common);
	if (error)
		return error;

	return ARTPEC6_CRYPTO_PREPARE_HASH_START;
}

static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;

	return 0;
}

static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback =
		crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
					   0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;

	return 0;
}

static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;

	return 0;
}

static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;

	return 0;
}

static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
}

static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->fallback);
	artpec6_crypto_aes_exit(tfm);
}

static int
artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int keylen)
{
	struct artpec6_cryptotfm_context *ctx =
		crypto_skcipher_ctx(cipher);

	switch (keylen) {
	case 16:
	case 24:
	case 32:
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->aes_key, key, keylen);
	ctx->key_length = keylen;
	return 0;
}

static int
artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int keylen)
{
	struct artpec6_cryptotfm_context *ctx =
		crypto_skcipher_ctx(cipher);
	int ret;

	ret = xts_check_key(&cipher->base, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case 32:
	case 48:
	case 64:
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->aes_key, key, keylen);
	ctx->key_length = keylen;
	return 0;
}

/** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
 *
 * @areq: The async request to process
 *
 * @return 0 if the dma job was successfully prepared
 *	  <0 on error
 *
 * This function sets up the PDMA descriptors for a block cipher request.
 *
 * The required padding is added for AES-CTR using a statically defined
 * buffer.
 *
 * The PDMA descriptor list will be as follows:
 *
 * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
 * IN:  <CIPHER_MD><data_0>...[data_n]<intr>
 *
 */
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
{
	int ret;
	struct artpec6_crypto_walk walk;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	struct artpec6_crypto_req_common *common;
	bool cipher_decr = false;
	size_t cipher_klen;
	u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
	u32 oper;

	req_ctx = skcipher_request_ctx(areq);
	common = &req_ctx->common;

	artpec6_crypto_init_dma_operation(common);

	if (variant == ARTPEC6_CRYPTO)
		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
	else
		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);

	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
					     sizeof(ctx->key_md), false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
					      ctx->key_length, true, false);
	if (ret)
		return ret;

	req_ctx->cipher_md = 0;

	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
		cipher_klen = ctx->key_length/2;
	else
		cipher_klen = ctx->key_length;

	/* Metadata */
	switch (cipher_klen) {
	case 16:
		cipher_len = regk_crypto_key_128;
		break;
	case 24:
		cipher_len = regk_crypto_key_192;
		break;
	case 32:
		cipher_len = regk_crypto_key_256;
		break;
	default:
		pr_err("%s: Invalid key length %zu!\n",
			MODULE_NAME, ctx->key_length);
		return -EINVAL;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
		oper = regk_crypto_aes_ecb;
		cipher_decr = req_ctx->decrypt;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		oper = regk_crypto_aes_cbc;
		cipher_decr = req_ctx->decrypt;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
		oper = regk_crypto_aes_ctr;
		cipher_decr = false;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		oper = regk_crypto_aes_xts;
		cipher_decr = req_ctx->decrypt;

		if (variant == ARTPEC6_CRYPTO)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
		else
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
		break;

	default:
		pr_err("%s: Invalid cipher mode %d!\n",
			MODULE_NAME, ctx->crypto_type);
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
						 cipher_len);
		if (cipher_decr)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
	} else {
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
						 cipher_len);
		if (cipher_decr)
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
	}

	ret = artpec6_crypto_setup_out_descr(common,
					    &req_ctx->cipher_md,
					    sizeof(req_ctx->cipher_md),
					    false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (ret)
		return ret;

	if (iv_len) {
		ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
						     false, false);
		if (ret)
			return ret;
	}
	/* Data out */
	artpec6_crypto_walk_init(&walk, areq->src);
	ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
	if (ret)
		return ret;

	/* Data in */
	artpec6_crypto_walk_init(&walk, areq->dst);
	ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
	if (ret)
		return ret;

	/* CTR-mode padding required by the HW. */
	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
	    ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
		size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
			     areq->cryptlen;

		if (pad) {
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->pad_buffer,
							     pad, false, false);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_in_descr(common,
							    ac->pad_buffer, pad,
							    false);
			if (ret)
				return ret;
		}
	}

	ret = artpec6_crypto_terminate_out_descrs(common);
	if (ret)
		return ret;

	ret = artpec6_crypto_terminate_in_descrs(common);
	if (ret)
		return ret;

	return artpec6_crypto_dma_map_descs(common);
}
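
/*
 * Worked example (editorial addition, assuming the packet model described
 * above): a 16-byte AES-CBC encryption yields
 *
 * OUT: [key_md][aes_key <eop>][cipher_md][iv][src 16B <eop>]
 * IN:  [4B discard into pad_buffer][dst 16B <intr>]
 *
 * The 4-byte IN descriptor absorbs the word the engine returns after the
 * metadata packet (discarded into pad_buffer), and no CTR/XTS pad
 * descriptors are needed since 16 is a multiple of AES_BLOCK_SIZE.
 */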

static int artpec6_crypto_prepare_aead(struct aead_request *areq)
{
	size_t count;
	int ret;
	size_t input_length;
	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
	struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
	struct artpec6_crypto_req_common *common = &req_ctx->common;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	u32 md_cipher_len;

	artpec6_crypto_init_dma_operation(common);

	/* Key */
	if (variant == ARTPEC6_CRYPTO) {
		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
					 a6_regk_crypto_dlkey);
	} else {
		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
					 a7_regk_crypto_dlkey);
	}
	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
					     sizeof(ctx->key_md), false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
					     ctx->key_length, true, false);
	if (ret)
		return ret;

	req_ctx->cipher_md = 0;

	switch (ctx->key_length) {
	case 16:
		md_cipher_len = regk_crypto_key_128;
		break;
	case 24:
		md_cipher_len = regk_crypto_key_192;
		break;
	case 32:
		md_cipher_len = regk_crypto_key_256;
		break;
	default:
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
						 regk_crypto_aes_gcm);
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
						 md_cipher_len);
		if (req_ctx->decrypt)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
	} else {
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
						 regk_crypto_aes_gcm);
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
						 md_cipher_len);
		if (req_ctx->decrypt)
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
	}

	ret = artpec6_crypto_setup_out_descr(common,
					    (void *) &req_ctx->cipher_md,
					    sizeof(req_ctx->cipher_md), false,
					    false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (ret)
		return ret;

	/* For the decryption, cryptlen includes the tag. */
	input_length = areq->cryptlen;
	if (req_ctx->decrypt)
		input_length -= crypto_aead_authsize(cipher);

	/* Prepare the context buffer */
	req_ctx->hw_ctx.aad_length_bits =
		__cpu_to_be64(8*areq->assoclen);

	req_ctx->hw_ctx.text_length_bits =
		__cpu_to_be64(8*input_length);

	memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
	// The HW omits the initial increment of the counter field.
	memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
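	/*
	 * Editorial note: for the 96-bit IVs used here this matches GCM's
	 * J0 = IV || 0^31 || 1; the driver supplies the pre-incremented
	 * counter block because the hardware skips that first increment.
	 */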
1917
1918	ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
1919		sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
1920	if (ret)
1921		return ret;
1922
	{
		struct artpec6_crypto_walk walk;

		artpec6_crypto_walk_init(&walk, areq->src);

		/* Associated data */
		count = areq->assoclen;
		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
		if (ret)
			return ret;

		if (!IS_ALIGNED(areq->assoclen, 16)) {
			size_t assoc_pad = 16 - (areq->assoclen % 16);
			/* The HW mandates zero padding here */
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->zero_buffer,
							     assoc_pad, false,
							     false);
			if (ret)
				return ret;
		}

		/* Data to crypto */
		count = input_length;
		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
		if (ret)
			return ret;

		if (!IS_ALIGNED(input_length, 16)) {
			size_t crypto_pad = 16 - (input_length % 16);
			/* The HW mandates zero padding here */
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->zero_buffer,
							     crypto_pad,
							     false,
							     false);
			if (ret)
				return ret;
		}
	}

	/* Data from crypto */
	{
		struct artpec6_crypto_walk walk;
		size_t output_len = areq->cryptlen;

		if (req_ctx->decrypt)
			output_len -= crypto_aead_authsize(cipher);

		artpec6_crypto_walk_init(&walk, areq->dst);

		/* skip associated data in the output */
		count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
		if (count)
			return -EINVAL;

		count = output_len;
		ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
		if (ret)
			return ret;

		/* Put padding between the ciphertext and the auth tag */
		if (!IS_ALIGNED(output_len, 16)) {
			size_t crypto_pad = 16 - (output_len % 16);

			ret = artpec6_crypto_setup_in_descr(common,
							    ac->pad_buffer,
							    crypto_pad, false);
			if (ret)
				return ret;
		}

		/* The authentication tag shall follow immediately after
		 * the output ciphertext. For decryption it is written to a
		 * context buffer for later comparison with the input tag.
		 */

		if (req_ctx->decrypt) {
			ret = artpec6_crypto_setup_in_descr(common,
				req_ctx->decryption_tag, AES_BLOCK_SIZE, false);
			if (ret)
				return ret;

		} else {
			/* For encryption the requested tag size may be smaller
			 * than the hardware's generated tag.
			 */
			size_t authsize = crypto_aead_authsize(cipher);

			ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
								authsize);
			if (ret)
				return ret;

			if (authsize < AES_BLOCK_SIZE) {
				count = AES_BLOCK_SIZE - authsize;
				ret = artpec6_crypto_setup_in_descr(common,
					ac->pad_buffer,
					count, false);
				if (ret)
					return ret;
			}
		}
	}

	ret = artpec6_crypto_terminate_in_descrs(common);
	if (ret)
		return ret;

	ret = artpec6_crypto_terminate_out_descrs(common);
	if (ret)
		return ret;

	return artpec6_crypto_dma_map_descs(common);
}

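/* Move queued requests over to the pending list and start their DMA
 * jobs, for as long as the hardware accepts new work.  Completion
 * notifications for the newly started requests are collected on the
 * caller-supplied list and issued once the queue lock is dropped.
 */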
static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
	    struct list_head *completions)
{
	struct artpec6_crypto_req_common *req;

	while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
		req = list_first_entry(&ac->queue,
				       struct artpec6_crypto_req_common,
				       list);
		list_move_tail(&req->list, &ac->pending);
		artpec6_crypto_start_dma(req);

		list_add_tail(&req->complete_in_progress, completions);
	}

	/*
	 * In some cases, the hardware can raise an in_eop_flush interrupt
	 * before actually updating the status, so we have a timer which
	 * rechecks the status on timeout.  Since such cases are expected
	 * to be very rare, we use a relatively large timeout value.  There
	 * should be no noticeable negative effect if we time out spuriously.
	 */
	if (ac->pending_count)
		mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
	else
		del_timer(&ac->timer);
}

static void artpec6_crypto_timeout(struct timer_list *t)
{
	struct artpec6_crypto *ac = from_timer(ac, t, timer);

	dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");

	tasklet_schedule(&ac->task);
}

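/* Tasklet scheduled from the IRQ handler (and from the recheck timer).
 * Jobs complete in submission order, so we walk the pending list and
 * stop at the first job whose final status word is still zero;
 * everything before it has finished.  The completion callbacks run
 * after the queue lock has been dropped so that they may submit new
 * requests.
 */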
static void artpec6_crypto_task(unsigned long data)
{
	struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
	struct artpec6_crypto_req_common *req;
	struct artpec6_crypto_req_common *n;
	struct list_head complete_done;
	struct list_head complete_in_progress;

	INIT_LIST_HEAD(&complete_done);
	INIT_LIST_HEAD(&complete_in_progress);

	if (list_empty(&ac->pending)) {
		pr_debug("Spurious IRQ\n");
		return;
	}

	spin_lock_bh(&ac->queue_lock);

	list_for_each_entry_safe(req, n, &ac->pending, list) {
		struct artpec6_crypto_dma_descriptors *dma = req->dma;
		u32 stat;
		dma_addr_t stataddr;

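		/* Only the final status word of the job is synced and
		 * inspected; the hardware appears to fill the status
		 * words in order, so a non-zero value in the last one
		 * means the whole job has landed in memory (this matches
		 * the check below).
		 */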
		stataddr = dma->stat_dma_addr + 4 * (req->dma->in_cnt - 1);
		dma_sync_single_for_cpu(artpec6_crypto_dev,
					stataddr,
					4,
					DMA_BIDIRECTIONAL);

		stat = req->dma->stat[req->dma->in_cnt - 1];

		/* A non-zero final status descriptor indicates
		 * this job has finished.
		 */
		pr_debug("Request %p status is %X\n", req, stat);
		if (!stat)
			break;

		/* Allow testing of timeout handling with fault injection */
#ifdef CONFIG_FAULT_INJECTION
		if (should_fail(&artpec6_crypto_fail_status_read, 1))
			continue;
#endif

		pr_debug("Completing request %p\n", req);

		list_move_tail(&req->list, &complete_done);

		ac->pending_count--;
	}

	artpec6_crypto_process_queue(ac, &complete_in_progress);

	spin_unlock_bh(&ac->queue_lock);

	/* Perform the completion callbacks without holding the queue lock
	 * to allow new request submissions from the callbacks.
	 */
	list_for_each_entry_safe(req, n, &complete_done, list) {
		artpec6_crypto_dma_unmap_all(req);
		artpec6_crypto_copy_bounce_buffers(req);
		artpec6_crypto_common_destroy(req);

		req->complete(req->req);
	}

	list_for_each_entry_safe(req, n, &complete_in_progress,
				 complete_in_progress) {
		req->req->complete(req->req, -EINPROGRESS);
	}
}

static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
{
	req->complete(req, 0);
}

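/* CBC chaining: the IV for a subsequent request is the last ciphertext
 * block of this one, i.e. the tail of the source buffer when decrypting
 * and the tail of the destination buffer when encrypting.  The two
 * completions below copy it back into the request IV.
 */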
static void
artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
{
	struct skcipher_request *cipher_req = container_of(req,
		struct skcipher_request, base);

	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
				 cipher_req->cryptlen - AES_BLOCK_SIZE,
				 AES_BLOCK_SIZE, 0);
	req->complete(req, 0);
}

static void
artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
{
	struct skcipher_request *cipher_req = container_of(req,
		struct skcipher_request, base);

	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
				 cipher_req->cryptlen - AES_BLOCK_SIZE,
				 AES_BLOCK_SIZE, 0);
	req->complete(req, 0);
}

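/* GCM tag verification is done in software: the hardware always
 * delivers a full 16-byte tag, captured into the request context during
 * the DMA job, and crypto_memneq() gives a constant-time comparison
 * against the tag found in the input.
 */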
static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
{
	int result = 0;

	/* Verify the GCM authentication tag. */
	struct aead_request *areq = container_of(req,
		struct aead_request, base);
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);

	if (req_ctx->decrypt) {
		u8 input_tag[AES_BLOCK_SIZE];
		unsigned int authsize = crypto_aead_authsize(aead);

		sg_pcopy_to_buffer(areq->src,
				   sg_nents(areq->src),
				   input_tag,
				   authsize,
				   areq->assoclen + areq->cryptlen -
				   authsize);

		if (crypto_memneq(req_ctx->decryption_tag,
				  input_tag,
				  authsize)) {
			pr_debug("***EBADMSG:\n");
			print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
					     input_tag, authsize, true);
			print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
					     req_ctx->decryption_tag,
					     authsize, true);

			result = -EBADMSG;
		}
	}

	req->complete(req, result);
}

static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
{
	req->complete(req, 0);
}

/*------------------- Hash functions -----------------------------------------*/
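/* HMAC key preprocessing follows RFC 2104: a key longer than the hash
 * block size is first digested down to the digest size, while shorter
 * keys are stored as-is; either way the cleared key buffer provides the
 * zero padding up to the block size.
 */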
static int
artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
		    const u8 *key, unsigned int keylen)
{
	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
	size_t blocksize;
	int ret;

	if (!keylen) {
		pr_err("Invalid length (%u) of HMAC key\n", keylen);
		return -EINVAL;
	}

	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	if (keylen > blocksize) {
		tfm_ctx->hmac_key_length = blocksize;

		ret = crypto_shash_tfm_digest(tfm_ctx->child_hash, key, keylen,
					      tfm_ctx->hmac_key);
		if (ret)
			return ret;
	} else {
		memcpy(tfm_ctx->hmac_key, key, keylen);
		tfm_ctx->hmac_key_length = keylen;
	}

	return 0;
}

static int
artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
	u32 oper;

	memset(req_ctx, 0, sizeof(*req_ctx));

	req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
	if (hmac)
		req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);

	switch (type) {
	case ARTPEC6_CRYPTO_HASH_SHA1:
		oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
		break;
	case ARTPEC6_CRYPTO_HASH_SHA256:
		oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
		break;
	default:
		pr_err("Unsupported hash type 0x%x\n", type);
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO)
		req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
	else
		req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);

	return 0;
}

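/* Allocate the DMA descriptor state on first use, then act on the
 * prepare step's verdict: START submits the job to the hardware,
 * NO_START means the input was merely buffered and there is nothing to
 * send yet, and any error path likewise tears the descriptors down.
 */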
static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
	int ret;

	if (!req_ctx->common.dma) {
		ret = artpec6_crypto_common_init(&req_ctx->common,
					  &req->base,
					  artpec6_crypto_complete_hash,
					  NULL, 0);

		if (ret)
			return ret;
	}

	ret = artpec6_crypto_prepare_hash(req);
	switch (ret) {
	case ARTPEC6_CRYPTO_PREPARE_HASH_START:
		ret = artpec6_crypto_submit(&req_ctx->common);
		break;

	case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
		ret = 0;
		fallthrough;

	default:
		artpec6_crypto_common_destroy(&req_ctx->common);
		break;
	}

	return ret;
}

static int artpec6_crypto_hash_final(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	req_ctx->hash_flags |= HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int artpec6_crypto_hash_update(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	req_ctx->hash_flags |= HASH_FLAG_UPDATE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int artpec6_crypto_sha1_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
}

static int artpec6_crypto_sha1_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);

	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int artpec6_crypto_sha256_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
}

static int artpec6_crypto_sha256_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
}

static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
				    const char *base_hash_name)
{
	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct artpec6_hash_request_context));
	memset(tfm_ctx, 0, sizeof(*tfm_ctx));

	if (base_hash_name) {
		struct crypto_shash *child;

		child = crypto_alloc_shash(base_hash_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);

		if (IS_ERR(child))
			return PTR_ERR(child);

		tfm_ctx->child_hash = child;
	}

	return 0;
}

static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
{
	return artpec6_crypto_ahash_init_common(tfm, NULL);
}

static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
{
	return artpec6_crypto_ahash_init_common(tfm, "sha256");
}

static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
{
	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);

	if (tfm_ctx->child_hash)
		crypto_free_shash(tfm_ctx->child_hash);

	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
	tfm_ctx->hmac_key_length = 0;
}

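/* The export format stores the operation code extracted from the
 * metadata word with FIELD_GET(), and import rebuilds the word with
 * FIELD_PREP() for the running variant, so the exported state is
 * independent of the ARTPEC-6/7 register layout differences.
 */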
static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
{
	const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
	struct artpec6_hash_export_state *state = out;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;

	BUILD_BUG_ON(sizeof(state->partial_buffer) !=
		     sizeof(ctx->partial_buffer));
	BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));

	state->digcnt = ctx->digcnt;
	state->partial_bytes = ctx->partial_bytes;
	state->hash_flags = ctx->hash_flags;

	if (variant == ARTPEC6_CRYPTO)
		state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
	else
		state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);

	memcpy(state->partial_buffer, ctx->partial_buffer,
	       sizeof(state->partial_buffer));
	memcpy(state->digeststate, ctx->digeststate,
	       sizeof(state->digeststate));

	return 0;
}

static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
{
	struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
	const struct artpec6_hash_export_state *state = in;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;

	memset(ctx, 0, sizeof(*ctx));

	ctx->digcnt = state->digcnt;
	ctx->partial_bytes = state->partial_bytes;
	ctx->hash_flags = state->hash_flags;

	if (variant == ARTPEC6_CRYPTO)
		ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
	else
		ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);

	memcpy(ctx->partial_buffer, state->partial_buffer,
	       sizeof(state->partial_buffer));
	memcpy(ctx->digeststate, state->digeststate,
	       sizeof(state->digeststate));

	return 0;
}

static int init_crypto_hw(struct artpec6_crypto *ac)
{
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	u32 out_descr_buf_size;
	u32 out_data_buf_size;
	u32 in_data_buf_size;
	u32 in_descr_buf_size;
	u32 in_stat_buf_size;
	u32 in, out;

	/*
	 * The PDMA unit contains 1984 bytes of internal memory for the OUT
	 * channels and 1024 bytes for the IN channel. This is an elastic
	 * memory used to internally store the descriptors and data. The
	 * values are specified in 64 byte increments.  With the settings
	 * below, the OUT channels use (16 + 15) * 64 = 1984 bytes and the
	 * IN channel (8 + 4 + 4) * 64 = 1024 bytes, i.e. both budgets are
	 * used in full.  TrustZone buffers are not used at this stage.
	 */
	out_data_buf_size = 16;  /* 1024 bytes for data */
	out_descr_buf_size = 15; /* 960 bytes for descriptors */
	in_data_buf_size = 8;    /* 512 bytes for data */
	in_descr_buf_size = 4;   /* 256 bytes for descriptors */
	in_stat_buf_size = 4;    /* 256 bytes for status descriptors */

	BUILD_BUG_ON_MSG((out_data_buf_size
				+ out_descr_buf_size) * 64 > 1984,
			  "Invalid OUT configuration");

	BUILD_BUG_ON_MSG((in_data_buf_size
				+ in_descr_buf_size
				+ in_stat_buf_size) * 64 > 1024,
			  "Invalid IN configuration");

	in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
	     FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
	     FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);

	out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
	      FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);

	writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
	writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
		writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
		writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
			       A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
			       base + A6_PDMA_INTR_MASK);
	} else {
		writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
		writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
		writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
			       A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
			       base + A7_PDMA_INTR_MASK);
	}

	return 0;
}

static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
{
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
		writel_relaxed(0, base + A6_PDMA_IN_CFG);
		writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
	} else {
		writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
		writel_relaxed(0, base + A7_PDMA_IN_CFG);
		writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
	}

	writel_relaxed(0, base + PDMA_OUT_CFG);
}

static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
{
	struct artpec6_crypto *ac = dev_id;
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	u32 mask_in_data, mask_in_eop_flush;
	u32 in_cmd_flush_stat, in_cmd_reg;
	u32 ack_intr_reg;
	u32 ack = 0;
	u32 intr;

	if (variant == ARTPEC6_CRYPTO) {
		intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
		mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
		mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
		in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
		in_cmd_reg = A6_PDMA_IN_CMD;
		ack_intr_reg = A6_PDMA_ACK_INTR;
	} else {
		intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
		mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
		mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
		in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
		in_cmd_reg = A7_PDMA_IN_CMD;
		ack_intr_reg = A7_PDMA_ACK_INTR;
	}

	/* We get two interrupt notifications from each job.
	 * The in_data interrupt means all data was sent to memory, upon
	 * which we request a status flush command to write each job's
	 * status to its status vector.  This ensures that the tasklet
	 * can detect exactly how many of the submitted jobs have
	 * finished.
	 */
	if (intr & mask_in_data)
		ack |= mask_in_data;

	if (intr & mask_in_eop_flush)
		ack |= mask_in_eop_flush;
	else
		writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);

	writel_relaxed(ack, base + ack_intr_reg);

	if (intr & mask_in_eop_flush)
		tasklet_schedule(&ac->task);

	return IRQ_HANDLED;
}

/*------------------- Algorithm definitions ----------------------------------*/

/* Hashes */
static struct ahash_alg hash_algos[] = {
	/* SHA-1 */
	{
		.init = artpec6_crypto_sha1_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_sha1_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "sha1",
			.cra_driver_name = "artpec-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
	/* SHA-256 */
	{
		.init = artpec6_crypto_sha256_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_sha256_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "sha256",
			.cra_driver_name = "artpec-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
	/* HMAC SHA-256 */
	{
		.init = artpec6_crypto_hmac_sha256_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_hmac_sha256_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.setkey = artpec6_crypto_hash_set_key,
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "artpec-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init_hmac_sha256,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
};

/* Crypto */
static struct skcipher_alg crypto_algos[] = {
	/* AES - ECB */
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "artpec6-ecb-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_ecb_init,
		.exit = artpec6_crypto_aes_exit,
	},
	/* AES - CTR */
	{
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "artpec6-ctr-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_ctr_encrypt,
		.decrypt = artpec6_crypto_ctr_decrypt,
		.init = artpec6_crypto_aes_ctr_init,
		.exit = artpec6_crypto_aes_ctr_exit,
	},
	/* AES - CBC */
	{
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "artpec6-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_cbc_init,
		.exit = artpec6_crypto_aes_exit
	},
	/* AES - XTS */
	{
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "artpec6-xts-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = 2 * AES_MIN_KEY_SIZE,
		.max_keysize = 2 * AES_MAX_KEY_SIZE,
		.ivsize = 16,
		.setkey = artpec6_crypto_xts_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_xts_init,
		.exit = artpec6_crypto_aes_exit,
	},
};

static struct aead_alg aead_algos[] = {
	{
		.init   = artpec6_crypto_aead_init,
		.setkey = artpec6_crypto_aead_set_key,
		.encrypt = artpec6_crypto_aead_encrypt,
		.decrypt = artpec6_crypto_aead_decrypt,
		.ivsize = GCM_AES_IV_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,

		.base = {
			.cra_name = "gcm(aes)",
			.cra_driver_name = "artpec-gcm-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
	}
};

#ifdef CONFIG_DEBUG_FS

struct dbgfs_u32 {
	char *name;
	mode_t mode;
	u32 *flag;
	char *desc;
};

static struct dentry *dbgfs_root;

static void artpec6_crypto_init_debugfs(void)
{
	dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);

#ifdef CONFIG_FAULT_INJECTION
	fault_create_debugfs_attr("fail_status_read", dbgfs_root,
				  &artpec6_crypto_fail_status_read);

	fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
				  &artpec6_crypto_fail_dma_array_full);
#endif
}

static void artpec6_crypto_free_debugfs(void)
{
	debugfs_remove_recursive(dbgfs_root);
	dbgfs_root = NULL;
}
#endif

static const struct of_device_id artpec6_crypto_of_match[] = {
	{ .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
	{ .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
	{}
};
MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);

static int artpec6_crypto_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	enum artpec6_crypto_variant variant;
	struct artpec6_crypto *ac;
	struct device *dev = &pdev->dev;
	void __iomem *base;
	int irq;
	int err;

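	/* Only a single device instance is supported: the algorithm
	 * callbacks reach the driver state through the global
	 * artpec6_crypto_dev pointer set further down.
	 */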
	if (artpec6_crypto_dev)
		return -ENODEV;

	match = of_match_node(artpec6_crypto_of_match, dev->of_node);
	if (!match)
		return -EINVAL;

	variant = (enum artpec6_crypto_variant)match->data;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
			  GFP_KERNEL);
	if (!ac)
		return -ENOMEM;

	platform_set_drvdata(pdev, ac);
	ac->variant = variant;

	spin_lock_init(&ac->queue_lock);
	INIT_LIST_HEAD(&ac->queue);
	INIT_LIST_HEAD(&ac->pending);
	timer_setup(&ac->timer, artpec6_crypto_timeout, 0);

	ac->base = base;

	ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
		sizeof(struct artpec6_crypto_dma_descriptors),
		64,
		0,
		NULL);
	if (!ac->dma_cache)
		return -ENOMEM;

#ifdef CONFIG_DEBUG_FS
	artpec6_crypto_init_debugfs();
#endif

	tasklet_init(&ac->task, artpec6_crypto_task,
		     (unsigned long)ac);

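	/* The scratch buffers are targets of device DMA; allocate twice
	 * the maximum cache line size and align up so that neither buffer
	 * shares a cache line with unrelated data.
	 */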
	ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
				      GFP_KERNEL);
	if (!ac->pad_buffer)
		return -ENOMEM;
	ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);

	ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
				      GFP_KERNEL);
	if (!ac->zero_buffer)
		return -ENOMEM;
	ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);

	err = init_crypto_hw(ac);
	if (err)
		goto free_cache;

	err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
			       "artpec6-crypto", ac);
	if (err)
		goto disable_hw;

	artpec6_crypto_dev = &pdev->dev;

	err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
	if (err) {
		dev_err(dev, "Failed to register ahashes\n");
		goto disable_hw;
	}

	err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
	if (err) {
		dev_err(dev, "Failed to register ciphers\n");
		goto unregister_ahashes;
	}

	err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
	if (err) {
		dev_err(dev, "Failed to register aeads\n");
		goto unregister_algs;
	}

	return 0;

unregister_algs:
	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
unregister_ahashes:
	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
disable_hw:
	artpec6_crypto_disable_hw(ac);
free_cache:
	kmem_cache_destroy(ac->dma_cache);
	return err;
}

static int artpec6_crypto_remove(struct platform_device *pdev)
{
	struct artpec6_crypto *ac = platform_get_drvdata(pdev);
	int irq = platform_get_irq(pdev, 0);

	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
	crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));

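	/* Quiesce in order: keep the tasklet from running, free the IRQ so
	 * it can no longer schedule the tasklet, then kill the tasklet and
	 * the recheck timer before stopping the hardware.
	 */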
	tasklet_disable(&ac->task);
	devm_free_irq(&pdev->dev, irq, ac);
	tasklet_kill(&ac->task);
	del_timer_sync(&ac->timer);

	artpec6_crypto_disable_hw(ac);

	kmem_cache_destroy(ac->dma_cache);
#ifdef CONFIG_DEBUG_FS
	artpec6_crypto_free_debugfs();
#endif
	return 0;
}

static struct platform_driver artpec6_crypto_driver = {
	.probe   = artpec6_crypto_probe,
	.remove  = artpec6_crypto_remove,
	.driver  = {
		.name  = "artpec6-crypto",
		.of_match_table = artpec6_crypto_of_match,
	},
};

module_platform_driver(artpec6_crypto_driver);

MODULE_AUTHOR("Axis Communications AB");
MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
MODULE_LICENSE("GPL");