// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>

#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/aes.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY   0x01
#define NPE_OP_CCM_ENABLE    0x04
#define NPE_OP_CRYPT_ENABLE  0x08
#define NPE_OP_HASH_ENABLE   0x10
#define NPE_OP_NOT_IN_PLACE  0x20
#define NPE_OP_HMAC_DISABLE  0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80

#define NPE_OP_CCM_GEN_MIC   0xcc
#define NPE_OP_HASH_GEN_ICV  0x50
#define NPE_OP_ENC_GEN_KEY   0xc9

#define MOD_ECB     0x0000
#define MOD_CTR     0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000

#define KEYLEN_128  4
#define KEYLEN_192  6
#define KEYLEN_256  8

#define CIPH_DECR   0x0000
#define CIPH_ENCR   0x0400

#define MOD_DES     0x0000
#define MOD_TDEA2   0x0100
#define MOD_3DES    0x0200
#define MOD_AES     0x0800
#define MOD_AES128  (0x0800 | KEYLEN_128)
#define MOD_AES192  (0x0900 | KEYLEN_192)
#define MOD_AES256  (0x0a00 | KEYLEN_256)

#define MAX_IVLEN   16
#define NPE_ID      2  /* NPE C */
#define NPE_QLEN    16
/* Space for registering when the first NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64

#define SEND_QID    29
#define RECV_QID    30

#define CTL_FLAG_UNUSED		0x0000
#define CTL_FLAG_USED		0x1000
#define CTL_FLAG_PERFORM_ABLK	0x0001
#define CTL_FLAG_GEN_ICV	0x0002
#define CTL_FLAG_GEN_REVAES	0x0004
#define CTL_FLAG_PERFORM_AEAD	0x0008
#define CTL_FLAG_MASK		0x000f

#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE   16

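/*
 * Buffer descriptor handed to the NPE.  The 16-bit length fields are laid
 * out in the byte order the NPE expects on both big- and little-endian
 * builds; ->next and ->dir are host-only bookkeeping used to walk, unmap
 * and free the chain (see free_buf_chain()).
 */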
struct buffer_desc {
	u32 phys_next;
#ifdef __ARMEB__
	u16 buf_len;
	u16 pkt_len;
#else
	u16 pkt_len;
	u16 buf_len;
#endif
	dma_addr_t phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
	enum dma_data_direction dir;
};

struct crypt_ctl {
#ifdef __ARMEB__
	u8 mode;		/* NPE_OP_*  operation mode */
	u8 init_len;
	u16 reserved;
#else
	u16 reserved;
	u8 init_len;
	u8 mode;		/* NPE_OP_*  operation mode */
#endif
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	dma_addr_t icv_rev_aes;	/* icv or rev aes */
	dma_addr_t src_buf;
	dma_addr_t dst_buf;
#ifdef __ARMEB__
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
#else
	u16 auth_len;		/* Authentication data length */
	u16 auth_offs;		/* Authentication start offset */
	u16 crypt_len;		/* Cryption data length */
	u16 crypt_offs;		/* Cryption start offset */
#endif
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4*4 bytes */
	unsigned ctl_flags;
	union {
		struct skcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};

struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	u8 iv[MAX_IVLEN];
	bool encrypt;
};

struct aead_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;
};

struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};

struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned salted;
	atomic_t configuring;
	struct completion completion;
};

struct ixp_alg {
	struct skcipher_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

struct ixp_aead_alg {
	struct aead_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword	= 0xAA010004,
	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword	= 0x00000005,
	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;
static struct dma_pool *buffer_pool;
static struct dma_pool *ctx_pool;

static struct crypt_ctl *crypt_virt;
static dma_addr_t crypt_phys;

static int support_aes = 1;

#define DRIVER_NAME "ixp4xx_crypto"

static struct platform_device *pdev;

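/*
 * All crypt_ctl descriptors live in a single dma_alloc_coherent() block
 * (see setup_crypt_desc()), so virtual and bus addresses are converted
 * with plain index arithmetic.
 */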
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
}

static int setup_crypt_desc(void)
{
	struct device *dev = &pdev->dev;

	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	/* get_crypt_desc_emerg() hands out descriptors beyond NPE_QLEN,
	 * so the full NPE_QLEN_TOTAL range must be allocated here.
	 */
	crypt_virt = dma_alloc_coherent(dev,
					NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
					&crypt_phys, GFP_ATOMIC);
	if (!crypt_virt)
		return -ENOMEM;
	return 0;
}

static spinlock_t desc_lock;
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}

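/*
 * Descriptors NPE_QLEN..NPE_QLEN_TOTAL-1 are reserved for configuration
 * requests (HMAC pad hashing, reverse AES key generation) so that key
 * setup cannot be starved while all regular descriptors are in flight;
 * they are handed out only after get_crypt_desc() has failed.
 */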
static spinlock_t emerg_lock;
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}

static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
			   dma_addr_t phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}

static struct tasklet_struct crypto_done_tasklet;

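/*
 * Called when the ICV straddled scatterlist entries and was therefore
 * produced into the hmac_virt bounce buffer: on encryption copy it back
 * into the destination, then release the bounce buffer.
 */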
static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->assoclen + req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
			req->dst, decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

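/*
 * Completion handler for one descriptor returned by the NPE on RECV_QID.
 * Bit 0 of the queue entry signals an authentication failure, which is
 * reported to the caller as -EBADMSG.
 */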
static void one_packet(dma_addr_t phys)
{
	struct device *dev = &pdev->dev;
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		if (req_ctx->hmac_virt)
			finish_scattered_hmac(crypt);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct skcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
		unsigned int ivsize = crypto_skcipher_ivsize(tfm);
		unsigned int offset;

		if (ivsize > 0) {
			offset = req->cryptlen - ivsize;
			if (req_ctx->encrypt) {
				scatterwalk_map_and_copy(req->iv, req->dst,
							 offset, ivsize, 0);
			} else {
				memcpy(req->iv, req_ctx->iv, ivsize);
				memzero_explicit(req_ctx->iv, ivsize);
			}
		}

		if (req_ctx->dst)
			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
				crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

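/*
 * Drain up to four completions per tasklet run, then reschedule so that
 * a busy receive queue cannot monopolize the CPU.
 */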
static void crypto_done_action(unsigned long arg)
{
	int i;

	for (i = 0; i < 4; i++) {
		dma_addr_t phys = qmgr_get_entry(RECV_QID);

		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}

static int init_ixp_crypto(struct device *dev)
{
	int ret = -ENODEV;
	u32 msg[2] = { 0, 0 };

	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
				     IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	npe_c = npe_request(NPE_ID);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
		if (ret)
			goto npe_release;
		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	} else {
		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;

		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	}

	switch ((msg[1] >> 16) & 0xff) {
	case 3:
		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
				npe_name(npe_c));
		support_aes = 0;
		break;
	case 4:
	case 5:
		support_aes = 1;
		break;
	default:
		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
			npe_name(npe_c));
		ret = -ENODEV;
		goto npe_release;
	}
	/* buffer_pool will also be used to sometimes store the hmac,
	 * so assure it is large enough
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
			sizeof(struct buffer_desc), 32, 0);
	ret = -ENOMEM;
	if (!buffer_pool)
		goto err;

	ctx_pool = dma_pool_create("context", dev,
			NPE_CTX_LEN, 16, 0);
	if (!ctx_pool)
		goto err;

	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
	if (ret)
		goto err;
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
	if (ret) {
		qmgr_release_queue(SEND_QID);
		goto err;
	}
	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);
	return 0;

npe_error:
	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
	ret = -EIO;
err:
	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);
npe_release:
	npe_release(npe_c);
	return ret;
}

static void release_ixp_crypto(struct device *dev)
{
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt) {
		dma_free_coherent(dev,
			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
			crypt_virt, crypt_phys);
	}
}

static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx)
		return -ENOMEM;

	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret)
		free_sa_dir(&ctx->encrypt);

	return ret;
}

static int init_tfm_ablk(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx));
	return init_tfm(crypto_skcipher_tfm(tfm));
}

static int init_tfm_aead(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
	return init_tfm(crypto_aead_tfm(tfm));
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);

	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}

static void exit_tfm_ablk(struct crypto_skcipher *tfm)
{
	exit_tfm(crypto_skcipher_tfm(tfm));
}

static void exit_tfm_aead(struct crypto_aead *tfm)
{
	exit_tfm(crypto_aead_tfm(tfm));
}

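/*
 * Hash one HMAC pad block (key XORed with ipad or opad) on the NPE and
 * store the resulting intermediate digest at @target inside the
 * per-direction NPE context.  Completion is signalled through
 * CTL_FLAG_GEN_ICV in one_packet().
 */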
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
		int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	dma_addr_t pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
		pad[i] ^= xpad;

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = NULL;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

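/*
 * Write the hash configuration word and the algorithm's initial chaining
 * value into the NPE context, then queue generation of the outer and
 * inner HMAC pad digests for the given key.
 */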
static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
		const u8 *key, int key_len, unsigned digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
	*(u32 *)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
				+ sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
			init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
			init_len, npe_ctx_addr, key, key_len);
}

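/*
 * AES decryption needs the reverse (decryption) key schedule.  Temporarily
 * set CIPH_ENCR in the decrypt context and ask the NPE to derive the
 * reverse key (NPE_OP_ENC_GEN_KEY); the flag is cleared again in
 * one_packet() when CTL_FLAG_GEN_REVAES completes.
 */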
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt)
		return -EAGAIN;

	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
		const u8 *key, int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		switch (key_len) {
		case 16: keylen_cfg = MOD_AES128; break;
		case 24: keylen_cfg = MOD_AES192; break;
		case 32: keylen_cfg = MOD_AES256; break;
		default:
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else {
		err = crypto_des_verify_key(tfm, key);
		if (err)
			return err;
	}
	/* write cfg word to cryptinfo */
	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
	if ((cipher_cfg & MOD_AES) && !encrypt)
		return gen_rev_aes_key(tfm);

	return 0;
}

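/*
 * Map each scatterlist segment and append a buffer descriptor to the
 * chain headed by @buf (normally an on-stack hook descriptor).  Returns
 * the last descriptor of the chain, or NULL if a descriptor allocation
 * fails.
 */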
static struct buffer_desc *chainup_buffers(struct device *dev,
		struct scatterlist *sg, unsigned nbytes,
		struct buffer_desc *buf, gfp_t flags,
		enum dma_data_direction dir)
{
	for (; nbytes > 0; sg = sg_next(sg)) {
		unsigned len = min(nbytes, sg->length);
		struct buffer_desc *next_buf;
		dma_addr_t next_buf_phys;
		void *ptr;

		nbytes -= len;
		ptr = sg_virt(sg);
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf) {
			/* terminate the chain built so far before failing,
			 * so the caller can release it with free_buf_chain()
			 */
			buf->next = NULL;
			buf->phys_next = 0;
			return NULL;
		}
		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;
		buf = next_buf;

		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
		buf->dir = dir;
	}
	buf->next = NULL;
	buf->phys_next = 0;
	return buf;
}

static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int key_len)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       ablk_setkey(tfm, key, key_len);
}

static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
		unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* the nonce is stored in bytes at end of key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
			CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}

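/*
 * Common skcipher path: fill in a crypt_ctl descriptor, chain up the
 * source (and, for out-of-place requests, destination) buffers and hand
 * the job to the NPE via SEND_QID.  The request completes asynchronously
 * in one_packet().
 */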
static int ablk_perform(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned ivsize = crypto_skcipher_ivsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->cryptlen;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
	struct buffer_desc src_hook;
	struct device *dev = &pdev->dev;
	unsigned int offset;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	req_ctx->encrypt = encrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);
	if (ivsize > 0 && !encrypt) {
		offset = req->cryptlen - ivsize;
		scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
	}
	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		req_ctx->dst = NULL;
		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
					flags, DMA_FROM_DEVICE))
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;
	} else {
		req_ctx->dst = NULL;
	}
	req_ctx->src = NULL;
	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
				flags, src_direction))
		goto free_buf_src;

	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
	if (req->src != req->dst)
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int ablk_encrypt(struct skcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct skcipher_request *req)
{
	return ablk_perform(req, 0);
}

static int ablk_rfc3686_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->iv;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->iv = iv;
	ret = ablk_perform(req, 1);
	req->iv = info;
	return ret;
}

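/*
 * Common AEAD path: authentication always covers assoclen + cryptlen,
 * while the cipher operates on the (possibly smaller) eff_cryptlen
 * starting at cryptoffset.  If the ICV does not fit into the last
 * buffer descriptor it is bounced through hmac_virt.
 */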
static int aead_perform(struct aead_request *req, int encrypt,
		int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	unsigned authsize = crypto_aead_authsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen;
	struct buffer_desc *buf, src_hook;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	unsigned int lastlen;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	buf = chainup_buffers(dev, req->src, crypt->auth_len,
			      &src_hook, flags, src_direction);
	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	if (!buf)
		goto free_buf_src;

	lastlen = buf->buf_len;
	if (lastlen >= authsize)
		crypt->icv_rev_aes = buf->phys_addr +
				     buf->buf_len - authsize;

	req_ctx->dst = NULL;

	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		src_direction = DMA_TO_DEVICE;

		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
				      &dst_hook, flags, DMA_FROM_DEVICE);
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;

		if (!buf)
			goto free_buf_dst;

		if (encrypt) {
			lastlen = buf->buf_len;
			if (lastlen >= authsize)
				crypt->icv_rev_aes = buf->phys_addr +
						     buf->buf_len - authsize;
		}
	}

	if (unlikely(lastlen < authsize)) {
		/* The 12 hmac bytes are scattered,
		 * we need to copy them into a safe buffer */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
				&crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto free_buf_dst;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
				req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_dst:
	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

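/*
 * (Re)program both NPE context directions with the current cipher and
 * authentication keys.  The individual setup requests complete
 * asynchronously, so wait until the NPE has acknowledged all of them
 * before returning.
 */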
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned digest_len = crypto_aead_maxauthsize(tfm);
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_maxauthsize(tfm) >> 2;

	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	if (keys.enckeylen > sizeof(ctx->enckey))
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
	if (err)
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
}

static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto	= {
		.base.cra_name		= "cbc(des)",
		.base.cra_blocksize	= DES_BLOCK_SIZE,

		.min_keysize		= DES_KEY_SIZE,
		.max_keysize		= DES_KEY_SIZE,
		.ivsize			= DES_BLOCK_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

}, {
	.crypto	= {
		.base.cra_name		= "ecb(des)",
		.base.cra_blocksize	= DES_BLOCK_SIZE,
		.min_keysize		= DES_KEY_SIZE,
		.max_keysize		= DES_KEY_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.base.cra_name		= "cbc(des3_ede)",
		.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,

		.min_keysize		= DES3_EDE_KEY_SIZE,
		.max_keysize		= DES3_EDE_KEY_SIZE,
		.ivsize			= DES3_EDE_BLOCK_SIZE,
		.setkey			= ablk_des3_setkey,
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base.cra_name		= "ecb(des3_ede)",
		.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,

		.min_keysize		= DES3_EDE_KEY_SIZE,
		.max_keysize		= DES3_EDE_KEY_SIZE,
		.setkey			= ablk_des3_setkey,
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.base.cra_name		= "cbc(aes)",
		.base.cra_blocksize	= AES_BLOCK_SIZE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.base.cra_name		= "ecb(aes)",
		.base.cra_blocksize	= AES_BLOCK_SIZE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto	= {
		.base.cra_name		= "ctr(aes)",
		.base.cra_blocksize	= 1,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.base.cra_name		= "rfc3686(ctr(aes))",
		.base.cra_blocksize	= 1,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
		.setkey			= ablk_rfc3686_setkey,
		.encrypt		= ablk_rfc3686_crypt,
		.decrypt		= ablk_rfc3686_crypt,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
} };

static struct ixp_aead_alg ixp4xx_aeads[] = {
{
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		},
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		},
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
		.setkey		= des3_aead_setkey,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		},
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		},
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
		.setkey		= des3_aead_setkey,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"

static const struct platform_device_info ixp_dev_info __initdata = {
	.name		= DRIVER_NAME,
	.id		= 0,
	.dma_mask	= DMA_BIT_MASK(32),
};

static int __init ixp_module_init(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	pdev = platform_device_register_full(&ixp_dev_info);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto(&pdev->dev);
	if (err) {
		platform_device_unregister(pdev);
		return err;
	}
	for (i = 0; i < num; i++) {
		struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			continue;
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
			continue;

		/* block ciphers */
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				      CRYPTO_ALG_ASYNC |
				      CRYPTO_ALG_ALLOCATES_MEMORY;
		if (!cra->setkey)
			cra->setkey = ablk_setkey;
		if (!cra->encrypt)
			cra->encrypt = ablk_encrypt;
		if (!cra->decrypt)
			cra->decrypt = ablk_decrypt;
		cra->init = init_tfm_ablk;
		cra->exit = exit_tfm_ablk;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;
		if (crypto_register_skcipher(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
				cra->base.cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			continue;
		if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
			continue;

		/* authenc */
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				      CRYPTO_ALG_ASYNC |
				      CRYPTO_ALG_ALLOCATES_MEMORY;
		cra->setkey = cra->setkey ?: aead_setkey;
		cra->setauthsize = aead_setauthsize;
		cra->encrypt = aead_encrypt;
		cra->decrypt = aead_decrypt;
		cra->init = init_tfm_aead;
		cra->exit = exit_tfm_aead;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;

		if (crypto_register_aead(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
				cra->base.cra_driver_name);
		else
			ixp4xx_aeads[i].registered = 1;
	}
	return 0;
}

static void __exit ixp_module_exit(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		if (ixp4xx_aeads[i].registered)
			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
	}

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto(&pdev->dev);
	platform_device_unregister(pdev);
}

module_init(ixp_module_init);
module_exit(ixp_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");