// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/xts.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

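/*
 * Per-tfm context for the authenc AEAD algorithms. The encrypt and decrypt
 * content descriptors live in DMA-coherent memory so the device can fetch
 * them directly; enc_fw_req/dec_fw_req are pre-built request templates that
 * each request copies and then patches with per-request addresses/lengths.
 */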
struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
	union {
		struct sha1_state sha1;
		struct sha256_state sha256;
		struct sha512_state sha512;
	};
	char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
	char opad[SHA512_BLOCK_SIZE];
};

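/*
 * Per-tfm context for the AES skcipher algorithms. For XTS, ftfm is a
 * software fallback tfm that is used (fallback == true) for key sizes the
 * accelerator does not handle, i.e. AES-192-XTS.
 */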
struct qat_alg_skcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_skcipher *ftfm;
	bool fallback;
};

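/* Size in bytes of the inner hash state (state1) for the given algorithm. */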
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
}

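/*
 * Precompute the HMAC inner and outer hash states from the authentication
 * key (RFC 2104: the states after hashing K ^ ipad and K ^ opad) and store
 * them big-endian in the state1 region of the hardware auth block. Keys
 * longer than the block size are first digested, as HMAC requires.
 */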
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const u8 *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ctx->ipad, 0, block_size);
	memset(ctx->opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ctx->ipad);
		if (ret)
			return ret;

		memcpy(ctx->opad, ctx->ipad, digest_size);
	} else {
		memcpy(ctx->ipad, auth_key, auth_keylen);
		memcpy(ctx->opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ctx->ipad + i;
		char *opad_ptr = ctx->opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->opad, block_size))
		return -EFAULT;

	/* Check for an error before rounding, not after */
	offset = qat_get_inter_state_size(ctx->qat_hash_alg);
	if (offset < 0)
		return -EFAULT;
	offset = round_up(offset, 8);

	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ctx->ipad, block_size);
	memzero_explicit(ctx->opad, block_size);
	return 0;
}

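/*
 * The next two helpers select how the firmware treats the cipher IV:
 * either as a 64-bit pointer whose state is updated between requests
 * (skcipher), or as 16 bytes of data embedded in the request with no
 * state update (AEAD).
 */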
static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
{
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_64BIT_PTR);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_UPDATE_STATE);
}

static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
{
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

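/*
 * Initialise the fields common to all lookaside (LA) requests: service
 * type, SGL pointer type, no partial processing and no protocol offload.
 */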
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
				    int aead)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	if (aead)
		qat_alg_init_hdr_no_iv_updt(header);
	else
		qat_alg_init_hdr_iv_updt(header);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
}

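/*
 * Build the encrypt session: a cipher-then-hash content descriptor (the
 * auth block is appended right after the cipher key) plus the matching
 * request template. The digest is produced into the destination buffer
 * rather than compared by the hardware.
 */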
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header, 1);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}

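/*
 * Build the decrypt session: hash-then-cipher, with the auth block first
 * and the cipher block placed after the (8-byte aligned) inner and outer
 * hash states. Here the hardware compares the digest itself instead of
 * returning it, and the AES key is converted for the decrypt direction.
 */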
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header, 1);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}

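/*
 * Common skcipher session setup: copy the key into the content descriptor
 * and build a cipher-only request template that terminates in a DRAM
 * write (no auth slice).
 */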
static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
				      struct icp_qat_fw_la_bulk_req *req,
				      struct icp_qat_hw_cipher_algo_blk *cd,
				      const u8 *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header, 0);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
				      int alg, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}

static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
				      int alg, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

	if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_DEC(alg, mode);
	else
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_ENC(alg, mode);
}

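/*
 * Map a key length onto the hardware AES algorithm id. XTS keys are
 * double length (two AES keys), so 128- and 256-bit XTS use 32- and
 * 64-byte keys respectively; AES-192-XTS is not supported here.
 */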
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
	if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
		switch (key_len) {
		case AES_KEYSIZE_128:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_192:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
			break;
		case AES_KEYSIZE_256:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (key_len) {
		case AES_KEYSIZE_128 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_256 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

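/*
 * Split an authenc() key blob into its cipher and MAC parts and build
 * both the encrypt and decrypt sessions from it. The extracted keys are
 * zeroed on every path so no key material is left on the stack.
 */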
static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
		goto error;

	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
		goto error;

	memzero_explicit(&keys, sizeof(keys));
	return 0;
bad_key:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
error:
	memzero_explicit(&keys, sizeof(keys));
	return -EFAULT;
}

static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
					  const u8 *key,
					  unsigned int keylen,
					  int mode)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg, mode))
		return -EINVAL;

	qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
	qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
	return 0;
}

static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	return qat_alg_aead_init_sessions(tfm, key, keylen,
					  ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = NULL;
	int node = get_current_node();
	struct device *dev;
	int ret;

	inst = qat_crypto_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	dev = &GET_DEV(inst->accel_dev);
	ctx->inst = inst;
	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
					 &ctx->enc_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->enc_cd) {
		ret = -ENOMEM;
		goto out_free_inst;
	}
	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
					 &ctx->dec_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->dec_cd) {
		ret = -ENOMEM;
		goto out_free_enc;
	}

	ret = qat_alg_aead_init_sessions(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CBC_MODE);
	if (ret)
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
out_free_inst:
	ctx->inst = NULL;
	qat_crypto_put_instance(inst);
	return ret;
}

static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	if (ctx->enc_cd)
		return qat_alg_aead_rekey(tfm, key, keylen);
	else
		return qat_alg_aead_newkey(tfm, key, keylen);
}

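/*
 * Unmap and free the firmware buffer lists built by qat_alg_sgl_to_bufl().
 * For out-of-place operations the destination list is unmapped separately;
 * the list structures themselves are freed only if they did not fit in the
 * request's preallocated storage.
 */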
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int bl_dma_dir;
	int i;

	bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, bl_dma_dir);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	if (!qat_req->buf.sgl_src_valid)
		kfree(bl);

	if (blp != blpout) {
		/* For out-of-place operations, unmap only the mapped data */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_FROM_DEVICE);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);

		if (!qat_req->buf.sgl_dst_valid)
			kfree(blout);
	}
}

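/*
 * Translate source/destination scatterlists into the flat buffer lists
 * the firmware expects and DMA-map every segment. Lists with up to
 * QAT_MAX_BUFF_DESC entries use preallocated space in the request;
 * larger ones are allocated with GFP_ATOMIC since this runs on the
 * request fast path.
 */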
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp = DMA_MAPPING_ERROR;
	dma_addr_t bloutp = DMA_MAPPING_ERROR;
	struct scatterlist *sg;
	size_t sz_out, sz = struct_size(bufl, bufers, n);
	int node = dev_to_node(&GET_DEV(inst->accel_dev));
	int bufl_dma_dir;

	if (unlikely(!n))
		return -EINVAL;

	qat_req->buf.sgl_src_valid = false;
	qat_req->buf.sgl_dst_valid = false;

	if (n > QAT_MAX_BUFF_DESC) {
		bufl = kzalloc_node(sz, GFP_ATOMIC, node);
		if (unlikely(!bufl))
			return -ENOMEM;
	} else {
		bufl = &qat_req->buf.sgl_src.sgl_hdr;
		memset(bufl, 0, sizeof(struct qat_alg_buf_list));
		qat_req->buf.sgl_src_valid = true;
	}

	bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

	for_each_sg(sgl, sg, n, i)
		bufl->bufers[i].addr = DMA_MAPPING_ERROR;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      bufl_dma_dir);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err_in;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err_in;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = struct_size(buflout, bufers, n);
		sg_nctr = 0;

		if (n > QAT_MAX_BUFF_DESC) {
			buflout = kzalloc_node(sz_out, GFP_ATOMIC, node);
			if (unlikely(!buflout))
				goto err_in;
		} else {
			buflout = &qat_req->buf.sgl_dst.sgl_hdr;
			memset(buflout, 0, sizeof(struct qat_alg_buf_list));
			qat_req->buf.sgl_dst_valid = true;
		}

		bufers = buflout->bufers;
		for_each_sg(sglout, sg, n, i)
			bufers[i].addr = DMA_MAPPING_ERROR;

		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err_out;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		buflout->num_bufs = sg_nctr;
		buflout->num_mapped_bufs = sg_nctr;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err_out;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;
	}
	return 0;

err_out:
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);

	n = sg_nents(sglout);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
			dma_unmap_single(dev, buflout->bufers[i].addr,
					 buflout->bufers[i].len,
					 DMA_FROM_DEVICE);

	if (!qat_req->buf.sgl_dst_valid)
		kfree(buflout);

	/* Fall through: the source mappings must be undone as well */
err_in:
	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	n = sg_nents(sgl);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 bufl_dma_dir);

	if (!qat_req->buf.sgl_src_valid)
		kfree(bufl);

	dev_err(dev, "Failed to map buf for dma\n");
	return -ENOMEM;
}

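/*
 * Completion callbacks, run from the response ring handler. They unmap
 * the request buffers, translate the firmware status into an errno and
 * complete the crypto request.
 */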
static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	u8 stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}

static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				      struct qat_crypto_request *qat_req)
{
	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct skcipher_request *sreq = qat_req->skcipher_req;
	u8 stat_field = qat_resp->comn_resp.comn_status;
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;

	memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
	dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
			  qat_req->iv_paddr);

	sreq->base.complete(&sreq->base, res);
}

void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
				(void *)(__force long)qat_resp->opaque_data;

	qat_req->cb(qat_resp, qat_req);
}

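/*
 * For decryption the authentication tag is included in cryptlen, so only
 * cryptlen - authsize bytes are ciphered, and that length must be CBC
 * block aligned.
 */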
static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_authsize(aead_tfm);
	int ret, ctr = 0;
	u32 cipher_len;

	/* Guard against a u32 underflow in the subtraction below */
	if (areq->cryptlen < digst_size)
		return -EINVAL;

	cipher_len = areq->cryptlen - digst_size;
	if (cipher_len % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = cipher_len;
	cipher_param->cipher_offset = areq->assoclen;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	u8 *iv = areq->iv;
	int ret, ctr = 0;

	if (areq->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));

	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
	cipher_param->cipher_length = areq->cryptlen;
	cipher_param->cipher_offset = areq->assoclen;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
				  const u8 *key, unsigned int keylen,
				  int mode)
{
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
}

static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
				   const u8 *key, unsigned int keylen,
				   int mode)
{
	struct qat_crypto_instance *inst = NULL;
	struct device *dev;
	int node = get_current_node();
	int ret;

	inst = qat_crypto_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	dev = &GET_DEV(inst->accel_dev);
	ctx->inst = inst;
	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
					 &ctx->enc_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->enc_cd) {
		ret = -ENOMEM;
		goto out_free_instance;
	}
	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
					 &ctx->dec_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->dec_cd) {
		ret = -ENOMEM;
		goto out_free_enc;
	}

	ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
	if (ret)
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
out_free_instance:
	ctx->inst = NULL;
	qat_crypto_put_instance(inst);
	return ret;
}

static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   int mode)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->enc_cd)
		return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
	else
		return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
}

static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return qat_alg_skcipher_setkey(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return qat_alg_skcipher_setkey(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CTR_MODE);
}

static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;

	if (keylen >> 1 == AES_KEYSIZE_192) {
		ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
		if (ret)
			return ret;

		ctx->fallback = true;

		return 0;
	}

	ctx->fallback = false;

	return qat_alg_skcipher_setkey(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_XTS_MODE);
}

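/*
 * Submit a cipher request. The IV is copied into a DMA-coherent buffer
 * because the firmware reads it through the 64-bit IV pointer and writes
 * the updated state back; on completion the callback copies it out to
 * req->iv. The send is retried a few times before giving up with -EBUSY,
 * since the transmit ring may be temporarily full.
 */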
static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	int ret, ctr = 0;

	if (req->cryptlen == 0)
		return 0;

	qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
					 &qat_req->iv_paddr, GFP_ATOMIC);
	if (!qat_req->iv)
		return -ENOMEM;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret)) {
		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
				  qat_req->iv_paddr);
		return ret;
	}

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->skcipher_ctx = ctx;
	qat_req->skcipher_req = req;
	qat_req->cb = qat_skcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->cryptlen;
	cipher_param->cipher_offset = 0;
	cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
	memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
				  qat_req->iv_paddr);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
{
	if (req->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	return qat_alg_skcipher_encrypt(req);
}

static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct skcipher_request *nreq = skcipher_request_ctx(req);

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	if (ctx->fallback) {
		memcpy(nreq, req, sizeof(*req));
		skcipher_request_set_tfm(nreq, ctx->ftfm);
		return crypto_skcipher_encrypt(nreq);
	}

	return qat_alg_skcipher_encrypt(req);
}

static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	int ret, ctr = 0;

	if (req->cryptlen == 0)
		return 0;

	qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
					 &qat_req->iv_paddr, GFP_ATOMIC);
	if (!qat_req->iv)
		return -ENOMEM;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret)) {
		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
				  qat_req->iv_paddr);
		return ret;
	}

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->skcipher_ctx = ctx;
	qat_req->skcipher_req = req;
	qat_req->cb = qat_skcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->cryptlen;
	cipher_param->cipher_offset = 0;
	cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
	memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
				  qat_req->iv_paddr);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
{
	if (req->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	return qat_alg_skcipher_decrypt(req);
}

static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct skcipher_request *nreq = skcipher_request_ctx(req);

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	if (ctx->fallback) {
		memcpy(nreq, req, sizeof(*req));
		skcipher_request_set_tfm(nreq, ctx->ftfm);
		return crypto_skcipher_decrypt(nreq);
	}

	return qat_alg_skcipher_decrypt(req);
}

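/*
 * tfm init/exit. The XTS variant also allocates the software fallback
 * tfm and sizes the request context to fit whichever is larger: the QAT
 * request or the fallback's skcipher_request. exit zeroes the content
 * descriptors before freeing them so no key material lingers.
 */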
static int qat_alg_aead_init(struct crypto_aead *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return PTR_ERR(ctx->hash_tfm);
	ctx->qat_hash_alg = hash;
	crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}

static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	int reqsize;

	ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->ftfm))
		return PTR_ERR(ctx->ftfm);

	reqsize = max(sizeof(struct qat_crypto_request),
		      sizeof(struct skcipher_request) +
		      crypto_skcipher_reqsize(ctx->ftfm));
	crypto_skcipher_set_reqsize(tfm, reqsize);

	return 0;
}

static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->ftfm)
		crypto_free_skcipher(ctx->ftfm);

	qat_alg_skcipher_exit_tfm(tfm);
}

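/*
 * Priority 4001 is chosen to sit above the software implementations
 * (generic and AES-NI priorities are typically in the low hundreds),
 * so these variants are preferred whenever a QAT device is up.
 */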
static struct aead_alg qat_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha1_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha256_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha512_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
} };

static struct skcipher_alg qat_skciphers[] = { {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "qat_aes_cbc",
	.base.cra_priority = 4001,
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_tfm,
	.exit = qat_alg_skcipher_exit_tfm,
	.setkey = qat_alg_skcipher_cbc_setkey,
	.decrypt = qat_alg_skcipher_blk_decrypt,
	.encrypt = qat_alg_skcipher_blk_encrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
}, {
	.base.cra_name = "ctr(aes)",
	.base.cra_driver_name = "qat_aes_ctr",
	.base.cra_priority = 4001,
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = 1,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_tfm,
	.exit = qat_alg_skcipher_exit_tfm,
	.setkey = qat_alg_skcipher_ctr_setkey,
	.decrypt = qat_alg_skcipher_decrypt,
	.encrypt = qat_alg_skcipher_encrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
}, {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "qat_aes_xts",
	.base.cra_priority = 4001,
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
			  CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_xts_tfm,
	.exit = qat_alg_skcipher_exit_xts_tfm,
	.setkey = qat_alg_skcipher_xts_setkey,
	.decrypt = qat_alg_skcipher_xts_decrypt,
	.encrypt = qat_alg_skcipher_xts_encrypt,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
} };

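/*
 * Register the algorithms when the first QAT device comes up and
 * unregister them when the last one goes away; active_devs keeps the
 * refcount under algs_lock.
 */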
int qat_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(qat_skciphers,
					ARRAY_SIZE(qat_skciphers));
	if (ret)
		goto unlock;

	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	if (ret)
		goto unreg_algs;

unlock:
	mutex_unlock(&algs_lock);
	return ret;

unreg_algs:
	crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
	goto unlock;
}

void qat_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));

unlock:
	mutex_unlock(&algs_lock);
}