Lines Matching refs:rctx (Broadcom iProc SPU crypto driver, drivers/crypto/bcm/cipher.c; rctx is the driver's per-request state, a struct iproc_reqctx_s)

117  * @rctx:	crypto request context
134 struct iproc_reqctx_s *rctx,
140 struct iproc_ctx_s *ctx = rctx->ctx;
144 rctx->gfp);
151 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
156 sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
160 datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
161 rctx->dst_nents, chunksize);
169 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
171 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
172 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
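The fragments above (file lines 134-172) lay out the receive-side scatterlist in exactly the order the SPU writes its response: response header, an optional XTS tweak update, the caller's destination data, optional status padding, then a trailing status word. Pieced together, the body reads roughly as below; declarations, flow logging, and the SPU-M/SPU2 length differences are elided, so treat it as a condensed sketch rather than the verbatim function (the XTS condition in particular is assumed from the supdt_tweak usage):

	struct scatterlist *sg;	/* next sg entry to fill */

	/* one scatterlist entry per fragment the SPU response will fill */
	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);

	/* space for the response header the SPU prepends */
	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	/* XTS: the hardware hands back an updated tweak (condition assumed) */
	if (ctx->cipher.mode == CIPHER_MODE_XTS)
		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN);

	/* cipher output lands directly in the caller's destination sg list */
	datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
				 rctx->dst_nents, chunksize);

	if (stat_pad_len)
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);

	/* clear the status word so stale status can't be misread */
	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());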
182 * @rctx: crypto request context
198 struct iproc_reqctx_s *rctx,
203 struct iproc_ctx_s *ctx = rctx->ctx;
208 rctx->gfp);
215 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
221 sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);
224 datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
225 rctx->src_nents, chunksize);
233 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
237 memset(rctx->msg_buf.tx_stat, 0, stat_len);
238 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
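The transmit side mirrors this layout: BCM+SPU request header (line 215), the XTS tweak or IV (line 221), the source payload, request padding (line 233), and a zeroed transmit status word (lines 237-238). Both sides splice the caller's data in through spu_msg_sg_add(), which lives in the driver's util.c and so is absent from these matches; it chains up to chunksize bytes of the source scatterlist into the message and leaves the src_sg/src_skip cursor at the resume position for the next chunk. A hypothetical userspace model of that cursor bookkeeping, with struct seg standing in for struct scatterlist:

	#include <stdio.h>

	struct seg { const char *buf; unsigned int len; };

	/* Append up to datalen bytes from (*from, *skip) onto the message
	 * list at *to, advancing both cursors. References, not copies,
	 * like scatterlist chaining. Model code, not the kernel helper. */
	static unsigned int msg_seg_add(struct seg **to, struct seg **from,
					unsigned int *skip, int from_nents,
					unsigned int datalen)
	{
		unsigned int written = 0;

		while (datalen && from_nents--) {
			unsigned int avail = (*from)->len - *skip;
			unsigned int n = avail < datalen ? avail : datalen;

			(*to)->buf = (*from)->buf + *skip;
			(*to)->len = n;
			(*to)++;
			written += n;
			datalen -= n;
			if (n == avail) {	/* source segment consumed */
				(*from)++;
				*skip = 0;
			} else {		/* partial: resume here later */
				*skip += n;
			}
		}
		return written;
	}

	int main(void)
	{
		struct seg src[3] = { { "0123456789", 10 },
				      { "abcdefghij", 10 },
				      { "ABCDEFGHIJ", 10 } };
		struct seg msg[4], *to = msg, *from = src;
		unsigned int skip = 0;

		/* two 12-byte "chunks" walk the same source list */
		printf("chunk1: %u bytes\n",
		       msg_seg_add(&to, &from, &skip, 3, 12));
		printf("chunk2: %u bytes, resumes at '%c'\n",
		       msg_seg_add(&to, &from, &skip, 2, 12), msg[2].buf[0]);
		return 0;
	}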
285 * @rctx: Crypto request context
299 static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
302 struct crypto_async_request *areq = rctx->parent;
305 struct iproc_ctx_s *ctx = rctx->ctx;
330 cipher_parms.iv_len = rctx->iv_ctr_len;
332 mssg = &rctx->mb_mssg;
333 chunk_start = rctx->src_sent;
334 remaining = rctx->total_todo - chunk_start;
343 rctx->src_sent += chunksize;
344 rctx->total_sent = rctx->src_sent;
347 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
348 rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
351 rctx->is_encrypt && chunk_start)
356 sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
357 rctx->iv_ctr_len,
358 chunk_start - rctx->iv_ctr_len);
360 if (rctx->iv_ctr_len) {
362 __builtin_memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
363 rctx->iv_ctr_len);
367 !rctx->is_encrypt) {
372 sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
373 rctx->iv_ctr_len,
374 rctx->src_sent - rctx->iv_ctr_len);
385 add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
395 rctx->src_sent, chunk_start, remaining, chunksize);
398 memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
399 sizeof(rctx->msg_buf.bcm_spu_req_hdr));
401 spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
402 ctx->spu_req_hdr_len, !(rctx->is_encrypt),
413 spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
415 rctx->total_sent, stat_pad_len);
418 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
421 dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
422 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
430 mssg->ctx = rctx; /* Will be returned in response */
433 rx_frag_num += rctx->dst_nents;
439 err = spu_skcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
445 tx_frag_num += rctx->src_nents;
453 err = spu_skcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
458 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
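handle_skcipher_req() carries the IV forward between chunks: for CBC encrypt on a continuing chunk it takes the next IV from the previous chunk's output in req->dst (lines 356-358), for CBC decrypt it snapshots the last ciphertext block from req->src before it can be overwritten (lines 372-374), and for counter mode it bumps the IV by the number of 16-byte AES blocks just queued, chunksize >> 4 (line 385). add_to_ctr() itself lives in util.c and is not among these matches, but behaviorally it has to be a big-endian 128-bit add with carry; a standalone sketch under that assumption:

	#include <stdint.h>
	#include <stdio.h>

	/* Behavioral stand-in for add_to_ctr(): treat the 16-byte IV as a
	 * big-endian 128-bit counter and add 'blocks' with carry. */
	static void ctr_add(uint8_t iv[16], uint32_t blocks)
	{
		uint64_t carry = blocks;

		for (int i = 15; i >= 0 && carry; i--) {
			carry += iv[i];
			iv[i] = (uint8_t)carry;
			carry >>= 8;
		}
	}

	int main(void)
	{
		uint8_t iv[16] = { [14] = 0xff, [15] = 0xff };
		unsigned int chunksize = 4096;	/* bytes just sent */

		ctr_add(iv, chunksize >> 4);	/* 256 AES blocks */
		for (int i = 0; i < 16; i++)
			printf("%02x", iv[i]);
		printf("\n");	/* ...0000000100ff: carry propagated */
		return 0;
	}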
468 * @rctx: Crypto request context
470 static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
474 struct crypto_async_request *areq = rctx->parent;
477 struct iproc_ctx_s *ctx = rctx->ctx;
481 payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
495 __func__, rctx->total_received, payload_len);
497 dump_sg(req->dst, rctx->total_received, payload_len);
499 rctx->total_received += payload_len;
500 if (rctx->total_received == rctx->total_todo) {
511 * @rctx: crypto request context
528 struct iproc_reqctx_s *rctx,
534 struct iproc_ctx_s *ctx = rctx->ctx;
537 rctx->gfp);
544 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
547 sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
550 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
552 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
553 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
562 * @rctx: crypto request context
580 struct iproc_reqctx_s *rctx,
592 rctx->gfp);
599 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
603 sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);
607 datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
608 rctx->src_nents, new_data_len);
617 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
621 memset(rctx->msg_buf.tx_stat, 0, stat_len);
622 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
631 * @rctx: Crypto request context
654 static int handle_ahash_req(struct iproc_reqctx_s *rctx)
657 struct crypto_async_request *areq = rctx->parent;
662 struct iproc_ctx_s *ctx = rctx->ctx;
697 rctx->total_todo, rctx->total_sent);
720 mssg = &rctx->mb_mssg;
721 chunk_start = rctx->src_sent;
727 nbytes_to_hash = rctx->total_todo - rctx->total_sent;
738 if (!rctx->is_final) {
739 u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
748 new_len = rem - rctx->hash_carry_len;
750 rctx->src_sent);
751 rctx->hash_carry_len = rem;
753 rctx->hash_carry_len);
755 rctx->hash_carry,
756 rctx->hash_carry_len);
763 local_nbuf = rctx->hash_carry_len;
764 rctx->hash_carry_len = 0;
770 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
777 hash_parms.type = spu->spu_hash_type(rctx->total_sent);
784 rctx->total_sent += chunksize;
786 rctx->src_sent += new_data_len;
788 if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
800 hash_parms.key_buf = rctx->incr_hash;
807 __func__, rctx->is_final, local_nbuf);
817 memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
820 spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
845 spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
847 ctx->auth.mode, rctx->total_sent,
851 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
853 packet_dump(" prebuf: ", rctx->hash_carry, local_nbuf);
855 dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
856 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
864 mssg->ctx = rctx; /* Will be returned in response */
867 err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
873 tx_frag_num += rctx->src_nents;
876 err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
881 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
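The block at lines 738-764 implements the hash-carry scheme: mid-stream, the SPU can only be handed whole blocks, so the sub-block tail of each update is stashed in rctx->hash_carry (up to one block) and replayed at the front of the next chunk as local_nbuf; only a final request flushes everything. A standalone model of just that arithmetic, where BLOCKSIZE and the state struct are illustrative, not the driver's:

	#include <stdio.h>

	#define BLOCKSIZE 64	/* e.g. SHA-256 block size */

	struct carry_state {
		unsigned int carry_len;		/* bytes waiting in hash_carry */
		unsigned long total_in;		/* bytes the caller supplied */
		unsigned long total_hashed;	/* bytes actually sent to HW */
	};

	static void update(struct carry_state *s, unsigned int nbytes,
			   int is_final)
	{
		unsigned long avail = s->carry_len + nbytes;
		unsigned long send = is_final ? avail
					      : avail - (avail % BLOCKSIZE);

		s->total_in += nbytes;
		s->total_hashed += send;
		s->carry_len = avail - send;
		printf("update(%u): send %lu to HW, carry %u\n",
		       nbytes, send, s->carry_len);
	}

	int main(void)
	{
		struct carry_state s = { 0, 0, 0 };

		update(&s, 100, 0);	/* send 64, carry 36 */
		update(&s, 10, 0);	/* under a block: send 0, carry 46 */
		update(&s, 50, 1);	/* final: flush all 96 remaining */
		return 0;
	}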
940 * @rctx: Crypto request context
945 static int ahash_req_done(struct iproc_reqctx_s *rctx)
948 struct crypto_async_request *areq = rctx->parent;
950 struct iproc_ctx_s *ctx = rctx->ctx;
953 memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);
971 if (rctx->is_sw_hmac) {
978 if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
993 * @rctx: Crypto request context
995 static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
997 struct iproc_ctx_s *ctx = rctx->ctx;
999 struct crypto_async_request *areq = rctx->parent;
1009 memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);
1016 if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
1017 ahash_req_done(rctx);
1025 * @rctx: crypto request context
1046 struct iproc_reqctx_s *rctx,
1054 struct iproc_ctx_s *ctx = rctx->ctx;
1069 rctx->is_encrypt);
1083 rctx->gfp);
1091 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
1098 memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
1099 sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
1107 datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
1108 rctx->dst_nents, resp_len);
1118 memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
1119 sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
1123 sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
1127 memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
1128 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
1131 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
1132 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
1142 * @rctx: crypto request context
1165 struct iproc_reqctx_s *rctx,
1178 struct iproc_ctx_s *ctx = rctx->ctx;
1185 rctx->gfp);
1192 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
1207 sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);
1210 memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
1211 sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
1219 written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
1220 rctx->src_nents, datalen);
1229 memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
1230 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
1234 sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);
1238 memset(rctx->msg_buf.tx_stat, 0, stat_len);
1239 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
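Lines 1098, 1118-1119, 1210-1211, and 1229-1230 zero-fill the AAD, GCM, and request pad buffers before chaining them into the message. The pad-length helpers live in spu.c/spu2.c and are not among these matches; assuming the usual rule of padding AAD and GCM/CCM payload out to the 16-byte AES block size (the real helpers may differ per SPU version), the arithmetic would be:

	#include <stdio.h>

	/* Hypothetical pad-length rule, not the driver's helper. */
	static unsigned int pad_to_block(unsigned int len, unsigned int block)
	{
		return len % block ? block - len % block : 0;
	}

	int main(void)
	{
		printf("assoclen 20 -> %u pad bytes\n", pad_to_block(20, 16));
		printf("assoclen 32 -> %u pad bytes\n", pad_to_block(32, 16));
		return 0;	/* prints 12, then 0 */
	}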
1247 * @rctx: Crypto request context
1261 static int handle_aead_req(struct iproc_reqctx_s *rctx)
1264 struct crypto_async_request *areq = rctx->parent;
1267 struct iproc_ctx_s *ctx = rctx->ctx;
1290 chunksize = rctx->total_todo;
1298 req_opts.is_inbound = !(rctx->is_encrypt);
1308 cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
1309 cipher_parms.iv_len = rctx->iv_ctr_len;
1331 if (rctx->is_encrypt) {
1346 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
1347 rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
1349 assoc_nents = spu_sg_count(rctx->assoc, 0,
1352 mssg = &rctx->mb_mssg;
1354 rctx->total_sent = chunksize;
1355 rctx->src_sent = chunksize;
1359 rctx->is_encrypt))
1363 rctx->iv_ctr_len);
1389 if (!rctx->is_encrypt)
1396 chunksize, rctx->is_encrypt,
1406 if (!rctx->is_encrypt)
1419 if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
1423 sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
1424 req->assoclen + rctx->total_sent -
1433 memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1435 spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
1452 spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
1455 rctx->total_sent, stat_pad_len);
1458 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
1460 dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
1461 packet_dump(" aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
1463 dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
1464 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
1472 mssg->ctx = rctx; /* Will be returned in response */
1475 rx_frag_num += rctx->dst_nents;
1486 (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
1494 rx_frag_num -= rctx->dst_nents;
1497 err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
1505 tx_frag_num += rctx->src_nents;
1513 err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
1514 rctx->assoc, aead_parms.assoc_size,
1520 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
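Lines 1419-1424 show the decrypt path pulling the received ICV out of req->src into rctx->msg_buf.digest so the hardware can verify it. The helper sg_copy_part_to_buf() is defined in the driver's util.c rather than in this file; presumably it is a thin wrapper over the stock scatterlist copy routine, along these lines:

	#include <linux/printk.h>
	#include <linux/scatterlist.h>
	#include <linux/types.h>

	/* assumed reconstruction of util.c's sg_copy_part_to_buf(): copy
	 * 'len' bytes out of a scatterlist, starting 'skip' bytes in */
	static void sg_copy_part_to_buf(struct scatterlist *src, u8 *dest,
					unsigned int len, unsigned int skip)
	{
		size_t copied = sg_pcopy_to_buffer(src, sg_nents(src), dest,
						   len, skip);

		if (copied != len)
			pr_err("%s: copied %zu of %u bytes\n", __func__,
			       copied, len);
	}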
1529 * @rctx: Crypto request context
1531 static void handle_aead_resp(struct iproc_reqctx_s *rctx)
1534 struct crypto_async_request *areq = rctx->parent;
1537 struct iproc_ctx_s *ctx = rctx->ctx;
1543 payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
1550 packet_dump(" assoc_data ", rctx->msg_buf.a.resp_aad,
1559 if (rctx->is_encrypt) {
1560 icv_offset = req->assoclen + rctx->total_sent;
1561 packet_dump(" ICV: ", rctx->msg_buf.digest, ctx->digestsize);
1563 sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
1586 * @rctx: request context
1591 static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
1594 struct brcm_message *mssg = &rctx->mb_mssg;
1604 * @rctx: Request context
1609 static void finish_req(struct iproc_reqctx_s *rctx, int err)
1611 struct crypto_async_request *areq = rctx->parent;
1616 spu_chunk_cleanup(rctx);
1631 struct iproc_reqctx_s *rctx;
1634 rctx = mssg->ctx;
1635 if (unlikely(!rctx)) {
1643 err = spu->spu_status_process(rctx->msg_buf.rx_stat);
1652 switch (rctx->ctx->alg->type) {
1654 handle_skcipher_resp(rctx);
1657 handle_ahash_resp(rctx);
1660 handle_aead_resp(rctx);
1671 if (rctx->total_sent < rctx->total_todo) {
1673 spu_chunk_cleanup(rctx);
1675 switch (rctx->ctx->alg->type) {
1677 err = handle_skcipher_req(rctx);
1680 err = handle_ahash_req(rctx);
1689 err = handle_aead_req(rctx);
1701 finish_req(rctx, err);
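spu_rx_callback() (lines 1631-1701) is where chunking closes the loop: after a mailbox response is processed, line 1671's total_sent < total_todo test decides whether to clean up the chunk's resources and submit the next chunk of the same request, or to complete it via finish_req(). A standalone model of that progress loop, with MAX_PAYLOAD standing in for ctx->max_payload:

	#include <stdio.h>

	#define MAX_PAYLOAD 512	/* stand-in for ctx->max_payload */

	int main(void)
	{
		unsigned int total_todo = 1300, total_sent = 0, chunk = 0;

		while (total_sent < total_todo) {	/* line 1671's test */
			unsigned int remaining = total_todo - total_sent;
			unsigned int chunksize =
				remaining > MAX_PAYLOAD ? MAX_PAYLOAD
							: remaining;

			total_sent += chunksize;	/* as at lines 343-344 */
			printf("chunk %u: %u bytes (%u/%u sent)\n",
			       ++chunk, chunksize, total_sent, total_todo);
		}
		printf("done -> finish_req()\n");
		return 0;
	}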
1717 struct iproc_reqctx_s *rctx = skcipher_request_ctx(req);
1724 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1726 rctx->parent = &req->base;
1727 rctx->is_encrypt = encrypt;
1728 rctx->bd_suppress = false;
1729 rctx->total_todo = req->cryptlen;
1730 rctx->src_sent = 0;
1731 rctx->total_sent = 0;
1732 rctx->total_received = 0;
1733 rctx->ctx = ctx;
1736 rctx->src_sg = req->src;
1737 rctx->src_nents = 0;
1738 rctx->src_skip = 0;
1739 rctx->dst_sg = req->dst;
1740 rctx->dst_nents = 0;
1741 rctx->dst_skip = 0;
1749 rctx->iv_ctr_len =
1751 memcpy(rctx->msg_buf.iv_ctr, req->iv, rctx->iv_ctr_len);
1753 rctx->iv_ctr_len = 0;
1757 rctx->chan_idx = select_channel();
1758 err = handle_skcipher_req(rctx);
1761 spu_chunk_cleanup(rctx);
1907 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
1915 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1917 rctx->parent = &req->base;
1918 rctx->ctx = ctx;
1919 rctx->bd_suppress = true;
1920 memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
1923 rctx->src_sg = req->src;
1924 rctx->src_skip = 0;
1925 rctx->src_nents = 0;
1926 rctx->dst_sg = NULL;
1927 rctx->dst_skip = 0;
1928 rctx->dst_nents = 0;
1931 if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
1935 rctx->is_final ? "" : "non-", alg_name);
1944 rctx->chan_idx = select_channel();
1946 err = handle_ahash_req(rctx);
1949 spu_chunk_cleanup(rctx);
1964 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
1971 rctx->hash_carry_len = 0;
1972 rctx->is_final = 0;
1974 rctx->total_todo = 0;
1975 rctx->src_sent = 0;
1976 rctx->total_sent = 0;
1977 rctx->total_received = 0;
1983 rctx->is_sw_hmac = false;
2080 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2086 rctx->total_todo += req->nbytes;
2087 rctx->src_sent = 0;
2138 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2142 rctx->is_final = 1;
2175 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2179 rctx->total_todo += req->nbytes;
2180 rctx->src_sent = 0;
2181 rctx->is_final = 1;
2290 const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2293 spu_exp->total_todo = rctx->total_todo;
2294 spu_exp->total_sent = rctx->total_sent;
2295 spu_exp->is_sw_hmac = rctx->is_sw_hmac;
2296 memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
2297 spu_exp->hash_carry_len = rctx->hash_carry_len;
2298 memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));
2305 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2308 rctx->total_todo = spu_exp->total_todo;
2309 rctx->total_sent = spu_exp->total_sent;
2310 rctx->is_sw_hmac = spu_exp->is_sw_hmac;
2311 memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
2312 rctx->hash_carry_len = spu_exp->hash_carry_len;
2313 memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));
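The export/import pair (lines 2290-2319) serializes exactly the fields needed to resume a partial hash: the byte counters, the software-HMAC flag, the block-carry buffer, and the intermediate digest incr_hash. A standalone model of the round trip; the struct layouts and sizes are illustrative stand-ins for the driver's export blob and iproc_reqctx_s, not its definitions:

	#include <string.h>

	enum { CARRY_MAX = 128, DIGEST_MAX = 64 };	/* illustrative */

	struct hash_export {		/* stand-in for the export blob */
		unsigned long total_todo, total_sent;
		int is_sw_hmac;
		unsigned char hash_carry[CARRY_MAX];
		unsigned int hash_carry_len;
		unsigned char incr_hash[DIGEST_MAX];
	};

	struct hash_reqctx {		/* stand-in for iproc_reqctx_s */
		unsigned long total_todo, total_sent;
		int is_sw_hmac;
		unsigned char hash_carry[CARRY_MAX];
		unsigned int hash_carry_len;
		unsigned char incr_hash[DIGEST_MAX];
	};

	static void do_export(const struct hash_reqctx *rctx,
			      struct hash_export *e)
	{
		e->total_todo = rctx->total_todo;
		e->total_sent = rctx->total_sent;
		e->is_sw_hmac = rctx->is_sw_hmac;
		memcpy(e->hash_carry, rctx->hash_carry, sizeof(e->hash_carry));
		e->hash_carry_len = rctx->hash_carry_len;
		memcpy(e->incr_hash, rctx->incr_hash, sizeof(e->incr_hash));
	}

	static void do_import(struct hash_reqctx *rctx,
			      const struct hash_export *e)
	{
		rctx->total_todo = e->total_todo;
		rctx->total_sent = e->total_sent;
		rctx->is_sw_hmac = e->is_sw_hmac;
		memcpy(rctx->hash_carry, e->hash_carry,
		       sizeof(rctx->hash_carry));
		rctx->hash_carry_len = e->hash_carry_len;
		memcpy(rctx->incr_hash, e->incr_hash,
		       sizeof(rctx->incr_hash));
	}

An import after an export restores the request context field-for-field, which is what lets a partial hash be suspended and resumed on a different request object.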
2420 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2433 rctx->is_sw_hmac = true;
2436 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2437 rctx->hash_carry_len = blocksize;
2438 rctx->total_todo += blocksize;
2470 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2490 rctx->is_sw_hmac = false;
2493 rctx->is_sw_hmac = true;
2496 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2497 rctx->hash_carry_len = blocksize;
2498 rctx->total_todo += blocksize;
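Lines 2433-2438 and 2490-2498 show the software-HMAC path: when the hardware cannot do HMAC natively for this case, the driver sets is_sw_hmac and simply prepends the precomputed ipad block as ordinary hash input through the hash_carry buffer, bumping total_todo by one block so the byte accounting still matches what the hardware hashes. The ipad/opad blocks come from the standard HMAC construction (RFC 2104), which ctx->ipad here presumably holds; a sketch of that derivation, not the driver's setkey path, for a key already reduced to at most one block:

	#include <string.h>

	static void hmac_pads(const unsigned char *key, unsigned int keylen,
			      unsigned int blocksize,
			      unsigned char *ipad, unsigned char *opad)
	{
		unsigned int i;

		memset(ipad, 0, blocksize);
		memcpy(ipad, key, keylen);	/* zero-padded key */
		memcpy(opad, ipad, blocksize);
		for (i = 0; i < blocksize; i++) {
			ipad[i] ^= 0x36;	/* inner pad constant */
			opad[i] ^= 0x5c;	/* outer pad constant */
		}
	}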
2508 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2521 if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
2522 (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
2556 rctx->iv_ctr_len == GCM_RFC4106_IV_SIZE &&
2579 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2586 areq->complete = rctx->old_complete;
2587 areq->data = rctx->old_data;
2596 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2605 rctx->old_tfm = tfm;
2611 rctx->old_complete = req->base.complete;
2612 rctx->old_data = req->base.data;
2624 rctx->old_complete, req);
2625 req->base.data = rctx->old_data;
2639 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2653 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2655 rctx->parent = &req->base;
2656 rctx->is_encrypt = is_encrypt;
2657 rctx->bd_suppress = false;
2658 rctx->total_todo = req->cryptlen;
2659 rctx->src_sent = 0;
2660 rctx->total_sent = 0;
2661 rctx->total_received = 0;
2662 rctx->is_sw_hmac = false;
2663 rctx->ctx = ctx;
2664 memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2667 rctx->assoc = req->src;
2674 if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
2675 &rctx->src_skip) < 0) {
2681 rctx->src_nents = 0;
2682 rctx->dst_nents = 0;
2684 rctx->dst_sg = rctx->src_sg;
2685 rctx->dst_skip = rctx->src_skip;
2692 if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
2693 &rctx->dst_skip) < 0) {
2705 rctx->iv_ctr_len =
2709 rctx->iv_ctr_len = CCM_AES_IV_SIZE;
2711 rctx->iv_ctr_len = 0;
2714 rctx->hash_carry_len = 0;
2717 flow_log(" rctx->src_sg: %p, src_skip %u\n",
2718 rctx->src_sg, rctx->src_skip);
2719 flow_log(" assoc: %p, assoclen %u\n", rctx->assoc, req->assoclen);
2721 flow_log(" rctx->dst_sg: %p, dst_skip %u\n",
2722 rctx->dst_sg, rctx->dst_skip);
2723 flow_log(" iv_ctr_len:%u\n", rctx->iv_ctr_len);
2724 flow_dump(" iv: ", req->iv, rctx->iv_ctr_len);
2740 if (rctx->iv_ctr_len) {
2742 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
2744 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
2746 rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
2749 rctx->chan_idx = select_channel();
2750 err = handle_aead_req(rctx);
2753 spu_chunk_cleanup(rctx);