Lines matching refs:rctx (drivers/crypto/bcm/cipher.c, Broadcom iProc SPU crypto driver)

117  * @rctx:	crypto request context
134 struct iproc_reqctx_s *rctx,
140 struct iproc_ctx_s *ctx = rctx->ctx;
144 rctx->gfp);
151 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
156 sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
160 datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
161 rctx->dst_nents, chunksize);
169 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
171 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
172 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
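Note: the rx fragments above (file lines 144-172) are chained in a fixed order: SPU response header, optional SUPDT tweak for XTS, destination data, status padding, status word. A minimal userspace sketch of that append-in-order pattern follows; the buffer sizes are illustrative, and sg_init_table()/sg_set_buf() are kernel APIs not reproduced here.

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for a scatterlist entry; purely illustrative. */
    struct frag {
        void *buf;
        size_t len;
    };

    /* Record one fragment and return the next free slot, mirroring the
     * sg_set_buf(sg++, ...) idiom in spu_skcipher_rx_sg_create(). */
    static struct frag *frag_set(struct frag *f, void *buf, size_t len)
    {
        f->buf = buf;
        f->len = len;
        return f + 1;
    }

    int main(void)
    {
        unsigned char resp_hdr[32], data[256], stat_pad[4], rx_stat[4];
        struct frag frags[4], *f = frags;

        f = frag_set(f, resp_hdr, sizeof(resp_hdr)); /* response header */
        f = frag_set(f, data, sizeof(data));         /* plaintext out   */
        f = frag_set(f, stat_pad, sizeof(stat_pad)); /* status padding  */
        f = frag_set(f, rx_stat, sizeof(rx_stat));   /* SPU status word */
        printf("%zu rx fragments\n", (size_t)(f - frags));
        return 0;
    }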
182 * @rctx: crypto request context
198 struct iproc_reqctx_s *rctx,
203 struct iproc_ctx_s *ctx = rctx->ctx;
208 rctx->gfp);
215 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
221 sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);
224 datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
225 rctx->src_nents, chunksize);
233 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
237 memset(rctx->msg_buf.tx_stat, 0, stat_len);
238 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
285 * @rctx: Crypto request context
299 static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
302 struct crypto_async_request *areq = rctx->parent;
305 struct iproc_ctx_s *ctx = rctx->ctx;
330 cipher_parms.iv_len = rctx->iv_ctr_len;
332 mssg = &rctx->mb_mssg;
333 chunk_start = rctx->src_sent;
334 remaining = rctx->total_todo - chunk_start;
343 rctx->src_sent += chunksize;
344 rctx->total_sent = rctx->src_sent;
347 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
348 rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
351 rctx->is_encrypt && chunk_start)
356 sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
357 rctx->iv_ctr_len,
358 chunk_start - rctx->iv_ctr_len);
360 if (rctx->iv_ctr_len) {
362 memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
363 rctx->iv_ctr_len);
367 !rctx->is_encrypt) {
372 sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
373 rctx->iv_ctr_len,
374 rctx->src_sent - rctx->iv_ctr_len);
385 add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
395 rctx->src_sent, chunk_start, remaining, chunksize);
398 memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
399 sizeof(rctx->msg_buf.bcm_spu_req_hdr));
401 spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
402 ctx->spu_req_hdr_len, !(rctx->is_encrypt),
413 spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
415 rctx->total_sent, stat_pad_len);
418 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
421 dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
422 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
430 mssg->ctx = rctx; /* Will be returned in response */
433 rx_frag_num += rctx->dst_nents;
439 err = spu_skcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
445 tx_frag_num += rctx->src_nents;
453 err = spu_skcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
458 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
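Note: for CTR mode, file line 385 advances the IV by chunksize >> 4, i.e. by the number of 16-byte blocks just submitted, so the next chunk's counter picks up where the hardware left off. A standalone sketch of that big-endian add, assuming a 16-byte counter (the in-tree add_to_ctr() helper lives outside this file):

    #include <stdint.h>
    #include <stdio.h>

    /* Add `blocks` to a 16-byte big-endian counter, byte by byte with
     * carry, matching what add_to_ctr() must achieve. */
    static void ctr_add(uint8_t ctr[16], uint64_t blocks)
    {
        for (int i = 15; i >= 0 && blocks; i--) {
            uint64_t sum = ctr[i] + (blocks & 0xff);

            ctr[i] = (uint8_t)sum;
            blocks = (blocks >> 8) + (sum >> 8); /* propagate carry */
        }
    }

    int main(void)
    {
        uint8_t iv[16] = { [15] = 0xff };

        ctr_add(iv, 1);              /* ...00ff + 1 block -> ...0100 */
        printf("%02x%02x\n", iv[14], iv[15]);
        return 0;
    }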
468 * @rctx: Crypto request context
470 static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
473 struct crypto_async_request *areq = rctx->parent;
475 struct iproc_ctx_s *ctx = rctx->ctx;
479 payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
493 __func__, rctx->total_received, payload_len);
495 dump_sg(req->dst, rctx->total_received, payload_len);
497 rctx->total_received += payload_len;
498 if (rctx->total_received == rctx->total_todo) {
509 * @rctx: crypto request context
526 struct iproc_reqctx_s *rctx,
532 struct iproc_ctx_s *ctx = rctx->ctx;
535 rctx->gfp);
542 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
545 sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
548 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
550 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
551 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
560 * @rctx: crypto request context
578 struct iproc_reqctx_s *rctx,
590 rctx->gfp);
597 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
601 sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);
605 datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
606 rctx->src_nents, new_data_len);
615 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
619 memset(rctx->msg_buf.tx_stat, 0, stat_len);
620 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
629 * @rctx: Crypto request context
652 static int handle_ahash_req(struct iproc_reqctx_s *rctx)
655 struct crypto_async_request *areq = rctx->parent;
660 struct iproc_ctx_s *ctx = rctx->ctx;
695 rctx->total_todo, rctx->total_sent);
718 mssg = &rctx->mb_mssg;
719 chunk_start = rctx->src_sent;
725 nbytes_to_hash = rctx->total_todo - rctx->total_sent;
736 if (!rctx->is_final) {
737 u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
746 new_len = rem - rctx->hash_carry_len;
748 rctx->src_sent);
749 rctx->hash_carry_len = rem;
751 rctx->hash_carry_len);
753 rctx->hash_carry,
754 rctx->hash_carry_len);
761 local_nbuf = rctx->hash_carry_len;
762 rctx->hash_carry_len = 0;
768 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
775 hash_parms.type = spu->spu_hash_type(rctx->total_sent);
782 rctx->total_sent += chunksize;
784 rctx->src_sent += new_data_len;
786 if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
798 hash_parms.key_buf = rctx->incr_hash;
805 __func__, rctx->is_final, local_nbuf);
815 memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
818 spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
843 spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
845 ctx->auth.mode, rctx->total_sent,
849 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
851 packet_dump(" prebuf: ", rctx->hash_carry, local_nbuf);
853 dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
854 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
862 mssg->ctx = rctx; /* Will be returned in response */
865 err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
871 tx_frag_num += rctx->src_nents;
874 err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
879 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
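Note: file lines 725-762 implement the carry logic: hash data must reach the hardware in whole blocks except on the final chunk, so a sub-block tail is parked in rctx->hash_carry and replayed at the front of the next chunk. A simplified userspace sketch, assuming a 64-byte block; unlike the driver, which emits the old carry into the tx scatterlist before stashing the new tail, this version assumes the caller consumed the old carry first.

    #include <stdio.h>
    #include <string.h>

    #define BLOCK_SIZE 64 /* assumption: e.g. SHA-1/SHA-256 block size */

    struct carry {
        unsigned char buf[BLOCK_SIZE];
        unsigned int len;
    };

    /* Return how many bytes (old carry + new data) may be hashed now;
     * the sub-block tail of the new data becomes the new carry. */
    static unsigned int split_for_hash(struct carry *c,
                                       const unsigned char *data,
                                       unsigned int nbytes, int is_final)
    {
        unsigned int total = c->len + nbytes;
        unsigned int rem = is_final ? 0 : total % BLOCK_SIZE;

        if (rem == total) {     /* less than one block: stash it all */
            memcpy(c->buf + c->len, data, nbytes);
            c->len = total;
            return 0;
        }
        memcpy(c->buf, data + (nbytes - rem), rem);
        c->len = rem;
        return total - rem;
    }

    int main(void)
    {
        struct carry c = { .len = 0 };
        unsigned char chunk[100] = { 0 };
        unsigned int n;

        n = split_for_hash(&c, chunk, sizeof(chunk), 0);
        printf("hash %u, carry %u\n", n, c.len); /* hash 64, carry 36 */
        n = split_for_hash(&c, chunk, 10, 1);
        printf("hash %u, carry %u\n", n, c.len); /* hash 46, carry 0  */
        return 0;
    }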
938 * @rctx: Crypto request context
943 static int ahash_req_done(struct iproc_reqctx_s *rctx)
946 struct crypto_async_request *areq = rctx->parent;
948 struct iproc_ctx_s *ctx = rctx->ctx;
951 memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);
969 if (rctx->is_sw_hmac) {
976 if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
991 * @rctx: Crypto request context
993 static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
995 struct iproc_ctx_s *ctx = rctx->ctx;
996 struct crypto_async_request *areq = rctx->parent;
1005 memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);
1012 if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
1013 ahash_req_done(rctx);
1022 * @rctx: crypto request context
1043 struct iproc_reqctx_s *rctx,
1051 struct iproc_ctx_s *ctx = rctx->ctx;
1066 rctx->is_encrypt);
1080 rctx->gfp);
1088 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
1095 memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
1096 sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
1104 datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
1105 rctx->dst_nents, resp_len);
1115 memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
1116 sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
1120 sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
1124 memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
1125 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
1128 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
1129 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
1139 * @rctx: crypto request context
1162 struct iproc_reqctx_s *rctx,
1175 struct iproc_ctx_s *ctx = rctx->ctx;
1182 rctx->gfp);
1189 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
1204 sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);
1207 memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
1208 sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
1216 written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
1217 rctx->src_nents, datalen);
1226 memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
1227 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
1231 sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);
1235 memset(rctx->msg_buf.tx_stat, 0, stat_len);
1236 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
1244 * @rctx: Crypto request context
1258 static int handle_aead_req(struct iproc_reqctx_s *rctx)
1261 struct crypto_async_request *areq = rctx->parent;
1264 struct iproc_ctx_s *ctx = rctx->ctx;
1287 chunksize = rctx->total_todo;
1295 req_opts.is_inbound = !(rctx->is_encrypt);
1305 cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
1306 cipher_parms.iv_len = rctx->iv_ctr_len;
1328 if (rctx->is_encrypt) {
1343 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
1344 rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
1346 assoc_nents = spu_sg_count(rctx->assoc, 0,
1349 mssg = &rctx->mb_mssg;
1351 rctx->total_sent = chunksize;
1352 rctx->src_sent = chunksize;
1356 rctx->is_encrypt))
1360 rctx->iv_ctr_len);
1386 if (!rctx->is_encrypt)
1393 chunksize, rctx->is_encrypt,
1403 if (!rctx->is_encrypt)
1416 if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
1420 sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
1421 req->assoclen + rctx->total_sent -
1430 memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1432 spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
1449 spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
1452 rctx->total_sent, stat_pad_len);
1455 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
1457 dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
1458 packet_dump(" aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
1460 dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
1461 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
1469 mssg->ctx = rctx; /* Will be returned in response */
1472 rx_frag_num += rctx->dst_nents;
1483 (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
1491 rx_frag_num -= rctx->dst_nents;
1494 err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
1502 tx_frag_num += rctx->src_nents;
1510 err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
1511 rctx->assoc, aead_parms.assoc_size,
1517 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
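Note: file lines 1416-1421 show that when the mode wants the ICV inside the request (per spu_req_incl_icv(), e.g. GCM/CCM decrypt), the driver copies it out of the tail of req->src; total_sent equals cryptlen here because AEAD requests go out as a single chunk (file line 1287). A plain-buffer sketch of that offset arithmetic, with illustrative sizes:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned int assoclen = 16, total_sent = 48, digestsize = 16;
        unsigned char src[16 + 48]; /* assoc || ciphertext || ICV */
        unsigned char digest[16];

        memset(src, 0, sizeof(src));
        src[sizeof(src) - 1] = 0x5a; /* pretend last ICV byte */

        /* mirrors req->assoclen + rctx->total_sent - digestsize */
        memcpy(digest, src + assoclen + total_sent - digestsize,
               digestsize);
        printf("digest[%u] = %02x\n", digestsize - 1,
               digest[digestsize - 1]);
        return 0;
    }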
1526 * @rctx: Crypto request context
1528 static void handle_aead_resp(struct iproc_reqctx_s *rctx)
1531 struct crypto_async_request *areq = rctx->parent;
1534 struct iproc_ctx_s *ctx = rctx->ctx;
1540 payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
1547 packet_dump(" assoc_data ", rctx->msg_buf.a.resp_aad,
1556 if (rctx->is_encrypt) {
1557 icv_offset = req->assoclen + rctx->total_sent;
1558 packet_dump(" ICV: ", rctx->msg_buf.digest, ctx->digestsize);
1560 sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
1583 * @rctx: request context
1588 static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
1591 struct brcm_message *mssg = &rctx->mb_mssg;
1601 * @rctx: Request context
1606 static void finish_req(struct iproc_reqctx_s *rctx, int err)
1608 struct crypto_async_request *areq = rctx->parent;
1613 spu_chunk_cleanup(rctx);
1628 struct iproc_reqctx_s *rctx;
1631 rctx = mssg->ctx;
1632 if (unlikely(!rctx)) {
1640 err = spu->spu_status_process(rctx->msg_buf.rx_stat);
1649 switch (rctx->ctx->alg->type) {
1651 handle_skcipher_resp(rctx);
1654 handle_ahash_resp(rctx);
1657 handle_aead_resp(rctx);
1668 if (rctx->total_sent < rctx->total_todo) {
1670 spu_chunk_cleanup(rctx);
1672 switch (rctx->ctx->alg->type) {
1674 err = handle_skcipher_req(rctx);
1677 err = handle_ahash_req(rctx);
1686 err = handle_aead_req(rctx);
1698 finish_req(rctx, err);
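Note: file lines 1628-1698 are the unwinding point: the request context stashed in mssg->ctx at submit time (file lines 430, 862, 1469) comes back in the mailbox response, and the callback either submits the next chunk or finishes the request. A toy model of that loop; the types, names, and 64-byte chunk size are invented for illustration.

    #include <stdio.h>

    struct reqctx {
        unsigned int total_sent;
        unsigned int total_todo;
    };

    struct message {
        void *ctx; /* set at submit time, returned in the response */
    };

    static void submit_next_chunk(struct reqctx *rctx)
    {
        rctx->total_sent += 64;     /* pretend 64 bytes per chunk */
    }

    static void rx_callback(struct message *mssg)
    {
        struct reqctx *rctx = mssg->ctx;

        if (!rctx)
            return;                 /* driver logs an error here */
        if (rctx->total_sent < rctx->total_todo) {
            submit_next_chunk(rctx); /* more to do: resubmit */
            return;
        }
        printf("request complete (%u bytes)\n", rctx->total_sent);
    }

    int main(void)
    {
        struct reqctx rctx = { .total_sent = 64, .total_todo = 128 };
        struct message mssg = { .ctx = &rctx };

        rx_callback(&mssg);   /* first response: submits chunk 2 */
        rx_callback(&mssg);   /* second response: completes      */
        return 0;
    }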
1714 struct iproc_reqctx_s *rctx = skcipher_request_ctx(req);
1721 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1723 rctx->parent = &req->base;
1724 rctx->is_encrypt = encrypt;
1725 rctx->bd_suppress = false;
1726 rctx->total_todo = req->cryptlen;
1727 rctx->src_sent = 0;
1728 rctx->total_sent = 0;
1729 rctx->total_received = 0;
1730 rctx->ctx = ctx;
1733 rctx->src_sg = req->src;
1734 rctx->src_nents = 0;
1735 rctx->src_skip = 0;
1736 rctx->dst_sg = req->dst;
1737 rctx->dst_nents = 0;
1738 rctx->dst_skip = 0;
1746 rctx->iv_ctr_len =
1748 memcpy(rctx->msg_buf.iv_ctr, req->iv, rctx->iv_ctr_len);
1750 rctx->iv_ctr_len = 0;
1754 rctx->chan_idx = select_channel();
1755 err = handle_skcipher_req(rctx);
1758 spu_chunk_cleanup(rctx);
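Note: file line 1721 (and its twins at 1912 and 2614) picks the allocation mode for the whole request: sleeping allocations are safe only if the caller set MAY_SLEEP or MAY_BACKLOG. A sketch of that ternary, with the flag and gfp values reduced to illustrative stand-ins rather than the kernel's:

    #include <stdio.h>

    #define REQ_MAY_BACKLOG 0x1 /* stand-in for CRYPTO_TFM_REQ_MAY_BACKLOG */
    #define REQ_MAY_SLEEP   0x2 /* stand-in for CRYPTO_TFM_REQ_MAY_SLEEP   */

    enum gfp { GFP_ATOMIC_LIKE, GFP_KERNEL_LIKE };

    static enum gfp pick_gfp(unsigned int req_flags)
    {
        return (req_flags & (REQ_MAY_BACKLOG | REQ_MAY_SLEEP)) ?
               GFP_KERNEL_LIKE : GFP_ATOMIC_LIKE;
    }

    int main(void)
    {
        printf("%d %d\n", pick_gfp(REQ_MAY_SLEEP), pick_gfp(0));
        return 0;
    }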
1904 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
1912 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1914 rctx->parent = &req->base;
1915 rctx->ctx = ctx;
1916 rctx->bd_suppress = true;
1917 memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
1920 rctx->src_sg = req->src;
1921 rctx->src_skip = 0;
1922 rctx->src_nents = 0;
1923 rctx->dst_sg = NULL;
1924 rctx->dst_skip = 0;
1925 rctx->dst_nents = 0;
1928 if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
1932 rctx->is_final ? "" : "non-", alg_name);
1941 rctx->chan_idx = select_channel();
1943 err = handle_ahash_req(rctx);
1946 spu_chunk_cleanup(rctx);
1961 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
1968 rctx->hash_carry_len = 0;
1969 rctx->is_final = 0;
1971 rctx->total_todo = 0;
1972 rctx->src_sent = 0;
1973 rctx->total_sent = 0;
1974 rctx->total_received = 0;
1980 rctx->is_sw_hmac = false;
2077 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2083 rctx->total_todo += req->nbytes;
2084 rctx->src_sent = 0;
2135 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2139 rctx->is_final = 1;
2172 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2176 rctx->total_todo += req->nbytes;
2177 rctx->src_sent = 0;
2178 rctx->is_final = 1;
2287 const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2290 spu_exp->total_todo = rctx->total_todo;
2291 spu_exp->total_sent = rctx->total_sent;
2292 spu_exp->is_sw_hmac = rctx->is_sw_hmac;
2293 memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
2294 spu_exp->hash_carry_len = rctx->hash_carry_len;
2295 memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));
2302 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2305 rctx->total_todo = spu_exp->total_todo;
2306 rctx->total_sent = spu_exp->total_sent;
2307 rctx->is_sw_hmac = spu_exp->is_sw_hmac;
2308 memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
2309 rctx->hash_carry_len = spu_exp->hash_carry_len;
2310 memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));
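Note: the export/import pair (file lines 2287-2310) moves only the software-visible hash state: the byte counters, the HMAC flag, the carry buffer, and the running digest in incr_hash. A self-contained sketch with assumed buffer sizes; the struct is flat, so a plain copy captures everything.

    #include <stdio.h>
    #include <string.h>

    #define CARRY_MAX  64 /* assumption: one hash block  */
    #define DIGEST_MAX 64 /* assumption: MAX_DIGEST_SIZE */

    struct hash_export_state {
        unsigned int total_todo;
        unsigned int total_sent;
        int is_sw_hmac;
        unsigned char hash_carry[CARRY_MAX];
        unsigned int hash_carry_len;
        unsigned char incr_hash[DIGEST_MAX];
    };

    /* No pointers inside, so a structure copy captures the whole state. */
    static void hash_export(const struct hash_export_state *rctx,
                            struct hash_export_state *out)
    {
        memcpy(out, rctx, sizeof(*out));
    }

    static void hash_import(struct hash_export_state *rctx,
                            const struct hash_export_state *in)
    {
        memcpy(rctx, in, sizeof(*rctx));
    }

    int main(void)
    {
        struct hash_export_state a = { .total_todo = 128,
                                       .hash_carry_len = 12 };
        struct hash_export_state blob, b = { 0 };

        hash_export(&a, &blob);
        hash_import(&b, &blob);
        printf("todo=%u carry=%u\n", b.total_todo, b.hash_carry_len);
        return 0;
    }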
2418 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2431 rctx->is_sw_hmac = true;
2434 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2435 rctx->hash_carry_len = blocksize;
2436 rctx->total_todo += blocksize;
2468 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2488 rctx->is_sw_hmac = false;
2491 rctx->is_sw_hmac = true;
2494 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2495 rctx->hash_carry_len = blocksize;
2496 rctx->total_todo += blocksize;
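Note: both software-HMAC paths (file lines 2418-2436 and 2468-2496) start HMAC's inner hash H(ipad || msg) by preloading the carry buffer with the ipad block and counting it toward total_todo, so the first data the hardware hashes is ipad. A sketch, assuming a 64-byte block and an all-zero key:

    #include <stdio.h>
    #include <string.h>

    #define BLOCKSIZE 64 /* assumption: SHA-1/SHA-256 block size */

    struct hstate {
        unsigned char hash_carry[BLOCKSIZE];
        unsigned int hash_carry_len;
        unsigned int total_todo;
    };

    /* Queue ipad as the first block of hashed data, as the driver does. */
    static void hmac_init_sw(struct hstate *st,
                             const unsigned char ipad[BLOCKSIZE],
                             unsigned int msg_len)
    {
        memcpy(st->hash_carry, ipad, BLOCKSIZE);
        st->hash_carry_len = BLOCKSIZE;
        st->total_todo = msg_len + BLOCKSIZE; /* ipad counts as data */
    }

    int main(void)
    {
        unsigned char ipad[BLOCKSIZE];
        struct hstate st = { 0 };

        memset(ipad, 0x36, sizeof(ipad)); /* zero key: ipad = 0x36s */
        hmac_init_sw(&st, ipad, 100);
        printf("todo=%u carry=%u\n", st.total_todo, st.hash_carry_len);
        return 0;
    }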
2506 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2519 if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
2520 (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
2554 rctx->iv_ctr_len == GCM_RFC4106_IV_SIZE &&
2577 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2586 subreq = &rctx->req;
2600 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2614 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2616 rctx->parent = &req->base;
2617 rctx->is_encrypt = is_encrypt;
2618 rctx->bd_suppress = false;
2619 rctx->total_todo = req->cryptlen;
2620 rctx->src_sent = 0;
2621 rctx->total_sent = 0;
2622 rctx->total_received = 0;
2623 rctx->is_sw_hmac = false;
2624 rctx->ctx = ctx;
2625 memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2628 rctx->assoc = req->src;
2635 if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
2636 &rctx->src_skip) < 0) {
2642 rctx->src_nents = 0;
2643 rctx->dst_nents = 0;
2645 rctx->dst_sg = rctx->src_sg;
2646 rctx->dst_skip = rctx->src_skip;
2653 if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
2654 &rctx->dst_skip) < 0) {
2666 rctx->iv_ctr_len =
2670 rctx->iv_ctr_len = CCM_AES_IV_SIZE;
2672 rctx->iv_ctr_len = 0;
2675 rctx->hash_carry_len = 0;
2678 flow_log(" rctx->src_sg: %p, src_skip %u\n",
2679 rctx->src_sg, rctx->src_skip);
2680 flow_log(" assoc: %p, assoclen %u\n", rctx->assoc, req->assoclen);
2682 flow_log(" rctx->dst_sg: %p, dst_skip %u\n",
2683 rctx->dst_sg, rctx->dst_skip);
2684 flow_log(" iv_ctr_len:%u\n", rctx->iv_ctr_len);
2685 flow_dump(" iv: ", req->iv, rctx->iv_ctr_len);
2701 if (rctx->iv_ctr_len) {
2703 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
2705 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
2707 rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
2710 rctx->chan_idx = select_channel();
2711 err = handle_aead_req(rctx);
2714 spu_chunk_cleanup(rctx);
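Note: the memcpy pair at file lines 2701-2707 composes the on-wire nonce from the salt captured at setkey time plus the per-request IV; for RFC 4106 GCM that is a 4-byte salt ahead of an 8-byte IV. A plain-buffer sketch with made-up salt and IV values:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char salt[4]   = { 0xca, 0xfe, 0xba, 0xbe }; /* made up */
        unsigned char req_iv[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }; /* made up */
        unsigned char iv_ctr[12];
        unsigned int salt_offset = 0;
        unsigned int salt_len = sizeof(salt);
        unsigned int iv_ctr_len = sizeof(iv_ctr);

        /* Mirrors the two memcpy() calls: salt first, request IV after. */
        memcpy(iv_ctr + salt_offset, salt, salt_len);
        memcpy(iv_ctr + salt_offset + salt_len, req_iv,
               iv_ctr_len - salt_len - salt_offset);

        for (unsigned int i = 0; i < iv_ctr_len; i++)
            printf("%02x", iv_ctr[i]);
        printf("\n");
        return 0;
    }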