Lines Matching defs:rctx
253 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
260 switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
277 reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
279 if (rctx->flags & HASH_FLAGS_HMAC) {
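Lines 253-279 above come from the control-register setup routine (stm32_hash_write_ctrl() in this driver): the algorithm, data type and HMAC mode are folded into a single CR value before the hash is started. A minimal sketch of how those matched lines fit together; the case labels and bit macros that the search did not show (HASH_CR_ALGO_*, HASH_CR_MODE, HASH_CR_LKEY, HASH_LONG_KEY) are assumptions, not quoted driver code.

    /* Sketch: select algorithm, data type and HMAC mode in one CR write.
     * Macro names below are assumed from the rest of the driver.
     */
    u32 reg = HASH_CR_INIT;

    switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {       /* line 260 */
    case HASH_FLAGS_MD5:
        reg |= HASH_CR_ALGO_MD5;
        break;
    case HASH_FLAGS_SHA1:
        reg |= HASH_CR_ALGO_SHA1;
        break;
    /* ... SHA224 / SHA256 handled the same way ... */
    }

    reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);   /* line 277 */

    if (rctx->flags & HASH_FLAGS_HMAC) {                /* line 279 */
        reg |= HASH_CR_MODE;                            /* HMAC mode bit (assumed name) */
        if (ctx->keylen > HASH_LONG_KEY)
            reg |= HASH_CR_LKEY;                        /* long-key variant */
    }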
296 static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
300 while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
301 count = min(rctx->sg->length - rctx->offset, rctx->total);
302 count = min(count, rctx->buflen - rctx->bufcnt);
305 if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
306 rctx->sg = sg_next(rctx->sg);
313 scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
314 rctx->offset, count, 0);
316 rctx->bufcnt += count;
317 rctx->offset += count;
318 rctx->total -= count;
320 if (rctx->offset == rctx->sg->length) {
321 rctx->sg = sg_next(rctx->sg);
322 if (rctx->sg)
323 rctx->offset = 0;
325 rctx->total = 0;
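Lines 296-325 are essentially the whole of stm32_hash_append_sg(): it drains the request scatterlist into the driver's linear bounce buffer until the buffer is full or the request data is exhausted. A reconstruction of the loop; the early-exit handling around line 305 (what happens when count is zero but the current entry is the last one) is filled in as an assumption.

    static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
    {
        size_t count;

        while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
            /* Copy no more than what is left in the current sg entry,
             * in the request, and in the bounce buffer.
             */
            count = min(rctx->sg->length - rctx->offset, rctx->total);
            count = min(count, rctx->buflen - rctx->bufcnt);

            if (count <= 0) {
                /* Assumption: skip empty entries, otherwise stop. */
                if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
                    rctx->sg = sg_next(rctx->sg);
                    continue;
                }
                break;
            }

            scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
                                     rctx->offset, count, 0);

            rctx->bufcnt += count;
            rctx->offset += count;
            rctx->total  -= count;

            /* Advance to the next sg entry once this one is consumed. */
            if (rctx->offset == rctx->sg->length) {
                rctx->sg = sg_next(rctx->sg);
                if (rctx->sg)
                    rctx->offset = 0;
                else
                    rctx->total = 0;    /* line 325: nothing left to copy */
            }
        }
    }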
381 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
384 dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);
386 final = (rctx->flags & HASH_FLAGS_FINUP);
388 while ((rctx->total >= rctx->buflen) ||
389 (rctx->bufcnt + rctx->total >= rctx->buflen)) {
390 stm32_hash_append_sg(rctx);
391 bufcnt = rctx->bufcnt;
392 rctx->bufcnt = 0;
393 err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
396 stm32_hash_append_sg(rctx);
399 bufcnt = rctx->bufcnt;
400 rctx->bufcnt = 0;
401 err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
402 (rctx->flags & HASH_FLAGS_FINUP));
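Lines 381-402 show the CPU update path: every full bounce buffer is flushed to the peripheral as a non-final block, the remainder is buffered, and only a FINUP request pushes the final partial block. A sketch of that control flow; the `if (final)` guard around lines 399-402 is assumed from context.

    static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
    {
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
        int bufcnt, err = 0, final;

        final = (rctx->flags & HASH_FLAGS_FINUP);

        /* Flush each full bounce buffer as a non-final block. */
        while ((rctx->total >= rctx->buflen) ||
               (rctx->bufcnt + rctx->total >= rctx->buflen)) {
            stm32_hash_append_sg(rctx);
            bufcnt = rctx->bufcnt;
            rctx->bufcnt = 0;
            err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
        }

        /* Buffer what is left for a later update or the final block. */
        stm32_hash_append_sg(rctx);

        if (final) {    /* assumption: only finup flushes the tail here */
            bufcnt = rctx->bufcnt;
            rctx->bufcnt = 0;
            err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
                                      (rctx->flags & HASH_FLAGS_FINUP));
        }

        return err;
    }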
479 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
490 sg_init_one(&rctx->sg_key, ctx->key,
493 rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
495 if (rctx->dma_ct == 0) {
500 err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);
502 dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
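Lines 479-502 belong to the HMAC key transfer over DMA: the key kept in the tfm context is wrapped in a single-entry scatterlist, mapped, pushed through the same DMA transmit helper as ordinary data, and unmapped again. A condensed sketch; the exact key-length rounding at line 490 is not visible in the listing and is an assumption, and error handling is trimmed.

    /* Sketch of the HMAC key phase. */
    sg_init_one(&rctx->sg_key, ctx->key,
                ALIGN(ctx->keylen, sizeof(u32)));       /* assumed rounding */

    rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
    if (rctx->dma_ct == 0) {
        dev_err(hdev->dev, "dma_map_sg error\n");
        return -ENOMEM;
    }

    err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);

    dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);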
544 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
548 u32 *buffer = (void *)rctx->buffer;
550 rctx->sg = hdev->req->src;
551 rctx->total = hdev->req->nbytes;
553 rctx->nents = sg_nents(rctx->sg);
555 if (rctx->nents < 0)
566 for_each_sg(rctx->sg, tsg, rctx->nents, i) {
575 rctx->sg, rctx->nents,
576 rctx->buffer, sg->length - len,
577 rctx->total - sg->length + len);
589 rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
591 if (rctx->dma_ct == 0) {
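Lines 544-591 come from the DMA submission path: req->src becomes the working scatterlist, sg_nents() counts its entries, and the for_each_sg() loop maps and submits them one by one. The sg_pcopy_to_buffer() call at lines 575-577 copies the tail of the last entry, which the DMA engine cannot transfer as-is, into the linear bounce buffer. A sketch of that tail handling only; `len` (the DMA-able part of the last entry) and its alignment constraint are assumptions.

    if (sg_is_last(sg)) {
        len = ALIGN_DOWN(sg->length, sizeof(u32));      /* assumed constraint */

        /* Copy the remaining (sg->length - len) bytes, starting at
         * offset (rctx->total - sg->length + len) within the whole
         * scatterlist, into the bounce buffer.
         */
        ncp = sg_pcopy_to_buffer(rctx->sg, rctx->nents,
                                 rctx->buffer, sg->length - len,
                                 rctx->total - sg->length + len);
    }

    rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
    if (rctx->dma_ct == 0)
        return -ENOMEM;                                 /* lines 589-591 */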
685 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
688 rctx->hdev = hdev;
690 rctx->flags = HASH_FLAGS_CPU;
692 rctx->digcnt = crypto_ahash_digestsize(tfm);
693 switch (rctx->digcnt) {
695 rctx->flags |= HASH_FLAGS_MD5;
698 rctx->flags |= HASH_FLAGS_SHA1;
701 rctx->flags |= HASH_FLAGS_SHA224;
704 rctx->flags |= HASH_FLAGS_SHA256;
710 rctx->bufcnt = 0;
711 rctx->buflen = HASH_BUFLEN;
712 rctx->total = 0;
713 rctx->offset = 0;
714 rctx->data_type = HASH_DATA_8_BITS;
716 memset(rctx->buffer, 0, HASH_BUFLEN);
719 rctx->flags |= HASH_FLAGS_HMAC;
721 dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);
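Lines 685-721 are the request-context initialization: the algorithm flag is derived from the transform's digest size, then the bookkeeping fields and the bounce buffer are reset (lines 710-716). The case labels between the matched lines are not shown; presumably the standard digest-size constants, sketched here as an assumption.

    rctx->digcnt = crypto_ahash_digestsize(tfm);
    switch (rctx->digcnt) {
    case MD5_DIGEST_SIZE:       /* 16 bytes */
        rctx->flags |= HASH_FLAGS_MD5;
        break;
    case SHA1_DIGEST_SIZE:      /* 20 bytes */
        rctx->flags |= HASH_FLAGS_SHA1;
        break;
    case SHA224_DIGEST_SIZE:    /* 28 bytes */
        rctx->flags |= HASH_FLAGS_SHA224;
        break;
    case SHA256_DIGEST_SIZE:    /* 32 bytes */
        rctx->flags |= HASH_FLAGS_SHA256;
        break;
    default:
        return -EINVAL;         /* assumed: unsupported digest size */
    }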
734 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
736 int buflen = rctx->bufcnt;
738 rctx->bufcnt = 0;
740 if (!(rctx->flags & HASH_FLAGS_CPU))
743 err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);
751 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
752 __be32 *hash = (void *)rctx->digest;
755 switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
773 hash[i] = cpu_to_be32(stm32_hash_read(rctx->hdev,
779 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
784 memcpy(req->result, rctx->digest, rctx->digcnt);
791 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
792 struct stm32_hash_dev *hdev = rctx->hdev;
803 rctx->flags |= HASH_FLAGS_ERRORS;
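Lines 751-784 cover the digest read-back: the algorithm flag selects the digest length, the hash result registers are read out and byte-swapped into rctx->digest, and stm32_hash_finish() copies digcnt bytes to req->result; line 803 marks the request as errored in the completion path. A sketch of the read-out; the register accessor name (HASH_HREG()) is taken from the surrounding driver rather than from the matched lines.

    __be32 *hash = (void *)rctx->digest;
    unsigned int i, hashsize;

    switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
    case HASH_FLAGS_MD5:
        hashsize = MD5_DIGEST_SIZE;
        break;
    /* ... SHA1 / SHA224 / SHA256 select their digest sizes ... */
    }

    /* The peripheral exposes the digest as big-endian 32-bit words. */
    for (i = 0; i < hashsize / sizeof(u32); i++)
        hash[i] = cpu_to_be32(stm32_hash_read(rctx->hdev, HASH_HREG(i)));

    /* stm32_hash_finish() then simply does (line 784): */
    memcpy(req->result, rctx->digest, rctx->digcnt);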
813 struct stm32_hash_request_ctx *rctx)
843 struct stm32_hash_request_ctx *rctx;
850 rctx = ahash_request_ctx(req);
853 rctx->op, req->nbytes);
855 return stm32_hash_hw_init(hdev, rctx);
864 struct stm32_hash_request_ctx *rctx;
872 rctx = ahash_request_ctx(req);
874 if (rctx->op == HASH_OP_UPDATE)
876 else if (rctx->op == HASH_OP_FINAL)
888 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
892 rctx->op = op;
899 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
901 if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
904 rctx->total = req->nbytes;
905 rctx->sg = req->src;
906 rctx->offset = 0;
908 if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
909 stm32_hash_append_sg(rctx);
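Lines 899-909 show the .update() entry point for the CPU path: updates that still fit in the bounce buffer are only appended, with no hardware access, while anything larger is queued as an update operation. A sketch; the return values and the enqueue call are assumptions based on the matched lines.

    static int stm32_hash_update(struct ahash_request *req)
    {
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
            return 0;

        rctx->total  = req->nbytes;
        rctx->sg     = req->src;
        rctx->offset = 0;

        /* Still fits in the bounce buffer: accumulate now, defer the
         * hardware work to a later update or final.
         */
        if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
            stm32_hash_append_sg(rctx);
            return 0;
        }

        return stm32_hash_enqueue(req, HASH_OP_UPDATE); /* assumed helper */
    }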
918 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
920 rctx->flags |= HASH_FLAGS_FINUP;
927 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
932 rctx->flags |= HASH_FLAGS_FINUP;
935 rctx->flags &= ~HASH_FLAGS_CPU;
958 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
969 rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
973 preg = rctx->hw_context;
984 memcpy(out, rctx, sizeof(*rctx));
991 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
998 memcpy(rctx, in, sizeof(*rctx));
1000 preg = rctx->hw_context;
1016 kfree(rctx->hw_context);
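Lines 958-1016 are the export/import pair: export snapshots the interrupt, start and control registers plus the HASH_CSR bank into a freshly allocated rctx->hw_context and then copies the whole request context to the caller; import copies the context back, replays the saved registers, and frees the snapshot (line 1016). A sketch of the save side; the exact register names written ahead of the CSR loop are an assumption.

    /* Export side (sketch): save enough hardware state to resume later. */
    rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
                                     sizeof(u32), GFP_KERNEL);
    if (!rctx->hw_context)
        return -ENOMEM;                         /* assumed error handling */

    preg = rctx->hw_context;

    *preg++ = stm32_hash_read(hdev, HASH_IMR);  /* assumed register set */
    *preg++ = stm32_hash_read(hdev, HASH_STR);
    *preg++ = stm32_hash_read(hdev, HASH_CR);
    for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
        *preg++ = stm32_hash_read(hdev, HASH_CSR(i));

    memcpy(out, rctx, sizeof(*rctx));           /* line 984 */

    /* Import mirrors this: memcpy(rctx, in, sizeof(*rctx)) at line 998,
     * write the saved registers back, then kfree(rctx->hw_context).
     */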