Lines matching references to ctx

302 static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
306 while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
307 count = min(ctx->sg->length - ctx->offset, ctx->total);
308 count = min(count, ctx->buflen - ctx->bufcnt);
317 if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
318 ctx->sg = sg_next(ctx->sg);
325 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
326 ctx->offset, count, 0);
328 ctx->bufcnt += count;
329 ctx->offset += count;
330 ctx->total -= count;
332 if (ctx->offset == ctx->sg->length) {
333 ctx->sg = sg_next(ctx->sg);
334 if (ctx->sg)
335 ctx->offset = 0;
337 ctx->total = 0;
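
The atmel_sha_append_sg() loop above drains the request's scatterlist into the context bounce buffer: each pass copies as much as fits (bounded by the current segment, the remaining total and the free buffer space), advances offset/bufcnt/total, and steps to the next segment once the current one is exhausted. A minimal userspace sketch of the same bookkeeping, with made-up segment and context types (not the driver code):

/*
 * Illustrative analogue of the append-to-bounce-buffer loop: copy from a
 * segment list into a linear buffer until the buffer is full or the input
 * is consumed. Struct and function names are invented for this sketch.
 */
#include <stddef.h>
#include <string.h>

struct seg { const unsigned char *data; size_t length; };

struct hash_ctx {
        unsigned char buffer[128];
        size_t buflen;          /* capacity of buffer[] */
        size_t bufcnt;          /* bytes currently buffered */
        size_t total;           /* input bytes still pending */
        size_t offset;          /* consumed bytes in the current segment */
        const struct seg *sg;
        size_t nsegs;
};

static size_t append_segments(struct hash_ctx *ctx)
{
        size_t copied = 0;

        while (ctx->bufcnt < ctx->buflen && ctx->total && ctx->nsegs) {
                size_t count = ctx->sg->length - ctx->offset;

                if (count > ctx->total)
                        count = ctx->total;
                if (count > ctx->buflen - ctx->bufcnt)
                        count = ctx->buflen - ctx->bufcnt;

                memcpy(ctx->buffer + ctx->bufcnt, ctx->sg->data + ctx->offset, count);
                ctx->bufcnt += count;
                ctx->offset += count;
                ctx->total  -= count;
                copied      += count;

                if (ctx->offset == ctx->sg->length) {   /* segment exhausted */
                        ctx->sg++;
                        ctx->nsegs--;
                        ctx->offset = 0;
                }
        }
        return copied;
}
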
360 static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
366 size[0] = ctx->digcnt[0];
367 size[1] = ctx->digcnt[1];
369 size[0] += ctx->bufcnt;
370 if (size[0] < ctx->bufcnt)
380 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
383 index = ctx->bufcnt & 0x7f;
385 *(ctx->buffer + ctx->bufcnt) = 0x80;
386 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
387 memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
388 ctx->bufcnt += padlen + 16;
389 ctx->flags |= SHA_FLAGS_PAD;
393 index = ctx->bufcnt & 0x3f;
395 *(ctx->buffer + ctx->bufcnt) = 0x80;
396 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
397 memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
398 ctx->bufcnt += padlen + 8;
399 ctx->flags |= SHA_FLAGS_PAD;
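
atmel_sha_fill_padding() applies standard MD-style padding: a 0x80 byte, zero fill up to the last 8 bytes of the final block (16 bytes for SHA-384/512 with their 128-byte blocks), then the total message length in bits, big-endian. A rough standalone sketch of the 64-byte-block case (SHA-1/224/256), assuming the buffer has room for one extra block; names are illustrative:

/*
 * Pad a partially filled 64-byte block: append 0x80, zero-fill up to byte
 * 56 of the final block, then the message length in bits as a 64-bit
 * big-endian value. Returns the new buffer count.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t sha256_pad(unsigned char *buf, size_t bufcnt, uint64_t total_bytes)
{
        uint64_t bits = total_bytes << 3;       /* message length in bits */
        size_t index = bufcnt & 0x3f;           /* position within the block */
        size_t padlen = (index < 56) ? (56 - index) : (64 + 56 - index);
        int i;

        buf[bufcnt] = 0x80;
        memset(buf + bufcnt + 1, 0, padlen - 1);

        for (i = 0; i < 8; i++)                 /* big-endian 64-bit length */
                buf[bufcnt + padlen + i] = (unsigned char)(bits >> (56 - 8 * i));

        return bufcnt + padlen + 8;
}
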
429 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
432 ctx->dd = dd;
434 ctx->flags = 0;
441 ctx->flags |= SHA_FLAGS_SHA1;
442 ctx->block_size = SHA1_BLOCK_SIZE;
445 ctx->flags |= SHA_FLAGS_SHA224;
446 ctx->block_size = SHA224_BLOCK_SIZE;
449 ctx->flags |= SHA_FLAGS_SHA256;
450 ctx->block_size = SHA256_BLOCK_SIZE;
453 ctx->flags |= SHA_FLAGS_SHA384;
454 ctx->block_size = SHA384_BLOCK_SIZE;
457 ctx->flags |= SHA_FLAGS_SHA512;
458 ctx->block_size = SHA512_BLOCK_SIZE;
465 ctx->bufcnt = 0;
466 ctx->digcnt[0] = 0;
467 ctx->digcnt[1] = 0;
468 ctx->buflen = SHA_BUFFER_LEN;
475 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
489 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
520 if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
522 } else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
523 const u32 *hash = (const u32 *)ctx->digest;
531 ctx->flags &= ~SHA_FLAGS_RESTORE;
564 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
569 ctx->digcnt[1], ctx->digcnt[0], length, final);
574 ctx->digcnt[0] += length;
575 if (ctx->digcnt[0] < length)
576 ctx->digcnt[1]++;
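
The digcnt[] pair updated here (and in the other xmit paths below) is a 128-bit byte counter split across two 64-bit words; the unsigned wrap-around test after the addition detects the carry into the high word. Standalone illustration:

#include <stdint.h>

/* 128-bit byte counter: carry when the low word wraps around. */
static void digcnt_add(uint64_t digcnt[2], uint64_t length)
{
        digcnt[0] += length;
        if (digcnt[0] < length)         /* low word wrapped: propagate carry */
                digcnt[1]++;
}
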
594 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
598 ctx->digcnt[1], ctx->digcnt[0], length1, final);
612 ctx->digcnt[0] += length1;
613 if (ctx->digcnt[0] < length1)
614 ctx->digcnt[1]++;
640 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
645 ctx->digcnt[1], ctx->digcnt[0], length1, final);
676 ctx->digcnt[0] += length1;
677 if (ctx->digcnt[0] < length1)
678 ctx->digcnt[1]++;
705 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
708 atmel_sha_append_sg(ctx);
709 atmel_sha_fill_padding(ctx, 0);
710 bufcnt = ctx->bufcnt;
711 ctx->bufcnt = 0;
713 return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
717 struct atmel_sha_reqctx *ctx,
720 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
721 ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
722 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
723 dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen +
724 ctx->block_size);
728 ctx->flags &= ~SHA_FLAGS_SG;
731 return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
736 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
740 atmel_sha_append_sg(ctx);
742 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
745 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);
748 atmel_sha_fill_padding(ctx, 0);
750 if (final || (ctx->bufcnt == ctx->buflen)) {
751 count = ctx->bufcnt;
752 ctx->bufcnt = 0;
753 return atmel_sha_xmit_dma_map(dd, ctx, count, final);
761 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
766 if (!ctx->total)
769 if (ctx->bufcnt || ctx->offset)
773 ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);
775 sg = ctx->sg;
780 if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
781 /* size is not ctx->block_size aligned */
784 length = min(ctx->total, sg->length);
787 if (!(ctx->flags & SHA_FLAGS_FINUP)) {
788 /* not last sg must be ctx->block_size aligned */
789 tail = length & (ctx->block_size - 1);
794 ctx->total -= length;
795 ctx->offset = length; /* offset where to start slow */
797 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
801 tail = length & (ctx->block_size - 1);
803 ctx->total += tail;
804 ctx->offset = length; /* offset where to start slow */
806 sg = ctx->sg;
807 atmel_sha_append_sg(ctx);
809 atmel_sha_fill_padding(ctx, length);
811 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
812 ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
813 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
815 ctx->buflen + ctx->block_size);
820 ctx->flags &= ~SHA_FLAGS_SG;
821 count = ctx->bufcnt;
822 ctx->bufcnt = 0;
823 return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
826 ctx->sg = sg;
827 if (!dma_map_sg(dd->dev, ctx->sg, 1,
833 ctx->flags |= SHA_FLAGS_SG;
835 count = ctx->bufcnt;
836 ctx->bufcnt = 0;
837 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
838 length, ctx->dma_addr, count, final);
842 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
847 ctx->flags |= SHA_FLAGS_SG;
850 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
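
atmel_sha_update_dma_start() only hands block-aligned lengths to the engine: when the request is not final, the unaligned tail is masked off, subtracted from the DMA length and left for the slow (buffered) path, with ctx->offset recording where that path resumes. The alignment split itself reduces to the following (illustrative helper, not the driver code; block_size must be a power of two):

#include <stddef.h>

struct dma_split { size_t aligned_len; size_t tail; };

static struct dma_split split_for_dma(size_t length, size_t block_size)
{
        struct dma_split s;

        s.tail = length & (block_size - 1);     /* bytes past the last full block */
        s.aligned_len = length - s.tail;        /* what the engine can consume */
        return s;
}
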
856 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
858 if (ctx->flags & SHA_FLAGS_SG) {
859 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
860 if (ctx->sg->length == ctx->offset) {
861 ctx->sg = sg_next(ctx->sg);
862 if (ctx->sg)
863 ctx->offset = 0;
865 if (ctx->flags & SHA_FLAGS_PAD) {
866 dma_unmap_single(dd->dev, ctx->dma_addr,
867 ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
870 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
871 ctx->block_size, DMA_TO_DEVICE);
878 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
882 ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
884 if (ctx->flags & SHA_FLAGS_CPU)
891 err, ctx->digcnt[1], ctx->digcnt[0]);
899 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
903 if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
904 atmel_sha_fill_padding(ctx, 0);
905 count = ctx->bufcnt;
906 ctx->bufcnt = 0;
907 err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
911 atmel_sha_fill_padding(ctx, 0);
912 count = ctx->bufcnt;
913 ctx->bufcnt = 0;
914 err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
924 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
925 u32 *hash = (u32 *)ctx->digest;
928 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
949 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
950 ctx->flags |= SHA_FLAGS_RESTORE;
955 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
960 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
963 memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
967 memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
971 memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
975 memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
979 memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
986 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
987 struct atmel_sha_dev *dd = ctx->dd;
989 if (ctx->digcnt[0] || ctx->digcnt[1])
992 dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %zd\n", ctx->digcnt[1],
993 ctx->digcnt[0], ctx->bufcnt);
1000 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1001 struct atmel_sha_dev *dd = ctx->dd;
1008 ctx->flags |= SHA_FLAGS_ERROR;
1058 struct atmel_sha_ctx *ctx;
1085 ctx = crypto_tfm_ctx(async_req->tfm);
1092 /* WARNING: ctx->start() MAY change dd->is_async. */
1093 err = ctx->start(dd);
1102 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1106 ctx->op, req->nbytes);
1132 if (ctx->op == SHA_OP_UPDATE) {
1134 if (!err && (ctx->flags & SHA_FLAGS_FINUP))
1137 } else if (ctx->op == SHA_OP_FINAL) {
1152 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1156 ctx->op = op;
1163 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1168 ctx->total = req->nbytes;
1169 ctx->sg = req->src;
1170 ctx->offset = 0;
1172 if (ctx->flags & SHA_FLAGS_FINUP) {
1173 if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
1175 ctx->flags |= SHA_FLAGS_CPU;
1176 } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
1177 atmel_sha_append_sg(ctx);
1185 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1187 ctx->flags |= SHA_FLAGS_FINUP;
1189 if (ctx->flags & SHA_FLAGS_ERROR)
1192 if (ctx->flags & SHA_FLAGS_PAD)
1201 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1204 ctx->flags |= SHA_FLAGS_FINUP;
1229 const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1231 memcpy(out, ctx, sizeof(*ctx));
1237 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1239 memcpy(ctx, in, sizeof(*ctx));
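
The export/import hooks above save and restore a partial hash by copying the whole request context verbatim. A simplified standalone analogue with an illustrative state struct (the real struct atmel_sha_reqctx carries more fields):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct sha_partial_state {
        uint64_t digcnt[2];
        size_t bufcnt;
        unsigned char buffer[128];
};

static void sha_export(const struct sha_partial_state *st, void *out)
{
        memcpy(out, st, sizeof(*st));
}

static void sha_import(struct sha_partial_state *st, const void *in)
{
        memcpy(st, in, sizeof(*st));
}
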
1245 struct atmel_sha_ctx *ctx = crypto_tfm_ctx(tfm);
1249 ctx->start = atmel_sha_start;
1395 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1396 size_t bs = ctx->block_size;
1504 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1505 const u32 *words = (const u32 *)ctx->buffer;
1509 din_inc = (ctx->flags & SHA_FLAGS_IDATAR0) ? 0 : 1;
1512 num_words = DIV_ROUND_UP(ctx->bufcnt, sizeof(u32));
1516 ctx->offset += ctx->bufcnt;
1517 ctx->total -= ctx->bufcnt;
1519 if (!ctx->total)
1524 * Fill ctx->buffer now with the next data to be written into
1530 ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
1531 scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
1532 ctx->offset, ctx->bufcnt, 0);
1544 if (unlikely(!(ctx->flags & SHA_FLAGS_WAIT_DATARDY)))
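
atmel_sha_cpu_transfer() feeds the buffered block to the input data register one 32-bit word at a time, so the byte count is first rounded up to whole words. Sketch of that feed loop, with a hypothetical write_idata() stub standing in for the real MMIO write and a word-aligned buffer assumed, as in the driver:

#include <stddef.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static void write_idata(uint32_t val)
{
        (void)val;      /* stand-in for the MMIO write to the IDATARx register */
}

static void cpu_feed_block(const unsigned char *buffer, size_t bufcnt)
{
        const uint32_t *words = (const uint32_t *)buffer;
        size_t num_words = DIV_ROUND_UP(bufcnt, sizeof(uint32_t));
        size_t i;

        for (i = 0; i < num_words; i++)
                write_idata(words[i]);
}
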
1558 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1563 ctx->flags &= ~(SHA_FLAGS_IDATAR0 | SHA_FLAGS_WAIT_DATARDY);
1566 ctx->flags |= SHA_FLAGS_IDATAR0;
1569 ctx->flags |= SHA_FLAGS_WAIT_DATARDY;
1571 ctx->sg = sg;
1572 ctx->total = len;
1573 ctx->offset = 0;
1576 ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
1577 scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
1578 ctx->offset, ctx->bufcnt, 0);
1590 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1594 if (!(IS_ALIGNED(datalen, ctx->block_size) || auto_padding))
1597 mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
1688 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1696 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
1698 ctx->block_size = SHA1_BLOCK_SIZE;
1699 ctx->hash_size = SHA1_DIGEST_SIZE;
1703 ctx->block_size = SHA224_BLOCK_SIZE;
1704 ctx->hash_size = SHA256_DIGEST_SIZE;
1708 ctx->block_size = SHA256_BLOCK_SIZE;
1709 ctx->hash_size = SHA256_DIGEST_SIZE;
1713 ctx->block_size = SHA384_BLOCK_SIZE;
1714 ctx->hash_size = SHA512_DIGEST_SIZE;
1718 ctx->block_size = SHA512_BLOCK_SIZE;
1719 ctx->hash_size = SHA512_DIGEST_SIZE;
1725 bs = ctx->block_size;
1752 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1754 size_t bs = ctx->block_size;
1769 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1770 size_t bs = ctx->block_size;
1788 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1789 size_t bs = ctx->block_size;
1790 size_t hs = ctx->hash_size;
1804 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1805 size_t hs = ctx->hash_size;
1817 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1824 switch (ctx->op) {
1872 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1875 size_t bs = ctx->block_size;
1876 size_t hs = ctx->hash_size;
1878 ctx->bufcnt = 0;
1879 ctx->digcnt[0] = bs;
1880 ctx->digcnt[1] = 0;
1881 ctx->flags |= SHA_FLAGS_RESTORE;
1882 memcpy(ctx->digest, hmac->ipad, hs);
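
The HMAC path above restores the precomputed digest of the inner key block (ipad) as the initial hash value and starts digcnt at one block, reflecting HMAC(K, m) = H((K xor opad) || H((K xor ipad) || m)). For background, a minimal sketch of how the ipad/opad blocks are derived from the key; over-long keys are hashed down first (not shown) and names are illustrative:

#include <stddef.h>
#include <string.h>

#define HMAC_BLOCK_SIZE 64      /* SHA-1/224/256 block size */

static void hmac_make_pads(const unsigned char *key, size_t keylen,
                           unsigned char ipad[HMAC_BLOCK_SIZE],
                           unsigned char opad[HMAC_BLOCK_SIZE])
{
        size_t i;

        memset(ipad, 0, HMAC_BLOCK_SIZE);
        memcpy(ipad, key, keylen < HMAC_BLOCK_SIZE ? keylen : HMAC_BLOCK_SIZE);
        memcpy(opad, ipad, HMAC_BLOCK_SIZE);

        for (i = 0; i < HMAC_BLOCK_SIZE; i++) {
                ipad[i] ^= 0x36;
                opad[i] ^= 0x5c;
        }
}
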
1889 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1892 u32 *digest = (u32 *)ctx->digest;
1894 size_t bs = ctx->block_size;
1895 size_t hs = ctx->hash_size;
1911 mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
1926 * digest into ctx->digest[] before memcpy() the data into
1948 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1951 size_t hs = ctx->hash_size;
1976 mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
2225 struct atmel_sha_reqctx *ctx = &authctx->base;
2239 ctx->dd = dd;
2240 ctx->buflen = SHA_BUFFER_LEN;
2257 struct atmel_sha_reqctx *ctx = &authctx->base;
2260 struct atmel_sha_dev *dd = ctx->dd;
2271 ctx->flags = hmac->base.flags;
2280 struct atmel_sha_reqctx *ctx = &authctx->base;
2283 size_t hs = ctx->hash_size;
2298 mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
2327 struct atmel_sha_reqctx *ctx = &authctx->base;
2328 struct atmel_sha_dev *dd = ctx->dd;
2330 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
2380 struct atmel_sha_reqctx *ctx = &authctx->base;
2381 struct atmel_sha_dev *dd = ctx->dd;