Lines matching refs: rctx

78 				      struct aspeed_sham_reqctx *rctx)
83 AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags);
85 switch (rctx->flags & SHA_FLAGS_MASK) {
89 bits[0] = cpu_to_be64(rctx->digcnt[0] << 3);
90 index = rctx->bufcnt & 0x3f;
92 *(rctx->buffer + rctx->bufcnt) = 0x80;
93 memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
94 memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 8);
95 rctx->bufcnt += padlen + 8;
98 bits[1] = cpu_to_be64(rctx->digcnt[0] << 3);
99 bits[0] = cpu_to_be64(rctx->digcnt[1] << 3 |
100 rctx->digcnt[0] >> 61);
101 index = rctx->bufcnt & 0x7f;
103 *(rctx->buffer + rctx->bufcnt) = 0x80;
104 memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
105 memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 16);
106 rctx->bufcnt += padlen + 16;
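Lines 89-106 above implement standard Merkle-Damgård padding: append a 0x80 byte, zero-fill up to the length field, then store the total message length in bits, big-endian (an 8-byte field for the 64-byte-block SHA-1/224/256 path, a 16-byte field for the 128-byte-block SHA-384/512 path). A minimal userspace sketch of the 64-byte-block case follows; sha256_pad is a hypothetical helper, not the driver's code.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Sketch of the padding the driver builds in rctx->buffer (64-byte
 * block case).  Assumes buf can hold two blocks, as rctx->buffer can.
 */
static size_t sha256_pad(uint8_t *buf, size_t bufcnt, uint64_t total_bytes)
{
	size_t index = bufcnt & 0x3f;                 /* offset within block */
	size_t padlen = (index < 56) ? (56 - index) : (120 - index);
	uint64_t bits = total_bytes << 3;             /* message length in bits */

	buf[bufcnt] = 0x80;                           /* mandatory first pad byte */
	memset(buf + bufcnt + 1, 0, padlen - 1);
	for (int i = 0; i < 8; i++)                   /* big-endian length field */
		buf[bufcnt + padlen + i] = (uint8_t)(bits >> (56 - 8 * i));

	return bufcnt + padlen + 8;                   /* new bufcnt */
}

The index < 56 test decides whether the length field still fits in the current block or the padding must spill into a second one, which is why the driver maps rctx->buffer as block_size * 2 bytes.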
119 struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
122 length = rctx->total + rctx->bufcnt;
123 remain = length % rctx->block_size;
127 if (rctx->bufcnt)
128 memcpy(hash_engine->ahash_src_addr, rctx->buffer, rctx->bufcnt);
130 if (rctx->total + rctx->bufcnt < ASPEED_CRYPTO_SRC_DMA_BUF_LEN) {
132 rctx->bufcnt, rctx->src_sg,
133 rctx->offset, rctx->total - remain, 0);
134 rctx->offset += rctx->total - remain;
141 scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg,
142 rctx->offset, remain, 0);
144 rctx->bufcnt = remain;
145 rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
148 if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
149 dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
155 hash_engine->digest_dma = rctx->digest_dma_addr;
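This function (lines 119-155) services small requests through a single bounce buffer: carried-over bytes from rctx->buffer go in first, block-aligned data from the scatterlist is copied after them, and the sub-block remainder is stashed back into rctx->buffer for the next pass. A worked example of the arithmetic at lines 122-123, with hypothetical sizes:

#include <stdio.h>

int main(void)
{
	/* Hypothetical request: 100 bytes already buffered, 500 new bytes */
	unsigned int bufcnt = 100, total = 500, block_size = 64;

	unsigned int length = total + bufcnt;        /* 600 bytes pending */
	unsigned int remain = length % block_size;   /* 600 % 64 = 24 */

	/*
	 * 576 block-aligned bytes are handed to the engine now; the
	 * trailing 24 bytes are copied back into the context buffer to
	 * wait for the next update (or for final padding).
	 */
	printf("to engine: %u, carried over: %u\n", length - remain, remain);
	return 0;
}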
168 struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
174 remain = (rctx->total + rctx->bufcnt) % rctx->block_size;
175 length = rctx->total + rctx->bufcnt - remain;
178 "rctx total", rctx->total, "bufcnt", rctx->bufcnt,
181 sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
190 rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
193 if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
194 dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
199 if (rctx->bufcnt != 0) {
203 rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
204 rctx->buffer,
205 rctx->block_size * 2,
207 if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
208 dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
213 phy_addr = rctx->buffer_dma_addr;
214 len = rctx->bufcnt;
227 for_each_sg(rctx->src_sg, s, sg_len, i) {
250 rctx->offset = rctx->total - remain;
251 hash_engine->src_length = rctx->total + rctx->bufcnt - remain;
253 hash_engine->digest_dma = rctx->digest_dma_addr;
258 if (rctx->bufcnt != 0)
259 dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
260 rctx->block_size * 2, DMA_TO_DEVICE);
262 dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
265 dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
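For larger requests (lines 168-265) the driver maps the scatterlist itself and walks it with for_each_sg, building a hardware scatter-gather descriptor list: one entry for the carried-over buffer, then one per source segment, with the total trimmed so only block-aligned bytes are consumed this round; the error paths above unmap in the reverse order of mapping. A standalone sketch of the list-building shape; the descriptor layout here is hypothetical, as the real HACE format is not visible in these lines.

#include <stdint.h>

/* Hypothetical descriptor; assumed: top bit of len marks the last entry. */
struct hw_sg_desc {
	uint32_t addr;
	uint32_t len;
};

#define DESC_LAST 0x80000000u

static unsigned int build_list(struct hw_sg_desc *list,
			       uint32_t buf_addr, uint32_t bufcnt,
			       const uint32_t *seg_addr,
			       const uint32_t *seg_len,
			       unsigned int nsegs, uint32_t budget)
{
	unsigned int n = 0;

	if (bufcnt) {                   /* leftover bytes from the last call */
		list[n].addr = buf_addr;
		list[n].len = bufcnt;   /* assumes bufcnt < budget */
		budget -= bufcnt;
		n++;
	}
	for (unsigned int i = 0; i < nsegs && budget; i++) {
		uint32_t len = seg_len[i] < budget ? seg_len[i] : budget;

		list[n].addr = seg_addr[i];
		list[n].len = len;      /* final entry gets trimmed here */
		budget -= len;
		n++;
	}
	if (n)
		list[n - 1].len |= DESC_LAST;   /* terminate the list */
	return n;
}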
293 struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
297 dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
300 dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
301 rctx->block_size * 2, DMA_TO_DEVICE);
303 memcpy(req->result, rctx->digest, rctx->digsize);
316 struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
322 rctx->cmd |= HASH_CMD_INT_ENABLE;
336 ast_hace_write(hace_dev, rctx->cmd, ASPEED_HACE_HASH_CMD);
350 struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
358 dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
361 dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
362 rctx->block_size * 2, DMA_TO_DEVICE);
365 memcpy(rctx->buffer, bctx->opad, rctx->block_size);
366 memcpy(rctx->buffer + rctx->block_size, rctx->digest, rctx->digsize);
368 rctx->bufcnt = rctx->block_size + rctx->digsize;
369 rctx->digcnt[0] = rctx->block_size + rctx->digsize;
371 aspeed_ahash_fill_padding(hace_dev, rctx);
372 memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize);
374 rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
377 if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
378 dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
383 rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer,
384 rctx->block_size * 2,
386 if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
387 dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
392 hash_engine->src_dma = rctx->buffer_dma_addr;
393 hash_engine->src_length = rctx->bufcnt;
394 hash_engine->digest_dma = rctx->digest_dma_addr;
399 dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
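This resume handler (lines 350-399) runs after the inner HMAC pass completes. It unmaps the first pass, rebuilds rctx->buffer as the opad block followed by the inner digest, primes the length counters, pads, reseeds rctx->digest with the IV, and remaps everything for one more engine pass: the outer hash, outer = H(opad_block || inner_digest). A compact sketch of the buffer assembly; hmac_outer_setup is a hypothetical stand-in for what lines 365-368 do before the hardware is retriggered.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void hmac_outer_setup(uint8_t *buffer, const uint8_t *opad,
			     size_t block_size, const uint8_t *inner_digest,
			     size_t digsize)
{
	memcpy(buffer, opad, block_size);                   /* opad block */
	memcpy(buffer + block_size, inner_digest, digsize); /* inner hash */
	/*
	 * Then: bufcnt = block_size + digsize, pad this short message,
	 * reset the digest to the IV, and run the engine once more to
	 * produce the HMAC tag.
	 */
}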
409 struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
414 aspeed_ahash_fill_padding(hace_dev, rctx);
416 rctx->digest_dma_addr = dma_map_single(hace_dev->dev,
417 rctx->digest,
420 if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
421 dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
426 rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
427 rctx->buffer,
428 rctx->block_size * 2,
430 if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
431 dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
436 hash_engine->src_dma = rctx->buffer_dma_addr;
437 hash_engine->src_length = rctx->bufcnt;
438 hash_engine->digest_dma = rctx->digest_dma_addr;
440 if (rctx->flags & SHA_FLAGS_HMAC)
447 dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
457 struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
461 dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
464 if (rctx->bufcnt != 0)
465 dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
466 rctx->block_size * 2,
469 dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
472 scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset,
473 rctx->total - rctx->offset, 0);
475 rctx->bufcnt = rctx->total - rctx->offset;
476 rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL;
478 if (rctx->flags & SHA_FLAGS_FINUP)
488 struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
492 dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
495 if (rctx->flags & SHA_FLAGS_FINUP)
505 struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
512 rctx->cmd |= HASH_CMD_HASH_SRC_SG_CTRL;
536 struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
546 if (rctx->op == SHA_OP_UPDATE)
548 else if (rctx->op == SHA_OP_FINAL)
583 struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
590 rctx->total = req->nbytes;
591 rctx->src_sg = req->src;
592 rctx->offset = 0;
593 rctx->src_nents = sg_nents(req->src);
594 rctx->op = SHA_OP_UPDATE;
596 rctx->digcnt[0] += rctx->total;
597 if (rctx->digcnt[0] < rctx->total)
598 rctx->digcnt[1]++;
600 if (rctx->bufcnt + rctx->total < rctx->block_size) {
601 scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt,
602 rctx->src_sg, rctx->offset,
603 rctx->total, 0);
604 rctx->bufcnt += rctx->total;
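Lines 596-598 maintain a 128-bit running byte count across the two u64 halves of digcnt; the carry test relies on unsigned wraparound (after an overflow the sum is smaller than the addend). Lines 600-604 then short-circuit: when buffered plus new data still fits below one block, the bytes are only copied into rctx->buffer and no hardware pass is queued. A self-contained demonstration of the carry:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Same carry trick as lines 596-598, forced to overflow */
	uint64_t digcnt[2] = { UINT64_MAX - 10, 0 };
	uint64_t total = 100;

	digcnt[0] += total;          /* wraps around to 89 */
	if (digcnt[0] < total)       /* overflow detected ... */
		digcnt[1]++;         /* ... so propagate the carry */

	printf("low=%llu high=%llu\n",
	       (unsigned long long)digcnt[0],
	       (unsigned long long)digcnt[1]);
	return 0;
}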
624 struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
629 AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n",
630 req->nbytes, rctx->total);
631 rctx->op = SHA_OP_FINAL;
638 struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
646 rctx->flags |= SHA_FLAGS_FINUP;
663 struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
673 rctx->cmd = HASH_CMD_ACC_MODE;
674 rctx->flags = 0;
678 rctx->cmd |= HASH_CMD_SHA1 | HASH_CMD_SHA_SWAP;
679 rctx->flags |= SHA_FLAGS_SHA1;
680 rctx->digsize = SHA1_DIGEST_SIZE;
681 rctx->block_size = SHA1_BLOCK_SIZE;
682 rctx->sha_iv = sha1_iv;
683 rctx->ivsize = 32;
684 memcpy(rctx->digest, sha1_iv, rctx->ivsize);
687 rctx->cmd |= HASH_CMD_SHA224 | HASH_CMD_SHA_SWAP;
688 rctx->flags |= SHA_FLAGS_SHA224;
689 rctx->digsize = SHA224_DIGEST_SIZE;
690 rctx->block_size = SHA224_BLOCK_SIZE;
691 rctx->sha_iv = sha224_iv;
692 rctx->ivsize = 32;
693 memcpy(rctx->digest, sha224_iv, rctx->ivsize);
696 rctx->cmd |= HASH_CMD_SHA256 | HASH_CMD_SHA_SWAP;
697 rctx->flags |= SHA_FLAGS_SHA256;
698 rctx->digsize = SHA256_DIGEST_SIZE;
699 rctx->block_size = SHA256_BLOCK_SIZE;
700 rctx->sha_iv = sha256_iv;
701 rctx->ivsize = 32;
702 memcpy(rctx->digest, sha256_iv, rctx->ivsize);
705 rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA384 |
707 rctx->flags |= SHA_FLAGS_SHA384;
708 rctx->digsize = SHA384_DIGEST_SIZE;
709 rctx->block_size = SHA384_BLOCK_SIZE;
710 rctx->sha_iv = (const __be32 *)sha384_iv;
711 rctx->ivsize = 64;
712 memcpy(rctx->digest, sha384_iv, rctx->ivsize);
715 rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512 |
717 rctx->flags |= SHA_FLAGS_SHA512;
718 rctx->digsize = SHA512_DIGEST_SIZE;
719 rctx->block_size = SHA512_BLOCK_SIZE;
720 rctx->sha_iv = (const __be32 *)sha512_iv;
721 rctx->ivsize = 64;
722 memcpy(rctx->digest, sha512_iv, rctx->ivsize);
730 rctx->bufcnt = 0;
731 rctx->total = 0;
732 rctx->digcnt[0] = 0;
733 rctx->digcnt[1] = 0;
737 rctx->digcnt[0] = rctx->block_size;
738 rctx->bufcnt = rctx->block_size;
739 memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
740 rctx->flags |= SHA_FLAGS_HMAC;
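For HMAC transforms, lines 737-740 seed the fresh request with the precomputed ipad block from the base context and count it as one already-consumed block, so the engine's first pass hashes ipad || message. The pads themselves are derived at setkey time and never touch rctx, which is why that code does not appear in this listing; the standard derivation, for reference (keys longer than one block must first be hashed down to digest size, not shown):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void hmac_pads(uint8_t *ipad, uint8_t *opad,
		      const uint8_t *key, size_t keylen, size_t block_size)
{
	/* Zero-pad the key to one block... */
	memset(ipad, 0, block_size);
	memcpy(ipad, key, keylen);   /* caller guarantees keylen <= block_size */
	memcpy(opad, ipad, block_size);

	/* ...then XOR with the two HMAC constants. */
	for (size_t i = 0; i < block_size; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
}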
835 struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
837 memcpy(out, rctx, sizeof(*rctx));
844 struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
846 memcpy(rctx, in, sizeof(*rctx));
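The export/import pair (lines 835-846) snapshots and restores the whole request context with one memcpy each, which is what lets the crypto API suspend a partial hash and resume it later; the algorithm's advertised statesize must therefore be at least sizeof(struct aspeed_sham_reqctx). The contract in miniature, with a hypothetical context type:

#include <string.h>

/* Hypothetical stand-in: any flat, self-contained context can
 * round-trip this way. */
struct reqctx {
	unsigned long bufcnt;
	unsigned char buffer[128];
};

static void my_export(void *out, const struct reqctx *rctx)
{
	memcpy(out, rctx, sizeof(*rctx));   /* mirrors line 837 */
}

static void my_import(struct reqctx *rctx, const void *in)
{
	memcpy(rctx, in, sizeof(*rctx));    /* mirrors line 846 */
}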