Lines matching refs: ctx
268 * @ctx: Configuration for currently handled crypto request
281 * req, ctx, sg_src/dst (and copies). This essentially
308 struct s5p_aes_ctx *ctx;
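
The three fragments above (lines 268, 281, 308) come from the kernel-doc and field list of struct s5p_aes_dev in the Samsung s5p-sss driver: the device remembers the transform context (ctx) of the skcipher request it is currently processing, and a lock protects req, ctx and the sg_src/sg_dst scatterlists against concurrent access. A minimal sketch of just those members; apart from the req, ctx and sg_src/sg_dst names quoted above, the field types, the spinlock and the ordering are assumptions, not taken from the listing:

struct s5p_aes_dev {
	/* ... clocks, MMIO ranges, IRQs elided ... */
	spinlock_t		lock;	/* assumed: guards req, ctx, sg_src/dst (line 281) */
	struct skcipher_request	*req;	/* request currently owned by the engine */
	struct s5p_aes_ctx	*ctx;	/* line 308: key material for that request */
	struct scatterlist	*sg_src;
	struct scatterlist	*sg_dst;
	/* ... hash engine state (hash_req, hash_flags, xmit_buf) elided ... */
};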
789 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
790 struct s5p_aes_dev *dd = ctx->dd;
791 u32 *hash = (u32 *)ctx->digest;
794 for (i = 0; i < ctx->nregs; i++)
801 * @ctx: request context
804 const struct s5p_hash_reqctx *ctx)
806 const u32 *hash = (const u32 *)ctx->digest;
809 for (i = 0; i < ctx->nregs; i++)
819 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
821 s5p_hash_write_ctx_iv(ctx->dd, ctx);
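
Lines 789-794, 801-809 and 819-821 show how the intermediate digest travels between ctx->digest and the hash engine: one helper reads nregs 32-bit words out of the hardware into ctx->digest, s5p_hash_write_ctx_iv() writes them back as the IV before a partial hash is resumed, and a thin per-request wrapper calls it. A sketch assembled from those fragments; the reader's and wrapper's function names, the MMIO accessors and the register-offset macros are assumptions, since only the loop structure and the ctx fields are visible in the listing:

static void s5p_hash_read_msg(struct ahash_request *req)	/* name assumed */
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_aes_dev *dd = ctx->dd;
	u32 *hash = (u32 *)ctx->digest;
	unsigned int i;

	for (i = 0; i < ctx->nregs; i++)
		hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));	/* accessor/offsets assumed */
}

static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
				  const struct s5p_hash_reqctx *ctx)
{
	const u32 *hash = (const u32 *)ctx->digest;
	unsigned int i;

	for (i = 0; i < ctx->nregs; i++)
		s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);	/* accessor/offsets assumed */
}

static void s5p_hash_write_iv(struct ahash_request *req)	/* name assumed */
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	s5p_hash_write_ctx_iv(ctx->dd, ctx);
}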
830 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
835 memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
937 struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
942 configflags = ctx->engine | SSS_HASH_INIT_BIT;
944 if (likely(ctx->digcnt)) {
945 s5p_hash_write_ctx_iv(dd, ctx);
954 tmplen = ctx->digcnt * 8;
987 struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
990 cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
993 ctx->error = true;
998 dd->hash_sg_iter = ctx->sg;
1001 ctx->digcnt += length;
1002 ctx->total -= length;
1015 * @ctx: request context
1025 static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
1031 len = new_len + ctx->bufcnt;
1036 dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
1037 ctx->error = true;
1041 if (ctx->bufcnt)
1042 memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
1044 scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
1046 sg_init_table(ctx->sgl, 1);
1047 sg_set_buf(ctx->sgl, buf, len);
1048 ctx->sg = ctx->sgl;
1049 ctx->sg_len = 1;
1050 ctx->bufcnt = 0;
1051 ctx->skip = 0;
1052 set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);
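
Lines 1025-1052 are the unaligned-data path: when the incoming scatterlist cannot be fed to DMA as-is, s5p_hash_copy_sgs() flattens the already-buffered bytes plus new_len bytes of the source into one freshly allocated buffer and points ctx->sg at a single-entry table over it. A sketch of how the listed fragments fit together; the page-allocator call, the GFP flags and the error return value are assumptions based on the "alloc pages" error message at line 1036:

static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
			     struct scatterlist *sg, unsigned int new_len)
{
	unsigned int len = new_len + ctx->bufcnt;
	unsigned int pages;
	void *buf;

	pages = get_order(len);				/* assumed: buffer comes from the page allocator */
	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
		ctx->error = true;
		return -ENOMEM;
	}

	/* already-buffered bytes go first, then the unaligned data from sg */
	if (ctx->bufcnt)
		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);

	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
				 new_len, 0);

	/* hand DMA a single contiguous entry and reset the bookkeeping */
	sg_init_table(ctx->sgl, 1);
	sg_set_buf(ctx->sgl, buf, len);
	ctx->sg = ctx->sgl;
	ctx->sg_len = 1;
	ctx->bufcnt = 0;
	ctx->skip = 0;
	set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);

	return 0;
}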
1059 * @ctx: request context
1068 * Resulting sg table will be assigned to ctx->sg. Set flag so we can free
1071 static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
1074 unsigned int skip = ctx->skip, n = sg_nents(sg);
1078 if (ctx->bufcnt)
1081 ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
1082 if (!ctx->sg) {
1083 ctx->error = true;
1087 sg_init_table(ctx->sg, n);
1089 tmp = ctx->sg;
1091 ctx->sg_len = 0;
1093 if (ctx->bufcnt) {
1094 sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
1096 ctx->sg_len++;
1116 ctx->sg_len++;
1120 set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);
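
Lines 1071-1120 show the companion path for data that is DMA-able but arrives as a scatterlist the engine cannot walk directly: s5p_hash_copy_sg_lists() allocates a new sg table, optionally puts an xmit_buf entry first for the bytes buffered by a previous update, and then copies the usable source entries over. The loop that trims the leading "skip" bytes and copies at most new_len bytes of entries is not visible in the listing, so it is only indicated below:

static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
				  struct scatterlist *sg, unsigned int new_len)
{
	unsigned int skip = ctx->skip, n = sg_nents(sg);
	struct scatterlist *tmp;

	if (ctx->bufcnt)
		n++;				/* one extra slot for the buffered bytes */

	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
	if (!ctx->sg) {
		ctx->error = true;
		return -ENOMEM;
	}

	sg_init_table(ctx->sg, n);

	tmp = ctx->sg;
	ctx->sg_len = 0;

	if (ctx->bufcnt) {
		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
		tmp = sg_next(tmp);
		ctx->sg_len++;
	}

	/*
	 * ... walk the source list here, skipping "skip" leading bytes,
	 * copying at most new_len bytes worth of entries into tmp and
	 * bumping ctx->sg_len for each one (loop not shown in the listing) ...
	 */

	set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);

	return 0;
}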
1127 * @ctx: request context
1141 static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
1145 unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
1182 return s5p_hash_copy_sgs(ctx, sg, new_len);
1184 return s5p_hash_copy_sg_lists(ctx, sg, new_len);
1190 if (ctx->bufcnt) {
1191 ctx->sg_len = n;
1192 sg_init_table(ctx->sgl, 2);
1193 sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
1194 sg_chain(ctx->sgl, 2, sg);
1195 ctx->sg = ctx->sgl;
1196 ctx->sg_len++;
1198 ctx->sg = sg;
1199 ctx->sg_len = n;
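
Lines 1141-1199 are the decision point: s5p_hash_prepare_sgs() inspects the source scatterlist and either falls back to the flatten-and-copy path, rebuilds the sg table, or uses req->src directly, chaining a 2-entry ctx->sgl in front of it when buffered bytes must be prepended. The walk that checks alignment and entry lengths is not part of the listing, and the trailing "final" parameter (the fourth argument visible at line 1268) is an assumption; the sketch below only shows the three exits that are visible:

static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
				struct scatterlist *sg, unsigned int new_len,
				bool final)
{
	unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
	bool aligned = true, list_ok = true;	/* assumed names for the outcome of the walk */

	/* ... walk sg, consuming "skip" and "nbytes", counting usable entries
	 * into n and clearing aligned/list_ok as needed (not in the listing) ...
	 */

	if (!aligned)
		return s5p_hash_copy_sgs(ctx, sg, new_len);	/* flatten into one buffer */
	else if (!list_ok)
		return s5p_hash_copy_sg_lists(ctx, sg, new_len);/* rebuild the sg table */

	/* req->src is usable as-is; prepend xmit_buf if an update left bytes behind */
	if (ctx->bufcnt) {
		ctx->sg_len = n;
		sg_init_table(ctx->sgl, 2);
		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
		sg_chain(ctx->sgl, 2, sg);
		ctx->sg = ctx->sgl;
		ctx->sg_len++;
	} else {
		ctx->sg = sg;
		ctx->sg_len = n;
	}

	return 0;
}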
1212 * either req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN or
1217 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1218 bool final = ctx->finup;
1227 ctx->total = nbytes + ctx->bufcnt;
1228 if (!ctx->total)
1231 if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
1233 int len = BUFLEN - ctx->bufcnt % BUFLEN;
1238 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
1240 ctx->bufcnt += len;
1242 ctx->skip = len;
1244 ctx->skip = 0;
1247 if (ctx->bufcnt)
1248 memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);
1250 xmit_len = ctx->total;
1259 hash_later = ctx->total - xmit_len;
1262 scatterwalk_map_and_copy(ctx->buffer, req->src,
1268 ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
1274 if (unlikely(!ctx->bufcnt)) {
1276 scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
1280 sg_init_table(ctx->sgl, 1);
1281 sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);
1283 ctx->sg = ctx->sgl;
1284 ctx->sg_len = 1;
1287 ctx->bufcnt = hash_later;
1289 ctx->total = xmit_len;
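
Lines 1212-1289 belong to s5p_hash_prepare_request(), which turns an update/final request into something the DMA engine can consume: the running total is computed, the internal buffer is first topped up to a BUFLEN boundary from req->src, the sub-BUFLEN tail is saved back into ctx->buffer unless this is the final step, and the rest is handed to s5p_hash_prepare_sgs() or sent straight from xmit_buf. A condensed sketch of that bookkeeping; the rounding of xmit_len and the scatterwalk arguments not visible in the listing are assumptions:

static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	bool final = ctx->finup;
	int xmit_len, hash_later, nbytes = update ? req->nbytes : 0;
	int ret;

	ctx->total = nbytes + ctx->bufcnt;
	if (!ctx->total)
		return 0;

	if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
		/* bytes left over from a previous update: fill up to BUFLEN */
		int len = BUFLEN - ctx->bufcnt % BUFLEN;

		if (len > nbytes)
			len = nbytes;
		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
					 0, len, 0);
		ctx->bufcnt += len;
		nbytes -= len;
		ctx->skip = len;	/* that many bytes of req->src are already consumed */
	} else {
		ctx->skip = 0;
	}

	if (ctx->bufcnt)
		memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);

	xmit_len = ctx->total;
	if (final) {
		hash_later = 0;		/* final step: push everything through */
	} else {
		/* keep a non-empty, sub-BUFLEN tail back for the next update (rounding assumed) */
		if (IS_ALIGNED(xmit_len, BUFLEN))
			xmit_len -= BUFLEN;
		else
			xmit_len -= xmit_len & (BUFLEN - 1);
		hash_later = ctx->total - xmit_len;
		scatterwalk_map_and_copy(ctx->buffer, req->src,
					 req->nbytes - hash_later,
					 hash_later, 0);
	}

	if (xmit_len > BUFLEN) {
		ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
					   final);
		if (ret)
			return ret;
	} else {
		/* everything fits in xmit_buf; bufcnt bytes were memcpy'd above */
		if (unlikely(!ctx->bufcnt))
			scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
						 0, xmit_len, 0);

		sg_init_table(ctx->sgl, 1);
		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);
		ctx->sg = ctx->sgl;
		ctx->sg_len = 1;
	}

	ctx->bufcnt = hash_later;
	ctx->total = xmit_len;	/* when final, xmit_len already equals ctx->total */

	return 0;
}

Keeping a tail back on non-final steps is what guarantees the property stated at lines 1537-1538: a later final call always finds some buffered bytes in ctx->buffer to finish with.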
1298 * Unmap scatterlist ctx->sg.
1302 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
1304 dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
1314 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1315 struct s5p_aes_dev *dd = ctx->dd;
1317 if (ctx->digcnt)
1320 dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
1330 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1331 struct s5p_aes_dev *dd = ctx->dd;
1335 free_pages((unsigned long)sg_virt(ctx->sg),
1336 get_order(ctx->sg->length));
1339 kfree(ctx->sg);
1341 ctx->sg = NULL;
1345 if (!err && !ctx->error) {
1350 ctx->error = true;
1377 struct s5p_hash_reqctx *ctx;
1406 ctx = ahash_request_ctx(req);
1408 err = s5p_hash_prepare_request(req, ctx->op_update);
1409 if (err || !ctx->total)
1413 ctx->op_update, req->nbytes);
1416 if (ctx->digcnt)
1419 if (ctx->op_update) { /* HASH_OP_UPDATE */
1420 err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
1421 if (err != -EINPROGRESS && ctx->finup && !ctx->error)
1423 err = s5p_hash_xmit_dma(dd, ctx->total, true);
1425 err = s5p_hash_xmit_dma(dd, ctx->total, true);
1490 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1493 ctx->op_update = op;
1509 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1514 if (ctx->bufcnt + req->nbytes <= BUFLEN) {
1515 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
1517 ctx->bufcnt += req->nbytes;
1537 * previous update op, so there are always some buffered bytes in ctx->buffer,
1538 * which means that ctx->bufcnt!=0
1549 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1551 ctx->finup = true;
1552 if (ctx->error)
1555 if (!ctx->digcnt && ctx->bufcnt < BUFLEN) {
1558 return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer,
1559 ctx->bufcnt, req->result);
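
Lines 1549-1559 show the core of the final step: the request is marked finup, a previously recorded error aborts it, and a request whose data never reached the hardware (digcnt == 0) and still fits in BUFLEN is handed to the software fallback shash instead of the engine. A sketch of that logic; the tctx type name, the enqueue helper's name and the exact error code are assumptions, only the fallback call itself is quoted from the listing:

static int s5p_hash_final(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	ctx->finup = true;
	if (ctx->error)
		return -EINVAL;		/* assumed error code for an already-failed request */

	if (!ctx->digcnt && ctx->bufcnt < BUFLEN) {
		/* nothing went to the engine yet and it all fits in ctx->buffer,
		 * so let the software fallback produce the digest directly
		 */
		struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);	/* type name assumed */

		return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer,
					       ctx->bufcnt, req->result);
	}

	return s5p_hash_enqueue(req, false);	/* assumed helper; op = final, not HASH_OP_UPDATE */
}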
1573 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1576 ctx->finup = true;
1600 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1604 ctx->dd = tctx->dd;
1605 ctx->error = false;
1606 ctx->finup = false;
1607 ctx->bufcnt = 0;
1608 ctx->digcnt = 0;
1609 ctx->total = 0;
1610 ctx->skip = 0;
1617 ctx->engine = SSS_HASH_ENGINE_MD5;
1618 ctx->nregs = HASH_MD5_MAX_REG;
1621 ctx->engine = SSS_HASH_ENGINE_SHA1;
1622 ctx->nregs = HASH_SHA1_MAX_REG;
1625 ctx->engine = SSS_HASH_ENGINE_SHA256;
1626 ctx->nregs = HASH_SHA256_MAX_REG;
1629 ctx->error = true;
1701 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1703 memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);
1715 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1720 memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
1722 ctx->error = true;
1726 ctx->dd = tctx->dd;
1727 ctx->error = false;
1926 if (dev->ctx->keylen == AES_KEYSIZE_192)
1928 else if (dev->ctx->keylen == AES_KEYSIZE_256)
1955 s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);
1999 dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
2030 struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
2031 struct s5p_aes_dev *dev = ctx->dev;
2051 struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
2058 memcpy(ctx->aes_key, key, keylen);
2059 ctx->keylen = keylen;
2091 struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
2093 ctx->dev = s5p_dev;