Lines matching refs: ctx
269 * @ctx: Configuration for currently handled crypto request
282 * req, ctx, sg_src/dst (and copies). This essentially
309 struct s5p_aes_ctx *ctx;
786 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
787 struct s5p_aes_dev *dd = ctx->dd;
788 u32 *hash = (u32 *)ctx->digest;
791 for (i = 0; i < ctx->nregs; i++)
798 * @ctx: request context
801 const struct s5p_hash_reqctx *ctx)
803 const u32 *hash = (const u32 *)ctx->digest;
806 for (i = 0; i < ctx->nregs; i++)
816 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
818 s5p_hash_write_ctx_iv(ctx->dd, ctx);
827 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
832 memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
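
The block above (lines 786-832) shows the pattern used throughout this listing: the driver's per-request hash state lives in the memory the crypto API reserves behind each ahash_request, and ahash_request_ctx() is how every handler reaches it. A minimal sketch of that pattern follows, with a reduced stand-in struct rather than the real s5p_hash_reqctx:

#include <linux/string.h>
#include <linux/types.h>
#include <crypto/hash.h>

/* Reduced, illustrative request context -- not the driver's layout. */
struct demo_hash_reqctx {
	u64 digcnt;		/* bytes already processed by the engine */
	u32 digest[8];		/* intermediate/final digest words */
	unsigned int nregs;	/* number of valid digest words */
};

/* Copy the engine's digest words into the caller-visible result. */
static void demo_copy_result(struct ahash_request *req)
{
	struct demo_hash_reqctx *rctx = ahash_request_ctx(req);

	memcpy(req->result, rctx->digest, rctx->nregs * sizeof(u32));
}
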
934 struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
939 configflags = ctx->engine | SSS_HASH_INIT_BIT;
941 if (likely(ctx->digcnt)) {
942 s5p_hash_write_ctx_iv(dd, ctx);
951 tmplen = ctx->digcnt * 8;
984 struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
987 cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
990 ctx->error = true;
995 dd->hash_sg_iter = ctx->sg;
998 ctx->digcnt += length;
999 ctx->total -= length;
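
Before the engine is started, the scatterlist prepared for this request is mapped for device-bound DMA; a zero return from dma_map_sg() means nothing was mapped, and the request is flagged as failed instead of being handed to the hardware. A hedged sketch of that step (the helper name and the error-flag argument are illustrative, not the driver's):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int demo_map_for_tx(struct device *dev, struct scatterlist *sg,
			   unsigned int sg_len, bool *error_flag)
{
	int cnt = dma_map_sg(dev, sg, sg_len, DMA_TO_DEVICE);

	if (!cnt) {
		*error_flag = true;	/* nothing mapped: abort this request */
		return -ENOMEM;
	}

	return 0;
}

The matching dma_unmap_sg() with DMA_TO_DEVICE appears further down in the listing (line 1301).
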
1012 * @ctx: request context
1022 static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
1028 len = new_len + ctx->bufcnt;
1033 dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
1034 ctx->error = true;
1038 if (ctx->bufcnt)
1039 memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
1041 scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
1043 sg_init_table(ctx->sgl, 1);
1044 sg_set_buf(ctx->sgl, buf, len);
1045 ctx->sg = ctx->sgl;
1046 ctx->sg_len = 1;
1047 ctx->bufcnt = 0;
1048 ctx->skip = 0;
1049 set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);
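
s5p_hash_copy_sgs() above handles input the DMA engine cannot take directly: it gathers everything into one freshly allocated linear buffer and wraps that buffer in a single-entry scatterlist. A minimal sketch of the same pattern, assuming the caller later releases the pages with free_pages(); names are illustrative and the driver's handling of previously buffered bytes is omitted:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>

static int demo_linearize(struct scatterlist *out_sgl,
			  struct scatterlist *src, unsigned int len)
{
	void *buf = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));

	if (!buf)
		return -ENOMEM;

	/* Gather the scattered input into the linear buffer ... */
	scatterwalk_map_and_copy(buf, src, 0, len, 0);

	/* ... and present it to DMA as one contiguous entry. */
	sg_init_table(out_sgl, 1);
	sg_set_buf(out_sgl, buf, len);
	return 0;
}
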
1056 * @ctx: request context
1065 * Resulting sg table will be assigned to ctx->sg. Set flag so we can free
1068 static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
1071 unsigned int skip = ctx->skip, n = sg_nents(sg);
1075 if (ctx->bufcnt)
1078 ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
1079 if (!ctx->sg) {
1080 ctx->error = true;
1084 sg_init_table(ctx->sg, n);
1086 tmp = ctx->sg;
1088 ctx->sg_len = 0;
1090 if (ctx->bufcnt) {
1091 sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
1093 ctx->sg_len++;
1113 ctx->sg_len++;
1117 set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);
1124 * @ctx: request context
1138 static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
1142 unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
1179 return s5p_hash_copy_sgs(ctx, sg, new_len);
1181 return s5p_hash_copy_sg_lists(ctx, sg, new_len);
1187 if (ctx->bufcnt) {
1188 ctx->sg_len = n;
1189 sg_init_table(ctx->sgl, 2);
1190 sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
1191 sg_chain(ctx->sgl, 2, sg);
1192 ctx->sg = ctx->sgl;
1193 ctx->sg_len++;
1195 ctx->sg = sg;
1196 ctx->sg_len = n;
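
When the data is already DMA-friendly, s5p_hash_prepare_sgs() avoids copying: if there are carried-over bytes it chains the driver's xmit_buf in front of the caller's scatterlist with a local two-entry table (lines 1187-1193), otherwise it uses the caller's list as-is. A sketch of that chaining, assuming a driver-owned bounce buffer holding 'buffered' bytes; names are illustrative:

#include <linux/scatterlist.h>

static void demo_prepend_buffered(struct scatterlist sgl[2], void *xmit_buf,
				  unsigned int buffered,
				  struct scatterlist *src)
{
	sg_init_table(sgl, 2);
	sg_set_buf(sgl, xmit_buf, buffered);
	sg_chain(sgl, 2, src);	/* second slot becomes a chain link to src */
}
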
1209 * either req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN or
1214 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1215 bool final = ctx->finup;
1224 ctx->total = nbytes + ctx->bufcnt;
1225 if (!ctx->total)
1228 if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
1230 int len = BUFLEN - ctx->bufcnt % BUFLEN;
1235 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
1237 ctx->bufcnt += len;
1239 ctx->skip = len;
1241 ctx->skip = 0;
1244 if (ctx->bufcnt)
1245 memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);
1247 xmit_len = ctx->total;
1256 hash_later = ctx->total - xmit_len;
1259 scatterwalk_map_and_copy(ctx->buffer, req->src,
1265 ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
1271 if (unlikely(!ctx->bufcnt)) {
1273 scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
1277 sg_init_table(ctx->sgl, 1);
1278 sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);
1280 ctx->sg = ctx->sgl;
1281 ctx->sg_len = 1;
1284 ctx->bufcnt = hash_later;
1286 ctx->total = xmit_len;
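
s5p_hash_prepare_request() first brings the carry-over buffer up to a block boundary (lines 1228-1243): if ctx->bufcnt is not a multiple of BUFLEN, just enough bytes are copied from req->src to reach one, and that many source bytes are remembered in ctx->skip so they are not sent twice. A sketch of that step, with DEMO_BUFLEN standing in for the driver's BUFLEN and the clamp to nbytes added here as an assumption:

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>

#define DEMO_BUFLEN	64	/* stand-in for the driver's BUFLEN */

static unsigned int demo_top_up(u8 *buffer, unsigned int *bufcnt,
				struct scatterlist *src, unsigned int nbytes)
{
	unsigned int skip = 0;

	if (nbytes && !IS_ALIGNED(*bufcnt, DEMO_BUFLEN)) {
		unsigned int len = DEMO_BUFLEN - *bufcnt % DEMO_BUFLEN;

		len = min(len, nbytes);
		scatterwalk_map_and_copy(buffer + *bufcnt, src, 0, len, 0);
		*bufcnt += len;
		skip = len;	/* skip these source bytes later */
	}

	return skip;
}
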
1295 * Unmap scatterlist ctx->sg.
1299 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
1301 dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
1311 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1312 struct s5p_aes_dev *dd = ctx->dd;
1314 if (ctx->digcnt)
1317 dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
1327 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1328 struct s5p_aes_dev *dd = ctx->dd;
1332 free_pages((unsigned long)sg_virt(ctx->sg),
1333 get_order(ctx->sg->length));
1336 kfree(ctx->sg);
1338 ctx->sg = NULL;
1342 if (!err && !ctx->error) {
1347 ctx->error = true;
1374 struct s5p_hash_reqctx *ctx;
1403 ctx = ahash_request_ctx(req);
1405 err = s5p_hash_prepare_request(req, ctx->op_update);
1406 if (err || !ctx->total)
1410 ctx->op_update, req->nbytes);
1413 if (ctx->digcnt)
1416 if (ctx->op_update) { /* HASH_OP_UPDATE */
1417 err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
1418 if (err != -EINPROGRESS && ctx->finup && !ctx->error)
1420 err = s5p_hash_xmit_dma(dd, ctx->total, true);
1422 err = s5p_hash_xmit_dma(dd, ctx->total, true);
1487 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1490 ctx->op_update = op;
1506 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1511 if (ctx->bufcnt + req->nbytes <= BUFLEN) {
1512 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
1514 ctx->bufcnt += req->nbytes;
1534 * previous update op, so there are always some buffered bytes in ctx->buffer,
1535 * which means that ctx->bufcnt!=0
1546 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1548 ctx->finup = true;
1549 if (ctx->error)
1552 if (!ctx->digcnt && ctx->bufcnt < BUFLEN) {
1555 return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer,
1556 ctx->bufcnt, req->result);
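
Lines 1546-1556 show the software fallback: if no data has reached the hardware yet (ctx->digcnt == 0) and the whole message fits in the driver buffer, the digest is computed synchronously with crypto_shash_tfm_digest() instead of programming the engine. A minimal sketch of that call, assuming a fallback shash tfm set up at tfm-init time:

#include <linux/types.h>
#include <crypto/hash.h>

/* Hash a short, fully buffered message in one synchronous call. */
static int demo_fallback_digest(struct crypto_shash *fallback,
				const u8 *buf, unsigned int len, u8 *out)
{
	return crypto_shash_tfm_digest(fallback, buf, len, out);
}
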
1570 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1573 ctx->finup = true;
1597 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1601 ctx->dd = tctx->dd;
1602 ctx->error = false;
1603 ctx->finup = false;
1604 ctx->bufcnt = 0;
1605 ctx->digcnt = 0;
1606 ctx->total = 0;
1607 ctx->skip = 0;
1614 ctx->engine = SSS_HASH_ENGINE_MD5;
1615 ctx->nregs = HASH_MD5_MAX_REG;
1618 ctx->engine = SSS_HASH_ENGINE_SHA1;
1619 ctx->nregs = HASH_SHA1_MAX_REG;
1622 ctx->engine = SSS_HASH_ENGINE_SHA256;
1623 ctx->nregs = HASH_SHA256_MAX_REG;
1626 ctx->error = true;
1698 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1700 memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);
1712 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1717 memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
1719 ctx->error = true;
1723 ctx->dd = tctx->dd;
1724 ctx->error = false;
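
Lines 1698-1724 give the export/import convention: the saved partial state is simply the request context plus whatever is buffered in its trailing data area, and the import side restores it wholesale but re-fixes the fields that must not be taken from the blob (the device pointer and the error flag). A reduced sketch under that assumption; the struct, DEMO_BUF_SIZE and names are illustrative, and the space behind the struct is presumed to be reserved by whoever sized the request context:

#include <linux/string.h>
#include <linux/types.h>

#define DEMO_BUF_SIZE	64		/* stand-in for the driver's BUFLEN */

struct demo_reqctx {
	void *dd;			/* device handle, re-set on import */
	bool error;
	unsigned int bufcnt;		/* valid bytes in buffer[] */
	u8 buffer[];			/* flexible tail, DEMO_BUF_SIZE bytes reserved */
};

static void demo_export(const struct demo_reqctx *rctx, void *out)
{
	/* Only the used part of the trailing buffer needs to travel. */
	memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);
}

static void demo_import(struct demo_reqctx *rctx, const void *in, void *dd)
{
	memcpy(rctx, in, sizeof(*rctx) + DEMO_BUF_SIZE);
	rctx->dd = dd;			/* never trust a pointer from saved state */
	rctx->error = false;
}
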
1923 if (dev->ctx->keylen == AES_KEYSIZE_192)
1925 else if (dev->ctx->keylen == AES_KEYSIZE_256)
1952 s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);
1996 dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
2027 struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
2028 struct s5p_aes_dev *dev = ctx->dev;
2048 struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
2055 memcpy(ctx->aes_key, key, keylen);
2056 ctx->keylen = keylen;
2088 struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
2090 ctx->dev = s5p_dev;
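
The AES-side references at the end (lines 2027-2090) just park the key and its length in the per-tfm s5p_aes_ctx so the tasklet can later program the engine for 128-, 192- or 256-bit operation (lines 1923-1952). A hedged sketch of that setkey pattern, with an illustrative context struct:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <crypto/aes.h>

struct demo_aes_ctx {
	u8 aes_key[AES_MAX_KEY_SIZE];
	unsigned int keylen;
};

static int demo_setkey(struct demo_aes_ctx *ctx, const u8 *key,
		       unsigned int keylen)
{
	/* Only the three standard AES key sizes are meaningful here. */
	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->aes_key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}
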