Lines matching defs:creq (occurrences of the per-request context pointer creq, a struct mv_cesa_ahash_req *, in what appears to be the Marvell CESA ahash driver, drivers/crypto/marvell/cesa/hash.c). Each entry below is prefixed with its line number in that file.
30 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
31 unsigned int len = req->nbytes + creq->cache_ptr;
33 if (!creq->last_req)
38 iter->src.op_offset = creq->cache_ptr;
96 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
98 mv_cesa_ahash_dma_free_padding(&creq->req.dma);
103 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
105 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
106 mv_cesa_ahash_dma_free_cache(&creq->req.dma);
107 mv_cesa_dma_cleanup(&creq->base);
112 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
114 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
120 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
122 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
126 static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
130 index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
136 static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
142 padlen = mv_cesa_ahash_pad_len(creq);
145 if (creq->algo_le) {
146 __le64 bits = cpu_to_le64(creq->len << 3);
150 __be64 bits = cpu_to_be64(creq->len << 3);
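
Note: lines 126-150 above are the software padding helpers: the message is closed with a 0x80 byte, zero-padded to 56 mod 64, and the 64-bit bit count is appended, little-endian when creq->algo_le is set (MD5), big-endian otherwise (SHA-1/SHA-256). A minimal standalone sketch of the same arithmetic; the names and the fixed 64-byte block are illustrative, not the driver's:

#include <stdint.h>
#include <string.h>

/* Pad bytes (0x80 then zeros) needed so exactly 8 bytes remain
 * before the next 64-byte block boundary. */
static unsigned int pad_len(uint64_t msglen)
{
    unsigned int index = msglen & 63;   /* offset within current block */

    return (index < 56) ? (56 - index) : (64 + 56 - index);
}

/* Write padding plus the 64-bit bit count into buf (worst case 72
 * bytes); le selects MD5-style little-endian vs SHA-style big-endian. */
static unsigned int pad_req(uint64_t msglen, uint8_t *buf, int le)
{
    unsigned int padlen = pad_len(msglen);
    uint64_t bits = msglen << 3;
    int i;

    buf[0] = 0x80;
    memset(buf + 1, 0, padlen - 1);
    for (i = 0; i < 8; i++)
        buf[padlen + i] = le ? (uint8_t)(bits >> (8 * i))
                             : (uint8_t)(bits >> (8 * (7 - i)));
    return padlen + 8;
}

For example, pad_len(0) is 56, so an empty message gains 56 + 8 = 64 trailer bytes, exactly one block.
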
160 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
161 struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
162 struct mv_cesa_engine *engine = creq->base.engine;
170 mv_cesa_adjust_op(engine, &creq->op_tmpl);
172 memcpy(engine->sram_pool, &creq->op_tmpl,
173 sizeof(creq->op_tmpl));
175 memcpy_toio(engine->sram, &creq->op_tmpl,
176 sizeof(creq->op_tmpl));
181 writel_relaxed(creq->state[i],
185 if (creq->cache_ptr) {
188 creq->cache, creq->cache_ptr);
191 creq->cache, creq->cache_ptr);
194 len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
197 if (!creq->last_req) {
202 if (len - creq->cache_ptr)
204 engine, req->src, creq->src_nents,
205 CESA_SA_DATA_SRAM_OFFSET + creq->cache_ptr,
206 len - creq->cache_ptr, sreq->offset);
208 op = &creq->op_tmpl;
212 if (creq->last_req && sreq->offset == req->nbytes &&
213 creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
223 creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
224 mv_cesa_set_mac_op_total_len(op, creq->len);
226 int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;
232 memcpy(creq->cache,
237 memcpy_fromio(creq->cache,
243 i = mv_cesa_ahash_pad_req(creq, creq->cache);
248 creq->cache, i);
252 creq->cache, i);
275 creq->cache_ptr = new_cache_ptr;
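
Note: lines 160-275 are the standard (register/SRAM) step: the op template and any cached bytes are staged into engine SRAM, then at most CESA_SA_SRAM_PAYLOAD_SIZE bytes are hashed per step; for a non-final request the chunk is trimmed to whole 64-byte blocks and the trimmed tail becomes new_cache_ptr (line 275). A sketch of that chunk computation, with an illustrative payload limit standing in for the real, SRAM-size-dependent one:

#include <stddef.h>

#define SRAM_PAYLOAD_SIZE 1920u  /* illustrative per-step limit */
#define BLOCK_MSK         63u    /* 64-byte hash block */

/* Bytes to hash this step; *recache gets the tail kept for later. */
static size_t step_len(size_t nbytes, size_t cache_ptr, size_t offset,
                       int last_req, size_t *recache)
{
    size_t len = nbytes + cache_ptr - offset;

    if (len > SRAM_PAYLOAD_SIZE)
        len = SRAM_PAYLOAD_SIZE;

    *recache = 0;
    if (!last_req) {
        *recache = len & BLOCK_MSK;    /* partial trailing block */
        len &= ~(size_t)BLOCK_MSK;     /* hash whole blocks only */
    }
    return len;
}
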
286 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
287 struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
289 if (sreq->offset < (req->nbytes - creq->cache_ptr))
297 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
298 struct mv_cesa_req *basereq = &creq->base;
305 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
306 struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
313 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
314 struct mv_cesa_req *base = &creq->base;
322 for (i = 0; i < ARRAY_SIZE(creq->state); i++)
323 writel_relaxed(creq->state[i], engine->regs +
333 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
335 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
344 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
346 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
347 return mv_cesa_dma_process(&creq->base, status);
355 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
356 struct mv_cesa_engine *engine = creq->base.engine;
362 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
363 (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
371 data = creq->base.chain.last->op->ctx.hash.hash;
373 creq->state[i] = le32_to_cpu(data[i]);
378 creq->state[i] = readl_relaxed(engine->regs +
380 if (creq->last_req) {
385 if (creq->algo_le) {
389 result[i] = cpu_to_le32(creq->state[i]);
394 result[i] = cpu_to_be32(creq->state[i]);
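
Note: lines 355-394 are the completion path: the chaining state is read back either from the last TDMA descriptor's result context (le32_to_cpu at line 373) or from the CESA_IVDIG registers, and on the final request it is serialized into the digest, little-endian per word for MD5 (creq->algo_le), big-endian for SHA-1/SHA-256. A portable stand-in for those cpu_to_le32/cpu_to_be32 stores:

#include <stdint.h>
#include <stddef.h>

/* Serialize 32-bit chaining words into digest byte order:
 * words = 4 for MD5, 5 for SHA-1, 8 for SHA-256. */
static void state_to_digest(const uint32_t *state, size_t words,
                            uint8_t *digest, int algo_le)
{
    for (size_t i = 0; i < words; i++) {
        uint32_t w = state[i];

        if (algo_le) {                       /* MD5: little-endian words */
            digest[4 * i + 0] = (uint8_t)w;
            digest[4 * i + 1] = (uint8_t)(w >> 8);
            digest[4 * i + 2] = (uint8_t)(w >> 16);
            digest[4 * i + 3] = (uint8_t)(w >> 24);
        } else {                             /* SHA: big-endian words */
            digest[4 * i + 0] = (uint8_t)(w >> 24);
            digest[4 * i + 1] = (uint8_t)(w >> 16);
            digest[4 * i + 2] = (uint8_t)(w >> 8);
            digest[4 * i + 3] = (uint8_t)w;
        }
    }
}
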
406 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
408 creq->base.engine = engine;
410 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
419 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
421 if (creq->last_req)
426 if (creq->cache_ptr)
427 sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
428 creq->cache,
429 creq->cache_ptr,
430 ahashreq->nbytes - creq->cache_ptr);
443 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
445 memset(creq, 0, sizeof(*creq));
453 creq->op_tmpl = *tmpl;
454 creq->len = 0;
455 creq->algo_le = algo_le;
471 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
474 if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
475 !creq->last_req) {
481 sg_pcopy_to_buffer(req->src, creq->src_nents,
482 creq->cache + creq->cache_ptr,
485 creq->cache_ptr += req->nbytes;
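
Note: lines 471-485 are the cache fast path: when the cached tail plus the new data still fits below one hash block and this is not the final request, the bytes are only appended to creq->cache and no hardware operation is queued. The shape of that test; the struct and names are illustrative:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MAX_HASH_BLOCK_SIZE 64  /* MD5/SHA-1/SHA-256 block size */

struct hash_req {
    uint8_t cache[MAX_HASH_BLOCK_SIZE];
    size_t  cache_ptr;
    int     last_req;
};

/* Returns 1 if the update was fully absorbed into the cache. */
static int cache_req(struct hash_req *r, const uint8_t *data, size_t nbytes)
{
    if (r->cache_ptr + nbytes < MAX_HASH_BLOCK_SIZE && !r->last_req) {
        memcpy(r->cache + r->cache_ptr, data, nbytes);
        r->cache_ptr += nbytes;
        return 1;   /* nothing for the engine to do yet */
    }
    return 0;       /* caller must launch a real operation */
}

The strict "<" on line 474 means an update that exactly completes a block is not cached but processed, so a full block is handed to the engine rather than sitting in the cache.
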
521 struct mv_cesa_ahash_req *creq,
524 struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
527 if (!creq->cache_ptr)
534 memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);
539 creq->cache_ptr,
547 struct mv_cesa_ahash_req *creq,
550 struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
559 if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
560 op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
565 mv_cesa_set_mac_op_total_len(op, creq->len);
589 trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);
602 op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
623 return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
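
Note: lines 547-623 are mv_cesa_ahash_dma_last_req: if the running total still fits the hardware's total-length field (CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX, line 559) the final fragment is issued and the engine pads internally; otherwise the driver builds the trailer itself via mv_cesa_ahash_pad_req into ahashdreq->padding (line 589) and streams it as ordinary data, splitting it at the SRAM payload boundary when needed (the trailerlen - padoff fragment on line 623). A sketch of that decision, with an assumed (illustrative) 24-bit counter limit and the pad arithmetic from the earlier sketch:

#include <stdint.h>

#define MAX_TOTAL_LEN 0xffffffu  /* assumed counter limit, illustrative */

/* Extra input bytes the driver must append itself on the last request:
 * zero if the engine can pad, else a software trailer of
 * pad_len(total) + 8 bytes (see pad_len() above). */
static unsigned int last_req_sw_trailer(uint64_t total_len)
{
    unsigned int index;

    if (total_len <= MAX_TOTAL_LEN)
        return 0;                 /* engine pads internally */

    index = total_len & 63;
    return ((index < 56) ? (56 - index) : (64 + 56 - index)) + 8;
}
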
629 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
632 struct mv_cesa_req *basereq = &creq->base;
643 if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
646 if (creq->src_nents) {
647 ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
662 ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
685 &creq->op_tmpl,
702 if (creq->last_req)
703 op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
706 op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
729 if (!creq->last_req)
730 creq->cache_ptr = req->nbytes + creq->cache_ptr -
733 creq->cache_ptr = 0;
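
Note: lines 729-733: once the TDMA chain is built, any tail that was not submitted (non-final requests go to the engine in whole 64-byte blocks) becomes the new cache_ptr, while a final request drains everything and resets it to 0. The arithmetic, as a sketch:

#include <stddef.h>

/* Tail to re-cache after a request: cache_ptr 10 + nbytes 100 gives
 * 110 total, 64 submitted (one whole block), 46 re-cached. */
static size_t recache_len(size_t nbytes, size_t cache_ptr, int last_req)
{
    size_t total = nbytes + cache_ptr;

    return last_req ? 0 : (total & 63);  /* tail past the last full block */
}
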
753 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
763 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
765 creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
766 if (creq->src_nents < 0) {
768 return creq->src_nents;
784 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
799 ret = mv_cesa_queue_req(&req->base, &creq->base);
809 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
811 creq->len += req->nbytes;
818 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
819 struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
821 mv_cesa_set_mac_op_total_len(tmpl, creq->len);
822 creq->last_req = true;
830 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
831 struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
833 creq->len += req->nbytes;
834 mv_cesa_set_mac_op_total_len(tmpl, creq->len);
835 creq->last_req = true;
844 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
850 *len = creq->len;
851 memcpy(hash, creq->state, digsize);
853 memcpy(cache, creq->cache, creq->cache_ptr);
862 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
874 mv_cesa_update_op_cfg(&creq->op_tmpl,
878 creq->len = len;
879 memcpy(creq->state, hash, digsize);
880 creq->cache_ptr = 0;
886 memcpy(creq->cache, cache, cache_ptr);
887 creq->cache_ptr = cache_ptr;
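
Note: lines 844-887 are the export/import pair that lets the crypto API snapshot and resume a partial hash: export copies out the byte count, the chaining words, and the cached partial block; import restores them, resetting cache_ptr before conditionally reloading the cached tail. A round-trip sketch over an assumed (illustrative) state layout, not the driver's real one:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BLOCK_SIZE 64

/* Illustrative partial-hash snapshot. */
struct hash_state {
    uint64_t len;                   /* total bytes hashed so far */
    uint32_t state[8];              /* up to SHA-256's 8 words */
    uint8_t  cache[BLOCK_SIZE];     /* unhashed tail */
    size_t   cache_ptr;
};

static void hash_export(const struct hash_state *req, struct hash_state *out)
{
    out->len = req->len;
    memcpy(out->state, req->state, sizeof(out->state));
    memcpy(out->cache, req->cache, req->cache_ptr);
    out->cache_ptr = req->cache_ptr;
}

static void hash_import(struct hash_state *req, const struct hash_state *in)
{
    req->len = in->len;
    memcpy(req->state, in->state, sizeof(req->state));
    req->cache_ptr = 0;             /* reset before reloading the tail */
    if (in->cache_ptr) {
        memcpy(req->cache, in->cache, in->cache_ptr);
        req->cache_ptr = in->cache_ptr;
    }
}
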
894 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
901 creq->state[0] = MD5_H0;
902 creq->state[1] = MD5_H1;
903 creq->state[2] = MD5_H2;
904 creq->state[3] = MD5_H3;
964 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
971 creq->state[0] = SHA1_H0;
972 creq->state[1] = SHA1_H1;
973 creq->state[2] = SHA1_H2;
974 creq->state[3] = SHA1_H3;
975 creq->state[4] = SHA1_H4;
1035 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
1042 creq->state[0] = SHA256_H0;
1043 creq->state[1] = SHA256_H1;
1044 creq->state[2] = SHA256_H2;
1045 creq->state[3] = SHA256_H3;
1046 creq->state[4] = SHA256_H4;
1047 creq->state[5] = SHA256_H5;
1048 creq->state[6] = SHA256_H6;
1049 creq->state[7] = SHA256_H7;
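
Note: the three init clusters (lines 894-904, 964-975 and 1035-1049) seed creq->state with the standard initial values that MD5_H0..MD5_H3, SHA1_H0..SHA1_H4 and SHA256_H0..SHA256_H7 expand to (RFC 1321 for MD5, FIPS 180-4 for SHA-1/SHA-256). For reference, the published constants:

#include <stdint.h>

/* RFC 1321: MD5 initial state. */
static const uint32_t md5_iv[4] = {
    0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476,
};

/* FIPS 180-4: SHA-1 initial state (MD5's four words plus a fifth). */
static const uint32_t sha1_iv[5] = {
    0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0,
};

/* FIPS 180-4: SHA-256 initial state. */
static const uint32_t sha256_iv[8] = {
    0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
    0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
};
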