Lines Matching defs:creq

29 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
30 unsigned int len = req->nbytes + creq->cache_ptr;
32 if (!creq->last_req)
37 iter->src.op_offset = creq->cache_ptr;
95 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
97 mv_cesa_ahash_dma_free_padding(&creq->req.dma);
102 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
104 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
105 mv_cesa_ahash_dma_free_cache(&creq->req.dma);
106 mv_cesa_dma_cleanup(&creq->base);
111 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
113 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
119 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
121 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
125 static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
129 index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
135 static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
141 padlen = mv_cesa_ahash_pad_len(creq);
144 if (creq->algo_le) {
145 __le64 bits = cpu_to_le64(creq->len << 3);
149 __be64 bits = cpu_to_be64(creq->len << 3);
159 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
160 struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
161 struct mv_cesa_engine *engine = creq->base.engine;
169 mv_cesa_adjust_op(engine, &creq->op_tmpl);
170 memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
175 writel_relaxed(creq->state[i],
179 if (creq->cache_ptr)
181 creq->cache, creq->cache_ptr);
183 len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
186 if (!creq->last_req) {
191 if (len - creq->cache_ptr)
192 sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
195 creq->cache_ptr,
196 len - creq->cache_ptr,
199 op = &creq->op_tmpl;
203 if (creq->last_req && sreq->offset == req->nbytes &&
204 creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
214 creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
215 mv_cesa_set_mac_op_total_len(op, creq->len);
217 int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;
222 memcpy_fromio(creq->cache,
227 i = mv_cesa_ahash_pad_req(creq, creq->cache);
231 creq->cache, i);
251 creq->cache_ptr = new_cache_ptr;
262 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
263 struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
265 if (sreq->offset < (req->nbytes - creq->cache_ptr))
273 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
274 struct mv_cesa_req *basereq = &creq->base;
281 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
282 struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
289 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
290 struct mv_cesa_req *base = &creq->base;
298 for (i = 0; i < ARRAY_SIZE(creq->state); i++)
299 writel_relaxed(creq->state[i], engine->regs +
309 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
311 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
320 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
322 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
323 return mv_cesa_dma_process(&creq->base, status);
331 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
332 struct mv_cesa_engine *engine = creq->base.engine;
338 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
339 (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
347 data = creq->base.chain.last->op->ctx.hash.hash;
349 creq->state[i] = le32_to_cpu(data[i]);
354 creq->state[i] = readl_relaxed(engine->regs +
356 if (creq->last_req) {
361 if (creq->algo_le) {
365 result[i] = cpu_to_le32(creq->state[i]);
370 result[i] = cpu_to_be32(creq->state[i]);
382 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
384 creq->base.engine = engine;
386 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
395 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
397 if (creq->last_req)
402 if (creq->cache_ptr)
403 sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
404 creq->cache,
405 creq->cache_ptr,
406 ahashreq->nbytes - creq->cache_ptr);
419 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
421 memset(creq, 0, sizeof(*creq));
429 creq->op_tmpl = *tmpl;
430 creq->len = 0;
431 creq->algo_le = algo_le;
447 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
450 if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
451 !creq->last_req) {
457 sg_pcopy_to_buffer(req->src, creq->src_nents,
458 creq->cache + creq->cache_ptr,
461 creq->cache_ptr += req->nbytes;
497 struct mv_cesa_ahash_req *creq,
500 struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
503 if (!creq->cache_ptr)
510 memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);
515 creq->cache_ptr,
523 struct mv_cesa_ahash_req *creq,
526 struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
535 if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
536 op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
541 mv_cesa_set_mac_op_total_len(op, creq->len);
565 trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);
578 op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
599 return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
605 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
608 struct mv_cesa_req *basereq = &creq->base;
619 if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
622 if (creq->src_nents) {
623 ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
638 ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
661 &creq->op_tmpl,
678 if (creq->last_req)
679 op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
682 op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
705 if (!creq->last_req)
706 creq->cache_ptr = req->nbytes + creq->cache_ptr -
709 creq->cache_ptr = 0;
729 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
739 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
741 creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
742 if (creq->src_nents < 0) {
744 return creq->src_nents;
760 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
775 ret = mv_cesa_queue_req(&req->base, &creq->base);
785 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
787 creq->len += req->nbytes;
794 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
795 struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
797 mv_cesa_set_mac_op_total_len(tmpl, creq->len);
798 creq->last_req = true;
806 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
807 struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
809 creq->len += req->nbytes;
810 mv_cesa_set_mac_op_total_len(tmpl, creq->len);
811 creq->last_req = true;
820 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
826 *len = creq->len;
827 memcpy(hash, creq->state, digsize);
829 memcpy(cache, creq->cache, creq->cache_ptr);
838 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
850 mv_cesa_update_op_cfg(&creq->op_tmpl,
854 creq->len = len;
855 memcpy(creq->state, hash, digsize);
856 creq->cache_ptr = 0;
862 memcpy(creq->cache, cache, cache_ptr);
863 creq->cache_ptr = cache_ptr;
870 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
877 creq->state[0] = MD5_H0;
878 creq->state[1] = MD5_H1;
879 creq->state[2] = MD5_H2;
880 creq->state[3] = MD5_H3;
940 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
947 creq->state[0] = SHA1_H0;
948 creq->state[1] = SHA1_H1;
949 creq->state[2] = SHA1_H2;
950 creq->state[3] = SHA1_H3;
951 creq->state[4] = SHA1_H4;
1011 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
1018 creq->state[0] = SHA256_H0;
1019 creq->state[1] = SHA256_H1;
1020 creq->state[2] = SHA256_H2;
1021 creq->state[3] = SHA256_H3;
1022 creq->state[4] = SHA256_H4;
1023 creq->state[5] = SHA256_H5;
1024 creq->state[6] = SHA256_H6;
1025 creq->state[7] = SHA256_H7;
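
Note: the padding helpers matched above (mv_cesa_ahash_pad_len at line 125 and mv_cesa_ahash_pad_req at line 135) implement standard MD5/SHA finalization padding, with creq->algo_le selecting the byte order of the length trailer. The following is a minimal stand-alone sketch of that scheme, assuming a 64-byte hash block; the names here are illustrative, not the driver's, and BLOCK_SIZE_MSK stands in for CESA_HASH_BLOCK_SIZE_MSK.

#include <stdint.h>
#include <string.h>

#define BLOCK_SIZE     64
#define BLOCK_SIZE_MSK (BLOCK_SIZE - 1)

/* Bytes of padding needed so that (len + pad) % 64 == 56,
 * leaving room for the 8-byte bit-length trailer. */
static unsigned int pad_len(uint64_t len)
{
	unsigned int index = len & BLOCK_SIZE_MSK;

	return (index < 56) ? (56 - index) : (64 + 56 - index);
}

/* Write the trailer into buf: 0x80, zero padding, then the total
 * message length in bits -- little-endian for MD5 (algo_le set),
 * big-endian for SHA-1/SHA-256.  Returns the trailer size in bytes,
 * mirroring the padlen + 8 trailerlen seen at line 217. */
static unsigned int pad_req(uint64_t len, int algo_le, uint8_t *buf)
{
	unsigned int padlen = pad_len(len);
	uint64_t bits = len << 3;	/* as in creq->len << 3, lines 145/149 */
	int i;

	buf[0] = 0x80;
	memset(buf + 1, 0, padlen - 1);

	for (i = 0; i < 8; i++)
		buf[padlen + i] = algo_le ? (bits >> (8 * i)) & 0xff
					  : (bits >> (8 * (7 - i))) & 0xff;

	return padlen + 8;
}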