Lines matching defs:hace_dev in drivers/crypto/aspeed/aspeed-hace-hash.c (Linux kernel HACE hash driver); each match is prefixed with its source line number.

77 static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
83 AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags);
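
The block at 77-83 is aspeed_ahash_fill_padding(), which appends classic Merkle-Damgard padding (a 0x80 byte, zeros, then the message bit length) to the residual data in rctx->buffer before the final engine pass. A minimal sketch of that scheme for a 64-byte-block hash such as SHA-256; the context fields used here (bufcnt, digcnt) are assumptions, not taken from the listing:

    #include <linux/string.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Hypothetical request context; the real driver keeps equivalent fields. */
    struct sketch_rctx {
        u8  buffer[128];    /* residual block plus padding */
        u32 bufcnt;         /* bytes currently held in buffer */
        u64 digcnt;         /* total bytes hashed so far */
    };

    /* Merkle-Damgard padding for a 64-byte-block hash (SHA-1/SHA-256 style):
     * one 0x80 byte, zeros up to 56 mod 64, then the 64-bit big-endian
     * bit count of the whole message. */
    static void fill_padding_sketch(struct sketch_rctx *rctx)
    {
        unsigned int index = rctx->bufcnt & 0x3f;
        unsigned int padlen = (index < 56) ? (56 - index) : (120 - index);
        __be64 bits = cpu_to_be64(rctx->digcnt << 3);

        rctx->buffer[rctx->bufcnt] = 0x80;
        memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
        memcpy(rctx->buffer + rctx->bufcnt + padlen, &bits, sizeof(bits));
        rctx->bufcnt += padlen + sizeof(bits);
    }
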
115 static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
117 struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
125 AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain);
137 dev_warn(hace_dev->dev, "Hash data length is too large\n");
145 rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
148 if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
149 dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
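
Lines 115-149 are the linear (non-scatter-gather) DMA prepare path: the input is split into a DMA-able length and a remainder that stays buffered, and the digest area is mapped for the engine. The map-then-check idiom visible at 145-149, sketched below; the digest size and DMA direction are assumptions (the digest buffer is both written by the engine and fed back in on later passes):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <crypto/sha2.h>

    /* Sketch of the dma_map_single()/dma_mapping_error() idiom at 145-149. */
    static int map_digest_sketch(struct device *dev, void *digest,
                                 dma_addr_t *dma_addr)
    {
        *dma_addr = dma_map_single(dev, digest, SHA512_DIGEST_SIZE,
                                   DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *dma_addr)) {
            dev_warn(dev, "dma_map() rctx digest error\n");
            return -ENOMEM;
        }
        return 0;
    }
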
164 static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
166 struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
177 AHASH_DBG(hace_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x\n",
181 sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
184 dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
190 rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
193 if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
194 dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
203 rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
207 if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
208 dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
259 dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
262 dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
265 dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
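
Lines 164-265 are the scatter-gather variant: dma_map_sg() for the source list, plus single mappings for the digest and the residual buffer; the unmaps at 259-265 undo them in reverse order on the error path. A sketch of that map-and-unwind structure, with the struct layout assumed:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <crypto/sha2.h>

    /* Hypothetical request-context fields, mirroring names in the listing. */
    struct sg_rctx_sketch {
        struct scatterlist *src_sg;
        int src_nents;
        u8 digest[SHA512_DIGEST_SIZE];
        u8 buffer[SHA512_BLOCK_SIZE * 2];
        u32 bufcnt;
        dma_addr_t digest_dma_addr;
        dma_addr_t buffer_dma_addr;
    };

    /* Map SG source, digest, and buffer; on failure, unmap in reverse
     * order, matching the cleanup sequence at lines 259-265. */
    static int dma_prepare_sg_sketch(struct device *dev,
                                     struct sg_rctx_sketch *rctx)
    {
        if (!dma_map_sg(dev, rctx->src_sg, rctx->src_nents, DMA_TO_DEVICE)) {
            dev_warn(dev, "dma_map_sg() src error\n");
            return -EIO;
        }

        rctx->digest_dma_addr = dma_map_single(dev, rctx->digest,
                                               SHA512_DIGEST_SIZE,
                                               DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, rctx->digest_dma_addr))
            goto unmap_sg;

        rctx->buffer_dma_addr = dma_map_single(dev, rctx->buffer,
                                               rctx->bufcnt, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, rctx->buffer_dma_addr))
            goto unmap_digest;

        return 0;

    unmap_digest:
        dma_unmap_single(dev, rctx->digest_dma_addr, SHA512_DIGEST_SIZE,
                         DMA_BIDIRECTIONAL);
    unmap_sg:
        dma_unmap_sg(dev, rctx->src_sg, rctx->src_nents, DMA_TO_DEVICE);
        return -ENOMEM;
    }
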
271 static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev)
273 struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
276 AHASH_DBG(hace_dev, "\n");
280 crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req, 0);
289 static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev)
291 struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
295 AHASH_DBG(hace_dev, "\n");
297 dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
300 dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
305 return aspeed_ahash_complete(hace_dev);
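
Lines 289-305 (aspeed_ahash_transfer) and 271-280 (aspeed_ahash_complete) form the tail of a pass: unmap the digest and buffer, copy the device-written digest into req->result, and hand the request back to the crypto engine; passing 0 at line 280 reports success. A sketch of that completion step, with the digest pointer supplied by the caller:

    #include <linux/string.h>
    #include <crypto/engine.h>
    #include <crypto/hash.h>

    /* Copy the digest out and finalize on the engine, which wakes the
     * queue and runs the request's completion callback. */
    static int transfer_sketch(struct crypto_engine *engine,
                               struct ahash_request *req, const u8 *digest)
    {
        memcpy(req->result, digest,
               crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
        crypto_finalize_hash_request(engine, req, 0);
        return 0;
    }
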
311 static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev,
314 struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
318 AHASH_DBG(hace_dev, "src_dma:%pad, digest_dma:%pad, length:%zu\n",
325 ast_hace_write(hace_dev, hash_engine->src_dma, ASPEED_HACE_HASH_SRC);
326 ast_hace_write(hace_dev, hash_engine->digest_dma,
328 ast_hace_write(hace_dev, hash_engine->digest_dma,
330 ast_hace_write(hace_dev, hash_engine->src_length,
336 ast_hace_write(hace_dev, rctx->cmd, ASPEED_HACE_HASH_CMD);
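
Lines 311-336 (aspeed_hace_ahash_trigger) program the engine and start it: source DMA address, digest DMA address (written to both the digest and HMAC key-buffer registers at 326/328), data length, then the command register. A sketch assuming ast_hace_write() is a plain writel() wrapper; the register names at 326-330 are cut off in the listing, so the names and offsets below are illustrative:

    #include <linux/io.h>
    #include <linux/types.h>

    /* Illustrative offsets; only HASH_SRC and HASH_CMD appear in the listing. */
    #define ASPEED_HACE_HASH_SRC            0x20
    #define ASPEED_HACE_HASH_DIGEST_BUFF    0x24
    #define ASPEED_HACE_HASH_KEY_BUFF       0x28
    #define ASPEED_HACE_HASH_DATA_LEN       0x2c
    #define ASPEED_HACE_HASH_CMD            0x30

    static void trigger_sketch(void __iomem *base, u32 src_dma,
                               u32 digest_dma, u32 length, u32 cmd)
    {
        writel(src_dma, base + ASPEED_HACE_HASH_SRC);
        writel(digest_dma, base + ASPEED_HACE_HASH_DIGEST_BUFF);
        writel(digest_dma, base + ASPEED_HACE_HASH_KEY_BUFF);
        writel(length, base + ASPEED_HACE_HASH_DATA_LEN);
        /* writing the command register last kicks the operation */
        writel(cmd, base + ASPEED_HACE_HASH_CMD);
    }
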
346 static int aspeed_ahash_hmac_resume(struct aspeed_hace_dev *hace_dev)
348 struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
356 AHASH_DBG(hace_dev, "\n");
358 dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
361 dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
371 aspeed_ahash_fill_padding(hace_dev, rctx);
374 rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
377 if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
378 dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
383 rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer,
386 if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
387 dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
396 return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
399 dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
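
Lines 346-399 (aspeed_ahash_hmac_resume) run the outer HMAC pass once the inner hash finishes, per HMAC(K, m) = H((K xor opad) || H((K xor ipad) || m)): the first-pass mappings are undone (358-361), the buffer is rebuilt as the opad-masked key followed by the inner digest, padded (371), remapped (374-387), and the engine is retriggered with aspeed_ahash_transfer as the completion handler (396); the label at 399 unwinds the digest mapping if the buffer map fails. A fragment sketching the buffer rebuild between the unmaps and the remaps; every field name here is an assumption apart from the calls visible in the listing:

    /* bctx->opad holds the precomputed (key XOR opad) block; sha_iv resets
     * the digest state so the engine starts a fresh hash. All names assumed. */
    memcpy(rctx->buffer, bctx->opad, rctx->block_size);
    memcpy(rctx->buffer + rctx->block_size, rctx->digest, rctx->digsize);
    rctx->bufcnt = rctx->block_size + rctx->digsize;
    rctx->digcnt = rctx->bufcnt;    /* outer message length feeds the padding */

    aspeed_ahash_fill_padding(hace_dev, rctx);          /* line 371 */
    memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize);   /* fresh IV */
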
405 static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev)
407 struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
412 AHASH_DBG(hace_dev, "\n");
414 aspeed_ahash_fill_padding(hace_dev, rctx);
416 rctx->digest_dma_addr = dma_map_single(hace_dev->dev,
420 if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
421 dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
426 rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
430 if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
431 dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
441 return aspeed_hace_ahash_trigger(hace_dev,
444 return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
447 dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
453 static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
455 struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
459 AHASH_DBG(hace_dev, "\n");
461 dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
465 dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
469 dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
479 return aspeed_ahash_req_final(hace_dev);
481 return aspeed_ahash_complete(hace_dev);
484 static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev)
486 struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
490 AHASH_DBG(hace_dev, "\n");
492 dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
496 return aspeed_ahash_req_final(hace_dev);
498 return aspeed_ahash_complete(hace_dev);
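
Both update-resume paths (453-481 for scatter-gather, 484-498 for linear) end with the same decision: fall through into the final round if the caller asked for final/finup, otherwise complete the update. Sketched below; the flag name is an assumption:

    /* Tail shared by both resume paths (479-481 and 496-498). */
    if (rctx->flags & SHA_FLAGS_FINUP)      /* flag name assumed */
        return aspeed_ahash_req_final(hace_dev);

    return aspeed_ahash_complete(hace_dev);
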
501 static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev)
503 struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
509 AHASH_DBG(hace_dev, "\n");
511 if (hace_dev->version == AST2600_VERSION) {
519 ret = hash_engine->dma_prepare(hace_dev);
523 return aspeed_hace_ahash_trigger(hace_dev, resume);
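
Lines 501-523 (aspeed_ahash_req_update) pick the DMA prepare routine and resume handler per silicon generation: the AST2600 hash engine can walk a scatter-gather descriptor list, older parts need a contiguous bounce path. A sketch of that split, reconstructed around the lines shown (511, 519, 523); the assignments in the branches are assumptions:

    if (hace_dev->version == AST2600_VERSION) {
        resume = aspeed_ahash_update_resume_sg;
        hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg;
    } else {
        resume = aspeed_ahash_update_resume;
        hash_engine->dma_prepare = aspeed_ahash_dma_prepare;
    }

    ret = hash_engine->dma_prepare(hace_dev);
    if (ret)
        return ret;

    return aspeed_hace_ahash_trigger(hace_dev, resume);
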
526 static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev,
530 hace_dev->crypt_engine_hash, req);
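
aspeed_hace_hash_handle_queue() (526-530) does not run the request directly; it queues it on the crypto engine, which serializes hardware access and later invokes the do-one-request handler seen at 539-549. The queuing call is the stock crypto-engine API:

    #include <crypto/engine.h>

    /* Hand the request to the engine; the driver's handler is called back
     * when the request reaches the head of the engine queue. */
    static int handle_queue_sketch(struct crypto_engine *engine,
                                   struct ahash_request *req)
    {
        return crypto_transfer_hash_request_to_engine(engine, req);
    }
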
539 struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
543 hash_engine = &hace_dev->hash_engine;
547 ret = aspeed_ahash_req_update(hace_dev);
549 ret = aspeed_ahash_req_final(hace_dev);
563 struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
566 hash_engine = &hace_dev->hash_engine;
569 if (hace_dev->version == AST2600_VERSION)
586 struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
588 AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
609 return aspeed_hace_hash_handle_queue(hace_dev, req);
627 struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
629 AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n",
633 return aspeed_hace_hash_handle_queue(hace_dev, req);
641 struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
644 AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
666 struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
669 AHASH_DBG(hace_dev, "%s: digest size:%d\n",
725 dev_warn(tctx->hace_dev->dev, "digest size %d not supported\n",
755 struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
762 AHASH_DBG(hace_dev, "%s: keylen:%d\n", crypto_tfm_alg_name(&tfm->base),
795 tctx->hace_dev = ast_alg->hace_dev;
809 dev_warn(ast_alg->hace_dev->dev,
822 struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
824 AHASH_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(tfm));
1192 void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
1199 if (hace_dev->version != AST2600_VERSION)
1206 void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
1210 AHASH_DBG(hace_dev, "\n");
1213 aspeed_ahash_algs[i].hace_dev = hace_dev;
1216 AHASH_DBG(hace_dev, "Failed to register %s\n",
1221 if (hace_dev->version != AST2600_VERSION)
1225 aspeed_ahash_algs_g6[i].hace_dev = hace_dev;
1228 AHASH_DBG(hace_dev, "Failed to register %s\n",
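
Registration (1206-1228) stamps each algorithm wrapper with the device pointer before registering it, and only exposes the second table (the AST2600-only variants) when the version check at 1221 passes; unregistration at 1192-1199 mirrors this. A sketch of the loop, assuming a wrapper struct that embeds struct ahash_alg; only the hace_dev member is visible in the listing:

    #include <crypto/internal/hash.h>

    /* Hypothetical wrapper around one exported hash algorithm. */
    struct sketch_hace_alg {
        struct aspeed_hace_dev *hace_dev;
        struct ahash_alg ahash;
    };

    static void register_algs_sketch(struct aspeed_hace_dev *hace_dev,
                                     struct sketch_hace_alg *algs, int count)
    {
        int rc, i;

        for (i = 0; i < count; i++) {
            algs[i].hace_dev = hace_dev;
            rc = crypto_register_ahash(&algs[i].ahash);
            if (rc)
                AHASH_DBG(hace_dev, "Failed to register %s\n",
                          algs[i].ahash.halg.base.cra_name);
        }
    }
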