Lines Matching refs:dd
75 struct atmel_tdes_dev *dd;
181 static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
183 return readl_relaxed(dd->io_base + offset);
186 static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
189 writel_relaxed(value, dd->io_base + offset);
192 static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
196 atmel_tdes_write(dd, offset, *value);
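
The matches above come from the driver's MMIO accessor helpers. Below is a minimal sketch of how they most plausibly read in full, assuming dd->io_base is the ioremapped register window (as the read/write lines suggest); the parameter list and loop of atmel_tdes_write_n() are truncated in the listing, so that part is a reconstruction rather than the verbatim driver code.

    #include <linux/io.h>

    static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
    {
        return readl_relaxed(dd->io_base + offset);
    }

    static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
                                        u32 offset, u32 value)
    {
        writel_relaxed(value, dd->io_base + offset);
    }

    /* Write 'len' consecutive 32-bit words starting at 'offset'. */
    static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
                                   u32 *value, int len)
    {
        for (; len--; value++, offset += 4)
            atmel_tdes_write(dd, offset, *value);
    }
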
205 if (!ctx->dd) {
210 ctx->dd = tdes_dd;
212 tdes_dd = ctx->dd;
219 static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
223 err = clk_prepare_enable(dd->iclk);
227 if (!(dd->flags & TDES_FLAGS_INIT)) {
228 atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
229 dd->flags |= TDES_FLAGS_INIT;
235 static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
237 return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
240 static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
244 err = atmel_tdes_hw_init(dd);
248 dd->hw_version = atmel_tdes_get_version(dd);
250 dev_info(dd->dev,
251 "version: 0x%x\n", dd->hw_version);
253 clk_disable_unprepare(dd->iclk);
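
These matches belong to the one-time hardware bring-up: atmel_tdes_hw_init() enables the peripheral clock and issues a single software reset, and atmel_tdes_hw_version_init() uses it to read the 12-bit hardware version before gating the clock again. A sketch of that sequence, with the error paths filled in by assumption (the listing only shows the successful flow):

    static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
    {
        int err;

        err = clk_prepare_enable(dd->iclk);     /* peripheral clock on */
        if (err)
            return err;

        if (!(dd->flags & TDES_FLAGS_INIT)) {
            atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);   /* reset the engine once */
            dd->flags |= TDES_FLAGS_INIT;
        }

        return 0;
    }

    static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
    {
        return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
    }

    static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
    {
        int err;

        err = atmel_tdes_hw_init(dd);
        if (err)
            return err;

        dd->hw_version = atmel_tdes_get_version(dd);
        dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

        clk_disable_unprepare(dd->iclk);        /* clock only needed for the read */
        return 0;
    }
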
260 struct atmel_tdes_dev *dd = data;
263 tasklet_schedule(&dd->done_task);
266 static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
271 err = atmel_tdes_hw_init(dd);
276 if (!dd->caps.has_dma)
277 atmel_tdes_write(dd, TDES_PTCR,
281 if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
284 } else if (dd->ctx->keylen > DES_KEY_SIZE) {
291 valmr |= dd->flags & TDES_FLAGS_MODE_MASK;
293 atmel_tdes_write(dd, TDES_MR, valmr);
295 atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
296 dd->ctx->keylen >> 2);
298 if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
299 atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);
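
atmel_tdes_write_ctrl() is the per-request hardware setup: it re-runs hw_init, selects the key mode from the key length (one, two or three DES keys), merges in the cipher/direction bits carried in dd->flags, then loads the key words and, for every mode except ECB, the 8-byte IV. A sketch of that flow; the two TDES_MR_*_KEY_BITS identifiers are placeholders for the real key-mode/algorithm macros, which the listing does not show.

    static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
    {
        u32 valmr = 0;
        int err;

        err = atmel_tdes_hw_init(dd);
        if (err)
            return err;

        /* Without dmaengine support, disable the PDC before programming. */
        if (!dd->caps.has_dma)
            atmel_tdes_write(dd, TDES_PTCR,
                             TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);

        /*
         * Key-mode selection (exact TDES_MR_* bits not shown in the listing):
         *   keylen > 16 bytes -> three-key TDES
         *   keylen >  8 bytes -> two-key TDES (EDE with K3 = K1)
         *   keylen == 8 bytes -> single DES
         */
        if (dd->ctx->keylen > (DES_KEY_SIZE << 1))
            valmr |= TDES_MR_THREE_KEY_BITS;        /* placeholder macro name */
        else if (dd->ctx->keylen > DES_KEY_SIZE)
            valmr |= TDES_MR_TWO_KEY_BITS;          /* placeholder macro name */

        valmr |= dd->flags & TDES_FLAGS_MODE_MASK;  /* ECB/CBC/... plus direction */
        atmel_tdes_write(dd, TDES_MR, valmr);

        atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
                           dd->ctx->keylen >> 2);   /* key length in 32-bit words */

        /* The IV registers only matter outside ECB mode. */
        if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
            atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);

        return 0;
    }
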
304 static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
309 atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
311 if (dd->flags & TDES_FLAGS_FAST) {
312 dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
313 dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
315 dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
316 dd->dma_size, DMA_FROM_DEVICE);
319 count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
320 dd->buf_out, dd->buflen, dd->dma_size, 1);
321 if (count != dd->dma_size) {
330 static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
334 dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
335 dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
336 dd->buflen = PAGE_SIZE;
337 dd->buflen &= ~(DES_BLOCK_SIZE - 1);
339 if (!dd->buf_in || !dd->buf_out) {
340 dev_err(dd->dev, "unable to alloc pages.\n");
345 dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
346 dd->buflen, DMA_TO_DEVICE);
347 if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
348 dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
353 dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
354 dd->buflen, DMA_FROM_DEVICE);
355 if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
356 dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
364 dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
368 free_page((unsigned long)dd->buf_out);
369 free_page((unsigned long)dd->buf_in);
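
atmel_tdes_buff_init() sets up the slow-path bounce buffers: one page each for input and output, the usable length rounded down to a whole number of DES blocks, and both buffers DMA-mapped once so they can be reused for every request. A sketch with the unwind labels and return codes filled in by assumption (only fragments of the error path appear in the listing):

    static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
    {
        dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
        dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
        dd->buflen = PAGE_SIZE;
        dd->buflen &= ~(DES_BLOCK_SIZE - 1);    /* whole DES blocks only */

        if (!dd->buf_in || !dd->buf_out) {
            dev_err(dd->dev, "unable to alloc pages.\n");
            goto err_alloc;
        }

        /* Map the bounce buffers once; they stay mapped for the device's lifetime. */
        dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
                                         dd->buflen, DMA_TO_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
            dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
            goto err_map_in;
        }

        dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
                                          dd->buflen, DMA_FROM_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
            dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
            goto err_map_out;
        }

        return 0;

    err_map_out:
        dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
                         DMA_TO_DEVICE);
    err_map_in:
    err_alloc:
        free_page((unsigned long)dd->buf_out);
        free_page((unsigned long)dd->buf_in);
        return -ENOMEM;
    }
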
375 static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
377 dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
379 dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
381 free_page((unsigned long)dd->buf_out);
382 free_page((unsigned long)dd->buf_in);
385 static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
389 struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
392 dd->dma_size = length;
394 if (!(dd->flags & TDES_FLAGS_FAST)) {
395 dma_sync_single_for_device(dd->dev, dma_addr_in, length,
413 atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
414 atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
415 atmel_tdes_write(dd, TDES_TCR, len32);
416 atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
417 atmel_tdes_write(dd, TDES_RCR, len32);
420 atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);
423 atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);
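
The PDC variant drives the controller's built-in Peripheral DMA Controller: disable both directions, load the transmit and receive pointer/counter register pairs, unmask the end-of-receive interrupt, then enable both directions to start the transfer. A sketch of that sequence; the 32-bit word-count computation and the exact signature are assumptions, and the reqctx lookup shown in the listing is omitted here.

    static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
                                    dma_addr_t dma_addr_in,
                                    dma_addr_t dma_addr_out, int length)
    {
        u32 len32 = DIV_ROUND_UP(length, sizeof(u32));  /* PDC counts 32-bit words */

        dd->dma_size = length;

        /* Slow path: push the bounce buffer contents out to memory first. */
        if (!(dd->flags & TDES_FLAGS_FAST))
            dma_sync_single_for_device(dd->dev, dma_addr_in, length,
                                       DMA_TO_DEVICE);

        /* Stop both PDC directions before reprogramming the pointers. */
        atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);
        atmel_tdes_write(dd, TDES_TPR, dma_addr_in);    /* input data source */
        atmel_tdes_write(dd, TDES_TCR, len32);
        atmel_tdes_write(dd, TDES_RPR, dma_addr_out);   /* output data destination */
        atmel_tdes_write(dd, TDES_RCR, len32);

        /* ENDRX fires once the last word has been received. */
        atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);

        /* Kick off the transfer in both directions. */
        atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);

        return 0;
    }
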
428 static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
432 struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
437 dd->dma_size = length;
439 if (!(dd->flags & TDES_FLAGS_FAST)) {
440 dma_sync_single_for_device(dd->dev, dma_addr_in, length,
458 dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
459 dd->dma_lch_out.dma_conf.src_addr_width = addr_width;
461 dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
462 dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
464 dd->flags |= TDES_FLAGS_DMA;
474 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
480 out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
487 out_desc->callback_param = dd;
490 dma_async_issue_pending(dd->dma_lch_out.chan);
493 dma_async_issue_pending(dd->dma_lch_in.chan);
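
The dmaengine variant does the same job through two slave channels: both channels are reconfigured with the FIFO access width, the already-mapped input and output addresses are wrapped in single-entry scatterlists, transfers are prepared in both directions, only the receive descriptor gets a completion callback (which, per the callback lines above, just schedules done_task), and both channels are issued. A sketch under those assumptions; the callback name and the prep flags follow common dmaengine usage rather than anything visible in the listing.

    static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
                                    dma_addr_t dma_addr_in,
                                    dma_addr_t dma_addr_out, int length)
    {
        struct scatterlist sg[2];
        struct dma_async_tx_descriptor *in_desc, *out_desc;
        enum dma_slave_buswidth addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

        dd->dma_size = length;

        if (!(dd->flags & TDES_FLAGS_FAST))
            dma_sync_single_for_device(dd->dev, dma_addr_in, length,
                                       DMA_TO_DEVICE);

        /* Both FIFO accesses use the same width for this transfer. */
        dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
        dd->dma_lch_out.dma_conf.src_addr_width = addr_width;
        dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
        dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

        dd->flags |= TDES_FLAGS_DMA;

        /* One pre-mapped address per direction, wrapped in a single sg entry. */
        sg_init_table(sg, 2);
        sg_dma_address(&sg[0]) = dma_addr_in;
        sg_dma_len(&sg[0]) = length;
        sg_dma_address(&sg[1]) = dma_addr_out;
        sg_dma_len(&sg[1]) = length;

        in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0], 1,
                                          DMA_MEM_TO_DEV,
                                          DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!in_desc)
            return -EINVAL;

        out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1], 1,
                                           DMA_DEV_TO_MEM,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!out_desc)
            return -EINVAL;

        /* Only the receive side signals completion back to the driver. */
        out_desc->callback = atmel_tdes_dma_callback;   /* schedules dd->done_task */
        out_desc->callback_param = dd;

        dmaengine_submit(out_desc);
        dma_async_issue_pending(dd->dma_lch_out.chan);

        dmaengine_submit(in_desc);
        dma_async_issue_pending(dd->dma_lch_in.chan);

        return 0;
    }
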
498 static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
504 if ((!dd->in_offset) && (!dd->out_offset)) {
506 in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
507 IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
508 out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
509 IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
512 if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
518 count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
519 count = min_t(size_t, count, sg_dma_len(dd->out_sg));
521 err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
523 dev_err(dd->dev, "dma_map_sg() error\n");
527 err = dma_map_sg(dd->dev, dd->out_sg, 1,
530 dev_err(dd->dev, "dma_map_sg() error\n");
531 dma_unmap_sg(dd->dev, dd->in_sg, 1,
536 addr_in = sg_dma_address(dd->in_sg);
537 addr_out = sg_dma_address(dd->out_sg);
539 dd->flags |= TDES_FLAGS_FAST;
543 count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
544 dd->buf_in, dd->buflen, dd->total, 0);
546 addr_in = dd->dma_addr_in;
547 addr_out = dd->dma_addr_out;
549 dd->flags &= ~TDES_FLAGS_FAST;
552 dd->total -= count;
554 if (dd->caps.has_dma)
555 err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
557 err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);
559 if (err && (dd->flags & TDES_FLAGS_FAST)) {
560 dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
561 dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
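
atmel_tdes_crypt_start() picks one of two data paths for the next chunk. If the current source and destination scatterlist entries are word-aligned, block-sized and of equal length, they are DMA-mapped and used directly (the "fast" path); otherwise the data is copied into the bounce buffers set up by atmel_tdes_buff_init(). Either way the chunk is handed to the dmaengine or PDC routine depending on the capabilities. A sketch of that decision logic, restructured slightly for readability and with error handling abbreviated:

    static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
    {
        dma_addr_t addr_in, addr_out;
        size_t count;
        bool fast;
        int err;

        /* "Fast" only if both current entries start at offset 0, are word-aligned,
         * block-sized multiples, and the same length. */
        fast = !dd->in_offset && !dd->out_offset &&
               IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
               IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size) &&
               IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
               IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size) &&
               sg_dma_len(dd->in_sg) == sg_dma_len(dd->out_sg);

        if (fast) {
            /* Map src/dst directly and feed them to the engine as-is. */
            count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
            count = min_t(size_t, count, sg_dma_len(dd->out_sg));

            if (!dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE))
                return -EINVAL;
            if (!dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE)) {
                dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                return -EINVAL;
            }

            addr_in = sg_dma_address(dd->in_sg);
            addr_out = sg_dma_address(dd->out_sg);
            dd->flags |= TDES_FLAGS_FAST;
        } else {
            /* Slow path: copy into the pre-mapped bounce buffers. */
            count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
                                       dd->buf_in, dd->buflen, dd->total, 0);
            addr_in = dd->dma_addr_in;
            addr_out = dd->dma_addr_out;
            dd->flags &= ~TDES_FLAGS_FAST;
        }

        dd->total -= count;

        if (dd->caps.has_dma)
            err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
        else
            err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);

        if (err && (dd->flags & TDES_FLAGS_FAST)) {
            dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
            dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
        }

        return err;
    }
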
568 atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
570 struct skcipher_request *req = dd->req;
591 static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
593 struct skcipher_request *req = dd->req;
596 clk_disable_unprepare(dd->iclk);
598 dd->flags &= ~TDES_FLAGS_BUSY;
601 atmel_tdes_set_iv_as_last_ciphertext_block(dd);
606 static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
615 spin_lock_irqsave(&dd->lock, flags);
617 ret = crypto_enqueue_request(&dd->queue, &req->base);
618 if (dd->flags & TDES_FLAGS_BUSY) {
619 spin_unlock_irqrestore(&dd->lock, flags);
622 backlog = crypto_get_backlog(&dd->queue);
623 async_req = crypto_dequeue_request(&dd->queue);
625 dd->flags |= TDES_FLAGS_BUSY;
626 spin_unlock_irqrestore(&dd->lock, flags);
637 dd->req = req;
638 dd->total = req->cryptlen;
639 dd->in_offset = 0;
640 dd->in_sg = req->src;
641 dd->out_offset = 0;
642 dd->out_sg = req->dst;
647 dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
648 dd->ctx = ctx;
649 ctx->dd = dd;
651 err = atmel_tdes_write_ctrl(dd);
653 err = atmel_tdes_crypt_start(dd);
656 atmel_tdes_finish_req(dd, err);
657 tasklet_schedule(&dd->queue_task);
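
atmel_tdes_handle_queue() follows the usual crypto_queue pattern: enqueue under the spinlock, bail out if the hardware is already busy (the running request's completion will re-enter this function), otherwise dequeue the next request, mark the device busy, notify any backlogged request, program the hardware and start the first chunk. A sketch of that discipline, with the request-to-device state copying compressed into a comment:

    static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
                                       struct skcipher_request *req)
    {
        struct crypto_async_request *async_req, *backlog;
        unsigned long flags;
        int ret = 0, err;

        spin_lock_irqsave(&dd->lock, flags);
        if (req)
            ret = crypto_enqueue_request(&dd->queue, &req->base);
        if (dd->flags & TDES_FLAGS_BUSY) {
            /* A request is already on the hardware; it will be picked up later. */
            spin_unlock_irqrestore(&dd->lock, flags);
            return ret;
        }
        backlog = crypto_get_backlog(&dd->queue);
        async_req = crypto_dequeue_request(&dd->queue);
        if (async_req)
            dd->flags |= TDES_FLAGS_BUSY;
        spin_unlock_irqrestore(&dd->lock, flags);

        if (!async_req)
            return ret;

        if (backlog)
            backlog->complete(backlog, -EINPROGRESS);

        /* ... populate dd->req, dd->total, dd->in_sg/out_sg, mode flags, dd->ctx ... */

        err = atmel_tdes_write_ctrl(dd);
        if (!err)
            err = atmel_tdes_crypt_start(dd);
        if (err) {
            /* The hardware refused the job: finish it and try the next one. */
            atmel_tdes_finish_req(dd, err);
            tasklet_schedule(&dd->queue_task);
        }

        return ret;
    }
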
663 static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
668 if (dd->flags & TDES_FLAGS_DMA) {
670 if (dd->flags & TDES_FLAGS_FAST) {
671 dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
672 dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
674 dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
675 dd->dma_size, DMA_FROM_DEVICE);
678 count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
679 dd->buf_out, dd->buflen, dd->dma_size, 1);
680 if (count != dd->dma_size) {
741 return atmel_tdes_handle_queue(ctx->dd, req);
744 static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
749 dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
750 if (IS_ERR(dd->dma_lch_in.chan)) {
751 ret = PTR_ERR(dd->dma_lch_in.chan);
755 dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
757 dd->dma_lch_in.dma_conf.src_maxburst = 1;
758 dd->dma_lch_in.dma_conf.src_addr_width =
760 dd->dma_lch_in.dma_conf.dst_maxburst = 1;
761 dd->dma_lch_in.dma_conf.dst_addr_width =
763 dd->dma_lch_in.dma_conf.device_fc = false;
765 dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
766 if (IS_ERR(dd->dma_lch_out.chan)) {
767 ret = PTR_ERR(dd->dma_lch_out.chan);
771 dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
773 dd->dma_lch_out.dma_conf.src_maxburst = 1;
774 dd->dma_lch_out.dma_conf.src_addr_width =
776 dd->dma_lch_out.dma_conf.dst_maxburst = 1;
777 dd->dma_lch_out.dma_conf.dst_addr_width =
779 dd->dma_lch_out.dma_conf.device_fc = false;
784 dma_release_channel(dd->dma_lch_in.chan);
786 dev_err(dd->dev, "no DMA channel available\n");
790 static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
792 dma_release_channel(dd->dma_lch_in.chan);
793 dma_release_channel(dd->dma_lch_out.chan);
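
atmel_tdes_dma_init() requests the two named slave channels ("tx" towards the input FIFO, "rx" from the output FIFO) and pre-fills their slave configuration; atmel_tdes_dma_cleanup() releases them again. In the sketch below the FIFO register names TDES_IDATA1R and TDES_ODATA1R are my assumption for what the truncated dst_addr/src_addr lines point at, and the address-width fields (also set here in the listing) are omitted because they are reprogrammed per transfer anyway.

    static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
    {
        int ret;

        /* "tx" feeds the input FIFO, "rx" drains the output FIFO. */
        dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
        if (IS_ERR(dd->dma_lch_in.chan)) {
            ret = PTR_ERR(dd->dma_lch_in.chan);
            goto err;
        }

        dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + TDES_IDATA1R;   /* assumed */
        dd->dma_lch_in.dma_conf.src_maxburst = 1;
        dd->dma_lch_in.dma_conf.dst_maxburst = 1;
        dd->dma_lch_in.dma_conf.device_fc = false;

        dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
        if (IS_ERR(dd->dma_lch_out.chan)) {
            ret = PTR_ERR(dd->dma_lch_out.chan);
            goto err_release_in;
        }

        dd->dma_lch_out.dma_conf.src_addr = dd->phys_base + TDES_ODATA1R;  /* assumed */
        dd->dma_lch_out.dma_conf.src_maxburst = 1;
        dd->dma_lch_out.dma_conf.dst_maxburst = 1;
        dd->dma_lch_out.dma_conf.device_fc = false;

        return 0;

    err_release_in:
        dma_release_channel(dd->dma_lch_in.chan);
    err:
        dev_err(dd->dev, "no DMA channel available\n");
        return ret;
    }

    static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
    {
        dma_release_channel(dd->dma_lch_in.chan);
        dma_release_channel(dd->dma_lch_out.chan);
    }
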
900 struct atmel_tdes_dev *dd;
904 dd = atmel_tdes_find_dev(ctx);
905 if (!dd)
1054 struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;
1056 atmel_tdes_handle_queue(dd, NULL);
1061 struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
1064 if (!(dd->flags & TDES_FLAGS_DMA))
1065 err = atmel_tdes_crypt_pdc_stop(dd);
1067 err = atmel_tdes_crypt_dma_stop(dd);
1069 if (dd->total && !err) {
1070 if (dd->flags & TDES_FLAGS_FAST) {
1071 dd->in_sg = sg_next(dd->in_sg);
1072 dd->out_sg = sg_next(dd->out_sg);
1073 if (!dd->in_sg || !dd->out_sg)
1077 err = atmel_tdes_crypt_start(dd);
1082 atmel_tdes_finish_req(dd, err);
1083 atmel_tdes_handle_queue(dd, NULL);
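
The matches from the done tasklet show the completion side: tear down whichever transfer mechanism was active, and if bytes remain, advance the scatterlists (fast path only) and launch the next chunk; otherwise finish the request and poll the queue for more work. A sketch of that control flow, with the function name and the exact error handling assumed:

    static void atmel_tdes_done_task(unsigned long data)
    {
        struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;
        int err;

        /* Tear down whichever transfer mechanism was used for this chunk. */
        if (!(dd->flags & TDES_FLAGS_DMA))
            err = atmel_tdes_crypt_pdc_stop(dd);
        else
            err = atmel_tdes_crypt_dma_stop(dd);

        if (dd->total && !err) {
            if (dd->flags & TDES_FLAGS_FAST) {
                /* The fast path consumes one sg entry per chunk; move on. */
                dd->in_sg = sg_next(dd->in_sg);
                dd->out_sg = sg_next(dd->out_sg);
                if (!dd->in_sg || !dd->out_sg)
                    err = -EINVAL;
            }
            if (!err)
                err = atmel_tdes_crypt_start(dd);
            if (!err)
                return;         /* another chunk is now in flight */
        }

        atmel_tdes_finish_req(dd, err);
        atmel_tdes_handle_queue(dd, NULL);
    }
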
1104 static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
1112 static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
1133 static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
1136 dd->caps.has_dma = 0;
1137 dd->caps.has_cfb_3keys = 0;
1140 switch (dd->hw_version & 0xf00) {
1142 dd->caps.has_dma = 1;
1143 dd->caps.has_cfb_3keys = 1;
1148 dev_warn(dd->dev,
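
Finally, atmel_tdes_get_cap() derives the feature set from the major-revision nibble of the version register read earlier: everything defaults to off, newer revisions turn on DMA and three-key CFB support, and unknown revisions fall back to the minimum with a warning. A sketch, with the specific revision values in the switch assumed rather than taken from the listing:

    static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
    {
        /* Assume the least capable hardware unless the version says otherwise. */
        dd->caps.has_dma = 0;
        dd->caps.has_cfb_3keys = 0;

        switch (dd->hw_version & 0xf00) {   /* major revision nibble */
        case 0x700:                         /* revision values assumed */
            dd->caps.has_dma = 1;
            dd->caps.has_cfb_3keys = 1;
            break;
        case 0x600:
            break;
        default:
            dev_warn(dd->dev,
                     "Unmanaged tdes version, set minimum capabilities\n");
            break;
        }
    }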