Lines Matching refs:dd — every reference to the struct omap_aes_dev *dd device context, apparently from the OMAP AES crypto driver (drivers/crypto/omap-aes.c). Each match is prefixed with its line number in the source file.

49 #define omap_aes_read(dd, offset)				\
52 _read_ret = __raw_readl(dd->io_base + offset); \
58 inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
60 return __raw_readl(dd->io_base + offset);
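The two omap_aes_read() matches above are the classic trace-macro/inline pair: a DEBUG build logs every register read through a statement-expression macro, while a normal build compiles down to a bare MMIO read. A minimal sketch of how those lines likely fit together (the pr_debug format string is an assumption):

    #ifdef DEBUG
    #define omap_aes_read(dd, offset)                              \
    ({                                                             \
            int _read_ret;                                         \
            _read_ret = __raw_readl(dd->io_base + offset);         \
            pr_debug("omap_aes_read(" #offset ") = %#x\n",         \
                     _read_ret);                                   \
            _read_ret;                                             \
    })
    #else
    inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
    {
            return __raw_readl(dd->io_base + offset);
    }
    #endif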
65 #define omap_aes_write(dd, offset, value) \
69 __raw_writel(value, dd->io_base + offset); \
72 inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
75 __raw_writel(value, dd->io_base + offset);
79 static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
84 val = omap_aes_read(dd, offset);
87 omap_aes_write(dd, offset, val);
90 static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
94 omap_aes_write(dd, offset, *value);
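omap_aes_write_mask() (lines 79-87) is a read-modify-write helper and omap_aes_write_n() (lines 90-94) a burst writer. A sketch of both; the clear/set step between the visible read at line 84 and the write at line 87 is inferred:

    static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
                                           u32 value, u32 mask)
    {
            u32 val;

            val = omap_aes_read(dd, offset);
            val &= ~mask;           /* clear the field */
            val |= (value & mask);  /* install the new bits */
            omap_aes_write(dd, offset, val);
    }

    static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
                                 u32 *value, int count)
    {
            /* consecutive 32-bit registers, 4 bytes apart */
            for (; count--; value++, offset += 4)
                    omap_aes_write(dd, offset, *value);
    }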
97 static int omap_aes_hw_init(struct omap_aes_dev *dd)
101 if (!(dd->flags & FLAGS_INIT)) {
102 dd->flags |= FLAGS_INIT;
103 dd->err = 0;
106 err = pm_runtime_resume_and_get(dd->dev);
108 dev_err(dd->dev, "failed to get sync: %d\n", err);
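omap_aes_hw_init() (lines 97-108) gates one-time state setup behind FLAGS_INIT and takes a runtime-PM reference before the first register access. A sketch of the whole function as implied by the matches (the return handling after the dev_err is assumed):

    static int omap_aes_hw_init(struct omap_aes_dev *dd)
    {
            int err;

            if (!(dd->flags & FLAGS_INIT)) {
                    dd->flags |= FLAGS_INIT;
                    dd->err = 0;
            }

            /* power the IP up; paired with put_autosuspend on completion */
            err = pm_runtime_resume_and_get(dd->dev);
            if (err < 0) {
                    dev_err(dd->dev, "failed to get sync: %d\n", err);
                    return err;
            }

            return 0;
    }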
115 void omap_aes_clear_copy_flags(struct omap_aes_dev *dd)
117 dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_IN_DATA_ST_SHIFT);
118 dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_OUT_DATA_ST_SHIFT);
119 dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_ASSOC_DATA_ST_SHIFT);
122 int omap_aes_write_ctrl(struct omap_aes_dev *dd)
129 err = omap_aes_hw_init(dd);
133 key32 = dd->ctx->keylen / sizeof(u32);
136 if (dd->flags & FLAGS_GCM)
138 omap_aes_write(dd, i, 0x0);
141 omap_aes_write(dd, AES_REG_KEY(dd, i),
142 (__force u32)cpu_to_le32(dd->ctx->key[i]));
145 if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->iv)
146 omap_aes_write_n(dd, AES_REG_IV(dd, 0), (void *)dd->req->iv, 4);
148 if ((dd->flags & (FLAGS_GCM)) && dd->aead_req->iv) {
149 rctx = aead_request_ctx(dd->aead_req);
150 omap_aes_write_n(dd, AES_REG_IV(dd, 0), (u32 *)rctx->iv, 4);
153 val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
154 if (dd->flags & FLAGS_CBC)
157 if (dd->flags & (FLAGS_CTR | FLAGS_GCM))
160 if (dd->flags & FLAGS_GCM)
163 if (dd->flags & FLAGS_ENCRYPT)
166 omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);
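omap_aes_write_ctrl() (lines 122-166) loads the key, and the IV for CBC/CTR/GCM, then composes the control word: the key length is encoded into bits 4:3 with FLD_VAL and one mode/direction bit is OR'd in per flag before a single masked write. A sketch of that final composition, assuming the AES_REG_CTRL_* bit names from the driver's header:

    u32 val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);

    if (dd->flags & FLAGS_CBC)
            val |= AES_REG_CTRL_CBC;
    if (dd->flags & (FLAGS_CTR | FLAGS_GCM))
            val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
    if (dd->flags & FLAGS_GCM)
            val |= AES_REG_CTRL_GCM;
    if (dd->flags & FLAGS_ENCRYPT)
            val |= AES_REG_CTRL_DIRECTION;

    /* touch only the mode/keysize/direction field, leave the rest */
    omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);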
171 static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
175 val = dd->pdata->dma_start;
177 if (dd->dma_lch_out != NULL)
178 val |= dd->pdata->dma_enable_out;
179 if (dd->dma_lch_in != NULL)
180 val |= dd->pdata->dma_enable_in;
182 mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
183 dd->pdata->dma_start;
185 omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
189 static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
191 omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
192 omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);
193 if (dd->flags & FLAGS_GCM)
194 omap_aes_write(dd, AES_REG_A_LEN, dd->assoc_len);
196 omap_aes_dma_trigger_omap2(dd, length);
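The two trigger variants differ only in length programming: the OMAP2 path (lines 171-185) just sets start/enable bits in the mask register, while the OMAP4 path (lines 189-196) first writes the payload length, plus the AAD length for GCM, then reuses the OMAP2 path. Reassembled from the matches:

    static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
    {
            omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
            omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);
            if (dd->flags & FLAGS_GCM)
                    omap_aes_write(dd, AES_REG_A_LEN, dd->assoc_len);

            omap_aes_dma_trigger_omap2(dd, length);
    }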
199 static void omap_aes_dma_stop(struct omap_aes_dev *dd)
203 mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
204 dd->pdata->dma_start;
206 omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
211 struct omap_aes_dev *dd;
214 dd = list_first_entry(&dev_list, struct omap_aes_dev, list);
215 list_move_tail(&dd->list, &dev_list);
216 rctx->dd = dd;
219 return dd;
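Lines 211-219 show a simple round-robin load balancer across AES instances: pick the head of dev_list, rotate it to the tail so the next request lands on another device, and cache the choice in the request context. A sketch, assuming a list_lock spinlock guards dev_list:

    static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_reqctx *rctx)
    {
            struct omap_aes_dev *dd;

            spin_lock_bh(&list_lock);
            dd = list_first_entry(&dev_list, struct omap_aes_dev, list);
            list_move_tail(&dd->list, &dev_list);
            rctx->dd = dd;
            spin_unlock_bh(&list_lock);

            return dd;
    }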
224 struct omap_aes_dev *dd = data;
227 tasklet_schedule(&dd->done_task);
230 static int omap_aes_dma_init(struct omap_aes_dev *dd)
234 dd->dma_lch_out = NULL;
235 dd->dma_lch_in = NULL;
237 dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
238 if (IS_ERR(dd->dma_lch_in)) {
239 dev_err(dd->dev, "Unable to request in DMA channel\n");
240 return PTR_ERR(dd->dma_lch_in);
243 dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
244 if (IS_ERR(dd->dma_lch_out)) {
245 dev_err(dd->dev, "Unable to request out DMA channel\n");
246 err = PTR_ERR(dd->dma_lch_out);
253 dma_release_channel(dd->dma_lch_in);
258 static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
260 if (dd->pio_only)
263 dma_release_channel(dd->dma_lch_out);
264 dma_release_channel(dd->dma_lch_in);
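omap_aes_dma_init() (lines 230-253) requests the paired "rx"/"tx" dmaengine channels and releases the first if the second fails; omap_aes_dma_cleanup() (lines 258-264) mirrors it, skipping the release entirely in PIO mode. A sketch of the init path with the goto unwind flattened:

    static int omap_aes_dma_init(struct omap_aes_dev *dd)
    {
            int err;

            dd->dma_lch_out = NULL;
            dd->dma_lch_in = NULL;

            dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
            if (IS_ERR(dd->dma_lch_in)) {
                    dev_err(dd->dev, "Unable to request in DMA channel\n");
                    return PTR_ERR(dd->dma_lch_in);
            }

            dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
            if (IS_ERR(dd->dma_lch_out)) {
                    dev_err(dd->dev, "Unable to request out DMA channel\n");
                    err = PTR_ERR(dd->dma_lch_out);
                    dma_release_channel(dd->dma_lch_in);
                    return err;
            }

            return 0;
    }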
267 static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
276 if (dd->pio_only) {
277 scatterwalk_start(&dd->in_walk, dd->in_sg);
279 scatterwalk_start(&dd->out_walk, dd->out_sg);
283 omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
287 dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);
291 cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
292 cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
299 ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
301 dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
306 tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
310 dev_err(dd->dev, "IN prep_slave_sg() failed\n");
315 tx_in->callback_param = dd;
320 ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
322 dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
327 tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg,
332 dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
341 if (dd->flags & FLAGS_GCM)
345 cb_desc->callback_param = dd;
352 dma_async_issue_pending(dd->dma_lch_in);
354 dma_async_issue_pending(dd->dma_lch_out);
357 dd->pdata->trigger(dd, dd->total);
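omap_aes_crypt_dma() (lines 267-357) points both channels at the same AES data FIFO address: configure each slave channel, prep the in/out scatterlists, attach the completion callback to the OUT descriptor (the IN descriptor for GCM, per line 341), issue, and let the per-SoC trigger start the engine. A condensed sketch with error handling trimmed; DST_MAXBURST and the omap_aes_dma_out_callback name are taken on trust from the driver:

    struct dma_slave_config cfg = {
            .src_addr       = dd->phys_base + AES_REG_DATA_N(dd, 0),
            .dst_addr       = dd->phys_base + AES_REG_DATA_N(dd, 0),
            .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
            .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
            .src_maxburst   = DST_MAXBURST,
            .dst_maxburst   = DST_MAXBURST,
    };
    struct dma_async_tx_descriptor *tx_in, *tx_out;

    dmaengine_slave_config(dd->dma_lch_in, &cfg);
    tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
                                    DMA_MEM_TO_DEV,
                                    DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

    dmaengine_slave_config(dd->dma_lch_out, &cfg);
    tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
                                     DMA_DEV_TO_MEM,
                                     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

    tx_out->callback = omap_aes_dma_out_callback;   /* schedules done_task */
    tx_out->callback_param = dd;

    dmaengine_submit(tx_in);
    dmaengine_submit(tx_out);
    dma_async_issue_pending(dd->dma_lch_in);
    dma_async_issue_pending(dd->dma_lch_out);

    dd->pdata->trigger(dd, dd->total);              /* start the AES engine */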
362 int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
366 pr_debug("total: %zu\n", dd->total);
368 if (!dd->pio_only) {
369 err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
372 dev_err(dd->dev, "dma_map_sg() error\n");
376 if (dd->out_sg_len) {
377 err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
380 dev_err(dd->dev, "dma_map_sg() error\n");
386 err = omap_aes_crypt_dma(dd, dd->in_sg, dd->out_sg, dd->in_sg_len,
387 dd->out_sg_len);
388 if (err && !dd->pio_only) {
389 dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
390 if (dd->out_sg_len)
391 dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
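omap_aes_crypt_dma_start() (lines 362-391) wraps the above in the usual map/unmap discipline: dma_map_sg() both lists up front (skipped for PIO), and unmap them again if DMA setup fails. The error leg in sketch form:

    err = omap_aes_crypt_dma(dd, dd->in_sg, dd->out_sg, dd->in_sg_len,
                             dd->out_sg_len);
    if (err && !dd->pio_only) {
            /* undo the mappings made before the failed setup */
            dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
            if (dd->out_sg_len)
                    dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
                                 DMA_FROM_DEVICE);
    }

    return err;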
398 static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
400 struct skcipher_request *req = dd->req;
404 crypto_finalize_skcipher_request(dd->engine, req, err);
406 pm_runtime_mark_last_busy(dd->dev);
407 pm_runtime_put_autosuspend(dd->dev);
410 int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
412 pr_debug("total: %zu\n", dd->total);
414 omap_aes_dma_stop(dd);
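omap_aes_finish_req() (lines 398-407) hands the completed request back to the crypto engine and drops the runtime-PM reference lazily, letting autosuspend power the IP down between bursts of work. Reassembled from the matches:

    static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
    {
            struct skcipher_request *req = dd->req;

            crypto_finalize_skcipher_request(dd->engine, req, err);

            pm_runtime_mark_last_busy(dd->dev);
            pm_runtime_put_autosuspend(dd->dev);
    }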
420 static int omap_aes_handle_queue(struct omap_aes_dev *dd,
424 return crypto_transfer_skcipher_request_to_engine(dd->engine, req);
436 struct omap_aes_dev *dd = rctx->dd;
440 if (!dd)
444 dd->req = req;
445 dd->total = req->cryptlen;
446 dd->total_save = req->cryptlen;
447 dd->in_sg = req->src;
448 dd->out_sg = req->dst;
449 dd->orig_out = req->dst;
455 ret = omap_crypto_align_sg(&dd->in_sg, dd->total, AES_BLOCK_SIZE,
456 dd->in_sgl, flags,
457 FLAGS_IN_DATA_ST_SHIFT, &dd->flags);
461 ret = omap_crypto_align_sg(&dd->out_sg, dd->total, AES_BLOCK_SIZE,
462 &dd->out_sgl, 0,
463 FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
467 dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
468 if (dd->in_sg_len < 0)
469 return dd->in_sg_len;
471 dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total);
472 if (dd->out_sg_len < 0)
473 return dd->out_sg_len;
476 dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
478 dd->ctx = ctx;
479 rctx->dd = dd;
481 return omap_aes_write_ctrl(dd);
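The prepare step (lines 436-481) sanitizes the request's scatterlists before touching hardware: omap_crypto_align_sg() copies src/dst into block-aligned bounce lists when needed, recording what was copied in dd->flags so the cleanup path can undo it, and sg_nents_for_len() counts the entries covering cryptlen. The input half in sketch form:

    int ret;

    dd->in_sg = req->src;
    dd->out_sg = req->dst;

    ret = omap_crypto_align_sg(&dd->in_sg, dd->total, AES_BLOCK_SIZE,
                               dd->in_sgl, flags,
                               FLAGS_IN_DATA_ST_SHIFT, &dd->flags);
    if (ret)
            return ret;

    dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
    if (dd->in_sg_len < 0)
            return dd->in_sg_len;   /* list shorter than cryptlen */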
489 struct omap_aes_dev *dd = rctx->dd;
491 if (!dd)
494 return omap_aes_crypt_dma_start(dd);
497 static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf)
502 ((u32 *)ivbuf)[i] = omap_aes_read(dd, AES_REG_IV(dd, i));
507 struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
511 if (!dd->pio_only) {
512 dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
514 dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
515 dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
517 omap_aes_crypt_dma_stop(dd);
520 omap_crypto_cleanup(dd->in_sg, NULL, 0, dd->total_save,
521 FLAGS_IN_DATA_ST_SHIFT, dd->flags);
523 omap_crypto_cleanup(dd->out_sg, dd->orig_out, 0, dd->total_save,
524 FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
527 if (dd->flags & (FLAGS_CBC | FLAGS_CTR))
528 omap_aes_copy_ivout(dd, dd->req->iv);
530 omap_aes_finish_req(dd, 0);
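omap_aes_done_task() (lines 507-530) runs in tasklet context after the OUT DMA callback, or after the last PIO block: it unmaps the DMA buffers, undoes any bounce copies via omap_crypto_cleanup(), saves the final IV back into the request so chained CBC/CTR calls continue correctly, then finalizes. Reassembled from the matches:

    static void omap_aes_done_task(unsigned long data)
    {
            struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

            if (!dd->pio_only) {
                    dma_sync_sg_for_device(dd->dev, dd->out_sg,
                                           dd->out_sg_len, DMA_FROM_DEVICE);
                    dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len,
                                 DMA_TO_DEVICE);
                    dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
                                 DMA_FROM_DEVICE);
                    omap_aes_crypt_dma_stop(dd);
            }

            /* undo bounce-buffer copies recorded in dd->flags */
            omap_crypto_cleanup(dd->in_sg, NULL, 0, dd->total_save,
                                FLAGS_IN_DATA_ST_SHIFT, dd->flags);
            omap_crypto_cleanup(dd->out_sg, dd->orig_out, 0, dd->total_save,
                                FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

            if (dd->flags & (FLAGS_CBC | FLAGS_CTR))
                    omap_aes_copy_ivout(dd, dd->req->iv);

            omap_aes_finish_req(dd, 0);
    }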
540 struct omap_aes_dev *dd;
565 dd = omap_aes_find_dev(rctx);
566 if (!dd)
571 return omap_aes_handle_queue(dd, req);
864 struct omap_aes_dev *dd = dev_id;
868 status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
870 omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
872 BUG_ON(!dd->in_sg);
874 BUG_ON(_calc_walked(in) > dd->in_sg->length);
876 src = sg_virt(dd->in_sg) + _calc_walked(in);
879 omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);
881 scatterwalk_advance(&dd->in_walk, 4);
882 if (dd->in_sg->length == _calc_walked(in)) {
883 dd->in_sg = sg_next(dd->in_sg);
884 if (dd->in_sg) {
885 scatterwalk_start(&dd->in_walk,
886 dd->in_sg);
887 src = sg_virt(dd->in_sg) +
897 omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
900 omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);
903 omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
905 BUG_ON(!dd->out_sg);
907 BUG_ON(_calc_walked(out) > dd->out_sg->length);
909 dst = sg_virt(dd->out_sg) + _calc_walked(out);
912 *dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
913 scatterwalk_advance(&dd->out_walk, 4);
914 if (dd->out_sg->length == _calc_walked(out)) {
915 dd->out_sg = sg_next(dd->out_sg);
916 if (dd->out_sg) {
917 scatterwalk_start(&dd->out_walk,
918 dd->out_sg);
919 dst = sg_virt(dd->out_sg) +
927 dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);
931 omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
933 if (!dd->total)
935 tasklet_schedule(&dd->done_task);
938 omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
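The interrupt handler (lines 864-938) implements the PIO fallback as a ping-pong: a DATA_IN interrupt masks the IRQ, feeds four 32-bit words from the input scatterlist into the FIFO, acks, and arms DATA_OUT (0x4); a DATA_OUT interrupt drains four words into the output scatterlist, knocks one AES block off dd->total, and either schedules the done tasklet or re-arms DATA_IN (0x2). A condensed sketch, with the scatterwalk bookkeeping elided and the AES_REG_IRQ_DATA_IN/OUT bit names assumed:

    u32 status, *src, *dst;
    int i;

    status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
    if (status & AES_REG_IRQ_DATA_IN) {
            omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);   /* mask */
            for (i = 0; i < 4; i++, src++)
                    omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);
            omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status); /* ack */
            omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);   /* want OUT */
    } else if (status & AES_REG_IRQ_DATA_OUT) {
            omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
            for (i = 0; i < 4; i++, dst++)
                    *dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
            dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);
            omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
            if (!dd->total)
                    tasklet_schedule(&dd->done_task);
            else
                    omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
    }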
961 static int omap_aes_get_res_of(struct omap_aes_dev *dd,
967 dd->pdata = of_device_get_match_data(dev);
968 if (!dd->pdata) {
989 static int omap_aes_get_res_of(struct omap_aes_dev *dd,
996 static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
1013 dd->pdata = &omap_aes_pdata_omap2;
1049 struct omap_aes_dev *dd = dev_get_drvdata(dev);
1051 return sprintf(buf, "%d\n", dd->engine->queue.max_qlen);
1058 struct omap_aes_dev *dd;
1076 list_for_each_entry(dd, &dev_list, list) {
1077 spin_lock_irqsave(&dd->lock, flags);
1078 dd->engine->queue.max_qlen = value;
1079 dd->aead_queue.base.max_qlen = value;
1080 spin_unlock_irqrestore(&dd->lock, flags);
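Lines 1049-1080 expose the queue depth as a sysfs tunable: the show side prints the engine queue's max_qlen, and the store side walks every registered device, updating both the skcipher engine queue and the AEAD queue under dd->lock. A sketch of the store callback; the function name and the kstrtol bounds check are assumptions consistent with the matches:

    static ssize_t queue_len_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t size)
    {
            struct omap_aes_dev *dd;
            unsigned long flags;
            long value;
            int status;

            status = kstrtol(buf, 0, &value);
            if (status)
                    return status;

            if (value < 1)
                    return -EINVAL;

            /* apply the new depth to every AES instance */
            list_for_each_entry(dd, &dev_list, list) {
                    spin_lock_irqsave(&dd->lock, flags);
                    dd->engine->queue.max_qlen = value;
                    dd->aead_queue.base.max_qlen = value;
                    spin_unlock_irqrestore(&dd->lock, flags);
            }

            return size;
    }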
1103 struct omap_aes_dev *dd;
1110 dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
1111 if (dd == NULL) {
1115 dd->dev = dev;
1116 platform_set_drvdata(pdev, dd);
1118 aead_init_queue(&dd->aead_queue, OMAP_AES_QUEUE_LENGTH);
1120 err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
1121 omap_aes_get_res_pdev(dd, pdev, &res);
1125 dd->io_base = devm_ioremap_resource(dev, &res);
1126 if (IS_ERR(dd->io_base)) {
1127 err = PTR_ERR(dd->io_base);
1130 dd->phys_base = res.start;
1143 omap_aes_dma_stop(dd);
1145 reg = omap_aes_read(dd, AES_REG_REV(dd));
1150 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
1151 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
1153 tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
1155 err = omap_aes_dma_init(dd);
1158 } else if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
1159 dd->pio_only = 1;
1168 dev_name(dev), dd);
1175 spin_lock_init(&dd->lock);
1177 INIT_LIST_HEAD(&dd->list);
1179 list_add_tail(&dd->list, &dev_list);
1183 dd->engine = crypto_engine_alloc_init(dev, 1);
1184 if (!dd->engine) {
1189 err = crypto_engine_start(dd->engine);
1193 for (i = 0; i < dd->pdata->algs_info_size; i++) {
1194 if (!dd->pdata->algs_info[i].registered) {
1195 for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
1196 algp = &dd->pdata->algs_info[i].algs_list[j];
1204 dd->pdata->algs_info[i].registered++;
1209 if (dd->pdata->aead_algs_info &&
1210 !dd->pdata->aead_algs_info->registered) {
1211 for (i = 0; i < dd->pdata->aead_algs_info->size; i++) {
1212 aalg = &dd->pdata->aead_algs_info->algs_list[i];
1220 dd->pdata->aead_algs_info->registered++;
1232 for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
1233 aalg = &dd->pdata->aead_algs_info->algs_list[i];
1237 for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
1238 for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
1240 &dd->pdata->algs_info[i].algs_list[j]);
1243 if (dd->engine)
1244 crypto_engine_exit(dd->engine);
1246 omap_aes_dma_cleanup(dd);
1248 tasklet_kill(&dd->done_task);
1252 dd = NULL;
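The probe path (lines 1103-1252) follows the usual ladder: allocate the context with devm_kzalloc(), map the MMIO resource, read and print the revision register, set up DMA (falling back to pio_only when the IP exposes IRQ status/enable registers, per line 1158), then bring up a crypto engine and register the algorithm tables, counting registrations so the error path can unwind in reverse. The engine/registration tail in sketch form; the err_engine/err_algs label names are hypothetical and the per-table "already registered" guard of line 1194 is omitted:

    dd->engine = crypto_engine_alloc_init(dev, 1);
    if (!dd->engine) {
            err = -ENOMEM;
            goto err_engine;
    }

    err = crypto_engine_start(dd->engine);
    if (err)
            goto err_engine;

    for (i = 0; i < dd->pdata->algs_info_size; i++) {
            for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
                    algp = &dd->pdata->algs_info[i].algs_list[j];
                    err = crypto_register_skcipher(algp);
                    if (err)
                            goto err_algs;
                    /* count successes for the reverse unwind */
                    dd->pdata->algs_info[i].registered++;
            }
    }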
1260 struct omap_aes_dev *dd = platform_get_drvdata(pdev);
1264 if (!dd)
1268 list_del(&dd->list);
1271 for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
1272 for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
1274 &dd->pdata->algs_info[i].algs_list[j]);
1275 dd->pdata->algs_info[i].registered--;
1278 for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
1279 aalg = &dd->pdata->aead_algs_info->algs_list[i];
1281 dd->pdata->aead_algs_info->registered--;
1285 crypto_engine_exit(dd->engine);
1287 tasklet_kill(&dd->done_task);
1288 omap_aes_dma_cleanup(dd);
1289 pm_runtime_disable(dd->dev);
1291 sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group);