// SPDX-License-Identifier: GPL-2.0
// Copyright 2019 NXP

#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmapool.h>
#include <linux/of_irq.h>
#include <linux/iommu.h>
#include <linux/sys_soc.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>

#include "../virt-dma.h"
#include "dpdmai.h"
#include "dpaa2-qdma.h"

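/*
 * When the DPDMAI device is not behind an SMMU, the frame descriptors carry
 * physical addresses and memory translation is bypassed (BMT). Probe clears
 * this flag when an IOMMU domain is attached to the device.
 */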
static bool smmu_disable = true;

static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
}

static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dpaa2_qdma_comp, vdesc);
}

static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
	struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;

	dpaa2_chan->fd_pool = dma_pool_create("fd_pool", dev,
					      sizeof(struct dpaa2_fd),
					      sizeof(struct dpaa2_fd), 0);
	if (!dpaa2_chan->fd_pool)
		goto err;

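	/*
	 * Each job uses three frame-list entries: one pointing at the
	 * source/destination descriptors and one each for the source and
	 * destination buffers (see the populate helpers below).
	 */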
	dpaa2_chan->fl_pool =
		dma_pool_create("fl_pool", dev,
				sizeof(struct dpaa2_fl_entry) * 3,
				sizeof(struct dpaa2_fl_entry), 0);

	if (!dpaa2_chan->fl_pool)
		goto err_fd;

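	/* One source descriptor plus one destination descriptor per job. */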
	dpaa2_chan->sdd_pool =
		dma_pool_create("sdd_pool", dev,
				sizeof(struct dpaa2_qdma_sd_d) * 2,
				sizeof(struct dpaa2_qdma_sd_d), 0);
	if (!dpaa2_chan->sdd_pool)
		goto err_fl;

	return dpaa2_qdma->desc_allocated++;
err_fl:
	dma_pool_destroy(dpaa2_chan->fl_pool);
err_fd:
	dma_pool_destroy(dpaa2_chan->fd_pool);
err:
	return -ENOMEM;
}

static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
	unsigned long flags;

	LIST_HEAD(head);

	spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
	vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
	spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);

	dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_used);
	dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_free);

	dma_pool_destroy(dpaa2_chan->fd_pool);
	dma_pool_destroy(dpaa2_chan->fl_pool);
	dma_pool_destroy(dpaa2_chan->sdd_pool);
	dpaa2_qdma->desc_allocated--;
}

/*
 * Request a command descriptor for enqueue.
 */
static struct dpaa2_qdma_comp *
dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
{
	struct dpaa2_qdma_priv *qdma_priv = dpaa2_chan->qdma->priv;
	struct device *dev = &qdma_priv->dpdmai_dev->dev;
	struct dpaa2_qdma_comp *comp_temp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
	if (list_empty(&dpaa2_chan->comp_free)) {
		spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
		comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT);
		if (!comp_temp)
			goto err;
		comp_temp->fd_virt_addr =
			dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT,
				       &comp_temp->fd_bus_addr);
		if (!comp_temp->fd_virt_addr)
			goto err_comp;

		comp_temp->fl_virt_addr =
			dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT,
				       &comp_temp->fl_bus_addr);
		if (!comp_temp->fl_virt_addr)
			goto err_fd_virt;

		comp_temp->desc_virt_addr =
			dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT,
				       &comp_temp->desc_bus_addr);
		if (!comp_temp->desc_virt_addr)
			goto err_fl_virt;

		comp_temp->qchan = dpaa2_chan;
		return comp_temp;
	}

	comp_temp = list_first_entry(&dpaa2_chan->comp_free,
				     struct dpaa2_qdma_comp, list);
	list_del(&comp_temp->list);
	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);

	comp_temp->qchan = dpaa2_chan;

	return comp_temp;

err_fl_virt:
	dma_pool_free(dpaa2_chan->fl_pool,
		      comp_temp->fl_virt_addr,
		      comp_temp->fl_bus_addr);
err_fd_virt:
	dma_pool_free(dpaa2_chan->fd_pool,
		      comp_temp->fd_virt_addr,
		      comp_temp->fd_bus_addr);
err_comp:
	kfree(comp_temp);
err:
	dev_err(dev, "Failed to request descriptor\n");
	return NULL;
}

static void
dpaa2_qdma_populate_fd(u32 format, struct dpaa2_qdma_comp *dpaa2_comp)
{
	struct dpaa2_fd *fd;

	fd = dpaa2_comp->fd_virt_addr;
	memset(fd, 0, sizeof(struct dpaa2_fd));

	/* the FD carries the address of the frame list */
	dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);

	/*
	 * Bypass memory translation, frame list format, short length disable.
	 * BMT must be disabled when fsl-mc uses IOVA addresses.
	 */
	if (smmu_disable)
		dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
	dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);

	dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
}

/* first frame list for descriptor buffer */
static void
dpaa2_qdma_populate_first_framel(struct dpaa2_fl_entry *f_list,
				 struct dpaa2_qdma_comp *dpaa2_comp,
				 bool wrt_changed)
{
	struct dpaa2_qdma_sd_d *sdd;

	sdd = dpaa2_comp->desc_virt_addr;
	memset(sdd, 0, 2 * (sizeof(*sdd)));

	/* source descriptor CMD */
	sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT);
	sdd++;

	/* dest descriptor CMD */
	if (wrt_changed)
		sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
	else
		sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);

	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	/* first frame list to source descriptor */
	dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
	dpaa2_fl_set_len(f_list, 0x20);
	dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);

	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}

/* source and destination frame list */
static void
dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
			   dma_addr_t dst, dma_addr_t src,
			   size_t len, uint8_t fmt)
{
	/* source frame list to source buffer */
	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	dpaa2_fl_set_addr(f_list, src);
	dpaa2_fl_set_len(f_list, len);

	/* single buffer frame or scatter gather frame */
	dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));

	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);

	f_list++;

	/* destination frame list to destination buffer */
	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	dpaa2_fl_set_addr(f_list, dst);
	dpaa2_fl_set_len(f_list, len);
	/* single buffer frame or scatter gather frame */
	dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
	/* the destination entry is the final one in the list */
	dpaa2_fl_set_final(f_list, QDMA_FL_F);
	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}

static struct dma_async_tx_descriptor
*dpaa2_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
			dma_addr_t src, size_t len, ulong flags)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct dpaa2_fl_entry *f_list;
	bool wrt_changed;

	dpaa2_qdma = dpaa2_chan->qdma;
	dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
	if (!dpaa2_comp)
		return NULL;

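	/*
	 * Some SoCs need a different destination write transaction type
	 * (see the soc_fixup_tuning match in probe).
	 */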
	wrt_changed = (bool)dpaa2_qdma->qdma_wrtype_fixup;

	/* populate Frame descriptor */
	dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);

	f_list = dpaa2_comp->fl_virt_addr;

	/* first frame list for descriptor buffer (long format) */
	dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);

	f_list++;

	dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);

	return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
}

static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct virt_dma_desc *vdesc;
	struct dpaa2_fd *fd;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
	spin_lock(&dpaa2_chan->vchan.lock);
	if (vchan_issue_pending(&dpaa2_chan->vchan)) {
		vdesc = vchan_next_desc(&dpaa2_chan->vchan);
		if (!vdesc)
			goto err_enqueue;
		dpaa2_comp = to_fsl_qdma_comp(vdesc);

		fd = dpaa2_comp->fd_virt_addr;

		list_del(&vdesc->node);
		list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);

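		/*
		 * Hand the frame descriptor to the hardware frame queue; on
		 * failure, return the completion to the free list.
		 */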
		err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
		if (err) {
			list_del(&dpaa2_comp->list);
			list_add_tail(&dpaa2_comp->list,
				      &dpaa2_chan->comp_free);
		}
	}
err_enqueue:
	spin_unlock(&dpaa2_chan->vchan.lock);
	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
}

static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = &ls_dev->dev;
	struct dpaa2_qdma_priv *priv;
	u8 prio_def = DPDMAI_PRIO_NUM;
	int err = -EINVAL;
	int i;

	priv = dev_get_drvdata(dev);

	priv->dev = dev;
	priv->dpqdma_id = ls_dev->obj_desc.id;

	/* Get the handle for the DPDMAI this interface is associated with */
	err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpdmai_open() failed\n");
		return err;
	}

	dev_dbg(dev, "Opened dpdmai object successfully\n");

	err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
				    &priv->dpdmai_attr);
	if (err) {
		dev_err(dev, "dpdmai_get_attributes() failed\n");
		goto exit;
	}

	if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
		err = -EINVAL;
		dev_err(dev, "DPDMAI major version mismatch\n"
			"Found %u.%u, supported version is %u.%u\n",
			priv->dpdmai_attr.version.major,
			priv->dpdmai_attr.version.minor,
			DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
		goto exit;
	}

	if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
		err = -EINVAL;
		dev_err(dev, "DPDMAI minor version mismatch\n"
			"Found %u.%u, supported version is %u.%u\n",
			priv->dpdmai_attr.version.major,
			priv->dpdmai_attr.version.minor,
			DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
		goto exit;
	}

	priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
	ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
	if (!ppriv) {
		err = -ENOMEM;
		goto exit;
	}
	priv->ppriv = ppriv;

	for (i = 0; i < priv->num_pairs; i++) {
		err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  i, &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpdmai_get_rx_queue() failed\n");
			goto exit;
		}
		ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;

		err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  i, &priv->tx_fqid[i]);
		if (err) {
			dev_err(dev, "dpdmai_get_tx_queue() failed\n");
			goto exit;
		}
		ppriv->req_fqid = priv->tx_fqid[i];
		ppriv->prio = i;
		ppriv->priv = priv;
		ppriv++;
	}

	return 0;
exit:
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	return err;
}

static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
			struct dpaa2_qdma_priv_per_prio, nctx);
	struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
	struct dpaa2_qdma_priv *priv = ppriv->priv;
	u32 n_chans = priv->dpaa2_qdma->n_chans;
	struct dpaa2_qdma_chan *qchan;
	const struct dpaa2_fd *fd_eq;
	const struct dpaa2_fd *fd;
	struct dpaa2_dq *dq;
	int is_last = 0;
	int found;
	u8 status;
	int err;
	int i;

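	/*
	 * Pull dequeued frames from the response FQ into the software store,
	 * retrying until the pull command is accepted.
	 */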
	do {
		err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
					       ppriv->store);
	} while (err);

	while (!is_last) {
		do {
			dq = dpaa2_io_store_next(ppriv->store, &is_last);
		} while (!is_last && !dq);
		if (!dq) {
			dev_err(priv->dev, "FQID returned no valid frames!\n");
			continue;
		}

		/* obtain FD and process the error */
		fd = dpaa2_dq_fd(dq);

		status = dpaa2_fd_get_ctrl(fd) & 0xff;
		if (status)
			dev_err(priv->dev, "FD error occurred\n");
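
		/* match the dequeued FD against in-flight completions by FD address */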
		found = 0;
		for (i = 0; i < n_chans; i++) {
			qchan = &priv->dpaa2_qdma->chans[i];
			spin_lock(&qchan->queue_lock);
			if (list_empty(&qchan->comp_used)) {
				spin_unlock(&qchan->queue_lock);
				continue;
			}
			list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
						 &qchan->comp_used, list) {
				fd_eq = dpaa2_comp->fd_virt_addr;

				if (le64_to_cpu(fd_eq->simple.addr) ==
				    le64_to_cpu(fd->simple.addr)) {
					spin_lock(&qchan->vchan.lock);
					vchan_cookie_complete(&dpaa2_comp->vdesc);
					spin_unlock(&qchan->vchan.lock);
					found = 1;
					break;
				}
			}
			spin_unlock(&qchan->queue_lock);
			if (found)
				break;
		}
	}

	dpaa2_io_service_rearm(NULL, ctx);
}

static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = priv->dev;
	int err = -EINVAL;
	int i, num;

	num = priv->num_pairs;
	ppriv = priv->ppriv;
	for (i = 0; i < num; i++) {
		ppriv->nctx.is_cdan = 0;
		ppriv->nctx.desired_cpu = DPAA2_IO_ANY_CPU;
		ppriv->nctx.id = ppriv->rsp_fqid;
		ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
		err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
		if (err) {
			dev_err(dev, "Notification register failed\n");
			goto err_service;
		}

		ppriv->store =
			dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
		if (!ppriv->store) {
			err = -ENOMEM;
			dev_err(dev, "dpaa2_io_store_create() failed\n");
			goto err_store;
		}

		ppriv++;
	}
	return 0;

err_store:
	dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
err_service:
	ppriv--;
	while (ppriv >= priv->ppriv) {
		dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
		dpaa2_io_store_destroy(ppriv->store);
		ppriv--;
	}
	return err;
}

static void dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		dpaa2_io_store_destroy(ppriv->store);
		ppriv++;
	}
}

static void dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	struct device *dev = priv->dev;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
		ppriv++;
	}
}

static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
{
	struct dpdmai_rx_queue_cfg rx_queue_cfg;
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev;
	int i, num;
	int err;

	ls_dev = to_fsl_mc_device(dev);
	num = priv->num_pairs;
	ppriv = priv->ppriv;
	for (i = 0; i < num; i++) {
		rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
				       DPDMAI_QUEUE_OPT_DEST;
		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
		rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
		rx_queue_cfg.dest_cfg.priority = ppriv->prio;
		err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  rx_queue_cfg.dest_cfg.priority,
					  &rx_queue_cfg);
		if (err) {
			dev_err(dev, "dpdmai_set_rx_queue() failed\n");
			return err;
		}

		ppriv++;
	}

	return 0;
}

static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev;
	int err = 0;
	int i;

	ls_dev = to_fsl_mc_device(dev);

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv->nctx.qman64 = 0;
		ppriv->nctx.dpio_id = 0;
		ppriv++;
	}

	err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
	if (err)
		dev_err(dev, "dpdmai_reset() failed\n");

	return err;
}

static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
				   struct list_head *head)
{
	struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
	unsigned long flags;

	list_for_each_entry_safe(comp_tmp, _comp_tmp,
				 head, list) {
		spin_lock_irqsave(&qchan->queue_lock, flags);
		list_del(&comp_tmp->list);
		spin_unlock_irqrestore(&qchan->queue_lock, flags);
		dma_pool_free(qchan->fd_pool,
			      comp_tmp->fd_virt_addr,
			      comp_tmp->fd_bus_addr);
		dma_pool_free(qchan->fl_pool,
			      comp_tmp->fl_virt_addr,
			      comp_tmp->fl_bus_addr);
		dma_pool_free(qchan->sdd_pool,
			      comp_tmp->desc_virt_addr,
			      comp_tmp->desc_bus_addr);
		kfree(comp_tmp);
	}
}

static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
	struct dpaa2_qdma_chan *qchan;
	int num, i;

	num = dpaa2_qdma->n_chans;
	for (i = 0; i < num; i++) {
		qchan = &dpaa2_qdma->chans[i];
		dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
		dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
		dma_pool_destroy(qchan->fd_pool);
		dma_pool_destroy(qchan->fl_pool);
		dma_pool_destroy(qchan->sdd_pool);
	}
}

static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct dpaa2_qdma_chan *qchan;
	unsigned long flags;

	dpaa2_comp = to_fsl_qdma_comp(vdesc);
	qchan = dpaa2_comp->qchan;
	spin_lock_irqsave(&qchan->queue_lock, flags);
	list_del(&dpaa2_comp->list);
	list_add_tail(&dpaa2_comp->list, &qchan->comp_free);
	spin_unlock_irqrestore(&qchan->queue_lock, flags);
}

static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
	struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
	struct dpaa2_qdma_chan *dpaa2_chan;
	int num = priv->num_pairs;
	int i;

	INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
	for (i = 0; i < dpaa2_qdma->n_chans; i++) {
		dpaa2_chan = &dpaa2_qdma->chans[i];
		dpaa2_chan->qdma = dpaa2_qdma;
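		/* distribute channels round-robin over the available TX queues */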
		dpaa2_chan->fqid = priv->tx_fqid[i % num];
		dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
		vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
		spin_lock_init(&dpaa2_chan->queue_lock);
		INIT_LIST_HEAD(&dpaa2_chan->comp_used);
		INIT_LIST_HEAD(&dpaa2_chan->comp_free);
	}
	return 0;
}

static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
{
	struct device *dev = &dpdmai_dev->dev;
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_priv *priv;
	int err;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	dev_set_drvdata(dev, priv);
	priv->dpdmai_dev = dpdmai_dev;

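	/*
	 * If the device sits behind an IOMMU, DMA addresses are IOVAs and the
	 * BMT bypass in the frame descriptors must not be used.
	 */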
	priv->iommu_domain = iommu_get_domain_for_dev(dev);
	if (priv->iommu_domain)
		smmu_disable = false;

	/* obtain an MC portal */
	err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");
		goto err_mcportal;
	}

	/* DPDMAI initialization */
	err = dpaa2_qdma_setup(dpdmai_dev);
	if (err) {
		dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
		goto err_dpdmai_setup;
	}

	/* DPIO */
	err = dpaa2_qdma_dpio_setup(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
		goto err_dpio_setup;
	}

	/* DPDMAI binding to DPIO */
	err = dpaa2_dpdmai_bind(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
		goto err_bind;
	}

	/* DPDMAI enable */
	err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpdmai_enable() failed\n");
		goto err_enable;
	}

	dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
	if (!dpaa2_qdma) {
		err = -ENOMEM;
		goto err_eng;
	}

	priv->dpaa2_qdma = dpaa2_qdma;
	dpaa2_qdma->priv = priv;

	dpaa2_qdma->desc_allocated = 0;
	dpaa2_qdma->n_chans = NUM_CH;

	dpaa2_dpdmai_init_channels(dpaa2_qdma);

	if (soc_device_match(soc_fixup_tuning))
		dpaa2_qdma->qdma_wrtype_fixup = true;
	else
		dpaa2_qdma->qdma_wrtype_fixup = false;

	dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);

	dpaa2_qdma->dma_dev.dev = dev;
	dpaa2_qdma->dma_dev.device_alloc_chan_resources =
		dpaa2_qdma_alloc_chan_resources;
	dpaa2_qdma->dma_dev.device_free_chan_resources =
		dpaa2_qdma_free_chan_resources;
	dpaa2_qdma->dma_dev.device_tx_status = dma_cookie_status;
	dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
	dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;

	err = dma_async_device_register(&dpaa2_qdma->dma_dev);
	if (err) {
		dev_err(dev, "Can't register NXP QDMA engine.\n");
		goto err_dpaa2_qdma;
	}

	return 0;

err_dpaa2_qdma:
	kfree(dpaa2_qdma);
err_eng:
	dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
err_enable:
	dpaa2_dpdmai_dpio_unbind(priv);
err_bind:
	dpaa2_dpmai_store_free(priv);
	dpaa2_dpdmai_dpio_free(priv);
err_dpio_setup:
	kfree(priv->ppriv);
	dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
err_dpdmai_setup:
	fsl_mc_portal_free(priv->mc_io);
err_mcportal:
	kfree(priv);
	dev_set_drvdata(dev, NULL);
	return err;
}

static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_priv *priv;
	struct device *dev;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);
	dpaa2_qdma = priv->dpaa2_qdma;

	dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
	dpaa2_dpdmai_dpio_unbind(priv);
	dpaa2_dpmai_store_free(priv);
	dpaa2_dpdmai_dpio_free(priv);
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	fsl_mc_portal_free(priv->mc_io);
	dev_set_drvdata(dev, NULL);
	dpaa2_dpdmai_free_channels(dpaa2_qdma);

	dma_async_device_unregister(&dpaa2_qdma->dma_dev);
	kfree(priv);
	kfree(dpaa2_qdma);

	return 0;
}

static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_priv *priv;
	struct device *dev;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);

	dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
	dpaa2_dpdmai_dpio_unbind(priv);
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle);
}

static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpdmai",
	},
	{ .vendor = 0x0 }
};

static struct fsl_mc_driver dpaa2_qdma_driver = {
	.driver = {
		.name = "dpaa2-qdma",
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_qdma_probe,
	.remove = dpaa2_qdma_remove,
	.shutdown = dpaa2_qdma_shutdown,
	.match_id_table = dpaa2_qdma_id_table
};

static int __init dpaa2_qdma_driver_init(void)
{
	return fsl_mc_driver_register(&(dpaa2_qdma_driver));
}
late_initcall(dpaa2_qdma_driver_init);

static void __exit fsl_qdma_exit(void)
{
	fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
}
module_exit(fsl_qdma_exit);

MODULE_ALIAS("platform:fsl-dpaa2-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape DPAA2 qDMA engine driver");