Lines matching refs:dt (each entry below is a source line that references dt, prefixed with its line number in the original file)
122 static void dma_test_free_rings(struct dma_test *dt)
124 if (dt->rx_ring) {
125 tb_xdomain_release_in_hopid(dt->xd, dt->rx_hopid);
126 tb_ring_free(dt->rx_ring);
127 dt->rx_ring = NULL;
129 if (dt->tx_ring) {
130 tb_xdomain_release_out_hopid(dt->xd, dt->tx_hopid);
131 tb_ring_free(dt->tx_ring);
132 dt->tx_ring = NULL;
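
Only the lines that mention dt are listed, so the braces closing the two if blocks and the function itself are missing. A minimal reconstruction of the whole teardown helper (brace placement assumed) reads:

static void dma_test_free_rings(struct dma_test *dt)
{
        if (dt->rx_ring) {
                /* Give the input HopID back to the XDomain, then drop the ring */
                tb_xdomain_release_in_hopid(dt->xd, dt->rx_hopid);
                tb_ring_free(dt->rx_ring);
                dt->rx_ring = NULL;
        }
        if (dt->tx_ring) {
                /* Same for the output side */
                tb_xdomain_release_out_hopid(dt->xd, dt->tx_hopid);
                tb_ring_free(dt->tx_ring);
                dt->tx_ring = NULL;
        }
}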
136 static int dma_test_start_rings(struct dma_test *dt)
139 struct tb_xdomain *xd = dt->xd;
148 if (dt->packets_to_send && dt->packets_to_receive)
151 if (dt->packets_to_send) {
157 dt->tx_ring = ring;
162 dma_test_free_rings(dt);
166 dt->tx_hopid = ret;
169 if (dt->packets_to_receive) {
179 dma_test_free_rings(dt);
183 dt->rx_ring = ring;
187 dma_test_free_rings(dt);
191 dt->rx_hopid = ret;
194 ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,
195 dt->tx_ring ? dt->tx_ring->hop : -1,
196 dt->rx_hopid,
197 dt->rx_ring ? dt->rx_ring->hop : -1);
199 dma_test_free_rings(dt);
203 if (dt->tx_ring)
204 tb_ring_start(dt->tx_ring);
205 if (dt->rx_ring)
206 tb_ring_start(dt->rx_ring);
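
The ring and HopID allocation calls fall in the gaps between these matches because they do not reference dt directly. A sketch of the TX branch inside dma_test_start_rings(), where ring, ret and flags are the function's own locals; the xd->tb->nhi lookup and the DMA_TEST_TX_RING_SIZE constant are assumptions, and tb_xdomain_alloc_out_hopid() is assumed as the counterpart of the release call seen in dma_test_free_rings():

        if (dt->packets_to_send) {
                /* Frame-mode TX ring on the host's NHI; any free hop (-1) */
                ring = tb_ring_alloc_tx(xd->tb->nhi, -1, DMA_TEST_TX_RING_SIZE,
                                        flags);
                if (!ring)
                        return -ENOMEM;

                dt->tx_ring = ring;

                /* Reserve an output HopID on the remote XDomain for this path */
                ret = tb_xdomain_alloc_out_hopid(xd, -1);
                if (ret < 0) {
                        dma_test_free_rings(dt);
                        return ret;
                }

                dt->tx_hopid = ret;
        }

The check at line 148 most likely ORs an end-to-end flow-control flag into flags when the service both sends and receives. The RX branch (lines 169-191) mirrors the above with tb_ring_alloc_rx() and tb_xdomain_alloc_in_hopid(), after which the paths are enabled and both rings started exactly as listed at lines 194-206.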
211 static void dma_test_stop_rings(struct dma_test *dt)
215 if (dt->rx_ring)
216 tb_ring_stop(dt->rx_ring);
217 if (dt->tx_ring)
218 tb_ring_stop(dt->tx_ring);
220 ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,
221 dt->tx_ring ? dt->tx_ring->hop : -1,
222 dt->rx_hopid,
223 dt->rx_ring ? dt->rx_ring->hop : -1);
225 dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");
227 dma_test_free_rings(dt);
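
Reconstructed in full; the ret declaration and the check that guards dev_warn() do not mention dt and are therefore assumed here:

static void dma_test_stop_rings(struct dma_test *dt)
{
        int ret;

        if (dt->rx_ring)
                tb_ring_stop(dt->rx_ring);
        if (dt->tx_ring)
                tb_ring_stop(dt->tx_ring);

        ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,
                                       dt->tx_ring ? dt->tx_ring->hop : -1,
                                       dt->rx_hopid,
                                       dt->rx_ring ? dt->rx_ring->hop : -1);
        if (ret)
                dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");

        dma_test_free_rings(dt);
}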
234 struct dma_test *dt = tf->dma_test;
235 struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);
246 dt->packets_received++;
247 dev_dbg(&dt->svc->dev, "packet %u/%u received\n", dt->packets_received,
248 dt->packets_to_receive);
251 dt->crc_errors++;
253 dt->buffer_overflow_errors++;
257 if (dt->packets_received == dt->packets_to_receive)
258 complete(&dt->complete);
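
Lines 234-258 belong to the RX completion callback. The function name, the per-frame wrapper type (called dma_test_frame here), the DMA_TEST_FRAME_SIZE constant and the descriptor flag names are all assumptions; the dt bookkeeping lines come straight from the listing:

static void dma_test_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
                                 bool canceled)
{
        struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame);
        struct dma_test *dt = tf->dma_test;
        struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);

        /* Undo the streaming mapping set up when the buffer was queued */
        dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE,
                         DMA_FROM_DEVICE);

        dt->packets_received++;
        dev_dbg(&dt->svc->dev, "packet %u/%u received\n", dt->packets_received,
                dt->packets_to_receive);

        /* Lines 251 and 253 count descriptor errors; the flag names are guesses */
        if (tf->frame.flags & RING_DESC_CRC_ERROR)
                dt->crc_errors++;
        if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN)
                dt->buffer_overflow_errors++;

        /* The real callback also frees the buffer and the wrapper, and bails
         * out early when the frame was canceled.
         */
        if (dt->packets_received == dt->packets_to_receive)
                complete(&dt->complete);
}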
261 static int dma_test_submit_rx(struct dma_test *dt, size_t npackets)
263 struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);
290 tf->dma_test = dt;
293 tb_ring_rx(dt->rx_ring, &tf->frame);
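
dma_test_submit_rx() (starting at line 261) pre-posts npackets receive buffers on the RX ring. The allocation and mapping code sits in the gaps; a sketch of the loop body, reusing the wrapper type, buffer size constant and callback name assumed above (the data member name is also a guess):

        for (i = 0; i < npackets; i++) {
                struct dma_test_frame *tf;
                dma_addr_t dma_addr;

                tf = kzalloc(sizeof(*tf), GFP_KERNEL);
                if (!tf)
                        return -ENOMEM;

                tf->data = kzalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
                if (!tf->data) {
                        kfree(tf);
                        return -ENOMEM;
                }

                /* Map for device-to-memory DMA on the ring's DMA device */
                dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
                                          DMA_FROM_DEVICE);
                if (dma_mapping_error(dma_dev, dma_addr)) {
                        kfree(tf->data);
                        kfree(tf);
                        return -ENOMEM;
                }

                tf->frame.buffer_phy = dma_addr;
                tf->frame.callback = dma_test_rx_callback;
                tf->frame.size = DMA_TEST_FRAME_SIZE;
                tf->dma_test = dt;

                /* Queue the frame on the RX ring (line 293) */
                tb_ring_rx(dt->rx_ring, &tf->frame);
        }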
303 struct dma_test *dt = tf->dma_test;
304 struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);
312 static int dma_test_submit_tx(struct dma_test *dt, size_t npackets)
314 struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);
326 tf->dma_test = dt;
348 dt->packets_sent++;
349 dev_dbg(&dt->svc->dev, "packet %u/%u sent\n", dt->packets_sent,
350 dt->packets_to_send);
352 tb_ring_tx(dt->tx_ring, &tf->frame);
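
The TX completion callback starting at line 303 presumably just unmaps the buffer and frees the wrapper once the frame has left the ring. dma_test_submit_tx() (line 312 onward) mirrors the RX submit loop; after the same wrapper and buffer allocation, the distinctive TX steps are roughly the following, with the callback name, the payload fill and the SOF/EOF PDF constants all being assumptions:

                /* Fill the payload with a pattern the receiver can check */
                memset(tf->data, 0xaa, DMA_TEST_FRAME_SIZE);

                dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
                                          DMA_TO_DEVICE);
                if (dma_mapping_error(dma_dev, dma_addr)) {
                        kfree(tf->data);
                        kfree(tf);
                        return -ENOMEM;
                }

                tf->frame.buffer_phy = dma_addr;
                tf->frame.callback = dma_test_tx_callback;
                tf->frame.size = DMA_TEST_FRAME_SIZE;
                /* Mark frame boundaries in the protocol defined field */
                tf->frame.sof = DMA_TEST_PDF_FRAME_START;
                tf->frame.eof = DMA_TEST_PDF_FRAME_END;
                tf->dma_test = dt;

                dt->packets_sent++;
                dev_dbg(&dt->svc->dev, "packet %u/%u sent\n", dt->packets_sent,
                        dt->packets_to_send);

                tb_ring_tx(dt->tx_ring, &tf->frame);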
362 struct dma_test *dt = tb_service_get_drvdata(svc); \
365 ret = mutex_lock_interruptible(&dt->lock); \
368 __get(dt, val); \
369 mutex_unlock(&dt->lock); \
375 struct dma_test *dt = tb_service_get_drvdata(svc); \
381 ret = mutex_lock_interruptible(&dt->lock); \
384 __set(dt, val); \
385 mutex_unlock(&dt->lock); \
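
Lines 362-385 are the body of a helper macro (note the trailing backslash continuations) that generates a mutex-protected show/store pair for each debugfs attribute. For one attribute it expands to roughly the following; the lanes attribute is used as the example because lanes_fops appears at line 630, and the standard DEFINE_DEBUGFS_ATTRIBUTE() wrapper is assumed:

static int lanes_show(void *data, u64 *val)
{
        struct tb_service *svc = data;
        struct dma_test *dt = tb_service_get_drvdata(svc);
        int ret;

        ret = mutex_lock_interruptible(&dt->lock);
        if (ret)
                return ret;
        lanes_get(dt, val);
        mutex_unlock(&dt->lock);
        return 0;
}

static int lanes_store(void *data, u64 val)
{
        struct tb_service *svc = data;
        struct dma_test *dt = tb_service_get_drvdata(svc);
        int ret;

        /* The gap before line 381 suggests the value is validated here first */
        ret = mutex_lock_interruptible(&dt->lock);
        if (ret)
                return ret;
        lanes_set(dt, val);
        mutex_unlock(&dt->lock);
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(lanes_fops, lanes_show, lanes_store, "%llu\n");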
391 static void lanes_get(const struct dma_test *dt, u64 *val)
393 *val = dt->link_width;
401 static void lanes_set(struct dma_test *dt, u64 val)
403 dt->link_width = val;
407 static void speed_get(const struct dma_test *dt, u64 *val)
409 *val = dt->link_speed;
425 static void speed_set(struct dma_test *dt, u64 val)
427 dt->link_speed = val;
431 static void packets_to_receive_get(const struct dma_test *dt, u64 *val)
433 *val = dt->packets_to_receive;
441 static void packets_to_receive_set(struct dma_test *dt, u64 val)
443 dt->packets_to_receive = val;
448 static void packets_to_send_get(const struct dma_test *dt, u64 *val)
450 *val = dt->packets_to_send;
458 static void packets_to_send_set(struct dma_test *dt, u64 val)
460 dt->packets_to_send = val;
465 static int dma_test_set_bonding(struct dma_test *dt)
467 switch (dt->link_width) {
469 return tb_xdomain_lane_bonding_enable(dt->xd);
471 tb_xdomain_lane_bonding_disable(dt->xd);
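
The case labels are not listed because they do not mention dt. Read together with the width check in dma_test_check_errors() below (width 2 expects a bonded link, width 1 a single lane), the helper plausibly reads:

static int dma_test_set_bonding(struct dma_test *dt)
{
        switch (dt->link_width) {
        case 2:
                /* Dual lane requested: bond the lanes before testing */
                return tb_xdomain_lane_bonding_enable(dt->xd);
        case 1:
                /* Single lane requested: make sure bonding is off */
                tb_xdomain_lane_bonding_disable(dt->xd);
                fallthrough;
        default:
                return 0;
        }
}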
478 static bool dma_test_validate_config(struct dma_test *dt)
480 if (!dt->packets_to_send && !dt->packets_to_receive)
482 if (dt->packets_to_send && dt->packets_to_receive &&
483 dt->packets_to_send != dt->packets_to_receive)
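
Its return statements do not touch dt and are therefore not listed; since the caller at line 539 treats a false result as a configuration error, the helper is presumably:

static bool dma_test_validate_config(struct dma_test *dt)
{
        /* Must send, receive, or both; if both, the counts have to match */
        if (!dt->packets_to_send && !dt->packets_to_receive)
                return false;
        if (dt->packets_to_send && dt->packets_to_receive &&
            dt->packets_to_send != dt->packets_to_receive)
                return false;
        return true;
}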
488 static void dma_test_check_errors(struct dma_test *dt, int ret)
490 if (!dt->error_code) {
491 if (dt->link_speed && dt->xd->link_speed != dt->link_speed) {
492 dt->error_code = DMA_TEST_SPEED_ERROR;
493 } else if (dt->link_width) {
494 const struct tb_xdomain *xd = dt->xd;
496 if ((dt->link_width == 1 && xd->link_width != TB_LINK_WIDTH_SINGLE) ||
497 (dt->link_width == 2 && xd->link_width < TB_LINK_WIDTH_DUAL))
498 dt->error_code = DMA_TEST_WIDTH_ERROR;
499 } else if (dt->packets_to_send != dt->packets_sent ||
500 dt->packets_to_receive != dt->packets_received ||
501 dt->crc_errors || dt->buffer_overflow_errors) {
502 dt->error_code = DMA_TEST_PACKET_ERROR;
508 dt->result = DMA_TEST_FAIL;
514 struct dma_test *dt = tb_service_get_drvdata(svc);
520 ret = mutex_lock_interruptible(&dt->lock);
524 dt->packets_sent = 0;
525 dt->packets_received = 0;
526 dt->crc_errors = 0;
527 dt->buffer_overflow_errors = 0;
528 dt->result = DMA_TEST_SUCCESS;
529 dt->error_code = DMA_TEST_NO_ERROR;
532 if (dt->link_speed)
533 dev_dbg(&svc->dev, "link_speed: %u Gb/s\n", dt->link_speed);
534 if (dt->link_width)
535 dev_dbg(&svc->dev, "link_width: %u\n", dt->link_width);
536 dev_dbg(&svc->dev, "packets_to_send: %u\n", dt->packets_to_send);
537 dev_dbg(&svc->dev, "packets_to_receive: %u\n", dt->packets_to_receive);
539 if (!dma_test_validate_config(dt)) {
541 dt->error_code = DMA_TEST_CONFIG_ERROR;
545 ret = dma_test_set_bonding(dt);
548 dt->error_code = DMA_TEST_BONDING_ERROR;
552 ret = dma_test_start_rings(dt);
555 dt->error_code = DMA_TEST_DMA_ERROR;
559 if (dt->packets_to_receive) {
560 reinit_completion(&dt->complete);
561 ret = dma_test_submit_rx(dt, dt->packets_to_receive);
564 dt->error_code = DMA_TEST_BUFFER_ERROR;
569 if (dt->packets_to_send) {
570 ret = dma_test_submit_tx(dt, dt->packets_to_send);
573 dt->error_code = DMA_TEST_BUFFER_ERROR;
578 if (dt->packets_to_receive) {
579 ret = wait_for_completion_interruptible(&dt->complete);
581 dt->error_code = DMA_TEST_INTERRUPTED;
587 dma_test_stop_rings(dt);
589 dma_test_check_errors(dt, ret);
590 mutex_unlock(&dt->lock);
592 dev_dbg(&svc->dev, "DMA test %s\n", dma_test_result_names[dt->result]);
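
Lines 514-592 form the routine behind the write-only test attribute created at line 637. The match hides the error unwinding, so the overall shape is sketched below; the out_unlock label name and the goto structure are assumptions, while the step order and error codes come from the listing:

        ret = mutex_lock_interruptible(&dt->lock);
        if (ret)
                return ret;

        /* Reset per-run counters and log the requested configuration
         * (lines 524-537), then run the steps below, recording a specific
         * error code at the first failure.
         */
        if (!dma_test_validate_config(dt)) {
                dt->error_code = DMA_TEST_CONFIG_ERROR;
                goto out_unlock;
        }

        ret = dma_test_set_bonding(dt);
        if (ret) {
                dt->error_code = DMA_TEST_BONDING_ERROR;
                goto out_unlock;
        }

        ret = dma_test_start_rings(dt);
        if (ret) {
                dt->error_code = DMA_TEST_DMA_ERROR;
                goto out_unlock;
        }

        /* Queue RX buffers, queue TX frames and wait for every packet to be
         * received (lines 559-581); a buffer failure is recorded as
         * DMA_TEST_BUFFER_ERROR and an interrupted wait as DMA_TEST_INTERRUPTED.
         */

        dma_test_stop_rings(dt);
out_unlock:
        dma_test_check_errors(dt, ret);
        mutex_unlock(&dt->lock);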
600 struct dma_test *dt = tb_service_get_drvdata(svc);
603 ret = mutex_lock_interruptible(&dt->lock);
607 seq_printf(s, "result: %s\n", dma_test_result_names[dt->result]);
608 if (dt->result == DMA_TEST_NOT_RUN)
611 seq_printf(s, "packets received: %u\n", dt->packets_received);
612 seq_printf(s, "packets sent: %u\n", dt->packets_sent);
613 seq_printf(s, "CRC errors: %u\n", dt->crc_errors);
615 dt->buffer_overflow_errors);
616 seq_printf(s, "error: %s\n", dma_test_error_names[dt->error_code]);
619 mutex_unlock(&dt->lock);
626 struct dma_test *dt = tb_service_get_drvdata(svc);
628 dt->debugfs_dir = debugfs_create_dir("dma_test", svc->debugfs_dir);
630 debugfs_create_file("lanes", 0600, dt->debugfs_dir, svc, &lanes_fops);
631 debugfs_create_file("speed", 0600, dt->debugfs_dir, svc, &speed_fops);
632 debugfs_create_file("packets_to_receive", 0600, dt->debugfs_dir, svc,
634 debugfs_create_file("packets_to_send", 0600, dt->debugfs_dir, svc,
636 debugfs_create_file("status", 0400, dt->debugfs_dir, svc, &status_fops);
637 debugfs_create_file("test", 0200, dt->debugfs_dir, svc, &test_fops);
643 struct dma_test *dt;
645 dt = devm_kzalloc(&svc->dev, sizeof(*dt), GFP_KERNEL);
646 if (!dt)
649 dt->svc = svc;
650 dt->xd = xd;
651 mutex_init(&dt->lock);
652 init_completion(&dt->complete);
654 tb_service_set_drvdata(svc, dt);
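
Lines 643-654 are the probe path; how the parent XDomain is obtained and what happens after the driver data is set are not visible. A sketch, assuming the parent is fetched with tb_service_parent() and that the debugfs files are registered before returning:

static int dma_test_probe(struct tb_service *svc, const struct tb_service_id *id)
{
        struct tb_xdomain *xd = tb_service_parent(svc);
        struct dma_test *dt;

        dt = devm_kzalloc(&svc->dev, sizeof(*dt), GFP_KERNEL);
        if (!dt)
                return -ENOMEM;

        dt->svc = svc;
        dt->xd = xd;
        mutex_init(&dt->lock);
        init_completion(&dt->complete);

        tb_service_set_drvdata(svc, dt);
        dma_test_debugfs_init(svc);     /* helper at lines 626-637; name is a guess */

        return 0;
}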
662 struct dma_test *dt = tb_service_get_drvdata(svc);
664 mutex_lock(&dt->lock);
665 debugfs_remove_recursive(dt->debugfs_dir);
666 mutex_unlock(&dt->lock);