Lines matching defs:pthr (uses of the struct perf_thread pointer in drivers/ntb/test/ntb_perf.c)

779 	struct perf_thread *pthr = data;
781 	atomic_dec(&pthr->dma_sync);
782 	wake_up(&pthr->dma_wait);
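
The three matches above (779-782) form the body of the driver's DMA completion callback. A minimal reconstruction, assuming the mainline function name perf_dma_copy_callback; only the matched lines are confirmed by this listing:

    static void perf_dma_copy_callback(void *data)
    {
    	struct perf_thread *pthr = data;

    	/* One in-flight transfer finished: drop the counter and wake
    	 * the waiter blocked in perf_sync_test() further down. */
    	atomic_dec(&pthr->dma_sync);
    	wake_up(&pthr->dma_wait);
    }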
785 static int perf_copy_chunk(struct perf_thread *pthr,
792 	struct perf_peer *peer = pthr->perf->test_peer;
802 	dma_dev = pthr->dma_chan->device->dev;
804 	if (!is_dma_copy_aligned(pthr->dma_chan->device, offset_in_page(src),
826 	tx = dmaengine_prep_dma_memcpy(pthr->dma_chan, dst_dma_addr,
838 	tx->callback_param = pthr;
849 	atomic_inc(&pthr->dma_sync);
850 	dma_async_issue_pending(pthr->dma_chan);
853 	return likely(atomic_read(&pthr->perf->tsync) > 0) ? 0 : -EINTR;
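
The perf_copy_chunk() matches trace the usual dmaengine submission sequence. A hedged sketch of how they chain together; the lines elided between matches, the dmaengine_submit() call, and the error returns are assumptions, not confirmed by the listing:

    	struct dma_async_tx_descriptor *tx;

    	tx = dmaengine_prep_dma_memcpy(pthr->dma_chan, dst_dma_addr,
    				       src_dma_addr, len, DMA_PREP_INTERRUPT);
    	if (!tx)
    		return -ENXIO;

    	tx->callback = perf_dma_copy_callback;
    	tx->callback_param = pthr;

    	if (dma_submit_error(dmaengine_submit(tx)))
    		return -EIO;

    	/* Bump the in-flight counter before issuing, so the callback's
    	 * atomic_dec() can never run against a stale count. */
    	atomic_inc(&pthr->dma_sync);
    	dma_async_issue_pending(pthr->dma_chan);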
871 static int perf_init_test(struct perf_thread *pthr)
873 	struct perf_ctx *perf = pthr->perf;
875 	struct perf_peer *peer = pthr->perf->test_peer;
877 	pthr->src = kmalloc_node(perf->test_peer->outbuf_size, GFP_KERNEL,
879 	if (!pthr->src)
882 	get_random_bytes(pthr->src, perf->test_peer->outbuf_size);
889 	pthr->dma_chan = dma_request_channel(dma_mask, perf_dma_filter, perf);
890 	if (!pthr->dma_chan) {
892 			pthr->tidx);
896 		dma_map_resource(pthr->dma_chan->device->dev,
899 	if (dma_mapping_error(pthr->dma_chan->device->dev,
901 		dev_err(pthr->dma_chan->device->dev, "%d: Failed to map DMA addr\n",
902 			pthr->tidx);
904 		dma_release_channel(pthr->dma_chan);
907 	dev_dbg(pthr->dma_chan->device->dev, "%d: Map MMIO %pa to DMA addr %pad\n",
908 		pthr->tidx,
912 	atomic_set(&pthr->dma_sync, 0);
918 	kfree(pthr->src);
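
Lines 896-904 are the MMIO-outbuf-to-DMA mapping and its error path. The pattern, reassembled with an assumed unwind order; peer->out_phys_addr and the -ENODEV return do not appear in the matches:

    	peer->dma_dst_addr =
    		dma_map_resource(pthr->dma_chan->device->dev,
    				 peer->out_phys_addr, peer->outbuf_size,
    				 DMA_FROM_DEVICE, 0);
    	if (dma_mapping_error(pthr->dma_chan->device->dev,
    			      peer->dma_dst_addr)) {
    		dev_err(pthr->dma_chan->device->dev,
    			"%d: Failed to map DMA addr\n", pthr->tidx);
    		dma_release_channel(pthr->dma_chan);
    		kfree(pthr->src);	/* allocated at line 877 */
    		return -ENODEV;
    	}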
922 static int perf_run_test(struct perf_thread *pthr)
924 	struct perf_peer *peer = pthr->perf->test_peer;
925 	struct perf_ctx *perf = pthr->perf;
935 	flt_src = pthr->src;
939 	pthr->duration = ktime_get();
942 	while (pthr->copied < total_size) {
943 		ret = perf_copy_chunk(pthr, flt_dst, flt_src, chunk_size);
946 				pthr->tidx, ret);
950 		pthr->copied += chunk_size;
956 			flt_src = pthr->src;
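
The run loop (939-956) timestamps the start, then pushes chunk_size bytes per iteration until total_size is reached, wrapping the flat source pointer back to pthr->src at the buffer boundary. A sketch; the dev_err format string, the wrap condition, and the bnd_dst bound are assumptions:

    	pthr->duration = ktime_get();

    	while (pthr->copied < total_size) {
    		ret = perf_copy_chunk(pthr, flt_dst, flt_src, chunk_size);
    		if (ret) {
    			dev_err(&perf->ntb->dev, "%d: Got error %d on test\n",
    				pthr->tidx, ret);
    			return ret;
    		}

    		pthr->copied += chunk_size;
    		flt_dst += chunk_size;
    		flt_src += chunk_size;

    		if (flt_dst >= bnd_dst) {	/* end of outbuf window */
    			flt_dst = peer->outbuf;
    			flt_src = pthr->src;
    		}
    	}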
966 static int perf_sync_test(struct perf_thread *pthr)
968 	struct perf_ctx *perf = pthr->perf;
973 	wait_event(pthr->dma_wait,
974 		   (atomic_read(&pthr->dma_sync) == 0 ||
981 	pthr->duration = ktime_sub(ktime_get(), pthr->duration);
984 		pthr->tidx, pthr->copied);
987 		pthr->tidx, ktime_to_us(pthr->duration));
989 	dev_dbg(&perf->ntb->dev, "%d: %llu MBytes/s\n", pthr->tidx,
990 		div64_u64(pthr->copied, ktime_to_us(pthr->duration)));
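
The MBytes/s figure at 989-990 is pure unit bookkeeping: pthr->copied is in bytes, ktime_to_us() yields microseconds, and one byte per microsecond equals exactly one decimal megabyte per second, so a single 64-bit division suffices. For example:

    	/* 4 GiB in 2.5 s:
    	 *   4294967296 bytes / 2500000 us = 1717 MBytes/s (decimal MB)
    	 */
    	u64 rate = div64_u64(pthr->copied, ktime_to_us(pthr->duration));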
995 static void perf_clear_test(struct perf_thread *pthr)
997 	struct perf_ctx *perf = pthr->perf;
1006 	(void)dmaengine_terminate_sync(pthr->dma_chan);
1007 	if (pthr->perf->test_peer->dma_dst_addr)
1008 		dma_unmap_resource(pthr->dma_chan->device->dev,
1009 				   pthr->perf->test_peer->dma_dst_addr,
1010 				   pthr->perf->test_peer->outbuf_size,
1013 	dma_release_channel(pthr->dma_chan);
1018 	kfree(pthr->src);
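
Teardown at 1006-1018 mirrors setup in reverse: quiesce the channel, drop the MMIO mapping the hardware might still reference, release the channel, then free the source buffer. A condensed sketch; peer is assumed to alias pthr->perf->test_peer:

    	/* Quiesce DMA before tearing down the mapping it may still use;
    	 * dmaengine_terminate_sync() also waits for callbacks to finish. */
    	(void)dmaengine_terminate_sync(pthr->dma_chan);
    	if (peer->dma_dst_addr)
    		dma_unmap_resource(pthr->dma_chan->device->dev,
    				   peer->dma_dst_addr, peer->outbuf_size,
    				   DMA_FROM_DEVICE, 0);
    	dma_release_channel(pthr->dma_chan);
    	kfree(pthr->src);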
1023 	struct perf_thread *pthr = to_thread_work(work);
1033 	ret = perf_init_test(pthr);
1035 		pthr->status = ret;
1039 	ret = perf_run_test(pthr);
1041 		pthr->status = ret;
1045 	pthr->status = perf_sync_test(pthr);
1048 	perf_clear_test(pthr);
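
Lines 1023-1048 are the worker's staging skeleton: init, run, sync, then cleanup that must run whenever init succeeded. Reconstructed control flow; the label name and the branch structure between matches are assumed:

    static void perf_thread_work(struct work_struct *work)
    {
    	struct perf_thread *pthr = to_thread_work(work);
    	int ret;

    	ret = perf_init_test(pthr);
    	if (ret) {
    		pthr->status = ret;
    		return;			/* nothing allocated yet */
    	}

    	ret = perf_run_test(pthr);
    	if (ret) {
    		pthr->status = ret;
    		goto err_clear_test;
    	}

    	pthr->status = perf_sync_test(pthr);

    err_clear_test:
    	perf_clear_test(pthr);
    }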
1082 	struct perf_thread *pthr;
1096 		pthr = &perf->threads[tidx];
1098 		pthr->status = -ENODATA;
1099 		pthr->copied = 0;
1100 		pthr->duration = ktime_set(0, 0);
1102 		(void)queue_work(perf_wq, &pthr->work);
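
Lines 1096-1102 reset each thread's result fields before queueing its work item, so a reader can never see stale data; -ENODATA doubles as the "never produced a result" sentinel tested at line 1132. Sketch of the loop body, with the loop bounds assumed:

    	for (tidx = 0; tidx < perf->tcnt; tidx++) {
    		pthr = &perf->threads[tidx];

    		pthr->status = -ENODATA;	/* no result yet */
    		pthr->copied = 0;
    		pthr->duration = ktime_set(0, 0);
    		(void)queue_work(perf_wq, &pthr->work);
    	}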
1120 	struct perf_thread *pthr;
1130 		pthr = &perf->threads[tidx];
1132 		if (pthr->status == -ENODATA)
1135 		if (pthr->status) {
1137 				"%d: error status %d\n", tidx, pthr->status);
1143 			tidx, pthr->copied, ktime_to_us(pthr->duration),
1144 			div64_u64(pthr->copied, ktime_to_us(pthr->duration)));
1154 	struct perf_thread *pthr;
1162 		pthr = &perf->threads[tidx];
1164 		pthr->perf = perf;
1165 		pthr->tidx = tidx;
1166 		pthr->status = -ENODATA;
1167 		init_waitqueue_head(&pthr->dma_wait);
1168 		INIT_WORK(&pthr->work, perf_thread_work);
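
Taken together, the matches pin down every per-thread member the driver touches. A struct perf_thread layout inferred purely from those uses; field order, the exact types of copied and duration, and any members not referenced in this listing are assumptions:

    struct perf_thread {
    	struct perf_ctx		*perf;		/* back-pointer, set at 1164 */
    	int			tidx;		/* thread index for logging */
    	int			status;		/* -ENODATA until a result lands */
    	u64			copied;		/* bytes moved so far */
    	ktime_t			duration;	/* wall time of the run */
    	void			*src;		/* random source buffer (877) */
    	struct dma_chan		*dma_chan;	/* memcpy-capable channel (889) */
    	atomic_t		dma_sync;	/* in-flight DMA counter (849/781) */
    	wait_queue_head_t	dma_wait;	/* woken by the DMA callback (782) */
    	struct work_struct	work;		/* runs perf_thread_work() (1168) */
    };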