Lines matching refs:req. Each numbered line below is a reference to the user SDMA request (req), from request setup through packet build and submission, header generation, completion, and teardown.
38 static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
41 static void user_sdma_free_request(struct user_sdma_request *req);
42 static int check_header_template(struct user_sdma_request *req,
45 static int set_txreq_header(struct user_sdma_request *req,
47 static int set_txreq_header_ahg(struct user_sdma_request *req,
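
These declarations outline the request pipeline: user_sdma_send_pkts() builds and submits packets, set_txreq_header()/set_txreq_header_ahg() patch each packet's header, check_header_template() validates the user-supplied template, and user_sdma_free_request() tears the request down. Below is a rough, compilable field inventory of the request state they share, reconstructed only from the req-> references in this listing; the sk_ names, stand-in types, widths, and ordering are assumptions, not the driver's struct user_sdma_request.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SK_MAX_VECTORS 8               /* stand-in for MAX_VECTORS_PER_REQ */

    struct sk_req_info {                   /* req->info: the user-visible request header */
        uint16_t ctrl, npkts, fragsize, comp_idx;
    };

    struct sk_iovec {                      /* req->iovs[i]: one payload vector */
        const void *base;
        size_t      len;
        size_t      offset;
    };

    struct user_sdma_request_sketch {
        uint8_t  hdr[64];                  /* header template copied from user space */
        void    *pq, *cq, *sde;            /* packet queue, completion queue, SDMA engine */
        void    *txps;                     /* built-but-unsubmitted tx descriptors */
        struct sk_req_info info;
        struct sk_iovec iovs[SK_MAX_VECTORS];
        unsigned data_iovs;                /* payload vectors (header vector excluded) */
        uint32_t data_len;                 /* total payload bytes */
        uint32_t sent;                     /* payload bytes queued so far */
        uint32_t koffset;                  /* KDETH offset mirrored into swdata[6] */
        uint32_t tidoffset;                /* offset into the current expected TID */
        uint32_t *tids;                    /* TID array for EXPECTED requests */
        uint16_t n_tids, tididx;
        uint16_t seqnum;                   /* packets built */
        uint16_t seqsubmitted;             /* packets handed to the engine */
        uint16_t seqcomp;                  /* packets completed */
        int      ahg_idx;                  /* AHG slot, -1 when header generation is off */
        bool     has_error;
        uint8_t  iov_idx;                  /* current entry in iovs[] */
    };

    int main(void)
    {
        printf("sketch occupies %zu bytes\n", sizeof(struct user_sdma_request_sketch));
        return 0;
    }
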
290 struct user_sdma_request *req;
297 if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
302 iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
352 req = pq->reqs + info.comp_idx;
353 req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
354 req->data_len = 0;
355 req->pq = pq;
356 req->cq = cq;
357 req->ahg_idx = -1;
358 req->iov_idx = 0;
359 req->sent = 0;
360 req->seqnum = 0;
361 req->seqcomp = 0;
362 req->seqsubmitted = 0;
363 req->tids = NULL;
364 req->has_error = 0;
365 INIT_LIST_HEAD(&req->txps);
367 memcpy(&req->info, &info, sizeof(info));
374 if (req->data_iovs < 2) {
375 SDMA_DBG(req,
380 req->data_iovs--;
383 if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
384 SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
391 ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
392 sizeof(req->hdr));
394 SDMA_DBG(req, "Failed to copy header template (%d)", ret);
401 req->hdr.pbc[2] = 0;
404 opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
407 SDMA_DBG(req, "Invalid opcode (%d)", opcode);
416 vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
417 sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
418 (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
421 SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
427 pkey = (u16)be32_to_cpu(req->hdr.bth[0]);
428 slid = be16_to_cpu(req->hdr.lrh[3]);
439 if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
440 SDMA_DBG(req, "User tried to pass in a GRH");
445 req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
450 req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
451 (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
454 info.comp_idx, req->tidoffset);
458 for (i = 0; i < req->data_iovs; i++) {
459 req->iovs[i].offset = 0;
460 INIT_LIST_HEAD(&req->iovs[i].list);
461 memcpy(&req->iovs[i].iov,
463 sizeof(req->iovs[i].iov));
464 if (req->iovs[i].iov.iov_len == 0) {
468 req->data_len += req->iovs[i].iov.iov_len;
471 info.comp_idx, req->data_len);
472 if (pcount > req->info.npkts)
473 pcount = req->info.npkts;
482 if (req_opcode(req->info.ctrl) == EXPECTED) {
483 u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);
498 ntids * sizeof(*req->tids));
501 SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
505 req->tids = tmp;
506 req->n_tids = ntids;
507 req->tididx = 0;
511 dlid = be16_to_cpu(req->hdr.lrh[1]);
514 req->sde = sdma_select_user_engine(dd, selector, vl);
516 if (!req->sde || !sdma_running(req->sde)) {
522 if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG))
523 req->ahg_idx = sdma_ahg_alloc(req->sde);
534 while (req->seqsubmitted != req->info.npkts) {
535 ret = user_sdma_send_pkts(req, pcount);
559 if (req->seqsubmitted < req->info.npkts) {
560 if (req->seqsubmitted)
562 (req->seqcomp == req->seqsubmitted - 1));
563 user_sdma_free_request(req);
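
The setup path above copies the packet header template from user space and sanity-checks it before any SDMA work is queued: the opcode comes from the top byte of BTH[0], the VL from PBC[0], the 5-bit SC from LRH[0] plus one extra bit in PBC[1], and an LRH that carries a GRH is rejected outright. A minimal user-space sketch of those extractions, assuming a little-endian host for the LE PBC words and the usual HFI1_LRH_GRH value of 0x3; the header words are invented example values.

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>   /* ntohl()/ntohs() stand in for be32_to_cpu()/be16_to_cpu() */

    int main(void)
    {
        uint32_t bth0 = htonl(0x64000000);  /* opcode 0x64 in the top byte */
        uint16_t lrh0 = htons(0x3002);      /* SC[3:0] = 3, LNH = 0x2 (no GRH) */
        uint16_t pbc0 = 0x2000;             /* little-endian PBC word 0: VL = 2 */
        uint16_t pbc1 = 0x4000;             /* bit 14 carries SC bit 4 */

        unsigned opcode = (ntohl(bth0) >> 24) & 0xff;
        unsigned vl     = (pbc0 >> 12) & 0xF;
        unsigned sc     = ((ntohs(lrh0) >> 12) & 0xF) |
                          (((pbc1 >> 14) & 0x1) << 4);
        int grh         = (ntohs(lrh0) & 0x3) == 0x3;  /* assumed HFI1_LRH_GRH value */

        printf("opcode=0x%x vl=%u sc=%u grh=%d\n", opcode, vl, sc, grh);
        return 0;
    }
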
570 static inline u32 compute_data_length(struct user_sdma_request *req,
587 if (!req->seqnum) {
588 if (req->data_len < sizeof(u32))
589 len = req->data_len;
591 len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
593 } else if (req_opcode(req->info.ctrl) == EXPECTED) {
594 u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
600 len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
602 if (unlikely(!len) && ++req->tididx < req->n_tids &&
603 req->tids[req->tididx]) {
604 tidlen = EXP_TID_GET(req->tids[req->tididx],
606 req->tidoffset = 0;
607 len = min_t(u32, tidlen, req->info.fragsize);
614 len = min(len, req->data_len - req->sent);
616 len = min(req->data_len - req->sent, (u32)req->info.fragsize);
618 trace_hfi1_sdma_user_compute_length(req->pq->dd,
619 req->pq->ctxt,
620 req->pq->subctxt,
621 req->info.comp_idx,
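
compute_data_length() picks each packet's payload size: expected (TID) requests send at most the remainder of the current TID, stepping to the next TID when one is exhausted, while eager requests simply send up to fragsize of what is left; both paths are capped by the bytes not yet sent. A hedged stand-alone sketch of that selection, with the first-packet LRH case and the driver's EXP_TID_GET() macro simplified into a plain per-TID length array.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE_SK 4096u

    struct sk_req {
        uint32_t data_len, sent, fragsize;
        uint32_t tidoffset;
        const uint32_t *tidlens;           /* length of each expected TID, in bytes */
        uint16_t n_tids, tididx;
        int expected;                      /* EXPECTED vs eager opcode */
    };

    static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

    static uint32_t sk_compute_data_length(struct sk_req *req)
    {
        uint32_t len;

        if (req->expected) {
            uint32_t tidlen = req->tidlens[req->tididx];

            /* Bytes left in the current TID, capped at the fragment size. */
            len = min_u32(tidlen - req->tidoffset, req->fragsize);
            if (!len && ++req->tididx < req->n_tids) {
                /* Current TID exhausted exactly on a boundary: start the next one. */
                req->tidoffset = 0;
                len = min_u32(req->tidlens[req->tididx], req->fragsize);
            }
            /* Never run past the request's remaining payload. */
            len = min_u32(len, req->data_len - req->sent);
        } else {
            /* Eager path: one fragment of whatever is left. */
            len = min_u32(req->data_len - req->sent, req->fragsize);
        }
        return len;
    }

    int main(void)
    {
        const uint32_t tidlens[] = { 2 * PAGE_SIZE_SK, PAGE_SIZE_SK };
        struct sk_req req = {
            .data_len = 3 * PAGE_SIZE_SK, .fragsize = PAGE_SIZE_SK,
            .tidlens = tidlens, .n_tids = 2, .expected = 1,
        };

        printf("first packet carries %u bytes\n", sk_compute_data_length(&req));
        return 0;
    }
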
639 static int user_sdma_txadd_ahg(struct user_sdma_request *req,
644 u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
645 u32 lrhlen = get_lrh_len(req->hdr, pad_len(datalen));
646 struct hfi1_user_sdma_pkt_q *pq = req->pq;
656 memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
661 ret = check_header_template(req, &tx->hdr, lrhlen, datalen);
665 sizeof(tx->hdr) + datalen, req->ahg_idx,
675 static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
684 if (!req->pq)
687 pq = req->pq;
690 if (READ_ONCE(req->has_error))
696 if (unlikely(req->seqnum == req->info.npkts)) {
697 if (!list_empty(&req->txps))
702 if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
703 maxpkts = req->info.npkts - req->seqnum;
713 if (READ_ONCE(req->has_error))
721 tx->req = req;
728 if (req->seqnum == req->info.npkts - 1)
737 if (req->data_len) {
738 iovec = &req->iovs[req->iov_idx];
740 if (++req->iov_idx == req->data_iovs) {
744 iovec = &req->iovs[req->iov_idx];
748 datalen = compute_data_length(req, tx);
759 SDMA_DBG(req,
768 if (req->ahg_idx >= 0) {
769 if (!req->seqnum) {
770 ret = user_sdma_txadd_ahg(req, tx, datalen);
776 changes = set_txreq_header_ahg(req, tx,
784 ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
794 ret = set_txreq_header(req, tx, datalen);
799 req->koffset += datalen;
800 if (req_opcode(req->info.ctrl) == EXPECTED)
801 req->tidoffset += datalen;
802 req->sent += datalen;
804 ret = hfi1_add_pages_to_sdma_packet(req, tx, iovec,
808 iovec = &req->iovs[req->iov_idx];
810 list_add_tail(&tx->txreq.list, &req->txps);
816 tx->seqnum = req->seqnum++;
820 ret = sdma_send_txlist(req->sde,
822 &req->txps, &count);
823 req->seqsubmitted += count;
824 if (req->seqsubmitted == req->info.npkts) {
831 if (req->ahg_idx >= 0)
832 sdma_ahg_free(req->sde, req->ahg_idx);
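
user_sdma_send_pkts() builds up to maxpkts packets per call, advancing koffset, sent, and seqnum for each one, queueing descriptors on req->txps, and then crediting seqsubmitted with however many packets sdma_send_txlist() actually accepted. A toy model of just that bookkeeping; the simulated engine accepts everything and real descriptor construction is omitted.

    #include <stdint.h>
    #include <stdio.h>

    struct sk_send_state {
        uint32_t data_len, sent, fragsize, koffset;
        uint16_t npkts, seqnum, seqsubmitted;
    };

    /* Stand-in for sdma_send_txlist(): pretend the ring accepted every packet. */
    static unsigned sk_submit(unsigned queued) { return queued; }

    static void sk_send_pkts(struct sk_send_state *s, uint16_t maxpkts)
    {
        unsigned queued = 0;

        /* Clamp the batch to what the request still needs (lines 702-703). */
        if (!maxpkts || maxpkts > s->npkts - s->seqnum)
            maxpkts = s->npkts - s->seqnum;

        while (queued < maxpkts) {
            uint32_t datalen = s->data_len - s->sent;

            if (datalen > s->fragsize)
                datalen = s->fragsize;

            /* Per-packet accounting mirrored from lines 799-816:
             * koffset and sent advance by the payload, seqnum by one. */
            s->koffset += datalen;
            s->sent    += datalen;
            s->seqnum++;
            queued++;
        }

        /* Credit only what the engine actually took (lines 820-823). */
        s->seqsubmitted += sk_submit(queued);
    }

    int main(void)
    {
        struct sk_send_state s = { .data_len = 10000, .fragsize = 4096, .npkts = 3 };

        while (s.seqsubmitted != s.npkts)
            sk_send_pkts(&s, 2);
        printf("sent=%u seqnum=%u submitted=%u\n", s.sent, s.seqnum, s.seqsubmitted);
        return 0;
    }
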
843 static int check_header_template(struct user_sdma_request *req,
857 if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 ||
858 lrhlen > get_lrh_len(*hdr, req->info.fragsize))
861 if (req_opcode(req->info.ctrl) == EXPECTED) {
868 u32 tidval = req->tids[req->tididx],
876 (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
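
check_header_template() rejects templates whose fragment size is not a whole number of PIO blocks, whose LRH length is not a whole number of dwords, or whose first-packet LRH length exceeds one fragment. A small sketch of those checks; the 64-byte PIO block size and the fixed header contribution to the LRH length are assumptions standing in for the driver's constants.

    #include <stdint.h>
    #include <stdio.h>

    #define PIO_BLOCK_SIZE_SK 64u
    #define HDR_PLUS_ICRC     60u   /* assumed fixed header bytes counted by the LRH length */

    static uint32_t sk_lrh_len(uint32_t payload) { return HDR_PLUS_ICRC + payload; }

    static int sk_check_header_template(uint32_t fragsize, uint32_t lrhlen)
    {
        if (fragsize % PIO_BLOCK_SIZE_SK ||     /* fragments must be whole PIO blocks */
            lrhlen & 0x3 ||                     /* LRH length is in dwords, so 4-byte aligned */
            lrhlen > sk_lrh_len(fragsize))      /* first packet may not exceed one fragment */
            return -1;
        return 0;
    }

    int main(void)
    {
        int ok  = sk_check_header_template(4096, sk_lrh_len(4096));  /* 0: passes */
        int bad = sk_check_header_template(4000, sk_lrh_len(4000));  /* -1: not a block multiple */

        printf("%d %d\n", ok, bad);
        return 0;
    }
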
913 static int set_txreq_header(struct user_sdma_request *req,
916 struct hfi1_user_sdma_pkt_q *pq = req->pq;
924 memcpy(hdr, &req->hdr, sizeof(*hdr));
941 if (unlikely(req->seqnum == 2)) {
949 req->hdr.pbc[0] = hdr->pbc[0];
950 req->hdr.lrh[2] = hdr->lrh[2];
958 if (unlikely(!req->seqnum)) {
959 ret = check_header_template(req, hdr, lrhlen, datalen);
967 (req_opcode(req->info.ctrl) == EXPECTED),
968 req->seqnum));
975 hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
977 if (req_opcode(req->info.ctrl) == EXPECTED) {
978 tidval = req->tids[req->tididx];
983 if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
985 req->tidoffset = 0;
990 if (++req->tididx > req->n_tids - 1 ||
991 !req->tids[req->tididx]) {
994 tidval = req->tids[req->tididx];
1013 pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx,
1014 req->tidoffset, req->tidoffset >> omfactor,
1017 req->tidoffset >> omfactor);
1023 req->info.comp_idx, hdr, tidval);
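
For expected requests, set_txreq_header() advances to the next TID once tidoffset reaches the current TID's length, then writes the offset into KDETH scaled by the OM factor (4-byte or 64-byte units, chosen by the size of the TID). A sketch of that bookkeeping; the 128 KiB small-mode limit and the 2/6-bit shifts follow the usual KDETH OM convention and are assumed, not taken from this listing.

    #include <stdint.h>
    #include <stdio.h>

    #define OM_SMALL_SHIFT 2u             /* KDETH offset counted in 4-byte units */
    #define OM_LARGE_SHIFT 6u             /* KDETH offset counted in 64-byte units */
    #define OM_SMALL_MAX   (128u * 1024)  /* assumed limit of the small offset mode */

    struct sk_tid_state {
        const uint32_t *tidlens;          /* per-TID length in bytes */
        uint16_t n_tids, tididx;
        uint32_t tidoffset;
    };

    /* Step past an exhausted TID, then return the OM-scaled offset field. */
    static uint32_t sk_offset_field(struct sk_tid_state *s)
    {
        unsigned shift;

        if (s->tidoffset == s->tidlens[s->tididx]) {
            s->tidoffset = 0;
            if (++s->tididx >= s->n_tids)
                return 0;                 /* no TIDs left; the driver errors out here */
        }
        shift = s->tidlens[s->tididx] >= OM_SMALL_MAX ? OM_LARGE_SHIFT
                                                      : OM_SMALL_SHIFT;
        return s->tidoffset >> shift;
    }

    int main(void)
    {
        const uint32_t tidlens[] = { 8192, 262144 };
        struct sk_tid_state s = { .tidlens = tidlens, .n_tids = 2, .tidoffset = 4096 };
        uint32_t field;

        printf("offset field = %u\n", sk_offset_field(&s));   /* 4096 >> 2 = 1024 */

        s.tidoffset = 8192;                                    /* first TID exhausted */
        field = sk_offset_field(&s);                           /* large TID: 64-byte units */
        printf("offset field = %u, tididx = %u\n", field, s.tididx);
        return 0;
    }
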
1027 static int set_txreq_header_ahg(struct user_sdma_request *req,
1033 struct hfi1_user_sdma_pkt_q *pq = req->pq;
1034 struct hfi1_pkt_header *hdr = &req->hdr;
1056 val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
1070 (__force u16)cpu_to_le16(req->koffset & 0xffff));
1074 (__force u16)cpu_to_le16(req->koffset >> 16));
1077 if (req_opcode(req->info.ctrl) == EXPECTED) {
1080 tidval = req->tids[req->tididx];
1086 if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
1088 req->tidoffset = 0;
1093 if (++req->tididx > req->n_tids - 1 ||
1094 !req->tids[req->tididx])
1096 tidval = req->tids[req->tididx];
1106 ((req->tidoffset >> omfactor)
1133 req->info.comp_idx, req->sde->this_idx,
1134 req->ahg_idx, ahg, idx, tidval);
1137 datalen, req->ahg_idx, idx,
1138 ahg, sizeof(req->hdr),
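
The AHG variant does not rewrite the whole header for every packet; it sends 16-bit deltas instead: the PSN becomes the template PSN plus the packet's sequence number, masked to the PSN width, and the 32-bit KDETH offset is delivered as a low and a high 16-bit update. A sketch of those two deltas; the 24-bit and 31-bit (extended) PSN masks are the standard values and are assumed here rather than shown in the listing.

    #include <stdint.h>
    #include <stdio.h>

    #define PSN_MASK      0xffffffu    /* 24-bit PSN */
    #define PSN_MASK_EXT  0x7fffffffu  /* 31-bit extended PSN */

    struct sk_ahg_update { uint16_t lo, hi; };  /* two 16-bit AHG fields */

    static uint32_t sk_next_psn(uint32_t template_psn, uint16_t seqnum, int ext)
    {
        return (template_psn + seqnum) & (ext ? PSN_MASK_EXT : PSN_MASK);
    }

    static struct sk_ahg_update sk_koffset_update(uint32_t koffset)
    {
        struct sk_ahg_update u = {
            .lo = (uint16_t)(koffset & 0xffff),
            .hi = (uint16_t)(koffset >> 16),
        };
        return u;
    }

    int main(void)
    {
        struct sk_ahg_update u = sk_koffset_update(0x12345);

        /* PSN wraps at the 24-bit boundary: 0xfffffe + 5 -> 3. */
        printf("psn=%u koffset lo=0x%04x hi=0x%04x\n",
               sk_next_psn(0xfffffe, 5, 0), u.lo, u.hi);
        return 0;
    }
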
1158 struct user_sdma_request *req;
1163 if (!tx->req)
1166 req = tx->req;
1167 pq = req->pq;
1168 cq = req->cq;
1171 SDMA_DBG(req, "SDMA completion with error %d",
1173 WRITE_ONCE(req->has_error, 1);
1177 req->seqcomp = tx->seqnum;
1181 if (req->seqcomp != req->info.npkts - 1)
1184 user_sdma_free_request(req);
1185 set_comp_state(pq, cq, req->info.comp_idx, state, status);
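
The completion callback records the completed sequence number, latches has_error on a failed descriptor, and defers freeing the request and posting the completion entry until the final packet (npkts - 1) has completed. A toy version of that ordering; sk_txreq_cb() stands in for the driver's callback and the free step is just a flag.

    #include <stdint.h>
    #include <stdio.h>

    struct sk_comp { uint16_t npkts, seqcomp; int freed; };

    static void sk_txreq_cb(struct sk_comp *c, uint16_t seqnum, int status)
    {
        if (status < 0)
            fprintf(stderr, "SDMA completion with error %d\n", status);

        c->seqcomp = seqnum;
        if (c->seqcomp != c->npkts - 1)
            return;                 /* earlier packets: nothing more to do yet */

        c->freed = 1;               /* stands in for user_sdma_free_request() + set_comp_state() */
    }

    int main(void)
    {
        struct sk_comp c = { .npkts = 3 };

        for (uint16_t s = 0; s < c.npkts; s++)
            sk_txreq_cb(&c, s, 0);
        printf("freed=%d after seqcomp=%u\n", c.freed, c.seqcomp);
        return 0;
    }
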
1195 static void user_sdma_free_request(struct user_sdma_request *req)
1197 if (!list_empty(&req->txps)) {
1200 list_for_each_entry_safe(t, p, &req->txps, list) {
1204 sdma_txclean(req->pq->dd, t);
1205 kmem_cache_free(req->pq->txreq_cache, tx);
1209 kfree(req->tids);
1210 clear_bit(req->info.comp_idx, req->pq->req_in_use);