Lines Matching defs:pdu
81 void *pdu;
127 void *pdu;
213 return req->pdu;
218 /* use the pdu space in the back for the data pdu */
219 return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) -
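
These matches come from the Linux NVMe/TCP host driver (the nvme_tcp_* symbols point at drivers/nvme/host/tcp.c). The accessors at 213-219 expose a space-saving trick: each request carries a single PDU allocation, and the H2CData PDU is laid out at the tail of the command PDU's buffer, which is safe because the command PDU is already on the wire by the time an R2T makes a data PDU necessary. A minimal userspace sketch of that pointer arithmetic, with simplified struct layouts (the real ones live in include/linux/nvme-tcp.h):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the real PDU types. */
    struct nvme_tcp_hdr { uint8_t type, flags, hlen, pdo; uint32_t plen; };
    struct nvme_tcp_cmd_pdu { struct nvme_tcp_hdr hdr; uint8_t cmd[64]; };
    struct nvme_tcp_data_pdu {
        struct nvme_tcp_hdr hdr;
        uint16_t command_id, ttag;
        uint32_t data_offset, data_length;
        uint8_t rsvd[4];
    };

    int main(void)
    {
        /* One allocation per request backs both PDUs. */
        char *pdu = calloc(1, sizeof(struct nvme_tcp_cmd_pdu));
        if (!pdu)
            return 1;

        /* "use the pdu space in the back for the data pdu": the overlap
         * is harmless since the cmd PDU is sent before this is needed. */
        struct nvme_tcp_data_pdu *data = (void *)(pdu +
            sizeof(struct nvme_tcp_cmd_pdu) -
            sizeof(struct nvme_tcp_data_pdu));

        printf("data pdu starts %ld bytes into the cmd pdu buffer\n",
               (long)((char *)data - pdu));
        free(pdu);
        return 0;
    }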
410 void *pdu, size_t len)
414 sg_init_one(&sg, pdu, len);
415 ahash_request_set_crypt(hash, &sg, pdu + len, len);
420 void *pdu, size_t pdu_len)
422 struct nvme_tcp_hdr *hdr = pdu;
433 recv_digest = *(__le32 *)(pdu + hdr->hlen);
434 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
435 exp_digest = *(__le32 *)(pdu + hdr->hlen);
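
Lines 410-435 implement the header digest. nvme_tcp_hdgst() hashes len bytes of the PDU and writes the 4-byte result directly behind them, and the verify path at 433-435 saves the received digest, recomputes in place over hdr->hlen bytes, then compares. The driver does this through the crypto ahash API with a crc32c transform; the sketch below substitutes a plain bitwise CRC32C (a hedged stand-in, not the driver's code) to show the digest placement and the compare:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Bitwise CRC32C (Castagnoli), standing in for the "crc32c" ahash. */
    static uint32_t crc32c(const void *buf, size_t len)
    {
        const uint8_t *p = buf;
        uint32_t crc = ~0u;

        while (len--) {
            crc ^= *p++;
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ (0x82f63b78 & -(crc & 1));
        }
        return ~crc;
    }

    /* The digest lands immediately after the header, at pdu + hlen.
     * On the wire it is __le32; native order is assumed here. */
    static void hdgst_append(void *pdu, size_t hlen)
    {
        uint32_t d = crc32c(pdu, hlen);

        memcpy((char *)pdu + hlen, &d, sizeof(d));
    }

    static int hdgst_verify(const void *pdu, size_t hlen)
    {
        uint32_t recv, exp = crc32c(pdu, hlen);

        memcpy(&recv, (const char *)pdu + hlen, sizeof(recv));
        return recv == exp ? 0 : -1;    /* -EPROTO in the driver */
    }

    int main(void)
    {
        uint8_t pdu[24 + 4] = { 0x04, 0x01, 24 };    /* type, F_HDGST, hlen */

        hdgst_append(pdu, 24);
        printf("intact: %d\n", hdgst_verify(pdu, 24) == 0);
        pdu[5] ^= 1;                                 /* corrupt the header */
        printf("corrupted: %d\n", hdgst_verify(pdu, 24) == 0);
        return 0;
    }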
446 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
448 struct nvme_tcp_hdr *hdr = pdu;
471 page_frag_free(req->pdu);
480 struct nvme_tcp_cmd_pdu *pdu;
485 req->pdu = page_frag_alloc(&queue->pf_cache,
488 if (!req->pdu)
491 pdu = req->pdu;
494 nvme_req(rq)->cmd = &pdu->cmd;
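
471-494 manage the per-request PDU: it is carved from a page-frag cache, sized for the command PDU plus an optional header digest, and nvme_req(rq)->cmd is pointed at the nvme_command embedded in the PDU, so the SQE is assembled in wire format with no copy at submission time. A sketch of the same shape, with calloc standing in for page_frag_alloc (helper name and error style are illustrative):

    #include <stdint.h>
    #include <stdlib.h>

    #define NVME_TCP_DIGEST_LENGTH 4

    struct nvme_tcp_hdr { uint8_t type, flags, hlen, pdo; uint32_t plen; };
    struct nvme_command { uint8_t bytes[64]; };    /* opaque for the sketch */
    struct nvme_tcp_cmd_pdu { struct nvme_tcp_hdr hdr; struct nvme_command cmd; };
    struct nvme_tcp_request { void *pdu; struct nvme_command *cmd; };

    /* calloc stands in for page_frag_alloc(&queue->pf_cache, ..., __GFP_ZERO). */
    static int init_request(struct nvme_tcp_request *req, int hdr_digest)
    {
        size_t hdgst = hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
        struct nvme_tcp_cmd_pdu *pdu;

        req->pdu = calloc(1, sizeof(*pdu) + hdgst);   /* digest lives here too */
        if (!req->pdu)
            return -1;                                /* -ENOMEM in the driver */

        pdu = req->pdu;
        /* Build the command in place; sending is then just pushing
         * this buffer (plus digest) into the socket. */
        req->cmd = &pdu->cmd;
        return 0;
    }

    int main(void)
    {
        struct nvme_tcp_request req;

        if (init_request(&req, 1))
            return 1;
        free(req.pdu);    /* page_frag_free(req->pdu) in the driver */
        return 0;
    }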
572 struct nvme_tcp_data_pdu *pdu)
576 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
580 pdu->command_id, nvme_tcp_queue_id(queue));
591 queue->data_remaining = le32_to_cpu(pdu->data_length);
593 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
594 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
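
The C2HData handler (572-594) looks the request up by command_id, records the remaining byte count from the PDU, and enforces a protocol invariant: DATA_SUCCESS elides the response capsule, so it is only legal on a PDU that also carries DATA_LAST. A hedged sketch of just that check (struct and helper are simplified, flag values as in include/linux/nvme-tcp.h):

    #include <stdint.h>
    #include <stdio.h>

    #define NVME_TCP_F_DATA_LAST    (1 << 2)
    #define NVME_TCP_F_DATA_SUCCESS (1 << 3)

    struct c2h_data { uint8_t flags; uint16_t command_id; uint32_t data_length; };

    static int handle_c2h_data(const struct c2h_data *pdu, uint32_t *remaining)
    {
        *remaining = pdu->data_length;    /* le32_to_cpu() in the driver */

        /* Inline completion promised, but more data PDUs to come:
         * the driver treats this as fatal and errors the queue. */
        if ((pdu->flags & NVME_TCP_F_DATA_SUCCESS) &&
            !(pdu->flags & NVME_TCP_F_DATA_LAST))
            return -1;                    /* -EPROTO */
        return 0;
    }

    int main(void)
    {
        struct c2h_data bad = { .flags = NVME_TCP_F_DATA_SUCCESS };
        uint32_t rem;

        printf("%d\n", handle_c2h_data(&bad, &rem));    /* -1 */
        return 0;
    }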
606 struct nvme_tcp_rsp_pdu *pdu)
608 struct nvme_completion *cqe = &pdu->cqe;
662 struct nvme_tcp_r2t_pdu *pdu)
666 u32 r2t_length = le32_to_cpu(pdu->r2t_length);
667 u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
669 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
673 pdu->command_id, nvme_tcp_queue_id(queue));
702 req->ttag = pdu->ttag;
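
The R2T handler (662-702) pulls the transfer window out of the PDU, finds the request, and stashes the target's transfer tag so the subsequent H2CData PDU can echo it. The exact bounds checks in the driver differ in detail; this is a sketch of the idea (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct r2t { uint16_t command_id, ttag; uint32_t r2t_offset, r2t_length; };

    struct req_state {
        uint32_t data_len;     /* total payload for this command */
        uint32_t data_sent;    /* bytes already shipped */
        uint16_t ttag;         /* echoed in the H2CData PDU header */
    };

    static int handle_r2t(struct req_state *req, const struct r2t *pdu)
    {
        /* Reject empty or out-of-range windows before trusting them. */
        if (!pdu->r2t_length ||
            pdu->r2t_offset + pdu->r2t_length > req->data_len ||
            pdu->r2t_offset < req->data_sent)
            return -1;    /* -EPROTO */

        req->ttag = pdu->ttag;
        return 0;
    }

    int main(void)
    {
        struct req_state req = { .data_len = 8192 };
        struct r2t r2t = { .ttag = 7, .r2t_offset = 0, .r2t_length = 4096 };

        printf("%d ttag=%u\n", handle_r2t(&req, &r2t), req.ttag);
        return 0;
    }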
714 char *pdu = queue->pdu;
719 &pdu[queue->pdu_offset], rcv_len);
730 hdr = queue->pdu;
732 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
739 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
746 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
749 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
752 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
755 "unsupported pdu type (%d)\n", hdr->type);
771 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
773 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
827 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
842 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
860 pdu->command_id);
871 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
873 pdu->command_id);
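
771-873 finish the C2HData path: the request is found again by command_id while payload is copied, and once the final byte has been consumed, a set DATA_SUCCESS flag lets the driver complete the request inline instead of waiting for a response capsule, with the data digest (when DDGST is negotiated) checked first. A toy decision function for that tail step (illustrative, not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    #define NVME_TCP_F_DDGST        (1 << 1)
    #define NVME_TCP_F_DATA_SUCCESS (1 << 3)

    /* After the last C2HData byte: a pending data digest must be
     * received and verified before any inline completion happens. */
    static bool completes_inline(unsigned flags, bool ddgst_verified)
    {
        if ((flags & NVME_TCP_F_DDGST) && !ddgst_verified)
            return false;    /* nvme_tcp_recv_ddgst() runs first */
        return flags & NVME_TCP_F_DATA_SUCCESS;
    }

    int main(void)
    {
        printf("%d\n", completes_inline(NVME_TCP_F_DATA_SUCCESS, false));
        printf("%d\n", completes_inline(NVME_TCP_F_DDGST |
                                        NVME_TCP_F_DATA_SUCCESS, false));
        return 0;
    }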
1059 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
1064 int len = sizeof(*pdu) + hdgst - req->offset;
1073 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1075 bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
1100 struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
1104 int len = sizeof(*pdu) - req->offset + hdgst;
1108 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1113 bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
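
The send paths (1059-1113) have to survive short writes: req->offset records how many PDU bytes the socket already accepted, so the residue is sizeof(*pdu) + hdgst - req->offset and the bvec is aimed at pdu + req->offset. The header digest is computed once, before the first attempt, since its bytes are part of what may need resending. The residue arithmetic in isolation (helper name is illustrative):

    #include <stddef.h>
    #include <stdio.h>

    #define NVME_TCP_DIGEST_LENGTH 4

    /* Bytes of the PDU (plus optional header digest) still owed to the
     * socket after a partial send consumed req_offset bytes. */
    static size_t pdu_bytes_left(size_t pdu_size, int hdr_digest,
                                 size_t req_offset)
    {
        size_t hdgst = hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;

        return pdu_size + hdgst - req_offset;
    }

    int main(void)
    {
        /* 72-byte cmd PDU with digest, 30 bytes already sent. */
        printf("%zu\n", pdu_bytes_left(72, 1, 30));    /* 46 */
        return 0;
    }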
1302 page_frag_free(async->pdu);
1311 async->pdu = page_frag_alloc(&queue->pf_cache,
1314 if (!async->pdu)
1344 kfree(queue->pdu);
1403 pr_err("queue %d: bad pdu length returned %d\n",
1610 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1611 if (!queue->pdu) {
1638 kfree(queue->pdu);
1728 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1731 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
2238 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2239 struct nvme_command *cmd = &pdu->cmd;
2242 memset(pdu, 0, sizeof(*pdu));
2243 pdu->hdr.type = nvme_tcp_cmd;
2245 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2246 pdu->hdr.hlen = sizeof(*pdu);
2247 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
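
2238-2247 build the PDU for the async event (AER) command. There is no payload, so plen is just the header length plus the optional header digest, and pdo stays zero. A sketch under the same assumed sizes as above (72-byte cmd PDU, 4-byte digest):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NVME_TCP_F_HDGST       (1 << 0)
    #define NVME_TCP_DIGEST_LENGTH 4
    #define NVME_TCP_CMD_TYPE      0x4    /* nvme_tcp_cmd */

    struct nvme_tcp_hdr { uint8_t type, flags, hlen, pdo; uint32_t plen; };
    struct nvme_tcp_cmd_pdu { struct nvme_tcp_hdr hdr; uint8_t cmd[64]; };

    static void setup_async_pdu(struct nvme_tcp_cmd_pdu *pdu, int hdr_digest)
    {
        uint32_t hdgst = hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;

        memset(pdu, 0, sizeof(*pdu));
        pdu->hdr.type = NVME_TCP_CMD_TYPE;
        if (hdr_digest)
            pdu->hdr.flags |= NVME_TCP_F_HDGST;
        pdu->hdr.hlen = sizeof(*pdu);
        pdu->hdr.plen = pdu->hdr.hlen + hdgst;    /* cpu_to_le32() in-kernel */
    }

    int main(void)
    {
        struct nvme_tcp_cmd_pdu pdu;

        setup_async_pdu(&pdu, 1);
        printf("plen=%u\n", (unsigned)pdu.hdr.plen);    /* 76 */
        return 0;
    }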
2275 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2276 u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
2281 nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
2314 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2315 struct nvme_command *c = &pdu->cmd;
2334 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2360 pdu->hdr.type = nvme_tcp_cmd;
2361 pdu->hdr.flags = 0;
2363 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2365 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2368 pdu->hdr.hlen = sizeof(*pdu);
2369 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2370 pdu->hdr.plen =
2371 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
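
2334-2371 assemble a regular command PDU header: the HDGST/DDGST flags mirror the negotiated digests (DDGST only matters when in-capsule data is present), hlen is the bare header size, pdo points just past the header digest only when data follows inline, and plen totals header + header digest + payload + data digest. Worked out under assumed sizes (72-byte cmd PDU, 4-byte CRC32C digests; helper name is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define NVME_TCP_F_HDGST       (1 << 0)
    #define NVME_TCP_F_DDGST       (1 << 1)
    #define NVME_TCP_DIGEST_LENGTH 4

    static void fill_lengths(uint8_t *flags, uint8_t *hlen, uint8_t *pdo,
                             uint32_t *plen, uint32_t pdu_len,
                             int hdr_digest, int data_digest)
    {
        uint32_t hdgst = hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
        /* Data digest only exists when there is in-capsule data. */
        uint32_t ddgst = (data_digest && pdu_len) ? NVME_TCP_DIGEST_LENGTH : 0;

        *flags = 0;
        if (hdr_digest)
            *flags |= NVME_TCP_F_HDGST;
        if (ddgst)
            *flags |= NVME_TCP_F_DDGST;
        *hlen = 72;    /* sizeof(struct nvme_tcp_cmd_pdu), assumed */
        *pdo = pdu_len ? *hlen + hdgst : 0;
        *plen = *hlen + hdgst + pdu_len + ddgst;    /* cpu_to_le32() in-kernel */
    }

    int main(void)
    {
        uint8_t flags, hlen, pdo;
        uint32_t plen;

        fill_lengths(&flags, &hlen, &pdo, &plen, 4096, 1, 1);
        printf("pdo=%u plen=%u\n", (unsigned)pdo, (unsigned)plen);
        /* pdo=76 plen=4176 */
        return 0;
    }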