Lines Matching defs:pdu

Identifier matches for "pdu" in the Linux NVMe/TCP host driver (drivers/nvme/host/tcp.c); each entry is the file's own line number followed by the matching source line.

80 void *pdu;
123 void *pdu;
392 void *pdu, size_t len)
396 sg_init_one(&sg, pdu, len);
397 ahash_request_set_crypt(hash, &sg, pdu + len, len);
402 void *pdu, size_t pdu_len)
404 struct nvme_tcp_hdr *hdr = pdu;
415 recv_digest = *(__le32 *)(pdu + hdr->hlen);
416 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
417 exp_digest = *(__le32 *)(pdu + hdr->hlen);
428 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
430 struct nvme_tcp_hdr *hdr = pdu;
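The digest helpers at lines 392-430 compute a CRC32C over the PDU header and compare it with the __le32 that trails the header on the wire. nvme_tcp_hdgst() writes its result in place at pdu + len, which is why line 415 saves the received digest before line 416 overwrites that slot with the expected one. A minimal userspace sketch of the same check, with crc32c_sw() standing in for the kernel's CRC32C ahash transform:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC32C (Castagnoli); the driver uses the "crc32c" crypto API. */
static uint32_t crc32c_sw(const void *buf, size_t len)
{
        const uint8_t *p = buf;
        uint32_t crc = ~0u;

        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1));
        }
        return ~crc;
}

/* Save the received digest, recompute over the header, compare
 * (mirrors the logic of lines 415-417). */
static int verify_hdgst(const void *pdu, uint8_t hlen)
{
        uint32_t recv_digest;

        memcpy(&recv_digest, (const char *)pdu + hlen, sizeof(recv_digest));
        return le32toh(recv_digest) == crc32c_sw(pdu, hlen) ? 0 : -1;
}

int main(void)
{
        uint8_t buf[24 + 4] = { 0x05, 0x00, 0x18 }; /* fake 24-byte header */
        uint32_t d = htole32(crc32c_sw(buf, 24));

        memcpy(buf + 24, &d, sizeof(d)); /* digest trails the header */
        printf("hdgst ok: %d\n", verify_hdgst(buf, 24) == 0);
        return 0;
}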
453 page_frag_free(req->pdu);
466 req->pdu = page_frag_alloc(&queue->pf_cache,
469 if (!req->pdu)
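Lines 453-469 are the per-request PDU buffer lifecycle: each request's command PDU is carved out of the queue's page-fragment cache, sized for the PDU plus an optional 4-byte header digest, and released with page_frag_free() on teardown. A sketch of that pairing, assuming the driver's types (the function names here are illustrative; the real driver does this in its blk-mq .init_request/.exit_request callbacks):

static int init_request_pdu(struct nvme_tcp_queue *queue,
                            struct nvme_tcp_request *req)
{
        u8 hdgst = nvme_tcp_hdgst_len(queue); /* 4 when HDGST is negotiated */

        req->pdu = page_frag_alloc(&queue->pf_cache,
                                   sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
                                   GFP_KERNEL | __GFP_ZERO);
        if (!req->pdu)
                return -ENOMEM;
        return 0;
}

static void exit_request_pdu(struct nvme_tcp_request *req)
{
        page_frag_free(req->pdu); /* pairs with the alloc at line 466 */
}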
546 struct nvme_tcp_data_pdu *pdu)
550 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
554 pdu->command_id, nvme_tcp_queue_id(queue));
565 queue->data_remaining = le32_to_cpu(pdu->data_length);
567 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
568 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
580 struct nvme_tcp_rsp_pdu *pdu)
582 struct nvme_completion *cqe = &pdu->cqe;
602 struct nvme_tcp_r2t_pdu *pdu)
604 struct nvme_tcp_data_pdu *data = req->pdu;
610 req->pdu_len = le32_to_cpu(pdu->r2t_length);
628 if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
631 rq->tag, le32_to_cpu(pdu->r2t_offset),
647 data->ttag = pdu->ttag;
649 data->data_offset = pdu->r2t_offset;
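Lines 602-649 turn a received R2T into the outgoing H2C data PDU: the controller's transfer tag and offset are echoed back unchanged (both are already in wire byte order, so no swap is needed), while r2t_length is converted to CPU order at line 610 to drive the host's send loop. A sketch of the field mapping, using the PDU layouts from include/linux/nvme-tcp.h:

static void setup_h2c_from_r2t(struct nvme_tcp_data_pdu *data,
                               const struct nvme_tcp_r2t_pdu *pdu)
{
        data->hdr.type = nvme_tcp_h2c_data;
        data->command_id = pdu->command_id;
        data->ttag = pdu->ttag;              /* echoed verbatim, line 647 */
        data->data_offset = pdu->r2t_offset; /* wire-order copy, line 649 */
        data->data_length = pdu->r2t_length;
}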
655 struct nvme_tcp_r2t_pdu *pdu)
661 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
665 pdu->command_id, nvme_tcp_queue_id(queue));
670 ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
686 char *pdu = queue->pdu;
691 &pdu[queue->pdu_offset], rcv_len);
702 hdr = queue->pdu;
704 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
711 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
718 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
721 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
724 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
727 "unsupported pdu type (%d)\n", hdr->type);
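Lines 686-727 accumulate the PDU header into queue->pdu across possibly-partial socket reads, verify the header digest, and then dispatch on the header's type field. Reconstructed from lines 718-727, the dispatch reduces to:

switch (hdr->type) {
case nvme_tcp_c2h_data:
        return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
case nvme_tcp_rsp:
        return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
case nvme_tcp_r2t:
        return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
default:
        dev_err(queue->ctrl->ctrl.device,
                "unsupported pdu type (%d)\n", hdr->type);
        return -EINVAL;
}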
743 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
745 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
799 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
813 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
837 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
839 pdu->command_id);
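The NVME_TCP_F_DATA_SUCCESS tests at lines 799 and 837 implement the transport's completion shortcut: when the controller sets this flag on the last C2H data PDU, no separate response capsule follows, so the host completes the request as soon as the data (and, when negotiated, the data digest) has been consumed. Roughly:

if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS)
        nvme_tcp_end_request(rq, NVME_SC_SUCCESS); /* no rsp PDU will arrive */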
1012 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
1015 int len = sizeof(*pdu) + hdgst - req->offset;
1025 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1027 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
1028 offset_in_page(pdu) + req->offset, len, flags);
1052 struct nvme_tcp_data_pdu *pdu = req->pdu;
1054 int len = sizeof(*pdu) - req->offset + hdgst;
1058 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1060 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
1061 offset_in_page(pdu) + req->offset, len,
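Lines 1012-1061 are the command- and data-PDU transmit paths. The header digest is appended in place only once (while req->offset is still 0), and kernel_sendpage() may accept fewer bytes than requested, so req->offset records progress and len = sizeof(*pdu) + hdgst - req->offset lets the send loop resume mid-PDU. A condensed sketch, with the flag selection simplified (the driver chooses MSG_MORE/MSG_EOR per PDU):

u8 hdgst = nvme_tcp_hdgst_len(queue);
int len = sizeof(*pdu) + hdgst - req->offset;
int ret;

if (queue->hdr_digest && !req->offset)
        nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
                      offset_in_page(pdu) + req->offset, len,
                      MSG_DONTWAIT | MSG_EOR);
if (ret > 0)
        req->offset += ret; /* short send: resume from here next pass */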
1243 page_frag_free(async->pdu);
1252 async->pdu = page_frag_alloc(&queue->pf_cache,
1255 if (!async->pdu)
1274 kfree(queue->pdu);
1331 pr_err("queue %d: bad pdu length returned %d\n",
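Line 1331 belongs to the connection handshake: after sending an ICReq, the host sanity-checks the returned ICResp, including that plen matches the fixed 128-byte ICResp size. Reconstructed from the driver's init path (the goto label is illustrative):

if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
        pr_err("queue %d: bad pdu length returned %d\n",
               nvme_tcp_queue_id(queue), icresp->hdr.plen);
        goto free_icresp;
}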
1517 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1518 if (!queue->pdu) {
1545 kfree(queue->pdu);
1672 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1675 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
2239 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2240 struct nvme_command *cmd = &pdu->cmd;
2243 memset(pdu, 0, sizeof(*pdu));
2244 pdu->hdr.type = nvme_tcp_cmd;
2246 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2247 pdu->hdr.hlen = sizeof(*pdu);
2248 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
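Lines 2239-2248 prepare the single preallocated async-event (AER) command PDU; AERs carry no data, so plen is just the header plus the optional header digest. The surrounding driver code fills in the NVMe command itself roughly as follows (opcode and reserved command id per the NVMe core):

u8 hdgst = nvme_tcp_hdgst_len(queue);

memset(pdu, 0, sizeof(*pdu));
pdu->hdr.type = nvme_tcp_cmd;
if (queue->hdr_digest)
        pdu->hdr.flags |= NVME_TCP_F_HDGST;
pdu->hdr.hlen = sizeof(*pdu);
pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

cmd->common.opcode = nvme_admin_async_event;
cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; /* reserved AER command id */
cmd->common.flags |= NVME_CMD_SGL_METABUF;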
2280 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2284 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
2316 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2317 struct nvme_command *c = &pdu->cmd;
2336 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2341 ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
2360 pdu->hdr.type = nvme_tcp_cmd;
2361 pdu->hdr.flags = 0;
2363 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2365 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2368 pdu->hdr.hlen = sizeof(*pdu);
2369 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2370 pdu->hdr.plen =
2371 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
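Lines 2360-2371 finish the command PDU header. A worked example of the length arithmetic, assuming both digests were negotiated and a 1024-byte in-capsule write (struct sizes per include/linux/nvme-tcp.h):

/* hlen = sizeof(struct nvme_tcp_cmd_pdu) = 8 (common hdr) + 64 (nvme cmd) = 72
 * pdo  = hlen + hdgst = 72 + 4 = 76  (data starts after the header digest;
 *        0 when the command carries no in-capsule data, line 2369)
 * plen = hlen + hdgst + pdu_len + ddgst = 72 + 4 + 1024 + 4 = 1104
 */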