Lines Matching refs:queue
81 struct nvme_tcp_queue *queue;
176 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
183 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
185 return queue - queue->ctrl->queues;
188 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
190 u32 queue_idx = nvme_tcp_queue_id(queue);
193 return queue->ctrl->admin_tag_set.tags[queue_idx];
194 return queue->ctrl->tag_set.tags[queue_idx - 1];
197 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
199 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
202 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
204 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
207 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
209 return queue->cmnd_capsule_len - sizeof(struct nvme_command);
214 return req == &req->queue->ctrl->async_req;
227 req->data_len <= nvme_tcp_inline_data_size(req->queue);
303 static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
307 /* drain the send queue as much as we can... */
309 ret = nvme_tcp_try_send(queue);
313 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
315 return !list_empty(&queue->send_list) ||
316 !llist_empty(&queue->req_list);
322 struct nvme_tcp_queue *queue = req->queue;
325 empty = llist_add(&req->lentry, &queue->req_list) &&
326 list_empty(&queue->send_list) && !queue->request;
330 * directly, otherwise queue io_work. Also, only do that if we
333 if (queue->io_cpu == raw_smp_processor_id() &&
334 sync && empty && mutex_trylock(&queue->send_mutex)) {
335 nvme_tcp_send_all(queue);
336 mutex_unlock(&queue->send_mutex);
339 if (last && nvme_tcp_queue_more(queue))
340 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
343 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
348 for (node = llist_del_all(&queue->req_list); node; node = node->next) {
350 list_add(&req->entry, &queue->send_list);
355 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
359 req = list_first_entry_or_null(&queue->send_list,
362 nvme_tcp_process_req_list(queue);
363 req = list_first_entry_or_null(&queue->send_list,
401 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
409 dev_err(queue->ctrl->ctrl.device,
410 "queue %d: header digest flag is cleared\n",
411 nvme_tcp_queue_id(queue));
416 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
419 dev_err(queue->ctrl->ctrl.device,
428 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
431 u8 digest_len = nvme_tcp_hdgst_len(queue);
438 dev_err(queue->ctrl->ctrl.device,
439 "queue %d: data digest flag is cleared\n",
440 nvme_tcp_queue_id(queue));
443 crypto_ahash_init(queue->rcv_hash);
463 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
464 u8 hdgst = nvme_tcp_hdgst_len(queue);
466 req->pdu = page_frag_alloc(&queue->pf_cache,
472 req->queue = queue;
482 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
484 hctx->driver_data = queue;
492 struct nvme_tcp_queue *queue = &ctrl->queues[0];
494 hctx->driver_data = queue;
499 nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
501 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
502 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
506 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
508 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
509 nvme_tcp_hdgst_len(queue);
510 queue->pdu_offset = 0;
511 queue->data_remaining = -1;
512 queue->ddgst_remaining = 0;
524 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
529 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
531 dev_err(queue->ctrl->ctrl.device,
532 "got bad cqe.command_id %#x on queue %d\n",
533 cqe->command_id, nvme_tcp_queue_id(queue));
534 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
540 queue->nr_cqe++;
545 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
550 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
552 dev_err(queue->ctrl->ctrl.device,
553 "got bad c2hdata.command_id %#x on queue %d\n",
554 pdu->command_id, nvme_tcp_queue_id(queue));
559 dev_err(queue->ctrl->ctrl.device,
560 "queue %d tag %#x unexpected data\n",
561 nvme_tcp_queue_id(queue), rq->tag);
565 queue->data_remaining = le32_to_cpu(pdu->data_length);
569 dev_err(queue->ctrl->ctrl.device,
570 "queue %d tag %#x SUCCESS set but not last PDU\n",
571 nvme_tcp_queue_id(queue), rq->tag);
572 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
579 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
587 * survive any kind of queue freeze and often don't respond to
591 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
593 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
596 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
605 struct nvme_tcp_queue *queue = req->queue;
607 u8 hdgst = nvme_tcp_hdgst_len(queue);
608 u8 ddgst = nvme_tcp_ddgst_len(queue);
614 dev_err(queue->ctrl->ctrl.device,
621 dev_err(queue->ctrl->ctrl.device,
629 dev_err(queue->ctrl->ctrl.device,
639 if (queue->hdr_digest)
641 if (queue->data_digest)
654 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
661 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
663 dev_err(queue->ctrl->ctrl.device,
664 "got bad r2t.command_id %#x on queue %d\n",
665 pdu->command_id, nvme_tcp_queue_id(queue));
682 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
686 char *pdu = queue->pdu;
687 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
691 &pdu[queue->pdu_offset], rcv_len);
695 queue->pdu_remaining -= rcv_len;
696 queue->pdu_offset += rcv_len;
699 if (queue->pdu_remaining)
702 hdr = queue->pdu;
703 if (queue->hdr_digest) {
704 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
710 if (queue->data_digest) {
711 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
718 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
720 nvme_tcp_init_recv_ctx(queue);
721 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
723 nvme_tcp_init_recv_ctx(queue);
724 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
726 dev_err(queue->ctrl->ctrl.device,
740 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
743 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
745 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
751 recv_len = min_t(size_t, *len, queue->data_remaining);
763 dev_err(queue->ctrl->ctrl.device,
764 "queue %d no space in request %#x",
765 nvme_tcp_queue_id(queue), rq->tag);
766 nvme_tcp_init_recv_ctx(queue);
776 if (queue->data_digest)
778 &req->iter, recv_len, queue->rcv_hash);
783 dev_err(queue->ctrl->ctrl.device,
784 "queue %d failed to copy request %#x data",
785 nvme_tcp_queue_id(queue), rq->tag);
791 queue->data_remaining -= recv_len;
794 if (!queue->data_remaining) {
795 if (queue->data_digest) {
796 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
797 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
801 queue->nr_cqe++;
803 nvme_tcp_init_recv_ctx(queue);
810 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
813 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
814 char *ddgst = (char *)&queue->recv_ddgst;
815 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
816 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
823 queue->ddgst_remaining -= recv_len;
826 if (queue->ddgst_remaining)
829 if (queue->recv_ddgst != queue->exp_ddgst) {
830 dev_err(queue->ctrl->ctrl.device,
832 le32_to_cpu(queue->recv_ddgst),
833 le32_to_cpu(queue->exp_ddgst));
838 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
842 queue->nr_cqe++;
845 nvme_tcp_init_recv_ctx(queue);
852 struct nvme_tcp_queue *queue = desc->arg.data;
857 switch (nvme_tcp_recv_state(queue)) {
859 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
862 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
865 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
871 dev_err(queue->ctrl->ctrl.device,
873 queue->rd_enabled = false;
874 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
884 struct nvme_tcp_queue *queue;
887 queue = sk->sk_user_data;
888 if (likely(queue && queue->rd_enabled) &&
889 !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
890 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
896 struct nvme_tcp_queue *queue;
899 queue = sk->sk_user_data;
900 if (likely(queue && sk_stream_is_writeable(sk))) {
902 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
909 struct nvme_tcp_queue *queue;
912 queue = sk->sk_user_data;
913 if (!queue)
922 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
925 dev_info(queue->ctrl->ctrl.device,
926 "queue %d socket state %d\n",
927 nvme_tcp_queue_id(queue), sk->sk_state);
930 queue->state_change(sk);
935 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
937 queue->request = NULL;
945 nvme_complete_async_event(&req->queue->ctrl->ctrl,
955 struct nvme_tcp_queue *queue = req->queue;
966 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
972 ret = kernel_sendpage(queue->sock, page, offset, len,
975 ret = sock_no_sendpage(queue->sock, page, offset, len,
981 if (queue->data_digest)
982 nvme_tcp_ddgst_update(queue->snd_hash, page,
995 if (queue->data_digest) {
996 nvme_tcp_ddgst_final(queue->snd_hash,
1001 nvme_tcp_done_send_req(queue);
1011 struct nvme_tcp_queue *queue = req->queue;
1014 u8 hdgst = nvme_tcp_hdgst_len(queue);
1019 if (inline_data || nvme_tcp_queue_more(queue))
1024 if (queue->hdr_digest && !req->offset)
1025 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1027 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
1036 if (queue->data_digest)
1037 crypto_ahash_init(queue->snd_hash);
1040 nvme_tcp_done_send_req(queue);
1051 struct nvme_tcp_queue *queue = req->queue;
1053 u8 hdgst = nvme_tcp_hdgst_len(queue);
1057 if (queue->hdr_digest && !req->offset)
1058 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1060 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
1069 if (queue->data_digest)
1070 crypto_ahash_init(queue->snd_hash);
1082 struct nvme_tcp_queue *queue = req->queue;
1091 if (nvme_tcp_queue_more(queue))
1096 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1101 nvme_tcp_done_send_req(queue);
1109 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
1114 if (!queue->request) {
1115 queue->request = nvme_tcp_fetch_request(queue);
1116 if (!queue->request)
1119 req = queue->request;
1147 dev_err(queue->ctrl->ctrl.device,
1149 nvme_tcp_fail_request(queue->request);
1150 nvme_tcp_done_send_req(queue);
1155 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1157 struct socket *sock = queue->sock;
1162 rd_desc.arg.data = queue;
1165 queue->nr_cqe = 0;
1173 struct nvme_tcp_queue *queue =
1181 if (mutex_trylock(&queue->send_mutex)) {
1182 result = nvme_tcp_try_send(queue);
1183 mutex_unlock(&queue->send_mutex);
1190 result = nvme_tcp_try_recv(queue);
1196 if (!pending || !queue->rd_enabled)
1201 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1204 static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1206 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1208 ahash_request_free(queue->rcv_hash);
1209 ahash_request_free(queue->snd_hash);
1213 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1221 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1222 if (!queue->snd_hash)
1224 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1226 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1227 if (!queue->rcv_hash)
1229 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1233 ahash_request_free(queue->snd_hash);
1248 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1250 u8 hdgst = nvme_tcp_hdgst_len(queue);
1252 async->pdu = page_frag_alloc(&queue->pf_cache,
1258 async->queue = &ctrl->queues[0];
1265 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1267 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1270 if (queue->hdr_digest || queue->data_digest)
1271 nvme_tcp_free_crypto(queue);
1273 sock_release(queue->sock);
1274 kfree(queue->pdu);
1275 mutex_destroy(&queue->queue_lock);
1278 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1304 if (queue->hdr_digest)
1306 if (queue->data_digest)
1311 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1318 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1325 pr_err("queue %d: bad type returned %d\n",
1326 nvme_tcp_queue_id(queue), icresp->hdr.type);
1331 pr_err("queue %d: bad pdu length returned %d\n",
1332 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1337 pr_err("queue %d: bad pfv returned %d\n",
1338 nvme_tcp_queue_id(queue), icresp->pfv);
1343 if ((queue->data_digest && !ctrl_ddgst) ||
1344 (!queue->data_digest && ctrl_ddgst)) {
1345 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1346 nvme_tcp_queue_id(queue),
1347 queue->data_digest ? "enabled" : "disabled",
1353 if ((queue->hdr_digest && !ctrl_hdgst) ||
1354 (!queue->hdr_digest && ctrl_hdgst)) {
1355 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1356 nvme_tcp_queue_id(queue),
1357 queue->hdr_digest ? "enabled" : "disabled",
1363 pr_err("queue %d: unsupported cpda returned %d\n",
1364 nvme_tcp_queue_id(queue), icresp->cpda);
1376 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
1378 return nvme_tcp_queue_id(queue) == 0;
1381 static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
1383 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1384 int qid = nvme_tcp_queue_id(queue);
1386 return !nvme_tcp_admin_queue(queue) &&
1390 static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
1392 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1393 int qid = nvme_tcp_queue_id(queue);
1395 return !nvme_tcp_admin_queue(queue) &&
1396 !nvme_tcp_default_queue(queue) &&
1401 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
1403 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1404 int qid = nvme_tcp_queue_id(queue);
1406 return !nvme_tcp_admin_queue(queue) &&
1407 !nvme_tcp_default_queue(queue) &&
1408 !nvme_tcp_read_queue(queue) &&
1414 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1416 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1417 int qid = nvme_tcp_queue_id(queue);
1420 if (nvme_tcp_default_queue(queue))
1422 else if (nvme_tcp_read_queue(queue))
1424 else if (nvme_tcp_poll_queue(queue))
1427 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1434 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1437 mutex_init(&queue->queue_lock);
1438 queue->ctrl = ctrl;
1439 init_llist_head(&queue->req_list);
1440 INIT_LIST_HEAD(&queue->send_list);
1441 mutex_init(&queue->send_mutex);
1442 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1443 queue->queue_size = queue_size;
1446 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
1448 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1452 IPPROTO_TCP, &queue->sock);
1459 nvme_tcp_reclassify_socket(queue->sock);
1462 tcp_sock_set_syncnt(queue->sock->sk, 1);
1465 tcp_sock_set_nodelay(queue->sock->sk);
1468 * Cleanup whatever is sitting in the TCP transmit queue on socket
1472 sock_no_linger(queue->sock->sk);
1475 sock_set_priority(queue->sock->sk, so_priority);
1479 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
1482 queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1484 queue->sock->sk->sk_allocation = GFP_ATOMIC;
1485 nvme_tcp_set_queue_io_cpu(queue);
1486 queue->request = NULL;
1487 queue->data_remaining = 0;
1488 queue->ddgst_remaining = 0;
1489 queue->pdu_remaining = 0;
1490 queue->pdu_offset = 0;
1491 sk_set_memalloc(queue->sock->sk);
1494 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1498 "failed to bind queue %d socket %d\n",
1504 queue->hdr_digest = nctrl->opts->hdr_digest;
1505 queue->data_digest = nctrl->opts->data_digest;
1506 if (queue->hdr_digest || queue->data_digest) {
1507 ret = nvme_tcp_alloc_crypto(queue);
1510 "failed to allocate queue %d crypto\n", qid);
1516 nvme_tcp_hdgst_len(queue);
1517 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1518 if (!queue->pdu) {
1523 dev_dbg(nctrl->device, "connecting queue %d\n",
1524 nvme_tcp_queue_id(queue));
1526 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1534 ret = nvme_tcp_init_connection(queue);
1538 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1543 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1545 kfree(queue->pdu);
1547 if (queue->hdr_digest || queue->data_digest)
1548 nvme_tcp_free_crypto(queue);
1550 sock_release(queue->sock);
1551 queue->sock = NULL;
1553 mutex_destroy(&queue->queue_lock);
1557 static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
1559 struct socket *sock = queue->sock;
1563 sock->sk->sk_data_ready = queue->data_ready;
1564 sock->sk->sk_state_change = queue->state_change;
1565 sock->sk->sk_write_space = queue->write_space;
1569 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1571 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1572 nvme_tcp_restore_sock_ops(queue);
1573 cancel_work_sync(&queue->io_work);
1579 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1581 mutex_lock(&queue->queue_lock);
1582 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1583 __nvme_tcp_stop_queue(queue);
1584 mutex_unlock(&queue->queue_lock);
1587 static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
1589 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1590 queue->sock->sk->sk_user_data = queue;
1591 queue->state_change = queue->sock->sk->sk_state_change;
1592 queue->data_ready = queue->sock->sk->sk_data_ready;
1593 queue->write_space = queue->sock->sk->sk_write_space;
1594 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1595 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1596 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1598 queue->sock->sk->sk_ll_usec = 1;
1600 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1606 struct nvme_tcp_queue *queue = &ctrl->queues[idx];
1609 queue->rd_enabled = true;
1610 nvme_tcp_init_recv_ctx(queue);
1611 nvme_tcp_setup_sock_ops(queue);
1619 set_bit(NVME_TCP_Q_LIVE, &queue->flags);
1621 if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1622 __nvme_tcp_stop_queue(queue);
1624 "failed to connect queue: %d ret=%d\n", idx, ret);
1786 * sufficient queue count to have dedicated default queues.
2214 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2219 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2238 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2241 u8 hdgst = nvme_tcp_hdgst_len(queue);
2245 if (queue->hdr_digest)
2266 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2268 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2279 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2283 "queue %d: timeout request %#x type %d\n",
2284 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
2312 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2324 req->data_len <= nvme_tcp_inline_data_size(queue))
2325 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2337 struct nvme_tcp_queue *queue = req->queue;
2338 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2355 req->data_len <= nvme_tcp_inline_data_size(queue))
2362 if (queue->hdr_digest)
2364 if (queue->data_digest && req->pdu_len) {
2366 ddgst = nvme_tcp_ddgst_len(queue);
2373 ret = nvme_tcp_map_data(queue, rq);
2376 dev_err(queue->ctrl->ctrl.device,
2386 struct nvme_tcp_queue *queue = hctx->driver_data;
2388 if (!llist_empty(&queue->req_list))
2389 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
2395 struct nvme_ns *ns = hctx->queue->queuedata;
2396 struct nvme_tcp_queue *queue = hctx->driver_data;
2399 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2402 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2403 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2463 struct nvme_tcp_queue *queue = hctx->driver_data;
2464 struct sock *sk = queue->sock->sk;
2466 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2469 set_bit(NVME_TCP_Q_POLLING, &queue->flags);
2472 nvme_tcp_try_recv(queue);
2473 clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
2474 return queue->nr_cqe;
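
For orientation only: the matches around lines 499-512 and 852-874 above outline a three-state receive path, where the queue's remaining-byte counters select whether the next socket bytes are parsed as a PDU header, as C2H payload data, or as the trailing data digest. Below is a minimal, self-contained sketch of that state selection. It is a simplified standalone model under stated assumptions, not the driver source: the toy_queue struct, recv_state() helper, and the main() driver are hypothetical illustrations, while the enum names and the PDU > DDGST > DATA priority order mirror the listing.

	/*
	 * Simplified model of the receive-state selection seen above
	 * (nvme_tcp_recv_state / nvme_tcp_init_recv_ctx). Hypothetical
	 * standalone code for illustration; the real driver operates on
	 * struct nvme_tcp_queue and live socket data, not this toy struct.
	 */
	#include <stdio.h>

	enum nvme_tcp_recv_state {
		NVME_TCP_RECV_PDU = 0,
		NVME_TCP_RECV_DATA,
		NVME_TCP_RECV_DDGST,
	};

	struct toy_queue {
		int pdu_remaining;	/* PDU header bytes (+ header digest) still expected */
		int data_remaining;	/* C2H data bytes still expected */
		int ddgst_remaining;	/* trailing data-digest bytes still expected */
	};

	/* Mirrors the priority order in the listing: PDU, then DDGST, then DATA. */
	static enum nvme_tcp_recv_state recv_state(const struct toy_queue *q)
	{
		return q->pdu_remaining ? NVME_TCP_RECV_PDU :
		       q->ddgst_remaining ? NVME_TCP_RECV_DDGST :
		       NVME_TCP_RECV_DATA;
	}

	int main(void)
	{
		struct toy_queue q = { .pdu_remaining = 24 };

		printf("state=%d\n", recv_state(&q));	/* NVME_TCP_RECV_PDU */
		q.pdu_remaining = 0;
		q.data_remaining = 4096;
		printf("state=%d\n", recv_state(&q));	/* NVME_TCP_RECV_DATA */
		q.data_remaining = 0;
		q.ddgst_remaining = 4;
		printf("state=%d\n", recv_state(&q));	/* NVME_TCP_RECV_DDGST */
		return 0;
	}

In the listing, nvme_tcp_init_recv_ctx() re-arms this machine after each completed PDU by resetting pdu_remaining to the response-PDU size (plus header-digest length) and clearing the data and digest counters, which is why the PDU state always wins first on a fresh context.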