Lines matching refs:queue — every reference to the queue identifier in the NVMe/TCP host driver (drivers/nvme/host/tcp.c in the Linux kernel; exact line numbers vary by kernel version). Each entry below is prefixed with its line number in that source file.

82 	struct nvme_tcp_queue	*queue;
180 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
187 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
189 return queue - queue->ctrl->queues;
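Line 189 recovers the queue's ID by pointer arithmetic: queues live in one contiguous array on the controller, so subtracting the array base from the queue pointer yields its index, and no per-queue ID field needs to be stored. A minimal userspace sketch of the same idiom (struct and field names here are illustrative, not the driver's):

    #include <stdio.h>

    struct q { int dummy; };

    struct ctrl {
        struct q queues[8];   /* contiguous array, as in the driver */
    };

    /* Index of a queue = pointer difference from the array base. */
    static int queue_id(struct ctrl *c, struct q *q)
    {
        return (int)(q - c->queues);
    }

    int main(void)
    {
        struct ctrl c;

        printf("%d\n", queue_id(&c, &c.queues[3]));   /* prints 3 */
        return 0;
    }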
192 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
194 u32 queue_idx = nvme_tcp_queue_id(queue);
197 return queue->ctrl->admin_tag_set.tags[queue_idx];
198 return queue->ctrl->tag_set.tags[queue_idx - 1];
201 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
203 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
206 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
208 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
227 return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
232 return req == &req->queue->ctrl->async_req;
321 static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
325 /* drain the send queue as much as we can... */
327 ret = nvme_tcp_try_send(queue);
331 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
333 return !list_empty(&queue->send_list) ||
334 !llist_empty(&queue->req_list);
340 struct nvme_tcp_queue *queue = req->queue;
343 empty = llist_add(&req->lentry, &queue->req_list) &&
344 list_empty(&queue->send_list) && !queue->request;
348 * directly, otherwise queue io_work. Also, only do that if we
351 if (queue->io_cpu == raw_smp_processor_id() &&
352 sync && empty && mutex_trylock(&queue->send_mutex)) {
353 nvme_tcp_send_all(queue);
354 mutex_unlock(&queue->send_mutex);
357 if (last && nvme_tcp_queue_more(queue))
358 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
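Lines 343-358 are the lock-free submission fast path: producers push requests onto the llist req_list; if the submitter is already running on the queue's io_cpu, nothing else is queued, and the send mutex can be taken without contention, the request is sent inline, otherwise io_work is scheduled. A sketch of that trylock fast path, with hypothetical send_all()/schedule_io_work() stand-ins for the real driver actions:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t send_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Placeholders for the real driver actions (hypothetical names). */
    static void send_all(void)         { puts("sending inline"); }
    static void schedule_io_work(void) { puts("deferred to io_work"); }

    /*
     * Fast path modeled on nvme_tcp_queue_request(): only send inline
     * when nothing else is queued and the send lock is uncontended;
     * otherwise defer to the per-queue worker.
     */
    static void queue_request(bool empty, bool on_io_cpu)
    {
        if (on_io_cpu && empty && pthread_mutex_trylock(&send_mutex) == 0) {
            send_all();
            pthread_mutex_unlock(&send_mutex);
            return;
        }
        schedule_io_work();
    }

    int main(void)
    {
        queue_request(true, true);    /* inline send */
        queue_request(false, true);   /* deferred */
        return 0;
    }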
361 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
366 for (node = llist_del_all(&queue->req_list); node; node = node->next) {
368 list_add(&req->entry, &queue->send_list);
373 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
377 req = list_first_entry_or_null(&queue->send_list,
380 nvme_tcp_process_req_list(queue);
381 req = list_first_entry_or_null(&queue->send_list,
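llist_add() pushes at the head, so llist_del_all() on line 366 hands the chain back newest-first; list_add()-ing each node at the head of send_list reverses it a second time, restoring FIFO submission order. A plain-pointer sketch of that double reversal:

    #include <stdio.h>

    struct node { int seq; struct node *next; };

    /* Push at head: a lock-free llist degenerates to this under one thread. */
    static struct node *push(struct node *head, struct node *n)
    {
        n->next = head;
        return n;
    }

    int main(void)
    {
        struct node a = {1}, b = {2}, c = {3};
        struct node *stack = NULL, *fifo = NULL;

        /* Producers submit 1, 2, 3 -> stack holds 3, 2, 1. */
        stack = push(stack, &a);
        stack = push(stack, &b);
        stack = push(stack, &c);

        /* Consumer re-pushes each node onto a second list: 1, 2, 3 again. */
        for (struct node *n = stack, *next; n; n = next) {
            next = n->next;
            fifo = push(fifo, n);
        }
        for (struct node *n = fifo; n; n = n->next)
            printf("%d ", n->seq);        /* prints: 1 2 3 */
        printf("\n");
        return 0;
    }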
419 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
427 dev_err(queue->ctrl->ctrl.device,
428 "queue %d: header digest flag is cleared\n",
429 nvme_tcp_queue_id(queue));
434 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
437 dev_err(queue->ctrl->ctrl.device,
446 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
449 u8 digest_len = nvme_tcp_hdgst_len(queue);
456 dev_err(queue->ctrl->ctrl.device,
457 "queue %d: data digest flag is cleared\n",
458 nvme_tcp_queue_id(queue));
461 crypto_ahash_init(queue->rcv_hash);
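NVMe/TCP header and data digests are CRC32C; the driver computes them through a crc32c ahash transform (rcv_hash) and compares the result against the value carried in the PDU. The check itself, sketched with a self-contained bitwise CRC32C in place of the kernel crypto API:

    #include <stdint.h>
    #include <stdio.h>

    /* Bitwise CRC32C (Castagnoli), reflected polynomial 0x82F63B78. */
    static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
    {
        const uint8_t *p = buf;

        crc = ~crc;
        while (len--) {
            crc ^= *p++;
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
        }
        return ~crc;
    }

    /* Verify a received header digest against one computed locally. */
    static int verify_hdgst(const void *hdr, size_t hlen, uint32_t recv_digest)
    {
        uint32_t exp = crc32c(0, hdr, hlen);

        if (recv_digest != exp) {
            fprintf(stderr, "header digest error: recv %#x expected %#x\n",
                    recv_digest, exp);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        char hdr[24] = "icreq";
        uint32_t good = crc32c(0, hdr, sizeof(hdr));

        printf("%d %d\n", verify_hdgst(hdr, sizeof(hdr), good),
               verify_hdgst(hdr, sizeof(hdr), good ^ 1));  /* 0 then -1 */
        return 0;
    }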
482 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
483 u8 hdgst = nvme_tcp_hdgst_len(queue);
485 req->pdu = page_frag_alloc(&queue->pf_cache,
492 req->queue = queue;
503 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
505 hctx->driver_data = queue;
513 struct nvme_tcp_queue *queue = &ctrl->queues[0];
515 hctx->driver_data = queue;
520 nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
522 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
523 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
527 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
529 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
530 nvme_tcp_hdgst_len(queue);
531 queue->pdu_offset = 0;
532 queue->data_remaining = -1;
533 queue->ddgst_remaining = 0;
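nvme_tcp_recv_state() keeps no explicit state variable: whichever "remaining" counter is non-zero implies the phase (PDU header still arriving, then payload data, then the trailing data digest). Mirrored directly:

    #include <stdio.h>

    enum recv_state { RECV_PDU, RECV_DATA, RECV_DDGST };

    struct rx { int pdu_remaining, ddgst_remaining; };

    /* State is implied by the outstanding byte counters, as in the driver. */
    static enum recv_state recv_state(const struct rx *q)
    {
        return q->pdu_remaining   ? RECV_PDU :
               q->ddgst_remaining ? RECV_DDGST : RECV_DATA;
    }

    int main(void)
    {
        struct rx q = { .pdu_remaining = 24, .ddgst_remaining = 0 };

        printf("%d\n", recv_state(&q));     /* RECV_PDU */
        q.pdu_remaining = 0;
        printf("%d\n", recv_state(&q));     /* RECV_DATA */
        q.ddgst_remaining = 4;
        printf("%d\n", recv_state(&q));     /* RECV_DDGST */
        return 0;
    }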
545 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
551 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
553 dev_err(queue->ctrl->ctrl.device,
554 "got bad cqe.command_id %#x on queue %d\n",
555 cqe->command_id, nvme_tcp_queue_id(queue));
556 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
566 queue->nr_cqe++;
571 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
576 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
578 dev_err(queue->ctrl->ctrl.device,
579 "got bad c2hdata.command_id %#x on queue %d\n",
580 pdu->command_id, nvme_tcp_queue_id(queue));
585 dev_err(queue->ctrl->ctrl.device,
586 "queue %d tag %#x unexpected data\n",
587 nvme_tcp_queue_id(queue), rq->tag);
591 queue->data_remaining = le32_to_cpu(pdu->data_length);
595 dev_err(queue->ctrl->ctrl.device,
596 "queue %d tag %#x SUCCESS set but not last PDU\n",
597 nvme_tcp_queue_id(queue), rq->tag);
598 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
605 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
613 * survive any kind of queue freeze and often don't respond to
617 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
619 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
622 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
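Completions whose command_id sits in the reserved AEN range of the admin queue have no block-layer request behind them, so line 617 routes them to nvme_complete_async_event() before any tag lookup. A sketch of that dispatch (the cutoff constant is illustrative; the driver uses nvme_is_aen_req() and NVME_AQ_BLK_MQ_DEPTH):

    #include <stdbool.h>
    #include <stdio.h>

    /* Admin-queue tags at or above this are reserved for AENs
     * (value illustrative). */
    #define AEN_CID_FLOOR 31

    /* AENs only ever arrive on the admin queue (qid 0). */
    static bool is_aen(int qid, unsigned int command_id)
    {
        return qid == 0 && command_id >= AEN_CID_FLOOR;
    }

    static void handle_comp(int qid, unsigned int cid)
    {
        if (is_aen(qid, cid))
            printf("cid %#x: async event, no request lookup\n", cid);
        else
            printf("cid %#x: complete request via tagset\n", cid);
    }

    int main(void)
    {
        handle_comp(0, 31);   /* AEN path */
        handle_comp(0, 5);    /* normal admin completion */
        handle_comp(1, 31);   /* I/O queue: never an AEN */
        return 0;
    }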
630 struct nvme_tcp_queue *queue = req->queue;
633 u8 hdgst = nvme_tcp_hdgst_len(queue);
634 u8 ddgst = nvme_tcp_ddgst_len(queue);
638 req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
647 if (queue->hdr_digest)
649 if (queue->data_digest)
661 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
669 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
671 dev_err(queue->ctrl->ctrl.device,
672 "got bad r2t.command_id %#x on queue %d\n",
673 pdu->command_id, nvme_tcp_queue_id(queue));
679 dev_err(queue->ctrl->ctrl.device,
686 dev_err(queue->ctrl->ctrl.device,
693 dev_err(queue->ctrl->ctrl.device,
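Line 638 caps every host-to-controller data PDU at the maxh2cdata value the controller advertised in its ICResp, so one large R2T-solicited write goes out as a series of min(left, maxh2cdata) chunks. The chunking loop, sketched:

    #include <stdio.h>

    /* Split an R2T transfer into PDUs no larger than maxh2cdata. */
    static void send_h2c(unsigned int left, unsigned int maxh2cdata)
    {
        while (left) {
            unsigned int pdu_len = left < maxh2cdata ? left : maxh2cdata;

            printf("H2CData PDU: %u bytes\n", pdu_len);
            left -= pdu_len;
        }
    }

    int main(void)
    {
        send_h2c(200000, 65536);   /* 65536 + 65536 + 65536 + 3392 */
        return 0;
    }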
710 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
714 char *pdu = queue->pdu;
715 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
719 &pdu[queue->pdu_offset], rcv_len);
723 queue->pdu_remaining -= rcv_len;
724 queue->pdu_offset += rcv_len;
727 if (queue->pdu_remaining)
730 hdr = queue->pdu;
731 if (queue->hdr_digest) {
732 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
738 if (queue->data_digest) {
739 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
746 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
748 nvme_tcp_init_recv_ctx(queue);
749 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
751 nvme_tcp_init_recv_ctx(queue);
752 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
754 dev_err(queue->ctrl->ctrl.device,
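recv_pdu, recv_data and recv_ddgst all share one idiom: consume min(bytes available in the skb, bytes remaining for the current phase), advance the offset, and act only once the remaining counter reaches zero, so a PDU can be reassembled across many TCP segments. A minimal sketch of that accumulation:

    #include <stdio.h>
    #include <string.h>

    struct rx { char pdu[24]; size_t off, remaining; };

    /* Consume up to len bytes toward the current PDU; returns bytes taken.
     * The caller loops over recv states exactly as nvme_tcp_recv_skb() does. */
    static size_t recv_pdu(struct rx *q, const char *buf, size_t len)
    {
        size_t take = len < q->remaining ? len : q->remaining;

        memcpy(q->pdu + q->off, buf, take);
        q->off += take;
        q->remaining -= take;
        if (!q->remaining)
            puts("full PDU header assembled: dispatch on type");
        return take;
    }

    int main(void)
    {
        struct rx q = { .remaining = sizeof(q.pdu) };
        char seg[16] = {0};

        recv_pdu(&q, seg, sizeof(seg));   /* partial: 16 of 24 bytes */
        recv_pdu(&q, seg, sizeof(seg));   /* takes only the last 8   */
        return 0;
    }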
768 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
771 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
773 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
779 recv_len = min_t(size_t, *len, queue->data_remaining);
791 dev_err(queue->ctrl->ctrl.device,
792 "queue %d no space in request %#x",
793 nvme_tcp_queue_id(queue), rq->tag);
794 nvme_tcp_init_recv_ctx(queue);
804 if (queue->data_digest)
806 &req->iter, recv_len, queue->rcv_hash);
811 dev_err(queue->ctrl->ctrl.device,
812 "queue %d failed to copy request %#x data",
813 nvme_tcp_queue_id(queue), rq->tag);
819 queue->data_remaining -= recv_len;
822 if (!queue->data_remaining) {
823 if (queue->data_digest) {
824 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
825 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
830 queue->nr_cqe++;
832 nvme_tcp_init_recv_ctx(queue);
839 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
842 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
843 char *ddgst = (char *)&queue->recv_ddgst;
844 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
845 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
852 queue->ddgst_remaining -= recv_len;
855 if (queue->ddgst_remaining)
858 if (queue->recv_ddgst != queue->exp_ddgst) {
859 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
865 dev_err(queue->ctrl->ctrl.device,
867 le32_to_cpu(queue->recv_ddgst),
868 le32_to_cpu(queue->exp_ddgst));
872 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
877 queue->nr_cqe++;
880 nvme_tcp_init_recv_ctx(queue);
887 struct nvme_tcp_queue *queue = desc->arg.data;
891 if (unlikely(!queue->rd_enabled))
895 switch (nvme_tcp_recv_state(queue)) {
897 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
900 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
903 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
909 dev_err(queue->ctrl->ctrl.device,
911 queue->rd_enabled = false;
912 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
922 struct nvme_tcp_queue *queue;
927 queue = sk->sk_user_data;
928 if (likely(queue && queue->rd_enabled) &&
929 !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
930 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
936 struct nvme_tcp_queue *queue;
939 queue = sk->sk_user_data;
940 if (likely(queue && sk_stream_is_writeable(sk))) {
942 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
949 struct nvme_tcp_queue *queue;
952 queue = sk->sk_user_data;
953 if (!queue)
962 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
965 dev_info(queue->ctrl->ctrl.device,
966 "queue %d socket state %d\n",
967 nvme_tcp_queue_id(queue), sk->sk_state);
970 queue->state_change(sk);
975 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
977 queue->request = NULL;
985 nvme_complete_async_event(&req->queue->ctrl->ctrl,
995 struct nvme_tcp_queue *queue = req->queue;
1011 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
1021 ret = sock_sendmsg(queue->sock, &msg);
1025 if (queue->data_digest)
1026 nvme_tcp_ddgst_update(queue->snd_hash, page,
1039 if (queue->data_digest) {
1040 nvme_tcp_ddgst_final(queue->snd_hash,
1048 nvme_tcp_done_send_req(queue);
1058 struct nvme_tcp_queue *queue = req->queue;
1063 u8 hdgst = nvme_tcp_hdgst_len(queue);
1067 if (inline_data || nvme_tcp_queue_more(queue))
1072 if (queue->hdr_digest && !req->offset)
1073 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1077 ret = sock_sendmsg(queue->sock, &msg);
1085 if (queue->data_digest)
1086 crypto_ahash_init(queue->snd_hash);
1088 nvme_tcp_done_send_req(queue);
1099 struct nvme_tcp_queue *queue = req->queue;
1103 u8 hdgst = nvme_tcp_hdgst_len(queue);
1107 if (queue->hdr_digest && !req->offset)
1108 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1115 ret = sock_sendmsg(queue->sock, &msg);
1122 if (queue->data_digest)
1123 crypto_ahash_init(queue->snd_hash);
1133 struct nvme_tcp_queue *queue = req->queue;
1143 if (nvme_tcp_queue_more(queue))
1148 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1156 nvme_tcp_done_send_req(queue);
1164 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
1170 if (!queue->request) {
1171 queue->request = nvme_tcp_fetch_request(queue);
1172 if (!queue->request)
1175 req = queue->request;
1204 dev_err(queue->ctrl->ctrl.device,
1206 nvme_tcp_fail_request(queue->request);
1207 nvme_tcp_done_send_req(queue);
1214 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1216 struct socket *sock = queue->sock;
1221 rd_desc.arg.data = queue;
1224 queue->nr_cqe = 0;
1232 struct nvme_tcp_queue *queue =
1240 if (mutex_trylock(&queue->send_mutex)) {
1241 result = nvme_tcp_try_send(queue);
1242 mutex_unlock(&queue->send_mutex);
1249 result = nvme_tcp_try_recv(queue);
1255 if (!pending || !queue->rd_enabled)
1260 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
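nvme_tcp_io_work() alternates a try-locked send pass with a receive pass inside a time-bounded loop (roughly one jiffy in the driver) and re-arms itself when work remains, so one busy queue cannot monopolize the workqueue. The shape of that loop, with an iteration budget standing in for the time cap:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the driver's send/recv passes (hypothetical names);
     * in the driver the send pass runs under mutex_trylock(&send_mutex). */
    static int try_send(void) { static int n = 3; return n-- > 0; }
    static int try_recv(void) { return 0; }

    static bool requeued;
    static void requeue_io_work(void) { requeued = true; }

    /* Bounded loop in the style of nvme_tcp_io_work(): make progress in
     * both directions, stop when idle or when the budget is spent, and
     * re-arm the work item if anything is still pending. */
    static void io_work(void)
    {
        int budget = 2;             /* stand-in for the ~1 jiffy time cap */
        bool pending;

        do {
            pending = false;
            if (try_send() > 0)
                pending = true;
            if (try_recv() > 0)
                pending = true;
        } while (pending && --budget);

        if (pending)
            requeue_io_work();      /* finish the remainder later */
    }

    int main(void)
    {
        io_work();
        printf("requeued=%d\n", requeued);  /* 1: budget ran out first */
        return 0;
    }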
1263 static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1265 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1267 ahash_request_free(queue->rcv_hash);
1268 ahash_request_free(queue->snd_hash);
1272 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1280 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1281 if (!queue->snd_hash)
1283 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1285 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1286 if (!queue->rcv_hash)
1288 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1292 ahash_request_free(queue->snd_hash);
1307 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1309 u8 hdgst = nvme_tcp_hdgst_len(queue);
1311 async->pdu = page_frag_alloc(&queue->pf_cache,
1317 async->queue = &ctrl->queues[0];
1325 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1328 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1331 if (queue->hdr_digest || queue->data_digest)
1332 nvme_tcp_free_crypto(queue);
1334 if (queue->pf_cache.va) {
1335 page = virt_to_head_page(queue->pf_cache.va);
1336 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
1337 queue->pf_cache.va = NULL;
1341 sock_release(queue->sock);
1344 kfree(queue->pdu);
1345 mutex_destroy(&queue->send_mutex);
1346 mutex_destroy(&queue->queue_lock);
1349 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1376 if (queue->hdr_digest)
1378 if (queue->data_digest)
1383 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1390 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1397 pr_err("queue %d: bad type returned %d\n",
1398 nvme_tcp_queue_id(queue), icresp->hdr.type);
1403 pr_err("queue %d: bad pdu length returned %d\n",
1404 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1409 pr_err("queue %d: bad pfv returned %d\n",
1410 nvme_tcp_queue_id(queue), icresp->pfv);
1415 if ((queue->data_digest && !ctrl_ddgst) ||
1416 (!queue->data_digest && ctrl_ddgst)) {
1417 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1418 nvme_tcp_queue_id(queue),
1419 queue->data_digest ? "enabled" : "disabled",
1425 if ((queue->hdr_digest && !ctrl_hdgst) ||
1426 (!queue->hdr_digest && ctrl_hdgst)) {
1427 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1428 nvme_tcp_queue_id(queue),
1429 queue->hdr_digest ? "enabled" : "disabled",
1435 pr_err("queue %d: unsupported cpda returned %d\n",
1436 nvme_tcp_queue_id(queue), icresp->cpda);
1442 pr_err("queue %d: invalid maxh2cdata returned %u\n",
1443 nvme_tcp_queue_id(queue), maxh2cdata);
1446 queue->maxh2cdata = maxh2cdata;
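nvme_tcp_init_connection() performs the ICReq/ICResp handshake and refuses the connection unless the response's type, PDU length, PFV, digest flags, CPDA and MAXH2CDATA all match what the host asked for (lines 1397-1443). A condensed sketch of that validation chain; the struct layout is simplified and the expected constants (type 0x01, plen 128, pfv 0) follow my reading of the NVMe/TCP 1.0 spec:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified ICResp fields (the full PDU is 128 bytes on the wire). */
    struct icresp {
        uint8_t  type;        /* expected: 0x01 (ICResp)              */
        uint32_t plen;        /* expected: 128                        */
        uint16_t pfv;         /* expected: 0 (PDU format version 1.0) */
        uint8_t  cpda;        /* driver accepts only 0                */
        uint32_t maxh2cdata;  /* host honors this when sending data   */
        uint8_t  digest;      /* bit 0: HDGST, bit 1: DDGST (per spec) */
    };

    static int validate_icresp(const struct icresp *r, int want_hdgst,
                               int want_ddgst)
    {
        if (r->type != 0x01)
            return fprintf(stderr, "bad type %d\n", r->type), -1;
        if (r->plen != 128)
            return fprintf(stderr, "bad pdu length %u\n", r->plen), -1;
        if (r->pfv != 0)
            return fprintf(stderr, "bad pfv %u\n", r->pfv), -1;
        if (!!(r->digest & 1) != want_hdgst || !!(r->digest & 2) != want_ddgst)
            return fprintf(stderr, "digest mismatch\n"), -1;
        if (r->cpda != 0)
            return fprintf(stderr, "unsupported cpda %u\n", r->cpda), -1;
        if (r->maxh2cdata == 0 || r->maxh2cdata % 4)
            return fprintf(stderr, "invalid maxh2cdata %u\n",
                           r->maxh2cdata), -1;
        return 0;
    }

    int main(void)
    {
        struct icresp ok = { 0x01, 128, 0, 0, 0x20000, 0 };

        printf("%d\n", validate_icresp(&ok, 0, 0));   /* 0: accepted */
        return 0;
    }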
1456 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
1458 return nvme_tcp_queue_id(queue) == 0;
1461 static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
1463 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1464 int qid = nvme_tcp_queue_id(queue);
1466 return !nvme_tcp_admin_queue(queue) &&
1470 static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
1472 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1473 int qid = nvme_tcp_queue_id(queue);
1475 return !nvme_tcp_admin_queue(queue) &&
1476 !nvme_tcp_default_queue(queue) &&
1481 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
1483 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1484 int qid = nvme_tcp_queue_id(queue);
1486 return !nvme_tcp_admin_queue(queue) &&
1487 !nvme_tcp_default_queue(queue) &&
1488 !nvme_tcp_read_queue(queue) &&
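Lines 1461-1488 classify a queue purely by where its qid falls relative to the per-type queue counts: qid 0 is admin, then the default range, then read, then poll. The same arithmetic, sketched:

    #include <stdio.h>

    enum qtype { ADMIN, DEFAULT, READ, POLL, INVALID };

    /* Classify a queue by its id against the per-type queue counts,
     * mirroring nvme_tcp_default_queue()/read_queue()/poll_queue(). */
    static enum qtype classify(int qid, int ndef, int nread, int npoll)
    {
        if (qid == 0)
            return ADMIN;
        if (qid < 1 + ndef)
            return DEFAULT;
        if (qid < 1 + ndef + nread)
            return READ;
        if (qid < 1 + ndef + nread + npoll)
            return POLL;
        return INVALID;
    }

    int main(void)
    {
        /* e.g. 4 default, 2 read, 2 poll queues */
        for (int qid = 0; qid < 9; qid++)
            printf("qid %d -> type %d\n", qid, classify(qid, 4, 2, 2));
        return 0;
    }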
1494 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1496 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1497 int qid = nvme_tcp_queue_id(queue);
1500 if (nvme_tcp_default_queue(queue))
1502 else if (nvme_tcp_read_queue(queue))
1504 else if (nvme_tcp_poll_queue(queue))
1507 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1513 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1516 mutex_init(&queue->queue_lock);
1517 queue->ctrl = ctrl;
1518 init_llist_head(&queue->req_list);
1519 INIT_LIST_HEAD(&queue->send_list);
1520 mutex_init(&queue->send_mutex);
1521 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1524 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
1526 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1530 IPPROTO_TCP, &queue->sock);
1537 nvme_tcp_reclassify_socket(queue->sock);
1540 tcp_sock_set_syncnt(queue->sock->sk, 1);
1543 tcp_sock_set_nodelay(queue->sock->sk);
1546 * Cleanup whatever is sitting in the TCP transmit queue on socket
1550 sock_no_linger(queue->sock->sk);
1553 sock_set_priority(queue->sock->sk, so_priority);
1557 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
1560 queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1562 queue->sock->sk->sk_allocation = GFP_ATOMIC;
1563 queue->sock->sk->sk_use_task_frag = false;
1564 nvme_tcp_set_queue_io_cpu(queue);
1565 queue->request = NULL;
1566 queue->data_remaining = 0;
1567 queue->ddgst_remaining = 0;
1568 queue->pdu_remaining = 0;
1569 queue->pdu_offset = 0;
1570 sk_set_memalloc(queue->sock->sk);
1573 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1577 "failed to bind queue %d socket %d\n",
1587 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
1591 "failed to bind to interface %s queue %d err %d\n",
1597 queue->hdr_digest = nctrl->opts->hdr_digest;
1598 queue->data_digest = nctrl->opts->data_digest;
1599 if (queue->hdr_digest || queue->data_digest) {
1600 ret = nvme_tcp_alloc_crypto(queue);
1603 "failed to allocate queue %d crypto\n", qid);
1609 nvme_tcp_hdgst_len(queue);
1610 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1611 if (!queue->pdu) {
1616 dev_dbg(nctrl->device, "connecting queue %d\n",
1617 nvme_tcp_queue_id(queue));
1619 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1627 ret = nvme_tcp_init_connection(queue);
1631 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1636 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1638 kfree(queue->pdu);
1640 if (queue->hdr_digest || queue->data_digest)
1641 nvme_tcp_free_crypto(queue);
1643 sock_release(queue->sock);
1644 queue->sock = NULL;
1646 mutex_destroy(&queue->send_mutex);
1647 mutex_destroy(&queue->queue_lock);
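Lines 1540-1563 tune the new socket before connecting: a single SYN retry so a dead target fails fast, TCP_NODELAY so PDUs are not batched, linger disabled so an error-path close discards whatever still sits in the transmit queue, plus optional priority and TOS. A userspace equivalent of those setsockopt calls (the zero priority/tos values are placeholders for so_priority and opts->tos):

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
        int one = 1, syncnt = 1, prio = 0, tos = 0;
        struct linger lo = { .l_onoff = 1, .l_linger = 0 };

        if (fd < 0)
            return 1;
        /* Single SYN retry: fail fast instead of hanging on a dead target. */
        setsockopt(fd, IPPROTO_TCP, TCP_SYNCNT, &syncnt, sizeof(syncnt));
        /* No Nagle: PDUs go out immediately. */
        setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
        /* Abortive close: drop anything still in the transmit queue. */
        setsockopt(fd, SOL_SOCKET, SO_LINGER, &lo, sizeof(lo));
        /* Optional QoS knobs, mirroring so_priority and opts->tos. */
        setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
        setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));

        puts("socket tuned");
        close(fd);
        return 0;
    }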
1651 static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
1653 struct socket *sock = queue->sock;
1657 sock->sk->sk_data_ready = queue->data_ready;
1658 sock->sk->sk_state_change = queue->state_change;
1659 sock->sk->sk_write_space = queue->write_space;
1663 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1665 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1666 nvme_tcp_restore_sock_ops(queue);
1667 cancel_work_sync(&queue->io_work);
1673 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1675 if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1678 mutex_lock(&queue->queue_lock);
1679 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1680 __nvme_tcp_stop_queue(queue);
1681 mutex_unlock(&queue->queue_lock);
1684 static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
1686 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1687 queue->sock->sk->sk_user_data = queue;
1688 queue->state_change = queue->sock->sk->sk_state_change;
1689 queue->data_ready = queue->sock->sk->sk_data_ready;
1690 queue->write_space = queue->sock->sk->sk_write_space;
1691 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1692 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1693 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1695 queue->sock->sk->sk_ll_usec = 1;
1697 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
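nvme_tcp_setup_sock_ops() saves the socket's original sk_data_ready/sk_state_change/sk_write_space pointers into the queue and installs the driver's hooks under the callback lock; __nvme_tcp_stop_queue() later restores them via lines 1657-1659. A function-pointer sketch of that save-replace-restore pattern:

    #include <stdio.h>

    struct sock { void (*data_ready)(struct sock *); void *user_data; };

    static void default_data_ready(struct sock *sk) { puts("default"); }

    /* Saved original callback, as queue->data_ready saves sk_data_ready. */
    static void (*saved_data_ready)(struct sock *);

    static void hooked_data_ready(struct sock *sk)
    {
        puts("driver hook: kick io_work");
    }

    static void setup_sock_ops(struct sock *sk, void *queue)
    {
        sk->user_data = queue;
        saved_data_ready = sk->data_ready;   /* save ... */
        sk->data_ready = hooked_data_ready;  /* ... then replace */
    }

    static void restore_sock_ops(struct sock *sk)
    {
        sk->user_data = NULL;
        sk->data_ready = saved_data_ready;
    }

    int main(void)
    {
        struct sock sk = { default_data_ready, NULL };
        int queue_stub;

        setup_sock_ops(&sk, &queue_stub);
        sk.data_ready(&sk);                  /* driver hook */
        restore_sock_ops(&sk);
        sk.data_ready(&sk);                  /* default again */
        return 0;
    }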
1703 struct nvme_tcp_queue *queue = &ctrl->queues[idx];
1706 queue->rd_enabled = true;
1707 nvme_tcp_init_recv_ctx(queue);
1708 nvme_tcp_setup_sock_ops(queue);
1716 set_bit(NVME_TCP_Q_LIVE, &queue->flags);
1718 if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1719 __nvme_tcp_stop_queue(queue);
1721 "failed to connect queue: %d ret=%d\n", idx, ret);
1863 * queue number might have changed.
2213 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2218 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2237 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2240 u8 hdgst = nvme_tcp_hdgst_len(queue);
2244 if (queue->hdr_digest)
2265 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2267 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2274 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2277 int qid = nvme_tcp_queue_id(req->queue);
2280 "queue %d: timeout cid %#x type %d opcode %#x (%s)\n",
2281 nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
2310 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2323 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2335 struct nvme_tcp_queue *queue = req->queue;
2336 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2362 if (queue->hdr_digest)
2364 if (queue->data_digest && req->pdu_len) {
2366 ddgst = nvme_tcp_ddgst_len(queue);
2373 ret = nvme_tcp_map_data(queue, rq);
2376 dev_err(queue->ctrl->ctrl.device,
2386 struct nvme_tcp_queue *queue = hctx->driver_data;
2388 if (!llist_empty(&queue->req_list))
2389 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
2395 struct nvme_ns *ns = hctx->queue->queuedata;
2396 struct nvme_tcp_queue *queue = hctx->driver_data;
2399 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2402 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2403 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
2425 struct nvme_tcp_queue *queue = hctx->driver_data;
2426 struct sock *sk = queue->sock->sk;
2428 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2431 set_bit(NVME_TCP_Q_POLLING, &queue->flags);
2434 nvme_tcp_try_recv(queue);
2435 clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
2436 return queue->nr_cqe;
2441 struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0];
2447 mutex_lock(&queue->queue_lock);
2449 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2451 ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
2459 mutex_unlock(&queue->queue_lock);