Lines matching refs: queue — NVMe over Fabrics TCP target, drivers/nvme/target/tcp.c
60 * queue before determining it to be idle. This optional module behavior
94 struct nvmet_tcp_queue *queue;
197 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
200 if (unlikely(!queue->nr_cmds)) {
205 return cmd - queue->cmds;
233 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
237 cmd = list_first_entry_or_null(&queue->free_list,
253 if (unlikely(cmd == &cmd->queue->connect))
256 list_add_tail(&cmd->entry, &cmd->queue->free_list);
259 static inline int queue_cpu(struct nvmet_tcp_queue *queue)
261 return queue->sock->sk->sk_incoming_cpu;
264 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
266 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
269 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
271 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
284 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
292 pr_err("queue %d: header digest enabled but no header digest\n",
293 queue->idx);
298 nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
301 pr_err("queue %d: header digest error: recv %#x expected %#x\n",
302 queue->idx, le32_to_cpu(recv_digest),
310 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
313 u8 digest_len = nvmet_tcp_hdgst_len(queue);
320 pr_err("queue %d: data digest flag is cleared\n", queue->idx);
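The digest helpers above add four bytes (NVME_TCP_DIGEST_LENGTH) per negotiated digest, and nvmet_tcp_verify_hdgst() recomputes the received header's checksum before trusting it. NVMe/TCP digests are CRC32C; the driver delegates the math to the kernel crypto API via queue->rcv_hash/snd_hash, but the wire format is compact enough to sketch stand-alone (bitwise CRC32C, little-endian host assumed; verify_hdgst() is an illustrative name, not the driver's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC32C (Castagnoli, reflected polynomial 0x82F63B78). */
static uint32_t crc32c(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t crc = 0xFFFFFFFFu;

	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1u));
	}
	return ~crc;
}

/* Mirrors nvmet_tcp_verify_hdgst(): the digest covers hlen header
 * bytes and travels in the 4 bytes right after them (little-endian,
 * so a plain memcpy works on an LE host). */
static int verify_hdgst(const uint8_t *pdu, size_t hlen)
{
	uint32_t recv_digest;

	memcpy(&recv_digest, pdu + hlen, sizeof(recv_digest));
	return recv_digest == crc32c(pdu, hlen) ? 0 : -1;
}

int main(void)
{
	uint8_t pdu[28] = { 0x04, 0x01, 24 };	/* toy header, hlen = 24 */
	uint32_t d = crc32c(pdu, 24);

	memcpy(pdu + 24, &d, sizeof(d));	/* sender appends HDGST */
	printf("hdgst %s\n", verify_hdgst(pdu, 24) ? "bad" : "ok");
	return 0;
}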
365 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
367 queue->rcv_state = NVMET_TCP_RECV_ERR;
368 if (queue->nvme_sq.ctrl)
369 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
371 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
374 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
376 queue->rcv_state = NVMET_TCP_RECV_ERR;
378 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
380 nvmet_tcp_fatal_error(queue);
431 struct nvmet_tcp_queue *queue = cmd->queue;
432 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
433 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
439 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
450 if (queue->data_digest) {
452 nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
455 if (cmd->queue->hdr_digest) {
457 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
464 struct nvmet_tcp_queue *queue = cmd->queue;
465 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
477 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
480 if (cmd->queue->hdr_digest) {
482 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
489 struct nvmet_tcp_queue *queue = cmd->queue;
490 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
500 if (cmd->queue->hdr_digest) {
502 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
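The three PDU setup helpers above differ mostly in length accounting: hlen covers the header proper, pdo points past the optional header digest to where payload starts, and plen is the full on-wire size including payload and data digest. A minimal sketch of nvmet_setup_c2h_data_pdu()'s arithmetic; struct tcp_hdr abbreviates struct nvme_tcp_hdr, and the 0x05 type code and 24-byte data-PDU header size come from the NVMe/TCP definitions:

#include <stdint.h>
#include <stdio.h>

/* Abbreviated stand-in for struct nvme_tcp_hdr. */
struct tcp_hdr {
	uint8_t  type;		/* 0x05 == nvme_tcp_c2h_data */
	uint8_t  flags;
	uint8_t  hlen;		/* header length, excluding header digest */
	uint8_t  pdo;		/* PDU data offset from start of PDU */
	uint32_t plen;		/* total PDU length on the wire */
};

static void setup_c2h_data(struct tcp_hdr *h, uint32_t data_len,
			   int hdr_digest, int data_digest)
{
	uint8_t hdgst = hdr_digest  ? 4 : 0;	/* NVME_TCP_DIGEST_LENGTH */
	uint8_t ddgst = data_digest ? 4 : 0;

	h->type = 0x05;
	h->hlen = 24;			/* sizeof(struct nvme_tcp_data_pdu) */
	h->pdo  = h->hlen + hdgst;	/* payload starts after hdr + HDGST */
	h->plen = h->pdo + data_len + ddgst;
}

int main(void)
{
	struct tcp_hdr h;

	setup_c2h_data(&h, 4096, 1, 1);
	printf("pdo=%u plen=%u\n", h.pdo, h.plen);	/* pdo=28 plen=4128 */
	return 0;
}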
506 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
511 for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
513 list_add(&cmd->entry, &queue->resp_send_list);
514 queue->send_list_len++;
518 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
520 queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
522 if (!queue->snd_cmd) {
523 nvmet_tcp_process_resp_list(queue);
524 queue->snd_cmd =
525 list_first_entry_or_null(&queue->resp_send_list,
527 if (unlikely(!queue->snd_cmd))
531 list_del_init(&queue->snd_cmd->entry);
532 queue->send_list_len--;
534 if (nvmet_tcp_need_data_out(queue->snd_cmd))
535 nvmet_setup_c2h_data_pdu(queue->snd_cmd);
536 else if (nvmet_tcp_need_data_in(queue->snd_cmd))
537 nvmet_setup_r2t_pdu(queue->snd_cmd);
539 nvmet_setup_response_pdu(queue->snd_cmd);
541 return queue->snd_cmd;
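nvmet_tcp_queue_response() can run from any completion context, so it hands commands to io_work through a lock-free llist; the single consumer in nvmet_tcp_process_resp_list() takes the whole list in one shot and splices it into the ordered resp_send_list that nvmet_tcp_fetch_cmd() drains. A stand-alone C11 analog of that push/del_all pairing (function names mirror the kernel llist API; the node type is made up):

#include <stdatomic.h>
#include <stdio.h>

struct node { struct node *next; int id; };

static _Atomic(struct node *) resp_list;

/* Any-context producer: lock-free push onto the list head. */
static void llist_add(struct node *n)
{
	struct node *head = atomic_load(&resp_list);

	do {
		n->next = head;
	} while (!atomic_compare_exchange_weak(&resp_list, &head, n));
}

/* Single consumer: detach the entire list atomically. */
static struct node *llist_del_all(void)
{
	return atomic_exchange(&resp_list, NULL);
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };

	llist_add(&a);
	llist_add(&b);
	for (struct node *n = llist_del_all(); n; n = n->next)
		printf("draining cmd %d\n", n->id);	/* newest first */
	return 0;
}

llist_del_all() hands the nodes back newest-first; the driver's list_add() into resp_send_list re-reverses the batch, restoring submission order.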
548 struct nvmet_tcp_queue *queue = cmd->queue;
552 if (unlikely(cmd == queue->cmd)) {
561 if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
567 llist_add(&cmd->lentry, &queue->resp_list);
568 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
585 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
591 ret = sock_sendmsg(cmd->queue->sock, &msg);
608 struct nvmet_tcp_queue *queue = cmd->queue;
619 if ((!last_in_batch && cmd->queue->send_list_len) ||
621 queue->data_digest || !queue->nvme_sq.sqhd_disabled)
626 ret = sock_sendmsg(cmd->queue->sock, &msg);
640 if (queue->data_digest) {
644 if (queue->nvme_sq.sqhd_disabled) {
645 cmd->queue->snd_cmd = NULL;
652 if (queue->nvme_sq.sqhd_disabled)
664 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
668 if (!last_in_batch && cmd->queue->send_list_len)
675 ret = sock_sendmsg(cmd->queue->sock, &msg);
685 cmd->queue->snd_cmd = NULL;
694 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
698 if (!last_in_batch && cmd->queue->send_list_len)
705 ret = sock_sendmsg(cmd->queue->sock, &msg);
714 cmd->queue->snd_cmd = NULL;
720 struct nvmet_tcp_queue *queue = cmd->queue;
729 if (!last_in_batch && cmd->queue->send_list_len)
734 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
744 if (queue->nvme_sq.sqhd_disabled) {
745 cmd->queue->snd_cmd = NULL;
753 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
756 struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
759 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
760 cmd = nvmet_tcp_fetch_cmd(queue);
802 static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
808 ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
810 nvmet_tcp_socket_error(queue, ret);
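A pattern repeated across the send helpers above, simplified to its core condition: when this fragment is not the batch tail and more responses are queued (send_list_len), set MSG_MORE so the stack coalesces PDUs instead of pushing small segments. A user-space sketch over a socketpair; the driver additionally sets MSG_DONTWAIT and folds in data-digest and transfer-length conditions omitted here:

#include <sys/socket.h>
#include <sys/uio.h>
#include <stdio.h>

static ssize_t send_frag(int sk, const void *buf, size_t len,
			 int last_in_batch, int send_list_len)
{
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	/* More queued work and not the batch tail: ask for coalescing. */
	int flags = (!last_in_batch && send_list_len) ? MSG_MORE : 0;

	return sendmsg(sk, &msg, flags);
}

int main(void)
{
	int sv[2];
	char buf[16];

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	send_frag(sv[0], "pdu1", 4, 0, 2);	/* batch continues: MSG_MORE */
	send_frag(sv[0], "pdu2", 4, 1, 0);	/* batch tail: flush */
	printf("got %zd bytes\n", recv(sv[1], buf, sizeof(buf), 0));
	return 0;
}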
821 static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
823 queue->offset = 0;
824 queue->left = sizeof(struct nvme_tcp_hdr);
825 queue->cmd = NULL;
826 queue->rcv_state = NVMET_TCP_RECV_PDU;
829 static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
831 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
833 ahash_request_free(queue->rcv_hash);
834 ahash_request_free(queue->snd_hash);
838 static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
846 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
847 if (!queue->snd_hash)
849 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
851 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
852 if (!queue->rcv_hash)
854 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
858 ahash_request_free(queue->snd_hash);
865 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
867 struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
868 struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
876 nvmet_tcp_fatal_error(queue);
880 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
885 pr_err("queue %d: unsupported hpda %d\n", queue->idx,
890 queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
891 queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
892 if (queue->hdr_digest || queue->data_digest) {
893 ret = nvmet_tcp_alloc_crypto(queue);
906 if (queue->hdr_digest)
908 if (queue->data_digest)
913 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
915 return ret; /* queue removal will cleanup */
917 queue->state = NVMET_TCP_Q_LIVE;
918 nvmet_prepare_receive_pdu(queue);
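nvmet_tcp_handle_icreq() above is the connect-time handshake: validate the host's ICReq (protocol format version, PDU alignment, requested digests), allocate hash contexts if any digest was negotiated, send the ICResp, then mark the queue NVMET_TCP_Q_LIVE. A condensed sketch of the negotiation logic; the two structs abbreviate nvme_tcp_icreq_pdu/nvme_tcp_icresp_pdu and the maxdata cap is illustrative:

#include <stdint.h>
#include <stdio.h>

#define NVME_TCP_PFV_1_0	0x0	/* only supported format version */
#define HDR_DIGEST_ENABLE	0x1
#define DATA_DIGEST_ENABLE	0x2

struct icreq  { uint16_t pfv; uint8_t hpda; uint8_t digest; };
struct icresp { uint16_t pfv; uint8_t cpda; uint8_t digest; uint32_t maxdata; };

static int handle_icreq(const struct icreq *req, struct icresp *rsp)
{
	if (req->pfv != NVME_TCP_PFV_1_0)
		return -1;		/* "bad pfv" */
	if (req->hpda)
		return -1;		/* only a PDU data alignment of 0 */

	rsp->pfv  = NVME_TCP_PFV_1_0;
	rsp->cpda = 0;
	/* Echo back exactly the digests the host asked for. */
	rsp->digest  = req->digest & (HDR_DIGEST_ENABLE | DATA_DIGEST_ENABLE);
	rsp->maxdata = 0x400000;	/* arbitrary H2C data cap */
	return 0;
}

int main(void)
{
	struct icreq req = { .pfv = NVME_TCP_PFV_1_0,
			     .digest = HDR_DIGEST_ENABLE };
	struct icresp rsp;

	printf("icreq %s\n", handle_icreq(&req, &rsp) ? "rejected" : "accepted");
	return 0;
}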
922 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
937 nvmet_prepare_receive_pdu(queue);
943 pr_err("queue %d: failed to map data\n", queue->idx);
944 nvmet_tcp_fatal_error(queue);
948 queue->rcv_state = NVMET_TCP_RECV_DATA;
953 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
955 struct nvme_tcp_data_pdu *data = &queue->pdu.data;
959 if (likely(queue->nr_cmds)) {
960 if (unlikely(data->ttag >= queue->nr_cmds)) {
961 pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
962 queue->idx, data->ttag, queue->nr_cmds);
963 nvmet_tcp_fatal_error(queue);
966 cmd = &queue->cmds[data->ttag];
968 cmd = &queue->connect;
976 nvmet_tcp_fatal_error(queue);
981 nvmet_tcp_hdgst_len(queue) -
982 nvmet_tcp_ddgst_len(queue) -
991 nvmet_tcp_fatal_error(queue);
996 queue->cmd = cmd;
997 queue->rcv_state = NVMET_TCP_RECV_DATA;
1002 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
1004 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1005 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
1009 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1013 nvmet_tcp_fatal_error(queue);
1016 return nvmet_tcp_handle_icreq(queue);
1020 pr_err("queue %d: received icreq pdu in state %d\n",
1021 queue->idx, queue->state);
1022 nvmet_tcp_fatal_error(queue);
1027 ret = nvmet_tcp_handle_h2c_data_pdu(queue);
1033 queue->cmd = nvmet_tcp_get_cmd(queue);
1034 if (unlikely(!queue->cmd)) {
1036 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
1037 queue->idx, queue->nr_cmds, queue->send_list_len,
1039 nvmet_tcp_fatal_error(queue);
1043 req = &queue->cmd->req;
1046 if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
1047 &queue->nvme_sq, &nvmet_tcp_ops))) {
1053 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
1057 ret = nvmet_tcp_map_data(queue->cmd);
1059 pr_err("queue %d: failed to map data\n", queue->idx);
1060 if (nvmet_tcp_has_inline_data(queue->cmd))
1061 nvmet_tcp_fatal_error(queue);
1068 if (nvmet_tcp_need_data_in(queue->cmd)) {
1069 if (nvmet_tcp_has_inline_data(queue->cmd)) {
1070 queue->rcv_state = NVMET_TCP_RECV_DATA;
1071 nvmet_tcp_build_pdu_iovec(queue->cmd);
1075 nvmet_tcp_queue_response(&queue->cmd->req);
1079 queue->cmd->req.execute(&queue->cmd->req);
1081 nvmet_prepare_receive_pdu(queue);
1113 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
1115 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1121 iov.iov_base = (void *)&queue->pdu + queue->offset;
1122 iov.iov_len = queue->left;
1123 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1128 queue->offset += len;
1129 queue->left -= len;
1130 if (queue->left)
1133 if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
1134 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1138 nvmet_tcp_fatal_error(queue);
1147 queue->left = hdr->hlen - queue->offset + hdgst;
1151 if (queue->hdr_digest &&
1152 nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
1153 nvmet_tcp_fatal_error(queue); /* fatal */
1157 if (queue->data_digest &&
1158 nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
1159 nvmet_tcp_fatal_error(queue); /* fatal */
1163 return nvmet_tcp_done_recv_pdu(queue);
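nvmet_tcp_try_recv_pdu() reads each header in two phases because PDUs are self-describing: first the fixed 8-byte common header, then, once hlen is known, the remainder of the type-specific header (plus the header digest when negotiated, which this sketch omits). The offset/left pair preserves progress across short nonblocking reads. A stand-alone model; recv_some() is a hypothetical stand-in for kernel_recvmsg() that deliberately returns short reads:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct hdr { uint8_t type, flags, hlen, pdo; uint32_t plen; };

/* Hypothetical stand-in for a short, nonblocking socket read. */
static int recv_some(const uint8_t **src, size_t *avail, void *dst, size_t len)
{
	size_t n = len < *avail ? len : *avail;

	if (n > 2)
		n = 2;			/* force short reads */
	memcpy(dst, *src, n);
	*src += n;
	*avail -= n;
	return (int)n;
}

int main(void)
{
	uint8_t wire[32] = { 0x04, 0, 24, 0 };	/* CapsuleCmd, hlen = 24 */
	const uint8_t *src = wire;
	size_t avail = sizeof(wire);
	uint8_t pdu[32];
	size_t offset = 0, left = sizeof(struct hdr);	/* phase 1: 8 bytes */
	int saw_common_hdr = 0;

	while (left) {
		int n = recv_some(&src, &avail, pdu + offset, left);

		offset += n;
		left -= n;
		if (!left && !saw_common_hdr) {
			struct hdr h;

			memcpy(&h, pdu, sizeof(h));
			left = h.hlen - offset;	/* phase 2: rest of header */
			saw_common_hdr = 1;
		}
	}
	printf("received %zu header bytes\n", offset);
	return 0;
}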
1168 struct nvmet_tcp_queue *queue = cmd->queue;
1170 nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
1171 queue->offset = 0;
1172 queue->left = NVME_TCP_DIGEST_LENGTH;
1173 queue->rcv_state = NVMET_TCP_RECV_DDGST;
1176 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
1178 struct nvmet_tcp_cmd *cmd = queue->cmd;
1182 ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
1191 if (queue->data_digest) {
1199 nvmet_prepare_receive_pdu(queue);
1203 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
1205 struct nvmet_tcp_cmd *cmd = queue->cmd;
1209 .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
1210 .iov_len = queue->left
1213 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1218 queue->offset += ret;
1219 queue->left -= ret;
1220 if (queue->left)
1223 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
1224 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
1225 queue->idx, cmd->req.cmd->common.command_id,
1226 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
1230 nvmet_tcp_fatal_error(queue);
1240 nvmet_prepare_receive_pdu(queue);
1244 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
1248 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
1251 if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
1252 result = nvmet_tcp_try_recv_pdu(queue);
1257 if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
1258 result = nvmet_tcp_try_recv_data(queue);
1263 if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
1264 result = nvmet_tcp_try_recv_ddgst(queue);
1278 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
1284 ret = nvmet_tcp_try_recv_one(queue);
1286 nvmet_tcp_socket_error(queue, ret);
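nvmet_tcp_try_recv_one() above is the per-PDU receive state machine: header, then inline/H2C data, then the trailing data digest. The cascading ifs (rather than a switch) matter: when a handler advances rcv_state and more bytes are already buffered, a single call falls straight through to the next phase. A skeleton with stub handlers showing that control flow:

#include <stdio.h>

enum rcv_state { RECV_PDU, RECV_DATA, RECV_DDGST, RECV_ERR };

struct queue { enum rcv_state rcv_state; };

/* Stubs: each "consumes" its piece and advances the state, the way
 * try_recv_pdu()/try_recv_data()/try_recv_ddgst() do in the driver. */
static int recv_pdu(struct queue *q)   { q->rcv_state = RECV_DATA;  return 1; }
static int recv_data(struct queue *q)  { q->rcv_state = RECV_DDGST; return 1; }
static int recv_ddgst(struct queue *q) { q->rcv_state = RECV_PDU;   return 1; }

static int try_recv_one(struct queue *q)
{
	int result = 0;

	if (q->rcv_state == RECV_ERR)
		return 0;

	if (q->rcv_state == RECV_PDU) {
		result = recv_pdu(q);
		if (result <= 0)
			return result;
	}
	if (q->rcv_state == RECV_DATA) {
		result = recv_data(q);
		if (result <= 0)
			return result;
	}
	if (q->rcv_state == RECV_DDGST)
		result = recv_ddgst(q);

	return result;
}

int main(void)
{
	struct queue q = { .rcv_state = RECV_PDU };

	try_recv_one(&q);	/* one call walks all three phases */
	printf("state after one pass: %d (RECV_PDU again)\n", q.rcv_state);
	return 0;
}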
1297 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
1299 spin_lock(&queue->state_lock);
1300 if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
1301 queue->state = NVMET_TCP_Q_DISCONNECTING;
1302 queue_work(nvmet_wq, &queue->release_work);
1304 spin_unlock(&queue->state_lock);
1307 static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
1309 queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
1312 static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
1319 nvmet_tcp_arm_queue_deadline(queue);
1321 return !time_after(jiffies, queue->poll_end);
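This is the idle-poll behavior the comment fragment at line 60 refers to: while io_work keeps finding work (ops != 0), the deadline is re-armed and the worker reschedules itself; once the queue has sat idle past idle_poll_period_usecs, polling stops until sk_data_ready fires again. A monotonic-clock sketch of the jiffies arithmetic:

#include <stdio.h>
#include <time.h>

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

static long long poll_end;
static const long long idle_poll_period_us = 5000;	/* module param */

static void arm_queue_deadline(void)
{
	poll_end = now_us() + idle_poll_period_us;
}

static int check_queue_deadline(int ops)
{
	if (!idle_poll_period_us)
		return 0;		/* feature disabled */
	if (ops)
		arm_queue_deadline();	/* did work: stay hot */
	return now_us() <= poll_end;	/* poll until idle long enough */
}

int main(void)
{
	arm_queue_deadline();
	printf("reschedule: %d\n", check_queue_deadline(0));
	return 0;
}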
1326 struct nvmet_tcp_queue *queue =
1334 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
1340 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
1352 if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
1353 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1356 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
1359 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1361 c->queue = queue;
1362 c->req.port = queue->port->nport;
1364 c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
1370 c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
1376 c->data_pdu = page_frag_alloc(&queue->pf_cache,
1381 c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
1388 list_add_tail(&c->entry, &queue->free_list);
1408 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
1411 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
1418 ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
1423 queue->cmds = cmds;
1434 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
1436 struct nvmet_tcp_cmd *cmds = queue->cmds;
1439 for (i = 0; i < queue->nr_cmds; i++)
1442 nvmet_tcp_free_cmd(&queue->connect);
1446 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
1448 struct socket *sock = queue->sock;
1451 sock->sk->sk_data_ready = queue->data_ready;
1452 sock->sk->sk_state_change = queue->state_change;
1453 sock->sk->sk_write_space = queue->write_space;
1458 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
1460 struct nvmet_tcp_cmd *cmd = queue->cmds;
1463 for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1468 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
1470 nvmet_req_uninit(&queue->connect.req);
1474 static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
1476 struct nvmet_tcp_cmd *cmd = queue->cmds;
1479 for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1484 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
1485 nvmet_tcp_free_cmd_buffers(&queue->connect);
1491 struct nvmet_tcp_queue *queue =
1495 list_del_init(&queue->queue_list);
1498 nvmet_tcp_restore_socket_callbacks(queue);
1499 cancel_work_sync(&queue->io_work);
1501 queue->rcv_state = NVMET_TCP_RECV_ERR;
1503 nvmet_tcp_uninit_data_in_cmds(queue);
1504 nvmet_sq_destroy(&queue->nvme_sq);
1505 cancel_work_sync(&queue->io_work);
1506 nvmet_tcp_free_cmd_data_in_buffers(queue);
1507 sock_release(queue->sock);
1508 nvmet_tcp_free_cmds(queue);
1509 if (queue->hdr_digest || queue->data_digest)
1510 nvmet_tcp_free_crypto(queue);
1511 ida_free(&nvmet_tcp_queue_ida, queue->idx);
1513 page = virt_to_head_page(queue->pf_cache.va);
1514 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
1515 kfree(queue);
1520 struct nvmet_tcp_queue *queue;
1525 queue = sk->sk_user_data;
1526 if (likely(queue))
1527 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1533 struct nvmet_tcp_queue *queue;
1536 queue = sk->sk_user_data;
1537 if (unlikely(!queue))
1540 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1541 queue->write_space(sk);
1547 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1555 struct nvmet_tcp_queue *queue;
1558 queue = sk->sk_user_data;
1559 if (!queue)
1570 nvmet_tcp_schedule_release_queue(queue);
1573 pr_warn("queue %d unhandled state %d\n",
1574 queue->idx, sk->sk_state);
1580 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
1582 struct socket *sock = queue->sock;
1587 (struct sockaddr *)&queue->sockaddr);
1592 (struct sockaddr *)&queue->sockaddr_peer);
1597 * Cleanup whatever is sitting in the TCP transmit queue on socket
1619 sock->sk->sk_user_data = queue;
1620 queue->data_ready = sock->sk->sk_data_ready;
1622 queue->state_change = sock->sk->sk_state_change;
1624 queue->write_space = sock->sk->sk_write_space;
1627 nvmet_tcp_arm_queue_deadline(queue);
1628 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
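nvmet_tcp_set_queue_sock() above hijacks the socket callbacks: it stashes the original sk_data_ready/sk_state_change/sk_write_space in the queue, installs queue-aware replacements that kick io_work, and nvmet_tcp_restore_socket_callbacks() puts the originals back at teardown. A user-space analog of the save/override/restore pattern with plain function pointers (types are toy stand-ins):

#include <stdio.h>

struct sock  { void (*data_ready)(struct sock *); void *user_data; };
struct queue {
	struct sock *sk;
	void (*saved_data_ready)(struct sock *);
};

static void default_data_ready(struct sock *sk)
{
	(void)sk;
	printf("default data_ready\n");
}

static void queue_data_ready(struct sock *sk)
{
	struct queue *q = sk->user_data;

	if (q)
		printf("kick io_work for queue %p\n", (void *)q);
}

static void install(struct queue *q, struct sock *sk)
{
	q->sk = sk;
	sk->user_data = q;
	q->saved_data_ready = sk->data_ready;	/* save the original */
	sk->data_ready = queue_data_ready;	/* override */
}

static void restore(struct queue *q)
{
	q->sk->data_ready = q->saved_data_ready;
	q->sk->user_data = NULL;
}

int main(void)
{
	struct sock sk = { .data_ready = default_data_ready };
	struct queue q;

	install(&q, &sk);
	sk.data_ready(&sk);	/* now routed to the queue */
	restore(&q);
	sk.data_ready(&sk);	/* back to the default */
	return 0;
}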
1638 struct nvmet_tcp_queue *queue;
1641 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1642 if (!queue)
1645 INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
1646 INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
1647 queue->sock = newsock;
1648 queue->port = port;
1649 queue->nr_cmds = 0;
1650 spin_lock_init(&queue->state_lock);
1651 queue->state = NVMET_TCP_Q_CONNECTING;
1652 INIT_LIST_HEAD(&queue->free_list);
1653 init_llist_head(&queue->resp_list);
1654 INIT_LIST_HEAD(&queue->resp_send_list);
1656 queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
1657 if (queue->idx < 0) {
1658 ret = queue->idx;
1662 ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
1666 ret = nvmet_sq_init(&queue->nvme_sq);
1670 nvmet_prepare_receive_pdu(queue);
1673 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
1676 ret = nvmet_tcp_set_queue_sock(queue);
1683 list_del_init(&queue->queue_list);
1685 nvmet_sq_destroy(&queue->nvme_sq);
1687 nvmet_tcp_free_cmd(&queue->connect);
1689 ida_free(&nvmet_tcp_queue_ida, queue->idx);
1691 kfree(queue);
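The tail of nvmet_tcp_alloc_queue() above shows the kernel's goto-unwind idiom: each acquisition gets a matching label, and a failure jumps to the label that releases everything obtained so far, in reverse order. A stub sketch of the same shape (step names are illustrative):

#include <stdio.h>

static int step(const char *name, int ok)
{
	printf("%s: %s\n", name, ok ? "ok" : "failed");
	return ok ? 0 : -1;
}

static int alloc_queue(int fail_at)
{
	int ret;

	ret = step("ida_alloc", fail_at != 1);
	if (ret)
		goto out;
	ret = step("alloc_cmd", fail_at != 2);
	if (ret)
		goto out_ida;
	ret = step("sq_init", fail_at != 3);
	if (ret)
		goto out_free_cmd;
	ret = step("set_queue_sock", fail_at != 4);
	if (ret)
		goto out_destroy_sq;
	return 0;

out_destroy_sq:
	printf("  undo: sq_destroy\n");
out_free_cmd:
	printf("  undo: free_cmd\n");
out_ida:
	printf("  undo: ida_free\n");
out:
	return ret;
}

int main(void)
{
	return alloc_queue(3) ? 1 : 0;	/* fail at sq_init, watch the unwind */
}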
1711 pr_err("failed to allocate queue\n");
1814 struct nvmet_tcp_queue *queue;
1817 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1818 if (queue->port == port)
1819 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1844 struct nvmet_tcp_queue *queue;
1847 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1848 if (queue->nvme_sq.ctrl == ctrl)
1849 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1855 struct nvmet_tcp_queue *queue =
1863 queue->nr_cmds = sq->size * 2;
1864 if (nvmet_tcp_alloc_cmds(queue))
1877 struct nvmet_tcp_queue *queue = cmd->queue;
1879 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
1918 struct nvmet_tcp_queue *queue;
1924 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1925 kernel_sock_shutdown(queue->sock, SHUT_RDWR);