Lines matching refs:queue — references to "queue" in the Linux NVMe over Fabrics TCP target (drivers/nvme/target/tcp.c). Each match is prefixed with its line number in that source file.
57 struct nvmet_tcp_queue *queue;
159 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
162 if (unlikely(!queue->nr_cmds)) {
167 return cmd - queue->cmds;
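
nvmet_tcp_cmd_tag() derives the transfer tag (ttag) by pointer arithmetic: every command comes from one contiguous cmds[] allocation, so the tag is simply the command's index in that array, with 0xffff (USHRT_MAX) as a sentinel before the pool has been sized. A minimal userspace model of the scheme (struct names shortened, not the kernel types):

    #include <stdint.h>
    #include <stdio.h>

    struct cmd { int unused; };

    struct queue {
        struct cmd *cmds;   /* one contiguous allocation */
        int nr_cmds;        /* 0 until install_queue sizes the pool */
    };

    /* Tag generation as in nvmet_tcp_cmd_tag(): recover the index by
     * pointer subtraction; before the pool exists, send the sentinel. */
    static uint16_t cmd_tag(const struct queue *q, const struct cmd *c)
    {
        if (!q->nr_cmds)
            return 0xffff;  /* USHRT_MAX: cmds not allocated yet */
        return (uint16_t)(c - q->cmds);
    }

    int main(void)
    {
        struct cmd cmds[4];
        struct queue q = { .cmds = cmds, .nr_cmds = 4 };

        printf("tag = %u\n", cmd_tag(&q, &cmds[2])); /* prints 2 */
        return 0;
    }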
195 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
199 cmd = list_first_entry_or_null(&queue->free_list,
215 if (unlikely(cmd == &cmd->queue->connect))
218 list_add_tail(&cmd->entry, &cmd->queue->free_list);
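
nvmet_tcp_get_cmd() and nvmet_tcp_put_cmd() implement a simple per-queue free-list pool; note the special-cased connect command, which is allocated outside the pool and never returned to it. A sketch of the same idea, with a plain singly linked list standing in for the kernel's list_head API:

    #include <stdio.h>

    struct queue;

    struct cmd {
        struct cmd *next;
        struct queue *queue;
    };

    struct queue {
        struct cmd *free_list;
        struct cmd connect;     /* lives outside the pool */
    };

    static struct cmd *get_cmd(struct queue *q)
    {
        struct cmd *c = q->free_list;

        if (c)
            q->free_list = c->next; /* pop, or NULL when exhausted */
        return c;
    }

    static void put_cmd(struct cmd *c)
    {
        struct queue *q = c->queue;

        if (c == &q->connect)
            return;                 /* connect cmd is not pool-managed */
        c->next = q->free_list;     /* push back for reuse */
        q->free_list = c;
    }

    int main(void)
    {
        struct queue q = {0};
        struct cmd pool[2] = { { .queue = &q }, { .queue = &q } };

        put_cmd(&pool[0]);
        put_cmd(&pool[1]);
        printf("got %p\n", (void *)get_cmd(&q)); /* pool[1]: LIFO reuse */
        return 0;
    }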
221 static inline int queue_cpu(struct nvmet_tcp_queue *queue)
223 return queue->sock->sk->sk_incoming_cpu;
226 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
228 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
231 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
233 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
246 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
254 pr_err("queue %d: header digest enabled but no header digest\n",
255 queue->idx);
260 nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
263 pr_err("queue %d: header digest error: recv %#x expected %#x\n",
264 queue->idx, le32_to_cpu(recv_digest),
272 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
275 u8 digest_len = nvmet_tcp_hdgst_len(queue);
282 pr_err("queue %d: data digest flag is cleared\n", queue->idx);
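
Both digests are CRC32C per the NVMe/TCP spec; the queue->rcv_hash/snd_hash requests wrap the kernel crypto API's "crc32c" transform. Below is a self-contained model of the header-digest check done by nvmet_tcp_verify_hdgst(): the 4-byte digest follows the PDU header on the wire. This sketch assumes a little-endian host (the on-wire digest is little-endian and is memcpy'd directly), and it omits the flag check the kernel does first:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Bitwise CRC32C (Castagnoli) — the digest NVMe/TCP uses; the
     * kernel computes it through the crypto API instead. */
    static uint32_t crc32c(const void *buf, size_t len)
    {
        const uint8_t *p = buf;
        uint32_t crc = 0xffffffffu;

        while (len--) {
            crc ^= *p++;
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ (0x82f63b78u & (0u - (crc & 1)));
        }
        return ~crc;
    }

    /* Header digest check in the spirit of nvmet_tcp_verify_hdgst():
     * recompute over the header bytes and compare with the received
     * digest sitting right after the header. */
    static int verify_hdgst(const void *pdu, size_t hlen)
    {
        uint32_t recv_digest, exp_digest;

        memcpy(&recv_digest, (const uint8_t *)pdu + hlen, sizeof(recv_digest));
        exp_digest = crc32c(pdu, hlen);
        if (recv_digest != exp_digest) {
            fprintf(stderr, "header digest error: recv %#x expected %#x\n",
                    recv_digest, exp_digest);
            return -1; /* the kernel returns -EPROTO and kills the queue */
        }
        return 0;
    }

    int main(void)
    {
        uint8_t pdu[12] = { 4, 0, 8, 0 }; /* fake 8-byte header + digest */
        uint32_t d = crc32c(pdu, 8);

        memcpy(pdu + 8, &d, 4);                       /* append valid digest */
        printf("verify: %d\n", verify_hdgst(pdu, 8)); /* prints 0 */
        return 0;
    }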
329 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
331 queue->rcv_state = NVMET_TCP_RECV_ERR;
332 if (queue->nvme_sq.ctrl)
333 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
335 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
338 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
340 queue->rcv_state = NVMET_TCP_RECV_ERR;
342 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
344 nvmet_tcp_fatal_error(queue);
412 struct nvmet_tcp_queue *queue = cmd->queue;
413 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
414 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
420 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
431 if (queue->data_digest) {
433 nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
436 if (cmd->queue->hdr_digest) {
438 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
445 struct nvmet_tcp_queue *queue = cmd->queue;
446 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
458 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
461 if (cmd->queue->hdr_digest) {
463 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
470 struct nvmet_tcp_queue *queue = cmd->queue;
471 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
481 if (cmd->queue->hdr_digest) {
483 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
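
The three setup helpers (nvmet_setup_c2h_data_pdu, nvmet_setup_r2t_pdu, nvmet_setup_response_pdu) share the same length bookkeeping: an enabled header digest pushes the data offset out, and both digests count toward the total PDU length. A sketch over just the common 8-byte header — the real C2H/R2T/response PDUs have larger type-specific headers, which is why hlen is a parameter here, and the kernel stores plen as little-endian (cpu_to_le32) while this sketch keeps host order:

    #include <stdint.h>
    #include <stdio.h>

    #define DIGEST_LEN 4 /* NVME_TCP_DIGEST_LENGTH */

    /* Common NVMe/TCP PDU header, following struct nvme_tcp_hdr. */
    struct tcp_hdr {
        uint8_t  type;
        uint8_t  flags;
        uint8_t  hlen;  /* PDU header length */
        uint8_t  pdo;   /* PDU data offset */
        uint32_t plen;  /* total PDU length */
    };

    /* Length bookkeeping as in nvmet_setup_c2h_data_pdu(). */
    static void fill_hdr(struct tcp_hdr *h, uint8_t hlen, uint32_t data_len,
                         int hdgst_on, int ddgst_on)
    {
        uint8_t hdgst = hdgst_on ? DIGEST_LEN : 0;
        uint8_t ddgst = ddgst_on ? DIGEST_LEN : 0;

        h->hlen = hlen;
        h->pdo  = hlen + hdgst;                      /* data starts here */
        h->plen = hlen + hdgst + data_len + ddgst;   /* digests included */
    }

    int main(void)
    {
        struct tcp_hdr h = {0};

        fill_hdr(&h, 24, 4096, 1, 1); /* 24-byte data header, both digests */
        printf("pdo=%u plen=%u\n", h.pdo, h.plen); /* pdo=28 plen=4128 */
        return 0;
    }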
487 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
492 for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
494 list_add(&cmd->entry, &queue->resp_send_list);
495 queue->send_list_len++;
499 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
501 queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
503 if (!queue->snd_cmd) {
504 nvmet_tcp_process_resp_list(queue);
505 queue->snd_cmd =
506 list_first_entry_or_null(&queue->resp_send_list,
508 if (unlikely(!queue->snd_cmd))
512 list_del_init(&queue->snd_cmd->entry);
513 queue->send_list_len--;
515 if (nvmet_tcp_need_data_out(queue->snd_cmd))
516 nvmet_setup_c2h_data_pdu(queue->snd_cmd);
517 else if (nvmet_tcp_need_data_in(queue->snd_cmd))
518 nvmet_setup_r2t_pdu(queue->snd_cmd);
520 nvmet_setup_response_pdu(queue->snd_cmd);
522 return queue->snd_cmd;
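
nvmet_tcp_queue_response() pushes completions onto a lock-free llist from arbitrary contexts, and nvmet_tcp_process_resp_list() drains it from io_work. Since llist_del_all() hands back the chain newest-first, the drain prepends each node to resp_send_list, which restores FIFO order for nvmet_tcp_fetch_cmd(). A userspace model using C11 atomics — the kernel's llist is equivalent in spirit, though it keeps a separate list_head for the ordered list rather than reusing the next pointer as done here:

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *next; int id; };

    static _Atomic(struct node *) resp_list; /* MPSC stack head */

    /* Producer side: push with a CAS loop (llist_add). */
    static void llist_push(struct node *n)
    {
        struct node *old = atomic_load(&resp_list);

        do {
            n->next = old;
        } while (!atomic_compare_exchange_weak(&resp_list, &old, n));
    }

    /* Consumer side: take the whole chain at once (llist_del_all). */
    static struct node *llist_del_all(void)
    {
        return atomic_exchange(&resp_list, NULL);
    }

    int main(void)
    {
        struct node a = {.id = 1}, b = {.id = 2}, c = {.id = 3};
        struct node *send_list = NULL;

        llist_push(&a); llist_push(&b); llist_push(&c);

        for (struct node *n = llist_del_all(); n; ) {
            struct node *next = n->next;

            n->next = send_list;  /* prepend: reverses LIFO back to FIFO */
            send_list = n;
            n = next;
        }
        for (struct node *n = send_list; n; n = n->next)
            printf("%d ", n->id); /* prints: 1 2 3 */
        printf("\n");
        return 0;
    }

The MPSC shape fits because completions can arrive from many contexts, while only the queue's io_work consumes them.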
529 struct nvmet_tcp_queue *queue = cmd->queue;
533 if (unlikely(cmd == queue->cmd)) {
542 if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
548 llist_add(&cmd->lentry, &queue->resp_list);
549 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
562 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
566 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
585 struct nvmet_tcp_queue *queue = cmd->queue;
593 if ((!last_in_batch && cmd->queue->send_list_len) ||
595 queue->data_digest || !queue->nvme_sq.sqhd_disabled)
598 ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
613 if (queue->data_digest) {
617 if (queue->nvme_sq.sqhd_disabled) {
618 cmd->queue->snd_cmd = NULL;
625 if (queue->nvme_sq.sqhd_disabled) {
637 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
642 if (!last_in_batch && cmd->queue->send_list_len)
647 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
659 cmd->queue->snd_cmd = NULL;
666 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
671 if (!last_in_batch && cmd->queue->send_list_len)
676 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
686 cmd->queue->snd_cmd = NULL;
692 struct nvmet_tcp_queue *queue = cmd->queue;
701 if (!last_in_batch && cmd->queue->send_list_len)
706 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
716 if (queue->nvme_sq.sqhd_disabled) {
717 cmd->queue->snd_cmd = NULL;
725 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
728 struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
731 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
732 cmd = nvmet_tcp_fetch_cmd(queue);
774 static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
780 ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
782 nvmet_tcp_socket_error(queue, ret);
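
nvmet_tcp_try_send() caps each invocation at a budget so one busy queue cannot monopolize the workqueue, and it distinguishes "socket buffer full" from hard socket errors. In the kernel the -EAGAIN-to-0 conversion happens at the end of nvmet_tcp_try_send_one(); this sketch folds it into the loop, and all helper names are stand-ins:

    #include <errno.h>
    #include <stdio.h>

    struct queue { int remaining; };

    /* Stub for the per-command send chain (data PDU -> data -> ddgst
     * -> r2t/response). Returns 1 on a completed send, 0 when idle,
     * -EAGAIN when the socket is full, other negatives on hard errors. */
    static int try_send_one(struct queue *q, int last_in_batch)
    {
        (void)last_in_batch; /* the real code uses it to decide MSG_MORE */
        if (!q->remaining)
            return 0;
        q->remaining--;
        return 1;
    }

    static void socket_error(struct queue *q, int status)
    {
        (void)q;
        fprintf(stderr, "socket error %d, shutting queue down\n", status);
    }

    /* Budgeted send loop modeled on nvmet_tcp_try_send(). */
    static int try_send(struct queue *q, int budget, int *sends)
    {
        int i, ret = 0;

        for (i = 0; i < budget; i++) {
            ret = try_send_one(q, i == budget - 1);
            if (ret < 0) {
                if (ret == -EAGAIN)
                    return 0;        /* socket full: stop quietly */
                socket_error(q, ret); /* hard error: tear down */
                return ret;
            } else if (ret == 0) {
                break;               /* nothing left to send */
            }
            (*sends)++;
        }
        return ret; /* >0 tells io_work more work may remain */
    }

    int main(void)
    {
        struct queue q = { .remaining = 10 };
        int sends = 0;

        try_send(&q, 8, &sends);
        printf("sent %d, %d left\n", sends, q.remaining); /* sent 8, 2 left */
        return 0;
    }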
793 static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
795 queue->offset = 0;
796 queue->left = sizeof(struct nvme_tcp_hdr);
797 queue->cmd = NULL;
798 queue->rcv_state = NVMET_TCP_RECV_PDU;
801 static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
803 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
805 ahash_request_free(queue->rcv_hash);
806 ahash_request_free(queue->snd_hash);
810 static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
818 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
819 if (!queue->snd_hash)
821 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
823 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
824 if (!queue->rcv_hash)
826 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
830 ahash_request_free(queue->snd_hash);
837 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
839 struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
840 struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
848 nvmet_tcp_fatal_error(queue);
852 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
857 pr_err("queue %d: unsupported hpda %d\n", queue->idx,
862 queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
863 queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
864 if (queue->hdr_digest || queue->data_digest) {
865 ret = nvmet_tcp_alloc_crypto(queue);
878 if (queue->hdr_digest)
880 if (queue->data_digest)
885 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
887 return ret; /* queue removal will cleanup */
889 queue->state = NVMET_TCP_Q_LIVE;
890 nvmet_prepare_receive_pdu(queue);
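
nvmet_tcp_handle_icreq() validates pfv and hpda, latches the host's digest request bits, and mirrors them back in the icresp so both ends agree for the life of the connection. A compact model of just the digest negotiation (flag values from include/linux/nvme-tcp.h, names shortened):

    #include <stdint.h>
    #include <stdio.h>

    #define HDR_DIGEST_ENABLE  (1u << 0) /* NVME_TCP_HDR_DIGEST_ENABLE */
    #define DATA_DIGEST_ENABLE (1u << 1) /* NVME_TCP_DATA_DIGEST_ENABLE */

    struct queue {
        int hdr_digest;
        int data_digest;
    };

    /* Latch what the host asked for, then advertise exactly the same
     * bits back in the icresp, as nvmet_tcp_handle_icreq() does. */
    static uint8_t negotiate_digests(struct queue *q, uint8_t icreq_digest)
    {
        uint8_t icresp_digest = 0;

        q->hdr_digest  = !!(icreq_digest & HDR_DIGEST_ENABLE);
        q->data_digest = !!(icreq_digest & DATA_DIGEST_ENABLE);

        if (q->hdr_digest)
            icresp_digest |= HDR_DIGEST_ENABLE;
        if (q->data_digest)
            icresp_digest |= DATA_DIGEST_ENABLE;
        return icresp_digest;
    }

    int main(void)
    {
        struct queue q = {0};
        uint8_t resp = negotiate_digests(&q, HDR_DIGEST_ENABLE);

        printf("hdr=%d data=%d icresp digest=%#x\n",
               q.hdr_digest, q.data_digest, resp); /* hdr=1 data=0 0x1 */
        return 0;
    }

This is also why the crypto contexts are allocated here: hashing is only needed if either side of the negotiation enabled a digest.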
894 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
902 nvmet_prepare_receive_pdu(queue);
908 pr_err("queue %d: failed to map data\n", queue->idx);
909 nvmet_tcp_fatal_error(queue);
913 queue->rcv_state = NVMET_TCP_RECV_DATA;
918 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
920 struct nvme_tcp_data_pdu *data = &queue->pdu.data;
924 if (likely(queue->nr_cmds)) {
925 if (unlikely(data->ttag >= queue->nr_cmds)) {
926 pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
927 queue->idx, data->ttag, queue->nr_cmds);
928 nvmet_tcp_fatal_error(queue);
931 cmd = &queue->cmds[data->ttag];
933 cmd = &queue->connect;
941 nvmet_tcp_fatal_error(queue);
946 nvmet_tcp_hdgst_len(queue) -
947 nvmet_tcp_ddgst_len(queue) -
956 nvmet_tcp_fatal_error(queue);
961 queue->cmd = cmd;
962 queue->rcv_state = NVMET_TCP_RECV_DATA;
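
The ttag in an H2CData PDU is host-controlled, so nvmet_tcp_handle_h2c_data_pdu() must bounds-check it before using it as an index into cmds[]; before install_queue has sized the pool (nr_cmds == 0), only the connect command can legitimately be in flight. A sketch of that validation:

    #include <stdio.h>

    struct cmd { int id; };

    struct queue {
        struct cmd *cmds;  /* NULL until install_queue allocates them */
        int nr_cmds;
        struct cmd connect;
    };

    /* ttag validation as in nvmet_tcp_handle_h2c_data_pdu(): a ttag
     * from the wire is untrusted, so reject out-of-range values
     * before indexing the command array. */
    static struct cmd *lookup_cmd(struct queue *q, unsigned int ttag)
    {
        if (q->nr_cmds) {
            if (ttag >= (unsigned int)q->nr_cmds) {
                fprintf(stderr, "out of bound ttag %u, nr_cmds %d\n",
                        ttag, q->nr_cmds);
                return NULL; /* kernel: fatal error, -EPROTO */
            }
            return &q->cmds[ttag];
        }
        /* pool not sized yet: only the connect command is valid */
        return &q->connect;
    }

    int main(void)
    {
        struct cmd cmds[2] = { {0}, {1} };
        struct queue q = { .cmds = cmds, .nr_cmds = 2 };

        printf("%p\n", (void *)lookup_cmd(&q, 1)); /* valid: &cmds[1] */
        printf("%p\n", (void *)lookup_cmd(&q, 7)); /* NULL, logged */
        return 0;
    }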
967 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
969 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
970 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
974 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
978 nvmet_tcp_fatal_error(queue);
981 return nvmet_tcp_handle_icreq(queue);
985 ret = nvmet_tcp_handle_h2c_data_pdu(queue);
991 queue->cmd = nvmet_tcp_get_cmd(queue);
992 if (unlikely(!queue->cmd)) {
994 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
995 queue->idx, queue->nr_cmds, queue->send_list_len,
997 nvmet_tcp_fatal_error(queue);
1001 req = &queue->cmd->req;
1004 if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
1005 &queue->nvme_sq, &nvmet_tcp_ops))) {
1011 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
1015 ret = nvmet_tcp_map_data(queue->cmd);
1017 pr_err("queue %d: failed to map data\n", queue->idx);
1018 if (nvmet_tcp_has_inline_data(queue->cmd))
1019 nvmet_tcp_fatal_error(queue);
1026 if (nvmet_tcp_need_data_in(queue->cmd)) {
1027 if (nvmet_tcp_has_inline_data(queue->cmd)) {
1028 queue->rcv_state = NVMET_TCP_RECV_DATA;
1029 nvmet_tcp_map_pdu_iovec(queue->cmd);
1033 nvmet_tcp_queue_response(&queue->cmd->req);
1037 queue->cmd->req.execute(&queue->cmd->req);
1039 nvmet_prepare_receive_pdu(queue);
1071 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
1073 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1079 iov.iov_base = (void *)&queue->pdu + queue->offset;
1080 iov.iov_len = queue->left;
1081 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1086 queue->offset += len;
1087 queue->left -= len;
1088 if (queue->left)
1091 if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
1092 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1096 nvmet_tcp_fatal_error(queue);
1105 queue->left = hdr->hlen - queue->offset + hdgst;
1109 if (queue->hdr_digest &&
1110 nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
1111 nvmet_tcp_fatal_error(queue); /* fatal */
1115 if (queue->data_digest &&
1116 nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
1117 nvmet_tcp_fatal_error(queue); /* fatal */
1121 return nvmet_tcp_done_recv_pdu(queue);
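
nvmet_tcp_try_recv_pdu() tolerates short reads by carrying offset/left across calls: it first collects the common 8-byte header to learn hlen, then extends left to cover the type-specific header plus the optional header digest. A userspace simulation that feeds the parser one byte at a time — digest and PDU-type validation are omitted, and the hypothetical recv_pdu() stands in for the kernel_recvmsg() loop:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct hdr { uint8_t type, flags, hlen, pdo; uint32_t plen; };

    struct queue {
        uint8_t pdu[128];
        size_t offset, left;
        int hdr_digest;
    };

    /* One receive pass modeled on nvmet_tcp_try_recv_pdu(): returns 1
     * when a full PDU header is buffered, 0 to come back later. */
    static int recv_pdu(struct queue *q, const uint8_t *data, size_t len)
    {
        size_t n = len < q->left ? len : q->left;

        memcpy(q->pdu + q->offset, data, n);
        q->offset += n;
        q->left -= n;
        if (q->left)
            return 0;       /* short read: keep state for the next call */

        if (q->offset == sizeof(struct hdr)) {
            const struct hdr *h = (const struct hdr *)q->pdu;
            size_t hdgst = q->hdr_digest ? 4 : 0;

            q->left = h->hlen - q->offset + hdgst;
            if (q->left)
                return 0;   /* now read the type-specific header + digest */
        }
        return 1;           /* complete: hand off to the PDU-done handler */
    }

    int main(void)
    {
        struct queue q = { .left = sizeof(struct hdr) };
        union { struct hdr h; uint8_t bytes[24]; } wire =
            { .h = { .type = 4, .hlen = 24, .plen = 24 } };
        int done = 0;

        for (size_t i = 0; i < sizeof(wire.bytes) && !done; i++)
            done = recv_pdu(&q, &wire.bytes[i], 1); /* worst case: 1 B/read */
        printf("done=%d offset=%zu\n", done, q.offset); /* done=1 offset=24 */
        return 0;
    }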
1126 struct nvmet_tcp_queue *queue = cmd->queue;
1128 nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
1129 queue->offset = 0;
1130 queue->left = NVME_TCP_DIGEST_LENGTH;
1131 queue->rcv_state = NVMET_TCP_RECV_DDGST;
1134 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
1136 struct nvmet_tcp_cmd *cmd = queue->cmd;
1140 ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
1150 if (queue->data_digest) {
1158 nvmet_prepare_receive_pdu(queue);
1162 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
1164 struct nvmet_tcp_cmd *cmd = queue->cmd;
1168 .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
1169 .iov_len = queue->left
1172 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1177 queue->offset += ret;
1178 queue->left -= ret;
1179 if (queue->left)
1182 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
1183 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
1184 queue->idx, cmd->req.cmd->common.command_id,
1185 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
1188 nvmet_tcp_fatal_error(queue);
1198 nvmet_prepare_receive_pdu(queue);
1202 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
1206 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
1209 if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
1210 result = nvmet_tcp_try_recv_pdu(queue);
1215 if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
1216 result = nvmet_tcp_try_recv_data(queue);
1221 if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
1222 result = nvmet_tcp_try_recv_ddgst(queue);
1236 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
1242 ret = nvmet_tcp_try_recv_one(queue);
1244 nvmet_tcp_socket_error(queue, ret);
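
Reception is a three-state machine (PDU -> DATA -> DDGST), and nvmet_tcp_try_recv_one() deliberately falls through the states in order so a single call can consume a header, its data, and its digest when everything is already queued on the socket; nvmet_tcp_try_recv() then wraps it in the same budget/error pattern as the send side. A skeleton with stubbed state handlers:

    #include <errno.h>
    #include <stdio.h>

    enum rcv_state { RECV_PDU, RECV_DATA, RECV_DDGST, RECV_ERR };

    struct queue { enum rcv_state rcv_state; };

    /* Stubs for nvmet_tcp_try_recv_pdu/data/ddgst: return 0 on
     * success and advance rcv_state, <0 (e.g. -EAGAIN) otherwise. */
    static int try_recv_pdu(struct queue *q)   { q->rcv_state = RECV_DATA;  return 0; }
    static int try_recv_data(struct queue *q)  { q->rcv_state = RECV_DDGST; return 0; }
    static int try_recv_ddgst(struct queue *q) { q->rcv_state = RECV_PDU;   return 0; }

    /* One step of the machine, modeled on nvmet_tcp_try_recv_one(). */
    static int try_recv_one(struct queue *q)
    {
        int result = 0;

        if (q->rcv_state == RECV_ERR)
            return 0;

        if (q->rcv_state == RECV_PDU) {
            result = try_recv_pdu(q);
            if (result != 0)
                goto done_recv;
        }
        if (q->rcv_state == RECV_DATA) {
            result = try_recv_data(q);
            if (result != 0)
                goto done_recv;
        }
        if (q->rcv_state == RECV_DDGST)
            result = try_recv_ddgst(q);

    done_recv:
        if (result < 0) {
            if (result == -EAGAIN)
                return 0;   /* no more bytes right now */
            return result;  /* hard error: caller tears the queue down */
        }
        return 1;           /* made progress */
    }

    int main(void)
    {
        struct queue q = { .rcv_state = RECV_PDU };
        int ret = try_recv_one(&q);

        printf("ret=%d state=%d\n", ret, q.rcv_state); /* ret=1 state=0 */
        return 0;
    }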
1255 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
1257 spin_lock(&queue->state_lock);
1258 if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
1259 queue->state = NVMET_TCP_Q_DISCONNECTING;
1260 schedule_work(&queue->release_work);
1262 spin_unlock(&queue->state_lock);
1267 struct nvmet_tcp_queue *queue =
1275 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
1281 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
1293 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1296 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
1299 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1301 c->queue = queue;
1302 c->req.port = queue->port->nport;
1304 c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
1310 c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
1316 c->data_pdu = page_frag_alloc(&queue->pf_cache,
1321 c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
1328 list_add_tail(&c->entry, &queue->free_list);
1348 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
1351 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
1358 ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
1363 queue->cmds = cmds;
1374 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
1376 struct nvmet_tcp_cmd *cmds = queue->cmds;
1379 for (i = 0; i < queue->nr_cmds; i++)
1382 nvmet_tcp_free_cmd(&queue->connect);
1386 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
1388 struct socket *sock = queue->sock;
1391 sock->sk->sk_data_ready = queue->data_ready;
1392 sock->sk->sk_state_change = queue->state_change;
1393 sock->sk->sk_write_space = queue->write_space;
1406 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
1408 struct nvmet_tcp_cmd *cmd = queue->cmds;
1411 for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1416 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
1418 nvmet_tcp_finish_cmd(&queue->connect);
1425 struct nvmet_tcp_queue *queue =
1429 list_del_init(&queue->queue_list);
1432 nvmet_tcp_restore_socket_callbacks(queue);
1433 flush_work(&queue->io_work);
1435 nvmet_tcp_uninit_data_in_cmds(queue);
1436 nvmet_sq_destroy(&queue->nvme_sq);
1437 cancel_work_sync(&queue->io_work);
1438 sock_release(queue->sock);
1439 nvmet_tcp_free_cmds(queue);
1440 if (queue->hdr_digest || queue->data_digest)
1441 nvmet_tcp_free_crypto(queue);
1442 ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
1444 page = virt_to_head_page(queue->pf_cache.va);
1445 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
1446 kfree(queue);
1451 struct nvmet_tcp_queue *queue;
1454 queue = sk->sk_user_data;
1455 if (likely(queue))
1456 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1462 struct nvmet_tcp_queue *queue;
1465 queue = sk->sk_user_data;
1466 if (unlikely(!queue))
1469 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1470 queue->write_space(sk);
1476 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1484 struct nvmet_tcp_queue *queue;
1487 queue = sk->sk_user_data;
1488 if (!queue)
1499 nvmet_tcp_schedule_release_queue(queue);
1502 pr_warn("queue %d unhandled state %d\n",
1503 queue->idx, sk->sk_state);
1509 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
1511 struct socket *sock = queue->sock;
1516 (struct sockaddr *)&queue->sockaddr);
1521 (struct sockaddr *)&queue->sockaddr_peer);
1526 * Cleanup whatever is sitting in the TCP transmit queue on socket
1548 sock->sk->sk_user_data = queue;
1549 queue->data_ready = sock->sk->sk_data_ready;
1551 queue->state_change = sock->sk->sk_state_change;
1553 queue->write_space = sock->sk->sk_write_space;
1555 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1565 struct nvmet_tcp_queue *queue;
1568 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1569 if (!queue)
1572 INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
1573 INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
1574 queue->sock = newsock;
1575 queue->port = port;
1576 queue->nr_cmds = 0;
1577 spin_lock_init(&queue->state_lock);
1578 queue->state = NVMET_TCP_Q_CONNECTING;
1579 INIT_LIST_HEAD(&queue->free_list);
1580 init_llist_head(&queue->resp_list);
1581 INIT_LIST_HEAD(&queue->resp_send_list);
1583 queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
1584 if (queue->idx < 0) {
1585 ret = queue->idx;
1589 ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
1593 ret = nvmet_sq_init(&queue->nvme_sq);
1597 nvmet_prepare_receive_pdu(queue);
1600 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
1603 ret = nvmet_tcp_set_queue_sock(queue);
1610 list_del_init(&queue->queue_list);
1612 nvmet_sq_destroy(&queue->nvme_sq);
1614 nvmet_tcp_free_cmd(&queue->connect);
1616 ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
1618 kfree(queue);
1638 pr_err("failed to allocate queue\n");
1739 struct nvmet_tcp_queue *queue;
1742 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1743 if (queue->port == port)
1744 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1769 struct nvmet_tcp_queue *queue;
1772 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1773 if (queue->nvme_sq.ctrl == ctrl)
1774 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1780 struct nvmet_tcp_queue *queue =
1788 queue->nr_cmds = sq->size * 2;
1789 if (nvmet_tcp_alloc_cmds(queue)) {
1790 queue->nr_cmds = 0;
1804 struct nvmet_tcp_queue *queue = cmd->queue;
1806 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
1845 struct nvmet_tcp_queue *queue;
1851 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1852 kernel_sock_shutdown(queue->sock, SHUT_RDWR);