// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>

#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)
#define NVMET_TCP_MAXH2CDATA		0x400000 /* 16M arbitrary limit */

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");

#define NVMET_TCP_RECV_BUDGET		8
#define NVMET_TCP_SEND_BUDGET		8
#define NVMET_TCP_IO_WORK_BUDGET	64

enum nvmet_tcp_send_state {
	NVMET_TCP_SEND_DATA_PDU,
	NVMET_TCP_SEND_DATA,
	NVMET_TCP_SEND_R2T,
	NVMET_TCP_SEND_DDGST,
	NVMET_TCP_SEND_RESPONSE
};

enum nvmet_tcp_recv_state {
	NVMET_TCP_RECV_PDU,
	NVMET_TCP_RECV_DATA,
	NVMET_TCP_RECV_DDGST,
	NVMET_TCP_RECV_ERR,
};

enum {
	NVMET_TCP_F_INIT_FAILED = (1 << 0),
};

struct nvmet_tcp_cmd {
	struct nvmet_tcp_queue		*queue;
	struct nvmet_req		req;

	struct nvme_tcp_cmd_pdu		*cmd_pdu;
	struct nvme_tcp_rsp_pdu		*rsp_pdu;
	struct nvme_tcp_data_pdu	*data_pdu;
	struct nvme_tcp_r2t_pdu		*r2t_pdu;

	u32				rbytes_done;
	u32				wbytes_done;

	u32				pdu_len;
	u32				pdu_recv;
	int				sg_idx;
	int				nr_mapped;
	struct msghdr			recv_msg;
	struct kvec			*iov;
	u32				flags;

	struct list_head		entry;
	struct llist_node		lentry;

	/* send state */
	u32				offset;
	struct scatterlist		*cur_sg;
	enum nvmet_tcp_send_state	state;

	__le32				exp_ddgst;
	__le32				recv_ddgst;
};

enum nvmet_tcp_queue_state {
	NVMET_TCP_Q_CONNECTING,
	NVMET_TCP_Q_LIVE,
	NVMET_TCP_Q_DISCONNECTING,
};

struct nvmet_tcp_queue {
	struct socket		*sock;
	struct nvmet_tcp_port	*port;
	struct work_struct	io_work;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	/* send state */
	struct nvmet_tcp_cmd	*cmds;
	unsigned int		nr_cmds;
	struct list_head	free_list;
	struct llist_head	resp_list;
	struct list_head	resp_send_list;
	int			send_list_len;
	struct nvmet_tcp_cmd	*snd_cmd;

	/* recv state */
	int			offset;
	int			left;
	enum nvmet_tcp_recv_state rcv_state;
	struct nvmet_tcp_cmd	*cmd;
	union nvme_tcp_pdu	pdu;

	/* digest state */
	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*snd_hash;
	struct ahash_request	*rcv_hash;

	spinlock_t		state_lock;
	enum nvmet_tcp_queue_state state;

	struct sockaddr_storage	sockaddr;
	struct sockaddr_storage	sockaddr_peer;
	struct work_struct	release_work;

	int			idx;
	struct list_head	queue_list;

	struct nvmet_tcp_cmd	connect;

	struct page_frag_cache	pf_cache;

	void (*data_ready)(struct sock *);
	void (*state_change)(struct sock *);
	void (*write_space)(struct sock *);
};
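/*
 * Per-listener state: wraps the listening socket of a configured nvmet
 * port. accept_work drains the listen backlog, and the original
 * sk_data_ready callback is saved so it can be restored on port removal.
 */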
struct nvmet_tcp_port {
	struct socket		*sock;
	struct work_struct	accept_work;
	struct nvmet_port	*nport;
	struct sockaddr_storage addr;
	void (*data_ready)(struct sock *);
};

static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);

static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);

static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(!queue->nr_cmds)) {
		/* We didn't allocate cmds yet, send 0xffff */
		return USHRT_MAX;
	}

	return cmd - queue->cmds;
}

static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) &&
		cmd->rbytes_done < cmd->req.transfer_len;
}

static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
}

static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
	return !nvme_is_write(cmd->req.cmd) &&
		cmd->req.transfer_len > 0 &&
		!cmd->req.cqe->status;
}

static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
		!cmd->rbytes_done;
}

static inline struct nvmet_tcp_cmd *
nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd;

	cmd = list_first_entry_or_null(&queue->free_list,
				struct nvmet_tcp_cmd, entry);
	if (!cmd)
		return NULL;
	list_del_init(&cmd->entry);

	cmd->rbytes_done = cmd->wbytes_done = 0;
	cmd->pdu_len = 0;
	cmd->pdu_recv = 0;
	cmd->iov = NULL;
	cmd->flags = 0;
	return cmd;
}

static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd == &cmd->queue->connect))
		return;

	list_add_tail(&cmd->entry, &cmd->queue->free_list);
}
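/*
 * All I/O for a queue is funneled through io_work, queued with
 * queue_work_on() on sk_incoming_cpu; the idea, presumably, is to keep
 * PDU processing on the CPU that last handled RX for this socket.
 */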
static inline int queue_cpu(struct nvmet_tcp_queue *queue)
{
	return queue->sock->sk->sk_incoming_cpu;
}

static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
	void *pdu, size_t len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		pr_err("queue %d: header digest enabled but no header digest\n",
			queue->idx);
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		pr_err("queue %d: header digest error: recv %#x expected %#x\n",
			queue->idx, le32_to_cpu(recv_digest),
			le32_to_cpu(exp_digest));
		return -EPROTO;
	}

	return 0;
}

static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvmet_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		(hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		pr_err("queue %d: data digest flag is cleared\n", queue->idx);
		return -EPROTO;
	}

	return 0;
}

static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct scatterlist *sg;
	int i;

	sg = &cmd->req.sg[cmd->sg_idx];

	for (i = 0; i < cmd->nr_mapped; i++)
		kunmap(sg_page(&sg[i]));
}

static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct kvec *iov = cmd->iov;
	struct scatterlist *sg;
	u32 length, offset, sg_offset;

	length = cmd->pdu_len;
	cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
	offset = cmd->rbytes_done;
	cmd->sg_idx = offset / PAGE_SIZE;
	sg_offset = offset % PAGE_SIZE;
	sg = &cmd->req.sg[cmd->sg_idx];

	while (length) {
		u32 iov_len = min_t(u32, length, sg->length - sg_offset);

		iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
		iov->iov_len = iov_len;

		length -= iov_len;
		sg = sg_next(sg);
		iov++;
		sg_offset = 0;
	}

	iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
		cmd->nr_mapped, cmd->pdu_len);
}

static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
{
	queue->rcv_state = NVMET_TCP_RECV_ERR;
	if (queue->nvme_sq.ctrl)
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	else
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}

static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
	queue->rcv_state = NVMET_TCP_RECV_ERR;
	if (status == -EPIPE || status == -ECONNRESET)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	else
		nvmet_tcp_fatal_error(queue);
}
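/*
 * Map the command's SGL descriptor onto a target-side scatterlist. An
 * in-capsule (offset) SGL is only legal for writes and must fit within
 * the port's advertised inline_data_size; violations are answered with
 * an SGL status code rather than a connection teardown.
 */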
static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
	u32 len = le32_to_cpu(sgl->length);

	if (!len)
		return 0;

	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
			  NVME_SGL_FMT_OFFSET)) {
		if (!nvme_is_write(cmd->req.cmd))
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

		if (len > cmd->req.port->inline_data_size)
			return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
		cmd->pdu_len = len;
	}
	cmd->req.transfer_len += len;

	cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
	if (!cmd->req.sg)
		return NVME_SC_INTERNAL;
	cmd->cur_sg = cmd->req.sg;

	if (nvmet_tcp_has_data_in(cmd)) {
		cmd->iov = kmalloc_array(cmd->req.sg_cnt,
				sizeof(*cmd->iov), GFP_KERNEL);
		if (!cmd->iov)
			goto err;
	}

	return 0;
err:
	sgl_free(cmd->req.sg);
	return NVME_SC_INTERNAL;
}

static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	ahash_request_set_crypt(hash, cmd->req.sg,
		(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
	crypto_ahash_digest(hash);
}

static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	struct scatterlist sg;
	struct kvec *iov;
	int i;

	crypto_ahash_init(hash);
	for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
		sg_init_one(&sg, iov->iov_base, iov->iov_len);
		ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
		crypto_ahash_update(hash);
	}
	ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
	crypto_ahash_final(hash);
}

static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_DATA_PDU;

	pdu->hdr.type = nvme_tcp_c2h_data;
	pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
						NVME_TCP_F_DATA_SUCCESS : 0);
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst +
				cmd->req.transfer_len + ddgst);
	pdu->command_id = cmd->req.cqe->command_id;
	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
	pdu->data_offset = cpu_to_le32(cmd->wbytes_done);

	if (queue->data_digest) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
	}

	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_R2T;

	pdu->hdr.type = nvme_tcp_r2t;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	pdu->command_id = cmd->req.cmd->common.command_id;
	pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
	pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
	pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}
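/*
 * Note the ordering in nvmet_setup_c2h_data_pdu() above: the data digest
 * is computed (and NVME_TCP_F_DDGST set) before the header digest, so the
 * header digest covers the final header contents, flags included.
 */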
static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_RESPONSE;

	pdu->hdr.type = nvme_tcp_rsp;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{
	struct llist_node *node;
	struct nvmet_tcp_cmd *cmd;

	for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
		cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
		list_add(&cmd->entry, &queue->resp_send_list);
		queue->send_list_len++;
	}
}

static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
{
	queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
				struct nvmet_tcp_cmd, entry);
	if (!queue->snd_cmd) {
		nvmet_tcp_process_resp_list(queue);
		queue->snd_cmd =
			list_first_entry_or_null(&queue->resp_send_list,
					struct nvmet_tcp_cmd, entry);
		if (unlikely(!queue->snd_cmd))
			return NULL;
	}

	list_del_init(&queue->snd_cmd->entry);
	queue->send_list_len--;

	if (nvmet_tcp_need_data_out(queue->snd_cmd))
		nvmet_setup_c2h_data_pdu(queue->snd_cmd);
	else if (nvmet_tcp_need_data_in(queue->snd_cmd))
		nvmet_setup_r2t_pdu(queue->snd_cmd);
	else
		nvmet_setup_response_pdu(queue->snd_cmd);

	return queue->snd_cmd;
}
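/*
 * Completions can arrive from any context, so nvmet_tcp_queue_response()
 * below publishes them on the lockless resp_list (llist); io_work is the
 * only consumer and splices them onto resp_send_list in
 * nvmet_tcp_process_resp_list(), so the send list needs no locking.
 */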
static void nvmet_tcp_queue_response(struct nvmet_req *req)
{
	struct nvmet_tcp_cmd *cmd =
		container_of(req, struct nvmet_tcp_cmd, req);
	struct nvmet_tcp_queue *queue = cmd->queue;
	struct nvme_sgl_desc *sgl;
	u32 len;

	if (unlikely(cmd == queue->cmd)) {
		sgl = &cmd->req.cmd->common.dptr.sgl;
		len = le32_to_cpu(sgl->length);

		/*
		 * Wait for inline data before processing the response.
		 * Avoid using helpers, this might happen before
		 * nvmet_req_init is completed.
		 */
		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
		    len && len <= cmd->req.port->inline_data_size &&
		    nvme_is_write(cmd->req.cmd))
			return;
	}

	llist_add(&cmd->lentry, &queue->resp_list);
	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
}

static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
		nvmet_tcp_queue_response(&cmd->req);
	else
		cmd->req.execute(&cmd->req);
}

static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
	int ret;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
			offset_in_page(cmd->data_pdu) + cmd->offset,
			left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (ret <= 0)
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->state = NVMET_TCP_SEND_DATA;
	cmd->offset = 0;
	return 1;
}

static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int ret;

	while (cmd->cur_sg) {
		struct page *page = sg_page(cmd->cur_sg);
		u32 left = cmd->cur_sg->length - cmd->offset;
		int flags = MSG_DONTWAIT;

		if ((!last_in_batch && cmd->queue->send_list_len) ||
		    cmd->wbytes_done + left < cmd->req.transfer_len ||
		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
					left, flags);
		if (ret <= 0)
			return ret;

		cmd->offset += ret;
		cmd->wbytes_done += ret;

		/* Done with sg? */
		if (cmd->offset == cmd->cur_sg->length) {
			cmd->cur_sg = sg_next(cmd->cur_sg);
			cmd->offset = 0;
		}
	}

	if (queue->data_digest) {
		cmd->state = NVMET_TCP_SEND_DDGST;
		cmd->offset = 0;
	} else {
		if (queue->nvme_sq.sqhd_disabled) {
			cmd->queue->snd_cmd = NULL;
			nvmet_tcp_put_cmd(cmd);
		} else {
			nvmet_setup_response_pdu(cmd);
		}
	}

	if (queue->nvme_sq.sqhd_disabled) {
		kfree(cmd->iov);
		sgl_free(cmd->req.sg);
	}

	return 1;
}

static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
		bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
		offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
	cmd->queue->snd_cmd = NULL;
	nvmet_tcp_put_cmd(cmd);
	return 1;
}
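/*
 * Send-side batching: MSG_MORE | MSG_SENDPAGE_NOTLAST is set whenever
 * more bytes are known to follow (further queued responses, remaining
 * data in this transfer, or a trailing digest/response PDU) so TCP can
 * coalesce segments; MSG_EOR marks a natural flush point.
 */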
static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
		offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->queue->snd_cmd = NULL;
	return 1;
}

static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
		.iov_len = left
	};
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	if (queue->nvme_sq.sqhd_disabled) {
		cmd->queue->snd_cmd = NULL;
		nvmet_tcp_put_cmd(cmd);
	} else {
		nvmet_setup_response_pdu(cmd);
	}
	return 1;
}

static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
		bool last_in_batch)
{
	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
	int ret = 0;

	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
		cmd = nvmet_tcp_fetch_cmd(queue);
		if (unlikely(!cmd))
			return 0;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
		ret = nvmet_try_send_data_pdu(cmd);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA) {
		ret = nvmet_try_send_data(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DDGST) {
		ret = nvmet_try_send_ddgst(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_R2T) {
		ret = nvmet_try_send_r2t(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_RESPONSE)
		ret = nvmet_try_send_response(cmd, last_in_batch);

done_send:
	if (ret < 0) {
		if (ret == -EAGAIN)
			return 0;
		return ret;
	}

	return 1;
}

static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
		int budget, int *sends)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*sends)++;
	}
done:
	return ret;
}

static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
	queue->offset = 0;
	queue->left = sizeof(struct nvme_tcp_hdr);
	queue->cmd = NULL;
	queue->rcv_state = NVMET_TCP_RECV_PDU;
}

static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}
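/*
 * NVMe/TCP digests are CRC32C; one "crc32c" ahash transform backs both
 * directions, with separate requests for send and receive so their
 * intermediate states cannot interleave.
 */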
static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
	struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
	struct msghdr msg = {};
	struct kvec iov;
	int ret;

	if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
		pr_err("bad nvme-tcp pdu length (%d)\n",
			le32_to_cpu(icreq->hdr.plen));
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}

	if (icreq->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
		return -EPROTO;
	}

	if (icreq->hpda != 0) {
		pr_err("queue %d: unsupported hpda %d\n", queue->idx,
			icreq->hpda);
		return -EPROTO;
	}

	queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvmet_tcp_alloc_crypto(queue);
		if (ret)
			return ret;
	}

	memset(icresp, 0, sizeof(*icresp));
	icresp->hdr.type = nvme_tcp_icresp;
	icresp->hdr.hlen = sizeof(*icresp);
	icresp->hdr.pdo = 0;
	icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
	icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
	icresp->cpda = 0;
	if (queue->hdr_digest)
		icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		return ret; /* queue removal will cleanup */

	queue->state = NVMET_TCP_Q_LIVE;
	nvmet_prepare_receive_pdu(queue);
	return 0;
}

static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
	int ret;

	if (!nvme_is_write(cmd->req.cmd) ||
	    data_len > cmd->req.port->inline_data_size) {
		nvmet_prepare_receive_pdu(queue);
		return;
	}

	ret = nvmet_tcp_map_data(cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		nvmet_tcp_fatal_error(queue);
		return;
	}

	queue->rcv_state = NVMET_TCP_RECV_DATA;
	nvmet_tcp_map_pdu_iovec(cmd);
	cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}
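/*
 * When nvmet_req_init() fails for a write carrying in-capsule data, the
 * payload bytes are already inbound; nvmet_tcp_handle_req_failure() above
 * receives them anyway (marking the command NVMET_TCP_F_INIT_FAILED) so
 * the byte stream stays in sync, and the error response goes out once the
 * data has been consumed.
 */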
static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
	struct nvmet_tcp_cmd *cmd;
	unsigned int exp_data_len;

	if (likely(queue->nr_cmds)) {
		if (unlikely(data->ttag >= queue->nr_cmds)) {
			pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
				queue->idx, data->ttag, queue->nr_cmds);
			nvmet_tcp_fatal_error(queue);
			return -EPROTO;
		}
		cmd = &queue->cmds[data->ttag];
	} else {
		cmd = &queue->connect;
	}

	if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
		pr_err("ttag %u unexpected data offset %u (expected %u)\n",
			data->ttag, le32_to_cpu(data->data_offset),
			cmd->rbytes_done);
		/* FIXME: use path and transport errors */
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}

	exp_data_len = le32_to_cpu(data->hdr.plen) -
			nvmet_tcp_hdgst_len(queue) -
			nvmet_tcp_ddgst_len(queue) -
			sizeof(*data);

	cmd->pdu_len = le32_to_cpu(data->data_length);
	if (unlikely(cmd->pdu_len != exp_data_len ||
		     cmd->pdu_len == 0 ||
		     cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
		pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
		/* FIXME: use proper transport errors */
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}
	cmd->pdu_recv = 0;
	nvmet_tcp_map_pdu_iovec(cmd);
	queue->cmd = cmd;
	queue->rcv_state = NVMET_TCP_RECV_DATA;

	return 0;
}

static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
	struct nvmet_req *req;
	int ret;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		if (hdr->type != nvme_tcp_icreq) {
			pr_err("unexpected pdu type (%d) before icreq\n",
				hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EPROTO;
		}
		return nvmet_tcp_handle_icreq(queue);
	}

	if (hdr->type == nvme_tcp_h2c_data) {
		ret = nvmet_tcp_handle_h2c_data_pdu(queue);
		if (unlikely(ret))
			return ret;
		return 0;
	}

	queue->cmd = nvmet_tcp_get_cmd(queue);
	if (unlikely(!queue->cmd)) {
		/* This should never happen */
		pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d\n",
			queue->idx, queue->nr_cmds, queue->send_list_len,
			nvme_cmd->common.opcode);
		nvmet_tcp_fatal_error(queue);
		return -ENOMEM;
	}

	req = &queue->cmd->req;
	memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));

	if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_tcp_ops))) {
		pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
			req->cmd, req->cmd->common.command_id,
			req->cmd->common.opcode,
			le32_to_cpu(req->cmd->common.dptr.sgl.length));

		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
		return 0;
	}

	ret = nvmet_tcp_map_data(queue->cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		if (nvmet_tcp_has_inline_data(queue->cmd))
			nvmet_tcp_fatal_error(queue);
		else
			nvmet_req_complete(req, ret);
		ret = -EAGAIN;
		goto out;
	}

	if (nvmet_tcp_need_data_in(queue->cmd)) {
		if (nvmet_tcp_has_inline_data(queue->cmd)) {
			queue->rcv_state = NVMET_TCP_RECV_DATA;
			nvmet_tcp_map_pdu_iovec(queue->cmd);
			return 0;
		}
		/* send back R2T */
		nvmet_tcp_queue_response(&queue->cmd->req);
		goto out;
	}

	queue->cmd->req.execute(&queue->cmd->req);
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}
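/*
 * H2CData PDUs are validated strictly before any payload is accepted: the
 * ttag must index an allocated command, the data offset must match the
 * bytes received so far, and the declared data length must agree with
 * plen and stay within NVMET_TCP_MAXH2CDATA; any mismatch is treated as a
 * fatal protocol error.
 */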
static const u8 nvme_tcp_pdu_sizes[] = {
	[nvme_tcp_icreq]	= sizeof(struct nvme_tcp_icreq_pdu),
	[nvme_tcp_cmd]		= sizeof(struct nvme_tcp_cmd_pdu),
	[nvme_tcp_h2c_data]	= sizeof(struct nvme_tcp_data_pdu),
};

static inline u8 nvmet_tcp_pdu_size(u8 type)
{
	size_t idx = type;

	return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
		nvme_tcp_pdu_sizes[idx]) ?
			nvme_tcp_pdu_sizes[idx] : 0;
}

static inline bool nvmet_tcp_pdu_valid(u8 type)
{
	switch (type) {
	case nvme_tcp_icreq:
	case nvme_tcp_cmd:
	case nvme_tcp_h2c_data:
		/* fallthru */
		return true;
	}

	return false;
}

static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	int len;
	struct kvec iov;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

recv:
	iov.iov_base = (void *)&queue->pdu + queue->offset;
	iov.iov_len = queue->left;
	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(len < 0))
		return len;

	queue->offset += len;
	queue->left -= len;
	if (queue->left)
		return -EAGAIN;

	if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
		u8 hdgst = nvmet_tcp_hdgst_len(queue);

		if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
			pr_err("unexpected pdu type %d\n", hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EIO;
		}

		if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
			pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
			return -EIO;
		}

		queue->left = hdr->hlen - queue->offset + hdgst;
		goto recv;
	}

	if (queue->hdr_digest &&
	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	if (queue->data_digest &&
	    nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	return nvmet_tcp_done_recv_pdu(queue);
}

static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
	struct nvmet_tcp_queue *queue = cmd->queue;

	nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
	queue->offset = 0;
	queue->left = NVME_TCP_DIGEST_LENGTH;
	queue->rcv_state = NVMET_TCP_RECV_DDGST;
}

static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;

	while (msg_data_left(&cmd->recv_msg)) {
		ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
			cmd->recv_msg.msg_flags);
		if (ret <= 0)
			return ret;

		cmd->pdu_recv += ret;
		cmd->rbytes_done += ret;
	}

	nvmet_tcp_unmap_pdu_iovec(cmd);
	if (queue->data_digest) {
		nvmet_tcp_prep_recv_ddgst(cmd);
		return 0;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_tcp_execute_request(cmd);

	nvmet_prepare_receive_pdu(queue);
	return 0;
}
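/*
 * Data digest reception: once all payload for a PDU has landed,
 * nvmet_tcp_prep_recv_ddgst() computes the expected CRC32C over the
 * just-filled iovecs, then the four digest bytes are read off the wire
 * into recv_ddgst and compared below.
 */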
static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (void *)&cmd->recv_ddgst + queue->offset,
		.iov_len = queue->left
	};

	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(ret < 0))
		return ret;

	queue->offset += ret;
	queue->left -= ret;
	if (queue->left)
		return -EAGAIN;

	if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
		pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
			queue->idx, cmd->req.cmd->common.command_id,
			queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
			le32_to_cpu(cmd->exp_ddgst));
		nvmet_tcp_finish_cmd(cmd);
		nvmet_tcp_fatal_error(queue);
		ret = -EPROTO;
		goto out;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_tcp_execute_request(cmd);

	ret = 0;
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{
	int result = 0;

	if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
		return 0;

	if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
		result = nvmet_tcp_try_recv_pdu(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
		result = nvmet_tcp_try_recv_data(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
		result = nvmet_tcp_try_recv_ddgst(queue);
		if (result != 0)
			goto done_recv;
	}

done_recv:
	if (result < 0) {
		if (result == -EAGAIN)
			return 0;
		return result;
	}
	return 1;
}

static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
		int budget, int *recvs)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_recv_one(queue);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*recvs)++;
	}
done:
	return ret;
}

static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
{
	spin_lock(&queue->state_lock);
	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
		queue->state = NVMET_TCP_Q_DISCONNECTING;
		schedule_work(&queue->release_work);
	}
	spin_unlock(&queue->state_lock);
}

static void nvmet_tcp_io_work(struct work_struct *w)
{
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, io_work);
	bool pending;
	int ret, ops = 0;

	do {
		pending = false;

		ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

		ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

	} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);

	/*
	 * We exhausted our budget, requeue ourselves
	 */
	if (pending)
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
}
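/*
 * io_work interleaves receive and send in bounded batches
 * (NVMET_TCP_RECV_BUDGET / NVMET_TCP_SEND_BUDGET per pass, at most
 * NVMET_TCP_IO_WORK_BUDGET operations in total) so a single busy queue
 * cannot monopolize the workqueue; leftover work is requeued on the
 * same CPU.
 */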
static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *c)
{
	u8 hdgst = nvmet_tcp_hdgst_len(queue);

	c->queue = queue;
	c->req.port = queue->port->nport;

	c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->cmd_pdu)
		return -ENOMEM;
	c->req.cmd = &c->cmd_pdu->cmd;

	c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->rsp_pdu)
		goto out_free_cmd;
	c->req.cqe = &c->rsp_pdu->cqe;

	c->data_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->data_pdu)
		goto out_free_rsp;

	c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->r2t_pdu)
		goto out_free_data;

	c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;

	list_add_tail(&c->entry, &queue->free_list);

	return 0;
out_free_data:
	page_frag_free(c->data_pdu);
out_free_rsp:
	page_frag_free(c->rsp_pdu);
out_free_cmd:
	page_frag_free(c->cmd_pdu);
	return -ENOMEM;
}

static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
{
	page_frag_free(c->r2t_pdu);
	page_frag_free(c->data_pdu);
	page_frag_free(c->rsp_pdu);
	page_frag_free(c->cmd_pdu);
}

static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds;
	int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
		if (ret)
			goto out_free;
	}

	queue->cmds = cmds;

	return 0;
out_free:
	while (--i >= 0)
		nvmet_tcp_free_cmd(cmds + i);
	kfree(cmds);
out:
	return ret;
}

static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++)
		nvmet_tcp_free_cmd(cmds + i);

	nvmet_tcp_free_cmd(&queue->connect);
	kfree(cmds);
}

static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space = queue->write_space;
	sock->sk->sk_user_data = NULL;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
{
	nvmet_req_uninit(&cmd->req);
	nvmet_tcp_unmap_pdu_iovec(cmd);
	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
}

static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
		if (nvmet_tcp_need_data_in(cmd))
			nvmet_tcp_finish_cmd(cmd);
	}

	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
		/* failed in connect */
		nvmet_tcp_finish_cmd(&queue->connect);
	}
}

static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
	struct page *page;
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, release_work);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	nvmet_tcp_restore_socket_callbacks(queue);
	flush_work(&queue->io_work);

	nvmet_tcp_uninit_data_in_cmds(queue);
	nvmet_sq_destroy(&queue->nvme_sq);
	cancel_work_sync(&queue->io_work);
	sock_release(queue->sock);
	nvmet_tcp_free_cmds(queue);
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);

	page = virt_to_head_page(queue->pf_cache.va);
	__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
	kfree(queue);
}
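/*
 * The teardown order in nvmet_tcp_release_queue_work() above appears
 * deliberate: restore the socket callbacks first so no new io_work can be
 * scheduled, flush what is already running, unwind commands still waiting
 * for host data, and only then destroy the sq and release the socket and
 * command pool.
 */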
static void nvmet_tcp_data_ready(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue))
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_write_space(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (unlikely(!queue))
		goto out;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		queue->write_space(sk);
		goto out;
	}

	if (sk_stream_is_writeable(sk)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_state_change(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_FIN_WAIT2:
	case TCP_LAST_ACK:
		break;
	case TCP_FIN_WAIT1:
	case TCP_CLOSE_WAIT:
	case TCP_CLOSE:
		/* FALLTHRU */
		nvmet_tcp_schedule_release_queue(queue);
		break;
	default:
		pr_warn("queue %d unhandled state %d\n",
			queue->idx, sk->sk_state);
	}
done:
	read_unlock_bh(&sk->sk_callback_lock);
}
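/*
 * All three callbacks take sk_callback_lock and treat a NULL sk_user_data
 * as "queue already detached". While the queue is still CONNECTING,
 * write-space events are forwarded to the socket's original handler.
 */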
static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct inet_sock *inet = inet_sk(sock->sk);
	int ret;

	ret = kernel_getsockname(sock,
		(struct sockaddr *)&queue->sockaddr);
	if (ret < 0)
		return ret;

	ret = kernel_getpeername(sock,
		(struct sockaddr *)&queue->sockaddr_peer);
	if (ret < 0)
		return ret;

	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
	sock_no_linger(sock->sk);

	if (so_priority > 0)
		sock_set_priority(sock->sk, so_priority);

	/* Set socket type of service */
	if (inet->rcv_tos > 0)
		ip_sock_set_tos(sock->sk, inet->rcv_tos);

	ret = 0;
	write_lock_bh(&sock->sk->sk_callback_lock);
	if (sock->sk->sk_state != TCP_ESTABLISHED) {
		/*
		 * If the socket is already closing, don't even start
		 * consuming it
		 */
		ret = -ENOTCONN;
	} else {
		sock->sk->sk_user_data = queue;
		queue->data_ready = sock->sk->sk_data_ready;
		sock->sk->sk_data_ready = nvmet_tcp_data_ready;
		queue->state_change = sock->sk->sk_state_change;
		sock->sk->sk_state_change = nvmet_tcp_state_change;
		queue->write_space = sock->sk->sk_write_space;
		sock->sk->sk_write_space = nvmet_tcp_write_space;
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	}
	write_unlock_bh(&sock->sk->sk_callback_lock);

	return ret;
}

static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
		struct socket *newsock)
{
	struct nvmet_tcp_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
	INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
	queue->sock = newsock;
	queue->port = port;
	queue->nr_cmds = 0;
	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_TCP_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->free_list);
	init_llist_head(&queue->resp_list);
	INIT_LIST_HEAD(&queue->resp_send_list);

	queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = queue->idx;
		goto out_free_queue;
	}

	ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
	if (ret)
		goto out_ida_remove;

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_free_connect;

	nvmet_prepare_receive_pdu(queue);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	ret = nvmet_tcp_set_queue_sock(queue);
	if (ret)
		goto out_destroy_sq;

	return 0;
out_destroy_sq:
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_connect:
	nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
out_free_queue:
	kfree(queue);
	return ret;
}
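/*
 * A fresh queue owns a single preallocated "connect" command, enough to
 * service the fabrics Connect capsule; the full command pool is sized and
 * allocated in nvmet_tcp_install_queue() once the queue size has been
 * negotiated.
 */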
static void nvmet_tcp_accept_work(struct work_struct *w)
{
	struct nvmet_tcp_port *port =
		container_of(w, struct nvmet_tcp_port, accept_work);
	struct socket *newsock;
	int ret;

	while (true) {
		ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
		if (ret < 0) {
			if (ret != -EAGAIN)
				pr_warn("failed to accept err=%d\n", ret);
			return;
		}
		ret = nvmet_tcp_alloc_queue(port, newsock);
		if (ret) {
			pr_err("failed to allocate queue\n");
			sock_release(newsock);
		}
	}
}

static void nvmet_tcp_listen_data_ready(struct sock *sk)
{
	struct nvmet_tcp_port *port;

	read_lock_bh(&sk->sk_callback_lock);
	port = sk->sk_user_data;
	if (!port)
		goto out;

	if (sk->sk_state == TCP_LISTEN)
		schedule_work(&port->accept_work);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static int nvmet_tcp_add_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port;
	__kernel_sa_family_t af;
	int ret;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	switch (nport->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
			nport->disc_addr.adrfam);
		ret = -EINVAL;
		goto err_port;
	}

	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
			nport->disc_addr.trsvcid, &port->addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			nport->disc_addr.traddr, nport->disc_addr.trsvcid);
		goto err_port;
	}

	port->nport = nport;
	INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
	if (port->nport->inline_data_size < 0)
		port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;

	ret = sock_create(port->addr.ss_family, SOCK_STREAM,
				IPPROTO_TCP, &port->sock);
	if (ret) {
		pr_err("failed to create a socket\n");
		goto err_port;
	}

	port->sock->sk->sk_user_data = port;
	port->data_ready = port->sock->sk->sk_data_ready;
	port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
	sock_set_reuseaddr(port->sock->sk);
	tcp_sock_set_nodelay(port->sock->sk);
	if (so_priority > 0)
		sock_set_priority(port->sock->sk, so_priority);

	ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
			sizeof(port->addr));
	if (ret) {
		pr_err("failed to bind port socket %d\n", ret);
		goto err_sock;
	}

	ret = kernel_listen(port->sock, 128);
	if (ret) {
		pr_err("failed to listen %d on port sock\n", ret);
		goto err_sock;
	}

	nport->priv = port;
	pr_info("enabling port %d (%pISpc)\n",
		le16_to_cpu(nport->disc_addr.portid), &port->addr);

	return 0;

err_sock:
	sock_release(port->sock);
err_port:
	kfree(port);
	return ret;
}
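/*
 * Port removal mirrors setup: detach the listener's data_ready callback,
 * cancel pending accept work, then shut down queues still attached to
 * this port (those that have not been bound to a controller yet) before
 * releasing the listening socket.
 */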
static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
{
	struct nvmet_tcp_queue *queue;

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		if (queue->port == port)
			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
}

static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port = nport->priv;

	write_lock_bh(&port->sock->sk->sk_callback_lock);
	port->sock->sk->sk_data_ready = port->data_ready;
	port->sock->sk->sk_user_data = NULL;
	write_unlock_bh(&port->sock->sk->sk_callback_lock);
	cancel_work_sync(&port->accept_work);
	/*
	 * Destroy the remaining queues, which do not belong to any
	 * controller yet.
	 */
	nvmet_tcp_destroy_port_queues(port);

	sock_release(port->sock);
	kfree(port);
}

static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_tcp_queue *queue;

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		if (queue->nvme_sq.ctrl == ctrl)
			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
}

static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
{
	struct nvmet_tcp_queue *queue =
		container_of(sq, struct nvmet_tcp_queue, nvme_sq);

	if (sq->qid == 0) {
		/* Let inflight controller teardown complete */
		flush_scheduled_work();
	}

	queue->nr_cmds = sq->size * 2;
	if (nvmet_tcp_alloc_cmds(queue)) {
		queue->nr_cmds = 0;
		return NVME_SC_INTERNAL;
	}
	return 0;
}

static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
		struct nvmet_port *nport, char *traddr)
{
	struct nvmet_tcp_port *port = nport->priv;

	if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
		struct nvmet_tcp_cmd *cmd =
			container_of(req, struct nvmet_tcp_cmd, req);
		struct nvmet_tcp_queue *queue = cmd->queue;

		sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
	} else {
		memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
	}
}

static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_TCP,
	.msdbd			= 1,
	.add_port		= nvmet_tcp_add_port,
	.remove_port		= nvmet_tcp_remove_port,
	.queue_response		= nvmet_tcp_queue_response,
	.delete_ctrl		= nvmet_tcp_delete_ctrl,
	.install_queue		= nvmet_tcp_install_queue,
	.disc_traddr		= nvmet_tcp_disc_port_addr,
};

static int __init nvmet_tcp_init(void)
{
	int ret;

	nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
				WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvmet_tcp_wq)
		return -ENOMEM;

	ret = nvmet_register_transport(&nvmet_tcp_ops);
	if (ret)
		goto err;

	return 0;
err:
	destroy_workqueue(nvmet_tcp_wq);
	return ret;
}

static void __exit nvmet_tcp_exit(void)
{
	struct nvmet_tcp_queue *queue;

	nvmet_unregister_transport(&nvmet_tcp_ops);

	flush_scheduled_work();
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	flush_scheduled_work();

	destroy_workqueue(nvmet_tcp_wq);
}

module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */