1// SPDX-License-Identifier: GPL-2.0-or-later 2/******************************************************************************* 3 * This file contains iSCSI extentions for RDMA (iSER) Verbs 4 * 5 * (c) Copyright 2013 Datera, Inc. 6 * 7 * Nicholas A. Bellinger <nab@linux-iscsi.org> 8 * 9 ****************************************************************************/ 10 11#include <linux/string.h> 12#include <linux/module.h> 13#include <linux/scatterlist.h> 14#include <linux/socket.h> 15#include <linux/in.h> 16#include <linux/in6.h> 17#include <rdma/ib_verbs.h> 18#include <rdma/ib_cm.h> 19#include <rdma/rdma_cm.h> 20#include <target/target_core_base.h> 21#include <target/target_core_fabric.h> 22#include <target/iscsi/iscsi_transport.h> 23#include <linux/semaphore.h> 24 25#include "ib_isert.h" 26 27static int isert_debug_level; 28module_param_named(debug_level, isert_debug_level, int, 0644); 29MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)"); 30 31static int isert_sg_tablesize_set(const char *val, 32 const struct kernel_param *kp); 33static const struct kernel_param_ops sg_tablesize_ops = { 34 .set = isert_sg_tablesize_set, 35 .get = param_get_int, 36}; 37 38static int isert_sg_tablesize = ISCSI_ISER_MIN_SG_TABLESIZE; 39module_param_cb(sg_tablesize, &sg_tablesize_ops, &isert_sg_tablesize, 0644); 40MODULE_PARM_DESC(sg_tablesize, 41 "Number of gather/scatter entries in a single scsi command, should >= 128 (default: 128, max: 4096)"); 42 43static DEFINE_MUTEX(device_list_mutex); 44static LIST_HEAD(device_list); 45static struct workqueue_struct *isert_comp_wq; 46static struct workqueue_struct *isert_release_wq; 47 48static int 49isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd); 50static int 51isert_login_post_recv(struct isert_conn *isert_conn); 52static int 53isert_rdma_accept(struct isert_conn *isert_conn); 54struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); 55 56static void isert_release_work(struct work_struct 
*work); 57static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc); 58static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc); 59static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc); 60static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc); 61 62static int isert_sg_tablesize_set(const char *val, const struct kernel_param *kp) 63{ 64 int n = 0, ret; 65 66 ret = kstrtoint(val, 10, &n); 67 if (ret != 0 || n < ISCSI_ISER_MIN_SG_TABLESIZE || 68 n > ISCSI_ISER_MAX_SG_TABLESIZE) 69 return -EINVAL; 70 71 return param_set_int(val, kp); 72} 73 74 75static inline bool 76isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd) 77{ 78 return (conn->pi_support && 79 cmd->prot_op != TARGET_PROT_NORMAL); 80} 81 82 83static void 84isert_qp_event_callback(struct ib_event *e, void *context) 85{ 86 struct isert_conn *isert_conn = context; 87 88 isert_err("%s (%d): conn %p\n", 89 ib_event_msg(e->event), e->event, isert_conn); 90 91 switch (e->event) { 92 case IB_EVENT_COMM_EST: 93 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); 94 break; 95 case IB_EVENT_QP_LAST_WQE_REACHED: 96 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n"); 97 break; 98 default: 99 break; 100 } 101} 102 103static struct ib_qp * 104isert_create_qp(struct isert_conn *isert_conn, 105 struct rdma_cm_id *cma_id) 106{ 107 u32 cq_size = ISERT_QP_MAX_REQ_DTOS + ISERT_QP_MAX_RECV_DTOS + 2; 108 struct isert_device *device = isert_conn->device; 109 struct ib_device *ib_dev = device->ib_device; 110 struct ib_qp_init_attr attr; 111 int ret, factor; 112 113 isert_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_WORKQUEUE); 114 if (IS_ERR(isert_conn->cq)) { 115 isert_err("Unable to allocate cq\n"); 116 ret = PTR_ERR(isert_conn->cq); 117 return ERR_PTR(ret); 118 } 119 isert_conn->cq_size = cq_size; 120 121 memset(&attr, 0, sizeof(struct ib_qp_init_attr)); 122 attr.event_handler = isert_qp_event_callback; 123 attr.qp_context = isert_conn; 124 attr.send_cq = 
isert_conn->cq; 125 attr.recv_cq = isert_conn->cq; 126 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1; 127 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1; 128 factor = rdma_rw_mr_factor(device->ib_device, cma_id->port_num, 129 isert_sg_tablesize); 130 attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX * factor; 131 attr.cap.max_send_sge = device->ib_device->attrs.max_send_sge; 132 attr.cap.max_recv_sge = 1; 133 attr.sq_sig_type = IB_SIGNAL_REQ_WR; 134 attr.qp_type = IB_QPT_RC; 135 if (device->pi_capable) 136 attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN; 137 138 ret = rdma_create_qp(cma_id, device->pd, &attr); 139 if (ret) { 140 isert_err("rdma_create_qp failed for cma_id %d\n", ret); 141 ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size); 142 143 return ERR_PTR(ret); 144 } 145 146 return cma_id->qp; 147} 148 149static int 150isert_alloc_rx_descriptors(struct isert_conn *isert_conn) 151{ 152 struct isert_device *device = isert_conn->device; 153 struct ib_device *ib_dev = device->ib_device; 154 struct iser_rx_desc *rx_desc; 155 struct ib_sge *rx_sg; 156 u64 dma_addr; 157 int i, j; 158 159 isert_conn->rx_descs = kcalloc(ISERT_QP_MAX_RECV_DTOS, 160 sizeof(struct iser_rx_desc), 161 GFP_KERNEL); 162 if (!isert_conn->rx_descs) 163 return -ENOMEM; 164 165 rx_desc = isert_conn->rx_descs; 166 167 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 168 dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf, 169 ISER_RX_SIZE, DMA_FROM_DEVICE); 170 if (ib_dma_mapping_error(ib_dev, dma_addr)) 171 goto dma_map_fail; 172 173 rx_desc->dma_addr = dma_addr; 174 175 rx_sg = &rx_desc->rx_sg; 176 rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc); 177 rx_sg->length = ISER_RX_PAYLOAD_SIZE; 178 rx_sg->lkey = device->pd->local_dma_lkey; 179 rx_desc->rx_cqe.done = isert_recv_done; 180 } 181 182 return 0; 183 184dma_map_fail: 185 rx_desc = isert_conn->rx_descs; 186 for (j = 0; j < i; j++, rx_desc++) { 187 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 188 ISER_RX_SIZE, 
DMA_FROM_DEVICE); 189 } 190 kfree(isert_conn->rx_descs); 191 isert_conn->rx_descs = NULL; 192 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn); 193 return -ENOMEM; 194} 195 196static void 197isert_free_rx_descriptors(struct isert_conn *isert_conn) 198{ 199 struct ib_device *ib_dev = isert_conn->device->ib_device; 200 struct iser_rx_desc *rx_desc; 201 int i; 202 203 if (!isert_conn->rx_descs) 204 return; 205 206 rx_desc = isert_conn->rx_descs; 207 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 208 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 209 ISER_RX_SIZE, DMA_FROM_DEVICE); 210 } 211 212 kfree(isert_conn->rx_descs); 213 isert_conn->rx_descs = NULL; 214} 215 216static int 217isert_create_device_ib_res(struct isert_device *device) 218{ 219 struct ib_device *ib_dev = device->ib_device; 220 int ret; 221 222 isert_dbg("devattr->max_send_sge: %d devattr->max_recv_sge %d\n", 223 ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge); 224 isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd); 225 226 device->pd = ib_alloc_pd(ib_dev, 0); 227 if (IS_ERR(device->pd)) { 228 ret = PTR_ERR(device->pd); 229 isert_err("failed to allocate pd, device %p, ret=%d\n", 230 device, ret); 231 return ret; 232 } 233 234 /* Check signature cap */ 235 device->pi_capable = ib_dev->attrs.device_cap_flags & 236 IB_DEVICE_INTEGRITY_HANDOVER ? 
true : false; 237 238 return 0; 239} 240 241static void 242isert_free_device_ib_res(struct isert_device *device) 243{ 244 isert_info("device %p\n", device); 245 246 ib_dealloc_pd(device->pd); 247} 248 249static void 250isert_device_put(struct isert_device *device) 251{ 252 mutex_lock(&device_list_mutex); 253 device->refcount--; 254 isert_info("device %p refcount %d\n", device, device->refcount); 255 if (!device->refcount) { 256 isert_free_device_ib_res(device); 257 list_del(&device->dev_node); 258 kfree(device); 259 } 260 mutex_unlock(&device_list_mutex); 261} 262 263static struct isert_device * 264isert_device_get(struct rdma_cm_id *cma_id) 265{ 266 struct isert_device *device; 267 int ret; 268 269 mutex_lock(&device_list_mutex); 270 list_for_each_entry(device, &device_list, dev_node) { 271 if (device->ib_device->node_guid == cma_id->device->node_guid) { 272 device->refcount++; 273 isert_info("Found iser device %p refcount %d\n", 274 device, device->refcount); 275 mutex_unlock(&device_list_mutex); 276 return device; 277 } 278 } 279 280 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL); 281 if (!device) { 282 mutex_unlock(&device_list_mutex); 283 return ERR_PTR(-ENOMEM); 284 } 285 286 INIT_LIST_HEAD(&device->dev_node); 287 288 device->ib_device = cma_id->device; 289 ret = isert_create_device_ib_res(device); 290 if (ret) { 291 kfree(device); 292 mutex_unlock(&device_list_mutex); 293 return ERR_PTR(ret); 294 } 295 296 device->refcount++; 297 list_add_tail(&device->dev_node, &device_list); 298 isert_info("Created a new iser device %p refcount %d\n", 299 device, device->refcount); 300 mutex_unlock(&device_list_mutex); 301 302 return device; 303} 304 305static void 306isert_init_conn(struct isert_conn *isert_conn) 307{ 308 isert_conn->state = ISER_CONN_INIT; 309 INIT_LIST_HEAD(&isert_conn->node); 310 init_completion(&isert_conn->login_comp); 311 init_completion(&isert_conn->login_req_comp); 312 init_waitqueue_head(&isert_conn->rem_wait); 313 
kref_init(&isert_conn->kref); 314 mutex_init(&isert_conn->mutex); 315 INIT_WORK(&isert_conn->release_work, isert_release_work); 316} 317 318static void 319isert_free_login_buf(struct isert_conn *isert_conn) 320{ 321 struct ib_device *ib_dev = isert_conn->device->ib_device; 322 323 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, 324 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); 325 kfree(isert_conn->login_rsp_buf); 326 327 ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr, 328 ISER_RX_SIZE, DMA_FROM_DEVICE); 329 kfree(isert_conn->login_desc); 330} 331 332static int 333isert_alloc_login_buf(struct isert_conn *isert_conn, 334 struct ib_device *ib_dev) 335{ 336 int ret; 337 338 isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc), 339 GFP_KERNEL); 340 if (!isert_conn->login_desc) 341 return -ENOMEM; 342 343 isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev, 344 isert_conn->login_desc->buf, 345 ISER_RX_SIZE, DMA_FROM_DEVICE); 346 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr); 347 if (ret) { 348 isert_err("login_desc dma mapping error: %d\n", ret); 349 isert_conn->login_desc->dma_addr = 0; 350 goto out_free_login_desc; 351 } 352 353 isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); 354 if (!isert_conn->login_rsp_buf) { 355 ret = -ENOMEM; 356 goto out_unmap_login_desc; 357 } 358 359 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, 360 isert_conn->login_rsp_buf, 361 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); 362 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); 363 if (ret) { 364 isert_err("login_rsp_dma mapping error: %d\n", ret); 365 isert_conn->login_rsp_dma = 0; 366 goto out_free_login_rsp_buf; 367 } 368 369 return 0; 370 371out_free_login_rsp_buf: 372 kfree(isert_conn->login_rsp_buf); 373out_unmap_login_desc: 374 ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr, 375 ISER_RX_SIZE, DMA_FROM_DEVICE); 376out_free_login_desc: 377 kfree(isert_conn->login_desc); 378 return 
ret; 379} 380 381static void 382isert_set_nego_params(struct isert_conn *isert_conn, 383 struct rdma_conn_param *param) 384{ 385 struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs; 386 387 /* Set max inflight RDMA READ requests */ 388 isert_conn->initiator_depth = min_t(u8, param->initiator_depth, 389 attr->max_qp_init_rd_atom); 390 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth); 391 392 if (param->private_data) { 393 u8 flags = *(u8 *)param->private_data; 394 395 /* 396 * use remote invalidation if the both initiator 397 * and the HCA support it 398 */ 399 isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) && 400 (attr->device_cap_flags & 401 IB_DEVICE_MEM_MGT_EXTENSIONS); 402 if (isert_conn->snd_w_inv) 403 isert_info("Using remote invalidation\n"); 404 } 405} 406 407static void 408isert_destroy_qp(struct isert_conn *isert_conn) 409{ 410 ib_destroy_qp(isert_conn->qp); 411 ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size); 412} 413 414static int 415isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 416{ 417 struct isert_np *isert_np = cma_id->context; 418 struct iscsi_np *np = isert_np->np; 419 struct isert_conn *isert_conn; 420 struct isert_device *device; 421 int ret = 0; 422 423 spin_lock_bh(&np->np_thread_lock); 424 if (!np->enabled) { 425 spin_unlock_bh(&np->np_thread_lock); 426 isert_dbg("iscsi_np is not enabled, reject connect request\n"); 427 return rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED); 428 } 429 spin_unlock_bh(&np->np_thread_lock); 430 431 isert_dbg("cma_id: %p, portal: %p\n", 432 cma_id, cma_id->context); 433 434 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL); 435 if (!isert_conn) 436 return -ENOMEM; 437 438 isert_init_conn(isert_conn); 439 isert_conn->cm_id = cma_id; 440 441 device = isert_device_get(cma_id); 442 if (IS_ERR(device)) { 443 ret = PTR_ERR(device); 444 goto out; 445 } 446 isert_conn->device = device; 447 448 ret = 
isert_alloc_login_buf(isert_conn, cma_id->device); 449 if (ret) 450 goto out_conn_dev; 451 452 isert_set_nego_params(isert_conn, &event->param.conn); 453 454 isert_conn->qp = isert_create_qp(isert_conn, cma_id); 455 if (IS_ERR(isert_conn->qp)) { 456 ret = PTR_ERR(isert_conn->qp); 457 goto out_rsp_dma_map; 458 } 459 460 ret = isert_login_post_recv(isert_conn); 461 if (ret) 462 goto out_destroy_qp; 463 464 ret = isert_rdma_accept(isert_conn); 465 if (ret) 466 goto out_destroy_qp; 467 468 mutex_lock(&isert_np->mutex); 469 list_add_tail(&isert_conn->node, &isert_np->accepted); 470 mutex_unlock(&isert_np->mutex); 471 472 return 0; 473 474out_destroy_qp: 475 isert_destroy_qp(isert_conn); 476out_rsp_dma_map: 477 isert_free_login_buf(isert_conn); 478out_conn_dev: 479 isert_device_put(device); 480out: 481 kfree(isert_conn); 482 rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED); 483 return ret; 484} 485 486static void 487isert_connect_release(struct isert_conn *isert_conn) 488{ 489 struct isert_device *device = isert_conn->device; 490 491 isert_dbg("conn %p\n", isert_conn); 492 493 BUG_ON(!device); 494 495 isert_free_rx_descriptors(isert_conn); 496 if (isert_conn->cm_id && 497 !isert_conn->dev_removed) 498 rdma_destroy_id(isert_conn->cm_id); 499 500 if (isert_conn->qp) 501 isert_destroy_qp(isert_conn); 502 503 if (isert_conn->login_desc) 504 isert_free_login_buf(isert_conn); 505 506 isert_device_put(device); 507 508 if (isert_conn->dev_removed) 509 wake_up_interruptible(&isert_conn->rem_wait); 510 else 511 kfree(isert_conn); 512} 513 514static void 515isert_connected_handler(struct rdma_cm_id *cma_id) 516{ 517 struct isert_conn *isert_conn = cma_id->qp->qp_context; 518 struct isert_np *isert_np = cma_id->context; 519 520 isert_info("conn %p\n", isert_conn); 521 522 mutex_lock(&isert_conn->mutex); 523 isert_conn->state = ISER_CONN_UP; 524 kref_get(&isert_conn->kref); 525 mutex_unlock(&isert_conn->mutex); 526 527 mutex_lock(&isert_np->mutex); 528 
list_move_tail(&isert_conn->node, &isert_np->pending); 529 mutex_unlock(&isert_np->mutex); 530 531 isert_info("np %p: Allow accept_np to continue\n", isert_np); 532 up(&isert_np->sem); 533} 534 535static void 536isert_release_kref(struct kref *kref) 537{ 538 struct isert_conn *isert_conn = container_of(kref, 539 struct isert_conn, kref); 540 541 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm, 542 current->pid); 543 544 isert_connect_release(isert_conn); 545} 546 547static void 548isert_put_conn(struct isert_conn *isert_conn) 549{ 550 kref_put(&isert_conn->kref, isert_release_kref); 551} 552 553static void 554isert_handle_unbound_conn(struct isert_conn *isert_conn) 555{ 556 struct isert_np *isert_np = isert_conn->cm_id->context; 557 558 mutex_lock(&isert_np->mutex); 559 if (!list_empty(&isert_conn->node)) { 560 /* 561 * This means iscsi doesn't know this connection 562 * so schedule a cleanup ourselves 563 */ 564 list_del_init(&isert_conn->node); 565 isert_put_conn(isert_conn); 566 queue_work(isert_release_wq, &isert_conn->release_work); 567 } 568 mutex_unlock(&isert_np->mutex); 569} 570 571/** 572 * isert_conn_terminate() - Initiate connection termination 573 * @isert_conn: isert connection struct 574 * 575 * Notes: 576 * In case the connection state is BOUND, move state 577 * to TEMINATING and start teardown sequence (rdma_disconnect). 578 * In case the connection state is UP, complete flush as well. 579 * 580 * This routine must be called with mutex held. Thus it is 581 * safe to call multiple times. 
582 */ 583static void 584isert_conn_terminate(struct isert_conn *isert_conn) 585{ 586 int err; 587 588 if (isert_conn->state >= ISER_CONN_TERMINATING) 589 return; 590 591 isert_info("Terminating conn %p state %d\n", 592 isert_conn, isert_conn->state); 593 isert_conn->state = ISER_CONN_TERMINATING; 594 err = rdma_disconnect(isert_conn->cm_id); 595 if (err) 596 isert_warn("Failed rdma_disconnect isert_conn %p\n", 597 isert_conn); 598} 599 600static int 601isert_np_cma_handler(struct isert_np *isert_np, 602 enum rdma_cm_event_type event) 603{ 604 isert_dbg("%s (%d): isert np %p\n", 605 rdma_event_msg(event), event, isert_np); 606 607 switch (event) { 608 case RDMA_CM_EVENT_DEVICE_REMOVAL: 609 isert_np->cm_id = NULL; 610 break; 611 case RDMA_CM_EVENT_ADDR_CHANGE: 612 isert_np->cm_id = isert_setup_id(isert_np); 613 if (IS_ERR(isert_np->cm_id)) { 614 isert_err("isert np %p setup id failed: %ld\n", 615 isert_np, PTR_ERR(isert_np->cm_id)); 616 isert_np->cm_id = NULL; 617 } 618 break; 619 default: 620 isert_err("isert np %p Unexpected event %d\n", 621 isert_np, event); 622 } 623 624 return -1; 625} 626 627static int 628isert_disconnected_handler(struct rdma_cm_id *cma_id, 629 enum rdma_cm_event_type event) 630{ 631 struct isert_conn *isert_conn = cma_id->qp->qp_context; 632 633 mutex_lock(&isert_conn->mutex); 634 switch (isert_conn->state) { 635 case ISER_CONN_TERMINATING: 636 break; 637 case ISER_CONN_UP: 638 isert_conn_terminate(isert_conn); 639 ib_drain_qp(isert_conn->qp); 640 isert_handle_unbound_conn(isert_conn); 641 break; 642 case ISER_CONN_BOUND: 643 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */ 644 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 645 break; 646 default: 647 isert_warn("conn %p terminating in state %d\n", 648 isert_conn, isert_conn->state); 649 } 650 mutex_unlock(&isert_conn->mutex); 651 652 return 0; 653} 654 655static int 656isert_connect_error(struct rdma_cm_id *cma_id) 657{ 658 struct isert_conn *isert_conn = cma_id->qp->qp_context; 659 
struct isert_np *isert_np = cma_id->context; 660 661 ib_drain_qp(isert_conn->qp); 662 663 mutex_lock(&isert_np->mutex); 664 list_del_init(&isert_conn->node); 665 mutex_unlock(&isert_np->mutex); 666 isert_conn->cm_id = NULL; 667 isert_put_conn(isert_conn); 668 669 return -1; 670} 671 672static int 673isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 674{ 675 struct isert_np *isert_np = cma_id->context; 676 struct isert_conn *isert_conn; 677 int ret = 0; 678 679 isert_info("%s (%d): status %d id %p np %p\n", 680 rdma_event_msg(event->event), event->event, 681 event->status, cma_id, cma_id->context); 682 683 if (isert_np->cm_id == cma_id) 684 return isert_np_cma_handler(cma_id->context, event->event); 685 686 switch (event->event) { 687 case RDMA_CM_EVENT_CONNECT_REQUEST: 688 ret = isert_connect_request(cma_id, event); 689 if (ret) 690 isert_err("failed handle connect request %d\n", ret); 691 break; 692 case RDMA_CM_EVENT_ESTABLISHED: 693 isert_connected_handler(cma_id); 694 break; 695 case RDMA_CM_EVENT_ADDR_CHANGE: 696 case RDMA_CM_EVENT_DISCONNECTED: 697 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ 698 ret = isert_disconnected_handler(cma_id, event->event); 699 break; 700 case RDMA_CM_EVENT_DEVICE_REMOVAL: 701 isert_conn = cma_id->qp->qp_context; 702 isert_conn->dev_removed = true; 703 isert_disconnected_handler(cma_id, event->event); 704 wait_event_interruptible(isert_conn->rem_wait, 705 isert_conn->state == ISER_CONN_DOWN); 706 kfree(isert_conn); 707 /* 708 * return non-zero from the callback to destroy 709 * the rdma cm id 710 */ 711 return 1; 712 case RDMA_CM_EVENT_REJECTED: 713 isert_info("Connection rejected: %s\n", 714 rdma_reject_msg(cma_id, event->status)); 715 fallthrough; 716 case RDMA_CM_EVENT_UNREACHABLE: 717 case RDMA_CM_EVENT_CONNECT_ERROR: 718 ret = isert_connect_error(cma_id); 719 break; 720 default: 721 isert_err("Unhandled RDMA CMA event: %d\n", event->event); 722 break; 723 } 724 725 return ret; 726} 727 728static int 
729isert_post_recvm(struct isert_conn *isert_conn, u32 count) 730{ 731 struct ib_recv_wr *rx_wr; 732 int i, ret; 733 struct iser_rx_desc *rx_desc; 734 735 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { 736 rx_desc = &isert_conn->rx_descs[i]; 737 738 rx_wr->wr_cqe = &rx_desc->rx_cqe; 739 rx_wr->sg_list = &rx_desc->rx_sg; 740 rx_wr->num_sge = 1; 741 rx_wr->next = rx_wr + 1; 742 rx_desc->in_use = false; 743 } 744 rx_wr--; 745 rx_wr->next = NULL; /* mark end of work requests list */ 746 747 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, NULL); 748 if (ret) 749 isert_err("ib_post_recv() failed with ret: %d\n", ret); 750 751 return ret; 752} 753 754static int 755isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc) 756{ 757 struct ib_recv_wr rx_wr; 758 int ret; 759 760 if (!rx_desc->in_use) { 761 /* 762 * if the descriptor is not in-use we already reposted it 763 * for recv, so just silently return 764 */ 765 return 0; 766 } 767 768 rx_desc->in_use = false; 769 rx_wr.wr_cqe = &rx_desc->rx_cqe; 770 rx_wr.sg_list = &rx_desc->rx_sg; 771 rx_wr.num_sge = 1; 772 rx_wr.next = NULL; 773 774 ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL); 775 if (ret) 776 isert_err("ib_post_recv() failed with ret: %d\n", ret); 777 778 return ret; 779} 780 781static int 782isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) 783{ 784 struct ib_device *ib_dev = isert_conn->cm_id->device; 785 struct ib_send_wr send_wr; 786 int ret; 787 788 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr, 789 ISER_HEADERS_LEN, DMA_TO_DEVICE); 790 791 tx_desc->tx_cqe.done = isert_login_send_done; 792 793 send_wr.next = NULL; 794 send_wr.wr_cqe = &tx_desc->tx_cqe; 795 send_wr.sg_list = tx_desc->tx_sg; 796 send_wr.num_sge = tx_desc->num_sge; 797 send_wr.opcode = IB_WR_SEND; 798 send_wr.send_flags = IB_SEND_SIGNALED; 799 800 ret = ib_post_send(isert_conn->qp, &send_wr, NULL); 801 if (ret) 802 isert_err("ib_post_send() failed, ret: 
%d\n", ret); 803 804 return ret; 805} 806 807static void 808__isert_create_send_desc(struct isert_device *device, 809 struct iser_tx_desc *tx_desc) 810{ 811 812 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl)); 813 tx_desc->iser_header.flags = ISCSI_CTRL; 814 815 tx_desc->num_sge = 1; 816 817 if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) { 818 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; 819 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc); 820 } 821} 822 823static void 824isert_create_send_desc(struct isert_conn *isert_conn, 825 struct isert_cmd *isert_cmd, 826 struct iser_tx_desc *tx_desc) 827{ 828 struct isert_device *device = isert_conn->device; 829 struct ib_device *ib_dev = device->ib_device; 830 831 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, 832 ISER_HEADERS_LEN, DMA_TO_DEVICE); 833 834 __isert_create_send_desc(device, tx_desc); 835} 836 837static int 838isert_init_tx_hdrs(struct isert_conn *isert_conn, 839 struct iser_tx_desc *tx_desc) 840{ 841 struct isert_device *device = isert_conn->device; 842 struct ib_device *ib_dev = device->ib_device; 843 u64 dma_addr; 844 845 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, 846 ISER_HEADERS_LEN, DMA_TO_DEVICE); 847 if (ib_dma_mapping_error(ib_dev, dma_addr)) { 848 isert_err("ib_dma_mapping_error() failed\n"); 849 return -ENOMEM; 850 } 851 852 tx_desc->dma_addr = dma_addr; 853 tx_desc->tx_sg[0].addr = tx_desc->dma_addr; 854 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; 855 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; 856 857 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n", 858 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length, 859 tx_desc->tx_sg[0].lkey); 860 861 return 0; 862} 863 864static void 865isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 866 struct ib_send_wr *send_wr) 867{ 868 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; 869 870 tx_desc->tx_cqe.done = isert_send_done; 871 send_wr->wr_cqe = 
&tx_desc->tx_cqe; 872 873 if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) { 874 send_wr->opcode = IB_WR_SEND_WITH_INV; 875 send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey; 876 } else { 877 send_wr->opcode = IB_WR_SEND; 878 } 879 880 send_wr->sg_list = &tx_desc->tx_sg[0]; 881 send_wr->num_sge = isert_cmd->tx_desc.num_sge; 882 send_wr->send_flags = IB_SEND_SIGNALED; 883} 884 885static int 886isert_login_post_recv(struct isert_conn *isert_conn) 887{ 888 struct ib_recv_wr rx_wr; 889 struct ib_sge sge; 890 int ret; 891 892 memset(&sge, 0, sizeof(struct ib_sge)); 893 sge.addr = isert_conn->login_desc->dma_addr + 894 isert_get_hdr_offset(isert_conn->login_desc); 895 sge.length = ISER_RX_PAYLOAD_SIZE; 896 sge.lkey = isert_conn->device->pd->local_dma_lkey; 897 898 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", 899 sge.addr, sge.length, sge.lkey); 900 901 isert_conn->login_desc->rx_cqe.done = isert_login_recv_done; 902 903 memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); 904 rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe; 905 rx_wr.sg_list = &sge; 906 rx_wr.num_sge = 1; 907 908 ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL); 909 if (ret) 910 isert_err("ib_post_recv() failed: %d\n", ret); 911 912 return ret; 913} 914 915static int 916isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, 917 u32 length) 918{ 919 struct isert_conn *isert_conn = conn->context; 920 struct isert_device *device = isert_conn->device; 921 struct ib_device *ib_dev = device->ib_device; 922 struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc; 923 int ret; 924 925 __isert_create_send_desc(device, tx_desc); 926 927 memcpy(&tx_desc->iscsi_header, &login->rsp[0], 928 sizeof(struct iscsi_hdr)); 929 930 isert_init_tx_hdrs(isert_conn, tx_desc); 931 932 if (length > 0) { 933 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1]; 934 935 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma, 936 length, DMA_TO_DEVICE); 937 938 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, 
length); 939 940 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma, 941 length, DMA_TO_DEVICE); 942 943 tx_dsg->addr = isert_conn->login_rsp_dma; 944 tx_dsg->length = length; 945 tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey; 946 tx_desc->num_sge = 2; 947 } 948 if (!login->login_failed) { 949 if (login->login_complete) { 950 ret = isert_alloc_rx_descriptors(isert_conn); 951 if (ret) 952 return ret; 953 954 ret = isert_post_recvm(isert_conn, 955 ISERT_QP_MAX_RECV_DTOS); 956 if (ret) 957 return ret; 958 959 /* Now we are in FULL_FEATURE phase */ 960 mutex_lock(&isert_conn->mutex); 961 isert_conn->state = ISER_CONN_FULL_FEATURE; 962 mutex_unlock(&isert_conn->mutex); 963 goto post_send; 964 } 965 966 ret = isert_login_post_recv(isert_conn); 967 if (ret) 968 return ret; 969 } 970post_send: 971 ret = isert_login_post_send(isert_conn, tx_desc); 972 if (ret) 973 return ret; 974 975 return 0; 976} 977 978static void 979isert_rx_login_req(struct isert_conn *isert_conn) 980{ 981 struct iser_rx_desc *rx_desc = isert_conn->login_desc; 982 int rx_buflen = isert_conn->login_req_len; 983 struct iscsi_conn *conn = isert_conn->conn; 984 struct iscsi_login *login = conn->conn_login; 985 int size; 986 987 isert_info("conn %p\n", isert_conn); 988 989 WARN_ON_ONCE(!login); 990 991 if (login->first_request) { 992 struct iscsi_login_req *login_req = 993 (struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc); 994 /* 995 * Setup the initial iscsi_login values from the leading 996 * login request PDU. 997 */ 998 login->leading_connection = (!login_req->tsih) ? 
1 : 0; 999 login->current_stage = 1000 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) 1001 >> 2; 1002 login->version_min = login_req->min_version; 1003 login->version_max = login_req->max_version; 1004 memcpy(login->isid, login_req->isid, 6); 1005 login->cmd_sn = be32_to_cpu(login_req->cmdsn); 1006 login->init_task_tag = login_req->itt; 1007 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); 1008 login->cid = be16_to_cpu(login_req->cid); 1009 login->tsih = be16_to_cpu(login_req->tsih); 1010 } 1011 1012 memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN); 1013 1014 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); 1015 isert_dbg("Using login payload size: %d, rx_buflen: %d " 1016 "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen, 1017 MAX_KEY_VALUE_PAIRS); 1018 memcpy(login->req_buf, isert_get_data(rx_desc), size); 1019 1020 if (login->first_request) { 1021 complete(&isert_conn->login_comp); 1022 return; 1023 } 1024 schedule_delayed_work(&conn->login_work, 0); 1025} 1026 1027static struct iscsi_cmd 1028*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc) 1029{ 1030 struct isert_conn *isert_conn = conn->context; 1031 struct isert_cmd *isert_cmd; 1032 struct iscsi_cmd *cmd; 1033 1034 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 1035 if (!cmd) { 1036 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n"); 1037 return NULL; 1038 } 1039 isert_cmd = iscsit_priv_cmd(cmd); 1040 isert_cmd->conn = isert_conn; 1041 isert_cmd->iscsi_cmd = cmd; 1042 isert_cmd->rx_desc = rx_desc; 1043 1044 return cmd; 1045} 1046 1047static int 1048isert_handle_scsi_cmd(struct isert_conn *isert_conn, 1049 struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd, 1050 struct iser_rx_desc *rx_desc, unsigned char *buf) 1051{ 1052 struct iscsi_conn *conn = isert_conn->conn; 1053 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; 1054 int imm_data, imm_data_len, unsol_data, sg_nents, rc; 1055 bool dump_payload = false; 1056 unsigned int 
data_len; 1057 1058 rc = iscsit_setup_scsi_cmd(conn, cmd, buf); 1059 if (rc < 0) 1060 return rc; 1061 1062 imm_data = cmd->immediate_data; 1063 imm_data_len = cmd->first_burst_len; 1064 unsol_data = cmd->unsolicited_data; 1065 data_len = cmd->se_cmd.data_length; 1066 1067 if (imm_data && imm_data_len == data_len) 1068 cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1069 rc = iscsit_process_scsi_cmd(conn, cmd, hdr); 1070 if (rc < 0) { 1071 return 0; 1072 } else if (rc > 0) { 1073 dump_payload = true; 1074 goto sequence_cmd; 1075 } 1076 1077 if (!imm_data) 1078 return 0; 1079 1080 if (imm_data_len != data_len) { 1081 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); 1082 sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, 1083 isert_get_data(rx_desc), imm_data_len); 1084 isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n", 1085 sg_nents, imm_data_len); 1086 } else { 1087 sg_init_table(&isert_cmd->sg, 1); 1088 cmd->se_cmd.t_data_sg = &isert_cmd->sg; 1089 cmd->se_cmd.t_data_nents = 1; 1090 sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc), 1091 imm_data_len); 1092 isert_dbg("Transfer Immediate imm_data_len: %d\n", 1093 imm_data_len); 1094 } 1095 1096 cmd->write_data_done += imm_data_len; 1097 1098 if (cmd->write_data_done == cmd->se_cmd.data_length) { 1099 spin_lock_bh(&cmd->istate_lock); 1100 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1101 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1102 spin_unlock_bh(&cmd->istate_lock); 1103 } 1104 1105sequence_cmd: 1106 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); 1107 1108 if (!rc && dump_payload == false && unsol_data) 1109 iscsit_set_unsolicited_dataout(cmd); 1110 else if (dump_payload && imm_data) 1111 target_put_sess_cmd(&cmd->se_cmd); 1112 1113 return 0; 1114} 1115 1116static int 1117isert_handle_iscsi_dataout(struct isert_conn *isert_conn, 1118 struct iser_rx_desc *rx_desc, unsigned char *buf) 1119{ 1120 struct scatterlist *sg_start; 1121 struct iscsi_conn *conn = isert_conn->conn; 
1122 struct iscsi_cmd *cmd = NULL; 1123 struct iscsi_data *hdr = (struct iscsi_data *)buf; 1124 u32 unsol_data_len = ntoh24(hdr->dlength); 1125 int rc, sg_nents, sg_off, page_off; 1126 1127 rc = iscsit_check_dataout_hdr(conn, buf, &cmd); 1128 if (rc < 0) 1129 return rc; 1130 else if (!cmd) 1131 return 0; 1132 /* 1133 * FIXME: Unexpected unsolicited_data out 1134 */ 1135 if (!cmd->unsolicited_data) { 1136 isert_err("Received unexpected solicited data payload\n"); 1137 dump_stack(); 1138 return -1; 1139 } 1140 1141 isert_dbg("Unsolicited DataOut unsol_data_len: %u, " 1142 "write_data_done: %u, data_length: %u\n", 1143 unsol_data_len, cmd->write_data_done, 1144 cmd->se_cmd.data_length); 1145 1146 sg_off = cmd->write_data_done / PAGE_SIZE; 1147 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 1148 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE)); 1149 page_off = cmd->write_data_done % PAGE_SIZE; 1150 /* 1151 * FIXME: Non page-aligned unsolicited_data out 1152 */ 1153 if (page_off) { 1154 isert_err("unexpected non-page aligned data payload\n"); 1155 dump_stack(); 1156 return -1; 1157 } 1158 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u " 1159 "sg_nents: %u from %p %u\n", sg_start, sg_off, 1160 sg_nents, isert_get_data(rx_desc), unsol_data_len); 1161 1162 sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc), 1163 unsol_data_len); 1164 1165 rc = iscsit_check_dataout_payload(cmd, hdr, false); 1166 if (rc < 0) 1167 return rc; 1168 1169 /* 1170 * multiple data-outs on the same command can arrive - 1171 * so post the buffer before hand 1172 */ 1173 return isert_post_recv(isert_conn, rx_desc); 1174} 1175 1176static int 1177isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1178 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1179 unsigned char *buf) 1180{ 1181 struct iscsi_conn *conn = isert_conn->conn; 1182 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf; 1183 int rc; 1184 1185 rc = iscsit_setup_nop_out(conn, 
cmd, hdr); 1186 if (rc < 0) 1187 return rc; 1188 /* 1189 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload 1190 */ 1191 1192 return iscsit_process_nop_out(conn, cmd, hdr); 1193} 1194 1195static int 1196isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1197 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1198 struct iscsi_text *hdr) 1199{ 1200 struct iscsi_conn *conn = isert_conn->conn; 1201 u32 payload_length = ntoh24(hdr->dlength); 1202 int rc; 1203 unsigned char *text_in = NULL; 1204 1205 rc = iscsit_setup_text_cmd(conn, cmd, hdr); 1206 if (rc < 0) 1207 return rc; 1208 1209 if (payload_length) { 1210 text_in = kzalloc(payload_length, GFP_KERNEL); 1211 if (!text_in) 1212 return -ENOMEM; 1213 } 1214 cmd->text_in_ptr = text_in; 1215 1216 memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length); 1217 1218 return iscsit_process_text_cmd(conn, cmd, hdr); 1219} 1220 1221static int 1222isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, 1223 uint32_t read_stag, uint64_t read_va, 1224 uint32_t write_stag, uint64_t write_va) 1225{ 1226 struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc); 1227 struct iscsi_conn *conn = isert_conn->conn; 1228 struct iscsi_cmd *cmd; 1229 struct isert_cmd *isert_cmd; 1230 int ret = -EINVAL; 1231 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK); 1232 1233 if (conn->sess->sess_ops->SessionType && 1234 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { 1235 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery," 1236 " ignoring\n", opcode); 1237 return 0; 1238 } 1239 1240 switch (opcode) { 1241 case ISCSI_OP_SCSI_CMD: 1242 cmd = isert_allocate_cmd(conn, rx_desc); 1243 if (!cmd) 1244 break; 1245 1246 isert_cmd = iscsit_priv_cmd(cmd); 1247 isert_cmd->read_stag = read_stag; 1248 isert_cmd->read_va = read_va; 1249 isert_cmd->write_stag = write_stag; 1250 isert_cmd->write_va = write_va; 1251 isert_cmd->inv_rkey = read_stag ? 
read_stag : write_stag;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		break;
	case ISCSI_OP_TEXT:
		/* A continuation of an earlier Text exchange reuses its ITT. */
		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
		else
			cmd = isert_allocate_cmd(conn, rx_desc);

		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}

/* Log a work-completion error; flush errors are expected during teardown. */
static void
isert_print_wc(struct ib_wc *wc, const char *type)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		isert_err("%s failure: %s (%d) vend_err %x\n", type,
			  ib_wc_status_msg(wc->status), wc->status,
			  wc->vendor_err);
	else
		isert_dbg("%s failure: %s (%d)\n", type,
			  ib_wc_status_msg(wc->status), wc->status);
}

/*
 * Receive completion handler: sync the rx descriptor for CPU access,
 * decode the iSER header for remote STags/VAs, and dispatch the iSCSI
 * PDU.  A failed completion (other than a flush) reinstates the
 * connection.
 */
static void
isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
	struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
	struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc);
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "recv");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		return;
	}

	rx_desc->in_use = true;

	ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
			ISER_RX_SIZE, DMA_FROM_DEVICE);

	isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		  rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
		  (int)(wc->byte_len - ISER_HEADERS_LEN));

	switch (iser_ctrl->flags & 0xF0) {
	case ISCSI_CTRL:
		/* Remote-invalidate/remote-write STags and VAs, if valid. */
		if (iser_ctrl->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_ctrl->read_stag);
			read_va = be64_to_cpu(iser_ctrl->read_va);
			isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
				  read_stag, (unsigned long long)read_va);
		}
		if (iser_ctrl->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_ctrl->write_stag);
			write_va = be64_to_cpu(iser_ctrl->write_va);
			isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
				  write_stag, (unsigned long long)write_va);
		}

		isert_dbg("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		isert_err("iSER Hello message\n");
		break;
	default:
		isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags);
		break;
	}

	isert_rx_opcode(isert_conn, rx_desc,
			read_stag, read_va, write_stag, write_va);

	ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
			ISER_RX_SIZE, DMA_FROM_DEVICE);
}

/*
 * Login receive completion: stash the login request length and, for
 * requests after the first, feed the PDU into the login path directly.
 */
static void
isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->device->ib_device;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "login recv");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr,
			ISER_RX_SIZE, DMA_FROM_DEVICE);

	isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;

	if (isert_conn->conn) {
		struct iscsi_login *login = isert_conn->conn->conn_login;

		if (login && !login->first_request)
			isert_rx_login_req(isert_conn);
	}

	/* Serialize against connection teardown before signalling. */
	mutex_lock(&isert_conn->mutex);
	complete(&isert_conn->login_req_comp);
	mutex_unlock(&isert_conn->mutex);

	ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr,
				ISER_RX_SIZE, DMA_FROM_DEVICE);
}

/*
 * Tear down the rdma_rw context of a command, using the signature
 * variant when T10-PI was in effect.  Safe to call when no context was
 * initialized (nr_ops == 0).
 */
static void
isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
{
	struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);

	if (!cmd->rw.nr_ops)
		return;

	if (isert_prot_cmd(conn, se_cmd)) {
		rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
				conn->cm_id->port_num, se_cmd->t_data_sg,
				se_cmd->t_data_nents, se_cmd->t_prot_sg,
				se_cmd->t_prot_nents, dir);
	} else {
		rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
	}

	cmd->rw.nr_ops = 0;
}

/*
 * Release an isert_cmd, dropping target-core references according to
 * the PDU type that created it.
 */
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_text_rsp *hdr;

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd);
			}
		}

		isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
		/* If the continue bit is on, keep the command alive */
		if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
			break;

		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
				  cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		fallthrough;
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}

/* Unmap the headers of a tx descriptor if they are still DMA-mapped. */
static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		isert_dbg("unmap single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}

/*
 * Completion-side teardown: unmap any extra PDU payload buffer and the
 * tx descriptor headers, then release the command itself.
 */
static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
}

/*
 * Query the signature MR after an RDMA with T10-PI and translate any
 * guard/reftag/apptag error into the matching target-core sense code.
 * Returns 0 when no PI error occurred, 1 on a PI error, negative on
 * query failure.
 */
static int
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		isert_err("ib_check_mr_status failed, ret %d\n", ret);
		goto fail_mr_status;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		u64 sec_offset_err;
		/* Data block plus the 8-byte PI tuple per sector. */
		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;

		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
			break;
		case IB_SIG_BAD_REFTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
			break;
		case IB_SIG_BAD_APPTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
			break;
		}
		sec_offset_err =
mr_status.sig_err.sig_err_offset;
		do_div(sec_offset_err, block_size);
		se_cmd->sense_info = sec_offset_err + se_cmd->t_task_lba;

		isert_err("PI error found type %d at sector 0x%llx "
			  "expected 0x%x vs actual 0x%x\n",
			  mr_status.sig_err.err_type,
			  (unsigned long long)se_cmd->sense_info,
			  mr_status.sig_err.expected,
			  mr_status.sig_err.actual);
		ret = 1;
	}

fail_mr_status:
	return ret;
}

/*
 * RDMA WRITE (Data-In) completion: check PI status, tear down the rw
 * context, and either fail the command or send the SCSI response.
 */
static void
isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct isert_device *device = isert_conn->device;
	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
	struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "rdma write");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(desc, isert_cmd, device->ib_device, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	ret = isert_check_pi_status(cmd, isert_cmd->rw.reg->mr);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);

	if (ret) {
		/*
		 * transport_generic_request_failure() expects to have
		 * plus two references to handle queue-full, so re-add
		 * one here as target-core will have already dropped
		 * it after the first isert_put_datain() callback.
		 */
		kref_get(&cmd->cmd_kref);
		transport_generic_request_failure(cmd, cmd->pi_err);
	} else {
		/*
		 * XXX: isert_put_response() failure is not retried.
		 */
		ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
		if (ret)
			pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
	}
}

/*
 * RDMA READ (Data-Out) completion: the write payload has landed, so
 * hand the command to target-core for execution (or fail it on a PI
 * error).
 */
static void
isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct isert_device *device = isert_conn->device;
	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "rdma read");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(desc, isert_cmd, device->ib_device, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	iscsit_stop_dataout_timer(cmd);

	if (isert_prot_cmd(isert_conn, se_cmd))
		ret = isert_check_pi_status(se_cmd, isert_cmd->rw.reg->mr);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
	cmd->write_data_done = 0;

	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	/*
	 * transport_generic_request_failure() will drop the extra
	 * se_cmd->cmd_kref reference after T10-PI error, and handle
	 * any non-zero ->queue_status() callback error retries.
	 */
	if (ret)
		transport_generic_request_failure(se_cmd, se_cmd->pi_err);
	else
		target_execute_cmd(se_cmd);
}

/*
 * Deferred completion work for control PDUs whose post-send handling
 * must run in process context.
 */
static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		iscsit_tmr_post_handler(cmd, cmd->conn);
		fallthrough;
	case ISTATE_SEND_REJECT:
	case ISTATE_SEND_TEXTRSP:
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
				     ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	default:
		isert_err("Unknown i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}

/* Login send completion: only the tx descriptor needs unmapping. */
static void
isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "login send");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
}

/*
 * Generic send completion: control responses are finished from a work
 * item in process context; everything else is torn down here.
 */
static void
isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc,
"send");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (isert_cmd->iscsi_cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
	case ISTATE_SEND_LOGOUTRSP:
	case ISTATE_SEND_REJECT:
	case ISTATE_SEND_TEXTRSP:
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	default:
		isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
		break;
	}
}

/*
 * Replenish a receive buffer, then post the prepared send WR for a
 * response PDU.
 */
static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	int ret;

	ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
	if (ret)
		return ret;

	ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, NULL);
	if (ret) {
		isert_err("ib_post_send failed with %d\n", ret);
		return ret;
	}
	return ret;
}

/*
 * Build and post a SCSI Response PDU, attaching any sense data as a
 * second SGE.
 */
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		/* SenseLength prefix precedes the sense payload on the wire. */
		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
			return -ENOMEM;

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = pdu_len;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("Posting SCSI Response\n");

	return isert_post_response(isert_conn, isert_cmd);
}

/*
 * Aborted-task callback: unlink the command and tear down any RDMA
 * context without sending a response.
 */
static void
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	if (cmd->data_direction == DMA_TO_DEVICE)
		iscsit_stop_dataout_timer(cmd);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
}

/* Report which T10-PI operations this connection can offload. */
static enum target_prot_op
isert_get_sup_prot_ops(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	if (conn->tpg->tpg_attrib.t10_pi) {
		if (device->pi_capable) {
			isert_info("conn %p PI offload enabled\n", isert_conn);
			isert_conn->pi_support = true;
			return TARGET_PROT_ALL;
		}
	}

	isert_info("conn %p PI offload disabled\n", isert_conn);
	isert_conn->pi_support = false;

	return TARGET_PROT_NORMAL;
}

/*
 * Build and post a NOP-In, either as a response to a NOP-Out or as a
 * target-initiated ping.
 */
static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

/* Build and post a Logout Response PDU. */
static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Logout Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

/* Build and post a Task Management Response PDU. */
static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Task Management Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

/*
 * Build and post a Reject PDU; the offending header is attached as the
 * data segment (second SGE).
 */
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
		return -ENOMEM;
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr = isert_cmd->pdu_buf_dma;
	tx_dsg->length = ISCSI_HDR_LEN;
	tx_dsg->lkey = device->pd->local_dma_lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Reject\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

/*
 * Build and post a Text Response PDU, mapping the key=value payload as
 * a second SGE when present.
 */
static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct
iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
			return -ENOMEM;

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = txt_rsp_len;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Text Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

/* Fill in one T10-DIF signature domain from the se_cmd attributes. */
static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * At the moment we hard code those, but if in the future
	 * the target core would like to use it, we will take it
	 * from se_cmd.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
		domain->sig.dif.ref_remap = true;
};

/*
 * Translate the se_cmd protection operation into ib_sig_attrs for the
 * signature MR.  Returns -EINVAL for unsupported PI operations.
 */
static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	memset(sig_attrs, 0, sizeof(*sig_attrs));

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, &sig_attrs->wire);
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, &sig_attrs->mem);
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		isert_set_dif_domain(se_cmd, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, &sig_attrs->mem);
		break;
	default:
		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	if (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)
		sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
	if (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)
		sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
	if (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG)
		sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;

	return 0;
}

/*
 * Initialize (once per command) and post the rdma_rw context in either
 * direction; chain_wr, when set, is posted after the RDMA completes.
 */
static int
isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
		       struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
	u8 port_num = conn->cm_id->port_num;
	u64 addr;
	u32 rkey, offset;
	int ret;

	/* Data-out recovery may re-post an already initialized context. */
	if (cmd->ctx_init_done)
		goto rdma_ctx_post;

	if (dir == DMA_FROM_DEVICE) {
		addr = cmd->write_va;
		rkey = cmd->write_stag;
		offset = cmd->iscsi_cmd->write_data_done;
	} else {
		addr = cmd->read_va;
		rkey = cmd->read_stag;
		offset = 0;
	}

	if (isert_prot_cmd(conn, se_cmd)) {
		struct ib_sig_attrs sig_attrs;

		ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
		if (ret)
			return ret;

		WARN_ON_ONCE(offset);
		ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents,
				se_cmd->t_prot_sg, se_cmd->t_prot_nents,
				&sig_attrs, addr, rkey, dir);
	} else {
		ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents,
				offset, addr, rkey, dir);
	}

	if (ret < 0) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
		return ret;
	}

	cmd->ctx_init_done = true;

rdma_ctx_post:
	ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
	if (ret < 0)
		isert_err("Cmd: %p failed to post RDMA res\n", cmd);
	return ret;
}

/*
 * Start the RDMA WRITE of Data-In to the initiator.  Without PI the
 * SCSI response send WR is chained behind the RDMA.
 */
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_cqe *cqe = NULL;
	struct ib_send_wr *chain_wr = NULL;
	int rc;

	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
		  isert_cmd, se_cmd->data_length);

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
		cqe = &isert_cmd->tx_desc.tx_cqe;
	} else {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
isert_init_send_wr(isert_conn, isert_cmd, 2120 &isert_cmd->tx_desc.send_wr); 2121 2122 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc); 2123 if (rc) 2124 return rc; 2125 2126 chain_wr = &isert_cmd->tx_desc.send_wr; 2127 } 2128 2129 rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr); 2130 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n", 2131 isert_cmd, rc); 2132 return rc; 2133} 2134 2135static int 2136isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) 2137{ 2138 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2139 int ret; 2140 2141 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", 2142 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done); 2143 2144 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done; 2145 ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context, 2146 &isert_cmd->tx_desc.tx_cqe, NULL); 2147 2148 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n", 2149 isert_cmd, ret); 2150 return ret; 2151} 2152 2153static int 2154isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 2155{ 2156 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2157 int ret = 0; 2158 2159 switch (state) { 2160 case ISTATE_REMOVE: 2161 spin_lock_bh(&conn->cmd_lock); 2162 list_del_init(&cmd->i_conn_node); 2163 spin_unlock_bh(&conn->cmd_lock); 2164 isert_put_cmd(isert_cmd, true); 2165 break; 2166 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 2167 ret = isert_put_nopin(cmd, conn, false); 2168 break; 2169 default: 2170 isert_err("Unknown immediate state: 0x%02x\n", state); 2171 ret = -EINVAL; 2172 break; 2173 } 2174 2175 return ret; 2176} 2177 2178static int 2179isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 2180{ 2181 struct isert_conn *isert_conn = conn->context; 2182 int ret; 2183 2184 switch (state) { 2185 case ISTATE_SEND_LOGOUTRSP: 2186 ret = isert_put_logout_rsp(cmd, conn); 2187 if (!ret) 2188 
isert_conn->logout_posted = true; 2189 break; 2190 case ISTATE_SEND_NOPIN: 2191 ret = isert_put_nopin(cmd, conn, true); 2192 break; 2193 case ISTATE_SEND_TASKMGTRSP: 2194 ret = isert_put_tm_rsp(cmd, conn); 2195 break; 2196 case ISTATE_SEND_REJECT: 2197 ret = isert_put_reject(cmd, conn); 2198 break; 2199 case ISTATE_SEND_TEXTRSP: 2200 ret = isert_put_text_rsp(cmd, conn); 2201 break; 2202 case ISTATE_SEND_STATUS: 2203 /* 2204 * Special case for sending non GOOD SCSI status from TX thread 2205 * context during pre se_cmd excecution failure. 2206 */ 2207 ret = isert_put_response(conn, cmd); 2208 break; 2209 default: 2210 isert_err("Unknown response state: 0x%02x\n", state); 2211 ret = -EINVAL; 2212 break; 2213 } 2214 2215 return ret; 2216} 2217 2218struct rdma_cm_id * 2219isert_setup_id(struct isert_np *isert_np) 2220{ 2221 struct iscsi_np *np = isert_np->np; 2222 struct rdma_cm_id *id; 2223 struct sockaddr *sa; 2224 int ret; 2225 2226 sa = (struct sockaddr *)&np->np_sockaddr; 2227 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa); 2228 2229 id = rdma_create_id(&init_net, isert_cma_handler, isert_np, 2230 RDMA_PS_TCP, IB_QPT_RC); 2231 if (IS_ERR(id)) { 2232 isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id)); 2233 ret = PTR_ERR(id); 2234 goto out; 2235 } 2236 isert_dbg("id %p context %p\n", id, id->context); 2237 2238 ret = rdma_bind_addr(id, sa); 2239 if (ret) { 2240 isert_err("rdma_bind_addr() failed: %d\n", ret); 2241 goto out_id; 2242 } 2243 2244 ret = rdma_listen(id, 0); 2245 if (ret) { 2246 isert_err("rdma_listen() failed: %d\n", ret); 2247 goto out_id; 2248 } 2249 2250 return id; 2251out_id: 2252 rdma_destroy_id(id); 2253out: 2254 return ERR_PTR(ret); 2255} 2256 2257static int 2258isert_setup_np(struct iscsi_np *np, 2259 struct sockaddr_storage *ksockaddr) 2260{ 2261 struct isert_np *isert_np; 2262 struct rdma_cm_id *isert_lid; 2263 int ret; 2264 2265 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); 2266 if (!isert_np) 2267 return -ENOMEM; 
2268 2269 sema_init(&isert_np->sem, 0); 2270 mutex_init(&isert_np->mutex); 2271 INIT_LIST_HEAD(&isert_np->accepted); 2272 INIT_LIST_HEAD(&isert_np->pending); 2273 isert_np->np = np; 2274 2275 /* 2276 * Setup the np->np_sockaddr from the passed sockaddr setup 2277 * in iscsi_target_configfs.c code.. 2278 */ 2279 memcpy(&np->np_sockaddr, ksockaddr, 2280 sizeof(struct sockaddr_storage)); 2281 2282 isert_lid = isert_setup_id(isert_np); 2283 if (IS_ERR(isert_lid)) { 2284 ret = PTR_ERR(isert_lid); 2285 goto out; 2286 } 2287 2288 isert_np->cm_id = isert_lid; 2289 np->np_context = isert_np; 2290 2291 return 0; 2292 2293out: 2294 kfree(isert_np); 2295 2296 return ret; 2297} 2298 2299static int 2300isert_rdma_accept(struct isert_conn *isert_conn) 2301{ 2302 struct rdma_cm_id *cm_id = isert_conn->cm_id; 2303 struct rdma_conn_param cp; 2304 int ret; 2305 struct iser_cm_hdr rsp_hdr; 2306 2307 memset(&cp, 0, sizeof(struct rdma_conn_param)); 2308 cp.initiator_depth = isert_conn->initiator_depth; 2309 cp.retry_count = 7; 2310 cp.rnr_retry_count = 7; 2311 2312 memset(&rsp_hdr, 0, sizeof(rsp_hdr)); 2313 rsp_hdr.flags = ISERT_ZBVA_NOT_USED; 2314 if (!isert_conn->snd_w_inv) 2315 rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED; 2316 cp.private_data = (void *)&rsp_hdr; 2317 cp.private_data_len = sizeof(rsp_hdr); 2318 2319 ret = rdma_accept(cm_id, &cp); 2320 if (ret) { 2321 isert_err("rdma_accept() failed with: %d\n", ret); 2322 return ret; 2323 } 2324 2325 return 0; 2326} 2327 2328static int 2329isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) 2330{ 2331 struct isert_conn *isert_conn = conn->context; 2332 int ret; 2333 2334 isert_info("before login_req comp conn: %p\n", isert_conn); 2335 ret = wait_for_completion_interruptible(&isert_conn->login_req_comp); 2336 if (ret) { 2337 isert_err("isert_conn %p interrupted before got login req\n", 2338 isert_conn); 2339 return ret; 2340 } 2341 reinit_completion(&isert_conn->login_req_comp); 2342 2343 /* 2344 * For 
login requests after the first PDU, isert_rx_login_req() will 2345 * kick schedule_delayed_work(&conn->login_work) as the packet is 2346 * received, which turns this callback from iscsi_target_do_login_rx() 2347 * into a NOP. 2348 */ 2349 if (!login->first_request) 2350 return 0; 2351 2352 isert_rx_login_req(isert_conn); 2353 2354 isert_info("before login_comp conn: %p\n", conn); 2355 ret = wait_for_completion_interruptible(&isert_conn->login_comp); 2356 if (ret) 2357 return ret; 2358 2359 isert_info("processing login->req: %p\n", login->req); 2360 2361 return 0; 2362} 2363 2364static void 2365isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, 2366 struct isert_conn *isert_conn) 2367{ 2368 struct rdma_cm_id *cm_id = isert_conn->cm_id; 2369 struct rdma_route *cm_route = &cm_id->route; 2370 2371 conn->login_family = np->np_sockaddr.ss_family; 2372 2373 conn->login_sockaddr = cm_route->addr.dst_addr; 2374 conn->local_sockaddr = cm_route->addr.src_addr; 2375} 2376 2377static int 2378isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) 2379{ 2380 struct isert_np *isert_np = np->np_context; 2381 struct isert_conn *isert_conn; 2382 int ret; 2383 2384accept_wait: 2385 ret = down_interruptible(&isert_np->sem); 2386 if (ret) 2387 return -ENODEV; 2388 2389 spin_lock_bh(&np->np_thread_lock); 2390 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { 2391 spin_unlock_bh(&np->np_thread_lock); 2392 isert_dbg("np_thread_state %d\n", 2393 np->np_thread_state); 2394 /** 2395 * No point in stalling here when np_thread 2396 * is in state RESET/SHUTDOWN/EXIT - bail 2397 **/ 2398 return -ENODEV; 2399 } 2400 spin_unlock_bh(&np->np_thread_lock); 2401 2402 mutex_lock(&isert_np->mutex); 2403 if (list_empty(&isert_np->pending)) { 2404 mutex_unlock(&isert_np->mutex); 2405 goto accept_wait; 2406 } 2407 isert_conn = list_first_entry(&isert_np->pending, 2408 struct isert_conn, node); 2409 list_del_init(&isert_conn->node); 2410 mutex_unlock(&isert_np->mutex); 2411 2412 
conn->context = isert_conn; 2413 isert_conn->conn = conn; 2414 isert_conn->state = ISER_CONN_BOUND; 2415 2416 isert_set_conn_info(np, conn, isert_conn); 2417 2418 isert_dbg("Processing isert_conn: %p\n", isert_conn); 2419 2420 return 0; 2421} 2422 2423static void 2424isert_free_np(struct iscsi_np *np) 2425{ 2426 struct isert_np *isert_np = np->np_context; 2427 struct isert_conn *isert_conn, *n; 2428 LIST_HEAD(drop_conn_list); 2429 2430 if (isert_np->cm_id) 2431 rdma_destroy_id(isert_np->cm_id); 2432 2433 /* 2434 * FIXME: At this point we don't have a good way to insure 2435 * that at this point we don't have hanging connections that 2436 * completed RDMA establishment but didn't start iscsi login 2437 * process. So work-around this by cleaning up what ever piled 2438 * up in accepted and pending lists. 2439 */ 2440 mutex_lock(&isert_np->mutex); 2441 if (!list_empty(&isert_np->pending)) { 2442 isert_info("Still have isert pending connections\n"); 2443 list_for_each_entry_safe(isert_conn, n, 2444 &isert_np->pending, 2445 node) { 2446 isert_info("cleaning isert_conn %p state (%d)\n", 2447 isert_conn, isert_conn->state); 2448 list_move_tail(&isert_conn->node, &drop_conn_list); 2449 } 2450 } 2451 2452 if (!list_empty(&isert_np->accepted)) { 2453 isert_info("Still have isert accepted connections\n"); 2454 list_for_each_entry_safe(isert_conn, n, 2455 &isert_np->accepted, 2456 node) { 2457 isert_info("cleaning isert_conn %p state (%d)\n", 2458 isert_conn, isert_conn->state); 2459 list_move_tail(&isert_conn->node, &drop_conn_list); 2460 } 2461 } 2462 mutex_unlock(&isert_np->mutex); 2463 2464 list_for_each_entry_safe(isert_conn, n, &drop_conn_list, node) { 2465 list_del_init(&isert_conn->node); 2466 isert_connect_release(isert_conn); 2467 } 2468 2469 np->np_context = NULL; 2470 kfree(isert_np); 2471} 2472 2473static void isert_release_work(struct work_struct *work) 2474{ 2475 struct isert_conn *isert_conn = container_of(work, 2476 struct isert_conn, 2477 release_work); 2478 
2479 isert_info("Starting release conn %p\n", isert_conn); 2480 2481 mutex_lock(&isert_conn->mutex); 2482 isert_conn->state = ISER_CONN_DOWN; 2483 mutex_unlock(&isert_conn->mutex); 2484 2485 isert_info("Destroying conn %p\n", isert_conn); 2486 isert_put_conn(isert_conn); 2487} 2488 2489static void 2490isert_wait4logout(struct isert_conn *isert_conn) 2491{ 2492 struct iscsi_conn *conn = isert_conn->conn; 2493 2494 isert_info("conn %p\n", isert_conn); 2495 2496 if (isert_conn->logout_posted) { 2497 isert_info("conn %p wait for conn_logout_comp\n", isert_conn); 2498 wait_for_completion_timeout(&conn->conn_logout_comp, 2499 SECONDS_FOR_LOGOUT_COMP * HZ); 2500 } 2501} 2502 2503static void 2504isert_wait4cmds(struct iscsi_conn *conn) 2505{ 2506 isert_info("iscsi_conn %p\n", conn); 2507 2508 if (conn->sess) { 2509 target_sess_cmd_list_set_waiting(conn->sess->se_sess); 2510 target_wait_for_sess_cmds(conn->sess->se_sess); 2511 } 2512} 2513 2514/** 2515 * isert_put_unsol_pending_cmds() - Drop commands waiting for 2516 * unsolicitate dataout 2517 * @conn: iscsi connection 2518 * 2519 * We might still have commands that are waiting for unsolicited 2520 * dataouts messages. 
We must put the extra reference on those 2521 * before blocking on the target_wait_for_session_cmds 2522 */ 2523static void 2524isert_put_unsol_pending_cmds(struct iscsi_conn *conn) 2525{ 2526 struct iscsi_cmd *cmd, *tmp; 2527 static LIST_HEAD(drop_cmd_list); 2528 2529 spin_lock_bh(&conn->cmd_lock); 2530 list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) { 2531 if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) && 2532 (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) && 2533 (cmd->write_data_done < cmd->se_cmd.data_length)) 2534 list_move_tail(&cmd->i_conn_node, &drop_cmd_list); 2535 } 2536 spin_unlock_bh(&conn->cmd_lock); 2537 2538 list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) { 2539 list_del_init(&cmd->i_conn_node); 2540 if (cmd->i_state != ISTATE_REMOVE) { 2541 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2542 2543 isert_info("conn %p dropping cmd %p\n", conn, cmd); 2544 isert_put_cmd(isert_cmd, true); 2545 } 2546 } 2547} 2548 2549static void isert_wait_conn(struct iscsi_conn *conn) 2550{ 2551 struct isert_conn *isert_conn = conn->context; 2552 2553 isert_info("Starting conn %p\n", isert_conn); 2554 2555 mutex_lock(&isert_conn->mutex); 2556 isert_conn_terminate(isert_conn); 2557 mutex_unlock(&isert_conn->mutex); 2558 2559 ib_drain_qp(isert_conn->qp); 2560 isert_put_unsol_pending_cmds(conn); 2561 isert_wait4cmds(conn); 2562 isert_wait4logout(isert_conn); 2563 2564 queue_work(isert_release_wq, &isert_conn->release_work); 2565} 2566 2567static void isert_free_conn(struct iscsi_conn *conn) 2568{ 2569 struct isert_conn *isert_conn = conn->context; 2570 2571 ib_drain_qp(isert_conn->qp); 2572 isert_put_conn(isert_conn); 2573} 2574 2575static void isert_get_rx_pdu(struct iscsi_conn *conn) 2576{ 2577 struct completion comp; 2578 2579 init_completion(&comp); 2580 2581 wait_for_completion_interruptible(&comp); 2582} 2583 2584static struct iscsit_transport iser_target_transport = { 2585 .name = "IB/iSER", 
2586 .transport_type = ISCSI_INFINIBAND, 2587 .rdma_shutdown = true, 2588 .priv_size = sizeof(struct isert_cmd), 2589 .owner = THIS_MODULE, 2590 .iscsit_setup_np = isert_setup_np, 2591 .iscsit_accept_np = isert_accept_np, 2592 .iscsit_free_np = isert_free_np, 2593 .iscsit_wait_conn = isert_wait_conn, 2594 .iscsit_free_conn = isert_free_conn, 2595 .iscsit_get_login_rx = isert_get_login_rx, 2596 .iscsit_put_login_tx = isert_put_login_tx, 2597 .iscsit_immediate_queue = isert_immediate_queue, 2598 .iscsit_response_queue = isert_response_queue, 2599 .iscsit_get_dataout = isert_get_dataout, 2600 .iscsit_queue_data_in = isert_put_datain, 2601 .iscsit_queue_status = isert_put_response, 2602 .iscsit_aborted_task = isert_aborted_task, 2603 .iscsit_get_rx_pdu = isert_get_rx_pdu, 2604 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops, 2605}; 2606 2607static int __init isert_init(void) 2608{ 2609 int ret; 2610 2611 isert_comp_wq = alloc_workqueue("isert_comp_wq", 2612 WQ_UNBOUND | WQ_HIGHPRI, 0); 2613 if (!isert_comp_wq) { 2614 isert_err("Unable to allocate isert_comp_wq\n"); 2615 return -ENOMEM; 2616 } 2617 2618 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND, 2619 WQ_UNBOUND_MAX_ACTIVE); 2620 if (!isert_release_wq) { 2621 isert_err("Unable to allocate isert_release_wq\n"); 2622 ret = -ENOMEM; 2623 goto destroy_comp_wq; 2624 } 2625 2626 iscsit_register_transport(&iser_target_transport); 2627 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n"); 2628 2629 return 0; 2630 2631destroy_comp_wq: 2632 destroy_workqueue(isert_comp_wq); 2633 2634 return ret; 2635} 2636 2637static void __exit isert_exit(void) 2638{ 2639 flush_scheduled_work(); 2640 destroy_workqueue(isert_release_wq); 2641 destroy_workqueue(isert_comp_wq); 2642 iscsit_unregister_transport(&iser_target_transport); 2643 isert_info("iSER_TARGET[0] - Released iser_target_transport\n"); 2644} 2645 2646MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); 
2647MODULE_AUTHOR("nab@Linux-iSCSI.org"); 2648MODULE_LICENSE("GPL"); 2649 2650module_init(isert_init); 2651module_exit(isert_exit); 2652