/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

static void flush_work_handle(struct work_struct *work)
{
	struct hns_roce_work *flush_work = container_of(work,
					struct hns_roce_work, work);
	struct hns_roce_qp *hr_qp = container_of(flush_work,
					struct hns_roce_qp, flush_work);
	struct device *dev = flush_work->hr_dev->dev;
	struct ib_qp_attr attr;
	int attr_mask;
	int ret;

	attr_mask = IB_QP_STATE;
	attr.qp_state = IB_QPS_ERR;

	if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
		ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
		if (ret)
			dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
				ret);
	}

	/*
	 * make sure we signal QP destroy leg that flush QP was completed
	 * so that it can safely proceed ahead now and destroy QP
	 */
	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
}

void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_work *flush_work = &hr_qp->flush_work;

	flush_work->hr_dev = hr_dev;
	INIT_WORK(&flush_work->work, flush_work_handle);
	atomic_inc(&hr_qp->refcount);
	queue_work(hr_dev->irq_workq, &flush_work->work);
}

void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	xa_lock(&hr_dev->qp_table_xa);
	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);
	xa_unlock(&hr_dev->qp_table_xa);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
	    (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
	     event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
	     event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) {
		qp->state = IB_QPS_ERR;
		if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
			init_flush_work(hr_dev, qp);
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_qp *ibqp = &hr_qp->ibqp;
	struct ib_event event;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	unsigned long num = 0;
	int ret;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		/* when hw version is v1, the sqpn is allocated */
		if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
			num = HNS_ROCE_MAX_PORTS +
			      hr_dev->iboe.phy_port[hr_qp->port];
		else
			num = 1;

		hr_qp->doorbell_qpn = 1;
	} else {
		ret = hns_roce_bitmap_alloc_range(&hr_dev->qp_table.bitmap,
						  1, 1, &num);
		if (ret) {
			ibdev_err(&hr_dev->ib_dev, "Failed to alloc bitmap\n");
			return -ENOMEM;
		}

		hr_qp->doorbell_qpn = (u32)num;
	}

	hr_qp->qpn = num;

	return 0;
}

enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}

static void add_qp_to_list(struct hns_roce_dev *hr_dev,
			   struct hns_roce_qp *hr_qp,
			   struct ib_cq *send_cq, struct ib_cq *recv_cq)
{
	struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
	unsigned long flags;

	hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
	hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);

	list_add_tail(&hr_qp->node, &hr_dev->qp_list);
	if (hr_send_cq)
		list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
	if (hr_recv_cq)
		list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);

	hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp,
			     struct ib_qp_init_attr *init_attr)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
	if (ret)
		dev_err(hr_dev->dev, "Failed to xa store for QPC\n");
	else
		/* add QP to device's QP list for softwc */
		add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
			       init_attr->recv_cq);

	return ret;
}

static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	/* In v1 engine, GSI QP context is saved in the RoCE hw's register */
	if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		return 0;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "Failed to get QPC table\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "Failed to get IRRL table\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "Failed to get TRRL table\n");
			goto err_put_irrl;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		/* Alloc memory for SCC CTX */
		ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "Failed to get SCC CTX table\n");
			goto err_put_trrl;
		}
	}

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	unsigned long flags;

	list_del(&hr_qp->node);
	list_del(&hr_qp->sq_node);
	list_del(&hr_qp->rq_node);

	xa_lock_irqsave(xa, flags);
	__xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	xa_unlock_irqrestore(xa, flags);
}

static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	/* In v1 engine, GSI QP context is saved in the RoCE hw's register */
	if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		return;

	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
}

static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
		return;

	if (hr_qp->qpn < hr_dev->caps.reserved_qps)
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR);
}

static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
		       struct hns_roce_qp *hr_qp, int has_rq)
{
	u32 cnt;

	/* If an SRQ exists, the RQ is unused; set its sizes to zero */
	if (!has_rq) {
		hr_qp->rq.wqe_cnt = 0;
		hr_qp->rq.max_gs = 0;
		hr_qp->rq_inl_buf.wqe_cnt = 0;
		cap->max_recv_wr = 0;
		cap->max_recv_sge = 0;

		return 0;
	}

	/* Check the requested RQ depth and SGE count against device caps */
	if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		ibdev_err(&hr_dev->ib_dev, "RQ config error, depth=%u, sge=%d\n",
			  cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
			  cap->max_recv_wr);
		return -EINVAL;
	}

	hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));

	hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
				    hr_qp->rq.max_gs);

	hr_qp->rq.wqe_cnt = cnt;
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		hr_qp->rq_inl_buf.wqe_cnt = cnt;
	else
		hr_qp->rq_inl_buf.wqe_cnt = 0;

	cap->max_recv_wr = cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}

static int set_extend_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
				struct hns_roce_qp *hr_qp,
				struct ib_qp_cap *cap)
{
	u32 cnt;

	cnt = max(1U, cap->max_send_sge);
	if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
		hr_qp->sq.max_gs = roundup_pow_of_two(cnt);
		hr_qp->sge.sge_cnt = 0;

		return 0;
	}

	hr_qp->sq.max_gs = cnt;

	/* UD SQ WQEs carry their SGEs in the extended SGE space */
	if (hr_qp->ibqp.qp_type == IB_QPT_GSI ||
	    hr_qp->ibqp.qp_type == IB_QPT_UD) {
		cnt = roundup_pow_of_two(sq_wqe_cnt * hr_qp->sq.max_gs);
	} else if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) {
		cnt = roundup_pow_of_two(sq_wqe_cnt *
			(hr_qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE));
	} else {
		cnt = 0;
	}

	hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;

	/* If the number of extended SGEs is not zero, they must occupy at
	 * least HNS_HW_PAGE_SIZE of space.
	 */
	hr_qp->sge.sge_cnt = cnt ? max(cnt, (u32)HNS_HW_PAGE_SIZE /
					    HNS_ROCE_SGE_SIZE) : 0;

	return 0;
}

static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
					struct ib_qp_cap *cap,
					struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);

	/* Sanity check SQ size before proceeding */
	if (ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n",
			  cap->max_send_sge);
		return -EINVAL;
	}

	return 0;
}

static int set_user_sq_size(struct hns_roce_dev *hr_dev,
			    struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
			    struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt = 0;
	int ret;

	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
	    cnt > hr_dev->caps.max_wqes)
		return -EINVAL;

	ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
	if (ret) {
		ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
	if (ret)
		return ret;

	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
	hr_qp->sq.wqe_cnt = cnt;

	return 0;
}

static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp,
			    struct hns_roce_buf_attr *buf_attr)
{
	int buf_size;
	int idx = 0;

	hr_qp->buff_size = 0;

	/* SQ WQE */
	hr_qp->sq.offset = 0;
	buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
					  hr_qp->sq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* extend SGE WQE in SQ */
	hr_qp->sge.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
					  hr_qp->sge.sge_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* RQ WQE */
	hr_qp->rq.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
					  hr_qp->rq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	if (hr_qp->buff_size < 1)
		return -EINVAL;

	buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
	buf_attr->fixed_page = true;
	buf_attr->region_count = idx;

	return 0;
}

static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
			      struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt;
	int ret;

	if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		ibdev_err(ibdev,
			  "failed to check SQ WR or SGE num, ret = %d.\n",
			  -EINVAL);
		return -EINVAL;
	}
	cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n",
			  cnt);
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq.wqe_cnt = cnt;

	ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
	if (ret)
		return ret;

	/* sync the parameters of kernel QP to user's configuration */
	cap->max_send_wr = cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	return 0;
}

static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
		return 0;

	return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}

static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
			       struct ib_qp_init_attr *init_attr)
{
	u32 max_recv_sge = init_attr->cap.max_recv_sge;
	u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt;
	struct hns_roce_rinl_wqe *wqe_list;
	int i;

	/* allocate recv inline buf */
	wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe),
			   GFP_KERNEL);

	if (!wqe_list)
		goto err;

	/* Allocate one contiguous buffer for all the inline SGEs we need */
	wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge *
				      sizeof(struct hns_roce_rinl_sge)),
				      GFP_KERNEL);
	if (!wqe_list[0].sg_list)
		goto err_wqe_list;

	/* Assign buffers of sg_list to each inline wqe */
	for (i = 1; i < wqe_cnt; i++)
		wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];

	hr_qp->rq_inl_buf.wqe_list = wqe_list;

	return 0;

err_wqe_list:
	kfree(wqe_list);

err:
	return -ENOMEM;
}

static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
{
	if (hr_qp->rq_inl_buf.wqe_list)
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
	kfree(hr_qp->rq_inl_buf.wqe_list);
}

static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	if (!udata && hr_qp->rq_inl_buf.wqe_cnt) {
		ret = alloc_rq_inline_buf(hr_qp, init_attr);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to alloc inline buf, ret = %d.\n",
				  ret);
			return ret;
		}
	} else {
		hr_qp->rq_inl_buf.wqe_list = NULL;
	}

	ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
		goto err_inline;
	}
	ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
				  HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
				  udata, addr);
	if (ret) {
		ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
		goto err_inline;
	}

	return 0;
err_inline:
	free_rq_inline_buf(hr_qp);

	return ret;
}

static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
	free_rq_inline_buf(hr_qp);
}

static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp,
				   struct hns_roce_ib_create_qp *ucmd)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_sq(init_attr) &&
		udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
}

static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_rq(init_attr));
}

static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
				     struct ib_qp_init_attr *init_attr)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		hns_roce_qp_has_rq(init_attr));
}

static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata,
		       struct hns_roce_ib_create_qp *ucmd,
		       struct hns_roce_ib_create_qp_resp *resp)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct hns_roce_ucontext, ibucontext);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (udata) {
		if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
			ret = hns_roce_db_map_user(uctx, udata, ucmd->sdb_addr,
						   &hr_qp->sdb);
			if (ret) {
				ibdev_err(ibdev,
					  "failed to map user SQ doorbell, ret = %d.\n",
					  ret);
				goto err_out;
			}
			hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
		}

		if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
			ret = hns_roce_db_map_user(uctx, udata, ucmd->db_addr,
						   &hr_qp->rdb);
			if (ret) {
				ibdev_err(ibdev,
					  "failed to map user RQ doorbell, ret = %d.\n",
					  ret);
				goto err_sdb;
			}
			hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
		}
	} else {
		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		if (kernel_qp_has_rdb(hr_dev, init_attr)) {
			ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
			if (ret) {
				ibdev_err(ibdev,
					  "failed to alloc kernel RQ doorbell, ret = %d.\n",
					  ret);
				goto err_out;
			}
			*hr_qp->rdb.db_record = 0;
			hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
		}
	}

	return 0;
err_sdb:
	if (udata && hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
		hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
	return ret;
}

static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_udata *udata)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct hns_roce_ucontext, ibucontext);

	if (udata) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
	} else {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_free_db(hr_dev, &hr_qp->rdb);
	}
}

static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u64 *sq_wrid = NULL;
	u64 *rq_wrid = NULL;
	int ret;

	sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(sq_wrid)) {
		ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
		return -ENOMEM;
	}

	if (hr_qp->rq.wqe_cnt) {
		rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
		if (ZERO_OR_NULL_PTR(rq_wrid)) {
			ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
			ret = -ENOMEM;
			goto err_sq;
		}
	}

	hr_qp->sq.wrid = sq_wrid;
	hr_qp->rq.wrid = rq_wrid;
	return 0;
err_sq:
	kfree(sq_wrid);

	return ret;
}

static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
{
	kfree(hr_qp->rq.wrid);
	kfree(hr_qp->sq.wrid);
}

static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata,
			struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	hr_qp->ibqp.qp_type = init_attr->qp_type;

	if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline)
		init_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline;

	hr_qp->max_inline_data = init_attr->cap.max_inline_data;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
	else
		hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

	ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
			  hns_roce_qp_has_rq(init_attr));
	if (ret) {
		ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	if (udata) {
		ret = ib_copy_from_udata(ucmd, udata,
					 min(udata->inlen, sizeof(*ucmd)));
		if (ret) {
			ibdev_err(ibdev,
				  "failed to copy QP ucmd, ret = %d\n", ret);
			return ret;
		}

		ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
		if (ret)
			ibdev_err(ibdev,
				  "failed to set user SQ size, ret = %d.\n",
				  ret);
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			ibdev_err(ibdev, "Failed to check multicast loopback\n");
			return -EINVAL;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			ibdev_err(ibdev, "Failed to check ipoib ud lso\n");
			return -EINVAL;
		}

		ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
		if (ret)
			ibdev_err(ibdev,
				  "failed to set kernel SQ size, ret = %d.\n",
				  ret);
	}

	return ret;
}

static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata,
				     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_ib_create_qp_resp resp = {};
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_qp ucmd = {};
	int ret;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;
	hr_qp->flush_flag = 0;

	ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
	if (ret) {
		ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret);
		return ret;
	}

	if (!udata) {
		ret = alloc_kernel_wrid(hr_dev, hr_qp);
		if (ret) {
			ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n",
				  ret);
			return ret;
		}
	}

	ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
			  ret);
		goto err_wrid;
	}

	ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
%d.\n", ret); 944 goto err_db; 945 } 946 947 ret = alloc_qpn(hr_dev, hr_qp); 948 if (ret) { 949 ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret); 950 goto err_buf; 951 } 952 953 ret = alloc_qpc(hr_dev, hr_qp); 954 if (ret) { 955 ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n", 956 ret); 957 goto err_qpn; 958 } 959 960 ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr); 961 if (ret) { 962 ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret); 963 goto err_qpc; 964 } 965 966 if (udata) { 967 resp.cap_flags = hr_qp->en_flags; 968 ret = ib_copy_to_udata(udata, &resp, 969 min(udata->outlen, sizeof(resp))); 970 if (ret) { 971 ibdev_err(ibdev, "copy qp resp failed!\n"); 972 goto err_store; 973 } 974 } 975 976 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { 977 ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp); 978 if (ret) 979 goto err_store; 980 } 981 982 hr_qp->ibqp.qp_num = hr_qp->qpn; 983 hr_qp->event = hns_roce_ib_qp_event; 984 atomic_set(&hr_qp->refcount, 1); 985 init_completion(&hr_qp->free); 986 987 return 0; 988 989err_store: 990 hns_roce_qp_remove(hr_dev, hr_qp); 991err_qpc: 992 free_qpc(hr_dev, hr_qp); 993err_qpn: 994 free_qpn(hr_dev, hr_qp); 995err_buf: 996 free_qp_buf(hr_dev, hr_qp); 997err_db: 998 free_qp_db(hr_dev, hr_qp, udata); 999err_wrid: 1000 free_kernel_wrid(hr_qp); 1001 return ret; 1002} 1003 1004void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, 1005 struct ib_udata *udata) 1006{ 1007 if (atomic_dec_and_test(&hr_qp->refcount)) 1008 complete(&hr_qp->free); 1009 wait_for_completion(&hr_qp->free); 1010 1011 free_qpc(hr_dev, hr_qp); 1012 free_qpn(hr_dev, hr_qp); 1013 free_qp_buf(hr_dev, hr_qp); 1014 free_kernel_wrid(hr_qp); 1015 free_qp_db(hr_dev, hr_qp, udata); 1016 1017 kfree(hr_qp); 1018} 1019 1020struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, 1021 struct ib_qp_init_attr *init_attr, 1022 struct ib_udata *udata) 1023{ 1024 struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); 1025 struct ib_device *ibdev = &hr_dev->ib_dev; 1026 struct hns_roce_qp *hr_qp; 1027 int ret; 1028 1029 switch (init_attr->qp_type) { 1030 case IB_QPT_RC: 1031 case IB_QPT_GSI: 1032 break; 1033 default: 1034 ibdev_err(ibdev, "not support QP type %d\n", 1035 init_attr->qp_type); 1036 return ERR_PTR(-EOPNOTSUPP); 1037 } 1038 1039 hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL); 1040 if (!hr_qp) 1041 return ERR_PTR(-ENOMEM); 1042 1043 if (init_attr->qp_type == IB_QPT_GSI) { 1044 hr_qp->port = init_attr->port_num - 1; 1045 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; 1046 } 1047 1048 ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp); 1049 if (ret) { 1050 ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n", 1051 init_attr->qp_type, ret); 1052 ibdev_err(ibdev, "Create GSI QP failed!\n"); 1053 kfree(hr_qp); 1054 return ERR_PTR(ret); 1055 } 1056 return &hr_qp->ibqp; 1057} 1058 1059int to_hr_qp_type(int qp_type) 1060{ 1061 int transport_type; 1062 1063 if (qp_type == IB_QPT_RC) 1064 transport_type = SERV_TYPE_RC; 1065 else if (qp_type == IB_QPT_UC) 1066 transport_type = SERV_TYPE_UC; 1067 else if (qp_type == IB_QPT_UD) 1068 transport_type = SERV_TYPE_UD; 1069 else if (qp_type == IB_QPT_GSI) 1070 transport_type = SERV_TYPE_UD; 1071 else 1072 transport_type = -1; 1073 1074 return transport_type; 1075} 1076 1077static int check_mtu_validate(struct hns_roce_dev *hr_dev, 1078 struct hns_roce_qp *hr_qp, 1079 struct ib_qp_attr *attr, int attr_mask) 1080{ 1081 enum ib_mtu active_mtu; 1082 int p; 1083 1084 p = attr_mask & 
	p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
	active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

	if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
	    attr->path_mtu > hr_dev->caps.max_mtu) ||
	    attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
		ibdev_err(&hr_dev->ib_dev,
			  "attr path_mtu(%d) invalid while modifying qp\n",
			  attr->path_mtu);
		return -EINVAL;
	}

	return 0;
}

static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				  int attr_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	int p;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n",
			  attr->port_num);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			ibdev_err(&hr_dev->ib_dev,
				  "invalid attr, pkey_index = %u.\n",
				  attr->pkey_index);
			return -EINVAL;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid attr, max_rd_atomic = %u.\n",
			  attr->max_rd_atomic);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid attr, max_dest_rd_atomic = %u.\n",
			  attr->max_dest_rd_atomic);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_PATH_MTU)
		return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);

	return 0;
}

int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int ret = -EINVAL;

	mutex_lock(&hr_qp->mutex);

	if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
		goto out;

	cur_state = hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (ibqp->uobject &&
	    (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);

			if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
		} else {
			ibdev_warn(&hr_dev->ib_dev,
				   "flush cqe is not supported in userspace!\n");
			goto out;
		}
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask)) {
		ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
	if (ret)
		goto out;

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
			ret = -EPERM;
			ibdev_err(&hr_dev->ib_dev,
				  "RST2RST state is not supported\n");
		} else {
			ret = 0;
		}

		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}

void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		spin_lock_irq(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		__release(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	} else if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}

void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
}

bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->wqe_cnt))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->wqe_cnt;
}

int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int reserved_from_top = 0;
	int reserved_from_bot;
	int ret;

	mutex_init(&qp_table->scc_mutex);
	xa_init(&hr_dev->qp_table_xa);

	reserved_from_bot = hr_dev->caps.reserved_qps;

	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
				   hr_dev->caps.num_qps - 1, reserved_from_bot,
				   reserved_from_top);
	if (ret) {
		dev_err(hr_dev->dev, "qp bitmap init failed, error = %d\n",
			ret);
		return ret;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}