Lines matching refs: smcibdev (cross-reference listing over net/smc/smc_ib.c)

125 	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
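Line 125 sits in the link bring-up path: before receive completions can be delivered for a new link, the shared receive CQ has to be armed. A minimal sketch of that arming pattern, assuming the usual error-out flow around it (the out: label is hypothetical, not part of the listing):

	/* ask for an interrupt on the next solicited completion; without
	 * this the recv CQ handler is never driven for the device */
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
			      IB_CQ_SOLICITED_MASK);
	if (rc)
		goto out;	/* hypothetical error exit */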
144 static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport)
149 attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, 0);
153 rc = rdma_read_gid_l2_fields(attr, NULL, smcibdev->mac[ibport - 1]);
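Lines 144-153 frame the MAC lookup helper. A plausible reconstruction of the whole function, with the IS_ERR check and the rdma_put_gid_attr() release assumed from the conventional get/put pairing of the GID-attribute API:

	static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport)
	{
		const struct ib_gid_attr *attr;
		int rc;

		/* GID index 0 is the port's default RoCE entry */
		attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, 0);
		if (IS_ERR(attr))
			return -ENODEV;

		/* pull the MAC out of the GID's layer-2 fields */
		rc = rdma_read_gid_l2_fields(attr, NULL,
					     smcibdev->mac[ibport - 1]);
		rdma_put_gid_attr(attr);
		return rc;
	}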
163 static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
166 memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
167 sizeof(smcibdev->mac[ibport - 1]));
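Lines 163-167 derive the local system ID from that MAC: the 6-byte port address is copied to offset 2 of the identifier, so the sizeof() at line 167 evaluates to ETH_ALEN. Sketched effect (the local_systemid declaration is an assumption inferred from the copy offsets):

	/* assumed layout: u8 local_systemid[8]; bytes 0-1 hold a prefix,
	 * bytes 2-7 the port MAC, forming the SMC system identifier */
	memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
	       sizeof(smcibdev->mac[ibport - 1]));	/* 6 bytes */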
180 bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
182 return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
186 int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
193 for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
194 attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
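Lines 186-194 open the GID scan: smc_ib_determine_gid() walks every slot of the port's GID table until an entry matches the requested VLAN. A condensed sketch of the loop, with the match predicate folded into a hypothetical gid_matches_vlan() helper since the real condition is not part of the listing:

	for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
		attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
		if (IS_ERR(attr))
			continue;	/* hole in the GID table */
		if (gid_matches_vlan(attr, vlan_id)) {	/* hypothetical */
			if (gid)
				memcpy(gid, &attr->gid, sizeof(attr->gid));
			rdma_put_gid_attr(attr);
			return 0;
		}
		rdma_put_gid_attr(attr);
	}
	return -ENODEV;	/* no usable GID on this port/vlan */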
219 static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
223 memset(&smcibdev->pattr[ibport - 1], 0,
224 sizeof(smcibdev->pattr[ibport - 1]));
225 rc = ib_query_port(smcibdev->ibdev, ibport,
226 &smcibdev->pattr[ibport - 1]);
230 rc = smc_ib_fill_mac(smcibdev, ibport);
234 smc_ib_port_active(smcibdev, ibport))
236 smc_ib_define_local_systemid(smcibdev, ibport);
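Lines 219-236 refresh the cached port attributes: clear the slot, re-query it, re-read the MAC, and let the first active port define the system ID. A sketch with the assumed error propagation between the listed calls:

	static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev,
					     u8 ibport)
	{
		int rc;

		/* start from a clean slate before re-querying */
		memset(&smcibdev->pattr[ibport - 1], 0,
		       sizeof(smcibdev->pattr[ibport - 1]));
		rc = ib_query_port(smcibdev->ibdev, ibport,
				   &smcibdev->pattr[ibport - 1]);
		if (rc)
			return rc;
		/* SMC needs the RoCE MAC address of the port */
		rc = smc_ib_fill_mac(smcibdev, ibport);
		if (rc)
			return rc;
		/* an additional "system ID not yet valid" guard is assumed
		 * in front of the condition shown at line 234 */
		if (smc_ib_port_active(smcibdev, ibport))
			smc_ib_define_local_systemid(smcibdev, ibport);
		return 0;
	}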
244 struct smc_ib_device *smcibdev = container_of(
248 for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
249 smc_ib_remember_port_attr(smcibdev, port_idx + 1);
250 clear_bit(port_idx, &smcibdev->port_event_mask);
251 if (!smc_ib_port_active(smcibdev, port_idx + 1)) {
252 set_bit(port_idx, smcibdev->ports_going_away);
253 smcr_port_err(smcibdev, port_idx + 1);
255 clear_bit(port_idx, smcibdev->ports_going_away);
256 smcr_port_add(smcibdev, port_idx + 1);
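Lines 244-256 are the core of the port-event worker: for every bit set in port_event_mask it re-reads the port attributes, then either fails or (re)adds the port. The listing omits the else arm; its shape is inferred from the paired clear_bit()/smcr_port_add() at lines 255-256:

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
		if (!smc_ib_port_active(smcibdev, port_idx + 1)) {
			/* port went down: mark it and fail its links */
			set_bit(port_idx, smcibdev->ports_going_away);
			smcr_port_err(smcibdev, port_idx + 1);
		} else {
			/* port is (back) up: make it usable again */
			clear_bit(port_idx, smcibdev->ports_going_away);
			smcr_port_add(smcibdev, port_idx + 1);
		}
	}

Note the asymmetry visible throughout the listing: port_event_mask is a plain unsigned long and is passed by address, while ports_going_away is already a bitmap array, which is why only the former takes an &.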
265 struct smc_ib_device *smcibdev;
269 smcibdev = container_of(handler, struct smc_ib_device, event_handler);
275 set_bit(port_idx, &smcibdev->port_event_mask);
277 smcibdev->ports_going_away))
281 schedule_work(&smcibdev->port_event_work);
287 set_bit(port_idx, &smcibdev->port_event_mask);
288 if (test_and_clear_bit(port_idx, smcibdev->ports_going_away))
289 schedule_work(&smcibdev->port_event_work);
295 set_bit(port_idx, &smcibdev->port_event_mask);
296 if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
297 schedule_work(&smcibdev->port_event_work);
303 set_bit(port_idx, &smcibdev->port_event_mask);
304 schedule_work(&smcibdev->port_event_work);
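Lines 265-304 belong to the global IB event handler. It can run in IRQ context, so it only flags ports in port_event_mask and defers the real work to the worker; three distinct bit-op patterns separate the event classes. A condensed sketch, where the case labels are assumptions inferred from the semantics of each pattern:

	struct smc_ib_device *smcibdev;
	bool schedule = false;
	u8 port_idx;

	smcibdev = container_of(handler, struct smc_ib_device, event_handler);
	switch (ibevent->event) {
	case IB_EVENT_DEVICE_FATAL:	/* lines 275-281: flag every port */
		for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) {
			set_bit(port_idx, &smcibdev->port_event_mask);
			if (!test_and_set_bit(port_idx,
					      smcibdev->ports_going_away))
				schedule = true;
		}
		if (schedule)
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ACTIVE:	/* lines 287-289: port came back */
		port_idx = ibevent->element.port_num - 1;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (test_and_clear_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ERR:		/* lines 295-297: port going away */
		port_idx = ibevent->element.port_num - 1;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_GID_CHANGE:	/* lines 303-304: unconditional */
		port_idx = ibevent->element.port_num - 1;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}

For the per-port events, the test_and_*_bit() calls keep ports_going_away consistent while avoiding redundant schedule_work() invocations: only an actual state transition schedules the worker.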
322 lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
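Line 322 allocates the protection domain that the link's memory regions and queue pair will hang off; the 0 means no special PD flags. The listed call most plausibly sits in an ERR_PTR-style error pattern like:

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;	/* don't keep an ERR_PTR around */
	return rc;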
332 struct smc_ib_device *smcibdev = lnk->smcibdev;
341 set_bit(port_idx, &smcibdev->port_event_mask);
342 if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
343 schedule_work(&smcibdev->port_event_work);
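Lines 332-343 are the per-QP event handler: a fatal QP event is treated like a failing port, using the same flag-and-defer pattern as the global handler. A sketch, with the case labels marked as assumptions:

	static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
	{
		struct smc_link *lnk = (struct smc_link *)priv;
		struct smc_ib_device *smcibdev = lnk->smcibdev;
		u8 port_idx;

		switch (ibevent->event) {
		case IB_EVENT_QP_FATAL:		/* assumed */
		case IB_EVENT_QP_ACCESS_ERR:	/* assumed */
			port_idx = ibevent->element.qp->port - 1;
			if (port_idx >= SMC_MAX_PORTS)
				break;
			set_bit(port_idx, &smcibdev->port_event_mask);
			if (!test_and_set_bit(port_idx,
					      smcibdev->ports_going_away))
				schedule_work(&smcibdev->port_event_work);
			break;
		default:
			break;
		}
	}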
363 .send_cq = lnk->smcibdev->roce_cq_send,
364 .recv_cq = lnk->smcibdev->roce_cq_recv,
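Lines 363-364 wire the device's two completion queues into every new queue pair, so all links on one ibdev share a single send and a single recv CQ. A sketch of the enclosing ib_qp_init_attr; only the two CQ fields come from the listing, the capacities and QP type are assumptions:

	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler,
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,	/* line 363 */
		.recv_cq = lnk->smcibdev->roce_cq_recv,	/* line 364 */
		.cap = {
			/* assumed sizing; must cover all outstanding WRs */
			.max_send_wr = SMC_WR_BUF_CNT * 3,
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,	/* reliable connected */
	};

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);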
444 ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev,
464 ib_dma_sync_single_for_device(lnk->smcibdev->ibdev,
478 mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev,
495 ib_dma_unmap_sg(lnk->smcibdev->ibdev,
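Lines 444-495 are the DMA housekeeping for RMB buffers: sync_single_for_cpu/for_device around CPU respectively device access, and map_sg/unmap_sg around a buffer's lifetime on a link. A sketch of the map/unmap pairing (the buf_slot scatterlist field names are assumptions):

	/* make the buffer's pages visible to the HCA; 0 means failure */
	mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev,
				     buf_slot->sgt[lnk->link_idx].sgl,
				     buf_slot->sgt[lnk->link_idx].orig_nents,
				     data_direction);
	if (!mapped_nents)
		return -ENOMEM;

	/* ... later, the exact mirror image on teardown */
	ib_dma_unmap_sg(lnk->smcibdev->ibdev,
			buf_slot->sgt[lnk->link_idx].sgl,
			buf_slot->sgt[lnk->link_idx].orig_nents,
			data_direction);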
502 long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
509 mutex_lock(&smcibdev->mutex);
511 if (smcibdev->initialized)
518 smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
520 smcibdev, &cqattr);
521 rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
522 if (IS_ERR(smcibdev->roce_cq_send)) {
523 smcibdev->roce_cq_send = NULL;
526 smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
528 smcibdev, &cqattr);
529 rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
530 if (IS_ERR(smcibdev->roce_cq_recv)) {
531 smcibdev->roce_cq_recv = NULL;
534 smc_wr_add_dev(smcibdev);
535 smcibdev->initialized = 1;
539 ib_destroy_cq(smcibdev->roce_cq_send);
541 mutex_unlock(&smcibdev->mutex);
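Lines 502-541 initialize the per-device state lazily, under smcibdev->mutex and guarded by the initialized flag, so only the first link on a device pays the cost: create the send CQ, create the recv CQ, then attach the work-request layer. A sketch of the sequence; the CQ sizing, the completion-handler names, and the unwind labels are assumptions:

	struct ib_cq_init_attr cqattr = { .cqe = SMC_MAX_CQE };	/* assumed */

	mutex_lock(&smcibdev->mutex);
	rc = 0;
	if (smcibdev->initialized)
		goto out;	/* an earlier link already set this up */
	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
	if (IS_ERR(smcibdev->roce_cq_send)) {
		smcibdev->roce_cq_send = NULL;
		goto out;
	}
	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
					      smc_wr_rx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
	if (IS_ERR(smcibdev->roce_cq_recv)) {
		smcibdev->roce_cq_recv = NULL;
		goto err_destroy;	/* line 539: undo the send CQ */
	}
	smc_wr_add_dev(smcibdev);
	smcibdev->initialized = 1;
	goto out;
err_destroy:
	ib_destroy_cq(smcibdev->roce_cq_send);
out:
	mutex_unlock(&smcibdev->mutex);
	return rc;

smc_ib_cleanup_per_ibdev() (lines 545-555) reverses this under the same mutex: clear initialized, destroy both CQs, detach the work-request layer.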
545 static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
547 mutex_lock(&smcibdev->mutex);
548 if (!smcibdev->initialized)
550 smcibdev->initialized = 0;
551 ib_destroy_cq(smcibdev->roce_cq_recv);
552 ib_destroy_cq(smcibdev->roce_cq_send);
553 smc_wr_remove_dev(smcibdev);
555 mutex_unlock(&smcibdev->mutex);
563 struct smc_ib_device *smcibdev;
570 smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
571 if (!smcibdev)
574 smcibdev->ibdev = ibdev;
575 INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
576 atomic_set(&smcibdev->lnk_cnt, 0);
577 init_waitqueue_head(&smcibdev->lnks_deleted);
578 mutex_init(&smcibdev->mutex);
580 list_add_tail(&smcibdev->list, &smc_ib_devices.list);
582 ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
583 INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
585 ib_register_event_handler(&smcibdev->event_handler);
588 port_cnt = smcibdev->ibdev->phys_port_cnt;
590 smcibdev->ibdev->name, port_cnt);
594 set_bit(i, &smcibdev->port_event_mask);
597 smcibdev->pnetid[i]))
598 smc_pnetid_by_table_ib(smcibdev, i + 1);
601 smcibdev->ibdev->name, i + 1,
602 smcibdev->pnetid[i],
603 smcibdev->pnetid_by_user[i] ?
607 schedule_work(&smcibdev->port_event_work);
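Lines 563-607 form the IB client's add-device callback: allocate the smc_ib_device, initialize its worker and synchronization primitives, link it into the global device list, register the event handler, then determine a PNET ID per port and kick the port worker once. A condensed sketch; list locking and the ratelimited log messages hinted at by lines 588-603 are left out:

	smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
	if (!smcibdev)
		return -ENOMEM;

	smcibdev->ibdev = ibdev;
	INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
	atomic_set(&smcibdev->lnk_cnt, 0);
	init_waitqueue_head(&smcibdev->lnks_deleted);
	mutex_init(&smcibdev->mutex);
	list_add_tail(&smcibdev->list, &smc_ib_devices.list);
	ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
	INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
			      smc_ib_global_event_handler);
	ib_register_event_handler(&smcibdev->event_handler);

	port_cnt = smcibdev->ibdev->phys_port_cnt;
	for (i = 0; i < min_t(size_t, SMC_MAX_PORTS, port_cnt); i++) {
		set_bit(i, &smcibdev->port_event_mask);
		/* determine the port's pnetid; the PNET table serves as
		 * fallback (exact check polarity is assumed) */
		if (smc_pnetid_by_dev_port(ibdev->dev.parent, i,
					   smcibdev->pnetid[i]))
			smc_pnetid_by_table_ib(smcibdev, i + 1);
	}
	schedule_work(&smcibdev->port_event_work);	/* line 607 */
	return 0;

Setting every port bit in port_event_mask before the first schedule_work() makes the worker's initial pass double as port discovery.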
614 struct smc_ib_device *smcibdev = client_data;
617 list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
620 smcibdev->ibdev->name);
621 smc_smcr_terminate_all(smcibdev);
622 smc_ib_cleanup_per_ibdev(smcibdev);
623 ib_unregister_event_handler(&smcibdev->event_handler);
624 cancel_work_sync(&smcibdev->port_event_work);
625 kfree(smcibdev);
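Lines 614-625 are the mirror-image remove callback. The ordering matters: the device leaves the global list first (no new users), then all link groups on it are terminated, then the CQs are torn down, and only after that are the event handler unregistered and the worker flushed. A sketch (the list mutex around line 617 is assumed):

	static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
	{
		struct smc_ib_device *smcibdev = client_data;

		list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
		smc_smcr_terminate_all(smcibdev);	/* kill all link groups */
		smc_ib_cleanup_per_ibdev(smcibdev);	/* CQs + wr state */
		ib_unregister_event_handler(&smcibdev->event_handler);
		cancel_work_sync(&smcibdev->port_event_work);
		kfree(smcibdev);
	}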