Lines matching refs: smcibdev (net/smc/smc_ib.c)

136 	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
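
ib_req_notify_cq() at line 136 arms the receive CQ for the next completion event. Below is a generic sketch of the arm-then-repoll idiom behind that API, not necessarily this call site's exact context; the function name is hypothetical and the code assumes this file's types:

static void poll_and_rearm_sketch(struct smc_link *lnk)
{
	struct ib_wc wc;
	int rc;

	do {
		/* drain completions that are already queued */
		while (ib_poll_cq(lnk->smcibdev->roce_cq_recv, 1, &wc) > 0)
			; /* handle wc.status / wc.wr_id here */
		/* re-arm; rc > 0 means completions slipped in while
		 * polling, so go around once more instead of sleeping */
		rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
				      IB_CQ_SOLICITED_MASK |
				      IB_CQ_REPORT_MISSED_EVENTS);
	} while (rc > 0);
}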
155 static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport)
160 attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, 0);
164 rc = rdma_read_gid_l2_fields(attr, NULL, smcibdev->mac[ibport - 1]);
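
A minimal sketch of the pattern at lines 155-164, assuming a RoCE port whose GID table entry 0 carries the netdev MAC; fill_mac_sketch is a hypothetical name, and every successful rdma_get_gid_attr() must be balanced by rdma_put_gid_attr():

static int fill_mac_sketch(struct smc_ib_device *smcibdev, u8 ibport)
{
	const struct ib_gid_attr *attr;
	int rc;

	attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, 0);
	if (IS_ERR(attr))
		return -ENODEV;

	rc = rdma_read_gid_l2_fields(attr, NULL, smcibdev->mac[ibport - 1]);
	rdma_put_gid_attr(attr);	/* drop the reference taken above */
	return rc;
}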
174 static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
177 memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
178 sizeof(smcibdev->mac[ibport - 1]));
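
Lines 174-178 splice the 6-byte port MAC into bytes 2..7 of the 8-byte SMC system ID; the leading two bytes are randomized once at init elsewhere in this file. A sketch of that layout, with hypothetical names:

static u8 local_systemid_sketch[8];	/* [0..1] random, [2..7] port MAC */

static void define_systemid_sketch(struct smc_ib_device *smcibdev, u8 ibport)
{
	memcpy(&local_systemid_sketch[2], &smcibdev->mac[ibport - 1],
	       sizeof(smcibdev->mac[ibport - 1]));	/* ETH_ALEN == 6 */
}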
191 bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
193 return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
269 int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
277 for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
278 attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
301 /* check if gid is still defined on smcibdev */
303 struct smc_ib_device *smcibdev, u8 ibport)
309 for (i = 0; !rc && i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
310 attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
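
Lines 269-310 share one idiom: walk the port's GID table by index, skipping empty slots, using the gid_tbl_len cached in the per-port ib_port_attr. A sketch with the per-entry comparison elided (hypothetical name, this file's context assumed):

static void walk_gid_table_sketch(struct smc_ib_device *smcibdev, u8 ibport)
{
	const struct ib_gid_attr *attr;
	int i;

	for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
		attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
		if (IS_ERR(attr))
			continue;	/* hole in the table, try next index */
		/* inspect attr->gid, attr->gid_type, attr->ndev here */
		rdma_put_gid_attr(attr);
	}
}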
327 /* check all links if the gid is still defined on smcibdev */
328 static void smc_ib_gid_check(struct smc_ib_device *smcibdev, u8 ibport)
335 if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
342 lgr->lnk[i].smcibdev != smcibdev)
346 smcibdev, ibport))
347 smcr_port_err(smcibdev, ibport);
353 static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
357 memset(&smcibdev->pattr[ibport - 1], 0,
358 sizeof(smcibdev->pattr[ibport - 1]));
359 rc = ib_query_port(smcibdev->ibdev, ibport,
360 &smcibdev->pattr[ibport - 1]);
364 rc = smc_ib_fill_mac(smcibdev, ibport);
368 smc_ib_port_active(smcibdev, ibport))
370 smc_ib_define_local_systemid(smcibdev, ibport);
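
A condensed sketch of the refresh at lines 353-370: zero the cached ib_port_attr, re-query it, then rebuild the MAC; the systemid step at line 370 runs only while no valid system ID exists yet and the port is active, which is omitted here (wrapper name hypothetical):

static int remember_port_attr_sketch(struct smc_ib_device *smcibdev, u8 ibport)
{
	int rc;

	memset(&smcibdev->pattr[ibport - 1], 0,
	       sizeof(smcibdev->pattr[ibport - 1]));
	rc = ib_query_port(smcibdev->ibdev, ibport,
			   &smcibdev->pattr[ibport - 1]);
	if (rc)
		return rc;
	return smc_ib_fill_mac(smcibdev, ibport);	/* see lines 155-164 */
}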
378 struct smc_ib_device *smcibdev = container_of(
382 for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
383 smc_ib_remember_port_attr(smcibdev, port_idx + 1);
384 clear_bit(port_idx, &smcibdev->port_event_mask);
385 if (!smc_ib_port_active(smcibdev, port_idx + 1)) {
386 set_bit(port_idx, smcibdev->ports_going_away);
387 smcr_port_err(smcibdev, port_idx + 1);
389 clear_bit(port_idx, smcibdev->ports_going_away);
390 smcr_port_add(smcibdev, port_idx + 1);
391 smc_ib_gid_check(smcibdev, port_idx + 1);
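
Lines 378-391 are the worker bound to port_event_work: port_event_mask holds 0-based port bits set by the (possibly atomic) event handlers below, and each bit is consumed exactly once. A condensed sketch reconstructed from the listed lines:

static void port_event_work_sketch(struct work_struct *work)
{
	struct smc_ib_device *smcibdev = container_of(
		work, struct smc_ib_device, port_event_work);
	u8 port_idx;

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
		if (!smc_ib_port_active(smcibdev, port_idx + 1)) {
			set_bit(port_idx, smcibdev->ports_going_away);
			smcr_port_err(smcibdev, port_idx + 1);
		} else {
			clear_bit(port_idx, smcibdev->ports_going_away);
			smcr_port_add(smcibdev, port_idx + 1);
			smc_ib_gid_check(smcibdev, port_idx + 1);
		}
	}
}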
400 struct smc_ib_device *smcibdev;
404 smcibdev = container_of(handler, struct smc_ib_device, event_handler);
410 set_bit(port_idx, &smcibdev->port_event_mask);
412 smcibdev->ports_going_away))
416 schedule_work(&smcibdev->port_event_work);
422 set_bit(port_idx, &smcibdev->port_event_mask);
423 if (test_and_clear_bit(port_idx, smcibdev->ports_going_away))
424 schedule_work(&smcibdev->port_event_work);
430 set_bit(port_idx, &smcibdev->port_event_mask);
431 if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
432 schedule_work(&smcibdev->port_event_work);
438 set_bit(port_idx, &smcibdev->port_event_mask);
439 schedule_work(&smcibdev->port_event_work);
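
Lines 400-439 run in the RDMA core's event context, which may be atomic, so they only translate the 1-based event->element.port_num into a 0-based bit and defer to the worker above; the ports_going_away bookkeeping visible at lines 412-431 is omitted here, and the QP event handler at lines 632-643 follows the same defer pattern. A condensed sketch covering the port events:

static void global_event_sketch(struct ib_event_handler *handler,
				struct ib_event *event)
{
	struct smc_ib_device *smcibdev =
		container_of(handler, struct smc_ib_device, event_handler);
	u8 port_idx;

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_PORT_ACTIVE:
	case IB_EVENT_GID_CHANGE:
		port_idx = event->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}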
457 lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
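
Line 457 allocates the per-link protection domain; ib_alloc_pd() returns an ERR_PTR() on failure, so the sketch normalizes the pointer before returning (hypothetical wrapper name):

static int create_pd_sketch(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;	/* never leave an ERR_PTR in the link */
	return rc;
}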
465 struct smc_ib_device *smcibdev)
477 lgr->lnk[i].smcibdev != smcibdev)
493 struct smc_ib_device *smcibdev,
506 smcibdev->pnetid_by_user[port]))
508 memcpy(smc_pnet, &smcibdev->pnetid[port], SMC_MAX_PNETID_LEN);
513 smcibdev->ndev_ifidx[port]))
517 port_state = smc_ib_port_active(smcibdev, port + 1);
520 lnk_count = atomic_read(&smcibdev->lnk_cnt_by_port[port]);
547 static int smc_nl_handle_smcr_dev(struct smc_ib_device *smcibdev,
567 is_crit = smcr_diag_is_dev_critical(&smc_lgr_list, smcibdev);
570 if (smcibdev->ibdev->dev.parent) {
572 pci_dev = to_pci_dev(smcibdev->ibdev->dev.parent);
577 snprintf(smc_ibname, sizeof(smc_ibname), "%s", smcibdev->ibdev->name);
581 if (!rdma_is_port_valid(smcibdev->ibdev, i))
583 if (smc_nl_handle_dev_port(skb, smcibdev->ibdev,
584 smcibdev, i - 1))
605 struct smc_ib_device *smcibdev;
610 list_for_each_entry(smcibdev, &dev_list->list, list) {
613 if (smc_nl_handle_smcr_dev(smcibdev, skb, cb))
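
A sketch of the dump loop at lines 605-613, assuming the global device list is mutex-protected as elsewhere in this file; a nonzero return from the per-device handler means the skb is full and the dump will be resumed by a follow-up call:

static void dump_devices_sketch(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct smc_ib_device *smcibdev;

	mutex_lock(&smc_ib_devices.mutex);
	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
		if (smc_nl_handle_smcr_dev(smcibdev, skb, cb))
			break;	/* out of skb space; resume on next dump call */
	}
	mutex_unlock(&smc_ib_devices.mutex);
}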
632 struct smc_ib_device *smcibdev = lnk->smcibdev;
641 set_bit(port_idx, &smcibdev->port_event_mask);
642 if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
643 schedule_work(&smcibdev->port_event_work);
664 .send_cq = lnk->smcibdev->roce_cq_send,
665 .recv_cq = lnk->smcibdev->roce_cq_recv,
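
Lines 664-665 sit inside the QP creation: both completion queues are per-device and shared by every link on it. A sketch of the surrounding ib_qp_init_attr, with illustrative (not SMC's actual) queue depths; the event handler name matches the function around lines 632-643:

static int create_qp_sketch(struct smc_link *lnk)
{
	struct ib_qp_init_attr init_attr = {
		.event_handler = smc_ib_qp_event_handler, /* cf. lines 632-643 */
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
		.cap = {
			.max_send_wr = 16,	/* illustrative depth */
			.max_recv_wr = 16,	/* illustrative depth */
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &init_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	return rc;
}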
746 if (dma_need_sync(lnk->smcibdev->ibdev->dma_device,
773 ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev,
796 ib_dma_sync_single_for_device(lnk->smcibdev->ibdev,
810 mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev,
827 ib_dma_unmap_sg(lnk->smcibdev->ibdev,
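
Lines 746-827 cover the DMA lifecycle of a buffer's scatterlist: map once via ib_dma_map_sg(), sync around CPU/device handoffs only when dma_need_sync() reports a non-coherent mapping, unmap on teardown. A compressed sketch, folding into one function what SMC spreads over several:

static int map_and_sync_sketch(struct smc_link *lnk, struct sg_table *sgt,
			       enum dma_data_direction dir)
{
	struct ib_device *ibdev = lnk->smcibdev->ibdev;
	int mapped_nents;

	mapped_nents = ib_dma_map_sg(ibdev, sgt->sgl, sgt->orig_nents, dir);
	if (!mapped_nents)
		return -ENOMEM;

	if (dma_need_sync(ibdev->dma_device, sg_dma_address(sgt->sgl))) {
		/* before the CPU reads what the device wrote ... */
		ib_dma_sync_single_for_cpu(ibdev, sg_dma_address(sgt->sgl),
					   sg_dma_len(sgt->sgl), dir);
		/* ... and before handing the buffer back to the device */
		ib_dma_sync_single_for_device(ibdev, sg_dma_address(sgt->sgl),
					      sg_dma_len(sgt->sgl), dir);
	}

	ib_dma_unmap_sg(ibdev, sgt->sgl, sgt->orig_nents, dir);
	return 0;
}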
834 long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
841 mutex_lock(&smcibdev->mutex);
843 if (smcibdev->initialized)
850 smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
852 smcibdev, &cqattr);
853 rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
854 if (IS_ERR(smcibdev->roce_cq_send)) {
855 smcibdev->roce_cq_send = NULL;
858 smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
860 smcibdev, &cqattr);
861 rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
862 if (IS_ERR(smcibdev->roce_cq_recv)) {
863 smcibdev->roce_cq_recv = NULL;
866 smc_wr_add_dev(smcibdev);
867 smcibdev->initialized = 1;
871 ib_destroy_cq(smcibdev->roce_cq_send);
873 mutex_unlock(&smcibdev->mutex);
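
Lines 834-873 create both completion queues exactly once per device; the mutex plus the initialized flag make repeated calls idempotent, and the cleanup at lines 877-887 releases the same resources in reverse order. A condensed sketch (cqe count illustrative, handler names as used by smc_wr):

static long setup_per_ibdev_sketch(struct smc_ib_device *smcibdev)
{
	struct ib_cq_init_attr cqattr = { .cqe = 4096 };	/* illustrative */
	long rc = 0;

	mutex_lock(&smcibdev->mutex);
	if (smcibdev->initialized)
		goto out;
	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
	if (IS_ERR(smcibdev->roce_cq_send)) {
		smcibdev->roce_cq_send = NULL;
		goto out;
	}
	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
					      smc_wr_rx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
	if (IS_ERR(smcibdev->roce_cq_recv)) {
		smcibdev->roce_cq_recv = NULL;
		ib_destroy_cq(smcibdev->roce_cq_send);	/* undo send CQ */
		smcibdev->roce_cq_send = NULL;
		goto out;
	}
	smc_wr_add_dev(smcibdev);
	smcibdev->initialized = 1;
out:
	mutex_unlock(&smcibdev->mutex);
	return rc;
}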
877 static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
879 mutex_lock(&smcibdev->mutex);
880 if (!smcibdev->initialized)
882 smcibdev->initialized = 0;
883 ib_destroy_cq(smcibdev->roce_cq_recv);
884 ib_destroy_cq(smcibdev->roce_cq_send);
885 smc_wr_remove_dev(smcibdev);
887 mutex_unlock(&smcibdev->mutex);
892 static void smc_copy_netdev_ifindex(struct smc_ib_device *smcibdev, int port)
894 struct ib_device *ibdev = smcibdev->ibdev;
901 smcibdev->ndev_ifidx[port] = ndev->ifindex;
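
A sketch of the lookup at lines 892-901: ask the RDMA driver for the net_device behind a port (1-based in the ops call, 0-based in the cache slot), assuming the driver implements ops.get_netdev, which returns a referenced device:

static void copy_ifindex_sketch(struct smc_ib_device *smcibdev, int port)
{
	struct ib_device *ibdev = smcibdev->ibdev;
	struct net_device *ndev;

	if (!ibdev->ops.get_netdev)
		return;		/* driver exposes no net_device */
	ndev = ibdev->ops.get_netdev(ibdev, port + 1);
	if (ndev) {
		smcibdev->ndev_ifidx[port] = ndev->ifindex;
		dev_put(ndev);	/* drop the reference get_netdev took */
	}
}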
908 struct smc_ib_device *smcibdev;
915 list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
916 port_cnt = smcibdev->ibdev->phys_port_cnt;
918 libdev = smcibdev->ibdev;
926 smcibdev->ndev_ifidx[i] = ndev->ifindex;
928 smcibdev->ndev_ifidx[i] = 0;
937 struct smc_ib_device *smcibdev;
944 smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
945 if (!smcibdev)
948 smcibdev->ibdev = ibdev;
949 INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
950 atomic_set(&smcibdev->lnk_cnt, 0);
951 init_waitqueue_head(&smcibdev->lnks_deleted);
952 mutex_init(&smcibdev->mutex);
954 list_add_tail(&smcibdev->list, &smc_ib_devices.list);
956 ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
957 INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
959 ib_register_event_handler(&smcibdev->event_handler);
962 port_cnt = smcibdev->ibdev->phys_port_cnt;
964 smcibdev->ibdev->name, port_cnt);
968 set_bit(i, &smcibdev->port_event_mask);
971 smcibdev->pnetid[i]))
972 smc_pnetid_by_table_ib(smcibdev, i + 1);
973 smc_copy_netdev_ifindex(smcibdev, i);
976 smcibdev->ibdev->name, i + 1,
977 smcibdev->pnetid[i],
978 smcibdev->pnetid_by_user[i] ?
982 schedule_work(&smcibdev->port_event_work);
989 struct smc_ib_device *smcibdev = client_data;
992 list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
995 smcibdev->ibdev->name);
996 smc_smcr_terminate_all(smcibdev);
997 smc_ib_cleanup_per_ibdev(smcibdev);
998 ib_unregister_event_handler(&smcibdev->event_handler);
999 cancel_work_sync(&smcibdev->port_event_work);
1000 kfree(smcibdev);
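
Lines 937-1000 are the two halves of an RDMA client: .add runs for every existing and future ib_device once the client is registered, .remove on device removal or unregistration. A sketch of the wiring, following the pattern this file uses (registration wrapper name hypothetical):

static struct ib_client smc_ib_client_sketch = {
	.name	= "smc_ib",
	.add	= smc_ib_add_dev,
	.remove	= smc_ib_remove_dev,
};

static int __init register_client_sketch(void)
{
	return ib_register_client(&smc_ib_client_sketch);
}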