Lines matching refs:sport (references to struct srpt_port *sport in the ib_srpt driver)

144 struct srpt_port *sport;
154 sport = &sdev->port[port_num];
155 sport->lid = 0;
156 sport->sm_lid = 0;
172 sport = &sdev->port[port_num];
173 if (!sport->lid && !sport->sm_lid)
174 schedule_work(&sport->work);
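
Lines 144-174 come from the driver's asynchronous IB event handler: a port error clears the cached LIDs, and a later "port usable again" event schedules sport->work so the port data is refreshed from process context. A minimal sketch of that pattern follows; the function name, the event case labels, the event_handler field and the bounds check are assumptions, only the sport manipulation is taken from the lines above.

static void srpt_event_handler(struct ib_event_handler *handler,
                               struct ib_event *event)
{
        struct srpt_device *sdev =
                container_of(handler, struct srpt_device, event_handler);
        struct srpt_port *sport;
        u32 port_num = event->element.port_num - 1;

        if (port_num >= sdev->device->phys_port_cnt)
                return;

        switch (event->event) {
        case IB_EVENT_PORT_ERR:
                /* Forget the cached LIDs; they must be re-queried later. */
                sport = &sdev->port[port_num];
                sport->lid = 0;
                sport->sm_lid = 0;
                break;
        case IB_EVENT_PORT_ACTIVE:
        case IB_EVENT_LID_CHANGE:
        case IB_EVENT_SM_CHANGE:
                /* Refresh the port attributes outside interrupt context. */
                sport = &sdev->port[port_num];
                if (!sport->lid && !sport->sm_lid)
                        schedule_work(&sport->work);
                break;
        default:
                break;
        }
}
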
316 * @sport: HCA port through which the MAD has been received.
324 static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
327 struct srpt_device *sdev = sport->sdev;
366 iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
479 struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
511 srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
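
Lines 316-511 belong to the device-management MAD path: srpt_get_ioc() builds an IOC profile whose rdma_size is clamped by the per-port srp_max_rdma_size attribute, and the receive handler recovers the owning port from the MAD agent's context pointer. A minimal sketch of that context round-trip, assuming the standard ib_mad_recv_handler prototype; reply allocation and posting are elided:

static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
                                  struct ib_mad_send_buf *send_buf,
                                  struct ib_mad_recv_wc *mad_wc)
{
        /* The agent was registered with the owning port as its context. */
        struct srpt_port *sport = mad_agent->context;
        u8 method = mad_wc->recv_buf.mad->mad_hdr.method;

        pr_debug("%s-%d: received device management MAD, method 0x%x\n",
                 dev_name(&sport->sdev->device->dev), sport->port, method);

        /*
         * The full handler allocates a reply MAD and, for IB_MGMT_METHOD_GET,
         * fills it via srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad,
         * dm_mad) before posting it back to the requester.
         */
}
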
548 * @sport: SRPT HCA port.
556 static int srpt_refresh_port(struct srpt_port *sport)
564 ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
568 sport->sm_lid = port_attr.sm_lid;
569 sport->lid = port_attr.lid;
571 ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
575 srpt_format_guid(sport->guid_name, ARRAY_SIZE(sport->guid_name),
576 &sport->gid.global.interface_id);
577 snprintf(sport->gid_name, ARRAY_SIZE(sport->gid_name),
579 be64_to_cpu(sport->gid.global.subnet_prefix),
580 be64_to_cpu(sport->gid.global.interface_id));
582 if (rdma_protocol_iwarp(sport->sdev->device, sport->port))
589 ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
592 dev_name(&sport->sdev->device->dev), sport->port, ret);
596 if (!sport->mad_agent) {
603 mad_agent = ib_register_mad_agent(sport->sdev->device,
604 sport->port,
609 sport, 0);
612 dev_name(&sport->sdev->device->dev), sport->port,
614 sport->mad_agent = NULL;
617 ib_modify_port(sport->sdev->device, sport->port, 0,
622 sport->mad_agent = mad_agent;
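
Lines 548-622 are the heart of srpt_refresh_port(): cache the LIDs and GID, derive the configfs-visible port names, and (for InfiniBand ports only) advertise device-management support and register a MAD agent. A condensed reconstruction follows; the exact log messages, the gid_name format string and the capability-mask handling should be treated as assumptions.

static int srpt_refresh_port(struct srpt_port *sport)
{
        struct ib_mad_agent *mad_agent;
        struct ib_port_modify port_modify;
        struct ib_port_attr port_attr;
        int ret;

        /* Cache the current LIDs and GID for this port. */
        ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
        if (ret)
                return ret;
        sport->sm_lid = port_attr.sm_lid;
        sport->lid = port_attr.lid;

        ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
        if (ret)
                return ret;

        /* Derive the configfs-visible port names from the GID. */
        srpt_format_guid(sport->guid_name, ARRAY_SIZE(sport->guid_name),
                         &sport->gid.global.interface_id);
        snprintf(sport->gid_name, ARRAY_SIZE(sport->gid_name),
                 "0x%016llx%016llx",
                 be64_to_cpu(sport->gid.global.subnet_prefix),
                 be64_to_cpu(sport->gid.global.interface_id));

        /* iWARP ports have no subnet manager and hence no MADs to answer. */
        if (rdma_protocol_iwarp(sport->sdev->device, sport->port))
                return 0;

        /* Advertise device-management capability to the SM. */
        memset(&port_modify, 0, sizeof(port_modify));
        port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
        ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
        if (ret) {
                pr_warn("%s-%d: enabling device management failed (%d)\n",
                        dev_name(&sport->sdev->device->dev), sport->port, ret);
                return 0;
        }

        /* Register the device-management MAD agent only once per port. */
        if (!sport->mad_agent) {
                struct ib_mad_reg_req reg_req = {
                        .mgmt_class         = IB_MGMT_CLASS_DEVICE_MGMT,
                        .mgmt_class_version = IB_MGMT_BASE_VERSION,
                };

                set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
                set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

                mad_agent = ib_register_mad_agent(sport->sdev->device,
                                                  sport->port, IB_QPT_GSI,
                                                  &reg_req, 0,
                                                  srpt_mad_send_handler,
                                                  srpt_mad_recv_handler,
                                                  sport, 0);
                if (IS_ERR(mad_agent)) {
                        pr_err("%s-%d: MAD agent registration failed (%ld)\n",
                               dev_name(&sport->sdev->device->dev),
                               sport->port, PTR_ERR(mad_agent));
                        sport->mad_agent = NULL;
                        /* Roll back the capability bit set above. */
                        memset(&port_modify, 0, sizeof(port_modify));
                        port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
                        ib_modify_port(sport->sdev->device, sport->port, 0,
                                       &port_modify);
                        return 0;
                }
                sport->mad_agent = mad_agent;
        }

        return 0;
}

Registration only happens while sport->mad_agent is still NULL, which is what keeps repeated refreshes (triggered via sport->work) idempotent.
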
640 struct srpt_port *sport;
644 sport = &sdev->port[i - 1];
645 WARN_ON(sport->port != i);
646 if (sport->mad_agent) {
648 ib_unregister_mad_agent(sport->mad_agent);
649 sport->mad_agent = NULL;
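
Lines 640-649 are the teardown counterpart: every port's MAD agent is unregistered and the pointer cleared so a later refresh could re-register it. A sketch of that loop, assuming the helper takes the device and a port count and that clearing IB_PORT_DEVICE_MGMT_SUP belongs here:

static void srpt_unregister_mad_agent(struct srpt_device *sdev, int port_cnt)
{
        struct ib_port_modify port_modify = {
                .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
        };
        struct srpt_port *sport;
        int i;

        for (i = 1; i <= port_cnt; i++) {
                sport = &sdev->port[i - 1];
                WARN_ON(sport->port != i);
                if (sport->mad_agent) {
                        /* Withdraw the capability bit and drop the agent. */
                        ib_modify_port(sdev->device, i, 0, &port_modify);
                        ib_unregister_mad_agent(sport->mad_agent);
                        sport->mad_agent = NULL;
                }
        }
}
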
925 ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
954 rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
972 rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
1130 attr->port_num = ch->sport->port;
1132 ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
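
Lines 925-1132 show that ch->sport->port is the single source of the physical port number both for the RDMA R/W API and for P_Key resolution while transitioning the QP. A sketch of the init/teardown pairing, with ret, remote_addr, rkey, dir and the scatterlist fields standing in as placeholders:

        /* Map the scatterlist for a zero-copy RDMA transfer on this port. */
        ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
                               ctx->sg, ctx->nents, 0 /* sg_offset */,
                               remote_addr, rkey, dir);
        if (ret < 0)
                return ret;

        /* ... chain and post the WRs from rdma_rw_ctx_wrs() (line 2775) ... */

        /* Undo the mapping once the transfer has completed or failed. */
        rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
                            ctx->sg, ctx->nents, dir);
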
1638 ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
1681 srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
1791 struct srpt_port *sport = ch->sport;
1792 struct srpt_device *sdev = sport->sdev;
1794 int sq_size = sport->port_attrib.srp_sq_size;
1832 qp_init->port_num = ch->sport->port;
1968 struct srpt_port *sport = ch->sport;
1975 mutex_lock(&sport->mutex);
1977 mutex_unlock(&sport->mutex);
1985 static void __srpt_close_all_ch(struct srpt_port *sport)
1990 lockdep_assert_held(&sport->mutex);
1992 list_for_each_entry(nexus, &sport->nexus_list, entry) {
1997 dev_name(&sport->sdev->device->dev),
1998 sport->port);
2005 * Look up (i_port_id, t_port_id) in sport->nexus_list. Create an entry if
2008 static struct srpt_nexus *srpt_get_nexus(struct srpt_port *sport,
2015 mutex_lock(&sport->mutex);
2016 list_for_each_entry(n, &sport->nexus_list, entry) {
2025 &sport->nexus_list);
2028 mutex_unlock(&sport->mutex);
2047 static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
2048 __must_hold(&sport->mutex)
2050 lockdep_assert_held(&sport->mutex);
2052 if (sport->enabled == enabled)
2054 sport->enabled = enabled;
2056 __srpt_close_all_ch(sport);
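
Lines 1985-2056 implement the enable/disable machinery: srpt_set_enabled() flips sport->enabled under sport->mutex and, when disabling, closes every channel of every nexus on the port. A reconstruction close to the fragments above; the ch_list member, ch->sess_name and the srpt_disconnect_ch()/srpt_close_ch() helpers are assumptions:

static void __srpt_close_all_ch(struct srpt_port *sport)
{
        struct srpt_nexus *nexus;
        struct srpt_rdma_ch *ch;

        lockdep_assert_held(&sport->mutex);

        list_for_each_entry(nexus, &sport->nexus_list, entry) {
                list_for_each_entry(ch, &nexus->ch_list, list) {
                        if (srpt_disconnect_ch(ch) >= 0)
                                pr_info("Closing channel %s because target %s_%d has been disabled\n",
                                        ch->sess_name,
                                        dev_name(&sport->sdev->device->dev),
                                        sport->port);
                        srpt_close_ch(ch);
                }
        }
}

static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
        __must_hold(&sport->mutex)
{
        lockdep_assert_held(&sport->mutex);

        if (sport->enabled == enabled)
                return;
        sport->enabled = enabled;
        if (!enabled)
                __srpt_close_all_ch(sport);
}
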
2059 static void srpt_drop_sport_ref(struct srpt_port *sport)
2061 if (atomic_dec_return(&sport->refcount) == 0 && sport->freed_channels)
2062 complete(sport->freed_channels);
2069 srpt_drop_sport_ref(ch->sport);
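
Lines 2059-2069 show the port reference counting: each channel holds a reference on its sport (taken at line 2368 during login) and drops it when the channel is freed; the last drop wakes up srpt_release_sport() through the freed_channels completion. The helper is short enough to reconstruct almost verbatim:

static void srpt_drop_sport_ref(struct srpt_port *sport)
{
        /* Wake up srpt_release_sport() once the last channel is gone. */
        if (atomic_dec_return(&sport->refcount) == 0 && sport->freed_channels)
                complete(sport->freed_channels);
}
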
2079 * as long as the channel is on sport->nexus_list.
2085 struct srpt_port *sport;
2091 sdev = ch->sport->sdev;
2108 sport = ch->sport;
2109 mutex_lock(&sport->mutex);
2111 mutex_unlock(&sport->mutex);
2119 ch->sport->sdev, ch->rq_size,
2154 struct srpt_port *sport = &sdev->port[port_num - 1];
2174 port_num, &sport->gid, be16_to_cpu(pkey));
2176 nexus = srpt_get_nexus(sport, req->initiator_port_id,
2199 if (!sport->enabled) {
2202 dev_name(&sport->sdev->device->dev), port_num);
2228 ch->sport = sport;
2246 ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
2254 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2289 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2322 if (sport->guid_id) {
2323 mutex_lock(&sport->guid_id->mutex);
2324 list_for_each_entry(stpg, &sport->guid_id->tpg_list, entry) {
2331 mutex_unlock(&sport->guid_id->mutex);
2334 if (sport->gid_id) {
2335 mutex_lock(&sport->gid_id->mutex);
2336 list_for_each_entry(stpg, &sport->gid_id->tpg_list, entry) {
2349 mutex_unlock(&sport->gid_id->mutex);
2366 * will decrement sport->refcount. Hence increment sport->refcount now.
2368 atomic_inc(&sport->refcount);
2370 mutex_lock(&sport->mutex);
2388 if (!sport->enabled) {
2393 mutex_unlock(&sport->mutex);
2398 mutex_unlock(&sport->mutex);
2444 * Hold the sport mutex while accepting a connection to avoid that
2447 mutex_lock(&sport->mutex);
2448 if (sport->enabled && ch->state == CH_CONNECTING) {
2456 mutex_unlock(&sport->mutex);
2477 ch->sport->sdev, ch->rq_size,
2485 ch->sport->sdev, ch->rq_size,
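
Lines 2154-2485 are the SRP login (CM REQ) path: the target port is resolved from the connection's port number, a nexus is looked up or created, sport->enabled is checked early, a sport reference is taken, and the final accept happens under sport->mutex so it cannot race with srpt_set_enabled(). A sketch of that final step, with ret as a placeholder and the accept/reject calls elided:

        /*
         * Hold sport->mutex while accepting so that a concurrent
         * srpt_set_enabled(sport, false) either sees this channel on the
         * nexus list or prevents the accept, but never misses it.
         */
        mutex_lock(&sport->mutex);
        if (sport->enabled && ch->state == CH_CONNECTING) {
                /* ... send the login response and accept the connection ... */
                ret = 0;
        } else {
                /* The port was disabled while the login was in flight. */
                ret = -EINVAL;
        }
        mutex_unlock(&sport->mutex);
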
2775 first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, ch->sport->port,
2817 struct srpt_device *sdev = ch->sport->sdev;
2850 ch->sport->port, NULL, first_wr);
2946 struct srpt_port *sport = container_of(work, struct srpt_port, work);
2948 srpt_refresh_port(sport);
2953 * @sport: SRPT HCA port.
2955 static int srpt_release_sport(struct srpt_port *sport)
2963 sport->freed_channels = &c;
2965 mutex_lock(&sport->mutex);
2966 srpt_set_enabled(sport, false);
2967 mutex_unlock(&sport->mutex);
2969 while (atomic_read(&sport->refcount) > 0 &&
2972 dev_name(&sport->sdev->device->dev), sport->port,
2973 atomic_read(&sport->refcount));
2975 list_for_each_entry(nexus, &sport->nexus_list, entry) {
2985 mutex_lock(&sport->mutex);
2986 list_for_each_entry_safe(nexus, next_n, &sport->nexus_list, entry) {
2990 mutex_unlock(&sport->mutex);
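
Lines 2946-2990 cover port refresh (the work item simply calls srpt_refresh_port()) and port release. srpt_release_sport() publishes an on-stack completion through sport->freed_channels, disables the port, waits until every channel has dropped its sport reference, and only then empties the nexus list. A condensed reconstruction; the 5 second poll interval and the plain kfree() are assumptions:

static int srpt_release_sport(struct srpt_port *sport)
{
        DECLARE_COMPLETION_ONSTACK(c);
        struct srpt_nexus *nexus, *next_n;

        /* Let srpt_drop_sport_ref() know whom to wake up. */
        sport->freed_channels = &c;

        mutex_lock(&sport->mutex);
        srpt_set_enabled(sport, false);
        mutex_unlock(&sport->mutex);

        /* Wait until the last channel has dropped its sport reference. */
        while (atomic_read(&sport->refcount) > 0 &&
               wait_for_completion_timeout(&c, 5 * HZ) <= 0) {
                pr_info("%s_%d: waiting for unregistration of %d sessions ...\n",
                        dev_name(&sport->sdev->device->dev), sport->port,
                        atomic_read(&sport->refcount));
                /* ... optionally dump the state of the remaining channels ... */
        }

        /* All channels are gone; the nexus list can be emptied safely. */
        mutex_lock(&sport->mutex);
        list_for_each_entry_safe(nexus, next_n, &sport->nexus_list, entry) {
                list_del(&nexus->entry);
                kfree(nexus);
        }
        mutex_unlock(&sport->mutex);

        return 0;
}
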
2996 struct srpt_port *sport;
3004 struct srpt_port *sport;
3013 sport = &sdev->port[i];
3015 if (strcmp(sport->guid_name, name) == 0) {
3018 sport, &sport->guid_id};
3020 if (strcmp(sport->gid_name, name) == 0) {
3023 sport, &sport->gid_id};
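
Lines 2996-3023 resolve a configfs target name back to an HCA port by comparing it against both the GUID-based and the GID-based name of every port. The compound-literal returns in lines 3018 and 3023 suggest a small helper along these lines; the struct name, the device list and the reference taken on sdev (later released via srpt_sdev_put(), lines 3797 and 3827) are assumptions:

struct port_and_port_id {
        struct srpt_port *sport;
        struct srpt_port_id **port_id;
};

static struct port_and_port_id __srpt_lookup_port(const char *name)
{
        struct srpt_device *sdev;
        struct srpt_port *sport;
        int i;

        list_for_each_entry(sdev, &srpt_dev_list, list) {
                for (i = 0; i < sdev->device->phys_port_cnt; i++) {
                        sport = &sdev->port[i];

                        /*
                         * In the full driver a reference on sdev is taken
                         * before returning; it is released later through
                         * srpt_sdev_put(). That step is elided here.
                         */
                        if (strcmp(sport->guid_name, name) == 0)
                                return (struct port_and_port_id){
                                        sport, &sport->guid_id};
                        if (strcmp(sport->gid_name, name) == 0)
                                return (struct port_and_port_id){
                                        sport, &sport->gid_id};
                }
        }

        return (struct port_and_port_id){};
}
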
3150 struct srpt_port *sport;
3214 sport = &sdev->port[i - 1];
3215 INIT_LIST_HEAD(&sport->nexus_list);
3216 mutex_init(&sport->mutex);
3217 sport->sdev = sdev;
3218 sport->port = i;
3219 sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
3220 sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
3221 sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
3222 sport->port_attrib.use_srq = false;
3223 INIT_WORK(&sport->work, srpt_refresh_port_work);
3225 ret = srpt_refresh_port(sport);
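
Lines 3150-3225 initialize each struct srpt_port while the HCA is being added: lists, mutex, back pointer, 1-based port number, default attributes, the refresh work item, and an initial srpt_refresh_port() call. A sketch of that loop; the loop bound and the error handling are assumptions:

        for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
                sport = &sdev->port[i - 1];
                INIT_LIST_HEAD(&sport->nexus_list);
                mutex_init(&sport->mutex);
                sport->sdev = sdev;
                sport->port = i;
                sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
                sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
                sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
                sport->port_attrib.use_srq = false;
                INIT_WORK(&sport->work, srpt_refresh_port_work);

                ret = srpt_refresh_port(sport);
                if (ret) {
                        pr_err("MAD registration failed for %s-%d.\n",
                               dev_name(&sdev->device->dev), i);
                        goto err_port;
                }
        }
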
3317 struct srpt_port *sport = wwn->priv;
3319 if (sport->guid_id && &sport->guid_id->wwn == wwn)
3320 return sport->guid_id;
3321 if (sport->gid_id && &sport->gid_id->wwn == wwn)
3322 return sport->gid_id;
3352 srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
3461 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3463 return sysfs_emit(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
3470 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3489 sport->port_attrib.srp_max_rdma_size = val;
3498 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3500 return sysfs_emit(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
3507 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3526 sport->port_attrib.srp_max_rsp_size = val;
3535 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3537 return sysfs_emit(page, "%u\n", sport->port_attrib.srp_sq_size);
3544 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3563 sport->port_attrib.srp_sq_size = val;
3572 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3574 return sysfs_emit(page, "%d\n", sport->port_attrib.use_srq);
3581 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3582 struct srpt_device *sdev = sport->sdev;
3596 ret = mutex_lock_interruptible(&sport->mutex);
3599 enabled = sport->enabled;
3601 srpt_set_enabled(sport, false);
3602 sport->port_attrib.use_srq = val;
3603 srpt_use_srq(sdev, sport->port_attrib.use_srq);
3604 srpt_set_enabled(sport, enabled);
3606 mutex_unlock(&sport->mutex);
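
Lines 3461-3606 are the per-TPG attribute show/store handlers; they all resolve the sport via srpt_tpg_to_sport() and read or write sport->port_attrib. The use_srq store handler is the interesting one because switching between SRQ and per-channel receive queues requires bouncing the port. A sketch of its core, assuming val has already been parsed and validated from the configfs buffer:

        ret = mutex_lock_interruptible(&sport->mutex);
        if (ret < 0)
                return ret;

        /*
         * Changing use_srq changes how receive buffers are posted, so bounce
         * the port: remember its state, disable it (closing all channels),
         * apply the new setting and restore the previous state.
         */
        enabled = sport->enabled;
        srpt_set_enabled(sport, false);
        sport->port_attrib.use_srq = val;
        srpt_use_srq(sdev, sport->port_attrib.use_srq);
        srpt_set_enabled(sport, enabled);

        mutex_unlock(&sport->mutex);

        return count;
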
3717 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3719 mutex_lock(&sport->mutex);
3720 srpt_set_enabled(sport, enable);
3721 mutex_unlock(&sport->mutex);
3763 struct srpt_port *sport = srpt_tpg_to_sport(tpg);
3769 sport->enabled = false;
3785 struct srpt_port *sport = papi.sport;
3797 srpt_sdev_put(sport->sdev);
3802 port_id->wwn.priv = sport;
3803 memcpy(port_id->name, port_id == sport->guid_id ? sport->guid_name :
3804 sport->gid_name, ARRAY_SIZE(port_id->name));
3818 struct srpt_port *sport = wwn->priv;
3820 if (sport->guid_id == port_id)
3821 sport->guid_id = NULL;
3822 else if (sport->gid_id == port_id)
3823 sport->gid_id = NULL;
3827 srpt_sdev_put(sport->sdev);
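
Lines 3785-3827 tie the configfs WWNs to the port: creating a target stores the sport in wwn->priv and copies the matching name into the port id, and dropping it clears whichever of guid_id/gid_id pointed at this port id before releasing the device reference. A sketch of the drop side; the function name, the srpt_port_id layout and the kfree() are assumptions:

static void srpt_drop_tport(struct se_wwn *wwn)
{
        struct srpt_port_id *port_id =
                container_of(wwn, struct srpt_port_id, wwn);
        struct srpt_port *sport = wwn->priv;

        /* Detach the configfs port id from the HCA port that owns it. */
        if (sport->guid_id == port_id)
                sport->guid_id = NULL;
        else if (sport->gid_id == port_id)
                sport->gid_id = NULL;

        /* Drop the device reference taken when this WWN was created. */
        srpt_sdev_put(sport->sdev);
        kfree(port_id);
}
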