Lines matching refs: sport
140 struct srpt_port *sport;
150 sport = &sdev->port[port_num];
151 sport->lid = 0;
152 sport->sm_lid = 0;
168 sport = &sdev->port[port_num];
169 if (!sport->lid && !sport->sm_lid)
170 schedule_work(&sport->work);
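
The two hunks at 150-152 and 168-170 look like the asynchronous event path: the cached LID and SM LID are cleared when the port goes away, and a refresh is queued only while both are still zero, so an already-refreshed port is not queued again. Below is a minimal user-space sketch of that "refresh only while stale" idea; the types and the demo_* names are invented, and demo_schedule_refresh() merely stands in for schedule_work().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_port {
	uint16_t lid;
	uint16_t sm_lid;
	bool refresh_queued;
};

static void demo_schedule_refresh(struct demo_port *p)
{
	p->refresh_queued = true;	/* stands in for schedule_work() */
	printf("refresh queued\n");
}

static void demo_port_error(struct demo_port *p)
{
	p->lid = 0;			/* cached identity is now stale */
	p->sm_lid = 0;
}

static void demo_port_active(struct demo_port *p)
{
	if (!p->lid && !p->sm_lid)	/* refresh only while still stale */
		demo_schedule_refresh(p);
}

int main(void)
{
	struct demo_port p = { .lid = 0, .sm_lid = 0 };

	demo_port_active(&p);		/* stale at probe time: queues a refresh */
	p.lid = 7; p.sm_lid = 1;	/* pretend the refresh filled these in */
	demo_port_active(&p);		/* up to date: nothing queued again */
	demo_port_error(&p);		/* port error invalidates the cache */
	demo_port_active(&p);		/* stale again: queues another refresh */
	return 0;
}
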
310 * @sport: HCA port through which the MAD has been received.
318 static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
321 struct srpt_device *sdev = sport->sdev;
360 iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
473 struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
505 srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
542 * @sport: SRPT HCA port.
550 static int srpt_refresh_port(struct srpt_port *sport)
558 ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
562 sport->sm_lid = port_attr.sm_lid;
563 sport->lid = port_attr.lid;
565 ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
569 srpt_format_guid(sport->guid_name, ARRAY_SIZE(sport->guid_name),
570 &sport->gid.global.interface_id);
571 snprintf(sport->gid_name, ARRAY_SIZE(sport->gid_name),
573 be64_to_cpu(sport->gid.global.subnet_prefix),
574 be64_to_cpu(sport->gid.global.interface_id));
576 if (rdma_protocol_iwarp(sport->sdev->device, sport->port))
583 ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
586 dev_name(&sport->sdev->device->dev), sport->port, ret);
590 if (!sport->mad_agent) {
597 mad_agent = ib_register_mad_agent(sport->sdev->device,
598 sport->port,
603 sport, 0);
606 dev_name(&sport->sdev->device->dev), sport->port,
608 sport->mad_agent = NULL;
611 ib_modify_port(sport->sdev->device, sport->port, 0,
616 sport->mad_agent = mad_agent;
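
Lines 569-574 build printable port names: a GUID string from the GID's interface ID, and a GID string from the two big-endian 64-bit halves (subnet prefix and interface ID). The sketch below redoes the second conversion in plain C with the glibc/BSD be64toh() helpers; the struct, the field layout and the exact format string are assumptions, not copied from the driver.

#include <endian.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct demo_gid {
	uint64_t subnet_prefix;		/* stored big-endian, as on the wire */
	uint64_t interface_id;		/* stored big-endian, as on the wire */
};

int main(void)
{
	struct demo_gid gid = {
		.subnet_prefix = htobe64(0xfe80000000000000ULL),
		.interface_id  = htobe64(0x0002c90300a1b2c3ULL),
	};
	char name[64];

	/* convert each half to host order before formatting, as the listed code does */
	snprintf(name, sizeof(name), "0x%016" PRIx64 "%016" PRIx64,
		 be64toh(gid.subnet_prefix), be64toh(gid.interface_id));
	printf("gid name: %s\n", name);
	return 0;
}
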
634 struct srpt_port *sport;
638 sport = &sdev->port[i - 1];
639 WARN_ON(sport->port != i);
640 if (sport->mad_agent) {
642 ib_unregister_mad_agent(sport->mad_agent);
643 sport->mad_agent = NULL;
919 ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
948 rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
966 rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
1124 attr->port_num = ch->sport->port;
1126 ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
1628 ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
1671 srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
1781 struct srpt_port *sport = ch->sport;
1782 struct srpt_device *sdev = sport->sdev;
1784 int sq_size = sport->port_attrib.srp_sq_size;
1823 qp_init->port_num = ch->sport->port;
1959 struct srpt_port *sport = ch->sport;
1966 mutex_lock(&sport->mutex);
1968 mutex_unlock(&sport->mutex);
1976 static void __srpt_close_all_ch(struct srpt_port *sport)
1981 lockdep_assert_held(&sport->mutex);
1983 list_for_each_entry(nexus, &sport->nexus_list, entry) {
1988 dev_name(&sport->sdev->device->dev),
1989 sport->port);
1996 * Look up (i_port_id, t_port_id) in sport->nexus_list. Create an entry if
1999 static struct srpt_nexus *srpt_get_nexus(struct srpt_port *sport,
2006 mutex_lock(&sport->mutex);
2007 list_for_each_entry(n, &sport->nexus_list, entry) {
2016 &sport->nexus_list);
2019 mutex_unlock(&sport->mutex);
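
The comment at 1996 and the body at 1999-2019 describe a lookup-or-create over sport->nexus_list serialized by sport->mutex. A compact pthread sketch of that pattern follows, with invented demo_* types; it allocates inside the critical section for brevity, which production code would usually avoid.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_nexus {
	unsigned char i_port_id[16];
	unsigned char t_port_id[16];
	struct demo_nexus *next;
};

struct demo_port {
	pthread_mutex_t mutex;
	struct demo_nexus *nexus_list;
};

/* Look up (i_port_id, t_port_id); create and insert an entry if missing. */
static struct demo_nexus *demo_get_nexus(struct demo_port *port,
					 const unsigned char i_port_id[16],
					 const unsigned char t_port_id[16])
{
	struct demo_nexus *n;

	pthread_mutex_lock(&port->mutex);
	for (n = port->nexus_list; n; n = n->next)
		if (!memcmp(n->i_port_id, i_port_id, 16) &&
		    !memcmp(n->t_port_id, t_port_id, 16))
			goto out;

	n = calloc(1, sizeof(*n));	/* simplified: allocation under the lock */
	if (n) {
		memcpy(n->i_port_id, i_port_id, 16);
		memcpy(n->t_port_id, t_port_id, 16);
		n->next = port->nexus_list;
		port->nexus_list = n;
	}
out:
	pthread_mutex_unlock(&port->mutex);
	return n;
}

int main(void)
{
	struct demo_port port = { .mutex = PTHREAD_MUTEX_INITIALIZER };
	unsigned char i_id[16] = { 1 }, t_id[16] = { 2 };

	struct demo_nexus *a = demo_get_nexus(&port, i_id, t_id);
	struct demo_nexus *b = demo_get_nexus(&port, i_id, t_id);

	printf("same nexus returned: %s\n", a == b ? "yes" : "no");
	return 0;
}
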
2038 static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
2039 __must_hold(&sport->mutex)
2041 lockdep_assert_held(&sport->mutex);
2043 if (sport->enabled == enabled)
2045 sport->enabled = enabled;
2047 __srpt_close_all_ch(sport);
2050 static void srpt_drop_sport_ref(struct srpt_port *sport)
2052 if (atomic_dec_return(&sport->refcount) == 0 && sport->freed_channels)
2053 complete(sport->freed_channels);
2060 srpt_drop_sport_ref(ch->sport);
2070 * as long as the channel is on sport->nexus_list.
2076 struct srpt_port *sport;
2082 sdev = ch->sport->sdev;
2099 sport = ch->sport;
2100 mutex_lock(&sport->mutex);
2102 mutex_unlock(&sport->mutex);
2110 ch->sport->sdev, ch->rq_size,
2145 struct srpt_port *sport = &sdev->port[port_num - 1];
2165 port_num, &sport->gid, be16_to_cpu(pkey));
2167 nexus = srpt_get_nexus(sport, req->initiator_port_id,
2190 if (!sport->enabled) {
2193 dev_name(&sport->sdev->device->dev), port_num);
2219 ch->sport = sport;
2237 ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
2245 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2280 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2313 if (sport->guid_id) {
2314 mutex_lock(&sport->guid_id->mutex);
2315 list_for_each_entry(stpg, &sport->guid_id->tpg_list, entry) {
2322 mutex_unlock(&sport->guid_id->mutex);
2325 if (sport->gid_id) {
2326 mutex_lock(&sport->gid_id->mutex);
2327 list_for_each_entry(stpg, &sport->gid_id->tpg_list, entry) {
2340 mutex_unlock(&sport->gid_id->mutex);
2357 * will decrement sport->refcount. Hence increment sport->refcount now.
2359 atomic_inc(&sport->refcount);
2361 mutex_lock(&sport->mutex);
2379 if (!sport->enabled) {
2384 mutex_unlock(&sport->mutex);
2389 mutex_unlock(&sport->mutex);
2435 * Hold the sport mutex while accepting a connection to avoid that
2438 mutex_lock(&sport->mutex);
2439 if (sport->enabled && ch->state == CH_CONNECTING) {
2447 mutex_unlock(&sport->mutex);
2468 ch->sport->sdev, ch->rq_size,
2476 ch->sport->sdev, ch->rq_size,
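
Across 2190-2193, 2379-2389 and 2435-2447, the login path keeps re-checking sport->enabled, and the final accept happens under sport->mutex together with a channel-state check, so that disabling the port cannot race with a connection being accepted. The sketch below shows that check-under-the-same-lock shape with hypothetical names; the accept path and the disable path take the same mutex.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum demo_ch_state { DEMO_CH_CONNECTING, DEMO_CH_LIVE, DEMO_CH_DISCONNECTED };

struct demo_port {
	pthread_mutex_t mutex;
	bool enabled;
};

struct demo_ch {
	enum demo_ch_state state;
};

/* Accept only while the port is enabled and the channel is still connecting. */
static bool demo_accept(struct demo_port *port, struct demo_ch *ch)
{
	bool accepted = false;

	pthread_mutex_lock(&port->mutex);
	if (port->enabled && ch->state == DEMO_CH_CONNECTING) {
		ch->state = DEMO_CH_LIVE;
		accepted = true;
	}
	pthread_mutex_unlock(&port->mutex);
	return accepted;
}

/* Disabling under the same mutex closes anything still connecting. */
static void demo_disable(struct demo_port *port, struct demo_ch *ch)
{
	pthread_mutex_lock(&port->mutex);
	port->enabled = false;
	if (ch->state == DEMO_CH_CONNECTING)
		ch->state = DEMO_CH_DISCONNECTED;
	pthread_mutex_unlock(&port->mutex);
}

int main(void)
{
	struct demo_port port = { .mutex = PTHREAD_MUTEX_INITIALIZER, .enabled = true };
	struct demo_ch ch = { .state = DEMO_CH_CONNECTING };

	demo_disable(&port, &ch);
	printf("accepted after disable: %s\n",
	       demo_accept(&port, &ch) ? "yes" : "no");
	return 0;
}
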
2766 first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, ch->sport->port,
2808 struct srpt_device *sdev = ch->sport->sdev;
2841 ch->sport->port, NULL, first_wr);
2938 struct srpt_port *sport = container_of(work, struct srpt_port, work);
2940 srpt_refresh_port(sport);
2945 * @sport: SRPT HCA port.
2947 static int srpt_release_sport(struct srpt_port *sport)
2955 sport->freed_channels = &c;
2957 mutex_lock(&sport->mutex);
2958 srpt_set_enabled(sport, false);
2959 mutex_unlock(&sport->mutex);
2961 while (atomic_read(&sport->refcount) > 0 &&
2964 dev_name(&sport->sdev->device->dev), sport->port,
2965 atomic_read(&sport->refcount));
2967 list_for_each_entry(nexus, &sport->nexus_list, entry) {
2977 mutex_lock(&sport->mutex);
2978 list_for_each_entry_safe(nexus, next_n, &sport->nexus_list, entry) {
2982 mutex_unlock(&sport->mutex);
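
srpt_release_sport() (2947-2982) pairs with srpt_drop_sport_ref() (2050-2053): teardown installs a completion in sport->freed_channels, disables the port, and then waits until the per-port reference count drains to zero, warning while references remain. The user-space approximation below uses a counter plus a condition variable instead of a kernel completion; the names and the five-second warning interval are invented.

#include <pthread.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for the sport refcount + completion pair. */
struct demo_port {
	pthread_mutex_t lock;
	pthread_cond_t  drained;
	int             refcount;
	int             enabled;
};

/* Channel teardown drops its reference; the last drop wakes the releaser. */
static void demo_drop_ref(struct demo_port *p)
{
	pthread_mutex_lock(&p->lock);
	if (--p->refcount == 0)
		pthread_cond_signal(&p->drained);
	pthread_mutex_unlock(&p->lock);
}

/* Disable the port, then wait (warning periodically) until all references
 * are gone -- the same overall shape as the release path listed above. */
static void demo_release(struct demo_port *p)
{
	struct timespec deadline;

	pthread_mutex_lock(&p->lock);
	p->enabled = 0;				/* no new channels from here on */
	while (p->refcount > 0) {
		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_sec += 5;		/* arbitrary warning interval */
		if (pthread_cond_timedwait(&p->drained, &p->lock, &deadline))
			fprintf(stderr, "still waiting, %d references left\n",
				p->refcount);
	}
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct demo_port port = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.drained = PTHREAD_COND_INITIALIZER,
		.refcount = 2,
		.enabled = 1,
	};

	demo_drop_ref(&port);			/* first channel goes away */
	demo_drop_ref(&port);			/* last reference dropped */
	demo_release(&port);			/* returns immediately, count is 0 */
	return 0;
}
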
2988 struct srpt_port *sport;
2996 struct srpt_port *sport;
3005 sport = &sdev->port[i];
3007 if (strcmp(sport->guid_name, name) == 0) {
3010 sport, &sport->guid_id};
3012 if (strcmp(sport->gid_name, name) == 0) {
3015 sport, &sport->gid_id};
3142 struct srpt_port *sport;
3205 sport = &sdev->port[i - 1];
3206 INIT_LIST_HEAD(&sport->nexus_list);
3207 mutex_init(&sport->mutex);
3208 sport->sdev = sdev;
3209 sport->port = i;
3210 sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
3211 sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
3212 sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
3213 sport->port_attrib.use_srq = false;
3214 INIT_WORK(&sport->work, srpt_refresh_port_work);
3216 ret = srpt_refresh_port(sport);
3313 struct srpt_port *sport = wwn->priv;
3315 if (sport->guid_id && &sport->guid_id->wwn == wwn)
3316 return sport->guid_id;
3317 if (sport->gid_id && &sport->gid_id->wwn == wwn)
3318 return sport->gid_id;
3353 srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
3480 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3482 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
3489 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3508 sport->port_attrib.srp_max_rdma_size = val;
3517 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3519 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
3526 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3545 sport->port_attrib.srp_max_rsp_size = val;
3554 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3556 return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
3563 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3582 sport->port_attrib.srp_sq_size = val;
3591 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3593 return sprintf(page, "%d\n", sport->port_attrib.use_srq);
3600 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3601 struct srpt_device *sdev = sport->sdev;
3615 ret = mutex_lock_interruptible(&sport->mutex);
3618 enabled = sport->enabled;
3620 srpt_set_enabled(sport, false);
3621 sport->port_attrib.use_srq = val;
3622 srpt_use_srq(sdev, sport->port_attrib.use_srq);
3623 srpt_set_enabled(sport, enabled);
3625 mutex_unlock(&sport->mutex);
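
The use_srq store handler (3600-3625) is a quiesce-reconfigure-resume sequence: take the port mutex (interruptibly), remember whether the port was enabled, disable it, flip the SRQ setting and rebuild the receive path, then restore the previous enabled state before unlocking. A minimal sketch of that sequence with invented helpers; the pthread_mutex_lock() return-value check merely stands in for the interruptible lock.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_port {
	pthread_mutex_t mutex;
	bool enabled;
	bool use_srq;
};

static void demo_set_enabled(struct demo_port *p, bool enabled)
{
	/* caller holds p->mutex; closing channels on disable is omitted here */
	p->enabled = enabled;
}

static void demo_reconfigure(struct demo_port *p)
{
	printf("reconfiguring receive path, use_srq=%d\n", p->use_srq);
}

/* Change use_srq while the port is quiesced, then restore its old state. */
static int demo_store_use_srq(struct demo_port *p, bool val)
{
	bool was_enabled;

	if (pthread_mutex_lock(&p->mutex))
		return -1;
	was_enabled = p->enabled;
	demo_set_enabled(p, false);		/* quiesce */
	p->use_srq = val;
	demo_reconfigure(p);			/* apply the new setting */
	demo_set_enabled(p, was_enabled);	/* resume only if it was enabled */
	pthread_mutex_unlock(&p->mutex);
	return 0;
}

int main(void)
{
	struct demo_port port = {
		.mutex = PTHREAD_MUTEX_INITIALIZER,
		.enabled = true,
	};

	return demo_store_use_srq(&port, true);
}
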
3737 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3739 return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled);
3746 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3761 mutex_lock(&sport->mutex);
3762 srpt_set_enabled(sport, tmp);
3763 mutex_unlock(&sport->mutex);
3812 struct srpt_port *sport = srpt_tpg_to_sport(tpg);
3818 sport->enabled = false;
3834 struct srpt_port *sport = papi.sport;
3846 srpt_sdev_put(sport->sdev);
3851 port_id->wwn.priv = sport;
3852 memcpy(port_id->name, port_id == sport->guid_id ? sport->guid_name :
3853 sport->gid_name, ARRAY_SIZE(port_id->name));
3867 struct srpt_port *sport = wwn->priv;
3869 if (sport->guid_id == port_id)
3870 sport->guid_id = NULL;
3871 else if (sport->gid_id == port_id)
3872 sport->gid_id = NULL;
3876 srpt_sdev_put(sport->sdev);
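
The final clusters (3313-3318, 3834-3876) show that each port carries up to two target IDs, one named after the port GUID and one after the GID, and that the wwn holds the port in wwn->priv: lookup returns whichever ID embeds the wwn, and dropping a wwn clears the matching slot and puts the device reference. A small sketch of that two-slot back-pointer bookkeeping with hypothetical types follows.

#include <stddef.h>
#include <stdio.h>

struct demo_wwn {
	void *priv;			/* back-pointer to the owning port */
};

struct demo_port_id {
	struct demo_wwn wwn;
};

struct demo_port {
	struct demo_port_id *guid_id;	/* ID named after the port GUID, if any */
	struct demo_port_id *gid_id;	/* ID named after the port GID, if any */
};

/* Return whichever of the two per-port IDs embeds this wwn, if either does. */
static struct demo_port_id *demo_wwn_to_id(struct demo_wwn *wwn)
{
	struct demo_port *port = wwn->priv;

	if (port->guid_id && &port->guid_id->wwn == wwn)
		return port->guid_id;
	if (port->gid_id && &port->gid_id->wwn == wwn)
		return port->gid_id;
	return NULL;
}

/* Dropping a wwn clears whichever per-port slot pointed at it. */
static void demo_drop_id(struct demo_port *port, struct demo_port_id *id)
{
	if (port->guid_id == id)
		port->guid_id = NULL;
	else if (port->gid_id == id)
		port->gid_id = NULL;
}

int main(void)
{
	struct demo_port port = { 0 };
	struct demo_port_id guid_id = { 0 };

	port.guid_id = &guid_id;
	guid_id.wwn.priv = &port;

	printf("lookup hits guid slot: %s\n",
	       demo_wwn_to_id(&guid_id.wwn) == &guid_id ? "yes" : "no");
	demo_drop_id(&port, &guid_id);
	printf("guid slot cleared: %s\n", port.guid_id == NULL ? "yes" : "no");
	return 0;
}
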