Lines matching defs:sdev — uses of struct srpt_device *sdev in the Linux SRP target driver (ib_srpt.c); each match is prefixed with its line number in the source file.
138 struct srpt_device *sdev =
144 dev_name(&sdev->device->dev));
149 if (port_num < sdev->device->phys_port_cnt) {
150 sport = &sdev->port[port_num];
156 sdev->device->phys_port_cnt);
167 if (port_num < sdev->device->phys_port_cnt) {
168 sport = &sdev->port[port_num];
174 sdev->device->phys_port_cnt);
321 struct srpt_device *sdev = sport->sdev;
339 if (sdev->use_srq)
340 send_queue_depth = sdev->srq_size;
343 sdev->device->attrs.max_qp_wr);
348 iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
349 iocp->device_id = cpu_to_be32(sdev->device->attrs.vendor_part_id);
350 iocp->device_version = cpu_to_be16(sdev->device->attrs.hw_ver);
351 iocp->subsys_vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
558 ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
565 ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
576 if (rdma_protocol_iwarp(sport->sdev->device, sport->port))
583 ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
586 dev_name(&sport->sdev->device->dev), sport->port, ret);
597 mad_agent = ib_register_mad_agent(sport->sdev->device,
606 dev_name(&sport->sdev->device->dev), sport->port,
611 ib_modify_port(sport->sdev->device, sport->port, 0,
624 * @sdev: SRPT HCA pointer.
629 static void srpt_unregister_mad_agent(struct srpt_device *sdev, int port_cnt)
638 sport = &sdev->port[i - 1];
641 ib_modify_port(sdev->device, i, 0, &port_modify);
650 * @sdev: SRPT HCA pointer.
655 static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
670 ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf,
672 if (ib_dma_mapping_error(sdev->device, ioctx->dma))
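The pair at 650–699 is the standard map-then-check allocation idiom: take a buffer from the ring's kmem_cache, DMA-map it, and refuse to hand it out unless the mapping is valid. A minimal sketch of the allocation side; everything not visible in the matches above (the kzalloc, the error unwind) is an assumption about the surrounding code:

static struct srpt_ioctx *alloc_ioctx_sketch(struct srpt_device *sdev,
					     int ioctx_size,
					     struct kmem_cache *buf_cache,
					     enum dma_data_direction dir)
{
	struct srpt_ioctx *ioctx = kzalloc(ioctx_size, GFP_KERNEL);

	if (!ioctx)
		return NULL;
	ioctx->buf = kmem_cache_alloc(buf_cache, GFP_KERNEL);
	if (!ioctx->buf)
		goto err_free_ioctx;
	/* Map for DMA and verify the mapping before the buffer is used. */
	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf,
				       kmem_cache_size(buf_cache), dir);
	if (ib_dma_mapping_error(sdev->device, ioctx->dma))
		goto err_free_buf;
	return ioctx;

err_free_buf:
	kmem_cache_free(buf_cache, ioctx->buf);
err_free_ioctx:
	kfree(ioctx);
	return NULL;
}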
687 * @sdev: SRPT HCA pointer.
692 static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
699 ib_dma_unmap_single(sdev->device, ioctx->dma,
707 * @sdev: Device to allocate the I/O context ring for.
715 static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
731 ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, buf_cache, dir);
741 srpt_free_ioctx(sdev, ring[i], buf_cache, dir);
751 * @sdev: SRPT HCA pointer.
757 struct srpt_device *sdev, int ring_size,
767 srpt_free_ioctx(sdev, ioctx_ring[i], buf_cache, dir);
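srpt_alloc_ioctx_ring/srpt_free_ioctx_ring (707–767) follow the usual allocate-N-or-unwind pattern: if any element fails, everything allocated so far is freed again. A sketch under that reading; the +1 terminator slot is an assumption:

static struct srpt_ioctx **alloc_ioctx_ring_sketch(struct srpt_device *sdev,
						   int ring_size, int ioctx_size,
						   struct kmem_cache *buf_cache,
						   enum dma_data_direction dir)
{
	struct srpt_ioctx **ring;
	int i;

	ring = kvmalloc_array(ring_size + 1, sizeof(ring[0]), GFP_KERNEL);
	if (!ring)
		return NULL;
	for (i = 0; i < ring_size; ++i) {
		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, buf_cache, dir);
		if (!ring[i])
			goto err;	/* partial ring: unwind below */
	}
	ring[ring_size] = NULL;		/* terminator (assumed) */
	return ring;

err:
	while (--i >= 0)
		srpt_free_ioctx(sdev, ring[i], buf_cache, dir);
	kvfree(ring);
	return NULL;
}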
818 * @sdev: SRPT HCA pointer.
822 static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
828 BUG_ON(!sdev);
831 list.lkey = sdev->lkey;
839 if (sdev->use_srq)
840 return ib_post_srq_recv(sdev->srq, &wr, NULL);
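The two receive models meet in srpt_post_recv (818–840): with use_srq set, buffers go to the device-wide shared receive queue, otherwise to the channel's own QP. A simplified sketch of that dispatch; the completion cookie (wr_cqe) setup is omitted:

static int post_recv_sketch(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
			    u64 dma_addr, u32 len)
{
	struct ib_sge list = {
		.addr	= dma_addr,
		.length	= len,
		.lkey	= sdev->lkey,	/* PD-wide DMA lkey, set up at device-add time */
	};
	struct ib_recv_wr wr = {
		.sg_list = &list,
		.num_sge = 1,
	};

	/* One shared receive queue per HCA, or one receive ring per channel. */
	if (sdev->use_srq)
		return ib_post_srq_recv(sdev->srq, &wr, NULL);
	return ib_post_recv(ch->qp, &wr, NULL);
}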
1126 ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
1628 ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
1671 srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
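Lines 1628 and 1671 bracket the receive path: sync the DMA buffer toward the CPU, decode the SRP IU, then recycle the buffer by reposting it. As a fragment; the length argument is an assumption:

/* Receive-side recycling, simplified. iu_len stands in for whatever
 * length the driver actually syncs; that detail is not in the matches. */
ib_dma_sync_single_for_cpu(ch->sport->sdev->device, recv_ioctx->ioctx.dma,
			   iu_len, DMA_FROM_DEVICE);
/* ... parse and dispatch the SRP information unit ... */
srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);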
1782 struct srpt_device *sdev = sport->sdev;
1783 const struct ib_device_attr *attrs = &sdev->device->attrs;
1795 ch->cq = ib_cq_pool_get(sdev->device, ch->rq_size + sq_size, -1,
1824 if (sdev->use_srq)
1825 qp_init->srq = sdev->srq;
1830 ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
1833 ch->qp = ib_create_qp(sdev->pd, qp_init);
1864 if (!sdev->use_srq)
1866 srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]);
1988 dev_name(&sport->sdev->device->dev),
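srpt_create_ch_ib (1782 onward) attaches the shared SRQ when it is in use and creates the QP through whichever CM owns the connection; without an SRQ, each channel pre-posts its own receive ring (1864–1866). A sketch of that branch structure; ch->using_rdma_cm is this sketch's reading of the driver's flag:

static int create_qp_sketch(struct srpt_rdma_ch *ch, struct srpt_device *sdev,
			    struct ib_qp_init_attr *qp_init)
{
	int i, ret;

	if (sdev->use_srq)
		qp_init->srq = sdev->srq;	/* receives come from the shared SRQ */

	if (ch->using_rdma_cm) {
		ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
		if (!ret)
			ch->qp = ch->rdma_cm.cm_id->qp;
	} else {
		ch->qp = ib_create_qp(sdev->pd, qp_init);
		ret = PTR_ERR_OR_ZERO(ch->qp);
	}
	if (ret)
		return ret;

	/* No SRQ: this channel owns its receive buffers and pre-posts them. */
	if (!sdev->use_srq)
		for (i = 0; i < ch->rq_size; i++)
			srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]);
	return 0;
}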
2075 struct srpt_device *sdev;
2082 sdev = ch->sport->sdev;
2083 BUG_ON(!sdev);
2110 ch->sport->sdev, ch->rq_size,
2116 sdev, ch->rq_size,
2126 * @sdev: HCA through which the login request was received.
2138 static int srpt_cm_req_recv(struct srpt_device *const sdev,
2145 struct srpt_port *sport = &sdev->port[port_num - 1];
2193 dev_name(&sport->sdev->device->dev), port_num);
2233 ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
2245 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2256 if (!sdev->use_srq) {
2280 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2383 dev_name(&sdev->device->dev), port_num);
2468 ch->sport->sdev, ch->rq_size,
2476 ch->sport->sdev, ch->rq_size,
2540 struct srpt_device *sdev;
2546 sdev = ib_get_client_data(cm_id->device, &srpt_client);
2547 if (!sdev)
2568 return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num,
2808 struct srpt_device *sdev = ch->sport->sdev;
2865 ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
2870 sge.lkey = sdev->lkey;
2964 dev_name(&sport->sdev->device->dev), sport->port,
2995 struct srpt_device *sdev;
2999 list_for_each_entry(sdev, &srpt_dev_list, list) {
3000 dev = sdev->device;
3005 sport = &sdev->port[i];
3008 kref_get(&sdev->refcnt);
3013 kref_get(&sdev->refcnt);
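The loop at 2995–3013 walks the global srpt_dev_list and takes a reference on the matching device before returning its port, so the entry cannot be freed while the caller still uses it. A sketch of that lookup; srpt_dev_lock and port_matches() are placeholders for whatever the driver actually uses:

static struct srpt_port *lookup_port_sketch(const char *name)
{
	struct srpt_device *sdev;
	struct srpt_port *sport;
	int i;

	spin_lock(&srpt_dev_lock);		/* placeholder lock name */
	list_for_each_entry(sdev, &srpt_dev_list, list) {
		for (i = 0; i < sdev->device->phys_port_cnt; i++) {
			sport = &sdev->port[i];
			if (port_matches(sport, name)) {	/* hypothetical helper */
				kref_get(&sdev->refcnt);	/* pin before unlocking */
				goto out;
			}
		}
	}
	sport = NULL;
out:
	spin_unlock(&srpt_dev_lock);
	return sport;	/* caller balances with srpt_sdev_put() */
}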
3041 static void srpt_free_srq(struct srpt_device *sdev)
3043 if (!sdev->srq)
3046 ib_destroy_srq(sdev->srq);
3047 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
3048 sdev->srq_size, sdev->req_buf_cache,
3050 kmem_cache_destroy(sdev->req_buf_cache);
3051 sdev->srq = NULL;
3054 static int srpt_alloc_srq(struct srpt_device *sdev)
3058 .srq_context = (void *)sdev,
3059 .attr.max_wr = sdev->srq_size,
3063 struct ib_device *device = sdev->device;
3067 WARN_ON_ONCE(sdev->srq);
3068 srq = ib_create_srq(sdev->pd, &srq_attr);
3074 pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
3075 sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
3077 sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
3079 if (!sdev->req_buf_cache)
3082 sdev->ioctx_ring = (struct srpt_recv_ioctx **)
3083 srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
3084 sizeof(*sdev->ioctx_ring[0]),
3085 sdev->req_buf_cache, 0, DMA_FROM_DEVICE);
3086 if (!sdev->ioctx_ring)
3089 sdev->use_srq = true;
3090 sdev->srq = srq;
3092 for (i = 0; i < sdev->srq_size; ++i) {
3093 INIT_LIST_HEAD(&sdev->ioctx_ring[i]->wait_list);
3094 srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);
3100 kmem_cache_destroy(sdev->req_buf_cache);
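Taken together, lines 3054–3100 give almost the whole of srpt_alloc_srq: create the SRQ against the device PD, build the request-buffer cache and ioctx ring that feed it, flip use_srq, and pre-post every buffer so the queue starts full. A reconstruction with the error unwind made explicit; the event-handler name and the exact kmem_cache_create() arguments are assumptions:

static int alloc_srq_sketch(struct srpt_device *sdev)
{
	struct ib_srq_init_attr srq_attr = {
		.event_handler	= srpt_srq_event,	/* assumed handler */
		.srq_context	= (void *)sdev,
		.attr.max_wr	= sdev->srq_size,
		.attr.max_sge	= 1,
	};
	struct ib_srq *srq;
	int i;

	srq = ib_create_srq(sdev->pd, &srq_attr);
	if (IS_ERR(srq))
		return PTR_ERR(srq);

	sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
						srp_max_req_size, 0, 0, NULL);
	if (!sdev->req_buf_cache)
		goto free_srq;

	sdev->ioctx_ring = (struct srpt_recv_ioctx **)
		srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
				      sizeof(*sdev->ioctx_ring[0]),
				      sdev->req_buf_cache, 0, DMA_FROM_DEVICE);
	if (!sdev->ioctx_ring)
		goto free_cache;

	sdev->use_srq = true;
	sdev->srq = srq;

	/* Pre-post every receive buffer so the SRQ starts full. */
	for (i = 0; i < sdev->srq_size; ++i) {
		INIT_LIST_HEAD(&sdev->ioctx_ring[i]->wait_list);
		srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);
	}
	return 0;

free_cache:
	kmem_cache_destroy(sdev->req_buf_cache);
free_srq:
	ib_destroy_srq(srq);
	return -ENOMEM;
}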
3107 static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
3109 struct ib_device *device = sdev->device;
3113 srpt_free_srq(sdev);
3114 sdev->use_srq = false;
3115 } else if (use_srq && !sdev->srq) {
3116 ret = srpt_alloc_srq(sdev);
3119 dev_name(&device->dev), sdev->use_srq, ret);
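srpt_use_srq (3107–3119) is the toggle that ties the two halves together: disabling always frees the SRQ, enabling allocates it only if it does not already exist. Reconstructed from the matches above; callers are expected to hold sdev_mutex (see the configfs path at 3612–3627):

static int use_srq_sketch(struct srpt_device *sdev, bool use_srq)
{
	struct ib_device *device = sdev->device;
	int ret = 0;

	if (!use_srq) {
		srpt_free_srq(sdev);
		sdev->use_srq = false;
	} else if (use_srq && !sdev->srq) {
		ret = srpt_alloc_srq(sdev);
	}
	pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__,
		 dev_name(&device->dev), sdev->use_srq, ret);
	return ret;
}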
3125 struct srpt_device *sdev = container_of(refcnt, typeof(*sdev), refcnt);
3127 kfree(sdev);
3130 static void srpt_sdev_put(struct srpt_device *sdev)
3132 kref_put(&sdev->refcnt, srpt_free_sdev);
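Lines 3125–3132 are the textbook kref release idiom: the last srpt_sdev_put() invokes srpt_free_sdev(), which frees the structure. Every kref_get() in the lookup above (3008, 3013) must therefore be balanced by one of the srpt_sdev_put() calls at 3846/3876. Reconstructed from the matches:

static void srpt_free_sdev(struct kref *refcnt)
{
	struct srpt_device *sdev = container_of(refcnt, typeof(*sdev), refcnt);

	kfree(sdev);
}

static void srpt_sdev_put(struct srpt_device *sdev)
{
	kref_put(&sdev->refcnt, srpt_free_sdev);
}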
3141 struct srpt_device *sdev;
3147 sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
3149 if (!sdev)
3152 kref_init(&sdev->refcnt);
3153 sdev->device = device;
3154 mutex_init(&sdev->sdev_mutex);
3156 sdev->pd = ib_alloc_pd(device, 0);
3157 if (IS_ERR(sdev->pd)) {
3158 ret = PTR_ERR(sdev->pd);
3162 sdev->lkey = sdev->pd->local_dma_lkey;
3164 sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);
3166 srpt_use_srq(sdev, sdev->port[0].port_attrib.use_srq);
3172 sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
3173 if (IS_ERR(sdev->cm_id)) {
3175 PTR_ERR(sdev->cm_id));
3176 ret = PTR_ERR(sdev->cm_id);
3177 sdev->cm_id = NULL;
3192 ret = sdev->cm_id ?
3193 ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0) :
3197 sdev->cm_id->state);
3201 INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
3204 for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
3205 sport = &sdev->port[i - 1];
3208 sport->sdev = sdev;
3219 dev_name(&sdev->device->dev), i);
3225 ib_register_event_handler(&sdev->event_handler);
3227 list_add_tail(&sdev->list, &srpt_dev_list);
3230 ib_set_client_data(device, &srpt_client, sdev);
3235 srpt_unregister_mad_agent(sdev, i);
3237 if (sdev->cm_id)
3238 ib_destroy_cm_id(sdev->cm_id);
3240 srpt_free_srq(sdev);
3241 ib_dealloc_pd(sdev->pd);
3243 srpt_sdev_put(sdev);
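srpt_add_one (3141–3243) builds the per-HCA state in a fixed order: allocate the srpt_device with one flexible-array slot per physical port, take the initial kref, allocate the PD, size and optionally create the SRQ, then set up the CM listener, event handler, and per-port state, unwinding in reverse on any failure. A skeleton with the later steps elided:

static int add_one_sketch(struct ib_device *device)
{
	struct srpt_device *sdev;
	int ret;

	sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
		       GFP_KERNEL);
	if (!sdev)
		return -ENOMEM;

	kref_init(&sdev->refcnt);	/* dropped by srpt_sdev_put() on removal */
	sdev->device = device;
	mutex_init(&sdev->sdev_mutex);

	sdev->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(sdev->pd)) {
		ret = PTR_ERR(sdev->pd);
		goto free_sdev;
	}
	sdev->lkey = sdev->pd->local_dma_lkey;

	/* Clamp the requested SRQ depth to what the HCA supports. */
	sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);
	srpt_use_srq(sdev, sdev->port[0].port_attrib.use_srq);

	sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
	if (IS_ERR(sdev->cm_id))
		sdev->cm_id = NULL;	/* IB CM is optional; RDMA CM may still serve logins */

	/* ... ib_cm_listen(), event handler, per-port MAD agents ... */

	ib_set_client_data(device, &srpt_client, sdev);
	return 0;

free_sdev:
	srpt_sdev_put(sdev);
	return ret;
}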
3255 struct srpt_device *sdev = client_data;
3258 srpt_unregister_mad_agent(sdev, sdev->device->phys_port_cnt);
3260 ib_unregister_event_handler(&sdev->event_handler);
3263 for (i = 0; i < sdev->device->phys_port_cnt; i++)
3264 cancel_work_sync(&sdev->port[i].work);
3266 if (sdev->cm_id)
3267 ib_destroy_cm_id(sdev->cm_id);
3272 * Unregistering a target must happen after destroying sdev->cm_id
3277 list_del(&sdev->list);
3280 for (i = 0; i < sdev->device->phys_port_cnt; i++)
3281 srpt_release_sport(&sdev->port[i]);
3283 srpt_free_srq(sdev);
3285 ib_dealloc_pd(sdev->pd);
3287 srpt_sdev_put(sdev);
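srpt_remove_one (3255–3287) mirrors that setup in reverse, and the comment at 3272 records the one hard ordering constraint: the CM ID must be destroyed (no new logins) before the ports are released. Reconstructed from the matches; locking around the list_del() is elided:

static void remove_one_sketch(struct ib_device *device, void *client_data)
{
	struct srpt_device *sdev = client_data;
	int i;

	srpt_unregister_mad_agent(sdev, sdev->device->phys_port_cnt);
	ib_unregister_event_handler(&sdev->event_handler);

	/* No more asynchronous port work after this point. */
	for (i = 0; i < sdev->device->phys_port_cnt; i++)
		cancel_work_sync(&sdev->port[i].work);

	/* Stop accepting logins before tearing the ports down. */
	if (sdev->cm_id)
		ib_destroy_cm_id(sdev->cm_id);

	list_del(&sdev->list);
	for (i = 0; i < sdev->device->phys_port_cnt; i++)
		srpt_release_sport(&sdev->port[i]);

	srpt_free_srq(sdev);
	ib_dealloc_pd(sdev->pd);
	srpt_sdev_put(sdev);	/* drop the kref_init() reference */
}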
3353 srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
3601 struct srpt_device *sdev = sport->sdev;
3612 ret = mutex_lock_interruptible(&sdev->sdev_mutex);
3622 srpt_use_srq(sdev, sport->port_attrib.use_srq);
3627 mutex_unlock(&sdev->sdev_mutex);
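The configfs store at 3601–3627 is the runtime entry point for the SRQ toggle: it serializes against device teardown with sdev_mutex (interruptibly, so a stuck device cannot wedge the writer) and then calls srpt_use_srq(). A simplified sketch; the attribute plumbing and value parsing are assumptions:

static ssize_t use_srq_store_sketch(struct srpt_port *sport,
				    const char *page, size_t count)
{
	struct srpt_device *sdev = sport->sdev;
	bool val;
	int ret;

	ret = kstrtobool(page, &val);	/* parsing helper assumed */
	if (ret < 0)
		return ret;

	ret = mutex_lock_interruptible(&sdev->sdev_mutex);
	if (ret < 0)
		return ret;
	sport->port_attrib.use_srq = val;
	ret = srpt_use_srq(sdev, sport->port_attrib.use_srq);
	mutex_unlock(&sdev->sdev_mutex);

	return ret < 0 ? ret : count;
}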
3846 srpt_sdev_put(sport->sdev);
3876 srpt_sdev_put(sport->sdev);