Lines Matching defs:device
143 pr_debug("ASYNC event= %d on device= %s\n", event->event,
144 dev_name(&sdev->device->dev));
149 if (port_num < sdev->device->phys_port_cnt) {
156 sdev->device->phys_port_cnt);
167 if (port_num < sdev->device->phys_port_cnt) {
174 sdev->device->phys_port_cnt);
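Lines 143..174 above are the asynchronous event callback: it logs the event against the device name and only acts on port numbers the HCA actually has (phys_port_cnt). Below is a minimal sketch of how such a handler is installed and how the range check reads, assuming a driver-private my_sdev container and a 1-based port number taken from event->element.port_num; the my_* names are illustrative, not the driver's own code:

    #include <rdma/ib_verbs.h>

    /* Sketch only: my_sdev and my_event_handler are assumed names. */
    struct my_sdev {
            struct ib_device *device;
            struct ib_event_handler event_handler;
    };

    static void my_event_handler(struct ib_event_handler *handler,
                                 struct ib_event *event)
    {
            struct my_sdev *sdev = container_of(handler, struct my_sdev,
                                                event_handler);
            u32 port_num = event->element.port_num; /* port events only */

            pr_debug("ASYNC event= %d on device= %s\n", event->event,
                     dev_name(&sdev->device->dev));

            /* Ignore events for ports the HCA does not have. */
            if (port_num == 0 || port_num > sdev->device->phys_port_cnt) {
                    pr_err("event %d: port %u out of range 1..%u\n",
                           event->event, port_num,
                           sdev->device->phys_port_cnt);
                    return;
            }
            /* ... per-port handling ... */
    }

    static void my_register_event_handler(struct my_sdev *sdev)
    {
            INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
                                  my_event_handler);
            ib_register_event_handler(&sdev->event_handler);
    }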
343 sdev->device->attrs.max_qp_wr);
348 iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
349 iocp->device_id = cpu_to_be32(sdev->device->attrs.vendor_part_id);
350 iocp->device_version = cpu_to_be16(sdev->device->attrs.hw_ver);
351 iocp->subsys_vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
558 ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
565 ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
576 if (rdma_protocol_iwarp(sport->sdev->device, sport->port))
583 ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
585 pr_warn("%s-%d: enabling device management failed (%d). Note: this is expected if SR-IOV is enabled.\n",
586 dev_name(&sport->sdev->device->dev), sport->port, ret);
597 mad_agent = ib_register_mad_agent(sport->sdev->device,
606 dev_name(&sport->sdev->device->dev), sport->port,
611 ib_modify_port(sport->sdev->device, sport->port, 0,
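Lines 558..611 belong to the per-port refresh path: query the port attributes and GID index 0, set the DeviceManagement capability bit so the port advertises an I/O controller (585/586 warn that this legitimately fails when SR-IOV is enabled), and register a MAD agent for device-management MADs. A hedged sketch of the query/modify half, under an assumed my_refresh_port helper; the MAD-agent registration is left out:

    #include <rdma/ib_cache.h>
    #include <rdma/ib_verbs.h>

    /* Sketch only: the dev/port arguments and error handling are assumptions. */
    static int my_refresh_port(struct ib_device *dev, u32 port)
    {
            struct ib_port_attr port_attr;
            struct ib_port_modify port_modify = {
                    .set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
            };
            union ib_gid gid;
            int ret;

            ret = ib_query_port(dev, port, &port_attr);     /* LID, state, MTU */
            if (ret)
                    return ret;

            ret = rdma_query_gid(dev, port, 0, &gid);       /* GID index 0 */
            if (ret)
                    return ret;

            /* Device management and MADs are InfiniBand-only; skip on iWARP. */
            if (rdma_protocol_iwarp(dev, port))
                    return 0;

            ret = ib_modify_port(dev, port, 0, &port_modify);
            if (ret)
                    pr_warn("%s-%u: enabling device management failed (%d)\n",
                            dev_name(&dev->dev), port, ret);
            return ret;
    }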
627 * Note: It is safe to call this function more than once for the same device.
641 ib_modify_port(sdev->device, i, 0, &port_modify);
670 ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf,
672 if (ib_dma_mapping_error(sdev->device, ioctx->dma))
699 ib_dma_unmap_single(sdev->device, ioctx->dma,
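Lines 670, 672 and 699 are the usual single-buffer DMA lifecycle: map the I/O context buffer, check the handle with ib_dma_mapping_error() before it is ever used, and unmap it with the same size and direction on teardown; the ib_dma_sync_single_for_cpu()/for_device() calls at 1628 and 2865 later hand ownership of such a mapping back and forth between CPU and HCA. A minimal sketch with assumed names (my_ioctx, buf_size) and an assumed DMA_FROM_DEVICE direction:

    #include <linux/dma-direction.h>
    #include <linux/errno.h>
    #include <rdma/ib_verbs.h>

    /* Sketch only: the structure layout and direction are assumptions. */
    struct my_ioctx {
            void *buf;
            dma_addr_t dma;
    };

    static int my_map_ioctx(struct ib_device *dev, struct my_ioctx *ioctx,
                            size_t buf_size)
    {
            ioctx->dma = ib_dma_map_single(dev, ioctx->buf, buf_size,
                                           DMA_FROM_DEVICE);
            /* Never post a handle that was not checked for mapping errors. */
            if (ib_dma_mapping_error(dev, ioctx->dma))
                    return -ENOMEM;
            return 0;
    }

    static void my_unmap_ioctx(struct ib_device *dev, struct my_ioctx *ioctx,
                               size_t buf_size)
    {
            /* Size and direction must match the original mapping. */
            ib_dma_unmap_single(dev, ioctx->dma, buf_size, DMA_FROM_DEVICE);
    }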
1126 ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
1628 ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
1783 const struct ib_device_attr *attrs = &sdev->device->attrs;
1795 ch->cq = ib_cq_pool_get(sdev->device, ch->rq_size + sq_size, -1,
1988 dev_name(&sport->sdev->device->dev),
2193 dev_name(&sport->sdev->device->dev), port_num);
2233 ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
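Lines 1783, 1795 and 2233 show queue sizing being capped by what the HCA reports (attrs.max_qp_wr) before a completion queue large enough for both send and receive queues is taken from the shared CQ pool. A sketch under assumed names (MY_RQ_SIZE, my_alloc_cq); the -1 completion-vector hint and IB_POLL_WORKQUEUE context mirror the lines above but are assumptions here:

    #include <rdma/ib_verbs.h>

    #define MY_RQ_SIZE 4096 /* assumed driver-side maximum */

    /* Sketch only: clamp the receive queue to the HCA limit, then take a
     * CQ sized for both queues from the shared CQ pool. */
    static struct ib_cq *my_alloc_cq(struct ib_device *dev, u32 sq_size,
                                     u32 *rq_size)
    {
            const struct ib_device_attr *attrs = &dev->attrs;

            *rq_size = min_t(u32, MY_RQ_SIZE, attrs->max_qp_wr);

            return ib_cq_pool_get(dev, *rq_size + sq_size,
                                  -1 /* any completion vector */,
                                  IB_POLL_WORKQUEUE);
    }

ib_cq_pool_get() returns an ERR_PTR on failure, so callers check it with IS_ERR(); a CQ obtained this way is released with ib_cq_pool_put() using the same element count.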
2383 dev_name(&sdev->device->dev), port_num);
2546 sdev = ib_get_client_data(cm_id->device, &srpt_client);
2865 ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
2964 dev_name(&sport->sdev->device->dev), sport->port,
3000 dev = sdev->device;
3063 struct ib_device *device = sdev->device;
3075 sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
3109 struct ib_device *device = sdev->device;
3119 dev_name(&device->dev), sdev->use_srq, ret);
3136 * srpt_add_one - InfiniBand device addition callback function
3137 * @device: Describes a HCA.
3139 static int srpt_add_one(struct ib_device *device)
3145 pr_debug("device = %p\n", device);
3147 sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
3153 sdev->device = device;
3156 sdev->pd = ib_alloc_pd(device, 0);
3164 sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);
3169 srpt_service_guid = be64_to_cpu(device->node_guid);
3171 if (rdma_port_get_link_layer(device, 1) == IB_LINK_LAYER_INFINIBAND)
3172 sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
3201 INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
3204 for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
3219 dev_name(&sdev->device->dev), i);
3230 ib_set_client_data(device, &srpt_client, sdev);
3231 pr_debug("added %s.\n", dev_name(&device->dev));
3244 pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
3249 * srpt_remove_one - InfiniBand device removal callback function
3250 * @device: Describes a HCA.
3253 static void srpt_remove_one(struct ib_device *device, void *client_data)
3258 srpt_unregister_mad_agent(sdev, sdev->device->phys_port_cnt);
3263 for (i = 0; i < sdev->device->phys_port_cnt; i++)
3269 ib_set_client_data(device, &srpt_client, NULL);
3280 for (i = 0; i < sdev->device->phys_port_cnt; i++)
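srpt_add_one() and srpt_remove_one() (3139, 3253) are the two callbacks of an ib_client: the core invokes them once per HCA, the driver stores its per-device state with ib_set_client_data() (3230, cleared at 3269), and later paths such as the CM handler fetch it back with ib_get_client_data() (2546). A minimal sketch of that registration pattern with assumed my_* names; note the real driver sizes its per-device struct with struct_size() for a flexible array of ports (3147):

    #include <linux/errno.h>
    #include <linux/module.h>
    #include <linux/slab.h>
    #include <rdma/ib_verbs.h>

    static struct ib_client my_client;              /* defined below */

    struct my_dev {
            struct ib_device *device;
            /* ... per-HCA state: PD, SRQ, per-port data ... */
    };

    static int my_add_one(struct ib_device *device)
    {
            struct my_dev *mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);

            if (!mdev)
                    return -ENOMEM;
            mdev->device = device;
            /* ... allocate PD, SRQ and per-port resources ... */
            ib_set_client_data(device, &my_client, mdev);
            return 0;
    }

    static void my_remove_one(struct ib_device *device, void *client_data)
    {
            struct my_dev *mdev = client_data;

            /* ... tear down per-port resources, SRQ, PD ... */
            ib_set_client_data(device, &my_client, NULL);
            kfree(mdev);
    }

    static struct ib_client my_client = {
            .name   = "my_client",
            .add    = my_add_one,
            .remove = my_remove_one,
    };

    static int __init my_init(void)
    {
            return ib_register_client(&my_client);
    }

    static void __exit my_exit(void)
    {
            ib_unregister_client(&my_client);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");

Anywhere a struct ib_device (or a CM id carrying one) is in hand, the per-device state comes back via ib_get_client_data(device, &my_client), which is what line 2546 does with srpt_client.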
3386 * within a particular SCSI target device within a particular SCSI instance.