Lines Matching refs:evt
209 * @evt: ibmvfc event struct
212 static void ibmvfc_trc_start(struct ibmvfc_event *evt)
214 struct ibmvfc_host *vhost = evt->vhost;
215 struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
216 struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
222 entry->evt = evt;
224 entry->fmt = evt->crq.format;
245 * @evt: ibmvfc event struct
248 static void ibmvfc_trc_end(struct ibmvfc_event *evt)
250 struct ibmvfc_host *vhost = evt->vhost;
251 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
252 struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
259 entry->evt = evt;
261 entry->fmt = evt->crq.format;
287 #define ibmvfc_trc_start(evt) do { } while (0)
288 #define ibmvfc_trc_end(evt) do { } while (0)
812 struct ibmvfc_event *evt = &pool->events[i];
815 * evt->active states
820 atomic_set(&evt->active, -1);
821 atomic_set(&evt->free, 1);
822 evt->crq.valid = 0x80;
823 evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
824 evt->xfer_iu = pool->iu_storage + i;
825 evt->vhost = vhost;
826 evt->queue = queue;
827 evt->ext_list = NULL;
828 list_add_tail(&evt->queue_list, &queue->free);
1003 * @evt: ibmvfc event to be checked for validity
1009 struct ibmvfc_event *evt)
1011 int index = evt - pool->events;
1014 if (evt != pool->events + index) /* unaligned */
1021 * @evt: ibmvfc_event to be freed
1024 static void ibmvfc_free_event(struct ibmvfc_event *evt)
1026 struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
1029 BUG_ON(!ibmvfc_valid_event(pool, evt));
1030 BUG_ON(atomic_inc_return(&evt->free) != 1);
1031 BUG_ON(atomic_dec_and_test(&evt->active));
1033 spin_lock_irqsave(&evt->queue->l_lock, flags);
1034 list_add_tail(&evt->queue_list, &evt->queue->free);
1035 if (evt->eh_comp)
1036 complete(evt->eh_comp);
1037 spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1042 * @evt: ibmvfc event struct
1047 static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
1049 struct scsi_cmnd *cmnd = evt->cmnd;
1056 ibmvfc_free_event(evt);
1068 struct ibmvfc_event *evt, *pos;
1070 list_for_each_entry_safe(evt, pos, purge_list, queue_list) {
1071 list_del(&evt->queue_list);
1072 ibmvfc_trc_end(evt);
1073 evt->done(evt);
1079 * @evt: ibmvfc event struct
1085 static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
1092 BUG_ON(!atomic_dec_and_test(&evt->active));
1093 if (evt->cmnd) {
1094 evt->cmnd->result = (error_code << 16);
1095 evt->done = ibmvfc_scsi_eh_done;
1097 evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
1099 del_timer(&evt->timer);
1112 struct ibmvfc_event *evt, *pos;
1123 list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
1124 ibmvfc_fail_request(evt, error_code);
1131 list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list)
1132 ibmvfc_fail_request(evt, error_code);
1517 struct ibmvfc_event *evt;
1526 evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
1527 atomic_set(&evt->free, 0);
1528 list_del(&evt->queue_list);
1530 return evt;
1534 * ibmvfc_locked_done - Calls evt completion with host_lock held
1535 * @evt: ibmvfc evt to complete
1539 * MAD evt with the host_lock.
1541 static void ibmvfc_locked_done(struct ibmvfc_event *evt)
1545 spin_lock_irqsave(evt->vhost->host->host_lock, flags);
1546 evt->_done(evt);
1547 spin_unlock_irqrestore(evt->vhost->host->host_lock, flags);
1553 * @evt: The event
1557 static void ibmvfc_init_event(struct ibmvfc_event *evt,
1560 evt->cmnd = NULL;
1561 evt->sync_iu = NULL;
1562 evt->eh_comp = NULL;
1563 evt->crq.format = format;
1565 evt->done = done;
1567 evt->_done = done;
1568 evt->done = ibmvfc_locked_done;
1570 evt->hwq = 0;
1595 * @evt: ibmvfc event struct
1603 struct ibmvfc_event *evt,
1610 struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);
1640 if (!evt->ext_list) {
1641 evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
1642 &evt->ext_list_token);
1644 if (!evt->ext_list) {
1652 ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
1654 data->va = cpu_to_be64(evt->ext_list_token);
1668 struct ibmvfc_event *evt = from_timer(evt, t, timer);
1669 struct ibmvfc_host *vhost = evt->vhost;
1670 dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
1676 * @evt: event to be sent
1682 static int ibmvfc_send_event(struct ibmvfc_event *evt,
1685 __be64 *crq_as_u64 = (__be64 *) &evt->crq;
1690 *evt->xfer_iu = evt->iu;
1691 if (evt->crq.format == IBMVFC_CMD_FORMAT)
1692 evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
1693 else if (evt->crq.format == IBMVFC_MAD_FORMAT)
1694 evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
1698 timer_setup(&evt->timer, ibmvfc_timeout, 0);
1701 evt->timer.expires = jiffies + (timeout * HZ);
1702 add_timer(&evt->timer);
1705 spin_lock_irqsave(&evt->queue->l_lock, flags);
1706 list_add_tail(&evt->queue_list, &evt->queue->sent);
1707 atomic_set(&evt->active, 1);
1711 if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
1713 evt->queue->vios_cookie,
1722 atomic_set(&evt->active, 0);
1723 list_del(&evt->queue_list);
1724 spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1725 del_timer(&evt->timer);
1735 if (evt->cmnd)
1736 scsi_dma_unmap(evt->cmnd);
1737 ibmvfc_free_event(evt);
1742 if (evt->cmnd) {
1743 evt->cmnd->result = DID_ERROR << 16;
1744 evt->done = ibmvfc_scsi_eh_done;
1746 evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
1748 evt->done(evt);
1750 spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1751 ibmvfc_trc_start(evt);
1759 * @evt: ibmvfc event to log
1762 static void ibmvfc_log_error(struct ibmvfc_event *evt)
1764 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1765 struct ibmvfc_host *vhost = evt->vhost;
1767 struct scsi_cmnd *cmnd = evt->cmnd;
1816 * @evt: ibmvfc event to be handled
1820 static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1822 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1823 struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
1824 struct scsi_cmnd *cmnd = evt->cmnd;
1837 cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);
1852 ibmvfc_log_error(evt);
1863 ibmvfc_free_event(evt);
1896 static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
1899 struct ibmvfc_host *vhost = evt->vhost;
1900 struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
1911 vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
1937 struct ibmvfc_event *evt;
1953 evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
1954 if (!evt)
1957 evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
1959 evt = ibmvfc_get_event(&vhost->crq);
1960 if (!evt)
1964 ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
1965 evt->cmnd = cmnd;
1967 vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
1978 vfc_cmd->correlation = cpu_to_be64((u64)evt);
1980 if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
1981 return ibmvfc_send_event(evt, vhost, 0);
1983 ibmvfc_free_event(evt);
1998 * @evt: ibmvfc event struct
2001 static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
2004 if (evt->sync_iu)
2005 *evt->sync_iu = *evt->xfer_iu;
2007 complete(&evt->comp);
2012 * @evt: struct ibmvfc_event
2015 static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
2017 struct ibmvfc_host *vhost = evt->vhost;
2019 ibmvfc_free_event(evt);
2035 struct ibmvfc_event *evt;
2049 evt = ibmvfc_get_event(&vhost->crq);
2050 if (!evt) {
2055 ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
2057 tmf = &evt->iu.tmf;
2065 rc = ibmvfc_send_event(evt, vhost, default_timeout);
2093 struct ibmvfc_event *evt;
2112 evt = ibmvfc_get_event(&vhost->crq);
2113 if (!evt) {
2117 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2118 plogi = &evt->iu.plogi;
2124 evt->sync_iu = &rsp_iu;
2125 init_completion(&evt->comp);
2127 rc = ibmvfc_send_event(evt, vhost, default_timeout);
2133 wait_for_completion(&evt->comp);
2139 ibmvfc_free_event(evt);
2158 struct ibmvfc_event *evt;
2234 evt = ibmvfc_get_event(&vhost->crq);
2235 if (!evt) {
2240 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2241 mad = &evt->iu.passthru;
2248 mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
2262 mad->iu.tag = cpu_to_be64((u64)evt);
2265 evt->sync_iu = &rsp_iu;
2266 init_completion(&evt->comp);
2267 rc = ibmvfc_send_event(evt, vhost, 0);
2275 wait_for_completion(&evt->comp);
2283 ibmvfc_free_event(evt);
2313 struct ibmvfc_event *evt = NULL;
2324 evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]);
2326 evt = ibmvfc_get_event(&vhost->crq);
2328 if (!evt) {
2333 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2334 tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2341 evt->sync_iu = &rsp_iu;
2343 init_completion(&evt->comp);
2344 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2355 wait_for_completion(&evt->comp);
2374 ibmvfc_free_event(evt);
2381 * @evt: ibmvfc event struct
2387 static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
2391 if (evt->cmnd) {
2392 cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
2401 * @evt: ibmvfc event struct
2407 static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
2409 if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
2416 * @evt: ibmvfc event struct
2422 static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
2424 if (evt->cmnd && evt->cmnd->device == device)
2431 * @evt: ibmvfc event struct
2436 static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
2440 list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
2441 if (loop_evt == evt)
2459 struct ibmvfc_event *evt;
2481 evt = &queues[q_index].evt_pool.events[i];
2482 if (!ibmvfc_event_is_free(evt)) {
2483 if (match(evt, device)) {
2484 evt->eh_comp = &comp;
2502 evt = &queues[q_index].evt_pool.events[i];
2503 if (!ibmvfc_event_is_free(evt)) {
2504 if (match(evt, device)) {
2505 evt->eh_comp = NULL;
2532 struct ibmvfc_event *evt;
2535 evt = ibmvfc_get_event(queue);
2536 if (!evt)
2538 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2540 tmf = &evt->iu.tmf;
2561 init_completion(&evt->comp);
2563 return evt;
2569 struct ibmvfc_event *evt, *found_evt, *temp;
2584 list_for_each_entry(evt, &queues[i].sent, queue_list) {
2585 if (evt->cmnd && evt->cmnd->device == sdev) {
2586 found_evt = evt;
2593 evt = ibmvfc_init_tmf(&queues[i], sdev, type);
2594 if (!evt) {
2599 evt->sync_iu = &queues[i].cancel_rsp;
2600 ibmvfc_send_event(evt, vhost, default_timeout);
2601 list_add_tail(&evt->cancel, &cancelq);
2616 list_for_each_entry_safe(evt, temp, &cancelq, cancel) {
2617 wait_for_completion(&evt->comp);
2618 status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status);
2619 list_del(&evt->cancel);
2620 ibmvfc_free_event(evt);
2649 struct ibmvfc_event *evt, *found_evt;
2659 list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2660 if (evt->cmnd && evt->cmnd->device == sdev) {
2661 found_evt = evt;
2675 evt = ibmvfc_init_tmf(&vhost->crq, sdev, type);
2676 evt->sync_iu = &rsp;
2677 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2692 wait_for_completion(&evt->comp);
2695 ibmvfc_free_event(evt);
2738 * @evt: ibmvfc event struct
2744 static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
2748 if (evt->crq.format == IBMVFC_CMD_FORMAT &&
2749 be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
2756 * @evt: ibmvfc event struct
2762 static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
2764 if (evt == match)
2784 struct ibmvfc_event *evt, *found_evt;
2795 list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2796 if (evt->cmnd && evt->cmnd->device == sdev) {
2797 found_evt = evt;
2811 evt = ibmvfc_get_event(&vhost->crq);
2812 if (!evt) {
2816 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2817 tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2824 evt->sync_iu = &rsp_iu;
2826 tmf->correlation = cpu_to_be64((u64)evt);
2828 init_completion(&evt->comp);
2829 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2840 timeout = wait_for_completion_timeout(&evt->comp, timeout);
2859 rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
2889 ibmvfc_free_event(evt);
3252 struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3311 if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) {
3317 if (unlikely(atomic_dec_if_positive(&evt->active))) {
3323 spin_lock(&evt->queue->l_lock);
3324 list_move_tail(&evt->queue_list, evt_doneq);
3325 spin_unlock(&evt->queue->l_lock);
3757 struct ibmvfc_event *evt, *temp;
3797 list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
3798 del_timer(&evt->timer);
3799 list_del(&evt->queue_list);
3800 ibmvfc_trc_end(evt);
3801 evt->done(evt);
3828 struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3844 if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) {
3850 if (unlikely(atomic_dec_if_positive(&evt->active))) {
3856 spin_lock(&evt->queue->l_lock);
3857 list_move_tail(&evt->queue_list, evt_doneq);
3858 spin_unlock(&evt->queue->l_lock);
3879 struct ibmvfc_event *evt, *temp;
3903 list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
3904 del_timer(&evt->timer);
3905 list_del(&evt->queue_list);
3906 ibmvfc_trc_end(evt);
3907 evt->done(evt);
3991 * @evt: ibmvfc event struct
3994 static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
3996 struct ibmvfc_target *tgt = evt->tgt;
3997 struct ibmvfc_host *vhost = evt->vhost;
3998 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
4054 ibmvfc_free_event(evt);
4067 struct ibmvfc_event *evt;
4073 evt = ibmvfc_get_event(&vhost->crq);
4074 if (!evt) {
4081 ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
4082 evt->tgt = tgt;
4083 prli = &evt->iu.prli;
4104 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4114 * @evt: ibmvfc event struct
4117 static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
4119 struct ibmvfc_target *tgt = evt->tgt;
4120 struct ibmvfc_host *vhost = evt->vhost;
4121 struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
4166 ibmvfc_free_event(evt);
4179 struct ibmvfc_event *evt;
4186 evt = ibmvfc_get_event(&vhost->crq);
4187 if (!evt) {
4195 ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
4196 evt->tgt = tgt;
4197 plogi = &evt->iu.plogi;
4209 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4219 * @evt: ibmvfc event struct
4222 static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
4224 struct ibmvfc_target *tgt = evt->tgt;
4225 struct ibmvfc_host *vhost = evt->vhost;
4226 struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
4230 ibmvfc_free_event(evt);
4265 struct ibmvfc_event *evt;
4268 evt = ibmvfc_get_event(&vhost->crq);
4269 if (!evt)
4271 ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
4272 evt->tgt = tgt;
4273 mad = &evt->iu.implicit_logout;
4279 return evt;
4290 struct ibmvfc_event *evt;
4296 evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4298 if (!evt) {
4307 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4317 * @evt: ibmvfc event struct
4320 static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
4322 struct ibmvfc_target *tgt = evt->tgt;
4323 struct ibmvfc_host *vhost = evt->vhost;
4324 struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
4328 ibmvfc_free_event(evt);
4355 struct ibmvfc_event *evt;
4366 evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4370 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4380 * @evt: ibmvfc event struct
4383 static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
4385 struct ibmvfc_target *tgt = evt->tgt;
4386 struct ibmvfc_host *vhost = evt->vhost;
4387 struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
4423 ibmvfc_free_event(evt);
4437 struct ibmvfc_event *evt;
4443 evt = ibmvfc_get_event(&vhost->crq);
4444 if (!evt) {
4452 ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
4453 evt->tgt = tgt;
4454 move = &evt->iu.move_login;
4465 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4495 * @evt: ibmvfc event struct
4498 static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
4500 struct ibmvfc_target *tgt = evt->tgt;
4501 struct ibmvfc_host *vhost = evt->vhost;
4502 struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
4532 ibmvfc_free_event(evt);
4538 * @evt: ibmvfc event struct
4541 static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
4543 struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
4549 mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4554 mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4558 mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4566 * @evt: ibmvfc event struct
4574 static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
4576 struct ibmvfc_host *vhost = evt->vhost;
4577 struct ibmvfc_target *tgt = evt->tgt;
4581 ibmvfc_free_event(evt);
4598 struct ibmvfc_event *evt;
4615 evt = ibmvfc_get_event(&vhost->crq);
4616 if (!evt) {
4624 ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
4626 evt->tgt = tgt;
4627 tmf = &evt->iu.tmf;
4640 rc = ibmvfc_send_event(evt, vhost, default_timeout);
4667 struct ibmvfc_event *evt;
4673 evt = ibmvfc_get_event(&vhost->crq);
4674 if (!evt) {
4681 ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
4682 evt->tgt = tgt;
4684 ibmvfc_init_passthru(evt);
4685 mad = &evt->iu.passthru;
4705 if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
4716 * @evt: ibmvfc event struct
4719 static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
4721 struct ibmvfc_target *tgt = evt->tgt;
4722 struct ibmvfc_host *vhost = evt->vhost;
4723 struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
4763 ibmvfc_free_event(evt);
4776 struct ibmvfc_event *evt;
4782 evt = ibmvfc_get_event(&vhost->crq);
4783 if (!evt) {
4790 evt->tgt = tgt;
4791 ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
4792 query_tgt = &evt->iu.query_tgt;
4800 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4918 * @evt: ibmvfc event struct
4921 static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
4923 struct ibmvfc_host *vhost = evt->vhost;
4924 struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
4948 ibmvfc_free_event(evt);
4960 struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
4963 if (!evt) {
4969 ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
4970 mad = &evt->iu.discover_targets;
4981 if (!ibmvfc_send_event(evt, vhost, default_timeout))
4987 static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
4989 struct ibmvfc_host *vhost = evt->vhost;
4992 u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status);
4996 ibmvfc_free_event(evt);
5041 struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
5048 if (!evt) {
5063 ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT);
5064 mad = &evt->iu.channel_setup;
5074 if (!ibmvfc_send_event(evt, vhost, default_timeout))
5080 static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt)
5082 struct ibmvfc_host *vhost = evt->vhost;
5083 struct ibmvfc_channel_enquiry *rsp = &evt->xfer_iu->channel_enquiry;
5091 ibmvfc_free_event(evt);
5098 ibmvfc_free_event(evt);
5104 ibmvfc_free_event(evt);
5114 struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
5117 if (!evt) {
5123 ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
5124 mad = &evt->iu.channel_enquiry;
5137 if (!ibmvfc_send_event(evt, vhost, default_timeout))
5145 * @evt: ibmvfc event struct
5148 static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
5150 struct ibmvfc_host *vhost = evt->vhost;
5151 u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
5158 ibmvfc_free_event(evt);
5168 ibmvfc_free_event(evt);
5174 ibmvfc_free_event(evt);
5179 ibmvfc_free_event(evt);
5242 struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
5244 if (!evt) {
5252 ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
5255 mad = &evt->iu.npiv_login;
5265 if (!ibmvfc_send_event(evt, vhost, default_timeout))
5273 * @evt: ibmvfc event struct
5276 static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
5278 struct ibmvfc_host *vhost = evt->vhost;
5279 u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);
5281 ibmvfc_free_event(evt);
5311 struct ibmvfc_event *evt;
5313 evt = ibmvfc_get_event(&vhost->crq);
5314 if (!evt) {
5320 ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
5322 mad = &evt->iu.npiv_logout;
5330 if (!ibmvfc_send_event(evt, vhost, default_timeout))