Lines matching refs: tgt (drivers/scsi/qla2xxx/qla_target.c, QLogic FC target-mode driver)
119 static void qlt_clear_tgt_db(struct qla_tgt *tgt);
201 host = btree_lookup32(&vha->hw->tgt.host_map, key);
218 BUG_ON(ha->tgt.tgt_vp_map == NULL);
220 return ha->tgt.tgt_vp_map[vp_idx].vha;
229 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
231 vha->hw->tgt.num_pend_cmds++;
232 if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
234 vha->hw->tgt.num_pend_cmds;
235 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
241 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
242 vha->hw->tgt.num_pend_cmds--;
243 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
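
The fragments at 229-243 are the pending-command accounting pair: a counter bumped and dropped under ha->tgt.q_full_lock, with the high-water mark kept in vha->qla_stats. A minimal reconstruction; the enclosing function names follow the driver's qlt_ convention and the elided line 233 is inferred, since neither appears in the listing:

	static void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
	{
		unsigned long flags;

		spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
		vha->hw->tgt.num_pend_cmds++;
		/* Track the high-water mark (the assignment is line 233,
		 * which the listing skips). */
		if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
			vha->qla_stats.stat_max_pend_cmds =
				vha->hw->tgt.num_pend_cmds;
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
	}

	static void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
	{
		unsigned long flags;

		spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
		vha->hw->tgt.num_pend_cmds--;
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
	}
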
251 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
254 if (tgt->tgt_stop) {
256 "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
288 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
307 } else if (tgt->tgt_stop) {
309 "Freeing unknown %s %p, because tgt is being stopped\n",
569 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
592 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
594 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
610 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
685 vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
699 ha->tgt.tgt_ops->shutdown_sess(fcport);
700 ha->tgt.tgt_ops->put_sess(fcport);
712 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
716 if (!vha->hw->tgt.tgt_ops)
719 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
720 if (tgt->tgt_stop) {
721 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
726 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
731 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
737 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
740 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
748 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
760 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
772 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
774 ha->tgt.tgt_ops->put_sess(sess);
950 struct qla_tgt *tgt = sess->tgt;
1018 ha->tgt.tgt_ops->free_session(sess);
1047 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1050 if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
1051 tgt->sess_count--;
1106 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1120 if (tgt && (tgt->sess_count == 0))
1121 wake_up_all(&tgt->waitQ);
1125 (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
1143 /* ha->tgt.sess_lock is expected to be held on entry */
1168 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
1191 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1193 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1195 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1196 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
1197 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1226 struct qla_tgt *tgt = sess->tgt;
1236 if (tgt && tgt->tgt_stop && tgt->sess_count == 0)
1237 wake_up_all(&tgt->waitQ);
1283 static void qlt_clear_tgt_db(struct qla_tgt *tgt)
1286 scsi_qla_host_t *vha = tgt->vha;
1293 /* At this point tgt may already be dead */
1368 sess->tgt = vha->vha_tgt.qla_tgt;
1381 if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
1391 * fc_port access across ->tgt.sess_lock reacquire.
1400 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1405 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1409 "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
1430 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
1434 if (!vha->hw->tgt.tgt_ops)
1437 if (!tgt)
1440 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1441 if (tgt->tgt_stop) {
1442 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1446 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1451 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1463 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1467 static inline int test_tgt_sess_count(struct qla_tgt *tgt)
1469 struct qla_hw_data *ha = tgt->ha;
1473 * We need to protect against a race where tgt is freed before or
1476 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1477 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
1478 "tgt %p, sess_count=%d\n",
1479 tgt, tgt->sess_count);
1480 res = (tgt->sess_count == 0);
1481 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
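
Lines 1467-1481 contain nearly all of test_tgt_sess_count(); filling in only the declarations and the return that the grep elides gives:

	static inline int test_tgt_sess_count(struct qla_tgt *tgt)
	{
		struct qla_hw_data *ha = tgt->ha;
		unsigned long flags;
		int res;

		/* Read sess_count under sess_lock so the waiter cannot race
		 * with the final session teardown freeing tgt. */
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
		    "tgt %p, sess_count=%d\n",
		    tgt, tgt->sess_count);
		res = (tgt->sess_count == 0);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		return res;
	}
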
1487 int qlt_stop_phase1(struct qla_tgt *tgt)
1489 struct scsi_qla_host *vha = tgt->vha;
1490 struct qla_hw_data *ha = tgt->ha;
1496 if (tgt->tgt_stop || tgt->tgt_stopped) {
1498 "Already in tgt->tgt_stop or tgt_stopped state\n");
1511 tgt->tgt_stop = 1;
1512 qlt_clear_tgt_db(tgt);
1517 "Waiting for sess works (tgt %p)", tgt);
1518 spin_lock_irqsave(&tgt->sess_work_lock, flags);
1519 while (!list_empty(&tgt->sess_works_list)) {
1520 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1522 spin_lock_irqsave(&tgt->sess_work_lock, flags);
1524 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1527 "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);
1529 wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
1537 wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
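
Lines 1487-1537 outline phase 1 of the two-phase stop: set tgt_stop, clear the session database, drain sess_works_list while toggling sess_work_lock, then block on waitQ until test_tgt_sess_count() reports zero. A condensed sketch of the body; the call made inside the drain loop is elided by the grep, so the flush below is an assumption:

	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);

	/* Drain queued session works, dropping the lock so the work
	 * function can make progress before the list is rechecked. */
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_work(&tgt->sess_work);	/* assumption: some flush/sleep here */
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	/* Bounded wait for all sessions to drop (repeated at 1529/1537). */
	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
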
1545 void qlt_stop_phase2(struct qla_tgt *tgt)
1547 scsi_qla_host_t *vha = tgt->vha;
1549 if (tgt->tgt_stopped) {
1551 "Already in tgt->tgt_stopped state\n");
1555 if (!tgt->tgt_stop) {
1562 mutex_lock(&tgt->ha->optrom_mutex);
1564 tgt->tgt_stop = 0;
1565 tgt->tgt_stopped = 1;
1567 mutex_unlock(&tgt->ha->optrom_mutex);
1569 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
1570 tgt);
1584 static void qlt_release(struct qla_tgt *tgt)
1586 scsi_qla_host_t *vha = tgt->vha;
1593 if (!tgt->tgt_stop && !tgt->tgt_stopped)
1594 qlt_stop_phase1(tgt);
1596 if (!tgt->tgt_stopped)
1597 qlt_stop_phase2(tgt);
1602 h = &tgt->qphints[i];
1610 kfree(tgt->qphints);
1615 btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
1616 btree_remove64(&tgt->lun_qpair_map, key);
1618 btree_destroy64(&tgt->lun_qpair_map);
1621 if (ha->tgt.tgt_ops &&
1622 ha->tgt.tgt_ops->remove_target &&
1624 ha->tgt.tgt_ops->remove_target(vha);
1629 "Release of tgt %p finished\n", tgt);
1631 kfree(tgt);
1635 static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
1643 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
1649 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
1651 " to find session for param %p (size %d, tgt %p)\n",
1652 type, prm, param, param_size, tgt);
1657 spin_lock_irqsave(&tgt->sess_work_lock, flags);
1658 list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
1659 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1661 schedule_work(&tgt->sess_work);
2014 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2018 h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
2020 h = &tgt->qphints[0];
2022 h = &tgt->qphints[0];
2046 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
2115 if (ha->tgt.tgt_ops->find_cmd_by_tag) {
2118 abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
2173 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
2174 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
2179 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2185 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2353 ha->tgt.tgt_ops->free_mcmd(mcmd);
2391 ha->tgt.tgt_ops->free_mcmd(mcmd);
2743 prm->tgt = cmd->tgt;
2921 scsi_qla_host_t *vha = cmd->tgt->vha;
2942 t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
2965 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
2982 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
3371 struct qla_tgt *tgt = cmd->tgt;
3379 prm.tgt = tgt;
3392 vha->hw->tgt.tgt_ops->handle_data(cmd);
3531 vha->hw->tgt.tgt_ops->handle_data(cmd);
3537 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3547 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3706 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3720 vha->hw->tgt.leak_exchg_thresh_hold =
3724 if (!list_empty(&vha->hw->tgt.q_full_list)) {
3726 list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
3734 vha->hw->tgt.num_qfull_cmds_alloc--;
3737 vha->hw->tgt.num_qfull_cmds_dropped = 0;
3744 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
3746 if (vha->hw->tgt.leak_exchg_thresh_hold &&
3747 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
3764 struct qla_tgt *tgt = cmd->tgt;
3765 struct scsi_qla_host *vha = tgt->vha;
3824 cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
4039 ha->tgt.tgt_ops->handle_data(cmd);
4058 ha->tgt.tgt_ops->free_cmd(cmd);
4137 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
4144 ha->tgt.tgt_ops->put_sess(sess);
4158 cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
4161 ha->tgt.tgt_ops->put_sess(sess);
4181 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4187 ha->tgt.num_act_qpairs);
4189 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4191 btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
4192 btree_remove64(&tgt->lun_qpair_map, key);
4199 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4206 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4210 h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
4220 h = qla_qpair_to_hint(tgt, qpair);
4222 rc = btree_insert64(&tgt->lun_qpair_map,
4240 h = qla_qpair_to_hint(tgt, qp);
4242 rc = btree_insert64(&tgt->lun_qpair_map,
4262 h = qla_qpair_to_hint(tgt, qpair);
4264 rc = btree_insert64(&tgt->lun_qpair_map,
4274 h = &tgt->qphints[0];
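
Lines 2014-2022 and 4206-4274 show the LUN-to-queue-pair cache around lun_qpair_map: btree_lookup64() keyed on the unpacked LUN, with qphints[0] (the default qpair) as the fallback, and qla_qpair_to_hint() plus btree_insert64() filling the cache on a miss. A reduced lookup-or-insert; qlt_pick_qpair() is a hypothetical stand-in for the selection logic the grep skips:

	struct qla_qpair_hint *h;

	h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
	if (!h) {
		struct qla_qpair *qpair = qlt_pick_qpair(vha, cmd); /* hypothetical */

		h = qla_qpair_to_hint(tgt, qpair);
		if (!h || btree_insert64(&tgt->lun_qpair_map,
		    cmd->unpacked_lun, h, GFP_ATOMIC))
			h = &tgt->qphints[0];	/* no hint or insert failed: default */
	}
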
4287 cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);
4294 cmd->tgt = vha->vha_tgt.qla_tgt;
4318 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4324 if (unlikely(tgt->tgt_stop)) {
4326 "New command while device %p is shutting down\n", tgt);
4334 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
4362 ha->tgt.tgt_ops->put_sess(sess);
4463 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4464 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4466 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4506 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
4530 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4531 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4532 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4564 * ha->hardware_lock is expected to be held on entry (to protect tgt->sess_list)
4707 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4710 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4860 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4922 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4925 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4949 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4962 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
4988 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
5024 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
5065 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5067 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5100 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5102 if (tgt->link_reinit_iocb_pending) {
5104 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
5105 tgt->link_reinit_iocb_pending = 0;
5166 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5173 if (tgt->link_reinit_iocb_pending) {
5175 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
5177 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
5178 tgt->link_reinit_iocb_pending = 1;
5289 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5291 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5352 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5358 if (unlikely(tgt->tgt_stop)) {
5360 "New command while device %p is shutting down\n", tgt);
5364 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
5365 vha->hw->tgt.num_qfull_cmds_dropped++;
5366 if (vha->hw->tgt.num_qfull_cmds_dropped >
5369 vha->hw->tgt.num_qfull_cmds_dropped;
5374 vha->hw->tgt.num_qfull_cmds_dropped);
5380 sess = ha->tgt.tgt_ops->find_sess_by_s_id
5385 cmd = ha->tgt.tgt_ops->get_cmd(sess);
5391 vha->hw->tgt.num_qfull_cmds_dropped++;
5392 if (vha->hw->tgt.num_qfull_cmds_dropped >
5395 vha->hw->tgt.num_qfull_cmds_dropped;
5405 cmd->tgt = vha->vha_tgt.qla_tgt;
5418 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5419 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
5421 vha->hw->tgt.num_qfull_cmds_alloc++;
5422 if (vha->hw->tgt.num_qfull_cmds_alloc >
5425 vha->hw->tgt.num_qfull_cmds_alloc;
5426 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5439 if (list_empty(&ha->tgt.q_full_list))
5445 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5446 if (list_empty(&ha->tgt.q_full_list)) {
5447 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5451 list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
5452 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5482 vha->hw->tgt.num_qfull_cmds_alloc--;
5497 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5498 list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
5499 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
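
Lines 5439-5499 are the queue-full drain: the shared q_full_list is cut over to a local list under q_full_lock with list_splice_init(), then walked with the lock dropped; anything left over is spliced back at the end. A sketch of that shape (the per-command send/free logic between 5452 and 5482 is elided in the listing):

	LIST_HEAD(q_full_list);
	struct qla_tgt_cmd *cmd, *tcmd;
	unsigned long flags;

	if (list_empty(&ha->tgt.q_full_list))
		return;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	if (list_empty(&ha->tgt.q_full_list)) {
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
		return;
	}
	list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);

	list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
		/* ... emit the deferred busy/terminate response here ... */
		list_del(&cmd->cmd_list);
		vha->hw->tgt.num_qfull_cmds_alloc--;
	}

	/* Whatever is left goes back on the shared list (lines 5497-5499). */
	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
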
5524 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
5542 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5546 if (unlikely(tgt == NULL)) {
5548 "ATIO pkt, but no tgt (ha %p)", ha);
5556 tgt->atio_irq_cmd_count++;
5579 tgt->atio_irq_cmd_count--;
5648 tgt->atio_irq_cmd_count--;
5727 ha->tgt.tgt_ops->free_mcmd(mcmd);
5738 ha->tgt.tgt_ops->free_mcmd(mcmd);
5741 ha->tgt.tgt_ops->free_mcmd(mcmd);
5750 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5752 if (unlikely(tgt == NULL)) {
5754 "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
5852 if (tgt->notify_ack_expected > 0) {
5859 tgt->notify_ack_expected--;
5881 if (tgt->abts_resp_expected > 0) {
5906 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5909 if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
5940 if (tgt->link_reinit_iocb_pending) {
5942 &tgt->link_reinit_iocb,
5944 tgt->link_reinit_iocb_pending = 0;
6035 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
6061 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6158 static void qlt_abort_work(struct qla_tgt *tgt,
6161 struct scsi_qla_host *vha = tgt->vha;
6168 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
6170 if (tgt->tgt_stop)
6175 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
6177 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6182 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
6201 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6203 ha->tgt.tgt_ops->put_sess(sess);
6210 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6219 static void qlt_tmr_work(struct qla_tgt *tgt,
6223 struct scsi_qla_host *vha = tgt->vha;
6233 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
6235 if (tgt->tgt_stop)
6239 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
6241 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6246 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
6268 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6270 ha->tgt.tgt_ops->put_sess(sess);
6277 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
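
qlt_abort_work() and qlt_tmr_work() (lines 6158-6277) share one skeleton: under ha->tgt.sess_lock, bail if tgt_stop is set, look the session up by S_ID, drop the lock, do the work, then balance the lookup with put_sess(). Reduced to that skeleton, with the retry-on-unknown-session and termination paths collapsed:

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;			/* target is being stopped */
	}
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	if (!sess)
		return;			/* real code retries or terminates the exchange */

	/* ... run the abort or task-management op against sess ... */

	ha->tgt.tgt_ops->put_sess(sess);	/* drop the lookup's reference */
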
6284 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
6285 struct scsi_qla_host *vha = tgt->vha;
6288 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
6290 spin_lock_irqsave(&tgt->sess_work_lock, flags);
6291 while (!list_empty(&tgt->sess_works_list)) {
6293 tgt->sess_works_list.next, typeof(*prm),
6302 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6306 qlt_abort_work(tgt, prm);
6309 qlt_tmr_work(tgt, prm);
6316 spin_lock_irqsave(&tgt->sess_work_lock, flags);
6320 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
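
Lines 1635-1661 (qlt_sched_sess_work) and 6284-6320 (qlt_sess_work_fn) are the two ends of a small deferred-work queue: the producer appends a parameter block under sess_work_lock and calls schedule_work(&tgt->sess_work); the consumer pops one entry at a time, dispatching with the lock dropped. A sketch of the consumer loop, with the case labels following the driver's QLA_TGT_SESS_WORK_* naming:

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/* Unlink first so the dispatch can run unlocked. */
		list_del(&prm->sess_works_list_entry);
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		}
		kfree(prm);

		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
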
6326 struct qla_tgt *tgt;
6344 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
6345 if (!tgt) {
6351 tgt->qphints = kcalloc(ha->max_qpairs + 1,
6354 if (!tgt->qphints) {
6355 kfree(tgt);
6364 rc = btree_init64(&tgt->lun_qpair_map);
6366 kfree(tgt->qphints);
6367 kfree(tgt);
6372 h = &tgt->qphints[0];
6383 h = &tgt->qphints[i + 1];
6394 tgt->ha = ha;
6395 tgt->vha = base_vha;
6396 init_waitqueue_head(&tgt->waitQ);
6397 INIT_LIST_HEAD(&tgt->del_sess_list);
6398 spin_lock_init(&tgt->sess_work_lock);
6399 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
6400 INIT_LIST_HEAD(&tgt->sess_works_list);
6401 atomic_set(&tgt->tgt_global_resets_count, 0);
6403 base_vha->vha_tgt.qla_tgt = tgt;
6409 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
6412 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
6415 if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
6416 ha->tgt.tgt_ops->add_target(base_vha);
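
Lines 6326-6416 give qlt_add_target()'s setup order and its unwind on failure: tgt itself, then the qpair-hint array, then the LUN-map btree, each freed in reverse if a later step fails. Condensed, with the -ENOMEM returns assumed for the error paths:

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt)
		return -ENOMEM;

	/* One hint per qpair plus slot 0 for the default queue. */
	tgt->qphints = kcalloc(ha->max_qpairs + 1,
	    sizeof(struct qla_qpair_hint), GFP_KERNEL);
	if (!tgt->qphints) {
		kfree(tgt);
		return -ENOMEM;
	}

	rc = btree_init64(&tgt->lun_qpair_map);
	if (rc) {
		kfree(tgt->qphints);
		kfree(tgt);
		return -ENOMEM;
	}

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	atomic_set(&tgt->tgt_global_resets_count, 0);
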
6447 btree_for_each_safe32(&ha->tgt.host_map, key, node)
6448 btree_remove32(&ha->tgt.host_map, key);
6450 btree_destroy32(&ha->tgt.host_map);
6475 struct qla_tgt *tgt;
6484 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
6485 vha = tgt->vha;
6505 if (tgt->tgt_stop) {
6551 ha->tgt.tgt_ops = NULL;
6606 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6610 if (!tgt) {
6620 if (ha->tgt.num_act_qpairs > ha->max_qpairs)
6621 ha->tgt.num_act_qpairs = ha->max_qpairs;
6623 tgt->tgt_stopped = 0;
6651 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6654 if (!tgt) {
6699 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
6735 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
6740 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
6761 while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
6762 fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
6763 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6766 if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
6787 ha->tgt.atio_ring_index++;
6788 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
6789 ha->tgt.atio_ring_index = 0;
6790 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
6792 ha->tgt.atio_ring_ptr++;
6795 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6801 wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
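
Lines 6761-6801 are the ATIO ring consumer: entries are processed while the signature is not yet ATIO_PROCESSED (or the FCP command is flagged corrupted), and the ring index wraps modulo atio_q_length before the out-pointer is written back to the ISP. The advance-and-wrap step, reassembled from the scattered lines:

	/* Move to the next ATIO entry, wrapping at the end of the ring. */
	ha->tgt.atio_ring_index++;
	if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
		ha->tgt.atio_ring_index = 0;
		ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
	} else {
		ha->tgt.atio_ring_ptr++;
	}
	pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;

	/* Tell the ISP how far consumption has advanced. */
	wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
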
6847 if (!ha->tgt.saved_set) {
6849 ha->tgt.saved_exchange_count = nv->exchange_count;
6850 ha->tgt.saved_firmware_options_1 =
6852 ha->tgt.saved_firmware_options_2 =
6854 ha->tgt.saved_firmware_options_3 =
6856 ha->tgt.saved_set = 1;
6902 if (ha->tgt.saved_set) {
6903 nv->exchange_count = ha->tgt.saved_exchange_count;
6905 ha->tgt.saved_firmware_options_1;
6907 ha->tgt.saved_firmware_options_2;
6909 ha->tgt.saved_firmware_options_3;
6937 if (ha->tgt.node_name_set) {
6938 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6953 if (!ha->tgt.saved_set) {
6955 ha->tgt.saved_exchange_count = nv->exchange_count;
6956 ha->tgt.saved_firmware_options_1 =
6958 ha->tgt.saved_firmware_options_2 =
6960 ha->tgt.saved_firmware_options_3 =
6962 ha->tgt.saved_set = 1;
7005 if (ha->tgt.saved_set) {
7006 nv->exchange_count = ha->tgt.saved_exchange_count;
7008 ha->tgt.saved_firmware_options_1;
7010 ha->tgt.saved_firmware_options_2;
7012 ha->tgt.saved_firmware_options_3;
7040 if (ha->tgt.node_name_set) {
7041 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
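
Lines 6847-6938 (and the second controller family's twin at 6953-7041) implement a save-once/restore pattern for the NVRAM fields target mode rewrites: the first enable stashes exchange_count and firmware_options_1..3 behind a saved_set flag, a later disable writes them back, and an administratively set node name overrides the NVRAM one. The shape, condensed from both halves:

	/* Enabling target mode: remember the originals exactly once. */
	if (!ha->tgt.saved_set) {
		ha->tgt.saved_exchange_count = nv->exchange_count;
		ha->tgt.saved_firmware_options_1 = nv->firmware_options_1;
		ha->tgt.saved_firmware_options_2 = nv->firmware_options_2;
		ha->tgt.saved_firmware_options_3 = nv->firmware_options_3;
		ha->tgt.saved_set = 1;
	}

	/* Disabling target mode: put the saved values back. */
	if (ha->tgt.saved_set) {
		nv->exchange_count = ha->tgt.saved_exchange_count;
		nv->firmware_options_1 = ha->tgt.saved_firmware_options_1;
		nv->firmware_options_2 = ha->tgt.saved_firmware_options_2;
		nv->firmware_options_3 = ha->tgt.saved_firmware_options_3;
	}

	/* A configured node name takes precedence over NVRAM. */
	if (ha->tgt.node_name_set)
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
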
7094 rc = btree_init32(&ha->tgt.host_map);
7114 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
7118 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
7136 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
7138 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
7178 ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
7181 if (!ha->tgt.tgt_vp_map)
7184 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
7185 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
7186 &ha->tgt.atio_dma, GFP_KERNEL);
7187 if (!ha->tgt.atio_ring) {
7188 kfree(ha->tgt.tgt_vp_map);
7200 if (ha->tgt.atio_ring) {
7201 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
7202 sizeof(struct atio_from_isp), ha->tgt.atio_ring,
7203 ha->tgt.atio_dma);
7205 ha->tgt.atio_ring = NULL;
7206 ha->tgt.atio_dma = 0;
7207 kfree(ha->tgt.tgt_vp_map);
7208 ha->tgt.tgt_vp_map = NULL;
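
Lines 7178-7208 pair the per-adapter allocations with their teardown: the VP-index map comes from kcalloc(), the ATIO ring from dma_alloc_coherent() with one spare entry beyond atio_q_length, and the free path releases them in the same order while clearing the pointers. Condensed, with the -ENOMEM returns assumed:

	/* Allocation path. */
	ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
	    sizeof(struct qla_tgt_vp_map), GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}

	/* Teardown path. */
	if (ha->tgt.atio_ring)
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	ha->tgt.atio_ring = NULL;
	ha->tgt.atio_dma = 0;
	kfree(ha->tgt.tgt_vp_map);
	ha->tgt.tgt_vp_map = NULL;
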
7226 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
7229 slot = btree_lookup32(&vha->hw->tgt.host_map, key);
7233 rc = btree_insert32(&vha->hw->tgt.host_map,
7243 btree_update32(&vha->hw->tgt.host_map, key, vha);
7246 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
7251 slot = btree_lookup32(&vha->hw->tgt.host_map, key);
7253 btree_remove32(&vha->hw->tgt.host_map, key);
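
Lines 7226-7253 maintain the port-ID-to-vha host map: registration fills the VP-index slot, then either btree_insert32()s a new key or btree_update32()s an existing one, backing the slot out if the insert fails; unregistration looks the key up before btree_remove32(). A sketch of the registration half; how `key` is derived from the port ID is not visible in the listing:

	vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;

	slot = btree_lookup32(&vha->hw->tgt.host_map, key);
	if (!slot) {
		rc = btree_insert32(&vha->hw->tgt.host_map,
		    key, vha, GFP_ATOMIC);
		if (rc) {
			/* Insert failed: undo the VP-map assignment. */
			vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		}
	} else {
		btree_update32(&vha->hw->tgt.host_map, key, vha);
	}
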