Lines Matching refs:assoc
35 struct nvmet_fc_tgt_assoc *assoc;
142 struct nvmet_fc_tgt_assoc *assoc;
201 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
203 return (assoc->association_id | qid);
244 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
245 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
252 static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
475 nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
477 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
489 if (!tgtport->ops->ls_req || !assoc->hostport ||
490 assoc->hostport->invalid)
499 tgtport->fc_target_port.port_num, assoc->a_id);
512 lsop->hosthandle = assoc->hostport->hosthandle;
515 assoc->association_id);
522 tgtport->fc_target_port.port_num, assoc->a_id, ret);
789 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
803 if (!nvmet_fc_tgt_a_get(assoc))
807 assoc->tgtport->fc_target_port.port_num,
808 assoc->a_id, qid);
814 queue->assoc = assoc;
825 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
831 WARN_ON(assoc->queues[qid]);
832 spin_lock_irqsave(&assoc->tgtport->lock, flags);
833 assoc->queues[qid] = queue;
834 spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
839 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
842 nvmet_fc_tgt_a_put(assoc);
856 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
857 queue->assoc->queues[queue->qid] = NULL;
858 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
860 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
862 nvmet_fc_tgt_a_put(queue->assoc);
885 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
964 struct nvmet_fc_tgt_assoc *assoc;
974 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
975 if (association_id == assoc->association_id) {
976 queue = assoc->queues[qid];
1088 struct nvmet_fc_tgt_assoc *assoc =
1091 nvmet_fc_delete_target_assoc(assoc);
1092 nvmet_fc_tgt_a_put(assoc);
1098 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
1104 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
1105 if (!assoc)
1115 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
1116 if (IS_ERR(assoc->hostport))
1119 assoc->tgtport = tgtport;
1120 assoc->a_id = idx;
1121 INIT_LIST_HEAD(&assoc->a_list);
1122 kref_init(&assoc->ref);
1123 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
1124 atomic_set(&assoc->terminating, 0);
1139 assoc->association_id = ran;
1140 list_add_tail(&assoc->a_list, &tgtport->assoc_list);
1145 return assoc;
1152 kfree(assoc);
1159 struct nvmet_fc_tgt_assoc *assoc =
1161 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1166 nvmet_fc_xmt_disconnect_assoc(assoc);
1168 nvmet_fc_free_hostport(assoc->hostport);
1170 list_del(&assoc->a_list);
1171 oldls = assoc->rcv_disconn;
1176 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
1179 tgtport->fc_target_port.port_num, assoc->a_id);
1180 kfree(assoc);
1185 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
1187 kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
1191 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
1193 return kref_get_unless_zero(&assoc->ref);
1197 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
1199 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1204 terminating = atomic_xchg(&assoc->terminating, 1);
1212 queue = assoc->queues[i];
1226 tgtport->fc_target_port.port_num, assoc->a_id);
1228 nvmet_fc_tgt_a_put(assoc);
1235 struct nvmet_fc_tgt_assoc *assoc;
1240 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1241 if (association_id == assoc->association_id) {
1242 ret = assoc;
1243 if (!nvmet_fc_tgt_a_get(assoc))
1476 struct nvmet_fc_tgt_assoc *assoc, *next;
1480 list_for_each_entry_safe(assoc, next,
1482 if (!nvmet_fc_tgt_a_get(assoc))
1484 if (!schedule_work(&assoc->del_work))
1486 nvmet_fc_tgt_a_put(assoc);
1525 struct nvmet_fc_tgt_assoc *assoc, *next;
1530 list_for_each_entry_safe(assoc, next,
1532 if (!assoc->hostport ||
1533 assoc->hostport->hosthandle != hosthandle)
1535 if (!nvmet_fc_tgt_a_get(assoc))
1537 assoc->hostport->invalid = 1;
1539 if (!schedule_work(&assoc->del_work))
1541 nvmet_fc_tgt_a_put(assoc);
1558 struct nvmet_fc_tgt_assoc *assoc;
1572 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1573 queue = assoc->queues[0];
1575 if (nvmet_fc_tgt_a_get(assoc))
1585 if (!schedule_work(&assoc->del_work))
1587 nvmet_fc_tgt_a_put(assoc);
1671 iod->assoc = nvmet_fc_alloc_target_assoc(
1673 if (!iod->assoc)
1676 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1680 nvmet_fc_tgt_a_put(iod->assoc);
1702 tgtport->fc_target_port.port_num, iod->assoc->a_id);
1717 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1762 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1764 if (!iod->assoc)
1767 queue = nvmet_fc_alloc_target_queue(iod->assoc,
1774 nvmet_fc_tgt_a_put(iod->assoc);
1807 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1823 struct nvmet_fc_tgt_assoc *assoc = NULL;
1832 /* match an active association - takes an assoc ref if !NULL */
1833 assoc = nvmet_fc_find_target_assoc(tgtport,
1835 iod->assoc = assoc;
1836 if (!assoc)
1840 if (ret || !assoc) {
1863 nvmet_fc_tgt_a_put(assoc);
1875 oldls = assoc->rcv_disconn;
1876 assoc->rcv_disconn = iod;
1879 nvmet_fc_delete_target_assoc(assoc);
1885 tgtport->fc_target_port.port_num, assoc->a_id);
1950 iod->assoc = NULL;
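
The matches above cluster around two ideas. First, a connection ID is formed by OR-ing the queue ID into the association ID (lines 201-203), which only works because the allocator keeps the low bits of association_id clear. Second, every path that touches an association takes a reference first with nvmet_fc_tgt_a_get() (a kref_get_unless_zero()) and drops it with nvmet_fc_tgt_a_put() when done (lines 1185-1193, 1235-1243, and the teardown and LS-handling paths). The sketch below is a minimal, self-contained userspace illustration of that pattern under those assumptions, not the kernel implementation; the names xref_assoc, assoc_get, assoc_put, and make_connid are invented for the example.

	/*
	 * Userspace sketch of the get-unless-zero refcount pattern the
	 * listing revolves around: an object may only be used after a
	 * successful "get", and the final "put" frees it.
	 */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct xref_assoc {
		atomic_int	ref;		/* plays the role of the kref */
		unsigned long	association_id;
	};

	/* Mirrors the "association_id | qid" encoding at line 203 above;
	 * assumes the low bits of association_id are reserved for the qid. */
	static unsigned long make_connid(const struct xref_assoc *a, unsigned int qid)
	{
		return a->association_id | qid;
	}

	/* Succeeds only while at least one reference is still held,
	 * like kref_get_unless_zero() in the lookup paths above. */
	static bool assoc_get(struct xref_assoc *a)
	{
		int old = atomic_load(&a->ref);

		while (old > 0) {
			if (atomic_compare_exchange_weak(&a->ref, &old, old + 1))
				return true;
		}
		return false;			/* already being torn down */
	}

	/* Drops a reference; the last put frees the object. */
	static void assoc_put(struct xref_assoc *a)
	{
		if (atomic_fetch_sub(&a->ref, 1) == 1) {
			printf("freeing assoc %lx\n", a->association_id);
			free(a);
		}
	}

	int main(void)
	{
		struct xref_assoc *a = calloc(1, sizeof(*a));

		if (!a)
			return 1;
		atomic_store(&a->ref, 1);	/* initial reference, as kref_init() */
		a->association_id = 0x1230000;

		if (assoc_get(a)) {		/* lookup takes its own reference */
			printf("connid for qid 1: %lx\n", make_connid(a, 1));
			assoc_put(a);		/* lookup releases it */
		}
		assoc_put(a);			/* drop the initial reference */
		return 0;
	}

The get-unless-zero step is what lets lookup and teardown race safely: once the final put has run, no new reference can be taken, so a lookup that loses the race simply skips the association, which is how nvmet_fc_find_target_assoc() and the list walks at lines 1476-1541 behave in the listing.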