Lines Matching defs:queue (drivers/nvme/target/fc.c, the NVMe over Fibre Channel target transport)

91 	struct nvmet_fc_tgt_queue	*queue;
182 return (fodptr - fodptr->queue->fod);
194 * note: Association ID = Connection ID for queue 0
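The note at line 194 falls out of how the transport encodes connection IDs: the queue ID is OR-ed into the low bits of the association ID, so for the admin queue (qid 0) the two values coincide. A minimal sketch of that encoding, assuming the queue ID occupies the low 16 bits (the demo_* names are illustrative, not the driver's symbols):

#include <linux/types.h>

#define DEMO_QUEUEID_MASK	((u64)0xffff)	/* assumed: qid in the low 16 bits */

/* association IDs are assumed to have their low 16 bits clear */
static inline u64 demo_make_connid(u64 association_id, u16 qid)
{
	return association_id | qid;
}

static inline u16 demo_connid_to_qid(u64 connection_id)
{
	return (u16)(connection_id & DEMO_QUEUEID_MASK);
}

/* for qid == 0: demo_make_connid(aid, 0) == aid, hence the note above */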
246 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
247 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
460 * terminates the FC-NVME connections (per queue, both admin and io
626 struct nvmet_fc_tgt_queue *queue)
628 struct nvmet_fc_fcp_iod *fod = queue->fod;
631 for (i = 0; i < queue->sqsize; fod++, i++) {
634 fod->queue = queue;
639 list_add_tail(&fod->fcp_list, &queue->fod_list);
661 struct nvmet_fc_tgt_queue *queue)
663 struct nvmet_fc_fcp_iod *fod = queue->fod;
666 for (i = 0; i < queue->sqsize; fod++, i++) {
674 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
678 lockdep_assert_held(&queue->qlock);
680 fod = list_first_entry_or_null(&queue->fod_list,
686 * no queue reference is taken, as it was taken by the
687 * queue lookup just prior to the allocation. The iod
697 struct nvmet_fc_tgt_queue *queue,
703 * put all admin cmds on hw queue id 0. All io commands go to
704 * the respective hw queue based on a modulo basis
706 fcpreq->hwqid = queue->qid ?
707 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
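Lines 703-707 spell out the dispatch policy: admin commands (qid 0) always ride hardware queue 0, and I/O queue qid N lands on hardware queue (N - 1) modulo the LLDD's max_hw_queues. A small self-contained illustration of that mapping (the choice of 4 hardware queues is arbitrary):

#include <stdio.h>

/* same expression as lines 706-707, pulled out for illustration */
static unsigned int qid_to_hwqid(unsigned int qid, unsigned int max_hw_queues)
{
	return qid ? ((qid - 1) % max_hw_queues) : 0;
}

int main(void)
{
	unsigned int qid;

	/* with 4 hw queues: qid 0 -> 0, qids 1..8 -> 0 1 2 3 0 1 2 3 */
	for (qid = 0; qid <= 8; qid++)
		printf("qid %u -> hwqid %u\n", qid, qid_to_hwqid(qid, 4));
	return 0;
}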
719 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
724 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
745 /* release the queue lookup reference on the completed IO */
746 nvmet_fc_tgt_q_put(queue);
748 spin_lock_irqsave(&queue->qlock, flags);
749 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
752 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
753 spin_unlock_irqrestore(&queue->qlock, flags);
763 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
765 spin_unlock_irqrestore(&queue->qlock, flags);
781 * Leave the queue lookup get reference taken when
785 queue_work(queue->work_q, &fod->defer_work);
792 struct nvmet_fc_tgt_queue *queue;
799 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
800 if (!queue)
806 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
809 if (!queue->work_q)
812 queue->qid = qid;
813 queue->sqsize = sqsize;
814 queue->assoc = assoc;
815 INIT_LIST_HEAD(&queue->fod_list);
816 INIT_LIST_HEAD(&queue->avail_defer_list);
817 INIT_LIST_HEAD(&queue->pending_cmd_list);
818 atomic_set(&queue->connected, 0);
819 atomic_set(&queue->sqtail, 0);
820 atomic_set(&queue->rsn, 1);
821 atomic_set(&queue->zrspcnt, 0);
822 spin_lock_init(&queue->qlock);
823 kref_init(&queue->ref);
825 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
827 ret = nvmet_sq_init(&queue->nvme_sq);
833 assoc->queues[qid] = queue;
836 return queue;
839 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
840 destroy_workqueue(queue->work_q);
844 kfree(queue);
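The allocation at line 799 sizes the queue header and its trailing per-command fod[] array in a single kzalloc() via struct_size(), which is why nvmet_fc_prep_fcp_iodlist() (lines 626-639) can simply walk queue->fod for sqsize entries. A minimal sketch of that flexible-array pattern with simplified stand-in structures:

#include <linux/slab.h>
#include <linux/overflow.h>

struct demo_iod {
	int busy;
};

struct demo_tgt_queue {
	u16 sqsize;
	struct demo_iod fod[];	/* flexible array member, sized at alloc time */
};

static struct demo_tgt_queue *demo_alloc_queue(u16 sqsize)
{
	struct demo_tgt_queue *q;

	/* struct_size() = sizeof(*q) + sqsize * sizeof(q->fod[0]),
	 * with overflow checking, matching the kzalloc at line 799 */
	q = kzalloc(struct_size(q, fod, sqsize), GFP_KERNEL);
	if (q)
		q->sqsize = sqsize;
	return q;
}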
852 struct nvmet_fc_tgt_queue *queue =
856 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
857 queue->assoc->queues[queue->qid] = NULL;
858 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
860 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
862 nvmet_fc_tgt_a_put(queue->assoc);
864 destroy_workqueue(queue->work_q);
866 kfree(queue);
870 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
872 kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
876 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
878 return kref_get_unless_zero(&queue->ref);
883 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
885 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
886 struct nvmet_fc_fcp_iod *fod = queue->fod;
892 disconnect = atomic_xchg(&queue->connected, 0);
898 spin_lock_irqsave(&queue->qlock, flags);
900 for (i = 0; i < queue->sqsize; fod++, i++) {
919 /* Cleanup defer'ed IOs in queue */
920 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
927 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
933 spin_unlock_irqrestore(&queue->qlock, flags);
944 /* release the queue lookup reference */
945 nvmet_fc_tgt_q_put(queue);
949 spin_lock_irqsave(&queue->qlock, flags);
951 spin_unlock_irqrestore(&queue->qlock, flags);
953 flush_workqueue(queue->work_q);
955 nvmet_sq_destroy(&queue->nvme_sq);
957 nvmet_fc_tgt_q_put(queue);
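The atomic_xchg() at line 892 both marks the queue disconnected and tells the caller whether it is the one that observed the 1 to 0 transition, so the drain and teardown that follow run at most once per connected queue. A tiny sketch of that one-shot guard (demo_* names are hypothetical):

#include <linux/atomic.h>

static void demo_one_shot_teardown(void)
{
	/* hypothetical one-time cleanup work */
}

/* whichever caller flips connected from 1 to 0 owns the teardown;
 * concurrent or repeated callers read back 0 and return immediately */
static void demo_delete(atomic_t *connected)
{
	if (!atomic_xchg(connected, 0))
		return;

	demo_one_shot_teardown();
}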
965 struct nvmet_fc_tgt_queue *queue;
976 queue = assoc->queues[qid];
977 if (queue &&
978 (!atomic_read(&queue->connected) ||
979 !nvmet_fc_tgt_q_get(queue)))
980 queue = NULL;
982 return queue;
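The lookup at lines 976-980 only returns a queue that is still marked connected and whose refcount could be bumped; that bump is the "queue lookup reference" the comments at lines 686, 745 and 2681 refer to, and it is dropped when the iod completes or on the receive-path error exits. A generic sketch of the pattern (demo_* names are stand-ins; the driver's own locking around the array access is elided):

#include <linux/kref.h>
#include <linux/atomic.h>

struct demo_obj {
	struct kref	ref;
	atomic_t	connected;
};

static struct demo_obj *demo_lookup(struct demo_obj **slots, unsigned int idx)
{
	struct demo_obj *obj = slots[idx];

	/* reject slots that are disconnected or already headed for free:
	 * kref_get_unless_zero() fails once the refcount has reached zero */
	if (obj &&
	    (!atomic_read(&obj->connected) || !kref_get_unless_zero(&obj->ref)))
		obj = NULL;

	return obj;	/* on success the caller owns one reference */
}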
1200 struct nvmet_fc_tgt_queue *queue;
1212 queue = assoc->queues[i];
1213 if (queue) {
1214 if (!nvmet_fc_tgt_q_get(queue))
1217 nvmet_fc_delete_target_queue(queue);
1218 nvmet_fc_tgt_q_put(queue);
1559 struct nvmet_fc_tgt_queue *queue;
1573 queue = assoc->queues[0];
1574 if (queue && queue->nvme_sq.ctrl == ctrl) {
1640 struct nvmet_fc_tgt_queue *queue;
1670 /* new association w/ admin queue */
1676 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1678 if (!queue) {
1696 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1697 atomic_set(&queue->connected, 1);
1698 queue->sqhd = 0; /* best place to init value */
1731 struct nvmet_fc_tgt_queue *queue;
1761 /* new io queue */
1767 queue = nvmet_fc_alloc_target_queue(iod->assoc,
1770 if (!queue)
1791 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1792 atomic_set(&queue->connected, 1);
1793 queue->sqhd = 0; /* best place to init value */
2157 rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
2158 if (!(rspcnt % fod->queue->ersp_ratio) ||
2163 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
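Lines 2157-2163 implement response coalescing: most completions go out as a minimal FCP response, but every ersp_ratio-th one, and any completion seen while the submission queue looks about 90% full (other trigger conditions are elided from this listing), is sent as a full extended response so the host can resynchronize sq_head. With ersp_ratio = 8, for example, responses 8, 16, 24 and so on carry the full ERSP. A toy model of just the counter and fullness inputs:

#include <stdbool.h>
#include <stdio.h>

/* toy version of the "send a full ERSP?" decision; only the counter/ratio
 * and 90%-full inputs from lines 2157-2163 are modeled here */
static bool send_full_ersp(unsigned int rspcnt, unsigned int ersp_ratio,
			   bool queue_90percent_full)
{
	return !(rspcnt % ersp_ratio) || queue_90percent_full;
}

int main(void)
{
	unsigned int n;

	/* with a ratio of 8, responses 8, 16, 24 get the full ERSP */
	for (n = 1; n <= 24; n++)
		if (send_full_ersp(n, 8, false))
			printf("response %u -> full ERSP\n", n);
	return 0;
}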
2175 rsn = atomic_inc_return(&fod->queue->rsn);
2204 nvmet_fc_free_fcp_iod(fod->queue, fod);
2386 nvmet_fc_free_fcp_iod(fod->queue, fod);
2410 nvmet_fc_free_fcp_iod(fod->queue, fod);
2444 fod->queue->sqhd = cqe->sq_head;
2455 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
2456 cqe->sq_id = cpu_to_le16(fod->queue->qid);
2541 &fod->queue->nvme_cq,
2542 &fod->queue->nvme_sq,
2553 atomic_inc(&fod->queue->sqtail);
2593 * nvmet_fc_fcp_iod) from the queue for the io and copies the
2605 * To alleviate this scenario, a temporary queue is maintained in the
2606 * transport for pending LLDD requests waiting for a queue job structure.
2607 * In these "overrun" cases, a temporary queue element is allocated
2609 * routine returns a -EOVERFLOW status. Subsequently, when a queue job
2639 struct nvmet_fc_tgt_queue *queue;
2644 /* validate iu, so the connection id can be used to find the queue */
2651 queue = nvmet_fc_find_target_queue(tgtport,
2653 if (!queue)
2663 spin_lock_irqsave(&queue->qlock, flags);
2665 fod = nvmet_fc_alloc_fcp_iod(queue);
2667 spin_unlock_irqrestore(&queue->qlock, flags);
2674 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
2680 spin_unlock_irqrestore(&queue->qlock, flags);
2681 /* release the queue lookup reference */
2682 nvmet_fc_tgt_q_put(queue);
2686 deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
2692 spin_unlock_irqrestore(&queue->qlock, flags);
2697 /* release the queue lookup reference */
2698 nvmet_fc_tgt_q_put(queue);
2701 spin_lock_irqsave(&queue->qlock, flags);
2710 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
2712 /* NOTE: the queue lookup reference is still valid */
2714 spin_unlock_irqrestore(&queue->qlock, flags);
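Lines 2639-2714 are the receive half of the deferral scheme described in the comment block around lines 2593-2609, and lines 745-785 are its completion half: when no free iod exists the request is parked (reusing an element from avail_defer_list when possible, otherwise allocating one), the LLDD is told -EOVERFLOW, and the queue lookup reference is deliberately kept; later, a completing iod is handed straight to the parked request via defer_work instead of returning to the free list. A condensed, self-contained sketch of both halves under simplified structures (demo_* names are stand-ins; reference counting and the actual I/O start are omitted):

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_defer {			/* parks one LLDD request */
	struct list_head	req_list;
	void			*req;
};

struct demo_iod {			/* per-command job structure */
	struct list_head	fcp_list;
	void			*deferred_req;
	struct work_struct	defer_work;	/* INIT_WORK()'d elsewhere */
};

struct demo_queue {
	spinlock_t		qlock;
	struct list_head	fod_list;		/* free iods */
	struct list_head	avail_defer_list;	/* recycled defer elems */
	struct list_head	pending_cmd_list;	/* parked requests */
	struct workqueue_struct	*work_q;
};

/* receive side: use a free iod if one exists, otherwise park the request
 * and report -EOVERFLOW so the LLDD holds the exchange until redriven */
static int demo_rcv_req(struct demo_queue *q, void *req)
{
	struct demo_iod *iod;
	struct demo_defer *d;
	unsigned long flags;

	spin_lock_irqsave(&q->qlock, flags);
	iod = list_first_entry_or_null(&q->fod_list, struct demo_iod, fcp_list);
	if (iod) {
		list_del(&iod->fcp_list);
		spin_unlock_irqrestore(&q->qlock, flags);
		/* normal path: start the I/O on this iod (not shown) */
		return 0;
	}

	d = list_first_entry_or_null(&q->avail_defer_list,
				     struct demo_defer, req_list);
	if (d)
		list_del(&d->req_list);
	spin_unlock_irqrestore(&q->qlock, flags);

	if (!d) {
		d = kmalloc(sizeof(*d), GFP_ATOMIC);
		if (!d)
			return -ENOMEM;
	}

	spin_lock_irqsave(&q->qlock, flags);
	d->req = req;
	list_add_tail(&d->req_list, &q->pending_cmd_list);
	spin_unlock_irqrestore(&q->qlock, flags);

	return -EOVERFLOW;
}

/* completion side: a freed iod first looks for a parked request and, if it
 * finds one, is re-dispatched via defer_work instead of being returned to
 * the free list */
static void demo_free_iod(struct demo_queue *q, struct demo_iod *iod)
{
	struct demo_defer *d;
	unsigned long flags;

	spin_lock_irqsave(&q->qlock, flags);
	d = list_first_entry_or_null(&q->pending_cmd_list,
				     struct demo_defer, req_list);
	if (!d) {
		list_add_tail(&iod->fcp_list, &q->fod_list);
		spin_unlock_irqrestore(&q->qlock, flags);
		return;
	}

	list_del(&d->req_list);
	iod->deferred_req = d->req;
	list_add_tail(&d->req_list, &q->avail_defer_list);	/* recycle */
	spin_unlock_irqrestore(&q->qlock, flags);

	queue_work(q->work_q, &iod->defer_work);
}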
2748 struct nvmet_fc_tgt_queue *queue;
2755 queue = fod->queue;
2757 spin_lock_irqsave(&queue->qlock, flags);
2769 spin_unlock_irqrestore(&queue->qlock, flags);