Lines Matching defs:queue
91 struct nvmet_fc_tgt_queue *queue;
186 return (fodptr - fodptr->queue->fod);
198 * note: Association ID = Connection ID for queue 0
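
The note above records that for queue 0 (the admin queue) the Connection ID equals the Association ID. A minimal standalone sketch of how such a connection ID can be composed, assuming the association ID keeps its low bits clear so the queue ID can simply be OR-ed in; the helper name make_conn_id and the exact bit split are assumptions, not taken from the listing:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper: build a connection ID by OR-ing the queue id into
 * an association ID whose low bits are assumed to be zero. For qid 0
 * (the admin queue) the OR is a no-op, matching the
 * "Association ID = Connection ID for queue 0" note above.
 */
static uint64_t make_conn_id(uint64_t assoc_id, uint16_t qid)
{
	return assoc_id | qid;
}

int main(void)
{
	uint64_t assoc_id = 0x1234000000000000ULL;

	printf("admin queue conn id: 0x%llx\n",
	       (unsigned long long)make_conn_id(assoc_id, 0));
	printf("io queue 3 conn id:  0x%llx\n",
	       (unsigned long long)make_conn_id(assoc_id, 3));
	return 0;
}
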
250 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
251 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
472 * terminates the FC-NVME connections (per queue, both admin and io
638 struct nvmet_fc_tgt_queue *queue)
640 struct nvmet_fc_fcp_iod *fod = queue->fod;
643 for (i = 0; i < queue->sqsize; fod++, i++) {
646 fod->queue = queue;
651 list_add_tail(&fod->fcp_list, &queue->fod_list);
673 struct nvmet_fc_tgt_queue *queue)
675 struct nvmet_fc_fcp_iod *fod = queue->fod;
678 for (i = 0; i < queue->sqsize; fod++, i++) {
686 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
690 lockdep_assert_held(&queue->qlock);
692 fod = list_first_entry_or_null(&queue->fod_list,
698 * no queue reference is taken, as it was taken by the
699 * queue lookup just prior to the allocation. The iod
709 struct nvmet_fc_tgt_queue *queue,
715 * put all admin cmds on hw queue id 0. All io commands go to
716 * the respective hw queue based on a modulo basis
718 fcpreq->hwqid = queue->qid ?
719 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
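
The comment and assignment above spell out the hardware-queue routing: admin commands (qid 0) always land on hw queue 0, while io queues are spread over the LLDD's hardware queues modulo tgtport->ops->max_hw_queues. A small self-contained sketch of that mapping; pick_hwqid and the max_hw_queues value of 4 are illustrative stand-ins:

#include <stdio.h>

/* Sketch of the hw-queue selection shown above: qid 0 pins to hw queue 0,
 * io queues distribute on a modulo basis. */
static unsigned int pick_hwqid(unsigned int qid, unsigned int max_hw_queues)
{
	return qid ? ((qid - 1) % max_hw_queues) : 0;
}

int main(void)
{
	unsigned int max_hw_queues = 4;	/* assumed LLDD limit */

	for (unsigned int qid = 0; qid <= 8; qid++)
		printf("nvme queue %u -> hw queue %u\n",
		       qid, pick_hwqid(qid, max_hw_queues));
	return 0;
}

The qid - 1 simply makes the io queues start counting from hw queue 0 so they spread evenly across all max_hw_queues hardware queues.
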
731 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
736 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
757 /* release the queue lookup reference on the completed IO */
758 nvmet_fc_tgt_q_put(queue);
760 spin_lock_irqsave(&queue->qlock, flags);
761 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
764 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
765 spin_unlock_irqrestore(&queue->qlock, flags);
775 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
777 spin_unlock_irqrestore(&queue->qlock, flags);
793 * Leave the queue lookup get reference taken when
797 queue_work(queue->work_q, &fod->defer_work);
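
The lines above outline what happens when an IO's job structure (fod) is freed: the queue lookup reference for the completed IO is dropped, then, under the queue lock, the fod either returns to the free list or is handed straight to a command that was deferred for lack of a job structure, whose own lookup reference (taken at receive time) stays held while the work item on queue->work_q finishes the hand-off. A userspace model of that decision; struct queue_model and struct deferred_cmd are simplified stand-ins for the driver's structures, and the recycling of the defer element onto avail_defer_list is omitted:

#include <stdio.h>
#include <stdlib.h>

struct deferred_cmd {
	struct deferred_cmd *next;
	int cmd_id;
};

struct queue_model {
	struct deferred_cmd *pending;	/* models queue->pending_cmd_list */
	int free_jobs;			/* models the depth of queue->fod_list */
	int lookup_refs;		/* models the queue's kref */
};

static void free_job(struct queue_model *q)
{
	struct deferred_cmd *d;

	/* the reference taken for the just-completed IO is always dropped */
	q->lookup_refs--;

	d = q->pending;
	if (!d) {
		/* no backlog: the job structure returns to the free list */
		q->free_jobs++;
		return;
	}

	/* backlog: reuse the job for the deferred command right away;
	 * the reference that command took at receive time stays held */
	q->pending = d->next;
	printf("reusing job for deferred cmd %d\n", d->cmd_id);
	free(d);
}

int main(void)
{
	struct queue_model q = { .pending = NULL, .free_jobs = 0, .lookup_refs = 1 };
	struct deferred_cmd *d = malloc(sizeof(*d));

	if (!d)
		return 1;

	free_job(&q);			/* -> job freed, refs drop to 0 */

	d->next = NULL;
	d->cmd_id = 7;
	q.pending = d;
	q.lookup_refs = 2;		/* in-flight IO + deferred command */
	free_job(&q);			/* -> job reused for cmd 7 */
	return 0;
}
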
804 struct nvmet_fc_tgt_queue *queue;
810 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
811 if (!queue)
814 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
817 if (!queue->work_q)
820 queue->qid = qid;
821 queue->sqsize = sqsize;
822 queue->assoc = assoc;
823 INIT_LIST_HEAD(&queue->fod_list);
824 INIT_LIST_HEAD(&queue->avail_defer_list);
825 INIT_LIST_HEAD(&queue->pending_cmd_list);
826 atomic_set(&queue->connected, 0);
827 atomic_set(&queue->sqtail, 0);
828 atomic_set(&queue->rsn, 1);
829 atomic_set(&queue->zrspcnt, 0);
830 spin_lock_init(&queue->qlock);
831 kref_init(&queue->ref);
833 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
835 ret = nvmet_sq_init(&queue->nvme_sq);
840 assoc->queues[qid] = queue;
842 return queue;
845 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
846 destroy_workqueue(queue->work_q);
848 kfree(queue);
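
The allocation above sizes a single kzalloc() for the queue header plus one fod[] element per SQ slot via struct_size(), then initializes the lists, atomics, lock and kref, and unwinds (destroy the iod list, destroy the workqueue, kfree) if nvmet_sq_init() fails. A compact userspace illustration of that single-allocation flexible-array layout; struct tgt_queue and struct entry are simplified stand-ins:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct entry {
	int in_use;
};

struct tgt_queue {
	unsigned int qid;
	unsigned int sqsize;
	struct entry fod[];		/* flexible array, one per SQ slot */
};

static struct tgt_queue *alloc_queue(unsigned int qid, unsigned int sqsize)
{
	/* userspace equivalent of kzalloc(struct_size(queue, fod, sqsize)) */
	struct tgt_queue *q = calloc(1, offsetof(struct tgt_queue, fod) +
					sqsize * sizeof(struct entry));

	if (!q)
		return NULL;
	q->qid = qid;
	q->sqsize = sqsize;
	return q;
}

int main(void)
{
	struct tgt_queue *q = alloc_queue(1, 32);

	if (!q)
		return 1;
	printf("queue %u with %u fod slots in one allocation\n",
	       q->qid, q->sqsize);
	free(q);
	return 0;
}

Keeping the per-slot job structures in the same allocation as the queue leaves one failure point on setup and one kfree on teardown.
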
856 struct nvmet_fc_tgt_queue *queue =
859 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
861 destroy_workqueue(queue->work_q);
863 kfree_rcu(queue, rcu);
867 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
869 kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
873 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
875 return kref_get_unless_zero(&queue->ref);
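
The three routines above show the queue's lifetime rules: kref_init() at allocation, kref_get_unless_zero() in the lookup path so a dying queue is never handed out, and kref_put() releasing into nvmet_fc_tgt_queue_free(), which tears down the iod list and workqueue and frees the queue with kfree_rcu(). A userspace model of the get-unless-zero/put pattern using C11 atomics; the RCU-deferred free is not modelled:

#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int ref;
};

static bool get_unless_zero(struct obj *o)
{
	int v = atomic_load(&o->ref);

	/* refuse to resurrect an object already on its way to being freed */
	while (v != 0)
		if (atomic_compare_exchange_weak(&o->ref, &v, v + 1))
			return true;
	return false;
}

static void put(struct obj *o)
{
	if (atomic_fetch_sub(&o->ref, 1) == 1) {
		printf("last reference dropped, freeing\n");
		free(o);
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->ref, 1);	/* models kref_init() */
	if (get_unless_zero(o))		/* lookup takes its own reference */
		put(o);			/* IO completion drops it */
	put(o);				/* teardown drops the initial one */
	return 0;
}
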
880 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
882 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
883 struct nvmet_fc_fcp_iod *fod = queue->fod;
889 disconnect = atomic_xchg(&queue->connected, 0);
895 spin_lock_irqsave(&queue->qlock, flags);
897 for (i = 0; i < queue->sqsize; fod++, i++) {
916 /* Cleanup defer'ed IOs in queue */
916 /* Clean up deferred IOs in queue */
917 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
924 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
930 spin_unlock_irqrestore(&queue->qlock, flags);
941 /* release the queue lookup reference */
942 nvmet_fc_tgt_q_put(queue);
946 spin_lock_irqsave(&queue->qlock, flags);
948 spin_unlock_irqrestore(&queue->qlock, flags);
950 flush_workqueue(queue->work_q);
952 nvmet_sq_destroy(&queue->nvme_sq);
954 nvmet_fc_tgt_q_put(queue);
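
Queue deletion above begins with disconnect = atomic_xchg(&queue->connected, 0): only the caller that actually flips connected from 1 to 0 performs the disconnect-time work, so repeated or racing teardown paths stay harmless. A tiny standalone model of that run-once latch:

#include <stdio.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int connected = 1;

static void delete_queue(const char *who)
{
	/* models atomic_xchg(&queue->connected, 0) */
	bool disconnect = atomic_exchange(&connected, 0);

	if (disconnect)
		printf("%s: performing one-time disconnect work\n", who);
	else
		printf("%s: queue already disconnected, nothing to do\n", who);
}

int main(void)
{
	delete_queue("association teardown");
	delete_queue("ls disconnect");
	return 0;
}
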
962 struct nvmet_fc_tgt_queue *queue;
972 queue = assoc->queues[qid];
973 if (queue &&
974 (!atomic_read(&queue->connected) ||
975 !nvmet_fc_tgt_q_get(queue)))
976 queue = NULL;
978 return queue;
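
The lookup above only hands back a queue that is both marked connected and whose reference could still be taken; anything else comes back NULL, so the caller treats the command as arriving for a nonexistent queue. A simplified sketch of that shape, with non-atomic counters and made-up names, purely illustrative:

#include <stdbool.h>
#include <stddef.h>

struct tgt_queue {
	int connected;
	int refs;
};

static bool queue_get(struct tgt_queue *q)
{
	if (q->refs == 0)
		return false;	/* already being torn down */
	q->refs++;
	return true;
}

static struct tgt_queue *find_queue(struct tgt_queue **queues,
				    size_t nr, unsigned int qid)
{
	struct tgt_queue *q;

	if (qid >= nr)
		return NULL;
	q = queues[qid];
	if (q && (!q->connected || !queue_get(q)))
		q = NULL;	/* disconnected or dying: treat as absent */
	return q;
}

int main(void)
{
	struct tgt_queue admin = { .connected = 1, .refs = 1 };
	struct tgt_queue *queues[1] = { &admin };

	return find_queue(queues, 1, 0) ? 0 : 1;
}
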
1582 struct nvmet_fc_tgt_queue *queue;
1596 queue = assoc->queues[0];
1597 if (queue && queue->nvme_sq.ctrl == ctrl) {
1664 struct nvmet_fc_tgt_queue *queue;
1694 /* new association w/ admin queue */
1700 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1702 if (!queue) {
1720 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1721 atomic_set(&queue->connected, 1);
1722 queue->sqhd = 0; /* best place to init value */
1755 struct nvmet_fc_tgt_queue *queue;
1785 /* new io queue */
1791 queue = nvmet_fc_alloc_target_queue(iod->assoc,
1794 if (!queue)
1815 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1816 atomic_set(&queue->connected, 1);
1817 queue->sqhd = 0; /* best place to init value */
2180 rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
2181 if (!(rspcnt % fod->queue->ersp_ratio) ||
2186 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
2198 rsn = atomic_inc_return(&fod->queue->rsn);
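
The counters above drive the response-format choice: zrspcnt counts completions, and every ersp_ratio-th one (the ratio is taken from the Create Association / Create Connection commands, as the earlier ersp_ratio assignments show) goes out as a full ERSP carrying the CQE, as does any completion while the submission queue appears roughly 90% full; the rest get the short FC-NVME response. The driver also forces a full ERSP in further cases not visible in these matched lines. A standalone model of just the two conditions shown, with queue_model and queue_nearly_full as illustrative stand-ins and a nonzero ratio assumed:

#include <stdbool.h>
#include <stdio.h>

struct queue_model {
	unsigned int ersp_ratio;	/* from the create assoc/connection cmd */
	unsigned int zrspcnt;		/* responses since the last full ERSP */
	unsigned int sqsize;
	unsigned int sq_used;
};

static bool queue_nearly_full(const struct queue_model *q)
{
	/* models the queue_90percent_full() check above */
	return q->sq_used * 10 >= q->sqsize * 9;
}

static bool send_full_ersp(struct queue_model *q)
{
	unsigned int rspcnt = ++q->zrspcnt;

	return !(rspcnt % q->ersp_ratio) || queue_nearly_full(q);
}

int main(void)
{
	struct queue_model q = { .ersp_ratio = 4, .sqsize = 32, .sq_used = 2 };

	for (int i = 1; i <= 8; i++)
		printf("rsp %d: %s\n", i,
		       send_full_ersp(&q) ? "full ERSP" : "short rsp");
	return 0;
}

The rsn counter incremented on the matched line above this sketch appears to supply the sequence number carried by a full ERSP.
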
2227 nvmet_fc_free_fcp_iod(fod->queue, fod);
2409 nvmet_fc_free_fcp_iod(fod->queue, fod);
2433 nvmet_fc_free_fcp_iod(fod->queue, fod);
2467 fod->queue->sqhd = cqe->sq_head;
2478 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
2479 cqe->sq_id = cpu_to_le16(fod->queue->qid);
2565 &fod->queue->nvme_cq,
2566 &fod->queue->nvme_sq,
2577 atomic_inc(&fod->queue->sqtail);
2617 * nvmet_fc_fcp_iod) from the queue for the io and copies the
2629 * To alleviate this scenario, a temporary queue is maintained in the
2630 * transport for pending LLDD requests waiting for a queue job structure.
2631 * In these "overrun" cases, a temporary queue element is allocated
2633 * routine returns a -EOVERFLOW status. Subsequently, when a queue job
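
The block comment excerpted above describes the overrun path in the FCP receive routine: a job structure (nvmet_fc_fcp_iod) is pulled from the queue for each arriving command, and when none are free a temporary element is queued on pending_cmd_list and the routine returns -EOVERFLOW so the LLDD holds on to its resources until a job frees up; the code that follows, together with the free/reuse path earlier in the listing, implements this. A userspace model of that decision, with every structure a simplified stand-in and the avail_defer_list recycling and FIFO ordering omitted:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct pending_cmd {
	struct pending_cmd *next;
	int cmd_id;
};

struct queue_model {
	int free_jobs;			/* models queue->fod_list */
	struct pending_cmd *pending;	/* models queue->pending_cmd_list */
};

static int rcv_fcp_cmd(struct queue_model *q, int cmd_id)
{
	if (q->free_jobs > 0) {
		q->free_jobs--;
		printf("cmd %d started on a free job\n", cmd_id);
		return 0;
	}

	/* no job available: remember the command and tell the LLDD to
	 * keep its resources until a job structure frees up */
	struct pending_cmd *p = malloc(sizeof(*p));

	if (!p)
		return -ENOMEM;
	p->cmd_id = cmd_id;
	p->next = q->pending;
	q->pending = p;
	return -EOVERFLOW;
}

int main(void)
{
	struct queue_model q = { .free_jobs = 1, .pending = NULL };

	printf("first:  %d\n", rcv_fcp_cmd(&q, 1));	/* 0 */
	printf("second: %d\n", rcv_fcp_cmd(&q, 2));	/* -EOVERFLOW */
	free(q.pending);
	return 0;
}
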
2663 struct nvmet_fc_tgt_queue *queue;
2668 /* validate iu, so the connection id can be used to find the queue */
2675 queue = nvmet_fc_find_target_queue(tgtport,
2677 if (!queue)
2687 spin_lock_irqsave(&queue->qlock, flags);
2689 fod = nvmet_fc_alloc_fcp_iod(queue);
2691 spin_unlock_irqrestore(&queue->qlock, flags);
2698 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
2704 spin_unlock_irqrestore(&queue->qlock, flags);
2705 /* release the queue lookup reference */
2706 nvmet_fc_tgt_q_put(queue);
2710 deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
2716 spin_unlock_irqrestore(&queue->qlock, flags);
2721 /* release the queue lookup reference */
2722 nvmet_fc_tgt_q_put(queue);
2725 spin_lock_irqsave(&queue->qlock, flags);
2734 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
2736 /* NOTE: the queue lookup reference is still valid */
2738 spin_unlock_irqrestore(&queue->qlock, flags);
2772 struct nvmet_fc_tgt_queue *queue;
2779 queue = fod->queue;
2781 spin_lock_irqsave(&queue->qlock, flags);
2793 spin_unlock_irqrestore(&queue->qlock, flags);