Lines Matching defs:queue

677 list_for_each_entry(tgt, &vhost->targets, queue)
710 list_for_each_entry(tgt, &vhost->targets, queue) {
777 * @queue: ibmvfc queue struct
783 struct ibmvfc_queue *queue,
787 struct ibmvfc_event_pool *pool = &queue->evt_pool;
807 INIT_LIST_HEAD(&queue->sent);
808 INIT_LIST_HEAD(&queue->free);
809 spin_lock_init(&queue->l_lock);
826 evt->queue = queue;
828 list_add_tail(&evt->queue_list, &queue->free);
838 * @queue: ibmvfc queue struct
842 struct ibmvfc_queue *queue)
845 struct ibmvfc_event_pool *pool = &queue->evt_pool;
865 * ibmvfc_free_queue - Deallocate queue
867 * @queue: ibmvfc queue struct
872 struct ibmvfc_queue *queue)
876 dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
877 free_page((unsigned long)queue->msgs.handle);
878 queue->msgs.handle = NULL;
880 ibmvfc_free_event_pool(vhost, queue);
978 /* Clean out the queue */
1026 struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
1033 spin_lock_irqsave(&evt->queue->l_lock, flags);
1034 list_add_tail(&evt->queue_list, &evt->queue->free);
1037 spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1221 list_for_each_entry(tgt, &vhost->targets, queue)
1511 * @queue: ibmvfc queue struct
1515 static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
1520 spin_lock_irqsave(&queue->l_lock, flags);
1521 if (list_empty(&queue->free)) {
1522 ibmvfc_log(queue->vhost, 4, "empty event pool on queue:%ld\n", queue->hwq_id);
1523 spin_unlock_irqrestore(&queue->l_lock, flags);
1526 evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
1529 spin_unlock_irqrestore(&queue->l_lock, flags);
1705 spin_lock_irqsave(&evt->queue->l_lock, flags);
1706 list_add_tail(&evt->queue_list, &evt->queue->sent);
1711 if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
1713 evt->queue->vios_cookie,
1724 spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1734 dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
1750 spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1803 list_for_each_entry(tgt, &vhost->targets, queue) {
2100 list_for_each_entry(tgt, &vhost->targets, queue) {
2440 list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
2525 static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
2535 evt = ibmvfc_get_event(queue);
2618 status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status);
3068 list_for_each_entry(tgt, &vhost->targets, queue) {
3208 list_for_each_entry(tgt, &vhost->targets, queue) {
3243 * @crq: Command/Response queue
3245 * @evt_doneq: Event done queue
3323 spin_lock(&evt->queue->l_lock);
3325 spin_unlock(&evt->queue->l_lock);
3432 * ibmvfc_change_queue_depth - Change the device's queue depth
3679 * ibmvfc_next_async_crq - Returns the next entry in async queue
3683 * Pointer to next entry in queue / NULL if empty
3702 * ibmvfc_next_crq - Returns the next entry in message queue
3706 * Pointer to next entry in queue / NULL if empty
3710 struct ibmvfc_queue *queue = &vhost->crq;
3713 crq = &queue->msgs.crq[queue->cur];
3715 if (++queue->cur == queue->size)
3716 queue->cur = 0;
3844 if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) {
3856 spin_lock(&evt->queue->l_lock);
3858 spin_unlock(&evt->queue->l_lock);
4828 list_for_each_entry(tgt, &vhost->targets, queue) {
4835 list_for_each_entry(tgt, &vhost->targets, queue) {
4892 list_add_tail(&tgt->queue, &vhost->targets);
5194 dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
5347 list_for_each_entry(tgt, &vhost->targets, queue) {
5367 list_for_each_entry(tgt, &vhost->targets, queue) {
5397 list_for_each_entry(tgt, &vhost->targets, queue)
5400 list_for_each_entry(tgt, &vhost->targets, queue)
5408 list_for_each_entry(tgt, &vhost->targets, queue)
5411 list_for_each_entry(tgt, &vhost->targets, queue)
5481 list_del(&tgt->queue);
5601 list_for_each_entry(tgt, &vhost->targets, queue)
5606 list_for_each_entry(tgt, &vhost->targets, queue) {
5618 list_for_each_entry(tgt, &vhost->targets, queue) {
5630 list_for_each_entry(tgt, &vhost->targets, queue) {
5635 list_del(&tgt->queue);
5705 list_for_each_entry(tgt, &vhost->targets, queue) {
5753 * ibmvfc_alloc_queue - Allocate queue
5755 * @queue: ibmvfc queue to allocate
5756 * @fmt: queue format to allocate
5762 struct ibmvfc_queue *queue,
5770 spin_lock_init(&queue->_lock);
5771 queue->q_lock = &queue->_lock;
5775 fmt_size = sizeof(*queue->msgs.crq);
5779 fmt_size = sizeof(*queue->msgs.async);
5782 fmt_size = sizeof(*queue->msgs.scrq);
5787 dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
5791 if (ibmvfc_init_event_pool(vhost, queue, pool_size)) {
5796 queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
5797 if (!queue->msgs.handle)
5800 queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
5803 if (dma_mapping_error(dev, queue->msg_token)) {
5804 free_page((unsigned long)queue->msgs.handle);
5805 queue->msgs.handle = NULL;
5809 queue->cur = 0;
5810 queue->fmt = fmt;
5811 queue->size = PAGE_SIZE / fmt_size;
5813 queue->vhost = vhost;
5953 /* Clean out the queue */
6093 dev_err(dev, "Couldn't allocate/map async queue.\n");
6190 list_for_each_entry(tgt, &vhost->targets, queue) {
6310 list_add_tail(&vhost->queue, &ibmvfc_head);
6367 list_del(&vhost->queue);
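
Taken together, the matches above trace the driver's per-queue event pool: each ibmvfc_queue keeps a free list and a sent list guarded by l_lock (lines 807-809), an event is pulled off free when issued, moved to sent while it is outstanding (lines 1705-1706), and returned to free on completion (lines 1033-1037). The following is a minimal sketch of that pattern only, with simplified structures and names (sketch_*); it is not the actual ibmvfc code, and real events also carry CRQ message buffers, DMA mappings, and completion callbacks.

/*
 * Sketch of the free/sent event-pool pattern visible in the matches above.
 * Assumption: only the two lists and the per-queue l_lock are modeled.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct sketch_event {
	struct list_head queue_list;	/* links the event into free or sent */
};

struct sketch_queue {
	struct list_head free;		/* idle events, ready to be issued */
	struct list_head sent;		/* events outstanding on the VIOS */
	spinlock_t l_lock;		/* protects both lists */
};

/* Pop an event off the free list, or return NULL if the pool is empty
 * (cf. ibmvfc_get_event at line 1515). */
static struct sketch_event *sketch_get_event(struct sketch_queue *queue)
{
	struct sketch_event *evt = NULL;
	unsigned long flags;

	spin_lock_irqsave(&queue->l_lock, flags);
	if (!list_empty(&queue->free)) {
		evt = list_first_entry(&queue->free, struct sketch_event,
				       queue_list);
		list_del(&evt->queue_list);
	}
	spin_unlock_irqrestore(&queue->l_lock, flags);
	return evt;
}

/* On submission the event moves to the sent list (cf. lines 1705-1706). */
static void sketch_send_event(struct sketch_queue *queue,
			      struct sketch_event *evt)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->l_lock, flags);
	list_add_tail(&evt->queue_list, &queue->sent);
	spin_unlock_irqrestore(&queue->l_lock, flags);
}

/* On completion the event returns to the free list (cf. lines 1033-1037). */
static void sketch_free_event(struct sketch_queue *queue,
			      struct sketch_event *evt)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->l_lock, flags);
	list_add_tail(&evt->queue_list, &queue->free);
	spin_unlock_irqrestore(&queue->l_lock, flags);
}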