/kernel/linux/linux-5.10/net/sched/
sch_multiq.c
     25  struct Qdisc **queues;                          (member)
     54  return q->queues[0];                            in multiq_classify()
     56  return q->queues[band];                         in multiq_classify()
    105  qdisc = q->queues[q->curband];                  in multiq_dequeue()
    137  qdisc = q->queues[curband];                     in multiq_peek()
    154  qdisc_reset(q->queues[band]);                   in multiq_reset()
    166  qdisc_put(q->queues[band]);                     in multiq_destroy()
    168  kfree(q->queues);                               in multiq_destroy()
    196  if (q->queues[i] != &noop_qdisc) {              in multiq_tune()
    197  struct Qdisc *child = q->queues[                in multiq_tune()
    [all...]
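The multiq hits above trace the life cycle of a classful qdisc's band array: classify picks `queues[band]`, dequeue rotates `curband`, and teardown resets, puts, and frees each child. A toy userspace sketch of the round-robin dequeue step, with made-up `band_queue`/`band_dequeue` names rather than the kernel's Qdisc API:

```c
#include <stdio.h>

#define NBANDS 3

/* Toy stand-in for a per-band child queue; not the kernel's struct Qdisc. */
struct band_queue {
    int items[8];
    unsigned int head, tail;    /* head == tail means empty */
};

static void band_enqueue(struct band_queue *q, int v)
{
    q->items[q->tail++ & 7] = v;    /* toy ring: no overflow check */
}

static int band_dequeue(struct band_queue *q, int *out)
{
    if (q->head == q->tail)
        return 0;
    *out = q->items[q->head++ & 7];
    return 1;
}

/*
 * Round-robin dequeue in the spirit of multiq_dequeue(): resume at the
 * band after the one served last, and visit each band at most once.
 */
static int multiq_style_dequeue(struct band_queue *queues, int *curband, int *out)
{
    for (int scanned = 0; scanned < NBANDS; scanned++) {
        *curband = (*curband + 1) % NBANDS;
        if (band_dequeue(&queues[*curband], out))
            return 1;
    }
    return 0;   /* every band empty */
}

int main(void)
{
    struct band_queue queues[NBANDS] = { 0 };
    int curband = NBANDS - 1;   /* so the first dequeue starts at band 0 */
    int v;

    band_enqueue(&queues[0], 10);
    band_enqueue(&queues[2], 30);
    band_enqueue(&queues[0], 11);

    while (multiq_style_dequeue(queues, &curband, &v))
        printf("band %d -> %d\n", curband, v);
    return 0;
}
```

Starting `curband` at the last band makes the first dequeue land on band 0, matching the "advance, then try" shape of the loop.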
sch_prio.c
     26  struct Qdisc *queues[TCQ_PRIO_BANDS];           (member)
     57  return q->queues[q->prio2band[band & TC_PRIO_MAX]];   in prio_classify()
     63  return q->queues[q->prio2band[0]];              in prio_classify()
     65  return q->queues[band];                         in prio_classify()
    103  struct Qdisc *qdisc = q->queues[prio];          in prio_peek()
    117  struct Qdisc *qdisc = q->queues[prio];          in prio_dequeue()
    137  qdisc_reset(q->queues[prio]);                   in prio_reset()
    173  qdisc_put(q->queues[prio]);                     in prio_destroy()
    180  struct Qdisc *queues[TCQ_PRIO_BANDS];           in prio_tune() (local)
    198  queues[                                         in prio_tune()
    [all...]
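Unlike multiq, sch_prio looks the band up through a `prio2band[]` table indexed by the masked skb priority (line 57 above). A minimal sketch of that mapping; the table values follow the familiar three-band pfifo_fast-style defaults, but treat the exact numbers as illustrative:

```c
#include <stdio.h>

#define TC_PRIO_MAX 15

/*
 * skb->priority -> band map in the spirit of sch_prio's prio2band[].
 * Illustrative values in the shape of the well-known defaults.
 */
static const unsigned char prio2band[TC_PRIO_MAX + 1] = {
    1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
};

/* Mirrors the lookup on line 57: only the low four bits pick a band. */
static int classify(unsigned int priority)
{
    return prio2band[priority & TC_PRIO_MAX];
}

int main(void)
{
    for (unsigned int prio = 0; prio <= TC_PRIO_MAX; prio++)
        printf("priority %2u -> band %d\n", prio, classify(prio));
    return 0;
}
```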
/kernel/linux/linux-6.6/net/sched/
sch_prio.c
     26  struct Qdisc *queues[TCQ_PRIO_BANDS];           (member)
     57  return q->queues[q->prio2band[band & TC_PRIO_MAX]];   in prio_classify()
     63  return q->queues[q->prio2band[0]];              in prio_classify()
     65  return q->queues[band];                         in prio_classify()
    103  struct Qdisc *qdisc = q->queues[prio];          in prio_peek()
    117  struct Qdisc *qdisc = q->queues[prio];          in prio_dequeue()
    137  qdisc_reset(q->queues[prio]);                   in prio_reset()
    173  qdisc_put(q->queues[prio]);                     in prio_destroy()
    180  struct Qdisc *queues[TCQ_PRIO_BANDS];           in prio_tune() (local)
    198  queues[                                         in prio_tune()
    [all...]
sch_multiq.c
     25  struct Qdisc **queues;                          (member)
     54  return q->queues[0];                            in multiq_classify()
     56  return q->queues[band];                         in multiq_classify()
    105  qdisc = q->queues[q->curband];                  in multiq_dequeue()
    137  qdisc = q->queues[curband];                     in multiq_peek()
    154  qdisc_reset(q->queues[band]);                   in multiq_reset()
    166  qdisc_put(q->queues[band]);                     in multiq_destroy()
    168  kfree(q->queues);                               in multiq_destroy()
    196  if (q->queues[i] != &noop_qdisc) {              in multiq_tune()
    197  struct Qdisc *child = q->queues[                in multiq_tune()
    [all...]
/kernel/linux/linux-5.10/drivers/staging/wfx/
queue.c
    234  struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)];   in wfx_tx_queues_get_skb() (local)
    240  // sort the queues                              in wfx_tx_queues_get_skb()
    244  WARN_ON(num_queues >= ARRAY_SIZE(queues));      in wfx_tx_queues_get_skb()
    245  queues[num_queues] = &wvif->tx_queue[i];        in wfx_tx_queues_get_skb()
    247  if (wfx_tx_queue_get_weight(queues[j]) <        in wfx_tx_queues_get_skb()
    248  wfx_tx_queue_get_weight(queues[j - 1]))         in wfx_tx_queues_get_skb()
    249  swap(queues[j - 1], queues[j]);                 in wfx_tx_queues_get_skb()
    259  skb = skb_dequeue(&queues[i]->cab);             in wfx_tx_queues_get_skb()
    267  WARN_ON(queues[                                 in wfx_tx_queues_get_skb()
    [all...]
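Lines 247-249 are a hand-rolled insertion sort: each queue appended to the scratch array bubbles left while its weight is lower than its neighbour's. The same idea in plain C, with a toy `txq` struct standing in for `struct wfx_queue`:

```c
#include <stdio.h>

/* Toy weighted queue standing in for struct wfx_queue. */
struct txq {
    int id;
    int weight;
};

#define SWAP_PTR(a, b) do { struct txq *t = (a); (a) = (b); (b) = t; } while (0)

/*
 * Insertion sort over an array of queue pointers, ascending by weight:
 * each entry bubbles left while it is lighter than its neighbour, the
 * same compare-and-swap the driver does as it appends each queue.
 */
static void sort_queues(struct txq **queues, int n)
{
    for (int i = 1; i < n; i++)
        for (int j = i; j > 0 && queues[j]->weight < queues[j - 1]->weight; j--)
            SWAP_PTR(queues[j], queues[j - 1]);
}

int main(void)
{
    struct txq q[] = { { 0, 7 }, { 1, 2 }, { 2, 9 }, { 3, 4 } };
    struct txq *queues[] = { &q[0], &q[1], &q[2], &q[3] };

    sort_queues(queues, 4);
    for (int i = 0; i < 4; i++)
        printf("queue %d (weight %d)\n", queues[i]->id, queues[i]->weight);
    return 0;
}
```

Insertion sort is a reasonable fit here because the array is tiny (ACs times interfaces) and nearly sorted between calls.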
/kernel/linux/linux-6.6/drivers/net/wireless/silabs/wfx/
queue.c
    229  struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)];   in wfx_tx_queues_get_skb() (local)
    235  /* sort the queues */                           in wfx_tx_queues_get_skb()
    239  WARN_ON(num_queues >= ARRAY_SIZE(queues));      in wfx_tx_queues_get_skb()
    240  queues[num_queues] = &wvif->tx_queue[i];        in wfx_tx_queues_get_skb()
    242  if (wfx_tx_queue_get_weight(queues[j]) <        in wfx_tx_queues_get_skb()
    243  wfx_tx_queue_get_weight(queues[j - 1]))         in wfx_tx_queues_get_skb()
    244  swap(queues[j - 1], queues[j]);                 in wfx_tx_queues_get_skb()
    254  skb = skb_dequeue(&queues[i]->cab);             in wfx_tx_queues_get_skb()
    262  WARN_ON(queues[                                 in wfx_tx_queues_get_skb()
    [all...]
/kernel/linux/linux-5.10/drivers/nvme/target/
loop.c
     30  struct nvme_loop_queue *queues;                 (member)
     71  return queue - queue->ctrl->queues;             in nvme_loop_queue_idx()
    176  struct nvme_loop_queue *queue = &ctrl->queues[0];   in nvme_loop_submit_async_event()
    198  iod->queue = &ctrl->queues[queue_idx];          in nvme_loop_init_iod()
    220  struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];   in nvme_loop_init_hctx()
    240  struct nvme_loop_queue *queue = &ctrl->queues[0];   in nvme_loop_init_admin_hctx()
    264  if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))   in nvme_loop_destroy_admin_queue()
    266  nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);     in nvme_loop_destroy_admin_queue()
    287  kfree(ctrl->queues);                            in nvme_loop_free_ctrl()
    298  clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[      in nvme_loop_destroy_io_queues()
    [all...]
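`nvme_loop_queue_idx()` (line 71) recovers a queue's index by pointer subtraction, which works because every queue lives in the single `ctrl->queues` array. A standalone sketch of the pattern with stub types (not the real NVMe structures):

```c
#include <stdio.h>
#include <stdlib.h>

/* Stub types standing in for nvme_loop_ctrl / nvme_loop_queue. */
struct ctrl;
struct queue {
    struct ctrl *ctrl;
};
struct ctrl {
    struct queue *queues;
    int nr_queues;
};

/*
 * Same trick as nvme_loop_queue_idx(): every queue sits in one
 * contiguous array hanging off the controller, so pointer subtraction
 * recovers the index without storing it anywhere.
 */
static int queue_idx(struct queue *q)
{
    return q - q->ctrl->queues;
}

int main(void)
{
    struct ctrl c = { .nr_queues = 4 };

    c.queues = calloc(c.nr_queues, sizeof(*c.queues));
    if (!c.queues)
        return 1;
    for (int i = 0; i < c.nr_queues; i++)
        c.queues[i].ctrl = &c;

    printf("index of &queues[2] is %d\n", queue_idx(&c.queues[2]));
    free(c.queues);
    return 0;
}
```

Queue 0 is the admin queue, which is why the hctx init on line 220 offsets I/O queues by `hctx_idx + 1`.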
/kernel/linux/linux-6.6/drivers/nvme/target/
loop.c
     30  struct nvme_loop_queue *queues;                 (member)
     71  return queue - queue->ctrl->queues;             in nvme_loop_queue_idx()
    176  struct nvme_loop_queue *queue = &ctrl->queues[0];   in nvme_loop_submit_async_event()
    198  iod->queue = &ctrl->queues[queue_idx];          in nvme_loop_init_iod()
    222  struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];   in nvme_loop_init_hctx()
    242  struct nvme_loop_queue *queue = &ctrl->queues[0];   in nvme_loop_init_admin_hctx()
    266  if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))   in nvme_loop_destroy_admin_queue()
    268  nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);     in nvme_loop_destroy_admin_queue()
    285  kfree(ctrl->queues);                            in nvme_loop_free_ctrl()
    296  clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[      in nvme_loop_destroy_io_queues()
    [all...]
/kernel/linux/linux-6.6/drivers/media/platform/nxp/imx8-isi/
imx8-isi-m2m.c
     52  /* Protects the m2m vb2 queues */
     58  } queues;                                       (member)
     85  return &ctx->queues.out;                        in mxc_isi_m2m_ctx_qdata()
     87  return &ctx->queues.cap;                        in mxc_isi_m2m_ctx_qdata()
    112  src_vbuf->sequence = ctx->queues.out.sequence++;   in mxc_isi_m2m_frame_write_done()
    113  dst_vbuf->sequence = ctx->queues.cap.sequence++;   in mxc_isi_m2m_frame_write_done()
    135  .width = ctx->queues.out.format.width,          in mxc_isi_m2m_device_run()
    136  .height = ctx->queues.out.format.height,        in mxc_isi_m2m_device_run()
    139  .width = ctx->queues.cap.format.width,          in mxc_isi_m2m_device_run()
    140  .height = ctx->queues                           in mxc_isi_m2m_device_run()
    [all...]
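The `queues` member here holds a pair of per-direction states (`out` for the OUTPUT side, `cap` for CAPTURE), and `mxc_isi_m2m_ctx_qdata()` selects one by queue type so nothing else needs to be duplicated. A stub-typed sketch of that pattern:

```c
#include <stdbool.h>
#include <stdio.h>

/* Stub per-direction queue state standing in for the driver's struct. */
struct qdata {
    unsigned int sequence;
    int width, height;
};

struct m2m_ctx {
    struct {
        struct qdata out;   /* OUTPUT: frames fed in by the application */
        struct qdata cap;   /* CAPTURE: frames written back by the ISI */
    } queues;
};

/* Mirrors mxc_isi_m2m_ctx_qdata(): one helper picks the direction. */
static struct qdata *ctx_qdata(struct m2m_ctx *ctx, bool is_output)
{
    return is_output ? &ctx->queues.out : &ctx->queues.cap;
}

int main(void)
{
    struct m2m_ctx ctx = {
        .queues = {
            .out = { .width = 1920, .height = 1080 },
            .cap = { .width = 1280, .height =  720 },
        },
    };

    /* Both sequence counters advance per completed frame, as on
     * lines 112-113 above. */
    ctx.queues.out.sequence++;
    ctx.queues.cap.sequence++;

    printf("out %dx%d (seq %u), cap %dx%d (seq %u)\n",
           ctx_qdata(&ctx, true)->width, ctx_qdata(&ctx, true)->height,
           ctx.queues.out.sequence,
           ctx_qdata(&ctx, false)->width, ctx_qdata(&ctx, false)->height,
           ctx.queues.cap.sequence);
    return 0;
}
```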
/kernel/linux/linux-5.10/drivers/scsi/aacraid/
comminit.c
    373  struct aac_entry * queues;                      in aac_comm_init() (local)
    375  struct aac_queue_block * comm = dev->queues;    in aac_comm_init()
    394  queues = (struct aac_entry *)(((ulong)headers) + hdrsize);   in aac_comm_init()
    397  comm->queue[HostNormCmdQueue].base = queues;    in aac_comm_init()
    399  queues += HOST_NORM_CMD_ENTRIES;                in aac_comm_init()
    403  comm->queue[HostHighCmdQueue].base = queues;    in aac_comm_init()
    406  queues += HOST_HIGH_CMD_ENTRIES;                in aac_comm_init()
    410  comm->queue[AdapNormCmdQueue].base = queues;    in aac_comm_init()
    413  queues += ADAP_NORM_CMD_ENTRIES;                in aac_comm_init()
    417  comm->queue[AdapHighCmdQueue].base = queues;    in aac_comm_init()
    [all...]
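`aac_comm_init()` carves all of its queue entry arrays out of a single allocation: `queues` is a cursor assigned as each queue's `base` and then bumped by that queue's entry count. A userspace sketch with a stub entry type and made-up sizes:

```c
#include <stdio.h>
#include <stdlib.h>

/* Illustrative entry counts; the driver's real constants differ. */
#define HOST_NORM_ENTRIES 8
#define HOST_HIGH_ENTRIES 4
#define ADAP_NORM_ENTRIES 8

struct entry {
    unsigned int data;  /* stand-in for struct aac_entry */
};

int main(void)
{
    size_t total = HOST_NORM_ENTRIES + HOST_HIGH_ENTRIES + ADAP_NORM_ENTRIES;

    /* One backing allocation for every queue... */
    struct entry *queues = calloc(total, sizeof(*queues));
    if (!queues)
        return 1;

    /*
     * ...carved into per-queue base pointers by bumping a cursor, the
     * same pattern as aac_comm_init() (which carves a fourth, AdapHigh,
     * queue the same way).
     */
    struct entry *host_norm = queues;
    queues += HOST_NORM_ENTRIES;
    struct entry *host_high = queues;
    queues += HOST_HIGH_ENTRIES;
    struct entry *adap_norm = queues;

    printf("host_norm=%p host_high=%p adap_norm=%p\n",
           (void *)host_norm, (void *)host_high, (void *)adap_norm);

    free(host_norm);    /* free the original base, not the bumped cursor */
    return 0;
}
```

One allocation keeps the queues physically contiguous and means a single failure path, at the cost of having to remember the original base pointer for freeing.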
/kernel/linux/linux-6.6/drivers/scsi/aacraid/
comminit.c
    373  struct aac_entry * queues;                      in aac_comm_init() (local)
    375  struct aac_queue_block * comm = dev->queues;    in aac_comm_init()
    394  queues = (struct aac_entry *)(((ulong)headers) + hdrsize);   in aac_comm_init()
    397  comm->queue[HostNormCmdQueue].base = queues;    in aac_comm_init()
    399  queues += HOST_NORM_CMD_ENTRIES;                in aac_comm_init()
    403  comm->queue[HostHighCmdQueue].base = queues;    in aac_comm_init()
    406  queues += HOST_HIGH_CMD_ENTRIES;                in aac_comm_init()
    410  comm->queue[AdapNormCmdQueue].base = queues;    in aac_comm_init()
    413  queues += ADAP_NORM_CMD_ENTRIES;                in aac_comm_init()
    417  comm->queue[AdapHighCmdQueue].base = queues;    in aac_comm_init()
    [all...]
/kernel/linux/linux-6.6/sound/virtio/
virtio_card.h
     37  * @queues: Virtqueue wrappers.
     51  struct virtio_snd_queue queues[VIRTIO_SND_VQ_MAX];   (member)
     70  return &snd->queues[VIRTIO_SND_VQ_CONTROL];     in virtsnd_control_queue()
     76  return &snd->queues[VIRTIO_SND_VQ_EVENT];       in virtsnd_event_queue()
     82  return &snd->queues[VIRTIO_SND_VQ_TX];          in virtsnd_tx_queue()
     88  return &snd->queues[VIRTIO_SND_VQ_RX];          in virtsnd_rx_queue()
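Each wrapper simply indexes `queues[]` with a `VIRTIO_SND_VQ_*` constant, so callers never touch raw indices. A trimmed sketch of the same accessor style with stand-in types:

```c
#include <stdio.h>

enum { VQ_CONTROL, VQ_EVENT, VQ_TX, VQ_RX, VQ_MAX };

struct snd_queue {
    int vq_id;          /* stand-in for the real virtqueue wrapper */
};

struct snd_dev {
    struct snd_queue queues[VQ_MAX];
};

/* Accessors in the style of virtsnd_control_queue() and friends. */
static struct snd_queue *control_queue(struct snd_dev *s)
{
    return &s->queues[VQ_CONTROL];
}

static struct snd_queue *tx_queue(struct snd_dev *s)
{
    return &s->queues[VQ_TX];
}

int main(void)
{
    struct snd_dev dev = { .queues = { { 0 }, { 1 }, { 2 }, { 3 } } };

    printf("control vq=%d, tx vq=%d\n",
           control_queue(&dev)->vq_id, tx_queue(&dev)->vq_id);
    return 0;
}
```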
/kernel/linux/linux-5.10/tools/perf/util/
intel-bts.c
     46  struct auxtrace_queues queues;                  (member)
    211  for (i = 0; i < bts->queues.nr_queues; i++) {   in intel_bts_setup_queues()
    212  ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i],   in intel_bts_setup_queues()
    222  if (bts->queues.new_data) {                     in intel_bts_update_queues()
    223  bts->queues.new_data = false;                   in intel_bts_update_queues()
    465  queue = &btsq->bts->queues.queue_array[btsq->queue_nr];   in intel_bts_process_queue()
    539  struct auxtrace_queues *queues = &bts->queues;  in intel_bts_process_tid_exit() (local)
    542  for (i = 0; i < queues->nr_queues; i++) {       in intel_bts_process_tid_exit()
    543  struct auxtrace_queue *queue = &bts->queues     in intel_bts_process_tid_exit()
    710  struct auxtrace_queues *queues = &bts->queues;  in intel_bts_free_events() (local)
    [all...]
arm-spe.c
     40  struct auxtrace_queues queues;                  (member)
    142  queue = &speq->spe->queues.queue_array[speq->queue_nr];   in arm_spe_get_trace()
    450  for (i = 0; i < spe->queues.nr_queues; i++) {   in arm_spe__setup_queues()
    451  ret = arm_spe__setup_queue(spe, &spe->queues.queue_array[i], i);   in arm_spe__setup_queues()
    461  if (spe->queues.new_data) {                     in arm_spe__update_queues()
    462  spe->queues.new_data = false;                   in arm_spe__update_queues()
    529  queue = &spe->queues.queue_array[queue_nr];     in arm_spe_process_queues()
    565  struct auxtrace_queues *queues = &spe->queues;  in arm_spe_process_timeless_queues() (local)
    569  for (i = 0; i < queues                          in arm_spe_process_timeless_queues()
    707  struct auxtrace_queues *queues = &spe->queues;  in arm_spe_free_events() (local)
    [all...]
s390-cpumsf.c
     47  * To sort the queues in chronological order, all queue access is controlled
     54  * After the auxtrace infrastructure has been setup, the auxtrace queues are
     61  * record sample, the auxtrace queues will be processed. As auxtrace queues
    169  struct auxtrace_queues queues;                  (member)
    202  if (!sf->use_logfile || sf->queues.nr_queues <= sample->cpu)   in s390_cpumcf_dumpctr()
    205  q = &sf->queues.queue_array[sample->cpu];       in s390_cpumcf_dumpctr()
    700  queue = &sfq->sf->queues.queue_array[sfq->queue_nr];   in s390_cpumsf_run_decoder()
    824  for (i = 0; i < sf->queues.nr_queues; i++) {    in s390_cpumsf_setup_queues()
    825  ret = s390_cpumsf_setup_queue(sf, &sf->queues   in s390_cpumsf_setup_queues()
   1015  struct auxtrace_queues *queues = &sf->queues;   in s390_cpumsf_free_queues() (local)
    [all...]
auxtrace.c
    218  int auxtrace_queues__init(struct auxtrace_queues *queues)   in auxtrace_queues__init() (argument)
    220  queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;    in auxtrace_queues__init()
    221  queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);   in auxtrace_queues__init()
    222  if (!queues->queue_array)                       in auxtrace_queues__init()
    227  static int auxtrace_queues__grow(struct auxtrace_queues *queues,   in auxtrace_queues__grow() (argument)
    230  unsigned int nr_queues = queues->nr_queues;     in auxtrace_queues__grow()
    240  if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)   in auxtrace_queues__grow()
    247  for (i = 0; i < queues->nr_queues; i++) {       in auxtrace_queues__grow()
    248  list_splice_tail(&queues                        in auxtrace_queues__grow()
    284  auxtrace_queues__queue_buffer(struct auxtrace_queues *queues, unsigned int idx, struct auxtrace_buffer *buffer)   in auxtrace_queues__queue_buffer() (argument)
    318  auxtrace_queues__split_buffer(struct auxtrace_queues *queues, unsigned int idx, struct auxtrace_buffer *buffer)   in auxtrace_queues__split_buffer() (argument)
    356  auxtrace_queues__add_buffer(struct auxtrace_queues *queues, struct perf_session *session, unsigned int idx, struct auxtrace_buffer *buffer, struct auxtrace_buffer **buffer_ptr)   in auxtrace_queues__add_buffer() (argument)
    401  auxtrace_queues__add_event(struct auxtrace_queues *queues, struct perf_session *session, union perf_event *event, off_t data_offset, struct auxtrace_buffer **buffer_ptr)   in auxtrace_queues__add_event() (argument)
    421  auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues, struct perf_session *session, off_t file_offset, size_t sz)   in auxtrace_queues__add_indexed_event() (argument)
    448  auxtrace_queues__free(struct auxtrace_queues *queues)   in auxtrace_queues__free() (argument)
    956  auxtrace_queues__process_index_entry(struct auxtrace_queues *queues, struct perf_session *session, struct auxtrace_index_entry *ent)   in auxtrace_queues__process_index_entry() (argument)
    964  auxtrace_queues__process_index(struct auxtrace_queues *queues, struct perf_session *session)   in auxtrace_queues__process_index() (argument)
   1004  auxtrace_queues__sample_queue(struct auxtrace_queues *queues, struct perf_sample *sample, struct perf_session *session)   in auxtrace_queues__sample_queue() (argument)
   1028  auxtrace_queues__add_sample(struct auxtrace_queues *queues, struct perf_session *session, struct perf_sample *sample, u64 data_offset, u64 reference)   in auxtrace_queues__add_sample() (argument)
    [all...]
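`auxtrace_queues__grow()` (lines 227-248) doubles the queue array until the requested index fits, with an explicit check for unsigned overflow, and splices each old queue's buffer list into the new slot. A simplified sketch that keeps the doubling and the overflow guard but copies plain structs instead of splicing list heads:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct aux_queue {
    int nr_buffers;     /* stand-in for struct auxtrace_queue */
};

struct aux_queues {
    unsigned int nr_queues;
    struct aux_queue *queue_array;
};

/*
 * Double the queue count until new_nr fits, guarding against unsigned
 * overflow, then move the old slots across and zero the new ones. The
 * kernel version splices each queue's buffer list instead of copying,
 * because list heads cannot be relocated with memcpy.
 */
static int queues_grow(struct aux_queues *queues, unsigned int new_nr)
{
    unsigned int nr = queues->nr_queues ? queues->nr_queues : 1;

    while (nr && nr < new_nr)
        nr <<= 1;
    if (nr < queues->nr_queues || nr < new_nr)
        return -1;      /* nr wrapped around: refuse to grow */

    struct aux_queue *arr = calloc(nr, sizeof(*arr));
    if (!arr)
        return -1;
    if (queues->queue_array) {
        memcpy(arr, queues->queue_array,
               queues->nr_queues * sizeof(*arr));
        free(queues->queue_array);
    }
    queues->queue_array = arr;
    queues->nr_queues = nr;
    return 0;
}

int main(void)
{
    struct aux_queues q = { 0, NULL };

    if (queues_grow(&q, 5) == 0)
        printf("grown to %u queues\n", q.nr_queues);    /* prints 8 */
    free(q.queue_array);
    return 0;
}
```

The "doubled value is smaller than before" comparison is the classic portable way to detect that a left shift has wrapped.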
/kernel/linux/linux-6.6/tools/perf/util/
intel-bts.c
     46  struct auxtrace_queues queues;                  (member)
    211  for (i = 0; i < bts->queues.nr_queues; i++) {   in intel_bts_setup_queues()
    212  ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i],   in intel_bts_setup_queues()
    222  if (bts->queues.new_data) {                     in intel_bts_update_queues()
    223  bts->queues.new_data = false;                   in intel_bts_update_queues()
    465  queue = &btsq->bts->queues.queue_array[btsq->queue_nr];   in intel_bts_process_queue()
    539  struct auxtrace_queues *queues = &bts->queues;  in intel_bts_process_tid_exit() (local)
    542  for (i = 0; i < queues->nr_queues; i++) {       in intel_bts_process_tid_exit()
    543  struct auxtrace_queue *queue = &bts->queues     in intel_bts_process_tid_exit()
    710  struct auxtrace_queues *queues = &bts->queues;  in intel_bts_free_events() (local)
    [all...]
s390-cpumsf.c
     47  * To sort the queues in chronological order, all queue access is controlled
     54  * After the auxtrace infrastructure has been setup, the auxtrace queues are
     61  * record sample, the auxtrace queues will be processed. As auxtrace queues
    170  struct auxtrace_queues queues;                  (member)
    203  if (!sf->use_logfile || sf->queues.nr_queues <= sample->cpu)   in s390_cpumcf_dumpctr()
    206  q = &sf->queues.queue_array[sample->cpu];       in s390_cpumcf_dumpctr()
    701  queue = &sfq->sf->queues.queue_array[sfq->queue_nr];   in s390_cpumsf_run_decoder()
    825  for (i = 0; i < sf->queues.nr_queues; i++) {    in s390_cpumsf_setup_queues()
    826  ret = s390_cpumsf_setup_queue(sf, &sf->queues   in s390_cpumsf_setup_queues()
   1016  struct auxtrace_queues *queues = &sf->queues;   in s390_cpumsf_free_queues() (local)
    [all...]
auxtrace.c
    221  int auxtrace_queues__init(struct auxtrace_queues *queues)   in auxtrace_queues__init() (argument)
    223  queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;    in auxtrace_queues__init()
    224  queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);   in auxtrace_queues__init()
    225  if (!queues->queue_array)                       in auxtrace_queues__init()
    230  static int auxtrace_queues__grow(struct auxtrace_queues *queues,   in auxtrace_queues__grow() (argument)
    233  unsigned int nr_queues = queues->nr_queues;     in auxtrace_queues__grow()
    243  if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)   in auxtrace_queues__grow()
    250  for (i = 0; i < queues->nr_queues; i++) {       in auxtrace_queues__grow()
    251  list_splice_tail(&queues                        in auxtrace_queues__grow()
    287  auxtrace_queues__queue_buffer(struct auxtrace_queues *queues, unsigned int idx, struct auxtrace_buffer *buffer)   in auxtrace_queues__queue_buffer() (argument)
    321  auxtrace_queues__split_buffer(struct auxtrace_queues *queues, unsigned int idx, struct auxtrace_buffer *buffer)   in auxtrace_queues__split_buffer() (argument)
    359  auxtrace_queues__add_buffer(struct auxtrace_queues *queues, struct perf_session *session, unsigned int idx, struct auxtrace_buffer *buffer, struct auxtrace_buffer **buffer_ptr)   in auxtrace_queues__add_buffer() (argument)
    404  auxtrace_queues__add_event(struct auxtrace_queues *queues, struct perf_session *session, union perf_event *event, off_t data_offset, struct auxtrace_buffer **buffer_ptr)   in auxtrace_queues__add_event() (argument)
    424  auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues, struct perf_session *session, off_t file_offset, size_t sz)   in auxtrace_queues__add_indexed_event() (argument)
    451  auxtrace_queues__free(struct auxtrace_queues *queues)   in auxtrace_queues__free() (argument)
    989  auxtrace_queues__process_index_entry(struct auxtrace_queues *queues, struct perf_session *session, struct auxtrace_index_entry *ent)   in auxtrace_queues__process_index_entry() (argument)
    997  auxtrace_queues__process_index(struct auxtrace_queues *queues, struct perf_session *session)   in auxtrace_queues__process_index() (argument)
   1037  auxtrace_queues__sample_queue(struct auxtrace_queues *queues, struct perf_sample *sample, struct perf_session *session)   in auxtrace_queues__sample_queue() (argument)
   1061  auxtrace_queues__add_sample(struct auxtrace_queues *queues, struct perf_session *session, struct perf_sample *sample, u64 data_offset, u64 reference)   in auxtrace_queues__add_sample() (argument)
    [all...]
arm-spe.c
     42  struct auxtrace_queues queues;                  (member)
    154  queue = &speq->spe->queues.queue_array[speq->queue_nr];   in arm_spe_get_trace()
    271  arm_spe_set_pid_tid_cpu(spe, &spe->queues.queue_array[speq->queue_nr]);   in arm_spe_set_tid()
    768  for (i = 0; i < spe->queues.nr_queues; i++) {   in arm_spe__setup_queues()
    769  ret = arm_spe__setup_queue(spe, &spe->queues.queue_array[i], i);   in arm_spe__setup_queues()
    779  if (spe->queues.new_data) {                     in arm_spe__update_queues()
    780  spe->queues.new_data = false;                   in arm_spe__update_queues()
    822  queue = &spe->queues.queue_array[queue_nr];     in arm_spe_process_queues()
    863  struct auxtrace_queues *queues = &spe->queues;  in arm_spe_process_timeless_queues() (local)
   1035  struct auxtrace_queues *queues = &spe->queues;  in arm_spe_free_events() (local)
    [all...]
/kernel/linux/linux-6.6/drivers/target/
target_core_tmr.c
    118  flush_work(&dev->queues[i].sq.work);            in core_tmr_abort_task()
    120  spin_lock_irqsave(&dev->queues[i].lock, flags);   in core_tmr_abort_task()
    121  list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list,   in core_tmr_abort_task()
    148  spin_unlock_irqrestore(&dev->queues[i].lock, flags);   in core_tmr_abort_task()
    163  spin_unlock_irqrestore(&dev->queues[i].lock, flags);   in core_tmr_abort_task()
    301  flush_work(&dev->queues[i].sq.work);            in core_tmr_drain_state_list()
    303  spin_lock_irqsave(&dev->queues[i].lock, flags);   in core_tmr_drain_state_list()
    304  list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list,   in core_tmr_drain_state_list()
    333  spin_unlock_irqrestore(&dev->queues[i].lock, flags);   in core_tmr_drain_state_list()
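Both abort paths walk every per-CPU queue, taking each queue's own lock around its `state_list` traversal. A pthread-based sketch of that per-queue-lock iteration, with stub types rather than the SCSI target structures:

```c
#include <pthread.h>
#include <stdio.h>

#define NR_QUEUES 4

/*
 * Per-CPU-style queue with its own lock, standing in for the target
 * core's dev->queues[]: abort/drain paths take every lock in turn.
 */
struct dev_queue {
    pthread_mutex_t lock;
    int nr_pending;     /* stand-in for the state_list contents */
};

static void drain_all(struct dev_queue *queues, int n)
{
    for (int i = 0; i < n; i++) {
        pthread_mutex_lock(&queues[i].lock);
        /* the real driver walks queues[i].state_list here */
        queues[i].nr_pending = 0;
        pthread_mutex_unlock(&queues[i].lock);
    }
}

int main(void)
{
    struct dev_queue queues[NR_QUEUES];

    for (int i = 0; i < NR_QUEUES; i++) {
        pthread_mutex_init(&queues[i].lock, NULL);
        queues[i].nr_pending = i;
    }
    drain_all(queues, NR_QUEUES);
    printf("drained %d queues\n", NR_QUEUES);
    return 0;
}
```

Per-queue locks keep the fast path uncontended; the slow abort path pays by having to visit every queue.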
/kernel/linux/linux-5.10/include/linux/
ptr_ring.h
    625  void ***queues;                                 in ptr_ring_resize_multiple() (local)
    628  queues = kmalloc_array(nrings, sizeof(*queues), gfp);   in ptr_ring_resize_multiple()
    629  if (!queues)                                    in ptr_ring_resize_multiple()
    633  queues[i] = __ptr_ring_init_queue_alloc(size, gfp);   in ptr_ring_resize_multiple()
    634  if (!queues[i])                                 in ptr_ring_resize_multiple()
    641  queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],   in ptr_ring_resize_multiple()
    648  kvfree(queues[i]);                              in ptr_ring_resize_multiple()
    650  kfree(queues);                                  in ptr_ring_resize_multiple()
    [all...]
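`ptr_ring_resize_multiple()` is all-or-nothing: it first allocates a replacement queue for every ring, and only once all allocations have succeeded does it swap them in, freeing whatever comes back out. A userspace sketch of that shape, minus the ring locking the kernel does around each swap:

```c
#include <stdio.h>
#include <stdlib.h>

/* Toy ring: a sized slot array standing in for struct ptr_ring. */
struct ring {
    void **queue;
    int size;
};

/*
 * Resize several rings at once: allocate every replacement queue up
 * front so the call can fail before touching any ring, then swap each
 * new queue in and free the displaced one.
 */
static int resize_multiple(struct ring **rings, int nrings, int size)
{
    void ***queues = malloc(nrings * sizeof(*queues));

    if (!queues)
        return -1;
    for (int i = 0; i < nrings; i++) {
        queues[i] = calloc(size, sizeof(void *));
        if (!queues[i]) {
            while (i--)
                free(queues[i]);    /* unwind: no ring was modified */
            free(queues);
            return -1;
        }
    }

    for (int i = 0; i < nrings; i++) {
        void **old = rings[i]->queue;   /* kernel swaps under ring locks */
        rings[i]->queue = queues[i];
        rings[i]->size = size;
        queues[i] = old;                /* keep the old queue for freeing */
    }

    for (int i = 0; i < nrings; i++)
        free(queues[i]);
    free(queues);
    return 0;
}

int main(void)
{
    struct ring r = { calloc(4, sizeof(void *)), 4 };
    struct ring *rings[] = { &r };

    if (resize_multiple(rings, 1, 16) == 0)
        printf("ring resized to %d slots\n", r.size);
    free(r.queue);
    return 0;
}
```

Allocating everything before swapping anything is what makes the failure path clean: on error, every ring is exactly as it was.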
/kernel/linux/linux-6.6/include/linux/
ptr_ring.h
    625  void ***queues;                                 in ptr_ring_resize_multiple() (local)
    628  queues = kmalloc_array(nrings, sizeof(*queues), gfp);   in ptr_ring_resize_multiple()
    629  if (!queues)                                    in ptr_ring_resize_multiple()
    633  queues[i] = __ptr_ring_init_queue_alloc(size, gfp);   in ptr_ring_resize_multiple()
    634  if (!queues[i])                                 in ptr_ring_resize_multiple()
    641  queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],   in ptr_ring_resize_multiple()
    648  kvfree(queues[i]);                              in ptr_ring_resize_multiple()
    650  kfree(queues);                                  in ptr_ring_resize_multiple()
    [all...]
/kernel/linux/linux-6.6/drivers/vdpa/alibaba/
eni_vdpa.c
     45  int queues;                                     (member)
    118  for (i = 0; i < eni_vdpa->queues; i++) {        in eni_vdpa_free_irq()
    164  int queues = eni_vdpa->queues;                  in eni_vdpa_request_irq() (local)
    165  int vectors = queues + 1;                       in eni_vdpa_request_irq()
    177  for (i = 0; i < queues; i++) {                  in eni_vdpa_request_irq()
    195  irq = pci_irq_vector(pdev, queues);             in eni_vdpa_request_irq()
    202  vp_legacy_config_vector(ldev, queues);          in eni_vdpa_request_irq()
    492  eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa);   in eni_vdpa_probe()
    494  eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues,   in eni_vdpa_probe()
    [all...]
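`vectors = queues + 1` (line 165) reflects the interrupt layout: one vector per virtqueue plus a trailing vector for config changes, which is why line 195 fetches `pci_irq_vector(pdev, queues)` for the config interrupt. A few lines making that indexing explicit (illustrative numbers, not driver code):

```c
#include <stdio.h>

int main(void)
{
    int queues = 4;             /* data virtqueues on this device */
    int vectors = queues + 1;   /* one per virtqueue plus config */

    for (int i = 0; i < queues; i++)
        printf("vq %d   -> vector %d\n", i, i);
    printf("config -> vector %d (index == queues)\n", queues);
    printf("vectors requested: %d\n", vectors);
    return 0;
}
```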
/kernel/linux/linux-5.10/drivers/target/
target_core_tmr.c
    127  spin_lock_irqsave(&dev->queues[i].lock, flags);   in core_tmr_abort_task()
    128  list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list,   in core_tmr_abort_task()
    155  spin_unlock_irqrestore(&dev->queues[i].lock, flags);   in core_tmr_abort_task()
    177  spin_unlock_irqrestore(&dev->queues[i].lock, flags);   in core_tmr_abort_task()
    314  spin_lock_irqsave(&dev->queues[i].lock, flags);   in core_tmr_drain_state_list()
    315  list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list,   in core_tmr_drain_state_list()
    344  spin_unlock_irqrestore(&dev->queues[i].lock, flags);   in core_tmr_drain_state_list()