/kernel/linux/linux-5.10/drivers/media/usb/uvc/
uvc_queue.c | Video buffers queue management: calls into the videobuf2 queue operations are serialized, and a spinlock protects the IRQ queue that holds the buffers awaiting processing. Matches: uvc_queue_to_stream(), uvc_queue_return_buffers(), uvc_queue_setup(), uvc_buffer_prepare(), uvc_buffer_queue(), uvc_buffer_finish(), uvc_start_streaming(), uvc_stop_streaming(), uvc_queue_init(), uvc_queue_release(), uvc_request_buffers(), uvc_query_buffer(), uvc_create_buffers(), uvc_queue_buffer(), uvc_export_buffer(), uvc_dequeue_buffer(), uvc_queue_streamon(), uvc_queue_streamoff(), uvc_queue_mmap(), uvc_queue_get_unmapped_area(), uvc_queue_poll(), uvc_queue_allocated(), uvc_queue_cancel(), __uvc_queue_get_current_buffer(), uvc_queue_get_current_buffer(), uvc_queue_buffer_requeue(), uvc_queue_buffer_complete(), uvc_queue_next_buffer(), ...
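The uvc_queue_to_stream() match above is the kernel's container_of() pattern: the queue structure is embedded in its owning stream, so the owner can be recovered from a pointer to the member. Below is a minimal userspace sketch of that pattern; the struct layout is invented for illustration and is not the real struct uvc_streaming.

```c
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins; the real structs live in the UVC driver headers. */
struct video_queue { int buf_count; };
struct streaming  { int id; struct video_queue queue; };

/* Same idea as the kernel's container_of(): subtract the member offset. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct streaming *queue_to_stream(struct video_queue *q)
{
	return container_of(q, struct streaming, queue);
}

int main(void)
{
	struct streaming s = { .id = 3, .queue = { .buf_count = 8 } };
	struct video_queue *q = &s.queue;

	/* Recover the owning stream from the embedded queue pointer. */
	printf("stream id = %d\n", queue_to_stream(q)->id);
	return 0;
}
```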
/kernel/linux/linux-5.10/drivers/net/wireless/st/cw1200/
queue.c | O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers. Matches: __cw1200_queue_lock(), __cw1200_queue_unlock(), __cw1200_queue_gc(), cw1200_queue_gc(), cw1200_queue_init(), cw1200_queue_clear(), cw1200_queue_deinit(), cw1200_queue_get_num_queued(), cw1200_queue_put(), cw1200_queue_get(), cw1200_queue_requeue(), cw1200_queue_requeue_all(), cw1200_queue_remove(), cw1200_queue_get_skb(), cw1200_queue_lock(), cw1200_queue_unlock(), cw1200_queue_get_xmit_timestamp(), ...
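The __cw1200_queue_lock()/__cw1200_queue_unlock() matches above stop the mac80211 queue on the first lock and wake it again only when the lock count drops back to zero. A small sketch of that depth-counted lock/unlock idea follows; stop_hw_queue()/wake_hw_queue() are stand-ins for the driver's ieee80211_* calls.

```c
#include <assert.h>
#include <stdio.h>

struct tx_queue {
	int queue_id;
	int tx_locked_cnt;   /* nesting depth of queue_lock() calls */
};

static void stop_hw_queue(int id) { printf("stop queue %d\n", id); }
static void wake_hw_queue(int id) { printf("wake queue %d\n", id); }

/* Stop the hardware queue only on the first lock. */
static void queue_lock(struct tx_queue *q)
{
	if (q->tx_locked_cnt++ == 0)
		stop_hw_queue(q->queue_id);
}

/* Wake the hardware queue only when the last lock is released. */
static void queue_unlock(struct tx_queue *q)
{
	assert(q->tx_locked_cnt > 0);   /* mirrors the driver's BUG_ON() */
	if (--q->tx_locked_cnt == 0)
		wake_hw_queue(q->queue_id);
}

int main(void)
{
	struct tx_queue q = { .queue_id = 1 };

	queue_lock(&q);      /* stops the queue  */
	queue_lock(&q);      /* nested: no effect */
	queue_unlock(&q);    /* still locked once */
	queue_unlock(&q);    /* wakes the queue   */
	return 0;
}
```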
/kernel/linux/linux-6.6/drivers/net/wireless/st/cw1200/
queue.c | Same O(1) TX queue implementation as the linux-5.10 copy above, with the same set of matched functions.
/kernel/linux/linux-6.6/drivers/media/usb/uvc/
uvc_queue.c | Same video buffers queue management as the linux-5.10 copy above; the matched functions are the same except that uvc_queue_to_stream() no longer appears.
/kernel/linux/linux-6.6/drivers/usb/gadget/function/
uvc_queue.c | Video buffers queue management for the UVC gadget function: calls into the videobuf2 queue operations are serialized, and a spinlock protects the IRQ queue that holds the buffers awaiting processing. Matches: uvc_queue_setup(), uvc_buffer_prepare(), uvc_buffer_queue(), uvcg_queue_init(), uvcg_free_buffers(), uvcg_alloc_buffers(), uvcg_query_buffer(), uvcg_queue_buffer(), uvcg_dequeue_buffer(), uvcg_queue_poll(), uvcg_queue_mmap(), uvcg_queue_get_unmapped_area(), uvcg_queue_cancel(), uvcg_queue_enable(), uvcg_complete_buffer(), uvcg_queue_head(), ...
/kernel/linux/linux-5.10/drivers/usb/gadget/function/
uvc_queue.c | Same UVC gadget queue management as the linux-6.6 copy above; the matched functions are the same except that uvcg_queue_next_buffer() appears in place of uvcg_complete_buffer().
/kernel/linux/linux-5.10/drivers/misc/genwqe/
card_ddcb.c | Device Driver Control Block (DDCB) queue support, including the interrupt handlers for the queue. Matches: queue_empty(), queue_enqueued_ddcbs(), queue_free_ddcbs(), print_ddcb_info(), enqueue_ddcb(), copy_ddcb_results(), genwqe_check_ddcb_queue(), __genwqe_wait_ddcb(), get_next_ddcb(), __genwqe_purge_ddcb(), __genwqe_enqueue_ddcb(), genwqe_next_ddcb_ready(), genwqe_ddcbs_in_flight(), setup_ddcb_queue(), ddcb_queue_initialized(), free_ddcb_queue(), genwqe_setup_service_layer(), queue_wake_up_all(), genwqe_finish_queue(), ...
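queue_empty(), queue_enqueued_ddcbs() and queue_free_ddcbs() above are index arithmetic on a circular DDCB array, with ddcb_act marking the oldest active entry and ddcb_next the next free slot. A sketch of that accounting, assuming one slot is kept unused so that a full ring can be told apart from an empty one:

```c
#include <stdio.h>

struct ddcb_queue {
	int ddcb_max;    /* number of slots in the ring        */
	int ddcb_next;   /* position of the next free slot     */
	int ddcb_act;    /* position of the oldest active DDCB */
};

static int queue_empty(const struct ddcb_queue *q)
{
	return q->ddcb_next == q->ddcb_act;
}

/* Number of DDCBs currently enqueued, taking wrap-around into account. */
static int queue_enqueued_ddcbs(const struct ddcb_queue *q)
{
	if (q->ddcb_next >= q->ddcb_act)
		return q->ddcb_next - q->ddcb_act;
	return q->ddcb_max - (q->ddcb_act - q->ddcb_next);
}

/* Keep one slot unused so a full ring is distinguishable from an empty one. */
static int queue_free_ddcbs(const struct ddcb_queue *q)
{
	int free = q->ddcb_max - queue_enqueued_ddcbs(q) - 1;

	return free < 0 ? 0 : free;
}

int main(void)
{
	struct ddcb_queue q = { .ddcb_max = 8, .ddcb_next = 2, .ddcb_act = 6 };

	printf("empty=%d enqueued=%d free=%d\n",
	       queue_empty(&q), queue_enqueued_ddcbs(&q), queue_free_ddcbs(&q));
	return 0;
}
```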
/kernel/linux/linux-6.6/drivers/misc/genwqe/
card_ddcb.c | Same DDCB queue support as the linux-5.10 copy above, with the same set of matched functions.
/kernel/linux/linux-5.10/drivers/net/wireless/broadcom/b43legacy/
pio.c | PIO TX/RX queue handling. Matches: tx_start(), tx_octet(), tx_data(), tx_complete(), generate_cookie(), parse_cookie(), pio_tx_write_fragment(), free_txpacket(), pio_tx_packet(), tx_tasklet(), setup_txqueues(), b43legacy_setup_pioqueue(), cancel_transfers(), b43legacy_destroy_pioqueue(), b43legacy_pio_init(), b43legacy_pio_tx(), b43legacy_pio_handle_txstatus(), pio_rx_error(), b43legacy_pio_rx(), b43legacy_pio_tx_suspend(), b43legacy_pio_tx_resume(), ...
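generate_cookie()/parse_cookie() above encode which PIO queue and which packet slot a TX status report belongs to. The sketch below shows that kind of cookie packing with a made-up bit layout; the real driver uses its own field widths.

```c
#include <stdint.h>
#include <stdio.h>

/* Pack a queue index and a packet slot into one 16-bit cookie.
 * Layout (illustrative only): bits 15..12 = queue, bits 11..0 = slot. */
static uint16_t generate_cookie(unsigned int queue, unsigned int slot)
{
	return (uint16_t)(((queue & 0xF) << 12) | (slot & 0x0FFF));
}

static void parse_cookie(uint16_t cookie, unsigned int *queue, unsigned int *slot)
{
	*queue = cookie >> 12;
	*slot  = cookie & 0x0FFF;
}

int main(void)
{
	unsigned int q, s;
	uint16_t cookie = generate_cookie(2, 37);

	parse_cookie(cookie, &q, &s);
	printf("cookie=0x%04x -> queue=%u slot=%u\n", cookie, q, s);
	return 0;
}
```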
/kernel/linux/linux-6.6/drivers/net/wireless/broadcom/b43legacy/
pio.c | Same PIO queue handling as the linux-5.10 copy above, with the same set of matched functions.
/kernel/linux/linux-6.6/drivers/nvme/target/
tcp.c | NVMe over TCP target: per-queue command allocation, PDU send/receive paths, digest handling, and socket callbacks (this copy also carries an optional module behavior that samples a queue before determining it to be idle). Matches: nvmet_tcp_cmd_tag(), nvmet_tcp_get_cmd(), nvmet_tcp_put_cmd(), queue_cpu(), nvmet_tcp_hdgst_len(), nvmet_tcp_ddgst_len(), nvmet_tcp_verify_hdgst(), nvmet_tcp_check_ddgst(), nvmet_tcp_fatal_error(), nvmet_tcp_socket_error(), nvmet_setup_c2h_data_pdu(), nvmet_setup_r2t_pdu(), nvmet_setup_response_pdu(), nvmet_tcp_process_resp_list(), nvmet_tcp_fetch_cmd(), nvmet_tcp_queue_response(), nvmet_try_send_data(), nvmet_try_send_ddgst(), nvmet_tcp_try_send_one(), nvmet_tcp_try_send(), nvmet_prepare_receive_pdu(), nvmet_tcp_free_crypto(), nvmet_tcp_alloc_crypto(), nvmet_tcp_handle_icreq(), nvmet_tcp_handle_req_failure(), nvmet_tcp_handle_h2c_data_pdu(), nvmet_tcp_done_recv_pdu(), nvmet_tcp_try_recv_pdu(), nvmet_tcp_prep_recv_ddgst(), nvmet_tcp_try_recv_data(), nvmet_tcp_try_recv_ddgst(), nvmet_tcp_try_recv_one(), nvmet_tcp_try_recv(), nvmet_tcp_schedule_release_queue(), nvmet_tcp_arm_queue_deadline(), nvmet_tcp_check_queue_deadline(), nvmet_tcp_io_work(), nvmet_tcp_alloc_cmd(), nvmet_tcp_alloc_cmds(), nvmet_tcp_free_cmds(), nvmet_tcp_restore_socket_callbacks(), nvmet_tcp_uninit_data_in_cmds(), nvmet_tcp_free_cmd_data_in_buffers(), nvmet_tcp_release_queue_work(), nvmet_tcp_data_ready(), nvmet_tcp_write_space(), nvmet_tcp_state_change(), nvmet_tcp_set_queue_sock(), nvmet_tcp_alloc_queue(), nvmet_tcp_destroy_port_queues(), nvmet_tcp_delete_ctrl(), nvmet_tcp_install_queue(), nvmet_tcp_disc_port_addr(), nvmet_tcp_exit(), ...
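nvmet_tcp_cmd_tag() above derives a command's tag from pointer arithmetic on the queue's command array, and nvmet_tcp_get_cmd()/nvmet_tcp_put_cmd() recycle commands through a per-queue free list. A compact userspace sketch of both ideas, with simplified structures that are not the real nvmet_tcp_cmd/nvmet_tcp_queue:

```c
#include <stdio.h>
#include <stdlib.h>

struct cmd {
	struct cmd *next_free;   /* free-list link */
	int         in_use;
};

struct queue {
	struct cmd *cmds;        /* array of nr_cmds commands */
	int         nr_cmds;
	struct cmd *free_list;   /* linked list of unused commands */
};

/* The tag is simply the command's index within the queue's array. */
static int cmd_tag(const struct queue *q, const struct cmd *c)
{
	return (int)(c - q->cmds);
}

static struct cmd *get_cmd(struct queue *q)
{
	struct cmd *c = q->free_list;

	if (!c)
		return NULL;             /* queue exhausted */
	q->free_list = c->next_free;
	c->in_use = 1;
	return c;
}

static void put_cmd(struct queue *q, struct cmd *c)
{
	c->in_use = 0;
	c->next_free = q->free_list;
	q->free_list = c;
}

int main(void)
{
	struct queue q = { .nr_cmds = 4 };
	int i;

	q.cmds = calloc(q.nr_cmds, sizeof(*q.cmds));
	for (i = q.nr_cmds - 1; i >= 0; i--)
		put_cmd(&q, &q.cmds[i]);

	struct cmd *c = get_cmd(&q);
	printf("got command with tag %d\n", cmd_tag(&q, c));
	put_cmd(&q, c);
	free(q.cmds);
	return 0;
}
```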
/kernel/linux/linux-5.10/drivers/iio/buffer/
industrialio-buffer-dma.c | DMA buffer blocks are managed by means of two queues, the incoming queue and the outgoing queue: blocks on the incoming queue are waiting for the DMA controller to pick them up and fill them with data, blocks on the outgoing queue have already been filled, and a block that ends up on neither queue is freed. Matches: iio_buffer_block_release(), iio_dma_buffer_alloc_block(), _iio_dma_buffer_block_done(), iio_dma_buffer_block_done(), iio_dma_buffer_block_list_abort(), iio_dma_buffer_request_update(), iio_dma_buffer_submit_block(), iio_dma_buffer_enable(), iio_dma_buffer_disable(), iio_dma_buffer_enqueue(), iio_dma_buffer_dequeue(), iio_dma_buffer_read(), iio_dma_buffer_data_available(), iio_dma_buffer_init(), iio_dma_buffer_exit(), iio_dma_buffer_release(), ...
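The comment quoted above describes the block lifecycle: empty blocks sit on the incoming queue until the DMA controller fills them, filled blocks move to the outgoing queue, and a block on neither queue is freed. A minimal sketch of that two-list lifecycle in plain userspace C (no real DMA; all names are illustrative):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct block {
	struct block *next;
	size_t size;
	char   data[64];
};

struct dma_queue {
	struct block *incoming;   /* empty blocks waiting for the "DMA" to fill */
	struct block *outgoing;   /* filled blocks waiting to be read           */
};

static void push(struct block **list, struct block *b)
{
	b->next = *list;
	*list = b;
}

static struct block *pop(struct block **list)
{
	struct block *b = *list;

	if (b)
		*list = b->next;
	return b;
}

/* Pretend to be the DMA controller: fill one incoming block with data
 * and move it to the outgoing queue. */
static void dma_fill_one(struct dma_queue *q)
{
	struct block *b = pop(&q->incoming);

	if (!b)
		return;
	memset(b->data, 0xab, sizeof(b->data));
	b->size = sizeof(b->data);
	push(&q->outgoing, b);
}

int main(void)
{
	struct dma_queue q = { 0 };
	struct block *b = calloc(1, sizeof(*b));

	push(&q.incoming, b);          /* queue an empty block for capture */
	dma_fill_one(&q);              /* "hardware" fills it              */
	b = pop(&q.outgoing);          /* consumer dequeues the filled one */
	printf("got %zu bytes\n", b ? b->size : 0);
	free(b);                       /* on neither queue any more: freed */
	return 0;
}
```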
/kernel/linux/linux-6.6/drivers/iio/buffer/
industrialio-buffer-dma.c | Same two-queue DMA block management as the linux-5.10 copy above, with the same set of matched functions.
/kernel/linux/linux-5.10/drivers/net/xen-netback/
rx.c | Guest RX path of the Xen network backend: ring-slot accounting, per-queue RX skb queue handling, and the per-queue RX kernel thread. Matches: xenvif_update_needed_slots(), xenvif_rx_ring_slots_available(), xenvif_rx_queue_tail(), xenvif_rx_dequeue(), xenvif_rx_queue_purge(), xenvif_rx_queue_drop_expired(), xenvif_rx_copy_flush(), xenvif_rx_copy_add(), xenvif_rx_next_skb(), xenvif_rx_complete(), xenvif_rx_next_chunk(), xenvif_rx_data_slot(), xenvif_rx_extra_slot(), xenvif_rx_skb(), xenvif_rx_action(), xenvif_rx_queue_slots(), xenvif_rx_queue_stalled(), xenvif_rx_queue_ready(), xenvif_have_rx_work(), xenvif_rx_queue_timeout(), xenvif_wait_for_rx_work(), xenvif_queue_carrier_off(), xenvif_queue_carrier_on(), xenvif_kthread_guest_rx(), ...
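xenvif_rx_ring_slots_available() above compares the shared ring's producer and consumer counters against the number of slots the next packet needs. A simplified sketch of that check, assuming free-running 32-bit counters as on Xen shared rings (structure names invented):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rx_ring {
	uint32_t req_prod;   /* written by the frontend: requests produced */
	uint32_t req_cons;   /* advanced by the backend: requests consumed */
};

/* True if at least `needed` unconsumed request slots are available.
 * The counters are free-running, so unsigned subtraction handles wrap. */
static bool rx_ring_slots_available(const struct rx_ring *ring, uint32_t needed)
{
	return ring->req_prod - ring->req_cons >= needed;
}

int main(void)
{
	struct rx_ring ring = { .req_prod = 10, .req_cons = 7 };

	printf("need 2: %s\n", rx_ring_slots_available(&ring, 2) ? "ok" : "stall");
	printf("need 5: %s\n", rx_ring_slots_available(&ring, 5) ? "ok" : "stall");
	return 0;
}
```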
/kernel/linux/linux-6.6/drivers/net/xen-netback/
rx.c | Same guest RX path as the linux-5.10 copy above, with the same set of matched functions.
/kernel/linux/linux-5.10/drivers/nvme/target/
tcp.c | Largely the same NVMe over TCP target matches as the linux-6.6 copy above; this older copy has no queue idle-poll deadline helpers (nvmet_tcp_arm_queue_deadline()/nvmet_tcp_check_queue_deadline()).
/kernel/linux/linux-5.10/drivers/crypto/cavium/zip/
zip_device.c | ZIP command queue handling: zip_cmd_queue_consumed() calculates the space consumed (in bytes) in the command queue buffer for a given queue number, and zip_load_instr() copies a ZIP instruction into the command queue, which is maintained in a circular fashion, and rings the doorbell. Matches: zip_cmd_queue_consumed(), zip_load_instr(), zip_update_cmd_bufs(), ...
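zip_cmd_queue_consumed() above turns the distance between the software head and tail indices into a byte count. A sketch of that arithmetic; the bytes-per-command-word constant here is illustrative, not the driver's actual multiplier:

```c
#include <stdint.h>
#include <stdio.h>

#define CMD_WORD_SIZE 8u   /* illustrative: bytes per command word */

struct instr_queue {
	uint32_t sw_head;   /* software head index (next write position)   */
	uint32_t sw_tail;   /* software tail index (next hardware fetch)   */
};

/* Bytes consumed in the command queue buffer between tail and head. */
static uint32_t cmd_queue_consumed(const struct instr_queue *iq)
{
	return (iq->sw_head - iq->sw_tail) * CMD_WORD_SIZE;
}

int main(void)
{
	struct instr_queue iq = { .sw_head = 12, .sw_tail = 4 };

	printf("consumed: %u bytes\n", cmd_queue_consumed(&iq));
	return 0;
}
```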
/kernel/linux/linux-6.6/drivers/crypto/cavium/zip/
zip_device.c | Same ZIP command queue handling as the linux-5.10 copy above, with the same set of matched functions.
/kernel/linux/linux-6.6/drivers/nvme/host/
tcp.c | NVMe over TCP host: per-queue request queuing and send/receive state machine, digest handling, socket callbacks, and queue setup/teardown. Matches: nvme_tcp_try_send(), nvme_tcp_queue_id(), nvme_tcp_tagset(), nvme_tcp_hdgst_len(), nvme_tcp_ddgst_len(), nvme_tcp_send_all(), nvme_tcp_queue_more(), nvme_tcp_queue_request(), nvme_tcp_process_req_list(), nvme_tcp_fetch_request(), nvme_tcp_verify_hdgst(), nvme_tcp_check_ddgst(), nvme_tcp_init_request(), nvme_tcp_init_hctx(), nvme_tcp_init_admin_hctx(), nvme_tcp_recv_state(), nvme_tcp_init_recv_ctx(), nvme_tcp_process_nvme_cqe(), nvme_tcp_handle_c2h_data(), nvme_tcp_handle_comp(), nvme_tcp_setup_h2c_data_pdu(), nvme_tcp_handle_r2t(), nvme_tcp_recv_pdu(), nvme_tcp_recv_data(), nvme_tcp_recv_ddgst(), nvme_tcp_recv_skb(), nvme_tcp_data_ready(), nvme_tcp_write_space(), nvme_tcp_state_change(), nvme_tcp_done_send_req(), nvme_tcp_try_send_data(), nvme_tcp_try_send_cmd_pdu(), nvme_tcp_try_send_data_pdu(), nvme_tcp_try_send_ddgst(), nvme_tcp_try_recv(), nvme_tcp_io_work(), nvme_tcp_free_crypto(), nvme_tcp_alloc_crypto(), nvme_tcp_alloc_async_req(), nvme_tcp_free_queue(), nvme_tcp_init_connection(), nvme_tcp_admin_queue(), nvme_tcp_default_queue(), nvme_tcp_read_queue(), nvme_tcp_poll_queue(), nvme_tcp_set_queue_io_cpu(), nvme_tcp_alloc_queue(), nvme_tcp_restore_sock_ops(), __nvme_tcp_stop_queue(), nvme_tcp_stop_queue(), nvme_tcp_setup_sock_ops(), nvme_tcp_start_queue(), nvme_tcp_set_sg_inline(), nvme_tcp_submit_async_event(), nvme_tcp_map_data(), nvme_tcp_setup_cmd_pdu(), nvme_tcp_commit_rqs(), nvme_tcp_queue_rq(), nvme_tcp_poll(), nvme_tcp_get_address(), ...
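nvme_tcp_queue_id() above recovers a queue's index by pointer arithmetic against the controller's queue array, and nvme_tcp_tagset() picks the admin tag set for index 0 and an I/O tag set (index minus one) otherwise. A small sketch of that indexing scheme with simplified stand-in types:

```c
#include <stdio.h>

#define NR_QUEUES 4   /* queue 0 is the admin queue, 1..3 are I/O queues */

struct tagset { const char *name; };
struct queue  { int unused; };

struct ctrl {
	struct queue  queues[NR_QUEUES];
	struct tagset admin_tag_set;
	struct tagset io_tag_sets[NR_QUEUES - 1];
};

/* Queue index = distance from the start of the controller's queue array. */
static int queue_id(const struct ctrl *c, const struct queue *q)
{
	return (int)(q - c->queues);
}

static struct tagset *queue_tagset(struct ctrl *c, struct queue *q)
{
	int idx = queue_id(c, q);

	return idx == 0 ? &c->admin_tag_set : &c->io_tag_sets[idx - 1];
}

int main(void)
{
	struct ctrl c = {
		.admin_tag_set = { "admin" },
		.io_tag_sets   = { { "io0" }, { "io1" }, { "io2" } },
	};

	printf("queue 2 uses tag set '%s'\n",
	       queue_tagset(&c, &c.queues[2])->name);
	return 0;
}
```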
/kernel/linux/linux-5.10/drivers/nvme/host/ |
H A D | tcp.c |
    81  struct nvme_tcp_queue *queue;                       (struct member)
   176  static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
   183  nvme_tcp_queue_id()        (return queue - queue->ctrl->queues;)
   188  nvme_tcp_tagset()          (admin queue: ctrl->admin_tag_set.tags[queue_idx]; I/O queues: ctrl->tag_set.tags[queue_idx - 1])
   197  nvme_tcp_hdgst_len(), 202 nvme_tcp_ddgst_len(), 207 nvme_tcp_inline_data_size()
   303  nvme_tcp_send_all(), 313 nvme_tcp_queue_more(), 322 nvme_tcp_queue_request()
   343  nvme_tcp_process_req_list(), 355 nvme_tcp_fetch_request()
   401  nvme_tcp_verify_hdgst(), 428 nvme_tcp_check_ddgst()
   463  nvme_tcp_init_request() (queue = &ctrl->queues[queue_idx]), 482 nvme_tcp_init_hctx() (queue = &ctrl->queues[hctx_idx + 1]), 492 nvme_tcp_init_admin_hctx() (queue = &ctrl->queues[0])
   499  nvme_tcp_recv_state(), 506 nvme_tcp_init_recv_ctx()
   524  nvme_tcp_process_nvme_cqe(), 545 nvme_tcp_handle_c2h_data(), 579 nvme_tcp_handle_comp()
   605  nvme_tcp_setup_h2c_data_pdu(), 654 nvme_tcp_handle_r2t()
   682  nvme_tcp_recv_pdu(), 740 nvme_tcp_recv_data(), 810 nvme_tcp_recv_ddgst(), 852 nvme_tcp_recv_skb()
   884  nvme_tcp_data_ready(), 896 nvme_tcp_write_space(), 909 nvme_tcp_state_change()
   935  nvme_tcp_done_send_req(), 955 nvme_tcp_try_send_data(), 1011 nvme_tcp_try_send_cmd_pdu(), 1051 nvme_tcp_try_send_data_pdu(), 1082 nvme_tcp_try_send_ddgst()
  1109  nvme_tcp_try_send(), 1155 nvme_tcp_try_recv(), 1173 nvme_tcp_io_work()
  1204  nvme_tcp_free_crypto(), 1213 nvme_tcp_alloc_crypto(), 1248 nvme_tcp_alloc_async_req()
  1265  nvme_tcp_free_queue(), 1278 nvme_tcp_init_connection()
  1376  nvme_tcp_admin_queue(), 1381 nvme_tcp_default_queue(), 1390 nvme_tcp_read_queue(), 1401 nvme_tcp_poll_queue(), 1414 nvme_tcp_set_queue_io_cpu()
  1434  nvme_tcp_alloc_queue(), 1557 nvme_tcp_restore_sock_ops(), 1569 __nvme_tcp_stop_queue(), 1579 nvme_tcp_stop_queue(), 1587 nvme_tcp_setup_sock_ops(), 1606 nvme_tcp_start_queue()
  2214  nvme_tcp_set_sg_inline(), 2238 nvme_tcp_submit_async_event(), 2312 nvme_tcp_map_data(), 2337 nvme_tcp_setup_cmd_pdu()
  2386  nvme_tcp_commit_rqs(), 2396 nvme_tcp_queue_rq(), 2463 nvme_tcp_poll()
  [all...]
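The hits at tcp.c lines 183 to 194 show how the driver turns a queue pointer into an index by pointer arithmetic and then picks the matching blk-mq tag set (admin queue vs. I/O queues). A minimal sketch of that pairing, reconstructed from those fragments; both structs are reduced to only the fields the fragments use, so the layouts here are assumptions, not the driver's full definitions:

    #include <linux/blk-mq.h>
    #include <linux/types.h>

    struct nvme_tcp_ctrl;

    struct nvme_tcp_queue {
        struct nvme_tcp_ctrl *ctrl;
        /* socket, PDU receive state, digest contexts, ... omitted */
    };

    struct nvme_tcp_ctrl {
        struct nvme_tcp_queue *queues;      /* queues[0] is the admin queue */
        struct blk_mq_tag_set admin_tag_set;
        struct blk_mq_tag_set tag_set;
    };

    static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
    {
        /* Index of this queue inside ctrl->queues[] by pointer arithmetic. */
        return queue - queue->ctrl->queues;
    }

    static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
    {
        u32 queue_idx = nvme_tcp_queue_id(queue);

        if (queue_idx == 0)
            return queue->ctrl->admin_tag_set.tags[queue_idx];
        /* I/O tag sets start at 0, so shift past the admin queue. */
        return queue->ctrl->tag_set.tags[queue_idx - 1];
    }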
/kernel/linux/linux-5.10/drivers/net/wireguard/ |
H A D | queueing.c |
   25  wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, ...)
         memset(queue, 0, sizeof(*queue)); queue->last_cpu = -1;
         ptr_ring_init(&queue->ring, len, GFP_KERNEL);
         queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
         on worker allocation failure: ptr_ring_cleanup(&queue->ring, NULL);
   43  wg_packet_queue_free(struct crypt_queue *queue, bool ...)
   53  wg_prev_queue_init(struct prev_queue *queue)
   66  __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
   72  wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
   80  wg_prev_queue_dequeue(struct prev_queue *queue)
  [all...]
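The wg_packet_queue_init() hit above is almost complete in the fragments; here it is reassembled into readable form. The reduced crypt_queue layout and the wg_packet_percpu_multicore_worker_alloc() prototype are taken from those fragments rather than re-checked against wireguard's queueing.h, so treat this as a sketch:

    #include <linux/ptr_ring.h>
    #include <linux/workqueue.h>
    #include <linux/slab.h>
    #include <linux/errno.h>
    #include <linux/string.h>

    struct multicore_worker;

    struct crypt_queue {
        struct ptr_ring ring;                       /* fixed-size queue of packets */
        struct multicore_worker __percpu *worker;   /* per-CPU work items */
        int last_cpu;                               /* round-robin cursor, -1 = none yet */
    };

    struct multicore_worker __percpu *
    wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);

    int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
                             unsigned int len)
    {
        int ret;

        memset(queue, 0, sizeof(*queue));
        queue->last_cpu = -1;
        ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
        if (ret)
            return ret;
        queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
        if (!queue->worker) {
            ptr_ring_cleanup(&queue->ring, NULL);
            return -ENOMEM;
        }
        return 0;
    }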
/kernel/linux/linux-5.10/drivers/gpu/drm/vmwgfx/ |
H A D | vmwgfx_marker.c |
   37  vmw_marker_queue_init(struct vmw_marker_queue *queue)
         INIT_LIST_HEAD(&queue->head); queue->lag = 0;
         queue->lag_time = ktime_get_raw_ns(); spin_lock_init(&queue->lock);
   45  vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
         walks queue->head with list_for_each_entry_safe() under queue->lock
   56  vmw_marker_push(struct vmw_marker_queue *queue, ...)
   73  vmw_marker_pull(struct vmw_marker_queue *queue, uint32_t signaled_seqno)
  107  vmw_fifo_lag(struct vmw_marker_queue *queue)
  120  vmw_lag_lt(struct vmw_marker_queue *queue, uint32_t us)
  128  vmw_wait_lag(struct vmw_private *dev_priv, struct vmw_marker_queue *queue, uint32_t us)
  [all...]
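The first two hits in vmwgfx_marker.c are nearly whole, so they can be reassembled as below. The marker and queue structs are cut down to the members the fragments actually touch (the real definitions live in the vmwgfx headers), and freeing each marker in takedown is inferred from the list_for_each_entry_safe() walk rather than copied verbatim:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/ktime.h>
    #include <linux/slab.h>

    struct vmw_marker {
        struct list_head head;      /* link into vmw_marker_queue::head */
        /* seqno, submission timestamp, ... omitted */
    };

    struct vmw_marker_queue {
        struct list_head head;      /* outstanding markers, oldest first */
        u64 lag;                    /* accumulated FIFO lag, in ns */
        u64 lag_time;               /* when the lag was last updated */
        spinlock_t lock;
    };

    void vmw_marker_queue_init(struct vmw_marker_queue *queue)
    {
        INIT_LIST_HEAD(&queue->head);
        queue->lag = 0;
        queue->lag_time = ktime_get_raw_ns();
        spin_lock_init(&queue->lock);
    }

    void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
    {
        struct vmw_marker *marker, *next;

        spin_lock(&queue->lock);
        list_for_each_entry_safe(marker, next, &queue->head, head)
            kfree(marker);
        spin_unlock(&queue->lock);
    }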
/kernel/linux/linux-6.6/drivers/net/wireguard/ |
H A D | queueing.c |
  The listed lines are identical to the linux-5.10 copy above:
   25  wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, ...)
   43  wg_packet_queue_free(struct crypt_queue *queue, bool ...)
   53  wg_prev_queue_init(struct prev_queue *queue)
   66  __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
   72  wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
   80  wg_prev_queue_dequeue(struct prev_queue *queue)
  [all...]
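Since the 6.6 copy lists the same functions, a usage-side note instead: the ring inside crypt_queue is an ordinary ptr_ring, so handing packets to it and draining it follow the standard ptr_ring calls. A hedged sketch using the reduced crypt_queue layout from the earlier sketch; the example_* helpers are made up for illustration, and the real driver layers per-peer ordering and worker scheduling on top of this:

    #include <linux/ptr_ring.h>
    #include <linux/skbuff.h>

    /* Try to hand one packet to the crypt queue.
     * ptr_ring_produce() returns 0 on success and -ENOSPC when the ring is full. */
    static int example_crypt_enqueue(struct crypt_queue *queue, struct sk_buff *skb)
    {
        return ptr_ring_produce(&queue->ring, skb);
    }

    /* Drop everything still sitting in the ring, e.g. on teardown. */
    static void example_crypt_drain(struct crypt_queue *queue)
    {
        struct sk_buff *skb;

        while ((skb = ptr_ring_consume(&queue->ring)) != NULL)
            kfree_skb(skb);
    }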
/kernel/linux/linux-5.10/drivers/net/ |
H A D | xen-netfront.c |
    96  /* IRQ name is queue name with "-tx" or "-rx" appended */
   168  /* Multi-queue support */
   220  xennet_get_rx_skb()    (skb = queue->rx_skbs[i]; queue->rx_skbs[i] = NULL;)
   229  xennet_get_rx_ref()    (ref = queue->grant_rx_ref[i]; queue->grant_rx_ref[i] = GRANT_INVALID_REF;)
   250  rx_refill_timeout()    (queue = from_timer(queue, ...))
   254  netfront_tx_slot_available(), 260 xennet_maybe_wake_tx()
   272  xennet_alloc_one_rx_buffer(), 299 xennet_alloc_rx_buffers()
   366  xennet_open(), 390 xennet_tx_buf_gc()
   466  struct netfront_queue *queue;   (struct member)
   483  xennet_tx_setup_grant()  (queue = info->queue)
   604  xennet_mark_tx_pending(), 613 xennet_xdp_xmit_one(), 651 xennet_xdp_xmit(), 722 xennet_start_xmit()
   875  xennet_close(), 889 xennet_destroy_queues()
   906  xennet_set_rx_rsp_cons(), 916 xennet_move_rx_slot(), 929 xennet_get_extras()
   971  xennet_run_xdp(), 1018 xennet_get_responses(), 1161 xennet_fill_frags()
  1224  handle_incoming_queue(), 1262 xennet_poll()
  1420  xennet_release_tx_bufs(), 1443 xennet_release_rx_bufs()
  1513  xennet_handle_tx(), 1538 xennet_handle_rx()
  1820  xennet_disconnect_backend()
  1902  setup_netfront_single(), 1928 setup_netfront_split(), 1972 setup_netfront()
  2052  xennet_init_queue(), 2108 write_queue_xenstore_keys(), 2192 xennet_create_page_pool()
  2247  xennet_create_queues(), 2293 talk_to_netback(), 2455 xennet_connect()
  [all...]
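The xennet_get_rx_skb() and xennet_get_rx_ref() hits (lines 220 and 229) show the take-and-clear pattern the rx path uses for its per-slot arrays. A sketch reconstructed from those fragments; the array size, the slot-index parameter (the driver derives it from a RING_IDX), the GRANT_INVALID_REF value, and the example_* names are assumptions:

    #include <linux/skbuff.h>
    #include <xen/grant_table.h>

    #define GRANT_INVALID_REF 0        /* assumed; the 5.10 driver defines its own sentinel */
    #define EXAMPLE_RX_RING_SIZE 256   /* hypothetical; sized from the shared ring in the driver */

    /* Reduced netfront_queue: only the two per-slot rx arrays used here. */
    struct netfront_queue {
        struct sk_buff *rx_skbs[EXAMPLE_RX_RING_SIZE];
        grant_ref_t grant_rx_ref[EXAMPLE_RX_RING_SIZE];
    };

    /* Take ownership of the skb parked in rx slot i and clear the slot. */
    static struct sk_buff *example_get_rx_skb(struct netfront_queue *queue,
                                              unsigned int i)
    {
        struct sk_buff *skb = queue->rx_skbs[i];

        queue->rx_skbs[i] = NULL;
        return skb;
    }

    /* Same for the grant reference; the slot is poisoned so reuse is detectable. */
    static grant_ref_t example_get_rx_ref(struct netfront_queue *queue,
                                          unsigned int i)
    {
        grant_ref_t ref = queue->grant_rx_ref[i];

        queue->grant_rx_ref[i] = GRANT_INVALID_REF;
        return ref;
    }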
/kernel/linux/linux-6.6/drivers/net/ |
H A D | xen-netfront.c |
  Same hit set as the linux-5.10 copy above, at slightly lower line numbers; in this tree
  xennet_get_rx_ref() resets the slot to INVALID_GRANT_REF instead of GRANT_INVALID_REF.
    94  /* IRQ name is queue name with "-tx" or "-rx" appended */
   166  /* Multi-queue support */
   218  xennet_get_rx_skb(), 227 xennet_get_rx_ref()
   248  rx_refill_timeout(), 252 netfront_tx_slot_available(), 258 xennet_maybe_wake_tx()
   270  xennet_alloc_one_rx_buffer(), 297 xennet_alloc_rx_buffers()
   364  xennet_open(), 388 xennet_tx_buf_gc()
   464  struct netfront_queue *queue;   (struct member)
   481  xennet_tx_setup_grant()
   602  xennet_mark_tx_pending(), 611 xennet_xdp_xmit_one(), 649 xennet_xdp_xmit(), 718 xennet_start_xmit()
   871  xennet_close(), 885 xennet_destroy_queues()
   902  xennet_set_rx_rsp_cons(), 912 xennet_move_rx_slot(), 925 xennet_get_extras()
   967  xennet_run_xdp(), 1014 xennet_get_responses(), 1157 xennet_fill_frags()
  1220  handle_incoming_queue(), 1258 xennet_poll()
  1416  xennet_release_tx_bufs(), 1438 xennet_release_rx_bufs()
  1507  xennet_handle_tx(), 1532 xennet_handle_rx()
  1816  xennet_disconnect_backend()
  1898  setup_netfront_single(), 1924 setup_netfront_split(), 1968 setup_netfront()
  2019  xennet_init_queue(), 2075 write_queue_xenstore_keys(), 2159 xennet_create_page_pool()
  2214  xennet_create_queues(), 2259 talk_to_netback(), 2423 xennet_connect()
  [all...]
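The 6.6 copy also lists the rx_refill_timeout() timer callback (line 248), whose fragment shows the from_timer() idiom for recovering the queue from its embedded timer. A hedged sketch of that pattern; the rx_refill_timer member name, the example_ prefix, and the napi_schedule() body are assumptions about what the callback does, not a verbatim copy:

    #include <linux/timer.h>
    #include <linux/netdevice.h>

    /* Reduced netfront_queue for this sketch: the NAPI context plus the refill timer. */
    struct netfront_queue {
        struct napi_struct napi;
        struct timer_list rx_refill_timer;   /* assumed member name */
    };

    static void example_rx_refill_timeout(struct timer_list *t)
    {
        /* from_timer() is container_of() specialised for timer_list members:
         * it maps the expired timer back to the netfront_queue that owns it. */
        struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);

        /* Assumed body: kick the poll loop so it can repost rx buffers. */
        napi_schedule(&queue->napi);
    }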