/kernel/linux/linux-5.10/net/netfilter/

nf_tables_offload.c
    200   struct list_head *cb_list)  in nft_setup_cb_call()
    205   list_for_each_entry(block_cb, cb_list, list) {  in nft_setup_cb_call()
    282   &basechain->flow_block.cb_list);  in nft_flow_offload_rule()
    288   list_splice(&bo->cb_list, &basechain->flow_block.cb_list);  in nft_flow_offload_bind()
    306   nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);  in nft_flow_offload_unbind()
    309   list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {  in nft_flow_offload_unbind()
    350   bo->cb_list_head = &basechain->flow_block.cb_list;  in nft_flow_block_offload_init()
    351   INIT_LIST_HEAD(&bo->cb_list);  in nft_flow_block_offload_init()
    385   list_move(&block_cb->list, &bo.cb_list);  in nft_indr_block_cleanup()
    199   nft_setup_cb_call(enum tc_setup_type type, void *type_data, struct list_head *cb_list)  nft_setup_cb_call() argument
    [all...]

nf_flow_table_offload.c
    719   &offload->flowtable->flow_block.cb_list);  in flow_offload_tuple_add()
    727   &offload->flowtable->flow_block.cb_list);  in flow_offload_tuple_del()
    778   &offload->flowtable->flow_block.cb_list);  in flow_offload_tuple_stats()
    916   list_splice(&bo->cb_list, &flowtable->flow_block.cb_list);  in nf_flow_table_block_setup()
    919   list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {  in nf_flow_table_block_setup()
    945   bo->cb_list_head = &flowtable->flow_block.cb_list;  in nf_flow_table_block_offload_init()
    946   INIT_LIST_HEAD(&bo->cb_list);  in nf_flow_table_block_offload_init()

/kernel/linux/linux-6.6/net/netfilter/

nf_tables_offload.c
    199   struct list_head *cb_list)  in nft_setup_cb_call()
    204   list_for_each_entry(block_cb, cb_list, list) {  in nft_setup_cb_call()
    281   &basechain->flow_block.cb_list);  in nft_flow_offload_cmd()
    317   list_splice(&bo->cb_list, &basechain->flow_block.cb_list);  in nft_flow_offload_bind()
    335   nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);  in nft_flow_offload_unbind()
    338   list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {  in nft_flow_offload_unbind()
    379   bo->cb_list_head = &basechain->flow_block.cb_list;  in nft_flow_block_offload_init()
    380   INIT_LIST_HEAD(&bo->cb_list);  in nft_flow_block_offload_init()
    414   list_move(&block_cb->list, &bo.cb_list);  in nft_indr_block_cleanup()
    198   nft_setup_cb_call(enum tc_setup_type type, void *type_data, struct list_head *cb_list)  nft_setup_cb_call() argument
    [all...]

nf_flow_table_offload.c
    879   &offload->flowtable->flow_block.cb_list);  in flow_offload_tuple_add()
    888   &offload->flowtable->flow_block.cb_list);  in flow_offload_tuple_del()
    942   &offload->flowtable->flow_block.cb_list);  in flow_offload_tuple_stats()
   1108   list_splice(&bo->cb_list, &flowtable->flow_block.cb_list);  in nf_flow_table_block_setup()
   1111   list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {  in nf_flow_table_block_setup()
   1137   bo->cb_list_head = &flowtable->flow_block.cb_list;  in nf_flow_table_block_offload_init()
   1138   INIT_LIST_HEAD(&bo->cb_list);  in nf_flow_table_block_offload_init()
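The nft_setup_cb_call() fragments above all point at the same dispatch loop: every flow_block_cb queued on the chain's flow_block.cb_list is handed the same classifier offload request, and the first error aborts the walk. Below is a minimal sketch of that loop, reconstructed from the fragments and assuming the in-kernel <net/flow_offload.h> definitions; the example_ prefix is a hypothetical name.

#include <linux/netdevice.h>
#include <net/flow_offload.h>

static int example_setup_cb_call(enum tc_setup_type type, void *type_data,
                                 struct list_head *cb_list)
{
        struct flow_block_cb *block_cb;
        int err;

        /* Walk every registered block callback; each one sees the same
         * offload request (e.g. TC_SETUP_CLSFLOWER plus the rule data). */
        list_for_each_entry(block_cb, cb_list, list) {
                err = block_cb->cb(type, type_data, block_cb->cb_priv);
                if (err < 0)
                        return err;
        }
        return 0;
}

The bind path visible in the hits splices the offload request's temporary cb_list into the basechain's persistent flow_block.cb_list, which is why this loop only ever has to walk a single list.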
/kernel/linux/linux-5.10/drivers/dma-buf/

dma-fence.c
    333   struct list_head cb_list;  in dma_fence_signal_locked() local
    341   /* Stash the cb_list before replacing it with the timestamp */  in dma_fence_signal_locked()
    342   list_replace(&fence->cb_list, &cb_list);  in dma_fence_signal_locked()
    348   list_for_each_entry_safe(cur, tmp, &cb_list, node) {  in dma_fence_signal_locked()
    445   if (WARN(!list_empty(&fence->cb_list) &&  in dma_fence_release()
    574   list_add_tail(&cb->node, &fence->cb_list);  in dma_fence_add_callback()
    700   list_add(&cb.base.node, &fence->cb_list);  in dma_fence_default_wait()
    856   INIT_LIST_HEAD(&fence->cb_list);  in dma_fence_init()

/kernel/linux/linux-6.6/drivers/dma-buf/

dma-fence.c
    371   struct list_head cb_list;  in dma_fence_signal_timestamp_locked() local
    379   /* Stash the cb_list before replacing it with the timestamp */  in dma_fence_signal_timestamp_locked()
    380   list_replace(&fence->cb_list, &cb_list);  in dma_fence_signal_timestamp_locked()
    386   list_for_each_entry_safe(cur, tmp, &cb_list, node) {  in dma_fence_signal_timestamp_locked()
    538   if (WARN(!list_empty(&fence->cb_list) &&  in dma_fence_release()
    661   list_add_tail(&cb->node, &fence->cb_list);  in dma_fence_add_callback()
    784   list_add(&cb.base.node, &fence->cb_list);  in dma_fence_default_wait()
   1015   INIT_LIST_HEAD(&fence->cb_list);  in dma_fence_init()
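The "stash the cb_list" hits show the signalling side of the fence callback list: under fence->lock the list is detached onto a stack head with list_replace(), freeing the union slot for the timestamp, and only then is each pending callback invoked. A minimal sketch of that pattern, assuming kernel headers; it deliberately omits the flag and timestamp handling the real dma_fence_signal_timestamp_locked() also performs, and example_notify_callbacks is a hypothetical name.

#include <linux/dma-fence.h>

static void example_notify_callbacks(struct dma_fence *fence)
{
        struct dma_fence_cb *cur, *tmp;
        struct list_head cb_list;

        lockdep_assert_held(fence->lock);

        /* Detach the pending callbacks; fence->cb_list must not be read
         * again once the fence counts as signalled. */
        list_replace(&fence->cb_list, &cb_list);

        /* Run every queued callback exactly once. */
        list_for_each_entry_safe(cur, tmp, &cb_list, node) {
                INIT_LIST_HEAD(&cur->node);
                cur->func(fence, cur);
        }
}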
/kernel/linux/linux-5.10/drivers/dma/

bcm2835-dma.c
     95   struct bcm2835_cb_entry cb_list[];  member
    209   dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,  in bcm2835_dma_free_cb_chain()
    210   desc->cb_list[i].paddr);  in bcm2835_dma_free_cb_chain()
    317   d = kzalloc(struct_size(d, cb_list, frames), gfp);  in bcm2835_dma_create_cb_chain()
    330   cb_entry = &d->cb_list[frame];  in bcm2835_dma_create_cb_chain()
    357   d->cb_list[frame - 1].cb->next = cb_entry->paddr;  in bcm2835_dma_create_cb_chain()
    370   d->cb_list[d->frames - 1].cb->info |= finalextrainfo;  in bcm2835_dma_create_cb_chain()
    452   writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);  in bcm2835_dma_start_desc()
    544   struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;  in bcm2835_dma_desc_size_pos()
    686   bcm2835_dma_fill_cb_chain_with_sg(chan, direction, d->cb_list,  in bcm2835_dma_prep_slave_sg()
    [all...]
/kernel/linux/linux-6.6/drivers/dma/

bcm2835-dma.c
     95   struct bcm2835_cb_entry cb_list[];  member
    209   dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,  in bcm2835_dma_free_cb_chain()
    210   desc->cb_list[i].paddr);  in bcm2835_dma_free_cb_chain()
    317   d = kzalloc(struct_size(d, cb_list, frames), gfp);  in bcm2835_dma_create_cb_chain()
    330   cb_entry = &d->cb_list[frame];  in bcm2835_dma_create_cb_chain()
    357   d->cb_list[frame - 1].cb->next = cb_entry->paddr;  in bcm2835_dma_create_cb_chain()
    370   d->cb_list[d->frames - 1].cb->info |= finalextrainfo;  in bcm2835_dma_create_cb_chain()
    452   writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);  in bcm2835_dma_start_desc()
    544   struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;  in bcm2835_dma_desc_size_pos()
    686   bcm2835_dma_fill_cb_chain_with_sg(chan, direction, d->cb_list,  in bcm2835_dma_prep_slave_sg()
    [all...]
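Here cb_list is not a linked list at all but a flexible array member of per-frame control-block entries, allocated together with the descriptor via struct_size(). A minimal sketch of that allocation idiom; the example_ types are hypothetical and not the bcm2835 driver's own.

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/overflow.h>

struct example_cb_entry {
        dma_addr_t paddr;       /* bus address of the control block */
        void *cb;               /* CPU mapping of the control block */
};

struct example_desc {
        size_t frames;
        struct example_cb_entry cb_list[];      /* flexible array member */
};

static struct example_desc *example_alloc_desc(size_t frames, gfp_t gfp)
{
        /* struct_size() computes sizeof(*d) + frames * sizeof(d->cb_list[0])
         * with overflow checking, so the array lives in one allocation. */
        struct example_desc *d = kzalloc(struct_size(d, cb_list, frames), gfp);

        if (d)
                d->frames = frames;
        return d;
}

The overflow-checked sizing is the point of the idiom: the chain length can come from a caller-supplied frame count without risking an undersized allocation.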
/kernel/linux/linux-5.10/net/core/

flow_offload.c
    238   list_for_each_entry(block_cb, &block->cb_list, list) {  in flow_block_cb_lookup()
    359   struct list_head *cb_list;  member
    371   INIT_LIST_HEAD(&bo.cb_list);  in existing_qdiscs_register()
    373   list_splice(&bo.cb_list, cur->cb_list);  in existing_qdiscs_register()
    527   info->cb_list = bo->cb_list_head;  in indir_dev_add()
    566   return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;  in flow_indr_dev_setup_offload()

/kernel/linux/linux-5.10/include/net/

flow_offload.h
    431   struct list_head cb_list;  member
    443   struct list_head cb_list;  member
    500   list_add_tail(&block_cb->list, &offload->cb_list);  in flow_block_cb_add()
    506   list_move(&block_cb->list, &offload->cb_list);  in flow_block_cb_remove()
    513   list_move(&block_cb->list, &offload->cb_list);  in flow_indr_block_cb_remove()
    556   INIT_LIST_HEAD(&flow_block->cb_list);  in flow_block_init()

/kernel/linux/linux-6.6/include/net/

flow_offload.h
    493   struct list_head cb_list;  member
    505   struct list_head cb_list;  member
    562   list_add_tail(&block_cb->list, &offload->cb_list);  in flow_block_cb_add()
    568   list_move(&block_cb->list, &offload->cb_list);  in flow_block_cb_remove()
    575   list_move(&block_cb->list, &offload->cb_list);  in flow_indr_block_cb_remove()
    637   INIT_LIST_HEAD(&flow_block->cb_list);  in flow_block_init()

/kernel/linux/linux-6.6/net/core/

flow_offload.c
    294   list_for_each_entry(block_cb, &block->cb_list, list) {  in flow_block_cb_lookup()
    414   struct list_head *cb_list;  member
    426   INIT_LIST_HEAD(&bo.cb_list);  in existing_qdiscs_register()
    428   list_splice(&bo.cb_list, cur->cb_list);  in existing_qdiscs_register()
    585   info->cb_list = bo->cb_list_head;  in indir_dev_add()
    630   return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count;  in flow_indr_dev_setup_offload()
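The flow_offload.h hits are the driver-facing helpers for the two cb_list members above: struct flow_block keeps the long-lived callback list, while struct flow_block_offload carries a transient cb_list that the core splices in or out on bind and unbind. The sketch below shows how a driver typically uses those helpers from its block-setup handler; example_priv, example_setup_cb and example_setup_block are hypothetical names, and a real driver would additionally keep a driver_block_list and check flow_block_cb_is_busy() before allocating.

#include <linux/err.h>
#include <linux/errno.h>
#include <net/flow_offload.h>

struct example_priv {
        int id;
};

static int example_setup_cb(enum tc_setup_type type, void *type_data,
                            void *cb_priv)
{
        return -EOPNOTSUPP;     /* a real driver translates the rule here */
}

static int example_setup_block(struct example_priv *priv,
                               struct flow_block_offload *f)
{
        struct flow_block_cb *block_cb;

        switch (f->command) {
        case FLOW_BLOCK_BIND:
                block_cb = flow_block_cb_alloc(example_setup_cb, priv, priv, NULL);
                if (IS_ERR(block_cb))
                        return PTR_ERR(block_cb);
                /* Appends to f->cb_list; the core later splices it into
                 * the block's own cb_list. */
                flow_block_cb_add(block_cb, f);
                return 0;
        case FLOW_BLOCK_UNBIND:
                block_cb = flow_block_cb_lookup(f->block, example_setup_cb, priv);
                if (!block_cb)
                        return -ENOENT;
                /* Moves the entry onto f->cb_list so the core can unlink it. */
                flow_block_cb_remove(block_cb, f);
                flow_block_cb_free(block_cb);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}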
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/

intel_breadcrumbs.c
    287   struct list_head cb_list;  in signal_irq_work() local
    290   list_replace(&rq->fence.cb_list, &cb_list);  in signal_irq_work()
    292   __dma_fence_signal__notify(&rq->fence, &cb_list);  in signal_irq_work()

/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gt/

intel_breadcrumbs.c
    247   struct list_head cb_list;  in signal_irq_work() local
    253   list_replace(&rq->fence.cb_list, &cb_list);  in signal_irq_work()
    255   __dma_fence_signal__notify(&rq->fence, &cb_list);  in signal_irq_work()
/kernel/linux/linux-6.6/block/

blk-core.c
   1066   INIT_LIST_HEAD(&plug->cb_list);  in blk_start_plug_nr_ios()
   1108   while (!list_empty(&plug->cb_list)) {  in flush_plug_callbacks()
   1109   list_splice_init(&plug->cb_list, &callbacks);  in flush_plug_callbacks()
   1130   list_for_each_entry(cb, &plug->cb_list, list)  in blk_check_plugged()
   1140   list_add(&cb->list, &plug->cb_list);  in blk_check_plugged()
   1148   if (!list_empty(&plug->cb_list))  in __blk_flush_plug()
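In the block layer, cb_list hangs off struct blk_plug: blk_check_plugged() finds or allocates a callback entry on the current task's plug, and flush_plug_callbacks() splices the list off and runs every callback when the plug is released. A sketch of the consumer side, assuming <linux/blkdev.h>; the example_ names are hypothetical, struct blk_plug_cb must be the first member of the consumer's entry, and the unplug callback owns (and frees) that entry, as the in-tree users do.

#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct example_plug_cb {
        struct blk_plug_cb cb;          /* must be the first member */
        int pending;                    /* driver-private batching state */
};

static void example_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
        struct example_plug_cb *ecb = container_of(cb, struct example_plug_cb, cb);

        /* Submit whatever was batched while the plug was held, then free
         * the entry that blk_check_plugged() allocated. */
        kfree(ecb);
}

static void example_queue_io(void)
{
        struct blk_plug_cb *cb;

        /* Links a new entry onto current->plug->cb_list, or returns the
         * existing one for this callback/data pair; NULL if no plug is
         * active or the allocation failed. */
        cb = blk_check_plugged(example_unplug, NULL,
                               sizeof(struct example_plug_cb));
        if (cb) {
                struct example_plug_cb *ecb =
                        container_of(cb, struct example_plug_cb, cb);

                ecb->pending++;
        }
}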
/kernel/linux/linux-5.10/include/drm/

drm_syncobj.h
     53   * @cb_list: List of callbacks to call when the &fence gets replaced.
     55   struct list_head cb_list;  member
     57   * @lock: Protects &cb_list and write-locks &fence.

/kernel/linux/linux-6.6/include/drm/

drm_syncobj.h
     53   * @cb_list: List of callbacks to call when the &fence gets replaced.
     55   struct list_head cb_list;  member
     61   * @lock: Protects &cb_list and &ev_fd_list, and write-locks &fence.
/kernel/linux/linux-5.10/arch/s390/kernel/

vtime.c
    279   LIST_HEAD(cb_list);  in virt_timer_expire()
    287   list_move_tail(&timer->entry, &cb_list);  in virt_timer_expire()
    300   list_for_each_entry_safe(timer, tmp, &cb_list, entry) {  in virt_timer_expire()

/kernel/linux/linux-6.6/arch/s390/kernel/

vtime.c
    290   LIST_HEAD(cb_list);  in virt_timer_expire()
    298   list_move_tail(&timer->entry, &cb_list);  in virt_timer_expire()
    311   list_for_each_entry_safe(timer, tmp, &cb_list, entry) {  in virt_timer_expire()
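virt_timer_expire() uses cb_list as an on-stack staging list: expired timers are moved onto it while the timer lock is held, and their callbacks run only after the lock is dropped, so a callback can touch the timer list without deadlocking. A generic sketch of that pattern with hypothetical example_ names (not the s390 code itself):

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_timer {
        struct list_head entry;
        unsigned long expires;
        void (*function)(unsigned long data);
        unsigned long data;
};

static LIST_HEAD(example_timer_list);
static DEFINE_SPINLOCK(example_timer_lock);

static void example_expire(unsigned long now)
{
        struct example_timer *timer, *tmp;
        LIST_HEAD(cb_list);

        spin_lock(&example_timer_lock);
        list_for_each_entry_safe(timer, tmp, &example_timer_list, entry) {
                if (timer->expires <= now)
                        /* Unlink from the live list, queue for callback. */
                        list_move_tail(&timer->entry, &cb_list);
        }
        spin_unlock(&example_timer_lock);

        /* Run the callbacks without holding the lock. */
        list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
                list_del_init(&timer->entry);
                timer->function(timer->data);
        }
}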
/kernel/linux/linux-5.10/drivers/gpu/drm/

drm_syncobj.c
    256   list_add_tail(&wait->node, &syncobj->cb_list);  in drm_syncobj_fence_add_wait()
    304   list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)  in drm_syncobj_add_point()
    337   list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)  in drm_syncobj_replace_fence()
    488   INIT_LIST_HEAD(&syncobj->cb_list);  in drm_syncobj_create()
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx4/

alias_GUID.c
    500   &dev->sriov.alias_guid.ports_guid[port - 1].cb_list;  in set_guid_rec()
    810   while (!list_empty(&det->cb_list)) {  in mlx4_ib_destroy_alias_guid_service()
    811   cb_ctx = list_entry(det->cb_list.next,  in mlx4_ib_destroy_alias_guid_service()
    870   INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);  in mlx4_ib_init_alias_guid_service()

/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx4/

alias_GUID.c
    499   &dev->sriov.alias_guid.ports_guid[port - 1].cb_list;  in set_guid_rec()
    809   while (!list_empty(&det->cb_list)) {  in mlx4_ib_destroy_alias_guid_service()
    810   cb_ctx = list_entry(det->cb_list.next,  in mlx4_ib_destroy_alias_guid_service()
    867   INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);  in mlx4_ib_init_alias_guid_service()
/kernel/linux/linux-5.10/block/

blk-core.c
   1693   INIT_LIST_HEAD(&plug->cb_list);  in blk_start_plug()
   1710   while (!list_empty(&plug->cb_list)) {  in flush_plug_callbacks()
   1711   list_splice_init(&plug->cb_list, &callbacks);  in flush_plug_callbacks()
   1732   list_for_each_entry(cb, &plug->cb_list, list)  in blk_check_plugged()
   1742   list_add(&cb->list, &plug->cb_list);  in blk_check_plugged()
/kernel/linux/linux-5.10/include/net/netfilter/

nf_flow_table.h
    184   list_add_tail(&block_cb->list, &block->cb_list);  in nf_flow_table_offload_add_cb()
/kernel/linux/linux-5.10/include/linux/

dma-fence.h
     34   * @cb_list: list of all callbacks to call
     71   * cb_list that they don't themselves hold a reference for.
     74   * rcu freelist and the cb_list. The timestamp is only set upon
     75   * signaling while simultaneously notifying the cb_list. Ergo, we
     76   * only use either the cb_list or the timestamp. Upon destruction,
     78   * that the cb_list is *only* valid until the signal bit is set,
     85   struct list_head cb_list;  member
     86   /* @cb_list replaced by @timestamp on dma_fence_signal() */
    110   * @node: used by dma_fence_add_callback() to append this struct to fence::cb_list
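The dma-fence.h comments spell out the contract behind all of the hits above: cb_list entries are only valid until the fence signals, after which the same union slot holds the timestamp. On the consumer side that contract surfaces as dma_fence_add_callback() returning -ENOENT when the fence has already signalled and the callback was never queued. A minimal consumer sketch under that assumption; the example_ names are hypothetical.

#include <linux/dma-fence.h>
#include <linux/kernel.h>

struct example_waiter {
        struct dma_fence_cb cb;         /* node ends up on fence->cb_list */
        bool done;
};

static void example_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct example_waiter *w = container_of(cb, struct example_waiter, cb);

        /* Called from dma_fence_signal(), typically in interrupt context. */
        WRITE_ONCE(w->done, true);
}

static int example_track_fence(struct dma_fence *fence, struct example_waiter *w)
{
        int ret;

        w->done = false;
        ret = dma_fence_add_callback(fence, &w->cb, example_fence_cb);
        if (ret == -ENOENT) {
                /* Already signalled: cb_list has been replaced by the
                 * timestamp, so the callback will never run. */
                w->done = true;
                ret = 0;
        }
        return ret;
}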