/kernel/linux/linux-5.10/drivers/net/wireguard/

  ratelimiter.c
      20  static DECLARE_DEFERRABLE_WORK(gc_work, wg_ratelimiter_gc_entries);
      81  queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);  in wg_ratelimiter_gc_entries()
     191  queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);  in wg_ratelimiter_init()
     211  cancel_delayed_work_sync(&gc_work);  in wg_ratelimiter_uninit()

/kernel/linux/linux-6.6/drivers/net/wireguard/

  ratelimiter.c
      20  static DECLARE_DEFERRABLE_WORK(gc_work, wg_ratelimiter_gc_entries);
      81  queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);  in wg_ratelimiter_gc_entries()
     191  queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);  in wg_ratelimiter_init()
     211  cancel_delayed_work_sync(&gc_work);  in wg_ratelimiter_uninit()

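
Both trees show the same shape: a statically declared deferrable delayed work that re-arms itself roughly once per second on the power-efficient workqueue and is stopped with cancel_delayed_work_sync() at teardown. A minimal sketch of that pattern, with my_gc_entries() and the module boilerplate as hypothetical stand-ins rather than the actual WireGuard code:

#include <linux/module.h>
#include <linux/workqueue.h>

static void my_gc_entries(struct work_struct *work);

/* Deferrable: the backing timer will not wake an idle CPU just to run GC. */
static DECLARE_DEFERRABLE_WORK(gc_work, my_gc_entries);

static void my_gc_entries(struct work_struct *work)
{
        /* ... walk the table and drop expired entries ... */

        /* Re-arm on the power-efficient workqueue, about once per second. */
        queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);
}

static int __init my_init(void)
{
        queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);
        return 0;
}

static void __exit my_exit(void)
{
        /*
         * Waits for a running pass and prevents further re-arming, so it is
         * safe even though the handler queues itself.
         */
        cancel_delayed_work_sync(&gc_work);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
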
/kernel/linux/linux-6.6/drivers/gpu/drm/qxl/

  qxl_kms.c
     102  struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);  in qxl_gc_work()
     260  INIT_WORK(&qdev->gc_work, qxl_gc_work);  in qxl_device_init()
     287  /* check if qxl_device_init() was successful (gc_work is initialized last) */  in qxl_device_fini()
     288  if (!qdev->gc_work.func)  in qxl_device_fini()
     308  flush_work(&qdev->gc_work);  in qxl_device_fini()

  qxl_cmd.c
     204  schedule_work(&qdev->gc_work);  in qxl_queue_garbage_collect()
     206  flush_work(&qdev->gc_work);  in qxl_queue_garbage_collect()

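
Here gc_work is a plain work_struct embedded in the device structure: the handler recovers the device with container_of(), callers kick it with schedule_work() and can optionally wait with flush_work(), and qxl_device_fini() uses the .func pointer set by INIT_WORK() to tell whether initialization ever got that far. A rough sketch of the same pattern, with an invented my_device type in place of qxl_device:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_device {
        struct work_struct gc_work;
        /* ... */
};

static void my_gc_work(struct work_struct *work)
{
        struct my_device *dev = container_of(work, struct my_device, gc_work);

        /* release resources the hardware has finished with */
        (void)dev;
}

static void my_device_init(struct my_device *dev)
{
        INIT_WORK(&dev->gc_work, my_gc_work);   /* sets dev->gc_work.func */
}

static void my_queue_gc(struct my_device *dev, bool wait)
{
        schedule_work(&dev->gc_work);           /* run on the system workqueue */
        if (wait)
                flush_work(&dev->gc_work);      /* block until this pass is done */
}

static void my_device_fini(struct my_device *dev)
{
        /* gc_work is initialized last, so .func doubles as an "init finished" flag */
        if (!dev->gc_work.func)
                return;
        flush_work(&dev->gc_work);
}
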
/kernel/linux/linux-6.6/net/netfilter/

  nf_conntrack_core.c
    1459  struct conntrack_gc_work *gc_work;  in gc_worker()  (local)
    1465  gc_work = container_of(work, struct conntrack_gc_work, dwork.work);  in gc_worker()
    1467  i = gc_work->next_bucket;  in gc_worker()
    1468  if (gc_work->early_drop)  in gc_worker()
    1472  gc_work->avg_timeout = GC_SCAN_INTERVAL_INIT;  in gc_worker()
    1473  gc_work->count = GC_SCAN_INITIAL_COUNT;  in gc_worker()
    1474  gc_work->start_time = start_time;  in gc_worker()
    1477  next_run = gc_work->avg_timeout;  in gc_worker()
    1478  count = gc_work->count;  in gc_worker()
    1512  gc_work  in gc_worker()
    1599  conntrack_gc_work_init(struct conntrack_gc_work *gc_work)  in conntrack_gc_work_init()  (argument)
    [all...]
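
The conntrack garbage collector keeps its scan state next to the delayed work itself: the handler recovers the containing structure via container_of(work, ..., dwork.work), resumes at next_bucket, and only re-arms itself while it is not exiting. A simplified sketch of that shape; struct my_gc_work, MY_NR_BUCKETS, MY_SCAN_BATCH and scan_bucket() are invented for illustration and are not the conntrack implementation:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define MY_NR_BUCKETS   1024
#define MY_SCAN_BATCH   64

struct my_gc_work {
        struct delayed_work     dwork;
        u32                     next_bucket;    /* where the next pass resumes */
        bool                    exiting;        /* set at teardown; stops re-arming */
};

static void scan_bucket(u32 bucket)
{
        /* walk one hash bucket and evict expired entries */
}

static void my_gc_worker(struct work_struct *work)
{
        /* dwork.work is the work_struct embedded inside the delayed_work */
        struct my_gc_work *gc_work =
                container_of(work, struct my_gc_work, dwork.work);
        u32 i = gc_work->next_bucket;
        u32 scanned;

        for (scanned = 0; i < MY_NR_BUCKETS && scanned < MY_SCAN_BATCH; i++, scanned++)
                scan_bucket(i);

        /* Remember progress so the next run continues where this one stopped. */
        gc_work->next_bucket = i < MY_NR_BUCKETS ? i : 0;

        if (gc_work->exiting)
                return;

        queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, HZ);
}

static void my_gc_work_init(struct my_gc_work *gc_work)
{
        INIT_DELAYED_WORK(&gc_work->dwork, my_gc_worker);
        gc_work->next_bucket = 0;
        gc_work->exiting = false;
}
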
  nf_flow_table_core.c
     452  flow_table = container_of(work, struct nf_flowtable, gc_work.work);  in nf_flow_offload_work_gc()
     454  queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);  in nf_flow_offload_work_gc()
     549  INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);  in nf_flow_table_init()
     559  &flowtable->gc_work, HZ);  in nf_flow_table_init()
     589  flush_delayed_work(&flowtable->gc_work);  in nf_flow_table_gc_cleanup()
     610  cancel_delayed_work_sync(&flow_table->gc_work);  in nf_flow_table_free()

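
Note the two different teardown calls above: nf_flow_table_gc_cleanup() only flushes the delayed work (run the pending pass now and wait for it; the self-re-arming handler keeps future passes coming), while nf_flow_table_free() cancels it synchronously so nothing is pending or running when the table goes away. A condensed sketch of that distinction, using an invented struct my_flowtable:

#include <linux/workqueue.h>

struct my_flowtable {
        struct delayed_work gc_work;    /* handler re-arms itself with an HZ delay */
};

/* cleanup: push a GC pass through right away and wait for it to finish;
 * the work stays alive because its handler re-queues it. */
static void my_flowtable_gc_cleanup(struct my_flowtable *ft)
{
        flush_delayed_work(&ft->gc_work);
}

/* free: stop GC for good; after this the work is neither pending nor
 * running, so the table memory can safely be released. */
static void my_flowtable_free(struct my_flowtable *ft)
{
        cancel_delayed_work_sync(&ft->gc_work);
}
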
  nf_conncount.c
      62  struct work_struct gc_work;  (member)
     307  schedule_work(&data->gc_work);  in schedule_gc_worker()
     457  struct nf_conncount_data *data = container_of(work, struct nf_conncount_data, gc_work);  in tree_gc_worker()
     555  INIT_WORK(&data->gc_work, tree_gc_worker);  in nf_conncount_init()
     591  cancel_work_sync(&data->gc_work);  in nf_conncount_destroy()

  nft_set_hash.c
      26  struct delayed_work gc_work;  (member)
     323  priv = container_of(work, struct nft_rhash, gc_work.work);  in nft_rhash_gc()
     383  queue_delayed_work(system_power_efficient_wq, &priv->gc_work,  in nft_rhash_gc()
     397  queue_delayed_work(system_power_efficient_wq, &priv->gc_work,  in nft_rhash_gc_init()
     416  INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rhash_gc);  in nft_rhash_init()
     444  cancel_delayed_work_sync(&priv->gc_work);  in nft_rhash_destroy()

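
Unlike the static WireGuard declaration earlier, the nft set backends embed the delayed work in per-set private data, arm it with INIT_DEFERRABLE_WORK() during init, and must cancel it synchronously in destroy before that memory is freed. A short sketch of that lifecycle; struct my_set, my_set_gc() and the 2 * HZ period are invented placeholders:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_set {
        struct delayed_work gc_work;
        /* ... set elements ... */
};

static void my_set_gc(struct work_struct *work)
{
        struct my_set *set = container_of(work, struct my_set, gc_work.work);

        /* ... expire dead elements ... */
        queue_delayed_work(system_power_efficient_wq, &set->gc_work, 2 * HZ);
}

static struct my_set *my_set_create(void)
{
        struct my_set *set = kzalloc(sizeof(*set), GFP_KERNEL);

        if (!set)
                return NULL;
        INIT_DEFERRABLE_WORK(&set->gc_work, my_set_gc);
        queue_delayed_work(system_power_efficient_wq, &set->gc_work, 2 * HZ);
        return set;
}

static void my_set_destroy(struct my_set *set)
{
        /* Must complete before kfree(): the handler dereferences *set. */
        cancel_delayed_work_sync(&set->gc_work);
        kfree(set);
}
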
  nft_set_rbtree.c
      22  struct delayed_work gc_work;  (member)
     626  priv = container_of(work, struct nft_rbtree, gc_work.work);  in nft_rbtree_gc()
     694  queue_delayed_work(system_power_efficient_wq, &priv->gc_work,  in nft_rbtree_gc()
     714  INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);  in nft_rbtree_init()
     716  queue_delayed_work(system_power_efficient_wq, &priv->gc_work,  in nft_rbtree_init()
     729  cancel_delayed_work_sync(&priv->gc_work);  in nft_rbtree_destroy()

  xt_hashlimit.c
     128  struct delayed_work gc_work;  (member)
     352  INIT_DEFERRABLE_WORK(&hinfo->gc_work, htable_gc);  in htable_create()
     353  queue_delayed_work(system_power_efficient_wq, &hinfo->gc_work,  in htable_create()
     383  ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);  in htable_gc()
     388  &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));  in htable_gc()
     429  cancel_delayed_work_sync(&hinfo->gc_work);  in htable_put()

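
xt_hashlimit differs from the fixed HZ period used elsewhere: the re-arm delay comes from a per-table configuration value in milliseconds, converted with msecs_to_jiffies(). A trimmed sketch; struct my_htable and its cfg field are invented, not the xt_hashlimit types:

#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct my_htable {
        struct delayed_work gc_work;
        struct {
                u32 gc_interval;        /* GC period in milliseconds */
        } cfg;
};

static void my_htable_gc(struct work_struct *work)
{
        struct my_htable *ht = container_of(work, struct my_htable, gc_work.work);

        /* ... expire stale buckets ... */
        queue_delayed_work(system_power_efficient_wq, &ht->gc_work,
                           msecs_to_jiffies(ht->cfg.gc_interval));
}

static void my_htable_start_gc(struct my_htable *ht)
{
        INIT_DEFERRABLE_WORK(&ht->gc_work, my_htable_gc);
        queue_delayed_work(system_power_efficient_wq, &ht->gc_work,
                           msecs_to_jiffies(ht->cfg.gc_interval));
}
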
/kernel/linux/linux-5.10/net/netfilter/

  nf_flow_table_core.c
     386  flow_table = container_of(work, struct nf_flowtable, gc_work.work);  in nf_flow_offload_work_gc()
     388  queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);  in nf_flow_offload_work_gc()
     508  INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);  in nf_flow_table_init()
     518  &flowtable->gc_work, HZ);  in nf_flow_table_init()
     547  flush_delayed_work(&flowtable->gc_work);  in nf_flow_table_gc_cleanup()
     568  cancel_delayed_work_sync(&flow_table->gc_work);  in nf_flow_table_free()

  nf_conncount.c
      62  struct work_struct gc_work;  (member)
     296  schedule_work(&data->gc_work);  in schedule_gc_worker()
     446  struct nf_conncount_data *data = container_of(work, struct nf_conncount_data, gc_work);  in tree_gc_worker()
     544  INIT_WORK(&data->gc_work, tree_gc_worker);  in nf_conncount_init()
     580  cancel_work_sync(&data->gc_work);  in nf_conncount_destroy()

  nft_set_hash.c
      29  struct delayed_work gc_work;  (member)
     309  priv = container_of(work, struct nft_rhash, gc_work.work);  in nft_rhash_gc()
     369  queue_delayed_work(system_power_efficient_wq, &priv->gc_work,  in nft_rhash_gc()
     383  queue_delayed_work(system_power_efficient_wq, &priv->gc_work,  in nft_rhash_gc_init()
     402  INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rhash_gc);  in nft_rhash_init()
     430  cancel_delayed_work_sync(&priv->gc_work);  in nft_rhash_destroy()

  nft_set_rbtree.c
      25  struct delayed_work gc_work;  (member)
     628  priv = container_of(work, struct nft_rbtree, gc_work.work);  in nft_rbtree_gc()
     693  queue_delayed_work(system_power_efficient_wq, &priv->gc_work,  in nft_rbtree_gc()
     713  INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);  in nft_rbtree_init()
     715  queue_delayed_work(system_power_efficient_wq, &priv->gc_work,  in nft_rbtree_init()
     728  cancel_delayed_work_sync(&priv->gc_work);  in nft_rbtree_destroy()

  nf_conntrack_core.c
    1356  struct conntrack_gc_work *gc_work;  in gc_worker()  (local)
    1357  gc_work = container_of(work, struct conntrack_gc_work, dwork.work);  in gc_worker()
    1359  i = gc_work->next_bucket;  in gc_worker()
    1360  if (gc_work->early_drop)  in gc_worker()
    1423  gc_work->next_bucket = i;  in gc_worker()
    1429  if (gc_work->exiting)  in gc_worker()
    1440  gc_work->early_drop = false;  in gc_worker()
    1441  gc_work->next_bucket = 0;  in gc_worker()
    1443  queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);  in gc_worker()
    1446  static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)  in conntrack_gc_work_init()  (argument)
    [all...]

  xt_hashlimit.c
     128  struct delayed_work gc_work;  (member)
     352  INIT_DEFERRABLE_WORK(&hinfo->gc_work, htable_gc);  in htable_create()
     353  queue_delayed_work(system_power_efficient_wq, &hinfo->gc_work,  in htable_create()
     383  ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);  in htable_gc()
     388  &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));  in htable_gc()
     429  cancel_delayed_work_sync(&hinfo->gc_work);  in htable_put()

/kernel/linux/linux-5.10/drivers/gpu/drm/qxl/

  qxl_kms.c
     104  struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);  in qxl_gc_work()
     265  INIT_WORK(&qdev->gc_work, qxl_gc_work);  in qxl_device_init()
     294  flush_work(&qdev->gc_work);  in qxl_device_fini()

  qxl_cmd.c
     212  schedule_work(&qdev->gc_work);  in qxl_queue_garbage_collect()
     214  flush_work(&qdev->gc_work);  in qxl_queue_garbage_collect()

  qxl_drv.h
     266  struct work_struct gc_work;  (member)

/kernel/linux/linux-5.10/net/bridge/

  br_stp_if.c
      56  mod_delayed_work(system_long_wq, &br->gc_work, HZ / 10);  in br_stp_enable_bridge()
      87  cancel_delayed_work_sync(&br->gc_work);  in br_stp_disable_bridge()

  br_ioctl.c
     148  b.gc_timer_value = br_timer_value(&br->gc_work.timer);  in old_dev_ioctl()

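
Two details are specific to the bridge hits: br_stp_enable_bridge() uses mod_delayed_work(), which re-arms the FDB GC to fire in HZ / 10 whether or not it was already pending (and simply queues it if it was idle), and the old ioctl reports how long remains until the next run by looking at the timer embedded in the delayed work. A hedged sketch of both, with an invented struct my_bridge standing in for net_bridge and a simplified helper in place of br_timer_value():

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct my_bridge {
        struct delayed_work gc_work;
};

static void my_bridge_enable(struct my_bridge *br)
{
        /*
         * If gc_work is idle this behaves like queue_delayed_work();
         * if it is already pending, only the expiry is moved to HZ / 10.
         */
        mod_delayed_work(system_long_wq, &br->gc_work, HZ / 10);
}

static void my_bridge_disable(struct my_bridge *br)
{
        cancel_delayed_work_sync(&br->gc_work);
}

/* Jiffies until the next GC pass, 0 if none is scheduled (roughly what
 * br_timer_value() computes before converting to clock_t). */
static unsigned long my_gc_time_left(const struct my_bridge *br)
{
        const struct timer_list *t = &br->gc_work.timer;

        return timer_pending(t) ? t->expires - jiffies : 0;
}
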
/kernel/linux/linux-6.6/net/bridge/

  br_stp_if.c
      56  mod_delayed_work(system_long_wq, &br->gc_work, HZ / 10);  in br_stp_enable_bridge()
      87  cancel_delayed_work_sync(&br->gc_work);  in br_stp_disable_bridge()

/kernel/linux/linux-5.10/include/net/netfilter/

  nf_flow_table.h
      74  struct delayed_work gc_work;  (member)

/kernel/linux/linux-6.6/include/net/netfilter/

  nf_flow_table.h
      81  struct delayed_work gc_work;  (member)

/kernel/linux/linux-5.10/include/net/

  neighbour.h
     218  struct delayed_work gc_work;  (member)

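
These header hits, like the ones above, embed a struct delayed_work directly in a larger object, which is what explains the sub-field accesses seen throughout: a delayed_work is essentially a work_struct plus the timer that queues it, so GC handlers do container_of(work, T, gc_work.work) and the bridge ioctl reads br->gc_work.timer. Simplified from include/linux/workqueue.h, with bookkeeping fields omitted:

struct delayed_work {
        struct work_struct work;        /* what the worker thread actually runs */
        struct timer_list timer;        /* fires after the delay and queues ->work */
        /* plus the target workqueue and CPU the timer should queue ->work on */
};

INIT_DELAYED_WORK() and INIT_DEFERRABLE_WORK() set up the same pair; the deferrable variant only differs in marking that timer so it does not wake an otherwise idle CPU.
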