/device/soc/rockchip/common/vendor/drivers/gpu/arm/mali400/mali/linux/

  mali_osk_wq.c
    37:  struct delayed_work work;   [member]
    45:  static void _mali_osk_wq_work_func(struct work_struct *work);
    111: mali_osk_wq_work_object_t *work = kmalloc(sizeof(mali_osk_wq_work_object_t), GFP_KERNEL);   [in mali_osk_wq_create_work(), local]
    113: if (work == NULL) {   [in mali_osk_wq_create_work()]
    117: work->handler = handler;   [in mali_osk_wq_create_work()]
    118: work->data = data;   [in mali_osk_wq_create_work()]
    119: work->high_pri = MALI_FALSE;   [in mali_osk_wq_create_work()]
    121: INIT_WORK(&work->work_handle, _mali_osk_wq_work_func);   [in mali_osk_wq_create_work()]
    123: return work;   [in mali_osk_wq_create_work()]
    128: mali_osk_wq_work_object_t *work ...   [in _mali_osk_wq_create_work_high_pri(), local]
    143: _mali_osk_wq_delete_work(_mali_osk_wq_work_t *work)   [in _mali_osk_wq_delete_work(), argument]
    150: _mali_osk_wq_delete_work_nonflush(_mali_osk_wq_work_t *work)   [in _mali_osk_wq_delete_work_nonflush(), argument]
    156: mali_osk_wq_schedule_work(_mali_osk_wq_work_t *work)   [in mali_osk_wq_schedule_work(), argument]
    166: mali_osk_wq_schedule_work_high_pri(_mali_osk_wq_work_t *work)   [in mali_osk_wq_schedule_work_high_pri(), argument]
    176: _mali_osk_wq_work_func(struct work_struct *work)   [in _mali_osk_wq_work_func(), argument]
    198: _mali_osk_wq_delayed_work_func(struct work_struct *work)   [in _mali_osk_wq_delayed_work_func(), argument]
    208: mali_osk_wq_delayed_work_object_t *work = kmalloc(sizeof(mali_osk_wq_delayed_work_object_t), GFP_KERNEL);   [in _mali_osk_wq_delayed_create_work(), local]
    222: mali_osk_wq_delayed_delete_work_nonflush(_mali_osk_wq_delayed_work_t *work)   [in mali_osk_wq_delayed_delete_work_nonflush(), argument]
    228: _mali_osk_wq_delayed_cancel_work_async(_mali_osk_wq_delayed_work_t *work)   [in _mali_osk_wq_delayed_cancel_work_async(), argument]
    234: mali_osk_wq_delayed_cancel_work_sync(_mali_osk_wq_delayed_work_t *work)   [in mali_osk_wq_delayed_cancel_work_sync(), argument]
    240: _mali_osk_wq_delayed_schedule_work(_mali_osk_wq_delayed_work_t *work, u32 delay)   [in _mali_osk_wq_delayed_schedule_work(), argument]
    (more matches not shown)

  mali_internal_sync.c
    99:  sync_fence = (struct mali_internal_sync_fence *)waiter->work.private;   [in mali_internal_fence_check_cb_func()]
    155: wait = container_of(curr, struct mali_internal_sync_fence_waiter, work);   [in mali_internal_sync_fence_wake_up_wq()]
    157: list_del_init(&wait->work.entry);   [in mali_internal_sync_fence_wake_up_wq()]
    159: list_del_init(&wait->work.task_list);   [in mali_internal_sync_fence_wake_up_wq()]
    161: wait->callback(wait->work.private, wait);   [in mali_internal_sync_fence_wake_up_wq()]
    581: INIT_LIST_HEAD(&waiter->work.entry);   [in mali_internal_sync_fence_waiter_init()]
    583: INIT_LIST_HEAD(&waiter->work.task_list);   [in mali_internal_sync_fence_waiter_init()]
    607: init_waitqueue_func_entry(&waiter->work, ...   [in mali_internal_sync_fence_wait_async()]
    609: waiter->work.private = sync_fence;   [in mali_internal_sync_fence_wait_async()]
    614: __add_wait_queue_tail(&sync_fence->wq, &waiter->work);   [in mali_internal_sync_fence_wait_async()]
    (more matches not shown)
/device/soc/rockchip/common/kernel/drivers/gpu/arm/mali400/mali/linux/

  mali_osk_wq.c
    36:  struct delayed_work work;   [member]
    44:  static void _mali_osk_wq_work_func(struct work_struct *work);
    106: mali_osk_wq_work_object_t *work = kmalloc(sizeof(mali_osk_wq_work_object_t), GFP_KERNEL);   [in _mali_osk_wq_create_work(), local]
    108: if (NULL == work) return NULL;   [in _mali_osk_wq_create_work()]
    110: work->handler = handler;   [in _mali_osk_wq_create_work()]
    111: work->data = data;   [in _mali_osk_wq_create_work()]
    112: work->high_pri = MALI_FALSE;   [in _mali_osk_wq_create_work()]
    114: INIT_WORK(&work->work_handle, _mali_osk_wq_work_func);   [in _mali_osk_wq_create_work()]
    116: return work;   [in _mali_osk_wq_create_work()]
    121: mali_osk_wq_work_object_t *work ...   [in _mali_osk_wq_create_work_high_pri(), local]
    134: _mali_osk_wq_delete_work(_mali_osk_wq_work_t *work)   [in _mali_osk_wq_delete_work(), argument]
    141: _mali_osk_wq_delete_work_nonflush(_mali_osk_wq_work_t *work)   [in _mali_osk_wq_delete_work_nonflush(), argument]
    147: _mali_osk_wq_schedule_work(_mali_osk_wq_work_t *work)   [in _mali_osk_wq_schedule_work(), argument]
    157: _mali_osk_wq_schedule_work_high_pri(_mali_osk_wq_work_t *work)   [in _mali_osk_wq_schedule_work_high_pri(), argument]
    167: _mali_osk_wq_work_func(struct work_struct *work)   [in _mali_osk_wq_work_func(), argument]
    190: _mali_osk_wq_delayed_work_func(struct work_struct *work)   [in _mali_osk_wq_delayed_work_func(), argument]
    200: mali_osk_wq_delayed_work_object_t *work = kmalloc(sizeof(mali_osk_wq_delayed_work_object_t), GFP_KERNEL);   [in _mali_osk_wq_delayed_create_work(), local]
    212: _mali_osk_wq_delayed_delete_work_nonflush(_mali_osk_wq_delayed_work_t *work)   [in _mali_osk_wq_delayed_delete_work_nonflush(), argument]
    218: _mali_osk_wq_delayed_cancel_work_async(_mali_osk_wq_delayed_work_t *work)   [in _mali_osk_wq_delayed_cancel_work_async(), argument]
    224: _mali_osk_wq_delayed_cancel_work_sync(_mali_osk_wq_delayed_work_t *work)   [in _mali_osk_wq_delayed_cancel_work_sync(), argument]
    230: _mali_osk_wq_delayed_schedule_work(_mali_osk_wq_delayed_work_t *work, u32 delay)   [in _mali_osk_wq_delayed_schedule_work(), argument]
    (more matches not shown)

  mali_internal_sync.c
    88:  sync_fence = (struct mali_internal_sync_fence *)waiter->work.private;   [in mali_internal_fence_check_cb_func()]
    136: wait = container_of(curr, struct mali_internal_sync_fence_waiter, work);   [in mali_internal_sync_fence_wake_up_wq()]
    138: list_del_init(&wait->work.entry);   [in mali_internal_sync_fence_wake_up_wq()]
    140: list_del_init(&wait->work.task_list);   [in mali_internal_sync_fence_wake_up_wq()]
    142: wait->callback(wait->work.private, wait);   [in mali_internal_sync_fence_wake_up_wq()]
    521: INIT_LIST_HEAD(&waiter->work.entry);   [in mali_internal_sync_fence_waiter_init()]
    523: INIT_LIST_HEAD(&waiter->work.task_list);   [in mali_internal_sync_fence_waiter_init()]
    545: init_waitqueue_func_entry(&waiter->work, mali_internal_sync_fence_wake_up_wq);   [in mali_internal_sync_fence_wait_async()]
    546: waiter->work.private = sync_fence;   [in mali_internal_sync_fence_wait_async()]
    552: __add_wait_queue_tail(&sync_fence->wq, &waiter->work);   [in mali_internal_sync_fence_wait_async()]
    (more matches not shown)
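  The mali_osk_wq.c hits above all belong to one pattern: a heap-allocated wrapper object embeds a struct work_struct next to a caller-supplied handler and data pointer, and the generic work function uses container_of() to get back to the wrapper. A minimal sketch of that pattern follows; the names (osk_work_demo, demo_*) are illustrative stand-ins, not the driver's types, and scheduling is shown on the shared system workqueue even though the driver uses its own queues.

  #include <linux/workqueue.h>
  #include <linux/slab.h>

  struct osk_work_demo {
          void (*handler)(void *data);    /* caller-supplied callback */
          void *data;                     /* opaque argument for the callback */
          struct work_struct work_handle; /* embedded kernel work item */
  };

  /* Generic trampoline: recover the wrapper and call the stored handler. */
  static void osk_work_demo_func(struct work_struct *work)
  {
          struct osk_work_demo *w = container_of(work, struct osk_work_demo, work_handle);

          w->handler(w->data);
  }

  static struct osk_work_demo *osk_work_demo_create(void (*handler)(void *), void *data)
  {
          struct osk_work_demo *w = kmalloc(sizeof(*w), GFP_KERNEL);

          if (!w)
                  return NULL;

          w->handler = handler;
          w->data = data;
          INIT_WORK(&w->work_handle, osk_work_demo_func);
          return w;
  }

  /* Schedule on the system workqueue; the real driver picks normal/high-pri queues. */
  static void osk_work_demo_schedule(struct osk_work_demo *w)
  {
          schedule_work(&w->work_handle);
  }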
/device/soc/hisilicon/hi3751v350/sdk_linux/source/common/drv/osal/

  osal_workqueue.c
    28:  struct work_struct *work;   [member]
    32:  static osal_workqueue *osal_find_work(struct work_struct *work)   [in osal_find_work(), argument]
    36:  if (work == NULL) {   [in osal_find_work()]
    42:  osal_printk("find work failed! wq_list is empty!\n");   [in osal_find_work()]
    47:  if (ws->work == work) {   [in osal_find_work()]
    51:  osal_printk("find work failed!\n");   [in osal_find_work()]
    55:  static int osal_del_work(struct work_struct *work)   [in osal_del_work(), argument]
    60:  if (work == NULL) {   [in osal_del_work()]
    66:  osal_printk("find work faile ...   [in osal_del_work()]
    81:  osal_work_handler(struct work_struct *work)   [in osal_work_handler(), argument]
    89:  osal_workqueue_init(osal_workqueue *work, osal_workqueue_handler handler)   [in osal_workqueue_init(), argument]
    121: osal_workqueue_schedule(osal_workqueue *work)   [in osal_workqueue_schedule(), argument]
    131: osal_workqueue_destroy(osal_workqueue *work)   [in osal_workqueue_destroy(), argument]
    141: osal_workqueue_flush(osal_workqueue *work)   [in osal_workqueue_flush(), argument]
    (more matches not shown)
/device/soc/hisilicon/hi3516dv300/sdk_linux/drv/osal/linux/

  osal_workqueue.c
    27:  struct work_struct *work;   [member]
    31:  static struct osal_work_struct *osal_find_work(struct work_struct *work)   [in osal_find_work(), argument]
    35:  osal_trace("find work failed! wq_list is empty!\n");   [in osal_find_work()]
    40:  if (ws->work == work) {   [in osal_find_work()]
    44:  osal_trace("find work failed!\n");   [in osal_find_work()]
    48:  static int osal_del_work(struct work_struct *work)   [in osal_del_work(), argument]
    53:  osal_trace("find work failed! wq_list is empty!\n");   [in osal_del_work()]
    58:  if (ws->work == work) {   [in osal_del_work()]
    69:  osal_work_func(struct work_struct *work)   [in osal_work_func(), argument]
    77:  osal_init_work(struct osal_work_struct *work, osal_work_func_t func)   [in osal_init_work(), argument]
    104: osal_schedule_work(struct osal_work_struct *work)   [in osal_schedule_work(), argument]
    114: osal_destroy_work(struct osal_work_struct *work)   [in osal_destroy_work(), argument]
    (more matches not shown)
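  Both HiSilicon osal_workqueue.c copies keep the kernel work_struct outside the OSAL object (note the struct work_struct *work member), so the shared handler cannot use container_of() and instead looks the wrapper up in a global registry, which is what osal_find_work()/osal_del_work() do. A sketch of that lookup pattern, assuming a simple spinlock-protected list; all names (demo_osal_work, demo_list, ...) are illustrative.

  #include <linux/workqueue.h>
  #include <linux/list.h>
  #include <linux/spinlock.h>

  struct demo_osal_work {
          struct work_struct *work;      /* kernel work item owned by this node */
          void (*handler)(void *data);
          void *data;
          struct list_head node;         /* membership in the global registry */
  };

  static LIST_HEAD(demo_list);
  static DEFINE_SPINLOCK(demo_list_lock);

  static struct demo_osal_work *demo_find_work(struct work_struct *work)
  {
          struct demo_osal_work *ws;

          spin_lock(&demo_list_lock);
          list_for_each_entry(ws, &demo_list, node) {
                  if (ws->work == work) {
                          spin_unlock(&demo_list_lock);
                          return ws;
                  }
          }
          spin_unlock(&demo_list_lock);
          return NULL;                   /* caller logs "find work failed" */
  }

  /* Generic trampoline installed with INIT_WORK(); resolves the wrapper first. */
  static void demo_work_handler(struct work_struct *work)
  {
          struct demo_osal_work *ws = demo_find_work(work);

          if (ws)
                  ws->handler(ws->data);
  }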
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/backend/gpu/

  mali_kbase_model_linux.c
    37:    struct work_struct work;   [member]
    40:    static void serve_job_irq(struct work_struct *work)   [in serve_job_irq(), argument]
    42-43: struct model_irq_data *data = container_of(work, struct model_irq_data, work);   [in serve_job_irq()]
    68:    static void serve_gpu_irq(struct work_struct *work)   [in serve_gpu_irq(), argument]
    70-71: struct model_irq_data *data = container_of(work, struct model_irq_data, work);   [in serve_gpu_irq()]
    88:    static void serve_mmu_irq(struct work_struct *work)   [in serve_mmu_irq(), argument]
    90-91: struct model_irq_data *data = container_of(work, struct model_irq_data, work);   [in serve_mmu_irq()]
    (more matches not shown)
/device/soc/rockchip/common/kernel/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd_wifi6/

  dhd_linux_wq.c
    3:   * Broadcom Dongle Host Driver (DHD), Generic work queue framework
    4:   * Generic interface to handle dhd deferred work events
    60:  * work events may occur simultaneously.
    103: /* deferred work functions */
    109: struct dhd_deferred_wq *work = NULL;   [in dhd_deferred_work_init(), local]
    119: work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq), ...   [in dhd_deferred_work_init()]
    121: if (!work) {   [in dhd_deferred_work_init()]
    122: DHD_ERROR(("%s: work queue creation failed\n", __FUNCTION__));   [in dhd_deferred_work_init()]
    126: INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler);   [in dhd_deferred_work_init()]
    129: spin_lock_init(&work ...   [in dhd_deferred_work_init()]
    180: dhd_deferred_work_deinit(void *work)   [in dhd_deferred_work_deinit(), argument]
    343: dhd_deferred_work_handler(struct work_struct *work)   [in dhd_deferred_work_handler(), argument]
    381: dhd_deferred_work_set_skip(void *work, u8 event, bool set)   [in dhd_deferred_work_set_skip(), argument]
    (more matches not shown)

  dhd_debug_linux.c
    55:  struct delayed_work work;   [member]
    103: dbg_ring_poll_worker(struct work_struct *work)   [in dbg_ring_poll_worker(), argument]
    105: struct delayed_work *d_work = to_delayed_work(work);   [in dbg_ring_poll_worker()]
    113: container_of(d_work, linux_dbgring_info_t, work);   [in dbg_ring_poll_worker()]
    170: /* retrigger the work at same interval */   [in dbg_ring_poll_worker()]
    232: cancel_delayed_work_sync(&ring_info->work);   [in dhd_os_start_logging()]
    235: cancel_delayed_work_sync(&ring_info->work);   [in dhd_os_start_logging()]
    236: schedule_delayed_work(&ring_info->work, ring_info->interval);   [in dhd_os_start_logging()]
    258: /* cancel any pending work */   [in dhd_os_reset_logging()]
    259: cancel_delayed_work_sync(&ring_info->work);   [in dhd_os_reset_logging()]
    (more matches not shown)

  wl_cfg_btcoex.c
    58:  struct work_struct work;   [member]
    311: schedule_work(&bt_local->work);   [in wl_cfg80211_bt_timerfunc()]
    314: static void wl_cfg80211_bt_handler(struct work_struct *work)   [in wl_cfg80211_bt_handler(), argument]
    319: btcx_inf = container_of(work, struct btcoex_info, work);   [in wl_cfg80211_bt_handler()]
    404: INIT_WORK(&btco_inf->work, wl_cfg80211_bt_handler);   [in wl_cfg80211_btcoex_init()]
    420: cancel_work_sync(&btcoex_info_loc->work);   [in wl_cfg80211_btcoex_deinit()]
    537: schedule_work(&btco_inf->work);   [in wl_cfg80211_set_btcoex_dhcp()]
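  The dhd_debug_linux.c hits show a self-rearming poller: the worker converts its work_struct back into the delayed_work it is embedded in, recovers the owning object with container_of(), and re-queues itself at the configured interval; start/stop paths arm or cancel it with cancel_delayed_work_sync(). A minimal sketch under those assumptions; demo_ring_info and its fields stand in for the driver's linux_dbgring_info_t.

  #include <linux/workqueue.h>
  #include <linux/jiffies.h>
  #include <linux/types.h>

  struct demo_ring_info {
          struct delayed_work work;   /* periodic poll work */
          unsigned long interval;     /* poll period, in jiffies */
          bool stop;                  /* set by the teardown path */
  };

  static void demo_ring_poll_worker(struct work_struct *work)
  {
          struct delayed_work *d_work = to_delayed_work(work);
          struct demo_ring_info *ri = container_of(d_work, struct demo_ring_info, work);

          /* ... drain the debug ring / push entries to userspace here ... */

          /* retrigger the work at the same interval unless we are stopping */
          if (!ri->stop)
                  schedule_delayed_work(&ri->work, ri->interval);
  }

  static void demo_ring_start(struct demo_ring_info *ri, unsigned int period_ms)
  {
          ri->stop = false;
          ri->interval = msecs_to_jiffies(period_ms);
          INIT_DELAYED_WORK(&ri->work, demo_ring_poll_worker);
          schedule_delayed_work(&ri->work, ri->interval);
  }

  static void demo_ring_stop(struct demo_ring_info *ri)
  {
          ri->stop = true;
          cancel_delayed_work_sync(&ri->work);   /* waits for a running poll to finish */
  }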
/device/soc/hisilicon/hi3861v100/sdk_liteos/include/

  hi_workqueue.h
    44: /* init work. */
    45: inline hi_void hi_workqueue_init_work(hi_work *work, work_func func)   [in hi_workqueue_init_work(), argument]
    47: INIT_WORK(work, func);   [in hi_workqueue_init_work()]
    50: /* add work to workqueue. */
    51: inline hi_s32 hi_workqueue_add_work(hi_workqueue *workqueue, hi_work *work)   [in hi_workqueue_add_work(), argument]
    53: return queue_work(workqueue, work);   [in hi_workqueue_add_work()]
    56: /* cancle work from workqueue. */
    57: inline hi_bool hi_workqueue_cancle_work_sync(hi_work *work)   [in hi_workqueue_cancle_work_sync(), argument]
    59: return cancel_work_sync(work);   [in hi_workqueue_cancle_work_sync()]
    62: /* judge work i ...   [in hi_workqueue_is_busy()]
    63: hi_workqueue_is_busy(hi_work *work)   [in hi_workqueue_is_busy(), argument]
    (more matches not shown)
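  These hi_workqueue.h wrappers pass their arguments straight through to INIT_WORK()/queue_work()/cancel_work_sync(), so hi_work and hi_workqueue appear to behave like struct work_struct and a workqueue handle. A hedged usage sketch using only the wrappers visible in the hits; how the hi_workqueue itself is created is not shown here, so the wq handle below is assumed to come from the SDK elsewhere, and the handler signature is assumed to match the work_func type used by INIT_WORK().

  #include "hi_workqueue.h"              /* header listed above */

  /* Assumed to match work_func as consumed by hi_workqueue_init_work(). */
  static hi_void demo_work_body(hi_work *work)
  {
      (hi_void)work;
      /* ... deferred processing ... */
  }

  static hi_work g_demo_work;

  /* wq is assumed to be created by the SDK's own helper (not in these hits). */
  hi_void demo_submit(hi_workqueue *wq)
  {
      hi_workqueue_init_work(&g_demo_work, demo_work_body);

      if (!hi_workqueue_is_busy(&g_demo_work)) {
          (hi_void)hi_workqueue_add_work(wq, &g_demo_work);
      }
  }

  hi_void demo_teardown(hi_void)
  {
      (hi_void)hi_workqueue_cancle_work_sync(&g_demo_work);  /* sic: SDK spelling */
  }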
/device/soc/rockchip/common/vendor/drivers/media/platform/rockchip/cif/

  cif-luma.c
    153: /* Make sure no new work queued in isr before draining wq */   [in rkcif_luma_vb2_stop_streaming()]
    212: static void rkcif_stats_send_luma(struct rkcif_luma_vdev *vdev, struct rkcif_luma_readout_work *work)   [in rkcif_stats_send_luma(), argument]
    238: cur_stat_buf->frame_id = work->frame_id;   [in rkcif_stats_send_luma()]
    239: cur_stat_buf->meas_type = work->meas_type;   [in rkcif_stats_send_luma()]
    242: cur_stat_buf->luma[i].exp_mean[j] = work->luma[i].exp_mean[j];   [in rkcif_stats_send_luma()]
    249: cur_buf->vb.vb2_buf.timestamp = work->timestamp;   [in rkcif_stats_send_luma()]
    256: struct rkcif_luma_readout_work work;   [in rkcif_luma_readout_task(), local]
    260: out = kfifo_out(&vdev->rd_kfifo, &work, sizeof(work));   [in rkcif_luma_readout_task()]
    265: if (work ...   [in rkcif_luma_readout_task()]
    (more matches not shown)
/device/soc/rockchip/common/vendor/drivers/media/platform/rockchip/isp/

  isp_mipi_luma.c
    148: /* Make sure no new work queued in isr before draining wq */   [in rkisp_luma_vb2_stop_streaming()]
    208: static void rkisp_stats_send_luma(struct rkisp_luma_vdev *vdev, struct rkisp_luma_readout_work *work)   [in rkisp_stats_send_luma(), argument]
    234: cur_stat_buf->frame_id = work->frame_id;   [in rkisp_stats_send_luma()]
    235: cur_stat_buf->meas_type = work->meas_type;   [in rkisp_stats_send_luma()]
    238: cur_stat_buf->luma[i].exp_mean[j] = work->luma[i].exp_mean[j];   [in rkisp_stats_send_luma()]
    245: cur_buf->vb.vb2_buf.timestamp = work->timestamp;   [in rkisp_stats_send_luma()]
    252: struct rkisp_luma_readout_work work;   [in rkisp_luma_readout_task(), local]
    256: out = kfifo_out(&vdev->rd_kfifo, &work, sizeof(work));   [in rkisp_luma_readout_task()]
    261: if (work ...   [in rkisp_luma_readout_task()]
    (more matches not shown)
/device/soc/rockchip/rk3588/kernel/drivers/media/platform/rockchip/cif/

  cif-luma.c
    162:     /* Make sure no new work queued in isr before draining wq */   [in rkcif_luma_vb2_stop_streaming()]
    224-225: static void rkcif_stats_send_luma(struct rkcif_luma_vdev *vdev, struct rkcif_luma_readout_work *work)   [in rkcif_stats_send_luma(), argument]
    255:     cur_stat_buf->frame_id = work->frame_id;   [in rkcif_stats_send_luma()]
    256:     cur_stat_buf->meas_type = work->meas_type;   [in rkcif_stats_send_luma()]
    260:     work->luma[i].exp_mean[j];   [in rkcif_stats_send_luma()]
    268:     cur_buf->vb.vb2_buf.timestamp = work->timestamp;   [in rkcif_stats_send_luma()]
    275:     struct rkcif_luma_readout_work work;   [in rkcif_luma_readout_task(), local]
    281:     &work, sizeof(work));   [in rkcif_luma_readout_task()]
    285:     if (work ...   [in rkcif_luma_readout_task()]
    (more matches not shown)
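  The three luma files above share an ISR-to-worker handoff: the interrupt path pushes a small, fixed-size readout record into a kfifo, and a readout task drains it later in process context (kfifo_out(&vdev->rd_kfifo, &work, sizeof(work))). A sketch of that handoff using a typed kfifo and an ordinary work item instead of the drivers' readout task; the record and device names (demo_readout, demo_vdev) are illustrative, and INIT_KFIFO()/INIT_WORK() are assumed to run at probe time.

  #include <linux/kfifo.h>
  #include <linux/workqueue.h>
  #include <linux/ktime.h>
  #include <linux/types.h>

  struct demo_readout {
          u32 frame_id;
          u32 meas_type;
          u64 timestamp;
  };

  struct demo_vdev {
          DECLARE_KFIFO(rd_kfifo, struct demo_readout, 16);  /* power-of-two depth */
          struct work_struct readout_work;
  };

  /* ISR side: record what was measured and defer the heavy lifting. */
  static void demo_isr_push(struct demo_vdev *vdev, u32 frame_id, u32 meas_type)
  {
          struct demo_readout r = {
                  .frame_id = frame_id,
                  .meas_type = meas_type,
                  .timestamp = ktime_get_ns(),
          };

          if (kfifo_in(&vdev->rd_kfifo, &r, 1))
                  schedule_work(&vdev->readout_work);
  }

  /* Worker side: drain everything that accumulated since the last run. */
  static void demo_readout_work(struct work_struct *work)
  {
          struct demo_vdev *vdev = container_of(work, struct demo_vdev, readout_work);
          struct demo_readout r;

          while (kfifo_out(&vdev->rd_kfifo, &r, 1)) {
                  /* ... copy r into the next stats buffer and hand it to userspace ... */
          }
  }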
/device/soc/rockchip/common/kernel/drivers/gpu/arm/mali400/mali/common/

  mali_osk.h
    89:  /** @brief Initialize work queues (for deferred work)
    95:  /** @brief Terminate work queues (for deferred work)
    99:  /** @brief Create work in the work queue
    101: * Creates a work object which can be scheduled in the work queue. When
    104: * Refer to \ref _mali_osk_wq_schedule_work() for details on how work
    114: * Creates a work object ...
    (more matches not shown)
/device/soc/rockchip/common/vendor/drivers/gpu/arm/mali400/mali/common/

  mali_osk.h
    106: /** @brief Initialize work queues (for deferred work)
    112: /** @brief Terminate work queues (for deferred work)
    116: /** @brief Create work in the work queue
    118: * Creates a work object which can be scheduled in the work queue. When
    121: * Refer to \ref mali_osk_wq_schedule_work() for details on how work
    131: * Creates a work object ...
    (more matches not shown)
/device/soc/hisilicon/common/platform/wifi/hi3881v100/driver/oal/

  oal_workqueue.h
    119: * queue_delayed_work - queue work on a workqueue after delay
    121: * @dwork: delayable work to queue
    134: * queue_delayed_work_on - queue work on specific CPU after delay
    135: * @cpu: CPU number to execute work on
    137: * @dwork: work to queue
    140: * Returns %false if @work was already on a queue, %true otherwise. If
    151: Function description: queue work on system wq after delay
    152: Input parameter: @dwork: delayable work to queue
    174: #define oal_work_is_busy(work) work_busy(work)
    (more matches not shown)
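  The documentation hits above describe the standard queue_delayed_work() contract: it returns false if the delayed work was already pending and true if it was newly queued. A short illustration of that contract on the default system workqueue; retry_dwork/retry_handler are illustrative names.

  #include <linux/workqueue.h>
  #include <linux/jiffies.h>
  #include <linux/printk.h>

  static void retry_handler(struct work_struct *work)
  {
          /* runs in process context once the delay has elapsed */
  }

  static DECLARE_DELAYED_WORK(retry_dwork, retry_handler);

  static void arm_retry(unsigned int delay_ms)
  {
          /* system_wq is the kernel's default workqueue ("system wq" above). */
          if (!queue_delayed_work(system_wq, &retry_dwork, msecs_to_jiffies(delay_ms)))
                  pr_debug("retry already pending, existing timer left untouched\n");
  }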
/device/soc/rockchip/common/vendor/drivers/gpu/arm/midgard/

  mali_kbase_softjobs.c
    206:  static void kbasep_soft_event_complete_job(struct work_struct *work)   [in kbasep_soft_event_complete_job(), argument]
    208:  struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom, work);   [in kbasep_soft_event_complete_job()]
    238:  INIT_WORK(&katom->work, kbasep_soft_event_complete_job);   [in kbasep_complete_triggered_soft_events()]
    239:  queue_work(kctx->jctx.job_done_wq, &katom->work);   [in kbasep_complete_triggered_soft_events()]
    325:  struct work_struct work;   [member]
    328:  static void kbase_fence_debug_wait_timeout_worker(struct work_struct *work)   [in kbase_fence_debug_wait_timeout_worker(), argument]
    330:  struct kbase_fence_debug_work *w = container_of(work, struct kbase_fence_debug_work, work);   [in kbase_fence_debug_wait_timeout_worker()]
    343:  struct kbase_fence_debug_work *work;   [in kbase_fence_debug_timeout(), local]
    1107: kbasep_jit_free_finish_worker(struct work_struct *work)   [in kbasep_jit_free_finish_worker(), argument]
    (more matches not shown)
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/

  mali_kbase_softjobs.c
    213:  static void kbasep_soft_event_complete_job(struct work_struct *work)   [in kbasep_soft_event_complete_job(), argument]
    215:  struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom, work);   [in kbasep_soft_event_complete_job()]
    245:  INIT_WORK(&katom->work, kbasep_soft_event_complete_job);   [in kbasep_complete_triggered_soft_events()]
    246:  queue_work(kctx->jctx.job_done_wq, &katom->work);   [in kbasep_complete_triggered_soft_events()]
    332:  struct work_struct work;   [member]
    335:  static void kbase_fence_debug_wait_timeout_worker(struct work_struct *work)   [in kbase_fence_debug_wait_timeout_worker(), argument]
    337:  struct kbase_fence_debug_work *w = container_of(work, struct kbase_fence_debug_work, work);   [in kbase_fence_debug_wait_timeout_worker()]
    350:  struct kbase_fence_debug_work *work;   [in kbase_fence_debug_timeout(), local]
    1316: kbasep_jit_finish_worker(struct work_struct *work)   [in kbasep_jit_finish_worker(), argument]
    (more matches not shown)
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/

  mali_kbase_softjobs.c
    222:     static void kbasep_soft_event_complete_job(struct work_struct *work)   [in kbasep_soft_event_complete_job(), argument]
    224-225: struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom, work);   [in kbasep_soft_event_complete_job()]
    254:     INIT_WORK(&katom->work, ...   [in kbasep_complete_triggered_soft_events()]
    257:     ... &katom->work);   [in kbasep_complete_triggered_soft_events()]
    350:     struct work_struct work;   [member]
    353:     static void kbase_fence_debug_wait_timeout_worker(struct work_struct *work)   [in kbase_fence_debug_wait_timeout_worker(), argument]
    355-356: struct kbase_fence_debug_work *w = container_of(work, struct kbase_fence_debug_work, work);   [in kbase_fence_debug_wait_timeout_worker()]
    369:     struct kbase_fence_debug_work *work;   [in kbase_fence_debug_timeout(), local]
    1349:    kbasep_jit_finish_worker(struct work_struct *work)   [in kbasep_jit_finish_worker(), argument]
    (more matches not shown)
/device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/

  mali_kbase_softjobs.c
    211:     static void kbasep_soft_event_complete_job(struct work_struct *work)   [in kbasep_soft_event_complete_job(), argument]
    213-214: struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom, work);   [in kbasep_soft_event_complete_job()]
    243:     INIT_WORK(&katom->work, ...   [in kbasep_complete_triggered_soft_events()]
    246:     ... &katom->work);   [in kbasep_complete_triggered_soft_events()]
    339:     struct work_struct work;   [member]
    342:     static void kbase_fence_debug_wait_timeout_worker(struct work_struct *work)   [in kbase_fence_debug_wait_timeout_worker(), argument]
    344-345: struct kbase_fence_debug_work *w = container_of(work, struct kbase_fence_debug_work, work);   [in kbase_fence_debug_wait_timeout_worker()]
    358:     struct kbase_fence_debug_work *work;   [in kbase_fence_debug_timeout(), local]
    1138:    kbasep_jit_free_finish_worker(struct work_struct *work)   [in kbasep_jit_free_finish_worker(), argument]
    (more matches not shown)
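  Across the four mali_kbase_softjobs.c copies the fence-debug path allocates a kbase_fence_debug_work on demand (the local in kbase_fence_debug_timeout()), queues it, and lets the worker recover it with container_of(). A sketch of that one-shot, self-freeing work-item pattern under the assumption that the trigger can run in atomic context; demo_* names and the per-context wq field are stand-ins, not kbase's.

  #include <linux/workqueue.h>
  #include <linux/slab.h>

  struct demo_ctx {
          struct workqueue_struct *wq;    /* per-context workqueue */
  };

  struct demo_debug_work {
          struct work_struct work;
          struct demo_ctx *ctx;           /* object being diagnosed */
  };

  static void demo_debug_worker(struct work_struct *work)
  {
          struct demo_debug_work *w = container_of(work, struct demo_debug_work, work);

          /* ... dump state of w->ctx in process context ... */

          kfree(w);                       /* one-shot item frees itself */
  }

  /* Called from timer/atomic context, hence GFP_ATOMIC and the deferral. */
  static void demo_debug_timeout(struct demo_ctx *ctx)
  {
          struct demo_debug_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);

          if (!w)
                  return;

          w->ctx = ctx;
          INIT_WORK(&w->work, demo_debug_worker);
          queue_work(ctx->wq, &w->work);
  }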
/device/soc/rockchip/common/vendor/drivers/gpu/arm/midgard/platform/rk/

  mali_kbase_config_rk.c
    73:  static void rk_pm_power_off_delay_work(struct work_struct *work)   [in rk_pm_power_off_delay_work(), argument]
    75:  struct rk_context *platform = container_of(to_delayed_work(work), struct rk_context, work);   [in rk_pm_power_off_delay_work()]
    120: INIT_DEFERRABLE_WORK(&platform->work, rk_pm_power_off_delay_work);   [in kbase_platform_rk_init()]
    152: cancel_delayed_work_sync(&platform->work);   [in kbase_platform_rk_term()]
    184: cancel_delayed_work_sync(&platform->work);   [in rk_pm_callback_power_on()]
    232: queue_delayed_work(platform->power_off_wq, &platform->work, msecs_to_jiffies(platform->delay_ms));   [in rk_pm_callback_power_off()]
/device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/platform/rk/

  mali_kbase_config_rk.c
    74:  static void rk_pm_power_off_delay_work(struct work_struct *work)   [in rk_pm_power_off_delay_work(), argument]
    77:  container_of(to_delayed_work(work), struct rk_context, work);   [in rk_pm_power_off_delay_work()]
    122: INIT_DEFERRABLE_WORK(&platform->work, rk_pm_power_off_delay_work);   [in kbase_platform_rk_init()]
    155: cancel_delayed_work_sync(&platform->work);   [in kbase_platform_rk_term()]
    187: cancel_delayed_work_sync(&platform->work);   [in rk_pm_callback_power_on()]
    235: queue_delayed_work(platform->power_off_wq, &platform->work, ...   [in rk_pm_callback_power_off()]
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/platform/rk/

  mali_kbase_config_rk.c
    72:  static void rk_pm_power_off_delay_work(struct work_struct *work)   [in rk_pm_power_off_delay_work(), argument]
    74:  struct rk_context *platform = container_of(to_delayed_work(work), struct rk_context, work);   [in rk_pm_power_off_delay_work()]
    118: INIT_DEFERRABLE_WORK(&platform->work, rk_pm_power_off_delay_work);   [in kbase_platform_rk_init()]
    150: cancel_delayed_work_sync(&platform->work);   [in kbase_platform_rk_term()]
    182: cancel_delayed_work_sync(&platform->work);   [in rk_pm_callback_power_on()]
    229: queue_delayed_work(platform->power_off_wq, &platform->work, msecs_to_jiffies(platform->delay_ms));   [in rk_pm_callback_power_off()]
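  The three mali_kbase_config_rk.c copies implement the same delayed power-off: power-off is deferred through a deferrable delayed work item so that a quick power-on can simply cancel it and skip a full off/on cycle. A sketch of that shape follows; demo_ctx, demo_power_off_now(), the queue name, and the 200 ms delay are illustrative choices, not the driver's values.

  #include <linux/workqueue.h>
  #include <linux/jiffies.h>
  #include <linux/errno.h>

  struct demo_ctx {
          struct workqueue_struct *power_off_wq;
          struct delayed_work work;
          unsigned int delay_ms;
  };

  static void demo_power_off_now(struct demo_ctx *ctx) { /* ... gate clocks/regulators ... */ }

  static void demo_power_off_delay_work(struct work_struct *work)
  {
          struct demo_ctx *ctx = container_of(to_delayed_work(work), struct demo_ctx, work);

          demo_power_off_now(ctx);
  }

  static int demo_init(struct demo_ctx *ctx)
  {
          ctx->delay_ms = 200;            /* arbitrary demo value */
          ctx->power_off_wq = alloc_ordered_workqueue("demo_power_off", 0);
          if (!ctx->power_off_wq)
                  return -ENOMEM;
          /* Deferrable: an idle CPU is not woken just to power the GPU down. */
          INIT_DEFERRABLE_WORK(&ctx->work, demo_power_off_delay_work);
          return 0;
  }

  static void demo_power_on(struct demo_ctx *ctx)
  {
          /* A pending delayed power-off is obsolete once we power back on. */
          cancel_delayed_work_sync(&ctx->work);
          /* ... enable clocks/regulators ... */
  }

  static void demo_power_off(struct demo_ctx *ctx)
  {
          queue_delayed_work(ctx->power_off_wq, &ctx->work, msecs_to_jiffies(ctx->delay_ms));
  }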
/device/soc/rockchip/common/sdk_linux/drivers/gpu/drm/nouveau/

  nouveau_drm.c
    133: struct nouveau_cli *cli = container_of(w, typeof(*cli), work);   [in nouveau_cli_work()]
    134: struct nouveau_cli_work *work, *wtmp;   [in nouveau_cli_work(), local]
    136: list_for_each_entry_safe(work, wtmp, &cli->worker, head)   [in nouveau_cli_work()]
    138: if (!work->fence || nouveau_cli_work_ready(work->fence)) {   [in nouveau_cli_work()]
    139: list_del(&work->head);   [in nouveau_cli_work()]
    140: work->func(work);   [in nouveau_cli_work()]
    148: struct nouveau_cli_work *work = container_of(cb, typeof(*work), c ...   [in nouveau_cli_work_fence(), local]
    152: nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence, struct nouveau_cli_work *work)   [in nouveau_cli_work_queue(), argument]
    (more matches not shown)
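  The nouveau_drm.c hits show fence-deferred work: a work record carries a dma_fence_cb, the fence callback fires in whatever context signals the fence, and it only schedules ordinary work so the heavy part runs later in process context. A simplified sketch of that shape using a plain work_struct instead of nouveau's per-client list; demo_cli_work and its fields are illustrative, not nouveau's types.

  #include <linux/dma-fence.h>
  #include <linux/workqueue.h>

  struct demo_cli_work {
          struct dma_fence_cb cb;         /* registered on the fence */
          struct work_struct work;        /* runs after the fence signals */
          void (*func)(struct demo_cli_work *);
  };

  static void demo_cli_worker(struct work_struct *work)
  {
          struct demo_cli_work *w = container_of(work, struct demo_cli_work, work);

          w->func(w);                     /* process context, free to sleep */
  }

  /* dma_fence_func_t: may run from IRQ context, so only queue the work here. */
  static void demo_cli_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
  {
          struct demo_cli_work *w = container_of(cb, struct demo_cli_work, cb);

          schedule_work(&w->work);
  }

  static void demo_cli_work_queue(struct dma_fence *fence, struct demo_cli_work *w)
  {
          INIT_WORK(&w->work, demo_cli_worker);
          /* Nonzero return means the fence already signalled; run the work now. */
          if (dma_fence_add_callback(fence, &w->cb, demo_cli_fence_cb))
                  schedule_work(&w->work);
  }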