/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/uc/

intel_guc_ct.c
     44  spin_lock_init(&ct->requests.lock);  in intel_guc_ct_init_early()
     45  INIT_LIST_HEAD(&ct->requests.pending);  in intel_guc_ct_init_early()
     46  INIT_LIST_HEAD(&ct->requests.incoming);  in intel_guc_ct_init_early()
     47  INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);  in intel_guc_ct_init_early()
    291  return ++ct->requests.last_fence;  in ct_get_next_fence()
    501  spin_lock_irqsave(&ct->requests.lock, flags);  in ct_send()
    502  list_add_tail(&request.link, &ct->requests.pending);  in ct_send()
    503  spin_unlock_irqrestore(&ct->requests.lock, flags);  in ct_send()
    536  spin_lock_irqsave(&ct->requests.lock, flags);  in ct_send()
    538  spin_unlock_irqrestore(&ct->requests…  in ct_send()
    (additional matches not shown)
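
These hits outline the CT layer's request bookkeeping: fences come from a plain incrementing counter, and in-flight sends sit on a spinlock-protected pending list until the matching response arrives. A minimal sketch of that pattern, using hypothetical my_ct/my_request types rather than the driver's real ones:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_request {
        struct list_head link;
        u32 fence;                /* matches a response to its request */
};

struct my_ct {
        spinlock_t lock;          /* protects pending and last_fence */
        struct list_head pending; /* sends awaiting a response */
        u32 last_fence;
};

/* Mirrors intel_guc_ct_init_early(): set up the lock and list head. */
static void my_ct_init(struct my_ct *ct)
{
        spin_lock_init(&ct->lock);
        INIT_LIST_HEAD(&ct->pending);
        ct->last_fence = 0;
}

/* As in ct_get_next_fence(): the next fence is just an increment. */
static u32 my_ct_next_fence(struct my_ct *ct)
{
        return ++ct->last_fence;
}

/* As in ct_send(): queue under the IRQ-safe lock, since the response
 * side may run in interrupt context. */
static void my_ct_queue(struct my_ct *ct, struct my_request *rq)
{
        unsigned long flags;

        spin_lock_irqsave(&ct->lock, flags);
        rq->fence = my_ct_next_fence(ct);
        list_add_tail(&rq->link, &ct->pending);
        spin_unlock_irqrestore(&ct->lock, flags);
}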
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gt/uc/

intel_guc_ct.c
     78  * 2 dwords which gives us a maximum 256 queue'd requests. Hopefully this
    117  spin_lock_init(&ct->requests.lock);  in intel_guc_ct_init_early()
    118  INIT_LIST_HEAD(&ct->requests.pending);  in intel_guc_ct_init_early()
    119  INIT_LIST_HEAD(&ct->requests.incoming);  in intel_guc_ct_init_early()
    123  INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);  in intel_guc_ct_init_early()
    382  unsigned int lost = fence % ARRAY_SIZE(ct->requests.lost_and_found);  in ct_track_lost_and_found()
    390  ct->requests.lost_and_found[lost].stack = stack_depot_save(entries, n, GFP_NOWAIT);  in ct_track_lost_and_found()
    392  ct->requests.lost_and_found[lost].fence = fence;  in ct_track_lost_and_found()
    393  ct->requests.lost_and_found[lost].action = action;  in ct_track_lost_and_found()
    400  return ++ct->requests…  in ct_get_next_fence()
    (additional matches not shown)
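
New in the 6.6 tree, ct_track_lost_and_found() keeps a small ring of recently sent fences together with a stack-depot handle, so a response that no longer matches any pending request can still be traced back to its sender. A sketch of that idea; the ring size and all my_* names are invented here:

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

#define MY_LOST_SIZE 32                 /* hypothetical ring size */

struct my_lost_entry {
        depot_stack_handle_t stack;     /* who sent the request */
        u32 fence;
        u32 action;
};

static struct my_lost_entry my_lost_ring[MY_LOST_SIZE];

static void my_track_lost_and_found(u32 fence, u32 action)
{
        unsigned int slot = fence % ARRAY_SIZE(my_lost_ring);
        unsigned long entries[8];
        unsigned int n;

        /* Capture the current call chain; GFP_NOWAIT because this can
         * run in atomic context. Old entries are simply overwritten. */
        n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
        my_lost_ring[slot].stack = stack_depot_save(entries, n, GFP_NOWAIT);
        my_lost_ring[slot].fence = fence;
        my_lost_ring[slot].action = action;
}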
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/

intel_gt_requests.c
     21  list_for_each_entry_safe(rq, rn, &tl->requests, link)  in retire_requests()
     31  return !list_empty(&engine->kernel_context->timeline->requests);  in engine_active()
    208  /* If the device is asleep, we have no requests outstanding */  in intel_gt_wait_for_idle()
    224  container_of(work, typeof(*gt), requests.retire_work.work);  in retire_work_handler()
    226  schedule_delayed_work(&gt->requests.retire_work,  in retire_work_handler()
    233  INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);  in intel_gt_init_requests()
    238  cancel_delayed_work(&gt->requests.retire_work);  in intel_gt_park_requests()
    243  schedule_delayed_work(&gt->requests.retire_work,  in intel_gt_unpark_requests()
    250  cancel_delayed_work_sync(&gt->requests.retire_work);  in intel_gt_fini_requests()
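
Retirement here is driven by a self-rescheduling delayed work: the handler retires what it can and re-arms itself, park/unpark cancel and re-queue it, and teardown uses the _sync cancel to wait out a running handler. The shape of that pattern, with hypothetical names and period:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

#define MY_RETIRE_INTERVAL (HZ)         /* hypothetical period */

struct my_gt {
        struct delayed_work retire_work;
};

static void my_retire_handler(struct work_struct *work)
{
        struct my_gt *gt =
                container_of(work, typeof(*gt), retire_work.work);

        /* ... retire completed requests here ... */

        /* Re-arm: the work keeps itself alive while the GT is awake. */
        schedule_delayed_work(&gt->retire_work, MY_RETIRE_INTERVAL);
}

static void my_gt_init_requests(struct my_gt *gt)
{
        INIT_DELAYED_WORK(&gt->retire_work, my_retire_handler);
}

static void my_gt_park_requests(struct my_gt *gt)
{
        /* Parked: nothing outstanding, so stop the timer. */
        cancel_delayed_work(&gt->retire_work);
}

static void my_gt_fini_requests(struct my_gt *gt)
{
        /* The _sync variant also waits for a running handler. */
        cancel_delayed_work_sync(&gt->retire_work);
}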

intel_gt.c
    389  struct i915_request *requests[I915_NUM_ENGINES] = {};  in __engines_record_defaults() (local)
    436  requests[id] = i915_request_get(rq);  in __engines_record_defaults()
    453  for (id = 0; id < ARRAY_SIZE(requests); id++) {  in __engines_record_defaults()
    457  rq = requests[id];  in __engines_record_defaults()
    488  for (id = 0; id < ARRAY_SIZE(requests); id++) {  in __engines_record_defaults()
    492  rq = requests[id];  in __engines_record_defaults()
    623  * all in-flight requests so that we can quickly unbind the active  in intel_gt_driver_unregister()
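
__engines_record_defaults() fills a fixed, engine-indexed array with referenced requests in one pass, then walks ARRAY_SIZE(requests) slots in later passes, skipping the NULLs. A skeleton of that two-phase walk (the count, types, and submit callback are all made up):

#include <linux/kernel.h>

#define MY_NUM_ENGINES 8                /* hypothetical engine count */

struct my_request;                      /* opaque: only pointers used */

static void my_record_defaults(struct my_request *(*submit)(int id))
{
        struct my_request *requests[MY_NUM_ENGINES] = {};
        int id;

        /* Phase 1: one request per engine; NULL slots are legal. */
        for (id = 0; id < ARRAY_SIZE(requests); id++)
                requests[id] = submit(id);

        /* Phase 2 (and 3, in the original): revisit the same array. */
        for (id = 0; id < ARRAY_SIZE(requests); id++) {
                struct my_request *rq = requests[id];

                if (!rq)
                        continue;
                /* ... wait for completion, record state, drop ref ... */
        }
}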
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gt/ |

intel_gt_requests.c
     21  list_for_each_entry_safe(rq, rn, &tl->requests, link)  in retire_requests()
     31  return !list_empty(&engine->kernel_context->timeline->requests);  in engine_active()
    208  container_of(work, typeof(*gt), requests.retire_work.work);  in retire_work_handler()
    210  queue_delayed_work(gt->i915->unordered_wq, &gt->requests.retire_work,  in retire_work_handler()
    217  INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);  in intel_gt_init_requests()
    222  cancel_delayed_work(&gt->requests.retire_work);  in intel_gt_park_requests()
    227  queue_delayed_work(gt->i915->unordered_wq, &gt->requests.retire_work,  in intel_gt_unpark_requests()
    234  cancel_delayed_work_sync(&gt->requests.retire_work);  in intel_gt_fini_requests()
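
The 6.6 version is the same loop, but it re-queues onto a driver-owned workqueue (gt->i915->unordered_wq) via queue_delayed_work() instead of schedule_delayed_work(), keeping driver work off the system queue and giving the driver an explicit queue to flush at teardown. A sketch of the difference, reusing struct my_gt and MY_RETIRE_INTERVAL from the sketch above (the queue name and flags here are assumptions, not i915's):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_unordered_wq;

static int my_wq_init(void)
{
        my_unordered_wq = alloc_workqueue("my-unordered", 0, 0);
        return my_unordered_wq ? 0 : -ENOMEM;
}

static void my_retire_handler_v2(struct work_struct *work)
{
        struct my_gt *gt =
                container_of(work, typeof(*gt), retire_work.work);

        /* ... retire ... */
        queue_delayed_work(my_unordered_wq, &gt->retire_work,
                           MY_RETIRE_INTERVAL);
}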
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/ |

i915_scheduler.h
     17  for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
     18  list_for_each_entry(it, &(plist)->requests[idx], sched.link)
     25  &(plist)->requests[idx], \

i915_scheduler.c
     62  for (i = 0; i < ARRAY_SIZE(p->requests); i++) {  in assert_priolists()
     63  if (list_empty(&p->requests[i]))  in assert_priolists()
    119  * requests, so if userspace lied about their  in i915_sched_lookup_priolist()
    128  for (i = 0; i < ARRAY_SIZE(p->requests); i++)  in i915_sched_lookup_priolist()
    129  INIT_LIST_HEAD(&p->requests[i]);  in i915_sched_lookup_priolist()
    136  return &p->requests[idx];  in i915_sched_lookup_priolist()
    269  * end result is a topological list of requests in reverse order, the  in __i915_schedule()
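
In 5.10 an i915_priolist still carries an array of request lists, one per priority sub-level; lookup initializes every bucket before first use and hands back the list head for the requested index. Reduced to the bucket mechanics (the sub-level count is invented):

#include <linux/kernel.h>
#include <linux/list.h>

#define MY_PRIORITY_LEVELS 4            /* hypothetical sub-level count */

struct my_priolist {
        struct list_head requests[MY_PRIORITY_LEVELS];
};

/* Mirrors i915_sched_lookup_priolist(): every bucket gets an empty
 * list head before the node is first used. */
static void my_priolist_init(struct my_priolist *p)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(p->requests); i++)
                INIT_LIST_HEAD(&p->requests[i]);
}

static struct list_head *my_priolist_bucket(struct my_priolist *p, int idx)
{
        return &p->requests[idx];
}

Compare the linux-6.6 i915_scheduler.h and i915_priolist_types.h entries below, where this array has collapsed to a single list.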

i915_request.c
    297  &i915_request_timeline(rq)->requests));  in i915_request_retire()
    315  * We only loosely track inflight requests across preemption,  in i915_request_retire()
    349  tmp = list_first_entry(&tl->requests, typeof(*tmp), link);  in i915_request_retire_upto()
    376  * requests, we know that only the currently executing request  in __request_in_flight()
    528  * requests that we have unsubmitted from HW, but left running  in __i915_request_submit()
    584  list_move_tail(&request->sched.link, &engine->active.requests);  in __i915_request_submit()
    726  list_for_each_entry_safe(rq, rn, &tl->requests, link)  in retire_requests()
    747  if (list_empty(&tl->requests))  in request_alloc_slow()
    751  rq = list_first_entry(&tl->requests, typeof(*rq), link);  in request_alloc_slow()
    760  rq = list_last_entry(&tl->requests, typeo…  in request_alloc_slow()
    (additional matches not shown)
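
A timeline's requests list is kept in submission order, so retire_requests() can walk from the head with the _safe iterator (retiring unlinks as it goes) and request_alloc_slow() can peek at list_first_entry()/list_last_entry() for the oldest and newest requests. The retire walk in isolation, with stand-in types:

#include <linux/list.h>
#include <linux/types.h>

struct my_tl_request {
        struct list_head link;
        bool completed;                 /* stand-in for the real check */
};

struct my_timeline {
        struct list_head requests;      /* oldest first */
};

/* Retire from the head up to the first still-busy request; the _safe
 * iterator tolerates the list_del() inside the loop. */
static void my_retire_requests(struct my_timeline *tl)
{
        struct my_tl_request *rq, *rn;

        list_for_each_entry_safe(rq, rn, &tl->requests, link) {
                if (!rq->completed)
                        break;
                list_del(&rq->link);
                /* ... release rq ... */
        }
}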
/kernel/linux/linux-5.10/arch/powerpc/kvm/ |

trace.h
    106  __field( __u32, requests )
    111  __entry->requests = vcpu->requests;
    114  TP_printk("vcpu=%x requests=%x",
    115  __entry->cpu_nr, __entry->requests)
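
Both kernel versions carry this powerpc KVM tracepoint unchanged: the vcpu's requests bitmask is copied into the trace ring buffer by __field()/TP_fast_assign() and rendered by TP_printk(). A hypothetical TRACE_EVENT of the same shape (event and guard names are made up; the TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE plumbing and the CREATE_TRACE_POINTS unit are omitted):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM my_subsys

#if !defined(_MY_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _MY_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(my_vcpu_requests,
        TP_PROTO(unsigned int cpu_nr, __u32 requests),
        TP_ARGS(cpu_nr, requests),

        /* Layout of one record in the ring buffer. */
        TP_STRUCT__entry(
                __field(unsigned int, cpu_nr)
                __field(__u32, requests)
        ),

        /* Runs at the trace site: copy the arguments into the record. */
        TP_fast_assign(
                __entry->cpu_nr = cpu_nr;
                __entry->requests = requests;
        ),

        /* Formats the record on read-out, as in the entry above. */
        TP_printk("vcpu=%x requests=%x",
                  __entry->cpu_nr, __entry->requests)
);

#endif /* _MY_TRACE_H */

#include <trace/define_trace.h>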
/kernel/linux/linux-6.6/arch/powerpc/kvm/ |

trace.h
    106  __field( __u32, requests )
    111  __entry->requests = vcpu->requests;
    114  TP_printk("vcpu=%x requests=%x",
    115  __entry->cpu_nr, __entry->requests)
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gem/ |

i915_gem_execbuffer.c
    257  /** our requests to build */
    258  struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];  (member)
   1971  * Using two helper loops for the order of which requests / batches are created
   1991  if (eb->requests[i])  in eb_find_first_request_added()
   1992  return eb->requests[i];  in eb_find_first_request_added()
   2041  struct i915_request *rq = eb->requests[j];  in eb_capture_commit()
   2145  if (!eb->requests[j])  in eb_move_to_gpu()
   2148  err = _i915_vma_move_to_active(vma, eb->requests[j],  in eb_move_to_gpu()
   2152  &eb->requests[j]->fence,  in eb_move_to_gpu()
   2193  if (!eb->requests[…  in eb_move_to_gpu()
    (additional matches not shown)
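
The execbuffer path sizes its per-submission array as MAX_ENGINE_INSTANCE + 1 and repeatedly scans it, treating NULL slots as "no request built for this instance"; eb_find_first_request_added() is just the first-non-NULL scan. That scan in isolation, reusing the opaque struct my_request from the sketches above:

/* Return the first request that was actually created, or NULL.
 * Same shape as eb_find_first_request_added(). */
static struct my_request *
my_find_first_added(struct my_request **requests, unsigned int count)
{
        unsigned int i;

        for (i = 0; i < count; i++)
                if (requests[i])
                        return requests[i];
        return NULL;
}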
/kernel/linux/linux-6.6/net/handshake/ |

netlink.c
    199  LIST_HEAD(requests);  in handshake_net_exit()
    208  list_splice_init(&requests, &hn->hn_requests);  in handshake_net_exit()
    211  while (!list_empty(&requests)) {  in handshake_net_exit()
    212  req = list_first_entry(&requests, struct handshake_req, hr_list);  in handshake_net_exit()
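
handshake_net_exit() drains pending requests with the usual splice-and-drain idiom: move the shared list onto a stack-local LIST_HEAD while holding the lock, then walk the private copy with the lock dropped. The idiom in general form, with invented my_* names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_net_req {
        struct list_head link;
};

static void my_drain(struct list_head *shared, spinlock_t *lock)
{
        struct my_net_req *req;
        LIST_HEAD(local);

        /* Take everything in one motion under the lock... */
        spin_lock(lock);
        list_splice_init(shared, &local);
        spin_unlock(lock);

        /* ...then drain the private copy without holding it. */
        while (!list_empty(&local)) {
                req = list_first_entry(&local, struct my_net_req, link);
                list_del(&req->link);
                /* ... cancel and free req ... */
        }
}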
/kernel/linux/linux-6.6/drivers/media/v4l2-core/ |

v4l2-ctrls-request.c
     21  INIT_LIST_HEAD(&hdl->requests);  in v4l2_ctrl_handler_init_request()
     39  if (hdl->req_obj.ops || list_empty(&hdl->requests))  in v4l2_ctrl_handler_free_request()
     44  * outstanding requests, then unbind and put those objects before  in v4l2_ctrl_handler_free_request()
     47  list_for_each_entry_safe(req, next_req, &hdl->requests, requests) {  in v4l2_ctrl_handler_free_request()
    102  list_del_init(&hdl->requests);  in v4l2_ctrl_request_unbind()
    163  list_add_tail(&hdl->requests, &from->requests);  in v4l2_ctrl_request_bind()
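
Here the handler's requests member serves as a list head on the owning handler and as a link node when bound into another handler's list; bind is a list_add_tail(), unbind a list_del_init() so the node can be checked or unlinked again safely. The list mechanics in generic form (my_* names are placeholders):

#include <linux/list.h>

struct my_handler {
        struct list_head requests;      /* head, or link when bound */
};

static void my_handler_init(struct my_handler *hdl)
{
        INIT_LIST_HEAD(&hdl->requests);
}

static void my_request_bind(struct my_handler *hdl, struct my_handler *from)
{
        list_add_tail(&hdl->requests, &from->requests);
}

static void my_request_unbind(struct my_handler *hdl)
{
        /* list_del_init() leaves the node self-linked, so a later
         * list_empty() check or repeat unbind stays well defined. */
        list_del_init(&hdl->requests);
}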
/kernel/linux/linux-5.10/drivers/iio/adc/ |

twl4030-madc.c
    157  * @requests: Array of request struct corresponding to SW1, SW2 and RT
    166  struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS];  (member)
    445  * corresponding to RT, SW1, SW2 conversion requests.
    498  madc->requests[i].result_pending = true;  in twl4030_madc_threaded_irq_handler()
    501  r = &madc->requests[i];  in twl4030_madc_threaded_irq_handler()
    523  r = &madc->requests[i];  in twl4030_madc_threaded_irq_handler()
    624  if (twl4030_madc->requests[req->method].active) {  in twl4030_madc_conversion()
    655  twl4030_madc->requests[req->method].active = true;  in twl4030_madc_conversion()
    659  twl4030_madc->requests[req->method].active = false;  in twl4030_madc_conversion()
    664  twl4030_madc->requests[re…  in twl4030_madc_conversion()
    (additional matches not shown)
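
The MADC driver keeps one request slot per conversion method (RT, SW1, SW2): twl4030_madc_conversion() rejects a method whose slot is still active and flips the flag around the conversion, while the threaded IRQ handler marks slots result_pending. The slot state machine in miniature (the locking and error code here are assumptions in the spirit of the driver):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

#define MY_NUM_METHODS 3                /* RT, SW1, SW2 */

struct my_adc_request {
        bool active;
        bool result_pending;
};

struct my_adc {
        struct mutex lock;
        struct my_adc_request requests[MY_NUM_METHODS];
};

static int my_adc_convert(struct my_adc *adc, unsigned int method)
{
        int ret = 0;

        mutex_lock(&adc->lock);
        if (adc->requests[method].active) {
                ret = -EBUSY;           /* one conversion per method */
                goto out;
        }
        adc->requests[method].active = true;
        /* ... start the hardware, wait, read the result ... */
        adc->requests[method].active = false;
out:
        mutex_unlock(&adc->lock);
        return ret;
}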
/kernel/linux/linux-6.6/drivers/iio/adc/ |

twl4030-madc.c
    158  * @requests: Array of request struct corresponding to SW1, SW2 and RT
    167  struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS];  (member)
    439  * corresponding to RT, SW1, SW2 conversion requests.
    492  madc->requests[i].result_pending = true;  in twl4030_madc_threaded_irq_handler()
    495  r = &madc->requests[i];  in twl4030_madc_threaded_irq_handler()
    517  r = &madc->requests[i];  in twl4030_madc_threaded_irq_handler()
    618  if (twl4030_madc->requests[req->method].active) {  in twl4030_madc_conversion()
    649  twl4030_madc->requests[req->method].active = true;  in twl4030_madc_conversion()
    653  twl4030_madc->requests[req->method].active = false;  in twl4030_madc_conversion()
    658  twl4030_madc->requests[re…  in twl4030_madc_conversion()
    (additional matches not shown)
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/ |

i915_scheduler.h
     20  list_for_each_entry(it, &(plist)->requests, sched.link)
     23  list_for_each_entry_safe(it, n, &(plist)->requests, sched.link)

i915_request.c
    380  &i915_request_timeline(rq)->requests));  in i915_request_retire()
    396  * We only loosely track inflight requests across preemption,  in i915_request_retire()
    428  tmp = list_first_entry(&tl->requests, typeof(*tmp), link);  in i915_request_retire_upto()
    456  * requests, we know that only the currently executing request  in __request_in_flight()
    617  * requests that we have unsubmitted from HW, but left running  in __i915_request_submit()
    829  list_for_each_entry_safe(rq, rn, &tl->requests, link)  in retire_requests()
    850  if (list_empty(&tl->requests))  in request_alloc_slow()
    854  rq = list_first_entry(&tl->requests, typeof(*rq), link);  in request_alloc_slow()
    863  rq = list_last_entry(&tl->requests, typeof(*rq), link);  in request_alloc_slow()
    866  /* Retire our old requests i…  in request_alloc_slow()
    (additional matches not shown)

i915_priolist_types.h
     42  struct list_head requests;  (member)
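
By 6.6 the per-priority array from the 5.10 scheduler is gone: struct i915_priolist holds a single requests list, and the iteration macros above shrink to plain list_for_each_entry()/list_for_each_entry_safe() over it. The same construction with placeholder names:

#include <linux/list.h>

struct my_priolist {
        struct list_head requests;      /* one list per priority node */
};

struct my_sched_request {
        struct list_head sched_link;
};

/* Iterate every request queued at this priority level. */
#define my_priolist_for_each(it, plist) \
        list_for_each_entry(it, &(plist)->requests, sched_link)

/* _safe variant for walks that dequeue as they go. */
#define my_priolist_for_each_safe(it, n, plist) \
        list_for_each_entry_safe(it, n, &(plist)->requests, sched_link)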
/kernel/linux/linux-5.10/drivers/base/ |

devtmpfs.c
      9  * device which requests a device node, will add a node in this
     47  } *requests;  (variable)
    105  req->next = requests;  in devtmpfs_submit_req()
    106  requests = req;  in devtmpfs_submit_req()
    393  while (requests) {  in devtmpfs_work_loop()
    394  struct req *req = requests;  in devtmpfs_work_loop()
    395  requests = NULL;  in devtmpfs_work_loop()
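
devtmpfs queues node-creation requests on a plain singly linked stack: submitters push onto the requests head, and the worker claims the whole chain by NULLing the head, then walks it with the lock dropped. A sketch of that handoff (names are invented; the real code also pairs each request with a completion so the submitter can wait):

#include <linux/spinlock.h>

struct my_req {
        struct my_req *next;
        const char *name;               /* node to create */
};

static struct my_req *my_requests;      /* LIFO head */
static DEFINE_SPINLOCK(my_req_lock);

static void my_submit_req(struct my_req *req)
{
        spin_lock(&my_req_lock);
        req->next = my_requests;        /* push onto the stack */
        my_requests = req;
        spin_unlock(&my_req_lock);
}

static void my_work_loop(void)
{
        spin_lock(&my_req_lock);
        while (my_requests) {
                struct my_req *req = my_requests;

                my_requests = NULL;     /* claim the entire chain */
                spin_unlock(&my_req_lock);

                while (req) {
                        struct my_req *next = req->next;

                        /* ... create the device node for req ... */
                        req = next;
                }
                spin_lock(&my_req_lock);
        }
        spin_unlock(&my_req_lock);
}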
/kernel/linux/linux-6.6/drivers/base/ |

devtmpfs.c
      9  * device which requests a device node, will add a node in this
     55  } *requests;  (variable)
    102  req->next = requests;  in devtmpfs_submit_req()
    103  requests = req;  in devtmpfs_submit_req()
    393  while (requests) {  in devtmpfs_work_loop()
    394  struct req *req = requests;  in devtmpfs_work_loop()
    395  requests = NULL;  in devtmpfs_work_loop()
/kernel/linux/linux-6.6/drivers/vdpa/vdpa_sim/ |

vdpa_sim_net.c
     51  u64 requests;  (member)
    134  u64 requests = 0, errors = 0, successes = 0;  in vdpasim_handle_cvq() (local)
    150  ++requests;  in vdpasim_handle_cvq()
    190  net->cq_stats.requests += requests;  in vdpasim_handle_cvq()
    377  cq_requests = net->cq_stats.requests;  in vdpasim_net_get_stats()
    383  "cvq requests"))  in vdpasim_net_get_stats()
    489  * connect the device to the vDPA bus, so requests can arrive after  in vdpasim_net_dev_add()
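
vdpasim_handle_cvq() counts into function-local requests/errors/successes variables while draining the control queue and folds them into the shared cq_stats counters once at the end, so the per-command loop touches no shared state. The accumulation pattern, with a hypothetical pop_and_handle callback standing in for the real descriptor processing:

#include <linux/errno.h>
#include <linux/types.h>

struct my_cq_stats {
        u64 requests;
        u64 errors;
        u64 successes;
};

static void my_handle_cvq(struct my_cq_stats *stats,
                          int (*pop_and_handle)(void))
{
        u64 requests = 0, errors = 0, successes = 0;
        int ret;

        /* Drain the queue with cheap local counters... */
        while ((ret = pop_and_handle()) != -ENODATA) {
                ++requests;
                if (ret)
                        ++errors;
                else
                        ++successes;
        }

        /* ...and publish once per batch. */
        stats->requests += requests;
        stats->errors += errors;
        stats->successes += successes;
}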
/kernel/linux/linux-5.10/drivers/media/pci/tw686x/ |

tw686x.h
    172  void tw686x_video_irq(struct tw686x_dev *dev, unsigned long requests,
    178  void tw686x_audio_irq(struct tw686x_dev *dev, unsigned long requests,
/kernel/linux/linux-6.6/drivers/media/pci/tw686x/ |

tw686x.h
    172  void tw686x_video_irq(struct tw686x_dev *dev, unsigned long requests,
    178  void tw686x_audio_irq(struct tw686x_dev *dev, unsigned long requests,
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/selftests/ |

mock_timeline.c
     19  INIT_LIST_HEAD(&timeline->requests);  in mock_timeline_init()
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gt/selftests/ |

mock_timeline.c
     19  INIT_LIST_HEAD(&timeline->requests);  in mock_timeline_init()