/kernel/linux/linux-5.10/drivers/crypto/ccp/

ccp-crypto-main.c
    61:   static struct ccp_crypto_queue req_queue;   (variable)
    110:  list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {   in ccp_crypto_cmd_complete()
    121:  if (req_queue.backlog != &req_queue.cmds) {   in ccp_crypto_cmd_complete()
    123:  if (req_queue.backlog == &crypto_cmd->entry)   in ccp_crypto_cmd_complete()
    124:  req_queue.backlog = crypto_cmd->entry.next;   in ccp_crypto_cmd_complete()
    126:  *backlog = container_of(req_queue.backlog,   in ccp_crypto_cmd_complete()
    128:  req_queue.backlog = req_queue.backlog->next;   in ccp_crypto_cmd_complete()
    131:  if (req_queue ...   in ccp_crypto_cmd_complete()
    [all...]
/kernel/linux/linux-6.6/drivers/crypto/ccp/

ccp-crypto-main.c
    61:   static struct ccp_crypto_queue req_queue;   (variable)
    103:  list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {   in ccp_crypto_cmd_complete()
    114:  if (req_queue.backlog != &req_queue.cmds) {   in ccp_crypto_cmd_complete()
    116:  if (req_queue.backlog == &crypto_cmd->entry)   in ccp_crypto_cmd_complete()
    117:  req_queue.backlog = crypto_cmd->entry.next;   in ccp_crypto_cmd_complete()
    119:  *backlog = container_of(req_queue.backlog,   in ccp_crypto_cmd_complete()
    121:  req_queue.backlog = req_queue.backlog->next;   in ccp_crypto_cmd_complete()
    124:  if (req_queue ...   in ccp_crypto_cmd_complete()
    [all...]
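The hits in both kernel trees point at the same pattern: a single file-scope ccp_crypto_queue whose backlog pointer marks the first command that was accepted only as backlog, with an empty backlog represented by the pointer resting on &cmds itself. A minimal sketch of how that cursor advances on completion, reconstructed from the snippets above (field and parameter names follow the hits; locking and the submit path are elided):

    #include <linux/kernel.h>
    #include <linux/list.h>

    struct ccp_crypto_queue {
        struct list_head cmds;      /* all queued commands */
        struct list_head *backlog;  /* first backlogged entry, or &cmds */
    };

    struct ccp_crypto_cmd {
        struct list_head entry;
    };

    static struct ccp_crypto_queue req_queue;

    /* On completion of @crypto_cmd, hand the next backlogged command
     * (if any) back through @backlog so the caller can resubmit it. */
    static void sketch_cmd_complete(struct ccp_crypto_cmd *crypto_cmd,
                                    struct ccp_crypto_cmd **backlog)
    {
        *backlog = NULL;

        if (req_queue.backlog != &req_queue.cmds) {
            /* If the completed command was the backlog marker,
             * move the marker past it first. */
            if (req_queue.backlog == &crypto_cmd->entry)
                req_queue.backlog = crypto_cmd->entry.next;

            if (req_queue.backlog != &req_queue.cmds) {
                *backlog = container_of(req_queue.backlog,
                                        struct ccp_crypto_cmd, entry);
                req_queue.backlog = req_queue.backlog->next;
            }
        }
    }

Keeping backlog pointed at &cmds when nothing is backlogged lets the empty case be tested with a plain pointer comparison instead of a separate flag.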
/kernel/linux/linux-5.10/drivers/s390/char/

tape_core.c
    141:  if (list_empty(&tdev->req_queue))   in tape_operation_show()
    146:  req = list_entry(tdev->req_queue.next, struct tape_request,   in tape_operation_show()
    542:  INIT_LIST_HEAD(&device->req_queue);   in tape_alloc_device()
    644:  list_for_each_safe(l, n, &device->req_queue) {   in __tape_discard_requests()
    815:  list_for_each_safe(l, n, &device->req_queue) {   in __tape_start_next_request()
    877:  request = list_entry(device->req_queue.next, struct tape_request, list);   in tape_long_busy_timeout()
    905:  if (!list_empty(&device->req_queue))   in __tape_end_request()
    964:  if (list_empty(&device->req_queue)) {   in __tape_start_request()
    971:  list_add(&request->list, &device->req_queue);   in __tape_start_request()
    975:  list_add_tail(&request->list, &device->req_queue);   in __tape_start_request()
    [all...]

tape_proc.c
    71:   if (!list_empty(&device->req_queue)) {   in tape_proc_show()
    72:   request = list_entry(device->req_queue.next,   in tape_proc_show()

raw3270.c
    44:   struct list_head req_queue; /* Request queue. */   (member)
    244:  if (list_empty(&rp->req_queue) &&   in __raw3270_start()
    254:  list_add_tail(&rq->list, &rp->req_queue);   in __raw3270_start()
    312:  list_add_tail(&rq->list, &rp->req_queue);   in raw3270_start_irq()
    369:  while (!list_empty(&rp->req_queue)) {   in raw3270_irq()
    370:  rq = list_entry(rp->req_queue.next,struct raw3270_request,list);   in raw3270_irq()
    651:  while (!list_empty(&rp->req_queue)) {   in __raw3270_disconnect()
    652:  rq = list_entry(rp->req_queue.next,struct raw3270_request,list);   in __raw3270_disconnect()
    714:  INIT_LIST_HEAD(&rp->req_queue);   in raw3270_setup_device()

tape.h
    209:  struct list_head req_queue;   (member)
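The tape driver keeps per-device requests on a plain list_head. The __tape_start_request() hits suggest the shape below: start immediately when the device is idle, otherwise insert at the head or tail of req_queue. This is a hedged sketch; the real criterion for head insertion and the actual I/O start path are not visible in the hits, so the urgent flag and the sketch_* names are illustrative only.

    #include <linux/list.h>
    #include <linux/types.h>

    struct tape_request {
        struct list_head list;
    };

    struct tape_device {
        struct list_head req_queue;
    };

    /* "urgent" is illustrative; the real driver decides head vs. tail
     * insertion on its own criteria. */
    static void sketch_start_request(struct tape_device *device,
                                     struct tape_request *request,
                                     bool urgent)
    {
        if (list_empty(&device->req_queue)) {
            /* Idle device: queue the request and start I/O at once. */
            list_add(&request->list, &device->req_queue);
            /* __tape_start_io(device, request); -- elided */
        } else if (urgent) {
            /* Head insertion lets a request jump the queue. */
            list_add(&request->list, &device->req_queue);
        } else {
            list_add_tail(&request->list, &device->req_queue);
        }
    }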
/kernel/linux/linux-6.6/drivers/s390/char/

tape_core.c
    141:  if (list_empty(&tdev->req_queue))   in tape_operation_show()
    146:  req = list_entry(tdev->req_queue.next, struct tape_request,   in tape_operation_show()
    493:  INIT_LIST_HEAD(&device->req_queue);   in tape_alloc_device()
    595:  list_for_each_safe(l, n, &device->req_queue) {   in __tape_discard_requests()
    766:  list_for_each_safe(l, n, &device->req_queue) {   in __tape_start_next_request()
    828:  request = list_entry(device->req_queue.next, struct tape_request, list);   in tape_long_busy_timeout()
    856:  if (!list_empty(&device->req_queue))   in __tape_end_request()
    915:  if (list_empty(&device->req_queue)) {   in __tape_start_request()
    922:  list_add(&request->list, &device->req_queue);   in __tape_start_request()
    926:  list_add_tail(&request->list, &device->req_queue);   in __tape_start_request()
    [all...]

tape_proc.c
    71:   if (!list_empty(&device->req_queue)) {   in tape_proc_show()
    72:   request = list_entry(device->req_queue.next,   in tape_proc_show()

raw3270.c
    46:   struct list_head req_queue; /* Request queue. */   (member)
    249:  if (list_empty(&rp->req_queue) &&   in __raw3270_start()
    259:  list_add_tail(&rq->list, &rp->req_queue);   in __raw3270_start()
    328:  list_add_tail(&rq->list, &rp->req_queue);   in raw3270_start_irq()
    385:  while (!list_empty(&rp->req_queue)) {   in raw3270_irq()
    386:  rq = list_entry(rp->req_queue.next, struct raw3270_request, list);   in raw3270_irq()
    681:  while (!list_empty(&rp->req_queue)) {   in __raw3270_disconnect()
    682:  rq = list_entry(rp->req_queue.next, struct raw3270_request, list);   in __raw3270_disconnect()
    745:  INIT_LIST_HEAD(&rp->req_queue);   in raw3270_setup_device()

tape.h
    209:  struct list_head req_queue;   (member)
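raw3270_irq() and __raw3270_disconnect() both show the classic drain idiom: peel the first entry off req_queue until the list is empty. A self-contained sketch of that loop; the rc/callback fields are assumptions about what finishing a request involves, not taken from the hits:

    #include <linux/list.h>

    struct raw3270_request {
        struct list_head list;
        int rc;
        void (*callback)(struct raw3270_request *rq, void *data);
        void *callback_data;
    };

    struct raw3270 {
        struct list_head req_queue;  /* Request queue. */
    };

    /* Fail or finish every request still queued, e.g. on disconnect. */
    static void sketch_drain(struct raw3270 *rp, int rc)
    {
        struct raw3270_request *rq;

        while (!list_empty(&rp->req_queue)) {
            rq = list_entry(rp->req_queue.next,
                            struct raw3270_request, list);
            list_del_init(&rq->list);
            rq->rc = rc;
            if (rq->callback)
                rq->callback(rq, rq->callback_data);
        }
    }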
/kernel/linux/linux-5.10/drivers/usb/usbip/

vudc_dev.c
    77:   while (!list_empty(&ep->req_queue)) {   in nuke()
    78:   req = list_first_entry(&ep->req_queue, struct vrequest,   in nuke()
    327:  list_add_tail(&req->req_entry, &ep->req_queue);   in vep_queue()
    353:  list_for_each_entry(lst, &ep->req_queue, req_entry) {   in vep_dequeue()
    389:  !list_empty(&ep->req_queue))   in vep_set_halt_and_wedge()
    544:  INIT_LIST_HEAD(&ep->req_queue);   in init_vudc_hw()

vudc.h
    32:   struct list_head req_queue; /* Request queue */   (member)

vudc_sysfs.c
    47:   usb_req = list_last_entry(&ep0->req_queue, struct vrequest, req_entry);   in get_gadget_descs()

vudc_transfer.c
    190:  list_for_each_entry(req, &ep->req_queue, req_entry) {   in transfer()
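nuke() in vudc_dev.c walks req_queue with list_first_entry() until the list is empty, failing each pending request. A sketch under the assumption that vep embeds a struct usb_ep and that completions go through usb_gadget_giveback_request(), as USB gadget drivers normally do; the status parameter is illustrative:

    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/usb/gadget.h>

    struct vrequest {
        struct usb_request req;      /* embedded gadget request */
        struct list_head req_entry;
    };

    struct vep {
        struct usb_ep ep;            /* assumed embedded endpoint */
        struct list_head req_queue;  /* Request queue */
    };

    /* Complete every request still pending on @ep with an error. */
    static void sketch_nuke(struct vep *ep, int status)
    {
        struct vrequest *req;

        while (!list_empty(&ep->req_queue)) {
            req = list_first_entry(&ep->req_queue, struct vrequest,
                                   req_entry);
            list_del_init(&req->req_entry);
            req->req.status = status;  /* e.g. -ESHUTDOWN */
            usb_gadget_giveback_request(&ep->ep, &req->req);
        }
    }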
/kernel/linux/linux-6.6/drivers/usb/usbip/

vudc_dev.c
    77:   while (!list_empty(&ep->req_queue)) {   in nuke()
    78:   req = list_first_entry(&ep->req_queue, struct vrequest,   in nuke()
    327:  list_add_tail(&req->req_entry, &ep->req_queue);   in vep_queue()
    353:  list_for_each_entry(lst, &ep->req_queue, req_entry) {   in vep_dequeue()
    389:  !list_empty(&ep->req_queue))   in vep_set_halt_and_wedge()
    543:  INIT_LIST_HEAD(&ep->req_queue);   in init_vudc_hw()

vudc.h
    32:   struct list_head req_queue; /* Request queue */   (member)

vudc_sysfs.c
    47:   usb_req = list_last_entry(&ep0->req_queue, struct vrequest, req_entry);   in get_gadget_descs()
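vep_dequeue() shows the complementary lookup: scan req_queue for the request being cancelled, unlink it, and complete it. A hedged sketch; the -ECONNRESET status and the giveback call are conventional gadget-driver behaviour rather than something read off the hits:

    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/usb/gadget.h>

    struct vrequest {
        struct usb_request req;
        struct list_head req_entry;
    };

    struct vep {
        struct usb_ep ep;
        struct list_head req_queue;  /* Request queue */
    };

    /* Cancel @_req if it is still queued on @ep. */
    static int sketch_dequeue(struct vep *ep, struct usb_request *_req)
    {
        struct vrequest *lst;

        list_for_each_entry(lst, &ep->req_queue, req_entry) {
            if (&lst->req == _req) {
                list_del_init(&lst->req_entry);
                _req->status = -ECONNRESET;
                usb_gadget_giveback_request(&ep->ep, _req);
                return 0;
            }
        }
        return -EINVAL;  /* not found on this endpoint */
    }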
/kernel/linux/linux-5.10/drivers/crypto/ccree/

cc_request_mgr.c
    20:   struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];   (member)
    296:  req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;   in cc_do_send_request()
    581:  cc_req = &request_mgr_handle->req_queue[*tail];   in proc_completions()
/kernel/linux/linux-6.6/drivers/crypto/ccree/

cc_request_mgr.c
    20:   struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];   (member)
    295:  req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;   in cc_do_send_request()
    580:  cc_req = &request_mgr_handle->req_queue[*tail];   in proc_completions()
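Unlike the list-based queues above, ccree's req_queue is a fixed array used as a ring: cc_do_send_request() copies the request by value at the head index and proc_completions() reads it back at the tail. A sketch assuming a power-of-two MAX_REQUEST_QUEUE_SIZE so the masked increment wraps correctly; only the two assignments come from the hits, the index-advance lines and the struct contents are reconstructed:

    /* Illustrative size; the masked wrap below requires a power of two. */
    #define MAX_REQUEST_QUEUE_SIZE 4096

    struct cc_crypto_req {
        void (*user_cb)(void *dev, void *ctx, int err);  /* assumed shape */
        void *user_arg;
    };

    struct cc_req_mgr_handle {
        struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
        unsigned int req_queue_head;  /* producer index */
        unsigned int req_queue_tail;  /* consumer index */
    };

    /* Producer, as in cc_do_send_request(): copy the request by value. */
    static void sketch_enqueue(struct cc_req_mgr_handle *mgr,
                               const struct cc_crypto_req *cc_req)
    {
        mgr->req_queue[mgr->req_queue_head] = *cc_req;
        mgr->req_queue_head = (mgr->req_queue_head + 1) &
                              (MAX_REQUEST_QUEUE_SIZE - 1);
    }

    /* Consumer, as in proc_completions(). */
    static struct cc_crypto_req *
    sketch_next_completion(struct cc_req_mgr_handle *mgr)
    {
        struct cc_crypto_req *cc_req =
            &mgr->req_queue[mgr->req_queue_tail];

        mgr->req_queue_tail = (mgr->req_queue_tail + 1) &
                              (MAX_REQUEST_QUEUE_SIZE - 1);
        return cc_req;
    }

Copying by value keeps the ring's storage owned by the request manager, so completed slots can simply be overwritten by later submissions.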
/kernel/linux/linux-5.10/include/media/

media-device.h
    53:   * @req_queue: Queue a validated request, cannot fail. If something goes   (kernel-doc)
    63:   * or delete objects from the request before req_queue exits.   (kernel-doc)
    71:   void (*req_queue)(struct media_request *req);   (member)

v4l2-device.h
    252:  v4l2_dev->mdev->ops->req_queue;   in v4l2_device_supports_requests()
/kernel/linux/linux-6.6/include/media/

media-device.h
    54:   * @req_queue: Queue a validated request, cannot fail. If something goes   (kernel-doc)
    64:   * or delete objects from the request before req_queue exits.   (kernel-doc)
    72:   void (*req_queue)(struct media_request *req);   (member)

v4l2-device.h
    252:  v4l2_dev->mdev->ops->req_queue;   in v4l2_device_supports_requests()
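The media-device.h kernel-doc spells out the contract: req_validate may fail, req_queue must not. For vb2-based drivers the core ships stock helpers, so wiring up request support can look like the sketch below (the mydrv_ name is hypothetical). v4l2_device_supports_requests() then simply tests that ops->req_queue is non-NULL, which is exactly the v4l2-device.h hit above.

    #include <media/media-device.h>
    #include <media/videobuf2-v4l2.h>

    /* Stock vb2 helpers satisfy the contract: validation may fail,
     * queueing must not. */
    static const struct media_device_ops mydrv_media_ops = {
        .req_validate = vb2_request_validate,
        .req_queue    = vb2_request_queue,
    };

    /* During probe:
     *     mdev->ops = &mydrv_media_ops;
     * after which v4l2_device_supports_requests() returns true. */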
/kernel/linux/linux-5.10/drivers/media/mc/

mc-request.c
    156:  * and call req_queue. The reason we set the state first is that this   in media_request_ioctl_queue()
    157:  * allows req_queue to unbind or complete the queued objects in case   in media_request_ioctl_queue()
    161:  * after each object is queued through the req_queue op (and note that   in media_request_ioctl_queue()
    167:  * while req_queue is called, so that's safe as well.   in media_request_ioctl_queue()
    175:  mdev->ops->req_queue(req);   in media_request_ioctl_queue()
    253:  !mdev->ops->req_validate || !mdev->ops->req_queue)   in media_request_get_by_fd()
/kernel/linux/linux-6.6/drivers/media/mc/

mc-request.c
    156:  * and call req_queue. The reason we set the state first is that this   in media_request_ioctl_queue()
    157:  * allows req_queue to unbind or complete the queued objects in case   in media_request_ioctl_queue()
    161:  * after each object is queued through the req_queue op (and note that   in media_request_ioctl_queue()
    167:  * while req_queue is called, so that's safe as well.   in media_request_ioctl_queue()
    175:  mdev->ops->req_queue(req);   in media_request_ioctl_queue()
    253:  !mdev->ops->req_validate || !mdev->ops->req_queue)   in media_request_get_by_fd()
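The mc-request.c comments describe an ordering guarantee: the core marks the request QUEUED before invoking the driver's req_queue op, so the op is free to unbind or complete objects immediately. A stripped-down sketch of that sequence; locking, refcounting, validation, and the error paths of the real media_request_ioctl_queue() are elided:

    #include <media/media-device.h>
    #include <media/media-request.h>

    static long sketch_ioctl_queue(struct media_device *mdev,
                                   struct media_request *req)
    {
        /* State first: req_queue may unbind or complete the queued
         * objects synchronously, and those paths test the state. */
        req->state = MEDIA_REQUEST_STATE_QUEUED;

        /* By contract this cannot fail (see media-device.h above). */
        mdev->ops->req_queue(req);
        return 0;
    }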