Lines Matching defs:hs_req
425 * @hs_req: The request being processed.
432 struct dwc2_hsotg_req *hs_req)
434 struct usb_request *req = &hs_req->req;
491 * @hs_req: The request to write data for.
505 struct dwc2_hsotg_req *hs_req)
509 int buf_pos = hs_req->req.actual;
641 to_write, hs_req->req.length, can_write, buf_pos);
646 hs_req->req.actual = buf_pos + to_write;
653 data = hs_req->req.buf + buf_pos;
983 struct dwc2_hsotg_req *hs_req, *treq;
1007 list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
1008 dma_addr_t dma_addr = hs_req->req.dma;
1010 if (hs_req->req.num_sgs) {
1011 WARN_ON(hs_req->req.num_sgs > 1);
1012 dma_addr = sg_dma_address(hs_req->req.sg);
1015 hs_req->req.length);
1035 struct dwc2_hsotg_req *hs_req,
1042 * @hs_req: The request to start.
1050 struct dwc2_hsotg_req *hs_req,
1053 struct usb_request *ureq = &hs_req->req;
1070 } else if (hs_ep->req != hs_req && continuing) {
1147 hs_ep->req = hs_req;
1200 hs_req->req.frame_number = hs_ep->target_frame;
1201 hs_req->req.actual = 0;
1202 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
1230 dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
1285 struct dwc2_hsotg_req *hs_req)
1287 void *req_buf = hs_req->req.buf;
1293 WARN_ON(hs_req->saved_req_buf);
1296 hs_ep->ep.name, req_buf, hs_req->req.length);
1298 hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
1299 if (!hs_req->req.buf) {
1300 hs_req->req.buf = req_buf;
1308 hs_req->saved_req_buf = req_buf;
1311 memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
1318 struct dwc2_hsotg_req *hs_req)
1321 if (!using_dma(hsotg) || !hs_req->saved_req_buf)
1325 hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);
1328 if (!hs_ep->dir_in && !hs_req->req.status)
1329 memcpy(hs_req->saved_req_buf, hs_req->req.buf,
1330 hs_req->req.actual);
1333 kfree(hs_req->req.buf);
1335 hs_req->req.buf = hs_req->saved_req_buf;
1336 hs_req->saved_req_buf = NULL;
1405 struct dwc2_hsotg_req *hs_req = our_req(req);
1426 INIT_LIST_HEAD(&hs_req->queue);
1455 ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
1473 list_add_tail(&hs_req->queue, &hs_ep->queue);
1483 dma_addr_t dma_addr = hs_req->req.dma;
1485 if (hs_req->req.num_sgs) {
1486 WARN_ON(hs_req->req.num_sgs > 1);
1487 dma_addr = sg_dma_address(hs_req->req.sg);
1490 hs_req->req.length);
1502 dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
1517 dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
1540 struct dwc2_hsotg_req *hs_req = our_req(req);
1542 kfree(hs_req);
1746 struct dwc2_hsotg_req *hs_req;
1749 hs_req = get_ep_head(hs_ep);
1750 dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
1774 struct dwc2_hsotg_req *hs_req;
1853 hs_req = ep->req;
1855 list_del_init(&hs_req->queue);
1856 if (hs_req->req.complete) {
1859 &ep->ep, &hs_req->req);
2037 struct dwc2_hsotg_req *hs_req = our_req(req);
2047 if (!list_empty(&hs_req->queue)) {
2105 * @hs_req: The request to complete.
2116 struct dwc2_hsotg_req *hs_req,
2119 if (!hs_req) {
2125 hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
2132 if (hs_req->req.status == -EINPROGRESS)
2133 hs_req->req.status = result;
2136 dwc2_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
2138 dwc2_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req);
2141 list_del_init(&hs_req->queue);
2148 if (hs_req->req.complete) {
2150 usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);
2180 struct dwc2_hsotg_req *hs_req;
2191 hs_req = get_ep_head(hs_ep);
2192 if (!hs_req) {
2196 ureq = &hs_req->req;
2218 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2262 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2267 if (!hs_req) {
2283 read_ptr = hs_req->req.actual;
2284 max_req = hs_req->req.length - read_ptr;
2287 __func__, to_read, max_req, read_ptr, hs_req->req.length);
2300 hs_req->req.actual += to_read;
2308 hs_req->req.buf + read_ptr, to_read);
2390 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2391 struct usb_request *req = &hs_req->req;
2395 if (!hs_req) {
2402 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2430 dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
2458 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
2651 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2653 if (!hs_ep->dir_in || !hs_req) {
2664 if (hs_req->req.actual < hs_req->req.length) {
2667 return dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
2684 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2688 if (!hs_req) {
2703 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2740 if (hs_req->req.actual != size_done)
2742 __func__, hs_req->req.actual, size_done);
2744 hs_req->req.actual = size_done;
2746 hs_req->req.length, hs_req->req.actual, hs_req->req.zero);
2748 if (!size_left && hs_req->req.actual < hs_req->req.length) {
2750 dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
2772 hs_req->req.frame_number = hs_ep->target_frame;
2776 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2823 struct dwc2_hsotg_req *hs_req;
2860 hs_req = get_ep_head(hs_ep);
2861 if (hs_req) {
2862 hs_req->req.frame_number = hs_ep->target_frame;
2863 hs_req->req.actual = 0;
2864 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
2887 struct dwc2_hsotg_req *hs_req;
2918 hs_req = get_ep_head(ep);
2919 if (hs_req) {
2920 hs_req->req.frame_number = ep->target_frame;
2921 hs_req->req.actual = 0;
2922 dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
2955 struct dwc2_hsotg_req *hs_req;
3011 hs_req = get_ep_head(hs_ep);
3012 if (hs_req) {
3013 hs_req->req.frame_number = hs_ep->target_frame;
3014 hs_req->req.actual = 0;
3015 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
4319 struct dwc2_hsotg_req *hs_req = our_req(req);
4328 if (!on_list(hs_ep, hs_req)) {
4337 dwc2_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
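
Taken together, the matches sketch the request wrapper the driver passes around. The structure below is reconstructed only from the members visible above (the embedded usb_request, the queue list head and saved_req_buf) and from the our_req() conversions at lines 1405, 1540, 2037 and 4319; treat it as a reading aid, not a copy of the in-tree definition in drivers/usb/dwc2/core.h, which may differ in ordering and documentation.

/*
 * Minimal sketch of the request wrapper, reconstructed from the matches
 * above; the in-tree definition may differ in detail.
 */
struct dwc2_hsotg_req {
        struct usb_request      req;            /* embedded gadget request (&hs_req->req) */
        struct list_head        queue;          /* link into hs_ep->queue, cf. INIT_LIST_HEAD() at 1426 */
        void                    *saved_req_buf; /* original buffer while a bounce buffer is in use */
};

/*
 * Wherever a struct usb_request arrives from the gadget core (lines 1405,
 * 1540, 2037 and 4319) the driver recovers its wrapper with container_of()
 * on the embedded member.
 */
static inline struct dwc2_hsotg_req *our_req(struct usb_request *req)
{
        return container_of(req, struct dwc2_hsotg_req, req);
}

Every externally visible entry point therefore starts by converting the core's usb_request back into this wrapper before touching the endpoint queue.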
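
The matches at lines 1285 to 1336 cover the bounce-buffer handling for DMA-unsafe request buffers: on queue the original pointer is parked in saved_req_buf and replaced with a kmalloc() copy, and on completion the received data is copied back for OUT endpoints before the original pointer is restored. The sketch below condenses that pattern; the alignment test and the dir_in guard on the queue-side copy are assumptions here, and the driver's debug prints are left out.

/*
 * Sketch of the bounce-buffer pattern from lines 1285 to 1336.
 */
static int dwc2_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg *hsotg,
                                                 struct dwc2_hsotg_ep *hs_ep,
                                                 struct dwc2_hsotg_req *hs_req)
{
        void *req_buf = hs_req->req.buf;                        /* 1287 */

        /* assumed guard: only bounce when DMA is in use and the buffer
         * is not aligned well enough for it
         */
        if (!using_dma(hsotg) || !((long)req_buf & 3))
                return 0;

        WARN_ON(hs_req->saved_req_buf);                         /* 1293 */

        hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);     /* 1298 */
        if (!hs_req->req.buf) {
                hs_req->req.buf = req_buf;                      /* 1300: restore and give up */
                return -ENOMEM;
        }

        hs_req->saved_req_buf = req_buf;                        /* 1308 */

        if (hs_ep->dir_in)      /* assumed: only IN data needs to reach the bounce buffer */
                memcpy(hs_req->req.buf, req_buf, hs_req->req.length);   /* 1311 */
        return 0;
}

static void dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg,
                                                     struct dwc2_hsotg_ep *hs_ep,
                                                     struct dwc2_hsotg_req *hs_req)
{
        if (!using_dma(hsotg) || !hs_req->saved_req_buf)        /* 1321 */
                return;

        /* copy received data back for OUT endpoints on success */
        if (!hs_ep->dir_in && !hs_req->req.status)              /* 1328 */
                memcpy(hs_req->saved_req_buf, hs_req->req.buf,
                       hs_req->req.actual);

        kfree(hs_req->req.buf);                                 /* 1333 */
        hs_req->req.buf = hs_req->saved_req_buf;                /* 1335 */
        hs_req->saved_req_buf = NULL;                           /* 1336 */
}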
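
Every terminal site in the listing, whether a normal completion (for example lines 2218, 2402, 2703), an isochronous request dropped with -ENODATA (1202, 2922, 3015 and the call starting at 2864) or a dequeue with -ECONNRESET (4337), funnels into dwc2_hsotg_complete_request() at lines 2116 to 2150. The following is a condensed paraphrase of the ordering those matches show, not the verbatim function: debug output, the unlocking around the giveback, clearing hs_ep->req and restarting the next queued request are elided, and the using_dma() guard is assumed.

/*
 * Paraphrase of the completion ordering visible in lines 2116 to 2150.
 */
static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
                                        struct dwc2_hsotg_ep *hs_ep,
                                        struct dwc2_hsotg_req *hs_req,
                                        int result)
{
        if (!hs_req)                                    /* 2119: nothing to complete */
                return;

        /* keep an earlier error; only a still-in-progress status is updated */
        if (hs_req->req.status == -EINPROGRESS)         /* 2132 */
                hs_req->req.status = result;

        if (using_dma(hsotg))                           /* guard assumed */
                dwc2_hsotg_unmap_dma(hsotg, hs_ep, hs_req);     /* 2136 */

        /* copy back and release any bounce buffer (sketched above) */
        dwc2_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req); /* 2138 */

        list_del_init(&hs_req->queue);                  /* 2141: off the endpoint queue */

        if (hs_req->req.complete)                       /* 2148 */
                usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);  /* 2150 */
}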