Lines matching hs_req in the dwc2 gadget driver (drivers/usb/dwc2/gadget.c):
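Every match below goes through the same wrapper: struct dwc2_hsotg_req embeds the generic struct usb_request that the gadget core passes around, plus the driver's own bookkeeping, and our_req() (used at lines 1406, 1541, 2037 and 4318) recovers the wrapper from the embedded request. A sketch of that relationship, assuming the three fields visible in this listing are the whole structure:

struct dwc2_hsotg_req {
	struct usb_request	req;		/* what the gadget layer sees and queues */
	struct list_head	queue;		/* link on the endpoint's request queue */
	void			*saved_req_buf;	/* original buffer while a DMA bounce buffer is in use */
};

static inline struct dwc2_hsotg_req *our_req(struct usb_request *req)
{
	/* recover the driver's wrapper from the embedded usb_request */
	return container_of(req, struct dwc2_hsotg_req, req);
}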
426 * @hs_req: The request being processed.
433 struct dwc2_hsotg_req *hs_req)
435 struct usb_request *req = &hs_req->req;
492 * @hs_req: The request to write data for.
506 struct dwc2_hsotg_req *hs_req)
510 int buf_pos = hs_req->req.actual;
642 to_write, hs_req->req.length, can_write, buf_pos);
647 hs_req->req.actual = buf_pos + to_write;
654 data = hs_req->req.buf + buf_pos;
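Lines 492-654 are the PIO (non-DMA) FIFO write path, dwc2_hsotg_write_fifo(), also called from lines 1231 and 2667 below. The bookkeeping the fragments show: the write always resumes at req.actual and advances it by however many bytes fit in the FIFO on this pass; the OUT read path later in the listing (lines 2283-2308) runs the same arithmetic in the other direction. A minimal sketch of just that bookkeeping, under a made-up helper name and without the FIFO-space and periodic-endpoint checks the real function does first:

static int fifo_write_step(struct dwc2_hsotg_req *hs_req, int can_write,
			   void **data)
{
	int buf_pos = hs_req->req.actual;		/* bytes already pushed */
	int to_write = hs_req->req.length - buf_pos;	/* bytes still pending */

	if (to_write > can_write)			/* limited by available FIFO space */
		to_write = can_write;

	*data = hs_req->req.buf + buf_pos;		/* where this pass copies from */
	hs_req->req.actual = buf_pos + to_write;	/* advance before pushing */

	/* the caller pushes to_write bytes from *data into the endpoint FIFO */
	return to_write;
}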
984 struct dwc2_hsotg_req *hs_req, *treq;
1008 list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
1009 dma_addr_t dma_addr = hs_req->req.dma;
1011 if (hs_req->req.num_sgs) {
1012 WARN_ON(hs_req->req.num_sgs > 1);
1013 dma_addr = sg_dma_address(hs_req->req.sg);
1016 hs_req->req.length);
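Lines 1009-1016 (and the identical block at 1484-1491 in the queue path) pick the DMA address for a mapped request: either the single req.dma, or, when the gadget core handed over a scatterlist, the address of its only entry; the WARN_ON documents that only one entry is supported. The pattern, pulled out into a hypothetical helper for clarity:

static dma_addr_t req_dma_address(struct dwc2_hsotg_req *hs_req)
{
	dma_addr_t dma_addr = hs_req->req.dma;

	if (hs_req->req.num_sgs) {
		/* only a single scatterlist entry is supported */
		WARN_ON(hs_req->req.num_sgs > 1);
		dma_addr = sg_dma_address(hs_req->req.sg);
	}
	return dma_addr;
}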
1036 struct dwc2_hsotg_req *hs_req,
1043 * @hs_req: The request to start.
1051 struct dwc2_hsotg_req *hs_req,
1054 struct usb_request *ureq = &hs_req->req;
1071 } else if (hs_ep->req != hs_req && continuing) {
1148 hs_ep->req = hs_req;
1201 hs_req->req.frame_number = hs_ep->target_frame;
1202 hs_req->req.actual = 0;
1203 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
1231 dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
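Lines 1201-1203 fail an isochronous request with -ENODATA when its interval has already passed: record the frame number, zero the byte count, complete. The interrupt handlers further down (lines 2862-2864, 2920-2922 and 3013-3015) repeat the same three steps on the request at the head of the endpoint queue. Expressed as a hypothetical helper (the driver open-codes it each time; get_ep_head() is the driver's own "first queued request or NULL" accessor):

static void complete_head_request(struct dwc2_hsotg *hsotg,
				  struct dwc2_hsotg_ep *hs_ep, int result)
{
	struct dwc2_hsotg_req *hs_req = get_ep_head(hs_ep);

	if (!hs_req)
		return;				/* nothing queued on this endpoint */

	hs_req->req.frame_number = hs_ep->target_frame;	/* report the missed (micro)frame */
	hs_req->req.actual = 0;				/* no data was transferred */
	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
}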
1286 struct dwc2_hsotg_req *hs_req)
1288 void *req_buf = hs_req->req.buf;
1294 WARN_ON(hs_req->saved_req_buf);
1297 hs_ep->ep.name, req_buf, hs_req->req.length);
1299 hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
1300 if (!hs_req->req.buf) {
1301 hs_req->req.buf = req_buf;
1309 hs_req->saved_req_buf = req_buf;
1312 memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
1319 struct dwc2_hsotg_req *hs_req)
1322 if (!using_dma(hsotg) || !hs_req->saved_req_buf)
1326 hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);
1329 if (!hs_ep->dir_in && !hs_req->req.status)
1330 memcpy(hs_req->saved_req_buf, hs_req->req.buf,
1331 hs_req->req.actual);
1334 kfree(hs_req->req.buf);
1336 hs_req->req.buf = hs_req->saved_req_buf;
1337 hs_req->saved_req_buf = NULL;
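Lines 1286-1337 are the two halves of the DMA bounce-buffer handling: dwc2_hsotg_handle_unaligned_buf_start() (queued through line 1456) swaps in a kmalloc'd copy of a buffer the controller cannot use directly, and dwc2_hsotg_handle_unaligned_buf_complete() (called from line 2138) undoes it before the request is handed back. A condensed sketch of the pairing under shortened names; the alignment test that decides whether to bounce at all, and the diagnostics, are left out:

static int unaligned_buf_start(struct dwc2_hsotg_ep *hs_ep,
			       struct dwc2_hsotg_req *hs_req)
{
	void *req_buf = hs_req->req.buf;

	hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
	if (!hs_req->req.buf) {
		hs_req->req.buf = req_buf;	/* restore and fail the queue call */
		return -ENOMEM;
	}

	hs_req->saved_req_buf = req_buf;
	if (hs_ep->dir_in)			/* IN: copy the payload into the bounce buffer */
		memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
	return 0;
}

static void unaligned_buf_complete(struct dwc2_hsotg_ep *hs_ep,
				   struct dwc2_hsotg_req *hs_req)
{
	if (!hs_req->saved_req_buf)
		return;				/* this request was never bounced */

	if (!hs_ep->dir_in && !hs_req->req.status)	/* successful OUT: copy received data back */
		memcpy(hs_req->saved_req_buf, hs_req->req.buf,
		       hs_req->req.actual);

	kfree(hs_req->req.buf);
	hs_req->req.buf = hs_req->saved_req_buf;	/* give the gadget its own buffer back */
	hs_req->saved_req_buf = NULL;
}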
1406 struct dwc2_hsotg_req *hs_req = our_req(req);
1427 INIT_LIST_HEAD(&hs_req->queue);
1456 ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
1474 list_add_tail(&hs_req->queue, &hs_ep->queue);
1484 dma_addr_t dma_addr = hs_req->req.dma;
1486 if (hs_req->req.num_sgs) {
1487 WARN_ON(hs_req->req.num_sgs > 1);
1488 dma_addr = sg_dma_address(hs_req->req.sg);
1491 hs_req->req.length);
1503 dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
1518 dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
1541 struct dwc2_hsotg_req *hs_req = our_req(req);
1543 kfree(hs_req);
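Lines 1406-1518 trace the ep_queue path from the gadget core down to the first hardware kick, and 1541-1543 show the matching free_request (the wrapper is simply kfree'd once it is off every list). A heavily condensed skeleton of the queue path, with locking, ep0 special cases and descriptor-DMA handling left out:

static int ep_queue_skeleton(struct dwc2_hsotg *hs, struct dwc2_hsotg_ep *hs_ep,
			     struct dwc2_hsotg_req *hs_req)
{
	bool first;
	int ret;

	INIT_LIST_HEAD(&hs_req->queue);		/* not on any list yet */

	ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
	if (ret)
		return ret;

	/* when using_dma(hs), the request buffer is mapped for DMA at this point */

	first = list_empty(&hs_ep->queue);
	list_add_tail(&hs_req->queue, &hs_ep->queue);

	if (first)				/* endpoint was idle: start the transfer now */
		dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);

	return 0;
}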
1747 struct dwc2_hsotg_req *hs_req;
1750 hs_req = get_ep_head(hs_ep);
1751 dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
1775 struct dwc2_hsotg_req *hs_req;
1853 hs_req = ep->req;
1855 list_del_init(&hs_req->queue);
1856 if (hs_req->req.complete) {
1859 &ep->ep, &hs_req->req);
2037 struct dwc2_hsotg_req *hs_req = our_req(req);
2047 if (!list_empty(&hs_req->queue)) {
2105 * @hs_req: The request to complete.
2116 struct dwc2_hsotg_req *hs_req,
2119 if (!hs_req) {
2125 hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
2132 if (hs_req->req.status == -EINPROGRESS)
2133 hs_req->req.status = result;
2136 dwc2_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
2138 dwc2_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req);
2141 list_del_init(&hs_req->queue);
2148 if (hs_req->req.complete) {
2150 usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);
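Lines 2105-2150 are dwc2_hsotg_complete_request(), the single teardown path most of the other matches funnel into. The ordering it enforces matters: the status is only filled in if the gadget left it at -EINPROGRESS, the DMA unmap and bounce-buffer copy-back happen before the request leaves the queue, and the gadget's completion callback runs last. A condensed sketch; the real function also clears hs_ep->req, drops the lock around the callback, and kicks the next queued request:

static void complete_request_sketch(struct dwc2_hsotg *hsotg,
				    struct dwc2_hsotg_ep *hs_ep,
				    struct dwc2_hsotg_req *hs_req, int result)
{
	if (!hs_req)
		return;				/* completion raced with an empty queue */

	if (hs_req->req.status == -EINPROGRESS)
		hs_req->req.status = result;	/* don't clobber a status set earlier */

	if (using_dma(hsotg))
		dwc2_hsotg_unmap_dma(hsotg, hs_ep, hs_req);

	dwc2_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req);

	list_del_init(&hs_req->queue);		/* off hs_ep->queue */

	if (hs_req->req.complete)
		usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);
}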
2180 struct dwc2_hsotg_req *hs_req;
2191 hs_req = get_ep_head(hs_ep);
2192 if (!hs_req) {
2196 ureq = &hs_req->req;
2218 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2262 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2267 if (!hs_req) {
2283 read_ptr = hs_req->req.actual;
2284 max_req = hs_req->req.length - read_ptr;
2287 __func__, to_read, max_req, read_ptr, hs_req->req.length);
2300 hs_req->req.actual += to_read;
2308 hs_req->req.buf + read_ptr, to_read);
2390 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2391 struct usb_request *req = &hs_req->req;
2395 if (!hs_req) {
2402 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2430 dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
2458 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
2651 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2653 if (!hs_ep->dir_in || !hs_req) {
2664 if (hs_req->req.actual < hs_req->req.length) {
2667 return dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
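Lines 2651-2667 restart the PIO write when an IN endpoint still has an active request with bytes left to push; otherwise there is nothing to transmit. A minimal sketch of that check under a made-up name, leaving out the interrupt-mask housekeeping done when no request is pending:

static int trytx_sketch(struct dwc2_hsotg *hsotg, struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg_req *hs_req = hs_ep->req;	/* request currently in flight */

	if (!hs_ep->dir_in || !hs_req)
		return 0;				/* not an IN endpoint, or idle */

	if (hs_req->req.actual < hs_req->req.length)	/* bytes still pending */
		return dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);

	return 0;
}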
2684 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2688 if (!hs_req) {
2703 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2740 if (hs_req->req.actual != size_done)
2742 __func__, hs_req->req.actual, size_done);
2744 hs_req->req.actual = size_done;
2746 hs_req->req.length, hs_req->req.actual, hs_req->req.zero);
2748 if (!size_left && hs_req->req.actual < hs_req->req.length) {
2750 dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
2772 hs_req->req.frame_number = hs_ep->target_frame;
2776 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
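Lines 2684-2776 reconcile req.actual with the byte count the hardware reports once an IN transfer completes, and line 2750 is one of the two "continuing" starts (the other is 2430): dwc2_hsotg_start_req(..., true) re-arms the request already held in hs_ep->req to push its next chunk, while the false callers (lines 1503, 1518, 1751) start a fresh request from the queue. Line 1071 belongs to the sanity check built around that flag; expressed as a hypothetical predicate:

static bool start_req_allowed(struct dwc2_hsotg_ep *hs_ep,
			      struct dwc2_hsotg_req *hs_req, bool continuing)
{
	if (hs_ep->req && !continuing)
		return false;	/* another request is already in flight */
	if (continuing && hs_ep->req != hs_req)
		return false;	/* continuation names a request that is not the active one */
	return true;
}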
2823 struct dwc2_hsotg_req *hs_req;
2860 hs_req = get_ep_head(hs_ep);
2861 if (hs_req) {
2862 hs_req->req.frame_number = hs_ep->target_frame;
2863 hs_req->req.actual = 0;
2864 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
2887 struct dwc2_hsotg_req *hs_req;
2918 hs_req = get_ep_head(ep);
2919 if (hs_req) {
2920 hs_req->req.frame_number = ep->target_frame;
2921 hs_req->req.actual = 0;
2922 dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
2955 struct dwc2_hsotg_req *hs_req;
3011 hs_req = get_ep_head(hs_ep);
3012 if (hs_req) {
3013 hs_req->req.frame_number = hs_ep->target_frame;
3014 hs_req->req.actual = 0;
3015 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
4318 struct dwc2_hsotg_req *hs_req = our_req(req);
4327 if (!on_list(hs_ep, hs_req)) {
4336 dwc2_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
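Finally, lines 4318-4336 are the dequeue path: a request the gadget wants back is completed with -ECONNRESET, but only after on_list() confirms it is still sitting on this endpoint's queue. A condensed sketch; the real operation also takes the controller spinlock and may have to abort a transfer that is already active on the hardware:

static int ep_dequeue_sketch(struct dwc2_hsotg *hs, struct dwc2_hsotg_ep *hs_ep,
			     struct usb_request *req)
{
	struct dwc2_hsotg_req *hs_req = our_req(req);

	if (!on_list(hs_ep, hs_req))	/* already completed, or queued elsewhere */
		return -EINVAL;

	dwc2_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
	return 0;
}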