Lines matching refs:req — references to "req"; each hit below is prefixed with its line number in the searched source file (an OMAP UDC gadget driver).
269 struct omap_req *req;
271 req = kzalloc(sizeof(*req), gfp_flags);
272 if (!req)
275 INIT_LIST_HEAD(&req->queue);
277 return &req->req;
283 struct omap_req *req = container_of(_req, struct omap_req, req);
285 kfree(req);
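The hits at 269-285 come from the driver's request constructor/destructor pair (the usb_ep_ops .alloc_request/.free_request methods). Below is a minimal sketch reconstructing them from the fragments above; the function names, the error-return path and the required headers (<linux/slab.h>, <linux/usb/gadget.h>) are assumptions, not a verbatim copy of the driver:

static struct usb_request *
omap_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct omap_req *req;

	req = kzalloc(sizeof(*req), gfp_flags);		/* line 271 */
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);	/* not yet queued on any endpoint */
	return &req->req;		/* hand back the embedded usb_request */
}

static void
omap_free_request(struct usb_ep *ep, struct usb_request *_req)
{
	struct omap_req *req = container_of(_req, struct omap_req, req);

	kfree(req);			/* line 285: frees the wrapper, usb_request included */
}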
291 done(struct omap_ep *ep, struct omap_req *req, int status)
296 list_del_init(&req->queue);
298 if (req->req.status == -EINPROGRESS)
299 req->req.status = status;
301 status = req->req.status;
304 usb_gadget_unmap_request(&udc->gadget, &req->req,
310 VDBG("complete %s req %p stat %d len %u/%u\n",
311 ep->ep.name, &req->req, status,
312 req->req.actual, req->req.length);
317 usb_gadget_giveback_request(&ep->ep, &req->req);
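The matches at 291-317 are from the done() completion helper. A sketch of its shape, assuming the usual omap_ep/omap_udc fields (udc, has_dma, bEndpointAddress), the driver's use_dma flag and its VDBG macro; only the visible steps (dequeue, status swap, DMA unmap, giveback) are taken from the fragments, and the lock handling around the callback is simplified:

static void done(struct omap_ep *ep, struct omap_req *req, int status)
{
	struct omap_udc *udc = ep->udc;

	list_del_init(&req->queue);		/* request is no longer pending on ep */

	/* keep an error recorded earlier, otherwise report this outcome */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	if (use_dma && ep->has_dma)		/* assumption: only DMA'd requests were mapped */
		usb_gadget_unmap_request(&udc->gadget, &req->req,
				(ep->bEndpointAddress & USB_DIR_IN) != 0);

	if (status && status != -ESHUTDOWN)
		VDBG("complete %s req %p stat %d len %u/%u\n",
				ep->ep.name, &req->req, status,
				req->req.actual, req->req.length);

	/* the driver drops its spinlock around the gadget callback; omitted here */
	usb_gadget_giveback_request(&ep->ep, &req->req);
}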
331 write_packet(u8 *buf, struct omap_req *req, unsigned max)
336 len = min(req->req.length - req->req.actual, max);
337 req->req.actual += len;
357 static int write_fifo(struct omap_ep *ep, struct omap_req *req)
364 buf = req->req.buf + req->req.actual;
373 count = write_packet(buf, req, count);
380 else if (req->req.length == req->req.actual
381 && !req->req.zero)
391 done(ep, req, 0);
396 read_packet(u8 *buf, struct omap_req *req, unsigned avail)
401 len = min(req->req.length - req->req.actual, avail);
402 req->req.actual += len;
419 static int read_fifo(struct omap_ep *ep, struct omap_req *req)
425 buf = req->req.buf + req->req.actual;
446 count = read_packet(buf, req, avail);
453 req->req.status = -EOVERFLOW;
458 } else if (req->req.length == req->req.actual)
466 done(ep, req, 0);
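The matches at 331-466 are the PIO path: write_packet()/write_fifo() feed the TX FIFO, read_packet()/read_fifo() drain the RX FIFO, and both directions advance req.req.actual and complete the request with done() once it is exhausted. A sketch of the write-side helper (the FIFO register access is only hinted at in a comment; the clamping and bookkeeping are what the fragments at 336-337 show, and read_packet() at 401-402 mirrors this with the available byte count instead of the packet limit):

static unsigned
write_packet(u8 *buf, struct omap_req *req, unsigned max)
{
	unsigned len;

	/* never push more than the request still holds, nor more than one packet */
	len = min(req->req.length - req->req.actual, max);
	req->req.actual += len;

	/* ... copy 'len' bytes from 'buf' into the endpoint FIFO register ... */

	return len;
}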
516 static void next_in_dma(struct omap_ep *ep, struct omap_req *req)
519 unsigned length = req->req.length - req->req.actual;
541 OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
550 req->dma_bytes = length;
553 static void finish_in_dma(struct omap_ep *ep, struct omap_req *req, int status)
558 req->req.actual += req->dma_bytes;
561 if (req->req.actual < req->req.length)
563 if (req->req.zero
564 && req->dma_bytes != 0
565 && (req->req.actual % ep->maxpacket) == 0)
568 req->req.actual += dma_src_len(ep, req->req.dma
569 + req->req.actual);
576 done(ep, req, status);
579 static void next_out_dma(struct omap_ep *ep, struct omap_req *req)
581 unsigned packets = req->req.length - req->req.actual;
588 req->dma_bytes = packets * ep->ep.maxpacket;
594 OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
609 finish_out_dma(struct omap_ep *ep, struct omap_req *req, int status, int one)
614 ep->dma_counter = (u16) (req->req.dma + req->req.actual);
615 count = dma_dest_len(ep, req->req.dma + req->req.actual);
616 count += req->req.actual;
619 if (count <= req->req.length)
620 req->req.actual = count;
622 if (count != req->dma_bytes || status)
626 else if (req->req.actual < req->req.length)
633 done(ep, req, status);
640 struct omap_req *req;
648 req = container_of(ep->queue.next,
650 finish_in_dma(ep, req, 0);
655 req = container_of(ep->queue.next,
657 next_in_dma(ep, req);
667 req = container_of(ep->queue.next,
669 finish_out_dma(ep, req, 0, dman_stat & UDC_DMA_RX_SB);
674 req = container_of(ep->queue.next,
676 next_out_dma(ep, req);
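The matches at 640-676 come from the DMA completion interrupt: the head of ep->queue is the transfer that just finished, and whatever sits at the head afterwards (the remainder of the same request, or the next one) gets the channel. A sketch of the TX-done half of that pattern, with a hypothetical helper name and without the status-register decoding (dman_stat) visible in the fragments:

static void dma_irq_txn_done(struct omap_ep *ep)	/* hypothetical name */
{
	struct omap_req *req;

	if (!list_empty(&ep->queue)) {
		/* account for the DMA transfer that just completed */
		req = container_of(ep->queue.next, struct omap_req, queue);
		finish_in_dma(ep, req, 0);
	}

	if (!list_empty(&ep->queue)) {
		/* start DMA for whatever is now at the head of the queue */
		req = container_of(ep->queue.next, struct omap_req, queue);
		next_in_dma(ep, req);
	}
}

The RX side at 667-676 follows the same shape with finish_out_dma()/next_out_dma().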
790 struct omap_req *req;
791 req = container_of(ep->queue.next, struct omap_req, queue);
793 (is_in ? next_in_dma : next_out_dma)(ep, req);
796 (is_in ? write_fifo : read_fifo)(ep, req);
811 struct omap_req *req;
816 req = container_of(ep->queue.next, struct omap_req, queue);
818 req = NULL;
825 ep->dma_channel - 1, req);
836 if (req) {
837 finish_in_dma(ep, req, -ECONNRESET);
853 if (req)
854 finish_out_dma(ep, req, -ECONNRESET, 0);
869 struct omap_req *req = container_of(_req, struct omap_req, req);
875 if (!_req || !req->req.complete || !req->req.buf
876 || !list_empty(&req->queue)) {
885 if (req->req.length > ep->ep.maxpacket)
897 && (req->req.length % ep->ep.maxpacket) != 0) {
907 usb_gadget_map_request(&udc->gadget, &req->req,
910 VDBG("%s queue req %p, len %d buf %p\n",
915 req->req.status = -EINPROGRESS;
916 req->req.actual = 0;
936 if (!req->req.length) {
964 done(ep, req, 0);
965 req = NULL;
984 (is_in ? next_in_dma : next_out_dma)(ep, req);
985 else if (req) {
986 if ((is_in ? write_fifo : read_fifo)(ep, req) == 1)
987 req = NULL;
999 if (req != NULL)
1000 list_add_tail(&req->queue, &ep->queue);
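The matches at 869-1000 belong to the usb_ep_ops .queue handler. A reduced skeleton, with locking, the ep0 special cases and the immediate DMA/PIO start condensed into comments; the sanity checks, the DMA mapping and the "queue it only if it was not started and completed synchronously" tail are what the fragments show:

static int
omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
	struct omap_req *req = container_of(_req, struct omap_req, req);
	struct omap_udc *udc = ep->udc;
	unsigned long flags;

	/* reject bogus or already-queued requests early */
	if (!_req || !req->req.complete || !req->req.buf
			|| !list_empty(&req->queue))
		return -EINVAL;

	if (use_dma && ep->has_dma)		/* map only when DMA will move the data */
		usb_gadget_map_request(&udc->gadget, &req->req,
				(ep->bEndpointAddress & USB_DIR_IN) != 0);

	VDBG("%s queue req %p, len %d buf %p\n",
			_ep->name, _req, _req->length, _req->buf);

	spin_lock_irqsave(&udc->lock, flags);	/* assumption: udc->lock guards the queues */
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;

	/* ... if the endpoint is idle, kick DMA (next_in_dma/next_out_dma) or
	 *     PIO (write_fifo/read_fifo) right away; when the request finishes
	 *     synchronously it is completed with done() and 'req' is set to NULL ... */

	if (req != NULL)
		list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}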
1009 struct omap_req *req = NULL, *iter;
1019 if (&iter->req != _req)
1021 req = iter;
1024 if (!req) {
1029 if (use_dma && ep->dma_channel && ep->queue.next == &req->queue) {
1038 done(ep, req, -ECONNRESET);
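The matches at 1009-1038 are the usb_ep_ops .dequeue handler, written in the list_for_each_entry() "iter" style: the caller's request is only trusted once it has been found on this endpoint's own queue. A sketch; the locking and the body of the DMA-abort branch are assumptions:

static int omap_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
	struct omap_req *req = NULL, *iter;
	unsigned long flags;

	spin_lock_irqsave(&ep->udc->lock, flags);

	/* make sure this request really is queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		break;
	}
	if (!req) {
		spin_unlock_irqrestore(&ep->udc->lock, flags);
		return -EINVAL;
	}

	if (use_dma && ep->dma_channel && ep->queue.next == &req->queue) {
		/* ... the transfer is in flight: tear down the DMA channel,
		 *     which also completes the request with -ECONNRESET ... */
	} else {
		done(ep, req, -ECONNRESET);
	}

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return 0;
}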
1334 struct omap_req *req;
1347 req = list_entry(ep->queue.next, struct omap_req, queue);
1348 done(ep, req, status);
1397 struct omap_req *req = NULL;
1423 req = container_of(ep0->queue.next, struct omap_req, queue);
1437 if (req)
1438 stat = write_fifo(ep0, req);
1440 if (!req && udc->ep0_pending) {
1450 if (req)
1451 done(ep0, req, 0);
1453 req = NULL;
1475 stat = read_fifo(ep0, req);
1476 if (!req || stat < 0) {
1486 done(ep0, req, 0);
1498 if (req)
1499 done(ep0, req, 0);
1707 VDBG("req %02x.%02x protocol STALL; stat %d\n",
1875 struct omap_req *req;
1878 req = container_of(ep->queue.next,
1880 (void) read_fifo(ep, req);
1898 struct omap_req *req;
1919 req = container_of(ep->queue.next,
1921 stat = read_fifo(ep, req);
1953 req = container_of(ep->queue.next,
1955 (void) write_fifo(ep, req);
1982 struct omap_req *req;
1986 req = list_entry(ep->queue.next, struct omap_req, queue);
1996 /* done(ep, req, -EPROTO) */;
1998 write_fifo(ep, req);
2010 /* done(ep, req, status) */;
2012 read_fifo(ep, req);
2144 struct omap_req *req;
2194 list_for_each_entry(req, &ep->queue, queue) {
2195 unsigned length = req->req.actual;
2200 (ep, req->req.dma + length);
2204 &req->req, length,
2205 req->req.length, req->req.buf);
2322 seq_printf(s, "ULPD control %04x req %04x status %04x\n",