Lines matching defs:endp — definition and use sites of struct u132_endp in the u132-hcd USB host-controller driver (drivers/usb/host/u132-hcd.c). The number at the left of each match is the line number in that file.

201 	struct u132_endp *endp[MAX_U132_ENDPS];
328 struct u132_endp *endp = kref_to_u132_endp(kref);
329 struct u132 *u132 = endp->u132;
330 u8 usb_addr = endp->usb_addr;
331 u8 usb_endp = endp->usb_endp;
334 u8 endp_number = endp->endp_number;
335 struct usb_host_endpoint *hep = endp->hep;
336 struct u132_ring *ring = endp->ring;
337 struct list_head *head = &endp->endp_ring;
339 if (endp == ring->curr_endp) {
351 if (endp->input) {
355 if (endp->output) {
359 u132->endp[endp_number - 1] = NULL;
361 kfree(endp);
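The block at file lines 328-361 is the kref release callback: kref_to_u132_endp() recovers the endpoint from its embedded kref, the endpoint is unlinked from its ring and from u132->endp[], and the structure is freed. A minimal userspace model of that release-callback pattern (hypothetical names, not the driver's API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    struct ref {
        int count;
        void (*release)(struct ref *);
    };

    struct endp {
        int number;
        struct ref ref;
    };

    /* container_of in miniature: recover the outer struct from its member,
     * as kref_to_u132_endp() does for the driver's struct u132_endp. */
    #define to_endp(p) ((struct endp *)((char *)(p) - offsetof(struct endp, ref)))

    static void endp_delete(struct ref *r)
    {
        struct endp *e = to_endp(r);
        printf("freeing endp %d\n", e->number);
        free(e);
    }

    static void ref_put(struct ref *r)
    {
        if (--r->count == 0)
            r->release(r);      /* the last put runs the release callback */
    }

    int main(void)
    {
        struct endp *e = malloc(sizeof(*e));
        e->number = 1;
        e->ref.count = 1;
        e->ref.release = endp_delete;
        ref_put(&e->ref);       /* count hits zero: endp_delete() frees e */
        return 0;
    }
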
365 static inline void u132_endp_put_kref(struct u132 *u132, struct u132_endp *endp)
367 kref_put(&endp->kref, u132_endp_delete);
370 static inline void u132_endp_get_kref(struct u132 *u132, struct u132_endp *endp)
372 kref_get(&endp->kref);
376 struct u132_endp *endp)
378 kref_init(&endp->kref);
382 static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
385 if (queue_delayed_work(workqueue, &endp->scheduler, delta))
386 kref_get(&endp->kref);
389 static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
391 if (cancel_delayed_work(&endp->scheduler))
392 kref_put(&endp->kref, u132_endp_delete);
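File lines 382-392 keep the reference count balanced against the workqueue: a reference is taken only when queue_delayed_work() reports the work was actually queued, and one is dropped only when cancel_delayed_work() reports a queued item was actually cancelled. A sketch of that invariant with a plain flag standing in for the delayed-work machinery (hypothetical, userspace):

    #include <stdbool.h>

    struct endp {
        int refcount;
        bool work_pending;          /* stands in for the delayed_work state */
    };

    /* True only when the work was newly queued, mirroring the return value
     * of queue_delayed_work(); the caller then takes an extra reference. */
    static bool sched_work(struct endp *e)
    {
        if (e->work_pending)
            return false;
        e->work_pending = true;
        return true;
    }

    static void endp_queue_work(struct endp *e)
    {
        if (sched_work(e))
            e->refcount++;          /* the queued work owns this reference */
    }

    static void endp_cancel_work(struct endp *e)
    {
        if (e->work_pending) {      /* mirrors cancel_delayed_work() != 0 */
            e->work_pending = false;
            e->refcount--;          /* drop the work's reference */
        }
    }

    int main(void)
    {
        struct endp e = { .refcount = 1, .work_pending = false };
        endp_queue_work(&e);        /* refcount 1 -> 2 */
        endp_cancel_work(&e);       /* refcount 2 -> 1 */
        return e.refcount == 1 ? 0 : 1;
    }
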
504 static void u132_hcd_giveback_urb(struct u132 *u132, struct u132_endp *endp,
511 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
513 endp->queue_next += 1;
514 if (ENDP_QUEUE_SIZE > --endp->queue_size) {
515 endp->active = 0;
516 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
518 struct list_head *next = endp->urb_more.next;
522 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
524 endp->active = 0;
525 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
529 ring = endp->ring;
534 u132_endp_put_kref(u132, endp);
538 static void u132_hcd_forget_urb(struct u132 *u132, struct u132_endp *endp,
541 u132_endp_put_kref(u132, endp);
544 static void u132_hcd_abandon_urb(struct u132 *u132, struct u132_endp *endp,
550 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
552 endp->queue_next += 1;
553 if (ENDP_QUEUE_SIZE > --endp->queue_size) {
554 endp->active = 0;
555 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
557 struct list_head *next = endp->urb_more.next;
561 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
563 endp->active = 0;
564 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
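In both completion paths above (file lines 504-564), the per-endpoint URB queue is a power-of-two ring: queue_next and queue_last are free-running counters, every array access is masked with ENDP_QUEUE_MASK, and entries beyond ENDP_QUEUE_SIZE wait on the urb_more overflow list. A compact model of the masked-counter ring (hypothetical size and names):

    #include <assert.h>

    #define QUEUE_SIZE 4                    /* must be a power of two */
    #define QUEUE_MASK (QUEUE_SIZE - 1)

    struct queue {
        void *slot[QUEUE_SIZE];
        unsigned short next, last, size;    /* next/last are free-running */
    };

    static int queue_push(struct queue *q, void *item)
    {
        if (q->size >= QUEUE_SIZE)
            return -1;                      /* full: caller chains it instead */
        q->slot[QUEUE_MASK & q->last++] = item;
        q->size++;
        return 0;
    }

    static void *queue_pop(struct queue *q)
    {
        assert(q->size > 0);
        q->size--;                          /* mirrors --endp->queue_size */
        return q->slot[QUEUE_MASK & q->next++];  /* endp->queue_next += 1 */
    }

    int main(void)
    {
        struct queue q = { .next = 0, .last = 0, .size = 0 };
        int urb = 42;
        queue_push(&q, &urb);
        return queue_pop(&q) == &urb ? 0 : 1;
    }
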
571 struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
572 void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
576 return usb_ftdi_elan_edset_input(u132->platform_dev, ring->number, endp,
577 urb, address, endp->usb_endp, toggle_bits, callback);
581 struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
582 void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
586 return usb_ftdi_elan_edset_setup(u132->platform_dev, ring->number, endp,
587 urb, address, endp->usb_endp, toggle_bits, callback);
591 struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
592 void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
597 endp, urb, address, endp->usb_endp, toggle_bits, callback);
601 struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
602 void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
607 endp, urb, address, endp->usb_endp, toggle_bits, callback);
619 struct u132_endp *endp = data;
620 struct u132 *u132 = endp->u132;
621 u8 address = u132->addr[endp->usb_addr].address;
628 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
630 } else if (endp->dequeueing) {
631 endp->dequeueing = 0;
633 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
639 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
642 struct u132_ring *ring = endp->ring;
653 endp->toggle_bits = toggle_bits;
654 usb_settoggle(udev->usb_device, endp->usb_endp, 0,
659 retval = edset_single(u132, ring, endp, urb,
660 address, endp->toggle_bits,
663 u132_hcd_giveback_urb(u132, endp, urb,
667 endp->active = 0;
668 endp->jiffies = jiffies +
673 u132_endp_put_kref(u132, endp);
678 endp->toggle_bits = toggle_bits;
679 usb_settoggle(udev->usb_device, endp->usb_endp, 0,
682 u132_hcd_giveback_urb(u132, endp, urb, 0);
686 endp->toggle_bits = toggle_bits;
687 usb_settoggle(udev->usb_device, endp->usb_endp,
690 endp->toggle_bits = 0x2;
691 usb_settoggle(udev->usb_device, endp->usb_endp,
694 endp->toggle_bits = 0x2;
695 usb_settoggle(udev->usb_device, endp->usb_endp,
702 u132_hcd_giveback_urb(u132, endp, urb,
710 u132_hcd_giveback_urb(u132, endp, urb, 0);
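Every completion callback from file line 619 onward opens with the same triage: if the device has gone away, the URB is forgotten with -ENODEV; if a dequeue is in progress, dequeueing is cleared and the URB is given back with -EINTR; only then is the transfer result examined. A self-contained sketch of that shared prologue (the flag and helpers here are hypothetical stand-ins for the driver's state checks):

    #include <errno.h>
    #include <stdio.h>

    struct urb;                     /* opaque in this sketch */

    struct endp {
        int dequeueing;
        int device_gone;            /* hypothetical flag; the driver checks
                                       the device's state under its lock */
    };

    static void forget_urb(struct endp *e, struct urb *u, int status)
    {
        (void)e; (void)u;
        printf("forget, status %d\n", status);
    }

    static void giveback_urb(struct endp *e, struct urb *u, int status)
    {
        (void)e; (void)u;
        printf("giveback, status %d\n", status);
    }

    /* Returns nonzero when the URB has already been disposed of and the
     * callback should stop before looking at the transfer result. */
    static int triage(struct endp *e, struct urb *u)
    {
        if (e->device_gone) {
            forget_urb(e, u, -ENODEV);
            return 1;
        }
        if (e->dequeueing) {
            e->dequeueing = 0;
            giveback_urb(e, u, -EINTR);
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct endp e = { .dequeueing = 1, .device_gone = 0 };
        return triage(&e, 0) ? 0 : 1;   /* dequeue in progress: -EINTR path */
    }
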
719 struct u132_endp *endp = data;
720 struct u132 *u132 = endp->u132;
721 u8 address = u132->addr[endp->usb_addr].address;
727 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
729 } else if (endp->dequeueing) {
730 endp->dequeueing = 0;
732 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
738 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
741 struct u132_ring *ring = endp->ring;
743 endp->toggle_bits = toggle_bits;
747 retval = edset_output(u132, ring, endp, urb, address,
748 endp->toggle_bits, u132_hcd_bulk_output_sent);
750 u132_hcd_giveback_urb(u132, endp, urb, retval);
754 u132_hcd_giveback_urb(u132, endp, urb, 0);
761 u132_hcd_giveback_urb(u132, endp, urb, 0);
770 struct u132_endp *endp = data;
771 struct u132 *u132 = endp->u132;
772 u8 address = u132->addr[endp->usb_addr].address;
779 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
781 } else if (endp->dequeueing) {
782 endp->dequeueing = 0;
784 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
790 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
793 struct u132_ring *ring = endp->ring;
805 endp->toggle_bits = toggle_bits;
806 usb_settoggle(udev->usb_device, endp->usb_endp, 0,
810 ring->number, endp, urb, address,
811 endp->usb_endp, endp->toggle_bits,
814 u132_hcd_giveback_urb(u132, endp, urb, retval);
817 endp->toggle_bits = toggle_bits;
818 usb_settoggle(udev->usb_device, endp->usb_endp, 0,
821 u132_hcd_giveback_urb(u132, endp, urb,
826 endp->toggle_bits = toggle_bits;
827 usb_settoggle(udev->usb_device, endp->usb_endp, 0,
830 u132_hcd_giveback_urb(u132, endp, urb, 0);
833 endp->toggle_bits = toggle_bits;
834 usb_settoggle(udev->usb_device, endp->usb_endp, 0,
840 u132_hcd_giveback_urb(u132, endp, urb, 0);
843 endp->toggle_bits = 0x2;
844 usb_settoggle(udev->usb_device, endp->usb_endp, 0, 0);
846 u132_hcd_giveback_urb(u132, endp, urb,
850 endp->toggle_bits = 0x2;
851 usb_settoggle(udev->usb_device, endp->usb_endp, 0, 0);
856 u132_hcd_giveback_urb(u132, endp, urb,
864 u132_hcd_giveback_urb(u132, endp, urb, 0);
873 struct u132_endp *endp = data;
874 struct u132 *u132 = endp->u132;
880 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
882 } else if (endp->dequeueing) {
883 endp->dequeueing = 0;
885 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
891 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
895 u132_hcd_giveback_urb(u132, endp, urb, 0);
901 u132_hcd_giveback_urb(u132, endp, urb, 0);
910 struct u132_endp *endp = data;
911 struct u132 *u132 = endp->u132;
912 u8 address = u132->addr[endp->usb_addr].address;
918 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
920 } else if (endp->dequeueing) {
921 endp->dequeueing = 0;
923 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
929 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
932 struct u132_ring *ring = endp->ring;
947 ring->number, endp, urb, address,
948 endp->usb_endp, 0x3,
951 u132_hcd_giveback_urb(u132, endp, urb, retval);
957 u132_hcd_giveback_urb(u132, endp, urb,
965 u132_hcd_giveback_urb(u132, endp, urb,
973 u132_hcd_giveback_urb(u132, endp, urb, 0);
982 struct u132_endp *endp = data;
983 struct u132 *u132 = endp->u132;
989 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
991 } else if (endp->dequeueing) {
992 endp->dequeueing = 0;
994 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1000 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1004 u132_hcd_giveback_urb(u132, endp, urb, 0);
1010 u132_hcd_giveback_urb(u132, endp, urb, 0);
1019 struct u132_endp *endp = data;
1020 struct u132 *u132 = endp->u132;
1021 u8 address = u132->addr[endp->usb_addr].address;
1027 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
1029 } else if (endp->dequeueing) {
1030 endp->dequeueing = 0;
1032 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1038 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1043 struct u132_ring *ring = endp->ring;
1046 ring->number, endp, urb, address,
1047 endp->usb_endp, 0,
1050 u132_hcd_giveback_urb(u132, endp, urb, retval);
1054 struct u132_ring *ring = endp->ring;
1057 ring->number, endp, urb, address,
1058 endp->usb_endp, 0,
1061 u132_hcd_giveback_urb(u132, endp, urb, retval);
1068 u132_hcd_giveback_urb(u132, endp, urb, 0);
1077 struct u132_endp *endp = data;
1078 struct u132 *u132 = endp->u132;
1079 u8 address = u132->addr[endp->usb_addr].address;
1086 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
1088 } else if (endp->dequeueing) {
1089 endp->dequeueing = 0;
1091 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1097 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1101 endp->usb_addr = udev->usb_addr;
1103 u132_hcd_giveback_urb(u132, endp, urb, 0);
1109 u132_hcd_giveback_urb(u132, endp, urb, 0);
1118 struct u132_endp *endp = data;
1119 struct u132 *u132 = endp->u132;
1125 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
1127 } else if (endp->dequeueing) {
1128 endp->dequeueing = 0;
1130 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1136 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1140 struct u132_ring *ring = endp->ring;
1143 ring->number, endp, urb, 0, endp->usb_endp, 0,
1146 u132_hcd_giveback_urb(u132, endp, urb, retval);
1152 u132_hcd_giveback_urb(u132, endp, urb, 0);
1161 struct u132_endp *endp = data;
1162 struct u132 *u132 = endp->u132;
1168 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
1170 } else if (endp->dequeueing) {
1171 endp->dequeueing = 0;
1173 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1179 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1183 u132_hcd_giveback_urb(u132, endp, urb, 0);
1189 u132_hcd_giveback_urb(u132, endp, urb, 0);
1198 struct u132_endp *endp = data;
1199 struct u132 *u132 = endp->u132;
1200 u8 address = u132->addr[endp->usb_addr].address;
1206 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
1208 } else if (endp->dequeueing) {
1209 endp->dequeueing = 0;
1211 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1217 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1221 struct u132_ring *ring = endp->ring;
1232 ring->number, endp, urb, address, endp->usb_endp, 0x3,
1235 u132_hcd_giveback_urb(u132, endp, urb, retval);
1241 u132_hcd_giveback_urb(u132, endp, urb, 0);
1250 struct u132_endp *endp = data;
1251 struct u132 *u132 = endp->u132;
1252 u8 address = u132->addr[endp->usb_addr].address;
1258 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
1260 } else if (endp->dequeueing) {
1261 endp->dequeueing = 0;
1263 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1269 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1273 struct u132_ring *ring = endp->ring;
1276 ring->number, endp, urb, address, endp->usb_endp, 0,
1279 u132_hcd_giveback_urb(u132, endp, urb, retval);
1285 u132_hcd_giveback_urb(u132, endp, urb, 0);
1305 struct u132_endp *endp, *last_endp = ring->curr_endp;
1307 list_for_each_entry(endp, &last_endp->endp_ring, endp_ring) {
1308 if (endp->queue_next == endp->queue_last) {
1309 } else if ((endp->delayed == 0)
1310 || time_after_eq(jiffies, endp->jiffies)) {
1311 ring->curr_endp = endp;
1318 unsigned long delta = endp->jiffies - jiffies;
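The ring scheduler at file lines 1305-1318 walks the circular endpoint list starting after curr_endp, skips endpoints with an empty queue (queue_next == queue_last), takes the first one that is not delayed or whose delay has expired, and otherwise remembers the smallest remaining delta so the work can be re-queued. A sketch of that selection over a plain array (hypothetical; the driver uses a circular list_head):

    #include <stddef.h>

    struct endp {
        unsigned short queue_next, queue_last;
        int delayed;
        unsigned long deadline;             /* models endp->jiffies */
    };

    /* Round-robin pick starting after *curr: skip empty queues, take the
     * first endpoint whose delay is over, and otherwise report the soonest
     * deadline via *wake (the "delta = endp->jiffies - jiffies" case). */
    static struct endp *pick_endp(struct endp *v, size_t n, size_t *curr,
                                  unsigned long now, unsigned long *wake)
    {
        unsigned long soonest = (unsigned long)-1;
        for (size_t i = 1; i <= n; i++) {
            struct endp *e = &v[(*curr + i) % n];
            if (e->queue_next == e->queue_last)
                continue;                   /* nothing queued on this one */
            if (!e->delayed || now >= e->deadline) {
                *curr = (*curr + i) % n;    /* like ring->curr_endp = endp */
                return e;
            }
            if (e->deadline - now < soonest)
                soonest = e->deadline - now;
        }
        *wake = soonest;
        return NULL;                        /* nothing runnable yet */
    }

    int main(void)
    {
        struct endp v[2] = {
            { .queue_next = 0, .queue_last = 1, .delayed = 0 },
            { .queue_next = 0, .queue_last = 0 },
        };
        size_t curr = 1;
        unsigned long wake = 0;
        return pick_endp(v, 2, &curr, 100, &wake) == &v[0] ? 0 : 1;
    }
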
1355 struct u132_endp *endp =
1357 struct u132 *u132 = endp->u132;
1359 ring = endp->ring;
1360 if (endp->edset_flush) {
1361 endp->edset_flush = 0;
1362 if (endp->dequeueing)
1364 ring->number, endp);
1366 u132_endp_put_kref(u132, endp);
1368 } else if (endp->active) {
1370 u132_endp_put_kref(u132, endp);
1374 u132_endp_put_kref(u132, endp);
1376 } else if (endp->queue_next == endp->queue_last) {
1378 u132_endp_put_kref(u132, endp);
1380 } else if (endp->pipetype == PIPE_INTERRUPT) {
1381 u8 address = u132->addr[endp->usb_addr].address;
1384 u132_endp_put_kref(u132, endp);
1388 struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
1389 endp->queue_next];
1390 endp->active = 1;
1391 ring->curr_endp = endp;
1394 retval = edset_single(u132, ring, endp, urb, address,
1395 endp->toggle_bits, u132_hcd_interrupt_recv);
1397 u132_hcd_giveback_urb(u132, endp, urb, retval);
1400 } else if (endp->pipetype == PIPE_CONTROL) {
1401 u8 address = u132->addr[endp->usb_addr].address;
1404 u132_endp_put_kref(u132, endp);
1408 struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
1409 endp->queue_next];
1410 endp->active = 1;
1411 ring->curr_endp = endp;
1414 retval = edset_setup(u132, ring, endp, urb, address,
1417 u132_hcd_giveback_urb(u132, endp, urb, retval);
1419 } else if (endp->usb_addr == 0) {
1421 struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
1422 endp->queue_next];
1423 endp->active = 1;
1424 ring->curr_endp = endp;
1427 retval = edset_setup(u132, ring, endp, urb, 0, 0x2,
1430 u132_hcd_giveback_urb(u132, endp, urb, retval);
1434 struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
1435 endp->queue_next];
1436 address = u132->addr[endp->usb_addr].address;
1437 endp->active = 1;
1438 ring->curr_endp = endp;
1441 retval = edset_setup(u132, ring, endp, urb, address,
1444 u132_hcd_giveback_urb(u132, endp, urb, retval);
1448 if (endp->input) {
1449 u8 address = u132->addr[endp->usb_addr].address;
1452 u132_endp_put_kref(u132, endp);
1456 struct urb *urb = endp->urb_list[
1457 ENDP_QUEUE_MASK & endp->queue_next];
1458 endp->active = 1;
1459 ring->curr_endp = endp;
1462 retval = edset_input(u132, ring, endp, urb,
1463 address, endp->toggle_bits,
1467 u132_hcd_giveback_urb(u132, endp, urb,
1472 u8 address = u132->addr[endp->usb_addr].address;
1475 u132_endp_put_kref(u132, endp);
1479 struct urb *urb = endp->urb_list[
1480 ENDP_QUEUE_MASK & endp->queue_next];
1481 endp->active = 1;
1482 ring->curr_endp = endp;
1485 retval = edset_output(u132, ring, endp, urb,
1486 address, endp->toggle_bits,
1490 u132_hcd_giveback_urb(u132, endp, urb,
1856 struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
1858 if (!endp)
1861 spin_lock_init(&endp->queue_lock.slock);
1862 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
1865 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
1866 kfree(endp);
1871 urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
1872 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
1873 INIT_LIST_HEAD(&endp->urb_more);
1874 ring = endp->ring = &u132->ring[0];
1876 list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
1878 INIT_LIST_HEAD(&endp->endp_ring);
1879 ring->curr_endp = endp;
1882 endp->dequeueing = 0;
1883 endp->edset_flush = 0;
1884 endp->active = 0;
1885 endp->delayed = 0;
1886 endp->endp_number = endp_number;
1887 endp->u132 = u132;
1888 endp->hep = urb->ep;
1889 endp->pipetype = usb_pipetype(urb->pipe);
1890 u132_endp_init_kref(u132, endp);
1892 endp->toggle_bits = 0x2;
1894 endp->input = 1;
1895 endp->output = 0;
1899 endp->toggle_bits = 0x2;
1901 endp->input = 0;
1902 endp->output = 1;
1907 endp->delayed = 1;
1908 endp->jiffies = jiffies + msecs_to_jiffies(urb->interval);
1909 endp->udev_number = address;
1910 endp->usb_addr = usb_addr;
1911 endp->usb_endp = usb_endp;
1912 endp->queue_size = 1;
1913 endp->queue_last = 0;
1914 endp->queue_next = 0;
1915 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
1916 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
1917 u132_endp_queue_work(u132, endp, msecs_to_jiffies(urb->interval));
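Endpoint creation (file lines 1856-1917, with near-identical copies at 1955-2016 and 2051-2126) links the new endpoint into its ring's circular list: if the ring already has a curr_endp, the node is added at the tail of that circle; otherwise its list head is initialized and it becomes curr_endp itself. A sketch of that insert with a hand-rolled circular list (hypothetical; the driver uses list_head):

    struct node {
        struct node *next, *prev;
    };

    struct ring {
        struct node *curr;          /* models ring->curr_endp */
    };

    static void ring_link(struct ring *r, struct node *n)
    {
        if (r->curr) {
            /* list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring) */
            n->prev = r->curr->prev;
            n->next = r->curr;
            r->curr->prev->next = n;
            r->curr->prev = n;
        } else {
            /* INIT_LIST_HEAD(&endp->endp_ring); ring->curr_endp = endp; */
            n->next = n->prev = n;
            r->curr = n;
        }
    }

    int main(void)
    {
        struct ring r = { 0 };
        struct node a, b;
        ring_link(&r, &a);          /* a becomes curr: a circle of one */
        ring_link(&r, &b);          /* b joins at the tail of the circle */
        return (r.curr == &a && a.next == &b && b.next == &a) ? 0 : 1;
    }
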
1923 struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
1927 endp->delayed = 1;
1928 endp->jiffies = jiffies + msecs_to_jiffies(urb->interval);
1929 if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
1930 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
1935 endp->queue_size -= 1;
1938 list_add_tail(&urbq->urb_more, &endp->urb_more);
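The enqueue path above (file lines 1929-1938, repeated at 2026-2035 and 2139-2151) stores the URB in the array while queue_size stays within ENDP_QUEUE_SIZE, and otherwise allocates a small wrapper node and chains it on the urb_more list, undoing the queue_size increment only if that allocation fails. The same shape in a userspace sketch (hypothetical names):

    #include <errno.h>
    #include <stdlib.h>

    #define QUEUE_SIZE 4
    #define QUEUE_MASK (QUEUE_SIZE - 1)

    struct overflow {
        struct overflow *next;
        void *urb;
    };

    struct endp {
        void *urb_list[QUEUE_SIZE];
        unsigned short queue_last, queue_size;
        struct overflow *more_head, **more_tail;   /* models urb_more */
    };

    static int endp_enqueue(struct endp *e, void *urb)
    {
        if (e->queue_size++ < QUEUE_SIZE) {
            e->urb_list[QUEUE_MASK & e->queue_last++] = urb;
            return 0;
        }
        struct overflow *q = malloc(sizeof(*q));
        if (!q) {
            e->queue_size -= 1;     /* undo: the URB was never accepted */
            return -ENOMEM;
        }
        q->urb = urb;
        q->next = NULL;
        *e->more_tail = q;          /* list_add_tail(&urbq->urb_more, ...) */
        e->more_tail = &q->next;
        return 0;
    }

    int main(void)
    {
        struct endp e = { .queue_last = 0, .queue_size = 0, .more_head = NULL };
        e.more_tail = &e.more_head;
        for (int i = 0; i < 6; i++)
            endp_enqueue(&e, &e);   /* 4 land in the array, 2 on the chain */
        return e.queue_size == 6 ? 0 : 1;
    }
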
1955 struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
1957 if (!endp)
1960 spin_lock_init(&endp->queue_lock.slock);
1961 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
1964 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
1965 kfree(endp);
1970 urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
1971 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
1972 INIT_LIST_HEAD(&endp->urb_more);
1973 endp->dequeueing = 0;
1974 endp->edset_flush = 0;
1975 endp->active = 0;
1976 endp->delayed = 0;
1977 endp->endp_number = endp_number;
1978 endp->u132 = u132;
1979 endp->hep = urb->ep;
1980 endp->pipetype = usb_pipetype(urb->pipe);
1981 u132_endp_init_kref(u132, endp);
1983 endp->toggle_bits = 0x2;
1986 endp->input = 1;
1987 endp->output = 0;
1991 endp->toggle_bits = 0x2;
1994 endp->input = 0;
1995 endp->output = 1;
1999 ring = endp->ring = &u132->ring[ring_number - 1];
2001 list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
2003 INIT_LIST_HEAD(&endp->endp_ring);
2004 ring->curr_endp = endp;
2008 endp->udev_number = address;
2009 endp->usb_addr = usb_addr;
2010 endp->usb_endp = usb_endp;
2011 endp->queue_size = 1;
2012 endp->queue_last = 0;
2013 endp->queue_next = 0;
2014 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
2015 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2016 u132_endp_queue_work(u132, endp, 0);
2022 struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
2026 if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
2027 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
2032 endp->queue_size -= 1;
2035 list_add_tail(&urbq->urb_more, &endp->urb_more);
2051 struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
2053 if (!endp)
2056 spin_lock_init(&endp->queue_lock.slock);
2057 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
2060 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2061 kfree(endp);
2066 urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
2067 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
2068 INIT_LIST_HEAD(&endp->urb_more);
2069 ring = endp->ring = &u132->ring[0];
2071 list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
2073 INIT_LIST_HEAD(&endp->endp_ring);
2074 ring->curr_endp = endp;
2077 endp->dequeueing = 0;
2078 endp->edset_flush = 0;
2079 endp->active = 0;
2080 endp->delayed = 0;
2081 endp->endp_number = endp_number;
2082 endp->u132 = u132;
2083 endp->hep = urb->ep;
2084 u132_endp_init_kref(u132, endp);
2085 u132_endp_get_kref(u132, endp);
2089 endp->udev_number = address;
2090 endp->usb_addr = usb_addr;
2091 endp->usb_endp = usb_endp;
2092 endp->input = 1;
2093 endp->output = 1;
2094 endp->pipetype = usb_pipetype(urb->pipe);
2100 endp->queue_size = 1;
2101 endp->queue_last = 0;
2102 endp->queue_next = 0;
2103 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
2104 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2105 u132_endp_queue_work(u132, endp, 0);
2110 endp->udev_number = address;
2111 endp->usb_addr = usb_addr;
2112 endp->usb_endp = usb_endp;
2113 endp->input = 1;
2114 endp->output = 1;
2115 endp->pipetype = usb_pipetype(urb->pipe);
2121 endp->queue_size = 1;
2122 endp->queue_last = 0;
2123 endp->queue_next = 0;
2124 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
2125 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2126 u132_endp_queue_work(u132, endp, 0);
2133 struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
2139 if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
2140 endp->urb_list[ENDP_QUEUE_MASK &
2141 endp->queue_last++] = urb;
2147 endp->queue_size -= 1;
2151 &endp->urb_more);
2167 endp->udev_number = i;
2172 endp->endp_number;
2175 endp->endp_number;
2189 if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
2190 endp->urb_list[ENDP_QUEUE_MASK &
2191 endp->queue_last++] = urb;
2197 endp->queue_size -= 1;
2201 &endp->urb_more);
2213 if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
2214 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
2220 endp->queue_size -= 1;
2223 list_add_tail(&urbq->urb_more, &endp->urb_more);
2256 struct u132_endp *endp = urb->ep->hcpriv;
2258 if (endp) {
2261 spin_lock_irqsave(&endp->queue_lock.slock,
2267 usb_dev, endp,
2274 spin_unlock_irqrestore(&endp->queue_lock.slock,
2279 u132_endp_queue_work(u132, endp,
2286 } else { /*(endp == NULL) */
2298 struct u132_endp *endp = urb->ep->hcpriv;
2300 if (endp) {
2303 spin_lock_irqsave(&endp->queue_lock.slock,
2309 usb_dev, endp,
2316 spin_unlock_irqrestore(&endp->queue_lock.slock,
2321 u132_endp_queue_work(u132, endp, 0);
2331 struct u132_endp *endp = urb->ep->hcpriv;
2349 if (endp) {
2352 spin_lock_irqsave(&endp->queue_lock.slock,
2358 endp, usb_addr,
2364 spin_unlock_irqrestore(&endp->queue_lock.slock,
2369 u132_endp_queue_work(u132, endp, 0);
2383 struct u132_endp *endp, struct urb *urb)
2387 list_for_each_entry(urbq, &endp->urb_more, urb_more) {
2391 endp->queue_size -= 1;
2398 dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]=%p ring"
2400 "\n", urb, endp->endp_number, endp, endp->ring->number,
2401 endp->input ? 'I' : ' ', endp->output ? 'O' : ' ',
2402 endp->usb_endp, endp->usb_addr, endp->queue_size,
2403 endp->queue_next, endp->queue_last);
2407 static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
2413 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
2416 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2419 if (endp->queue_size == 0) {
2420 dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]"
2422 endp->endp_number, endp, endp->ring->number,
2423 endp->input ? 'I' : ' ', endp->output ? 'O' : ' ',
2424 endp->usb_endp, endp->usb_addr);
2425 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2428 if (urb == endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next]) {
2429 if (endp->active) {
2430 endp->dequeueing = 1;
2431 endp->edset_flush = 1;
2432 u132_endp_queue_work(u132, endp, 0);
2433 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2436 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2437 u132_hcd_abandon_urb(u132, endp, urb, status);
2442 u16 queue_size = endp->queue_size;
2443 u16 queue_scan = endp->queue_next;
2446 if (urb == endp->urb_list[ENDP_QUEUE_MASK &
2448 urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
2455 *urb_slot = endp->urb_list[ENDP_QUEUE_MASK &
2457 urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
2464 endp->queue_size -= 1;
2465 if (list_empty(&endp->urb_more)) {
2466 spin_unlock_irqrestore(&endp->queue_lock.slock,
2469 struct list_head *next = endp->urb_more.next;
2474 spin_unlock_irqrestore(&endp->queue_lock.slock,
2481 } else if (list_empty(&endp->urb_more)) {
2483 "endp[%d]=%p ring[%d] %c%c usb_endp=%d usb_addr"
2485 endp->endp_number, endp, endp->ring->number,
2486 endp->input ? 'I' : ' ',
2487 endp->output ? 'O' : ' ', endp->usb_endp,
2488 endp->usb_addr, endp->queue_size,
2489 endp->queue_next, endp->queue_last);
2490 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2496 retval = dequeue_from_overflow_chain(u132, endp,
2498 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
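When the URB to cancel is not at the head of the queue (file lines 2442-2464), the dequeue path scans forward from queue_next and, on finding the slot, copies each later entry down one place so the masked window stays dense before decrementing queue_size. A minimal model of that compaction (a sketch, not the driver's exact bookkeeping):

    #include <stdio.h>

    /* Slots idx .. last-1 are occupied (free-running counters); close the
     * gap at idx by copying each later entry down one place. */
    static void ring_remove_at(void **slot, unsigned mask,
                               unsigned short idx, unsigned short last)
    {
        while ((unsigned short)(idx + 1) != last) {
            slot[mask & idx] = slot[mask & (unsigned short)(idx + 1)];
            idx++;
        }
    }

    int main(void)
    {
        void *s[4] = { "a", "b", "c", "d" };
        ring_remove_at(s, 3, 1, 4);         /* drop "b" */
        printf("%s %s %s\n", (char *)s[0], (char *)s[1], (char *)s[2]);
        return 0;
    }
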
2518 struct u132_endp *endp = u132->endp[endp_number - 1];
2519 return u132_endp_urb_dequeue(u132, endp, urb, status);
2522 struct u132_endp *endp = u132->endp[endp_number - 1];
2523 return u132_endp_urb_dequeue(u132, endp, urb, status);
2537 struct u132_endp *endp = hep->hcpriv;
2538 if (endp)
2539 u132_endp_put_kref(u132, endp);
2984 struct u132_endp *endp = u132->endp[endps];
2985 if (endp)
2986 u132_endp_cancel_work(u132, endp);
3052 u132->endp[endps] = NULL;