Lines matching defs: c67x00
3 * c67x00-sched.c: Cypress C67X00 USB Host Controller Driver - TD scheduling
13 #include "c67x00.h"
14 #include "c67x00-hcd.h"
136 static void dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg)
138 struct device *dev = c67x00_hcd_dev(c67x00);
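/* A hedged sketch of what dbg_td() likely prints: the TD's on-chip address
 * plus decoded fields. td->td_addr appears in later fragments; the status
 * field name and the format strings are assumptions. */
	dev_dbg(dev, "### %s at 0x%04x\n", msg, td->td_addr);
	dev_dbg(dev, "status: 0x%02x\n", td->status);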
159 static inline u16 c67x00_get_current_frame_number(struct c67x00_hcd *c67x00)
161 return c67x00_ll_husb_get_frame(c67x00->sie) & HOST_FRAME_MASK;
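/* The masking above matters because the USB frame counter wraps. The
 * frame_add()/frame_after()/frame_after_eq() helpers used throughout this
 * file must compare frame numbers modulo the counter width. A minimal
 * sketch, assuming HOST_FRAME_MASK is the 11-bit mask 0x07ff (SOF frame
 * numbers are 11 bits wide); frame_dist() is a helper introduced here, and
 * the real macro bodies may differ: */
#define frame_dist(a, b)	(((a) - (b)) & HOST_FRAME_MASK)
#define frame_add(pos, inc)	(((pos) + (inc)) & HOST_FRAME_MASK)
#define frame_after(a, b)	(frame_dist(a, b) != 0 && \
				 frame_dist(a, b) < HOST_FRAME_MASK / 2)
#define frame_after_eq(a, b)	(frame_dist(a, b) < HOST_FRAME_MASK / 2)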
198 static void c67x00_release_urb(struct c67x00_hcd *c67x00, struct urb *urb)
205 c67x00->urb_count--;
208 c67x00->urb_iso_count--;
209 if (c67x00->urb_iso_count == 0)
210 c67x00->max_frame_bw = MAX_FRAME_BW_STD;
218 list_for_each_entry(td, &c67x00->td_list, td_list)
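/* Sketch of the iso accounting visible above and mirrored in the enqueue
 * path below: the first isochronous URB in flight widens the per-frame
 * budget to MAX_FRAME_BW_ISO, and releasing the last one restores
 * MAX_FRAME_BW_STD. The helper names are hypothetical; the counters and
 * constants are taken from the fragments. */
static void c67x00_iso_get(struct c67x00_hcd *c67x00)	/* hypothetical */
{
	if (c67x00->urb_iso_count++ == 0)
		c67x00->max_frame_bw = MAX_FRAME_BW_ISO;
}

static void c67x00_iso_put(struct c67x00_hcd *c67x00)	/* hypothetical */
{
	if (--c67x00->urb_iso_count == 0)
		c67x00->max_frame_bw = MAX_FRAME_BW_STD;
}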
231 c67x00_ep_data_alloc(struct c67x00_hcd *c67x00, struct urb *urb)
237 c67x00->current_frame = c67x00_get_current_frame_number(c67x00);
242 if (frame_after(c67x00->current_frame, ep_data->next_frame))
244 ep_data->next_frame = frame_add(c67x00->current_frame, 1);
248 /* Allocate and initialize a new c67x00 endpoint data structure */
263 ep_data->next_frame = frame_add(c67x00->current_frame, 1);
269 list_add(&ep_data->node, &c67x00->list[type]);
273 list_for_each_entry(prev, &c67x00->list[type], node) {
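/* The loop above keeps c67x00->list[type] sorted so each frame is filled
 * in a stable endpoint order. A hedged sketch of the insertion, assuming
 * ep_data reaches the endpoint descriptor through an hep pointer (field
 * naming assumed): */
	list_for_each_entry(prev, &c67x00->list[type], node) {
		if (prev->hep->desc.bEndpointAddress >
		    ep_data->hep->desc.bEndpointAddress) {
			/* insert just before the first higher address */
			list_add(&ep_data->node, prev->node.prev);
			break;
		}
	}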
307 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
311 dev_warn(c67x00_hcd_dev(c67x00), "error: urb list not empty\n");
313 spin_lock_irqsave(&c67x00->lock, flags);
318 spin_unlock_irqrestore(&c67x00->lock, flags);
323 reinit_completion(&c67x00->endpoint_disable);
324 c67x00_sched_kick(c67x00);
325 wait_for_completion_timeout(&c67x00->endpoint_disable, 1 * HZ);
327 spin_lock_irqsave(&c67x00->lock, flags);
330 spin_unlock_irqrestore(&c67x00->lock, flags);
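/* The endpoint-disable path above is a bounded drain loop: drop the lock,
 * re-arm the completion, kick the scheduler tasklet, and sleep up to 1 s
 * until the tasklet signals the TD list empty via
 * complete(&c67x00->endpoint_disable) (seen in c67x00_do_work below).
 * A condensed sketch; the loop condition on ep_data->queue is assumed: */
	while (ep_data && !list_empty(&ep_data->queue)) {
		spin_unlock_irqrestore(&c67x00->lock, flags);
		reinit_completion(&c67x00->endpoint_disable);
		c67x00_sched_kick(c67x00);
		wait_for_completion_timeout(&c67x00->endpoint_disable, 1 * HZ);
		spin_lock_irqsave(&c67x00->lock, flags);
	}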
348 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
358 spin_lock_irqsave(&c67x00->lock, flags);
374 urbp->ep_data = c67x00_ep_data_alloc(c67x00, urb);
397 if (c67x00->urb_iso_count == 0)
398 c67x00->max_frame_bw = MAX_FRAME_BW_ISO;
399 c67x00->urb_iso_count++;
423 if (!c67x00->urb_count++)
424 c67x00_ll_hpi_enable_sofeop(c67x00->sie);
426 c67x00_sched_kick(c67x00);
427 spin_unlock_irqrestore(&c67x00->lock, flags);
434 spin_unlock_irqrestore(&c67x00->lock, flags);
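/* Condensed, hedged skeleton of the enqueue path the fragments outline:
 * register the URB with the HCD core under the lock, attach per-endpoint
 * state, widen the frame budget for iso traffic, enable SOF/EOP interrupts
 * when the first URB arrives, then wake the scheduler. The error labels
 * stand in for elided cleanup lines: */
	spin_lock_irqsave(&c67x00->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto err_unlock;
	urbp->ep_data = c67x00_ep_data_alloc(c67x00, urb);
	if (!urbp->ep_data) {
		ret = -ENOMEM;
		goto err_unlink;
	}
	if (usb_pipeisoc(urb->pipe)) {
		if (c67x00->urb_iso_count == 0)
			c67x00->max_frame_bw = MAX_FRAME_BW_ISO;
		c67x00->urb_iso_count++;
	}
	if (!c67x00->urb_count++)		/* first URB in flight */
		c67x00_ll_hpi_enable_sofeop(c67x00->sie);
	c67x00_sched_kick(c67x00);
	spin_unlock_irqrestore(&c67x00->lock, flags);
	return 0;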
443 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
447 spin_lock_irqsave(&c67x00->lock, flags);
452 c67x00_release_urb(c67x00, urb);
455 spin_unlock(&c67x00->lock);
457 spin_lock(&c67x00->lock);
459 spin_unlock_irqrestore(&c67x00->lock, flags);
464 spin_unlock_irqrestore(&c67x00->lock, flags);
471 * pre: c67x00 locked, urb unlocked
474 c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status)
486 c67x00_release_urb(c67x00, urb);
487 usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb);
488 spin_unlock(&c67x00->lock);
489 usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, status);
490 spin_lock(&c67x00->lock);
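/* Why the spin_unlock()/spin_lock() sandwich above: usb_hcd_giveback_urb()
 * runs the URB's completion handler, which may legally resubmit URBs and
 * re-enter this driver, so the driver lock cannot be held across the
 * callback. The URB is unlinked from its endpoint first, still under the
 * lock, so the handler never observes it half-owned. The dequeue path
 * above uses the same pattern. */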
495 static int c67x00_claim_frame_bw(struct c67x00_hcd *c67x00, struct urb *urb,
535 if (unlikely(bit_time + c67x00->bandwidth_allocated >=
536 c67x00->max_frame_bw))
539 if (unlikely(c67x00->next_td_addr + CY_TD_SIZE >=
540 c67x00->td_base_addr + SIE_TD_SIZE))
543 if (unlikely(c67x00->next_buf_addr + len >=
544 c67x00->buf_base_addr + SIE_TD_BUF_SIZE))
548 if (unlikely(bit_time + c67x00->periodic_bw_allocated >=
549 MAX_PERIODIC_BW(c67x00->max_frame_bw)))
551 c67x00->periodic_bw_allocated += bit_time;
554 c67x00->bandwidth_allocated += bit_time;
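/* Consolidated sketch of the admission test above. A TD is accepted into
 * the current frame only if (1) its estimated wire time fits the remaining
 * frame budget, (2) a TD slot is left in SIE memory, (3) its payload fits
 * the SIE buffer area, and (4), for periodic transfers, the periodic share
 * of the frame is not exceeded. The -EMSGSIZE rejection code and the exact
 * bit_time cost model are assumptions; the checks are from the fragments: */
static int claim_frame_bw_sketch(struct c67x00_hcd *c67x00,
				 int bit_time, int len, int periodic)
{
	if (bit_time + c67x00->bandwidth_allocated >= c67x00->max_frame_bw)
		return -EMSGSIZE;			/* frame time exhausted */
	if (c67x00->next_td_addr + CY_TD_SIZE >=
	    c67x00->td_base_addr + SIE_TD_SIZE)
		return -EMSGSIZE;			/* TD area full */
	if (c67x00->next_buf_addr + len >=
	    c67x00->buf_base_addr + SIE_TD_BUF_SIZE)
		return -EMSGSIZE;			/* buffer area full */
	if (periodic) {
		if (bit_time + c67x00->periodic_bw_allocated >=
		    MAX_PERIODIC_BW(c67x00->max_frame_bw))
			return -EMSGSIZE;		/* periodic cap reached */
		c67x00->periodic_bw_allocated += bit_time;
	}
	c67x00->bandwidth_allocated += bit_time;
	return 0;
}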
563 static int c67x00_create_td(struct c67x00_hcd *c67x00, struct urb *urb,
573 if (c67x00_claim_frame_bw(c67x00, urb, len, usb_pipeisoc(urb->pipe)
585 !(c67x00->low_speed_ports & (1 << urbp->port)))
610 td->td_addr = c67x00->next_td_addr;
611 c67x00->next_td_addr = c67x00->next_td_addr + CY_TD_SIZE;
614 td->ly_base_addr = __cpu_to_le16(c67x00->next_buf_addr);
615 td->port_length = __cpu_to_le16((c67x00->sie->sie_num << 15) |
624 td->next_td_addr = __cpu_to_le16(c67x00->next_td_addr);
631 c67x00->next_buf_addr += (len + 1) & ~0x01; /* properly align */
633 list_add_tail(&td->td_list, &c67x00->td_list);
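/* The bookkeeping above, annotated: TDs are carved sequentially from
 * on-chip SIE memory starting at td_base_addr, payloads from buf_base_addr,
 * and each TD's next_td_addr points at the slot the following TD will
 * occupy, forming the list the SIE walks. The buffer pointer advances by
 * (len + 1) & ~0x01, which rounds the length up to the next even value
 * (0->0, 1->2, 2->2, 3->4), keeping buffers 16-bit aligned for the le16
 * block transfers. */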
645 static int c67x00_add_data_urb(struct c67x00_hcd *c67x00, struct urb *urb)
674 ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, toggle,
691 static int c67x00_add_ctrl_urb(struct c67x00_hcd *c67x00, struct urb *urb)
699 ret = c67x00_create_td(c67x00, urb, urb->setup_packet,
709 ret = c67x00_add_data_urb(c67x00, urb);
717 ret = c67x00_create_td(c67x00, urb, NULL, 0, pid, 1,
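/* The control path above is the standard USB three-stage sequence: a SETUP
 * TD carrying the 8-byte urb->setup_packet (toggle 0), an optional data
 * stage via c67x00_add_data_urb(), and a zero-length status TD that always
 * uses DATA1 (the literal toggle 1 in the final c67x00_create_td() call)
 * in the direction opposite the data stage. A sketch of the status-stage
 * pid choice, assuming the usual rule: */
	pid = usb_pipeout(urb->pipe) ? USB_PID_IN : USB_PID_OUT;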
730 static int c67x00_add_int_urb(struct c67x00_hcd *c67x00, struct urb *urb)
734 if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
737 return c67x00_add_data_urb(c67x00, urb);
742 static int c67x00_add_iso_urb(struct c67x00_hcd *c67x00, struct urb *urb)
746 if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
757 ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, 0,
760 dev_dbg(c67x00_hcd_dev(c67x00), "create failed: %d\n",
765 c67x00_giveback_urb(c67x00, urb, 0);
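/* Both periodic paths gate on frame_after_eq(current_frame,
 * ep_data->next_frame): TDs are built only once the schedule reaches the
 * endpoint's next service frame, after which next_frame advances by
 * urb->interval (in frames). For iso, one packet is queued per service
 * slot; the giveback with status 0 above sits on the failure path for the
 * final packet, errors having been recorded per packet in
 * urb->iso_frame_desc[]. A condensed sketch, with the packet counter
 * urbp->cnt assumed: */
	if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
		char *td_buf = urb->transfer_buffer +
			       urb->iso_frame_desc[urbp->cnt].offset;
		int len = urb->iso_frame_desc[urbp->cnt].length;
		int pid = usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;

		ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, 0,
				       urbp->cnt);
		if (ret && urbp->cnt + 1 == urb->number_of_packets)
			c67x00_giveback_urb(c67x00, urb, 0); /* last packet failed */
		urbp->ep_data->next_frame =
			frame_add(urbp->ep_data->next_frame, urb->interval);
		urbp->cnt++;
	}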
777 static void c67x00_fill_from_list(struct c67x00_hcd *c67x00, int type,
784 list_for_each_entry(ep_data, &c67x00->list[type], node) {
791 add(c67x00, urb);
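/* c67x00_fill_from_list() is generic over transfer type: `add` is one of
 * the c67x00_add_*_urb functions, and only the head URB of each endpoint
 * queue is offered per frame, preserving per-endpoint ordering. A sketch
 * of the plumbing; the typedef, sketch name, and queue field are assumed: */
typedef int (*c67x00_add_fn)(struct c67x00_hcd *c67x00, struct urb *urb);

static void fill_from_list_sketch(struct c67x00_hcd *c67x00, int type,
				  c67x00_add_fn add)
{
	struct c67x00_ep_data *ep_data;
	struct urb *urb;

	list_for_each_entry(ep_data, &c67x00->list[type], node) {
		if (!list_empty(&ep_data->queue)) {
			urb = list_first_entry(&ep_data->queue,
					       struct urb, urb_list);
			add(c67x00, urb);
		}
	}
}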
796 static void c67x00_fill_frame(struct c67x00_hcd *c67x00)
801 if (!list_empty(&c67x00->td_list)) {
802 dev_warn(c67x00_hcd_dev(c67x00),
804 list_for_each_entry_safe(td, ttd, &c67x00->td_list, td_list) {
805 dbg_td(c67x00, td, "Unprocessed td");
811 c67x00->bandwidth_allocated = 0;
812 c67x00->periodic_bw_allocated = 0;
814 c67x00->next_td_addr = c67x00->td_base_addr;
815 c67x00->next_buf_addr = c67x00->buf_base_addr;
818 c67x00_fill_from_list(c67x00, PIPE_ISOCHRONOUS, c67x00_add_iso_urb);
819 c67x00_fill_from_list(c67x00, PIPE_INTERRUPT, c67x00_add_int_urb);
820 c67x00_fill_from_list(c67x00, PIPE_CONTROL, c67x00_add_ctrl_urb);
821 c67x00_fill_from_list(c67x00, PIPE_BULK, c67x00_add_data_urb);
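/* The call order above is the scheduling policy itself: within each 1 ms
 * frame, isochronous traffic is admitted first, then interrupt, then
 * control, and bulk consumes whatever bandwidth remains, matching the
 * periodic-before-asynchronous ordering USB requires. */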
830 c67x00_parse_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
832 c67x00_ll_read_mem_le16(c67x00->sie->dev,
836 c67x00_ll_read_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
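/* Sketch of the readback step above: the TD is copied back from SIE memory
 * into its in-RAM shadow, and for IN transfers that actually received data
 * the payload is copied out too. td_actual_bytes() and the td->pipe and
 * td->data members are assumed accessors/fields: */
	c67x00_ll_read_mem_le16(c67x00->sie->dev, td->td_addr, td, CY_TD_SIZE);
	if (usb_pipein(td->pipe) && td_actual_bytes(td))
		c67x00_ll_read_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
					td->data, td_actual_bytes(td));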
840 static int c67x00_td_to_error(struct c67x00_hcd *c67x00, struct c67x00_td *td)
843 dbg_td(c67x00, td, "ERROR_FLAG");
847 /* dbg_td(c67x00, td, "STALL"); */
851 dbg_td(c67x00, td, "TIMEOUT");
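/* The three debug branches above suggest the usual status-to-errno
 * mapping. A hedged sketch, with the status mask names assumed: */
	if (td->status & TD_STATUSMASK_ERR)	/* CRC/bitstuff-class error */
		return -EILSEQ;
	if (td->status & TD_STATUSMASK_STALL)	/* endpoint stalled */
		return -EPIPE;
	if (td->status & TD_STATUSMASK_TMOUT)	/* no answer from device */
		return -ETIMEDOUT;
	return 0;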
889 static inline void c67x00_clear_pipe(struct c67x00_hcd *c67x00,
895 while (td->td_list.next != &c67x00->td_list) {
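/* The loop above walks forward from the failed TD and drops every later TD
 * belonging to the same endpoint, so one error cancels the rest of that
 * pipe's work in the current frame. One safe way to express the same walk;
 * the sketch name, last_td parameter, release mechanics, and ep_data field
 * are assumptions: */
static void clear_pipe_sketch(struct c67x00_hcd *c67x00,
			      struct c67x00_td *last_td)
{
	struct c67x00_td *td, *tmp;
	bool past_last = false;

	list_for_each_entry_safe(td, tmp, &c67x00->td_list, td_list) {
		if (td == last_td) {
			past_last = true;
			continue;
		}
		if (past_last && td->ep_data == last_td->ep_data) {
			list_del(&td->td_list);	/* drop this pipe's leftovers */
			kfree(td);
		}
	}
}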
907 static void c67x00_handle_successful_td(struct c67x00_hcd *c67x00,
932 c67x00_clear_pipe(c67x00, td);
938 c67x00_giveback_urb(c67x00, urb, 0);
946 c67x00_clear_pipe(c67x00, td);
947 c67x00_giveback_urb(c67x00, urb, 0);
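/* The two clear_pipe()+giveback(0) pairs above appear to be the normal
 * completion points: a control transfer finishing its status stage, and a
 * bulk/interrupt transfer that has either moved all its data or ended on a
 * legal short packet. Either way the pipe's remaining TDs in this frame
 * are cleared before the URB is returned with status 0. */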
953 static void c67x00_handle_isoc(struct c67x00_hcd *c67x00, struct c67x00_td *td)
967 urb->iso_frame_desc[cnt].status = c67x00_td_to_error(c67x00, td);
969 c67x00_giveback_urb(c67x00, urb, 0);
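/* Per-packet iso completion: each reaped TD fills one iso_frame_desc slot
 * (actual_length plus the status computed above), and the URB is given
 * back, always with status 0, once the last descriptor is in; iso errors
 * are reported per packet, not on the URB. The packet index most plausibly
 * travels in the TD's private data, matching the final argument of the
 * create_td() call in c67x00_add_iso_urb(); that plumbing is an
 * assumption. */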
975 * c67x00_check_td_list - handle tds which have been processed by the c67x00
978 static inline void c67x00_check_td_list(struct c67x00_hcd *c67x00)
985 list_for_each_entry_safe(td, tmp, &c67x00->td_list, td_list) {
987 c67x00_parse_td(c67x00, td);
995 c67x00_handle_isoc(c67x00, td);
1003 c67x00_giveback_urb(c67x00, urb,
1004 c67x00_td_to_error(c67x00, td));
1018 c67x00_giveback_urb(c67x00, urb, -EOVERFLOW);
1024 c67x00_handle_successful_td(c67x00, td);
1028 c67x00_clear_pipe(c67x00, td);
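/* Triage order implied by the fragments above, per reaped TD:
 *  1. c67x00_parse_td()  - refresh the shadow TD from SIE memory
 *  2. isochronous TDs    - per-packet status via c67x00_handle_isoc()
 *  3. hard errors        - give the URB back with c67x00_td_to_error()
 *  4. babble/overflow    - give back with -EOVERFLOW
 *  5. success            - c67x00_handle_successful_td() advances the URB
 * and in the failure paths c67x00_clear_pipe() drops the rest of that
 * endpoint's TDs for this frame. */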
1041 static inline int c67x00_all_tds_processed(struct c67x00_hcd *c67x00)
1046 return !c67x00_ll_husb_get_current_td(c67x00->sie);
1052 static void c67x00_send_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
1057 c67x00_ll_write_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
1060 c67x00_ll_write_mem_le16(c67x00->sie->dev,
1064 static void c67x00_send_frame(struct c67x00_hcd *c67x00)
1068 if (list_empty(&c67x00->td_list))
1069 dev_warn(c67x00_hcd_dev(c67x00),
1073 list_for_each_entry(td, &c67x00->td_list, td_list) {
1074 if (td->td_list.next == &c67x00->td_list)
1077 c67x00_send_td(c67x00, td);
1080 c67x00_ll_husb_set_current_td(c67x00->sie, c67x00->td_base_addr);
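/* Arming a frame, per the fragments above: each TD's payload (for OUT
 * transfers) and then the TD itself are written into SIE memory, the last
 * TD in the list is terminated (next_td_addr = 0, the apparent purpose of
 * the td_list.next check), and writing td_base_addr into the SIE's
 * current-TD register hands the list to the hardware, which runs it at the
 * next SOF. A sketch of the OUT-payload rule, with accessor names assumed: */
	if (td_length(td) && !usb_pipein(td->pipe))
		c67x00_ll_write_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
					 td->data, td_length(td));
	c67x00_ll_write_mem_le16(c67x00->sie->dev, td->td_addr,
				 td, CY_TD_SIZE);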
1088 static void c67x00_do_work(struct c67x00_hcd *c67x00)
1090 spin_lock(&c67x00->lock);
1092 if (!c67x00_all_tds_processed(c67x00))
1095 c67x00_check_td_list(c67x00);
1099 complete(&c67x00->endpoint_disable);
1101 if (!list_empty(&c67x00->td_list))
1104 c67x00->current_frame = c67x00_get_current_frame_number(c67x00);
1105 if (c67x00->current_frame == c67x00->last_frame)
1107 c67x00->last_frame = c67x00->current_frame;
1110 if (!c67x00->urb_count) {
1111 c67x00_ll_hpi_disable_sofeop(c67x00->sie);
1115 c67x00_fill_frame(c67x00);
1116 if (!list_empty(&c67x00->td_list))
1118 c67x00_send_frame(c67x00);
1121 spin_unlock(&c67x00->lock);
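/* The scheduler loop above, condensed: nothing happens until the SIE has
 * finished the previous frame (its current-TD pointer reads zero);
 * finished TDs are reaped; endpoint_disable waiters are woken; at most one
 * frame's worth of TDs is built per 1 ms frame (last_frame guards against
 * double-filling); SOF/EOP interrupts are switched off when no URBs
 * remain; otherwise the frame is filled and, if any TDs were produced,
 * sent. The goto-out structure is an assumption about the elided lines: */
	if (!c67x00_all_tds_processed(c67x00))
		goto out;				/* SIE still busy */
	c67x00_check_td_list(c67x00);			/* reap finished TDs */
	complete(&c67x00->endpoint_disable);		/* wake drain waiters */
	if (!list_empty(&c67x00->td_list))
		goto out;				/* leftovers: retry later */
	c67x00->current_frame = c67x00_get_current_frame_number(c67x00);
	if (c67x00->current_frame == c67x00->last_frame)
		goto out;				/* one fill per frame */
	c67x00->last_frame = c67x00->current_frame;
	if (!c67x00->urb_count) {
		c67x00_ll_hpi_disable_sofeop(c67x00->sie); /* go idle */
		goto out;
	}
	c67x00_fill_frame(c67x00);
	if (!list_empty(&c67x00->td_list))
		c67x00_send_frame(c67x00);
out:
	spin_unlock(&c67x00->lock);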
1128 struct c67x00_hcd *c67x00 = from_tasklet(c67x00, t, tasklet);
1129 c67x00_do_work(c67x00);
1132 void c67x00_sched_kick(struct c67x00_hcd *c67x00)
1134 tasklet_hi_schedule(&c67x00->tasklet);
1137 int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00)
1139 tasklet_setup(&c67x00->tasklet, c67x00_sched_tasklet);
1143 void c67x00_sched_stop_scheduler(struct c67x00_hcd *c67x00)
1145 tasklet_kill(&c67x00->tasklet);
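/* Lifecycle note: tasklet_setup() binds c67x00_sched_tasklet when the
 * scheduler starts, c67x00_sched_kick() is expected to be called from the
 * SOF/EOP interrupt path, tasklet_hi_schedule() places the work on the
 * high-priority softirq queue so frame building keeps up with the 1 ms SOF
 * cadence, and tasklet_kill() on stop waits for any running instance to
 * finish before returning. */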