Lines matching references to ppd (struct qib_pportdata *) in the qib SendDMA driver, drivers/infiniband/hw/qib/qib_sdma.c. The number at the start of each matched line is that line's position in the source file.

101 static void clear_sdma_activelist(struct qib_pportdata *ppd)
105 list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
112 unmap_desc(ppd, idx);
113 if (++idx == ppd->sdma_descq_cnt)
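The three fragments above come from clear_sdma_activelist(), which drains ppd->sdma_activelist and unmaps any descriptors a flushed request still owns. A minimal sketch of that drain pattern follows; only the list walk, unmap_desc() call, and ring-size wrap are taken from the listing, while the start_idx field, the FREEDESC flag, and the completion callback are assumptions about struct qib_sdma_txreq.

    static void clear_sdma_activelist(struct qib_pportdata *ppd)
    {
        struct qib_sdma_txreq *txp, *txp_next;

        /* _safe walk: each entry is unlinked while we iterate */
        list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
            unsigned idx = txp->start_idx;          /* assumed field name */

            list_del_init(&txp->list);

            /* unmap every descriptor this request posted, wrapping the
             * ring; the flag name is an assumption, not in the listing */
            if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
                while (idx != txp->next_descq_idx) {
                    unmap_desc(ppd, idx);
                    if (++idx == ppd->sdma_descq_cnt)
                        idx = 0;
                }
            }
            if (txp->callback)                      /* assumed completion hook */
                (*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
        }
    }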
124 struct qib_pportdata *ppd = from_tasklet(ppd, t,
128 spin_lock_irqsave(&ppd->sdma_lock, flags);
140 qib_sdma_make_progress(ppd);
142 clear_sdma_activelist(ppd);
148 ppd->sdma_descq_removed = ppd->sdma_descq_added;
155 ppd->sdma_descq_tail = 0;
156 ppd->sdma_descq_head = 0;
157 ppd->sdma_head_dma[0] = 0;
158 ppd->sdma_generation = 0;
160 __qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);
162 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
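Lines 124-162 belong to the software clean-up tasklet: recover ppd with from_tasklet(), take sdma_lock with interrupts disabled, make one last pass over the ring, flush the active list, and reset the software indices before feeding e40_sw_cleaned back into the state machine. A condensed sketch assembled from those fragments (declarations only are filled in):

    static void sdma_sw_clean_up_task(struct tasklet_struct *t)
    {
        struct qib_pportdata *ppd = from_tasklet(ppd, t,
                                                 sdma_sw_clean_up_task);
        unsigned long flags;

        spin_lock_irqsave(&ppd->sdma_lock, flags);

        /* reap anything the hardware already retired, then flush the rest */
        qib_sdma_make_progress(ppd);
        clear_sdma_activelist(ppd);

        /* the ring is now empty: resynchronize every software index */
        ppd->sdma_descq_removed = ppd->sdma_descq_added;
        ppd->sdma_descq_tail = 0;
        ppd->sdma_descq_head = 0;
        ppd->sdma_head_dma[0] = 0;
        ppd->sdma_generation = 0;

        __qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);

        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
    }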
170 static void sdma_hw_start_up(struct qib_pportdata *ppd)
172 struct qib_sdma_state *ss = &ppd->sdma_state;
176 ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));
178 ppd->dd->f_sdma_hw_start_up(ppd);
181 static void sdma_sw_tear_down(struct qib_pportdata *ppd)
183 struct qib_sdma_state *ss = &ppd->sdma_state;
189 static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
191 tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
194 static void sdma_set_state(struct qib_pportdata *ppd,
197 struct qib_sdma_state *ss = &ppd->sdma_state;
227 ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
230 static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
232 __le64 *descqp = &ppd->sdma_descq[head].qw[0];
242 dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
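unmap_desc() (lines 230-242) recovers the DMA address and length packed into a 16-byte descriptor and returns the mapping to the DMA API. The descriptor bit layout is not visible in the listing, so the shifts and mask below are illustrative placeholders; only the descriptor lookup and the dma_unmap_single() call are taken from the fragments.

    static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
    {
        __le64 *descqp = &ppd->sdma_descq[head].qw[0];
        u64 desc[2];
        dma_addr_t addr;
        size_t len;

        desc[0] = le64_to_cpu(descqp[0]);
        desc[1] = le64_to_cpu(descqp[1]);

        /* placeholder extraction; the real descriptor format defines
         * where the address and dword count actually live */
        addr = (desc[1] << 32) | (desc[0] >> 32);
        len  = (desc[0] >> 14) & (0x7ffULL << 2);

        dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
    }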
245 static int alloc_sdma(struct qib_pportdata *ppd)
247 ppd->sdma_descq_cnt = sdma_descq_cnt;
248 if (!ppd->sdma_descq_cnt)
249 ppd->sdma_descq_cnt = 256;
252 ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
253 ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
256 if (!ppd->sdma_descq) {
257 qib_dev_err(ppd->dd,
263 ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
264 PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
265 if (!ppd->sdma_head_dma) {
266 qib_dev_err(ppd->dd,
270 ppd->sdma_head_dma[0] = 0;
274 dma_free_coherent(&ppd->dd->pcidev->dev,
275 ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
276 ppd->sdma_descq_phys);
277 ppd->sdma_descq = NULL;
278 ppd->sdma_descq_phys = 0;
280 ppd->sdma_descq_cnt = 0;
284 static void free_sdma(struct qib_pportdata *ppd)
286 struct qib_devdata *dd = ppd->dd;
288 if (ppd->sdma_head_dma) {
290 (void *)ppd->sdma_head_dma,
291 ppd->sdma_head_phys);
292 ppd->sdma_head_dma = NULL;
293 ppd->sdma_head_phys = 0;
296 if (ppd->sdma_descq) {
298 ppd->sdma_descq_cnt * sizeof(u64[2]),
299 ppd->sdma_descq, ppd->sdma_descq_phys);
300 ppd->sdma_descq = NULL;
301 ppd->sdma_descq_phys = 0;
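alloc_sdma() and free_sdma() (lines 245-301) are a standard dma_alloc_coherent()/dma_free_coherent() pair: a ring of sdma_descq_cnt descriptors of two u64s each, plus one page the hardware writes the current head index into, with the allocator's error path undoing whatever already succeeded. A sketch of the allocation side, using only the fields visible in the listing; the goto labels and error strings are paraphrased assumptions.

    static int alloc_sdma(struct qib_pportdata *ppd)
    {
        ppd->sdma_descq_cnt = sdma_descq_cnt;
        if (!ppd->sdma_descq_cnt)
            ppd->sdma_descq_cnt = 256;

        /* descriptor ring: sdma_descq_cnt entries of two u64s each */
        ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
                ppd->sdma_descq_cnt * sizeof(u64[2]),
                &ppd->sdma_descq_phys, GFP_KERNEL);
        if (!ppd->sdma_descq) {
            qib_dev_err(ppd->dd,
                "failed to allocate SendDMA descriptor FIFO memory\n");
            goto bail;
        }

        /* one page the hardware DMAs the current head index into */
        ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
                PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
        if (!ppd->sdma_head_dma) {
            qib_dev_err(ppd->dd,
                "failed to allocate SendDMA head memory\n");
            goto cleanup_descq;
        }
        ppd->sdma_head_dma[0] = 0;
        return 0;

    cleanup_descq:
        dma_free_coherent(&ppd->dd->pcidev->dev,
                ppd->sdma_descq_cnt * sizeof(u64[2]),
                (void *)ppd->sdma_descq, ppd->sdma_descq_phys);
        ppd->sdma_descq = NULL;
        ppd->sdma_descq_phys = 0;
    bail:
        ppd->sdma_descq_cnt = 0;
        return -ENOMEM;
    }

free_sdma() (lines 284-301) mirrors this: free the head page and the descriptor ring with dma_free_coherent(), then clear the pointers and physical addresses so a later teardown cannot free them twice.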
305 static inline void make_sdma_desc(struct qib_pportdata *ppd,
316 sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
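make_sdma_desc() (lines 305-316) builds the two descriptor qwords from the DMA address, dword length, and offset, and folds the port's 2-bit sdma_generation into the first qword so the engine can tell fresh descriptors from stale ones after a ring wrap. The helper name and bit position below are placeholders, not the real descriptor layout:

    /* placeholder bit position; the real descriptor format defines it */
    #define SDMA_DESC_GEN_LSB 30ULL

    /* hypothetical helper showing only the generation-bit packing */
    static inline void sdma_desc_set_gen(struct qib_pportdata *ppd,
                                         u64 *sdmadesc)
    {
        sdmadesc[0] |= (ppd->sdma_generation & 3ULL) << SDMA_DESC_GEN_LSB;
    }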
325 int qib_sdma_make_progress(struct qib_pportdata *ppd)
329 struct qib_devdata *dd = ppd->dd;
334 hwhead = dd->f_sdma_gethead(ppd);
342 if (!list_empty(&ppd->sdma_activelist)) {
343 lp = ppd->sdma_activelist.next;
348 while (ppd->sdma_descq_head != hwhead) {
351 (idx == ppd->sdma_descq_head)) {
352 unmap_desc(ppd, ppd->sdma_descq_head);
353 if (++idx == ppd->sdma_descq_cnt)
358 ppd->sdma_descq_removed++;
361 if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
362 ppd->sdma_descq_head = 0;
365 if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
371 if (list_empty(&ppd->sdma_activelist))
374 lp = ppd->sdma_activelist.next;
383 qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
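qib_sdma_make_progress() (lines 325-383) reads the hardware head and retires everything between the software head and it: completed descriptors are unmapped if their request asked for it, the owning txreq is completed once the head passes its next_descq_idx, and verbs senders waiting for ring space are kicked at the end. A condensed sketch of the catch-up loop; field and flag names not shown in the listing (start_idx, FREEDESC, callback) are assumptions, and the real function also contains interrupt-request and head-source handling omitted here.

    int qib_sdma_make_progress(struct qib_pportdata *ppd)
    {
        struct qib_devdata *dd = ppd->dd;
        struct qib_sdma_txreq *txp = NULL;
        unsigned hwhead, idx = 0;
        int progress = 0;

        hwhead = dd->f_sdma_gethead(ppd);

        if (!list_empty(&ppd->sdma_activelist)) {
            txp = list_first_entry(&ppd->sdma_activelist,
                                   struct qib_sdma_txreq, list);
            idx = txp->start_idx;               /* assumed field name */
        }

        while (ppd->sdma_descq_head != hwhead) {
            /* unmap descriptors owned by requests that asked for it
             * (flag name is an assumption) */
            if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
                (idx == ppd->sdma_descq_head)) {
                unmap_desc(ppd, ppd->sdma_descq_head);
                if (++idx == ppd->sdma_descq_cnt)
                    idx = 0;
            }

            ppd->sdma_descq_removed++;
            if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
                ppd->sdma_descq_head = 0;

            /* a request is done once the head passes its last descriptor */
            if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
                list_del_init(&txp->list);
                if (txp->callback)              /* assumed completion hook */
                    (*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
                txp = list_empty(&ppd->sdma_activelist) ? NULL :
                        list_first_entry(&ppd->sdma_activelist,
                                         struct qib_sdma_txreq, list);
                if (txp)
                    idx = txp->start_idx;
            }
            progress = 1;
        }

        /* let verbs senders blocked on ring space try again */
        if (progress)
            qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
        return progress;
    }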
390 void qib_sdma_intr(struct qib_pportdata *ppd)
394 spin_lock_irqsave(&ppd->sdma_lock, flags);
396 __qib_sdma_intr(ppd);
398 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
401 void __qib_sdma_intr(struct qib_pportdata *ppd)
403 if (__qib_sdma_running(ppd)) {
404 qib_sdma_make_progress(ppd);
405 if (!list_empty(&ppd->sdma_userpending))
406 qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
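Lines 390-406 show the driver's locking convention: the plain-named function takes sdma_lock and delegates to a double-underscore variant that assumes the lock is already held, so callers that already own the lock use the __ form directly. A sketch of the pair, assembled from the fragments above:

    void qib_sdma_intr(struct qib_pportdata *ppd)
    {
        unsigned long flags;

        spin_lock_irqsave(&ppd->sdma_lock, flags);
        __qib_sdma_intr(ppd);
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
    }

    /* caller must hold ppd->sdma_lock */
    void __qib_sdma_intr(struct qib_pportdata *ppd)
    {
        if (__qib_sdma_running(ppd)) {
            qib_sdma_make_progress(ppd);
            /* push any queued user-mode SDMA work while we are here */
            if (!list_empty(&ppd->sdma_userpending))
                qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
        }
    }

qib_sdma_running() at lines 470-477 follows the same convention: it is just the locked wrapper around __qib_sdma_running().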
410 int qib_setup_sdma(struct qib_pportdata *ppd)
412 struct qib_devdata *dd = ppd->dd;
416 ret = alloc_sdma(ppd);
421 ppd->dd->f_sdma_init_early(ppd);
422 spin_lock_irqsave(&ppd->sdma_lock, flags);
423 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
424 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
427 kref_init(&ppd->sdma_state.kref);
428 init_completion(&ppd->sdma_state.comp);
430 ppd->sdma_generation = 0;
431 ppd->sdma_descq_head = 0;
432 ppd->sdma_descq_removed = 0;
433 ppd->sdma_descq_added = 0;
435 ppd->sdma_intrequest = 0;
436 INIT_LIST_HEAD(&ppd->sdma_userpending);
438 INIT_LIST_HEAD(&ppd->sdma_activelist);
440 tasklet_setup(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task);
442 ret = dd->f_init_sdma_regs(ppd);
446 qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);
451 qib_teardown_sdma(ppd);
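qib_setup_sdma() (lines 410-451) is the per-port init entry point: allocate the ring, run the chip-specific early init, park the state machine at s00_hw_down under the lock, zero the software indices, initialize the lists and clean-up tasklet, program the chip registers, and finally fire e10_go_hw_start. Any failure funnels into qib_teardown_sdma(). A sketch of that sequencing; the goto labels are assumptions, everything else is taken from the fragments.

    int qib_setup_sdma(struct qib_pportdata *ppd)
    {
        struct qib_devdata *dd = ppd->dd;
        unsigned long flags;
        int ret;

        ret = alloc_sdma(ppd);
        if (ret)
            goto bail;

        /* chip-specific early init, then park the state machine */
        ppd->dd->f_sdma_init_early(ppd);
        spin_lock_irqsave(&ppd->sdma_lock, flags);
        sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);

        /* refcount + completion that teardown later waits on */
        kref_init(&ppd->sdma_state.kref);
        init_completion(&ppd->sdma_state.comp);

        ppd->sdma_generation = 0;
        ppd->sdma_descq_head = 0;
        ppd->sdma_descq_removed = 0;
        ppd->sdma_descq_added = 0;
        ppd->sdma_intrequest = 0;

        INIT_LIST_HEAD(&ppd->sdma_userpending);
        INIT_LIST_HEAD(&ppd->sdma_activelist);

        tasklet_setup(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task);

        ret = dd->f_init_sdma_regs(ppd);
        if (ret)
            goto bail_teardown;

        qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);
        return 0;

    bail_teardown:
        qib_teardown_sdma(ppd);
    bail:
        return ret;
    }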
456 void qib_teardown_sdma(struct qib_pportdata *ppd)
458 qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
465 sdma_finalput(&ppd->sdma_state);
467 free_sdma(ppd);
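Teardown (lines 456-467) is the mirror image: push the state machine down with e00_go_hw_down, then block until every outstanding reference to the SDMA state is gone before freeing the ring. sdma_finalput() is not expanded in the listing; the sketch below shows the kref-plus-completion idiom it presumably wraps, with the release helper's name being an assumption.

    /* assumed release helper: signals waiters when the last ref drops */
    static void sdma_state_release(struct kref *kref)
    {
        struct qib_sdma_state *ss =
            container_of(kref, struct qib_sdma_state, kref);

        complete(&ss->comp);
    }

    static void sdma_finalput(struct qib_sdma_state *ss)
    {
        /* drop our reference, then wait for the last holder to signal */
        kref_put(&ss->kref, sdma_state_release);
        wait_for_completion(&ss->comp);
    }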
470 int qib_sdma_running(struct qib_pportdata *ppd)
475 spin_lock_irqsave(&ppd->sdma_lock, flags);
476 ret = __qib_sdma_running(ppd);
477 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
489 static void complete_sdma_err_req(struct qib_pportdata *ppd,
498 list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
499 clear_sdma_activelist(ppd);
511 int qib_sdma_verbs_send(struct qib_pportdata *ppd,
526 spin_lock_irqsave(&ppd->sdma_lock, flags);
529 if (unlikely(!__qib_sdma_running(ppd))) {
530 complete_sdma_err_req(ppd, tx);
534 if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
535 if (qib_sdma_make_progress(ppd))
537 if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
538 ppd->dd->f_sdma_set_desc_cnt(ppd,
539 ppd->sdma_descq_cnt / 2);
544 make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);
551 tail = ppd->sdma_descq_tail;
552 descqp = &ppd->sdma_descq[tail].qw[0];
557 if (++tail == ppd->sdma_descq_cnt) {
559 descqp = &ppd->sdma_descq[0].qw[0];
560 ++ppd->sdma_generation;
571 addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
573 if (dma_mapping_error(&ppd->dd->pcidev->dev, addr)) {
578 make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
587 if (++tail == ppd->sdma_descq_cnt) {
589 descqp = &ppd->sdma_descq[0].qw[0];
590 ++ppd->sdma_generation;
598 descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
608 ppd->dd->f_sdma_update_tail(ppd, tail);
609 ppd->sdma_descq_added += tx->txreq.sg_count;
610 list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
616 tail = ppd->sdma_descq_cnt - 1;
619 if (tail == ppd->sdma_descq_tail)
621 unmap_desc(ppd, tail);
654 dev = &ppd->dd->verbs_dev;
659 ibp = &ppd->ibport_data;
673 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
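qib_sdma_verbs_send() (lines 511-673) posts one verbs txreq: under sdma_lock it checks the engine is running and has room, writes the header descriptor at sdma_descq_tail, then maps and appends one descriptor per SGE, wrapping the tail and bumping sdma_generation each time it passes the end of the ring; only after all descriptors are written is the new tail published to the chip. On a DMA-mapping failure it walks backwards from the partially written tail, unmapping what it had posted. The hypothetical helper below condenses the append/wrap step; the running dword offset and the header-descriptor flags are elided.

    /* hypothetical helper: map one buffer and append it as a descriptor */
    static int sdma_append_desc(struct qib_pportdata *ppd, u16 *tailp,
                                void *vaddr, size_t len)
    {
        u16 tail = *tailp;
        __le64 *descqp = &ppd->sdma_descq[tail].qw[0];
        u64 sdmadesc[2];
        dma_addr_t addr;

        addr = dma_map_single(&ppd->dd->pcidev->dev, vaddr, len,
                              DMA_TO_DEVICE);
        if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
            return -ENOMEM;

        /* dword length; the running dword offset is elided in this sketch */
        make_sdma_desc(ppd, sdmadesc, (u64) addr, len >> 2, 0);
        descqp[0] = cpu_to_le64(sdmadesc[0]);
        descqp[1] = cpu_to_le64(sdmadesc[1]);

        /* wrapping the tail flips the generation the hardware checks */
        if (++tail == ppd->sdma_descq_cnt) {
            tail = 0;
            ++ppd->sdma_generation;
        }
        *tailp = tail;
        return 0;
    }

Once every SGE has been posted this way, the caller publishes the tail with ppd->dd->f_sdma_update_tail(ppd, tail), adds tx->txreq.sg_count to sdma_descq_added, and queues the txreq on sdma_activelist; the unwind path at lines 616-621 steps tail backwards (wrapping to sdma_descq_cnt - 1 at zero) and calls unmap_desc() until it is back at sdma_descq_tail.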
680 void dump_sdma_state(struct qib_pportdata *ppd)
690 head = ppd->sdma_descq_head;
691 tail = ppd->sdma_descq_tail;
692 cnt = qib_sdma_descq_freecnt(ppd);
693 descq = ppd->sdma_descq;
695 qib_dev_porterr(ppd->dd, ppd->port,
696 "SDMA ppd->sdma_descq_head: %u\n", head);
697 qib_dev_porterr(ppd->dd, ppd->port,
698 "SDMA ppd->sdma_descq_tail: %u\n", tail);
699 qib_dev_porterr(ppd->dd, ppd->port,
718 qib_dev_porterr(ppd->dd, ppd->port,
721 if (++head == ppd->sdma_descq_cnt)
726 list_for_each_entry_safe(txp, txpnext, &ppd->sdma_activelist,
728 qib_dev_porterr(ppd->dd, ppd->port,
733 void qib_sdma_process_event(struct qib_pportdata *ppd,
738 spin_lock_irqsave(&ppd->sdma_lock, flags);
740 __qib_sdma_process_event(ppd, event);
742 if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
743 qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
745 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
748 void __qib_sdma_process_event(struct qib_pportdata *ppd,
751 struct qib_sdma_state *ss = &ppd->sdma_state;
769 sdma_get(&ppd->sdma_state);
770 sdma_set_state(ppd,
776 sdma_sw_tear_down(ppd);
796 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
797 sdma_sw_tear_down(ppd);
802 sdma_set_state(ppd, ss->go_s99_running ?
830 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
831 sdma_sw_tear_down(ppd);
838 sdma_set_state(ppd, qib_sdma_state_s99_running);
861 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
871 sdma_set_state(ppd,
873 sdma_hw_start_up(ppd);
894 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
895 sdma_start_sw_clean_up(ppd);
907 sdma_set_state(ppd,
909 sdma_start_sw_clean_up(ppd);
928 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
929 sdma_start_sw_clean_up(ppd);
943 sdma_set_state(ppd,
945 ppd->dd->f_sdma_hw_clean_up(ppd);
962 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
963 sdma_start_sw_clean_up(ppd);
976 sdma_set_state(ppd,
978 sdma_start_sw_clean_up(ppd);
981 sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
985 sdma_set_state(ppd,
987 sdma_start_sw_clean_up(ppd);
990 sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
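__qib_sdma_process_event() (lines 748 onward) is a flat two-level switch: the outer switch is on the current state, the inner on the event, and every transition reduces to sdma_set_state() plus at most one side effect (tear down software state, schedule the clean-up tasklet, or restart the hardware). A minimal sketch of that shape, showing only two of the states the full driver handles; the event enum's type name and the last_event bookkeeping are assumptions, while the transitions shown match the fragments above.

    static void __qib_sdma_process_event(struct qib_pportdata *ppd,
                                         enum qib_sdma_events event)
    {
        struct qib_sdma_state *ss = &ppd->sdma_state;

        switch (ss->current_state) {
        case qib_sdma_state_s00_hw_down:
            switch (event) {
            case qib_sdma_event_e10_go_hw_start:
                /* take a reference for the lifetime of the "up" states */
                sdma_get(&ppd->sdma_state);
                sdma_set_state(ppd,
                        qib_sdma_state_s10_hw_start_up_wait);
                break;
            case qib_sdma_event_e00_go_hw_down:
                break;          /* already down: nothing to do */
            default:
                break;
            }
            break;

        case qib_sdma_state_s99_running:
            switch (event) {
            case qib_sdma_event_e00_go_hw_down:
                sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
                sdma_start_sw_clean_up(ppd);
                break;
            default:
                break;
            }
            break;

        default:
            break;
        }

        ss->last_event = event;         /* assumed bookkeeping field */
    }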