Lines Matching defs:ioat_chan
72 static void ioat_eh(struct ioatdma_chan *ioat_chan);
74 static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
80 dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
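Lines 74-80 define ioat_print_chanerrs(), which decodes a CHANERR bitmask into per-bit error strings. A minimal sketch of the full body, assuming a chanerr_str[] table of bit descriptions (the table name follows the driver's convention but is an assumption here; only the dev_err() line is quoted above):

static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
	int i;

	/* walk the CHANERR bits; print the description for each set bit */
	for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
		if ((chanerr >> i) & 1) {
			dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
				i, chanerr_str[i]);
		}
	}
}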
94 struct ioatdma_chan *ioat_chan;
111 ioat_chan = ioat_chan_by_index(instance, bit);
112 if (test_bit(IOAT_RUN, &ioat_chan->state))
113 tasklet_schedule(&ioat_chan->cleanup_task);
127 struct ioatdma_chan *ioat_chan = data;
129 if (test_bit(IOAT_RUN, &ioat_chan->state))
130 tasklet_schedule(&ioat_chan->cleanup_task);
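Lines 94-130 are the two interrupt paths, apparently from the Linux ioatdma driver (drivers/dma/ioat/dma.c): a shared handler that resolves the channel from an attention bit via ioat_chan_by_index(), and a per-channel MSI-X handler. Both gate tasklet_schedule() on IOAT_RUN so a channel mid-teardown cannot be rescheduled. A sketch of the MSI-X variant, assuming the standard irqreturn plumbing from <linux/interrupt.h>:

static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioatdma_chan *ioat_chan = data;

	/* defer descriptor reclaim to the cleanup tasklet */
	if (test_bit(IOAT_RUN, &ioat_chan->state))
		tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}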
135 void ioat_stop(struct ioatdma_chan *ioat_chan)
137 struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
139 int chan_id = chan_num(ioat_chan);
145 clear_bit(IOAT_RUN, &ioat_chan->state);
162 del_timer_sync(&ioat_chan->timer);
165 tasklet_kill(&ioat_chan->cleanup_task);
168 ioat_cleanup_event(&ioat_chan->cleanup_task);
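The teardown ordering in ioat_stop() (lines 135-168) matters: IOAT_RUN is cleared first so the interrupt handlers above stop scheduling work, then the timer and tasklet are flushed, and finally the cleanup handler runs once more to reap anything that completed in the window. A condensed sketch; the elided lines 146-161 presumably use the ioat_dma/chan_id locals to look up and synchronize the channel's IRQ, which is assumed below:

void ioat_stop(struct ioatdma_chan *ioat_chan)
{
	/* 1/ stop the interrupt handlers from rescheduling the tasklet */
	clear_bit(IOAT_RUN, &ioat_chan->state);

	/* 2/ assumed: synchronize_irq() on this channel's vector here */

	/* 3/ flush a pending timer and any in-flight tasklet run */
	del_timer_sync(&ioat_chan->timer);
	tasklet_kill(&ioat_chan->cleanup_task);

	/* 4/ final cleanup pass, now that nothing can race with us */
	ioat_cleanup_event(&ioat_chan->cleanup_task);
}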
171 static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
173 ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
174 ioat_chan->issued = ioat_chan->head;
175 writew(ioat_chan->dmacount,
176 ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
177 dev_dbg(to_dev(ioat_chan),
179 __func__, ioat_chan->head, ioat_chan->tail,
180 ioat_chan->issued, ioat_chan->dmacount);
185 struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
187 if (ioat_ring_pending(ioat_chan)) {
188 spin_lock_bh(&ioat_chan->prep_lock);
189 __ioat_issue_pending(ioat_chan);
190 spin_unlock_bh(&ioat_chan->prep_lock);
196 * @ioat_chan: ioat2+ channel
201 static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
203 if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
204 __ioat_issue_pending(ioat_chan);
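Lines 171-204 implement submission batching: __ioat_issue_pending() publishes the new head by writing the cumulative descriptor count to the DMACOUNT doorbell register, while ioat_update_pending() only rings it once more than ioat_pending_level descriptors have accumulated. Reconstructed sketch; the dev_dbg() format string is an assumption filled in from the arguments on lines 179-180:

static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
	ioat_chan->issued = ioat_chan->head;

	/* the doorbell: hardware fetches descriptors up to dmacount */
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);
}

static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
{
	/* batch doorbell writes until the watermark is crossed */
	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
		__ioat_issue_pending(ioat_chan);
}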
207 static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
212 if (ioat_ring_space(ioat_chan) < 1) {
213 dev_err(to_dev(ioat_chan),
218 dev_dbg(to_dev(ioat_chan),
220 __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
221 desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
233 ioat_set_chainaddr(ioat_chan, desc->txd.phys);
234 dump_desc_dbg(ioat_chan, desc);
237 ioat_chan->head += 1;
238 __ioat_issue_pending(ioat_chan);
241 void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
243 spin_lock_bh(&ioat_chan->prep_lock);
244 if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
245 __ioat_start_null_desc(ioat_chan);
246 spin_unlock_bh(&ioat_chan->prep_lock);
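A "null" descriptor is a no-op hardware descriptor used to (re)start an idle or freshly reset channel. The elided middle of __ioat_start_null_desc() (lines 222-236) programs it; the fragment below fills that gap based on the IOAT descriptor layout, so the ctl_f field names, NULL_DESC_BUFFER_SIZE, and the wmb() are assumptions:

	struct ioat_dma_descriptor *hw = desc->hw;

	hw->ctl = 0;
	hw->ctl_f.null = 1;		/* no data movement */
	hw->ctl_f.int_en = 1;		/* interrupt on completion */
	hw->ctl_f.compl_write = 1;	/* write back the completion address */
	/* assumed: size must be non-zero or the channel errors out */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;

	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
	dump_desc_dbg(ioat_chan, desc);

	/* assumed: make the descriptor visible before the doorbell */
	wmb();
	ioat_chan->head += 1;
	__ioat_issue_pending(ioat_chan);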
249 static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
252 ioat_chan->issued = ioat_chan->tail;
253 ioat_chan->dmacount = 0;
254 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
256 dev_dbg(to_dev(ioat_chan),
258 __func__, ioat_chan->head, ioat_chan->tail,
259 ioat_chan->issued, ioat_chan->dmacount);
261 if (ioat_ring_pending(ioat_chan)) {
264 desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
265 ioat_set_chainaddr(ioat_chan, desc->txd.phys);
266 __ioat_issue_pending(ioat_chan);
268 __ioat_start_null_desc(ioat_chan);
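Restart logic reconstructed from lines 249-268: bookkeeping rewinds issued to tail, the hardware chain address is pointed at the first unfinished descriptor, and the doorbell is rung again; with nothing pending, a null descriptor kicks the channel instead. Sketch, with the local declaration and debug format string assumed:

static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
{
	/* re-issue everything from tail onward */
	ioat_chan->issued = ioat_chan->tail;
	ioat_chan->dmacount = 0;
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);

	if (ioat_ring_pending(ioat_chan)) {
		struct ioat_ring_ent *desc;

		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
		__ioat_issue_pending(ioat_chan);
	} else {
		__ioat_start_null_desc(ioat_chan);
	}
}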
271 static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
277 status = ioat_chansts(ioat_chan);
279 ioat_suspend(ioat_chan);
285 status = ioat_chansts(ioat_chan);
292 static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
297 ioat_reset(ioat_chan);
298 while (ioat_reset_pending(ioat_chan)) {
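Both helpers on lines 271-298 are poll-until-done loops against a jiffies deadline: ioat_quiesce() suspends the channel and waits for CHANSTS to leave the active/idle states, while ioat_reset_sync() issues a reset and spins on the pending bit. A sketch of the quiesce loop; is_ioat_active()/is_ioat_idle() and the -ETIMEDOUT policy are assumptions consistent with the quoted ioat_chansts() reads (deadline helpers from <linux/jiffies.h>):

static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u64 status;

	status = ioat_chansts(ioat_chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat_chan);

	/* poll CHANSTS until the channel has actually stopped */
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(ioat_chan);
		cpu_relax();
	}

	return err;
}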
310 __releases(&ioat_chan->prep_lock)
313 struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
317 dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);
319 if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
320 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
328 ioat_chan->head += ioat_chan->produce;
330 ioat_update_pending(ioat_chan);
331 spin_unlock_bh(&ioat_chan->prep_lock);
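Lines 310-331 are the producer half of the prep_lock protocol: the submit path (annotated __releases) assigns a cookie, arms the completion timer the first time the channel goes active, advances head by the count reserved earlier in ioat_chan->produce, and drops the lock taken in ioat_check_space_lock() further down. Sketch; dma_cookie_assign() and the wmb() before publishing head are assumptions:

static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
	__releases(&ioat_chan->prep_lock)
{
	struct dma_chan *c = tx->chan;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	dma_cookie_t cookie = dma_cookie_assign(tx);

	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor writes visible before publishing the new head */
	wmb();
	ioat_chan->head += ioat_chan->produce;

	ioat_update_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);

	return cookie;
}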
341 struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
350 pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
351 phys = ioat_chan->descs[chunk].hw + offs;
374 struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
375 struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
386 ioat_chan->desc_chunks = chunks;
389 struct ioat_descs *descs = &ioat_chan->descs[i];
391 descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
397 descs = &ioat_chan->descs[idx];
398 dma_free_coherent(to_dev(ioat_chan),
405 ioat_chan->desc_chunks = 0;
419 for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
420 dma_free_coherent(to_dev(ioat_chan),
422 ioat_chan->descs[idx].virt,
423 ioat_chan->descs[idx].hw);
424 ioat_chan->descs[idx].virt = NULL;
425 ioat_chan->descs[idx].hw = 0;
428 ioat_chan->desc_chunks = 0;
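Lines 374-428 allocate (and free) the descriptor ring backing store as an array of DMA-coherent chunks. The failure path visible at lines 397-405 unwinds every chunk allocated so far before giving up, the same walk the free path performs at lines 419-428. A condensed fragment of the allocation loop; IOAT_CHUNK_SIZE, the gfp flags, and the surrounding error handling are assumptions:

	int i;

	ioat_chan->desc_chunks = chunks;
	for (i = 0; i < chunks; i++) {
		struct ioat_descs *descs = &ioat_chan->descs[i];

		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
						 IOAT_CHUNK_SIZE,
						 &descs->hw, GFP_NOWAIT);
		if (!descs->virt) {
			int idx;

			/* unwind: release every chunk allocated so far */
			for (idx = 0; idx < i; idx++) {
				descs = &ioat_chan->descs[idx];
				dma_free_coherent(to_dev(ioat_chan),
						  IOAT_CHUNK_SIZE,
						  descs->virt, descs->hw);
				descs->virt = NULL;
				descs->hw = 0;
			}
			ioat_chan->desc_chunks = 0;
			return NULL;
		}
	}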
451 writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
460 * @ioat_chan: ioat2,3 channel (ring) to operate on
463 int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
464 __acquires(&ioat_chan->prep_lock)
466 spin_lock_bh(&ioat_chan->prep_lock);
471 if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
472 dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
473 __func__, num_descs, ioat_chan->head,
474 ioat_chan->tail, ioat_chan->issued);
475 ioat_chan->produce = num_descs;
478 spin_unlock_bh(&ioat_chan->prep_lock);
480 dev_dbg_ratelimited(to_dev(ioat_chan),
482 __func__, num_descs, ioat_chan->head,
483 ioat_chan->tail, ioat_chan->issued);
489 if (time_is_before_jiffies(ioat_chan->timer.expires)
490 && timer_pending(&ioat_chan->timer)) {
491 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
492 ioat_timer_event(&ioat_chan->timer);
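The allocator half of the prep_lock protocol (lines 463-492): on success it returns with prep_lock held and ioat_chan->produce recording the reservation; on failure it drops the lock and, because it may run with bottom halves disabled, calls the timer handler directly to force descriptor reclaim rather than waiting for the timer to fire. Sketch; the return values and comments are assumptions:

int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
	__acquires(&ioat_chan->prep_lock)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	/* leave at least one slot free (note '>' rather than '>=') */
	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
		ioat_chan->produce = num_descs;
		return 0;	/* with ioat_chan->prep_lock held */
	}
	spin_unlock_bh(&ioat_chan->prep_lock);

	/* may be called with bh disabled, so push reclaim directly */
	if (time_is_before_jiffies(ioat_chan->timer.expires)
	    && timer_pending(&ioat_chan->timer)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		ioat_timer_event(&ioat_chan->timer);
	}

	return -ENOMEM;
}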
529 static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
534 completion = *ioat_chan->completion;
537 dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
543 static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
546 *phys_complete = ioat_get_current_completion(ioat_chan);
547 if (*phys_complete == ioat_chan->last_completion)
550 clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
551 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
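Lines 543-551 complete into a small predicate: compare the hardware's completion write-back against the address last reaped; only when it has moved is there work to do, in which case the ack bit is cleared and the watchdog re-armed. Reconstructed sketch (only the two return statements are filled in):

static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
				  u64 *phys_complete)
{
	*phys_complete = ioat_get_current_completion(ioat_chan);
	if (*phys_complete == ioat_chan->last_completion)
		return false;	/* no progress since the last reap */

	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}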
557 desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
588 * @ioat_chan: channel (ring) to clean
591 static void __ioat_cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
593 struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
596 int idx = ioat_chan->tail, i;
599 dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
600 __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
612 active = ioat_ring_active(ioat_chan);
616 prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
617 desc = ioat_get_ring_ent(ioat_chan, idx + i);
618 dump_desc_dbg(ioat_chan, desc);
622 desc_get_errstat(ioat_chan, desc);
651 ioat_chan->tail = idx + i;
654 ioat_chan->last_completion = phys_complete;
657 dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
659 mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
663 if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) {
664 writew(min((ioat_chan->intr_coalesce * (active - i)),
666 ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
667 ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce;
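The reap loop in __ioat_cleanup() (lines 591-667) walks the ring from tail, prefetching the next entry, until it passes the descriptor whose physical address matches the completion write-back; only then are tail and last_completion advanced and, if the ring drained, the watchdog demoted to the idle timeout. Skeleton sketch; the stop condition and per-descriptor completion handling in the elided lines are assumptions:

	int idx = ioat_chan->tail, i;
	int active = ioat_ring_active(ioat_chan);

	for (i = 0; i < active; i++) {
		struct ioat_ring_ent *desc;

		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		dump_desc_dbg(ioat_chan, desc);

		/* assumed: unmap, run the completion callback, and break
		 * once desc->txd.phys == phys_complete has been passed */
	}

	ioat_chan->tail = idx + i;
	ioat_chan->last_completion = phys_complete;

	if (active - i == 0) {
		/* ring drained: demote the watchdog to the idle timeout */
		mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
	}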
671 static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
675 spin_lock_bh(&ioat_chan->cleanup_lock);
677 if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
678 __ioat_cleanup(ioat_chan, phys_complete);
680 if (is_ioat_halted(*ioat_chan->completion)) {
681 u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
685 mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
686 ioat_eh(ioat_chan);
690 spin_unlock_bh(&ioat_chan->cleanup_lock);
695 struct ioatdma_chan *ioat_chan = from_tasklet(ioat_chan, t, cleanup_task);
697 ioat_cleanup(ioat_chan);
698 if (!test_bit(IOAT_RUN, &ioat_chan->state))
700 writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
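Lines 695-700 reconstruct almost completely: the tasklet body reaps completions, then re-arms the channel interrupt by writing IOAT_CHANCTRL_RUN, but skips the re-arm when IOAT_RUN was cleared by ioat_stop() in the meantime. Sketch (only the early return is filled in):

static void ioat_cleanup_event(struct tasklet_struct *t)
{
	struct ioatdma_chan *ioat_chan = from_tasklet(ioat_chan, t, cleanup_task);

	ioat_cleanup(ioat_chan);
	if (!test_bit(IOAT_RUN, &ioat_chan->state))
		return;		/* channel is being torn down */
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}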
703 static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
708 writel(lower_32_bits(ioat_chan->completion_dma),
709 ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
710 writel(upper_32_bits(ioat_chan->completion_dma),
711 ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
713 ioat_quiesce(ioat_chan, 0);
714 if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
715 __ioat_cleanup(ioat_chan, phys_complete);
717 __ioat_restart_chan(ioat_chan);
721 static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
723 struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
726 int idx = ioat_chan->tail, i;
733 active = ioat_ring_active(ioat_chan);
739 prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
740 desc = ioat_get_ring_ent(ioat_chan, idx + i);
768 ioat_chan->tail = idx + active;
770 desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
771 ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
774 static void ioat_eh(struct ioatdma_chan *ioat_chan)
776 struct pci_dev *pdev = to_pdev(ioat_chan);
788 if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
789 __ioat_cleanup(ioat_chan, phys_complete);
791 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
794 dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
797 desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
799 dump_desc_dbg(ioat_chan, desc);
836 dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
838 dev_err(to_dev(ioat_chan), "Errors handled:\n");
839 ioat_print_chanerrs(ioat_chan, err_handled);
840 dev_err(to_dev(ioat_chan), "Errors not handled:\n");
841 ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));
857 *ioat_chan->completion = desc->txd.phys;
859 spin_lock_bh(&ioat_chan->prep_lock);
862 ioat_abort_descs(ioat_chan);
864 ioat_reset_hw(ioat_chan);
867 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
870 ioat_restart_channel(ioat_chan);
871 spin_unlock_bh(&ioat_chan->prep_lock);
874 static void check_active(struct ioatdma_chan *ioat_chan)
876 if (ioat_ring_active(ioat_chan)) {
877 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
881 if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
882 mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
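check_active() (lines 874-882) implements the watchdog demotion: while descriptors remain active it keeps the short completion timeout; once the ring drains it trades the ACTIVE bit for the long idle timeout. Reconstructed sketch (the early return between the two branches is assumed):

static void check_active(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_active(ioat_chan)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}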
885 static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
887 spin_lock_bh(&ioat_chan->prep_lock);
888 set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
889 spin_unlock_bh(&ioat_chan->prep_lock);
891 ioat_abort_descs(ioat_chan);
892 dev_warn(to_dev(ioat_chan), "Reset channel...\n");
893 ioat_reset_hw(ioat_chan);
894 dev_warn(to_dev(ioat_chan), "Restart channel...\n");
895 ioat_restart_channel(ioat_chan);
897 spin_lock_bh(&ioat_chan->prep_lock);
898 clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
899 spin_unlock_bh(&ioat_chan->prep_lock);
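ioat_reboot_chan() (lines 885-899) reconstructs completely from the listing: mark the channel down under prep_lock so no new work is prepped, abort outstanding descriptors, hard-reset and restart the hardware, then lift the down flag. Only the braces and ordering below are inferred:

static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
{
	/* mark the channel down so no new work is prepped */
	spin_lock_bh(&ioat_chan->prep_lock);
	set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);

	ioat_abort_descs(ioat_chan);
	dev_warn(to_dev(ioat_chan), "Reset channel...\n");
	ioat_reset_hw(ioat_chan);
	dev_warn(to_dev(ioat_chan), "Restart channel...\n");
	ioat_restart_channel(ioat_chan);

	spin_lock_bh(&ioat_chan->prep_lock);
	clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);
}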
904 struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
908 status = ioat_chansts(ioat_chan);
916 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
917 dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
919 dev_err(to_dev(ioat_chan), "Errors:\n");
920 ioat_print_chanerrs(ioat_chan, chanerr);
922 if (test_bit(IOAT_RUN, &ioat_chan->state)) {
923 spin_lock_bh(&ioat_chan->cleanup_lock);
924 ioat_reboot_chan(ioat_chan);
925 spin_unlock_bh(&ioat_chan->cleanup_lock);
931 spin_lock_bh(&ioat_chan->cleanup_lock);
934 if (!ioat_ring_active(ioat_chan)) {
935 spin_lock_bh(&ioat_chan->prep_lock);
936 check_active(ioat_chan);
937 spin_unlock_bh(&ioat_chan->prep_lock);
942 if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) {
946 __ioat_cleanup(ioat_chan, phys_complete);
954 if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
957 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
958 dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
960 dev_err(to_dev(ioat_chan), "Errors:\n");
961 ioat_print_chanerrs(ioat_chan, chanerr);
963 dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
964 ioat_ring_active(ioat_chan));
966 ioat_reboot_chan(ioat_chan);
972 if (ioat_ring_pending(ioat_chan)) {
973 dev_warn(to_dev(ioat_chan),
975 spin_lock_bh(&ioat_chan->prep_lock);
976 __ioat_issue_pending(ioat_chan);
977 spin_unlock_bh(&ioat_chan->prep_lock);
980 set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
981 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
983 spin_unlock_bh(&ioat_chan->cleanup_lock);
990 struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
997 ioat_cleanup(ioat_chan);
1002 int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
1007 struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
1013 ioat_quiesce(ioat_chan, msecs_to_jiffies(100));
1015 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
1016 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
1048 err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
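ioat_reset_hw() (lines 1002-1048) follows the classic quiesce / clear-errors / reset sequence: stop the channel with a 100 ms budget, read CHANERR and write the value back (the register is write-one-to-clear), then issue a synchronous reset with a 200 ms budget. A condensed fragment; the elided lines between the CHANERR clear and the reset, assumed here to mirror the clear into PCI config space, are not shown in the listing:

	u32 chanerr;
	int err;

	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));

	/* CHANERR is write-one-to-clear: write back what was read */
	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	/* assumed: corresponding PCI config-space error bits cleared here */

	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
	if (err)
		dev_err(to_dev(ioat_chan), "reset timed out\n");	/* assumed message */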