Lines Matching refs:ctrl
201 q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);
210 static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
213 if (!dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
214 dma_unmap_page(&ctrl->card->dev->dev, dma->dma_addr,
225 static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
230 ctrl->stats.dma_sw_err++;
232 ctrl->stats.dma_hw_fault++;
234 ctrl->stats.dma_cancelled++;
237 dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
239 rsxx_free_dma(ctrl, dma);
242 int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
252 rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
254 rsxx_free_dma(ctrl, dma);
261 static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
268 spin_lock_bh(&ctrl->queue_lock);
269 ctrl->stats.sw_q_depth++;
270 list_add(&dma->list, &ctrl->queue);
271 spin_unlock_bh(&ctrl->queue_lock);
274 static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
281 dev_dbg(CARD_TO_DEV(ctrl->card),
286 ctrl->stats.crc_errors++;
288 ctrl->stats.hard_errors++;
290 ctrl->stats.soft_errors++;
295 if (ctrl->card->scrub_hard) {
298 ctrl->stats.reads_retried++;
301 ctrl->stats.reads_failed++;
305 ctrl->stats.reads_failed++;
313 ctrl->stats.reads_failed++;
319 ctrl->stats.writes_failed++;
324 ctrl->stats.discards_failed++;
328 dev_err(CARD_TO_DEV(ctrl->card),
338 rsxx_requeue_dma(ctrl, dma);
340 rsxx_complete_dma(ctrl, dma, status);
345 struct rsxx_dma_ctrl *ctrl = from_timer(ctrl, t, activity_timer);
348 if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
349 unlikely(ctrl->card->eeh_state))
352 if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
357 dev_warn(CARD_TO_DEV(ctrl->card),
359 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
360 mod_timer(&ctrl->activity_timer,
363 dev_warn(CARD_TO_DEV(ctrl->card),
365 ctrl->id);
366 ctrl->card->dma_fault = 1;
369 spin_lock(&ctrl->queue_lock);
370 cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
371 spin_unlock(&ctrl->queue_lock);
373 cnt += rsxx_dma_cancel(ctrl);
376 dev_info(CARD_TO_DEV(ctrl->card),
378 cnt, ctrl->id);
382 static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
390 hw_cmd_buf = ctrl->cmd.buf;
392 if (unlikely(ctrl->card->halt) ||
393 unlikely(ctrl->card->eeh_state))
397 spin_lock_bh(&ctrl->queue_lock);
398 if (list_empty(&ctrl->queue)) {
399 spin_unlock_bh(&ctrl->queue_lock);
402 spin_unlock_bh(&ctrl->queue_lock);
404 tag = pop_tracker(ctrl->trackers);
408 spin_lock_bh(&ctrl->queue_lock);
409 dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
411 ctrl->stats.sw_q_depth--;
412 spin_unlock_bh(&ctrl->queue_lock);
419 if (unlikely(ctrl->card->dma_fault)) {
420 push_tracker(ctrl->trackers, tag);
421 rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
441 dma->dma_addr = dma_map_page(&ctrl->card->dev->dev, dma->page,
443 if (dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
444 push_tracker(ctrl->trackers, tag);
445 rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
450 set_tracker_dma(ctrl->trackers, tag, dma);
451 hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
452 hw_cmd_buf[ctrl->cmd.idx].tag = tag;
453 hw_cmd_buf[ctrl->cmd.idx]._rsvd = 0;
454 hw_cmd_buf[ctrl->cmd.idx].sub_page =
458 hw_cmd_buf[ctrl->cmd.idx].device_addr =
461 hw_cmd_buf[ctrl->cmd.idx].host_addr =
464 dev_dbg(CARD_TO_DEV(ctrl->card),
466 ctrl->id, dma->laddr, tag, ctrl->cmd.idx);
468 ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;
472 ctrl->stats.writes_issued++;
474 ctrl->stats.discards_issued++;
476 ctrl->stats.reads_issued++;
481 atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
482 mod_timer(&ctrl->activity_timer,
485 if (unlikely(ctrl->card->eeh_state)) {
486 del_timer_sync(&ctrl->activity_timer);
490 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
494 static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
503 hw_st_buf = ctrl->status.buf;
505 if (unlikely(ctrl->card->halt) ||
506 unlikely(ctrl->card->dma_fault) ||
507 unlikely(ctrl->card->eeh_state))
510 count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
512 while (count == ctrl->e_cnt) {
522 status = hw_st_buf[ctrl->status.idx].status;
523 tag = hw_st_buf[ctrl->status.idx].tag;
525 dma = get_tracker_dma(ctrl->trackers, tag);
527 spin_lock_irqsave(&ctrl->card->irq_lock, flags);
528 rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
529 spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
531 dev_err(CARD_TO_DEV(ctrl->card),
534 tag, ctrl->status.idx, ctrl->id);
538 dev_dbg(CARD_TO_DEV(ctrl->card),
541 ctrl->id, dma->laddr, tag, status, count,
542 ctrl->status.idx);
544 atomic_dec(&ctrl->stats.hw_q_depth);
546 mod_timer(&ctrl->activity_timer,
550 rsxx_handle_dma_error(ctrl, dma, status);
552 rsxx_complete_dma(ctrl, dma, 0);
554 push_tracker(ctrl->trackers, tag);
556 ctrl->status.idx = (ctrl->status.idx + 1) &
558 ctrl->e_cnt++;
560 count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
563 dma_intr_coal_auto_tune(ctrl->card);
565 if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
566 del_timer_sync(&ctrl->activity_timer);
568 spin_lock_irqsave(&ctrl->card->irq_lock, flags);
569 rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
570 spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
572 spin_lock_bh(&ctrl->queue_lock);
573 if (ctrl->stats.sw_q_depth)
574 queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
575 spin_unlock_bh(&ctrl->queue_lock);
580 struct rsxx_dma_ctrl *ctrl;
582 ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
584 mutex_lock(&ctrl->work_lock);
585 rsxx_issue_dmas(ctrl);
586 mutex_unlock(&ctrl->work_lock);
591 struct rsxx_dma_ctrl *ctrl;
593 ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
595 mutex_lock(&ctrl->work_lock);
596 rsxx_dma_done(ctrl);
597 mutex_unlock(&ctrl->work_lock);
742 spin_lock_bh(&card->ctrl[i].queue_lock);
743 card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
744 list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
745 spin_unlock_bh(&card->ctrl[i].queue_lock);
747 queue_work(card->ctrl[i].issue_wq,
748 &card->ctrl[i].issue_dma_work);
756 rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
763 int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
765 ctrl->status.buf = dma_alloc_coherent(&dev->dev, STATUS_BUFFER_SIZE8,
766 &ctrl->status.dma_addr, GFP_KERNEL);
767 ctrl->cmd.buf = dma_alloc_coherent(&dev->dev, COMMAND_BUFFER_SIZE8,
768 &ctrl->cmd.dma_addr, GFP_KERNEL);
769 if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
772 memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
773 iowrite32(lower_32_bits(ctrl->status.dma_addr),
774 ctrl->regmap + SB_ADD_LO);
775 iowrite32(upper_32_bits(ctrl->status.dma_addr),
776 ctrl->regmap + SB_ADD_HI);
778 memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
779 iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
780 iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
782 ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
783 if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
785 ctrl->status.idx);
788 iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
789 iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
791 ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
792 if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
794 ctrl->status.idx);
797 iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
798 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
804 struct rsxx_dma_ctrl *ctrl)
809 memset(&ctrl->stats, 0, sizeof(ctrl->stats));
811 ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
812 if (!ctrl->trackers)
815 ctrl->trackers->head = 0;
817 ctrl->trackers->list[i].next_tag = i + 1;
818 ctrl->trackers->list[i].dma = NULL;
820 ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
821 spin_lock_init(&ctrl->trackers->lock);
823 spin_lock_init(&ctrl->queue_lock);
824 mutex_init(&ctrl->work_lock);
825 INIT_LIST_HEAD(&ctrl->queue);
827 timer_setup(&ctrl->activity_timer, dma_engine_stalled, 0);
829 ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
830 if (!ctrl->issue_wq)
833 ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0);
834 if (!ctrl->done_wq)
837 INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
838 INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);
840 st = rsxx_hw_buffers_init(dev, ctrl);
902 card->ctrl[i].regmap = card->regmap + (i * 4096);
911 st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
915 card->ctrl[i].card = card;
916 card->ctrl[i].id = i;
935 struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];
937 if (ctrl->issue_wq) {
938 destroy_workqueue(ctrl->issue_wq);
939 ctrl->issue_wq = NULL;
942 if (ctrl->done_wq) {
943 destroy_workqueue(ctrl->done_wq);
944 ctrl->done_wq = NULL;
947 if (ctrl->trackers)
948 vfree(ctrl->trackers);
950 if (ctrl->status.buf)
952 ctrl->status.buf,
953 ctrl->status.dma_addr);
954 if (ctrl->cmd.buf)
956 ctrl->cmd.buf, ctrl->cmd.dma_addr);
962 int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
970 dma = get_tracker_dma(ctrl->trackers, i);
972 atomic_dec(&ctrl->stats.hw_q_depth);
973 rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
974 push_tracker(ctrl->trackers, i);
984 struct rsxx_dma_ctrl *ctrl;
988 ctrl = &card->ctrl[i];
990 if (ctrl->issue_wq) {
991 destroy_workqueue(ctrl->issue_wq);
992 ctrl->issue_wq = NULL;
995 if (ctrl->done_wq) {
996 destroy_workqueue(ctrl->done_wq);
997 ctrl->done_wq = NULL;
1000 if (timer_pending(&ctrl->activity_timer))
1001 del_timer_sync(&ctrl->activity_timer);
1004 spin_lock_bh(&ctrl->queue_lock);
1005 rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
1006 spin_unlock_bh(&ctrl->queue_lock);
1008 rsxx_dma_cancel(ctrl);
1010 vfree(ctrl->trackers);
1013 ctrl->status.buf, ctrl->status.dma_addr);
1015 ctrl->cmd.buf, ctrl->cmd.dma_addr);
1036 dma = get_tracker_dma(card->ctrl[i].trackers, j);
1041 card->ctrl[i].stats.writes_issued--;
1043 card->ctrl[i].stats.discards_issued--;
1045 card->ctrl[i].stats.reads_issued--;
1056 push_tracker(card->ctrl[i].trackers, j);
1060 spin_lock_bh(&card->ctrl[i].queue_lock);
1061 list_splice(&issued_dmas[i], &card->ctrl[i].queue);
1063 atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
1064 card->ctrl[i].stats.sw_q_depth += cnt;
1065 card->ctrl[i].e_cnt = 0;
1066 spin_unlock_bh(&card->ctrl[i].queue_lock);
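A number of the references above trace one mechanism: lines 811-820 build ctrl->trackers as a free list of command tags threaded through an array, line 404 pops a tag on the issue path, line 450 binds the in-flight DMA to that tag, line 525 looks the DMA back up from the tag the hardware reports in its status entry, and line 554 returns the tag to the list. The sketch below is a minimal user-space model of that free-list pattern only; the struct names, sizes, and pthread locking are illustrative stand-ins, not the driver's definitions.

/* Hypothetical user-space model of the tag-tracker free list referenced above.
 * Names, sizes, and locking are illustrative stand-ins, not the driver's
 * definitions (the driver itself uses a spinlock and a vmalloc'd structure). */
#include <pthread.h>
#include <stdio.h>

#define MAX_OUTSTANDING_CMDS 255	/* stand-in for RSXX_MAX_OUTSTANDING_CMDS */

struct tracker {
	int next_tag;		/* next free tag; -1 terminates the free list */
	void *payload;		/* stands in for the in-flight struct rsxx_dma */
};

struct tracker_list {
	int head;		/* first free tag; -1 when all tags are in flight */
	pthread_mutex_t lock;
	struct tracker list[MAX_OUTSTANDING_CMDS];
};

static void tracker_list_init(struct tracker_list *t)
{
	int i;

	t->head = 0;
	for (i = 0; i < MAX_OUTSTANDING_CMDS; i++) {
		t->list[i].next_tag = i + 1;
		t->list[i].payload = NULL;
	}
	t->list[MAX_OUTSTANDING_CMDS - 1].next_tag = -1;
	pthread_mutex_init(&t->lock, NULL);
}

/* Claim a free tag from the head of the list; -1 means every tag is in use. */
static int pop_tracker(struct tracker_list *t)
{
	int tag;

	pthread_mutex_lock(&t->lock);
	tag = t->head;
	if (tag != -1) {
		t->head = t->list[tag].next_tag;
		t->list[tag].next_tag = -1;
	}
	pthread_mutex_unlock(&t->lock);
	return tag;
}

/* Return a completed tag to the head of the free list. */
static void push_tracker(struct tracker_list *t, int tag)
{
	pthread_mutex_lock(&t->lock);
	t->list[tag].payload = NULL;
	t->list[tag].next_tag = t->head;
	t->head = tag;
	pthread_mutex_unlock(&t->lock);
}

int main(void)
{
	static struct tracker_list trackers;
	static int fake_dma;
	int tag;

	tracker_list_init(&trackers);

	tag = pop_tracker(&trackers);		/* issue path: claim a tag */
	if (tag != -1) {
		trackers.list[tag].payload = &fake_dma;	/* set_tracker_dma() analogue */
		printf("issued tag %d\n", tag);
		push_tracker(&trackers, tag);	/* completion path: recycle it */
	}
	return 0;
}

The point of threading the free list through the array is that tag allocation and release are O(1) under a single lock, and the tag itself is the array index the hardware echoes back in its status entry (lines 522-525), which is how rsxx_dma_done maps a completion straight back to its rsxx_dma.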