Lines matching refs: can (one entry per matching source line, prefixed with its line number in the driver source)
13 #include <linux/can/dev.h>
262 struct can_priv can;
278 struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
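
These matches appear to come from the Linux kvaser_pciefd CAN driver. The two struct lines show the usual layout for a CAN driver's per-channel private data: struct can_priv is embedded as the first member, because the CAN core reaches it through netdev_priv(), and the board-level struct keeps an array of per-channel pointers. A minimal sketch of that layout, where every name other than struct can_priv and the kernel helpers is hypothetical:

#include <linux/can/dev.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

/* Per-channel private data: struct can_priv must be the first member so
 * that netdev_priv() can be used both as struct can_priv * (by the CAN
 * core) and as the driver's own per-channel type.
 */
struct demo_channel {
        struct can_priv can;            /* must stay first */
        void __iomem *reg_base;         /* hypothetical per-channel registers */
        spinlock_t lock;
};

static struct net_device *demo_alloc_channel(void)
{
        struct net_device *ndev;
        struct demo_channel *ch;

        /* alloc_candev() sizes the private area and the echo-skb array;
         * 8 outstanding TX frames is an arbitrary choice here.
         */
        ndev = alloc_candev(sizeof(struct demo_channel), 8);
        if (!ndev)
                return NULL;

        ch = netdev_priv(ndev);
        spin_lock_init(&ch->lock);

        return ndev;
}

alloc_candev() allocates the net_device together with the private area and the echo-skb bookkeeping, which is why the maximum number of in-flight TX frames is passed here.
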
493 static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
498 cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
499 iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
502 static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
507 spin_lock_irqsave(&can->lock, irq);
508 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
511 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
513 spin_unlock_irqrestore(&can->lock, irq);
516 static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
521 spin_lock_irqsave(&can->lock, irq);
522 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
524 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
525 spin_unlock_irqrestore(&can->lock, irq);
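
The enable_err_gen/disable_err_gen matches (and several later ones) all follow the same pattern: take the per-channel spinlock with interrupts disabled, read the KCAN mode register, flip a bit, and write the value back. A generic sketch of that read-modify-write pattern; the register offset and bit are placeholders, not the driver's real values:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_MODE_REG           0x414   /* hypothetical offset */
#define DEMO_MODE_LISTEN_BIT    BIT(9)  /* hypothetical bit */

struct demo_mode_chan {
        void __iomem *reg_base;
        spinlock_t lock;
};

static void demo_set_listen_only(struct demo_mode_chan *ch, bool enable)
{
        unsigned long flags;
        u32 mode;

        /* The lock serializes concurrent read-modify-write of the register */
        spin_lock_irqsave(&ch->lock, flags);

        mode = ioread32(ch->reg_base + DEMO_MODE_REG);
        if (enable)
                mode |= DEMO_MODE_LISTEN_BIT;
        else
                mode &= ~DEMO_MODE_LISTEN_BIT;
        iowrite32(mode, ch->reg_base + DEMO_MODE_REG);

        spin_unlock_irqrestore(&ch->lock, flags);
}
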
528 static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
538 iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
543 static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
548 spin_lock_irqsave(&can->lock, irq);
550 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
551 if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
553 if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
562 if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
572 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
574 spin_unlock_irqrestore(&can->lock, irq);
577 static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
582 spin_lock_irqsave(&can->lock, irq);
583 iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
585 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
587 status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
593 cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
594 iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
599 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
601 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
604 spin_unlock_irqrestore(&can->lock, irq);
607 static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
612 del_timer(&can->bec_poll_timer);
614 if (!completion_done(&can->flush_comp))
615 kvaser_pciefd_start_controller_flush(can);
617 if (!wait_for_completion_timeout(&can->flush_comp,
619 netdev_err(can->can.dev, "Timeout during bus on flush\n");
623 spin_lock_irqsave(&can->lock, irq);
624 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
625 iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
628 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
630 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
632 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
633 spin_unlock_irqrestore(&can->lock, irq);
635 if (!wait_for_completion_timeout(&can->start_comp,
637 netdev_err(can->can.dev, "Timeout during bus on reset\n");
641 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
642 iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
644 kvaser_pciefd_set_tx_irq(can);
645 kvaser_pciefd_setup_controller(can);
647 can->can.state = CAN_STATE_ERROR_ACTIVE;
648 netif_wake_queue(can->can.dev);
649 can->bec.txerr = 0;
650 can->bec.rxerr = 0;
651 can->err_rep_cnt = 0;
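
The bus_on matches pair two struct completion objects with wait_for_completion_timeout(): the driver starts a controller flush, waits for flush_comp (which, as the later eflush lines at 1590-1591 show, is completed from the interrupt path), then resets the controller and waits for start_comp. A reduced sketch of that handshake; the helper functions and the 100 ms timeout are assumptions, and the completions are expected to have been set up with init_completion() at probe time:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>

struct demo_flush_chan {
        struct net_device *ndev;
        struct completion flush_comp;   /* init_completion()'d at probe time */
        struct completion start_comp;   /* init_completion()'d at probe time */
};

/* hypothetical helpers, hardware access elided */
void demo_trigger_flush(struct demo_flush_chan *ch);
void demo_reset_controller(struct demo_flush_chan *ch);

static int demo_bus_on(struct demo_flush_chan *ch)
{
        /* Step 1: flush the TX path and wait for the flush-done indication */
        demo_trigger_flush(ch);
        if (!wait_for_completion_timeout(&ch->flush_comp,
                                         msecs_to_jiffies(100))) {
                netdev_err(ch->ndev, "timeout during bus-on flush\n");
                return -ETIMEDOUT;
        }

        /* Step 2: reset the controller and wait until it reports started */
        demo_reset_controller(ch);
        if (!wait_for_completion_timeout(&ch->start_comp,
                                         msecs_to_jiffies(100))) {
                netdev_err(ch->ndev, "timeout during bus-on reset\n");
                return -ETIMEDOUT;
        }

        return 0;
}

/* Interrupt-path counterpart, called when the flush-done packet is seen */
static void demo_flush_done(struct demo_flush_chan *ch)
{
        if (!completion_done(&ch->flush_comp))
                complete(&ch->flush_comp);
}
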
656 static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
662 spin_lock_irqsave(&can->lock, irq);
663 pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
668 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
669 spin_unlock_irqrestore(&can->lock, irq);
672 static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
678 kvaser_pciefd_pwm_stop(can);
679 spin_lock_irqsave(&can->lock, irq);
682 top = can->kv_pcie->bus_freq / (2 * 500000) - 1;
686 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
692 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
693 spin_unlock_irqrestore(&can->lock, irq);
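
The pwm_start match computes the PWM counter top value as bus_freq / (2 * 500000) - 1, i.e. the counter period is derived from the bus clock with a 500 kHz target (the factor of two presumably covering the two half-periods of the waveform). As a worked example, assuming a 125 MHz bus clock purely for illustration: top = 125000000 / 1000000 - 1 = 124.
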
699 struct kvaser_pciefd_can *can = netdev_priv(netdev);
705 err = kvaser_pciefd_bus_on(can);
716 struct kvaser_pciefd_can *can = netdev_priv(netdev);
720 if (!completion_done(&can->flush_comp))
721 kvaser_pciefd_start_controller_flush(can);
723 if (!wait_for_completion_timeout(&can->flush_comp,
725 netdev_err(can->can.dev, "Timeout during stop\n");
728 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
729 del_timer(&can->bec_poll_timer);
731 can->can.state = CAN_STATE_STOPPED;
738 struct kvaser_pciefd_can *can,
743 int seq = can->echo_idx;
747 if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
779 struct kvaser_pciefd_can *can = netdev_priv(netdev);
788 nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
790 spin_lock_irqsave(&can->echo_lock, irq_flags);
793 can_put_echo_skb(skb, netdev, can->echo_idx);
796 can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
800 can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
802 can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
808 iowrite32_rep(can->reg_base +
812 __raw_writel(data_last, can->reg_base +
816 __raw_writel(0, can->reg_base +
820 count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
825 can->can.echo_skb[can->echo_idx])
828 spin_unlock_irqrestore(&can->echo_lock, irq_flags);
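
The start_xmit matches show the echo-skb handling common to CAN drivers of this kernel generation: the frame is encoded first, the skb is stored with the three-argument can_put_echo_skb() before anything is pushed to the hardware FIFO (newer kernels add a frame_len parameter), the echo index advances modulo echo_skb_max, and the queue is stopped while the next echo slot is still occupied. A condensed sketch with the FIFO access abstracted into hypothetical helpers:

#include <linux/can/dev.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_tx_chan {
        struct can_priv can;            /* must be the first member */
        spinlock_t echo_lock;
        unsigned int echo_idx;
};

/* hypothetical helpers: encode the frame and push it to the TX FIFO */
int demo_prepare_frame(u32 *words, const struct sk_buff *skb);
void demo_write_words_to_fifo(struct demo_tx_chan *ch, const u32 *words,
                              int nwords);

static netdev_tx_t demo_start_xmit(struct sk_buff *skb,
                                   struct net_device *ndev)
{
        struct demo_tx_chan *ch = netdev_priv(ndev);
        unsigned long flags;
        u32 words[20];                  /* hypothetical on-stack frame buffer */
        int nwords;

        if (can_dropped_invalid_skb(ndev, skb))
                return NETDEV_TX_OK;

        nwords = demo_prepare_frame(words, skb);

        spin_lock_irqsave(&ch->echo_lock, flags);

        /* Store the echo skb before the frame can possibly complete */
        can_put_echo_skb(skb, ndev, ch->echo_idx);
        ch->echo_idx = (ch->echo_idx + 1) % ch->can.echo_skb_max;

        demo_write_words_to_fifo(ch, words, nwords);

        /* Stop the queue while the next echo slot is still in flight */
        if (ch->can.echo_skb[ch->echo_idx])
                netif_stop_queue(ndev);

        spin_unlock_irqrestore(&ch->echo_lock, flags);

        return NETDEV_TX_OK;
}
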
833 static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
841 bt = &can->can.data_bittiming;
843 bt = &can->can.bittiming;
852 spin_lock_irqsave(&can->lock, irq_flags);
853 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
857 can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
860 ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
865 spin_unlock_irqrestore(&can->lock, irq_flags);
870 iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
872 iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);
875 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
877 spin_unlock_irqrestore(&can->lock, irq_flags);
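
The set_bittiming matches use readl_poll_timeout() on the mode register: the controller is first asked to enter a quiescent state and the driver polls until the hardware confirms it before the bit timing registers (BTRN/BTRD) are rewritten. The sketch below uses readl_poll_timeout_atomic(), since the poll here runs with the per-channel spinlock held and interrupts off; offsets, bits and timeouts are placeholders:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_MODE_REG           0x414   /* hypothetical offset */
#define DEMO_MODE_RESET_BIT     BIT(8)  /* hypothetical bit */
#define DEMO_BTR_REG            0x408   /* hypothetical offset */

struct demo_bt_chan {
        void __iomem *reg_base;
        spinlock_t lock;
};

static int demo_write_btr(struct demo_bt_chan *ch, u32 btr)
{
        unsigned long flags;
        u32 mode, status;
        int ret;

        spin_lock_irqsave(&ch->lock, flags);

        /* Hold the controller in reset while the timing registers change */
        mode = ioread32(ch->reg_base + DEMO_MODE_REG);
        iowrite32(mode | DEMO_MODE_RESET_BIT, ch->reg_base + DEMO_MODE_REG);

        /* Busy-poll (10 us steps, 10 ms budget) until the bit reads back */
        ret = readl_poll_timeout_atomic(ch->reg_base + DEMO_MODE_REG, status,
                                        status & DEMO_MODE_RESET_BIT,
                                        10, 10000);
        if (ret) {
                spin_unlock_irqrestore(&ch->lock, flags);
                return ret;
        }

        iowrite32(btr, ch->reg_base + DEMO_BTR_REG);

        /* Restore the previous mode to release the controller again */
        iowrite32(mode, ch->reg_base + DEMO_MODE_REG);

        spin_unlock_irqrestore(&ch->lock, flags);

        return 0;
}
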
893 struct kvaser_pciefd_can *can = netdev_priv(ndev);
898 if (!can->can.restart_ms)
899 ret = kvaser_pciefd_bus_on(can);
911 struct kvaser_pciefd_can *can = netdev_priv(ndev);
913 bec->rxerr = can->bec.rxerr;
914 bec->txerr = can->bec.txerr;
920 struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);
922 kvaser_pciefd_enable_err_gen(can);
923 kvaser_pciefd_request_status(can);
924 can->err_rep_cnt = 0;
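
The bec_poll_timer matches show the timer_setup()/from_timer() pattern: the callback receives the struct timer_list pointer and recovers the containing per-channel structure with from_timer(), and the timer is re-armed elsewhere with mod_timer() (see the later mod_timer lines at 1353 and 1409). A minimal sketch with an arbitrary 200 ms poll interval:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct demo_poll_chan {
        struct timer_list poll_timer;
        /* other per-channel state elided */
};

static void demo_poll_timer_fn(struct timer_list *t)
{
        /* Recover the containing structure from the timer pointer */
        struct demo_poll_chan *ch = from_timer(ch, t, poll_timer);

        /* query the hardware for error counters here, then re-arm */
        mod_timer(&ch->poll_timer, jiffies + msecs_to_jiffies(200));
}

static void demo_poll_init(struct demo_poll_chan *ch)
{
        timer_setup(&ch->poll_timer, demo_poll_timer_fn, 0);
        mod_timer(&ch->poll_timer, jiffies + msecs_to_jiffies(200));
}
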
940 struct kvaser_pciefd_can *can;
948 can = netdev_priv(netdev);
950 can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
953 can->kv_pcie = pcie;
954 can->cmd_seq = 0;
955 can->err_rep_cnt = 0;
956 can->bec.txerr = 0;
957 can->bec.rxerr = 0;
959 init_completion(&can->start_comp);
960 init_completion(&can->flush_comp);
961 timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
965 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);
967 tx_npackets = ioread32(can->reg_base +
978 can->can.clock.freq = pcie->freq;
979 can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT;
980 can->echo_idx = 0;
981 spin_lock_init(&can->echo_lock);
982 spin_lock_init(&can->lock);
983 can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
984 can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;
986 can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
987 can->can.do_set_data_bittiming =
990 can->can.do_set_mode = kvaser_pciefd_set_mode;
991 can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;
993 can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
997 status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
1007 can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
1013 iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
1015 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
1017 pcie->can[i] = can;
1018 kvaser_pciefd_pwm_start(can);
1029 int err = register_candev(pcie->can[i]->can.dev);
1036 unregister_candev(pcie->can[j]->can.dev);
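
The reg_candev matches register one CAN net_device per channel and, if one registration fails, unregister the ones already registered before returning. A sketch of that unwind loop; the channel count and the array name are placeholders:

#include <linux/can/dev.h>
#include <linux/netdevice.h>

#define DEMO_NR_CHANNELS 4      /* hypothetical channel count */

struct demo_card {
        struct net_device *ndev[DEMO_NR_CHANNELS];
};

static int demo_register_channels(struct demo_card *card)
{
        int i, j, err;

        for (i = 0; i < DEMO_NR_CHANNELS; i++) {
                err = register_candev(card->ndev[i]);
                if (err) {
                        /* roll back the channels registered so far */
                        for (j = 0; j < i; j++)
                                unregister_candev(card->ndev[j]);
                        return err;
                }
        }

        return 0;
}
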
1176 priv = &pcie->can[ch_id]->can;
1222 static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
1228 can_change_state(can->can.dev, cf, tx_state, rx_state);
1231 struct net_device *ndev = can->can.dev;
1234 spin_lock_irqsave(&can->lock, irq_flags);
1235 netif_stop_queue(can->can.dev);
1236 spin_unlock_irqrestore(&can->lock, irq_flags);
1239 if (!can->can.restart_ms) {
1240 kvaser_pciefd_start_controller_flush(can);
1272 static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
1277 struct net_device *ndev = can->can.dev;
1283 old_state = can->can.state;
1294 kvaser_pciefd_change_state(can, cf, new_state, tx_state,
1299 can->can.restart_ms) {
1300 can->can.can_stats.restarts++;
1306 can->err_rep_cnt++;
1307 can->can.can_stats.bus_error++;
1313 can->bec.txerr = bec.txerr;
1314 can->bec.rxerr = bec.rxerr;
1324 can->kv_pcie->freq_to_ticks_div));
1340 struct kvaser_pciefd_can *can;
1346 can = pcie->can[ch_id];
1348 kvaser_pciefd_rx_error_frame(can, p);
1349 if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
1351 kvaser_pciefd_disable_err_gen(can);
1353 mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
1357 static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
1363 old_state = can->can.state;
1372 struct net_device *ndev = can->can.dev;
1385 kvaser_pciefd_change_state(can, cf, new_state, tx_state,
1390 can->can.restart_ms) {
1391 can->can.can_stats.restarts++;
1398 can->kv_pcie->freq_to_ticks_div));
1405 can->bec.txerr = bec.txerr;
1406 can->bec.rxerr = bec.rxerr;
1409 mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
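
The rx_error_frame and handle_status_resp matches follow the standard CAN error reporting flow: derive a new state from the error counters, allocate an error skb, let can_change_state() record the transition and update the statistics, copy the counters into data[6]/data[7], and pass the skb up. A trimmed sketch of that flow; the thresholds are the classic CAN ones (warning at 96, error passive at 128, bus off at 256), and everything else is an illustrative assumption:

#include <linux/can/dev.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void demo_report_state(struct net_device *ndev,
                              const struct can_berr_counter *bec)
{
        struct can_priv *priv = netdev_priv(ndev);
        enum can_state new_state, tx_state, rx_state;
        struct can_frame *cf;
        struct sk_buff *skb;

        /* classic CAN error-state thresholds */
        if (bec->txerr >= 256 || bec->rxerr >= 256)
                new_state = CAN_STATE_BUS_OFF;
        else if (bec->txerr >= 128 || bec->rxerr >= 128)
                new_state = CAN_STATE_ERROR_PASSIVE;
        else if (bec->txerr >= 96 || bec->rxerr >= 96)
                new_state = CAN_STATE_ERROR_WARNING;
        else
                new_state = CAN_STATE_ERROR_ACTIVE;

        if (new_state == priv->state)
                return;

        skb = alloc_can_err_skb(ndev, &cf);
        if (!skb)
                return;

        /* Attribute the change to the side with the higher counter */
        tx_state = bec->txerr >= bec->rxerr ? new_state : 0;
        rx_state = bec->txerr <= bec->rxerr ? new_state : 0;
        can_change_state(ndev, cf, tx_state, rx_state);

        if (new_state != CAN_STATE_BUS_OFF) {
                cf->data[6] = bec->txerr;
                cf->data[7] = bec->rxerr;
        }

        netif_rx(skb);
}
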
1417 struct kvaser_pciefd_can *can;
1425 can = pcie->can[ch_id];
1427 status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
1439 can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
1441 cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
1442 iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
1448 u8 count = ioread32(can->reg_base +
1453 can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
1457 kvaser_pciefd_handle_status_resp(can, p);
1458 if (can->can.state != CAN_STATE_BUS_OFF &&
1459 can->can.state != CAN_STATE_ERROR_ACTIVE) {
1460 mod_timer(&can->bec_poll_timer,
1466 if (!completion_done(&can->start_comp))
1467 complete(&can->start_comp);
1476 struct kvaser_pciefd_can *can;
1482 can = pcie->can[ch_id];
1486 u8 count = ioread32(can->reg_base +
1491 can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
1494 int dlc = can_get_echo_skb(can->can.dev, echo_idx);
1495 struct net_device_stats *stats = &can->can.dev->stats;
1500 if (netif_queue_stopped(can->can.dev))
1501 netif_wake_queue(can->can.dev);
1507 static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
1511 struct net_device_stats *stats = &can->can.dev->stats;
1514 skb = alloc_can_err_skb(can->can.dev, &cf);
1520 can->can.can_stats.arbitration_lost++;
1532 netdev_warn(can->can.dev, "No memory left for err_skb\n");
1539 struct kvaser_pciefd_can *can;
1546 can = pcie->can[ch_id];
1552 kvaser_pciefd_handle_nack_packet(can, p);
1557 netdev_dbg(can->can.dev, "Packet was flushed\n");
1560 int dlc = can_get_echo_skb(can->can.dev, echo_idx);
1561 u8 count = ioread32(can->reg_base +
1565 netif_queue_stopped(can->can.dev))
1566 netif_wake_queue(can->can.dev);
1569 struct net_device_stats *stats = &can->can.dev->stats;
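
The ack-packet matches are the TX-completion side of the echo mechanism: can_get_echo_skb() loops the stored skb back to the stack and returns its payload length, which feeds the TX statistics, and the queue is woken again once the FIFO is known to have room. A small sketch using the two-argument can_get_echo_skb() seen in the listing (newer kernels add a frame_len output parameter):

#include <linux/can/dev.h>
#include <linux/netdevice.h>

static void demo_handle_tx_ack(struct net_device *ndev, unsigned int echo_idx)
{
        struct net_device_stats *stats = &ndev->stats;
        int dlc;

        /* Loop the echoed skb back to the stack and get its payload length */
        dlc = can_get_echo_skb(ndev, echo_idx);

        stats->tx_bytes += dlc;
        stats->tx_packets++;

        /* TX FIFO space was just freed, so the queue can run again */
        if (netif_queue_stopped(ndev))
                netif_wake_queue(ndev);
}
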
1582 struct kvaser_pciefd_can *can;
1588 can = pcie->can[ch_id];
1590 if (!completion_done(&can->flush_comp))
1591 complete(&can->flush_comp);
1724 static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
1726 u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
1729 netdev_err(can->can.dev, "Tx FIFO overflow\n");
1732 netdev_err(can->can.dev,
1736 netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");
1739 netdev_err(can->can.dev, "Rx FIFO overflow\n");
1741 iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
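
The transmit_irq matches read the per-channel KCAN IRQ register, log the error conditions found (TX/RX FIFO overflow, an FD frame while in classic CAN mode, and so on), and then write the same value back, so the interrupt flags are evidently write-one-to-clear. A sketch of that acknowledge pattern with placeholder offsets and bits:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/types.h>

#define DEMO_IRQ_REG            0x410   /* hypothetical offset */
#define DEMO_IRQ_TX_OVERFLOW    BIT(4)  /* hypothetical bit */
#define DEMO_IRQ_RX_OVERFLOW    BIT(5)  /* hypothetical bit */

static void demo_channel_irq(struct net_device *ndev, void __iomem *reg_base)
{
        u32 irq = ioread32(reg_base + DEMO_IRQ_REG);

        if (irq & DEMO_IRQ_TX_OVERFLOW)
                netdev_err(ndev, "TX FIFO overflow\n");
        if (irq & DEMO_IRQ_RX_OVERFLOW)
                netdev_err(ndev, "RX FIFO overflow\n");

        /* Flags are write-one-to-clear: acknowledge exactly what was seen */
        iowrite32(irq, reg_base + DEMO_IRQ_REG);
}
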
1760 if (!pcie->can[i]) {
1768 kvaser_pciefd_transmit_irq(pcie->can[i]);
1778 struct kvaser_pciefd_can *can;
1781 can = pcie->can[i];
1782 if (can) {
1784 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
1785 kvaser_pciefd_pwm_stop(can);
1786 free_candev(can->can.dev);
1887 struct kvaser_pciefd_can *can;
1891 can = pcie->can[i];
1892 if (can) {
1894 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
1895 unregister_candev(can->can.dev);
1896 del_timer(&can->bec_poll_timer);
1897 kvaser_pciefd_pwm_stop(can);
1898 free_candev(can->can.dev);