1// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause 2/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved. 3 * Parts of this driver are based on the following: 4 * - Kvaser linux pciefd driver (version 5.25) 5 * - PEAK linux canfd driver 6 * - Altera Avalon EPCS flash controller driver 7 */ 8 9#include <linux/kernel.h> 10#include <linux/module.h> 11#include <linux/device.h> 12#include <linux/pci.h> 13#include <linux/can/dev.h> 14#include <linux/timer.h> 15#include <linux/netdevice.h> 16#include <linux/crc32.h> 17#include <linux/iopoll.h> 18 19MODULE_LICENSE("Dual BSD/GPL"); 20MODULE_AUTHOR("Kvaser AB <support@kvaser.com>"); 21MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); 22 23#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd" 24 25#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000) 26#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200)) 27#define KVASER_PCIEFD_MAX_ERR_REP 256 28#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17 29#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4 30#define KVASER_PCIEFD_DMA_COUNT 2 31 32#define KVASER_PCIEFD_DMA_SIZE (4 * 1024) 33#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0) 34 35#define KVASER_PCIEFD_VENDOR 0x1a07 36#define KVASER_PCIEFD_4HS_ID 0x0d 37#define KVASER_PCIEFD_2HS_ID 0x0e 38#define KVASER_PCIEFD_HS_ID 0x0f 39#define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10 40#define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11 41 42/* PCIe IRQ registers */ 43#define KVASER_PCIEFD_IRQ_REG 0x40 44#define KVASER_PCIEFD_IEN_REG 0x50 45/* DMA map */ 46#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000 47/* Kvaser KCAN CAN controller registers */ 48#define KVASER_PCIEFD_KCAN0_BASE 0x10000 49#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000 50#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100 51#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180 52#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0 53#define KVASER_PCIEFD_KCAN_CMD_REG 0x400 54#define KVASER_PCIEFD_KCAN_IEN_REG 0x408 55#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410 56#define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 
0x414 57#define KVASER_PCIEFD_KCAN_STAT_REG 0x418 58#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c 59#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420 60#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424 61#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428 62#define KVASER_PCIEFD_KCAN_PWM_REG 0x430 63/* Loopback control register */ 64#define KVASER_PCIEFD_LOOP_REG 0x1f000 65/* System identification and information registers */ 66#define KVASER_PCIEFD_SYSID_BASE 0x1f020 67#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8) 68#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc) 69#define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10) 70#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14) 71/* Shared receive buffer registers */ 72#define KVASER_PCIEFD_SRB_BASE 0x1f200 73#define KVASER_PCIEFD_SRB_FIFO_LAST_REG (KVASER_PCIEFD_SRB_BASE + 0x1f4) 74#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200) 75#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204) 76#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c) 77#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210) 78#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG (KVASER_PCIEFD_SRB_BASE + 0x214) 79#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218) 80/* EPCS flash controller registers */ 81#define KVASER_PCIEFD_SPI_BASE 0x1fc00 82#define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE 83#define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4) 84#define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8) 85#define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc) 86#define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14) 87 88#define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f 89#define KVASER_PCIEFD_IRQ_SRB BIT(4) 90 91#define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24 92#define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16 93#define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1 94 95/* Reset DMA buffer 0, 1 
and FIFO offset */ 96#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4) 97#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5) 98#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0) 99 100/* DMA packet done, buffer 0 and 1 */ 101#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8) 102#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9) 103/* DMA overflow, buffer 0 and 1 */ 104#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10) 105#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11) 106/* DMA underflow, buffer 0 and 1 */ 107#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12) 108#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13) 109 110/* DMA idle */ 111#define KVASER_PCIEFD_SRB_STAT_DI BIT(15) 112/* DMA support */ 113#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24) 114 115/* SRB current packet level */ 116#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK 0xff 117 118/* DMA Enable */ 119#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0) 120 121/* EPCS flash controller definitions */ 122#define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024) 123#define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L) 124#define KVASER_PCIEFD_CFG_MAX_PARAMS 256 125#define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d 126#define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24 127#define KVASER_PCIEFD_CFG_SYS_VER 1 128#define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130 129#define KVASER_PCIEFD_SPI_TMT BIT(5) 130#define KVASER_PCIEFD_SPI_TRDY BIT(6) 131#define KVASER_PCIEFD_SPI_RRDY BIT(7) 132#define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14 133/* Commands for controlling the onboard flash */ 134#define KVASER_PCIEFD_FLASH_RES_CMD 0xab 135#define KVASER_PCIEFD_FLASH_READ_CMD 0x3 136#define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5 137 138/* Kvaser KCAN definitions */ 139#define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29) 140#define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29) 141 142#define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16 143/* Request status packet */ 144#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0) 145/* Abort, flush and reset */ 146#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1) 147 148/* Tx FIFO unaligned read */ 149#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0) 
150/* Tx FIFO unaligned end */ 151#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1) 152/* Bus parameter protection error */ 153#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2) 154/* FDF bit when controller is in classic mode */ 155#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3) 156/* Rx FIFO overflow */ 157#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5) 158/* Abort done */ 159#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13) 160/* Tx buffer flush done */ 161#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14) 162/* Tx FIFO overflow */ 163#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15) 164/* Tx FIFO empty */ 165#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16) 166/* Transmitter unaligned */ 167#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17) 168 169#define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16 170 171#define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24 172/* Abort request */ 173#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7) 174/* Idle state. Controller in reset mode and no abort or flush pending */ 175#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10) 176/* Bus off */ 177#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11) 178/* Reset mode request */ 179#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14) 180/* Controller in reset mode */ 181#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15) 182/* Controller got one-shot capability */ 183#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16) 184/* Controller got CAN FD capability */ 185#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19) 186#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \ 187 KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \ 188 KVASER_PCIEFD_KCAN_STAT_IRM) 189 190/* Reset mode */ 191#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8) 192/* Listen only mode */ 193#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9) 194/* Error packet enable */ 195#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12) 196/* CAN FD non-ISO */ 197#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15) 198/* Acknowledgment packet type */ 199#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20) 200/* Active error flag enable. 
Clear to force error passive */ 201#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23) 202/* Classic CAN mode */ 203#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31) 204 205#define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13 206#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17 207#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26 208 209#define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16 210 211/* Kvaser KCAN packet types */ 212#define KVASER_PCIEFD_PACK_TYPE_DATA 0 213#define KVASER_PCIEFD_PACK_TYPE_ACK 1 214#define KVASER_PCIEFD_PACK_TYPE_TXRQ 2 215#define KVASER_PCIEFD_PACK_TYPE_ERROR 3 216#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4 217#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5 218#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6 219#define KVASER_PCIEFD_PACK_TYPE_STATUS 8 220#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9 221 222/* Kvaser KCAN packet common definitions */ 223#define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff 224#define KVASER_PCIEFD_PACKET_CHID_SHIFT 25 225#define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28 226 227/* Kvaser KCAN TDATA and RDATA first word */ 228#define KVASER_PCIEFD_RPACKET_IDE BIT(30) 229#define KVASER_PCIEFD_RPACKET_RTR BIT(29) 230/* Kvaser KCAN TDATA and RDATA second word */ 231#define KVASER_PCIEFD_RPACKET_ESI BIT(13) 232#define KVASER_PCIEFD_RPACKET_BRS BIT(14) 233#define KVASER_PCIEFD_RPACKET_FDF BIT(15) 234#define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8 235/* Kvaser KCAN TDATA second word */ 236#define KVASER_PCIEFD_TPACKET_SMS BIT(16) 237#define KVASER_PCIEFD_TPACKET_AREQ BIT(31) 238 239/* Kvaser KCAN APACKET */ 240#define KVASER_PCIEFD_APACKET_FLU BIT(8) 241#define KVASER_PCIEFD_APACKET_CT BIT(9) 242#define KVASER_PCIEFD_APACKET_ABL BIT(10) 243#define KVASER_PCIEFD_APACKET_NACK BIT(11) 244 245/* Kvaser KCAN SPACK first word */ 246#define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8 247#define KVASER_PCIEFD_SPACK_BOFF BIT(16) 248#define KVASER_PCIEFD_SPACK_IDET BIT(20) 249#define KVASER_PCIEFD_SPACK_IRM BIT(21) 250#define KVASER_PCIEFD_SPACK_RMCD BIT(22) 251/* Kvaser KCAN SPACK second word */ 
252#define KVASER_PCIEFD_SPACK_AUTO BIT(21) 253#define KVASER_PCIEFD_SPACK_EWLR BIT(23) 254#define KVASER_PCIEFD_SPACK_EPLR BIT(24) 255 256/* Kvaser KCAN_EPACK second word */ 257#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0) 258 259struct kvaser_pciefd; 260 261struct kvaser_pciefd_can { 262 struct can_priv can; 263 struct kvaser_pciefd *kv_pcie; 264 void __iomem *reg_base; 265 struct can_berr_counter bec; 266 u8 cmd_seq; 267 int err_rep_cnt; 268 int echo_idx; 269 spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */ 270 spinlock_t echo_lock; /* Locks the message echo buffer */ 271 struct timer_list bec_poll_timer; 272 struct completion start_comp, flush_comp; 273}; 274 275struct kvaser_pciefd { 276 struct pci_dev *pci; 277 void __iomem *reg_base; 278 struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS]; 279 void *dma_data[KVASER_PCIEFD_DMA_COUNT]; 280 u8 nr_channels; 281 u32 bus_freq; 282 u32 freq; 283 u32 freq_to_ticks_div; 284}; 285 286struct kvaser_pciefd_rx_packet { 287 u32 header[2]; 288 u64 timestamp; 289}; 290 291struct kvaser_pciefd_tx_packet { 292 u32 header[2]; 293 u8 data[64]; 294}; 295 296static const struct can_bittiming_const kvaser_pciefd_bittiming_const = { 297 .name = KVASER_PCIEFD_DRV_NAME, 298 .tseg1_min = 1, 299 .tseg1_max = 512, 300 .tseg2_min = 1, 301 .tseg2_max = 32, 302 .sjw_max = 16, 303 .brp_min = 1, 304 .brp_max = 8192, 305 .brp_inc = 1, 306}; 307 308struct kvaser_pciefd_cfg_param { 309 __le32 magic; 310 __le32 nr; 311 __le32 len; 312 u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ]; 313}; 314 315struct kvaser_pciefd_cfg_img { 316 __le32 version; 317 __le32 magic; 318 __le32 crc; 319 struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS]; 320}; 321 322static struct pci_device_id kvaser_pciefd_id_table[] = { 323 { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), }, 324 { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), }, 325 { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), }, 326 { 
PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), }, 327 { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), }, 328 { 0,}, 329}; 330MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table); 331 332/* Onboard flash memory functions */ 333static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk) 334{ 335 u32 res; 336 int ret; 337 338 ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG, 339 res, res & msk, 0, 10); 340 341 return ret; 342} 343 344static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx, 345 u32 tx_len, u8 *rx, u32 rx_len) 346{ 347 int c; 348 349 iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG); 350 iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG); 351 ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG); 352 353 c = tx_len; 354 while (c--) { 355 if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY)) 356 return -EIO; 357 358 iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG); 359 360 if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY)) 361 return -EIO; 362 363 ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG); 364 } 365 366 c = rx_len; 367 while (c-- > 0) { 368 if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY)) 369 return -EIO; 370 371 iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG); 372 373 if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY)) 374 return -EIO; 375 376 *rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG); 377 } 378 379 if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT)) 380 return -EIO; 381 382 iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG); 383 384 if (c != -1) { 385 dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n"); 386 return -EIO; 387 } 388 389 return 0; 390} 391 392static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie, 393 struct kvaser_pciefd_cfg_img *img) 394{ 395 int offset = KVASER_PCIEFD_CFG_IMG_OFFSET; 396 int res, 
crc; 397 u8 *crc_buff; 398 399 u8 cmd[] = { 400 KVASER_PCIEFD_FLASH_READ_CMD, 401 (u8)((offset >> 16) & 0xff), 402 (u8)((offset >> 8) & 0xff), 403 (u8)(offset & 0xff) 404 }; 405 406 res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img, 407 KVASER_PCIEFD_CFG_IMG_SZ); 408 if (res) 409 return res; 410 411 crc_buff = (u8 *)img->params; 412 413 if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) { 414 dev_err(&pcie->pci->dev, 415 "Config flash corrupted, version number is wrong\n"); 416 return -ENODEV; 417 } 418 419 if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) { 420 dev_err(&pcie->pci->dev, 421 "Config flash corrupted, magic number is wrong\n"); 422 return -ENODEV; 423 } 424 425 crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params)); 426 if (le32_to_cpu(img->crc) != crc) { 427 dev_err(&pcie->pci->dev, 428 "Stored CRC does not match flash image contents\n"); 429 return -EIO; 430 } 431 432 return 0; 433} 434 435static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie, 436 struct kvaser_pciefd_cfg_img *img) 437{ 438 struct kvaser_pciefd_cfg_param *param; 439 440 param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN]; 441 memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len)); 442} 443 444static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie) 445{ 446 int res; 447 struct kvaser_pciefd_cfg_img *img; 448 449 /* Read electronic signature */ 450 u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0}; 451 452 res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1); 453 if (res) 454 return -EIO; 455 456 img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL); 457 if (!img) 458 return -ENOMEM; 459 460 if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) { 461 dev_err(&pcie->pci->dev, 462 "Flash id is 0x%x instead of expected EPCS16 (0x%x)\n", 463 cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16); 464 465 res = -ENODEV; 466 goto image_free; 467 } 468 469 cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD; 470 res = kvaser_pciefd_spi_cmd(pcie, cmd, 
1, cmd, 1); 471 if (res) { 472 goto image_free; 473 } else if (cmd[0] & 1) { 474 res = -EIO; 475 /* No write is ever done, the WIP should never be set */ 476 dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n"); 477 goto image_free; 478 } 479 480 res = kvaser_pciefd_cfg_read_and_verify(pcie, img); 481 if (res) { 482 res = -EIO; 483 goto image_free; 484 } 485 486 kvaser_pciefd_cfg_read_params(pcie, img); 487 488image_free: 489 kfree(img); 490 return res; 491} 492 493static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can) 494{ 495 u32 cmd; 496 497 cmd = KVASER_PCIEFD_KCAN_CMD_SRQ; 498 cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT; 499 iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG); 500} 501 502static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can) 503{ 504 u32 mode; 505 unsigned long irq; 506 507 spin_lock_irqsave(&can->lock, irq); 508 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 509 if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) { 510 mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; 511 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 512 } 513 spin_unlock_irqrestore(&can->lock, irq); 514} 515 516static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can) 517{ 518 u32 mode; 519 unsigned long irq; 520 521 spin_lock_irqsave(&can->lock, irq); 522 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 523 mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN; 524 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 525 spin_unlock_irqrestore(&can->lock, irq); 526} 527 528static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can) 529{ 530 u32 msk; 531 532 msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF | 533 KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD | 534 KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL | 535 KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP | 536 KVASER_PCIEFD_KCAN_IRQ_TAR; 537 538 iowrite32(msk, can->reg_base + 
KVASER_PCIEFD_KCAN_IEN_REG); 539 540 return 0; 541} 542 543static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can) 544{ 545 u32 mode; 546 unsigned long irq; 547 548 spin_lock_irqsave(&can->lock, irq); 549 550 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 551 if (can->can.ctrlmode & CAN_CTRLMODE_FD) { 552 mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM; 553 if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) 554 mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN; 555 else 556 mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; 557 } else { 558 mode |= KVASER_PCIEFD_KCAN_MODE_CCM; 559 mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; 560 } 561 562 if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) 563 mode |= KVASER_PCIEFD_KCAN_MODE_LOM; 564 else 565 mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM; 566 567 mode |= KVASER_PCIEFD_KCAN_MODE_EEN; 568 mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; 569 /* Use ACK packet type */ 570 mode &= ~KVASER_PCIEFD_KCAN_MODE_APT; 571 mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; 572 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 573 574 spin_unlock_irqrestore(&can->lock, irq); 575} 576 577static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can) 578{ 579 u32 status; 580 unsigned long irq; 581 582 spin_lock_irqsave(&can->lock, irq); 583 iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 584 iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, 585 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 586 587 status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); 588 if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) { 589 u32 cmd; 590 591 /* If controller is already idle, run abort, flush and reset */ 592 cmd = KVASER_PCIEFD_KCAN_CMD_AT; 593 cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT; 594 iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG); 595 } else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) { 596 u32 mode; 597 598 /* Put controller in reset mode */ 599 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 600 mode |= 
KVASER_PCIEFD_KCAN_MODE_RM; 601 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 602 } 603 604 spin_unlock_irqrestore(&can->lock, irq); 605} 606 607static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can) 608{ 609 u32 mode; 610 unsigned long irq; 611 612 del_timer(&can->bec_poll_timer); 613 614 if (!completion_done(&can->flush_comp)) 615 kvaser_pciefd_start_controller_flush(can); 616 617 if (!wait_for_completion_timeout(&can->flush_comp, 618 KVASER_PCIEFD_WAIT_TIMEOUT)) { 619 netdev_err(can->can.dev, "Timeout during bus on flush\n"); 620 return -ETIMEDOUT; 621 } 622 623 spin_lock_irqsave(&can->lock, irq); 624 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 625 iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 626 627 iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, 628 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 629 630 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 631 mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; 632 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 633 spin_unlock_irqrestore(&can->lock, irq); 634 635 if (!wait_for_completion_timeout(&can->start_comp, 636 KVASER_PCIEFD_WAIT_TIMEOUT)) { 637 netdev_err(can->can.dev, "Timeout during bus on reset\n"); 638 return -ETIMEDOUT; 639 } 640 /* Reset interrupt handling */ 641 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 642 iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 643 644 kvaser_pciefd_set_tx_irq(can); 645 kvaser_pciefd_setup_controller(can); 646 647 can->can.state = CAN_STATE_ERROR_ACTIVE; 648 netif_wake_queue(can->can.dev); 649 can->bec.txerr = 0; 650 can->bec.rxerr = 0; 651 can->err_rep_cnt = 0; 652 653 return 0; 654} 655 656static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can) 657{ 658 u8 top; 659 u32 pwm_ctrl; 660 unsigned long irq; 661 662 spin_lock_irqsave(&can->lock, irq); 663 pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); 664 top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff; 665 666 
/* Set duty cycle to zero */ 667 pwm_ctrl |= top; 668 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); 669 spin_unlock_irqrestore(&can->lock, irq); 670} 671 672static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can) 673{ 674 int top, trigger; 675 u32 pwm_ctrl; 676 unsigned long irq; 677 678 kvaser_pciefd_pwm_stop(can); 679 spin_lock_irqsave(&can->lock, irq); 680 681 /* Set frequency to 500 KHz*/ 682 top = can->kv_pcie->bus_freq / (2 * 500000) - 1; 683 684 pwm_ctrl = top & 0xff; 685 pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT; 686 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); 687 688 /* Set duty cycle to 95 */ 689 trigger = (100 * top - 95 * (top + 1) + 50) / 100; 690 pwm_ctrl = trigger & 0xff; 691 pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT; 692 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); 693 spin_unlock_irqrestore(&can->lock, irq); 694} 695 696static int kvaser_pciefd_open(struct net_device *netdev) 697{ 698 int err; 699 struct kvaser_pciefd_can *can = netdev_priv(netdev); 700 701 err = open_candev(netdev); 702 if (err) 703 return err; 704 705 err = kvaser_pciefd_bus_on(can); 706 if (err) { 707 close_candev(netdev); 708 return err; 709 } 710 711 return 0; 712} 713 714static int kvaser_pciefd_stop(struct net_device *netdev) 715{ 716 struct kvaser_pciefd_can *can = netdev_priv(netdev); 717 int ret = 0; 718 719 /* Don't interrupt ongoing flush */ 720 if (!completion_done(&can->flush_comp)) 721 kvaser_pciefd_start_controller_flush(can); 722 723 if (!wait_for_completion_timeout(&can->flush_comp, 724 KVASER_PCIEFD_WAIT_TIMEOUT)) { 725 netdev_err(can->can.dev, "Timeout during stop\n"); 726 ret = -ETIMEDOUT; 727 } else { 728 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 729 del_timer(&can->bec_poll_timer); 730 } 731 can->can.state = CAN_STATE_STOPPED; 732 close_candev(netdev); 733 734 return ret; 735} 736 737static int kvaser_pciefd_prepare_tx_packet(struct 
kvaser_pciefd_tx_packet *p, 738 struct kvaser_pciefd_can *can, 739 struct sk_buff *skb) 740{ 741 struct canfd_frame *cf = (struct canfd_frame *)skb->data; 742 int packet_size; 743 int seq = can->echo_idx; 744 745 memset(p, 0, sizeof(*p)); 746 747 if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) 748 p->header[1] |= KVASER_PCIEFD_TPACKET_SMS; 749 750 if (cf->can_id & CAN_RTR_FLAG) 751 p->header[0] |= KVASER_PCIEFD_RPACKET_RTR; 752 753 if (cf->can_id & CAN_EFF_FLAG) 754 p->header[0] |= KVASER_PCIEFD_RPACKET_IDE; 755 756 p->header[0] |= cf->can_id & CAN_EFF_MASK; 757 p->header[1] |= can_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT; 758 p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ; 759 760 if (can_is_canfd_skb(skb)) { 761 p->header[1] |= KVASER_PCIEFD_RPACKET_FDF; 762 if (cf->flags & CANFD_BRS) 763 p->header[1] |= KVASER_PCIEFD_RPACKET_BRS; 764 if (cf->flags & CANFD_ESI) 765 p->header[1] |= KVASER_PCIEFD_RPACKET_ESI; 766 } 767 768 p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK; 769 770 packet_size = cf->len; 771 memcpy(p->data, cf->data, packet_size); 772 773 return DIV_ROUND_UP(packet_size, 4); 774} 775 776static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, 777 struct net_device *netdev) 778{ 779 struct kvaser_pciefd_can *can = netdev_priv(netdev); 780 unsigned long irq_flags; 781 struct kvaser_pciefd_tx_packet packet; 782 int nwords; 783 u8 count; 784 785 if (can_dropped_invalid_skb(netdev, skb)) 786 return NETDEV_TX_OK; 787 788 nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb); 789 790 spin_lock_irqsave(&can->echo_lock, irq_flags); 791 792 /* Prepare and save echo skb in internal slot */ 793 can_put_echo_skb(skb, netdev, can->echo_idx); 794 795 /* Move echo index to the next slot */ 796 can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max; 797 798 /* Write header to fifo */ 799 iowrite32(packet.header[0], 800 can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG); 801 iowrite32(packet.header[1], 802 can->reg_base + 
KVASER_PCIEFD_KCAN_FIFO_REG); 803 804 if (nwords) { 805 u32 data_last = ((u32 *)packet.data)[nwords - 1]; 806 807 /* Write data to fifo, except last word */ 808 iowrite32_rep(can->reg_base + 809 KVASER_PCIEFD_KCAN_FIFO_REG, packet.data, 810 nwords - 1); 811 /* Write last word to end of fifo */ 812 __raw_writel(data_last, can->reg_base + 813 KVASER_PCIEFD_KCAN_FIFO_LAST_REG); 814 } else { 815 /* Complete write to fifo */ 816 __raw_writel(0, can->reg_base + 817 KVASER_PCIEFD_KCAN_FIFO_LAST_REG); 818 } 819 820 count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG); 821 /* No room for a new message, stop the queue until at least one 822 * successful transmit 823 */ 824 if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT || 825 can->can.echo_skb[can->echo_idx]) 826 netif_stop_queue(netdev); 827 828 spin_unlock_irqrestore(&can->echo_lock, irq_flags); 829 830 return NETDEV_TX_OK; 831} 832 833static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data) 834{ 835 u32 mode, test, btrn; 836 unsigned long irq_flags; 837 int ret; 838 struct can_bittiming *bt; 839 840 if (data) 841 bt = &can->can.data_bittiming; 842 else 843 bt = &can->can.bittiming; 844 845 btrn = ((bt->phase_seg2 - 1) & 0x1f) << 846 KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT | 847 (((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) << 848 KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT | 849 ((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT | 850 ((bt->brp - 1) & 0x1fff); 851 852 spin_lock_irqsave(&can->lock, irq_flags); 853 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 854 855 /* Put the circuit in reset mode */ 856 iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM, 857 can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 858 859 /* Can only set bittiming if in reset mode */ 860 ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG, 861 test, test & KVASER_PCIEFD_KCAN_MODE_RM, 862 0, 10); 863 864 if (ret) { 865 spin_unlock_irqrestore(&can->lock, irq_flags); 866 return -EBUSY; 867 
} 868 869 if (data) 870 iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG); 871 else 872 iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG); 873 874 /* Restore previous reset mode status */ 875 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 876 877 spin_unlock_irqrestore(&can->lock, irq_flags); 878 return 0; 879} 880 881static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev) 882{ 883 return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false); 884} 885 886static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev) 887{ 888 return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true); 889} 890 891static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode) 892{ 893 struct kvaser_pciefd_can *can = netdev_priv(ndev); 894 int ret = 0; 895 896 switch (mode) { 897 case CAN_MODE_START: 898 if (!can->can.restart_ms) 899 ret = kvaser_pciefd_bus_on(can); 900 break; 901 default: 902 return -EOPNOTSUPP; 903 } 904 905 return ret; 906} 907 908static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev, 909 struct can_berr_counter *bec) 910{ 911 struct kvaser_pciefd_can *can = netdev_priv(ndev); 912 913 bec->rxerr = can->bec.rxerr; 914 bec->txerr = can->bec.txerr; 915 return 0; 916} 917 918static void kvaser_pciefd_bec_poll_timer(struct timer_list *data) 919{ 920 struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer); 921 922 kvaser_pciefd_enable_err_gen(can); 923 kvaser_pciefd_request_status(can); 924 can->err_rep_cnt = 0; 925} 926 927static const struct net_device_ops kvaser_pciefd_netdev_ops = { 928 .ndo_open = kvaser_pciefd_open, 929 .ndo_stop = kvaser_pciefd_stop, 930 .ndo_start_xmit = kvaser_pciefd_start_xmit, 931 .ndo_change_mtu = can_change_mtu, 932}; 933 934static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) 935{ 936 int i; 937 938 for (i = 0; i < pcie->nr_channels; i++) { 939 struct net_device *netdev; 940 struct kvaser_pciefd_can *can; 941 u32 
		    status, tx_npackets;

		netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
				      KVASER_PCIEFD_CAN_TX_MAX_COUNT);
		if (!netdev)
			return -ENOMEM;

		can = netdev_priv(netdev);
		netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
		/* Per-channel register window inside BAR 0 */
		can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
				i * KVASER_PCIEFD_KCAN_BASE_OFFSET;

		can->kv_pcie = pcie;
		can->cmd_seq = 0;
		can->err_rep_cnt = 0;
		can->bec.txerr = 0;
		can->bec.rxerr = 0;

		init_completion(&can->start_comp);
		init_completion(&can->flush_comp);
		timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
			    0);

		/* Disable Bus load reporting */
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);

		/* The controller must have room for the full Tx window this
		 * driver uses (KVASER_PCIEFD_CAN_TX_MAX_COUNT echo slots).
		 */
		tx_npackets = ioread32(can->reg_base +
				       KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
		if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
		     0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) {
			dev_err(&pcie->pci->dev,
				"Max Tx count is smaller than expected\n");

			free_candev(netdev);
			return -ENODEV;
		}

		can->can.clock.freq = pcie->freq;
		can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT;
		can->echo_idx = 0;
		spin_lock_init(&can->echo_lock);
		spin_lock_init(&can->lock);
		/* Same timing constants are used for nominal and data phase */
		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;

		can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
		can->can.do_set_data_bittiming =
			kvaser_pciefd_set_data_bittiming;

		can->can.do_set_mode = kvaser_pciefd_set_mode;
		can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;

		can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
					      CAN_CTRLMODE_FD |
					      CAN_CTRLMODE_FD_NON_ISO;

		/* This driver requires CAN FD capable firmware */
		status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
		if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
			dev_err(&pcie->pci->dev,
				"CAN FD not supported as expected %d\n", i);

			free_candev(netdev);
			return -ENODEV;
		}

		/* STAT_CAP set means the controller also supports one-shot Tx */
		if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
			can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;

		netdev->flags |= IFF_ECHO;

		SET_NETDEV_DEV(netdev, &pcie->pci->dev);

		/* Clear all pending controller interrupts (write-one-to-clear,
		 * matching how IRQ_REG is acked elsewhere in this file), then
		 * enable only the ABD interrupt for now.
		 */
		iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

		pcie->can[i] = can;
		kvaser_pciefd_pwm_start(can);
	}

	return 0;
}

/* Register the candev for every channel. On failure, unregister the
 * devices that were registered so far and return the error.
 */
static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		int err = register_candev(pcie->can[i]->can.dev);

		if (err) {
			int j;

			/* Unregister all successfully registered devices. */
			for (j = 0; j < i; j++)
				unregister_candev(pcie->can[j]->can.dev);
			return err;
		}
	}

	return 0;
}

/* Tell the board where one Rx DMA buffer lives. The address is written
 * as two 32-bit words at @offset; bit 0 of the low word
 * (KVASER_PCIEFD_64BIT_DMA_BIT) flags a 64-bit address.
 */
static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie,
					dma_addr_t addr, int offset)
{
	u32 word1, word2;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT;
	word2 = addr >> 32;
#else
	word1 = addr;
	word2 = 0;
#endif
	iowrite32(word1, pcie->reg_base + offset);
	iowrite32(word2, pcie->reg_base + offset + 4);
}

/* Allocate and map the two shared Rx DMA buffers, drain any stale
 * packets from the Rx FIFO, and enable DMA.
 *
 * Returns 0 on success, -ENOMEM on allocation failure or -EIO when the
 * DMA engine does not report idle before being enabled.
 */
static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
	int i;
	u32 srb_status;
	u32 srb_packet_count;
	dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];

	/* Disable the DMA */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
		unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;

		/* Device-managed allocation: freed automatically on driver
		 * detach, which is why the error paths below do not free it.
		 */
		pcie->dma_data[i] =
			dmam_alloc_coherent(&pcie->pci->dev,
					    KVASER_PCIEFD_DMA_SIZE,
					    &dma_addr[i],
					    GFP_KERNEL);

		if (!pcie->dma_data[i] || !dma_addr[i]) {
			dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
				KVASER_PCIEFD_DMA_SIZE);
			return -ENOMEM;
		}

		kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset);
	}

	/* Reset Rx FIFO, and both DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
		  KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);

	/* Empty Rx FIFO */
	srb_packet_count = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG) &
			   KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK;
	while (srb_packet_count) {
		/* Drop current packet in FIFO */
		ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_FIFO_LAST_REG);
		srb_packet_count--;
	}

	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
		dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
		return -EIO;
	}

	/* Enable the DMA */
	iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);

	return 0;
}

/* One-time board bring-up: read the device configuration
 * (kvaser_pciefd_read_cfg(); defined elsewhere in this file —
 * presumably from the EPCS flash, see the SPI register block), check
 * the channel count against the SYSID block, require DMA-capable
 * hardware, cache the clock frequencies and disable all loopback.
 */
static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
{
	u32 sysid, srb_status, build;
	u8 sysid_nr_chan;
	int ret;

	ret = kvaser_pciefd_read_cfg(pcie);
	if (ret)
		return ret;

	sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
	sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff;
	if (pcie->nr_channels != sysid_nr_chan) {
		dev_err(&pcie->pci->dev,
			"Number of channels does not match: %u vs %u\n",
			pcie->nr_channels,
			sysid_nr_chan);
		return -ENODEV;
	}

	/* Never drive more channels than this driver was built for */
	if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS)
		pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS;

	build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
	dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n",
		(sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff,
		sysid & 0xff,
		(build >>
		 KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff);

	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
		dev_err(&pcie->pci->dev,
			"Hardware without DMA is not supported\n");
		return -ENODEV;
	}

	pcie->bus_freq = ioread32(pcie->reg_base +
				  KVASER_PCIEFD_SYSID_BUSFREQ_REG);
	pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG);
	/* Timestamp ticks per microsecond; clamped to a minimum of 1 so
	 * the div_u64() calls on the Rx paths can never divide by zero.
	 */
	pcie->freq_to_ticks_div = pcie->freq / 1000000;
	if (pcie->freq_to_ticks_div == 0)
		pcie->freq_to_ticks_div = 1;

	/* Turn off all loopback functionality */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);
	return ret;
}

/* Turn a received data packet into a CAN or CAN FD frame and hand it
 * to the network stack.
 *
 * Returns the netif_rx() verdict, or a negative errno for an
 * out-of-range channel id or skb allocation failure.
 */
static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p,
					    __le32 *data)
{
	struct sk_buff *skb;
	struct canfd_frame *cf;
	struct can_priv *priv;
	struct net_device_stats *stats;
	struct skb_shared_hwtstamps *shhwtstamps;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	priv = &pcie->can[ch_id]->can;
	stats = &priv->dev->stats;

	if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
		/* CAN FD frame: carry the BRS/ESI flags over from the packet */
		skb = alloc_canfd_skb(priv->dev, &cf);
		if (!skb) {
			stats->rx_dropped++;
			return -ENOMEM;
		}

		if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
			cf->flags |= CANFD_BRS;

		if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
			cf->flags |= CANFD_ESI;
	} else {
		/* Classic CAN frame; the canfd_frame view is only used for
		 * the fields shared with struct can_frame below.
		 */
		skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
		if (!skb) {
			stats->rx_dropped++;
			return -ENOMEM;
		}
	}

	cf->can_id = p->header[0] & CAN_EFF_MASK;
	if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
		cf->can_id |= CAN_EFF_FLAG;

	cf->len = can_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);

	if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR)
		cf->can_id |= CAN_RTR_FLAG;
	else
		memcpy(cf->data, data, cf->len);

	shhwtstamps = skb_hwtstamps(skb);

	/* Convert device timestamp ticks to nanoseconds */
	shhwtstamps->hwtstamp =
		ns_to_ktime(div_u64(p->timestamp * 1000,
				    pcie->freq_to_ticks_div));

	/* NOTE(review): rx_bytes is bumped by cf->len even for RTR
	 * frames, which carry no payload — confirm this accounting is
	 * intended.
	 */
	stats->rx_bytes += cf->len;
	stats->rx_packets++;

	return netif_rx(skb);
}

/* Apply a CAN state transition and, on bus-off, stop the Tx queue.
 * When automatic restart is disabled (restart_ms == 0), also start a
 * controller flush and report bus-off to the stack.
 */
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
				       struct can_frame *cf,
				       enum can_state new_state,
				       enum can_state tx_state,
				       enum can_state rx_state)
{
	can_change_state(can->can.dev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		struct net_device *ndev = can->can.dev;
		unsigned long irq_flags;

		/* can->lock serializes queue stop against other users of the
		 * lock in this driver (not all visible in this chunk).
		 */
		spin_lock_irqsave(&can->lock, irq_flags);
		netif_stop_queue(can->can.dev);
		spin_unlock_irqrestore(&can->lock, irq_flags);

		/* Prevent CAN controller from auto recover from bus off */
		if (!can->can.restart_ms) {
			kvaser_pciefd_start_controller_flush(can);
			can_bus_off(ndev);
		}
	}
}

/* Derive the new CAN state from a status/error packet's flag bits and
 * the bus error counters, checking the most severe condition first.
 */
static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
					  struct can_berr_counter *bec,
					  enum can_state *new_state,
					  enum can_state *tx_state,
					  enum can_state *rx_state)
{
	if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
	    p->header[0] & KVASER_PCIEFD_SPACK_IRM)
		*new_state = CAN_STATE_BUS_OFF;
	else if (bec->txerr >= 255 || bec->rxerr >= 255)
		*new_state = CAN_STATE_BUS_OFF;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (bec->txerr >= 128 || bec->rxerr >= 128)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
		*new_state = CAN_STATE_ERROR_WARNING;
	else if (bec->txerr >= 96 || bec->rxerr >= 96)
		*new_state = CAN_STATE_ERROR_WARNING;
	else
		*new_state = CAN_STATE_ERROR_ACTIVE;

	/* Attribute the state to whichever side has the higher counter */
	*tx_state =
	bec->txerr >= bec->rxerr ? *new_state : 0;
	*rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
}

/* Handle a bus-error packet: refresh the cached error counters, run
 * the state machine if the state changed, account the error and queue
 * an error frame carrying the counters in data[6]/data[7].
 */
static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
					struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;
	struct net_device *ndev = can->can.dev;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct net_device_stats *stats = &ndev->stats;

	old_state = can->can.state;

	bec.txerr = p->header[0] & 0xff;
	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
				      &rx_state);

	skb = alloc_can_err_skb(ndev, &cf);

	if (new_state != old_state) {
		/* cf is NULL if the allocation above failed — the state
		 * change is applied regardless; assumes the state-change
		 * path tolerates a NULL frame (TODO confirm).
		 */
		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
					   rx_state);

		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			if (skb)
				cf->can_id |= CAN_ERR_RESTARTED;
		}
	}

	can->err_rep_cnt++;
	can->can.can_stats.bus_error++;
	if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
		stats->tx_errors++;
	else
		stats->rx_errors++;

	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;

	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	shhwtstamps = skb_hwtstamps(skb);
	shhwtstamps->hwtstamp =
		ns_to_ktime(div_u64(p->timestamp * 1000,
				    can->kv_pcie->freq_to_ticks_div));
	cf->can_id |= CAN_ERR_BUSERROR;

	/* Report the raw error counters in the error frame payload */
	cf->data[6] = bec.txerr;
	cf->data[7] = bec.rxerr;

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;

	netif_rx(skb);
	return 0;
}

/* Route an error packet to its channel. Once
 * KVASER_PCIEFD_MAX_ERR_REP reports have accumulated, mute hardware
 * error generation and rely on bec_poll_timer to poll the counters.
 */
static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	kvaser_pciefd_rx_error_frame(can, p);
	if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
		/* Do not report more errors, until bec_poll_timer expires */
		kvaser_pciefd_disable_err_gen(can);
	/* Start polling the error counters */
	mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
	return 0;
}

/* Handle the response to an explicit status request: refresh the
 * cached error counters and, if the state changed, run the state
 * machine and emit an error frame describing the transition.
 */
static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;

	old_state = can->can.state;

	bec.txerr = p->header[0] & 0xff;
	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
				      &rx_state);

	if (new_state != old_state) {
		struct net_device *ndev = can->can.dev;
		struct sk_buff *skb;
		struct can_frame *cf;
		struct skb_shared_hwtstamps *shhwtstamps;

		skb = alloc_can_err_skb(ndev, &cf);
		if (!skb) {
			struct net_device_stats *stats = &ndev->stats;

			stats->rx_dropped++;
			return -ENOMEM;
		}

		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
					   rx_state);

		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			cf->can_id |= CAN_ERR_RESTARTED;
		}

		shhwtstamps = skb_hwtstamps(skb);
		shhwtstamps->hwtstamp =
			ns_to_ktime(div_u64(p->timestamp * 1000,
					    can->kv_pcie->freq_to_ticks_div));

		/* Error counters travel in the error frame payload */
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		netif_rx(skb);
	}
	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;
	/* Check if we
	   need to poll the error counters */
	if (bec.txerr || bec.rxerr)
		mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);

	return 0;
}

/* Status packets serve several roles. Which one applies is decided by
 * flag bits and by comparing the packet's sequence number with the
 * controller's current command sequence number (they match only for
 * the response to the most recent command).
 */
static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 cmdseq;
	u32 status;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff;

	/* Reset done, start abort and flush */
	if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
	    p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
	    p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
	    cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
	    status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		u32 cmd;

		/* Ack the ABD interrupt and issue an abort/flush command
		 * tagged with the next command sequence number.
		 */
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
		   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
		   status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* Reset detected, send end of flush if no packet are in FIFO */
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (!count)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) {
		/* Response to status request received */
		kvaser_pciefd_handle_status_resp(can, p);
		if (can->can.state != CAN_STATE_BUS_OFF &&
		    can->can.state != CAN_STATE_ERROR_ACTIVE) {
			/* Keep polling while not back to error-active */
			mod_timer(&can->bec_poll_timer,
				  KVASER_PCIEFD_BEC_POLL_FREQ);
		}
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
		   !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) {
		/* Reset to bus on detected */
		if (!completion_done(&can->start_comp))
			complete(&can->start_comp);
	}

	return 0;
}

/* Extended-ack packet: either part of a flush (request end-of-flush
 * once the Tx FIFO has drained) or a completed transmission to account
 * through the echo-skb machinery.
 */
static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	/* If this is the last flushed packet, send end of flush */
	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (count == 0)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else {
		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
		int dlc = can_get_echo_skb(can->can.dev, echo_idx);
		struct net_device_stats *stats = &can->can.dev->stats;

		stats->tx_bytes += dlc;
		stats->tx_packets++;

		if (netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);
	}

	return 0;
}

/* A transmission was nacked: account a Tx error and queue an error
 * frame flagging either arbitration loss or a missing ACK.
 */
static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct sk_buff *skb;
	struct net_device_stats *stats = &can->can.dev->stats;
	struct can_frame *cf;

	skb = alloc_can_err_skb(can->can.dev, &cf);

	/* Statistics are updated even if no error skb could be allocated */
	stats->tx_errors++;
	if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
		if (skb)
			cf->can_id |= CAN_ERR_LOSTARB;
		can->can.can_stats.arbitration_lost++;
	} else if (skb) {
		cf->can_id |= CAN_ERR_ACK;
	}
	if (skb) {
		cf->can_id |= CAN_ERR_BUSERROR;
		stats->rx_bytes += cf->can_dlc;
		stats->rx_packets++;
		netif_rx(skb);
	} else {
		stats->rx_dropped++;
		netdev_warn(can->can.dev, "No memory left for err_skb\n");
	}
}

/* Ack packet for a transmitted frame: wake the Tx queue when FIFO
 * space is available again and account the completed (or nacked)
 * transmission via the echo-skb machinery.
 */
static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
					   struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	bool one_shot_fail = false;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];
	/* Ignore control packet ACK */
	if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
		return 0;

	if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
		kvaser_pciefd_handle_nack_packet(can, p);
		one_shot_fail = true;
	}

	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		netdev_dbg(can->can.dev, "Packet was flushed\n");
	} else {
		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
		int dlc = can_get_echo_skb(can->can.dev, echo_idx);
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT &&
		    netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);

		/* A nacked one-shot frame does not count as a successful Tx */
		if (!one_shot_fail) {
			struct net_device_stats *stats = &can->can.dev->stats;

			stats->tx_bytes += dlc;
			stats->tx_packets++;
		}
	}

	return 0;
}

/* End-of-flush packet: wake anyone waiting on flush_comp */
static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	if (!completion_done(&can->flush_comp))
		complete(&can->flush_comp);

	return 0;
}

static int
/* Parse one packet from Rx DMA buffer @dma_buf starting at 32-bit word
 * index *start_pos. Wire layout: one size word (0 means end of
 * buffer), two header words, a 64-bit timestamp, then payload words
 * for data packets. Advances *start_pos past the packet on success;
 * returns -EIO when the advertised size does not match what was
 * consumed.
 */
kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
			  int dma_buf)
{
	__le32 *buffer = pcie->dma_data[dma_buf];
	__le64 timestamp;
	struct kvaser_pciefd_rx_packet packet;
	struct kvaser_pciefd_rx_packet *p = &packet;
	u8 type;
	int pos = *start_pos;
	int size;
	int ret = 0;

	size = le32_to_cpu(buffer[pos++]);
	if (!size) {
		/* Zero size word: no more packets in this buffer */
		*start_pos = 0;
		return 0;
	}

	p->header[0] = le32_to_cpu(buffer[pos++]);
	p->header[1] = le32_to_cpu(buffer[pos++]);

	/* Read 64-bit timestamp */
	memcpy(&timestamp, &buffer[pos], sizeof(__le64));
	pos += 2;
	p->timestamp = le64_to_cpu(timestamp);

	type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf;
	switch (type) {
	case KVASER_PCIEFD_PACK_TYPE_DATA:
		ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
		if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
			u8 data_len;

			/* Skip the payload (4 bytes per buffer word) */
			data_len = can_dlc2len(p->header[1] >>
					       KVASER_PCIEFD_RPACKET_DLC_SHIFT);
			pos += DIV_ROUND_UP(data_len, 4);
		}
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK:
		ret = kvaser_pciefd_handle_ack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_STATUS:
		ret = kvaser_pciefd_handle_status_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ERROR:
		ret = kvaser_pciefd_handle_error_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
		ret = kvaser_pciefd_handle_eack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
		ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
	case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
	case KVASER_PCIEFD_PACK_TYPE_TXRQ:
		/* Known but unused packet types: log and skip */
		dev_info(&pcie->pci->dev,
			 "Received unexpected packet type 0x%08X\n", type);
		break;

	default:
		dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n",
			type);
		ret = -EIO;
		break;
	}

	if (ret)
		return ret;

	/* Position does not point to the end of the package,
	 * corrupted packet size?
	 */
	if ((*start_pos + size) != pos)
		return -EIO;

	/* Point to the next packet header, if any */
	*start_pos = pos;

	return ret;
}

/* Drain all packets from one Rx DMA buffer */
static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
	int pos = 0;
	int res = 0;

	do {
		res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
	} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);

	return res;
}

/* Shared-receive-buffer interrupt: process whichever DMA buffer(s) the
 * hardware reports complete, hand each buffer back to the hardware
 * (RDBx command) and ack the interrupt bits.
 */
static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
	u32 irq;

	irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
		kvaser_pciefd_read_buffer(pcie, 0);
		/* Reset DMA buffer 0 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
		kvaser_pciefd_read_buffer(pcie, 1);
		/* Reset DMA buffer 1 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);

	/* NOTE(review): the IRQ bits are acked only after the buffers
	 * have been processed and re-armed — confirm a buffer completion
	 * arriving in between cannot be lost.
	 */
	iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
	return 0;
}

/* Per-channel controller interrupt: log FIFO overflow and protocol
 * error conditions, then ack everything that was pending.
 */
static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
{
	u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
		netdev_err(can->can.dev, "Tx FIFO overflow\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
		netdev_err(can->can.dev,
			   "Fail to change bittiming, when not in reset mode\n");

	if (irq &
	    KVASER_PCIEFD_KCAN_IRQ_FDIC)
		netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
		netdev_err(can->can.dev, "Rx FIFO overflow\n");

	/* Ack everything that was read (write-one-to-clear) */
	iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	return 0;
}

/* Top-level interrupt handler for the shared PCI line. Per the IRQ
 * mask usage below, bit 4 (KVASER_PCIEFD_IRQ_SRB) selects the shared
 * receive buffer and bits 0..3 select the per-channel controllers.
 */
static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
	u32 board_irq;
	int i;

	board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);

	/* Not our interrupt — the PCI line is shared */
	if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK))
		return IRQ_NONE;

	if (board_irq & KVASER_PCIEFD_IRQ_SRB)
		kvaser_pciefd_receive_irq(pcie);

	for (i = 0; i < pcie->nr_channels; i++) {
		if (!pcie->can[i]) {
			dev_err(&pcie->pci->dev,
				"IRQ mask points to unallocated controller\n");
			break;
		}

		/* Check that mask matches channel (i) IRQ mask */
		if (board_irq & (1 << i))
			kvaser_pciefd_transmit_irq(pcie->can[i]);
	}

	iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	return IRQ_HANDLED;
}

/* Error-path teardown matching setup_can_ctrls: mask controller
 * interrupts, stop the PWM and free the (not yet registered) candevs.
 */
static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;
	struct kvaser_pciefd_can *can;

	for (i = 0; i < pcie->nr_channels; i++) {
		can = pcie->can[i];
		if (can) {
			iowrite32(0,
				  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

/* Probe: enable and map the PCI device, bring up the board, set up Rx
 * DMA and the channel controllers, hook the shared IRQ, enable the
 * interrupt sources, arm both DMA buffers and finally register the
 * network devices. Unwinds in reverse order via the labels below.
 */
static int kvaser_pciefd_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	int err;
	struct kvaser_pciefd *pcie;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci_set_drvdata(pdev, pcie);
	pcie->pci = pdev;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
	if (err)
		goto err_disable_pci;

	pcie->reg_base = pci_iomap(pdev, 0, 0);
	if (!pcie->reg_base) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	err = kvaser_pciefd_setup_board(pcie);
	if (err)
		goto err_pci_iounmap;

	err = kvaser_pciefd_setup_dma(pcie);
	if (err)
		goto err_pci_iounmap;

	pci_set_master(pdev);

	err = kvaser_pciefd_setup_can_ctrls(pcie);
	if (err)
		goto err_teardown_can_ctrls;

	/* The handler is installed before any interrupt source is enabled */
	err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
			  IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
	if (err)
		goto err_teardown_can_ctrls;

	/* Ack stale SRB interrupts, then enable the buffer-complete and
	 * over-/underflow sources.
	 */
	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
		  KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
		  KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);

	/* Reset IRQ handling, expected to be off before */
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IEN_REG);

	/* Ready the DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);

	err = kvaser_pciefd_reg_candev(pcie);
	if (err)
		goto err_free_irq;

	return 0;

err_free_irq:
	/* Disable PCI interrupts */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
	/* NOTE(review): SRB_IEN enabled above is left set on this path;
	 * presumably gated by the top-level IEN mask just cleared —
	 * confirm.
	 */
	free_irq(pcie->pci->irq, pcie);

err_teardown_can_ctrls:
	kvaser_pciefd_teardown_can_ctrls(pcie);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	pci_clear_master(pdev);

err_pci_iounmap:
	pci_iounmap(pdev, pcie->reg_base);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pci:
	pci_disable_device(pdev);

	return err;
}

/* Per-channel teardown for device removal: mask controller
 * interrupts, unregister the candev, stop the error-counter poll
 * timer and the PWM, then free the device.
 */
static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
{
	struct kvaser_pciefd_can *can;
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		can = pcie->can[i];
		if (can) {
			iowrite32(0,
				  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			unregister_candev(can->can.dev);
			del_timer(&can->bec_poll_timer);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

/* Device removal: tear down the channels, quiesce DMA and interrupt
 * generation, then release the PCI resources. The Rx DMA memory is
 * devm-managed and freed automatically.
 */
static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
	struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);

	kvaser_pciefd_remove_all_ctrls(pcie);

	/* Turn off IRQ generation */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);

	free_irq(pcie->pci->irq, pcie);

	pci_clear_master(pdev);
	pci_iounmap(pdev, pcie->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

/* PCI driver glue. kvaser_pciefd_id_table is defined earlier in this
 * file (not visible in this chunk).
 */
static struct pci_driver kvaser_pciefd = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.id_table = kvaser_pciefd_id_table,
	.probe = kvaser_pciefd_probe,
	.remove = kvaser_pciefd_remove,
};

module_pci_driver(kvaser_pciefd)