0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/kernel.h>
0010 #include <linux/module.h>
0011 #include <linux/device.h>
0012 #include <linux/ethtool.h>
0013 #include <linux/pci.h>
0014 #include <linux/can/dev.h>
0015 #include <linux/timer.h>
0016 #include <linux/netdevice.h>
0017 #include <linux/crc32.h>
0018 #include <linux/iopoll.h>
0019
0020 MODULE_LICENSE("Dual BSD/GPL");
0021 MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
0022 MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
0023
0024 #define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd"
0025
0026 #define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
0027 #define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
0028 #define KVASER_PCIEFD_MAX_ERR_REP 256
0029 #define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17
0030 #define KVASER_PCIEFD_MAX_CAN_CHANNELS 4
0031 #define KVASER_PCIEFD_DMA_COUNT 2
0032
0033 #define KVASER_PCIEFD_DMA_SIZE (4 * 1024)
0034 #define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0)
0035
0036 #define KVASER_PCIEFD_VENDOR 0x1a07
0037 #define KVASER_PCIEFD_4HS_ID 0x0d
0038 #define KVASER_PCIEFD_2HS_ID 0x0e
0039 #define KVASER_PCIEFD_HS_ID 0x0f
0040 #define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10
0041 #define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11
0042
0043
0044 #define KVASER_PCIEFD_IRQ_REG 0x40
0045 #define KVASER_PCIEFD_IEN_REG 0x50
0046
0047 #define KVASER_PCIEFD_DMA_MAP_BASE 0x1000
0048
0049 #define KVASER_PCIEFD_KCAN0_BASE 0x10000
0050 #define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000
0051 #define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
0052 #define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
0053 #define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
0054 #define KVASER_PCIEFD_KCAN_CMD_REG 0x400
0055 #define KVASER_PCIEFD_KCAN_IEN_REG 0x408
0056 #define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
0057 #define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414
0058 #define KVASER_PCIEFD_KCAN_STAT_REG 0x418
0059 #define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
0060 #define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
0061 #define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
0062 #define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
0063 #define KVASER_PCIEFD_KCAN_PWM_REG 0x430
0064
0065 #define KVASER_PCIEFD_LOOP_REG 0x1f000
0066
0067 #define KVASER_PCIEFD_SYSID_BASE 0x1f020
0068 #define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8)
0069 #define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc)
0070 #define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10)
0071 #define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
0072
0073 #define KVASER_PCIEFD_SRB_BASE 0x1f200
0074 #define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
0075 #define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
0076 #define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
0077 #define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
0078 #define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
0079
0080 #define KVASER_PCIEFD_SPI_BASE 0x1fc00
0081 #define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE
0082 #define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4)
0083 #define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8)
0084 #define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc)
0085 #define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14)
0086
0087 #define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f
0088 #define KVASER_PCIEFD_IRQ_SRB BIT(4)
0089
0090 #define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24
0091 #define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16
0092 #define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1
0093
0094
0095 #define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
0096 #define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
0097 #define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)
0098
0099
0100 #define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)
0101 #define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
0102
0103 #define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
0104 #define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
0105
0106 #define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
0107 #define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)
0108
0109
0110 #define KVASER_PCIEFD_SRB_STAT_DI BIT(15)
0111
0112 #define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
0113
0114
0115 #define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)
0116
0117
0118 #define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024)
0119 #define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L)
0120 #define KVASER_PCIEFD_CFG_MAX_PARAMS 256
0121 #define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d
0122 #define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24
0123 #define KVASER_PCIEFD_CFG_SYS_VER 1
0124 #define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130
0125 #define KVASER_PCIEFD_SPI_TMT BIT(5)
0126 #define KVASER_PCIEFD_SPI_TRDY BIT(6)
0127 #define KVASER_PCIEFD_SPI_RRDY BIT(7)
0128 #define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14
0129
0130 #define KVASER_PCIEFD_FLASH_RES_CMD 0xab
0131 #define KVASER_PCIEFD_FLASH_READ_CMD 0x3
0132 #define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5
0133
0134
0135 #define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29)
0136 #define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29)
0137
0138 #define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16
0139
0140 #define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
0141
0142 #define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)
0143
0144
0145 #define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)
0146
0147 #define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
0148
0149 #define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
0150
0151 #define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
0152
0153 #define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
0154
0155 #define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
0156
0157 #define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
0158
0159 #define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
0160
0161 #define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
0162
0163 #define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)
0164
0165 #define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16
0166
0167 #define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24
0168
0169 #define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
0170
0171 #define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
0172
0173 #define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
0174
0175 #define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
0176
0177 #define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
0178
0179 #define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
0180
0181 #define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
0182 #define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \
0183 KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \
0184 KVASER_PCIEFD_KCAN_STAT_IRM)
0185
0186
0187 #define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)
0188
0189 #define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
0190
0191 #define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
0192
0193 #define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
0194
0195 #define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
0196
0197 #define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
0198
0199 #define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)
0200
0201 #define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13
0202 #define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17
0203 #define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26
0204
0205 #define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16
0206
0207
0208 #define KVASER_PCIEFD_PACK_TYPE_DATA 0
0209 #define KVASER_PCIEFD_PACK_TYPE_ACK 1
0210 #define KVASER_PCIEFD_PACK_TYPE_TXRQ 2
0211 #define KVASER_PCIEFD_PACK_TYPE_ERROR 3
0212 #define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4
0213 #define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5
0214 #define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6
0215 #define KVASER_PCIEFD_PACK_TYPE_STATUS 8
0216 #define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9
0217
0218
0219 #define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff
0220 #define KVASER_PCIEFD_PACKET_CHID_SHIFT 25
0221 #define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28
0222
0223
0224 #define KVASER_PCIEFD_RPACKET_IDE BIT(30)
0225 #define KVASER_PCIEFD_RPACKET_RTR BIT(29)
0226
0227 #define KVASER_PCIEFD_RPACKET_ESI BIT(13)
0228 #define KVASER_PCIEFD_RPACKET_BRS BIT(14)
0229 #define KVASER_PCIEFD_RPACKET_FDF BIT(15)
0230 #define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8
0231
0232 #define KVASER_PCIEFD_TPACKET_SMS BIT(16)
0233 #define KVASER_PCIEFD_TPACKET_AREQ BIT(31)
0234
0235
0236 #define KVASER_PCIEFD_APACKET_FLU BIT(8)
0237 #define KVASER_PCIEFD_APACKET_CT BIT(9)
0238 #define KVASER_PCIEFD_APACKET_ABL BIT(10)
0239 #define KVASER_PCIEFD_APACKET_NACK BIT(11)
0240
0241
0242 #define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8
0243 #define KVASER_PCIEFD_SPACK_BOFF BIT(16)
0244 #define KVASER_PCIEFD_SPACK_IDET BIT(20)
0245 #define KVASER_PCIEFD_SPACK_IRM BIT(21)
0246 #define KVASER_PCIEFD_SPACK_RMCD BIT(22)
0247
0248 #define KVASER_PCIEFD_SPACK_AUTO BIT(21)
0249 #define KVASER_PCIEFD_SPACK_EWLR BIT(23)
0250 #define KVASER_PCIEFD_SPACK_EPLR BIT(24)
0251
0252
0253 #define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)
0254
0255 struct kvaser_pciefd;
0256
/* Per-channel state for one KCAN CAN controller on the board. */
struct kvaser_pciefd_can {
	struct can_priv can;		/* must be first, netdev_priv() relies on it */
	struct kvaser_pciefd *kv_pcie;	/* back-pointer to the owning board */
	void __iomem *reg_base;		/* this channel's KCAN register window */
	struct can_berr_counter bec;	/* last known tx/rx error counters */
	u8 cmd_seq;			/* sequence number for KCAN commands */
	int err_rep_cnt;		/* error reports seen since last reset/poll */
	int echo_idx;			/* next free echo skb slot for Tx */
	spinlock_t lock;		/* protects KCAN MODE/CMD register sequences */
	spinlock_t echo_lock;		/* protects echo skb bookkeeping + Tx FIFO push */
	struct timer_list bec_poll_timer;	/* periodic status-request timer */
	/* Completed on controller start / flush done — presumably from the
	 * IRQ handler (not visible in this chunk); bus_on()/stop() wait on them.
	 */
	struct completion start_comp, flush_comp;
};
0270
/* Board-level state shared by all CAN channels of one PCIe device. */
struct kvaser_pciefd {
	struct pci_dev *pci;		/* underlying PCI device */
	void __iomem *reg_base;		/* mapped BAR with all register blocks */
	struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
	void *dma_data[KVASER_PCIEFD_DMA_COUNT];	/* Rx DMA buffers (see setup_dma) */
	u8 nr_channels;			/* channel count read from config flash */
	u32 bus_freq;			/* bus clock, used for PWM top calculation */
	u32 freq;			/* CAN core clock, reported via can.clock.freq */
	u32 freq_to_ticks_div;		/* divider for timestamp conversion — assumed; set outside this chunk */
};
0281
/* Received packet: two header words plus a hardware timestamp. */
struct kvaser_pciefd_rx_packet {
	u32 header[2];
	u64 timestamp;
};
0286
/* Transmit packet: two header words plus up to 64 payload bytes (CAN FD max). */
struct kvaser_pciefd_tx_packet {
	u32 header[2];
	u8 data[64];
};
0291
/* Bit-timing limits advertised to the CAN core for both nominal and data
 * phase (the same constants are installed for both in setup_can_ctrls()).
 */
static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 512,
	.tseg2_min = 1,
	.tseg2_max = 32,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 8192,
	.brp_inc = 1,
};
0303
/* One parameter record in the config flash image (little-endian on flash). */
struct kvaser_pciefd_cfg_param {
	__le32 magic;	/* per-parameter magic value */
	__le32 nr;	/* parameter number */
	__le32 len;	/* number of valid bytes in data[] */
	u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ];
};
0310
/* Layout of the configuration image stored in flash. The CRC covers the
 * params[] array only (see kvaser_pciefd_cfg_read_and_verify()).
 */
struct kvaser_pciefd_cfg_img {
	__le32 version;	/* must equal KVASER_PCIEFD_CFG_SYS_VER */
	__le32 magic;	/* must equal KVASER_PCIEFD_CFG_MAGIC */
	__le32 crc;	/* big-endian CRC32 of params[] */
	struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS];
};
0317
0318 static struct pci_device_id kvaser_pciefd_id_table[] = {
0319 { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), },
0320 { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), },
0321 { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), },
0322 { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), },
0323 { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), },
0324 { 0,},
0325 };
0326 MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);
0327
0328
0329 static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
0330 {
0331 u32 res;
0332 int ret;
0333
0334 ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
0335 res, res & msk, 0, 10);
0336
0337 return ret;
0338 }
0339
/* Run one SPI transaction against the config flash: clock out @tx_len
 * command bytes, then clock in @rx_len response bytes by transmitting
 * dummy zero bytes. Returns 0 on success, -EIO on any handshake timeout.
 */
static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
				 u32 tx_len, u8 *rx, u32 rx_len)
{
	int c;

	/* Assert chip-select 0 and enable the SPI controller */
	iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG);
	iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
	/* Drain any stale byte from the receive register */
	ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);

	c = tx_len;
	while (c--) {
		/* Wait for transmit-ready before pushing each command byte */
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
			return -EIO;

		iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);

		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
			return -EIO;

		/* Discard the byte clocked in while transmitting the command */
		ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
	}

	c = rx_len;
	while (c-- > 0) {
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
			return -EIO;

		/* Clock out a dummy byte to shift one response byte in */
		iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);

		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
			return -EIO;

		*rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
	}

	/* Wait for the transmitter to go idle before deasserting the controller */
	if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT))
		return -EIO;

	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);

	/* Both loops leave c at -1 when they ran to completion */
	if (c != -1) {
		dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n");
		return -EIO;
	}

	return 0;
}
0387
0388 static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie,
0389 struct kvaser_pciefd_cfg_img *img)
0390 {
0391 int offset = KVASER_PCIEFD_CFG_IMG_OFFSET;
0392 int res, crc;
0393 u8 *crc_buff;
0394
0395 u8 cmd[] = {
0396 KVASER_PCIEFD_FLASH_READ_CMD,
0397 (u8)((offset >> 16) & 0xff),
0398 (u8)((offset >> 8) & 0xff),
0399 (u8)(offset & 0xff)
0400 };
0401
0402 res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img,
0403 KVASER_PCIEFD_CFG_IMG_SZ);
0404 if (res)
0405 return res;
0406
0407 crc_buff = (u8 *)img->params;
0408
0409 if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) {
0410 dev_err(&pcie->pci->dev,
0411 "Config flash corrupted, version number is wrong\n");
0412 return -ENODEV;
0413 }
0414
0415 if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) {
0416 dev_err(&pcie->pci->dev,
0417 "Config flash corrupted, magic number is wrong\n");
0418 return -ENODEV;
0419 }
0420
0421 crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params));
0422 if (le32_to_cpu(img->crc) != crc) {
0423 dev_err(&pcie->pci->dev,
0424 "Stored CRC does not match flash image contents\n");
0425 return -EIO;
0426 }
0427
0428 return 0;
0429 }
0430
0431 static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie,
0432 struct kvaser_pciefd_cfg_img *img)
0433 {
0434 struct kvaser_pciefd_cfg_param *param;
0435
0436 param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN];
0437 memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len));
0438 }
0439
/* Probe the config flash, read and verify its image, and pull the
 * channel count from it. Returns 0 on success or a negative errno.
 */
static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie)
{
	int res;
	struct kvaser_pciefd_cfg_img *img;

	/* RES (release from deep power-down) + 3 dummy bytes; the single
	 * response byte is the electronic signature / flash ID.
	 */
	u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0};

	res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1);
	if (res)
		return -EIO;

	img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL);
	if (!img)
		return -ENOMEM;

	/* Only the EPCS16 flash part is supported */
	if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) {
		dev_err(&pcie->pci->dev,
			"Flash id is 0x%x instead of expected EPCS16 (0x%x)\n",
			cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16);

		res = -ENODEV;
		goto image_free;
	}

	/* Read the status register; bit 0 is write-in-progress (WIP) */
	cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD;
	res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1);
	if (res) {
		goto image_free;
	} else if (cmd[0] & 1) {
		res = -EIO;
		/* No write should ever be in progress at probe time */
		dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n");
		goto image_free;
	}

	res = kvaser_pciefd_cfg_read_and_verify(pcie, img);
	if (res) {
		res = -EIO;
		goto image_free;
	}

	kvaser_pciefd_cfg_read_params(pcie, img);

image_free:
	kfree(img);
	return res;
}
0488
0489 static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
0490 {
0491 u32 cmd;
0492
0493 cmd = KVASER_PCIEFD_KCAN_CMD_SRQ;
0494 cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
0495 iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
0496 }
0497
0498 static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
0499 {
0500 u32 mode;
0501 unsigned long irq;
0502
0503 spin_lock_irqsave(&can->lock, irq);
0504 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
0505 if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) {
0506 mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
0507 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
0508 }
0509 spin_unlock_irqrestore(&can->lock, irq);
0510 }
0511
0512 static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
0513 {
0514 u32 mode;
0515 unsigned long irq;
0516
0517 spin_lock_irqsave(&can->lock, irq);
0518 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
0519 mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN;
0520 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
0521 spin_unlock_irqrestore(&can->lock, irq);
0522 }
0523
0524 static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
0525 {
0526 u32 msk;
0527
0528 msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF |
0529 KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
0530 KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
0531 KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
0532 KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;
0533
0534 iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
0535
0536 return 0;
0537 }
0538
0539 static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
0540 {
0541 u32 mode;
0542 unsigned long irq;
0543
0544 spin_lock_irqsave(&can->lock, irq);
0545
0546 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
0547 if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
0548 mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
0549 if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
0550 mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
0551 else
0552 mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
0553 } else {
0554 mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
0555 mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
0556 }
0557
0558 if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
0559 mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
0560
0561 mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
0562 mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
0563
0564 mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
0565 mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
0566 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
0567
0568 spin_unlock_irqrestore(&can->lock, irq);
0569 }
0570
/* Kick off a Tx flush of the controller. The flush completes via
 * interrupt (ABD/TFD are the only interrupts left enabled here);
 * callers wait on can->flush_comp afterwards.
 */
static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
{
	u32 status;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	/* Ack all pending interrupts, then enable only abort/flush done */
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		u32 cmd;

		/* Controller is idle: issue an abort-transmit command */
		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
	} else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
		u32 mode;

		/* Not idle and no reset-mode request pending: force reset mode */
		mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
		mode |= KVASER_PCIEFD_KCAN_MODE_RM;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}

	spin_unlock_irqrestore(&can->lock, irq);
}
0600
/* Bring the controller onto the bus: flush pending Tx, take the
 * controller out of reset mode, wait for it to start, then enable the
 * normal interrupt set and mark the device ERROR_ACTIVE.
 * Returns 0 on success, -ETIMEDOUT if flush or start never completes.
 */
static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	del_timer(&can->bec_poll_timer);

	/* Start a flush unless one is already in flight */
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on flush\n");
		return -ETIMEDOUT;
	}

	spin_lock_irqsave(&can->lock, irq);
	/* Mask everything, ack stale interrupts, then enable only ABD/TFD
	 * so the start sequence can complete via interrupt.
	 */
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	/* Leave reset mode */
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);

	if (!wait_for_completion_timeout(&can->start_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on reset\n");
		return -ETIMEDOUT;
	}
	/* Mask and ack again before switching to the full interrupt set */
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	kvaser_pciefd_set_tx_irq(can);
	kvaser_pciefd_setup_controller(can);

	can->can.state = CAN_STATE_ERROR_ACTIVE;
	netif_wake_queue(can->can.dev);
	/* Fresh bus-on: clear cached error counters and report count */
	can->bec.txerr = 0;
	can->bec.rxerr = 0;
	can->err_rep_cnt = 0;

	return 0;
}
0649
0650 static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
0651 {
0652 u8 top;
0653 u32 pwm_ctrl;
0654 unsigned long irq;
0655
0656 spin_lock_irqsave(&can->lock, irq);
0657 pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
0658 top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff;
0659
0660
0661 pwm_ctrl |= top;
0662 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
0663 spin_unlock_irqrestore(&can->lock, irq);
0664 }
0665
/* Start the transceiver PWM output: first program the period (top in
 * both fields gives 0% duty while reconfiguring), then set the trigger
 * for the target duty cycle.
 */
static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
{
	int top, trigger;
	u32 pwm_ctrl;
	unsigned long irq;

	kvaser_pciefd_pwm_stop(can);
	spin_lock_irqsave(&can->lock, irq);

	/* Period = bus clock / (2 * 500 kHz) -> ~500 kHz PWM frequency */
	top = can->kv_pcie->bus_freq / (2 * 500000) - 1;

	/* Set duty cycle to zero while changing the period */
	pwm_ctrl = top & 0xff;
	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);

	/* Set trigger for a 95% duty cycle (rounded) */
	trigger = (100 * top - 95 * (top + 1) + 50) / 100;
	pwm_ctrl = trigger & 0xff;
	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}
0689
/* .ndo_open: open the candev, then bring the controller onto the bus.
 * Undoes the candev open if bus-on fails.
 */
static int kvaser_pciefd_open(struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	int err;

	err = open_candev(netdev);
	if (err)
		return err;

	err = kvaser_pciefd_bus_on(can);
	if (err)
		close_candev(netdev);

	return err;
}
0707
0708 static int kvaser_pciefd_stop(struct net_device *netdev)
0709 {
0710 struct kvaser_pciefd_can *can = netdev_priv(netdev);
0711 int ret = 0;
0712
0713
0714 if (!completion_done(&can->flush_comp))
0715 kvaser_pciefd_start_controller_flush(can);
0716
0717 if (!wait_for_completion_timeout(&can->flush_comp,
0718 KVASER_PCIEFD_WAIT_TIMEOUT)) {
0719 netdev_err(can->can.dev, "Timeout during stop\n");
0720 ret = -ETIMEDOUT;
0721 } else {
0722 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
0723 del_timer(&can->bec_poll_timer);
0724 }
0725 close_candev(netdev);
0726
0727 return ret;
0728 }
0729
0730 static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
0731 struct kvaser_pciefd_can *can,
0732 struct sk_buff *skb)
0733 {
0734 struct canfd_frame *cf = (struct canfd_frame *)skb->data;
0735 int packet_size;
0736 int seq = can->echo_idx;
0737
0738 memset(p, 0, sizeof(*p));
0739
0740 if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
0741 p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
0742
0743 if (cf->can_id & CAN_RTR_FLAG)
0744 p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;
0745
0746 if (cf->can_id & CAN_EFF_FLAG)
0747 p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;
0748
0749 p->header[0] |= cf->can_id & CAN_EFF_MASK;
0750 p->header[1] |= can_fd_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;
0751 p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;
0752
0753 if (can_is_canfd_skb(skb)) {
0754 p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
0755 if (cf->flags & CANFD_BRS)
0756 p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
0757 if (cf->flags & CANFD_ESI)
0758 p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
0759 }
0760
0761 p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK;
0762
0763 packet_size = cf->len;
0764 memcpy(p->data, cf->data, packet_size);
0765
0766 return DIV_ROUND_UP(packet_size, 4);
0767 }
0768
/* .ndo_start_xmit: queue one frame to the hardware Tx FIFO. The echo
 * skb slot, echo index advance and FIFO push all happen under
 * echo_lock so the ack path sees a consistent state.
 */
static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	unsigned long irq_flags;
	struct kvaser_pciefd_tx_packet packet;
	int nwords;
	u8 count;

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);

	spin_lock_irqsave(&can->echo_lock, irq_flags);

	/* Store the skb for echo on Tx completion, indexed by seq number */
	can_put_echo_skb(skb, netdev, can->echo_idx, 0);

	/* Advance to the next free echo slot (wraps at echo_skb_max) */
	can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;

	/* Push the two header words ... */
	iowrite32(packet.header[0],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
	iowrite32(packet.header[1],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);

	if (nwords) {
		u32 data_last = ((u32 *)packet.data)[nwords - 1];

		/* ... then all but the final data word ... */
		iowrite32_rep(can->reg_base +
			      KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
			      nwords - 1);

		/* ... and the final word through the LAST register, which
		 * marks end-of-packet to the hardware.
		 */
		__raw_writel(data_last, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	} else {
		/* No payload: terminate the packet with a dummy LAST write */
		__raw_writel(0, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	}

	/* Low byte of TX_NPACKETS — presumably packets currently queued
	 * in the FIFO; TODO confirm against hardware docs.
	 */
	count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);

	/* Stop the queue when the FIFO is full or the next echo slot is
	 * still occupied (no free sequence number).
	 */
	if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT ||
	    can->can.echo_skb[can->echo_idx])
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&can->echo_lock, irq_flags);

	return NETDEV_TX_OK;
}
0825
/* Write nominal (@data == false) or data-phase (@data == true) bit
 * timing. The controller must be in reset mode while the BTR register
 * is written; the previous mode is restored afterwards.
 * Returns 0 on success, -EBUSY if reset mode is never confirmed.
 */
static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
{
	u32 mode, test, btrn;
	unsigned long irq_flags;
	int ret;
	struct can_bittiming *bt;

	if (data)
		bt = &can->can.data_bittiming;
	else
		bt = &can->can.bittiming;

	/* Pack tseg2 (5 bits), tseg1 (9 bits), sjw (4 bits), brp (13 bits) */
	btrn = ((bt->phase_seg2 - 1) & 0x1f) <<
	       KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT |
	       (((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) <<
	       KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT |
	       ((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT |
	       ((bt->brp - 1) & 0x1fff);

	spin_lock_irqsave(&can->lock, irq_flags);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Request reset mode */
	iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
		  can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Wait for the controller to confirm reset mode */
	ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
				 test, test & KVASER_PCIEFD_KCAN_MODE_RM,
				 0, 10);

	if (ret) {
		spin_unlock_irqrestore(&can->lock, irq_flags);
		return -EBUSY;
	}

	if (data)
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
	else
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);

	/* Restore the previous mode (leaves reset mode if it was clear) */
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	spin_unlock_irqrestore(&can->lock, irq_flags);
	return 0;
}
0873
0874 static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev)
0875 {
0876 return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
0877 }
0878
0879 static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev)
0880 {
0881 return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
0882 }
0883
0884 static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
0885 {
0886 struct kvaser_pciefd_can *can = netdev_priv(ndev);
0887 int ret = 0;
0888
0889 switch (mode) {
0890 case CAN_MODE_START:
0891 if (!can->can.restart_ms)
0892 ret = kvaser_pciefd_bus_on(can);
0893 break;
0894 default:
0895 return -EOPNOTSUPP;
0896 }
0897
0898 return ret;
0899 }
0900
0901 static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
0902 struct can_berr_counter *bec)
0903 {
0904 struct kvaser_pciefd_can *can = netdev_priv(ndev);
0905
0906 bec->rxerr = can->bec.rxerr;
0907 bec->txerr = can->bec.txerr;
0908 return 0;
0909 }
0910
/* Periodic poll: re-enable error-packet generation and request a fresh
 * status packet so the error counters stay up to date, then reset the
 * error-report throttle counter.
 */
static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
{
	struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);

	kvaser_pciefd_enable_err_gen(can);
	kvaser_pciefd_request_status(can);
	can->err_rep_cnt = 0;
}
0919
/* Net device operations for each CAN channel. */
static const struct net_device_ops kvaser_pciefd_netdev_ops = {
	.ndo_open = kvaser_pciefd_open,
	.ndo_stop = kvaser_pciefd_stop,
	.ndo_eth_ioctl = can_eth_ioctl_hwts,	/* hardware timestamping ioctls */
	.ndo_start_xmit = kvaser_pciefd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
0927
/* Ethtool operations: report hardware timestamping capability. */
static const struct ethtool_ops kvaser_pciefd_ethtool_ops = {
	.get_ts_info = can_ethtool_op_get_ts_info_hwts,
};
0931
/* Allocate and initialize one candev per channel, verify per-channel
 * hardware capabilities, and start the transceiver PWM. Returns 0 on
 * success or a negative errno (the current netdev is freed on failure;
 * earlier channels are presumably cleaned up by the caller — not
 * visible in this chunk).
 */
static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct net_device *netdev;
		struct kvaser_pciefd_can *can;
		u32 status, tx_npackets;

		netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
				      KVASER_PCIEFD_CAN_TX_MAX_COUNT);
		if (!netdev)
			return -ENOMEM;

		can = netdev_priv(netdev);
		netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
		netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops;
		/* Each channel's KCAN block is at a fixed stride from KCAN0 */
		can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
				i * KVASER_PCIEFD_KCAN_BASE_OFFSET;

		can->kv_pcie = pcie;
		can->cmd_seq = 0;
		can->err_rep_cnt = 0;
		can->bec.txerr = 0;
		can->bec.rxerr = 0;

		init_completion(&can->start_comp);
		init_completion(&can->flush_comp);
		timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
			    0);

		/* Disable bus-load reporting */
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);

		/* The FIFO must hold at least the driver's max Tx count */
		tx_npackets = ioread32(can->reg_base +
				       KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
		if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
		      0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) {
			dev_err(&pcie->pci->dev,
				"Max Tx count is smaller than expected\n");

			free_candev(netdev);
			return -ENODEV;
		}

		can->can.clock.freq = pcie->freq;
		can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT;
		can->echo_idx = 0;
		spin_lock_init(&can->echo_lock);
		spin_lock_init(&can->lock);
		/* Same timing limits for nominal and data phase */
		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;

		can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
		can->can.do_set_data_bittiming =
			kvaser_pciefd_set_data_bittiming;

		can->can.do_set_mode = kvaser_pciefd_set_mode;
		can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;

		can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
					      CAN_CTRLMODE_FD |
					      CAN_CTRLMODE_FD_NON_ISO;

		/* All supported hardware must be CAN FD capable */
		status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
		if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
			dev_err(&pcie->pci->dev,
				"CAN FD not supported as expected %d\n", i);

			free_candev(netdev);
			return -ENODEV;
		}

		/* Single-shot mode only if the hardware advertises it */
		if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
			can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;

		netdev->flags |= IFF_ECHO;

		SET_NETDEV_DEV(netdev, &pcie->pci->dev);

		/* Ack stale interrupts, enable abort/flush done only */
		iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
			  KVASER_PCIEFD_KCAN_IRQ_TFD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

		pcie->can[i] = can;
		kvaser_pciefd_pwm_start(can);
	}

	return 0;
}
1023
1024 static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
1025 {
1026 int i;
1027
1028 for (i = 0; i < pcie->nr_channels; i++) {
1029 int err = register_candev(pcie->can[i]->can.dev);
1030
1031 if (err) {
1032 int j;
1033
1034
1035 for (j = 0; j < i; j++)
1036 unregister_candev(pcie->can[j]->can.dev);
1037 return err;
1038 }
1039 }
1040
1041 return 0;
1042 }
1043
1044 static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie,
1045 dma_addr_t addr, int offset)
1046 {
1047 u32 word1, word2;
1048
1049 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1050 word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT;
1051 word2 = addr >> 32;
1052 #else
1053 word1 = addr;
1054 word2 = 0;
1055 #endif
1056 iowrite32(word1, pcie->reg_base + offset);
1057 iowrite32(word2, pcie->reg_base + offset + 4);
1058 }
1059
/* Allocate the two Rx DMA buffers, program their addresses into the
 * DMA map, reset both buffers, and enable DMA. Returns 0 on success or
 * a negative errno. Buffers are device-managed (dmam_), so no explicit
 * free path is needed.
 */
static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
	int i;
	u32 srb_status;
	dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];

	/* Disable DMA while (re)configuring the buffers */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
		/* Each map entry is two 32-bit words */
		unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;

		pcie->dma_data[i] =
			dmam_alloc_coherent(&pcie->pci->dev,
					    KVASER_PCIEFD_DMA_SIZE,
					    &dma_addr[i],
					    GFP_KERNEL);

		if (!pcie->dma_data[i] || !dma_addr[i]) {
			dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
				KVASER_PCIEFD_DMA_SIZE);
			return -ENOMEM;
		}

		kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset);
	}

	/* Reset the FIFO and release both DMA buffers back to hardware */
	iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
		  KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);

	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
		dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
		return -EIO;
	}

	/* Enable the DMA engine */
	iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);

	return 0;
}
1103
/* Read board configuration/identity registers and derive driver parameters
 * (channel count, bus/CAN clock frequencies, timestamp divider).
 *
 * Returns 0 on success or a negative errno (config read failure, channel
 * count mismatch, or missing DMA capability).
 */
static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
{
	u32 sysid, srb_status, build;
	u8 sysid_nr_chan;
	int ret;

	ret = kvaser_pciefd_read_cfg(pcie);
	if (ret)
		return ret;

	/* Cross-check the channel count from the config area against the
	 * SYSID register.
	 */
	sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
	sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff;
	if (pcie->nr_channels != sysid_nr_chan) {
		dev_err(&pcie->pci->dev,
			"Number of channels does not match: %u vs %u\n",
			pcie->nr_channels,
			sysid_nr_chan);
		return -ENODEV;
	}

	if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS)
		pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS;

	build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
	dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n",
		(sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff,
		sysid & 0xff,
		(build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff);

	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
		dev_err(&pcie->pci->dev,
			"Hardware without DMA is not supported\n");
		return -ENODEV;
	}

	pcie->bus_freq = ioread32(pcie->reg_base +
				  KVASER_PCIEFD_SYSID_BUSFREQ_REG);
	pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG);
	/* Divider converting CAN-clock ticks to microseconds; clamp to 1 to
	 * avoid a divide-by-zero for sub-MHz clocks.
	 */
	pcie->freq_to_ticks_div = pcie->freq / 1000000;
	if (pcie->freq_to_ticks_div == 0)
		pcie->freq_to_ticks_div = 1;

	/* Write 0 to the loop register — presumably turns off all loopback
	 * functionality (register name suggests so); TODO confirm against
	 * hardware documentation.
	 */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);
	return ret;
}
1151
1152 static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
1153 struct kvaser_pciefd_rx_packet *p,
1154 __le32 *data)
1155 {
1156 struct sk_buff *skb;
1157 struct canfd_frame *cf;
1158 struct can_priv *priv;
1159 struct net_device_stats *stats;
1160 struct skb_shared_hwtstamps *shhwtstamps;
1161 u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1162
1163 if (ch_id >= pcie->nr_channels)
1164 return -EIO;
1165
1166 priv = &pcie->can[ch_id]->can;
1167 stats = &priv->dev->stats;
1168
1169 if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
1170 skb = alloc_canfd_skb(priv->dev, &cf);
1171 if (!skb) {
1172 stats->rx_dropped++;
1173 return -ENOMEM;
1174 }
1175
1176 if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
1177 cf->flags |= CANFD_BRS;
1178
1179 if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
1180 cf->flags |= CANFD_ESI;
1181 } else {
1182 skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
1183 if (!skb) {
1184 stats->rx_dropped++;
1185 return -ENOMEM;
1186 }
1187 }
1188
1189 cf->can_id = p->header[0] & CAN_EFF_MASK;
1190 if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
1191 cf->can_id |= CAN_EFF_FLAG;
1192
1193 cf->len = can_fd_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);
1194
1195 if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) {
1196 cf->can_id |= CAN_RTR_FLAG;
1197 } else {
1198 memcpy(cf->data, data, cf->len);
1199
1200 stats->rx_bytes += cf->len;
1201 }
1202 stats->rx_packets++;
1203
1204 shhwtstamps = skb_hwtstamps(skb);
1205
1206 shhwtstamps->hwtstamp =
1207 ns_to_ktime(div_u64(p->timestamp * 1000,
1208 pcie->freq_to_ticks_div));
1209
1210 return netif_rx(skb);
1211 }
1212
/* Apply a CAN state transition and perform bus-off housekeeping.
 *
 * @cf may be NULL (allocation failure in the caller); can_change_state()
 * is still invoked so the state bookkeeping happens regardless.
 */
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
				       struct can_frame *cf,
				       enum can_state new_state,
				       enum can_state tx_state,
				       enum can_state rx_state)
{
	can_change_state(can->can.dev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		struct net_device *ndev = can->can.dev;
		unsigned long irq_flags;

		/* Stop the queue under the channel lock */
		spin_lock_irqsave(&can->lock, irq_flags);
		netif_stop_queue(can->can.dev);
		spin_unlock_irqrestore(&can->lock, irq_flags);

		/* No automatic restart configured: flush the controller and
		 * report bus-off; recovery then requires manual restart.
		 */
		if (!can->can.restart_ms) {
			kvaser_pciefd_start_controller_flush(can);
			can_bus_off(ndev);
		}
	}
}
1236
1237 static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
1238 struct can_berr_counter *bec,
1239 enum can_state *new_state,
1240 enum can_state *tx_state,
1241 enum can_state *rx_state)
1242 {
1243 if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
1244 p->header[0] & KVASER_PCIEFD_SPACK_IRM)
1245 *new_state = CAN_STATE_BUS_OFF;
1246 else if (bec->txerr >= 255 || bec->rxerr >= 255)
1247 *new_state = CAN_STATE_BUS_OFF;
1248 else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
1249 *new_state = CAN_STATE_ERROR_PASSIVE;
1250 else if (bec->txerr >= 128 || bec->rxerr >= 128)
1251 *new_state = CAN_STATE_ERROR_PASSIVE;
1252 else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
1253 *new_state = CAN_STATE_ERROR_WARNING;
1254 else if (bec->txerr >= 96 || bec->rxerr >= 96)
1255 *new_state = CAN_STATE_ERROR_WARNING;
1256 else
1257 *new_state = CAN_STATE_ERROR_ACTIVE;
1258
1259 *tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
1260 *rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
1261 }
1262
/* Build and deliver an error frame for a received error packet.
 *
 * Updates the channel state from the counters carried in the packet, bumps
 * error statistics, and emits an error skb carrying the new counters and a
 * hardware timestamp.
 *
 * Returns 0 on success or -ENOMEM if no error skb could be allocated; the
 * state and counter bookkeeping is still performed in that case.
 */
static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
					struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;
	struct net_device *ndev = can->can.dev;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct net_device_stats *stats = &ndev->stats;

	old_state = can->can.state;

	/* Tx/Rx error counters are packed into header[0] */
	bec.txerr = p->header[0] & 0xff;
	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
				      &rx_state);

	skb = alloc_can_err_skb(ndev, &cf);

	if (new_state != old_state) {
		/* cf may be NULL here; the state change is applied anyway */
		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
					   rx_state);

		/* Bus-off -> active with automatic restart: count it and tag
		 * the error frame as a restart.
		 */
		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			if (skb)
				cf->can_id |= CAN_ERR_RESTARTED;
		}
	}

	can->err_rep_cnt++;
	can->can.can_stats.bus_error++;
	if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
		stats->tx_errors++;
	else
		stats->rx_errors++;

	/* Cache the latest counters for get_berr_counter/polling */
	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;

	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	shhwtstamps = skb_hwtstamps(skb);
	shhwtstamps->hwtstamp =
		ns_to_ktime(div_u64(p->timestamp * 1000,
				    can->kv_pcie->freq_to_ticks_div));
	/* CAN_ERR_CNT marks data[6]/data[7] as carrying the error counters */
	cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;

	cf->data[6] = bec.txerr;
	cf->data[7] = bec.rxerr;

	netif_rx(skb);
	return 0;
}
1324
1325 static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
1326 struct kvaser_pciefd_rx_packet *p)
1327 {
1328 struct kvaser_pciefd_can *can;
1329 u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1330
1331 if (ch_id >= pcie->nr_channels)
1332 return -EIO;
1333
1334 can = pcie->can[ch_id];
1335
1336 kvaser_pciefd_rx_error_frame(can, p);
1337 if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
1338
1339 kvaser_pciefd_disable_err_gen(can);
1340
1341 mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
1342 return 0;
1343 }
1344
/* Process a status response: update channel state from the counters in the
 * packet and, on a state change, emit a timestamped error frame.
 *
 * Returns 0 on success or -ENOMEM if the state changed but no skb could be
 * allocated (counters are NOT cached in that case — early return).
 */
static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;

	old_state = can->can.state;

	/* Tx/Rx error counters are packed into header[0] */
	bec.txerr = p->header[0] & 0xff;
	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
				      &rx_state);

	if (new_state != old_state) {
		struct net_device *ndev = can->can.dev;
		struct sk_buff *skb;
		struct can_frame *cf;
		struct skb_shared_hwtstamps *shhwtstamps;

		skb = alloc_can_err_skb(ndev, &cf);
		if (!skb) {
			struct net_device_stats *stats = &ndev->stats;

			stats->rx_dropped++;
			return -ENOMEM;
		}

		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
					   rx_state);

		/* Bus-off -> active with automatic restart configured */
		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			cf->can_id |= CAN_ERR_RESTARTED;
		}

		shhwtstamps = skb_hwtstamps(skb);
		shhwtstamps->hwtstamp =
			ns_to_ktime(div_u64(p->timestamp * 1000,
					    can->kv_pcie->freq_to_ticks_div));

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		netif_rx(skb);
	}
	/* Cache the latest counters for get_berr_counter */
	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;

	/* Keep polling while either counter is non-zero */
	if (bec.txerr || bec.rxerr)
		mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);

	return 0;
}
1401
/* Demultiplex a status packet against the controller's current command
 * sequence number and mode.
 *
 * Branches (in order): reset complete -> issue abort + re-enable TFD irq;
 * idle after reset -> trigger end-of-flush when Tx FIFO is empty; explicit
 * status request response -> update state; bus-on after command reset ->
 * complete start.
 */
static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 cmdseq;
	u32 status;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff;

	/* Reset done: ack the abort-done irq, issue an abort command and
	 * re-enable the transmit-FIFO-done interrupt.
	 */
	if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
	    p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
	    p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
	    cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
	    status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		u32 cmd;

		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);

		iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
		   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
		   status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* Reset detected: request end-of-flush once no packets
		 * remain in the Tx FIFO.
		 */
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (!count)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) {
		/* Response to an explicit status request */
		kvaser_pciefd_handle_status_resp(can, p);
		if (can->can.state != CAN_STATE_BUS_OFF &&
		    can->can.state != CAN_STATE_ERROR_ACTIVE) {
			mod_timer(&can->bec_poll_timer,
				  KVASER_PCIEFD_BEC_POLL_FREQ);
		}
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
		   !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) {
		/* Reset to bus-on detected: complete pending start */
		if (!completion_done(&can->start_comp))
			complete(&can->start_comp);
	}

	return 0;
}
1463
1464 static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
1465 struct kvaser_pciefd_rx_packet *p)
1466 {
1467 struct kvaser_pciefd_can *can;
1468 u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1469
1470 if (ch_id >= pcie->nr_channels)
1471 return -EIO;
1472
1473 can = pcie->can[ch_id];
1474
1475
1476 if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
1477 u8 count = ioread32(can->reg_base +
1478 KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
1479
1480 if (count == 0)
1481 iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
1482 can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
1483 } else {
1484 int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
1485 int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
1486 struct net_device_stats *stats = &can->can.dev->stats;
1487
1488 stats->tx_bytes += dlc;
1489 stats->tx_packets++;
1490
1491 if (netif_queue_stopped(can->can.dev))
1492 netif_wake_queue(can->can.dev);
1493 }
1494
1495 return 0;
1496 }
1497
1498 static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
1499 struct kvaser_pciefd_rx_packet *p)
1500 {
1501 struct sk_buff *skb;
1502 struct net_device_stats *stats = &can->can.dev->stats;
1503 struct can_frame *cf;
1504
1505 skb = alloc_can_err_skb(can->can.dev, &cf);
1506
1507 stats->tx_errors++;
1508 if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
1509 if (skb)
1510 cf->can_id |= CAN_ERR_LOSTARB;
1511 can->can.can_stats.arbitration_lost++;
1512 } else if (skb) {
1513 cf->can_id |= CAN_ERR_ACK;
1514 }
1515
1516 if (skb) {
1517 cf->can_id |= CAN_ERR_BUSERROR;
1518 netif_rx(skb);
1519 } else {
1520 stats->rx_dropped++;
1521 netdev_warn(can->can.dev, "No memory left for err_skb\n");
1522 }
1523 }
1524
/* Handle a transmit-ACK packet.
 *
 * Control-packet ACKs are ignored. A NACK is reported as an error frame and
 * suppresses the tx statistics update (one-shot failure). For non-flushed
 * packets the echo skb is released, the queue is woken when FIFO space is
 * available, and (unless one-shot failed) tx statistics are updated.
 */
static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
					   struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	bool one_shot_fail = false;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	/* Ignore control-packet ACKs */
	if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
		return 0;

	if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
		kvaser_pciefd_handle_nack_packet(can, p);
		one_shot_fail = true;
	}

	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		netdev_dbg(can->can.dev, "Packet was flushed\n");
	} else {
		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
		int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		/* Wake the queue once the Tx FIFO has room again */
		if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT &&
		    netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);

		if (!one_shot_fail) {
			struct net_device_stats *stats = &can->can.dev->stats;

			stats->tx_bytes += dlc;
			stats->tx_packets++;
		}
	}

	return 0;
}
1567
1568 static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
1569 struct kvaser_pciefd_rx_packet *p)
1570 {
1571 struct kvaser_pciefd_can *can;
1572 u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1573
1574 if (ch_id >= pcie->nr_channels)
1575 return -EIO;
1576
1577 can = pcie->can[ch_id];
1578
1579 if (!completion_done(&can->flush_comp))
1580 complete(&can->flush_comp);
1581
1582 return 0;
1583 }
1584
1585 static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
1586 int dma_buf)
1587 {
1588 __le32 *buffer = pcie->dma_data[dma_buf];
1589 __le64 timestamp;
1590 struct kvaser_pciefd_rx_packet packet;
1591 struct kvaser_pciefd_rx_packet *p = &packet;
1592 u8 type;
1593 int pos = *start_pos;
1594 int size;
1595 int ret = 0;
1596
1597 size = le32_to_cpu(buffer[pos++]);
1598 if (!size) {
1599 *start_pos = 0;
1600 return 0;
1601 }
1602
1603 p->header[0] = le32_to_cpu(buffer[pos++]);
1604 p->header[1] = le32_to_cpu(buffer[pos++]);
1605
1606
1607 memcpy(×tamp, &buffer[pos], sizeof(__le64));
1608 pos += 2;
1609 p->timestamp = le64_to_cpu(timestamp);
1610
1611 type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf;
1612 switch (type) {
1613 case KVASER_PCIEFD_PACK_TYPE_DATA:
1614 ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
1615 if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
1616 u8 data_len;
1617
1618 data_len = can_fd_dlc2len(p->header[1] >>
1619 KVASER_PCIEFD_RPACKET_DLC_SHIFT);
1620 pos += DIV_ROUND_UP(data_len, 4);
1621 }
1622 break;
1623
1624 case KVASER_PCIEFD_PACK_TYPE_ACK:
1625 ret = kvaser_pciefd_handle_ack_packet(pcie, p);
1626 break;
1627
1628 case KVASER_PCIEFD_PACK_TYPE_STATUS:
1629 ret = kvaser_pciefd_handle_status_packet(pcie, p);
1630 break;
1631
1632 case KVASER_PCIEFD_PACK_TYPE_ERROR:
1633 ret = kvaser_pciefd_handle_error_packet(pcie, p);
1634 break;
1635
1636 case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
1637 ret = kvaser_pciefd_handle_eack_packet(pcie, p);
1638 break;
1639
1640 case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
1641 ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
1642 break;
1643
1644 case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
1645 case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
1646 case KVASER_PCIEFD_PACK_TYPE_TXRQ:
1647 dev_info(&pcie->pci->dev,
1648 "Received unexpected packet type 0x%08X\n", type);
1649 break;
1650
1651 default:
1652 dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
1653 ret = -EIO;
1654 break;
1655 }
1656
1657 if (ret)
1658 return ret;
1659
1660
1661
1662
1663 if ((*start_pos + size) != pos)
1664 return -EIO;
1665
1666
1667 *start_pos = pos;
1668
1669 return ret;
1670 }
1671
1672 static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
1673 {
1674 int pos = 0;
1675 int res = 0;
1676
1677 do {
1678 res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
1679 } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
1680
1681 return res;
1682 }
1683
/* Service the shared receive buffer interrupt: drain any DMA buffer that
 * the hardware reports as ready and hand it back, log over/underflow
 * conditions, and acknowledge the handled bits.
 */
static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
	u32 irq;

	irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
		kvaser_pciefd_read_buffer(pcie, 0);
		/* Release DMA buffer 0 back to the hardware */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
		kvaser_pciefd_read_buffer(pcie, 1);
		/* Release DMA buffer 1 back to the hardware */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	}

	/* Over-/underflow on either buffer is unexpected; just log it */
	if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);

	/* Acknowledge exactly the bits read above */
	iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
	return 0;
}
1712
/* Service a single channel's KCAN interrupt: log fault conditions, trigger
 * end-of-flush when the Tx FIFO has drained, and acknowledge all handled
 * bits.
 */
static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
{
	u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
		netdev_err(can->can.dev, "Tx FIFO overflow\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		/* FIFO empty: request end-of-flush notification */
		if (count == 0)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	}

	if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
		netdev_err(can->can.dev,
			   "Fail to change bittiming, when not in reset mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
		netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
		netdev_err(can->can.dev, "Rx FIFO overflow\n");

	/* Acknowledge exactly the bits read above */
	iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	return 0;
}
1742
/* Top-level (shared) interrupt handler.
 *
 * Dispatches the board-level interrupt to the SRB receive path and to each
 * channel whose per-channel bit is set, then acknowledges the board-level
 * bits that were read.
 */
static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
	u32 board_irq;
	int i;

	board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);

	/* Shared IRQ line: not ours if no known bit is set */
	if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK))
		return IRQ_NONE;

	if (board_irq & KVASER_PCIEFD_IRQ_SRB)
		kvaser_pciefd_receive_irq(pcie);

	for (i = 0; i < pcie->nr_channels; i++) {
		if (!pcie->can[i]) {
			dev_err(&pcie->pci->dev,
				"IRQ mask points to unallocated controller\n");
			break;
		}

		/* Per-channel interrupt bits occupy the low bits */
		if (board_irq & (1 << i))
			kvaser_pciefd_transmit_irq(pcie->can[i]);
	}

	/* Acknowledge the board-level bits read above */
	iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	return IRQ_HANDLED;
}
1772
1773 static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
1774 {
1775 int i;
1776 struct kvaser_pciefd_can *can;
1777
1778 for (i = 0; i < pcie->nr_channels; i++) {
1779 can = pcie->can[i];
1780 if (can) {
1781 iowrite32(0,
1782 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
1783 kvaser_pciefd_pwm_stop(can);
1784 free_candev(can->can.dev);
1785 }
1786 }
1787 }
1788
/* Probe: enable the PCI device, map BAR 0, initialize board/DMA/channels,
 * enable interrupts, arm the receive buffers, request the (shared) IRQ and
 * register the netdevs. Unwinds in reverse order on any failure.
 */
static int kvaser_pciefd_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	int err;
	struct kvaser_pciefd *pcie;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci_set_drvdata(pdev, pcie);
	pcie->pci = pdev;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
	if (err)
		goto err_disable_pci;

	pcie->reg_base = pci_iomap(pdev, 0, 0);
	if (!pcie->reg_base) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	err = kvaser_pciefd_setup_board(pcie);
	if (err)
		goto err_pci_iounmap;

	err = kvaser_pciefd_setup_dma(pcie);
	if (err)
		goto err_pci_iounmap;

	pci_set_master(pdev);

	err = kvaser_pciefd_setup_can_ctrls(pcie);
	if (err)
		goto err_teardown_can_ctrls;

	/* Clear stale SRB packet-done interrupts, then enable the DMA
	 * packet-done and over-/underflow interrupts.
	 */
	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
		  KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
		  KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);

	/* Acknowledge and enable all board-level interrupt sources */
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IEN_REG);

	/* Hand both receive DMA buffers to the hardware.
	 * NOTE(review): this happens before request_irq() below — early
	 * packet-done interrupts would be unhandled until then; verify
	 * ordering against current upstream.
	 */
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);

	err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
			  IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
	if (err)
		goto err_teardown_can_ctrls;

	err = kvaser_pciefd_reg_candev(pcie);
	if (err)
		goto err_free_irq;

	return 0;

err_free_irq:
	free_irq(pcie->pci->irq, pcie);

err_teardown_can_ctrls:
	kvaser_pciefd_teardown_can_ctrls(pcie);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	pci_clear_master(pdev);

err_pci_iounmap:
	pci_iounmap(pdev, pcie->reg_base);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pci:
	pci_disable_device(pdev);

	return err;
}
1880
1881 static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
1882 {
1883 struct kvaser_pciefd_can *can;
1884 int i;
1885
1886 for (i = 0; i < pcie->nr_channels; i++) {
1887 can = pcie->can[i];
1888 if (can) {
1889 iowrite32(0,
1890 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
1891 unregister_candev(can->can.dev);
1892 del_timer(&can->bec_poll_timer);
1893 kvaser_pciefd_pwm_stop(can);
1894 free_candev(can->can.dev);
1895 }
1896 }
1897 }
1898
/* Remove: tear down all channels, quiesce DMA and board interrupts, free
 * the IRQ and release all PCI resources (reverse of probe).
 */
static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
	struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);

	kvaser_pciefd_remove_all_ctrls(pcie);

	/* Disable the SRB DMA engine, acknowledge any pending board-level
	 * interrupts and mask all sources.
	 */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);

	free_irq(pcie->pci->irq, pcie);

	pci_clear_master(pdev);
	pci_iounmap(pdev, pcie->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
1918
/* PCI driver glue for all supported Kvaser PCIe CAN FD boards */
static struct pci_driver kvaser_pciefd = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.id_table = kvaser_pciefd_id_table,
	.probe = kvaser_pciefd_probe,
	.remove = kvaser_pciefd_remove,
};

/* No trailing semicolon: module_pci_driver()'s expansion supplies it */
module_pci_driver(kvaser_pciefd)