0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #include <linux/bitfield.h>
0013 #include <linux/clk.h>
0014 #include <linux/errno.h>
0015 #include <linux/ethtool.h>
0016 #include <linux/init.h>
0017 #include <linux/interrupt.h>
0018 #include <linux/io.h>
0019 #include <linux/kernel.h>
0020 #include <linux/module.h>
0021 #include <linux/netdevice.h>
0022 #include <linux/of.h>
0023 #include <linux/of_device.h>
0024 #include <linux/platform_device.h>
0025 #include <linux/skbuff.h>
0026 #include <linux/spinlock.h>
0027 #include <linux/string.h>
0028 #include <linux/types.h>
0029 #include <linux/can/dev.h>
0030 #include <linux/can/error.h>
0031 #include <linux/pm_runtime.h>
0032
0033 #define DRIVER_NAME "xilinx_can"
0034
0035
/* CAN register set: offsets from the peripheral's base address */
enum xcan_reg {
	XCAN_SRR_OFFSET		= 0x00, /* Software reset register */
	XCAN_MSR_OFFSET		= 0x04, /* Mode select register */
	XCAN_BRPR_OFFSET	= 0x08, /* Baud rate prescaler */
	XCAN_BTR_OFFSET		= 0x0C, /* Bit timing register */
	XCAN_ECR_OFFSET		= 0x10, /* Error counter register */
	XCAN_ESR_OFFSET		= 0x14, /* Error status register */
	XCAN_SR_OFFSET		= 0x18, /* Status register */
	XCAN_ISR_OFFSET		= 0x1C, /* Interrupt status register */
	XCAN_IER_OFFSET		= 0x20, /* Interrupt enable register */
	XCAN_ICR_OFFSET		= 0x24, /* Interrupt clear register */

	/* not on CANFD cores */
	XCAN_TXFIFO_OFFSET	= 0x30, /* TX FIFO base */
	XCAN_RXFIFO_OFFSET	= 0x50, /* RX FIFO base */
	XCAN_AFR_OFFSET		= 0x60, /* Acceptance filter register */

	/* only on CANFD cores */
	XCAN_F_BRPR_OFFSET	= 0x088, /* Data phase baud rate prescaler */
	XCAN_F_BTR_OFFSET	= 0x08C, /* Data phase bit timing */
	XCAN_TRR_OFFSET		= 0x0090, /* TX buffer ready request */
	XCAN_AFR_EXT_OFFSET	= 0x00E0, /* Acceptance filter extension */
	XCAN_FSR_OFFSET		= 0x00E8, /* RX FIFO status register */
	XCAN_TXMSG_BASE_OFFSET	= 0x0100, /* TX message space */
	XCAN_RXMSG_BASE_OFFSET	= 0x1100, /* RX message space */
	XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX message space (CANFD 2.0) */
	XCAN_AFR_2_MASK_OFFSET	= 0x0A00, /* Acceptance filter mask (CANFD 2.0) */
	XCAN_AFR_2_ID_OFFSET	= 0x0A04, /* Acceptance filter ID (CANFD 2.0) */
};
0067
0068 #define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00)
0069 #define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04)
0070 #define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08)
0071 #define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C)
0072 #define XCANFD_FRAME_DW_OFFSET(frame_base) ((frame_base) + 0x08)
0073
0074 #define XCAN_CANFD_FRAME_SIZE 0x48
0075 #define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \
0076 XCAN_CANFD_FRAME_SIZE * (n))
0077 #define XCAN_RXMSG_FRAME_OFFSET(n) (XCAN_RXMSG_BASE_OFFSET + \
0078 XCAN_CANFD_FRAME_SIZE * (n))
0079 #define XCAN_RXMSG_2_FRAME_OFFSET(n) (XCAN_RXMSG_2_BASE_OFFSET + \
0080 XCAN_CANFD_FRAME_SIZE * (n))
0081
0082
0083 #define XCAN_TX_MAILBOX_IDX 0
0084
0085
0086 #define XCAN_SRR_CEN_MASK 0x00000002
0087 #define XCAN_SRR_RESET_MASK 0x00000001
0088 #define XCAN_MSR_LBACK_MASK 0x00000002
0089 #define XCAN_MSR_SLEEP_MASK 0x00000001
0090 #define XCAN_BRPR_BRP_MASK 0x000000FF
0091 #define XCAN_BRPR_TDCO_MASK GENMASK(12, 8)
0092 #define XCAN_2_BRPR_TDCO_MASK GENMASK(13, 8)
0093 #define XCAN_BTR_SJW_MASK 0x00000180
0094 #define XCAN_BTR_TS2_MASK 0x00000070
0095 #define XCAN_BTR_TS1_MASK 0x0000000F
0096 #define XCAN_BTR_SJW_MASK_CANFD 0x000F0000
0097 #define XCAN_BTR_TS2_MASK_CANFD 0x00000F00
0098 #define XCAN_BTR_TS1_MASK_CANFD 0x0000003F
0099 #define XCAN_ECR_REC_MASK 0x0000FF00
0100 #define XCAN_ECR_TEC_MASK 0x000000FF
0101 #define XCAN_ESR_ACKER_MASK 0x00000010
0102 #define XCAN_ESR_BERR_MASK 0x00000008
0103 #define XCAN_ESR_STER_MASK 0x00000004
0104 #define XCAN_ESR_FMER_MASK 0x00000002
0105 #define XCAN_ESR_CRCER_MASK 0x00000001
0106 #define XCAN_SR_TDCV_MASK GENMASK(22, 16)
0107 #define XCAN_SR_TXFLL_MASK 0x00000400
0108 #define XCAN_SR_ESTAT_MASK 0x00000180
0109 #define XCAN_SR_ERRWRN_MASK 0x00000040
0110 #define XCAN_SR_NORMAL_MASK 0x00000008
0111 #define XCAN_SR_LBACK_MASK 0x00000002
0112 #define XCAN_SR_CONFIG_MASK 0x00000001
0113 #define XCAN_IXR_RXMNF_MASK 0x00020000
0114 #define XCAN_IXR_TXFEMP_MASK 0x00004000
0115 #define XCAN_IXR_WKUP_MASK 0x00000800
0116 #define XCAN_IXR_SLP_MASK 0x00000400
0117 #define XCAN_IXR_BSOFF_MASK 0x00000200
0118 #define XCAN_IXR_ERROR_MASK 0x00000100
0119 #define XCAN_IXR_RXNEMP_MASK 0x00000080
0120 #define XCAN_IXR_RXOFLW_MASK 0x00000040
0121 #define XCAN_IXR_RXOK_MASK 0x00000010
0122 #define XCAN_IXR_TXFLL_MASK 0x00000004
0123 #define XCAN_IXR_TXOK_MASK 0x00000002
0124 #define XCAN_IXR_ARBLST_MASK 0x00000001
0125 #define XCAN_IDR_ID1_MASK 0xFFE00000
0126 #define XCAN_IDR_SRR_MASK 0x00100000
0127 #define XCAN_IDR_IDE_MASK 0x00080000
0128 #define XCAN_IDR_ID2_MASK 0x0007FFFE
0129 #define XCAN_IDR_RTR_MASK 0x00000001
0130 #define XCAN_DLCR_DLC_MASK 0xF0000000
0131 #define XCAN_FSR_FL_MASK 0x00003F00
0132 #define XCAN_2_FSR_FL_MASK 0x00007F00
0133 #define XCAN_FSR_IRI_MASK 0x00000080
0134 #define XCAN_FSR_RI_MASK 0x0000001F
0135 #define XCAN_2_FSR_RI_MASK 0x0000003F
0136 #define XCAN_DLCR_EDL_MASK 0x08000000
0137 #define XCAN_DLCR_BRS_MASK 0x04000000
0138
0139
0140 #define XCAN_BRPR_TDC_ENABLE BIT(16)
0141 #define XCAN_BTR_SJW_SHIFT 7
0142 #define XCAN_BTR_TS2_SHIFT 4
0143 #define XCAN_BTR_SJW_SHIFT_CANFD 16
0144 #define XCAN_BTR_TS2_SHIFT_CANFD 8
0145 #define XCAN_IDR_ID1_SHIFT 21
0146 #define XCAN_IDR_ID2_SHIFT 1
0147 #define XCAN_DLCR_DLC_SHIFT 28
0148 #define XCAN_ESR_REC_SHIFT 8
0149
0150
0151 #define XCAN_FRAME_MAX_DATA_LEN 8
0152 #define XCANFD_DW_BYTES 4
0153 #define XCAN_TIMEOUT (1 * HZ)
0154
0155
0156 #define XCAN_FLAG_TXFEMP 0x0001
0157
0158 #define XCAN_FLAG_RXMNF 0x0002
0159
0160 #define XCAN_FLAG_EXT_FILTERS 0x0004
0161
0162 #define XCAN_FLAG_TX_MAILBOXES 0x0008
0163
0164
0165
0166 #define XCAN_FLAG_RX_FIFO_MULTI 0x0010
0167 #define XCAN_FLAG_CANFD_2 0x0020
0168
/* Supported flavors of the Xilinx CAN IP core */
enum xcan_ip_type {
	XAXI_CAN = 0,	/* AXI CAN (classic CAN only) */
	XZYNQ_CANPS,	/* Zynq CAN PS controller */
	XAXI_CANFD,	/* AXI CAN FD (pre-2.0) */
	XAXI_CANFD_2_0,	/* AXI CAN FD 2.0 */
};
0175
/* Per-IP-variant configuration selected at probe time */
struct xcan_devtype_data {
	enum xcan_ip_type cantype;	/* which IP core flavor this is */
	unsigned int flags;		/* XCAN_FLAG_* feature bits */
	const struct can_bittiming_const *bittiming_const; /* arbitration phase limits */
	const char *bus_clk_name;	/* clock name for the register bus clock */
	unsigned int btr_ts2_shift;	/* TS2 field position in BTR */
	unsigned int btr_sjw_shift;	/* SJW field position in BTR */
};
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
/**
 * struct xcan_priv - This definition defines CAN driver instance
 * @can:		CAN private data structure (must be first member)
 * @tx_lock:		Lock for synchronizing TX interrupt handling
 * @tx_head:		Tx CAN packets ready to send on the queue
 * @tx_tail:		Tx CAN packets successfully sent on the queue
 * @tx_max:		Maximum number of packets the driver can send
 * @napi:		NAPI structure
 * @read_reg:		For reading data from CAN registers
 * @write_reg:		For writing data to CAN registers
 * @dev:		Network device data structure
 * @reg_base:		Ioremapped address to registers
 * @irq_flags:		For request_irq()
 * @bus_clk:		Pointer to struct clk (bus/register clock)
 * @can_clk:		Pointer to struct clk (CAN core clock)
 * @devtype:		Device type specific constants
 */
struct xcan_priv {
	struct can_priv can;
	spinlock_t tx_lock;
	unsigned int tx_head;
	unsigned int tx_tail;
	unsigned int tx_max;
	struct napi_struct napi;
	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
			  u32 val);
	struct device *dev;
	void __iomem *reg_base;
	unsigned long irq_flags;
	struct clk *bus_clk;
	struct clk *can_clk;
	struct xcan_devtype_data devtype;
};
0219
0220
/* CAN bit timing constants for classic CAN cores */
static const struct can_bittiming_const xcan_bittiming_const = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
0232
0233
/* AXI CANFD arbitration-phase bit timing constants */
static const struct can_bittiming_const xcan_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 64,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
0245
0246
/* AXI CANFD data-phase bit timing constants */
static const struct can_bittiming_const xcan_data_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 8,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
0258
0259
/* AXI CANFD 2.0 arbitration-phase bit timing constants */
static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
0271
0272
/* AXI CANFD 2.0 data-phase bit timing constants */
static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 32,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
0284
0285
/* Transmitter delay compensation limits for AXI CANFD (TDCO up to 32) */
static const struct can_tdc_const xcan_tdc_const_canfd = {
	.tdcv_min = 0,
	.tdcv_max = 0, /* TDCV is measured by the hardware, not configurable */
	.tdco_min = 0,
	.tdco_max = 32,
	.tdcf_min = 0,
	.tdcf_max = 0, /* TDC filtering is not supported */
};
0294
0295
/* Transmitter delay compensation limits for AXI CANFD 2.0 (TDCO up to 64) */
static const struct can_tdc_const xcan_tdc_const_canfd2 = {
	.tdcv_min = 0,
	.tdcv_max = 0, /* TDCV is measured by the hardware, not configurable */
	.tdco_min = 0,
	.tdco_max = 64,
	.tdcf_min = 0,
	.tdcf_max = 0, /* TDC filtering is not supported */
};
0304
0305
0306
0307
0308
0309
0310
0311
0312
/**
 * xcan_write_reg_le - Write a value to the device register (little endian)
 * @priv:	Driver private data structure
 * @reg:	Register offset
 * @val:	Value to write at the register offset
 */
static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32(val, priv->reg_base + reg);
}
0318
0319
0320
0321
0322
0323
0324
0325
0326
/**
 * xcan_read_reg_le - Read a value from the device register (little endian)
 * @priv:	Driver private data structure
 * @reg:	Register offset
 *
 * Return: value read from the register at offset @reg
 */
static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32(priv->reg_base + reg);
}
0331
0332
0333
0334
0335
0336
0337
0338
0339
/**
 * xcan_write_reg_be - Write a value to the device register (big endian)
 * @priv:	Driver private data structure
 * @reg:	Register offset
 * @val:	Value to write at the register offset
 */
static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32be(val, priv->reg_base + reg);
}
0345
0346
0347
0348
0349
0350
0351
0352
0353
/**
 * xcan_read_reg_be - Read a value from the device register (big endian)
 * @priv:	Driver private data structure
 * @reg:	Register offset
 *
 * Return: value read from the register at offset @reg
 */
static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32be(priv->reg_base + reg);
}
0358
0359
0360
0361
0362
0363
0364
0365 static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
0366 {
0367
0368
0369
0370 if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
0371 return XCAN_IXR_RXOK_MASK;
0372 else
0373 return XCAN_IXR_RXNEMP_MASK;
0374 }
0375
0376
0377
0378
0379
0380
0381
0382
0383
0384
/**
 * set_reset_mode - Resets the CAN device mode
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver reset mode routine. The driver enters into
 * configuration mode by issuing a software reset and polling the status
 * register until the CONFIG bit is set.
 *
 * Return: 0 on success, -ETIMEDOUT if the core never reaches config mode
 */
static int set_reset_mode(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long timeout;

	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);

	timeout = jiffies + XCAN_TIMEOUT;
	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
		if (time_after(jiffies, timeout)) {
			netdev_warn(ndev, "timed out for config mode\n");
			return -ETIMEDOUT;
		}
		usleep_range(500, 10000);
	}

	/* reset clears the TX FIFO, so reset our bookkeeping too */
	priv->tx_head = 0;
	priv->tx_tail = 0;

	return 0;
}
0407
0408
0409
0410
0411
0412
0413
0414
/**
 * xcan_set_bittiming - CAN set bit timing routine
 * @ndev:	Pointer to net_device structure
 *
 * Programs the arbitration phase (and, on CANFD cores, the data phase and
 * transmitter delay compensation) bit timing registers from the values
 * negotiated by the CAN framework.
 *
 * Return: 0 on success, -EPERM if the core is not in configuration mode
 */
static int xcan_set_bittiming(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct can_bittiming *bt = &priv->can.bittiming;
	struct can_bittiming *dbt = &priv->can.data_bittiming;
	u32 btr0, btr1;
	u32 is_config_mode;

	/* Check whether Xilinx CAN is in configuration mode.
	 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
	 */
	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
				XCAN_SR_CONFIG_MASK;
	if (!is_config_mode) {
		netdev_alert(ndev,
			     "BUG! Cannot set bittiming - CAN is not in config mode\n");
		return -EPERM;
	}

	/* Setting Baud Rate prescaler value in BRPR Register */
	btr0 = (bt->brp - 1);

	/* Setting Time Segment 1 in BTR Register */
	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);

	/* Setting Time Segment 2 in BTR Register */
	btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

	/* Setting Synchronous jump width in BTR Register */
	btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;

	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);

	if (priv->devtype.cantype == XAXI_CANFD ||
	    priv->devtype.cantype == XAXI_CANFD_2_0) {
		/* Setting Baud Rate prescaler value in F_BRPR Register */
		btr0 = dbt->brp - 1;
		if (can_tdc_is_enabled(&priv->can)) {
			/* TDCO field is wider on CANFD 2.0 cores */
			if (priv->devtype.cantype == XAXI_CANFD)
				btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
					XCAN_BRPR_TDC_ENABLE;
			else
				btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
					XCAN_BRPR_TDC_ENABLE;
		}

		/* Setting Time Segment 1 in BTR Register */
		btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;

		/* Setting Time Segment 2 in BTR Register */
		btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

		/* Setting Synchronous jump width in BTR Register */
		btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;

		priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
		priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
	}

	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
		   priv->read_reg(priv, XCAN_BRPR_OFFSET),
		   priv->read_reg(priv, XCAN_BTR_OFFSET));

	return 0;
}
0481
0482
0483
0484
0485
0486
0487
0488
0489
0490
0491
0492 static int xcan_chip_start(struct net_device *ndev)
0493 {
0494 struct xcan_priv *priv = netdev_priv(ndev);
0495 u32 reg_msr;
0496 int err;
0497 u32 ier;
0498
0499
0500 err = set_reset_mode(ndev);
0501 if (err < 0)
0502 return err;
0503
0504 err = xcan_set_bittiming(ndev);
0505 if (err < 0)
0506 return err;
0507
0508
0509
0510
0511
0512
0513
0514
0515 ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
0516 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
0517 XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
0518 XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
0519
0520 if (priv->devtype.flags & XCAN_FLAG_RXMNF)
0521 ier |= XCAN_IXR_RXMNF_MASK;
0522
0523 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
0524
0525
0526 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
0527 reg_msr = XCAN_MSR_LBACK_MASK;
0528 else
0529 reg_msr = 0x0;
0530
0531
0532
0533
0534 if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
0535 priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
0536
0537 priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
0538 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
0539
0540 netdev_dbg(ndev, "status:#x%08x\n",
0541 priv->read_reg(priv, XCAN_SR_OFFSET));
0542
0543 priv->can.state = CAN_STATE_ERROR_ACTIVE;
0544 return 0;
0545 }
0546
0547
0548
0549
0550
0551
0552
0553
0554
0555
0556 static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
0557 {
0558 int ret;
0559
0560 switch (mode) {
0561 case CAN_MODE_START:
0562 ret = xcan_chip_start(ndev);
0563 if (ret < 0) {
0564 netdev_err(ndev, "xcan_chip_start failed!\n");
0565 return ret;
0566 }
0567 netif_wake_queue(ndev);
0568 break;
0569 default:
0570 ret = -EOPNOTSUPP;
0571 break;
0572 }
0573
0574 return ret;
0575 }
0576
0577
0578
0579
0580
0581
0582
/**
 * xcan_write_frame - Write a frame to HW
 * @ndev:	Pointer to net_device structure
 * @skb:	sk_buff pointer that contains data to be Txed
 * @frame_offset: Register offset of the frame (FIFO head or mailbox slot)
 *
 * Encodes the CAN ID, DLC and payload into the controller's frame layout
 * and writes them to the frame registers. The ID write goes first; the
 * DLC write is what commits the frame on FIFO-based cores.
 */
static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
			     int frame_offset)
{
	u32 id, dlc, data[2] = {0, 0};
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	u32 ramoff, dwindex = 0, i;
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Watch carefully on the bit sequence */
	if (cf->can_id & CAN_EFF_FLAG) {
		/* Extended CAN ID format */
		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
		     XCAN_IDR_ID2_MASK;
		id |= (((cf->can_id & CAN_EFF_MASK) >>
			(CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
			XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;

		/* The substibute remote TX request bit should be "1"
		 * for extended frames as in the Xilinx CAN datasheet
		 */
		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Extended frames remote TX request */
			id |= XCAN_IDR_RTR_MASK;
	} else {
		/* Standard CAN ID format */
		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
		     XCAN_IDR_ID1_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Standard frames remote TX request */
			id |= XCAN_IDR_SRR_MASK;
	}

	dlc = can_fd_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
	if (can_is_canfd_skb(skb)) {
		if (cf->flags & CANFD_BRS)
			dlc |= XCAN_DLCR_BRS_MASK;
		dlc |= XCAN_DLCR_EDL_MASK;
	}

	/* Cores with a multi-slot TX FIFO track echo skbs per slot;
	 * mailbox/single-slot cores always use echo slot 0.
	 */
	if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
	    (priv->devtype.flags & XCAN_FLAG_TXFEMP))
		can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
	else
		can_put_echo_skb(skb, ndev, 0, 0);

	priv->tx_head++;

	priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);

	/* If the CAN frame is RTR frame this write triggers transmission
	 * (not on CAN FD)
	 */
	priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
	if (priv->devtype.cantype == XAXI_CANFD ||
	    priv->devtype.cantype == XAXI_CANFD_2_0) {
		/* CANFD: payload is a run of 32-bit words in message RAM */
		for (i = 0; i < cf->len; i += 4) {
			ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
				 (dwindex * XCANFD_DW_BYTES);
			priv->write_reg(priv, ramoff,
					be32_to_cpup((__be32 *)(cf->data + i)));
			dwindex++;
		}
	} else {
		if (cf->len > 0)
			data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
		if (cf->len > 4)
			data[1] = be32_to_cpup((__be32 *)(cf->data + 4));

		if (!(cf->can_id & CAN_RTR_FLAG)) {
			priv->write_reg(priv,
					XCAN_FRAME_DW1_OFFSET(frame_offset),
					data[0]);
			/* If the CAN frame is Standard/Extended frame this
			 * write triggers transmission (not on CAN FD)
			 */
			priv->write_reg(priv,
					XCAN_FRAME_DW2_OFFSET(frame_offset),
					data[1]);
		}
	}
}
0666
0667
0668
0669
0670
0671
0672
0673
/**
 * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if the TX FIFO is full
 */
static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	/* Check if the TX buffer is full */
	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
		     XCAN_SR_TXFLL_MASK))
		return -ENOSPC;

	spin_lock_irqsave(&priv->tx_lock, flags);

	xcan_write_frame(ndev, skb, XCAN_TXFIFO_OFFSET);

	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
	if (priv->tx_max > 1)
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);

	/* Check if the TX buffer is full */
	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
		netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}
0700
0701
0702
0703
0704
0705
0706
0707
/**
 * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if the TX mailbox is still busy
 */
static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
		     BIT(XCAN_TX_MAILBOX_IDX)))
		return -ENOSPC;

	spin_lock_irqsave(&priv->tx_lock, flags);

	xcan_write_frame(ndev, skb,
			 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));

	/* Mark buffer as ready for transmit */
	priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));

	netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}
0731
0732
0733
0734
0735
0736
0737
0738
0739
0740
0741 static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
0742 {
0743 struct xcan_priv *priv = netdev_priv(ndev);
0744 int ret;
0745
0746 if (can_dropped_invalid_skb(ndev, skb))
0747 return NETDEV_TX_OK;
0748
0749 if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
0750 ret = xcan_start_xmit_mailbox(skb, ndev);
0751 else
0752 ret = xcan_start_xmit_fifo(skb, ndev);
0753
0754 if (ret < 0) {
0755 netdev_err(ndev, "BUG!, TX full when queue awake!\n");
0756 netif_stop_queue(ndev);
0757 return NETDEV_TX_BUSY;
0758 }
0759
0760 return NETDEV_TX_OK;
0761 }
0762
0763
0764
0765
0766
0767
0768
0769
0770
0771
0772
0773
/**
 * xcan_rx - Is called from CAN isr to complete the received
 *		frame  processing
 * @ndev:	Pointer to net_device structure
 * @frame_base:	Register offset to the frame to be read
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames.
 * It does minimal processing and invokes "netif_receive_skb" to complete
 * further processing.
 *
 * Return: 1 on success and 0 on failure.
 */
static int xcan_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0};

	skb = alloc_can_skb(ndev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Read a frame from Xilinx zynq CANPS */
	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
	      XCAN_DLCR_DLC_SHIFT;

	/* Change Xilinx CAN data length format to socketCAN data format */
	cf->len = can_cc_dlc2len(dlc);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
			      XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
			     XCAN_IDR_ID1_SHIFT;
		if (id_xcan & XCAN_IDR_SRR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* DW1/DW2 must always be read to remove the message from RXFIFO */
	data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
	data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));

	if (!(cf->can_id & CAN_RTR_FLAG)) {
		/* Change Xilinx CAN data format to socketCAN data format */
		if (cf->len > 0)
			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
		if (cf->len > 4)
			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);

		stats->rx_bytes += cf->len;
	}
	stats->rx_packets++;

	netif_receive_skb(skb);

	return 1;
}
0832
0833
0834
0835
0836
0837
0838
0839
0840
0841
0842
0843
/**
 * xcanfd_rx -  Is called from CAN isr to complete the rx frame processing
 * @ndev:	Pointer to net_device structure
 * @frame_base:	Register offset to the frame to be read
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames.
 * It does minimal processing and invokes "netif_receive_skb" to complete
 * further processing.
 *
 * Return: 1 on success and 0 on failure.
 */
static int xcanfd_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct canfd_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;

	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));
	if (dlc & XCAN_DLCR_EDL_MASK)
		skb = alloc_canfd_skb(ndev, &cf);
	else
		skb = alloc_can_skb(ndev, (struct can_frame **)&cf);

	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Change Xilinx CANFD data length format to socketCAN data
	 * format
	 */
	if (dlc & XCAN_DLCR_EDL_MASK)
		cf->len = can_fd_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
					 XCAN_DLCR_DLC_SHIFT);
	else
		cf->len = can_cc_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
					 XCAN_DLCR_DLC_SHIFT);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
			      XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
			     XCAN_IDR_ID1_SHIFT;
		/* RTR is only valid for classic (non-EDL) frames */
		if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
						    XCAN_IDR_SRR_MASK))
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* Check the frame received is FD or not */
	if (dlc & XCAN_DLCR_EDL_MASK) {
		for (i = 0; i < cf->len; i += 4) {
			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
				    (dwindex * XCANFD_DW_BYTES);
			data[0] = priv->read_reg(priv, dw_offset);
			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
			dwindex++;
		}
	} else {
		for (i = 0; i < cf->len; i += 4) {
			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
			data[0] = priv->read_reg(priv, dw_offset + i);
			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
		}
	}

	if (!(cf->can_id & CAN_RTR_FLAG))
		stats->rx_bytes += cf->len;
	stats->rx_packets++;

	netif_receive_skb(skb);

	return 1;
}
0917
0918
0919
0920
0921
0922
0923
0924
0925
0926
0927
0928
0929 static enum can_state xcan_current_error_state(struct net_device *ndev)
0930 {
0931 struct xcan_priv *priv = netdev_priv(ndev);
0932 u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
0933
0934 if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
0935 return CAN_STATE_ERROR_PASSIVE;
0936 else if (status & XCAN_SR_ERRWRN_MASK)
0937 return CAN_STATE_ERROR_WARNING;
0938 else
0939 return CAN_STATE_ERROR_ACTIVE;
0940 }
0941
0942
0943
0944
0945
0946
0947
0948
0949
0950
/**
 * xcan_set_error_state - Set new CAN error state
 * @ndev:	Pointer to net_device structure
 * @new_state:	The new CAN state to be set
 * @cf:		Error frame to be populated or NULL
 *
 * Set new CAN error state for the device, updating statistics and
 * populating the error frame if given.
 */
static void xcan_set_error_state(struct net_device *ndev,
				 enum can_state new_state,
				 struct can_frame *cf)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
	enum can_state tx_state = txerr >= rxerr ? new_state : 0;
	enum can_state rx_state = txerr <= rxerr ? new_state : 0;

	/* non-ERROR states are handled elsewhere */
	if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
		return;

	can_change_state(ndev, cf, tx_state, rx_state);

	if (cf) {
		cf->can_id |= CAN_ERR_CNT;
		cf->data[6] = txerr;
		cf->data[7] = rxerr;
	}
}
0974
0975
0976
0977
0978
0979
0980
0981
0982
/**
 * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
 * @ndev:	Pointer to net_device structure
 *
 * If the device is in error state, check if it can be put back to
 * ERROR_WARNING or ERROR_ACTIVE state after a successful RX or TX, as
 * the hardware provides no interrupt for such transitions.
 */
static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	enum can_state old_state = priv->can.state;
	enum can_state new_state;

	/* changing error state due to successful frame RX/TX can only
	 * occur from these states
	 */
	if (old_state != CAN_STATE_ERROR_WARNING &&
	    old_state != CAN_STATE_ERROR_PASSIVE)
		return;

	new_state = xcan_current_error_state(ndev);

	if (new_state != old_state) {
		struct sk_buff *skb;
		struct can_frame *cf;

		skb = alloc_can_err_skb(ndev, &cf);

		/* report the new state even when no err skb is available */
		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);

		if (skb)
			netif_rx(skb);
	}
}
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
/**
 * xcan_err_interrupt - error frame Isr
 * @ndev:	net_device pointer
 * @isr:	interrupt status register value
 *
 * This is the CAN error interrupt and it will check the type of error
 * and forward the error frame to upper layers.
 */
static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame cf = { };
	u32 err_status;

	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);

	if (isr & XCAN_IXR_BSOFF_MASK) {
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		/* Leave device in Config Mode in bus-off state */
		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
		can_bus_off(ndev);
		cf.can_id |= CAN_ERR_BUSOFF;
	} else {
		enum can_state new_state = xcan_current_error_state(ndev);

		if (new_state != priv->can.state)
			xcan_set_error_state(ndev, new_state, &cf);
	}

	/* Check for Arbitration lost interrupt */
	if (isr & XCAN_IXR_ARBLST_MASK) {
		priv->can.can_stats.arbitration_lost++;
		cf.can_id |= CAN_ERR_LOSTARB;
		cf.data[0] = CAN_ERR_LOSTARB_UNSPEC;
	}

	/* Check for RX FIFO Overflow interrupt */
	if (isr & XCAN_IXR_RXOFLW_MASK) {
		stats->rx_over_errors++;
		stats->rx_errors++;
		cf.can_id |= CAN_ERR_CRTL;
		cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
	}

	/* Check for RX Match Not Finished interrupt */
	if (isr & XCAN_IXR_RXMNF_MASK) {
		stats->rx_dropped++;
		stats->rx_errors++;
		netdev_err(ndev, "RX match not finished, frame discarded\n");
		cf.can_id |= CAN_ERR_CRTL;
		cf.data[1] |= CAN_ERR_CRTL_UNSPEC;
	}

	/* Check for error interrupt */
	if (isr & XCAN_IXR_ERROR_MASK) {
		bool berr_reporting = false;

		if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
			berr_reporting = true;
			cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		}

		/* Check for Ack error interrupt */
		if (err_status & XCAN_ESR_ACKER_MASK) {
			stats->tx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_ACK;
				cf.data[3] = CAN_ERR_PROT_LOC_ACK;
			}
		}

		/* Check for Bit error interrupt */
		if (err_status & XCAN_ESR_BERR_MASK) {
			stats->tx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_BIT;
			}
		}

		/* Check for Stuff error interrupt */
		if (err_status & XCAN_ESR_STER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_STUFF;
			}
		}

		/* Check for Form error interrupt */
		if (err_status & XCAN_ESR_FMER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_FORM;
			}
		}

		/* Check for CRC error interrupt */
		if (err_status & XCAN_ESR_CRCER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
			}
		}
		priv->can.can_stats.bus_error++;
	}

	/* Only deliver an error frame if anything was recorded above */
	if (cf.can_id) {
		struct can_frame *skb_cf;
		struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);

		if (skb) {
			skb_cf->can_id |= cf.can_id;
			memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
			netif_rx(skb);
		}
	}

	netdev_dbg(ndev, "%s: error status register:0x%x\n",
		   __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
}
1138
1139
1140
1141
1142
1143
1144
1145
1146
/**
 * xcan_state_interrupt - It will check the state of the CAN device
 * @ndev:	net_device pointer
 * @isr:	interrupt status register value
 *
 * This will checks the state of the CAN device
 * and puts the device into appropriate state.
 */
static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Check for Sleep interrupt if set put CAN device in sleep state */
	if (isr & XCAN_IXR_SLP_MASK)
		priv->can.state = CAN_STATE_SLEEPING;

	/* Check for Wake up interrupt if set put CAN device in Active state */
	if (isr & XCAN_IXR_WKUP_MASK)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
1159
1160
1161
1162
1163
1164
1165
/**
 * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
 * @priv:	Driver private data structure
 *
 * Return: Register offset of the next frame in RX FIFO,
 *	   or -ENOENT if there is no next frame available.
 */
static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
{
	int offset;

	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
		u32 fsr, mask;

		/* clear RXOK before the is-empty check so that any
		 * frame arriving between the check and napi_complete
		 * re-raises the interrupt
		 */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);

		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);

		/* FL (fill level) field is wider on CANFD 2.0 */
		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			mask = XCAN_2_FSR_FL_MASK;
		else
			mask = XCAN_FSR_FL_MASK;

		/* check if RX FIFO is empty */
		if (!(fsr & mask))
			return -ENOENT;

		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			offset =
			  XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
		else
			offset =
			  XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);

	} else {
		/* check if RX FIFO is empty */
		if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
		      XCAN_IXR_RXNEMP_MASK))
			return -ENOENT;

		/* frames are read from a static offset */
		offset = XCAN_RXFIFO_OFFSET;
	}

	return offset;
}
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
/**
 * xcan_rx_poll - Poll routine for rx packets (NAPI)
 * @napi:	napi structure pointer
 * @quota:	Max number of rx packets to be processed.
 *
 * This is the poll routine for rx part. It will process the packets
 * maximum quota value.
 *
 * Return: number of packets received
 */
static int xcan_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ier;
	int work_done = 0;
	int frame_offset;

	while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
	       (work_done < quota)) {
		/* RXOK-signalling cores are the CANFD frame-format cores */
		if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
			work_done += xcanfd_rx(ndev, frame_offset);
		else
			work_done += xcan_rx(ndev, frame_offset);

		if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
			/* increment read index */
			priv->write_reg(priv, XCAN_FSR_OFFSET,
					XCAN_FSR_IRI_MASK);
		else
			/* clear rx-not-empty (will actually clear only if
			 * empty)
			 */
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_RXNEMP_MASK);
	}

	if (work_done)
		xcan_update_error_state_after_rxtx(ndev);

	if (work_done < quota) {
		if (napi_complete_done(napi, work_done)) {
			/* re-enable the RX interrupt we masked in the ISR */
			ier = priv->read_reg(priv, XCAN_IER_OFFSET);
			ier |= xcan_rx_int_mask(priv);
			priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		}
	}
	return work_done;
}
1258
1259
1260
1261
1262
1263
/**
 * xcan_tx_interrupt - Tx Done Isr
 * @ndev:	net_device pointer
 * @isr:	Interrupt status register value
 */
static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int frames_in_fifo;
	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
	unsigned long flags;
	int retries = 0;

	/* Synchronize with xmit as we need to know the exact number
	 * of frames in the FIFO to stay in sync due to the TXFEMP
	 * handling.
	 * This also prevents a race between netif_wake_queue() and
	 * netif_stop_queue().
	 */
	spin_lock_irqsave(&priv->tx_lock, flags);

	frames_in_fifo = priv->tx_head - priv->tx_tail;

	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
		/* clear TXOK anyway to avoid getting back here */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return;
	}

	/* Check if 2 frames were sent (TXOK only means that at least 1
	 * frame was sent).
	 */
	if (frames_in_fifo > 1) {
		WARN_ON(frames_in_fifo > priv->tx_max);

		/* Synchronize TXOK and isr so that after the loop:
		 * (1) isr variable is up-to-date at least up to TXOK clear
		 *     time. This avoids us clearing a TXOK of a second frame
		 *     but not noticing that the FIFO is now empty and thus
		 *     marking only a single frame as sent.
		 * (2) No TXOK is left. Having one could mean leaving a
		 *     stray TXOK as we might process the associated frame
		 *     via TXFEMP handling as we read TXFEMP *after* TXOK
		 *     clear to satisfy (1).
		 */
		while ((isr & XCAN_IXR_TXOK_MASK) &&
		       !WARN_ON(++retries == 100)) {
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_TXOK_MASK);
			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
		}

		if (isr & XCAN_IXR_TXFEMP_MASK) {
			/* nothing in FIFO anymore: all frames sent */
			frames_sent = frames_in_fifo;
		}
	} else {
		/* single frame in fifo, just clear TXOK */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
	}

	while (frames_sent--) {
		stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
						    priv->tx_max, NULL);
		priv->tx_tail++;
		stats->tx_packets++;
	}

	netif_wake_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	xcan_update_error_state_after_rxtx(ndev);
}
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
/**
 * xcan_interrupt - CAN Isr
 * @irq:	irq number
 * @dev_id:	device id pointer
 *
 * This is the xilinx CAN Isr. It checks for the type of interrupt
 * and invokes the corresponding ISR.
 *
 * Return:
 * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
 */
static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier;
	u32 isr_errors;
	u32 rx_int_mask = xcan_rx_int_mask(priv);

	/* Get the interrupt status from Xilinx CAN */
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	if (!isr)
		return IRQ_NONE;

	/* Check for the type of interrupt and Processing it */
	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
							XCAN_IXR_WKUP_MASK));
		xcan_state_interrupt(ndev, isr);
	}

	/* Check for Tx interrupt and Processing it */
	if (isr & XCAN_IXR_TXOK_MASK)
		xcan_tx_interrupt(ndev, isr);

	/* Check for the type of error interrupt and Processing it */
	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
			    XCAN_IXR_RXMNF_MASK);
	if (isr_errors) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
		xcan_err_interrupt(ndev, isr);
	}

	/* Check for the type of receive interrupt and Processing it */
	if (isr & rx_int_mask) {
		/* mask RX until NAPI has drained the FIFO */
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier &= ~rx_int_mask;
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
1389
1390
1391
1392
1393
1394
1395
1396
/**
 * xcan_chip_stop - Driver stop routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the drivers stop routine. It will disable the
 * interrupts and put the device into configuration mode.
 */
static void xcan_chip_stop(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	/* Disable interrupts and leave the can in configuration mode */
	ret = set_reset_mode(ndev);
	if (ret < 0)
		netdev_dbg(ndev, "set_reset_mode() Failed\n");

	priv->can.state = CAN_STATE_STOPPED;
}
1409
1410
1411
1412
1413
1414
1415
1416
/**
 * xcan_open - Driver open routine
 * @ndev:	Pointer to net_device structure
 *
 * Resumes the device via runtime PM, requests the interrupt line,
 * resets the controller, opens the CAN device layer, starts the chip,
 * and finally enables NAPI and the TX queue. Failures unwind the
 * already-acquired resources in reverse order through the goto chain.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int xcan_open(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		/* pm_runtime_get_sync() raises the usage count even on
		 * failure, so the err path still does pm_runtime_put()
		 */
		goto err;
	}

	ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
			  ndev->name, ndev);
	if (ret < 0) {
		netdev_err(ndev, "irq allocation for CAN failed\n");
		goto err;
	}

	/* Set chip into reset mode before (re)configuring it */
	ret = set_reset_mode(ndev);
	if (ret < 0) {
		netdev_err(ndev, "mode resetting failed!\n");
		goto err_irq;
	}

	/* Common CAN device open */
	ret = open_candev(ndev);
	if (ret)
		goto err_irq;

	ret = xcan_chip_start(ndev);
	if (ret < 0) {
		netdev_err(ndev, "xcan_chip_start failed!\n");
		goto err_candev;
	}

	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	return 0;

err_candev:
	close_candev(ndev);
err_irq:
	free_irq(ndev->irq, ndev);
err:
	pm_runtime_put(priv->dev);

	return ret;
}
1468
1469
1470
1471
1472
1473
1474
/**
 * xcan_close - Driver close routine
 * @ndev:	Pointer to net_device structure
 *
 * Tears down in reverse of xcan_open(): stops the TX queue and NAPI,
 * resets the controller, releases the interrupt line, closes the CAN
 * device layer and drops the runtime-PM reference taken at open time.
 *
 * Return: Always 0.
 */
static int xcan_close(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	xcan_chip_stop(ndev);
	free_irq(ndev->irq, ndev);
	close_candev(ndev);

	pm_runtime_put(priv->dev);

	return 0;
}
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498 static int xcan_get_berr_counter(const struct net_device *ndev,
1499 struct can_berr_counter *bec)
1500 {
1501 struct xcan_priv *priv = netdev_priv(ndev);
1502 int ret;
1503
1504 ret = pm_runtime_get_sync(priv->dev);
1505 if (ret < 0) {
1506 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1507 __func__, ret);
1508 pm_runtime_put(priv->dev);
1509 return ret;
1510 }
1511
1512 bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
1513 bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
1514 XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
1515
1516 pm_runtime_put(priv->dev);
1517
1518 return 0;
1519 }
1520
1521
1522
1523
1524
1525
1526
1527
1528 static int xcan_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
1529 {
1530 struct xcan_priv *priv = netdev_priv(ndev);
1531
1532 *tdcv = FIELD_GET(XCAN_SR_TDCV_MASK, priv->read_reg(priv, XCAN_SR_OFFSET));
1533
1534 return 0;
1535 }
1536
/* Network device callbacks: open/stop, frame transmit, and MTU change */
static const struct net_device_ops xcan_netdev_ops = {
	.ndo_open	= xcan_open,
	.ndo_stop	= xcan_close,
	.ndo_start_xmit	= xcan_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};
1543
/* Ethtool support: generic timestamping-capability reporting only */
static const struct ethtool_ops xcan_ethtool_ops = {
	.get_ts_info = ethtool_op_get_ts_info,
};
1547
1548
1549
1550
1551
1552
1553
1554
1555 static int __maybe_unused xcan_suspend(struct device *dev)
1556 {
1557 struct net_device *ndev = dev_get_drvdata(dev);
1558
1559 if (netif_running(ndev)) {
1560 netif_stop_queue(ndev);
1561 netif_device_detach(ndev);
1562 xcan_chip_stop(ndev);
1563 }
1564
1565 return pm_runtime_force_suspend(dev);
1566 }
1567
1568
1569
1570
1571
1572
1573
1574
1575 static int __maybe_unused xcan_resume(struct device *dev)
1576 {
1577 struct net_device *ndev = dev_get_drvdata(dev);
1578 int ret;
1579
1580 ret = pm_runtime_force_resume(dev);
1581 if (ret) {
1582 dev_err(dev, "pm_runtime_force_resume failed on resume\n");
1583 return ret;
1584 }
1585
1586 if (netif_running(ndev)) {
1587 ret = xcan_chip_start(ndev);
1588 if (ret) {
1589 dev_err(dev, "xcan_chip_start failed on resume\n");
1590 return ret;
1591 }
1592
1593 netif_device_attach(ndev);
1594 netif_start_queue(ndev);
1595 }
1596
1597 return 0;
1598 }
1599
1600
1601
1602
1603
1604
1605
1606
/**
 * xcan_runtime_suspend - Runtime suspend method for the driver
 * @dev:	Address of the device structure
 *
 * Gates the bus and CAN core clocks.
 *
 * Return: Always 0.
 */
static int __maybe_unused xcan_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);

	clk_disable_unprepare(priv->bus_clk);
	clk_disable_unprepare(priv->can_clk);

	return 0;
}
1617
1618
1619
1620
1621
1622
1623
1624
1625 static int __maybe_unused xcan_runtime_resume(struct device *dev)
1626 {
1627 struct net_device *ndev = dev_get_drvdata(dev);
1628 struct xcan_priv *priv = netdev_priv(ndev);
1629 int ret;
1630
1631 ret = clk_prepare_enable(priv->bus_clk);
1632 if (ret) {
1633 dev_err(dev, "Cannot enable clock.\n");
1634 return ret;
1635 }
1636 ret = clk_prepare_enable(priv->can_clk);
1637 if (ret) {
1638 dev_err(dev, "Cannot enable clock.\n");
1639 clk_disable_unprepare(priv->bus_clk);
1640 return ret;
1641 }
1642
1643 return 0;
1644 }
1645
/* System-sleep and runtime PM callbacks */
static const struct dev_pm_ops xcan_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};
1650
/* Zynq CANPS core: has the TX-FIFO-empty interrupt, bus clock is "pclk" */
static const struct xcan_devtype_data xcan_zynq_data = {
	.cantype = XZYNQ_CANPS,
	.flags = XCAN_FLAG_TXFEMP,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "pclk",
};
1659
/* AXI CAN core: no extra feature flags, bus clock is "s_axi_aclk" */
static const struct xcan_devtype_data xcan_axi_data = {
	.cantype = XAXI_CAN,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "s_axi_aclk",
};
1667
/* AXI CAN FD 1.0 core: extended filters, RX-match-not-finished interrupt,
 * TX mailboxes and multi-buffer RX FIFO
 */
static const struct xcan_devtype_data xcan_canfd_data = {
	.cantype = XAXI_CANFD,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};
1679
/* AXI CAN FD 2.0 core: same feature set as CAN FD 1.0 plus the
 * XCAN_FLAG_CANFD_2 register-layout flag
 */
static const struct xcan_devtype_data xcan_canfd2_data = {
	.cantype = XAXI_CANFD_2_0,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_CANFD_2 |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd2,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};
1692
1693
/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
	{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
	{ .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712 static int xcan_probe(struct platform_device *pdev)
1713 {
1714 struct net_device *ndev;
1715 struct xcan_priv *priv;
1716 const struct of_device_id *of_id;
1717 const struct xcan_devtype_data *devtype = &xcan_axi_data;
1718 void __iomem *addr;
1719 int ret;
1720 int rx_max, tx_max;
1721 u32 hw_tx_max = 0, hw_rx_max = 0;
1722 const char *hw_tx_max_property;
1723
1724
1725 addr = devm_platform_ioremap_resource(pdev, 0);
1726 if (IS_ERR(addr)) {
1727 ret = PTR_ERR(addr);
1728 goto err;
1729 }
1730
1731 of_id = of_match_device(xcan_of_match, &pdev->dev);
1732 if (of_id && of_id->data)
1733 devtype = of_id->data;
1734
1735 hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
1736 "tx-mailbox-count" : "tx-fifo-depth";
1737
1738 ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
1739 &hw_tx_max);
1740 if (ret < 0) {
1741 dev_err(&pdev->dev, "missing %s property\n",
1742 hw_tx_max_property);
1743 goto err;
1744 }
1745
1746 ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
1747 &hw_rx_max);
1748 if (ret < 0) {
1749 dev_err(&pdev->dev,
1750 "missing rx-fifo-depth property (mailbox mode is not supported)\n");
1751 goto err;
1752 }
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772 if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
1773 (devtype->flags & XCAN_FLAG_TXFEMP))
1774 tx_max = min(hw_tx_max, 2U);
1775 else
1776 tx_max = 1;
1777
1778 rx_max = hw_rx_max;
1779
1780
1781 ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
1782 if (!ndev)
1783 return -ENOMEM;
1784
1785 priv = netdev_priv(ndev);
1786 priv->dev = &pdev->dev;
1787 priv->can.bittiming_const = devtype->bittiming_const;
1788 priv->can.do_set_mode = xcan_do_set_mode;
1789 priv->can.do_get_berr_counter = xcan_get_berr_counter;
1790 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1791 CAN_CTRLMODE_BERR_REPORTING;
1792
1793 if (devtype->cantype == XAXI_CANFD) {
1794 priv->can.data_bittiming_const =
1795 &xcan_data_bittiming_const_canfd;
1796 priv->can.tdc_const = &xcan_tdc_const_canfd;
1797 }
1798
1799 if (devtype->cantype == XAXI_CANFD_2_0) {
1800 priv->can.data_bittiming_const =
1801 &xcan_data_bittiming_const_canfd2;
1802 priv->can.tdc_const = &xcan_tdc_const_canfd2;
1803 }
1804
1805 if (devtype->cantype == XAXI_CANFD ||
1806 devtype->cantype == XAXI_CANFD_2_0) {
1807 priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
1808 CAN_CTRLMODE_TDC_AUTO;
1809 priv->can.do_get_auto_tdcv = xcan_get_auto_tdcv;
1810 }
1811
1812 priv->reg_base = addr;
1813 priv->tx_max = tx_max;
1814 priv->devtype = *devtype;
1815 spin_lock_init(&priv->tx_lock);
1816
1817
1818 ret = platform_get_irq(pdev, 0);
1819 if (ret < 0)
1820 goto err_free;
1821
1822 ndev->irq = ret;
1823
1824 ndev->flags |= IFF_ECHO;
1825
1826 platform_set_drvdata(pdev, ndev);
1827 SET_NETDEV_DEV(ndev, &pdev->dev);
1828 ndev->netdev_ops = &xcan_netdev_ops;
1829 ndev->ethtool_ops = &xcan_ethtool_ops;
1830
1831
1832 priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
1833 if (IS_ERR(priv->can_clk)) {
1834 ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->can_clk),
1835 "device clock not found\n");
1836 goto err_free;
1837 }
1838
1839 priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
1840 if (IS_ERR(priv->bus_clk)) {
1841 ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->bus_clk),
1842 "bus clock not found\n");
1843 goto err_free;
1844 }
1845
1846 priv->write_reg = xcan_write_reg_le;
1847 priv->read_reg = xcan_read_reg_le;
1848
1849 pm_runtime_enable(&pdev->dev);
1850 ret = pm_runtime_get_sync(&pdev->dev);
1851 if (ret < 0) {
1852 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1853 __func__, ret);
1854 goto err_disableclks;
1855 }
1856
1857 if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
1858 priv->write_reg = xcan_write_reg_be;
1859 priv->read_reg = xcan_read_reg_be;
1860 }
1861
1862 priv->can.clock.freq = clk_get_rate(priv->can_clk);
1863
1864 netif_napi_add_weight(ndev, &priv->napi, xcan_rx_poll, rx_max);
1865
1866 ret = register_candev(ndev);
1867 if (ret) {
1868 dev_err(&pdev->dev, "fail to register failed (err=%d)\n", ret);
1869 goto err_disableclks;
1870 }
1871
1872 pm_runtime_put(&pdev->dev);
1873
1874 if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
1875 priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
1876 priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
1877 }
1878
1879 netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
1880 priv->reg_base, ndev->irq, priv->can.clock.freq,
1881 hw_tx_max, priv->tx_max);
1882
1883 return 0;
1884
1885 err_disableclks:
1886 pm_runtime_put(priv->dev);
1887 pm_runtime_disable(&pdev->dev);
1888 err_free:
1889 free_candev(ndev);
1890 err:
1891 return ret;
1892 }
1893
1894
1895
1896
1897
1898
1899
1900
/**
 * xcan_remove - Unbind the driver from the device
 * @pdev:	Handle to the platform device structure
 *
 * Unregisters the CAN net device, disables runtime PM and frees the
 * net device memory.
 *
 * Return: Always 0.
 */
static int xcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	unregister_candev(ndev);
	pm_runtime_disable(&pdev->dev);
	free_candev(ndev);

	return 0;
}
1911
/* Platform driver glue: probe/remove, PM ops and the OF match table */
static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove	= xcan_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table	= xcan_of_match,
	},
};

module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");