0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * at91_can.c - CAN network driver for AT91 SoC CAN controller
0004  *
0005  * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
0006  * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de>
0007  */
0008 
0009 #include <linux/clk.h>
0010 #include <linux/errno.h>
0011 #include <linux/ethtool.h>
0012 #include <linux/if_arp.h>
0013 #include <linux/interrupt.h>
0014 #include <linux/kernel.h>
0015 #include <linux/module.h>
0016 #include <linux/netdevice.h>
0017 #include <linux/of.h>
0018 #include <linux/platform_device.h>
0019 #include <linux/rtnetlink.h>
0020 #include <linux/skbuff.h>
0021 #include <linux/spinlock.h>
0022 #include <linux/string.h>
0023 #include <linux/types.h>
0024 
0025 #include <linux/can/dev.h>
0026 #include <linux/can/error.h>
0027 
0028 #define AT91_MB_MASK(i)     ((1 << (i)) - 1)
0029 
0030 /* Common registers */
0031 enum at91_reg {
0032     AT91_MR     = 0x000,
0033     AT91_IER    = 0x004,
0034     AT91_IDR    = 0x008,
0035     AT91_IMR    = 0x00C,
0036     AT91_SR     = 0x010,
0037     AT91_BR     = 0x014,
0038     AT91_TIM    = 0x018,
0039     AT91_TIMESTP    = 0x01C,
0040     AT91_ECR    = 0x020,
0041     AT91_TCR    = 0x024,
0042     AT91_ACR    = 0x028,
0043 };
0044 
0045 /* Mailbox registers (0 <= i <= 15) */
0046 #define AT91_MMR(i)     ((enum at91_reg)(0x200 + ((i) * 0x20)))
0047 #define AT91_MAM(i)     ((enum at91_reg)(0x204 + ((i) * 0x20)))
0048 #define AT91_MID(i)     ((enum at91_reg)(0x208 + ((i) * 0x20)))
0049 #define AT91_MFID(i)        ((enum at91_reg)(0x20C + ((i) * 0x20)))
0050 #define AT91_MSR(i)     ((enum at91_reg)(0x210 + ((i) * 0x20)))
0051 #define AT91_MDL(i)     ((enum at91_reg)(0x214 + ((i) * 0x20)))
0052 #define AT91_MDH(i)     ((enum at91_reg)(0x218 + ((i) * 0x20)))
0053 #define AT91_MCR(i)     ((enum at91_reg)(0x21C + ((i) * 0x20)))
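/* Each mailbox occupies a 0x20 byte register block starting at offset
 * 0x200, e.g. AT91_MID(3) == 0x208 + 3 * 0x20 == 0x268.
 */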
0054 
0055 /* Register bits */
0056 #define AT91_MR_CANEN       BIT(0)
0057 #define AT91_MR_LPM     BIT(1)
0058 #define AT91_MR_ABM     BIT(2)
0059 #define AT91_MR_OVL     BIT(3)
0060 #define AT91_MR_TEOF        BIT(4)
0061 #define AT91_MR_TTM     BIT(5)
0062 #define AT91_MR_TIMFRZ      BIT(6)
0063 #define AT91_MR_DRPT        BIT(7)
0064 
0065 #define AT91_SR_RBSY        BIT(29)
0066 
0067 #define AT91_MMR_PRIO_SHIFT (16)
0068 
0069 #define AT91_MID_MIDE       BIT(29)
0070 
0071 #define AT91_MSR_MRTR       BIT(20)
0072 #define AT91_MSR_MABT       BIT(22)
0073 #define AT91_MSR_MRDY       BIT(23)
0074 #define AT91_MSR_MMI        BIT(24)
0075 
0076 #define AT91_MCR_MRTR       BIT(20)
0077 #define AT91_MCR_MTCR       BIT(23)
0078 
0079 /* Mailbox Modes */
0080 enum at91_mb_mode {
0081     AT91_MB_MODE_DISABLED   = 0,
0082     AT91_MB_MODE_RX     = 1,
0083     AT91_MB_MODE_RX_OVRWR   = 2,
0084     AT91_MB_MODE_TX     = 3,
0085     AT91_MB_MODE_CONSUMER   = 4,
0086     AT91_MB_MODE_PRODUCER   = 5,
0087 };
0088 
0089 /* Interrupt mask bits */
0090 #define AT91_IRQ_ERRA       BIT(16)
0091 #define AT91_IRQ_WARN       BIT(17)
0092 #define AT91_IRQ_ERRP       BIT(18)
0093 #define AT91_IRQ_BOFF       BIT(19)
0094 #define AT91_IRQ_SLEEP      BIT(20)
0095 #define AT91_IRQ_WAKEUP     BIT(21)
0096 #define AT91_IRQ_TOVF       BIT(22)
0097 #define AT91_IRQ_TSTP       BIT(23)
0098 #define AT91_IRQ_CERR       BIT(24)
0099 #define AT91_IRQ_SERR       BIT(25)
0100 #define AT91_IRQ_AERR       BIT(26)
0101 #define AT91_IRQ_FERR       BIT(27)
0102 #define AT91_IRQ_BERR       BIT(28)
0103 
0104 #define AT91_IRQ_ERR_ALL    (0x1fff0000)
0105 #define AT91_IRQ_ERR_FRAME  (AT91_IRQ_CERR | AT91_IRQ_SERR | \
0106                  AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR)
0107 #define AT91_IRQ_ERR_LINE   (AT91_IRQ_ERRA | AT91_IRQ_WARN | \
0108                  AT91_IRQ_ERRP | AT91_IRQ_BOFF)
0109 
0110 #define AT91_IRQ_ALL        (0x1fffffff)
0111 
0112 enum at91_devtype {
0113     AT91_DEVTYPE_SAM9263,
0114     AT91_DEVTYPE_SAM9X5,
0115 };
0116 
0117 struct at91_devtype_data {
0118     unsigned int rx_first;
0119     unsigned int rx_split;
0120     unsigned int rx_last;
0121     unsigned int tx_shift;
0122     enum at91_devtype type;
0123 };
0124 
0125 struct at91_priv {
0126     struct can_priv can;        /* must be the first member! */
0127     struct napi_struct napi;
0128 
0129     void __iomem *reg_base;
0130 
0131     u32 reg_sr;
0132     unsigned int tx_next;
0133     unsigned int tx_echo;
0134     unsigned int rx_next;
0135     struct at91_devtype_data devtype_data;
0136 
0137     struct clk *clk;
0138     struct at91_can_data *pdata;
0139 
0140     canid_t mb0_id;
0141 };
0142 
0143 static const struct at91_devtype_data at91_at91sam9263_data = {
0144     .rx_first = 1,
0145     .rx_split = 8,
0146     .rx_last = 11,
0147     .tx_shift = 2,
0148     .type = AT91_DEVTYPE_SAM9263,
0149 };
0150 
0151 static const struct at91_devtype_data at91_at91sam9x5_data = {
0152     .rx_first = 0,
0153     .rx_split = 4,
0154     .rx_last = 5,
0155     .tx_shift = 1,
0156     .type = AT91_DEVTYPE_SAM9X5,
0157 };
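/* Resulting mailbox layouts (a summary derived from the devtype data
 * above and the get_mb_*() helpers below, not a full register map):
 *
 *   at91sam9263: mb 0 unused (chip errata), RX mb 1..11
 *                (low group 1..7, high group 8..11), TX mb 12..15
 *   at91sam9x5:  RX mb 0..5 (low group 0..3, high group 4..5),
 *                TX mb 6..7
 */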
0158 
0159 static const struct can_bittiming_const at91_bittiming_const = {
0160     .name       = KBUILD_MODNAME,
0161     .tseg1_min  = 4,
0162     .tseg1_max  = 16,
0163     .tseg2_min  = 2,
0164     .tseg2_max  = 8,
0165     .sjw_max    = 4,
0166     .brp_min    = 2,
0167     .brp_max    = 128,
0168     .brp_inc    = 1,
0169 };
0170 
0171 #define AT91_IS(_model) \
0172 static inline int __maybe_unused at91_is_sam##_model(const struct at91_priv *priv) \
0173 { \
0174     return priv->devtype_data.type == AT91_DEVTYPE_SAM##_model; \
0175 }
0176 
0177 AT91_IS(9263);
0178 AT91_IS(9X5);
0179 
0180 static inline unsigned int get_mb_rx_first(const struct at91_priv *priv)
0181 {
0182     return priv->devtype_data.rx_first;
0183 }
0184 
0185 static inline unsigned int get_mb_rx_last(const struct at91_priv *priv)
0186 {
0187     return priv->devtype_data.rx_last;
0188 }
0189 
0190 static inline unsigned int get_mb_rx_split(const struct at91_priv *priv)
0191 {
0192     return priv->devtype_data.rx_split;
0193 }
0194 
0195 static inline unsigned int get_mb_rx_num(const struct at91_priv *priv)
0196 {
0197     return get_mb_rx_last(priv) - get_mb_rx_first(priv) + 1;
0198 }
0199 
0200 static inline unsigned int get_mb_rx_low_last(const struct at91_priv *priv)
0201 {
0202     return get_mb_rx_split(priv) - 1;
0203 }
0204 
0205 static inline unsigned int get_mb_rx_low_mask(const struct at91_priv *priv)
0206 {
0207     return AT91_MB_MASK(get_mb_rx_split(priv)) &
0208         ~AT91_MB_MASK(get_mb_rx_first(priv));
0209 }
0210 
0211 static inline unsigned int get_mb_tx_shift(const struct at91_priv *priv)
0212 {
0213     return priv->devtype_data.tx_shift;
0214 }
0215 
0216 static inline unsigned int get_mb_tx_num(const struct at91_priv *priv)
0217 {
0218     return 1 << get_mb_tx_shift(priv);
0219 }
0220 
0221 static inline unsigned int get_mb_tx_first(const struct at91_priv *priv)
0222 {
0223     return get_mb_rx_last(priv) + 1;
0224 }
0225 
0226 static inline unsigned int get_mb_tx_last(const struct at91_priv *priv)
0227 {
0228     return get_mb_tx_first(priv) + get_mb_tx_num(priv) - 1;
0229 }
0230 
0231 static inline unsigned int get_next_prio_shift(const struct at91_priv *priv)
0232 {
0233     return get_mb_tx_shift(priv);
0234 }
0235 
0236 static inline unsigned int get_next_prio_mask(const struct at91_priv *priv)
0237 {
0238     return 0xf << get_mb_tx_shift(priv);
0239 }
0240 
0241 static inline unsigned int get_next_mb_mask(const struct at91_priv *priv)
0242 {
0243     return AT91_MB_MASK(get_mb_tx_shift(priv));
0244 }
0245 
0246 static inline unsigned int get_next_mask(const struct at91_priv *priv)
0247 {
0248     return get_next_mb_mask(priv) | get_next_prio_mask(priv);
0249 }
0250 
0251 static inline unsigned int get_irq_mb_rx(const struct at91_priv *priv)
0252 {
0253     return AT91_MB_MASK(get_mb_rx_last(priv) + 1) &
0254         ~AT91_MB_MASK(get_mb_rx_first(priv));
0255 }
0256 
0257 static inline unsigned int get_irq_mb_tx(const struct at91_priv *priv)
0258 {
0259     return AT91_MB_MASK(get_mb_tx_last(priv) + 1) &
0260         ~AT91_MB_MASK(get_mb_tx_first(priv));
0261 }
0262 
0263 static inline unsigned int get_tx_next_mb(const struct at91_priv *priv)
0264 {
0265     return (priv->tx_next & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
0266 }
0267 
0268 static inline unsigned int get_tx_next_prio(const struct at91_priv *priv)
0269 {
0270     return (priv->tx_next >> get_next_prio_shift(priv)) & 0xf;
0271 }
0272 
0273 static inline unsigned int get_tx_echo_mb(const struct at91_priv *priv)
0274 {
0275     return (priv->tx_echo & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
0276 }
0277 
0278 static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
0279 {
0280     return readl_relaxed(priv->reg_base + reg);
0281 }
0282 
0283 static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
0284                   u32 value)
0285 {
0286     writel_relaxed(value, priv->reg_base + reg);
0287 }
0288 
0289 static inline void set_mb_mode_prio(const struct at91_priv *priv,
0290                     unsigned int mb, enum at91_mb_mode mode,
0291                     int prio)
0292 {
0293     at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16));
0294 }
0295 
0296 static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
0297                    enum at91_mb_mode mode)
0298 {
0299     set_mb_mode_prio(priv, mb, mode, 0);
0300 }
0301 
0302 static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
0303 {
0304     u32 reg_mid;
0305 
0306     if (can_id & CAN_EFF_FLAG)
0307         reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
0308     else
0309         reg_mid = (can_id & CAN_SFF_MASK) << 18;
0310 
0311     return reg_mid;
0312 }
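/* Example, derived from the masks used above: the standard frame id
 * 0x123 ends up in bits 28..18, i.e. reg_mid == 0x123 << 18 ==
 * 0x048c0000; an extended frame id is used as-is in bits 28..0 with
 * AT91_MID_MIDE set on top.
 */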
0313 
0314 static void at91_setup_mailboxes(struct net_device *dev)
0315 {
0316     struct at91_priv *priv = netdev_priv(dev);
0317     unsigned int i;
0318     u32 reg_mid;
0319 
0320     /* Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
0321      * mailbox is disabled. The next 11 mailboxes are used as a
0322      * reception FIFO. The last mailbox is configured with
0323      * the overwrite option. The overwrite flag indicates a FIFO
0324      * overflow.
0325      */
0326     reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
0327     for (i = 0; i < get_mb_rx_first(priv); i++) {
0328         set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
0329         at91_write(priv, AT91_MID(i), reg_mid);
0330         at91_write(priv, AT91_MCR(i), 0x0); /* clear dlc */
0331     }
0332 
0333     for (i = get_mb_rx_first(priv); i < get_mb_rx_last(priv); i++)
0334         set_mb_mode(priv, i, AT91_MB_MODE_RX);
0335     set_mb_mode(priv, get_mb_rx_last(priv), AT91_MB_MODE_RX_OVRWR);
0336 
0337     /* reset acceptance mask and id register */
0338     for (i = get_mb_rx_first(priv); i <= get_mb_rx_last(priv); i++) {
0339         at91_write(priv, AT91_MAM(i), 0x0);
0340         at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
0341     }
0342 
0343     /* The remaining mailboxes are used for transmitting. */
0344     for (i = get_mb_tx_first(priv); i <= get_mb_tx_last(priv); i++)
0345         set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
0346 
0347     /* Reset tx and rx helper pointers */
0348     priv->tx_next = priv->tx_echo = 0;
0349     priv->rx_next = get_mb_rx_first(priv);
0350 }
0351 
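/* at91_set_bittiming() below packs the computed bit timing into AT91_BR.
 * The shift amounts are taken from the code; the field names and bit
 * widths are only informational (names per common AT91 datasheet wording,
 * widths inferred from at91_bittiming_const above):
 *
 *   bit  24     SMP    - triple sampling (CAN_CTRLMODE_3_SAMPLES)
 *   bits 22..16 BRP    - baud rate prescaler            (brp - 1)
 *   bits 13..12 SJW    - (re)synchronization jump width (sjw - 1)
 *   bits 10..8  PROPAG - propagation segment            (prop_seg - 1)
 *   bits  6..4  PHASE1 - phase segment 1                (phase_seg1 - 1)
 *   bits  2..0  PHASE2 - phase segment 2                (phase_seg2 - 1)
 */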
0352 static int at91_set_bittiming(struct net_device *dev)
0353 {
0354     const struct at91_priv *priv = netdev_priv(dev);
0355     const struct can_bittiming *bt = &priv->can.bittiming;
0356     u32 reg_br;
0357 
0358     reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 << 24 : 0) |
0359         ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
0360         ((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
0361         ((bt->phase_seg2 - 1) << 0);
0362 
0363     netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br);
0364 
0365     at91_write(priv, AT91_BR, reg_br);
0366 
0367     return 0;
0368 }
0369 
0370 static int at91_get_berr_counter(const struct net_device *dev,
0371                  struct can_berr_counter *bec)
0372 {
0373     const struct at91_priv *priv = netdev_priv(dev);
0374     u32 reg_ecr = at91_read(priv, AT91_ECR);
0375 
0376     bec->rxerr = reg_ecr & 0xff;
0377     bec->txerr = reg_ecr >> 16;
0378 
0379     return 0;
0380 }
0381 
0382 static void at91_chip_start(struct net_device *dev)
0383 {
0384     struct at91_priv *priv = netdev_priv(dev);
0385     u32 reg_mr, reg_ier;
0386 
0387     /* disable interrupts */
0388     at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
0389 
0390     /* disable chip */
0391     reg_mr = at91_read(priv, AT91_MR);
0392     at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
0393 
0394     at91_set_bittiming(dev);
0395     at91_setup_mailboxes(dev);
0396 
0397     /* enable chip */
0398     if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
0399         reg_mr = AT91_MR_CANEN | AT91_MR_ABM;
0400     else
0401         reg_mr = AT91_MR_CANEN;
0402     at91_write(priv, AT91_MR, reg_mr);
0403 
0404     priv->can.state = CAN_STATE_ERROR_ACTIVE;
0405 
0406     /* Enable interrupts */
0407     reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
0408     at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
0409     at91_write(priv, AT91_IER, reg_ier);
0410 }
0411 
0412 static void at91_chip_stop(struct net_device *dev, enum can_state state)
0413 {
0414     struct at91_priv *priv = netdev_priv(dev);
0415     u32 reg_mr;
0416 
0417     /* disable interrupts */
0418     at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
0419 
0420     reg_mr = at91_read(priv, AT91_MR);
0421     at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
0422 
0423     priv->can.state = state;
0424 }
0425 
0426 /* theory of operation:
0427  *
0428  * According to the datasheet priority 0 is the highest priority, 15
0429  * is the lowest. If two mailboxes have the same priority level the
0430  * message of the mailbox with the lowest number is sent first.
0431  *
0432  * We use the first TX mailbox (AT91_MB_TX_FIRST) with prio 0, then
0433  * the next mailbox with prio 0, and so on, until all mailboxes are
0434  * used. Then we start from the beginning with mailbox
0435  * AT91_MB_TX_FIRST, but with prio 1, then AT91_MB_TX_FIRST + 1 with
0436  * prio 1, and so on. When we reach the last mailbox with prio 15, we
0437  * have to stop sending and wait for all messages to be delivered,
0438  * then we start again with mailbox AT91_MB_TX_FIRST and prio 0.
0439  *
0440  * We use priv->tx_next as a counter for the next transmission
0441  * mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits
0442  * encode the mailbox number, the upper 4 bits the mailbox priority:
0443  *
0444  * priv->tx_next = (prio << get_next_prio_shift(priv)) |
0445  *                 (mb - get_mb_tx_first(priv));
0446  *
0447  */
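/* A worked example, using the at91sam9263 numbers from above
 * (tx_shift == 2, so get_mb_tx_first() == 12, get_next_mb_mask() == 0x3
 * and get_next_prio_shift() == 2):
 *
 *   priv->tx_next == 6:
 *     prio = 6 >> 2         = 1
 *     mb   = (6 & 0x3) + 12 = 14
 *
 * i.e. the 7th frame since the last wrap around is written to mailbox 14
 * with transmit priority 1.
 */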
0448 static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
0449 {
0450     struct at91_priv *priv = netdev_priv(dev);
0451     struct can_frame *cf = (struct can_frame *)skb->data;
0452     unsigned int mb, prio;
0453     u32 reg_mid, reg_mcr;
0454 
0455     if (can_dropped_invalid_skb(dev, skb))
0456         return NETDEV_TX_OK;
0457 
0458     mb = get_tx_next_mb(priv);
0459     prio = get_tx_next_prio(priv);
0460 
0461     if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
0462         netif_stop_queue(dev);
0463 
0464         netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
0465         return NETDEV_TX_BUSY;
0466     }
0467     reg_mid = at91_can_id_to_reg_mid(cf->can_id);
0468     reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
0469         (cf->len << 16) | AT91_MCR_MTCR;
0470 
0471     /* disable MB while writing ID (see datasheet) */
0472     set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED);
0473     at91_write(priv, AT91_MID(mb), reg_mid);
0474     set_mb_mode_prio(priv, mb, AT91_MB_MODE_TX, prio);
0475 
0476     at91_write(priv, AT91_MDL(mb), *(u32 *)(cf->data + 0));
0477     at91_write(priv, AT91_MDH(mb), *(u32 *)(cf->data + 4));
0478 
0479     /* This triggers transmission */
0480     at91_write(priv, AT91_MCR(mb), reg_mcr);
0481 
0482     /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
0483     can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv), 0);
0484 
0485     /* We have to stop the queue and deliver all messages in case
0486      * of a prio+mb counter wrap around. This is the case if the
0487      * tx_next prio and mailbox both equal 0.
0488      *
0489      * Also stop the queue if the next buffer is still in use
0490      * (== not ready).
0491      */
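    /* With the devtype numbers above, this wrap around happens every
     * 64 frames on the at91sam9263 (4 mailboxes * 16 priorities) and
     * every 32 frames on the at91sam9x5 (2 mailboxes * 16 priorities).
     */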
0492     priv->tx_next++;
0493     if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
0494           AT91_MSR_MRDY) ||
0495         (priv->tx_next & get_next_mask(priv)) == 0)
0496         netif_stop_queue(dev);
0497 
0498     /* Enable interrupt for this mailbox */
0499     at91_write(priv, AT91_IER, 1 << mb);
0500 
0501     return NETDEV_TX_OK;
0502 }
0503 
0504 /**
0505  * at91_activate_rx_low - activate lower rx mailboxes
0506  * @priv: at91 context
0507  *
0508  * Reenables the lower mailboxes for reception of new CAN messages
0509  */
0510 static inline void at91_activate_rx_low(const struct at91_priv *priv)
0511 {
0512     u32 mask = get_mb_rx_low_mask(priv);
0513 
0514     at91_write(priv, AT91_TCR, mask);
0515 }
0516 
0517 /**
0518  * at91_activate_rx_mb - reactivate a single rx mailbox
0519  * @priv: at91 context
0520  * @mb: mailbox to reactivate
0521  *
0522  * Reenables given mailbox for reception of new CAN messages
0523  */
0524 static inline void at91_activate_rx_mb(const struct at91_priv *priv,
0525                        unsigned int mb)
0526 {
0527     u32 mask = 1 << mb;
0528 
0529     at91_write(priv, AT91_TCR, mask);
0530 }
0531 
0532 /**
0533  * at91_rx_overflow_err - send error frame due to rx overflow
0534  * @dev: net device
0535  */
0536 static void at91_rx_overflow_err(struct net_device *dev)
0537 {
0538     struct net_device_stats *stats = &dev->stats;
0539     struct sk_buff *skb;
0540     struct can_frame *cf;
0541 
0542     netdev_dbg(dev, "RX buffer overflow\n");
0543     stats->rx_over_errors++;
0544     stats->rx_errors++;
0545 
0546     skb = alloc_can_err_skb(dev, &cf);
0547     if (unlikely(!skb))
0548         return;
0549 
0550     cf->can_id |= CAN_ERR_CRTL;
0551     cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
0552 
0553     netif_receive_skb(skb);
0554 }
0555 
0556 /**
0557  * at91_read_mb - read CAN msg from mailbox (lowlevel impl)
0558  * @dev: net device
0559  * @mb: mailbox number to read from
0560  * @cf: can frame where to store message
0561  *
0562  * Reads a CAN message from the given mailbox and stores data into
0563  * given can frame. "mb" and "cf" must be valid.
0564  */
0565 static void at91_read_mb(struct net_device *dev, unsigned int mb,
0566              struct can_frame *cf)
0567 {
0568     const struct at91_priv *priv = netdev_priv(dev);
0569     u32 reg_msr, reg_mid;
0570 
0571     reg_mid = at91_read(priv, AT91_MID(mb));
0572     if (reg_mid & AT91_MID_MIDE)
0573         cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
0574     else
0575         cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;
0576 
0577     reg_msr = at91_read(priv, AT91_MSR(mb));
0578     cf->len = can_cc_dlc2len((reg_msr >> 16) & 0xf);
0579 
0580     if (reg_msr & AT91_MSR_MRTR) {
0581         cf->can_id |= CAN_RTR_FLAG;
0582     } else {
0583         *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
0584         *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
0585     }
0586 
0587     /* allow RX of extended frames */
0588     at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);
0589 
0590     if (unlikely(mb == get_mb_rx_last(priv) && reg_msr & AT91_MSR_MMI))
0591         at91_rx_overflow_err(dev);
0592 }
0593 
0594 /**
0595  * at91_read_msg - read CAN message from mailbox
0596  * @dev: net device
0597  * @mb: mailbox to read from
0598  *
0599  * Reads a CAN message from the given mailbox, puts it into the Linux
0600  * network RX queue and does all housekeeping chores (stats, ...)
0601  */
0602 static void at91_read_msg(struct net_device *dev, unsigned int mb)
0603 {
0604     struct net_device_stats *stats = &dev->stats;
0605     struct can_frame *cf;
0606     struct sk_buff *skb;
0607 
0608     skb = alloc_can_skb(dev, &cf);
0609     if (unlikely(!skb)) {
0610         stats->rx_dropped++;
0611         return;
0612     }
0613 
0614     at91_read_mb(dev, mb, cf);
0615 
0616     stats->rx_packets++;
0617     if (!(cf->can_id & CAN_RTR_FLAG))
0618         stats->rx_bytes += cf->len;
0619 
0620     netif_receive_skb(skb);
0621 }
0622 
0623 /**
0624  * at91_poll_rx - read multiple CAN messages from mailboxes
0625  * @dev: net device
0626  * @quota: max number of packets we're allowed to receive
0627  *
0628  * Theory of Operation:
0629  *
0630  * About 3/4 of the mailboxes (get_mb_rx_first()...get_mb_rx_last())
0631  * on the chip are reserved for RX. We split them into 2 groups. The
0632  * lower group ranges from get_mb_rx_first() to get_mb_rx_low_last().
0633  *
0634  * Like it or not, the chip always saves a received CAN message
0635  * into the first free mailbox it finds (starting with the
0636  * lowest). This makes it very difficult to read the messages in the
0637  * right order from the chip. This is how we work around that problem:
0638  *
0639  * The first message goes into mb nr. 1 and issues an interrupt. All
0640  * rx ints are disabled in the interrupt handler and a napi poll is
0641  * scheduled. We read the mailbox, but do _not_ re-enable the mb (to
0642  * receive another message).
0643  *
0644  *    lower mbxs      upper
0645  *     ____^______    __^__
0646  *    /           \  /     \
0647  * +-+-+-+-+-+-+-+-++-+-+-+-+
0648  * | |x|x|x|x|x|x|x|| | | | |
0649  * +-+-+-+-+-+-+-+-++-+-+-+-+
0650  *  0 0 0 0 0 0  0 0 0 0 1 1  \ mail
0651  *  0 1 2 3 4 5  6 7 8 9 0 1  / box
0652  *  ^
0653  *  |
0654  *   \
0655  *     unused, due to chip bug
0656  *
0657  * The variable priv->rx_next points to the next mailbox to read a
0658  * message from. As long as we're in the lower mailboxes, we just read
0659  * each mailbox but do not re-enable it.
0660  *
0661  * With completion of the last of the lower mailboxes, we re-enable the
0662  * whole first group, but continue to look for filled mailboxes in the
0663  * upper mailboxes. Think of the second group as overflow mailboxes,
0664  * which take CAN messages if the lower group is full. While in the
0665  * upper group we re-enable each mailbox right after reading it, giving
0666  * the chip more room to store messages.
0667  *
0668  * After finishing, we look again in the lower group if we still have
0669  * quota left.
0670  *
0671  */
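/* Implementation note: the loop below treats the status register value
 * as a bitmap with one "message ready" bit per mailbox and uses
 * find_next_bit() to locate the lowest pending RX mailbox at or above
 * priv->rx_next, bounded by get_mb_tx_first() so that the TX mailboxes
 * are never scanned here.
 */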
0672 static int at91_poll_rx(struct net_device *dev, int quota)
0673 {
0674     struct at91_priv *priv = netdev_priv(dev);
0675     u32 reg_sr = at91_read(priv, AT91_SR);
0676     const unsigned long *addr = (unsigned long *)&reg_sr;
0677     unsigned int mb;
0678     int received = 0;
0679 
0680     if (priv->rx_next > get_mb_rx_low_last(priv) &&
0681         reg_sr & get_mb_rx_low_mask(priv))
0682         netdev_info(dev,
0683                 "order of incoming frames cannot be guaranteed\n");
0684 
0685  again:
0686     for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next);
0687          mb < get_mb_tx_first(priv) && quota > 0;
0688          reg_sr = at91_read(priv, AT91_SR),
0689          mb = find_next_bit(addr, get_mb_tx_first(priv), ++priv->rx_next)) {
0690         at91_read_msg(dev, mb);
0691 
0692         /* reactivate mailboxes */
0693         if (mb == get_mb_rx_low_last(priv))
0694             /* all lower mailboxes, if we just finished the last one */
0695             at91_activate_rx_low(priv);
0696         else if (mb > get_mb_rx_low_last(priv))
0697             /* only the mailbox we read */
0698             at91_activate_rx_mb(priv, mb);
0699 
0700         received++;
0701         quota--;
0702     }
0703 
0704     /* upper group completed, look again in lower */
0705     if (priv->rx_next > get_mb_rx_low_last(priv) &&
0706         mb > get_mb_rx_last(priv)) {
0707         priv->rx_next = get_mb_rx_first(priv);
0708         if (quota > 0)
0709             goto again;
0710     }
0711 
0712     return received;
0713 }
0714 
0715 static void at91_poll_err_frame(struct net_device *dev,
0716                 struct can_frame *cf, u32 reg_sr)
0717 {
0718     struct at91_priv *priv = netdev_priv(dev);
0719 
0720     /* CRC error */
0721     if (reg_sr & AT91_IRQ_CERR) {
0722         netdev_dbg(dev, "CERR irq\n");
0723         dev->stats.rx_errors++;
0724         priv->can.can_stats.bus_error++;
0725         cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
0726     }
0727 
0728     /* Stuffing Error */
0729     if (reg_sr & AT91_IRQ_SERR) {
0730         netdev_dbg(dev, "SERR irq\n");
0731         dev->stats.rx_errors++;
0732         priv->can.can_stats.bus_error++;
0733         cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
0734         cf->data[2] |= CAN_ERR_PROT_STUFF;
0735     }
0736 
0737     /* Acknowledgement Error */
0738     if (reg_sr & AT91_IRQ_AERR) {
0739         netdev_dbg(dev, "AERR irq\n");
0740         dev->stats.tx_errors++;
0741         cf->can_id |= CAN_ERR_ACK;
0742     }
0743 
0744     /* Form error */
0745     if (reg_sr & AT91_IRQ_FERR) {
0746         netdev_dbg(dev, "FERR irq\n");
0747         dev->stats.rx_errors++;
0748         priv->can.can_stats.bus_error++;
0749         cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
0750         cf->data[2] |= CAN_ERR_PROT_FORM;
0751     }
0752 
0753     /* Bit Error */
0754     if (reg_sr & AT91_IRQ_BERR) {
0755         netdev_dbg(dev, "BERR irq\n");
0756         dev->stats.tx_errors++;
0757         priv->can.can_stats.bus_error++;
0758         cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
0759         cf->data[2] |= CAN_ERR_PROT_BIT;
0760     }
0761 }
0762 
0763 static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
0764 {
0765     struct sk_buff *skb;
0766     struct can_frame *cf;
0767 
0768     if (quota == 0)
0769         return 0;
0770 
0771     skb = alloc_can_err_skb(dev, &cf);
0772     if (unlikely(!skb))
0773         return 0;
0774 
0775     at91_poll_err_frame(dev, cf, reg_sr);
0776 
0777     netif_receive_skb(skb);
0778 
0779     return 1;
0780 }
0781 
0782 static int at91_poll(struct napi_struct *napi, int quota)
0783 {
0784     struct net_device *dev = napi->dev;
0785     const struct at91_priv *priv = netdev_priv(dev);
0786     u32 reg_sr = at91_read(priv, AT91_SR);
0787     int work_done = 0;
0788 
0789     if (reg_sr & get_irq_mb_rx(priv))
0790         work_done += at91_poll_rx(dev, quota - work_done);
0791 
0792     /* The error bits are clear on read,
0793      * so use saved value from irq handler.
0794      */
0795     reg_sr |= priv->reg_sr;
0796     if (reg_sr & AT91_IRQ_ERR_FRAME)
0797         work_done += at91_poll_err(dev, quota - work_done, reg_sr);
0798 
0799     if (work_done < quota) {
0800         /* enable IRQs for frame errors and all mailboxes >= rx_next */
0801         u32 reg_ier = AT91_IRQ_ERR_FRAME;
0802 
0803         reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);
0804 
0805         napi_complete_done(napi, work_done);
0806         at91_write(priv, AT91_IER, reg_ier);
0807     }
0808 
0809     return work_done;
0810 }
0811 
0812 /* theory of operation:
0813  *
0814  * priv->tx_echo holds the number of the oldest can_frame put for
0815  * transmission into the hardware, but not yet ACKed by the CAN tx
0816  * complete IRQ.
0817  *
0818  * We iterate from priv->tx_echo to priv->tx_next and check whether each
0819  * packet has been transmitted; if so, we echo it back to the CAN framework.
0820  * If we discover a not yet transmitted packet, we stop looking for more.
0821  *
0822  */
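/* For example, with tx_echo == 5 and tx_next == 8 on the at91sam9263
 * (get_mb_tx_first() == 12, get_next_mb_mask() == 0x3), the loop below
 * walks tx_echo values 5, 6 and 7, which map to mailboxes 13, 14 and 15.
 */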
0823 static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
0824 {
0825     struct at91_priv *priv = netdev_priv(dev);
0826     u32 reg_msr;
0827     unsigned int mb;
0828 
0829     /* masking of reg_sr not needed, already done by at91_irq */
0830 
0831     for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
0832         mb = get_tx_echo_mb(priv);
0833 
0834         /* no event in mailbox? */
0835         if (!(reg_sr & (1 << mb)))
0836             break;
0837 
0838         /* Disable irq for this TX mailbox */
0839         at91_write(priv, AT91_IDR, 1 << mb);
0840 
0841         /* only echo if mailbox signals us a transfer
0842          * complete (MSR_MRDY). Otherwise it's a transfer
0843          * abort. "can_bus_off()" takes care of the skbs
0844          * parked in the echo queue.
0845          */
0846         reg_msr = at91_read(priv, AT91_MSR(mb));
0847         if (likely(reg_msr & AT91_MSR_MRDY &&
0848                ~reg_msr & AT91_MSR_MABT)) {
0849             /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
0850             dev->stats.tx_bytes +=
0851                 can_get_echo_skb(dev,
0852                          mb - get_mb_tx_first(priv),
0853                          NULL);
0854             dev->stats.tx_packets++;
0855         }
0856     }
0857 
0858     /* Restart the queue if we don't have a wrap around, but also
0859      * restart if we get a TX int for the last can frame directly before a
0860      * wrap around.
0861      */
0862     if ((priv->tx_next & get_next_mask(priv)) != 0 ||
0863         (priv->tx_echo & get_next_mask(priv)) == 0)
0864         netif_wake_queue(dev);
0865 }
0866 
0867 static void at91_irq_err_state(struct net_device *dev,
0868                    struct can_frame *cf, enum can_state new_state)
0869 {
0870     struct at91_priv *priv = netdev_priv(dev);
0871     u32 reg_idr = 0, reg_ier = 0;
0872     struct can_berr_counter bec;
0873 
0874     at91_get_berr_counter(dev, &bec);
0875 
0876     switch (priv->can.state) {
0877     case CAN_STATE_ERROR_ACTIVE:
0878         /* from: ERROR_ACTIVE
0879          * to  : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
0880          * =>  : there was a warning int
0881          */
0882         if (new_state >= CAN_STATE_ERROR_WARNING &&
0883             new_state <= CAN_STATE_BUS_OFF) {
0884             netdev_dbg(dev, "Error Warning IRQ\n");
0885             priv->can.can_stats.error_warning++;
0886 
0887             cf->can_id |= CAN_ERR_CRTL;
0888             cf->data[1] = (bec.txerr > bec.rxerr) ?
0889                 CAN_ERR_CRTL_TX_WARNING :
0890                 CAN_ERR_CRTL_RX_WARNING;
0891         }
0892         fallthrough;
0893     case CAN_STATE_ERROR_WARNING:
0894         /* from: ERROR_ACTIVE, ERROR_WARNING
0895          * to  : ERROR_PASSIVE, BUS_OFF
0896          * =>  : error passive int
0897          */
0898         if (new_state >= CAN_STATE_ERROR_PASSIVE &&
0899             new_state <= CAN_STATE_BUS_OFF) {
0900             netdev_dbg(dev, "Error Passive IRQ\n");
0901             priv->can.can_stats.error_passive++;
0902 
0903             cf->can_id |= CAN_ERR_CRTL;
0904             cf->data[1] = (bec.txerr > bec.rxerr) ?
0905                 CAN_ERR_CRTL_TX_PASSIVE :
0906                 CAN_ERR_CRTL_RX_PASSIVE;
0907         }
0908         break;
0909     case CAN_STATE_BUS_OFF:
0910         /* from: BUS_OFF
0911          * to  : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE
0912          */
0913         if (new_state <= CAN_STATE_ERROR_PASSIVE) {
0914             cf->can_id |= CAN_ERR_RESTARTED;
0915 
0916             netdev_dbg(dev, "restarted\n");
0917             priv->can.can_stats.restarts++;
0918 
0919             netif_carrier_on(dev);
0920             netif_wake_queue(dev);
0921         }
0922         break;
0923     default:
0924         break;
0925     }
0926 
0927     /* process state changes depending on the new state */
0928     switch (new_state) {
0929     case CAN_STATE_ERROR_ACTIVE:
0930         /* actually we want to enable AT91_IRQ_WARN here, but
0931          * it screws up the system under certain
0932          * circumstances, so just enable AT91_IRQ_ERRP, thus
0933          * the "fallthrough"
0934          */
0935         netdev_dbg(dev, "Error Active\n");
0936         cf->can_id |= CAN_ERR_PROT;
0937         cf->data[2] = CAN_ERR_PROT_ACTIVE;
0938         fallthrough;
0939     case CAN_STATE_ERROR_WARNING:
0940         reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF;
0941         reg_ier = AT91_IRQ_ERRP;
0942         break;
0943     case CAN_STATE_ERROR_PASSIVE:
0944         reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP;
0945         reg_ier = AT91_IRQ_BOFF;
0946         break;
0947     case CAN_STATE_BUS_OFF:
0948         reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP |
0949             AT91_IRQ_WARN | AT91_IRQ_BOFF;
0950         reg_ier = 0;
0951 
0952         cf->can_id |= CAN_ERR_BUSOFF;
0953 
0954         netdev_dbg(dev, "bus-off\n");
0955         netif_carrier_off(dev);
0956         priv->can.can_stats.bus_off++;
0957 
0958         /* turn off chip, if restart is disabled */
0959         if (!priv->can.restart_ms) {
0960             at91_chip_stop(dev, CAN_STATE_BUS_OFF);
0961             return;
0962         }
0963         break;
0964     default:
0965         break;
0966     }
0967 
0968     at91_write(priv, AT91_IDR, reg_idr);
0969     at91_write(priv, AT91_IER, reg_ier);
0970 }
0971 
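/* The thresholds below match the usual CAN error counter conventions:
 * both counters below 96 means error active, 96..127 error warning,
 * 128..255 error passive, and bus off once a counter reaches 256.
 */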
0972 static int at91_get_state_by_bec(const struct net_device *dev,
0973                  enum can_state *state)
0974 {
0975     struct can_berr_counter bec;
0976     int err;
0977 
0978     err = at91_get_berr_counter(dev, &bec);
0979     if (err)
0980         return err;
0981 
0982     if (bec.txerr < 96 && bec.rxerr < 96)
0983         *state = CAN_STATE_ERROR_ACTIVE;
0984     else if (bec.txerr < 128 && bec.rxerr < 128)
0985         *state = CAN_STATE_ERROR_WARNING;
0986     else if (bec.txerr < 256 && bec.rxerr < 256)
0987         *state = CAN_STATE_ERROR_PASSIVE;
0988     else
0989         *state = CAN_STATE_BUS_OFF;
0990 
0991     return 0;
0992 }
0993 
0994 static void at91_irq_err(struct net_device *dev)
0995 {
0996     struct at91_priv *priv = netdev_priv(dev);
0997     struct sk_buff *skb;
0998     struct can_frame *cf;
0999     enum can_state new_state;
1000     u32 reg_sr;
1001     int err;
1002 
1003     if (at91_is_sam9263(priv)) {
1004         reg_sr = at91_read(priv, AT91_SR);
1005 
1006         /* we need to look at the unmasked reg_sr */
1007         if (unlikely(reg_sr & AT91_IRQ_BOFF)) {
1008             new_state = CAN_STATE_BUS_OFF;
1009         } else if (unlikely(reg_sr & AT91_IRQ_ERRP)) {
1010             new_state = CAN_STATE_ERROR_PASSIVE;
1011         } else if (unlikely(reg_sr & AT91_IRQ_WARN)) {
1012             new_state = CAN_STATE_ERROR_WARNING;
1013         } else if (likely(reg_sr & AT91_IRQ_ERRA)) {
1014             new_state = CAN_STATE_ERROR_ACTIVE;
1015         } else {
1016             netdev_err(dev, "BUG! hardware in undefined state\n");
1017             return;
1018         }
1019     } else {
1020         err = at91_get_state_by_bec(dev, &new_state);
1021         if (err)
1022             return;
1023     }
1024 
1025     /* state hasn't changed */
1026     if (likely(new_state == priv->can.state))
1027         return;
1028 
1029     skb = alloc_can_err_skb(dev, &cf);
1030     if (unlikely(!skb))
1031         return;
1032 
1033     at91_irq_err_state(dev, cf, new_state);
1034 
1035     netif_rx(skb);
1036 
1037     priv->can.state = new_state;
1038 }
1039 
1040 /* interrupt handler
1041  */
1042 static irqreturn_t at91_irq(int irq, void *dev_id)
1043 {
1044     struct net_device *dev = dev_id;
1045     struct at91_priv *priv = netdev_priv(dev);
1046     irqreturn_t handled = IRQ_NONE;
1047     u32 reg_sr, reg_imr;
1048 
1049     reg_sr = at91_read(priv, AT91_SR);
1050     reg_imr = at91_read(priv, AT91_IMR);
1051 
1052     /* Ignore masked interrupts */
1053     reg_sr &= reg_imr;
1054     if (!reg_sr)
1055         goto exit;
1056 
1057     handled = IRQ_HANDLED;
1058 
1059     /* Receive or error interrupt? -> napi */
1060     if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) {
1061         /* The error bits are clear on read,
1062          * save for later use.
1063          */
1064         priv->reg_sr = reg_sr;
1065         at91_write(priv, AT91_IDR,
1066                get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME);
1067         napi_schedule(&priv->napi);
1068     }
1069 
1070     /* Transmission complete interrupt */
1071     if (reg_sr & get_irq_mb_tx(priv))
1072         at91_irq_tx(dev, reg_sr);
1073 
1074     at91_irq_err(dev);
1075 
1076  exit:
1077     return handled;
1078 }
1079 
1080 static int at91_open(struct net_device *dev)
1081 {
1082     struct at91_priv *priv = netdev_priv(dev);
1083     int err;
1084 
1085     err = clk_prepare_enable(priv->clk);
1086     if (err)
1087         return err;
1088 
1089     /* check or determine and set bittime */
1090     err = open_candev(dev);
1091     if (err)
1092         goto out;
1093 
1094     /* register interrupt handler */
1095     if (request_irq(dev->irq, at91_irq, IRQF_SHARED,
1096             dev->name, dev)) {
1097         err = -EAGAIN;
1098         goto out_close;
1099     }
1100 
1101     /* start chip and queuing */
1102     at91_chip_start(dev);
1103     napi_enable(&priv->napi);
1104     netif_start_queue(dev);
1105 
1106     return 0;
1107 
1108  out_close:
1109     close_candev(dev);
1110  out:
1111     clk_disable_unprepare(priv->clk);
1112 
1113     return err;
1114 }
1115 
1116 /* stop CAN bus activity
1117  */
1118 static int at91_close(struct net_device *dev)
1119 {
1120     struct at91_priv *priv = netdev_priv(dev);
1121 
1122     netif_stop_queue(dev);
1123     napi_disable(&priv->napi);
1124     at91_chip_stop(dev, CAN_STATE_STOPPED);
1125 
1126     free_irq(dev->irq, dev);
1127     clk_disable_unprepare(priv->clk);
1128 
1129     close_candev(dev);
1130 
1131     return 0;
1132 }
1133 
1134 static int at91_set_mode(struct net_device *dev, enum can_mode mode)
1135 {
1136     switch (mode) {
1137     case CAN_MODE_START:
1138         at91_chip_start(dev);
1139         netif_wake_queue(dev);
1140         break;
1141 
1142     default:
1143         return -EOPNOTSUPP;
1144     }
1145 
1146     return 0;
1147 }
1148 
1149 static const struct net_device_ops at91_netdev_ops = {
1150     .ndo_open   = at91_open,
1151     .ndo_stop   = at91_close,
1152     .ndo_start_xmit = at91_start_xmit,
1153     .ndo_change_mtu = can_change_mtu,
1154 };
1155 
1156 static const struct ethtool_ops at91_ethtool_ops = {
1157     .get_ts_info = ethtool_op_get_ts_info,
1158 };
1159 
1160 static ssize_t mb0_id_show(struct device *dev,
1161                struct device_attribute *attr, char *buf)
1162 {
1163     struct at91_priv *priv = netdev_priv(to_net_dev(dev));
1164 
1165     if (priv->mb0_id & CAN_EFF_FLAG)
1166         return sysfs_emit(buf, "0x%08x\n", priv->mb0_id);
1167     else
1168         return sysfs_emit(buf, "0x%03x\n", priv->mb0_id);
1169 }
1170 
1171 static ssize_t mb0_id_store(struct device *dev,
1172                 struct device_attribute *attr,
1173                 const char *buf, size_t count)
1174 {
1175     struct net_device *ndev = to_net_dev(dev);
1176     struct at91_priv *priv = netdev_priv(ndev);
1177     unsigned long can_id;
1178     ssize_t ret;
1179     int err;
1180 
1181     rtnl_lock();
1182 
1183     if (ndev->flags & IFF_UP) {
1184         ret = -EBUSY;
1185         goto out;
1186     }
1187 
1188     err = kstrtoul(buf, 0, &can_id);
1189     if (err) {
1190         ret = err;
1191         goto out;
1192     }
1193 
1194     if (can_id & CAN_EFF_FLAG)
1195         can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
1196     else
1197         can_id &= CAN_SFF_MASK;
1198 
1199     priv->mb0_id = can_id;
1200     ret = count;
1201 
1202  out:
1203     rtnl_unlock();
1204     return ret;
1205 }
1206 
1207 static DEVICE_ATTR_RW(mb0_id);
1208 
1209 static struct attribute *at91_sysfs_attrs[] = {
1210     &dev_attr_mb0_id.attr,
1211     NULL,
1212 };
1213 
1214 static const struct attribute_group at91_sysfs_attr_group = {
1215     .attrs = at91_sysfs_attrs,
1216 };
1217 
1218 #if defined(CONFIG_OF)
1219 static const struct of_device_id at91_can_dt_ids[] = {
1220     {
1221         .compatible = "atmel,at91sam9x5-can",
1222         .data = &at91_at91sam9x5_data,
1223     }, {
1224         .compatible = "atmel,at91sam9263-can",
1225         .data = &at91_at91sam9263_data,
1226     }, {
1227         /* sentinel */
1228     }
1229 };
1230 MODULE_DEVICE_TABLE(of, at91_can_dt_ids);
1231 #endif
1232 
1233 static const struct at91_devtype_data *at91_can_get_driver_data(struct platform_device *pdev)
1234 {
1235     if (pdev->dev.of_node) {
1236         const struct of_device_id *match;
1237 
1238         match = of_match_node(at91_can_dt_ids, pdev->dev.of_node);
1239         if (!match) {
1240             dev_err(&pdev->dev, "no matching node found in dtb\n");
1241             return NULL;
1242         }
1243         return (const struct at91_devtype_data *)match->data;
1244     }
1245     return (const struct at91_devtype_data *)
1246         platform_get_device_id(pdev)->driver_data;
1247 }
1248 
1249 static int at91_can_probe(struct platform_device *pdev)
1250 {
1251     const struct at91_devtype_data *devtype_data;
1252     struct net_device *dev;
1253     struct at91_priv *priv;
1254     struct resource *res;
1255     struct clk *clk;
1256     void __iomem *addr;
1257     int err, irq;
1258 
1259     devtype_data = at91_can_get_driver_data(pdev);
1260     if (!devtype_data) {
1261         dev_err(&pdev->dev, "no driver data\n");
1262         err = -ENODEV;
1263         goto exit;
1264     }
1265 
1266     clk = clk_get(&pdev->dev, "can_clk");
1267     if (IS_ERR(clk)) {
1268         dev_err(&pdev->dev, "no clock defined\n");
1269         err = -ENODEV;
1270         goto exit;
1271     }
1272 
1273     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1274     irq = platform_get_irq(pdev, 0);
1275     if (!res || irq <= 0) {
1276         err = -ENODEV;
1277         goto exit_put;
1278     }
1279 
1280     if (!request_mem_region(res->start,
1281                 resource_size(res),
1282                 pdev->name)) {
1283         err = -EBUSY;
1284         goto exit_put;
1285     }
1286 
1287     addr = ioremap(res->start, resource_size(res));
1288     if (!addr) {
1289         err = -ENOMEM;
1290         goto exit_release;
1291     }
1292 
1293     dev = alloc_candev(sizeof(struct at91_priv),
1294                1 << devtype_data->tx_shift);
1295     if (!dev) {
1296         err = -ENOMEM;
1297         goto exit_iounmap;
1298     }
1299 
1300     dev->netdev_ops = &at91_netdev_ops;
1301     dev->ethtool_ops = &at91_ethtool_ops;
1302     dev->irq = irq;
1303     dev->flags |= IFF_ECHO;
1304 
1305     priv = netdev_priv(dev);
1306     priv->can.clock.freq = clk_get_rate(clk);
1307     priv->can.bittiming_const = &at91_bittiming_const;
1308     priv->can.do_set_mode = at91_set_mode;
1309     priv->can.do_get_berr_counter = at91_get_berr_counter;
1310     priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
1311         CAN_CTRLMODE_LISTENONLY;
1312     priv->reg_base = addr;
1313     priv->devtype_data = *devtype_data;
1314     priv->clk = clk;
1315     priv->pdata = dev_get_platdata(&pdev->dev);
1316     priv->mb0_id = 0x7ff;
1317 
1318     netif_napi_add_weight(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
1319 
1320     if (at91_is_sam9263(priv))
1321         dev->sysfs_groups[0] = &at91_sysfs_attr_group;
1322 
1323     platform_set_drvdata(pdev, dev);
1324     SET_NETDEV_DEV(dev, &pdev->dev);
1325 
1326     err = register_candev(dev);
1327     if (err) {
1328         dev_err(&pdev->dev, "registering netdev failed\n");
1329         goto exit_free;
1330     }
1331 
1332     dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
1333          priv->reg_base, dev->irq);
1334 
1335     return 0;
1336 
1337  exit_free:
1338     free_candev(dev);
1339  exit_iounmap:
1340     iounmap(addr);
1341  exit_release:
1342     release_mem_region(res->start, resource_size(res));
1343  exit_put:
1344     clk_put(clk);
1345  exit:
1346     return err;
1347 }
1348 
1349 static int at91_can_remove(struct platform_device *pdev)
1350 {
1351     struct net_device *dev = platform_get_drvdata(pdev);
1352     struct at91_priv *priv = netdev_priv(dev);
1353     struct resource *res;
1354 
1355     unregister_netdev(dev);
1356 
1357     iounmap(priv->reg_base);
1358 
1359     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1360     release_mem_region(res->start, resource_size(res));
1361 
1362     clk_put(priv->clk);
1363 
1364     free_candev(dev);
1365 
1366     return 0;
1367 }
1368 
1369 static const struct platform_device_id at91_can_id_table[] = {
1370     {
1371         .name = "at91sam9x5_can",
1372         .driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
1373     }, {
1374         .name = "at91_can",
1375         .driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
1376     }, {
1377         /* sentinel */
1378     }
1379 };
1380 MODULE_DEVICE_TABLE(platform, at91_can_id_table);
1381 
1382 static struct platform_driver at91_can_driver = {
1383     .probe = at91_can_probe,
1384     .remove = at91_can_remove,
1385     .driver = {
1386         .name = KBUILD_MODNAME,
1387         .of_match_table = of_match_ptr(at91_can_dt_ids),
1388     },
1389     .id_table = at91_can_id_table,
1390 };
1391 
1392 module_platform_driver(at91_can_driver);
1393 
1394 MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
1395 MODULE_LICENSE("GPL v2");
1396 MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");