// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014      Protonic Holland,
 *                         David Jander
 * Copyright (C) 2014-2021 Pengutronix,
 *                         Marc Kleine-Budde <kernel@pengutronix.de>
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

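/* Per-skb control block, stored in skb->cb: carries the hardware RX
 * timestamp that is used to sort frames before they are handed to NAPI.
 */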
struct can_rx_offload_cb {
    u32 timestamp;
};

static inline struct can_rx_offload_cb *
can_rx_offload_get_cb(struct sk_buff *skb)
{
    BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

    return (struct can_rx_offload_cb *)skb->cb;
}

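/* Return true if mailbox index @a comes at or before @b in the order in
 * which the controller fills its mailboxes (ascending if offload->inc is
 * set, descending otherwise).
 */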
static inline bool
can_rx_offload_le(struct can_rx_offload *offload,
          unsigned int a, unsigned int b)
{
    if (offload->inc)
        return a <= b;
    else
        return a >= b;
}

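/* Advance @val by one mailbox in the controller's scan direction. The
 * value before the update is returned (post-increment/decrement), but
 * callers in this file only rely on the side effect.
 */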
static inline unsigned int
can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
    if (offload->inc)
        return (*val)++;
    else
        return (*val)--;
}

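/* NAPI poll handler: deliver up to @quota skbs from offload->skb_queue to
 * the network stack. Error frames (CAN_ERR_FLAG) are passed up but not
 * counted in the rx statistics; RTR frames count as packets without data
 * bytes.
 */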
static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
    struct can_rx_offload *offload = container_of(napi,
                              struct can_rx_offload,
                              napi);
    struct net_device *dev = offload->dev;
    struct net_device_stats *stats = &dev->stats;
    struct sk_buff *skb;
    int work_done = 0;

    while ((work_done < quota) &&
           (skb = skb_dequeue(&offload->skb_queue))) {
        struct can_frame *cf = (struct can_frame *)skb->data;

        work_done++;
        if (!(cf->can_id & CAN_ERR_FLAG)) {
            stats->rx_packets++;
            if (!(cf->can_id & CAN_RTR_FLAG))
                stats->rx_bytes += cf->len;
        }
        netif_receive_skb(skb);
    }

    if (work_done < quota) {
        napi_complete_done(napi, work_done);

        /* Check if there was another interrupt */
        if (!skb_queue_empty(&offload->skb_queue))
            napi_reschedule(&offload->napi);
    }

    return work_done;
}

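/* Insert @new into @head so that the queue stays sorted by @compare
 * (oldest timestamp at the head). The queue is walked from the tail, so a
 * frame that arrives already in order needs only one comparison. The
 * unlocked __skb_queue_*() helpers are used; the caller must serialize
 * access to @head.
 */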
static inline void
__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
             int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
    struct sk_buff *pos, *insert = NULL;

    skb_queue_reverse_walk(head, pos) {
        const struct can_rx_offload_cb *cb_pos, *cb_new;

        cb_pos = can_rx_offload_get_cb(pos);
        cb_new = can_rx_offload_get_cb(new);

        netdev_dbg(new->dev,
               "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
               __func__,
               cb_pos->timestamp, cb_new->timestamp,
               cb_new->timestamp - cb_pos->timestamp,
               skb_queue_len(head));

        if (compare(pos, new) < 0)
            continue;
        insert = pos;
        break;
    }
    if (!insert)
        __skb_queue_head(head, new);
    else
        __skb_queue_after(head, insert, new);
}

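/* Ordering helper for __skb_queue_add_sort(): returns a value > 0 if @b
 * was received after @a. For example, with cb_b->timestamp = 2 and
 * cb_a->timestamp = 0xfffffffe the u32 subtraction wraps to 4, which is
 * positive as an int, so the ordering stays correct across the timestamp
 * counter overflow.
 */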
static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
    const struct can_rx_offload_cb *cb_a, *cb_b;

    cb_a = can_rx_offload_get_cb(a);
    cb_b = can_rx_offload_get_cb(b);

    /* Subtract two u32 and return result as int, to keep
     * difference steady around the u32 overflow.
     */
    return cb_b->timestamp - cb_a->timestamp;
}

/**
 * can_rx_offload_offload_one() - Read one CAN frame from HW
 * @offload: pointer to rx_offload context
 * @n: number of the mailbox to read
 *
 * The task of this function is to read a CAN frame from mailbox @n
 * from the device and return the mailbox's content as a struct
 * sk_buff.
 *
 * If the struct can_rx_offload::skb_queue exceeds the maximal queue
 * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
 * allocated, the mailbox contents are discarded by reading them into an
 * overflow buffer. This way the mailbox is marked as free by the
 * driver.
 *
 * Return: A pointer to skb containing the CAN frame on success.
 *
 *         NULL if the mailbox @n is empty.
 *
 *         ERR_PTR() in case of an error
 */
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
    struct sk_buff *skb;
    struct can_rx_offload_cb *cb;
    bool drop = false;
    u32 timestamp;

    /* If queue is full drop frame */
    if (unlikely(skb_queue_len(&offload->skb_queue) >
             offload->skb_queue_len_max))
        drop = true;

    skb = offload->mailbox_read(offload, n, &timestamp, drop);
    /* Mailbox was empty. */
    if (unlikely(!skb))
        return NULL;

    /* There was a problem reading the mailbox, propagate
     * error value.
     */
    if (IS_ERR(skb)) {
        offload->dev->stats.rx_dropped++;
        offload->dev->stats.rx_fifo_errors++;

        return skb;
    }

    /* Mailbox was read. */
    cb = can_rx_offload_get_cb(skb);
    cb->timestamp = timestamp;

    return skb;
}

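/* Read all mailboxes whose bit is set in @pending, in the controller's
 * scan order, and add the resulting skbs to the interrupt-private
 * skb_irq_queue sorted by timestamp. Returns the number of frames queued.
 * Typically called from the IRQ handler and followed by
 * can_rx_offload_irq_finish() or can_rx_offload_threaded_irq_finish().
 */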
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
                     u64 pending)
{
    unsigned int i;
    int received = 0;

    for (i = offload->mb_first;
         can_rx_offload_le(offload, i, offload->mb_last);
         can_rx_offload_inc(offload, &i)) {
        struct sk_buff *skb;

        if (!(pending & BIT_ULL(i)))
            continue;

        skb = can_rx_offload_offload_one(offload, i);
        if (IS_ERR_OR_NULL(skb))
            continue;

        __skb_queue_add_sort(&offload->skb_irq_queue, skb,
                     can_rx_offload_compare);
        received++;
    }

    return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);

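/* Drain the controller's RX FIFO by reading mailbox 0 until
 * mailbox_read() reports it empty, appending the skbs to skb_irq_queue
 * in FIFO order. Returns the number of frames queued.
 */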
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
    struct sk_buff *skb;
    int received = 0;

    while (1) {
        skb = can_rx_offload_offload_one(offload, 0);
        if (IS_ERR(skb))
            continue;
        if (!skb)
            break;

        __skb_queue_tail(&offload->skb_irq_queue, skb);
        received++;
    }

    return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);

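/* Queue a driver-generated skb (e.g. an error frame or a TX echo skb)
 * with a hardware timestamp so that it is delivered in order relative to
 * the offloaded RX frames. The skb is dropped and -ENOBUFS returned if
 * the queue limit is exceeded.
 */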
int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
                struct sk_buff *skb, u32 timestamp)
{
    struct can_rx_offload_cb *cb;

    if (skb_queue_len(&offload->skb_queue) >
        offload->skb_queue_len_max) {
        dev_kfree_skb_any(skb);
        return -ENOBUFS;
    }

    cb = can_rx_offload_get_cb(skb);
    cb->timestamp = timestamp;

    __skb_queue_add_sort(&offload->skb_irq_queue, skb,
                 can_rx_offload_compare);

    return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_timestamp);

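/* Called on TX completion: fetch the echo skb for slot @idx, queue it
 * with @timestamp for in-order delivery and return its data length
 * (0 if no echo skb was stored for that slot).
 */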
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
                     unsigned int idx, u32 timestamp,
                     unsigned int *frame_len_ptr)
{
    struct net_device *dev = offload->dev;
    struct net_device_stats *stats = &dev->stats;
    struct sk_buff *skb;
    u8 len;
    int err;

    skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
    if (!skb)
        return 0;

    err = can_rx_offload_queue_timestamp(offload, skb, timestamp);
    if (err) {
        stats->rx_errors++;
        stats->tx_fifo_errors++;
    }

    return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);

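/* Queue an skb for delivery without timestamp ordering; it is appended
 * at the tail. The skb is dropped and -ENOBUFS returned if the queue
 * limit is exceeded.
 */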
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
                  struct sk_buff *skb)
{
    if (skb_queue_len(&offload->skb_queue) >
        offload->skb_queue_len_max) {
        dev_kfree_skb_any(skb);
        return -ENOBUFS;
    }

    __skb_queue_tail(&offload->skb_irq_queue, skb);

    return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);

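/* To be called at the end of the hard interrupt handler: splice the
 * frames collected on skb_irq_queue onto the NAPI-visible skb_queue and
 * schedule the NAPI poll.
 */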
void can_rx_offload_irq_finish(struct can_rx_offload *offload)
{
    unsigned long flags;
    int queue_len;

    if (skb_queue_empty_lockless(&offload->skb_irq_queue))
        return;

    spin_lock_irqsave(&offload->skb_queue.lock, flags);
    skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
    spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

    queue_len = skb_queue_len(&offload->skb_queue);
    if (queue_len > offload->skb_queue_len_max / 8)
        netdev_dbg(offload->dev, "%s: queue_len=%d\n",
               __func__, queue_len);

    napi_schedule(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_finish);

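/* Same as can_rx_offload_irq_finish(), but for threaded interrupt
 * handlers: napi_schedule() is wrapped in local_bh_disable() /
 * local_bh_enable() so the RX softirq can run when bottom halves are
 * re-enabled instead of being delayed.
 */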
void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload)
{
    unsigned long flags;
    int queue_len;

    if (skb_queue_empty_lockless(&offload->skb_irq_queue))
        return;

    spin_lock_irqsave(&offload->skb_queue.lock, flags);
    skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
    spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

    queue_len = skb_queue_len(&offload->skb_queue);
    if (queue_len > offload->skb_queue_len_max / 8)
        netdev_dbg(offload->dev, "%s: queue_len=%d\n",
               __func__, queue_len);

    local_bh_disable();
    napi_schedule(&offload->napi);
    local_bh_enable();
}
EXPORT_SYMBOL_GPL(can_rx_offload_threaded_irq_finish);

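/* Common setup for all offload modes: initialize both queues, derive the
 * queue length limit from the NAPI weight and register the NAPI poll
 * function.
 */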
static int can_rx_offload_init_queue(struct net_device *dev,
                     struct can_rx_offload *offload,
                     unsigned int weight)
{
    offload->dev = dev;

    /* Limit queue len to 4x the weight (rounded to next power of two) */
    offload->skb_queue_len_max = 2 << fls(weight);
    offload->skb_queue_len_max *= 4;
    skb_queue_head_init(&offload->skb_queue);
    __skb_queue_head_init(&offload->skb_irq_queue);

    netif_napi_add_weight(dev, &offload->napi, can_rx_offload_napi_poll,
                  weight);

    dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
        __func__, offload->skb_queue_len_max);

    return 0;
}

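/* Register rx_offload in timestamp mode. The caller must have set up
 * mailbox_read(), mb_first and mb_last; the NAPI weight is derived from
 * the mailbox range and mb_first < mb_last selects ascending scan order.
 */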
int can_rx_offload_add_timestamp(struct net_device *dev,
                 struct can_rx_offload *offload)
{
    unsigned int weight;

    if (offload->mb_first > BITS_PER_LONG_LONG ||
        offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
        return -EINVAL;

    if (offload->mb_first < offload->mb_last) {
        offload->inc = true;
        weight = offload->mb_last - offload->mb_first;
    } else {
        offload->inc = false;
        weight = offload->mb_first - offload->mb_last;
    }

    return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);

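/* Register rx_offload in FIFO mode: mailbox_read() drains a hardware
 * FIFO through mailbox 0, @weight sets the NAPI weight.
 */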
int can_rx_offload_add_fifo(struct net_device *dev,
                struct can_rx_offload *offload, unsigned int weight)
{
    if (!offload->mailbox_read)
        return -EINVAL;

    return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);

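/* Register rx_offload in manual mode: no mailbox_read() is used, the
 * driver queues skbs itself via can_rx_offload_queue_timestamp() or
 * can_rx_offload_queue_tail().
 */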
int can_rx_offload_add_manual(struct net_device *dev,
                  struct can_rx_offload *offload,
                  unsigned int weight)
{
    if (offload->mailbox_read)
        return -EINVAL;

    return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_manual);

void can_rx_offload_enable(struct can_rx_offload *offload)
{
    napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);

void can_rx_offload_del(struct can_rx_offload *offload)
{
    netif_napi_del(&offload->napi);
    skb_queue_purge(&offload->skb_queue);
    __skb_queue_purge(&offload->skb_irq_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);
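
/* Rough usage sketch for the timestamp mode (not taken from a particular
 * driver; the foo_* names and FOO_* constants are placeholders for
 * driver-specific code and register layout):
 *
 *   setup, before registering the netdev:
 *       priv->offload.mailbox_read = foo_mailbox_read;
 *       priv->offload.mb_first = FOO_RX_MB_FIRST;
 *       priv->offload.mb_last = FOO_RX_MB_LAST;
 *       err = can_rx_offload_add_timestamp(dev, &priv->offload);
 *
 *   ndo_open: can_rx_offload_enable(&priv->offload);
 *
 *   interrupt handler:
 *       pending = foo_read_pending_rx_mask(priv);
 *       can_rx_offload_irq_offload_timestamp(&priv->offload, pending);
 *       can_rx_offload_irq_finish(&priv->offload);
 *
 *   teardown: can_rx_offload_del(&priv->offload);
 */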