// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* raw.c - Raw sockets for protocol family CAN */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/raw.h>
#include <net/sock.h>
#include <net/net_namespace.h>

MODULE_DESCRIPTION("PF_CAN raw protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
MODULE_ALIAS("can-proto-1");

#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)

#define MASK_ALL 0
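
/* A CAN_RAW socket has a list of can_filters attached to it, each receiving
 * the CAN frames matching that filter. The default set up in raw_init() is a
 * single filter with can_id = 0 and can_mask = MASK_ALL, which matches every
 * frame. The list can be replaced via the CAN_RAW_FILTER socket option.
 */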
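
/* Per-CPU bookkeeping used by raw_rcv() to deliver an skb that matched
 * several of the socket's filters only once, and, with the
 * CAN_RAW_JOIN_FILTERS option, only after all filters have matched it.
 */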
struct uniqframe {
        int skbcnt;
        const struct sk_buff *skb;
        unsigned int join_rx_count;
};

struct raw_sock {
        struct sock sk;
        int bound;
        int ifindex;
        struct list_head notifier;
        int loopback;
        int recv_own_msgs;
        int fd_frames;
        int join_filters;
        int count;                 /* number of active filters */
        struct can_filter dfilter; /* default/single filter */
        struct can_filter *filter; /* pointer to filter(s) */
        can_err_mask_t err_mask;
        struct uniqframe __percpu *uniq;
};

static LIST_HEAD(raw_notifier_list);
static DEFINE_SPINLOCK(raw_notifier_lock);
static struct raw_sock *raw_busy_notifier;

/* Return pointer to store the extra msg flags for raw_recvmsg().
 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
 * in skb->cb.
 */
static inline unsigned int *raw_flags(struct sk_buff *skb)
{
        sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
                               sizeof(unsigned int));

        /* return pointer after struct sockaddr_can */
        return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
}

static inline struct raw_sock *raw_sk(const struct sock *sk)
{
        return (struct raw_sock *)sk;
}

static void raw_rcv(struct sk_buff *oskb, void *data)
{
        struct sock *sk = (struct sock *)data;
        struct raw_sock *ro = raw_sk(sk);
        struct sockaddr_can *addr;
        struct sk_buff *skb;
        unsigned int *pflags;

        /* check the received tx sock reference */
        if (!ro->recv_own_msgs && oskb->sk == sk)
                return;

        /* make sure to not pass oversized frames to the socket */
        if (!ro->fd_frames && oskb->len != CAN_MTU)
                return;

        /* eliminate multiple filter matches for the same skb */
        if (this_cpu_ptr(ro->uniq)->skb == oskb &&
            this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
                if (ro->join_filters) {
                        this_cpu_inc(ro->uniq->join_rx_count);
                        /* drop frame until all enabled filters matched */
                        if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
                                return;
                } else {
                        return;
                }
        } else {
                this_cpu_ptr(ro->uniq)->skb = oskb;
                this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
                this_cpu_ptr(ro->uniq)->join_rx_count = 1;
                /* drop first frame to check all enabled filters? */
                if (ro->join_filters && ro->count > 1)
                        return;
        }

        /* clone the given skb to be able to enqueue it into the rcv queue */
        skb = skb_clone(oskb, GFP_ATOMIC);
        if (!skb)
                return;

        /* Put the datagram into the queue so that raw_recvmsg() can get
         * it from there. We need to pass the interface index to
         * raw_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
         * containing the interface index.
         */
        sock_skb_cb_check_size(sizeof(struct sockaddr_can));
        addr = (struct sockaddr_can *)skb->cb;
        memset(addr, 0, sizeof(*addr));
        addr->can_family = AF_CAN;
        addr->can_ifindex = skb->dev->ifindex;

        /* add CAN specific message flags for raw_recvmsg() */
        pflags = raw_flags(skb);
        *pflags = 0;
        if (oskb->sk)
                *pflags |= MSG_DONTROUTE;
        if (oskb->sk == sk)
                *pflags |= MSG_CONFIRM;

        if (sock_queue_rcv_skb(sk, skb) < 0)
                kfree_skb(skb);
}

static int raw_enable_filters(struct net *net, struct net_device *dev,
                              struct sock *sk, struct can_filter *filter,
                              int count)
{
        int err = 0;
        int i;

        for (i = 0; i < count; i++) {
                err = can_rx_register(net, dev, filter[i].can_id,
                                      filter[i].can_mask,
                                      raw_rcv, sk, "raw", sk);
                if (err) {
                        /* clean up successfully registered filters */
                        while (--i >= 0)
                                can_rx_unregister(net, dev, filter[i].can_id,
                                                  filter[i].can_mask,
                                                  raw_rcv, sk);
                        break;
                }
        }

        return err;
}

static int raw_enable_errfilter(struct net *net, struct net_device *dev,
                                struct sock *sk, can_err_mask_t err_mask)
{
        int err = 0;

        if (err_mask)
                err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
                                      raw_rcv, sk, "raw", sk);

        return err;
}

static void raw_disable_filters(struct net *net, struct net_device *dev,
                                struct sock *sk, struct can_filter *filter,
                                int count)
{
        int i;

        for (i = 0; i < count; i++)
                can_rx_unregister(net, dev, filter[i].can_id,
                                  filter[i].can_mask, raw_rcv, sk);
}

static inline void raw_disable_errfilter(struct net *net,
                                         struct net_device *dev,
                                         struct sock *sk,
                                         can_err_mask_t err_mask)
{
        if (err_mask)
                can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
                                  raw_rcv, sk);
}

static inline void raw_disable_allfilters(struct net *net,
                                          struct net_device *dev,
                                          struct sock *sk)
{
        struct raw_sock *ro = raw_sk(sk);

        raw_disable_filters(net, dev, sk, ro->filter, ro->count);
        raw_disable_errfilter(net, dev, sk, ro->err_mask);
}

static int raw_enable_allfilters(struct net *net, struct net_device *dev,
                                 struct sock *sk)
{
        struct raw_sock *ro = raw_sk(sk);
        int err;

        err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
        if (!err) {
                err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
                if (err)
                        raw_disable_filters(net, dev, sk, ro->filter,
                                            ro->count);
        }

        return err;
}

static void raw_notify(struct raw_sock *ro, unsigned long msg,
                       struct net_device *dev)
{
        struct sock *sk = &ro->sk;

        if (!net_eq(dev_net(dev), sock_net(sk)))
                return;

        if (ro->ifindex != dev->ifindex)
                return;

        switch (msg) {
        case NETDEV_UNREGISTER:
                lock_sock(sk);
                /* remove current filters & unregister */
                if (ro->bound)
                        raw_disable_allfilters(dev_net(dev), dev, sk);

                if (ro->count > 1)
                        kfree(ro->filter);

                ro->ifindex = 0;
                ro->bound = 0;
                ro->count = 0;
                release_sock(sk);

                sk->sk_err = ENODEV;
                if (!sock_flag(sk, SOCK_DEAD))
                        sk_error_report(sk);
                break;

        case NETDEV_DOWN:
                sk->sk_err = ENETDOWN;
                if (!sock_flag(sk, SOCK_DEAD))
                        sk_error_report(sk);
                break;
        }
}

static int raw_notifier(struct notifier_block *nb, unsigned long msg,
                        void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        if (dev->type != ARPHRD_CAN)
                return NOTIFY_DONE;
        if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
                return NOTIFY_DONE;
        if (unlikely(raw_busy_notifier)) /* check for reentrant notifier calls */
                return NOTIFY_DONE;

        spin_lock(&raw_notifier_lock);
        list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
                spin_unlock(&raw_notifier_lock);
                raw_notify(raw_busy_notifier, msg, dev);
                spin_lock(&raw_notifier_lock);
        }
        raw_busy_notifier = NULL;
        spin_unlock(&raw_notifier_lock);
        return NOTIFY_DONE;
}

static int raw_init(struct sock *sk)
{
        struct raw_sock *ro = raw_sk(sk);

        ro->bound = 0;
        ro->ifindex = 0;

        /* set default filter to single entry dfilter */
        ro->dfilter.can_id = 0;
        ro->dfilter.can_mask = MASK_ALL;
        ro->filter = &ro->dfilter;
        ro->count = 1;

        /* set default loopback behaviour */
        ro->loopback = 1;
        ro->recv_own_msgs = 0;
        ro->fd_frames = 0;
        ro->join_filters = 0;

        /* alloc_percpu provides zero'ed memory */
        ro->uniq = alloc_percpu(struct uniqframe);
        if (unlikely(!ro->uniq))
                return -ENOMEM;

        /* set notifier */
        spin_lock(&raw_notifier_lock);
        list_add_tail(&ro->notifier, &raw_notifier_list);
        spin_unlock(&raw_notifier_lock);

        return 0;
}

static int raw_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct raw_sock *ro;

        if (!sk)
                return 0;

        ro = raw_sk(sk);

        spin_lock(&raw_notifier_lock);
        while (raw_busy_notifier == ro) {
                spin_unlock(&raw_notifier_lock);
                schedule_timeout_uninterruptible(1);
                spin_lock(&raw_notifier_lock);
        }
        list_del(&ro->notifier);
        spin_unlock(&raw_notifier_lock);

        lock_sock(sk);

        /* remove current filters & unregister */
        if (ro->bound) {
                if (ro->ifindex) {
                        struct net_device *dev;

                        dev = dev_get_by_index(sock_net(sk), ro->ifindex);
                        if (dev) {
                                raw_disable_allfilters(dev_net(dev), dev, sk);
                                dev_put(dev);
                        }
                } else {
                        raw_disable_allfilters(sock_net(sk), NULL, sk);
                }
        }

        if (ro->count > 1)
                kfree(ro->filter);

        ro->ifindex = 0;
        ro->bound = 0;
        ro->count = 0;
        free_percpu(ro->uniq);

        sock_orphan(sk);
        sock->sk = NULL;

        release_sock(sk);
        sock_put(sk);

        return 0;
}

static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
        struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
        struct sock *sk = sock->sk;
        struct raw_sock *ro = raw_sk(sk);
        int ifindex;
        int err = 0;
        int notify_enetdown = 0;

        if (len < RAW_MIN_NAMELEN)
                return -EINVAL;
        if (addr->can_family != AF_CAN)
                return -EINVAL;

        lock_sock(sk);

        if (ro->bound && addr->can_ifindex == ro->ifindex)
                goto out;

        if (addr->can_ifindex) {
                struct net_device *dev;

                dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
                if (!dev) {
                        err = -ENODEV;
                        goto out;
                }
                if (dev->type != ARPHRD_CAN) {
                        dev_put(dev);
                        err = -ENODEV;
                        goto out;
                }
                if (!(dev->flags & IFF_UP))
                        notify_enetdown = 1;

                ifindex = dev->ifindex;

                /* filters set by default/setsockopt */
                err = raw_enable_allfilters(sock_net(sk), dev, sk);
                dev_put(dev);
        } else {
                ifindex = 0;

                /* filters set by default/setsockopt */
                err = raw_enable_allfilters(sock_net(sk), NULL, sk);
        }

        if (!err) {
                if (ro->bound) {
                        /* unregister old filters */
                        if (ro->ifindex) {
                                struct net_device *dev;

                                dev = dev_get_by_index(sock_net(sk),
                                                       ro->ifindex);
                                if (dev) {
                                        raw_disable_allfilters(dev_net(dev),
                                                               dev, sk);
                                        dev_put(dev);
                                }
                        } else {
                                raw_disable_allfilters(sock_net(sk), NULL, sk);
                        }
                }
                ro->ifindex = ifindex;
                ro->bound = 1;
        }

 out:
        release_sock(sk);

        if (notify_enetdown) {
                sk->sk_err = ENETDOWN;
                if (!sock_flag(sk, SOCK_DEAD))
                        sk_error_report(sk);
        }

        return err;
}
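
/* Userspace view of the bind() path above, shown as a hedged illustration
 * only (not part of this module): a minimal sketch that opens a CAN_RAW
 * socket and binds it to an interface assumed to be named "can0". Error
 * handling is omitted.
 *
 *	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
 *	struct ifreq ifr = {0};
 *	struct sockaddr_can addr = { .can_family = AF_CAN };
 *
 *	strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);
 *	ioctl(s, SIOCGIFINDEX, &ifr);          // resolve the interface index
 *	addr.can_ifindex = ifr.ifr_ifindex;    // 0 would bind to all CAN devices
 *	bind(s, (struct sockaddr *)&addr, sizeof(addr));
 */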

static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
                       int peer)
{
        struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
        struct sock *sk = sock->sk;
        struct raw_sock *ro = raw_sk(sk);

        if (peer)
                return -EOPNOTSUPP;

        memset(addr, 0, RAW_MIN_NAMELEN);
        addr->can_family = AF_CAN;
        addr->can_ifindex = ro->ifindex;

        return RAW_MIN_NAMELEN;
}

static int raw_setsockopt(struct socket *sock, int level, int optname,
                          sockptr_t optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;
        struct raw_sock *ro = raw_sk(sk);
        struct can_filter *filter = NULL;
        struct can_filter sfilter;
        struct net_device *dev = NULL;
        can_err_mask_t err_mask = 0;
        int count = 0;
        int err = 0;

        if (level != SOL_CAN_RAW)
                return -EINVAL;

        switch (optname) {
        case CAN_RAW_FILTER:
                if (optlen % sizeof(struct can_filter) != 0)
                        return -EINVAL;

                if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
                        return -EINVAL;

                count = optlen / sizeof(struct can_filter);

                if (count > 1) {
                        /* filter does not fit into dfilter => alloc space */
                        filter = memdup_sockptr(optval, optlen);
                        if (IS_ERR(filter))
                                return PTR_ERR(filter);
                } else if (count == 1) {
                        if (copy_from_sockptr(&sfilter, optval, sizeof(sfilter)))
                                return -EFAULT;
                }

                rtnl_lock();
                lock_sock(sk);

                if (ro->bound && ro->ifindex) {
                        dev = dev_get_by_index(sock_net(sk), ro->ifindex);
                        if (!dev) {
                                if (count > 1)
                                        kfree(filter);
                                err = -ENODEV;
                                goto out_fil;
                        }
                }

                if (ro->bound) {
                        /* (try to) register the new filters */
                        if (count == 1)
                                err = raw_enable_filters(sock_net(sk), dev, sk,
                                                         &sfilter, 1);
                        else
                                err = raw_enable_filters(sock_net(sk), dev, sk,
                                                         filter, count);
                        if (err) {
                                if (count > 1)
                                        kfree(filter);
                                goto out_fil;
                        }

                        /* remove old filter registrations */
                        raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
                                            ro->count);
                }

                /* remove old filter space */
                if (ro->count > 1)
                        kfree(ro->filter);

                /* link new filters to the socket */
                if (count == 1) {
                        /* copy filter data for single filter */
                        ro->dfilter = sfilter;
                        filter = &ro->dfilter;
                }
                ro->filter = filter;
                ro->count = count;

 out_fil:
                dev_put(dev);
                release_sock(sk);
                rtnl_unlock();

                break;

        case CAN_RAW_ERR_FILTER:
                if (optlen != sizeof(err_mask))
                        return -EINVAL;

                if (copy_from_sockptr(&err_mask, optval, optlen))
                        return -EFAULT;

                err_mask &= CAN_ERR_MASK;

                rtnl_lock();
                lock_sock(sk);

                if (ro->bound && ro->ifindex) {
                        dev = dev_get_by_index(sock_net(sk), ro->ifindex);
                        if (!dev) {
                                err = -ENODEV;
                                goto out_err;
                        }
                }

                if (ro->bound) {
                        /* (try to) register the new err_mask */
                        err = raw_enable_errfilter(sock_net(sk), dev, sk,
                                                   err_mask);

                        if (err)
                                goto out_err;

                        /* remove old err_mask registration */
                        raw_disable_errfilter(sock_net(sk), dev, sk,
                                              ro->err_mask);
                }

                /* link new err_mask to the socket */
                ro->err_mask = err_mask;

 out_err:
                dev_put(dev);
                release_sock(sk);
                rtnl_unlock();

                break;

        case CAN_RAW_LOOPBACK:
                if (optlen != sizeof(ro->loopback))
                        return -EINVAL;

                if (copy_from_sockptr(&ro->loopback, optval, optlen))
                        return -EFAULT;

                break;

        case CAN_RAW_RECV_OWN_MSGS:
                if (optlen != sizeof(ro->recv_own_msgs))
                        return -EINVAL;

                if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen))
                        return -EFAULT;

                break;

        case CAN_RAW_FD_FRAMES:
                if (optlen != sizeof(ro->fd_frames))
                        return -EINVAL;

                if (copy_from_sockptr(&ro->fd_frames, optval, optlen))
                        return -EFAULT;

                break;

        case CAN_RAW_JOIN_FILTERS:
                if (optlen != sizeof(ro->join_filters))
                        return -EINVAL;

                if (copy_from_sockptr(&ro->join_filters, optval, optlen))
                        return -EFAULT;

                break;

        default:
                return -ENOPROTOOPT;
        }
        return err;
}
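
/* Userspace view of CAN_RAW_FILTER, shown as a hedged illustration only (not
 * part of this module): a minimal sketch that installs two filters on an
 * already created CAN_RAW socket 's'. The IDs 0x123 and 0x200 are arbitrary
 * example values.
 *
 *	struct can_filter rfilter[2];
 *
 *	rfilter[0].can_id   = 0x123;
 *	rfilter[0].can_mask = CAN_SFF_MASK;	// match CAN ID 0x123
 *	rfilter[1].can_id   = 0x200;
 *	rfilter[1].can_mask = 0x700;		// match IDs 0x200 - 0x2ff
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
 *
 * Passing optlen = 0 (no filters) makes the socket receive nothing, while
 * CAN_RAW_JOIN_FILTERS turns the OR semantics of multiple filters into AND.
 */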

static int raw_getsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;
        struct raw_sock *ro = raw_sk(sk);
        int len;
        void *val;
        int err = 0;

        if (level != SOL_CAN_RAW)
                return -EINVAL;
        if (get_user(len, optlen))
                return -EFAULT;
        if (len < 0)
                return -EINVAL;

        switch (optname) {
        case CAN_RAW_FILTER:
                lock_sock(sk);
                if (ro->count > 0) {
                        int fsize = ro->count * sizeof(struct can_filter);

                        /* user space buffer too small for filter list? */
                        if (len < fsize) {
                                /* return -ERANGE and needed space in optlen */
                                err = -ERANGE;
                                if (put_user(fsize, optlen))
                                        err = -EFAULT;
                        } else {
                                if (len > fsize)
                                        len = fsize;
                                if (copy_to_user(optval, ro->filter, len))
                                        err = -EFAULT;
                        }
                } else {
                        len = 0;
                }
                release_sock(sk);

                if (!err)
                        err = put_user(len, optlen);
                return err;

        case CAN_RAW_ERR_FILTER:
                if (len > sizeof(can_err_mask_t))
                        len = sizeof(can_err_mask_t);
                val = &ro->err_mask;
                break;

        case CAN_RAW_LOOPBACK:
                if (len > sizeof(int))
                        len = sizeof(int);
                val = &ro->loopback;
                break;

        case CAN_RAW_RECV_OWN_MSGS:
                if (len > sizeof(int))
                        len = sizeof(int);
                val = &ro->recv_own_msgs;
                break;

        case CAN_RAW_FD_FRAMES:
                if (len > sizeof(int))
                        len = sizeof(int);
                val = &ro->fd_frames;
                break;

        case CAN_RAW_JOIN_FILTERS:
                if (len > sizeof(int))
                        len = sizeof(int);
                val = &ro->join_filters;
                break;

        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, val, len))
                return -EFAULT;
        return 0;
}

static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
        struct sock *sk = sock->sk;
        struct raw_sock *ro = raw_sk(sk);
        struct sockcm_cookie sockc;
        struct sk_buff *skb;
        struct net_device *dev;
        int ifindex;
        int err;

        if (msg->msg_name) {
                DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

                if (msg->msg_namelen < RAW_MIN_NAMELEN)
                        return -EINVAL;

                if (addr->can_family != AF_CAN)
                        return -EINVAL;

                ifindex = addr->can_ifindex;
        } else {
                ifindex = ro->ifindex;
        }

        dev = dev_get_by_index(sock_net(sk), ifindex);
        if (!dev)
                return -ENXIO;

        err = -EINVAL;
        if (ro->fd_frames && dev->mtu == CANFD_MTU) {
                if (unlikely(size != CANFD_MTU && size != CAN_MTU))
                        goto put_dev;
        } else {
                if (unlikely(size != CAN_MTU))
                        goto put_dev;
        }

        skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
                                  msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
                goto put_dev;

        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = dev->ifindex;
        can_skb_prv(skb)->skbcnt = 0;

        err = memcpy_from_msg(skb_put(skb, size), msg, size);
        if (err < 0)
                goto free_skb;

        sockcm_init(&sockc, sk);
        if (msg->msg_controllen) {
                err = sock_cmsg_send(sk, msg, &sockc);
                if (unlikely(err))
                        goto free_skb;
        }

        skb->dev = dev;
        skb->priority = sk->sk_priority;
        skb->tstamp = sockc.transmit_time;

        skb_setup_tx_timestamp(skb, sockc.tsflags);

        err = can_send(skb, ro->loopback);

        dev_put(dev);

        if (err)
                goto send_failed;

        return size;

free_skb:
        kfree_skb(skb);
put_dev:
        dev_put(dev);
send_failed:
        return err;
}
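
/* Userspace view of the send path, shown as a hedged illustration only (not
 * part of this module): a minimal sketch that writes one classic CAN frame
 * on a bound CAN_RAW socket 's'. Exactly sizeof(struct can_frame) bytes must
 * be written (or sizeof(struct canfd_frame) once CAN_RAW_FD_FRAMES is
 * enabled on a CAN FD capable device), otherwise sendmsg returns -EINVAL.
 *
 *	struct can_frame frame = {0};
 *
 *	frame.can_id  = 0x123;		// arbitrary example ID
 *	frame.can_dlc = 2;		// payload length ("len" in newer headers)
 *	frame.data[0] = 0x11;
 *	frame.data[1] = 0x22;
 *	write(s, &frame, sizeof(frame));
 */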

static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                       int flags)
{
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int err = 0;

        if (flags & MSG_ERRQUEUE)
                return sock_recv_errqueue(sk, msg, size,
                                          SOL_CAN_RAW, SCM_CAN_RAW_ERRQUEUE);

        skb = skb_recv_datagram(sk, flags, &err);
        if (!skb)
                return err;

        if (size < skb->len)
                msg->msg_flags |= MSG_TRUNC;
        else
                size = skb->len;

        err = memcpy_to_msg(msg, skb->data, size);
        if (err < 0) {
                skb_free_datagram(sk, skb);
                return err;
        }

        sock_recv_cmsgs(msg, sk, skb);

        if (msg->msg_name) {
                __sockaddr_check_size(RAW_MIN_NAMELEN);
                msg->msg_namelen = RAW_MIN_NAMELEN;
                memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
        }

        /* assign the flags that have been recorded in raw_rcv() */
        msg->msg_flags |= *(raw_flags(skb));

        skb_free_datagram(sk, skb);

        return size;
}
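
/* Userspace view of the receive path, shown as a hedged illustration only
 * (not part of this module): a minimal sketch that reads one frame from a
 * bound CAN_RAW socket 's' and checks which interface it arrived on.
 *
 *	struct can_frame frame;
 *	struct sockaddr_can addr;
 *	socklen_t alen = sizeof(addr);
 *
 *	ssize_t n = recvfrom(s, &frame, sizeof(frame), 0,
 *			     (struct sockaddr *)&addr, &alen);
 *	// n == sizeof(frame) on success; addr.can_ifindex names the device
 */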

static int raw_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
                                unsigned long arg)
{
        /* no ioctls for socket layer -> hand it down to NIC layer */
        return -ENOIOCTLCMD;
}

static const struct proto_ops raw_ops = {
        .family        = PF_CAN,
        .release       = raw_release,
        .bind          = raw_bind,
        .connect       = sock_no_connect,
        .socketpair    = sock_no_socketpair,
        .accept        = sock_no_accept,
        .getname       = raw_getname,
        .poll          = datagram_poll,
        .ioctl         = raw_sock_no_ioctlcmd,
        .gettstamp     = sock_gettstamp,
        .listen        = sock_no_listen,
        .shutdown      = sock_no_shutdown,
        .setsockopt    = raw_setsockopt,
        .getsockopt    = raw_getsockopt,
        .sendmsg       = raw_sendmsg,
        .recvmsg       = raw_recvmsg,
        .mmap          = sock_no_mmap,
        .sendpage      = sock_no_sendpage,
};

static struct proto raw_proto __read_mostly = {
        .name       = "CAN_RAW",
        .owner      = THIS_MODULE,
        .obj_size   = sizeof(struct raw_sock),
        .init       = raw_init,
};

static const struct can_proto raw_can_proto = {
        .type       = SOCK_RAW,
        .protocol   = CAN_RAW,
        .ops        = &raw_ops,
        .prot       = &raw_proto,
};

static struct notifier_block canraw_notifier = {
        .notifier_call = raw_notifier
};

static __init int raw_module_init(void)
{
        int err;

        pr_info("can: raw protocol\n");

        err = can_proto_register(&raw_can_proto);
        if (err < 0)
                pr_err("can: registration of raw protocol failed\n");
        else
                register_netdevice_notifier(&canraw_notifier);

        return err;
}

static __exit void raw_module_exit(void)
{
        can_proto_unregister(&raw_can_proto);
        unregister_netdevice_notifier(&canraw_notifier);
}

module_init(raw_module_init);
module_exit(raw_module_exit);