// SPDX-License-Identifier: GPL-2.0-only
/*
 * connector.c
 *
 * Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <linux/moduleparam.h>
#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_CONNECTOR);

static struct cn_dev cdev;

static int cn_already_initialized;

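/**
 * cn_netlink_send_mult - send connector message(s) to userspace
 * @msg: connector header followed by the payload
 * @len: payload length; may differ from msg->len when several
 *	cn_msg structures are sent back to back
 * @portid: destination port id for a unicast, or 0
 * @__group: destination multicast group, or 0
 * @gfp_mask: skb allocation flags
 *
 * When neither a port id nor a group is given, the callback list is
 * searched for an entry whose id matches msg->id and the message is
 * delivered to that entry's group.
 *
 * Return: a negative errno on failure (-ENODEV if no matching callback
 * is registered, -ESRCH if a broadcast has no listeners, -ENOMEM or
 * -EMSGSIZE on allocation failure), otherwise the result of
 * netlink_broadcast() or netlink_unicast().
 */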
int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group,
			 gfp_t gfp_mask)
{
	struct cn_callback_entry *__cbq;
	unsigned int size;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct cn_msg *data;
	struct cn_dev *dev = &cdev;
	u32 group = 0;
	int found = 0;

	if (portid || __group) {
		group = __group;
	} else {
		spin_lock_bh(&dev->cbdev->queue_lock);
		list_for_each_entry(__cbq, &dev->cbdev->queue_list,
				    callback_entry) {
			if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
				found = 1;
				group = __cbq->group;
				break;
			}
		}
		spin_unlock_bh(&dev->cbdev->queue_lock);

		if (!found)
			return -ENODEV;
	}

	if (!portid && !netlink_has_listeners(dev->nls, group))
		return -ESRCH;

	size = sizeof(*msg) + len;

	skb = nlmsg_new(size, gfp_mask);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0);
	if (!nlh) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	data = nlmsg_data(nlh);

	memcpy(data, msg, size);

	NETLINK_CB(skb).dst_group = group;

	if (group)
		return netlink_broadcast(dev->nls, skb, portid, group,
					 gfp_mask);
	return netlink_unicast(dev->nls, skb, portid,
			       !gfpflags_allow_blocking(gfp_mask));
}
EXPORT_SYMBOL_GPL(cn_netlink_send_mult);

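/*
 * cn_netlink_send - same as cn_netlink_send_mult(), with the payload
 * length taken from msg->len.
 */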
int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
		    gfp_t gfp_mask)
{
	return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask);
}
EXPORT_SYMBOL_GPL(cn_netlink_send);

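/*
 * Dispatch a single received message: find the callback entry whose id
 * matches msg->id, take a reference on it, invoke the callback, then
 * drop the skb and the reference.
 *
 * Returns 0 on success, -EINVAL if the connector payload does not fit
 * inside the netlink message, or -ENODEV if no callback matches.
 */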
static int cn_call_callback(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;
	struct cn_callback_entry *i, *cbq = NULL;
	struct cn_dev *dev = &cdev;
	struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
	struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
	int err = -ENODEV;

	/* verify msg->len is within skb */
	nlh = nlmsg_hdr(skb);
	if (nlh->nlmsg_len < NLMSG_HDRLEN + sizeof(struct cn_msg) + msg->len)
		return -EINVAL;

	spin_lock_bh(&dev->cbdev->queue_lock);
	list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
		if (cn_cb_equal(&i->id.id, &msg->id)) {
			refcount_inc(&i->refcnt);
			cbq = i;
			break;
		}
	}
	spin_unlock_bh(&dev->cbdev->queue_lock);

	if (cbq != NULL) {
		cbq->callback(msg, nsp);
		kfree_skb(skb);
		cn_queue_release_callback(cbq);
		err = 0;
	}

	return err;
}

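/*
 * Receive handler for the NETLINK_CONNECTOR socket: validate the
 * netlink and connector headers and pass the skb, with an extra
 * reference, to cn_call_callback().  The extra reference is dropped
 * here when no callback consumed the message.
 */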
static void cn_rx_skb(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;
	int len, err;

	if (skb->len >= NLMSG_HDRLEN) {
		nlh = nlmsg_hdr(skb);
		len = nlmsg_len(nlh);

		if (len < (int)sizeof(struct cn_msg) ||
		    skb->len < nlh->nlmsg_len ||
		    len > CONNECTOR_MAX_MSG_SIZE)
			return;

		err = cn_call_callback(skb_get(skb));
		if (err < 0)
			kfree_skb(skb);
	}
}

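/**
 * cn_add_callback - register a new callback with the connector core
 * @id: unique connector identifier (idx/val pair)
 * @name: connector's callback symbolic name
 * @callback: handler invoked for each message matching @id
 *
 * Return: 0 on success, -EAGAIN if the connector core has not been
 * initialized yet, or the error returned by cn_queue_add_callback().
 */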
int cn_add_callback(const struct cb_id *id, const char *name,
		    void (*callback)(struct cn_msg *,
				     struct netlink_skb_parms *))
{
	struct cn_dev *dev = &cdev;

	if (!cn_already_initialized)
		return -EAGAIN;

	return cn_queue_add_callback(dev->cbdev, name, id, callback);
}
EXPORT_SYMBOL_GPL(cn_add_callback);

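/**
 * cn_del_callback - unregister the callback registered under @id
 * @id: unique connector identifier (idx/val pair)
 */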
void cn_del_callback(const struct cb_id *id)
{
	struct cn_dev *dev = &cdev;

	cn_queue_del_callback(dev->cbdev, id);
}
EXPORT_SYMBOL_GPL(cn_del_callback);

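/*
 * Show the registered callbacks (name and idx:val identifier) in
 * /proc/net/connector.
 */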
static int __maybe_unused cn_proc_show(struct seq_file *m, void *v)
{
	struct cn_queue_dev *dev = cdev.cbdev;
	struct cn_callback_entry *cbq;

	seq_printf(m, "Name            ID\n");

	spin_lock_bh(&dev->queue_lock);

	list_for_each_entry(cbq, &dev->queue_list, callback_entry) {
		seq_printf(m, "%-15s %u:%u\n",
			   cbq->id.name,
			   cbq->id.id.idx,
			   cbq->id.id.val);
	}

	spin_unlock_bh(&dev->queue_lock);

	return 0;
}

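/*
 * Module init: create the NETLINK_CONNECTOR kernel socket, allocate
 * the callback queue device and register the /proc/net/connector
 * entry.
 */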
static int cn_init(void)
{
	struct cn_dev *dev = &cdev;
	struct netlink_kernel_cfg cfg = {
		.groups = CN_NETLINK_USERS + 0xf, /* reserve control group */
		.input  = cn_rx_skb,
	};

	dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
	if (!dev->nls)
		return -EIO;

	dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
	if (!dev->cbdev) {
		netlink_kernel_release(dev->nls);
		return -EINVAL;
	}

	cn_already_initialized = 1;

	proc_create_single("connector", S_IRUGO, init_net.proc_net,
			   cn_proc_show);

	return 0;
}

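/*
 * Module exit: undo cn_init() in reverse order.
 */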
static void cn_fini(void)
{
	struct cn_dev *dev = &cdev;

	cn_already_initialized = 0;

	remove_proc_entry("connector", init_net.proc_net);

	cn_queue_free_dev(dev->cbdev);
	netlink_kernel_release(dev->nls);
}

subsys_initcall(cn_init);
module_exit(cn_fini);