0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034 #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
0035
0036 #include <linux/export.h>
0037 #include <net/netlink.h>
0038 #include <net/net_namespace.h>
0039 #include <net/netns/generic.h>
0040 #include <net/sock.h>
0041 #include <rdma/rdma_netlink.h>
0042 #include <linux/module.h>
0043 #include "core_priv.h"
0044
static struct {
	const struct rdma_nl_cbs *cb_table;
	/*
	 * Synchronizes clients (readers of cb_table in the receive path)
	 * against unregistration: rdma_nl_unregister() takes this
	 * exclusively before clearing cb_table, so a callback can never be
	 * running when its table is torn down.
	 */
	struct rw_semaphore sem;
} rdma_nl_types[RDMA_NL_NUM_CLIENTS];
0052
/**
 * rdma_nl_chk_listeners - Check for userspace listeners on a multicast group
 * @group: netlink multicast group number
 *
 * Return: true if any socket is subscribed to @group.
 *
 * NOTE(review): only the init_net namespace's RDMA netlink socket is
 * consulted here, regardless of which namespace triggered the check.
 */
bool rdma_nl_chk_listeners(unsigned int group)
{
	struct rdma_dev_net *rnet = rdma_net_to_dev_net(&init_net);

	return netlink_has_listeners(rnet->nl_sock, group);
}
EXPORT_SYMBOL(rdma_nl_chk_listeners);
0060
0061 static bool is_nl_msg_valid(unsigned int type, unsigned int op)
0062 {
0063 static const unsigned int max_num_ops[RDMA_NL_NUM_CLIENTS] = {
0064 [RDMA_NL_IWCM] = RDMA_NL_IWPM_NUM_OPS,
0065 [RDMA_NL_LS] = RDMA_NL_LS_NUM_OPS,
0066 [RDMA_NL_NLDEV] = RDMA_NLDEV_NUM_OPS,
0067 };
0068
0069
0070
0071
0072
0073 BUILD_BUG_ON(RDMA_NL_NUM_CLIENTS != 6);
0074
0075 if (type >= RDMA_NL_NUM_CLIENTS)
0076 return false;
0077
0078 return (op < max_num_ops[type]) ? true : false;
0079 }
0080
/*
 * get_cb_table - look up the callback table for a client, auto-loading
 * its module on first use.
 *
 * Called with rdma_nl_types[type].sem held for read; may temporarily drop
 * and re-take it around request_module().  Returns NULL when the client
 * is unavailable or has neither a .dump nor a .doit for @op.
 */
static const struct rdma_nl_cbs *
get_cb_table(const struct sk_buff *skb, unsigned int type, unsigned int op)
{
	const struct rdma_nl_cbs *cb_table;

	/*
	 * Only the NLDEV client supports netlink commands in namespaces
	 * other than init_net; everything else is init_net-only.
	 */
	if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
		return NULL;

	cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
	if (!cb_table) {
		/*
		 * No table registered yet - drop the read lock (the module's
		 * init path calls rdma_nl_register(), which must not be
		 * blocked by us) and attempt a one-shot module load.
		 */
		up_read(&rdma_nl_types[type].sem);

		request_module("rdma-netlink-subsys-%u", type);

		/* Re-take the lock and re-check; load may have failed. */
		down_read(&rdma_nl_types[type].sem);
		cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
	}
	if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
		return NULL;
	return cb_table;
}
0110
/**
 * rdma_nl_register - Register a client with the RDMA netlink core
 * @index: client index (one of the RDMA_NL_* client values)
 * @cb_table: callback table; must remain valid until rdma_nl_unregister()
 *
 * WARNs and does nothing on an out-of-range index or double registration.
 */
void rdma_nl_register(unsigned int index,
		      const struct rdma_nl_cbs cb_table[])
{
	if (WARN_ON(!is_nl_msg_valid(index, 0)) ||
	    WARN_ON(READ_ONCE(rdma_nl_types[index].cb_table)))
		return;

	/* Pairs with the READ_ONCE of cb_table in get_cb_table() */
	smp_store_release(&rdma_nl_types[index].cb_table, cb_table);
}
EXPORT_SYMBOL(rdma_nl_register);
0122
/**
 * rdma_nl_unregister - Remove a previously registered netlink client
 * @index: client index to remove
 *
 * Takes the per-client semaphore exclusively, so no callback from this
 * client's table can still be running when we return.
 */
void rdma_nl_unregister(unsigned int index)
{
	down_write(&rdma_nl_types[index].sem);
	rdma_nl_types[index].cb_table = NULL;
	up_write(&rdma_nl_types[index].sem);
}
EXPORT_SYMBOL(rdma_nl_unregister);
0130
0131 void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
0132 int len, int client, int op, int flags)
0133 {
0134 *nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), len, flags);
0135 if (!*nlh)
0136 return NULL;
0137 return nlmsg_data(*nlh);
0138 }
0139 EXPORT_SYMBOL(ibnl_put_msg);
0140
0141 int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
0142 int len, void *data, int type)
0143 {
0144 if (nla_put(skb, type, len, data)) {
0145 nlmsg_cancel(skb, nlh);
0146 return -EMSGSIZE;
0147 }
0148 return 0;
0149 }
0150 EXPORT_SYMBOL(ibnl_put_attr);
0151
/*
 * rdma_nl_rcv_msg - dispatch one netlink message to the owning client.
 *
 * Validates the client/op pair, checks CAP_NET_ADMIN where the callback
 * demands it, then routes to either .doit or a netlink dump depending on
 * the client and NLM_F_DUMP.  Returns a negative errno on failure,
 * including -EINVAL for anything unrecognized.
 */
static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	int type = nlh->nlmsg_type;
	unsigned int index = RDMA_NL_GET_CLIENT(type);
	unsigned int op = RDMA_NL_GET_OP(type);
	const struct rdma_nl_cbs *cb_table;
	int err = -EINVAL;

	if (!is_nl_msg_valid(index, op))
		return -EINVAL;

	/* Keep the client's table stable for the whole callback. */
	down_read(&rdma_nl_types[index].sem);
	cb_table = get_cb_table(skb, index, op);
	if (!cb_table)
		goto done;

	if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) &&
	    !netlink_capable(skb, CAP_NET_ADMIN)) {
		err = -EPERM;
		goto done;
	}

	/*
	 * LS messages always go through .doit, never a dump.
	 * NOTE(review): LS responses appear to overload the 0x100
	 * (NLM_F_ROOT / part of NLM_F_DUMP) flag, which would otherwise
	 * mis-trigger the dump path below - confirm against the LS client.
	 */
	if (index == RDMA_NL_LS) {
		if (cb_table[op].doit)
			err = cb_table[op].doit(skb, nlh, extack);
		goto done;
	}

	/* IWCM is dispatched via the dump machinery even without NLM_F_DUMP. */
	if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_IWCM) {
		struct netlink_dump_control c = {
			.dump = cb_table[op].dump,
		};
		if (c.dump)
			err = netlink_dump_start(skb->sk, skb, nlh, &c);
		goto done;
	}

	if (cb_table[op].doit)
		err = cb_table[op].doit(skb, nlh, extack);
done:
	up_read(&rdma_nl_types[index].sem);
	return err;
}
0200
0201
0202
0203
0204
0205
0206
/*
 * rdma_nl_rcv_skb - walk every netlink message in @skb and dispatch each
 * one to @cb, acking when NLM_F_ACK is set or the callback failed.
 *
 * This mirrors the generic netlink_rcv_skb() loop but applies
 * RDMA-specific filtering: non-request messages are normally dropped
 * (acked without dispatch), except for the LS client.
 */
static int rdma_nl_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						struct nlmsghdr *,
						struct netlink_ext_ack *))
{
	struct netlink_ext_ack extack = {};
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		/* Truncated or malformed header: stop silently. */
		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/*
		 * Only NLM_F_REQUEST messages are dispatched - except for
		 * RDMA_NL_LS, whose messages are dispatched even without
		 * the flag.  NOTE(review): presumably because LS runs a
		 * reversed scheme where the kernel initiates requests and
		 * userspace replies - confirm against the LS client.
		 */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST) &&
		    (RDMA_NL_GET_CLIENT(nlh->nlmsg_type) != RDMA_NL_LS))
			goto ack;

		/* Skip netlink control messages (types below NLMSG_MIN_TYPE). */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh, &extack);
		/* -EINTR: a dump was started; do not ack this message here. */
		if (err == -EINTR)
			goto skip;

ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err, &extack);

skip:
		/* Advance past this message, clamped to the buffer length. */
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
0256
0257 static void rdma_nl_rcv(struct sk_buff *skb)
0258 {
0259 rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
0260 }
0261
0262 int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
0263 {
0264 struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
0265 int err;
0266
0267 err = netlink_unicast(rnet->nl_sock, skb, pid, MSG_DONTWAIT);
0268 return (err < 0) ? err : 0;
0269 }
0270 EXPORT_SYMBOL(rdma_nl_unicast);
0271
0272 int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid)
0273 {
0274 struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
0275 int err;
0276
0277 err = netlink_unicast(rnet->nl_sock, skb, pid, 0);
0278 return (err < 0) ? err : 0;
0279 }
0280 EXPORT_SYMBOL(rdma_nl_unicast_wait);
0281
0282 int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
0283 unsigned int group, gfp_t flags)
0284 {
0285 struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
0286
0287 return nlmsg_multicast(rnet->nl_sock, skb, 0, group, flags);
0288 }
0289 EXPORT_SYMBOL(rdma_nl_multicast);
0290
0291 void rdma_nl_init(void)
0292 {
0293 int idx;
0294
0295 for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
0296 init_rwsem(&rdma_nl_types[idx].sem);
0297 }
0298
0299 void rdma_nl_exit(void)
0300 {
0301 int idx;
0302
0303 for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
0304 WARN(rdma_nl_types[idx].cb_table,
0305 "Netlink client %d wasn't released prior to unloading %s\n",
0306 idx, KBUILD_MODNAME);
0307 }
0308
0309 int rdma_nl_net_init(struct rdma_dev_net *rnet)
0310 {
0311 struct net *net = read_pnet(&rnet->net);
0312 struct netlink_kernel_cfg cfg = {
0313 .input = rdma_nl_rcv,
0314 };
0315 struct sock *nls;
0316
0317 nls = netlink_kernel_create(net, NETLINK_RDMA, &cfg);
0318 if (!nls)
0319 return -ENOMEM;
0320
0321 nls->sk_sndtimeo = 10 * HZ;
0322 rnet->nl_sock = nls;
0323 return 0;
0324 }
0325
/* Release the per-namespace RDMA netlink kernel socket. */
void rdma_nl_net_exit(struct rdma_dev_net *rnet)
{
	netlink_kernel_release(rnet->nl_sock);
}
0330
/* Auto-load this module when a NETLINK_RDMA socket is first requested. */
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_RDMA);