0001
0002
0003 #include <linux/ethtool_netlink.h>
0004 #include <net/udp_tunnel.h>
0005 #include <net/vxlan.h>
0006
0007 #include "bitset.h"
0008 #include "common.h"
0009 #include "netlink.h"
0010
/* Netlink attribute policy for ETHTOOL_MSG_TUNNEL_INFO_GET requests:
 * only the common ethtool request header is accepted.
 */
const struct nla_policy ethnl_tunnel_info_get_policy[] = {
	[ETHTOOL_A_TUNNEL_INFO_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
};
0015
/* The uAPI ETHTOOL_UDP_TUNNEL_TYPE_* values are bit numbers, while the
 * internal UDP_TUNNEL_TYPE_* values are bit masks; replies below convert
 * with ilog2(), so the two enums must stay in sync.
 */
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN == ilog2(UDP_TUNNEL_TYPE_VXLAN));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_GENEVE == ilog2(UDP_TUNNEL_TYPE_GENEVE));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE ==
	      ilog2(UDP_TUNNEL_TYPE_VXLAN_GPE));
0020
0021 static ssize_t ethnl_udp_table_reply_size(unsigned int types, bool compact)
0022 {
0023 ssize_t size;
0024
0025 size = ethnl_bitset32_size(&types, NULL, __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
0026 udp_tunnel_type_names, compact);
0027 if (size < 0)
0028 return size;
0029
0030 return size +
0031 nla_total_size(0) +
0032 nla_total_size(sizeof(u32));
0033 }
0034
/* Calculate the reply payload size for a TUNNEL_INFO_GET message for
 * req_base->dev.  Must account for exactly the attributes that
 * ethnl_tunnel_info_fill_reply() emits, including the synthetic static
 * IANA VXLAN table.  Returns size in bytes or a negative errno
 * (-EOPNOTSUPP when the device has no udp_tunnel_nic_info).
 */
static ssize_t
ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base,
			     struct netlink_ext_ack *extack)
{
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
	const struct udp_tunnel_nic_info *info;
	unsigned int i;
	ssize_t ret;
	size_t size;

	info = req_base->dev->udp_tunnel_nic_info;
	if (!info) {
		NL_SET_ERR_MSG(extack,
			       "device does not report tunnel offload info");
		return -EOPNOTSUPP;
	}

	size = nla_total_size(0); /* _INFO_UDP_PORTS nest */

	for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		/* tables are packed; the first empty one ends the list */
		if (!info->tables[i].n_entries)
			break;

		ret = ethnl_udp_table_reply_size(info->tables[i].tunnel_types,
						 compact);
		if (ret < 0)
			return ret;
		size += ret;

		/* per-port entries currently programmed into this table */
		size += udp_tunnel_nic_dump_size(req_base->dev, i);
	}

	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
		/* extra single-entry table advertising the fixed IANA
		 * VXLAN port; empty type bitset, one port entry
		 */
		ret = ethnl_udp_table_reply_size(0, compact);
		if (ret < 0)
			return ret;
		size += ret;

		size += nla_total_size(0) +		 /* _TABLE_ENTRY */
			nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */
			nla_total_size(sizeof(u32));	 /* _ENTRY_TYPE */
	}

	return size;
}
0080
0081 static int
0082 ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base,
0083 struct sk_buff *skb)
0084 {
0085 bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
0086 const struct udp_tunnel_nic_info *info;
0087 struct nlattr *ports, *table, *entry;
0088 unsigned int i;
0089
0090 info = req_base->dev->udp_tunnel_nic_info;
0091 if (!info)
0092 return -EOPNOTSUPP;
0093
0094 ports = nla_nest_start(skb, ETHTOOL_A_TUNNEL_INFO_UDP_PORTS);
0095 if (!ports)
0096 return -EMSGSIZE;
0097
0098 for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
0099 if (!info->tables[i].n_entries)
0100 break;
0101
0102 table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
0103 if (!table)
0104 goto err_cancel_ports;
0105
0106 if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE,
0107 info->tables[i].n_entries))
0108 goto err_cancel_table;
0109
0110 if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
0111 &info->tables[i].tunnel_types, NULL,
0112 __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
0113 udp_tunnel_type_names, compact))
0114 goto err_cancel_table;
0115
0116 if (udp_tunnel_nic_dump_write(req_base->dev, i, skb))
0117 goto err_cancel_table;
0118
0119 nla_nest_end(skb, table);
0120 }
0121
0122 if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
0123 u32 zero = 0;
0124
0125 table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
0126 if (!table)
0127 goto err_cancel_ports;
0128
0129 if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, 1))
0130 goto err_cancel_table;
0131
0132 if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
0133 &zero, NULL,
0134 __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
0135 udp_tunnel_type_names, compact))
0136 goto err_cancel_table;
0137
0138 entry = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY);
0139
0140 if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT,
0141 htons(IANA_VXLAN_UDP_PORT)) ||
0142 nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE,
0143 ilog2(UDP_TUNNEL_TYPE_VXLAN)))
0144 goto err_cancel_entry;
0145
0146 nla_nest_end(skb, entry);
0147 nla_nest_end(skb, table);
0148 }
0149
0150 nla_nest_end(skb, ports);
0151
0152 return 0;
0153
0154 err_cancel_entry:
0155 nla_nest_cancel(skb, entry);
0156 err_cancel_table:
0157 nla_nest_cancel(skb, table);
0158 err_cancel_ports:
0159 nla_nest_cancel(skb, ports);
0160 return -EMSGSIZE;
0161 }
0162
/* Handle an ETHTOOL_MSG_TUNNEL_INFO_GET request: resolve the target
 * device from the request header, size and build the reply under RTNL,
 * and send it.  The device reference taken by
 * ethnl_parse_header_dev_get() is released on every exit path.
 */
int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct ethnl_req_info req_info = {};
	struct nlattr **tb = info->attrs;
	struct sk_buff *rskb;
	void *reply_payload;
	int reply_len;
	int ret;

	ret = ethnl_parse_header_dev_get(&req_info,
					 tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
					 genl_info_net(info), info->extack,
					 true);
	if (ret < 0)
		return ret;

	/* RTNL protects udp_tunnel_nic state between sizing and filling */
	rtnl_lock();
	ret = ethnl_tunnel_info_reply_size(&req_info, info->extack);
	if (ret < 0)
		goto err_unlock_rtnl;
	reply_len = ret + ethnl_reply_header_size();

	rskb = ethnl_reply_init(reply_len, req_info.dev,
				ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY,
				ETHTOOL_A_TUNNEL_INFO_HEADER,
				info, &reply_payload);
	if (!rskb) {
		ret = -ENOMEM;
		goto err_unlock_rtnl;
	}

	ret = ethnl_tunnel_info_fill_reply(&req_info, rskb);
	if (ret)
		goto err_free_msg;
	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info);
	genlmsg_end(rskb, reply_payload);

	return genlmsg_reply(rskb, info);

err_free_msg:
	nlmsg_free(rskb);
err_unlock_rtnl:
	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info);
	return ret;
}
0210
/* Dump iteration state stored in netlink_callback::ctx; pos_hash/pos_idx
 * record the device hash bucket and index where the next dump pass
 * resumes.  Size is checked against cb->ctx by BUILD_BUG_ON() in
 * ethnl_tunnel_info_start().
 */
struct ethnl_tunnel_info_dump_ctx {
	struct ethnl_req_info	req_info;
	int			pos_hash;
	int			pos_idx;
};
0216
0217 int ethnl_tunnel_info_start(struct netlink_callback *cb)
0218 {
0219 const struct genl_dumpit_info *info = genl_dumpit_info(cb);
0220 struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
0221 struct nlattr **tb = info->attrs;
0222 int ret;
0223
0224 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
0225
0226 memset(ctx, 0, sizeof(*ctx));
0227
0228 ret = ethnl_parse_header_dev_get(&ctx->req_info,
0229 tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
0230 sock_net(cb->skb->sk), cb->extack,
0231 false);
0232 if (ctx->req_info.dev) {
0233 ethnl_parse_header_dev_put(&ctx->req_info);
0234 ctx->req_info.dev = NULL;
0235 }
0236
0237 return ret;
0238 }
0239
/* Dump callback: walk every net_device in the namespace (by index hash
 * bucket) and emit one TUNNEL_INFO reply message per device that
 * supports tunnel offload.  Resumes from ctx->pos_hash/pos_idx saved by
 * the previous pass; devices without udp_tunnel_nic_info are silently
 * skipped (-EOPNOTSUPP from fill_reply).
 */
int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	int s_idx = ctx->pos_idx;
	int h, idx = 0;
	int ret = 0;
	void *ehdr;

	rtnl_lock();
	/* record generation so userspace can detect device list changes */
	cb->seq = net->dev_base_seq;
	for (h = ctx->pos_hash; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		struct hlist_head *head;
		struct net_device *dev;

		head = &net->dev_index_head[h];
		idx = 0;
		hlist_for_each_entry(dev, head, index_hlist) {
			/* skip devices already dumped in earlier passes */
			if (idx < s_idx)
				goto cont;

			ehdr = ethnl_dump_put(skb, cb,
					      ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY);
			if (!ehdr) {
				ret = -EMSGSIZE;
				goto out;
			}

			ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_TUNNEL_INFO_HEADER);
			if (ret < 0) {
				genlmsg_cancel(skb, ehdr);
				goto out;
			}

			/* borrow dev in the shared context only for the
			 * duration of this fill; no reference is held
			 */
			ctx->req_info.dev = dev;
			ret = ethnl_tunnel_info_fill_reply(&ctx->req_info, skb);
			ctx->req_info.dev = NULL;
			if (ret < 0) {
				genlmsg_cancel(skb, ehdr);
				if (ret == -EOPNOTSUPP)
					goto cont;
				goto out;
			}
			genlmsg_end(skb, ehdr);
cont:
			idx++;
		}
	}
out:
	rtnl_unlock();

	/* save resume position for the next dump pass */
	ctx->pos_hash = h;
	ctx->pos_idx = idx;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));

	/* partial message: return progress so netlink calls us again */
	if (ret == -EMSGSIZE && skb->len)
		return skb->len;
	return ret;
}