0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/module.h>
0010 #include <net/xdp_sock.h>
0011 #include <linux/xdp_diag.h>
0012 #include <linux/sock_diag.h>
0013
0014 #include "xsk_queue.h"
0015 #include "xsk.h"
0016
0017 static int xsk_diag_put_info(const struct xdp_sock *xs, struct sk_buff *nlskb)
0018 {
0019 struct xdp_diag_info di = {};
0020
0021 di.ifindex = xs->dev ? xs->dev->ifindex : 0;
0022 di.queue_id = xs->queue_id;
0023 return nla_put(nlskb, XDP_DIAG_INFO, sizeof(di), &di);
0024 }
0025
0026 static int xsk_diag_put_ring(const struct xsk_queue *queue, int nl_type,
0027 struct sk_buff *nlskb)
0028 {
0029 struct xdp_diag_ring dr = {};
0030
0031 dr.entries = queue->nentries;
0032 return nla_put(nlskb, nl_type, sizeof(dr), &dr);
0033 }
0034
0035 static int xsk_diag_put_rings_cfg(const struct xdp_sock *xs,
0036 struct sk_buff *nlskb)
0037 {
0038 int err = 0;
0039
0040 if (xs->rx)
0041 err = xsk_diag_put_ring(xs->rx, XDP_DIAG_RX_RING, nlskb);
0042 if (!err && xs->tx)
0043 err = xsk_diag_put_ring(xs->tx, XDP_DIAG_TX_RING, nlskb);
0044 return err;
0045 }
0046
0047 static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
0048 {
0049 struct xsk_buff_pool *pool = xs->pool;
0050 struct xdp_umem *umem = xs->umem;
0051 struct xdp_diag_umem du = {};
0052 int err;
0053
0054 if (!umem)
0055 return 0;
0056
0057 du.id = umem->id;
0058 du.size = umem->size;
0059 du.num_pages = umem->npgs;
0060 du.chunk_size = umem->chunk_size;
0061 du.headroom = umem->headroom;
0062 du.ifindex = (pool && pool->netdev) ? pool->netdev->ifindex : 0;
0063 du.queue_id = pool ? pool->queue_id : 0;
0064 du.flags = 0;
0065 if (umem->zc)
0066 du.flags |= XDP_DU_F_ZEROCOPY;
0067 du.refs = refcount_read(&umem->users);
0068
0069 err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);
0070 if (!err && pool && pool->fq)
0071 err = xsk_diag_put_ring(pool->fq,
0072 XDP_DIAG_UMEM_FILL_RING, nlskb);
0073 if (!err && pool && pool->cq)
0074 err = xsk_diag_put_ring(pool->cq,
0075 XDP_DIAG_UMEM_COMPLETION_RING, nlskb);
0076 return err;
0077 }
0078
0079 static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb)
0080 {
0081 struct xdp_diag_stats du = {};
0082
0083 du.n_rx_dropped = xs->rx_dropped;
0084 du.n_rx_invalid = xskq_nb_invalid_descs(xs->rx);
0085 du.n_rx_full = xs->rx_queue_full;
0086 du.n_fill_ring_empty = xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
0087 du.n_tx_invalid = xskq_nb_invalid_descs(xs->tx);
0088 du.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx);
0089 return nla_put(nlskb, XDP_DIAG_STATS, sizeof(du), &du);
0090 }
0091
/* Build one complete diag record for @sk into @nlskb: the fixed
 * xdp_diag_msg header followed by the optional attributes selected by
 * req->xdiag_show. On any attribute overflow the partially-built message
 * is cancelled and -EMSGSIZE returned so the dump can resume in the next
 * skb. xs->mutex is held across attribute emission so the socket's
 * rings/umem cannot change underneath us.
 */
static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
			 struct xdp_diag_req *req,
			 struct user_namespace *user_ns,
			 u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct xdp_diag_msg *msg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(nlskb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*msg),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	/* Fixed header: family/type/inode/cookie identify the socket. */
	msg = nlmsg_data(nlh);
	memset(msg, 0, sizeof(*msg));
	msg->xdiag_family = AF_XDP;
	msg->xdiag_type = sk->sk_type;
	msg->xdiag_ino = sk_ino;
	sock_diag_save_cookie(sk, msg->xdiag_cookie);

	mutex_lock(&xs->mutex);
	if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb))
		goto out_nlmsg_trim;

	/* UID rides along with XDP_SHOW_INFO; translated into the
	 * requester's user namespace.
	 */
	if ((req->xdiag_show & XDP_SHOW_INFO) &&
	    nla_put_u32(nlskb, XDP_DIAG_UID,
			from_kuid_munged(user_ns, sock_i_uid(sk))))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_RING_CFG) &&
	    xsk_diag_put_rings_cfg(xs, nlskb))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_UMEM) &&
	    xsk_diag_put_umem(xs, nlskb))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_STATS) &&
	    xsk_diag_put_stats(xs, nlskb))
		goto out_nlmsg_trim;

	mutex_unlock(&xs->mutex);
	nlmsg_end(nlskb, nlh);
	return 0;

out_nlmsg_trim:
	/* Roll back the whole record; caller retries with a fresh skb. */
	mutex_unlock(&xs->mutex);
	nlmsg_cancel(nlskb, nlh);
	return -EMSGSIZE;
}
0147
0148 static int xsk_diag_dump(struct sk_buff *nlskb, struct netlink_callback *cb)
0149 {
0150 struct xdp_diag_req *req = nlmsg_data(cb->nlh);
0151 struct net *net = sock_net(nlskb->sk);
0152 int num = 0, s_num = cb->args[0];
0153 struct sock *sk;
0154
0155 mutex_lock(&net->xdp.lock);
0156
0157 sk_for_each(sk, &net->xdp.list) {
0158 if (!net_eq(sock_net(sk), net))
0159 continue;
0160 if (num++ < s_num)
0161 continue;
0162
0163 if (xsk_diag_fill(sk, nlskb, req,
0164 sk_user_ns(NETLINK_CB(cb->skb).sk),
0165 NETLINK_CB(cb->skb).portid,
0166 cb->nlh->nlmsg_seq, NLM_F_MULTI,
0167 sock_i_ino(sk)) < 0) {
0168 num--;
0169 break;
0170 }
0171 }
0172
0173 mutex_unlock(&net->xdp.lock);
0174 cb->args[0] = num;
0175 return nlskb->len;
0176 }
0177
0178 static int xsk_diag_handler_dump(struct sk_buff *nlskb, struct nlmsghdr *hdr)
0179 {
0180 struct netlink_dump_control c = { .dump = xsk_diag_dump };
0181 int hdrlen = sizeof(struct xdp_diag_req);
0182 struct net *net = sock_net(nlskb->sk);
0183
0184 if (nlmsg_len(hdr) < hdrlen)
0185 return -EINVAL;
0186
0187 if (!(hdr->nlmsg_flags & NLM_F_DUMP))
0188 return -EOPNOTSUPP;
0189
0190 return netlink_dump_start(net->diag_nlsk, nlskb, hdr, &c);
0191 }
0192
/* sock_diag handler registration: routes AF_XDP diag requests here. */
static const struct sock_diag_handler xsk_diag_handler = {
	.family = AF_XDP,
	.dump = xsk_diag_handler_dump,
};
0197
/* Module init: register the AF_XDP diag handler with sock_diag. */
static int __init xsk_diag_init(void)
{
	return sock_diag_register(&xsk_diag_handler);
}

/* Module exit: remove the handler so the module can unload safely. */
static void __exit xsk_diag_exit(void)
{
	sock_diag_unregister(&xsk_diag_handler);
}
0207
module_init(xsk_diag_init);
module_exit(xsk_diag_exit);
MODULE_LICENSE("GPL");
/* Auto-load when userspace issues a NETLINK_SOCK_DIAG query for AF_XDP. */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_XDP);