// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <net/lwtunnel.h>
#include <net/xfrm.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard_out);

const struct dst_metrics dst_default_metrics = {
	/* This initializer is needed to force the linker to place this
	 * variable into the const section; otherwise it might end up in
	 * the bss section.  We really want to avoid false sharing on this
	 * variable, and to catch any writes to it.
	 */
	.refcnt = REFCOUNT_INIT(1),
};
EXPORT_SYMBOL(dst_default_metrics);

void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags)
{
	dst->dev = dev;
	netdev_hold(dev, &dst->dev_tracker, GFP_ATOMIC);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics.metrics, true);
	dst->expires = 0UL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	dst->lwtstate = NULL;
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
}
EXPORT_SYMBOL(dst_init);
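
/* A minimal usage sketch, not part of this file: a caller that embeds a
 * dst_entry in a larger, separately allocated object can use dst_init()
 * directly instead of dst_alloc().  "struct my_route" and "my_dst_ops"
 * are hypothetical; __metadata_dst_init() below is the in-tree instance
 * of this pattern.  Guarded out since those types do not exist here.
 */
#if 0
struct my_route {
	struct dst_entry	dst;	/* must be first so casts work */
	u32			cookie;
};

static struct my_route *my_route_create(struct net_device *dev)
{
	struct my_route *rt = kzalloc(sizeof(*rt), GFP_ATOMIC);

	if (!rt)
		return NULL;
	/* One reference for the caller; DST_NOCOUNT skips the per-ops
	 * entry accounting done for cache-managed dsts.
	 */
	dst_init(&rt->dst, &my_dst_ops, dev, 1, DST_OBSOLETE_NONE,
		 DST_NOCOUNT);
	return rt;
}
#endif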

void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc &&
	    !(flags & DST_NOCOUNT) &&
	    dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops)) {
			pr_notice_ratelimited("Route cache is full: consider increasing sysctl net.ipv6.route.max_size.\n");
			return NULL;
		}
	}

	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;

	dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);

	return dst;
}
EXPORT_SYMBOL(dst_alloc);
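
/* A hedged sketch of the typical dst_alloc() call site: a protocol
 * allocates its route type from its own dst_ops kmem cache and casts the
 * void * result.  "my_dst_ops" and "struct my_route" are hypothetical;
 * ipv4's rt_dst_alloc() in net/ipv4/route.c has this shape.
 */
#if 0
static struct my_route *my_route_alloc(struct net_device *dev)
{
	/* One initial reference; DST_OBSOLETE_FORCE_CHK makes dst_check()
	 * consult ops->check() before every reuse of the entry.
	 */
	return dst_alloc(&my_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK, 0);
}
#endif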

struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child = NULL;

	smp_rmb();

#ifdef CONFIG_XFRM
	if (dst->xfrm) {
		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;

		child = xdst->child;
	}
#endif
	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	netdev_put(dst->dev, &dst->dev_tracker);

	lwtstate_put(dst->lwtstate);

	if (dst->flags & DST_METADATA)
		metadata_dst_free((struct metadata_dst *)dst);
	else
		kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst)
		dst_release_immediate(dst);
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);

/* RCU callback: free the dst once all RCU readers are done with it. */
static void dst_destroy_rcu(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst = dst_destroy(dst);
}

/* Operations to mark dst as DEAD and clean up the net device referenced
 * by dst:
 * 1. put the dst under blackhole interface and discard all tx/rx packets
 *    on this route.
 * 2. release the net_device
 * This function should be called when removing routes from the fib tree
 * in preparation for a NETDEV_DOWN/NETDEV_UNREGISTER event and also to
 * make the next dst_ops->check() fail.
 */
void dst_dev_put(struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;

	dst->obsolete = DST_OBSOLETE_DEAD;
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, true);
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->dev = blackhole_netdev;
	netdev_ref_replace(dev, blackhole_netdev, &dst->dev_tracker,
			   GFP_ATOMIC);
}
EXPORT_SYMBOL(dst_dev_put);
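
/* A hedged sketch of the intended call order, not from this file: when a
 * route is torn down because its device is going away, the owner unlinks
 * it, points it at the blackhole device, then drops its own reference.
 * "my_table_unlink_route" and "struct my_route" are hypothetical.
 */
#if 0
static void my_route_ifdown(struct my_route *rt)
{
	my_table_unlink_route(rt);	/* no new lookups can find it */
	dst_dev_put(&rt->dst);		/* tx/rx now hit blackhole_netdev */
	dst_release(&rt->dst);		/* drop the table's reference */
}
#endif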

void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		if (WARN_ONCE(newrefcnt < 0, "dst_release underflow"))
			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
					     __func__, dst, newrefcnt);
		if (!newrefcnt)
			call_rcu(&dst->rcu_head, dst_destroy_rcu);
	}
}
EXPORT_SYMBOL(dst_release);
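
/* A sketch of the usual hold/release pairing, not part of this file: a
 * caller that keeps a dst beyond the current RCU section takes a
 * reference with dst_hold() (include/net/dst.h) and drops it with
 * dst_release(), which defers the free past concurrent RCU readers.
 * "my_cache" is a hypothetical RCU-protected pointer.
 */
#if 0
static struct dst_entry __rcu *my_cache;

static void cache_route(struct dst_entry *dst)
{
	dst_hold(dst);			/* +1: the cache owns a reference */
	rcu_assign_pointer(my_cache, dst);
}

static void uncache_route(void)
{
	struct dst_entry *dst = rcu_replace_pointer(my_cache, NULL, true);

	if (dst)
		dst_release(dst);	/* freed via RCU once count hits 0 */
}
#endif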

/* Like dst_release(), but frees the dst synchronously instead of after an
 * RCU grace period; only safe when no concurrent RCU readers can still
 * reach this dst.
 */
void dst_release_immediate(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		if (WARN_ONCE(newrefcnt < 0, "dst_release_immediate underflow"))
			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
					     __func__, dst, newrefcnt);
		if (!newrefcnt)
			dst_destroy(dst);
	}
}
EXPORT_SYMBOL(dst_release_immediate);

/* Copy-on-write the metrics array so the caller can modify it.  If we
 * lose the cmpxchg race, free our copy and reuse the winner's writable
 * array; if the pointer that won is still read-only, return NULL.
 */
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);

	if (p) {
		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
		unsigned long prev, new;

		refcount_set(&p->refcnt, 1);
		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else if (prev & DST_METRICS_REFCOUNTED) {
			if (refcount_dec_and_test(&old_p->refcnt))
				kfree(old_p);
		}
	}
	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
	return (u32 *)p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);
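
/* A sketch of what reaches this function, not from this file: writers use
 * dst_metric_set() (include/net/dst.h), whose dst_metrics_write_ptr()
 * call invokes ops->cow_metrics, commonly this helper, the first time a
 * dst still sharing the read-only dst_default_metrics array is written.
 */
#if 0
static void example_lower_mtu(struct dst_entry *dst, u32 mtu)
{
	/* First write copies the shared array, then updates the copy. */
	dst_metric_set(dst, RTAX_MTU, mtu);
}
#endif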

/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

/* Generic stubs for "blackhole" dst_ops: dst_check() always fails,
 * metrics can never be written, neighbour lookups find nothing, and
 * PMTU updates and redirects are ignored.
 */
struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	return NULL;
}

struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
					     struct sk_buff *skb,
					     const void *daddr)
{
	return NULL;
}

void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh)
{
}
EXPORT_SYMBOL_GPL(dst_blackhole_update_pmtu);

void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb)
{
}
EXPORT_SYMBOL_GPL(dst_blackhole_redirect);

unsigned int dst_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}
EXPORT_SYMBOL_GPL(dst_blackhole_mtu);

static struct dst_ops dst_blackhole_ops = {
	.family		= AF_UNSPEC,
	.neigh_lookup	= dst_blackhole_neigh_lookup,
	.check		= dst_blackhole_check,
	.cow_metrics	= dst_blackhole_cow_metrics,
	.update_pmtu	= dst_blackhole_update_pmtu,
	.redirect	= dst_blackhole_redirect,
	.mtu		= dst_blackhole_mtu,
};
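
/* A hedged sketch of how a protocol might reuse these stubs, not part of
 * this file: ipv4 and ipv6 build similar per-family blackhole ops tables
 * for routes whose traffic must be silently dropped.  "my_blackhole_ops"
 * is hypothetical and would share the protocol's kmem cache.
 */
#if 0
static struct dst_entry *my_blackhole_route(struct net_device *dev)
{
	/* dst_init() already points ->input/->output at the discard stubs;
	 * DST_OBSOLETE_DEAD makes every later dst_check() fail.
	 */
	return dst_alloc(&my_blackhole_ops, dev, 1, DST_OBSOLETE_DEAD, 0);
}
#endif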

static void __metadata_dst_init(struct metadata_dst *md_dst,
				enum metadata_type type, u8 optslen)
{
	struct dst_entry *dst;

	dst = &md_dst->dst;
	dst_init(dst, &dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE,
		 DST_METADATA | DST_NOCOUNT);
	/* Zero everything after the embedded dst_entry, including the
	 * trailing tunnel options area.
	 */
	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
	md_dst->type = type;
}

struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
					gfp_t flags)
{
	struct metadata_dst *md_dst;

	md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
	if (!md_dst)
		return NULL;

	__metadata_dst_init(md_dst, type, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc);
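
/* A usage sketch, not from this file: a tunnel driver in collect-metadata
 * mode allocates a metadata dst on receive, records the tunnel key and
 * attaches it to the skb for later consumers (routing, OVS, BPF).  The
 * function name and values are illustrative only.
 */
#if 0
static int example_tunnel_rx(struct sk_buff *skb, __be64 tun_id)
{
	struct metadata_dst *md = metadata_dst_alloc(0, METADATA_IP_TUNNEL,
						     GFP_ATOMIC);

	if (!md)
		return -ENOMEM;
	/* tun_info was zeroed by __metadata_dst_init(); absence of the TX
	 * mode flag marks this as receive-side metadata.
	 */
	md->u.tun_info.key.tun_id = tun_id;
	skb_dst_set(skb, &md->dst);
	return 0;
}
#endif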

void metadata_dst_free(struct metadata_dst *md_dst)
{
#ifdef CONFIG_DST_CACHE
	if (md_dst->type == METADATA_IP_TUNNEL)
		dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
#endif
	kfree(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free);

struct metadata_dst __percpu *
metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags)
{
	int cpu;
	struct metadata_dst __percpu *md_dst;

	md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
				    __alignof__(struct metadata_dst), flags);
	if (!md_dst)
		return NULL;

	for_each_possible_cpu(cpu)
		__metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);
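
/* A hedged sketch of the per-CPU variant's use: fast paths that rewrite
 * tunnel metadata (the BPF tunnel-key helpers in net/core/filter.c are an
 * in-tree user) keep one metadata_dst per CPU to avoid sharing state.
 * "example_md_dsts" and "example_set_tunnel" are hypothetical.
 */
#if 0
static struct metadata_dst __percpu *example_md_dsts;	/* set up at init */

static void example_set_tunnel(struct sk_buff *skb, __be64 tun_id)
{
	struct metadata_dst *md = this_cpu_ptr(example_md_dsts);

	md->u.tun_info.key.tun_id = tun_id;
	skb_dst_drop(skb);
	dst_hold(&md->dst);	/* the percpu entry outlives any one skb */
	skb_dst_set(skb, &md->dst);
}
#endif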

void metadata_dst_free_percpu(struct metadata_dst __percpu *md_dst)
{
#ifdef CONFIG_DST_CACHE
	int cpu;

	for_each_possible_cpu(cpu) {
		struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu);

		if (one_md_dst->type == METADATA_IP_TUNNEL)
			dst_cache_destroy(&one_md_dst->u.tun_info.dst_cache);
	}
#endif
	free_percpu(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free_percpu);