/*
 *      INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:    Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains
 *  long-lived information about the peer which doesn't depend on routes.
 *
 *  Nodes are removed only when their reference counter goes to 0.
 *  When that happens, the node may be removed once a sufficient amount of
 *  time has passed since its last use.  A less-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an RB tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  number of long-living nodes in a single hash slot would significantly delay
 *  lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND the reference count being 0.
 *  3.  The global variable peer_total is modified under the pool lock.
 *  4.  struct inet_peer fields modification:
 *      rb_node: pool lock
 *      refcnt: atomically against modifications on other CPUs;
 *         usually under some other lock to prevent the node from disappearing
 *      daddr: unchangeable
 */

static struct kmem_cache *peer_cachep __ro_after_init;

void inet_peer_base_init(struct inet_peer_base *bp)
{
    bp->rb_root = RB_ROOT;
    seqlock_init(&bp->lock);
    bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);

#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly;  /* start to throw entries more
                     * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;  /* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;  /* usual time to live: 10 min */

/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
    u64 nr_entries;

     /* 1% of physical memory */
    nr_entries = div64_ul((u64)totalram_pages() << PAGE_SHIFT,
                  100 * L1_CACHE_ALIGN(sizeof(struct inet_peer)));

    inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);

    peer_cachep = kmem_cache_create("inet_peer_cache",
            sizeof(struct inet_peer),
            0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
            NULL);
}
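
/*
 * Worked sizing example (hypothetical numbers): if
 * L1_CACHE_ALIGN(sizeof(struct inet_peer)) rounds to 192 bytes on a given
 * build, a machine with 4 GiB of RAM gets
 *
 *     nr_entries = 4294967296 / (100 * 192) ~= 223696
 *
 * which clamp_val() caps at 65536 + 128 = 65664.  Under these assumptions,
 * anything above roughly 1.2 GiB of RAM already hits the upper cap, and the
 * lower bound of 4096 only matters on very small systems.
 */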

/* Called with rcu_read_lock() or base->lock held */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
                struct inet_peer_base *base,
                unsigned int seq,
                struct inet_peer *gc_stack[],
                unsigned int *gc_cnt,
                struct rb_node **parent_p,
                struct rb_node ***pp_p)
{
    struct rb_node **pp, *parent, *next;
    struct inet_peer *p;

    pp = &base->rb_root.rb_node;
    parent = NULL;
    while (1) {
        int cmp;

        next = rcu_dereference_raw(*pp);
        if (!next)
            break;
        parent = next;
        p = rb_entry(parent, struct inet_peer, rb_node);
        cmp = inetpeer_addr_cmp(daddr, &p->daddr);
        if (cmp == 0) {
            if (!refcount_inc_not_zero(&p->refcnt))
                break;
            return p;
        }
        if (gc_stack) {
            if (*gc_cnt < PEER_MAX_GC)
                gc_stack[(*gc_cnt)++] = p;
        } else if (unlikely(read_seqretry(&base->lock, seq))) {
            break;
        }
        if (cmp == -1)
            pp = &next->rb_left;
        else
            pp = &next->rb_right;
    }
    *parent_p = parent;
    *pp_p = pp;
    return NULL;
}
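
/*
 * lookup() runs in two modes.  Lockless callers pass gc_stack == NULL plus a
 * seqcount snapshot: the walk bails out via read_seqretry() as soon as a
 * writer may have rearranged the tree underneath it, and it only returns a
 * node whose refcount it managed to raise from non-zero.  Locked callers
 * pass a gc_stack, and every non-matching node visited on the way down (up
 * to PEER_MAX_GC of them) is recorded as a garbage-collection candidate for
 * inet_peer_gc().
 */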

static void inetpeer_free_rcu(struct rcu_head *head)
{
    kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

/* Perform garbage collection on all items stacked during a lookup. */
static void inet_peer_gc(struct inet_peer_base *base,
             struct inet_peer *gc_stack[],
             unsigned int gc_cnt)
{
    int peer_threshold, peer_maxttl, peer_minttl;
    struct inet_peer *p;
    __u32 delta, ttl;
    int i;

    peer_threshold = READ_ONCE(inet_peer_threshold);
    peer_maxttl = READ_ONCE(inet_peer_maxttl);
    peer_minttl = READ_ONCE(inet_peer_minttl);

    if (base->total >= peer_threshold)
        ttl = 0; /* be aggressive */
    else
        ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
            base->total / peer_threshold * HZ;
    for (i = 0; i < gc_cnt; i++) {
        p = gc_stack[i];

        /* The READ_ONCE() pairs with the WRITE_ONCE()
         * in inet_putpeer()
         */
        delta = (__u32)jiffies - READ_ONCE(p->dtime);

        if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
            gc_stack[i] = NULL;
    }
    for (i = 0; i < gc_cnt; i++) {
        p = gc_stack[i];
        if (p) {
            rb_erase(&p->rb_node, &base->rb_root);
            base->total--;
            call_rcu(&p->rcu, inetpeer_free_rcu);
        }
    }
}
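
/*
 * The ttl computation above ramps linearly from inet_peer_maxttl for an
 * empty pool down to inet_peer_minttl as base->total approaches the
 * threshold; the / HZ ... * HZ pair keeps the intermediate product in
 * seconds so it cannot overflow 32 bits.  Worked example (hypothetical
 * numbers: HZ = 1000, the default TTLs above, threshold = 65664): with
 * the pool half full,
 *
 *     ttl = 600000 - (600000 - 120000) / 1000 * 32832 / 65664 * 1000
 *         = 600000 - 480 * 32832 / 65664 * 1000
 *         = 600000 - 240000 = 360000 jiffies (360 s)
 *
 * i.e. exactly halfway between the 600 s and 120 s bounds.
 */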

struct inet_peer *inet_getpeer(struct inet_peer_base *base,
                   const struct inetpeer_addr *daddr,
                   int create)
{
    struct inet_peer *p, *gc_stack[PEER_MAX_GC];
    struct rb_node **pp, *parent;
    unsigned int gc_cnt, seq;
    int invalidated;

    /* Attempt a lockless lookup first.
     * Because of a concurrent writer, we might not find an existing entry.
     */
    rcu_read_lock();
    seq = read_seqbegin(&base->lock);
    p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
    invalidated = read_seqretry(&base->lock, seq);
    rcu_read_unlock();

    if (p)
        return p;

    /* If no writer made a change during our lookup, we can return early. */
    if (!create && !invalidated)
        return NULL;

    /* Retry an exact lookup, this time taking the lock first.
     * At least the nodes should be hot in our cache.
     */
    parent = NULL;
    write_seqlock_bh(&base->lock);

    gc_cnt = 0;
    p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
    if (!p && create) {
        p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
        if (p) {
            p->daddr = *daddr;
            p->dtime = (__u32)jiffies;
            refcount_set(&p->refcnt, 2);
            atomic_set(&p->rid, 0);
            p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
            p->rate_tokens = 0;
            p->n_redirects = 0;
            /* 60*HZ is arbitrary, but chosen high enough that the first
             * calculation of tokens is at its maximum.
             */
            p->rate_last = jiffies - 60*HZ;

            rb_link_node(&p->rb_node, parent, pp);
            rb_insert_color(&p->rb_node, &base->rb_root);
            base->total++;
        }
    }
    if (gc_cnt)
        inet_peer_gc(base, gc_stack, gc_cnt);
    write_sequnlock_bh(&base->lock);

    return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);
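
/*
 * A minimal caller sketch (hypothetical, not part of this file): look up a
 * peer for an IPv4 destination, use it, then drop the reference.  It assumes
 * inetpeer_set_addr_v4() from <net/inetpeer.h>; the example_* name is made
 * up for illustration.
 *
 *    static void example_touch_peer(struct inet_peer_base *base, __be32 ip)
 *    {
 *        struct inetpeer_addr iaddr;
 *        struct inet_peer *peer;
 *
 *        inetpeer_set_addr_v4(&iaddr, ip);
 *        peer = inet_getpeer(base, &iaddr, 1);  // create if missing
 *        if (!peer)
 *            return;                            // GFP_ATOMIC allocation failed
 *        // ... read or update peer fields here ...
 *        inet_putpeer(peer);                    // drop the caller's reference
 *    }
 */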

void inet_putpeer(struct inet_peer *p)
{
    /* The WRITE_ONCE() pairs with itself (we run lockless)
     * and the READ_ONCE() in inet_peer_gc()
     */
    WRITE_ONCE(p->dtime, (__u32)jiffies);

    if (refcount_dec_and_test(&p->refcnt))
        call_rcu(&p->rcu, inetpeer_free_rcu);
}
EXPORT_SYMBOL_GPL(inet_putpeer);
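
/*
 * Reference counting note: inet_getpeer() creates nodes with a refcount of
 * 2, one reference for the tree and one for the caller, and inet_putpeer()
 * drops the caller's reference.  The tree's reference is only ever dropped
 * together with erasing the node under the write lock (refcount_dec_if_one()
 * in inet_peer_gc(), rb_erase() + inet_putpeer() in
 * inetpeer_invalidate_tree()), so when the count reaches zero here the node
 * is already unreachable and can safely be freed after an RCU grace period.
 */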

/*
 *  Check transmit rate limitation for a given message.
 *  The rate information is held in the inet_peer entries now.
 *  This function is generic and could be used for other purposes
 *  too.  It uses a token bucket filter, as suggested by Alexey Kuznetsov.
 *
 *  Note that the same inet_peer fields are modified by functions in
 *  route.c too, but those work for packet destinations while xrlim_allow
 *  works for icmp destinations.  This means the rate limiting information
 *  for one "ip object" is shared - and these ICMPs are limited twice:
 *  by source and by destination.
 *
 *  RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *            SHOULD allow setting of rate limits
 *
 *  Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
    unsigned long now, token;
    bool rc = false;

    if (!peer)
        return true;

    token = peer->rate_tokens;
    now = jiffies;
    token += now - peer->rate_last;
    peer->rate_last = now;
    if (token > XRLIM_BURST_FACTOR * timeout)
        token = XRLIM_BURST_FACTOR * timeout;
    if (token >= timeout) {
        token -= timeout;
        rc = true;
    }
    peer->rate_tokens = token;
    return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
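
/*
 * Worked example (hypothetical numbers, HZ = 1000): with timeout = HZ, a
 * peer earns one token per jiffy of elapsed time, capped at a burst of
 * XRLIM_BURST_FACTOR * HZ = 6000 tokens.  Each allowed message costs HZ
 * tokens, so a long-idle peer may send a burst of six messages and is
 * then limited to roughly one per second.
 */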

void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
    struct rb_node *p = rb_first(&base->rb_root);

    while (p) {
        struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

        p = rb_next(p);
        rb_erase(&peer->rb_node, &base->rb_root);
        inet_putpeer(peer);
        cond_resched();
    }

    base->total = 0;
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);