/* Event cache for netfilter. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_extend.h>

static DEFINE_MUTEX(nf_ct_ecache_mutex);

#define DYING_NULLS_VAL		((1 << 30) + 1)
#define ECACHE_MAX_JIFFIES	msecs_to_jiffies(10)
#define ECACHE_RETRY_JIFFIES	msecs_to_jiffies(10)

enum retry_state {
	STATE_CONGESTED,
	STATE_RESTART,
	STATE_DONE,
};

struct nf_conntrack_net_ecache *nf_conn_pernet_ecache(const struct net *net)
{
	struct nf_conntrack_net *cnet = nf_ct_pernet(net);

	return &cnet->ecache;
}
#if IS_MODULE(CONFIG_NF_CT_NETLINK)
EXPORT_SYMBOL_GPL(nf_conn_pernet_ecache);
#endif

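/* Try to re-deliver the IPCT_DESTROY event for each conntrack on the dying
 * list; entries whose event was sent are moved to a private list so their
 * references can be dropped after the dying_lock is released.
 */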
static enum retry_state ecache_work_evict_list(struct nf_conntrack_net *cnet)
{
	unsigned long stop = jiffies + ECACHE_MAX_JIFFIES;
	struct hlist_nulls_head evicted_list;
	enum retry_state ret = STATE_DONE;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int sent;

	INIT_HLIST_NULLS_HEAD(&evicted_list, DYING_NULLS_VAL);

next:
	sent = 0;
	spin_lock_bh(&cnet->ecache.dying_lock);

	hlist_nulls_for_each_entry_safe(h, n, &cnet->ecache.dying_list, hnnode) {
		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

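		/* The worker owns all entries, ct remains valid until nf_ct_put
		 * in the loop below.
		 */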
		if (nf_conntrack_event(IPCT_DESTROY, ct)) {
			ret = STATE_CONGESTED;
			break;
		}

		hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
		hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
				     &evicted_list);

		if (time_after(stop, jiffies)) {
			ret = STATE_RESTART;
			break;
		}

		if (sent++ > 16) {
			spin_unlock_bh(&cnet->ecache.dying_lock);
			cond_resched();
			goto next;
		}
	}

	spin_unlock_bh(&cnet->ecache.dying_lock);

	hlist_nulls_for_each_entry_safe(h, n, &evicted_list, hnnode) {
		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

		hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
		nf_ct_put(ct);

		cond_resched();
	}

	return ret;
}

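/* Delayed work callback: retry event delivery for the dying list and
 * reschedule itself based on the result (congested, restart or done).
 */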
static void ecache_work(struct work_struct *work)
{
	struct nf_conntrack_net *cnet = container_of(work, struct nf_conntrack_net,
						     ecache.dwork.work);
	int ret, delay = -1;

	ret = ecache_work_evict_list(cnet);
	switch (ret) {
	case STATE_CONGESTED:
		delay = ECACHE_RETRY_JIFFIES;
		break;
	case STATE_RESTART:
		delay = 0;
		break;
	case STATE_DONE:
		break;
	}

	if (delay >= 0)
		schedule_delayed_work(&cnet->ecache.dwork, delay);
}

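/* Deliver the event through the registered notifier, if any, and update
 * e->missed: undelivered events are recorded there for a later resend,
 * successfully resent ones are cleared.
 */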
static int __nf_conntrack_eventmask_report(struct nf_conntrack_ecache *e,
					   const u32 events,
					   const u32 missed,
					   const struct nf_ct_event *item)
{
	struct net *net = nf_ct_net(item->ct);
	struct nf_ct_event_notifier *notify;
	u32 old, want;
	int ret;

	if (!((events | missed) & e->ctmask))
		return 0;

	rcu_read_lock();

	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
	if (!notify) {
		rcu_read_unlock();
		return 0;
	}

	ret = notify->ct_event(events | missed, item);
	rcu_read_unlock();

	if (likely(ret >= 0 && missed == 0))
		return 0;

	do {
		old = READ_ONCE(e->missed);
		if (ret < 0)
			want = old | events;
		else
			want = old & ~missed;
	} while (cmpxchg(&e->missed, old, want) != old);

	return ret;
}

int nf_conntrack_eventmask_report(unsigned int events, struct nf_conn *ct,
				  u32 portid, int report)
{
	struct nf_conntrack_ecache *e;
	struct nf_ct_event item;
	unsigned int missed;
	int ret;

	if (!nf_ct_is_confirmed(ct))
		return 0;

	e = nf_ct_ecache_find(ct);
	if (!e)
		return 0;

	memset(&item, 0, sizeof(item));

	item.ct = ct;
	item.portid = e->portid ? e->portid : portid;
	item.report = report;

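	/* This is a resent of a destroy event? If so, skip missed */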
	missed = e->portid ? 0 : e->missed;

	ret = __nf_conntrack_eventmask_report(e, events, missed, &item);
	if (unlikely(ret < 0 && (events & (1 << IPCT_DESTROY)))) {
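		/* This is a destroy event that has been triggered by a process,
		 * we store the PORTID to include it in the retransmission.
		 */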
		if (e->portid == 0 && portid != 0)
			e->portid = portid;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_eventmask_report);

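/* deliver cached events and clear cache entry - must be called with locally
 * disabled softirqs
 */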
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
	struct nf_conntrack_ecache *e;
	struct nf_ct_event item;
	unsigned int events;

	if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct))
		return;

	e = nf_ct_ecache_find(ct);
	if (e == NULL)
		return;

	events = xchg(&e->cache, 0);

	item.ct = ct;
	item.portid = 0;
	item.report = 0;

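	/* We make a copy of the missed event cache without taking
	 * the lock, thus we may send missed events twice. However,
	 * this does not harm and it happens very rarely.
	 */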
	__nf_conntrack_eventmask_report(e, events, e->missed, &item);
}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);

void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
			       struct nf_conntrack_expect *exp,
			       u32 portid, int report)
{
	struct net *net = nf_ct_exp_net(exp);
	struct nf_ct_event_notifier *notify;
	struct nf_conntrack_ecache *e;

	rcu_read_lock();
	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
	if (!notify)
		goto out_unlock;

	e = nf_ct_ecache_find(exp->master);
	if (!e)
		goto out_unlock;

	if (e->expmask & (1 << event)) {
		struct nf_exp_event item = {
			.exp = exp,
			.portid = portid,
			.report = report
		};
		notify->exp_event(1 << event, &item);
	}
out_unlock:
	rcu_read_unlock();
}

void nf_conntrack_register_notifier(struct net *net,
				    const struct nf_ct_event_notifier *new)
{
	struct nf_ct_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	WARN_ON_ONCE(notify);
	rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
	mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);

void nf_conntrack_unregister_notifier(struct net *net)
{
	mutex_lock(&nf_ct_ecache_mutex);
	RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
	mutex_unlock(&nf_ct_ecache_mutex);
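	/* synchronize_rcu() is called after netns pre_exit */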
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);

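/* Called after an IPCT_DESTROY event could not be delivered (arm the retry
 * worker) or was delivered (kick or quiesce the worker, depending on whether
 * the dying list still has entries).
 */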
void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state)
{
	struct nf_conntrack_net *cnet = nf_ct_pernet(net);

	if (state == NFCT_ECACHE_DESTROY_FAIL &&
	    !delayed_work_pending(&cnet->ecache.dwork)) {
		schedule_delayed_work(&cnet->ecache.dwork, HZ);
		net->ct.ecache_dwork_pending = true;
	} else if (state == NFCT_ECACHE_DESTROY_SENT) {
		if (!hlist_nulls_empty(&cnet->ecache.dying_list))
			mod_delayed_work(system_wq, &cnet->ecache.dwork, 0);
		else
			net->ct.ecache_dwork_pending = false;
	}
}

bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_ecache *e;

	switch (net->ct.sysctl_events) {
	case 0:
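		/* assignment via template / ruleset? ignore sysctl. */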
		if (ctmask || expmask)
			break;
		return true;
	case 2:
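		/* autodetect: no event listener, don't allocate extension. */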
		if (!READ_ONCE(net->ct.ctnetlink_has_listener))
			return true;
		fallthrough;
	case 1:
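		/* no mask given by the caller: subscribe to all events */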
		if (!ctmask && !expmask) {
			ctmask = ~0;
			expmask = ~0;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return true;
	}

	e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
	if (e) {
		e->ctmask = ctmask;
		e->expmask = expmask;
	}

	return e != NULL;
}
EXPORT_SYMBOL_GPL(nf_ct_ecache_ext_add);

#define NF_CT_EVENTS_DEFAULT 2
static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;

void nf_conntrack_ecache_pernet_init(struct net *net)
{
	struct nf_conntrack_net *cnet = nf_ct_pernet(net);

	net->ct.sysctl_events = nf_ct_events;

	INIT_DELAYED_WORK(&cnet->ecache.dwork, ecache_work);
	INIT_HLIST_NULLS_HEAD(&cnet->ecache.dying_list, DYING_NULLS_VAL);
	spin_lock_init(&cnet->ecache.dying_lock);

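	/* all event bits must fit into the u16 ctmask/expmask fields */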
	BUILD_BUG_ON(__IPCT_MAX >= 16);
}

void nf_conntrack_ecache_pernet_fini(struct net *net)
{
	struct nf_conntrack_net *cnet = nf_ct_pernet(net);

	cancel_delayed_work_sync(&cnet->ecache.dwork);
}