// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright 2020 NXP */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gate.h>

static unsigned int gate_net_id;
static struct tc_action_ops act_gate_ops;

static ktime_t gate_get_time(struct tcf_gate *gact)
{
    ktime_t mono = ktime_get();

    switch (gact->tk_offset) {
    case TK_OFFS_MAX:
        return mono;
    default:
        return ktime_mono_to_any(mono, gact->tk_offset);
    }

    return KTIME_MAX;
}

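/* Compute the schedule start: if the configured base time is still in the
 * future it is used as-is, otherwise the start is aligned to the beginning
 * of the next full cycle after 'now'.
 */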
static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
{
    struct tcf_gate_params *param = &gact->param;
    ktime_t now, base, cycle;
    u64 n;

    base = ns_to_ktime(param->tcfg_basetime);
    now = gate_get_time(gact);

    if (ktime_after(base, now)) {
        *start = base;
        return;
    }

    cycle = param->tcfg_cycletime;

    n = div64_u64(ktime_sub_ns(now, base), cycle);
    *start = ktime_add_ns(base, (n + 1) * cycle);
}

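/* Arm the schedule hrtimer for the earlier of the requested start and any
 * expiry that is already programmed.
 */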
static void gate_start_timer(struct tcf_gate *gact, ktime_t start)
{
    ktime_t expires;

    expires = hrtimer_get_expires(&gact->hitimer);
    if (expires == 0)
        expires = KTIME_MAX;

    start = min_t(ktime_t, start, expires);

    hrtimer_start(&gact->hitimer, start, HRTIMER_MODE_ABS_SOFT);
}

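/* Timer callback: apply the pending entry's gate state and octet budget,
 * then advance to the next entry in the circular schedule. If the computed
 * close time is already in the past, re-align it to the next cycle boundary
 * before re-arming the timer.
 */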
static enum hrtimer_restart gate_timer_func(struct hrtimer *timer)
{
    struct tcf_gate *gact = container_of(timer, struct tcf_gate,
                         hitimer);
    struct tcf_gate_params *p = &gact->param;
    struct tcfg_gate_entry *next;
    ktime_t close_time, now;

    spin_lock(&gact->tcf_lock);

    next = gact->next_entry;

    /* cycle start, clear pending bit, clear total octets */
    gact->current_gate_status = next->gate_state ? GATE_ACT_GATE_OPEN : 0;
    gact->current_entry_octets = 0;
    gact->current_max_octets = next->maxoctets;

    gact->current_close_time = ktime_add_ns(gact->current_close_time,
                        next->interval);

    close_time = gact->current_close_time;

    if (list_is_last(&next->list, &p->entries))
        next = list_first_entry(&p->entries,
                    struct tcfg_gate_entry, list);
    else
        next = list_next_entry(next, list);

    now = gate_get_time(gact);

    if (ktime_after(now, close_time)) {
        ktime_t cycle, base;
        u64 n;

        cycle = p->tcfg_cycletime;
        base = ns_to_ktime(p->tcfg_basetime);
        n = div64_u64(ktime_sub_ns(now, base), cycle);
        close_time = ktime_add_ns(base, (n + 1) * cycle);
    }

    gact->next_entry = next;

    hrtimer_set_expires(&gact->hitimer, close_time);

    spin_unlock(&gact->tcf_lock);

    return HRTIMER_RESTART;
}

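/* Per-packet action handler: while the schedule is still pending, packets
 * are passed through; once running, packets are dropped when the gate is
 * closed or when the entry's maximum octet budget is exceeded.
 */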
static int tcf_gate_act(struct sk_buff *skb, const struct tc_action *a,
            struct tcf_result *res)
{
    struct tcf_gate *gact = to_gate(a);

    spin_lock(&gact->tcf_lock);

    tcf_lastuse_update(&gact->tcf_tm);
    bstats_update(&gact->tcf_bstats, skb);

    if (unlikely(gact->current_gate_status & GATE_ACT_PENDING)) {
        spin_unlock(&gact->tcf_lock);
        return gact->tcf_action;
    }

    if (!(gact->current_gate_status & GATE_ACT_GATE_OPEN))
        goto drop;

    if (gact->current_max_octets >= 0) {
        gact->current_entry_octets += qdisc_pkt_len(skb);
        if (gact->current_entry_octets > gact->current_max_octets) {
            gact->tcf_qstats.overlimits++;
            goto drop;
        }
    }

    spin_unlock(&gact->tcf_lock);

    return gact->tcf_action;
drop:
    gact->tcf_qstats.drops++;
    spin_unlock(&gact->tcf_lock);

    return TC_ACT_SHOT;
}

static const struct nla_policy entry_policy[TCA_GATE_ENTRY_MAX + 1] = {
    [TCA_GATE_ENTRY_INDEX]      = { .type = NLA_U32 },
    [TCA_GATE_ENTRY_GATE]       = { .type = NLA_FLAG },
    [TCA_GATE_ENTRY_INTERVAL]   = { .type = NLA_U32 },
    [TCA_GATE_ENTRY_IPV]        = { .type = NLA_S32 },
    [TCA_GATE_ENTRY_MAX_OCTETS] = { .type = NLA_S32 },
};

static const struct nla_policy gate_policy[TCA_GATE_MAX + 1] = {
    [TCA_GATE_PARMS]        =
        NLA_POLICY_EXACT_LEN(sizeof(struct tc_gate)),
    [TCA_GATE_PRIORITY]     = { .type = NLA_S32 },
    [TCA_GATE_ENTRY_LIST]       = { .type = NLA_NESTED },
    [TCA_GATE_BASE_TIME]        = { .type = NLA_U64 },
    [TCA_GATE_CYCLE_TIME]       = { .type = NLA_U64 },
    [TCA_GATE_CYCLE_TIME_EXT]   = { .type = NLA_U64 },
    [TCA_GATE_FLAGS]        = { .type = NLA_U32 },
    [TCA_GATE_CLOCKID]      = { .type = NLA_S32 },
};

static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry,
               struct netlink_ext_ack *extack)
{
    u32 interval = 0;

    entry->gate_state = nla_get_flag(tb[TCA_GATE_ENTRY_GATE]);

    if (tb[TCA_GATE_ENTRY_INTERVAL])
        interval = nla_get_u32(tb[TCA_GATE_ENTRY_INTERVAL]);

    if (interval == 0) {
        NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
        return -EINVAL;
    }

    entry->interval = interval;

    if (tb[TCA_GATE_ENTRY_IPV])
        entry->ipv = nla_get_s32(tb[TCA_GATE_ENTRY_IPV]);
    else
        entry->ipv = -1;

    if (tb[TCA_GATE_ENTRY_MAX_OCTETS])
        entry->maxoctets = nla_get_s32(tb[TCA_GATE_ENTRY_MAX_OCTETS]);
    else
        entry->maxoctets = -1;

    return 0;
}

static int parse_gate_entry(struct nlattr *n, struct tcfg_gate_entry *entry,
                int index, struct netlink_ext_ack *extack)
{
    struct nlattr *tb[TCA_GATE_ENTRY_MAX + 1] = { };
    int err;

    err = nla_parse_nested(tb, TCA_GATE_ENTRY_MAX, n, entry_policy, extack);
    if (err < 0) {
        NL_SET_ERR_MSG(extack, "Could not parse nested entry");
        return -EINVAL;
    }

    entry->index = index;

    return fill_gate_entry(tb, entry, extack);
}

static void release_entry_list(struct list_head *entries)
{
    struct tcfg_gate_entry *entry, *e;

    list_for_each_entry_safe(entry, e, entries, list) {
        list_del(&entry->list);
        kfree(entry);
    }
}

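/* Parse the nested TCA_GATE_ENTRY_LIST into sched->entries. Returns the
 * number of entries parsed, or a negative errno (after freeing any entries
 * already added) on failure.
 */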
static int parse_gate_list(struct nlattr *list_attr,
               struct tcf_gate_params *sched,
               struct netlink_ext_ack *extack)
{
    struct tcfg_gate_entry *entry;
    struct nlattr *n;
    int err, rem;
    int i = 0;

    if (!list_attr)
        return -EINVAL;

    nla_for_each_nested(n, list_attr, rem) {
        if (nla_type(n) != TCA_GATE_ONE_ENTRY) {
            NL_SET_ERR_MSG(extack, "Attribute isn't type 'entry'");
            continue;
        }

        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry) {
            NL_SET_ERR_MSG(extack, "Not enough memory for entry");
            err = -ENOMEM;
            goto release_list;
        }

        err = parse_gate_entry(n, entry, i, extack);
        if (err < 0) {
            kfree(entry);
            goto release_list;
        }

        list_add_tail(&entry->list, &sched->entries);
        i++;
    }

    sched->num_entries = i;

    return i;

release_list:
    release_entry_list(&sched->entries);

    return err;
}

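/* (Re)initialize the schedule hrtimer. When reconfiguring an existing
 * action with a new base time or clock, the old timer is cancelled with
 * tcf_lock dropped, since the timer callback takes the same lock.
 */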
static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
                 enum tk_offsets tko, s32 clockid,
                 bool do_init)
{
    if (!do_init) {
        if (basetime == gact->param.tcfg_basetime &&
            tko == gact->tk_offset &&
            clockid == gact->param.tcfg_clockid)
            return;

        spin_unlock_bh(&gact->tcf_lock);
        hrtimer_cancel(&gact->hitimer);
        spin_lock_bh(&gact->tcf_lock);
    }
    gact->param.tcfg_basetime = basetime;
    gact->param.tcfg_clockid = clockid;
    gact->tk_offset = tko;
    hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
    gact->hitimer.function = gate_timer_func;
}

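/* Create or update a gate action from netlink attributes: validate the
 * clockid, build the entry list, derive the cycle time if needed, set up
 * the hrtimer and arm it for the schedule start.
 */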
static int tcf_gate_init(struct net *net, struct nlattr *nla,
             struct nlattr *est, struct tc_action **a,
             struct tcf_proto *tp, u32 flags,
             struct netlink_ext_ack *extack)
{
    struct tc_action_net *tn = net_generic(net, gate_net_id);
    enum tk_offsets tk_offset = TK_OFFS_TAI;
    bool bind = flags & TCA_ACT_FLAGS_BIND;
    struct nlattr *tb[TCA_GATE_MAX + 1];
    struct tcf_chain *goto_ch = NULL;
    u64 cycletime = 0, basetime = 0;
    struct tcf_gate_params *p;
    s32 clockid = CLOCK_TAI;
    struct tcf_gate *gact;
    struct tc_gate *parm;
    int ret = 0, err;
    u32 gflags = 0;
    s32 prio = -1;
    ktime_t start;
    u32 index;

    if (!nla)
        return -EINVAL;

    err = nla_parse_nested(tb, TCA_GATE_MAX, nla, gate_policy, extack);
    if (err < 0)
        return err;

    if (!tb[TCA_GATE_PARMS])
        return -EINVAL;

    if (tb[TCA_GATE_CLOCKID]) {
        clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
        switch (clockid) {
        case CLOCK_REALTIME:
            tk_offset = TK_OFFS_REAL;
            break;
        case CLOCK_MONOTONIC:
            tk_offset = TK_OFFS_MAX;
            break;
        case CLOCK_BOOTTIME:
            tk_offset = TK_OFFS_BOOT;
            break;
        case CLOCK_TAI:
            tk_offset = TK_OFFS_TAI;
            break;
        default:
            NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
            return -EINVAL;
        }
    }

    parm = nla_data(tb[TCA_GATE_PARMS]);
    index = parm->index;

    err = tcf_idr_check_alloc(tn, &index, a, bind);
    if (err < 0)
        return err;

    if (err && bind)
        return 0;

    if (!err) {
        ret = tcf_idr_create(tn, index, est, a,
                     &act_gate_ops, bind, false, flags);
        if (ret) {
            tcf_idr_cleanup(tn, index);
            return ret;
        }

        ret = ACT_P_CREATED;
    } else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
        tcf_idr_release(*a, bind);
        return -EEXIST;
    }

    if (tb[TCA_GATE_PRIORITY])
        prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);

    if (tb[TCA_GATE_BASE_TIME])
        basetime = nla_get_u64(tb[TCA_GATE_BASE_TIME]);

    if (tb[TCA_GATE_FLAGS])
        gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);

    gact = to_gate(*a);
    if (ret == ACT_P_CREATED)
        INIT_LIST_HEAD(&gact->param.entries);

    err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
    if (err < 0)
        goto release_idr;

    spin_lock_bh(&gact->tcf_lock);
    p = &gact->param;

    if (tb[TCA_GATE_CYCLE_TIME])
        cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);

    if (tb[TCA_GATE_ENTRY_LIST]) {
        err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
        if (err < 0)
            goto chain_put;
    }

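    /* No explicit cycle time: derive it from the sum of all entry
     * intervals; an empty schedule is rejected.
     */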
    if (!cycletime) {
        struct tcfg_gate_entry *entry;
        ktime_t cycle = 0;

        list_for_each_entry(entry, &p->entries, list)
            cycle = ktime_add_ns(cycle, entry->interval);
        cycletime = cycle;
        if (!cycletime) {
            err = -EINVAL;
            goto chain_put;
        }
    }
    p->tcfg_cycletime = cycletime;

    if (tb[TCA_GATE_CYCLE_TIME_EXT])
        p->tcfg_cycletime_ext =
            nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);

    gate_setup_timer(gact, basetime, tk_offset, clockid,
             ret == ACT_P_CREATED);
    p->tcfg_priority = prio;
    p->tcfg_flags = gflags;
    gate_get_start_time(gact, &start);

    gact->current_close_time = start;
    gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;

    gact->next_entry = list_first_entry(&p->entries,
                        struct tcfg_gate_entry, list);

    goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);

    gate_start_timer(gact, start);

    spin_unlock_bh(&gact->tcf_lock);

    if (goto_ch)
        tcf_chain_put_by_act(goto_ch);

    return ret;

chain_put:
    spin_unlock_bh(&gact->tcf_lock);

    if (goto_ch)
        tcf_chain_put_by_act(goto_ch);
release_idr:
    /* action is not inserted in any list: it's safe to init hitimer
     * without taking tcf_lock.
     */
    if (ret == ACT_P_CREATED)
        gate_setup_timer(gact, gact->param.tcfg_basetime,
                 gact->tk_offset, gact->param.tcfg_clockid,
                 true);
    tcf_idr_release(*a, bind);
    return err;
}

static void tcf_gate_cleanup(struct tc_action *a)
{
    struct tcf_gate *gact = to_gate(a);
    struct tcf_gate_params *p;

    p = &gact->param;
    hrtimer_cancel(&gact->hitimer);
    release_entry_list(&p->entries);
}

static int dumping_entry(struct sk_buff *skb,
             struct tcfg_gate_entry *entry)
{
    struct nlattr *item;

    item = nla_nest_start_noflag(skb, TCA_GATE_ONE_ENTRY);
    if (!item)
        return -ENOSPC;

    if (nla_put_u32(skb, TCA_GATE_ENTRY_INDEX, entry->index))
        goto nla_put_failure;

    if (entry->gate_state && nla_put_flag(skb, TCA_GATE_ENTRY_GATE))
        goto nla_put_failure;

    if (nla_put_u32(skb, TCA_GATE_ENTRY_INTERVAL, entry->interval))
        goto nla_put_failure;

    if (nla_put_s32(skb, TCA_GATE_ENTRY_MAX_OCTETS, entry->maxoctets))
        goto nla_put_failure;

    if (nla_put_s32(skb, TCA_GATE_ENTRY_IPV, entry->ipv))
        goto nla_put_failure;

    return nla_nest_end(skb, item);

nla_put_failure:
    nla_nest_cancel(skb, item);
    return -1;
}

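/* Serialize the action's parameters and schedule entries back to netlink,
 * under tcf_lock.
 */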
static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a,
             int bind, int ref)
{
    unsigned char *b = skb_tail_pointer(skb);
    struct tcf_gate *gact = to_gate(a);
    struct tc_gate opt = {
        .index    = gact->tcf_index,
        .refcnt   = refcount_read(&gact->tcf_refcnt) - ref,
        .bindcnt  = atomic_read(&gact->tcf_bindcnt) - bind,
    };
    struct tcfg_gate_entry *entry;
    struct tcf_gate_params *p;
    struct nlattr *entry_list;
    struct tcf_t t;

    spin_lock_bh(&gact->tcf_lock);
    opt.action = gact->tcf_action;

    p = &gact->param;

    if (nla_put(skb, TCA_GATE_PARMS, sizeof(opt), &opt))
        goto nla_put_failure;

    if (nla_put_u64_64bit(skb, TCA_GATE_BASE_TIME,
                  p->tcfg_basetime, TCA_GATE_PAD))
        goto nla_put_failure;

    if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME,
                  p->tcfg_cycletime, TCA_GATE_PAD))
        goto nla_put_failure;

    if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME_EXT,
                  p->tcfg_cycletime_ext, TCA_GATE_PAD))
        goto nla_put_failure;

    if (nla_put_s32(skb, TCA_GATE_CLOCKID, p->tcfg_clockid))
        goto nla_put_failure;

    if (nla_put_u32(skb, TCA_GATE_FLAGS, p->tcfg_flags))
        goto nla_put_failure;

    if (nla_put_s32(skb, TCA_GATE_PRIORITY, p->tcfg_priority))
        goto nla_put_failure;

    entry_list = nla_nest_start_noflag(skb, TCA_GATE_ENTRY_LIST);
    if (!entry_list)
        goto nla_put_failure;

    list_for_each_entry(entry, &p->entries, list) {
        if (dumping_entry(skb, entry) < 0)
            goto nla_put_failure;
    }

    nla_nest_end(skb, entry_list);

    tcf_tm_dump(&t, &gact->tcf_tm);
    if (nla_put_64bit(skb, TCA_GATE_TM, sizeof(t), &t, TCA_GATE_PAD))
        goto nla_put_failure;
    spin_unlock_bh(&gact->tcf_lock);

    return skb->len;

nla_put_failure:
    spin_unlock_bh(&gact->tcf_lock);
    nlmsg_trim(skb, b);
    return -1;
}

static int tcf_gate_walker(struct net *net, struct sk_buff *skb,
               struct netlink_callback *cb, int type,
               const struct tc_action_ops *ops,
               struct netlink_ext_ack *extack)
{
    struct tc_action_net *tn = net_generic(net, gate_net_id);

    return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets,
                  u64 drops, u64 lastuse, bool hw)
{
    struct tcf_gate *gact = to_gate(a);
    struct tcf_t *tm = &gact->tcf_tm;

    tcf_action_update_stats(a, bytes, packets, drops, hw);
    tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_gate_search(struct net *net, struct tc_action **a, u32 index)
{
    struct tc_action_net *tn = net_generic(net, gate_net_id);

    return tcf_idr_search(tn, a, index);
}

static size_t tcf_gate_get_fill_size(const struct tc_action *act)
{
    return nla_total_size(sizeof(struct tc_gate));
}

static void tcf_gate_entry_destructor(void *priv)
{
    struct action_gate_entry *oe = priv;

    kfree(oe);
}

static int tcf_gate_get_entries(struct flow_action_entry *entry,
                const struct tc_action *act)
{
    entry->gate.entries = tcf_gate_get_list(act);

    if (!entry->gate.entries)
        return -EINVAL;

    entry->destructor = tcf_gate_entry_destructor;
    entry->destructor_priv = entry->gate.entries;

    return 0;
}

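/* Fill in the flow_action_entry used for hardware offload: on bind, copy
 * the gate parameters and entry list; otherwise only the action id is set.
 */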
static int tcf_gate_offload_act_setup(struct tc_action *act, void *entry_data,
                      u32 *index_inc, bool bind,
                      struct netlink_ext_ack *extack)
{
    int err;

    if (bind) {
        struct flow_action_entry *entry = entry_data;

        entry->id = FLOW_ACTION_GATE;
        entry->gate.prio = tcf_gate_prio(act);
        entry->gate.basetime = tcf_gate_basetime(act);
        entry->gate.cycletime = tcf_gate_cycletime(act);
        entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
        entry->gate.num_entries = tcf_gate_num_entries(act);
        err = tcf_gate_get_entries(entry, act);
        if (err)
            return err;
        *index_inc = 1;
    } else {
        struct flow_offload_action *fl_action = entry_data;

        fl_action->id = FLOW_ACTION_GATE;
    }

    return 0;
}

static struct tc_action_ops act_gate_ops = {
    .kind       =   "gate",
    .id     =   TCA_ID_GATE,
    .owner      =   THIS_MODULE,
    .act        =   tcf_gate_act,
    .dump       =   tcf_gate_dump,
    .init       =   tcf_gate_init,
    .cleanup    =   tcf_gate_cleanup,
    .walk       =   tcf_gate_walker,
    .stats_update   =   tcf_gate_stats_update,
    .get_fill_size  =   tcf_gate_get_fill_size,
    .lookup     =   tcf_gate_search,
    .offload_act_setup =    tcf_gate_offload_act_setup,
    .size       =   sizeof(struct tcf_gate),
};

static __net_init int gate_init_net(struct net *net)
{
    struct tc_action_net *tn = net_generic(net, gate_net_id);

    return tc_action_net_init(net, tn, &act_gate_ops);
}

static void __net_exit gate_exit_net(struct list_head *net_list)
{
    tc_action_net_exit(net_list, gate_net_id);
}

static struct pernet_operations gate_net_ops = {
    .init = gate_init_net,
    .exit_batch = gate_exit_net,
    .id   = &gate_net_id,
    .size = sizeof(struct tc_action_net),
};

static int __init gate_init_module(void)
{
    return tcf_register_action(&act_gate_ops, &gate_net_ops);
}

static void __exit gate_cleanup_module(void)
{
    tcf_unregister_action(&act_gate_ops, &gate_net_ops);
}

module_init(gate_init_module);
module_exit(gate_cleanup_module);
MODULE_LICENSE("GPL v2");