#ifndef _NET_FLOW_OFFLOAD_H
#define _NET_FLOW_OFFLOAD_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netlink.h>
#include <net/flow_dissector.h>

struct flow_match {
	struct flow_dissector *dissector;
	void *mask;
	void *key;
};

struct flow_match_meta {
	struct flow_dissector_key_meta *key, *mask;
};

struct flow_match_basic {
	struct flow_dissector_key_basic *key, *mask;
};

struct flow_match_control {
	struct flow_dissector_key_control *key, *mask;
};

struct flow_match_eth_addrs {
	struct flow_dissector_key_eth_addrs *key, *mask;
};

struct flow_match_vlan {
	struct flow_dissector_key_vlan *key, *mask;
};

struct flow_match_ipv4_addrs {
	struct flow_dissector_key_ipv4_addrs *key, *mask;
};

struct flow_match_ipv6_addrs {
	struct flow_dissector_key_ipv6_addrs *key, *mask;
};

struct flow_match_ip {
	struct flow_dissector_key_ip *key, *mask;
};

struct flow_match_ports {
	struct flow_dissector_key_ports *key, *mask;
};

struct flow_match_ports_range {
	struct flow_dissector_key_ports_range *key, *mask;
};

struct flow_match_icmp {
	struct flow_dissector_key_icmp *key, *mask;
};

struct flow_match_tcp {
	struct flow_dissector_key_tcp *key, *mask;
};

struct flow_match_mpls {
	struct flow_dissector_key_mpls *key, *mask;
};

struct flow_match_enc_keyid {
	struct flow_dissector_key_keyid *key, *mask;
};

struct flow_match_enc_opts {
	struct flow_dissector_key_enc_opts *key, *mask;
};

struct flow_match_ct {
	struct flow_dissector_key_ct *key, *mask;
};

struct flow_match_pppoe {
	struct flow_dissector_key_pppoe *key, *mask;
};

struct flow_rule;

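/* The helpers below fill @out with the key/mask pointers for one dissector
 * key of @rule. Callers typically check that the key is present in the
 * rule's dissector first, e.g. via flow_rule_match_key().
 */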
void flow_rule_match_meta(const struct flow_rule *rule,
		struct flow_match_meta *out);
void flow_rule_match_basic(const struct flow_rule *rule,
		struct flow_match_basic *out);
void flow_rule_match_control(const struct flow_rule *rule,
		struct flow_match_control *out);
void flow_rule_match_eth_addrs(const struct flow_rule *rule,
		struct flow_match_eth_addrs *out);
void flow_rule_match_vlan(const struct flow_rule *rule,
		struct flow_match_vlan *out);
void flow_rule_match_cvlan(const struct flow_rule *rule,
		struct flow_match_vlan *out);
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
		struct flow_match_ipv4_addrs *out);
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
		struct flow_match_ipv6_addrs *out);
void flow_rule_match_ip(const struct flow_rule *rule,
		struct flow_match_ip *out);
void flow_rule_match_ports(const struct flow_rule *rule,
		struct flow_match_ports *out);
void flow_rule_match_ports_range(const struct flow_rule *rule,
		struct flow_match_ports_range *out);
void flow_rule_match_tcp(const struct flow_rule *rule,
		struct flow_match_tcp *out);
void flow_rule_match_icmp(const struct flow_rule *rule,
		struct flow_match_icmp *out);
void flow_rule_match_mpls(const struct flow_rule *rule,
		struct flow_match_mpls *out);
void flow_rule_match_enc_control(const struct flow_rule *rule,
		struct flow_match_control *out);
void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
		struct flow_match_ipv4_addrs *out);
void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
		struct flow_match_ipv6_addrs *out);
void flow_rule_match_enc_ip(const struct flow_rule *rule,
		struct flow_match_ip *out);
void flow_rule_match_enc_ports(const struct flow_rule *rule,
		struct flow_match_ports *out);
void flow_rule_match_enc_keyid(const struct flow_rule *rule,
		struct flow_match_enc_keyid *out);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
		struct flow_match_enc_opts *out);
void flow_rule_match_ct(const struct flow_rule *rule,
		struct flow_match_ct *out);
void flow_rule_match_pppoe(const struct flow_rule *rule,
		struct flow_match_pppoe *out);

enum flow_action_id {
	FLOW_ACTION_ACCEPT = 0,
	FLOW_ACTION_DROP,
	FLOW_ACTION_TRAP,
	FLOW_ACTION_GOTO,
	FLOW_ACTION_REDIRECT,
	FLOW_ACTION_MIRRED,
	FLOW_ACTION_REDIRECT_INGRESS,
	FLOW_ACTION_MIRRED_INGRESS,
	FLOW_ACTION_VLAN_PUSH,
	FLOW_ACTION_VLAN_POP,
	FLOW_ACTION_VLAN_MANGLE,
	FLOW_ACTION_TUNNEL_ENCAP,
	FLOW_ACTION_TUNNEL_DECAP,
	FLOW_ACTION_MANGLE,
	FLOW_ACTION_ADD,
	FLOW_ACTION_CSUM,
	FLOW_ACTION_MARK,
	FLOW_ACTION_PTYPE,
	FLOW_ACTION_PRIORITY,
	FLOW_ACTION_WAKE,
	FLOW_ACTION_QUEUE,
	FLOW_ACTION_SAMPLE,
	FLOW_ACTION_POLICE,
	FLOW_ACTION_CT,
	FLOW_ACTION_CT_METADATA,
	FLOW_ACTION_MPLS_PUSH,
	FLOW_ACTION_MPLS_POP,
	FLOW_ACTION_MPLS_MANGLE,
	FLOW_ACTION_GATE,
	FLOW_ACTION_PPPOE_PUSH,
	FLOW_ACTION_JUMP,
	FLOW_ACTION_PIPE,
	FLOW_ACTION_VLAN_PUSH_ETH,
	FLOW_ACTION_VLAN_POP_ETH,
	FLOW_ACTION_CONTINUE,
	NUM_FLOW_ACTIONS,
};

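/* This is mirroring enum pedit_header_type definition for easy mapping between
 * tc pedit action. Legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is mapped to
 * FLOW_ACT_MANGLE_UNSPEC, which is supported by no driver.
 */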
enum flow_action_mangle_base {
	FLOW_ACT_MANGLE_UNSPEC = 0,
	FLOW_ACT_MANGLE_HDR_TYPE_ETH,
	FLOW_ACT_MANGLE_HDR_TYPE_IP4,
	FLOW_ACT_MANGLE_HDR_TYPE_IP6,
	FLOW_ACT_MANGLE_HDR_TYPE_TCP,
	FLOW_ACT_MANGLE_HDR_TYPE_UDP,
};

enum flow_action_hw_stats_bit {
	FLOW_ACTION_HW_STATS_IMMEDIATE_BIT,
	FLOW_ACTION_HW_STATS_DELAYED_BIT,
	FLOW_ACTION_HW_STATS_DISABLED_BIT,

	FLOW_ACTION_HW_STATS_NUM_BITS
};

enum flow_action_hw_stats {
	FLOW_ACTION_HW_STATS_IMMEDIATE =
		BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT),
	FLOW_ACTION_HW_STATS_DELAYED = BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT),
	FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE |
				   FLOW_ACTION_HW_STATS_DELAYED,
	FLOW_ACTION_HW_STATS_DISABLED =
		BIT(FLOW_ACTION_HW_STATS_DISABLED_BIT),
	FLOW_ACTION_HW_STATS_DONT_CARE = BIT(FLOW_ACTION_HW_STATS_NUM_BITS) - 1,
};

typedef void (*action_destr)(void *priv);

struct flow_action_cookie {
	u32 cookie_len;
	u8 cookie[];
};

struct flow_action_cookie *flow_action_cookie_create(void *data,
		unsigned int len,
		gfp_t gfp);
void flow_action_cookie_destroy(struct flow_action_cookie *cookie);

struct flow_action_entry {
	enum flow_action_id id;
	u32 hw_index;
	enum flow_action_hw_stats hw_stats;
	action_destr destructor;
	void *destructor_priv;
	union {
		u32 chain_index;	/* FLOW_ACTION_GOTO */
		struct net_device *dev;	/* FLOW_ACTION_REDIRECT */
		struct {		/* FLOW_ACTION_VLAN */
			u16 vid;
			__be16 proto;
			u8 prio;
		} vlan;
		struct {		/* FLOW_ACTION_VLAN_PUSH_ETH */
			unsigned char dst[ETH_ALEN];
			unsigned char src[ETH_ALEN];
		} vlan_push_eth;
		struct {		/* FLOW_ACTION_MANGLE */
					/* FLOW_ACTION_ADD */
			enum flow_action_mangle_base htype;
			u32 offset;
			u32 mask;
			u32 val;
		} mangle;
		struct ip_tunnel_info *tunnel;	/* FLOW_ACTION_TUNNEL_ENCAP */
		u32 csum_flags;		/* FLOW_ACTION_CSUM */
		u32 mark;		/* FLOW_ACTION_MARK */
		u16 ptype;		/* FLOW_ACTION_PTYPE */
		u32 priority;		/* FLOW_ACTION_PRIORITY */
		struct {		/* FLOW_ACTION_QUEUE */
			u32 ctx;
			u32 index;
			u8 vf;
		} queue;
		struct {		/* FLOW_ACTION_SAMPLE */
			struct psample_group *psample_group;
			u32 rate;
			u32 trunc_size;
			bool truncate;
		} sample;
		struct {		/* FLOW_ACTION_POLICE */
			u32 burst;
			u64 rate_bytes_ps;
			u64 peakrate_bytes_ps;
			u32 avrate;
			u16 overhead;
			u64 burst_pkt;
			u64 rate_pkt_ps;
			u32 mtu;
			struct {
				enum flow_action_id act_id;
				u32 extval;
			} exceed, notexceed;
		} police;
		struct {		/* FLOW_ACTION_CT */
			int action;
			u16 zone;
			struct nf_flowtable *flow_table;
		} ct;
		struct {		/* FLOW_ACTION_CT_METADATA */
			unsigned long cookie;
			u32 mark;
			u32 labels[4];
			bool orig_dir;
		} ct_metadata;
		struct {		/* FLOW_ACTION_MPLS_PUSH */
			u32 label;
			__be16 proto;
			u8 tc;
			u8 bos;
			u8 ttl;
		} mpls_push;
		struct {		/* FLOW_ACTION_MPLS_POP */
			__be16 proto;
		} mpls_pop;
		struct {		/* FLOW_ACTION_MPLS_MANGLE */
			u32 label;
			u8 tc;
			u8 bos;
			u8 ttl;
		} mpls_mangle;
		struct {		/* FLOW_ACTION_GATE */
			s32 prio;
			u64 basetime;
			u64 cycletime;
			u64 cycletimeext;
			u32 num_entries;
			struct action_gate_entry *entries;
		} gate;
		struct {		/* FLOW_ACTION_PPPOE_PUSH */
			u16 sid;
		} pppoe;
	};
	struct flow_action_cookie *cookie; /* user defined action cookie */
};

struct flow_action {
	unsigned int num_entries;
	struct flow_action_entry entries[];
};

static inline bool flow_action_has_entries(const struct flow_action *action)
{
	return action->num_entries;
}

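/**
 * flow_offload_has_one_action() - check if exactly one action is present
 * @action: tc filter flow offload action
 *
 * Return: true if exactly one action is present.
 */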
static inline bool flow_offload_has_one_action(const struct flow_action *action)
{
	return action->num_entries == 1;
}

static inline bool flow_action_is_last_entry(const struct flow_action *action,
					     const struct flow_action_entry *entry)
{
	return entry == &action->entries[action->num_entries - 1];
}

#define flow_action_for_each(__i, __act, __actions)			\
	for (__i = 0, __act = &(__actions)->entries[0];			\
	     __i < (__actions)->num_entries;				\
	     __act = &(__actions)->entries[++__i])
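
/* Example (illustrative sketch, not taken from any particular driver):
 * walking the action list of an offloaded rule typically looks like
 *
 *	const struct flow_action_entry *act;
 *	int i;
 *
 *	flow_action_for_each(i, act, &rule->action) {
 *		switch (act->id) {
 *		case FLOW_ACTION_DROP:
 *			// program a drop rule
 *			break;
 *		case FLOW_ACTION_REDIRECT:
 *			// act->dev is the target net_device
 *			break;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */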

static inline bool
flow_action_mixed_hw_stats_check(const struct flow_action *action,
				 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *action_entry;
	u8 last_hw_stats;
	int i;

	if (flow_offload_has_one_action(action))
		return true;

	flow_action_for_each(i, action_entry, action) {
		if (i && action_entry->hw_stats != last_hw_stats) {
			NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
			return false;
		}
		last_hw_stats = action_entry->hw_stats;
	}
	return true;
}

static inline const struct flow_action_entry *
flow_action_first_entry_get(const struct flow_action *action)
{
	WARN_ON(!flow_action_has_entries(action));
	return &action->entries[0];
}

static inline bool
__flow_action_hw_stats_check(const struct flow_action *action,
			     struct netlink_ext_ack *extack,
			     bool check_allow_bit,
			     enum flow_action_hw_stats_bit allow_bit)
{
	const struct flow_action_entry *action_entry;

	if (!flow_action_has_entries(action))
		return true;
	if (!flow_action_mixed_hw_stats_check(action, extack))
		return false;

	action_entry = flow_action_first_entry_get(action);

	/* Zero is not a legal value for hw_stats, catch anyone passing it */
	WARN_ON_ONCE(!action_entry->hw_stats);

	if (!check_allow_bit &&
	    ~action_entry->hw_stats & FLOW_ACTION_HW_STATS_ANY) {
		NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\"");
		return false;
	} else if (check_allow_bit &&
		   !(action_entry->hw_stats & BIT(allow_bit))) {
		NL_SET_ERR_MSG_MOD(extack, "Driver does not support selected HW stats type");
		return false;
	}
	return true;
}

static inline bool
flow_action_hw_stats_check(const struct flow_action *action,
			   struct netlink_ext_ack *extack,
			   enum flow_action_hw_stats_bit allow_bit)
{
	return __flow_action_hw_stats_check(action, extack, true, allow_bit);
}

static inline bool
flow_action_basic_hw_stats_check(const struct flow_action *action,
				 struct netlink_ext_ack *extack)
{
	return __flow_action_hw_stats_check(action, extack, false, 0);
}

struct flow_rule {
	struct flow_match match;
	struct flow_action action;
};

struct flow_rule *flow_rule_alloc(unsigned int num_actions);

static inline bool flow_rule_match_key(const struct flow_rule *rule,
				       enum flow_dissector_key_id key)
{
	return dissector_uses_key(rule->match.dissector, key);
}
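
/* Example (illustrative sketch): drivers usually test for a dissector key
 * before extracting the corresponding match, e.g.
 *
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *		struct flow_match_basic match;
 *
 *		flow_rule_match_basic(rule, &match);
 *		// match.key->n_proto and match.mask->n_proto are now valid
 *	}
 */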

struct flow_stats {
	u64 pkts;
	u64 bytes;
	u64 drops;
	u64 lastused;
	enum flow_action_hw_stats used_hw_stats;
	bool used_hw_stats_valid;
};

static inline void flow_stats_update(struct flow_stats *flow_stats,
				     u64 bytes, u64 pkts,
				     u64 drops, u64 lastused,
				     enum flow_action_hw_stats used_hw_stats)
{
	flow_stats->pkts	+= pkts;
	flow_stats->bytes	+= bytes;
	flow_stats->drops	+= drops;
	flow_stats->lastused	= max_t(u64, flow_stats->lastused, lastused);

	/* The driver should pass value with a maximum of one bit set.
	 * Passing FLOW_ACTION_HW_STATS_ANY is invalid.
	 */
	WARN_ON(used_hw_stats == FLOW_ACTION_HW_STATS_ANY);
	flow_stats->used_hw_stats |= used_hw_stats;
	flow_stats->used_hw_stats_valid = true;
}
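
/* Example (illustrative sketch): a driver handling FLOW_CLS_STATS typically
 * folds counters read back from hardware into the request, e.g.
 *
 *	flow_stats_update(&cls_flower->stats, bytes, pkts, drops,
 *			  lastused, FLOW_ACTION_HW_STATS_DELAYED);
 *
 * where cls_flower is the struct flow_cls_offload passed to the callback and
 * bytes/pkts/drops/lastused are the hardware counter values.
 */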

enum flow_block_command {
	FLOW_BLOCK_BIND,
	FLOW_BLOCK_UNBIND,
};

enum flow_block_binder_type {
	FLOW_BLOCK_BINDER_TYPE_UNSPEC,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
	FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
	FLOW_BLOCK_BINDER_TYPE_RED_MARK,
};

struct flow_block {
	struct list_head cb_list;
};

struct netlink_ext_ack;

struct flow_block_offload {
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	bool block_shared;
	bool unlocked_driver_cb;
	struct net *net;
	struct flow_block *block;
	struct list_head cb_list;
	struct list_head *driver_block_list;
	struct netlink_ext_ack *extack;
	struct Qdisc *sch;
	struct list_head *cb_list_head;
};

enum tc_setup_type;
typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
			    void *cb_priv);

struct flow_block_cb;

struct flow_block_indr {
	struct list_head list;
	struct net_device *dev;
	struct Qdisc *sch;
	enum flow_block_binder_type binder_type;
	void *data;
	void *cb_priv;
	void (*cleanup)(struct flow_block_cb *block_cb);
};

struct flow_block_cb {
	struct list_head driver_list;
	struct list_head list;
	flow_setup_cb_t *cb;
	void *cb_ident;
	void *cb_priv;
	void (*release)(void *cb_priv);
	struct flow_block_indr indr;
	unsigned int refcnt;
};

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
		void *cb_ident, void *cb_priv,
		void (*release)(void *cb_priv));
struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
		void *cb_ident, void *cb_priv,
		void (*release)(void *cb_priv),
		struct flow_block_offload *bo,
		struct net_device *dev,
		struct Qdisc *sch, void *data,
		void *indr_cb_priv,
		void (*cleanup)(struct flow_block_cb *block_cb));
void flow_block_cb_free(struct flow_block_cb *block_cb);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
		flow_setup_cb_t *cb, void *cb_ident);

void *flow_block_cb_priv(struct flow_block_cb *block_cb);
void flow_block_cb_incref(struct flow_block_cb *block_cb);
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb);

static inline void flow_block_cb_add(struct flow_block_cb *block_cb,
				     struct flow_block_offload *offload)
{
	list_add_tail(&block_cb->list, &offload->cb_list);
}

static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
					struct flow_block_offload *offload)
{
	list_move(&block_cb->list, &offload->cb_list);
}

static inline void flow_indr_block_cb_remove(struct flow_block_cb *block_cb,
					     struct flow_block_offload *offload)
{
	list_del(&block_cb->indr.list);
	list_move(&block_cb->list, &offload->cb_list);
}

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv, bool ingress_only);
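
/* Example (illustrative sketch): a typical block bind/unbind handler, using
 * a driver-private setup callback foo_setup_tc_block_cb() and a global
 * foo_block_cb_list (both hypothetical names):
 *
 *	static int foo_setup_tc_block(struct foo_priv *priv,
 *				      struct flow_block_offload *f)
 *	{
 *		struct flow_block_cb *block_cb;
 *
 *		if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
 *			return -EOPNOTSUPP;
 *
 *		switch (f->command) {
 *		case FLOW_BLOCK_BIND:
 *			block_cb = flow_block_cb_alloc(foo_setup_tc_block_cb,
 *						       priv, priv, NULL);
 *			if (IS_ERR(block_cb))
 *				return PTR_ERR(block_cb);
 *			flow_block_cb_add(block_cb, f);
 *			list_add_tail(&block_cb->driver_list, &foo_block_cb_list);
 *			return 0;
 *		case FLOW_BLOCK_UNBIND:
 *			block_cb = flow_block_cb_lookup(f->block,
 *							foo_setup_tc_block_cb,
 *							priv);
 *			if (!block_cb)
 *				return -ENOENT;
 *			flow_block_cb_remove(block_cb, f);
 *			list_del(&block_cb->driver_list);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 * Many drivers use flow_block_cb_setup_simple() instead, which implements
 * this common pattern.
 */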

enum flow_cls_command {
	FLOW_CLS_REPLACE,
	FLOW_CLS_DESTROY,
	FLOW_CLS_STATS,
	FLOW_CLS_TMPLT_CREATE,
	FLOW_CLS_TMPLT_DESTROY,
};

struct flow_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct flow_cls_offload {
	struct flow_cls_common_offload common;
	enum flow_cls_command command;
	unsigned long cookie;
	struct flow_rule *rule;
	struct flow_stats stats;
	u32 classid;
};

enum offload_act_command {
	FLOW_ACT_REPLACE,
	FLOW_ACT_DESTROY,
	FLOW_ACT_STATS,
};

struct flow_offload_action {
	struct netlink_ext_ack *extack;
	enum offload_act_command command;
	enum flow_action_id id;
	u32 index;
	struct flow_stats stats;
	struct flow_action action;
};

struct flow_offload_action *offload_action_alloc(unsigned int num_actions);

static inline struct flow_rule *
flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
{
	return flow_cmd->rule;
}
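
/* Example (illustrative sketch): a classifier offload callback usually
 * dispatches on the command, e.g.
 *
 *	struct flow_cls_offload *f = type_data;
 *
 *	switch (f->command) {
 *	case FLOW_CLS_REPLACE:
 *		return foo_add_rule(priv, flow_cls_offload_flow_rule(f),
 *				    f->cookie);	// hypothetical helper
 *	case FLOW_CLS_DESTROY:
 *		return foo_del_rule(priv, f->cookie);	// hypothetical helper
 *	case FLOW_CLS_STATS:
 *		return foo_get_stats(priv, f);		// hypothetical helper
 *	default:
 *		return -EOPNOTSUPP;
 *	}
 */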

static inline void flow_block_init(struct flow_block *flow_block)
{
	INIT_LIST_HEAD(&flow_block->cb_list);
}

typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch, void *cb_priv,
				      enum tc_setup_type type, void *type_data,
				      void *data,
				      void (*cleanup)(struct flow_block_cb *block_cb));

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv));
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb));
bool flow_indr_dev_exists(void);

#endif /* _NET_FLOW_OFFLOAD_H */