0001
0002
0003
0004 #include <linux/mutex.h>
0005 #include <linux/rhashtable.h>
0006 #include <net/ipv6.h>
0007
0008 #include "spectrum_mr.h"
0009 #include "spectrum_router.h"
0010
/* Global multicast router state, one instance per mlxsw_sp device. */
struct mlxsw_sp_mr {
	const struct mlxsw_sp_mr_ops *mr_ops;
	void *catchall_route_priv;
	struct delayed_work stats_update_dw;
	struct list_head table_list;
	struct mutex table_list_lock; /* Protects table_list */
#define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */
	unsigned long priv[];
	/* priv has to be always the last item */
};
0021
struct mlxsw_sp_mr_vif;

/* Per-protocol (IPv4/IPv6) virtual-interface operations. */
struct mlxsw_sp_mr_vif_ops {
	bool (*is_regular)(const struct mlxsw_sp_mr_vif *vif);
};
0026
/* A multicast virtual interface and its association to routes. */
struct mlxsw_sp_mr_vif {
	struct net_device *dev;
	const struct mlxsw_sp_rif *rif;
	unsigned long vif_flags;

	/* A list of route_vif_entry structs that point to routes that the VIF
	 * instance is used as one of the egress VIFs.
	 */
	struct list_head route_evif_list;

	/* A list of route_vif_entry structs that point to routes that the VIF
	 * instance is used as an ingress VIF.
	 */
	struct list_head route_ivif_list;

	/* Protocol-specific operations (see mlxsw_sp_mr_vif_ops_arr). */
	const struct mlxsw_sp_mr_vif_ops *ops;
};
0045
/* Associates a route with one of its VIFs; linked on both the VIF side
 * (vif_node) and the route side (route_node).
 */
struct mlxsw_sp_mr_route_vif_entry {
	struct list_head vif_node;
	struct list_head route_node;
	struct mlxsw_sp_mr_vif *mr_vif;
	struct mlxsw_sp_mr_route *mr_route;
};
0052
struct mlxsw_sp_mr_table;

/* Per-protocol (IPv4/IPv6) routing-table operations. */
struct mlxsw_sp_mr_table_ops {
	/* Can this MFC cache entry be offloaded at all? */
	bool (*is_route_valid)(const struct mlxsw_sp_mr_table *mr_table,
			       const struct mr_mfc *mfc);
	/* Derive the route lookup key from an MFC cache entry. */
	void (*key_create)(struct mlxsw_sp_mr_table *mr_table,
			   struct mlxsw_sp_mr_route_key *key,
			   struct mr_mfc *mfc);
	/* Is this a (*,G) route, i.e. is the source wildcarded? */
	bool (*is_route_starg)(const struct mlxsw_sp_mr_table *mr_table,
			       const struct mlxsw_sp_mr_route *mr_route);
};
0063
/* The multicast routing table of a single virtual router. */
struct mlxsw_sp_mr_table {
	struct list_head node;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp *mlxsw_sp;
	u32 vr_id;
	struct mlxsw_sp_mr_vif vifs[MAXVIFS];
	struct list_head route_list;
	struct mutex route_list_lock; /* Protects route_list */
	struct rhashtable route_ht;
	const struct mlxsw_sp_mr_table_ops *ops;
	/* Driver-private state of the table's catch-all route. */
	char catchall_route_priv[];
	/* catchall_route_priv has to be always the last item */
};
0077
/* An offloaded multicast route, mirroring one kernel MFC cache entry. */
struct mlxsw_sp_mr_route {
	struct list_head node;
	struct rhash_head ht_node;
	struct mlxsw_sp_mr_route_key key;
	enum mlxsw_sp_mr_route_action route_action;
	u16 min_mtu;
	struct mr_mfc *mfc;
	void *route_priv;
	const struct mlxsw_sp_mr_table *mr_table;
	/* A list of route_vif_entry structs that point to the egress VIFs */
	struct list_head evif_list;
	/* A route_vif_entry struct that points to the ingress VIF */
	struct mlxsw_sp_mr_route_vif_entry ivif;
};
0092
/* Routes are hashed by their full lookup key. */
static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_mr_route_key),
	.key_offset = offsetof(struct mlxsw_sp_mr_route, key),
	.head_offset = offsetof(struct mlxsw_sp_mr_route, ht_node),
	.automatic_shrinking = true,
};
0099
0100 static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif)
0101 {
0102 return vif->ops->is_regular(vif) && vif->dev && vif->rif;
0103 }
0104
0105 static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif)
0106 {
0107 return vif->dev;
0108 }
0109
0110 static bool
0111 mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
0112 {
0113 vifi_t ivif = mr_route->mfc->mfc_parent;
0114
0115 return mr_route->mfc->mfc_un.res.ttls[ivif] != 255;
0116 }
0117
0118 static int
0119 mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route)
0120 {
0121 struct mlxsw_sp_mr_route_vif_entry *rve;
0122 int valid_evifs;
0123
0124 valid_evifs = 0;
0125 list_for_each_entry(rve, &mr_route->evif_list, route_node)
0126 if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
0127 valid_evifs++;
0128 return valid_evifs;
0129 }
0130
/* Decide how the hardware should treat this route: forward, trap to the
 * CPU, or trap-and-forward. The checks below are ordered from strongest
 * (must trap) to weakest.
 */
static enum mlxsw_sp_mr_route_action
mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;

	/* If the ingress VIF is not regular and resolved, trap the route. */
	if (!mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

	/* For a (*,G) route whose ingress interface is not one of its egress
	 * interfaces, trap as well.
	 */
	if (mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
						    mr_route) &&
	    !mlxsw_sp_mr_route_ivif_in_evifs(mr_route))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

	/* With no valid egress VIFs there is nothing to forward to. */
	if (!mlxsw_sp_mr_route_valid_evifs_num(mr_route))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

	/* If any egress VIF exists but has no RIF yet, forward in hardware
	 * where possible and also trap so software can cover that VIF.
	 */
	list_for_each_entry(rve, &mr_route->evif_list, route_node)
		if (mlxsw_sp_mr_vif_exists(rve->mr_vif) && !rve->mr_vif->rif)
			return MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD;

	return MLXSW_SP_MR_ROUTE_ACTION_FORWARD;
}
0161
0162 static enum mlxsw_sp_mr_route_prio
0163 mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route)
0164 {
0165 return mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
0166 mr_route) ?
0167 MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG;
0168 }
0169
0170 static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route,
0171 struct mlxsw_sp_mr_vif *mr_vif)
0172 {
0173 struct mlxsw_sp_mr_route_vif_entry *rve;
0174
0175 rve = kzalloc(sizeof(*rve), GFP_KERNEL);
0176 if (!rve)
0177 return -ENOMEM;
0178 rve->mr_route = mr_route;
0179 rve->mr_vif = mr_vif;
0180 list_add_tail(&rve->route_node, &mr_route->evif_list);
0181 list_add_tail(&rve->vif_node, &mr_vif->route_evif_list);
0182 return 0;
0183 }
0184
0185 static void
0186 mlxsw_sp_mr_route_evif_unlink(struct mlxsw_sp_mr_route_vif_entry *rve)
0187 {
0188 list_del(&rve->route_node);
0189 list_del(&rve->vif_node);
0190 kfree(rve);
0191 }
0192
0193 static void mlxsw_sp_mr_route_ivif_link(struct mlxsw_sp_mr_route *mr_route,
0194 struct mlxsw_sp_mr_vif *mr_vif)
0195 {
0196 mr_route->ivif.mr_route = mr_route;
0197 mr_route->ivif.mr_vif = mr_vif;
0198 list_add_tail(&mr_route->ivif.vif_node, &mr_vif->route_ivif_list);
0199 }
0200
/* The embedded ingress entry is only linked on the VIF side. */
static void mlxsw_sp_mr_route_ivif_unlink(struct mlxsw_sp_mr_route *mr_route)
{
	list_del(&mr_route->ivif.vif_node);
}
0205
/* Gather the attributes needed to program @mr_route: the ingress RIF index
 * and the array of valid egress RIF indices. On success the caller owns
 * route_info->erif_indices and must release it with
 * mlxsw_sp_mr_route_info_destroy().
 */
static int
mlxsw_sp_mr_route_info_create(struct mlxsw_sp_mr_table *mr_table,
			      struct mlxsw_sp_mr_route *mr_route,
			      struct mlxsw_sp_mr_route_info *route_info)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;
	u16 *erif_indices;
	u16 irif_index;
	u16 erif = 0;

	erif_indices = kmalloc_array(MAXVIFS, sizeof(*erif_indices),
				     GFP_KERNEL);
	if (!erif_indices)
		return -ENOMEM;

	/* Collect the RIF index of every offloadable egress VIF. */
	list_for_each_entry(rve, &mr_route->evif_list, route_node) {
		if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
			u16 rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);

			erif_indices[erif++] = rifi;
		}
	}

	/* An unresolved ingress VIF is encoded as iRIF 0; such routes are
	 * trapped anyway (see mlxsw_sp_mr_route_action()).
	 */
	if (mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
		irif_index = mlxsw_sp_rif_index(mr_route->ivif.mr_vif->rif);
	else
		irif_index = 0;

	route_info->irif_index = irif_index;
	route_info->erif_indices = erif_indices;
	route_info->min_mtu = mr_route->min_mtu;
	route_info->route_action = mr_route->route_action;
	route_info->erif_num = erif;
	return 0;
}
0241
/* Release the egress RIF array allocated by
 * mlxsw_sp_mr_route_info_create().
 */
static void
mlxsw_sp_mr_route_info_destroy(struct mlxsw_sp_mr_route_info *route_info)
{
	kfree(route_info->erif_indices);
}
0247
0248 static int mlxsw_sp_mr_route_write(struct mlxsw_sp_mr_table *mr_table,
0249 struct mlxsw_sp_mr_route *mr_route,
0250 bool replace)
0251 {
0252 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
0253 struct mlxsw_sp_mr_route_info route_info;
0254 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
0255 int err;
0256
0257 err = mlxsw_sp_mr_route_info_create(mr_table, mr_route, &route_info);
0258 if (err)
0259 return err;
0260
0261 if (!replace) {
0262 struct mlxsw_sp_mr_route_params route_params;
0263
0264 mr_route->route_priv = kzalloc(mr->mr_ops->route_priv_size,
0265 GFP_KERNEL);
0266 if (!mr_route->route_priv) {
0267 err = -ENOMEM;
0268 goto out;
0269 }
0270
0271 route_params.key = mr_route->key;
0272 route_params.value = route_info;
0273 route_params.prio = mlxsw_sp_mr_route_prio(mr_route);
0274 err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
0275 mr_route->route_priv,
0276 &route_params);
0277 if (err)
0278 kfree(mr_route->route_priv);
0279 } else {
0280 err = mr->mr_ops->route_update(mlxsw_sp, mr_route->route_priv,
0281 &route_info);
0282 }
0283 out:
0284 mlxsw_sp_mr_route_info_destroy(&route_info);
0285 return err;
0286 }
0287
0288 static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table,
0289 struct mlxsw_sp_mr_route *mr_route)
0290 {
0291 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
0292 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
0293
0294 mr->mr_ops->route_destroy(mlxsw_sp, mr->priv, mr_route->route_priv);
0295 kfree(mr_route->route_priv);
0296 }
0297
/* Build a driver route for an MFC cache entry: take a reference on the
 * entry, derive the lookup key, link every egress VIF (TTL != 255) and the
 * ingress VIF, and compute the initial route action and minimum MTU.
 * Returns the new route or an ERR_PTR() on failure.
 */
static struct mlxsw_sp_mr_route *
mlxsw_sp_mr_route_create(struct mlxsw_sp_mr_table *mr_table,
			 struct mr_mfc *mfc)
{
	struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
	struct mlxsw_sp_mr_route *mr_route;
	int err = 0;
	int i;

	/* Allocate and init a new route and fill it with parameters. */
	mr_route = kzalloc(sizeof(*mr_route), GFP_KERNEL);
	if (!mr_route)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&mr_route->evif_list);

	/* Find min_mtu and link iVIF and eVIFs. */
	mr_route->min_mtu = ETH_MAX_MTU;
	mr_cache_hold(mfc);
	mr_route->mfc = mfc;
	mr_table->ops->key_create(mr_table, &mr_route->key, mr_route->mfc);

	mr_route->mr_table = mr_table;
	for (i = 0; i < MAXVIFS; i++) {
		/* A TTL of 255 marks an unused egress slot. */
		if (mfc->mfc_un.res.ttls[i] != 255) {
			err = mlxsw_sp_mr_route_evif_link(mr_route,
							  &mr_table->vifs[i]);
			if (err)
				goto err;
			if (mr_table->vifs[i].dev &&
			    mr_table->vifs[i].dev->mtu < mr_route->min_mtu)
				mr_route->min_mtu = mr_table->vifs[i].dev->mtu;
		}
	}
	mlxsw_sp_mr_route_ivif_link(mr_route,
				    &mr_table->vifs[mfc->mfc_parent]);

	mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
	return mr_route;
err:
	/* Drop the reference taken above and undo any eVIF links. */
	mr_cache_put(mfc);
	list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
		mlxsw_sp_mr_route_evif_unlink(rve);
	kfree(mr_route);
	return ERR_PTR(err);
}
0343
0344 static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
0345 struct mlxsw_sp_mr_route *mr_route)
0346 {
0347 struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
0348
0349 mlxsw_sp_mr_route_ivif_unlink(mr_route);
0350 mr_cache_put(mr_route->mfc);
0351 list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
0352 mlxsw_sp_mr_route_evif_unlink(rve);
0353 kfree(mr_route);
0354 }
0355
0356 static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
0357 bool offload)
0358 {
0359 if (offload)
0360 mr_route->mfc->mfc_flags |= MFC_OFFLOAD;
0361 else
0362 mr_route->mfc->mfc_flags &= ~MFC_OFFLOAD;
0363 }
0364
0365 static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
0366 {
0367 bool offload;
0368
0369 offload = mr_route->route_action != MLXSW_SP_MR_ROUTE_ACTION_TRAP;
0370 mlxsw_sp_mr_mfc_offload_set(mr_route, offload);
0371 }
0372
/* Remove a route from the table data structures and the hardware and free
 * it. Must be called with the table's route_list_lock held.
 */
static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
				    struct mlxsw_sp_mr_route *mr_route)
{
	WARN_ON_ONCE(!mutex_is_locked(&mr_table->route_list_lock));

	/* Clear the offload flag before the route (and its MFC reference)
	 * goes away.
	 */
	mlxsw_sp_mr_mfc_offload_set(mr_route, false);
	rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
			       mlxsw_sp_mr_route_ht_params);
	list_del(&mr_route->node);
	mlxsw_sp_mr_route_erase(mr_table, mr_route);
	mlxsw_sp_mr_route_destroy(mr_table, mr_route);
}
0385
/* Offload an MFC route addition or replacement. When @replace is set, an
 * already-offloaded route with the same key is updated in place; otherwise
 * a key collision is rejected. Returns 0 or a negative errno.
 */
int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
			  struct mr_mfc *mfc, bool replace)
{
	struct mlxsw_sp_mr_route *mr_orig_route = NULL;
	struct mlxsw_sp_mr_route *mr_route;
	int err;

	if (!mr_table->ops->is_route_valid(mr_table, mfc))
		return -EINVAL;

	/* Create a new route. */
	mr_route = mlxsw_sp_mr_route_create(mr_table, mfc);
	if (IS_ERR(mr_route))
		return PTR_ERR(mr_route);

	/* Find any route with a matching key. */
	mr_orig_route = rhashtable_lookup_fast(&mr_table->route_ht,
					       &mr_route->key,
					       mlxsw_sp_mr_route_ht_params);
	if (replace) {
		/* On replace, reuse the original route's driver-private
		 * state for the new route.
		 */
		if (WARN_ON(!mr_orig_route)) {
			err = -ENOENT;
			goto err_no_orig_route;
		}
		mr_route->route_priv = mr_orig_route->route_priv;
	} else if (mr_orig_route) {
		/* A duplicate key without replace indicates a proxy route,
		 * which cannot be offloaded.
		 */
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		err = -EINVAL;
		goto err_duplicate_route;
	}

	/* Write the route to the hardware. */
	err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace);
	if (err)
		goto err_mr_route_write;

	/* Put it in the table data structures. */
	mutex_lock(&mr_table->route_list_lock);
	list_add_tail(&mr_route->node, &mr_table->route_list);
	mutex_unlock(&mr_table->route_list_lock);
	err = rhashtable_insert_fast(&mr_table->route_ht,
				     &mr_route->ht_node,
				     mlxsw_sp_mr_route_ht_params);
	if (err)
		goto err_rhashtable_insert;

	/* Destroy the original route. */
	if (replace) {
		rhashtable_remove_fast(&mr_table->route_ht,
				       &mr_orig_route->ht_node,
				       mlxsw_sp_mr_route_ht_params);
		list_del(&mr_orig_route->node);
		mlxsw_sp_mr_route_destroy(mr_table, mr_orig_route);
	}

	mlxsw_sp_mr_mfc_offload_update(mr_route);
	return 0;

err_rhashtable_insert:
	mutex_lock(&mr_table->route_list_lock);
	list_del(&mr_route->node);
	mutex_unlock(&mr_table->route_list_lock);
	mlxsw_sp_mr_route_erase(mr_table, mr_route);
err_mr_route_write:
err_no_orig_route:
err_duplicate_route:
	mlxsw_sp_mr_route_destroy(mr_table, mr_route);
	return err;
}
0461
0462 void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
0463 struct mr_mfc *mfc)
0464 {
0465 struct mlxsw_sp_mr_route *mr_route;
0466 struct mlxsw_sp_mr_route_key key;
0467
0468 mr_table->ops->key_create(mr_table, &key, mfc);
0469 mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
0470 mlxsw_sp_mr_route_ht_params);
0471 if (mr_route) {
0472 mutex_lock(&mr_table->route_list_lock);
0473 __mlxsw_sp_mr_route_del(mr_table, mr_route);
0474 mutex_unlock(&mr_table->route_list_lock);
0475 }
0476 }
0477
0478
/* Called when a route's ingress VIF becomes resolved: program the new iRIF
 * and re-evaluate the route action. Returns 0 or a negative errno.
 */
static int
mlxsw_sp_mr_route_ivif_resolve(struct mlxsw_sp_mr_table *mr_table,
			       struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	enum mlxsw_sp_mr_route_action route_action;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u16 irif_index;
	int err;

	route_action = mlxsw_sp_mr_route_action(rve->mr_route);
	if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return 0;

	/* A non-trap action implies the ingress VIF is valid, so its RIF is
	 * guaranteed to exist here.
	 */
	irif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
	err = mr->mr_ops->route_irif_update(mlxsw_sp, rve->mr_route->route_priv,
					    irif_index);
	if (err)
		return err;

	err = mr->mr_ops->route_action_update(mlxsw_sp,
					      rve->mr_route->route_priv,
					      route_action);
	if (err)
		/* No rollback of the iRIF update here; the route is expected
		 * to keep trapping until the action update succeeds — confirm
		 * against the ops implementation.
		 */
		return err;

	rve->mr_route->route_action = route_action;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
	return 0;
}
0513
0514 static void
0515 mlxsw_sp_mr_route_ivif_unresolve(struct mlxsw_sp_mr_table *mr_table,
0516 struct mlxsw_sp_mr_route_vif_entry *rve)
0517 {
0518 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
0519 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
0520
0521 mr->mr_ops->route_action_update(mlxsw_sp, rve->mr_route->route_priv,
0522 MLXSW_SP_MR_ROUTE_ACTION_TRAP);
0523 rve->mr_route->route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
0524 mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
0525 }
0526
0527
/* Called when one of a route's egress VIFs becomes resolved: program the
 * new eRIF, re-evaluate the route action and propagate a smaller egress
 * MTU into the route's minimum MTU. Rolls everything back on failure.
 * Returns 0 or a negative errno.
 */
static int
mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
			       struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	enum mlxsw_sp_mr_route_action route_action;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u16 erif_index = 0;
	int err;

	/* Add the eRIF, if the VIF is valid for offload. */
	if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
		erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
		err = mr->mr_ops->route_erif_add(mlxsw_sp,
						 rve->mr_route->route_priv,
						 erif_index);
		if (err)
			return err;
	}

	/* Update the route action, as the new eVIF can change the verdict
	 * computed by mlxsw_sp_mr_route_action().
	 */
	route_action = mlxsw_sp_mr_route_action(rve->mr_route);
	if (route_action != rve->mr_route->route_action) {
		err = mr->mr_ops->route_action_update(mlxsw_sp,
						      rve->mr_route->route_priv,
						      route_action);
		if (err)
			goto err_route_action_update;
	}

	/* Update the minimum MTU, if the new device lowers it. */
	if (rve->mr_vif->dev->mtu < rve->mr_route->min_mtu) {
		rve->mr_route->min_mtu = rve->mr_vif->dev->mtu;
		err = mr->mr_ops->route_min_mtu_update(mlxsw_sp,
						       rve->mr_route->route_priv,
						       rve->mr_route->min_mtu);
		if (err)
			goto err_route_min_mtu_update;
	}

	rve->mr_route->route_action = route_action;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
	return 0;

err_route_min_mtu_update:
	/* Restore the previous route action before removing the eRIF. */
	if (route_action != rve->mr_route->route_action)
		mr->mr_ops->route_action_update(mlxsw_sp,
						rve->mr_route->route_priv,
						rve->mr_route->route_action);
err_route_action_update:
	if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
		mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
					   erif_index);
	return err;
}
0585
0586
/* Called when one of a route's egress VIFs becomes unresolved: remove its
 * eRIF from the hardware route and re-evaluate the route action.
 */
static void
mlxsw_sp_mr_route_evif_unresolve(struct mlxsw_sp_mr_table *mr_table,
				 struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	enum mlxsw_sp_mr_route_action route_action;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u16 rifi;

	/* If the VIF was never valid for offload, there is nothing to
	 * remove from the hardware.
	 */
	if (!mlxsw_sp_mr_vif_valid(rve->mr_vif))
		return;

	/* If this is the last valid eVIF, the route is about to have none,
	 * so it must trap; otherwise recompute the action from the current
	 * state (this VIF is still counted at this point).
	 */
	if (mlxsw_sp_mr_route_valid_evifs_num(rve->mr_route) == 1)
		route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
	else
		route_action = mlxsw_sp_mr_route_action(rve->mr_route);
	if (route_action != rve->mr_route->route_action)
		mr->mr_ops->route_action_update(mlxsw_sp,
						rve->mr_route->route_priv,
						route_action);

	/* Delete the eRIF from the hardware route. */
	rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
	mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv, rifi);
	rve->mr_route->route_action = route_action;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
}
0620
/* Bind a VIF to a netdevice/RIF and update every route that uses it, first
 * on the ingress side and then on the egress side. On failure all changes
 * are rolled back and the RIF binding is cleared.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
				   struct net_device *dev,
				   struct mlxsw_sp_mr_vif *mr_vif,
				   unsigned long vif_flags,
				   const struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_mr_route_vif_entry *irve, *erve;
	int err;

	/* Update the VIF itself. */
	mr_vif->dev = dev;
	mr_vif->rif = rif;
	mr_vif->vif_flags = vif_flags;

	/* Update all routes that use this VIF as their ingress VIF. */
	list_for_each_entry(irve, &mr_vif->route_ivif_list, vif_node) {
		err = mlxsw_sp_mr_route_ivif_resolve(mr_table, irve);
		if (err)
			goto err_irif_unresolve;
	}

	/* Update all routes that use this VIF as an egress VIF. */
	list_for_each_entry(erve, &mr_vif->route_evif_list, vif_node) {
		err = mlxsw_sp_mr_route_evif_resolve(mr_table, erve);
		if (err)
			goto err_erif_unresolve;
	}
	return 0;

err_erif_unresolve:
	/* Walk back over the entries already resolved and undo them. */
	list_for_each_entry_continue_reverse(erve, &mr_vif->route_evif_list,
					     vif_node)
		mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
err_irif_unresolve:
	list_for_each_entry_continue_reverse(irve, &mr_vif->route_ivif_list,
					     vif_node)
		mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
	mr_vif->rif = NULL;
	return err;
}
0661
/* Unbind a VIF from its RIF and update every route that uses it. @dev
 * becomes the VIF's new netdevice binding: callers pass NULL to remove the
 * VIF entirely (mlxsw_sp_mr_vif_del()) or the current device to keep it
 * when only the RIF goes away (mlxsw_sp_mr_rif_del()).
 */
static void mlxsw_sp_mr_vif_unresolve(struct mlxsw_sp_mr_table *mr_table,
				      struct net_device *dev,
				      struct mlxsw_sp_mr_vif *mr_vif)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;

	/* Update all routes that use this VIF as an egress VIF. */
	list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node)
		mlxsw_sp_mr_route_evif_unresolve(mr_table, rve);

	/* Update all routes that use this VIF as their ingress VIF. */
	list_for_each_entry(rve, &mr_vif->route_ivif_list, vif_node)
		mlxsw_sp_mr_route_ivif_unresolve(mr_table, rve);

	/* Update the VIF itself. */
	mr_vif->dev = dev;
	mr_vif->rif = NULL;
}
0680
0681 int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
0682 struct net_device *dev, vifi_t vif_index,
0683 unsigned long vif_flags, const struct mlxsw_sp_rif *rif)
0684 {
0685 struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];
0686
0687 if (WARN_ON(vif_index >= MAXVIFS))
0688 return -EINVAL;
0689 if (mr_vif->dev)
0690 return -EEXIST;
0691 return mlxsw_sp_mr_vif_resolve(mr_table, dev, mr_vif, vif_flags, rif);
0692 }
0693
0694 void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index)
0695 {
0696 struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];
0697
0698 if (WARN_ON(vif_index >= MAXVIFS))
0699 return;
0700 if (WARN_ON(!mr_vif->dev))
0701 return;
0702 mlxsw_sp_mr_vif_unresolve(mr_table, NULL, mr_vif);
0703 }
0704
0705 static struct mlxsw_sp_mr_vif *
0706 mlxsw_sp_mr_dev_vif_lookup(struct mlxsw_sp_mr_table *mr_table,
0707 const struct net_device *dev)
0708 {
0709 vifi_t vif_index;
0710
0711 for (vif_index = 0; vif_index < MAXVIFS; vif_index++)
0712 if (mr_table->vifs[vif_index].dev == dev)
0713 return &mr_table->vifs[vif_index];
0714 return NULL;
0715 }
0716
0717 int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
0718 const struct mlxsw_sp_rif *rif)
0719 {
0720 const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
0721 struct mlxsw_sp_mr_vif *mr_vif;
0722
0723 if (!rif_dev)
0724 return 0;
0725
0726 mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
0727 if (!mr_vif)
0728 return 0;
0729 return mlxsw_sp_mr_vif_resolve(mr_table, mr_vif->dev, mr_vif,
0730 mr_vif->vif_flags, rif);
0731 }
0732
0733 void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
0734 const struct mlxsw_sp_rif *rif)
0735 {
0736 const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
0737 struct mlxsw_sp_mr_vif *mr_vif;
0738
0739 if (!rif_dev)
0740 return;
0741
0742 mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
0743 if (!mr_vif)
0744 return;
0745 mlxsw_sp_mr_vif_unresolve(mr_table, mr_vif->dev, mr_vif);
0746 }
0747
/* React to an MTU change of a RIF's netdevice: lower the minimum MTU of
 * every route that uses the matching VIF as an egress interface.
 */
void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
				const struct mlxsw_sp_rif *rif, int mtu)
{
	const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr_route_vif_entry *rve;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	struct mlxsw_sp_mr_vif *mr_vif;

	if (!rif_dev)
		return;

	/* Search for a VIF that is backed by that RIF's netdevice. */
	mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
	if (!mr_vif)
		return;

	/* Update all the routes that use that VIF as an egress VIF. */
	list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node) {
		if (mtu < rve->mr_route->min_mtu) {
			rve->mr_route->min_mtu = mtu;
			mr->mr_ops->route_min_mtu_update(mlxsw_sp,
							 rve->mr_route->route_priv,
							 mtu);
		}
	}
}
0775
0776
/* IPv4: reject MFC entries that cannot be offloaded. A (*,*) entry is a
 * proxy route, which is not supported (see the warning below).
 */
static bool
mlxsw_sp_mr_route4_validate(const struct mlxsw_sp_mr_table *mr_table,
			    const struct mr_mfc *c)
{
	struct mfc_cache *mfc = (struct mfc_cache *) c;

	if (mfc->mfc_origin == htonl(INADDR_ANY) &&
	    mfc->mfc_mcastgrp == htonl(INADDR_ANY)) {
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		return false;
	}
	return true;
}
0794
0795 static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
0796 struct mlxsw_sp_mr_route_key *key,
0797 struct mr_mfc *c)
0798 {
0799 const struct mfc_cache *mfc = (struct mfc_cache *) c;
0800 bool starg;
0801
0802 starg = (mfc->mfc_origin == htonl(INADDR_ANY));
0803
0804 memset(key, 0, sizeof(*key));
0805 key->vrid = mr_table->vr_id;
0806 key->proto = MLXSW_SP_L3_PROTO_IPV4;
0807 key->group.addr4 = mfc->mfc_mcastgrp;
0808 key->group_mask.addr4 = htonl(0xffffffff);
0809 key->source.addr4 = mfc->mfc_origin;
0810 key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff);
0811 }
0812
/* IPv4: a (*,G) route is recognized by its wildcarded (all-zero) source
 * mask in the lookup key.
 */
static bool mlxsw_sp_mr_route4_starg(const struct mlxsw_sp_mr_table *mr_table,
				     const struct mlxsw_sp_mr_route *mr_route)
{
	return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY);
}
0818
0819 static bool mlxsw_sp_mr_vif4_is_regular(const struct mlxsw_sp_mr_vif *vif)
0820 {
0821 return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
0822 }
0823
/* IPv6: reject MFC entries that cannot be offloaded. A (*,*) entry is a
 * proxy route, which is not supported (see the warning below).
 */
static bool
mlxsw_sp_mr_route6_validate(const struct mlxsw_sp_mr_table *mr_table,
			    const struct mr_mfc *c)
{
	struct mfc6_cache *mfc = (struct mfc6_cache *) c;

	if (ipv6_addr_any(&mfc->mf6c_origin) &&
	    ipv6_addr_any(&mfc->mf6c_mcastgrp)) {
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		return false;
	}
	return true;
}
0841
0842 static void mlxsw_sp_mr_route6_key(struct mlxsw_sp_mr_table *mr_table,
0843 struct mlxsw_sp_mr_route_key *key,
0844 struct mr_mfc *c)
0845 {
0846 const struct mfc6_cache *mfc = (struct mfc6_cache *) c;
0847
0848 memset(key, 0, sizeof(*key));
0849 key->vrid = mr_table->vr_id;
0850 key->proto = MLXSW_SP_L3_PROTO_IPV6;
0851 key->group.addr6 = mfc->mf6c_mcastgrp;
0852 memset(&key->group_mask.addr6, 0xff, sizeof(key->group_mask.addr6));
0853 key->source.addr6 = mfc->mf6c_origin;
0854 if (!ipv6_addr_any(&mfc->mf6c_origin))
0855 memset(&key->source_mask.addr6, 0xff,
0856 sizeof(key->source_mask.addr6));
0857 }
0858
/* IPv6: a (*,G) route is recognized by its wildcarded (all-zero) source
 * mask in the lookup key.
 */
static bool mlxsw_sp_mr_route6_starg(const struct mlxsw_sp_mr_table *mr_table,
				     const struct mlxsw_sp_mr_route *mr_route)
{
	return ipv6_addr_any(&mr_route->key.source_mask.addr6);
}
0864
0865 static bool mlxsw_sp_mr_vif6_is_regular(const struct mlxsw_sp_mr_vif *vif)
0866 {
0867 return !(vif->vif_flags & MIFF_REGISTER);
0868 }
0869
0870 static struct
0871 mlxsw_sp_mr_vif_ops mlxsw_sp_mr_vif_ops_arr[] = {
0872 {
0873 .is_regular = mlxsw_sp_mr_vif4_is_regular,
0874 },
0875 {
0876 .is_regular = mlxsw_sp_mr_vif6_is_regular,
0877 },
0878 };
0879
0880 static struct
0881 mlxsw_sp_mr_table_ops mlxsw_sp_mr_table_ops_arr[] = {
0882 {
0883 .is_route_valid = mlxsw_sp_mr_route4_validate,
0884 .key_create = mlxsw_sp_mr_route4_key,
0885 .is_route_starg = mlxsw_sp_mr_route4_starg,
0886 },
0887 {
0888 .is_route_valid = mlxsw_sp_mr_route6_validate,
0889 .key_create = mlxsw_sp_mr_route6_key,
0890 .is_route_starg = mlxsw_sp_mr_route6_starg,
0891 },
0892
0893 };
0894
/* Create a multicast routing table for a virtual router, install its
 * catch-all (trap) route in the hardware and register the table on the
 * global table list. Returns the table or an ERR_PTR() on failure.
 */
struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
						   u32 vr_id,
						   enum mlxsw_sp_l3proto proto)
{
	/* Catch-all route for the VRID: trap everything to the CPU. */
	struct mlxsw_sp_mr_route_params catchall_route_params = {
		.prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
		.key = {
			.vrid = vr_id,
			.proto = proto,
		},
		.value = {
			.route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP,
		}
	};
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	struct mlxsw_sp_mr_table *mr_table;
	int err;
	int i;

	/* The catch-all route's driver-private state lives in the flexible
	 * array member at the end of the table.
	 */
	mr_table = kzalloc(sizeof(*mr_table) + mr->mr_ops->route_priv_size,
			   GFP_KERNEL);
	if (!mr_table)
		return ERR_PTR(-ENOMEM);

	mr_table->vr_id = vr_id;
	mr_table->mlxsw_sp = mlxsw_sp;
	mr_table->proto = proto;
	mr_table->ops = &mlxsw_sp_mr_table_ops_arr[proto];
	INIT_LIST_HEAD(&mr_table->route_list);
	mutex_init(&mr_table->route_list_lock);

	err = rhashtable_init(&mr_table->route_ht,
			      &mlxsw_sp_mr_route_ht_params);
	if (err)
		goto err_route_rhashtable_init;

	for (i = 0; i < MAXVIFS; i++) {
		INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list);
		INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list);
		mr_table->vifs[i].ops = &mlxsw_sp_mr_vif_ops_arr[proto];
	}

	err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
				       mr_table->catchall_route_priv,
				       &catchall_route_params);
	if (err)
		goto err_ops_route_create;
	mutex_lock(&mr->table_list_lock);
	list_add_tail(&mr_table->node, &mr->table_list);
	mutex_unlock(&mr->table_list_lock);
	return mr_table;

err_ops_route_create:
	rhashtable_destroy(&mr_table->route_ht);
err_route_rhashtable_init:
	mutex_destroy(&mr_table->route_list_lock);
	kfree(mr_table);
	return ERR_PTR(err);
}
0954
/* Destroy a multicast routing table. The table must already be empty
 * (no routes and no VIFs) — see mlxsw_sp_mr_table_empty().
 */
void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	WARN_ON(!mlxsw_sp_mr_table_empty(mr_table));
	/* Unlink from the global list so the stats work stops seeing it. */
	mutex_lock(&mr->table_list_lock);
	list_del(&mr_table->node);
	mutex_unlock(&mr->table_list_lock);
	mr->mr_ops->route_destroy(mlxsw_sp, mr->priv,
				  &mr_table->catchall_route_priv);
	rhashtable_destroy(&mr_table->route_ht);
	mutex_destroy(&mr_table->route_list_lock);
	kfree(mr_table);
}
0970
/* Remove every route from the table and clear all VIF bindings. */
void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table)
{
	struct mlxsw_sp_mr_route *mr_route, *tmp;
	int i;

	mutex_lock(&mr_table->route_list_lock);
	list_for_each_entry_safe(mr_route, tmp, &mr_table->route_list, node)
		__mlxsw_sp_mr_route_del(mr_table, mr_route);
	mutex_unlock(&mr_table->route_list_lock);

	/* Drop the netdevice and RIF bindings of every VIF slot. */
	for (i = 0; i < MAXVIFS; i++) {
		mr_table->vifs[i].dev = NULL;
		mr_table->vifs[i].rif = NULL;
	}
}
0986
0987 bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table)
0988 {
0989 int i;
0990
0991 for (i = 0; i < MAXVIFS; i++)
0992 if (mr_table->vifs[i].dev)
0993 return false;
0994 return list_empty(&mr_table->route_list);
0995 }
0996
/* Pull the hardware packet/byte counters of an offloaded route into its
 * MFC cache entry, refreshing lastuse when new packets were seen.
 */
static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u64 packets, bytes;

	/* Nothing to read for routes that only trap to the CPU. */
	if (mr_route->route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return;

	mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
				&bytes);

	if (mr_route->mfc->mfc_un.res.pkt != packets)
		mr_route->mfc->mfc_un.res.lastuse = jiffies;
	mr_route->mfc->mfc_un.res.pkt = packets;
	mr_route->mfc->mfc_un.res.bytes = bytes;
}
1014
/* Delayed work: periodically refresh the counters of every route in every
 * table, then re-arm itself.
 */
static void mlxsw_sp_mr_stats_update(struct work_struct *work)
{
	struct mlxsw_sp_mr *mr = container_of(work, struct mlxsw_sp_mr,
					      stats_update_dw.work);
	struct mlxsw_sp_mr_table *mr_table;
	struct mlxsw_sp_mr_route *mr_route;
	unsigned long interval;

	mutex_lock(&mr->table_list_lock);
	list_for_each_entry(mr_table, &mr->table_list, node) {
		mutex_lock(&mr_table->route_list_lock);
		list_for_each_entry(mr_route, &mr_table->route_list, node)
			mlxsw_sp_mr_route_stats_update(mr_table->mlxsw_sp,
						       mr_route);
		mutex_unlock(&mr_table->route_list_lock);
	}
	mutex_unlock(&mr->table_list_lock);

	interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
	mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
}
1036
/* Allocate and initialize the global multicast router state, call the
 * low-level ops init and start the periodic stats work.
 * Returns 0 or a negative errno.
 */
int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
		     const struct mlxsw_sp_mr_ops *mr_ops)
{
	struct mlxsw_sp_mr *mr;
	unsigned long interval;
	int err;

	/* The ops-private area lives in the flexible array member. */
	mr = kzalloc(sizeof(*mr) + mr_ops->priv_size, GFP_KERNEL);
	if (!mr)
		return -ENOMEM;
	mr->mr_ops = mr_ops;
	mlxsw_sp->mr = mr;
	INIT_LIST_HEAD(&mr->table_list);
	mutex_init(&mr->table_list_lock);

	err = mr_ops->init(mlxsw_sp, mr->priv);
	if (err)
		goto err;

	/* Start the periodic counter refresh. */
	INIT_DELAYED_WORK(&mr->stats_update_dw, mlxsw_sp_mr_stats_update);
	interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
	mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
	return 0;
err:
	mutex_destroy(&mr->table_list_lock);
	kfree(mr);
	return err;
}
1066
/* Tear down the global multicast router state. The stats work is stopped
 * first so it cannot run concurrently with the teardown.
 */
void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	cancel_delayed_work_sync(&mr->stats_update_dw);
	mr->mr_ops->fini(mlxsw_sp, mr->priv);
	mutex_destroy(&mr->table_list_lock);
	kfree(mr);
}