#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
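
/* Build and execute the CREATE_SCHEDULING_ELEMENT command. The caller's
 * scheduling context is copied into the command payload; on success the
 * id of the new element is returned through *element_id.
 */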
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
                                       void *ctx, u32 *element_id)
{
        u32 out[MLX5_ST_SZ_DW(create_scheduling_element_out)] = {};
        u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {};
        void *schedc;
        int err;

        schedc = MLX5_ADDR_OF(create_scheduling_element_in, in,
                              scheduling_context);
        MLX5_SET(create_scheduling_element_in, in, opcode,
                 MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT);
        MLX5_SET(create_scheduling_element_in, in, scheduling_hierarchy,
                 hierarchy);
        memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));

        err = mlx5_cmd_exec_inout(dev, create_scheduling_element, in, out);
        if (err)
                return err;

        *element_id = MLX5_GET(create_scheduling_element_out, out,
                               scheduling_element_id);
        return 0;
}

int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
                                       void *ctx, u32 element_id,
                                       u32 modify_bitmask)
{
        u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {};
        void *schedc;

        schedc = MLX5_ADDR_OF(modify_scheduling_element_in, in,
                              scheduling_context);
        MLX5_SET(modify_scheduling_element_in, in, opcode,
                 MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT);
        MLX5_SET(modify_scheduling_element_in, in, scheduling_element_id,
                 element_id);
        MLX5_SET(modify_scheduling_element_in, in, modify_bitmask,
                 modify_bitmask);
        MLX5_SET(modify_scheduling_element_in, in, scheduling_hierarchy,
                 hierarchy);
        memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));

        return mlx5_cmd_exec_in(dev, modify_scheduling_element, in);
}

int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
                                        u32 element_id)
{
        u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {};

        MLX5_SET(destroy_scheduling_element_in, in, opcode,
                 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
        MLX5_SET(destroy_scheduling_element_in, in, scheduling_element_id,
                 element_id);
        MLX5_SET(destroy_scheduling_element_in, in, scheduling_hierarchy,
                 hierarchy);

        return mlx5_cmd_exec_in(dev, destroy_scheduling_element, in);
}
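
/* Two raw rate-limit entries are interchangeable only if both the packed
 * context and the owning uid match, so entries are never shared across uids.
 */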
static bool mlx5_rl_are_equal_raw(struct mlx5_rl_entry *entry, void *rl_in,
                                  u16 uid)
{
        return (!memcmp(entry->rl_raw, rl_in, sizeof(entry->rl_raw)) &&
                entry->uid == uid);
}

/* Find an entry where the given rate can be registered. For a dedicated
 * request, return the first unused entry. Otherwise prefer an in-use entry
 * with a matching context and uid, falling back to the first unused entry.
 * Return NULL if the table is full.
 */
static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
                                           void *rl_in, u16 uid, bool dedicated)
{
        struct mlx5_rl_entry *ret_entry = NULL;
        bool empty_found = false;
        int i;

        lockdep_assert_held(&table->rl_lock);
        WARN_ON(!table->rl_entry);

        for (i = 0; i < table->max_size; i++) {
                if (dedicated) {
                        if (!table->rl_entry[i].refcount)
                                return &table->rl_entry[i];
                        continue;
                }

                if (table->rl_entry[i].refcount) {
                        if (table->rl_entry[i].dedicated)
                                continue;
                        if (mlx5_rl_are_equal_raw(&table->rl_entry[i], rl_in,
                                                  uid))
                                return &table->rl_entry[i];
                } else if (!empty_found) {
                        empty_found = true;
                        ret_entry = &table->rl_entry[i];
                }
        }

        return ret_entry;
}
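
/* Program a packet-pacing entry in the device. When @set is false the
 * command is sent with a zeroed context, which clears the rate limit at
 * the given index.
 */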
static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
                                      struct mlx5_rl_entry *entry, bool set)
{
        u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
        void *pp_context;

        pp_context = MLX5_ADDR_OF(set_pp_rate_limit_in, in, ctx);
        MLX5_SET(set_pp_rate_limit_in, in, opcode,
                 MLX5_CMD_OP_SET_PP_RATE_LIMIT);
        MLX5_SET(set_pp_rate_limit_in, in, uid, entry->uid);
        MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, entry->index);
        if (set)
                memcpy(pp_context, entry->rl_raw, sizeof(entry->rl_raw));
        return mlx5_cmd_exec_in(dev, set_pp_rate_limit, in);
}

bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
{
        struct mlx5_rl_table *table = &dev->priv.rl_table;

        return (rate <= table->max_rate && rate >= table->min_rate);
}
EXPORT_SYMBOL(mlx5_rl_is_in_range);

bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
                       struct mlx5_rate_limit *rl_1)
{
        return ((rl_0->rate == rl_1->rate) &&
                (rl_0->max_burst_sz == rl_1->max_burst_sz) &&
                (rl_0->typical_pkt_sz == rl_1->typical_pkt_sz));
}
EXPORT_SYMBOL(mlx5_rl_are_equal);
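
/* Take a reference on the rate-limit table, allocating the entry array on
 * first use. Must be called with rl_lock held.
 */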
static int mlx5_rl_table_get(struct mlx5_rl_table *table)
{
        int i;

        lockdep_assert_held(&table->rl_lock);

        if (table->rl_entry) {
                table->refcount++;
                return 0;
        }

        table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
                                  GFP_KERNEL);
        if (!table->rl_entry)
                return -ENOMEM;

        /* The index is the slot in the HW rate-limit table; index 0 is
         * reserved for the unlimited rate, so valid entries start at 1.
         */
        for (i = 0; i < table->max_size; i++)
                table->rl_entry[i].index = i + 1;

        table->refcount++;
        return 0;
}

static void mlx5_rl_table_put(struct mlx5_rl_table *table)
{
        lockdep_assert_held(&table->rl_lock);
        if (--table->refcount)
                return;

        kfree(table->rl_entry);
        table->rl_entry = NULL;
}

static void mlx5_rl_table_free(struct mlx5_core_dev *dev, struct mlx5_rl_table *table)
{
        int i;

        if (!table->rl_entry)
                return;

        /* Clear all configured rates */
        for (i = 0; i < table->max_size; i++)
                if (table->rl_entry[i].refcount)
                        mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i],
                                                   false);
        kfree(table->rl_entry);
}

static void mlx5_rl_entry_get(struct mlx5_rl_entry *entry)
{
        entry->refcount++;
}

static void
mlx5_rl_entry_put(struct mlx5_core_dev *dev, struct mlx5_rl_entry *entry)
{
        entry->refcount--;
        if (!entry->refcount)
                mlx5_set_pp_rate_limit_cmd(dev, entry, false);
}
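
/* Find or configure a hardware rate-limit entry for the given raw context
 * and take a reference on it. A dedicated entry is never shared with other
 * callers; otherwise an already-configured entry with a matching context
 * and uid is reused. The hardware index is returned through *index.
 */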
int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
                         bool dedicated_entry, u16 *index)
{
        struct mlx5_rl_table *table = &dev->priv.rl_table;
        struct mlx5_rl_entry *entry;
        u32 rate;
        int err;

        if (!table->max_size)
                return -EOPNOTSUPP;

        rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit);
        if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
                mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
                              rate, table->min_rate, table->max_rate);
                return -EINVAL;
        }

        mutex_lock(&table->rl_lock);
        err = mlx5_rl_table_get(table);
        if (err)
                goto out;

        entry = find_rl_entry(table, rl_in, uid, dedicated_entry);
        if (!entry) {
                mlx5_core_err(dev, "Max number of %u rates reached\n",
                              table->max_size);
                err = -ENOSPC;
                goto rl_err;
        }
        if (!entry->refcount) {
                /* new rate limit */
                memcpy(entry->rl_raw, rl_in, sizeof(entry->rl_raw));
                entry->uid = uid;
                err = mlx5_set_pp_rate_limit_cmd(dev, entry, true);
                if (err) {
                        mlx5_core_err(dev,
                                      "Failed configuring rate limit (err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
                                      err, rate,
                                      MLX5_GET(set_pp_rate_limit_context, rl_in,
                                               burst_upper_bound),
                                      MLX5_GET(set_pp_rate_limit_context, rl_in,
                                               typical_packet_size));
                        goto rl_err;
                }

                entry->dedicated = dedicated_entry;
        }
        mlx5_rl_entry_get(entry);
        *index = entry->index;
        mutex_unlock(&table->rl_lock);
        return 0;

rl_err:
        mlx5_rl_table_put(table);
out:
        mutex_unlock(&table->rl_lock);
        return err;
}
EXPORT_SYMBOL(mlx5_rl_add_rate_raw);

void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index)
{
        struct mlx5_rl_table *table = &dev->priv.rl_table;
        struct mlx5_rl_entry *entry;

        mutex_lock(&table->rl_lock);
        entry = &table->rl_entry[index - 1];
        mlx5_rl_entry_put(dev, entry);
        mlx5_rl_table_put(table);
        mutex_unlock(&table->rl_lock);
}
EXPORT_SYMBOL(mlx5_rl_remove_rate_raw);
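
/* Convenience wrapper around mlx5_rl_add_rate_raw() for shared
 * (non-dedicated) entries. Typical usage (illustrative only):
 *
 *	struct mlx5_rate_limit rl = { .rate = rate_in_kbps };
 *	u16 index;
 *
 *	err = mlx5_rl_add_rate(dev, &index, &rl);
 *	...
 *	mlx5_rl_remove_rate(dev, &rl);
 */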
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
                     struct mlx5_rate_limit *rl)
{
        u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};

        MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
        MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
                 rl->max_burst_sz);
        MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
                 rl->typical_pkt_sz);

        return mlx5_rl_add_rate_raw(dev, rl_raw,
                                    MLX5_CAP_QOS(dev, packet_pacing_uid) ?
                                    MLX5_SHARED_RESOURCE_UID : 0,
                                    false, index);
}
EXPORT_SYMBOL(mlx5_rl_add_rate);

void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
{
        u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
        struct mlx5_rl_table *table = &dev->priv.rl_table;
        struct mlx5_rl_entry *entry = NULL;

        /* 0 is a reserved value for unlimited rate */
        if (rl->rate == 0)
                return;

        MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
        MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
                 rl->max_burst_sz);
        MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
                 rl->typical_pkt_sz);

        mutex_lock(&table->rl_lock);
        entry = find_rl_entry(table, rl_raw,
                              MLX5_CAP_QOS(dev, packet_pacing_uid) ?
                              MLX5_SHARED_RESOURCE_UID : 0, false);
        if (!entry || !entry->refcount) {
                mlx5_core_warn(dev, "Rate %u, max_burst_sz %u, typical_pkt_sz %u are not configured\n",
                               rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
                goto out;
        }
        mlx5_rl_entry_put(dev, entry);
        mlx5_rl_table_put(table);
out:
        mutex_unlock(&table->rl_lock);
}
EXPORT_SYMBOL(mlx5_rl_remove_rate);
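
/* Size the rate-limit table from the QoS capabilities. When packet pacing
 * is not supported, max_size stays 0 and mlx5_rl_add_rate_raw() fails with
 * -EOPNOTSUPP.
 */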
int mlx5_init_rl_table(struct mlx5_core_dev *dev)
{
        struct mlx5_rl_table *table = &dev->priv.rl_table;

        if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) {
                table->max_size = 0;
                return 0;
        }

        mutex_init(&table->rl_lock);

        /* First entry is reserved for unlimited rate */
        table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1;
        table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate);
        table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate);

        mlx5_core_info(dev, "Rate limit: %u rates are supported, range: %uMbps to %uMbps\n",
                       table->max_size,
                       table->min_rate >> 10,
                       table->max_rate >> 10);

        return 0;
}

void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
{
        struct mlx5_rl_table *table = &dev->priv.rl_table;

        if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing))
                return;

        mlx5_rl_table_free(dev, table);
        mutex_destroy(&table->rl_lock);
}