#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/xarray.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"

enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};

struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	u8	ec_function;
	s32	npages;
	struct work_struct work;
	u8	release_all;
};

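/* Each fw_page tracks one PAGE_SIZE host page that has been DMA-mapped for
 * firmware use. The page is carved into MLX5_NUM_4K_IN_PAGE 4KB chunks;
 * set bits in "bitmask" mark chunks that are still free and "free_count"
 * caches the number of free chunks.
 */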
struct fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page	       *page;
	u32			function;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned int		free_count;
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

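/* Pack the function id and the embedded-CPU flag into the single key that
 * indexes the per-function page trees in page_root_xa.
 */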
static u32 get_function(u16 func_id, bool ec_function)
{
	return (u32)func_id | (ec_function << 16);
}

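/* Look up the rb-tree root that tracks pages for this function key,
 * allocating and inserting an empty tree into page_root_xa on first use.
 */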
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
	struct rb_root *root;
	int err;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (root)
		return root;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
	if (err) {
		kfree(root);
		return ERR_PTR(err);
	}

	*root = RB_ROOT;

	return root;
}

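/* Start tracking a newly mapped host page: link it into the per-function
 * rb-tree (keyed by DMA address) and onto the free list, with all of its
 * 4KB chunks marked free.
 */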
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
	struct rb_node *parent = NULL;
	struct rb_root *root;
	struct rb_node **new;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	root = page_root_per_function(dev, function);
	if (IS_ERR(root))
		return PTR_ERR(root);

	new = &root->rb_node;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_right;
		else if (tfp->addr > addr)
			new = &parent->rb_left;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->function = function;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}

static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
				    u32 function)
{
	struct fw_page *result = NULL;
	struct rb_root *root;
	struct rb_node *tmp;
	struct fw_page *tfp;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (WARN_ON_ONCE(!root))
		return NULL;

	tmp = root->rb_node;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_right;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_left;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}

static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, boot ?
		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
	MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));

	err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return err;
}

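/* Hand out one free 4KB chunk belonging to "function", taken from a fw_page
 * on the free list. Returns -ENOMEM when no tracked page has room, which
 * tells the caller to allocate a fresh system page.
 */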
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
	struct fw_page *fp = NULL;
	struct fw_page *iter;
	unsigned n;

	list_for_each_entry(iter, &dev->priv.free_list, list) {
		if (iter->function != function)
			continue;
		fp = iter;
	}

	if (list_empty(&dev->priv.free_list) || !fp)
		return -ENOMEM;

	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug\n");
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
		     bool in_free_list)
{
	struct rb_root *root;

	root = xa_load(&dev->priv.page_root_xa, fwp->function);
	if (WARN_ON_ONCE(!root))
		return;

	rb_erase(&fwp->rb_node, root);
	if (in_free_list)
		list_del(&fwp->list);
	dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
		       PAGE_SIZE, DMA_BIDIRECTIONAL);
	__free_page(fwp->page);
	kfree(fwp);
}

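/* Return one 4KB chunk to its fw_page. Once every chunk of the page is free
 * again the page is unmapped and given back to the kernel; when the first
 * chunk of a fully used page is freed, the page goes back on the free list.
 */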
static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
	if (!fwp) {
		mlx5_core_warn_rl(dev, "page not found\n");
		return;
	}
	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
		free_fwp(dev, fwp, fwp->free_count != 1);
	else if (fwp->free_count == 1)
		list_add(&fwp->list, &dev->priv.free_list);
}

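/* Allocate one host page close to the device's NUMA node, DMA-map it and
 * start tracking it under the given function.
 */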
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
	struct device *device = mlx5_core_dma_dev(dev);
	int nid = dev_to_node(device);
	struct page *page;
	u64 zero_addr = 1;
	u64 addr;
	int err;

	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
map:
	addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(device, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto err_mapping;
	}

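	/* Firmware doesn't support page with physical address 0 */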
	if (addr == 0) {
		zero_addr = addr;
		goto map;
	}

	err = insert_page(dev, addr, page, function);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	}

err_mapping:
	if (err)
		__free_page(page);

	if (zero_addr == 0)
		dma_unmap_page(device, zero_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

	return err;
}

static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
			     bool ec_function)
{
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int err;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec_in(dev, manage_pages, in);
	if (err)
		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
			       func_id, err);
}

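/* Allocate npages 4KB chunks for func_id and hand them to firmware with a
 * MANAGE_PAGES(GIVE) command. "event" is set when the request came from a
 * firmware page-request event rather than from the startup flow.
 */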
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int event, bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
	int notify_fail = event;
	u64 addr;
	int err;
	u32 *in;
	int i;

	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		goto out_free;
	}

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr, function);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, function);
			if (err) {
				dev->priv.fw_pages_alloc_failed += (npages - i);
				goto out_4k;
			}

			goto retry;
		}
		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
	}

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_do(dev, in, inlen, out, sizeof(out));
	if (err == -EREMOTEIO) {
		notify_fail = 0;
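		/* if triggered by FW and failed by FW ignore */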
		if (event) {
			err = 0;
			goto out_dropped;
		}
	}
	err = mlx5_cmd_check(dev, err, in, out);
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_dropped;
	}

	dev->priv.fw_pages += npages;
	if (func_id)
		dev->priv.vfs_pages += npages;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.host_pf_pages += npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
		      npages, ec_function, func_id, err);

	kvfree(in);
	return 0;

out_dropped:
	dev->priv.give_pages_dropped += npages;
out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
	kvfree(in);
	if (notify_fail)
		page_notify_fail(dev, func_id, ec_function);
	return err;
}

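/* Drop every page tracked for this function without issuing reclaim commands;
 * used when the firmware page-request event carries the RELEASE_ALL flag.
 */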
static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
			      bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	struct rb_root *root;
	struct rb_node *p;
	int npages = 0;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (WARN_ON_ONCE(!root))
		return;

	p = rb_first(root);
	while (p) {
		struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);

		p = rb_next(p);
		npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
		free_fwp(dev, fwp, fwp->free_count);
	}

	dev->priv.fw_pages -= npages;
	if (func_id)
		dev->priv.vfs_pages -= npages;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.host_pf_pages -= npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
		      npages, ec_function, func_id);
}

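/* Fill the reclaim-command output with the addresses of up to npages 4KB
 * chunks from one fw_page that are currently held by firmware (clear bits in
 * the bitmask), starting at entry "index". Returns how many entries were
 * written.
 */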
static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
				     u32 npages)
{
	u32 pages_set = 0;
	unsigned int n;

	for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) {
		MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set,
				 fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE));
		pages_set++;

		if (!--npages)
			break;
	}

	return pages_set;
}

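/* Issue the MANAGE_PAGES(TAKE) command, or, when the command interface is
 * down, synthesize the firmware's answer from the driver's own page tracking
 * so the pages can still be freed.
 */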
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
	struct rb_root *root;
	struct fw_page *fwp;
	struct rb_node *p;
	bool ec_function;
	u32 func_id;
	u32 npages;
	u32 i = 0;

	if (!mlx5_cmd_is_down(dev))
		return mlx5_cmd_do(dev, in, in_size, out, out_size);

	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
	func_id = MLX5_GET(manage_pages_in, in, function_id);
	ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);

	root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
	if (WARN_ON_ONCE(!root))
		return -EEXIST;

	p = rb_first(root);
	while (p && i < npages) {
		fwp = rb_entry(p, struct fw_page, rb_node);
		p = rb_next(p);

		i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
	}

	MLX5_SET(manage_pages_out, out, output_num_entries, i);
	return 0;
}

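/* Ask firmware to return up to npages 4KB chunks for func_id and free every
 * chunk it reports back; *nclaimed, when provided, is set to the number of
 * chunks actually reclaimed.
 */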
static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
			 int *nclaimed, bool event, bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int num_claimed;
	u32 *out;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
		      func_id, npages, outlen);
	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
	if (err) {
		npages = MLX5_GET(manage_pages_in, in, input_num_entries);
		dev->priv.reclaim_pages_discard += npages;
	}
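	/* if triggered by FW event and failed by FW then ignore */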
	if (event && err == -EREMOTEIO) {
		err = 0;
		goto out_free;
	}

	err = mlx5_cmd_check(dev, err, in, out);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
		goto out_free;
	}

	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
	if (num_claimed > npages) {
		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
			       num_claimed, npages);
		err = -EINVAL;
		goto out_free;
	}

	for (i = 0; i < num_claimed; i++)
		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);

	if (nclaimed)
		*nclaimed = num_claimed;

	dev->priv.fw_pages -= num_claimed;
	if (func_id)
		dev->priv.vfs_pages -= num_claimed;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.host_pf_pages -= num_claimed;

out_free:
	kvfree(out);
	return err;
}

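/* Worker that services a single firmware page request: release everything,
 * reclaim pages or give pages, depending on what the event asked for.
 */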
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->release_all)
		release_all_pages(dev, req->func_id, req->ec_function);
	else if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
				    true, req->ec_function);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

enum {
	EC_FUNCTION_MASK = 0x8000,
	RELEASE_ALL_PAGES_MASK = 0x4000,
};

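/* PAGE_REQUEST EQE handler: decode the event and queue a mlx5_pages_req work
 * item so the page commands run in process context.
 */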
static int req_pages_handler(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	struct mlx5_pages_req *req;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_eqe *eqe;
	bool ec_function;
	bool release_all;
	u16 func_id;
	s32 npages;

	priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
	dev = container_of(priv, struct mlx5_core_dev, priv);
	eqe = data;

	func_id = be16_to_cpu(eqe->data.req_pages.func_id);
	npages = be32_to_cpu(eqe->data.req_pages.num_pages);
	ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
	release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
		      RELEASE_ALL_PAGES_MASK;
	mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
		      func_id, npages, release_all);
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return NOTIFY_DONE;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	req->ec_function = ec_function;
	req->release_all = release_all;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
	return NOTIFY_OK;
}

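/* Query how many boot/init pages firmware wants for this function and give
 * them right away; called during device bring-up.
 */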
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 func_id;
	s32 npages;
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

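/* Number of page addresses that fit in one reclaim command response: the
 * immediate output area plus MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks,
 * minus the fixed output header.
 */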
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

	return ret;
}

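/* Reclaim every page tracked in one per-function tree, retrying as long as
 * firmware keeps returning pages before the RECLAIM_PAGES timeout expires.
 */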
static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
				   struct rb_root *root, u16 func_id)
{
	u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
	unsigned long end = jiffies + recl_pages_to_jiffies;

	while (!RB_EMPTY_ROOT(root)) {
		int nclaimed;
		int err;

		err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
				    &nclaimed, false, mlx5_core_is_ecpf(dev));
		if (err) {
			mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
				       err, func_id);
			return err;
		}

		if (nclaimed)
			end = jiffies + recl_pages_to_jiffies;

		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	}

	return 0;
}

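/* Reclaim everything the driver still tracks, then drop the per-function
 * trees; called on teardown. The WARNs catch pages firmware never returned.
 */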
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	struct rb_root *root;
	unsigned long id;
	void *entry;

	xa_for_each(&dev->priv.page_root_xa, id, entry) {
		root = entry;
		mlx5_reclaim_root_pages(dev, root, id);
		xa_erase(&dev->priv.page_root_xa, id);
		kfree(root);
	}

	WARN_ON(!xa_empty(&dev->priv.page_root_xa));

	WARN(dev->priv.fw_pages,
	     "FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.fw_pages);
	WARN(dev->priv.vfs_pages,
	     "VFs FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.vfs_pages);
	WARN(dev->priv.host_pf_pages,
	     "External host PF FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.host_pf_pages);

	return 0;
}

int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	INIT_LIST_HEAD(&dev->priv.free_list);
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	xa_init(&dev->priv.page_root_xa);
	mlx5_pages_debugfs_init(dev);

	return 0;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_pages_debugfs_cleanup(dev);
	xa_destroy(&dev->priv.page_root_xa);
	destroy_workqueue(dev->priv.pg_wq);
}

void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
	mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
	flush_workqueue(dev->priv.pg_wq);
}

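/* Wait for the given page counter to drop to zero, extending the deadline
 * whenever progress is made; used while reclaiming VF/host pages.
 */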
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
	u64 recl_vf_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_VFS_PAGES));
	unsigned long end = jiffies + recl_vf_pages_to_jiffies;
	int prev_pages = *pages;

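	/* In case of internal error we will free the pages manually later */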
	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_warn(dev, "Skipping wait for vf pages stage");
		return 0;
	}

	mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
	while (*pages) {
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
			return -ETIMEDOUT;
		}
		if (*pages < prev_pages) {
			end = jiffies + recl_vf_pages_to_jiffies;
			prev_pages = *pages;
		}
		msleep(50);
	}

	mlx5_core_dbg(dev, "All pages received\n");
	return 0;
}