#include "osdep.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "pble.h"

static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);
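/**
 * irdma_destroy_pble_prm - destroy prm during module unload
 * @pble_rsrc: pble resources
 */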
void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_chunk *chunk;
	struct irdma_pble_prm *pinfo = &pble_rsrc->pinfo;

	while (!list_empty(&pinfo->clist)) {
		chunk = (struct irdma_chunk *)pinfo->clist.next;
		list_del(&chunk->list);
		if (chunk->type == PBLE_SD_PAGED)
			irdma_pble_free_paged_mem(chunk);
		bitmap_free(chunk->bitmapbuf);
		kfree(chunk->chunkmem.va);
	}
}
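/**
 * irdma_hmc_init_pble - initialize pble resources during module load
 * @dev: irdma_sc_dev struct
 * @pble_rsrc: pble resources
 */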
int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
			struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_hmc_info *hmc_info;
	u32 fpm_idx = 0;
	int status = 0;

	hmc_info = dev->hmc_info;
	pble_rsrc->dev = dev;
	pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].base;

	if (pble_rsrc->fpm_base_addr & 0xfff)
		fpm_idx = (4096 - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
	pble_rsrc->unallocated_pble =
		hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt - fpm_idx;
	pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
	pble_rsrc->pinfo.pble_shift = PBLE_SHIFT;

	mutex_init(&pble_rsrc->pble_mutex_lock);

	spin_lock_init(&pble_rsrc->pinfo.prm_lock);
	INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
	if (add_pble_prm(pble_rsrc)) {
		irdma_destroy_pble_prm(pble_rsrc);
		status = -ENOMEM;
	}

	return status;
}
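/**
 * get_sd_pd_idx - returns sd index, pd index and rel_pd_idx from fpm address
 * @pble_rsrc: structure containing fpm address
 * @idx: where to return indexes
 */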
static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
			  struct sd_pd_idx *idx)
{
	idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
	idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
	idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
}
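/**
 * add_sd_direct - add sd direct for pble
 * @pble_rsrc: pble resource ptr
 * @info: page info for sd
 */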
static int add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
			 struct irdma_add_page_info *info)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	int ret_code = 0;
	struct sd_pd_idx *idx = &info->idx;
	struct irdma_chunk *chunk = info->chunk;
	struct irdma_hmc_info *hmc_info = info->hmc_info;
	struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
	u32 offset = 0;

	if (!sd_entry->valid) {
		ret_code = irdma_add_sd_table_entry(dev->hw, hmc_info,
						    info->idx.sd_idx,
						    IRDMA_SD_TYPE_DIRECT,
						    IRDMA_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			return ret_code;

		chunk->type = PBLE_SD_CONTIGOUS;
	}

	offset = idx->rel_pd_idx << HMC_PAGED_BP_SHIFT;
	chunk->size = info->pages << HMC_PAGED_BP_SHIFT;
	chunk->vaddr = sd_entry->u.bp.addr.va + offset;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	ibdev_dbg(to_ibdev(dev),
		  "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%pK fpm_addr = %llx\n",
		  chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);

	return 0;
}
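/**
 * fpm_to_idx - given fpm address, get pble index
 * @pble_rsrc: pble resource management
 * @addr: fpm address for index
 */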
static u32 fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
	u64 idx;

	idx = (addr - (pble_rsrc->fpm_base_addr)) >> 3;

	return (u32)idx;
}
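/**
 * add_bp_pages - add backing pages for sd
 * @pble_rsrc: pble resource management
 * @info: page info for sd
 */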
static int add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
			struct irdma_add_page_info *info)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	u8 *addr;
	struct irdma_dma_mem mem;
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
	struct irdma_hmc_info *hmc_info = info->hmc_info;
	struct irdma_chunk *chunk = info->chunk;
	int status = 0;
	u32 rel_pd_idx = info->idx.rel_pd_idx;
	u32 pd_idx = info->idx.pd_idx;
	u32 i;

	if (irdma_pble_get_paged_mem(chunk, info->pages))
		return -ENOMEM;

	status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx,
					  IRDMA_SD_TYPE_PAGED,
					  IRDMA_HMC_DIRECT_BP_SIZE);
	if (status)
		goto error;

	addr = chunk->vaddr;
	for (i = 0; i < info->pages; i++) {
		mem.pa = (u64)chunk->dmainfo.dmaaddrs[i];
		mem.size = 4096;
		mem.va = addr;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
		if (!pd_entry->valid) {
			status = irdma_add_pd_table_entry(dev, hmc_info,
							  pd_idx++, &mem);
			if (status)
				goto error;

			addr += 4096;
		}
	}

	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	return 0;

error:
	irdma_pble_free_paged_mem(chunk);

	return status;
}
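/**
 * irdma_get_type - choose the sd entry type (direct or paged) for an sd
 * @dev: irdma_sc_dev struct
 * @idx: index of sd
 * @pages: pages in the sd
 */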
static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev,
					       struct sd_pd_idx *idx, u32 pages)
{
	enum irdma_sd_entry_type sd_entry_type;

	sd_entry_type = !idx->rel_pd_idx && pages == IRDMA_HMC_PD_CNT_IN_SD ?
			IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
	return sd_entry_type;
}
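/**
 * add_pble_prm - add a sd entry for pble resource
 * @pble_rsrc: pble resource management
 */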
static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_hmc_info *hmc_info;
	struct irdma_chunk *chunk;
	struct irdma_add_page_info info;
	struct sd_pd_idx *idx = &info.idx;
	int ret_code = 0;
	enum irdma_sd_entry_type sd_entry_type;
	u64 sd_reg_val = 0;
	struct irdma_virt_mem chunkmem;
	u32 pages;

	if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
		return -ENOMEM;

	if (pble_rsrc->next_fpm_addr & 0xfff)
		return -EINVAL;

	chunkmem.size = sizeof(*chunk);
	chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL);
	if (!chunkmem.va)
		return -ENOMEM;

	chunk = chunkmem.va;
	chunk->chunkmem = chunkmem;
	hmc_info = dev->hmc_info;
	chunk->dev = dev;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	get_sd_pd_idx(pble_rsrc, idx);
	sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
	pages = (idx->rel_pd_idx) ? (IRDMA_HMC_PD_CNT_IN_SD - idx->rel_pd_idx) :
				    IRDMA_HMC_PD_CNT_IN_SD;
	pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
	info.chunk = chunk;
	info.hmc_info = hmc_info;
	info.pages = pages;
	info.sd_entry = sd_entry;
	if (!sd_entry->valid)
		sd_entry_type = irdma_get_type(dev, idx, pages);
	else
		sd_entry_type = sd_entry->entry_type;

	ibdev_dbg(to_ibdev(dev),
		  "PBLE: pages = %d, unallocated_pble[%d] current_fpm_addr = %llx\n",
		  pages, pble_rsrc->unallocated_pble,
		  pble_rsrc->next_fpm_addr);
	ibdev_dbg(to_ibdev(dev), "PBLE: sd_entry_type = %d\n", sd_entry_type);
	if (sd_entry_type == IRDMA_SD_TYPE_DIRECT)
		ret_code = add_sd_direct(pble_rsrc, &info);

	if (ret_code)
		sd_entry_type = IRDMA_SD_TYPE_PAGED;
	else
		pble_rsrc->stats_direct_sds++;

	if (sd_entry_type == IRDMA_SD_TYPE_PAGED) {
		ret_code = add_bp_pages(pble_rsrc, &info);
		if (ret_code)
			goto error;
		else
			pble_rsrc->stats_paged_sds++;
	}

	ret_code = irdma_prm_add_pble_mem(&pble_rsrc->pinfo, chunk);
	if (ret_code)
		goto error;

	pble_rsrc->next_fpm_addr += chunk->size;
	ibdev_dbg(to_ibdev(dev),
		  "PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n",
		  pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
	pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
	sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
		     sd_entry->u.pd_table.pd_page_addr.pa :
		     sd_entry->u.bp.addr.pa;

	if (!sd_entry->valid) {
		ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id, sd_reg_val,
					    idx->sd_idx, sd_entry->entry_type, true);
		if (ret_code)
			goto error;
	}

	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
	sd_entry->valid = true;
	return 0;

error:
	bitmap_free(chunk->bitmapbuf);
	kfree(chunk->chunkmem.va);

	return ret_code;
}
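/**
 * free_lvl2 - free level 2 pble allocation
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */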
static void free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc,
		      struct irdma_pble_alloc *palloc)
{
	u32 i;
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *root = &lvl2->root;
	struct irdma_pble_info *leaf = lvl2->leaf;

	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		if (leaf->addr)
			irdma_prm_return_pbles(&pble_rsrc->pinfo,
					       &leaf->chunkinfo);
		else
			break;
	}

	if (root->addr)
		irdma_prm_return_pbles(&pble_rsrc->pinfo, &root->chunkinfo);

	kfree(lvl2->leafmem.va);
	lvl2->leaf = NULL;
}
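/**
 * get_lvl2_pble - get level 2 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */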
static int get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
			 struct irdma_pble_alloc *palloc)
{
	u32 lf4k, lflast, total, i;
	u32 pblcnt = PBLE_PER_PAGE;
	u64 *addr;
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *root = &lvl2->root;
	struct irdma_pble_info *leaf;
	int ret_code;
	u64 fpm_addr;
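	/* number of full 512-entry (4K) leaf pages */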
	lf4k = palloc->total_cnt >> 9;
	lflast = palloc->total_cnt % PBLE_PER_PAGE;
	total = (lflast == 0) ? lf4k : lf4k + 1;
	lvl2->leaf_cnt = total;

	lvl2->leafmem.size = (sizeof(*leaf) * total);
	lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL);
	if (!lvl2->leafmem.va)
		return -ENOMEM;

	lvl2->leaf = lvl2->leafmem.va;
	leaf = lvl2->leaf;
	ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &root->chunkinfo,
				       total << 3, &root->addr, &fpm_addr);
	if (ret_code) {
		kfree(lvl2->leafmem.va);
		lvl2->leaf = NULL;
		return -ENOMEM;
	}

	root->idx = fpm_to_idx(pble_rsrc, fpm_addr);
	root->cnt = total;
	addr = root->addr;
	for (i = 0; i < total; i++, leaf++) {
		pblcnt = (lflast && ((i + 1) == total)) ?
			 lflast : PBLE_PER_PAGE;
		ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo,
					       &leaf->chunkinfo, pblcnt << 3,
					       &leaf->addr, &fpm_addr);
		if (ret_code)
			goto error;

		leaf->idx = fpm_to_idx(pble_rsrc, fpm_addr);
		leaf->cnt = pblcnt;
		*addr = (u64)leaf->idx;
		addr++;
	}

	palloc->level = PBLE_LEVEL_2;
	pble_rsrc->stats_lvl2++;
	return 0;

error:
	free_lvl2(pble_rsrc, palloc);

	return -ENOMEM;
}
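/**
 * get_lvl1_pble - get level 1 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 1 pble allocation
 */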
static int get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
			 struct irdma_pble_alloc *palloc)
{
	int ret_code;
	u64 fpm_addr;
	struct irdma_pble_info *lvl1 = &palloc->level1;

	ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &lvl1->chunkinfo,
				       palloc->total_cnt << 3, &lvl1->addr,
				       &fpm_addr);
	if (ret_code)
		return -ENOMEM;

	palloc->level = PBLE_LEVEL_1;
	lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr);
	lvl1->cnt = palloc->total_cnt;
	pble_rsrc->stats_lvl1++;

	return 0;
}
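/**
 * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @level1_only: flag for a level 1 pble
 */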
static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
			      struct irdma_pble_alloc *palloc, bool level1_only)
{
	int status = 0;

	status = get_lvl1_pble(pble_rsrc, palloc);
	if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE)
		return status;

	status = get_lvl2_pble(pble_rsrc, palloc);

	return status;
}
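/**
 * irdma_get_pble - allocate pbles from the prm
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pble_cnt: # of pbles requested
 * @level1_only: true if only pble level 1 to acquire
 */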
int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
		   struct irdma_pble_alloc *palloc, u32 pble_cnt,
		   bool level1_only)
{
	int status = 0;
	int max_sds = 0;
	int i;

	palloc->total_cnt = pble_cnt;
	palloc->level = PBLE_LEVEL_0;

	mutex_lock(&pble_rsrc->pble_mutex_lock);
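	/* check first to see if we can get pbles without acquiring
	 * additional sds
	 */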
	status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
	if (!status)
		goto exit;

	max_sds = (palloc->total_cnt >> 18) + 1;
	for (i = 0; i < max_sds; i++) {
		status = add_pble_prm(pble_rsrc);
		if (status)
			break;

		status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
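		/* if level1_only, only go through the loop once */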
		if (!status || level1_only)
			break;
	}

exit:
	if (!status) {
		pble_rsrc->allocdpbles += pble_cnt;
		pble_rsrc->stats_alloc_ok++;
	} else {
		pble_rsrc->stats_alloc_fail++;
	}
	mutex_unlock(&pble_rsrc->pble_mutex_lock);

	return status;
}
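/**
 * irdma_free_pble - put pbles back into prm
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble resource being freed
 */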
void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
		     struct irdma_pble_alloc *palloc)
{
	pble_rsrc->freedpbles += palloc->total_cnt;

	if (palloc->level == PBLE_LEVEL_2)
		free_lvl2(pble_rsrc, palloc);
	else
		irdma_prm_return_pbles(&pble_rsrc->pinfo,
				       &palloc->level1.chunkinfo);
	pble_rsrc->stats_alloc_freed++;
}