Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
0002 /* Copyright (c) 2015 - 2021 Intel Corporation */
0003 #include "osdep.h"
0004 #include "hmc.h"
0005 #include "defs.h"
0006 #include "type.h"
0007 #include "protos.h"
0008 
0009 /**
0010  * irdma_find_sd_index_limit - finds segment descriptor index limit
0011  * @hmc_info: pointer to the HMC configuration information structure
0012  * @type: type of HMC resources we're searching
0013  * @idx: starting index for the object
0014  * @cnt: number of objects we're trying to create
0015  * @sd_idx: pointer to return index of the segment descriptor in question
0016  * @sd_limit: pointer to return the maximum number of segment descriptors
0017  *
0018  * This function calculates the segment descriptor index and index limit
0019  * for the resource defined by irdma_hmc_rsrc_type.
0020  */
0021 
0022 static void irdma_find_sd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
0023                       u32 idx, u32 cnt, u32 *sd_idx,
0024                       u32 *sd_limit)
0025 {
0026     u64 fpm_addr, fpm_limit;
0027 
0028     fpm_addr = hmc_info->hmc_obj[(type)].base +
0029            hmc_info->hmc_obj[type].size * idx;
0030     fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
0031     *sd_idx = (u32)(fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE);
0032     *sd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_DIRECT_BP_SIZE);
0033     *sd_limit += 1;
0034 }
0035 
0036 /**
0037  * irdma_find_pd_index_limit - finds page descriptor index limit
0038  * @hmc_info: pointer to the HMC configuration information struct
0039  * @type: HMC resource type we're examining
0040  * @idx: starting index for the object
0041  * @cnt: number of objects we're trying to create
0042  * @pd_idx: pointer to return page descriptor index
0043  * @pd_limit: pointer to return page descriptor index limit
0044  *
0045  * Calculates the page descriptor index and index limit for the resource
0046  * defined by irdma_hmc_rsrc_type.
0047  */
0048 
0049 static void irdma_find_pd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
0050                       u32 idx, u32 cnt, u32 *pd_idx,
0051                       u32 *pd_limit)
0052 {
0053     u64 fpm_adr, fpm_limit;
0054 
0055     fpm_adr = hmc_info->hmc_obj[type].base +
0056           hmc_info->hmc_obj[type].size * idx;
0057     fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt);
0058     *pd_idx = (u32)(fpm_adr / IRDMA_HMC_PAGED_BP_SIZE);
0059     *pd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_PAGED_BP_SIZE);
0060     *pd_limit += 1;
0061 }
0062 
0063 /**
0064  * irdma_set_sd_entry - setup entry for sd programming
0065  * @pa: physical addr
0066  * @idx: sd index
0067  * @type: paged or direct sd
0068  * @entry: sd entry ptr
0069  */
0070 static void irdma_set_sd_entry(u64 pa, u32 idx, enum irdma_sd_entry_type type,
0071                    struct irdma_update_sd_entry *entry)
0072 {
0073     entry->data = pa |
0074               FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
0075               FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
0076                  type == IRDMA_SD_TYPE_PAGED ? 0 : 1) |
0077               FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDVALID, 1);
0078 
0079     entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) | BIT(15);
0080 }
0081 
0082 /**
0083  * irdma_clr_sd_entry - setup entry for sd clear
0084  * @idx: sd index
0085  * @type: paged or direct sd
0086  * @entry: sd entry ptr
0087  */
0088 static void irdma_clr_sd_entry(u32 idx, enum irdma_sd_entry_type type,
0089                    struct irdma_update_sd_entry *entry)
0090 {
0091     entry->data = FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
0092               FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
0093                  type == IRDMA_SD_TYPE_PAGED ? 0 : 1);
0094 
0095     entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) | BIT(15);
0096 }
0097 
0098 /**
0099  * irdma_invalidate_pf_hmc_pd - Invalidates the pd cache in the hardware for PF
0100  * @dev: pointer to our device struct
0101  * @sd_idx: segment descriptor index
0102  * @pd_idx: page descriptor index
0103  */
0104 static inline void irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_idx,
0105                           u32 pd_idx)
0106 {
0107     u32 val = FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDIDX, sd_idx) |
0108           FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDPARTSEL, 1) |
0109           FIELD_PREP(IRDMA_PFHMC_PDINV_PMPDIDX, pd_idx);
0110 
0111     writel(val, dev->hw_regs[IRDMA_PFHMC_PDINV]);
0112 }
0113 
0114 /**
0115  * irdma_hmc_sd_one - setup 1 sd entry for cqp
0116  * @dev: pointer to the device structure
0117  * @hmc_fn_id: hmc's function id
0118  * @pa: physical addr
0119  * @sd_idx: sd index
0120  * @type: paged or direct sd
0121  * @setsd: flag to set or clear sd
0122  */
0123 int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
0124              enum irdma_sd_entry_type type, bool setsd)
0125 {
0126     struct irdma_update_sds_info sdinfo;
0127 
0128     sdinfo.cnt = 1;
0129     sdinfo.hmc_fn_id = hmc_fn_id;
0130     if (setsd)
0131         irdma_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
0132     else
0133         irdma_clr_sd_entry(sd_idx, type, sdinfo.entry);
0134     return dev->cqp->process_cqp_sds(dev, &sdinfo);
0135 }
0136 
0137 /**
0138  * irdma_hmc_sd_grp - setup group of sd entries for cqp
0139  * @dev: pointer to the device structure
0140  * @hmc_info: pointer to the HMC configuration information struct
0141  * @sd_index: sd index
0142  * @sd_cnt: number of sd entries
0143  * @setsd: flag to set or clear sd
0144  */
0145 static int irdma_hmc_sd_grp(struct irdma_sc_dev *dev,
0146                 struct irdma_hmc_info *hmc_info, u32 sd_index,
0147                 u32 sd_cnt, bool setsd)
0148 {
0149     struct irdma_hmc_sd_entry *sd_entry;
0150     struct irdma_update_sds_info sdinfo = {};
0151     u64 pa;
0152     u32 i;
0153     int ret_code = 0;
0154 
0155     sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
0156     for (i = sd_index; i < sd_index + sd_cnt; i++) {
0157         sd_entry = &hmc_info->sd_table.sd_entry[i];
0158         if (!sd_entry || (!sd_entry->valid && setsd) ||
0159             (sd_entry->valid && !setsd))
0160             continue;
0161         if (setsd) {
0162             pa = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
0163                      sd_entry->u.pd_table.pd_page_addr.pa :
0164                      sd_entry->u.bp.addr.pa;
0165             irdma_set_sd_entry(pa, i, sd_entry->entry_type,
0166                        &sdinfo.entry[sdinfo.cnt]);
0167         } else {
0168             irdma_clr_sd_entry(i, sd_entry->entry_type,
0169                        &sdinfo.entry[sdinfo.cnt]);
0170         }
0171         sdinfo.cnt++;
0172         if (sdinfo.cnt == IRDMA_MAX_SD_ENTRIES) {
0173             ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
0174             if (ret_code) {
0175                 ibdev_dbg(to_ibdev(dev),
0176                       "HMC: sd_programming failed err=%d\n",
0177                       ret_code);
0178                 return ret_code;
0179             }
0180 
0181             sdinfo.cnt = 0;
0182         }
0183     }
0184     if (sdinfo.cnt)
0185         ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
0186 
0187     return ret_code;
0188 }
0189 
0190 /**
0191  * irdma_hmc_finish_add_sd_reg - program sd entries for objects
0192  * @dev: pointer to the device structure
0193  * @info: create obj info
0194  */
0195 static int irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,
0196                        struct irdma_hmc_create_obj_info *info)
0197 {
0198     if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
0199         return -EINVAL;
0200 
0201     if ((info->start_idx + info->count) >
0202         info->hmc_info->hmc_obj[info->rsrc_type].cnt)
0203         return -EINVAL;
0204 
0205     if (!info->add_sd_cnt)
0206         return 0;
0207     return irdma_hmc_sd_grp(dev, info->hmc_info,
0208                 info->hmc_info->sd_indexes[0], info->add_sd_cnt,
0209                 true);
0210 }
0211 
/**
 * irdma_sc_create_hmc_obj - allocate backing store for hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to irdma_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 *
 * Returns 0 on success or a negative errno; on SD/PD allocation failure
 * every descriptor created by this call is unwound before returning.
 */
int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
			    struct irdma_hmc_create_obj_info *info)
{
	struct irdma_hmc_sd_entry *sd_entry;
	u32 sd_idx, sd_lmt;
	u32 pd_idx = 0, pd_lmt = 0;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 i, j;
	bool pd_error = false;
	int ret_code = 0;

	/* requested range must start inside the resource's object count */
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return -EINVAL;

	/* ... and must not run past it */
	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ibdev_dbg(to_ibdev(dev),
			  "HMC: error type %u, start = %u, req cnt %u, cnt = %u\n",
			  info->rsrc_type, info->start_idx, info->count,
			  info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return -EINVAL;
	}

	/* map the object range onto segment descriptors and sanity-check */
	irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx,
				  &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		return -EINVAL;
	}

	/* map the same object range onto page descriptors */
	irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx,
				  &pd_lmt);

	for (j = sd_idx; j < sd_lmt; j++) {
		/* allocate (or reuse) the backing for this SD */
		ret_code = irdma_add_sd_table_entry(dev->hw, info->hmc_info, j,
						    info->entry_type,
						    IRDMA_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			goto exit_sd_error;

		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		/*
		 * Paged SDs need their PD entries populated too — but only
		 * for the PF's own HMC info and for non-PBLE resources
		 * (PBLE pages are added elsewhere on demand).
		 */
		if (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED &&
		    (dev->hmc_info == info->hmc_info &&
		     info->rsrc_type != IRDMA_HMC_IW_PBLE)) {
			/* clamp the PD range to the part covered by SD j */
			pd_idx1 = max(pd_idx, (j * IRDMA_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j + 1) * IRDMA_HMC_MAX_BP_COUNT);
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = irdma_add_pd_table_entry(dev,
								    info->hmc_info,
								    i, NULL);
				if (ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				/* roll back PDs added for this SD, newest first */
				while (i && (i > pd_idx1)) {
					irdma_remove_pd_bp(dev, info->hmc_info,
							   i - 1);
					i--;
				}
			}
		}
		/* already-valid SDs were programmed earlier; don't re-add */
		if (sd_entry->valid)
			continue;

		/* record this SD so finish_add_sd_reg programs it via CQP */
		info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
		info->add_sd_cnt++;
		sd_entry->valid = true;
	}
	return irdma_hmc_finish_add_sd_reg(dev, info);

exit_sd_error:
	/* unwind every SD handled before the failure, newest first */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case IRDMA_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx, (j - 1) * IRDMA_HMC_MAX_BP_COUNT);
			pd_lmt1 = min(pd_lmt, (j * IRDMA_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				irdma_prep_remove_pd_page(info->hmc_info, i);
			break;
		case IRDMA_SD_TYPE_DIRECT:
			irdma_prep_remove_pd_page(info->hmc_info, (j - 1));
			break;
		default:
			ret_code = -EINVAL;
			break;
		}
		j--;
	}

	return ret_code;
}
0317 
/**
 * irdma_finish_del_sd_reg - delete sd entries for objects
 * @dev: pointer to the device structure
 * @info: del obj info listing the SD indexes to tear down
 * @reset: true if called before reset (skip CQP programming; the
 *         hardware state is going away anyway)
 *
 * Clears the listed segment descriptors through the CQP (unless
 * resetting) and then frees the DMA backing memory for each one.
 * Returns the CQP result; memory is freed even if programming failed.
 */
static int irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
				   struct irdma_hmc_del_obj_info *info,
				   bool reset)
{
	struct irdma_hmc_sd_entry *sd_entry;
	int ret_code = 0;
	u32 i, sd_idx;
	struct irdma_dma_mem *mem;

	if (!reset)
		ret_code = irdma_hmc_sd_grp(dev, info->hmc_info,
					    info->hmc_info->sd_indexes[0],
					    info->del_sd_cnt, false);

	if (ret_code)
		ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd sd_grp\n");

	/* free the DMA backing for every SD queued for deletion */
	for (i = 0; i < info->del_sd_cnt; i++) {
		sd_idx = info->hmc_info->sd_indexes[i];
		sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
		/* paged SDs back a PD page; direct SDs back a backing page */
		mem = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
			      &sd_entry->u.pd_table.pd_page_addr :
			      &sd_entry->u.bp.addr;

		if (!mem || !mem->va) {
			ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd mem\n");
		} else {
			dma_free_coherent(dev->hw->device, mem->size, mem->va,
					  mem->pa);
			mem->va = NULL;
		}
	}

	return ret_code;
}
0358 
/**
 * irdma_sc_del_hmc_obj - remove pe hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to irdma_hmc_del_obj_info struct
 * @reset: true if called before reset
 *
 * This will de-populate the SDs and PDs.  It frees
 * the memory for PDS and backing storage.  After this function is returned,
 * caller should deallocate memory allocated previously for
 * book-keeping information about PDs and backing storage.
 *
 * Teardown is two-phase: first remove the backing pages behind every PD
 * in the range, then prepare and clear the covering SDs via the CQP.
 */
int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
			 struct irdma_hmc_del_obj_info *info, bool reset)
{
	struct irdma_hmc_pd_table *pd_table;
	u32 sd_idx, sd_lmt;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 i, j;
	int ret_code = 0;

	/* requested range must start inside the resource's object count */
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ibdev_dbg(to_ibdev(dev),
			  "HMC: error start_idx[%04d]  >= [type %04d].cnt[%04d]\n",
			  info->start_idx, info->rsrc_type,
			  info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return -EINVAL;
	}

	/* ... and must not run past it */
	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ibdev_dbg(to_ibdev(dev),
			  "HMC: error start_idx[%04d] + count %04d  >= [type %04d].cnt[%04d]\n",
			  info->start_idx, info->count, info->rsrc_type,
			  info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return -EINVAL;
	}

	irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx,
				  &pd_lmt);

	/* phase 1: remove the backing page behind each valid, paged PD */
	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / IRDMA_HMC_PD_CNT_IN_SD;

		if (!info->hmc_info->sd_table.sd_entry[sd_idx].valid)
			continue;

		/* direct SDs carry no PD table to walk */
		if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
		    IRDMA_SD_TYPE_PAGED)
			continue;

		rel_pd_idx = j % IRDMA_HMC_PD_CNT_IN_SD;
		pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry &&
		    pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = irdma_remove_pd_bp(dev, info->hmc_info, j);
			if (ret_code) {
				ibdev_dbg(to_ibdev(dev),
					  "HMC: remove_pd_bp error\n");
				return ret_code;
			}
		}
	}

	irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx,
				  &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ibdev_dbg(to_ibdev(dev), "HMC: invalid sd_idx\n");
		return -EINVAL;
	}

	/* phase 2: prepare each SD for removal and queue it for the CQP */
	for (i = sd_idx; i < sd_lmt; i++) {
		pd_table = &info->hmc_info->sd_table.sd_entry[i].u.pd_table;
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case IRDMA_SD_TYPE_DIRECT:
			ret_code = irdma_prep_remove_sd_bp(info->hmc_info, i);
			if (!ret_code) {
				/* queue this SD index for deletion via CQP */
				info->hmc_info->sd_indexes[info->del_sd_cnt] =
					(u16)i;
				info->del_sd_cnt++;
			}
			break;
		case IRDMA_SD_TYPE_PAGED:
			ret_code = irdma_prep_remove_pd_page(info->hmc_info, i);
			if (ret_code)
				break;
			/*
			 * For PBLE resources on a foreign hmc_info the PD
			 * entry bookkeeping is owned here; free it now.
			 */
			if (dev->hmc_info != info->hmc_info &&
			    info->rsrc_type == IRDMA_HMC_IW_PBLE &&
			    pd_table->pd_entry) {
				kfree(pd_table->pd_entry_virt_mem.va);
				pd_table->pd_entry = NULL;
			}
			info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
			info->del_sd_cnt++;
			break;
		default:
			break;
		}
	}
	return irdma_finish_del_sd_reg(dev, info, reset);
}
0464 
/**
 * irdma_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 *
 * Allocates DMA backing for the SD the first time it is seen (a 4K PD
 * page for paged SDs, direct_mode_sz bytes for direct SDs) plus, for
 * paged SDs, a 512-entry PD bookkeeping array.  Re-adding an existing
 * direct SD only bumps its backing-page refcount.
 */
int irdma_add_sd_table_entry(struct irdma_hw *hw,
			     struct irdma_hmc_info *hmc_info, u32 sd_index,
			     enum irdma_sd_entry_type type, u64 direct_mode_sz)
{
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_dma_mem dma_mem;
	u64 alloc_len;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (type == IRDMA_SD_TYPE_PAGED)
			alloc_len = IRDMA_HMC_PAGED_BP_SIZE;
		else
			alloc_len = direct_mode_sz;

		/* allocate a 4K pd page or 2M backing page */
		dma_mem.size = ALIGN(alloc_len, IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
		dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size,
						&dma_mem.pa, GFP_KERNEL);
		if (!dma_mem.va)
			return -ENOMEM;
		if (type == IRDMA_SD_TYPE_PAGED) {
			struct irdma_virt_mem *vmem =
				&sd_entry->u.pd_table.pd_entry_virt_mem;

			/* bookkeeping array: one entry per PD in this SD */
			vmem->size = sizeof(struct irdma_hmc_pd_entry) * 512;
			vmem->va = kzalloc(vmem->size, GFP_KERNEL);
			if (!vmem->va) {
				/* undo the DMA allocation before bailing */
				dma_free_coherent(hw->device, dma_mem.size,
						  dma_mem.va, dma_mem.pa);
				dma_mem.va = NULL;
				return -ENOMEM;
			}
			sd_entry->u.pd_table.pd_entry = vmem->va;

			memcpy(&sd_entry->u.pd_table.pd_page_addr, &dma_mem,
			       sizeof(sd_entry->u.pd_table.pd_page_addr));
		} else {
			memcpy(&sd_entry->u.bp.addr, &dma_mem,
			       sizeof(sd_entry->u.bp.addr));

			sd_entry->u.bp.sd_pd_index = sd_index;
		}

		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
		hmc_info->sd_table.use_cnt++;
	}
	/* direct SDs are refcounted per caller; paged SDs count PDs instead */
	if (sd_entry->entry_type == IRDMA_SD_TYPE_DIRECT)
		sd_entry->u.bp.use_cnt++;

	return 0;
}
0525 
/**
 * irdma_add_pd_table_entry - Adds page descriptor to the specified table
 * @dev: pointer to our device structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
 *
 * This function:
 *  1. Initializes the pd entry
 *  2. Adds pd_entry in the pd_table
 *  3. Mark the entry valid in irdma_hmc_pd_entry structure
 *  4. Initializes the pd_entry's ref count to 1
 * assumptions:
 *  1. The memory for pd should be pinned down, physically contiguous and
 *     aligned on 4K boundary and zeroed memory.
 *  2. It should be 4K in size.
 */
int irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
			     struct irdma_hmc_info *hmc_info, u32 pd_index,
			     struct irdma_dma_mem *rsrc_pg)
{
	struct irdma_hmc_pd_table *pd_table;
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_dma_mem mem;
	struct irdma_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	/* the covering SD must exist in the table */
	if (pd_index / IRDMA_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
		return -EINVAL;

	sd_idx = (pd_index / IRDMA_HMC_PD_CNT_IN_SD);
	/* direct SDs have no PD table; nothing to add */
	if (hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
	    IRDMA_SD_TYPE_PAGED)
		return 0;

	rel_pd_idx = (pd_index % IRDMA_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		if (rsrc_pg) {
			/* caller supplied the backing page; don't free it here */
			pd_entry->rsrc_pg = true;
			page = rsrc_pg;
		} else {
			page->size = ALIGN(IRDMA_HMC_PAGED_BP_SIZE,
					   IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
			page->va = dma_alloc_coherent(dev->hw->device,
						      page->size, &page->pa,
						      GFP_KERNEL);
			if (!page->va)
				return -ENOMEM;

			pd_entry->rsrc_pg = false;
		}

		memcpy(&pd_entry->bp.addr, page, sizeof(pd_entry->bp.addr));
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = IRDMA_SD_TYPE_PAGED;
		/* low bit marks the descriptor valid for the hardware */
		page_desc = page->pa | 0x1;
		pd_addr = pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;
		/* write the descriptor into the SD's PD page */
		memcpy(pd_addr, &page_desc, sizeof(*pd_addr));
		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		pd_table->use_cnt++;
		/* make the hardware re-read the updated descriptor */
		irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
	}
	pd_entry->bp.use_cnt++;

	return 0;
}
0598 
/**
 * irdma_remove_pd_bp - remove a backing page from a page descriptor
 * @dev: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *  1. Marks the entry in pd table (for paged address mode) or in sd table
 *     (for direct address mode) invalid.
 *  2. Write to register PMPDINV to invalidate the backing page in FV cache
 *  3. Decrement the ref count for the pd _entry
 * assumptions:
 *  1. Caller can deallocate the memory used by backing storage after this
 *     function returns.
 */
int irdma_remove_pd_bp(struct irdma_sc_dev *dev,
		       struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_hmc_pd_table *pd_table;
	struct irdma_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	struct irdma_dma_mem *mem;
	u64 *pd_addr;

	sd_idx = idx / IRDMA_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % IRDMA_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt)
		return -EINVAL;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	/* only paged SDs carry per-PD backing pages */
	if (sd_entry->entry_type != IRDMA_SD_TYPE_PAGED)
		return -EINVAL;

	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	/* still referenced by other users: keep the page */
	if (--pd_entry->bp.use_cnt)
		return 0;

	pd_entry->valid = false;
	pd_table->use_cnt--;
	/* clear the descriptor in the SD's PD page ... */
	pd_addr = pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	memset(pd_addr, 0, sizeof(u64));
	/* ... and flush it from the hardware PD cache */
	irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);

	/* only free pages we allocated ourselves (not caller-supplied) */
	if (!pd_entry->rsrc_pg) {
		mem = &pd_entry->bp.addr;
		if (!mem || !mem->va)
			return -EINVAL;

		dma_free_coherent(dev->hw->device, mem->size, mem->va,
				  mem->pa);
		mem->va = NULL;
	}
	/* last PD gone: drop the bookkeeping array too */
	if (!pd_table->use_cnt)
		kfree(pd_table->pd_entry_virt_mem.va);

	return 0;
}
0659 
0660 /**
0661  * irdma_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
0662  * @hmc_info: pointer to the HMC configuration information structure
0663  * @idx: the page index
0664  */
0665 int irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx)
0666 {
0667     struct irdma_hmc_sd_entry *sd_entry;
0668 
0669     sd_entry = &hmc_info->sd_table.sd_entry[idx];
0670     if (--sd_entry->u.bp.use_cnt)
0671         return -EBUSY;
0672 
0673     hmc_info->sd_table.use_cnt--;
0674     sd_entry->valid = false;
0675 
0676     return 0;
0677 }
0678 
0679 /**
0680  * irdma_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
0681  * @hmc_info: pointer to the HMC configuration information structure
0682  * @idx: segment descriptor index to find the relevant page descriptor
0683  */
0684 int irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx)
0685 {
0686     struct irdma_hmc_sd_entry *sd_entry;
0687 
0688     sd_entry = &hmc_info->sd_table.sd_entry[idx];
0689 
0690     if (sd_entry->u.pd_table.use_cnt)
0691         return -EBUSY;
0692 
0693     sd_entry->valid = false;
0694     hmc_info->sd_table.use_cnt--;
0695 
0696     return 0;
0697 }