// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* Copyright (c) 2020 Marvell International Ltd. */

#include <linux/dma-mapping.h>
#include <linux/qed/qed_chain.h>
#include <linux/vmalloc.h>

#include "qed_dev_api.h"

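/* Fill in the software-side bookkeeping fields of a chain from the caller's
 * parameters. No DMA memory is touched here; the per-mode allocation helpers
 * below do that. Note that elem_per_page_mask is built as (elem_per_page - 1)
 * and used as an index mask, so the element and page sizes are expected to
 * make elem_per_page a power of two.
 */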
static void qed_chain_init(struct qed_chain *chain,
                           const struct qed_chain_init_params *params,
                           u32 page_cnt)
{
    memset(chain, 0, sizeof(*chain));

    chain->elem_size = params->elem_size;
    chain->intended_use = params->intended_use;
    chain->mode = params->mode;
    chain->cnt_type = params->cnt_type;

    chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size,
                                          params->page_size);
    chain->usable_per_page = USABLE_ELEMS_PER_PAGE(params->elem_size,
                                                   params->page_size,
                                                   params->mode);
    chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(params->elem_size,
                                                   params->mode);

    chain->elem_per_page_mask = chain->elem_per_page - 1;
    chain->next_page_mask = chain->usable_per_page &
                            chain->elem_per_page_mask;

    chain->page_size = params->page_size;
    chain->page_cnt = page_cnt;
    chain->capacity = chain->usable_per_page * page_cnt;
    chain->size = chain->elem_per_page * page_cnt;

    if (params->ext_pbl_virt) {
        chain->pbl_sp.table_virt = params->ext_pbl_virt;
        chain->pbl_sp.table_phys = params->ext_pbl_phys;

        chain->b_external_pbl = true;
    }
}

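/* In QED_CHAIN_MODE_NEXT_PTR chains, the tail of every page (the
 * elem_unusable slots past the usable elements) holds a struct
 * qed_chain_next linking to the following page. This helper writes that
 * link: the physical address in little-endian regpair form plus the kernel
 * virtual address of the next page.
 */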
static void qed_chain_init_next_ptr_elem(const struct qed_chain *chain,
                                         void *virt_curr, void *virt_next,
                                         dma_addr_t phys_next)
{
    struct qed_chain_next *next;
    u32 size;

    size = chain->elem_size * chain->usable_per_page;
    next = virt_curr + size;

    DMA_REGPAIR_LE(next->next_phys, phys_next);
    next->next_virt = virt_next;
}

static void qed_chain_init_mem(struct qed_chain *chain, void *virt_addr,
                               dma_addr_t phys_addr)
{
    chain->p_virt_addr = virt_addr;
    chain->p_phys_addr = phys_addr;
}

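/* Free the pages of a next-ptr chain by walking the links embedded in each
 * page. The walk ends after page_cnt pages or at the first NULL page
 * pointer, whichever comes first, so a chain that was only partially set up
 * can be handed to qed_chain_free() as well.
 */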
static void qed_chain_free_next_ptr(struct qed_dev *cdev,
                                    struct qed_chain *chain)
{
    struct device *dev = &cdev->pdev->dev;
    struct qed_chain_next *next;
    dma_addr_t phys, phys_next;
    void *virt, *virt_next;
    u32 size, i;

    size = chain->elem_size * chain->usable_per_page;
    virt = chain->p_virt_addr;
    phys = chain->p_phys_addr;

    for (i = 0; i < chain->page_cnt; i++) {
        if (!virt)
            break;

        next = virt + size;
        virt_next = next->next_virt;
        phys_next = HILO_DMA_REGPAIR(next->next_phys);

        dma_free_coherent(dev, chain->page_size, virt, phys);

        virt = virt_next;
        phys = phys_next;
    }
}

static void qed_chain_free_single(struct qed_dev *cdev,
                                  struct qed_chain *chain)
{
    if (!chain->p_virt_addr)
        return;

    dma_free_coherent(&cdev->pdev->dev, chain->page_size,
                      chain->p_virt_addr, chain->p_phys_addr);
}

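/* A PBL chain keeps two parallel tables: pbl.pp_addr_tbl, a vmalloc'ed table
 * of per-page virtual addresses and DMA handles used only by the driver, and
 * pbl_sp, the DMA-coherent page-base list. Free the data pages first, then
 * the PBL table itself (unless it was supplied externally by the caller),
 * and finally the address table.
 */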
static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
{
    struct device *dev = &cdev->pdev->dev;
    struct addr_tbl_entry *entry;
    u32 i;

    if (!chain->pbl.pp_addr_tbl)
        return;

    for (i = 0; i < chain->page_cnt; i++) {
        entry = chain->pbl.pp_addr_tbl + i;
        if (!entry->virt_addr)
            break;

        dma_free_coherent(dev, chain->page_size, entry->virt_addr,
                          entry->dma_map);
    }

    if (!chain->b_external_pbl)
        dma_free_coherent(dev, chain->pbl_sp.table_size,
                          chain->pbl_sp.table_virt,
                          chain->pbl_sp.table_phys);

    vfree(chain->pbl.pp_addr_tbl);
    chain->pbl.pp_addr_tbl = NULL;
}

/**
 * qed_chain_free() - Free chain DMA memory.
 *
 * @cdev: Main device structure.
 * @chain: Chain to free.
 */
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain)
{
    switch (chain->mode) {
    case QED_CHAIN_MODE_NEXT_PTR:
        qed_chain_free_next_ptr(cdev, chain);
        break;
    case QED_CHAIN_MODE_SINGLE:
        qed_chain_free_single(cdev, chain);
        break;
    case QED_CHAIN_MODE_PBL:
        qed_chain_free_pbl(cdev, chain);
        break;
    default:
        return;
    }

    qed_chain_init_mem(chain, NULL, 0);
}

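/* Reject configurations whose total element count, after rounding the
 * request up to whole pages and including the per-page next-ptr elements,
 * does not fit the chosen counter type, as well as empty chains and unknown
 * counter types.
 */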
static int
qed_chain_alloc_sanity_check(struct qed_dev *cdev,
                             const struct qed_chain_init_params *params,
                             u32 page_cnt)
{
    u64 chain_size;

    chain_size = ELEMS_PER_PAGE(params->elem_size, params->page_size);
    chain_size *= page_cnt;

    if (!chain_size)
        return -EINVAL;

    /* The actual chain size can be larger than the maximal possible value
     * after rounding up the requested elements number to pages, and after
     * taking into account the unusable elements (next-ptr elements).
     * The size of a "u16" chain can be (U16_MAX + 1) since the chain
     * size/capacity fields are of u32 type.
     */
    switch (params->cnt_type) {
    case QED_CHAIN_CNT_TYPE_U16:
        if (chain_size > U16_MAX + 1)
            break;

        return 0;
    case QED_CHAIN_CNT_TYPE_U32:
        if (chain_size > U32_MAX)
            break;

        return 0;
    default:
        return -EINVAL;
    }

    DP_NOTICE(cdev,
              "The actual chain size (0x%llx) is larger than the maximal possible value\n",
              chain_size);

    return -EINVAL;
}

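/* Allocate page_cnt DMA-coherent pages and link them through the
 * qed_chain_next area at the tail of each page. The last page is linked back
 * to the first, making the chain circular. If an allocation fails partway,
 * the pages linked so far are released by qed_chain_free() in the caller.
 */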
static int qed_chain_alloc_next_ptr(struct qed_dev *cdev,
                                    struct qed_chain *chain)
{
    struct device *dev = &cdev->pdev->dev;
    void *virt, *virt_prev = NULL;
    dma_addr_t phys;
    u32 i;

    for (i = 0; i < chain->page_cnt; i++) {
        virt = dma_alloc_coherent(dev, chain->page_size, &phys,
                                  GFP_KERNEL);
        if (!virt)
            return -ENOMEM;

        if (i == 0) {
            qed_chain_init_mem(chain, virt, phys);
            qed_chain_reset(chain);
        } else {
            qed_chain_init_next_ptr_elem(chain, virt_prev, virt,
                                         phys);
        }

        virt_prev = virt;
    }

    /* Last page's next element should point to the beginning of the
     * chain.
     */
    qed_chain_init_next_ptr_elem(chain, virt_prev, chain->p_virt_addr,
                                 chain->p_phys_addr);

    return 0;
}

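/* QED_CHAIN_MODE_SINGLE uses exactly one page (qed_chain_alloc() forces
 * page_cnt to 1), so there is nothing to link or to look up beyond the page
 * itself.
 */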
static int qed_chain_alloc_single(struct qed_dev *cdev,
                                  struct qed_chain *chain)
{
    dma_addr_t phys;
    void *virt;

    virt = dma_alloc_coherent(&cdev->pdev->dev, chain->page_size,
                              &phys, GFP_KERNEL);
    if (!virt)
        return -ENOMEM;

    qed_chain_init_mem(chain, virt, phys);
    qed_chain_reset(chain);

    return 0;
}

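/* Set up the page tables for a PBL chain: a vmalloc'ed address table for the
 * driver and, unless the caller provided an external PBL, a DMA-coherent
 * page-base list. Every data page is then allocated and recorded in both
 * tables. On failure, qed_chain_free() in the caller unwinds whatever was
 * set up so far.
 */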
static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain)
{
    struct device *dev = &cdev->pdev->dev;
    struct addr_tbl_entry *addr_tbl;
    dma_addr_t phys, pbl_phys;
    __le64 *pbl_virt;
    u32 page_cnt, i;
    size_t size;
    void *virt;

    page_cnt = chain->page_cnt;

    size = array_size(page_cnt, sizeof(*addr_tbl));
    if (unlikely(size == SIZE_MAX))
        return -EOVERFLOW;

    addr_tbl = vzalloc(size);
    if (!addr_tbl)
        return -ENOMEM;

    chain->pbl.pp_addr_tbl = addr_tbl;

    if (chain->b_external_pbl) {
        pbl_virt = chain->pbl_sp.table_virt;
        goto alloc_pages;
    }

    size = array_size(page_cnt, sizeof(*pbl_virt));
    if (unlikely(size == SIZE_MAX))
        return -EOVERFLOW;

    pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys, GFP_KERNEL);
    if (!pbl_virt)
        return -ENOMEM;

    chain->pbl_sp.table_virt = pbl_virt;
    chain->pbl_sp.table_phys = pbl_phys;
    chain->pbl_sp.table_size = size;

alloc_pages:
    for (i = 0; i < page_cnt; i++) {
        virt = dma_alloc_coherent(dev, chain->page_size, &phys,
                                  GFP_KERNEL);
        if (!virt)
            return -ENOMEM;

        if (i == 0) {
            qed_chain_init_mem(chain, virt, phys);
            qed_chain_reset(chain);
        }

        /* Fill the PBL table with the physical address of the page */
        pbl_virt[i] = cpu_to_le64(phys);

        /* Keep the virtual address of the page */
        addr_tbl[i].virt_addr = virt;
        addr_tbl[i].dma_map = phys;
    }

    return 0;
}

/**
 * qed_chain_alloc() - Allocate and initialize a chain.
 *
 * @cdev: Main device structure.
 * @chain: Chain to be processed.
 * @params: Chain initialization parameters.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
                    struct qed_chain_init_params *params)
{
    u32 page_cnt;
    int rc;

    if (!params->page_size)
        params->page_size = QED_CHAIN_PAGE_SIZE;

    if (params->mode == QED_CHAIN_MODE_SINGLE)
        page_cnt = 1;
    else
        page_cnt = QED_CHAIN_PAGE_CNT(params->num_elems,
                                      params->elem_size,
                                      params->page_size,
                                      params->mode);

    rc = qed_chain_alloc_sanity_check(cdev, params, page_cnt);
    if (rc) {
        DP_NOTICE(cdev,
                  "Cannot allocate a chain with the given arguments:\n");
        DP_NOTICE(cdev,
                  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu, page_size %u]\n",
                  params->intended_use, params->mode, params->cnt_type,
                  params->num_elems, params->elem_size,
                  params->page_size);
        return rc;
    }

    qed_chain_init(chain, params, page_cnt);

    switch (params->mode) {
    case QED_CHAIN_MODE_NEXT_PTR:
        rc = qed_chain_alloc_next_ptr(cdev, chain);
        break;
    case QED_CHAIN_MODE_SINGLE:
        rc = qed_chain_alloc_single(cdev, chain);
        break;
    case QED_CHAIN_MODE_PBL:
        rc = qed_chain_alloc_pbl(cdev, chain);
        break;
    default:
        return -EINVAL;
    }

    if (!rc)
        return 0;

    qed_chain_free(cdev, chain);

    return rc;
}
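
/*
 * Example usage (illustrative sketch only, not part of this file): a caller
 * typically fills a struct qed_chain_init_params on the stack and lets
 * page_size default to QED_CHAIN_PAGE_SIZE. The intended_use constant below
 * is assumed to come from qed_chain.h, and struct foo_hw_desc stands in for
 * a real hardware descriptor type.
 *
 *      struct qed_chain_init_params params = {
 *              .mode           = QED_CHAIN_MODE_PBL,
 *              .intended_use   = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *              .cnt_type       = QED_CHAIN_CNT_TYPE_U16,
 *              .num_elems      = 256,
 *              .elem_size      = sizeof(struct foo_hw_desc),
 *      };
 *      struct qed_chain chain;
 *      int rc;
 *
 *      rc = qed_chain_alloc(cdev, &chain, &params);
 *      if (rc)
 *              return rc;
 *
 *      ... produce/consume elements through the qed_chain accessors ...
 *
 *      qed_chain_free(cdev, &chain);
 */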