/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mlx5/driver.h>

#include "mlx5_core.h"

struct mlx5_db_pgdir {
    struct list_head    list;
    unsigned long      *bitmap;
    __be32             *db_page;
    dma_addr_t          db_dma;
};

/* Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.
 */

static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
                       size_t size, dma_addr_t *dma_handle,
                       int node)
{
    struct device *device = mlx5_core_dma_dev(dev);
    struct mlx5_priv *priv = &dev->priv;
    int original_node;
    void *cpu_handle;

    mutex_lock(&priv->alloc_mutex);
    original_node = dev_to_node(device);
    set_dev_node(device, node);
    cpu_handle = dma_alloc_coherent(device, size, dma_handle,
                    GFP_KERNEL);
    set_dev_node(device, original_node);
    mutex_unlock(&priv->alloc_mutex);
    return cpu_handle;
}
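
/* dma_alloc_coherent() takes no NUMA-node argument, so the helper above
 * saves dev_to_node(), temporarily overrides it with set_dev_node(), and
 * restores it, with priv->alloc_mutex serializing concurrent overrides.
 * A minimal caller sketch ('my_size' and 'my_node' are hypothetical, not
 * part of this file); the buffer is freed the same way this file frees
 * its own allocations:
 *
 *    dma_addr_t dma;
 *    void *buf;
 *
 *    buf = mlx5_dma_zalloc_coherent_node(dev, my_size, &dma, my_node);
 *    if (!buf)
 *        return -ENOMEM;
 *    ...
 *    dma_free_coherent(mlx5_core_dma_dev(dev), my_size, buf, dma);
 */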

int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                 struct mlx5_frag_buf *buf, int node)
{
    int i;

    buf->size = size;
    buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
    buf->page_shift = PAGE_SHIFT;
    buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
                 GFP_KERNEL);
    if (!buf->frags)
        goto err_out;

    for (i = 0; i < buf->npages; i++) {
        struct mlx5_buf_list *frag = &buf->frags[i];
        int frag_sz = min_t(int, size, PAGE_SIZE);

        frag->buf = mlx5_dma_zalloc_coherent_node(dev, frag_sz,
                              &frag->map, node);
        if (!frag->buf)
            goto err_free_buf;
        if (frag->map & ((1 << buf->page_shift) - 1)) {
            dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz,
                      buf->frags[i].buf, buf->frags[i].map);
            mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
                       &frag->map, buf->page_shift);
            goto err_free_buf;
        }
        size -= frag_sz;
    }

    return 0;

err_free_buf:
    while (i--)
        dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE, buf->frags[i].buf,
                  buf->frags[i].map);
    kfree(buf->frags);
err_out:
    return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx5_frag_buf_alloc_node);
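
/* Usage sketch (illustrative, not part of this file): callers request a
 * size in bytes and pair the allocation with mlx5_frag_buf_free() below.
 * Each fragment is at most one page, so a 10000-byte request with
 * 4096-byte pages yields npages = DIV_ROUND_UP(10000, 4096) = 3 fragments
 * of 4096, 4096 and 1808 bytes:
 *
 *    struct mlx5_frag_buf buf;
 *    int err;
 *
 *    err = mlx5_frag_buf_alloc_node(dev, 10000, &buf, dev->priv.numa_node);
 *    if (err)
 *        return err;
 *    ...
 *    mlx5_frag_buf_free(dev, &buf);
 */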

void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
    int size = buf->size;
    int i;

    for (i = 0; i < buf->npages; i++) {
        int frag_sz = min_t(int, size, PAGE_SIZE);

        dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz, buf->frags[i].buf,
                  buf->frags[i].map);
        size -= frag_sz;
    }
    kfree(buf->frags);
}
EXPORT_SYMBOL_GPL(mlx5_frag_buf_free);

static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
                         int node)
{
    u32 db_per_page = PAGE_SIZE / cache_line_size();
    struct mlx5_db_pgdir *pgdir;

    pgdir = kzalloc_node(sizeof(*pgdir), GFP_KERNEL, node);
    if (!pgdir)
        return NULL;

    pgdir->bitmap = bitmap_zalloc_node(db_per_page, GFP_KERNEL, node);
    if (!pgdir->bitmap) {
        kfree(pgdir);
        return NULL;
    }

    bitmap_fill(pgdir->bitmap, db_per_page);

    pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
                               &pgdir->db_dma, node);
    if (!pgdir->db_page) {
        bitmap_free(pgdir->bitmap);
        kfree(pgdir);
        return NULL;
    }

    return pgdir;
}
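
/* Each pgdir carves one coherent page into doorbell records of one cache
 * line apiece; a set bit in pgdir->bitmap marks a free slot, which is why
 * the bitmap is filled right after allocation. For example, with
 * PAGE_SIZE = 4096 and cache_line_size() = 64, a pgdir holds
 * 4096 / 64 = 64 doorbell records.
 */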

static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
                    struct mlx5_db *db)
{
    u32 db_per_page = PAGE_SIZE / cache_line_size();
    int offset;
    int i;

    i = find_first_bit(pgdir->bitmap, db_per_page);
    if (i >= db_per_page)
        return -ENOMEM;

    __clear_bit(i, pgdir->bitmap);

    db->u.pgdir = pgdir;
    db->index   = i;
    offset = db->index * cache_line_size();
    db->db      = pgdir->db_page + offset / sizeof(*pgdir->db_page);
    db->dma     = pgdir->db_dma  + offset;

    db->db[0] = 0;
    db->db[1] = 0;

    return 0;
}
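
/* Continuing the example above: handing out slot i = 5 with a 64-byte
 * cache line gives offset = 5 * 64 = 320 bytes, so db->db points
 * 320 / sizeof(__be32) = 80 entries into the CPU mapping of the page and
 * db->dma is the page's bus address plus 320. Clearing db->db[0] and
 * db->db[1] hands the caller a zeroed doorbell record.
 */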

int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
{
    struct mlx5_db_pgdir *pgdir;
    int ret = 0;

    mutex_lock(&dev->priv.pgdir_mutex);

    list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
        if (!mlx5_alloc_db_from_pgdir(pgdir, db))
            goto out;

    pgdir = mlx5_alloc_db_pgdir(dev, node);
    if (!pgdir) {
        ret = -ENOMEM;
        goto out;
    }

    list_add(&pgdir->list, &dev->priv.pgdir_list);

    /* This should never fail -- we just allocated an empty page: */
    WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));

out:
    mutex_unlock(&dev->priv.pgdir_mutex);

    return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
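
/* Usage sketch (illustrative, not part of this file): a queue allocates
 * one doorbell record at creation time and releases it with mlx5_db_free()
 * below at teardown; records from all queues share the pgdir pages:
 *
 *    struct mlx5_db db;
 *    int err;
 *
 *    err = mlx5_db_alloc_node(dev, &db, dev->priv.numa_node);
 *    if (err)
 *        return err;
 *    ...
 *    mlx5_db_free(dev, &db);
 */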

void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
    u32 db_per_page = PAGE_SIZE / cache_line_size();

    mutex_lock(&dev->priv.pgdir_mutex);

    __set_bit(db->index, db->u.pgdir->bitmap);

    if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
        dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE,
                  db->u.pgdir->db_page, db->u.pgdir->db_dma);
        list_del(&db->u.pgdir->list);
        bitmap_free(db->u.pgdir->bitmap);
        kfree(db->u.pgdir);
    }

    mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);

void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm)
{
    int i;

    WARN_ON(perm & 0xfc);
    for (i = 0; i < buf->npages; i++)
        pas[i] = cpu_to_be64(buf->frags[i].map | perm);
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array_perm);
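
/* Every fragment's DMA address is page-aligned (enforced in
 * mlx5_frag_buf_alloc_node()), so the low bits of each big-endian PAS
 * entry are free to carry the two permission bits that
 * WARN_ON(perm & 0xfc) above allows. A sketch of filling a command's PAS
 * array from a previously allocated 'buf' (names are illustrative, not
 * tied to any specific firmware command):
 *
 *    __be64 *pas;
 *
 *    pas = kcalloc(buf.npages, sizeof(*pas), GFP_KERNEL);
 *    if (!pas)
 *        return -ENOMEM;
 *    mlx5_fill_page_frag_array(&buf, pas);
 */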

void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
{
    mlx5_fill_page_frag_array_perm(buf, pas, 0);
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array);