#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mlx5/driver.h>

#include "mlx5_core.h"

struct mlx5_db_pgdir {
	struct list_head	list;
	unsigned long		*bitmap;
	__be32			*db_page;
	dma_addr_t		db_dma;
};
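
/* Handling for queue buffers and doorbell records: memory is allocated
 * DMA-coherent so that both the CPU and the HCA can access it, and it can
 * be pinned to a caller-supplied NUMA node via the helper below.
 */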
static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
					   size_t size, dma_addr_t *dma_handle,
					   int node)
{
	struct device *device = mlx5_core_dma_dev(dev);
	struct mlx5_priv *priv = &dev->priv;
	int original_node;
	void *cpu_handle;

	/* Temporarily point the DMA device at the requested NUMA node so
	 * dma_alloc_coherent() allocates from that node; alloc_mutex
	 * serializes concurrent users of this node-override trick.
	 */
	mutex_lock(&priv->alloc_mutex);
	original_node = dev_to_node(device);
	set_dev_node(device, node);
	cpu_handle = dma_alloc_coherent(device, size, dma_handle,
					GFP_KERNEL);
	set_dev_node(device, original_node);
	mutex_unlock(&priv->alloc_mutex);
	return cpu_handle;
}
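
/* mlx5_frag_buf_alloc_node() - allocate @size bytes as a set of page-sized,
 * DMA-coherent fragments on NUMA node @node.
 *
 * Illustrative usage sketch (not part of the original file; byte_size,
 * numa_node and the pas[] destination array are hypothetical caller state):
 *
 *	struct mlx5_frag_buf buf;
 *	int err;
 *
 *	err = mlx5_frag_buf_alloc_node(dev, byte_size, &buf, numa_node);
 *	if (err)
 *		return err;
 *	mlx5_fill_page_frag_array(&buf, pas);
 *	...
 *	mlx5_frag_buf_free(dev, &buf);
 *
 * pas[] must have room for buf.npages big-endian entries.
 */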
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node)
{
	int i;

	buf->size = size;
	buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
	buf->page_shift = PAGE_SHIFT;
	buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
			     GFP_KERNEL);
	if (!buf->frags)
		goto err_out;

	for (i = 0; i < buf->npages; i++) {
		struct mlx5_buf_list *frag = &buf->frags[i];
		int frag_sz = min_t(int, size, PAGE_SIZE);

		frag->buf = mlx5_dma_zalloc_coherent_node(dev, frag_sz,
							  &frag->map, node);
		if (!frag->buf)
			goto err_free_buf;
		/* The hardware expects page-aligned fragment addresses. */
		if (frag->map & ((1 << buf->page_shift) - 1)) {
			dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz,
					  buf->frags[i].buf, buf->frags[i].map);
			mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
				       &frag->map, buf->page_shift);
			goto err_free_buf;
		}
		size -= frag_sz;
	}

	return 0;

err_free_buf:
	while (i--)
		dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE, buf->frags[i].buf,
				  buf->frags[i].map);
	kfree(buf->frags);
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx5_frag_buf_alloc_node);
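
/* mlx5_frag_buf_free() - release every fragment allocated by
 * mlx5_frag_buf_alloc_node() along with the frags[] bookkeeping array.
 */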
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
	int size = buf->size;
	int i;

	for (i = 0; i < buf->npages; i++) {
		int frag_sz = min_t(int, size, PAGE_SIZE);

		dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz, buf->frags[i].buf,
				  buf->frags[i].map);
		size -= frag_sz;
	}
	kfree(buf->frags);
}
EXPORT_SYMBOL_GPL(mlx5_frag_buf_free);
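
/* Doorbell records are packed into DMA pages: each page is carved into
 * cache-line sized slots and pgdir->bitmap tracks which slots are still
 * free (a set bit means the slot is free). Page directories are linked on
 * dev->priv.pgdir_list under dev->priv.pgdir_mutex.
 */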
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
						 int node)
{
	u32 db_per_page = PAGE_SIZE / cache_line_size();
	struct mlx5_db_pgdir *pgdir;

	pgdir = kzalloc_node(sizeof(*pgdir), GFP_KERNEL, node);
	if (!pgdir)
		return NULL;

	pgdir->bitmap = bitmap_zalloc_node(db_per_page, GFP_KERNEL, node);
	if (!pgdir->bitmap) {
		kfree(pgdir);
		return NULL;
	}

	/* All doorbell slots in a fresh page start out free. */
	bitmap_fill(pgdir->bitmap, db_per_page);

	pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
						       &pgdir->db_dma, node);
	if (!pgdir->db_page) {
		bitmap_free(pgdir->bitmap);
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
				    struct mlx5_db *db)
{
	u32 db_per_page = PAGE_SIZE / cache_line_size();
	int offset;
	int i;

	i = find_first_bit(pgdir->bitmap, db_per_page);
	if (i >= db_per_page)
		return -ENOMEM;

	/* Claim slot i and hand out its CPU and DMA addresses. */
	__clear_bit(i, pgdir->bitmap);

	db->u.pgdir = pgdir;
	db->index = i;
	offset = db->index * cache_line_size();
	db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
	db->dma = pgdir->db_dma + offset;

	/* Start with a clean doorbell record (both 32-bit words zeroed). */
	db->db[0] = 0;
	db->db[1] = 0;

	return 0;
}
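
/* mlx5_db_alloc_node() - hand out one doorbell record, reusing an existing
 * page directory with a free slot when possible and allocating a fresh one
 * otherwise.
 *
 * Illustrative usage sketch (not part of the original file; the value
 * written and the numa_node variable are hypothetical):
 *
 *	struct mlx5_db db;
 *	int err;
 *
 *	err = mlx5_db_alloc_node(dev, &db, numa_node);
 *	if (err)
 *		return err;
 *	db.db[0] = cpu_to_be32(consumer_index);
 *	...
 *	mlx5_db_free(dev, &db);
 *
 * The record lives in coherent memory; the device reads it via db.dma.
 */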
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
{
	struct mlx5_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&dev->priv.pgdir_mutex);

	list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
		if (!mlx5_alloc_db_from_pgdir(pgdir, db))
			goto out;

	pgdir = mlx5_alloc_db_pgdir(dev, node);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &dev->priv.pgdir_list);

	/* This should never fail -- we just allocated an empty page directory. */
	WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));

out:
	mutex_unlock(&dev->priv.pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
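
/* mlx5_db_free() - return a doorbell record to its page directory; once
 * every slot in the page is free again, the page and its directory are
 * released.
 */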
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	u32 db_per_page = PAGE_SIZE / cache_line_size();

	mutex_lock(&dev->priv.pgdir_mutex);

	__set_bit(db->index, db->u.pgdir->bitmap);

	if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
		dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		bitmap_free(db->u.pgdir->bitmap);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);
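
/* mlx5_fill_page_frag_array_perm() - write the DMA address of every
 * fragment into the caller-provided PAS array as big-endian values. The
 * addresses are page aligned, so their low bits are free to carry the
 * permission flags in @perm; only the two lowest bits may be set, and the
 * WARN_ON below catches anything else.
 */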
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm)
{
	int i;

	WARN_ON(perm & 0xfc);
	for (i = 0; i < buf->npages; i++)
		pas[i] = cpu_to_be64(buf->frags[i].map | perm);
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array_perm);

void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
{
	mlx5_fill_page_frag_array_perm(buf, pas, 0);
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array);