#include <linux/kernel.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"

static int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
        u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
        u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
        int err;

        MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
        err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
        if (err)
                return err;

        *uarn = MLX5_GET(alloc_uar_out, out, uar);
        return 0;
}

static int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
        u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};

        MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
        MLX5_SET(dealloc_uar_in, in, uar, uarn);
        return mlx5_cmd_exec_in(dev, dealloc_uar, in);
}

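/*
 * With the uar_4k capability the device exposes 4KB UARs, so several
 * UARs are packed into a single kernel (system) page; otherwise one
 * UAR occupies a full system page.
 */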
static int uars_per_sys_page(struct mlx5_core_dev *mdev)
{
        if (MLX5_CAP_GEN(mdev, uar_4k))
                return MLX5_CAP_GEN(mdev, num_of_uars_per_page);

        return 1;
}

static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
{
        u32 system_page_index;

        if (MLX5_CAP_GEN(mdev, uar_4k))
                system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT);
        else
                system_page_index = index;

        return (mdev->bar_addr >> PAGE_SHIFT) + system_page_index;
}

static void up_rel_func(struct kref *kref)
{
        struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);

        list_del(&up->list);
        iounmap(up->map);
        if (mlx5_cmd_free_uar(up->mdev, up->index))
                mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
        bitmap_free(up->reg_bitmap);
        bitmap_free(up->fp_bitmap);
        kfree(up);
}

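/*
 * Allocate a UARs page: reserve a UAR index from firmware, ioremap the
 * matching BAR region (write-combining or regular), and set up the
 * bitmaps that track free regular and fast-path bfregs on the page.
 */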
static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
                                              bool map_wc)
{
        struct mlx5_uars_page *up;
        int err = -ENOMEM;
        phys_addr_t pfn;
        int bfregs;
        int node;
        int i;

        bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
        node = mdev->priv.numa_node;
        up = kzalloc_node(sizeof(*up), GFP_KERNEL, node);
        if (!up)
                return ERR_PTR(err);

        up->mdev = mdev;
        up->reg_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node);
        if (!up->reg_bitmap)
                goto error1;

        up->fp_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node);
        if (!up->fp_bitmap)
                goto error1;

        for (i = 0; i < bfregs; i++)
                if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR)
                        set_bit(i, up->reg_bitmap);
                else
                        set_bit(i, up->fp_bitmap);

        up->bfregs = bfregs;
        up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
        up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;

        err = mlx5_cmd_alloc_uar(mdev, &up->index);
        if (err) {
                mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
                goto error1;
        }

        pfn = uar2pfn(mdev, up->index);
        if (map_wc) {
                up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
                if (!up->map) {
                        err = -EAGAIN;
                        goto error2;
                }
        } else {
                up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
                if (!up->map) {
                        err = -ENOMEM;
                        goto error2;
                }
        }
        kref_init(&up->ref_count);
        mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n",
                      up->index, up->bfregs);
        return up;

error2:
        if (mlx5_cmd_free_uar(mdev, up->index))
                mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index);
error1:
        bitmap_free(up->fp_bitmap);
        bitmap_free(up->reg_bitmap);
        kfree(up);
        return ERR_PTR(err);
}

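/**
 * mlx5_get_uars_page - get a reference to a shared regular UARs page
 * @mdev: mlx5 core device
 *
 * Reuses the first page on the regular (non write-combining) list if one
 * exists, otherwise allocates a new page and adds it to the list.  The
 * caller must release the reference with mlx5_put_uars_page().
 *
 * Return: pointer to the UARs page on success, ERR_PTR() on failure.
 */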
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
{
        struct mlx5_uars_page *ret;

        mutex_lock(&mdev->priv.bfregs.reg_head.lock);
        if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
                ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
                                       struct mlx5_uars_page, list);
                kref_get(&ret->ref_count);
                goto out;
        }
        ret = alloc_uars_page(mdev, false);
        if (IS_ERR(ret))
                goto out;
        list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
out:
        mutex_unlock(&mdev->priv.bfregs.reg_head.lock);

        return ret;
}
EXPORT_SYMBOL(mlx5_get_uars_page);

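/**
 * mlx5_put_uars_page - release a reference taken by mlx5_get_uars_page()
 * @mdev: mlx5 core device
 * @up: the UARs page to release
 *
 * When the last reference is dropped the page is removed from the list,
 * unmapped, and its UAR index is returned to firmware.
 */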
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up)
{
        mutex_lock(&mdev->priv.bfregs.reg_head.lock);
        kref_put(&up->ref_count, up_rel_func);
        mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
}
EXPORT_SYMBOL(mlx5_put_uars_page);

static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi)
{
        /* return the offset in bytes from the start of the page to the
         * blue flame area of the UAR
         */
        return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE +
               (dbi % MLX5_BFREGS_PER_UAR) *
               (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;
}

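/*
 * Take a free doorbell slot (bfreg) from the first page on the relevant
 * list (write-combining or regular), allocating a new page if the list
 * is empty.  A page is removed from the list once its last available
 * slot of the requested kind has been handed out.
 */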
static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
                       bool map_wc, bool fast_path)
{
        struct mlx5_bfreg_data *bfregs;
        struct mlx5_uars_page *up;
        struct list_head *head;
        unsigned long *bitmap;
        unsigned int *avail;
        struct mutex *lock;
        int dbi;

        bfregs = &mdev->priv.bfregs;
        if (map_wc) {
                head = &bfregs->wc_head.list;
                lock = &bfregs->wc_head.lock;
        } else {
                head = &bfregs->reg_head.list;
                lock = &bfregs->reg_head.lock;
        }
        mutex_lock(lock);
        if (list_empty(head)) {
                up = alloc_uars_page(mdev, map_wc);
                if (IS_ERR(up)) {
                        mutex_unlock(lock);
                        return PTR_ERR(up);
                }
                list_add(&up->list, head);
        } else {
                up = list_entry(head->next, struct mlx5_uars_page, list);
                kref_get(&up->ref_count);
        }
        if (fast_path) {
                bitmap = up->fp_bitmap;
                avail = &up->fp_avail;
        } else {
                bitmap = up->reg_bitmap;
                avail = &up->reg_avail;
        }
        dbi = find_first_bit(bitmap, up->bfregs);
        clear_bit(dbi, bitmap);
        (*avail)--;
        if (!(*avail))
                list_del(&up->list);

        bfreg->map = up->map + map_offset(mdev, dbi);
        bfreg->up = up;
        bfreg->wc = map_wc;
        bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR;
        mutex_unlock(lock);

        return 0;
}

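/**
 * mlx5_alloc_bfreg - allocate a blue flame register (doorbell slot)
 * @mdev: mlx5 core device
 * @bfreg: bfreg descriptor to fill in
 * @map_wc: request a write-combining mapping of the doorbell page
 * @fast_path: allocate a fast-path slot instead of a regular one
 *
 * If a write-combining mapping cannot be set up (-EAGAIN), fall back to
 * a regular (non-WC) mapping.
 *
 * Return: 0 on success, negative errno on failure.
 */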
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
                     bool map_wc, bool fast_path)
{
        int err;

        err = alloc_bfreg(mdev, bfreg, map_wc, fast_path);
        if (!err)
                return 0;

        if (err == -EAGAIN && map_wc)
                return alloc_bfreg(mdev, bfreg, false, fast_path);

        return err;
}
EXPORT_SYMBOL(mlx5_alloc_bfreg);

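/*
 * Inverse of map_offset(): derive the doorbell index within the system
 * page from the bfreg's mapped address.
 */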
static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev,
                                           struct mlx5_uars_page *up,
                                           struct mlx5_sq_bfreg *bfreg)
{
        unsigned int uar_idx;
        unsigned int bfreg_idx;
        unsigned int bf_reg_size;

        bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size);

        uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT;
        bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size;

        return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx;
}

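/**
 * mlx5_free_bfreg - return a bfreg allocated by mlx5_alloc_bfreg()
 * @mdev: mlx5 core device
 * @bfreg: the bfreg to free
 *
 * Marks the doorbell slot as free, puts the page back on its list if
 * this is the first slot of its kind to become available again, and
 * drops the page reference.
 */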
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
{
        struct mlx5_bfreg_data *bfregs;
        struct mlx5_uars_page *up;
        struct mutex *lock;
        unsigned int dbi;
        bool fp;
        unsigned int *avail;
        unsigned long *bitmap;
        struct list_head *head;

        bfregs = &mdev->priv.bfregs;
        if (bfreg->wc) {
                head = &bfregs->wc_head.list;
                lock = &bfregs->wc_head.lock;
        } else {
                head = &bfregs->reg_head.list;
                lock = &bfregs->reg_head.lock;
        }
        up = bfreg->up;
        dbi = addr_to_dbi_in_syspage(mdev, up, bfreg);
        fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR;
        if (fp) {
                avail = &up->fp_avail;
                bitmap = up->fp_bitmap;
        } else {
                avail = &up->reg_avail;
                bitmap = up->reg_bitmap;
        }
        mutex_lock(lock);
        (*avail)++;
        set_bit(dbi, bitmap);
        if (*avail == 1)
                list_add_tail(&up->list, head);

        kref_put(&up->ref_count, up_rel_func);
        mutex_unlock(lock);
}
EXPORT_SYMBOL(mlx5_free_bfreg);
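
/*
 * Illustrative usage sketch (not part of this file): a typical consumer
 * pairs mlx5_alloc_bfreg()/mlx5_free_bfreg() around the lifetime of a
 * send queue.  The caller's structure and function names below are
 * hypothetical.
 *
 *	struct my_sq {
 *		struct mlx5_sq_bfreg bfreg;
 *	};
 *
 *	static int my_sq_create(struct mlx5_core_dev *mdev, struct my_sq *sq)
 *	{
 *		int err;
 *
 *		// Request a write-combining, regular (non fast-path) slot.
 *		err = mlx5_alloc_bfreg(mdev, &sq->bfreg, true, false);
 *		if (err)
 *			return err;
 *		// sq->bfreg.map is the doorbell/blue flame address;
 *		// sq->bfreg.index is the UAR index for the SQ context.
 *		return 0;
 *	}
 *
 *	static void my_sq_destroy(struct mlx5_core_dev *mdev, struct my_sq *sq)
 *	{
 *		mlx5_free_bfreg(mdev, &sq->bfreg);
 *	}
 */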