/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */

#ifndef _MLX5_IB_UMR_H
#define _MLX5_IB_UMR_H

#include "mlx5_ib.h"

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

#define MLX5_IB_UMR_OCTOWORD           16
#define MLX5_IB_UMR_XLT_ALIGNMENT      64

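/*
 * Create/destroy the PD, CQ and QP that the driver uses to post UMR work
 * requests.
 */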
int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev);
void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev);

static inline bool mlx5r_umr_can_load_pas(struct mlx5_ib_dev *dev,
					  size_t length)
{
	/*
	 * umr_check_mkey_mask() rejects MLX5_MKEY_MASK_PAGE_SIZE, which is
	 * always set if MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (aka
	 * MLX5_IB_UPD_XLT_ADDR and MLX5_IB_UPD_XLT_ENABLE) is set. Thus, a
	 * mkey can never be enabled without this capability. Simplify
	 * handling of this hardware quirk by just saying such devices can't
	 * use PAS lists with UMR at all.
	 */
	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return false;

	/*
	 * length is the size of the MR in bytes when mlx5_ib_update_xlt() is
	 * used.
	 */
	if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
	    length >= MLX5_MAX_UMR_PAGES * PAGE_SIZE)
		return false;
	return true;
}
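
/*
 * Callers (e.g. the MR registration and rereg flows) are expected to fall
 * back to creating the mkey with a FW command when this returns false,
 * since such a mkey's translation can never be posted through UMR.
 */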

/*
 * Returns true if an existing MR can be reconfigured to new access_flags
 * using UMR. Older HW cannot use UMR to update certain elements of the
 * MKC. See get_umr_update_access_mask() and umr_check_mkey_mask().
 */
static inline bool mlx5r_umr_can_reconfig(struct mlx5_ib_dev *dev,
					  unsigned int current_access_flags,
					  unsigned int target_access_flags)
{
	unsigned int diffs = current_access_flags ^ target_access_flags;

	if ((diffs & IB_ACCESS_REMOTE_ATOMIC) &&
	    MLX5_CAP_GEN(dev->mdev, atomic) &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return false;

	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
		return false;

	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
		return false;

	return true;
}
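
/*
 * Sketch of typical use (not verbatim from this driver): the rereg path
 * only attempts a UMR-based update when this returns true, e.g.
 *
 *	if (mlx5r_umr_can_reconfig(dev, mr->access_flags, new_access_flags))
 *		err = mlx5r_umr_rereg_pd_access(mr, new_pd, new_access_flags);
 *
 * and otherwise re-registers the MR from scratch.
 */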

/*
 * Convert a byte count into the number of 16-byte octowords it occupies in
 * an XLT, after rounding up to the required 64-byte XLT alignment. For
 * example, mlx5r_umr_get_xlt_octo(100) = ALIGN(100, 64) / 16 = 128 / 16 = 8.
 */
static inline u64 mlx5r_umr_get_xlt_octo(u64 bytes)
{
	return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
	       MLX5_IB_UMR_OCTOWORD;
}

/*
 * Context used to wait for a UMR work request to complete: the CQE
 * callback records the completion status and signals @done.
 */
struct mlx5r_umr_context {
	struct ib_cqe cqe;
	enum ib_wc_status status;
	struct completion done;
};

/* The segments making up a UMR work queue entry. */
struct mlx5r_umr_wqe {
	struct mlx5_wqe_umr_ctrl_seg ctrl_seg;
	struct mlx5_mkey_seg mkey_seg;
	struct mlx5_wqe_data_seg data_seg;
};

/* Revoke the mkey so that HW can no longer access the MR's pages. */
int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr);

/* Move an existing MR to a new PD and/or new access flags using UMR. */
int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
			      int access_flags);

/*
 * Rewrite the MR's entire page address list (PAS); @flags are
 * MLX5_IB_UPD_XLT_* values.
 */
int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);

/* Update @npages XLT entries starting at entry @idx. */
int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
			 int page_shift, int flags);

#endif /* _MLX5_IB_UMR_H */