0001 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
0002 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
0003 
0004 #include <linux/kernel.h>
0005 #include <linux/types.h>
0006 #include <linux/dcbnl.h>
0007 #include <linux/if_ether.h>
0008 #include <linux/list.h>
0009 #include <linux/netlink.h>
0010 
0011 #include "spectrum.h"
0012 #include "core.h"
0013 #include "port.h"
0014 #include "reg.h"
0015 
/* Cached shared-buffer pool (SBPR) configuration for one pool. */
struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;	/* Static or dynamic thresholding. */
	u32 size;			/* Pool size, in cells. */
	/* When set, the corresponding attribute must not be changed at
	 * runtime (see the freeze_* checks elsewhere in the driver).
	 */
	u8 freeze_mode:1,
	   freeze_size:1;
};
0022 
/* Occupancy sample: current fill level and max watermark, in cells.
 * NOTE(review): tag reads "cp" where the sibling structs use "sp" --
 * looks like a historical typo; kept as-is since it is referenced below.
 */
struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};
0027 
/* Cached per-{port, PG/TC} buffer (SBCM) configuration. */
struct mlxsw_sp_sb_cm {
	u32 min_buff;			/* Guaranteed quota, in cells. */
	u32 max_buff;			/* Max quota or dynamic alpha. */
	u16 pool_index;			/* Index into the pool descriptors. */
	struct mlxsw_cp_sb_occ occ;	/* Last queried occupancy. */
	/* When set, the corresponding attribute must not be changed at
	 * runtime.
	 */
	u8 freeze_pool:1,
	   freeze_thresh:1;
};

/* Sentinel sizes: infinite quota, and "whatever is left of the buffer". */
#define MLXSW_SP_SB_INFI -1U
#define MLXSW_SP_SB_REST -2U
0039 
/* Cached per-{port, pool} (SBPM) configuration and occupancy. */
struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;	/* Last queried occupancy. */
};
0045 
/* Per-priority multicast (SBMM) quota configuration. */
struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;			/* Index into the pool descriptors. */
};
0051 
/* Pool descriptor: maps a flat pool index onto the hardware's
 * (direction, pool number) pair.
 */
struct mlxsw_sp_sb_pool_des {
	enum mlxsw_reg_sbxx_dir dir;
	u8 pool;
};

/* Well-known flat indices into the pool descriptor arrays below. */
#define MLXSW_SP_SB_POOL_ING		0
#define MLXSW_SP_SB_POOL_EGR		4
#define MLXSW_SP_SB_POOL_EGR_MC		8
#define MLXSW_SP_SB_POOL_ING_CPU	9
#define MLXSW_SP_SB_POOL_EGR_CPU	10
0062 
/* Spectrum-1 pool descriptors; order must match the MLXSW_SP_SB_POOL_*
 * indices above (ING=0..3, EGR=4..7, EGR_MC=8, CPU pools last).
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},	/* Multicast pool. */
	{MLXSW_REG_SBXX_DIR_INGRESS, 4},	/* CPU ingress pool. */
	{MLXSW_REG_SBXX_DIR_EGRESS, 4},		/* CPU egress pool. */
};
0076 
/* Spectrum-2 pool descriptors; same layout as the Spectrum-1 table and
 * the MLXSW_SP_SB_POOL_* indices above.
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},	/* Multicast pool. */
	{MLXSW_REG_SBXX_DIR_INGRESS, 4},	/* CPU ingress pool. */
	{MLXSW_REG_SBXX_DIR_EGRESS, 4},		/* CPU egress pool. */
};
0090 
/* Number of traffic classes per direction (ingress PGs vs. egress TCs). */
#define MLXSW_SP_SB_ING_TC_COUNT 8
#define MLXSW_SP_SB_EG_TC_COUNT 16

/* Per-port shared-buffer state. */
struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
	struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
	struct mlxsw_sp_sb_pm *pms;	/* One entry per pool; allocated at init. */
};
0099 
/* Top-level shared-buffer state for one device. */
struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr *prs;	/* One entry per pool. */
	struct mlxsw_sp_sb_port *ports;	/* Indexed by local port. */
	u32 cell_size;			/* Buffer cell size, in bytes. */
	u32 max_headroom_cells;		/* Per-port headroom budget. */
	u64 sb_size;			/* Total shared buffer, in bytes. */
};
0107 
/* Per-ASIC (Spectrum-1 vs. Spectrum-2) default shared-buffer tables. */
struct mlxsw_sp_sb_vals {
	unsigned int pool_count;
	const struct mlxsw_sp_sb_pool_des *pool_dess;
	const struct mlxsw_sp_sb_pm *pms;
	const struct mlxsw_sp_sb_pm *pms_cpu;
	const struct mlxsw_sp_sb_pr *prs;
	const struct mlxsw_sp_sb_mm *mms;
	const struct mlxsw_sp_sb_cm *cms_ingress;
	const struct mlxsw_sp_sb_cm *cms_egress;
	const struct mlxsw_sp_sb_cm *cms_cpu;
	unsigned int mms_count;
	unsigned int cms_ingress_count;
	unsigned int cms_egress_count;
	unsigned int cms_cpu_count;
};
0123 
/* Per-ASIC callbacks; int_buf_size_get() returns the internal buffer
 * size in bytes for a given MTU and port speed.
 */
struct mlxsw_sp_sb_ops {
	u32 (*int_buf_size_get)(int mtu, u32 speed);
};
0127 
0128 u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
0129 {
0130     return mlxsw_sp->sb->cell_size * cells;
0131 }
0132 
0133 u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
0134 {
0135     return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
0136 }
0137 
0138 static u32 mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
0139                         u32 size_cells)
0140 {
0141     /* Ports with eight lanes use two headroom buffers between which the
0142      * configured headroom size is split. Therefore, multiply the calculated
0143      * headroom size by two.
0144      */
0145     return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
0146 }
0147 
0148 static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
0149                          u16 pool_index)
0150 {
0151     return &mlxsw_sp->sb->prs[pool_index];
0152 }
0153 
0154 static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
0155 {
0156     if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
0157         return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
0158     else
0159         return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
0160 }
0161 
0162 static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
0163                          u16 local_port, u8 pg_buff,
0164                          enum mlxsw_reg_sbxx_dir dir)
0165 {
0166     struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];
0167 
0168     WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
0169     if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
0170         return &sb_port->ing_cms[pg_buff];
0171     else
0172         return &sb_port->eg_cms[pg_buff];
0173 }
0174 
0175 static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
0176                          u16 local_port, u16 pool_index)
0177 {
0178     return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
0179 }
0180 
/* Program one pool via the SBPR register and mirror the result into the
 * cached pool record. Returns 0 or a negative errno from the register
 * write; the cache is only updated on success.
 */
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				enum mlxsw_reg_sbpr_mode mode,
				u32 size, bool infi_size)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	struct mlxsw_sp_sb_pr *pr;
	int err;

	mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
			    size, infi_size);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	/* Cache the effective size: an "infinite" pool spans the whole
	 * shared buffer.
	 */
	if (infi_size)
		size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	pr->mode = mode;
	pr->size = size;
	return 0;
}
0204 
/* Program the descriptor-buffer pool for @dir. Not cached, since the
 * driver does not expose descriptor pools through devlink.
 */
static int mlxsw_sp_sb_pr_desc_write(struct mlxsw_sp *mlxsw_sp,
				     enum mlxsw_reg_sbxx_dir dir,
				     enum mlxsw_reg_sbpr_mode mode,
				     u32 size, bool infi_size)
{
	char sbpr_pl[MLXSW_REG_SBPR_LEN];

	/* The FW default descriptor buffer configuration uses only pool 14 for
	 * descriptors.
	 */
	mlxsw_reg_sbpr_pack(sbpr_pl, 14, dir, mode, size, infi_size);
	mlxsw_reg_sbpr_desc_set(sbpr_pl, true);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
}
0219 
/* Program one {port, PG/TC} binding via the SBCM register and mirror it
 * into the cache. The cache is only touched for PGs/TCs that actually
 * exist in the given direction, and only after a successful write.
 */
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				u8 pg_buff, u32 min_buff, u32 max_buff,
				bool infi_max, u16 pool_index)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	struct mlxsw_sp_sb_cm *cm;
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
			    min_buff, max_buff, infi_max, des->pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;

	if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
		/* Cache the effective maximum: "infinite" means the whole
		 * shared buffer.
		 */
		if (infi_max)
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
							mlxsw_sp->sb->sb_size);

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
					des->dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool_index = pool_index;
	}
	return 0;
}
0249 
/* Program one {port, pool} quota via the SBPM register and mirror it
 * into the cache on success.
 */
static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				u16 pool_index, u32 min_buff, u32 max_buff)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
			    min_buff, max_buff);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
}
0270 
/* Queue an SBPM transaction that clears the max-occupancy watermark for
 * {port, pool} onto @bulk_list. CPU-port ingress pools are skipped.
 */
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];

	if (local_port == MLXSW_PORT_CPU_PORT &&
	    des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return 0;

	/* clr=true: reading also resets the max watermark. */
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    true, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list, NULL, 0);
}
0287 
0288 static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
0289                     char *sbpm_pl, size_t sbpm_pl_len,
0290                     unsigned long cb_priv)
0291 {
0292     struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;
0293 
0294     mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
0295 }
0296 
/* Queue an SBPM occupancy query for {port, pool} onto @bulk_list; the
 * result lands in the cached PM record via the callback above. CPU-port
 * ingress pools are skipped.
 */
static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

	if (local_port == MLXSW_PORT_CPU_PORT &&
	    des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return 0;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	/* clr=false: plain read, do not reset the max watermark. */
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}
0317 
/* Recompute each priority's effective buffer index from the source that
 * the current headroom mode mandates: the ETS mapping in DCB mode, or
 * the user-set mapping in TC mode. Other modes (if any) leave the index
 * untouched -- NOTE(review): no default case; presumably only these two
 * modes affect the mapping.
 */
void mlxsw_sp_hdroom_prios_reset_buf_idx(struct mlxsw_sp_hdroom *hdroom)
{
	int prio;

	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
		switch (hdroom->mode) {
		case MLXSW_SP_HDROOM_MODE_DCB:
			hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].ets_buf_idx;
			break;
		case MLXSW_SP_HDROOM_MODE_TC:
			hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].set_buf_idx;
			break;
		}
	}
}
0333 
0334 void mlxsw_sp_hdroom_bufs_reset_lossiness(struct mlxsw_sp_hdroom *hdroom)
0335 {
0336     int prio;
0337     int i;
0338 
0339     for (i = 0; i < DCBX_MAX_BUFFERS; i++)
0340         hdroom->bufs.buf[i].lossy = true;
0341 
0342     for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++) {
0343         if (!hdroom->prios.prio[prio].lossy)
0344             hdroom->bufs.buf[hdroom->prios.prio[prio].buf_idx].lossy = false;
0345     }
0346 }
0347 
0348 static u16 mlxsw_sp_hdroom_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, int mtu)
0349 {
0350     return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
0351 }
0352 
0353 static void mlxsw_sp_hdroom_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, bool lossy)
0354 {
0355     if (lossy)
0356         mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
0357     else
0358         mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
0359                             thres);
0360 }
0361 
/* Compute the PFC delay allowance for a lossless buffer, in cells. */
static u16 mlxsw_sp_hdroom_buf_delay_get(const struct mlxsw_sp *mlxsw_sp,
					 const struct mlxsw_sp_hdroom *hdroom)
{
	u16 delay_cells;

	delay_cells = mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->delay_bytes);

	/* In the worst case scenario the delay will be made up of packets that
	 * are all of size CELL_SIZE + 1, which means each packet will require
	 * almost twice its true size when buffered in the switch. We therefore
	 * multiply this value by the "cell factor", which is close to 2.
	 *
	 * Another MTU is added in case the transmitting host already started
	 * transmitting a maximum length frame when the PFC packet was received.
	 */
	return 2 * delay_cells + mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->mtu);
}
0379 
0380 static u32 mlxsw_sp_hdroom_int_buf_size_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed)
0381 {
0382     u32 buffsize = mlxsw_sp->sb_ops->int_buf_size_get(mtu, speed);
0383 
0384     return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
0385 }
0386 
0387 static bool mlxsw_sp_hdroom_buf_is_used(const struct mlxsw_sp_hdroom *hdroom, int buf)
0388 {
0389     int prio;
0390 
0391     for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
0392         if (hdroom->prios.prio[prio].buf_idx == buf)
0393             return true;
0394     }
0395     return false;
0396 }
0397 
/* Recompute the size/threshold of the internal buffer and of every PG
 * buffer in @hdroom from the current MTU, speed, lossiness and mode.
 * Pure computation on @hdroom; no hardware access.
 */
void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_hdroom *hdroom)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 reserve_cells;
	int i;

	/* Internal buffer. */
	reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, mlxsw_sp_port->max_mtu,
							 mlxsw_sp_port->max_speed);
	reserve_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, reserve_cells);
	hdroom->int_buf.reserve_cells = reserve_cells;

	if (hdroom->int_buf.enable)
		hdroom->int_buf.size_cells = reserve_cells;
	else
		hdroom->int_buf.size_cells = 0;

	/* PG buffers. */
	for (i = 0; i < DCBX_MAX_BUFFERS; i++) {
		struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];
		u16 thres_cells;
		u16 delay_cells;

		/* Unused buffers need no headroom; lossless ones also need
		 * a PFC delay allowance on top of the threshold.
		 */
		if (!mlxsw_sp_hdroom_buf_is_used(hdroom, i)) {
			thres_cells = 0;
			delay_cells = 0;
		} else if (buf->lossy) {
			thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, hdroom->mtu);
			delay_cells = 0;
		} else {
			thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, hdroom->mtu);
			delay_cells = mlxsw_sp_hdroom_buf_delay_get(mlxsw_sp, hdroom);
		}

		thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
		delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);

		buf->thres_cells = thres_cells;
		if (hdroom->mode == MLXSW_SP_HDROOM_MODE_DCB) {
			buf->size_cells = thres_cells + delay_cells;
		} else {
			/* Do not allow going below the minimum size, even if
			 * the user requested it.
			 */
			buf->size_cells = max(buf->set_size_cells, buf->thres_cells);
		}
	}
}
0447 
/* PG 8 is not used for data traffic and is skipped when packing PBMC. */
#define MLXSW_SP_PB_UNUSED 8

/* Push the PG buffer configuration from @hdroom to hardware via PBMC.
 * Skipped when nothing changed, unless @force. On success the port's
 * cached headroom is updated to match.
 */
static int mlxsw_sp_hdroom_configure_buffers(struct mlxsw_sp_port *mlxsw_sp_port,
					     const struct mlxsw_sp_hdroom *hdroom, bool force)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	bool dirty;
	int err;
	int i;

	dirty = memcmp(&mlxsw_sp_port->hdroom->bufs, &hdroom->bufs, sizeof(hdroom->bufs));
	if (!dirty && !force)
		return 0;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0xffff, 0xffff / 2);
	for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
		const struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];

		if (i == MLXSW_SP_PB_UNUSED)
			continue;

		mlxsw_sp_hdroom_buf_pack(pbmc_pl, i, buf->size_cells, buf->thres_cells, buf->lossy);
	}

	/* Zero-size the port shared buffer. */
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	mlxsw_sp_port->hdroom->bufs = hdroom->bufs;
	return 0;
}
0481 
/* Push the priority-to-buffer mapping from @hdroom to hardware via PPTB.
 * Skipped when nothing changed, unless @force. On success the port's
 * cached mapping is updated to match.
 */
static int mlxsw_sp_hdroom_configure_priomap(struct mlxsw_sp_port *mlxsw_sp_port,
					     const struct mlxsw_sp_hdroom *hdroom, bool force)
{
	char pptb_pl[MLXSW_REG_PPTB_LEN];
	bool dirty;
	int prio;
	int err;

	dirty = memcmp(&mlxsw_sp_port->hdroom->prios, &hdroom->prios, sizeof(hdroom->prios));
	if (!dirty && !force)
		return 0;

	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, prio, hdroom->prios.prio[prio].buf_idx);

	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb), pptb_pl);
	if (err)
		return err;

	mlxsw_sp_port->hdroom->prios = hdroom->prios;
	return 0;
}
0505 
/* Push the internal-buffer size from @hdroom to hardware via SBIB.
 * Skipped when nothing changed, unless @force. On success the port's
 * cached internal-buffer state is updated to match.
 */
static int mlxsw_sp_hdroom_configure_int_buf(struct mlxsw_sp_port *mlxsw_sp_port,
					     const struct mlxsw_sp_hdroom *hdroom, bool force)
{
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	bool dirty;
	int err;

	dirty = memcmp(&mlxsw_sp_port->hdroom->int_buf, &hdroom->int_buf, sizeof(hdroom->int_buf));
	if (!dirty && !force)
		return 0;

	mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, hdroom->int_buf.size_cells);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	if (err)
		return err;

	mlxsw_sp_port->hdroom->int_buf = hdroom->int_buf;
	return 0;
}
0525 
0526 static bool mlxsw_sp_hdroom_bufs_fit(struct mlxsw_sp *mlxsw_sp,
0527                      const struct mlxsw_sp_hdroom *hdroom)
0528 {
0529     u32 taken_headroom_cells = 0;
0530     int i;
0531 
0532     for (i = 0; i < MLXSW_SP_PB_COUNT; i++)
0533         taken_headroom_cells += hdroom->bufs.buf[i].size_cells;
0534 
0535     taken_headroom_cells += hdroom->int_buf.reserve_cells;
0536     return taken_headroom_cells <= mlxsw_sp->sb->max_headroom_cells;
0537 }
0538 
/* Apply the full headroom configuration @hdroom to the port, in an order
 * that avoids transient packet drops, rolling back on failure. Returns 0
 * or a negative errno; on success the port's cached headroom equals
 * @hdroom.
 */
static int __mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
				       const struct mlxsw_sp_hdroom *hdroom, bool force)
{
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom tmp_hdroom;
	int err;
	int i;

	/* Port buffers need to be configured in three steps. First, all buffers
	 * with non-zero size are configured. Then, prio-to-buffer map is
	 * updated, allowing traffic to flow to the now non-zero buffers.
	 * Finally, zero-sized buffers are configured, because now no traffic
	 * should be directed to them anymore. This way, in a non-congested
	 * system, no packet drops are introduced by the reconfiguration.
	 */

	orig_hdroom = *mlxsw_sp_port->hdroom;
	/* tmp_hdroom = current config + the new non-zero buffers. */
	tmp_hdroom = orig_hdroom;
	for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
		if (hdroom->bufs.buf[i].size_cells)
			tmp_hdroom.bufs.buf[i] = hdroom->bufs.buf[i];
	}

	/* Both the intermediate and the final state must fit the budget. */
	if (!mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, &tmp_hdroom) ||
	    !mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, hdroom))
		return -ENOBUFS;

	err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, force);
	if (err)
		return err;

	err = mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, hdroom, force);
	if (err)
		goto err_configure_priomap;

	err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, hdroom, false);
	if (err)
		goto err_configure_buffers;

	err = mlxsw_sp_hdroom_configure_int_buf(mlxsw_sp_port, hdroom, false);
	if (err)
		goto err_configure_int_buf;

	*mlxsw_sp_port->hdroom = *hdroom;
	return 0;

	/* Roll back in reverse order; best effort, errors ignored. */
err_configure_int_buf:
	mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, false);
err_configure_buffers:
	mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, &tmp_hdroom, false);
err_configure_priomap:
	mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &orig_hdroom, false);
	return err;
}
0593 
/* Public entry point: apply @hdroom without forcing rewrites of parts
 * that have not changed.
 */
int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
			      const struct mlxsw_sp_hdroom *hdroom)
{
	return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, hdroom, false);
}
0599 
/* Build the default (all-lossy, DCB-mode) headroom configuration for a
 * freshly initialized port and force it into hardware.
 */
static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_hdroom hdroom = {};
	u32 size9;
	int prio;

	hdroom.mtu = mlxsw_sp_port->dev->mtu;
	hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
		hdroom.prios.prio[prio].lossy = true;

	mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	/* Buffer 9 is used for control traffic. */
	size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, mlxsw_sp_port->max_mtu);
	hdroom.bufs.buf[9].size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size9);

	/* force=true: hardware state is unknown at init time. */
	return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom, true);
}
0621 
0622 static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
0623                  struct mlxsw_sp_sb_port *sb_port)
0624 {
0625     struct mlxsw_sp_sb_pm *pms;
0626 
0627     pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms),
0628               GFP_KERNEL);
0629     if (!pms)
0630         return -ENOMEM;
0631     sb_port->pms = pms;
0632     return 0;
0633 }
0634 
/* Release the per-port PM records allocated by mlxsw_sp_sb_port_init(). */
static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port)
{
	kfree(sb_port->pms);
}
0639 
/* Allocate the per-port array, the pool-record array, and each port's PM
 * records. Unwinds fully on failure (goto cleanup in reverse order).
 */
static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_sb_pr *prs;
	int i;
	int err;

	mlxsw_sp->sb->ports = kcalloc(max_ports,
				      sizeof(struct mlxsw_sp_sb_port),
				      GFP_KERNEL);
	if (!mlxsw_sp->sb->ports)
		return -ENOMEM;

	prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs),
		      GFP_KERNEL);
	if (!prs) {
		err = -ENOMEM;
		goto err_alloc_prs;
	}
	mlxsw_sp->sb->prs = prs;

	for (i = 0; i < max_ports; i++) {
		err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]);
		if (err)
			goto err_sb_port_init;
	}

	return 0;

err_sb_port_init:
	/* Only undo the ports that were successfully initialized. */
	for (i--; i >= 0; i--)
		mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
	kfree(mlxsw_sp->sb->prs);
err_alloc_prs:
	kfree(mlxsw_sp->sb->ports);
	return err;
}
0677 
0678 static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
0679 {
0680     int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
0681     int i;
0682 
0683     for (i = max_ports - 1; i >= 0; i--)
0684         mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
0685     kfree(mlxsw_sp->sb->prs);
0686     kfree(mlxsw_sp->sb->ports);
0687 }
0688 
/* Pool-record initializers for the default tables below; the _EXT
 * variant additionally marks mode/size as frozen.
 */
#define MLXSW_SP_SB_PR(_mode, _size)	\
	{				\
		.mode = _mode,		\
		.size = _size,		\
	}

#define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size)	\
	{								\
		.mode = _mode,						\
		.size = _size,						\
		.freeze_mode = _freeze_mode,				\
		.freeze_size = _freeze_size,				\
	}
0702 
/* Size (in bytes) of each CPU pool on Spectrum-1. */
#define MLXSW_SP1_SB_PR_CPU_SIZE	(256 * 1000)

/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
			   true, false),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
			   true, true),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
};
0723 
/* Size (in bytes) of each CPU pool on Spectrum-2. */
#define MLXSW_SP2_SB_PR_CPU_SIZE	(256 * 1000)

/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
			   true, false),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
			   true, true),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
};
0744 
/* Program the default pool sizes from @prs. Pools marked MLXSW_SP_SB_REST
 * receive whatever space remains per direction after all fixed-size pools
 * are accounted for; MLXSW_SP_SB_INFI pools are written as infinite.
 * Descriptor pools are set to dynamic/infinite at the end.
 */
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
				const struct mlxsw_sp_sb_pr *prs,
				const struct mlxsw_sp_sb_pool_des *pool_dess,
				size_t prs_len)
{
	/* Round down, unlike mlxsw_sp_bytes_cells(). */
	u32 sb_cells = div_u64(mlxsw_sp->sb->sb_size, mlxsw_sp->sb->cell_size);
	/* Remaining cells per direction; indexed by enum mlxsw_reg_sbxx_dir
	 * (relies on INGRESS/EGRESS being 0/1 -- NOTE(review): confirm
	 * against the enum definition in reg.h).
	 */
	u32 rest_cells[2] = {sb_cells, sb_cells};
	int i;
	int err;

	/* Calculate how much space to give to the "REST" pools in either
	 * direction.
	 */
	for (i = 0; i < prs_len; i++) {
		enum mlxsw_reg_sbxx_dir dir = pool_dess[i].dir;
		u32 size = prs[i].size;
		u32 size_cells;

		if (size == MLXSW_SP_SB_INFI || size == MLXSW_SP_SB_REST)
			continue;

		size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
		if (WARN_ON_ONCE(size_cells > rest_cells[dir]))
			continue;

		rest_cells[dir] -= size_cells;
	}

	for (i = 0; i < prs_len; i++) {
		u32 size = prs[i].size;
		u32 size_cells;

		if (size == MLXSW_SP_SB_INFI) {
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   0, true);
		} else if (size == MLXSW_SP_SB_REST) {
			size_cells = rest_cells[pool_dess[i].dir];
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   size_cells, false);
		} else {
			size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   size_cells, false);
		}
		if (err)
			return err;
	}

	err = mlxsw_sp_sb_pr_desc_write(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
					MLXSW_REG_SBPR_MODE_DYNAMIC, 0, true);
	if (err)
		return err;

	err = mlxsw_sp_sb_pr_desc_write(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
					MLXSW_REG_SBPR_MODE_DYNAMIC, 0, true);
	if (err)
		return err;

	return 0;
}
0806 
/* CM initializers: generic, plus shorthands bound to the default
 * ingress, egress, and (frozen) egress-multicast pools.
 */
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = _pool,			\
	}

#define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_ING,	\
	}

#define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR,	\
	}

#define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR_MC,	\
		.freeze_pool = true,			\
		.freeze_thresh = true,			\
	}
0836 
/* Spectrum-1 default ingress CMs: PG 0 carries traffic, PGs 1-7 get only
 * a dynamic max, PG 8 is a placeholder, and the last entry is the CPU PG.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM_ING(10000, 8),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};
0849 
/* Spectrum-2 default ingress CMs; same layout as the Spectrum-1 table
 * but with different PG 0 quotas.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM_ING(0, 7),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};
0862 
/* Spectrum-1 egress quotas, indexed by traffic class. Entries 0-7 use the
 * default egress pool; entries 8-15 are bound (frozen) to the multicast
 * pool with an infinite quota. The meaning of the last entry is not
 * evident from this file -- presumably a control TC; confirm against the
 * ASIC TC layout.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
    MLXSW_SP_SB_CM_EGR(1500, 9),
    MLXSW_SP_SB_CM_EGR(1500, 9),
    MLXSW_SP_SB_CM_EGR(1500, 9),
    MLXSW_SP_SB_CM_EGR(1500, 9),
    MLXSW_SP_SB_CM_EGR(1500, 9),
    MLXSW_SP_SB_CM_EGR(1500, 9),
    MLXSW_SP_SB_CM_EGR(1500, 9),
    MLXSW_SP_SB_CM_EGR(1500, 9),
    MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
    MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
    MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
    MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
    MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
    MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
    MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
    MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
    MLXSW_SP_SB_CM_EGR(1, 0xff),
};
0882 
/* Spectrum-2 egress quotas, indexed by traffic class; same layout as the
 * Spectrum-1 table but with no per-TC byte minimum on TCs 0-7.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
    MLXSW_SP_SB_CM_EGR(0, 7),
    MLXSW_SP_SB_CM_EGR(0, 7),
    MLXSW_SP_SB_CM_EGR(0, 7),
    MLXSW_SP_SB_CM_EGR(0, 7),
    MLXSW_SP_SB_CM_EGR(0, 7),
    MLXSW_SP_SB_CM_EGR(0, 7),
    MLXSW_SP_SB_CM_EGR(0, 7),
    MLXSW_SP_SB_CM_EGR(0, 7),
    MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
    MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
    MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
    MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
    MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
    MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
    MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
    MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
    MLXSW_SP_SB_CM_EGR(1, 0xff),
};
0902 
/* Zero-quota CM for CPU port TCs: no minimum, no shared-buffer maximum. */
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU)
0904 
/* Egress quotas for the CPU port, indexed by traffic class. All entries
 * draw from the egress CPU pool; selected TCs get a 1000-byte minimum,
 * the rest the zero default.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
    MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
    MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
    MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
    MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
    MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
    MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
    MLXSW_SP_CPU_PORT_SB_CM,
};
0939 
0940 static bool
0941 mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
0942 {
0943     struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
0944 
0945     return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
0946 }
0947 
/* Program the per-{port, PG/TC} quotas of the @cms table onto @local_port
 * via SBCM register writes. Returns 0 on success or a negative errno from
 * the first failing write.
 */
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u16 local_port,
                  enum mlxsw_reg_sbxx_dir dir,
                  const struct mlxsw_sp_sb_cm *cms,
                  size_t cms_len)
{
    const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
    int i;
    int err;

    for (i = 0; i < cms_len; i++) {
        const struct mlxsw_sp_sb_cm *cm;
        u32 min_buff;
        u32 max_buff;

        if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
            continue; /* PG number 8 does not exist, skip it */
        cm = &cms[i];
        /* A CM must point at a pool of its own direction. */
        if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
            continue;

        /* min_buff is specified in bytes; the register takes cells. */
        min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
        max_buff = cm->max_buff;
        if (max_buff == MLXSW_SP_SB_INFI) {
            /* Infinite quota: max_buff of 0 with the infi flag. */
            err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
                           min_buff, 0,
                           true, cm->pool_index);
        } else {
            /* For static pools max_buff is also in bytes;
             * for dynamic pools it is an alpha encoding and
             * passed through as-is.
             */
            if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
                               cm->pool_index))
                max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
                                max_buff);
            err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
                           min_buff, max_buff,
                           false, cm->pool_index);
        }
        if (err)
            return err;
    }
    return 0;
}
0988 
0989 static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
0990 {
0991     struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
0992     int err;
0993 
0994     err = __mlxsw_sp_sb_cms_init(mlxsw_sp,
0995                      mlxsw_sp_port->local_port,
0996                      MLXSW_REG_SBXX_DIR_INGRESS,
0997                      mlxsw_sp->sb_vals->cms_ingress,
0998                      mlxsw_sp->sb_vals->cms_ingress_count);
0999     if (err)
1000         return err;
1001     return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
1002                       mlxsw_sp_port->local_port,
1003                       MLXSW_REG_SBXX_DIR_EGRESS,
1004                       mlxsw_sp->sb_vals->cms_egress,
1005                       mlxsw_sp->sb_vals->cms_egress_count);
1006 }
1007 
/* Program the CPU port's egress CM table; the CPU port is local port 0. */
static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
    return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
                      mlxsw_sp->sb_vals->cms_cpu,
                      mlxsw_sp->sb_vals->cms_cpu_count);
}
1014 
/* Initializer for a PM (per-port, per-pool quota). */
#define MLXSW_SP_SB_PM(_min_buff, _max_buff)    \
    {                   \
        .min_buff = _min_buff,      \
        .max_buff = _max_buff,      \
    }
1020 
/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
    /* Ingress pools 0-3. */
    MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
    MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
    MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
    MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
    /* Egress pools 0-3. */
    MLXSW_SP_SB_PM(0, 7),
    MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
    MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
    MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
    /* Egress multicast pool. */
    MLXSW_SP_SB_PM(10000, 90000),
    /* Ingress and egress CPU pools. */
    MLXSW_SP_SB_PM(0, 8),   /* 50% occupancy */
    MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
1035 
/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
    /* Ingress pools 0-3. */
    MLXSW_SP_SB_PM(0, 7),
    MLXSW_SP_SB_PM(0, 0),
    MLXSW_SP_SB_PM(0, 0),
    MLXSW_SP_SB_PM(0, 0),
    /* Egress pools 0-3. */
    MLXSW_SP_SB_PM(0, 7),
    MLXSW_SP_SB_PM(0, 0),
    MLXSW_SP_SB_PM(0, 0),
    MLXSW_SP_SB_PM(0, 0),
    /* Egress multicast pool. */
    MLXSW_SP_SB_PM(10000, 90000),
    /* Ingress and egress CPU pools. */
    MLXSW_SP_SB_PM(0, 8),   /* 50% occupancy */
    MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
1050 
/* Order according to mlxsw_sp*_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = {
    MLXSW_SP_SB_PM(0, 0),
    MLXSW_SP_SB_PM(0, 0),
    MLXSW_SP_SB_PM(0, 0),
    MLXSW_SP_SB_PM(0, 0),
    MLXSW_SP_SB_PM(0, 0),
    MLXSW_SP_SB_PM(0, 0),
    MLXSW_SP_SB_PM(0, 0),
    MLXSW_SP_SB_PM(0, 0),
    /* Egress multicast pool: static 90000-byte cap. */
    MLXSW_SP_SB_PM(0, 90000),
    MLXSW_SP_SB_PM(0, 0),
    /* Egress CPU pool: unlimited dynamic quota. */
    MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};
1065 
/* Program the per-{port, pool} quotas of the @pms table onto @local_port
 * via SBPM register writes. With @skip_ingress, entries whose pool is an
 * ingress pool are left untouched (used for the CPU port, which has no
 * ingress quotas). Returns 0 or a negative errno from the first failure.
 */
static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u16 local_port,
                const struct mlxsw_sp_sb_pm *pms,
                bool skip_ingress)
{
    int i, err;

    for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
        const struct mlxsw_sp_sb_pm *pm = &pms[i];
        const struct mlxsw_sp_sb_pool_des *des;
        u32 max_buff;
        u32 min_buff;

        des = &mlxsw_sp->sb_vals->pool_dess[i];
        if (skip_ingress && des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
            continue;

        /* min_buff is in bytes; max_buff is in bytes only for
         * static pools, otherwise it is an alpha encoding.
         */
        min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
        max_buff = pm->max_buff;
        if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
            max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
        err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, min_buff,
                       max_buff);
        if (err)
            return err;
    }
    return 0;
}
1093 
/* Program the per-pool quota table onto a front-panel port. */
static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
    struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

    return mlxsw_sp_sb_pms_init(mlxsw_sp, mlxsw_sp_port->local_port,
                    mlxsw_sp->sb_vals->pms, false);
}
1101 
/* Program the CPU port's (local port 0) per-pool quotas, skipping ingress
 * pools.
 */
static int mlxsw_sp_cpu_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp)
{
    return mlxsw_sp_sb_pms_init(mlxsw_sp, 0, mlxsw_sp->sb_vals->pms_cpu,
                    true);
}
1107 
/* Initializer for an MM (multicast quota), bound to the default egress
 * pool.
 */
#define MLXSW_SP_SB_MM(_min_buff, _max_buff)        \
    {                       \
        .min_buff = _min_buff,          \
        .max_buff = _max_buff,          \
        .pool_index = MLXSW_SP_SB_POOL_EGR, \
    }
1114 
/* Multicast quotas, one entry per SBMM index; all identical. */
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
    MLXSW_SP_SB_MM(0, 6),
    MLXSW_SP_SB_MM(0, 6),
    MLXSW_SP_SB_MM(0, 6),
    MLXSW_SP_SB_MM(0, 6),
    MLXSW_SP_SB_MM(0, 6),
    MLXSW_SP_SB_MM(0, 6),
    MLXSW_SP_SB_MM(0, 6),
    MLXSW_SP_SB_MM(0, 6),
    MLXSW_SP_SB_MM(0, 6),
    MLXSW_SP_SB_MM(0, 6),
    MLXSW_SP_SB_MM(0, 6),
    MLXSW_SP_SB_MM(0, 6),
    MLXSW_SP_SB_MM(0, 6),
    MLXSW_SP_SB_MM(0, 6),
    MLXSW_SP_SB_MM(0, 6),
};
1132 
/* Program the multicast quota table via SBMM register writes. Returns 0
 * or a negative errno from the first failing write.
 */
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
    char sbmm_pl[MLXSW_REG_SBMM_LEN];
    int i;
    int err;

    for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
        const struct mlxsw_sp_sb_pool_des *des;
        const struct mlxsw_sp_sb_mm *mc;
        u32 min_buff;

        mc = &mlxsw_sp->sb_vals->mms[i];
        des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];
        /* All pools used by sb_mm's are initialized using dynamic
         * thresholds, therefore 'max_buff' isn't specified in cells.
         */
        min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
        mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
                    des->pool);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
        if (err)
            return err;
    }
    return 0;
}
1158 
1159 static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
1160                 u16 *p_ingress_len, u16 *p_egress_len)
1161 {
1162     int i;
1163 
1164     for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) {
1165         if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
1166             MLXSW_REG_SBXX_DIR_INGRESS)
1167             (*p_ingress_len)++;
1168         else
1169             (*p_egress_len)++;
1170     }
1171 
1172     WARN(*p_egress_len == 0, "No egress pools\n");
1173 }
1174 
/* Spectrum-1 shared buffer descriptor set: ties together this ASIC's
 * pool descriptors, default pool/quota tables and their sizes.
 */
const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
    .pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
    .pool_dess = mlxsw_sp1_sb_pool_dess,
    .pms = mlxsw_sp1_sb_pms,
    .pms_cpu = mlxsw_sp_cpu_port_sb_pms,
    .prs = mlxsw_sp1_sb_prs,
    .mms = mlxsw_sp_sb_mms,
    .cms_ingress = mlxsw_sp1_sb_cms_ingress,
    .cms_egress = mlxsw_sp1_sb_cms_egress,
    .cms_cpu = mlxsw_sp_cpu_port_sb_cms,
    .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
    .cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
    .cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
    .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
1190 
/* Spectrum-2 shared buffer descriptor set; same shape as the Spectrum-1
 * one, with Spectrum-2 tables where they differ.
 */
const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
    .pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
    .pool_dess = mlxsw_sp2_sb_pool_dess,
    .pms = mlxsw_sp2_sb_pms,
    .pms_cpu = mlxsw_sp_cpu_port_sb_pms,
    .prs = mlxsw_sp2_sb_prs,
    .mms = mlxsw_sp_sb_mms,
    .cms_ingress = mlxsw_sp2_sb_cms_ingress,
    .cms_egress = mlxsw_sp2_sb_cms_egress,
    .cms_cpu = mlxsw_sp_cpu_port_sb_cms,
    .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
    .cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
    .cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
    .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
1206 
1207 static u32 mlxsw_sp1_pb_int_buf_size_get(int mtu, u32 speed)
1208 {
1209     return mtu * 5 / 2;
1210 }
1211 
1212 static u32 __mlxsw_sp_pb_int_buf_size_get(int mtu, u32 speed, u32 buffer_factor)
1213 {
1214     return 3 * mtu + buffer_factor * speed / 1000;
1215 }
1216 
1217 #define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
1218 
1219 static u32 mlxsw_sp2_pb_int_buf_size_get(int mtu, u32 speed)
1220 {
1221     int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
1222 
1223     return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
1224 }
1225 
1226 #define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
1227 
1228 static u32 mlxsw_sp3_pb_int_buf_size_get(int mtu, u32 speed)
1229 {
1230     int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
1231 
1232     return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
1233 }
1234 
/* Spectrum-1 shared buffer ops. */
const struct mlxsw_sp_sb_ops mlxsw_sp1_sb_ops = {
    .int_buf_size_get = mlxsw_sp1_pb_int_buf_size_get,
};
1238 
/* Spectrum-2 shared buffer ops. */
const struct mlxsw_sp_sb_ops mlxsw_sp2_sb_ops = {
    .int_buf_size_get = mlxsw_sp2_pb_int_buf_size_get,
};
1242 
/* Spectrum-3 shared buffer ops. */
const struct mlxsw_sp_sb_ops mlxsw_sp3_sb_ops = {
    .int_buf_size_get = mlxsw_sp3_pb_int_buf_size_get,
};
1246 
/* Allocate and program the device-wide shared buffer state and register
 * the shared buffer with devlink. On failure everything acquired so far
 * is unwound via the goto chain. Returns 0 or a negative errno.
 */
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
    u32 max_headroom_size;
    u16 ing_pool_count = 0;
    u16 eg_pool_count = 0;
    int err;

    /* All three device resources below are mandatory. */
    if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
        return -EIO;

    if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, GUARANTEED_SHARED_BUFFER))
        return -EIO;

    if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
        return -EIO;

    mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
    if (!mlxsw_sp->sb)
        return -ENOMEM;
    mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
    mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                           GUARANTEED_SHARED_BUFFER);
    max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                           MAX_HEADROOM_SIZE);
    /* Round down, because this limit must not be overstepped. */
    mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
                        mlxsw_sp->sb->cell_size;

    /* Per-port state, then pool sizes (SBPR), then the CPU port's
     * per-TC and per-pool quotas, then multicast quotas (SBMM).
     */
    err = mlxsw_sp_sb_ports_init(mlxsw_sp);
    if (err)
        goto err_sb_ports_init;
    err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
                   mlxsw_sp->sb_vals->pool_dess,
                   mlxsw_sp->sb_vals->pool_count);
    if (err)
        goto err_sb_prs_init;
    err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
    if (err)
        goto err_sb_cpu_port_sb_cms_init;
    err = mlxsw_sp_cpu_port_sb_pms_init(mlxsw_sp);
    if (err)
        goto err_sb_cpu_port_pms_init;
    err = mlxsw_sp_sb_mms_init(mlxsw_sp);
    if (err)
        goto err_sb_mms_init;
    mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
    err = devl_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
                   mlxsw_sp->sb->sb_size,
                   ing_pool_count,
                   eg_pool_count,
                   MLXSW_SP_SB_ING_TC_COUNT,
                   MLXSW_SP_SB_EG_TC_COUNT);
    if (err)
        goto err_devlink_sb_register;

    return 0;

err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_pms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
    mlxsw_sp_sb_ports_fini(mlxsw_sp);
err_sb_ports_init:
    kfree(mlxsw_sp->sb);
    return err;
}
1314 
/* Tear down in reverse order of mlxsw_sp_buffers_init(): devlink
 * unregistration first, then per-port state, then the sb struct itself.
 */
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
    devl_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
    mlxsw_sp_sb_ports_fini(mlxsw_sp);
    kfree(mlxsw_sp->sb);
}
1321 
/* Allocate the port's headroom state and program its headroom, per-TC
 * and per-pool quotas. On failure the headroom allocation is freed.
 * Returns 0 or a negative errno.
 */
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
    int err;

    mlxsw_sp_port->hdroom = kzalloc(sizeof(*mlxsw_sp_port->hdroom), GFP_KERNEL);
    if (!mlxsw_sp_port->hdroom)
        return -ENOMEM;
    mlxsw_sp_port->hdroom->mtu = mlxsw_sp_port->dev->mtu;

    err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
    if (err)
        goto err_headroom_init;
    err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
    if (err)
        goto err_port_sb_cms_init;
    err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
    if (err)
        goto err_port_sb_pms_init;
    return 0;

err_port_sb_pms_init:
err_port_sb_cms_init:
err_headroom_init:
    kfree(mlxsw_sp_port->hdroom);
    return err;
}
1348 
/* Release the port's headroom state allocated in
 * mlxsw_sp_port_buffers_init().
 */
void mlxsw_sp_port_buffers_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
    kfree(mlxsw_sp_port->hdroom);
}
1353 
1354 int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
1355              unsigned int sb_index, u16 pool_index,
1356              struct devlink_sb_pool_info *pool_info)
1357 {
1358     struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1359     enum mlxsw_reg_sbxx_dir dir;
1360     struct mlxsw_sp_sb_pr *pr;
1361 
1362     dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
1363     pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
1364     pool_info->pool_type = (enum devlink_sb_pool_type) dir;
1365     pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
1366     pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
1367     pool_info->cell_size = mlxsw_sp->sb->cell_size;
1368     return 0;
1369 }
1370 
1371 int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
1372              unsigned int sb_index, u16 pool_index, u32 size,
1373              enum devlink_sb_threshold_type threshold_type,
1374              struct netlink_ext_ack *extack)
1375 {
1376     struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1377     u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
1378     const struct mlxsw_sp_sb_pr *pr;
1379     enum mlxsw_reg_sbpr_mode mode;
1380 
1381     mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
1382     pr = &mlxsw_sp->sb_vals->prs[pool_index];
1383 
1384     if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core,
1385                       GUARANTEED_SHARED_BUFFER)) {
1386         NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
1387         return -EINVAL;
1388     }
1389 
1390     if (pr->freeze_mode && pr->mode != mode) {
1391         NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
1392         return -EINVAL;
1393     }
1394 
1395     if (pr->freeze_size && pr->size != size) {
1396         NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
1397         return -EINVAL;
1398     }
1399 
1400     return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
1401                     pool_size, false);
1402 }
1403 
/* devlink dynamic thresholds and the registers' alpha encoding differ by
 * a constant offset, e.g. user value 3 maps to register value 1, and 16
 * maps to 14.
 */
#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
1405 
1406 static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
1407                      u32 max_buff)
1408 {
1409     struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
1410 
1411     if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
1412         return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
1413     return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
1414 }
1415 
1416 static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
1417                     u32 threshold, u32 *p_max_buff,
1418                     struct netlink_ext_ack *extack)
1419 {
1420     struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
1421 
1422     if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
1423         int val;
1424 
1425         val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
1426         if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
1427             val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) {
1428             NL_SET_ERR_MSG_MOD(extack, "Invalid dynamic threshold value");
1429             return -EINVAL;
1430         }
1431         *p_max_buff = val;
1432     } else {
1433         *p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
1434     }
1435     return 0;
1436 }
1437 
1438 int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1439                   unsigned int sb_index, u16 pool_index,
1440                   u32 *p_threshold)
1441 {
1442     struct mlxsw_sp_port *mlxsw_sp_port =
1443             mlxsw_core_port_driver_priv(mlxsw_core_port);
1444     struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1445     u16 local_port = mlxsw_sp_port->local_port;
1446     struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1447                                pool_index);
1448 
1449     *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
1450                          pm->max_buff);
1451     return 0;
1452 }
1453 
1454 int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
1455                   unsigned int sb_index, u16 pool_index,
1456                   u32 threshold, struct netlink_ext_ack *extack)
1457 {
1458     struct mlxsw_sp_port *mlxsw_sp_port =
1459             mlxsw_core_port_driver_priv(mlxsw_core_port);
1460     struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1461     u16 local_port = mlxsw_sp_port->local_port;
1462     u32 max_buff;
1463     int err;
1464 
1465     if (local_port == MLXSW_PORT_CPU_PORT) {
1466         NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's threshold is forbidden");
1467         return -EINVAL;
1468     }
1469 
1470     err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
1471                        threshold, &max_buff, extack);
1472     if (err)
1473         return err;
1474 
1475     return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
1476                     0, max_buff);
1477 }
1478 
1479 int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1480                  unsigned int sb_index, u16 tc_index,
1481                  enum devlink_sb_pool_type pool_type,
1482                  u16 *p_pool_index, u32 *p_threshold)
1483 {
1484     struct mlxsw_sp_port *mlxsw_sp_port =
1485             mlxsw_core_port_driver_priv(mlxsw_core_port);
1486     struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1487     u16 local_port = mlxsw_sp_port->local_port;
1488     u8 pg_buff = tc_index;
1489     enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1490     struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1491                                pg_buff, dir);
1492 
1493     *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
1494                          cm->max_buff);
1495     *p_pool_index = cm->pool_index;
1496     return 0;
1497 }
1498 
/* devlink ->sb_tc_pool_bind_set: bind a TC to a pool with a threshold.
 * Rejected for the CPU port, for cross-direction bindings, and for TCs
 * whose pool binding or threshold is frozen (e.g. multicast TCs).
 */
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
                 unsigned int sb_index, u16 tc_index,
                 enum devlink_sb_pool_type pool_type,
                 u16 pool_index, u32 threshold,
                 struct netlink_ext_ack *extack)
{
    struct mlxsw_sp_port *mlxsw_sp_port =
            mlxsw_core_port_driver_priv(mlxsw_core_port);
    struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
    u16 local_port = mlxsw_sp_port->local_port;
    const struct mlxsw_sp_sb_cm *cm;
    u8 pg_buff = tc_index;
    enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
    u32 max_buff;
    int err;

    if (local_port == MLXSW_PORT_CPU_PORT) {
        NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's binding is forbidden");
        return -EINVAL;
    }

    if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) {
        NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden");
        return -EINVAL;
    }

    /* Look up the default CM to honor its freeze flags. */
    if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
        cm = &mlxsw_sp->sb_vals->cms_ingress[tc_index];
    else
        cm = &mlxsw_sp->sb_vals->cms_egress[tc_index];

    if (cm->freeze_pool && cm->pool_index != pool_index) {
        NL_SET_ERR_MSG_MOD(extack, "Binding this TC to a different pool is forbidden");
        return -EINVAL;
    }

    if (cm->freeze_thresh && cm->max_buff != threshold) {
        NL_SET_ERR_MSG_MOD(extack, "Changing this TC's threshold is forbidden");
        return -EINVAL;
    }

    err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
                       threshold, &max_buff, extack);
    if (err)
        return err;

    return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
                    0, max_buff, false, pool_index);
}
1548 
/* Number of ports whose per-TC occupancy records fit in a single SBSR
 * response (each port contributes one record per ingress and egress TC).
 */
#define MASKED_COUNT_MAX \
    (MLXSW_REG_SBSR_REC_MAX_COUNT / \
     (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))
1552 
/* Context passed (packed into an unsigned long) from the SBSR query to
 * its completion callback.
 */
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
    u8 masked_count;    /* number of ports covered by this query */
    u16 local_port_1;   /* first local port of the batch */
};
1557 
/* Completion callback for an SBSR occupancy query: unpack the response
 * records into the cached per-{port, TC} occupancy values. The response
 * carries all ingress records of the batch first, then all egress
 * records, so the ports are walked twice in the same order; rec_index
 * must advance in lock-step with the response layout.
 */
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
                    char *sbsr_pl, size_t sbsr_pl_len,
                    unsigned long cb_priv)
{
    struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
    struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
    u8 masked_count;
    u16 local_port;
    int rec_index = 0;
    struct mlxsw_sp_sb_cm *cm;
    int i;

    memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

    /* First pass: ingress records. */
    masked_count = 0;
    for (local_port = cb_ctx.local_port_1;
         local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
        if (!mlxsw_sp->ports[local_port])
            continue;
        if (local_port == MLXSW_PORT_CPU_PORT) {
            /* Ingress quotas are not supported for the CPU port */
            masked_count++;
            continue;
        }
        for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
            cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
                        MLXSW_REG_SBXX_DIR_INGRESS);
            mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
                          &cm->occ.cur, &cm->occ.max);
        }
        if (++masked_count == cb_ctx.masked_count)
            break;
    }
    /* Second pass: egress records (CPU port included here). */
    masked_count = 0;
    for (local_port = cb_ctx.local_port_1;
         local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
        if (!mlxsw_sp->ports[local_port])
            continue;
        for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
            cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
                        MLXSW_REG_SBXX_DIR_EGRESS);
            mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
                          &cm->occ.cur, &cm->occ.max);
        }
        if (++masked_count == cb_ctx.masked_count)
            break;
    }
}
1606 
/* devlink ->sb_occ_snapshot: take an occupancy snapshot of all ports.
 *
 * Ports are queried in batches because a single SBSR response holds at
 * most MASKED_COUNT_MAX ports' worth of records, and because the SBSR
 * port mask is paged (MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE ports per page).
 * Whichever limit is hit first terminates the batch (goto do_query);
 * after the query is queued, the loop restarts from the next port.
 * Per-pool occupancy is queried via SBPM alongside each port. All
 * transactions are collected on bulk_list and awaited together at "out".
 * Returns 0 or the first error encountered.
 */
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
                 unsigned int sb_index)
{
    struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
    u16 local_port, local_port_1, last_local_port;
    struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
    u8 masked_count, current_page = 0;
    unsigned long cb_priv = 0;
    LIST_HEAD(bulk_list);
    char *sbsr_pl;
    int i;
    int err;
    int err2;

    sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
    if (!sbsr_pl)
        return -ENOMEM;

    local_port = MLXSW_PORT_CPU_PORT;
next_batch:
    local_port_1 = local_port;
    masked_count = 0;
    mlxsw_reg_sbsr_pack(sbsr_pl, false);
    mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
    /* Last local port covered by the current port-mask page. */
    last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
              MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;

    for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
        mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
    for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
        mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
    for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
        if (!mlxsw_sp->ports[local_port])
            continue;
        if (local_port > last_local_port) {
            /* Port falls outside this page; flush the batch
             * and start a new one on the next page.
             */
            current_page++;
            goto do_query;
        }
        if (local_port != MLXSW_PORT_CPU_PORT) {
            /* Ingress quotas are not supported for the CPU port */
            mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
                                 local_port, 1);
        }
        mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
        for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
            err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
                               &bulk_list);
            if (err)
                goto out;
        }
        if (++masked_count == MASKED_COUNT_MAX)
            goto do_query;
    }

do_query:
    cb_ctx.masked_count = masked_count;
    cb_ctx.local_port_1 = local_port_1;
    memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
    err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
                    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
                    cb_priv);
    if (err)
        goto out;
    if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
        local_port++;
        goto next_batch;
    }

out:
    err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
    if (!err)
        err = err2;
    kfree(sbsr_pl);
    return err;
}
1682 
/* Clear the recorded maximum shared-buffer occupancy for every port, PG
 * buffer, traffic class and pool on the device.
 *
 * Ports are walked in batches: the SBSR register addresses ports through
 * per-page bitmasks (MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE ports per page) and
 * at most MASKED_COUNT_MAX ports are masked into a single query.  Whenever
 * either limit is hit, the accumulated mask is flushed via
 * mlxsw_reg_trans_query() and a new batch is started from the current
 * local_port.
 *
 * Returns 0 on success or a negative errno.  All issued transactions are
 * always waited for (mlxsw_reg_trans_bulk_wait) before returning, even on
 * the error path.
 */
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u16 local_port, last_local_port;
	LIST_HEAD(bulk_list);
	unsigned int masked_count;
	u8 current_page = 0;
	char *sbsr_pl;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

	/* Start the walk from the CPU port; local_port persists across
	 * batches so each next_batch iteration resumes where the previous
	 * one stopped.
	 */
	local_port = MLXSW_PORT_CPU_PORT;
next_batch:
	masked_count = 0;
	/* 'true' presumably selects clear-on-read of the max occupancy
	 * values (this is the *_max_clear entry point) — NOTE(review):
	 * confirm against the SBSR register definition in reg.h.
	 */
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
	mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
	/* Highest local port addressable through the current page's mask. */
	last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
			  MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;

	/* Clear all ingress PG buffers and all egress traffic classes. */
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port > last_local_port) {
			/* Port belongs to the next page; flush what we have
			 * and re-pack with the advanced page number.
			 */
			current_page++;
			goto do_query;
		}
		if (local_port != MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
							     local_port, 1);
		}
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		/* Per-port per-pool (SBPM) max values are cleared through
		 * separate transactions collected on bulk_list.
		 */
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	/* No completion callback is needed here — unlike the snapshot path,
	 * clearing has no per-port results to copy back.
	 */
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
		/* Batch ended early (page boundary or MASKED_COUNT_MAX);
		 * continue from the next port.
		 */
		local_port++;
		goto next_batch;
	}

out:
	/* Wait for every queued transaction; preserve the first error. */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
1752 
1753 int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1754                   unsigned int sb_index, u16 pool_index,
1755                   u32 *p_cur, u32 *p_max)
1756 {
1757     struct mlxsw_sp_port *mlxsw_sp_port =
1758             mlxsw_core_port_driver_priv(mlxsw_core_port);
1759     struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1760     u16 local_port = mlxsw_sp_port->local_port;
1761     struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1762                                pool_index);
1763 
1764     *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
1765     *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
1766     return 0;
1767 }
1768 
1769 int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1770                      unsigned int sb_index, u16 tc_index,
1771                      enum devlink_sb_pool_type pool_type,
1772                      u32 *p_cur, u32 *p_max)
1773 {
1774     struct mlxsw_sp_port *mlxsw_sp_port =
1775             mlxsw_core_port_driver_priv(mlxsw_core_port);
1776     struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1777     u16 local_port = mlxsw_sp_port->local_port;
1778     u8 pg_buff = tc_index;
1779     enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1780     struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1781                                pg_buff, dir);
1782 
1783     *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
1784     *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
1785     return 0;
1786 }