Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Marvell RVU Ethernet driver
0003  *
0004  * Copyright (C) 2021 Marvell.
0005  *
0006  */
0007 
0008 #include "cn10k.h"
0009 #include "otx2_reg.h"
0010 #include "otx2_struct.h"
0011 
/* Queue/buffer ops for pre-CN10K (OcteonTx2) silicon: plain register
 * based SQE flush and one-pointer-at-a-time aura free.
 */
static struct dev_hw_ops    otx2_hw_ops = {
    .sq_aq_init = otx2_sq_aq_init,
    .sqe_flush = otx2_sqe_flush,
    .aura_freeptr = otx2_aura_freeptr,
    .refill_pool_ptrs = otx2_refill_pool_ptrs,
};
0018 
/* Queue/buffer ops for CN10K silicon, which uses LMTST lines for SQE
 * flush and batched aura pointer frees (see cn10k_lmtst_init()).
 */
static struct dev_hw_ops cn10k_hw_ops = {
    .sq_aq_init = cn10k_sq_aq_init,
    .sqe_flush = cn10k_sqe_flush,
    .aura_freeptr = cn10k_aura_freeptr,
    .refill_pool_ptrs = cn10k_refill_pool_ptrs,
};
0025 
0026 int cn10k_lmtst_init(struct otx2_nic *pfvf)
0027 {
0028 
0029     struct lmtst_tbl_setup_req *req;
0030     struct otx2_lmt_info *lmt_info;
0031     int err, cpu;
0032 
0033     if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
0034         pfvf->hw_ops = &otx2_hw_ops;
0035         return 0;
0036     }
0037 
0038     pfvf->hw_ops = &cn10k_hw_ops;
0039     /* Total LMTLINES = num_online_cpus() * 32 (For Burst flush).*/
0040     pfvf->tot_lmt_lines = (num_online_cpus() * LMT_BURST_SIZE);
0041     pfvf->hw.lmt_info = alloc_percpu(struct otx2_lmt_info);
0042 
0043     mutex_lock(&pfvf->mbox.lock);
0044     req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox);
0045     if (!req) {
0046         mutex_unlock(&pfvf->mbox.lock);
0047         return -ENOMEM;
0048     }
0049 
0050     req->use_local_lmt_region = true;
0051 
0052     err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines,
0053              LMT_LINE_SIZE);
0054     if (err) {
0055         mutex_unlock(&pfvf->mbox.lock);
0056         return err;
0057     }
0058     pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base;
0059     req->lmt_iova = (u64)pfvf->dync_lmt->iova;
0060 
0061     err = otx2_sync_mbox_msg(&pfvf->mbox);
0062     mutex_unlock(&pfvf->mbox.lock);
0063 
0064     for_each_possible_cpu(cpu) {
0065         lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, cpu);
0066         lmt_info->lmt_addr = ((u64)pfvf->hw.lmt_base +
0067                       (cpu * LMT_BURST_SIZE * LMT_LINE_SIZE));
0068         lmt_info->lmt_id = cpu * LMT_BURST_SIZE;
0069     }
0070 
0071     return 0;
0072 }
0073 EXPORT_SYMBOL(cn10k_lmtst_init);
0074 
0075 int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
0076 {
0077     struct nix_cn10k_aq_enq_req *aq;
0078     struct otx2_nic *pfvf = dev;
0079 
0080     /* Get memory to put this msg */
0081     aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
0082     if (!aq)
0083         return -ENOMEM;
0084 
0085     aq->sq.cq = pfvf->hw.rx_queues + qidx;
0086     aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
0087     aq->sq.cq_ena = 1;
0088     aq->sq.ena = 1;
0089     /* Only one SMQ is allocated, map all SQ's to that SMQ  */
0090     aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
0091     aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
0092     aq->sq.default_chan = pfvf->hw.tx_chan_base;
0093     aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
0094     aq->sq.sqb_aura = sqb_aura;
0095     aq->sq.sq_int_ena = NIX_SQINT_BITS;
0096     aq->sq.qint_idx = 0;
0097     /* Due pipelining impact minimum 2000 unused SQ CQE's
0098      * need to maintain to avoid CQ overflow.
0099      */
0100     aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt));
0101 
0102     /* Fill AQ info */
0103     aq->qidx = qidx;
0104     aq->ctype = NIX_AQ_CTYPE_SQ;
0105     aq->op = NIX_AQ_INSTOP_INIT;
0106 
0107     return otx2_sync_mbox_msg(&pfvf->mbox);
0108 }
0109 
#define NPA_MAX_BURST 16
/* Replenish the buffer pool backing @cq with cq->pool_ptrs fresh RX
 * buffers. Pointers are batched in ptrs[] and handed to the aura in
 * bursts of up to NPA_MAX_BURST entries via __cn10k_aura_freeptr().
 * ptrs[0] is never written here, so it appears reserved for the free
 * helper; num_ptrs therefore starts at 1 and counts that slot too.
 */
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
    struct otx2_nic *pfvf = dev;
    u64 ptrs[NPA_MAX_BURST];
    int num_ptrs = 1;        /* slot 0 reserved, see above */
    dma_addr_t bufptr;

    /* Refill pool with new buffers */
    while (cq->pool_ptrs) {
        if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
            /* Allocation failed: flush what is already batched
             * and give up.
             * NOTE(review): num_ptrs >= 1 here, so the condition
             * always holds and the helper gets num_ptrs - 1 —
             * a zero-length flush when nothing was batched.
             * Verify this matches the helper's contract.
             */
            if (num_ptrs--)
                __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
                             num_ptrs);
            break;
        }
        cq->pool_ptrs--;
        /* Leave headroom in front of the packet data */
        ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM;
        num_ptrs++;
        /* Flush a full burst, or the final partial one */
        if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
            __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
                         num_ptrs);
            num_ptrs = 1;
        }
    }
}
0136 
/* Submit one send-queue entry of @size bytes to HW by copying it into
 * this CPU's LMT line and issuing an LMTST flush, then advance the SQ
 * head. @qidx is unused here. Uses smp_processor_id(), so the caller
 * is presumably in a non-migratable (TX datapath) context —
 * NOTE(review): confirm preemption is disabled at all call sites.
 */
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
{
    struct otx2_lmt_info *lmt_info;
    struct otx2_nic *pfvf = dev;
    u64 val = 0, tar_addr = 0;

    /* Per-CPU LMT line carved out in cn10k_lmtst_init() */
    lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
    /* FIXME: val[0:10] LMT_ID.
     * [12:15] no of LMTST - 1 in the burst.
     * [19:63] data size of each LMTST in the burst except first.
     */
    val = (lmt_info->lmt_id & 0x7FF);
    /* Target address for LMTST flush tells HW how many 128bit
     * words are present.
     * tar_addr[6:4] size of first LMTST - 1 in units of 128b.
     */
    tar_addr |= sq->io_addr | (((size / 16) - 1) & 0x7) << 4;
    /* Order prior descriptor writes before the LMT store/flush */
    dma_wmb();
    memcpy((u64 *)lmt_info->lmt_addr, sq->sqe_base, size);
    cn10k_lmt_flush(val, tar_addr);

    /* Advance head with wrap (assumes sqe_cnt is a power of two) */
    sq->head++;
    sq->head &= (sq->sqe_cnt - 1);
}
0161 
0162 int cn10k_free_all_ipolicers(struct otx2_nic *pfvf)
0163 {
0164     struct nix_bandprof_free_req *req;
0165     int rc;
0166 
0167     if (is_dev_otx2(pfvf->pdev))
0168         return 0;
0169 
0170     mutex_lock(&pfvf->mbox.lock);
0171 
0172     req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
0173     if (!req) {
0174         rc =  -ENOMEM;
0175         goto out;
0176     }
0177 
0178     /* Free all bandwidth profiles allocated */
0179     req->free_all = true;
0180 
0181     rc = otx2_sync_mbox_msg(&pfvf->mbox);
0182 out:
0183     mutex_unlock(&pfvf->mbox.lock);
0184     return rc;
0185 }
0186 
0187 int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf)
0188 {
0189     struct nix_bandprof_alloc_req *req;
0190     struct nix_bandprof_alloc_rsp *rsp;
0191     int rc;
0192 
0193     req = otx2_mbox_alloc_msg_nix_bandprof_alloc(&pfvf->mbox);
0194     if (!req)
0195         return  -ENOMEM;
0196 
0197     req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
0198 
0199     rc = otx2_sync_mbox_msg(&pfvf->mbox);
0200     if (rc)
0201         goto out;
0202 
0203     rsp = (struct  nix_bandprof_alloc_rsp *)
0204            otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
0205     if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) {
0206         rc = -EIO;
0207         goto out;
0208     }
0209 
0210     *leaf = rsp->prof_idx[BAND_PROF_LEAF_LAYER][0];
0211 out:
0212     if (rc) {
0213         dev_warn(pfvf->dev,
0214              "Failed to allocate ingress bandwidth policer\n");
0215     }
0216 
0217     return rc;
0218 }
0219 
0220 int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf)
0221 {
0222     struct otx2_hw *hw = &pfvf->hw;
0223     int ret;
0224 
0225     mutex_lock(&pfvf->mbox.lock);
0226 
0227     ret = cn10k_alloc_leaf_profile(pfvf, &hw->matchall_ipolicer);
0228 
0229     mutex_unlock(&pfvf->mbox.lock);
0230 
0231     return ret;
0232 }
0233 
0234 #define POLICER_TIMESTAMP     1  /* 1 second */
0235 #define MAX_RATE_EXP          22 /* Valid rate exponent range: 0 - 22 */
0236 
0237 static void cn10k_get_ingress_burst_cfg(u32 burst, u32 *burst_exp,
0238                     u32 *burst_mantissa)
0239 {
0240     int tmp;
0241 
0242     /* Burst is calculated as
0243      * (1+[BURST_MANTISSA]/256)*2^[BURST_EXPONENT]
0244      * This is the upper limit on number tokens (bytes) that
0245      * can be accumulated in the bucket.
0246      */
0247     *burst_exp = ilog2(burst);
0248     if (burst < 256) {
0249         /* No float: can't express mantissa in this case */
0250         *burst_mantissa = 0;
0251         return;
0252     }
0253 
0254     if (*burst_exp > MAX_RATE_EXP)
0255         *burst_exp = MAX_RATE_EXP;
0256 
0257     /* Calculate mantissa
0258      * Find remaining bytes 'burst - 2^burst_exp'
0259      * mantissa = (remaining bytes) / 2^ (burst_exp - 8)
0260      */
0261     tmp = burst - rounddown_pow_of_two(burst);
0262     *burst_mantissa = tmp / (1UL << (*burst_exp - 8));
0263 }
0264 
0265 static void cn10k_get_ingress_rate_cfg(u64 rate, u32 *rate_exp,
0266                        u32 *rate_mantissa, u32 *rdiv)
0267 {
0268     u32 div = 0;
0269     u32 exp = 0;
0270     u64 tmp;
0271 
0272     /* Figure out mantissa, exponent and divider from given max pkt rate
0273      *
0274      * To achieve desired rate HW adds
0275      * (1+[RATE_MANTISSA]/256)*2^[RATE_EXPONENT] tokens (bytes) at every
0276      * policer timeunit * 2^rdiv ie 2 * 2^rdiv usecs, to the token bucket.
0277      * Here policer timeunit is 2 usecs and rate is in bits per sec.
0278      * Since floating point cannot be used below algorithm uses 1000000
0279      * scale factor to support rates upto 100Gbps.
0280      */
0281     tmp = rate * 32 * 2;
0282     if (tmp < 256000000) {
0283         while (tmp < 256000000) {
0284             tmp = tmp * 2;
0285             div++;
0286         }
0287     } else {
0288         for (exp = 0; tmp >= 512000000 && exp <= MAX_RATE_EXP; exp++)
0289             tmp = tmp / 2;
0290 
0291         if (exp > MAX_RATE_EXP)
0292             exp = MAX_RATE_EXP;
0293     }
0294 
0295     *rate_mantissa = (tmp - 256000000) / 1000000;
0296     *rate_exp = exp;
0297     *rdiv = div;
0298 }
0299 
0300 int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
0301                    u16 policer, bool map)
0302 {
0303     struct nix_cn10k_aq_enq_req *aq;
0304 
0305     aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
0306     if (!aq)
0307         return -ENOMEM;
0308 
0309     /* Enable policing and set the bandwidth profile (policer) index */
0310     if (map)
0311         aq->rq.policer_ena = 1;
0312     else
0313         aq->rq.policer_ena = 0;
0314     aq->rq_mask.policer_ena = 1;
0315 
0316     aq->rq.band_prof_id = policer;
0317     aq->rq_mask.band_prof_id = GENMASK(9, 0);
0318 
0319     /* Fill AQ info */
0320     aq->qidx = rq_idx;
0321     aq->ctype = NIX_AQ_CTYPE_RQ;
0322     aq->op = NIX_AQ_INSTOP_WRITE;
0323 
0324     return otx2_sync_mbox_msg(&pfvf->mbox);
0325 }
0326 
0327 int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf)
0328 {
0329     struct nix_bandprof_free_req *req;
0330 
0331     req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
0332     if (!req)
0333         return -ENOMEM;
0334 
0335     req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
0336     req->prof_idx[BAND_PROF_LEAF_LAYER][0] = leaf;
0337 
0338     return otx2_sync_mbox_msg(&pfvf->mbox);
0339 }
0340 
0341 int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf)
0342 {
0343     struct otx2_hw *hw = &pfvf->hw;
0344     int qidx, rc;
0345 
0346     mutex_lock(&pfvf->mbox.lock);
0347 
0348     /* Remove RQ's policer mapping */
0349     for (qidx = 0; qidx < hw->rx_queues; qidx++)
0350         cn10k_map_unmap_rq_policer(pfvf, qidx,
0351                        hw->matchall_ipolicer, false);
0352 
0353     rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer);
0354 
0355     mutex_unlock(&pfvf->mbox.lock);
0356     return rc;
0357 }
0358 
/* Program bandwidth profile @profile with the given @burst (bytes) and
 * @rate via an AQ WRITE to the BANDPROF context. When @pps is true the
 * profile limits packets/sec instead of bytes/sec.
 * Caller is expected to hold pfvf->mbox.lock (see
 * cn10k_set_matchall_ipolicer_rate()). Returns 0 or a negative errno.
 */
int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
                u32 burst, u64 rate, bool pps)
{
    struct nix_cn10k_aq_enq_req *aq;
    u32 burst_exp, burst_mantissa;
    u32 rate_exp, rate_mantissa;
    u32 rdiv;

    /* Get exponent and mantissa values for the desired rate */
    cn10k_get_ingress_burst_cfg(burst, &burst_exp, &burst_mantissa);
    cn10k_get_ingress_rate_cfg(rate, &rate_exp, &rate_mantissa, &rdiv);

    /* Init bandwidth profile */
    aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
    if (!aq)
        return -ENOMEM;

    /* Set initial color mode to blind */
    aq->prof.icolor = 0x03;
    aq->prof_mask.icolor = 0x03;

    /* Set rate and burst values (CIR = committed information rate,
     * CBS = committed burst size; each *_mask enables that field)
     */
    aq->prof.cir_exponent = rate_exp;
    aq->prof_mask.cir_exponent = 0x1F;

    aq->prof.cir_mantissa = rate_mantissa;
    aq->prof_mask.cir_mantissa = 0xFF;

    aq->prof.cbs_exponent = burst_exp;
    aq->prof_mask.cbs_exponent = 0x1F;

    aq->prof.cbs_mantissa = burst_mantissa;
    aq->prof_mask.cbs_mantissa = 0xFF;

    aq->prof.rdiv = rdiv;
    aq->prof_mask.rdiv = 0xF;

    if (pps) {
        /* The amount of decremented tokens is calculated according to
         * the following equation:
         * max([ LMODE ? 0 : (packet_length - LXPTR)] +
         *       ([ADJUST_MANTISSA]/256 - 1) * 2^[ADJUST_EXPONENT],
         *  1/256)
         * if LMODE is 1 then rate limiting will be based on
         * PPS otherwise bps.
         * The aim of the ADJUST value is to specify a token cost per
         * packet in contrary to the packet length that specifies a
         * cost per byte. To rate limit based on PPS adjust mantissa
         * is set as 384 and exponent as 1 so that number of tokens
         * decremented becomes 1 i.e, 1 token per packeet.
         */
        aq->prof.adjust_exponent = 1;
        aq->prof_mask.adjust_exponent = 0x1F;

        aq->prof.adjust_mantissa = 384;
        aq->prof_mask.adjust_mantissa = 0x1FF;

        aq->prof.lmode = 0x1;
        aq->prof_mask.lmode = 0x1;
    }

    /* Two rate three color marker
     * With PEIR/EIR set to zero, color will be either green or red
     */
    aq->prof.meter_algo = 2;
    aq->prof_mask.meter_algo = 0x3;

    /* Drop red (over-rate) packets, pass yellow and green */
    aq->prof.rc_action = NIX_RX_BAND_PROF_ACTIONRESULT_DROP;
    aq->prof_mask.rc_action = 0x3;

    aq->prof.yc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
    aq->prof_mask.yc_action = 0x3;

    aq->prof.gc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
    aq->prof_mask.gc_action = 0x3;

    /* Setting exponent value as 24 and mantissa as 0 configures
     * the bucket with zero values making bucket unused. Peak
     * information rate and Excess information rate buckets are
     * unused here.
     */
    aq->prof.peir_exponent = 24;
    aq->prof_mask.peir_exponent = 0x1F;

    aq->prof.peir_mantissa = 0;
    aq->prof_mask.peir_mantissa = 0xFF;

    aq->prof.pebs_exponent = 24;
    aq->prof_mask.pebs_exponent = 0x1F;

    aq->prof.pebs_mantissa = 0;
    aq->prof_mask.pebs_mantissa = 0xFF;

    /* Fill AQ info: WRITE to the BANDPROF context at index 'profile' */
    aq->qidx = profile;
    aq->ctype = NIX_AQ_CTYPE_BANDPROF;
    aq->op = NIX_AQ_INSTOP_WRITE;

    return otx2_sync_mbox_msg(&pfvf->mbox);
}
0459 
0460 int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf,
0461                      u32 burst, u64 rate)
0462 {
0463     struct otx2_hw *hw = &pfvf->hw;
0464     int qidx, rc;
0465 
0466     mutex_lock(&pfvf->mbox.lock);
0467 
0468     rc = cn10k_set_ipolicer_rate(pfvf, hw->matchall_ipolicer, burst,
0469                      rate, false);
0470     if (rc)
0471         goto out;
0472 
0473     for (qidx = 0; qidx < hw->rx_queues; qidx++) {
0474         rc = cn10k_map_unmap_rq_policer(pfvf, qidx,
0475                         hw->matchall_ipolicer, true);
0476         if (rc)
0477             break;
0478     }
0479 
0480 out:
0481     mutex_unlock(&pfvf->mbox.lock);
0482     return rc;
0483 }