0001 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
0002 /* Copyright 2019 NXP */
0003 
0004 #include "enetc.h"
0005 
0006 #include <net/pkt_sched.h>
0007 #include <linux/math64.h>
0008 #include <linux/refcount.h>
0009 #include <net/pkt_cls.h>
0010 #include <net/tc_act/tc_gate.h>
0011 
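     /* Read the maximum gate control list length supported by the port from
      * the Qbv port time gating capability register.
      */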
0012 static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
0013 {
0014     return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET)
0015         & ENETC_QBV_MAX_GCL_LEN_MASK;
0016 }
0017 
0018 void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
0019 {
0020     u32 old_speed = priv->speed;
0021     u32 pspeed;
0022 
0023     if (speed == old_speed)
0024         return;
0025 
0026     switch (speed) {
0027     case SPEED_1000:
0028         pspeed = ENETC_PMR_PSPEED_1000M;
0029         break;
0030     case SPEED_2500:
0031         pspeed = ENETC_PMR_PSPEED_2500M;
0032         break;
0033     case SPEED_100:
0034         pspeed = ENETC_PMR_PSPEED_100M;
0035         break;
0036     case SPEED_10:
0037     default:
0038         pspeed = ENETC_PMR_PSPEED_10M;
0039     }
0040 
0041     priv->speed = speed;
0042     enetc_port_wr(&priv->si->hw, ENETC_PMR,
0043               (enetc_port_rd(&priv->si->hw, ENETC_PMR)
0044               & (~ENETC_PMR_PSPEED_MASK))
0045               | pspeed);
0046 }
0047 
0048 static int enetc_setup_taprio(struct net_device *ndev,
0049                   struct tc_taprio_qopt_offload *admin_conf)
0050 {
0051     struct enetc_ndev_priv *priv = netdev_priv(ndev);
0052     struct enetc_cbd cbd = {.cmd = 0};
0053     struct tgs_gcl_conf *gcl_config;
0054     struct tgs_gcl_data *gcl_data;
0055     dma_addr_t dma;
0056     struct gce *gce;
0057     u16 data_size;
0058     u16 gcl_len;
0059     void *tmp;
0060     u32 tge;
0061     int err;
0062     int i;
0063 
0064     if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw))
0065         return -EINVAL;
0066     gcl_len = admin_conf->num_entries;
0067 
0068     tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET);
0069     if (!admin_conf->enable) {
0070         enetc_wr(&priv->si->hw,
0071              ENETC_QBV_PTGCR_OFFSET,
0072              tge & (~ENETC_QBV_TGE));
0073 
0074         priv->active_offloads &= ~ENETC_F_QBV;
0075 
0076         return 0;
0077     }
0078 
0079     if (admin_conf->cycle_time > U32_MAX ||
0080         admin_conf->cycle_time_extension > U32_MAX)
0081         return -EINVAL;
0082 
0083     /* Configure the (administrative) gate control list using the
0084      * control BD descriptor.
0085      */
0086     gcl_config = &cbd.gcl_conf;
0087 
0088     data_size = struct_size(gcl_data, entry, gcl_len);
0089     tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
0090                        &dma, (void *)&gcl_data);
0091     if (!tmp)
0092         return -ENOMEM;
0093 
0094     gce = (struct gce *)(gcl_data + 1);
0095 
0096     /* Set all gates open by default */
0097     gcl_config->atc = 0xff;
0098     gcl_config->acl_len = cpu_to_le16(gcl_len);
0099 
0100     gcl_data->btl = cpu_to_le32(lower_32_bits(admin_conf->base_time));
0101     gcl_data->bth = cpu_to_le32(upper_32_bits(admin_conf->base_time));
0102     gcl_data->ct = cpu_to_le32(admin_conf->cycle_time);
0103     gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension);
0104 
0105     for (i = 0; i < gcl_len; i++) {
0106         struct tc_taprio_sched_entry *temp_entry;
0107         struct gce *temp_gce = gce + i;
0108 
0109         temp_entry = &admin_conf->entries[i];
0110 
0111         temp_gce->gate = (u8)temp_entry->gate_mask;
0112         temp_gce->period = cpu_to_le32(temp_entry->interval);
0113     }
0114 
0115     cbd.status_flags = 0;
0116 
0117     cbd.cls = BDCR_CMD_PORT_GCL;
0118     cbd.status_flags = 0;
0119 
0120     enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
0121          tge | ENETC_QBV_TGE);
0122 
0123     err = enetc_send_cmd(priv->si, &cbd);
0124     if (err)
0125         enetc_wr(&priv->si->hw,
0126              ENETC_QBV_PTGCR_OFFSET,
0127              tge & (~ENETC_QBV_TGE));
0128 
0129     enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
0130 
0131     if (!err)
0132         priv->active_offloads |= ENETC_F_QBV;
0133 
0134     return err;
0135 }
0136 
0137 int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
0138 {
0139     struct tc_taprio_qopt_offload *taprio = type_data;
0140     struct enetc_ndev_priv *priv = netdev_priv(ndev);
0141     int err;
0142     int i;
0143 
0144     /* TSD and Qbv are mutually exclusive in hardware */
0145     for (i = 0; i < priv->num_tx_rings; i++)
0146         if (priv->tx_ring[i]->tsd_enable)
0147             return -EBUSY;
0148 
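         /* When enabling taprio, map each TX BD ring to its own priority;
          * when disabling, reset every ring back to priority 0.
          */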
0149     for (i = 0; i < priv->num_tx_rings; i++)
0150         enetc_set_bdr_prio(&priv->si->hw,
0151                    priv->tx_ring[i]->index,
0152                    taprio->enable ? i : 0);
0153 
0154     err = enetc_setup_taprio(ndev, taprio);
0155 
0156     if (err)
0157         for (i = 0; i < priv->num_tx_rings; i++)
0158             enetc_set_bdr_prio(&priv->si->hw,
0159                        priv->tx_ring[i]->index,
0160                        taprio->enable ? 0 : i);
0161 
0162     return err;
0163 }
0164 
0165 static u32 enetc_get_cbs_enable(struct enetc_hw *hw, u8 tc)
0166 {
0167     return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBSE;
0168 }
0169 
0170 static u8 enetc_get_cbs_bw(struct enetc_hw *hw, u8 tc)
0171 {
0172     return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBS_BW_MASK;
0173 }
0174 
0175 int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
0176 {
0177     struct enetc_ndev_priv *priv = netdev_priv(ndev);
0178     struct tc_cbs_qopt_offload *cbs = type_data;
0179     u32 port_transmit_rate = priv->speed;
0180     u8 tc_nums = netdev_get_num_tc(ndev);
0181     struct enetc_si *si = priv->si;
0182     u32 hi_credit_bit, hi_credit_reg;
0183     u32 max_interference_size;
0184     u32 port_frame_max_size;
0185     u8 tc = cbs->queue;
0186     u8 prio_top, prio_next;
0187     int bw_sum = 0;
0188     u8 bw;
0189 
0190     prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1);
0191     prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2);
0192 
0193     /* Only the highest and second highest prio TCs are supported in CBS mode */
0194     if (tc != prio_top && tc != prio_next)
0195         return -EOPNOTSUPP;
0196 
0197     if (!cbs->enable) {
0198         /* Make sure the other TCs that are numerically
0199          * lower than this TC have been disabled first.
0200          */
0201         if (tc == prio_top &&
0202             enetc_get_cbs_enable(&si->hw, prio_next)) {
0203             dev_err(&ndev->dev,
0204                 "Disable TC%d before disabling TC%d\n",
0205                 prio_next, tc);
0206             return -EINVAL;
0207         }
0208 
0209         enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0);
0210         enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0);
0211 
0212         return 0;
0213     }
0214 
0215     if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L ||
0216         cbs->idleslope < 0 || cbs->sendslope > 0)
0217         return -EOPNOTSUPP;
0218 
0219     port_frame_max_size = ndev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
0220 
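         /* idleslope is in kbit/s and port_transmit_rate is in Mbit/s, so
          * idleslope / (port_transmit_rate * 10) is the reserved bandwidth
          * expressed as a percentage (0..100).
          */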
0221     bw = cbs->idleslope / (port_transmit_rate * 10UL);
0222 
0223     /* Make sure the other TCs that are numerically
0224      * higher than this TC have been enabled first.
0225      */
0226     if (tc == prio_next) {
0227         if (!enetc_get_cbs_enable(&si->hw, prio_top)) {
0228             dev_err(&ndev->dev,
0229                 "Enable TC%d before enabling TC%d\n",
0230                 prio_top, prio_next);
0231             return -EINVAL;
0232         }
0233         bw_sum += enetc_get_cbs_bw(&si->hw, prio_top);
0234     }
0235 
0236     if (bw_sum + bw >= 100) {
0237         dev_err(&ndev->dev,
0238             "The sum of all CBS bandwidths must be less than 100\n");
0239         return -EINVAL;
0240     }
0241 
0242     enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
0243 
0244     /* For the top prio TC, max_interference_size is maxSizedFrame.
0245      *
0246      * For the next prio TC, max_interference_size is calculated as:
0247      *
0248      *      max_interference_size = M0 + Ma + Ra * M0 / (R0 - Ra)
0249      *
0250      *  - Ra: idleSlope for AVB Class A
0251      *  - R0: port transmit rate
0252      *  - M0: maximum sized frame for the port
0253      *  - Ma: maximum sized frame for AVB Class A
0254      */
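         /* Illustrative example (numbers are assumptions, not from the
          * hardware spec): on a 1000 Mbit/s port with 1522-byte maximum
          * frames for both the port and Class A, and Class A reserving 75%
          * of the bandwidth, Ra = 0.75 * R0 and M0 = Ma = 12176 bits, so
          * max_interference_size = 12176 + 12176 + 3 * 12176 = 60880 bits.
          */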
0255 
0256     if (tc == prio_top) {
0257         max_interference_size = port_frame_max_size * 8;
0258     } else {
0259         u32 m0, ma, r0, ra;
0260 
0261         m0 = port_frame_max_size * 8;
0262         ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8;
0263         ra = enetc_get_cbs_bw(&si->hw, prio_top) *
0264             port_transmit_rate * 10000ULL;
0265         r0 = port_transmit_rate * 1000000ULL;
0266         max_interference_size = m0 + ma +
0267             (u32)div_u64((u64)ra * m0, r0 - ra);
0268     }
0269 
0270     /* hiCredit (in bits) is calculated as:
0271      *
0272      * maxInterferenceSize * (idleSlope / portTxRate)
0273      */
0274     hi_credit_bit = max_interference_size * bw / 100;
0275 
0276     /* Converting hiCredit bits to the hiCredit register value
0277      * requires scaling by:
0278      * (enetClockFrequency / portTransmitRate) * 100
0279      */
0280     hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
0281                      port_transmit_rate * 1000000ULL);
0282 
0283     enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
0284 
0285     /* Set bw register and enable this traffic class */
0286     enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
0287 
0288     return 0;
0289 }
0290 
0291 int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
0292 {
0293     struct enetc_ndev_priv *priv = netdev_priv(ndev);
0294     struct tc_etf_qopt_offload *qopt = type_data;
0295     u8 tc_nums = netdev_get_num_tc(ndev);
0296     int tc;
0297 
0298     if (!tc_nums)
0299         return -EOPNOTSUPP;
0300 
0301     tc = qopt->queue;
0302 
0303     if (tc < 0 || tc >= priv->num_tx_rings)
0304         return -EINVAL;
0305 
0306     /* TSD and Qbv are mutually exclusive in hardware */
0307     if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
0308         return -EBUSY;
0309 
0310     priv->tx_ring[tc]->tsd_enable = qopt->enable;
0311     enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc),
0312               qopt->enable ? ENETC_TSDE : 0);
0313 
0314     return 0;
0315 }
0316 
0317 enum streamid_type {
0318     STREAMID_TYPE_RESERVED = 0,
0319     STREAMID_TYPE_NULL,
0320     STREAMID_TYPE_SMAC,
0321 };
0322 
0323 enum streamid_vlan_tagged {
0324     STREAMID_VLAN_RESERVED = 0,
0325     STREAMID_VLAN_TAGGED,
0326     STREAMID_VLAN_UNTAGGED,
0327     STREAMID_VLAN_ALL,
0328 };
0329 
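     /* ENETC_PSFP_WILDCARD marks an unset prio/meter field; stream handles
      * allocated by the driver start at HANDLE_OFFSET.
      */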
0330 #define ENETC_PSFP_WILDCARD -1
0331 #define HANDLE_OFFSET 100
0332 
0333 enum forward_type {
0334     FILTER_ACTION_TYPE_PSFP = BIT(0),
0335     FILTER_ACTION_TYPE_ACL = BIT(1),
0336     FILTER_ACTION_TYPE_BOTH = GENMASK(1, 0),
0337 };
0338 
0339 /* Limits which output (offload) type a given set of input actions may map to */
0340 struct actions_fwd {
0341     u64 actions;
0342     u64 keys;   /* dissector keys that must be present */
0343     enum forward_type output;
0344 };
0345 
0346 struct psfp_streamfilter_counters {
0347     u64 matching_frames_count;
0348     u64 passing_frames_count;
0349     u64 not_passing_frames_count;
0350     u64 passing_sdu_count;
0351     u64 not_passing_sdu_count;
0352     u64 red_frames_count;
0353 };
0354 
0355 struct enetc_streamid {
0356     u32 index;
0357     union {
0358         u8 src_mac[6];
0359         u8 dst_mac[6];
0360     };
0361     u8 filtertype;
0362     u16 vid;
0363     u8 tagged;
0364     s32 handle;
0365 };
0366 
0367 struct enetc_psfp_filter {
0368     u32 index;
0369     s32 handle;
0370     s8 prio;
0371     u32 maxsdu;
0372     u32 gate_id;
0373     s32 meter_id;
0374     refcount_t refcount;
0375     struct hlist_node node;
0376 };
0377 
0378 struct enetc_psfp_gate {
0379     u32 index;
0380     s8 init_ipv;
0381     u64 basetime;
0382     u64 cycletime;
0383     u64 cycletimext;
0384     u32 num_entries;
0385     refcount_t refcount;
0386     struct hlist_node node;
0387     struct action_gate_entry entries[];
0388 };
0389 
0390 /* Only the green color (CIR/CBS) is enabled for now.
0391  * EIR/EBS, color-blind mode, coupling flag, etc. will be added when
0392  * the policing action offloads more parameters.
0393  */
0394 struct enetc_psfp_meter {
0395     u32 index;
0396     u32 cir;
0397     u32 cbs;
0398     refcount_t refcount;
0399     struct hlist_node node;
0400 };
0401 
0402 #define ENETC_PSFP_FLAGS_FMI BIT(0)
0403 
0404 struct enetc_stream_filter {
0405     struct enetc_streamid sid;
0406     u32 sfi_index;
0407     u32 sgi_index;
0408     u32 flags;
0409     u32 fmi_index;
0410     struct flow_stats stats;
0411     struct hlist_node node;
0412 };
0413 
0414 struct enetc_psfp {
0415     unsigned long dev_bitmap;
0416     unsigned long *psfp_sfi_bitmap;
0417     struct hlist_head stream_list;
0418     struct hlist_head psfp_filter_list;
0419     struct hlist_head psfp_gate_list;
0420     struct hlist_head psfp_meter_list;
0421     spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */
0422 };
0423 
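     /* Supported action/key combinations: an entry matches when the rule's
      * action bitmask equals .actions and at least one of the .keys
      * dissector keys is present (see enetc_check_flow_actions()).
      */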
0424 static struct actions_fwd enetc_act_fwd[] = {
0425     {
0426         BIT(FLOW_ACTION_GATE),
0427         BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
0428         FILTER_ACTION_TYPE_PSFP
0429     },
0430     {
0431         BIT(FLOW_ACTION_POLICE) |
0432         BIT(FLOW_ACTION_GATE),
0433         BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
0434         FILTER_ACTION_TYPE_PSFP
0435     },
0436     /* example for ACL actions */
0437     {
0438         BIT(FLOW_ACTION_DROP),
0439         0,
0440         FILTER_ACTION_TYPE_ACL
0441     }
0442 };
0443 
0444 static struct enetc_psfp epsfp = {
0445     .dev_bitmap = 0,
0446     .psfp_sfi_bitmap = NULL,
0447 };
0448 
0449 static LIST_HEAD(enetc_block_cb_list);
0450 
0451 /* Stream Identity Entry Set Descriptor */
0452 static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
0453                  struct enetc_streamid *sid,
0454                  u8 enable)
0455 {
0456     struct enetc_cbd cbd = {.cmd = 0};
0457     struct streamid_data *si_data;
0458     struct streamid_conf *si_conf;
0459     dma_addr_t dma;
0460     u16 data_size;
0461     void *tmp;
0462     int port;
0463     int err;
0464 
0465     port = enetc_pf_to_port(priv->si->pdev);
0466     if (port < 0)
0467         return -EINVAL;
0468 
0469     if (sid->index >= priv->psfp_cap.max_streamid)
0470         return -EINVAL;
0471 
0472     if (sid->filtertype != STREAMID_TYPE_NULL &&
0473         sid->filtertype != STREAMID_TYPE_SMAC)
0474         return -EOPNOTSUPP;
0475 
0476     /* Disable the entry before (re)enabling it */
0477     cbd.index = cpu_to_le16((u16)sid->index);
0478     cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
0479     cbd.status_flags = 0;
0480 
0481     data_size = sizeof(struct streamid_data);
0482     tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
0483                        &dma, (void *)&si_data);
0484     if (!tmp)
0485         return -ENOMEM;
0486 
0487     eth_broadcast_addr(si_data->dmac);
0488     si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK
0489                    + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
0490 
0491     si_conf = &cbd.sid_set;
0492     /* Each entry supports only one port; set this port's bit */
0493     si_conf->iports = cpu_to_le32(1 << port);
0494     si_conf->id_type = 1;
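         /* 00-80-C2 is the OUI assigned to IEEE 802.1 */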
0495     si_conf->oui[2] = 0x0;
0496     si_conf->oui[1] = 0x80;
0497     si_conf->oui[0] = 0xC2;
0498 
0499     err = enetc_send_cmd(priv->si, &cbd);
0500     if (err)
0501         goto out;
0502 
0503     if (!enable)
0504         goto out;
0505 
0506     /* Enable and overwrite the entry again in case it was flushed by hardware */
0507     cbd.status_flags = 0;
0508 
0509     si_conf->en = 0x80;
0510     si_conf->stream_handle = cpu_to_le32(sid->handle);
0511     si_conf->iports = cpu_to_le32(1 << port);
0512     si_conf->id_type = sid->filtertype;
0513     si_conf->oui[2] = 0x0;
0514     si_conf->oui[1] = 0x80;
0515     si_conf->oui[0] = 0xC2;
0516 
0517     memset(si_data, 0, data_size);
0518 
0519     /* VIDM defaults to 1.
0520      * VID Match: if set (b1) the VID must match, otherwise
0521      * any VID is considered a match. The VIDM setting is only used
0522      * when TG is set to b01.
0523      */
0524     if (si_conf->id_type == STREAMID_TYPE_NULL) {
0525         ether_addr_copy(si_data->dmac, sid->dst_mac);
0526         si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
0527                        ((((u16)(sid->tagged) & 0x3) << 14)
0528                        | ENETC_CBDR_SID_VIDM);
0529     } else if (si_conf->id_type == STREAMID_TYPE_SMAC) {
0530         ether_addr_copy(si_data->smac, sid->src_mac);
0531         si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
0532                        ((((u16)(sid->tagged) & 0x3) << 14)
0533                        | ENETC_CBDR_SID_VIDM);
0534     }
0535 
0536     err = enetc_send_cmd(priv->si, &cbd);
0537 out:
0538     enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
0539 
0540     return err;
0541 }
0542 
0543 /* Stream Filter Instance Set Descriptor */
0544 static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
0545                      struct enetc_psfp_filter *sfi,
0546                      u8 enable)
0547 {
0548     struct enetc_cbd cbd = {.cmd = 0};
0549     struct sfi_conf *sfi_config;
0550     int port;
0551 
0552     port = enetc_pf_to_port(priv->si->pdev);
0553     if (port < 0)
0554         return -EINVAL;
0555 
0556     cbd.index = cpu_to_le16(sfi->index);
0557     cbd.cls = BDCR_CMD_STREAM_FILTER;
0558     cbd.status_flags = 0x80;
0559     cbd.length = cpu_to_le16(1);
0560 
0561     sfi_config = &cbd.sfi_conf;
0562     if (!enable)
0563         goto exit;
0564 
0565     sfi_config->en = 0x80;
0566 
0567     if (sfi->handle >= 0) {
0568         sfi_config->stream_handle =
0569             cpu_to_le32(sfi->handle);
0570         sfi_config->sthm |= 0x80;
0571     }
0572 
0573     sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
0574     sfi_config->input_ports = cpu_to_le32(1 << port);
0575 
0576     /* The priority value which may be matched against the
0577      * frame’s priority value to determine a match for this entry.
0578      */
0579     if (sfi->prio >= 0)
0580         sfi_config->multi |= (sfi->prio & 0x7) | 0x8;
0581 
0582     /* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX
0583      * field as being either an MSDU value or an index into the Flow
0584      * Meter Instance table.
0585      */
0586     if (sfi->maxsdu) {
0587         sfi_config->msdu =
0588         cpu_to_le16(sfi->maxsdu);
0589         sfi_config->multi |= 0x40;
0590     }
0591 
0592     if (sfi->meter_id >= 0) {
0593         sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id);
0594         sfi_config->multi |= 0x80;
0595     }
0596 
0597 exit:
0598     return enetc_send_cmd(priv->si, &cbd);
0599 }
0600 
0601 static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
0602                       u32 index,
0603                       struct psfp_streamfilter_counters *cnt)
0604 {
0605     struct enetc_cbd cbd = { .cmd = 2 };
0606     struct sfi_counter_data *data_buf;
0607     dma_addr_t dma;
0608     u16 data_size;
0609     void *tmp;
0610     int err;
0611 
0612     cbd.index = cpu_to_le16((u16)index);
0613     cbd.cmd = 2;
0614     cbd.cls = BDCR_CMD_STREAM_FILTER;
0615     cbd.status_flags = 0;
0616 
0617     data_size = sizeof(struct sfi_counter_data);
0618 
0619     tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
0620                        &dma, (void *)&data_buf);
0621     if (!tmp)
0622         return -ENOMEM;
0623 
0624     err = enetc_send_cmd(priv->si, &cbd);
0625     if (err)
0626         goto exit;
0627 
0628     cnt->matching_frames_count = ((u64)data_buf->matchh << 32) +
0629                      data_buf->matchl;
0630 
0631     cnt->not_passing_sdu_count = ((u64)data_buf->msdu_droph << 32) +
0632                      data_buf->msdu_dropl;
0633 
0634     cnt->passing_sdu_count = cnt->matching_frames_count
0635                 - cnt->not_passing_sdu_count;
0636 
0637     cnt->not_passing_frames_count =
0638                 ((u64)data_buf->stream_gate_droph << 32) +
0639                 data_buf->stream_gate_dropl;
0640 
0641     cnt->passing_frames_count = cnt->matching_frames_count -
0642                     cnt->not_passing_sdu_count -
0643                     cnt->not_passing_frames_count;
0644 
0645     cnt->red_frames_count = ((u64)data_buf->flow_meter_droph << 32) +
0646                 data_buf->flow_meter_dropl;
0647 
0648 exit:
0649     enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
0650 
0651     return err;
0652 }
0653 
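     /* Assemble the current 64-bit hardware timestamp (nanoseconds) from the
      * low and high 32-bit counter registers.
      */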
0654 static u64 get_ptp_now(struct enetc_hw *hw)
0655 {
0656     u64 now_lo, now_hi, now;
0657 
0658     now_lo = enetc_rd(hw, ENETC_SICTR0);
0659     now_hi = enetc_rd(hw, ENETC_SICTR1);
0660     now = now_lo | now_hi << 32;
0661 
0662     return now;
0663 }
0664 
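     /* Round "now" up to the next cycle boundary so that the base time
      * programmed into hardware always lies in the future.
      */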
0665 static int get_start_ns(u64 now, u64 cycle, u64 *start)
0666 {
0667     u64 n;
0668 
0669     if (!cycle)
0670         return -EFAULT;
0671 
0672     n = div64_u64(now, cycle);
0673 
0674     *start = (n + 1) * cycle;
0675 
0676     return 0;
0677 }
0678 
0679 /* Stream Gate Instance Set Descriptor */
0680 static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
0681                    struct enetc_psfp_gate *sgi,
0682                    u8 enable)
0683 {
0684     struct enetc_cbd cbd = { .cmd = 0 };
0685     struct sgi_table *sgi_config;
0686     struct sgcl_conf *sgcl_config;
0687     struct sgcl_data *sgcl_data;
0688     struct sgce *sgce;
0689     dma_addr_t dma;
0690     u16 data_size;
0691     int err, i;
0692     void *tmp;
0693     u64 now;
0694 
0695     cbd.index = cpu_to_le16(sgi->index);
0696     cbd.cmd = 0;
0697     cbd.cls = BDCR_CMD_STREAM_GCL;
0698     cbd.status_flags = 0x80;
0699 
0700     /* disable */
0701     if (!enable)
0702         return enetc_send_cmd(priv->si, &cbd);
0703 
0704     if (!sgi->num_entries)
0705         return 0;
0706 
0707     if (sgi->num_entries > priv->psfp_cap.max_psfp_gatelist ||
0708         !sgi->cycletime)
0709         return -EINVAL;
0710 
0711     /* enable */
0712     sgi_config = &cbd.sgi_table;
0713 
0714     /* Keep gates open before the gate list starts */
0715     sgi_config->ocgtst = 0x80;
0716 
0717     sgi_config->oipv = (sgi->init_ipv < 0) ?
0718                 0x0 : ((sgi->init_ipv & 0x7) | 0x8);
0719 
0720     sgi_config->en = 0x80;
0721 
0722     /* Basic config */
0723     err = enetc_send_cmd(priv->si, &cbd);
0724     if (err)
0725         return -EINVAL;
0726 
0727     memset(&cbd, 0, sizeof(cbd));
0728 
0729     cbd.index = cpu_to_le16(sgi->index);
0730     cbd.cmd = 1;
0731     cbd.cls = BDCR_CMD_STREAM_GCL;
0732     cbd.status_flags = 0;
0733 
0734     sgcl_config = &cbd.sgcl_conf;
0735 
0736     sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;
0737 
0738     data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);
0739     tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
0740                        &dma, (void *)&sgcl_data);
0741     if (!tmp)
0742         return -ENOMEM;
0743 
0744     sgce = &sgcl_data->sgcl[0];
0745 
0746     sgcl_config->agtst = 0x80;
0747 
0748     sgcl_data->ct = sgi->cycletime;
0749     sgcl_data->cte = sgi->cycletimext;
0750 
0751     if (sgi->init_ipv >= 0)
0752         sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8;
0753 
0754     for (i = 0; i < sgi->num_entries; i++) {
0755         struct action_gate_entry *from = &sgi->entries[i];
0756         struct sgce *to = &sgce[i];
0757 
0758         if (from->gate_state)
0759             to->multi |= 0x10;
0760 
0761         if (from->ipv >= 0)
0762             to->multi |= ((from->ipv & 0x7) << 5) | 0x08;
0763 
0764         if (from->maxoctets >= 0) {
0765             to->multi |= 0x01;
0766             to->msdu[0] = from->maxoctets & 0xFF;
0767             to->msdu[1] = (from->maxoctets >> 8) & 0xFF;
0768             to->msdu[2] = (from->maxoctets >> 16) & 0xFF;
0769         }
0770 
0771         to->interval = from->interval;
0772     }
0773 
0774     /* If basetime is less than now, calculate start time */
0775     now = get_ptp_now(&priv->si->hw);
0776 
0777     if (sgi->basetime < now) {
0778         u64 start;
0779 
0780         err = get_start_ns(now, sgi->cycletime, &start);
0781         if (err)
0782             goto exit;
0783         sgcl_data->btl = lower_32_bits(start);
0784         sgcl_data->bth = upper_32_bits(start);
0785     } else {
0786         u32 hi, lo;
0787 
0788         hi = upper_32_bits(sgi->basetime);
0789         lo = lower_32_bits(sgi->basetime);
0790         sgcl_data->bth = hi;
0791         sgcl_data->btl = lo;
0792     }
0793 
0794     err = enetc_send_cmd(priv->si, &cbd);
0795 
0796 exit:
0797     enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
0798     return err;
0799 }
0800 
0801 static int enetc_flowmeter_hw_set(struct enetc_ndev_priv *priv,
0802                   struct enetc_psfp_meter *fmi,
0803                   u8 enable)
0804 {
0805     struct enetc_cbd cbd = { .cmd = 0 };
0806     struct fmi_conf *fmi_config;
0807     u64 temp = 0;
0808 
0809     cbd.index = cpu_to_le16((u16)fmi->index);
0810     cbd.cls = BDCR_CMD_FLOW_METER;
0811     cbd.status_flags = 0x80;
0812 
0813     if (!enable)
0814         return enetc_send_cmd(priv->si, &cbd);
0815 
0816     fmi_config = &cbd.fmi_conf;
0817     fmi_config->en = 0x80;
0818 
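         /* cir from the policing action is in bytes/s; the scaling below
          * (cir * 8000 / 3725) converts it into the units expected by the
          * hardware CIR field.
          */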
0819     if (fmi->cir) {
0820         temp = (u64)8000 * fmi->cir;
0821         temp = div_u64(temp, 3725);
0822     }
0823 
0824     fmi_config->cir = cpu_to_le32((u32)temp);
0825     fmi_config->cbs = cpu_to_le32(fmi->cbs);
0826 
0827     /* EIR and EBS disabled by default */
0828     fmi_config->eir = 0;
0829     fmi_config->ebs = 0;
0830 
0831     /* Defaults:
0832      * mark red: disabled
0833      * drop on yellow: disabled
0834      * color mode: disabled
0835      * couple flag: disabled
0836      */
0837     fmi_config->conf = 0;
0838 
0839     return enetc_send_cmd(priv->si, &cbd);
0840 }
0841 
0842 static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index)
0843 {
0844     struct enetc_stream_filter *f;
0845 
0846     hlist_for_each_entry(f, &epsfp.stream_list, node)
0847         if (f->sid.index == index)
0848             return f;
0849 
0850     return NULL;
0851 }
0852 
0853 static struct enetc_psfp_gate *enetc_get_gate_by_index(u32 index)
0854 {
0855     struct enetc_psfp_gate *g;
0856 
0857     hlist_for_each_entry(g, &epsfp.psfp_gate_list, node)
0858         if (g->index == index)
0859             return g;
0860 
0861     return NULL;
0862 }
0863 
0864 static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index)
0865 {
0866     struct enetc_psfp_filter *s;
0867 
0868     hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
0869         if (s->index == index)
0870             return s;
0871 
0872     return NULL;
0873 }
0874 
0875 static struct enetc_psfp_meter *enetc_get_meter_by_index(u32 index)
0876 {
0877     struct enetc_psfp_meter *m;
0878 
0879     hlist_for_each_entry(m, &epsfp.psfp_meter_list, node)
0880         if (m->index == index)
0881             return m;
0882 
0883     return NULL;
0884 }
0885 
0886 static struct enetc_psfp_filter
0887     *enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi)
0888 {
0889     struct enetc_psfp_filter *s;
0890 
0891     hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
0892         if (s->gate_id == sfi->gate_id &&
0893             s->prio == sfi->prio &&
0894             s->maxsdu == sfi->maxsdu &&
0895             s->meter_id == sfi->meter_id)
0896             return s;
0897 
0898     return NULL;
0899 }
0900 
0901 static int enetc_get_free_index(struct enetc_ndev_priv *priv)
0902 {
0903     u32 max_size = priv->psfp_cap.max_psfp_filter;
0904     unsigned long index;
0905 
0906     index = find_first_zero_bit(epsfp.psfp_sfi_bitmap, max_size);
0907     if (index == max_size)
0908         return -1;
0909 
0910     return index;
0911 }
0912 
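     /* Drop one reference on the stream filter at @index; when the last
      * reference goes away, disable the hardware entry and free it.
      */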
0913 static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index)
0914 {
0915     struct enetc_psfp_filter *sfi;
0916     u8 z;
0917 
0918     sfi = enetc_get_filter_by_index(index);
0919     WARN_ON(!sfi);
0920     z = refcount_dec_and_test(&sfi->refcount);
0921 
0922     if (z) {
0923         enetc_streamfilter_hw_set(priv, sfi, false);
0924         hlist_del(&sfi->node);
0925         kfree(sfi);
0926         clear_bit(index, epsfp.psfp_sfi_bitmap);
0927     }
0928 }
0929 
0930 static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index)
0931 {
0932     struct enetc_psfp_gate *sgi;
0933     u8 z;
0934 
0935     sgi = enetc_get_gate_by_index(index);
0936     WARN_ON(!sgi);
0937     z = refcount_dec_and_test(&sgi->refcount);
0938     if (z) {
0939         enetc_streamgate_hw_set(priv, sgi, false);
0940         hlist_del(&sgi->node);
0941         kfree(sgi);
0942     }
0943 }
0944 
0945 static void flow_meter_unref(struct enetc_ndev_priv *priv, u32 index)
0946 {
0947     struct enetc_psfp_meter *fmi;
0948     u8 z;
0949 
0950     fmi = enetc_get_meter_by_index(index);
0951     WARN_ON(!fmi);
0952     z = refcount_dec_and_test(&fmi->refcount);
0953     if (z) {
0954         enetc_flowmeter_hw_set(priv, fmi, false);
0955         hlist_del(&fmi->node);
0956         kfree(fmi);
0957     }
0958 }
0959 
0960 static void remove_one_chain(struct enetc_ndev_priv *priv,
0961                  struct enetc_stream_filter *filter)
0962 {
0963     if (filter->flags & ENETC_PSFP_FLAGS_FMI)
0964         flow_meter_unref(priv, filter->fmi_index);
0965 
0966     stream_gate_unref(priv, filter->sgi_index);
0967     stream_filter_unref(priv, filter->sfi_index);
0968 
0969     hlist_del(&filter->node);
0970     kfree(filter);
0971 }
0972 
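     /* Program the stream identification, stream filter, stream gate and
      * flow meter entries in order; on any failure, roll back the entries
      * that were already written so hardware state stays consistent.
      */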
0973 static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv,
0974                  struct enetc_streamid *sid,
0975                  struct enetc_psfp_filter *sfi,
0976                  struct enetc_psfp_gate *sgi,
0977                  struct enetc_psfp_meter *fmi)
0978 {
0979     int err;
0980 
0981     err = enetc_streamid_hw_set(priv, sid, true);
0982     if (err)
0983         return err;
0984 
0985     if (sfi) {
0986         err = enetc_streamfilter_hw_set(priv, sfi, true);
0987         if (err)
0988             goto revert_sid;
0989     }
0990 
0991     err = enetc_streamgate_hw_set(priv, sgi, true);
0992     if (err)
0993         goto revert_sfi;
0994 
0995     if (fmi) {
0996         err = enetc_flowmeter_hw_set(priv, fmi, true);
0997         if (err)
0998             goto revert_sgi;
0999     }
1000 
1001     return 0;
1002 
1003 revert_sgi:
1004     enetc_streamgate_hw_set(priv, sgi, false);
1005 revert_sfi:
1006     if (sfi)
1007         enetc_streamfilter_hw_set(priv, sfi, false);
1008 revert_sid:
1009     enetc_streamid_hw_set(priv, sid, false);
1010     return err;
1011 }
1012 
1013 static struct actions_fwd *enetc_check_flow_actions(u64 acts,
1014                             unsigned int inputkeys)
1015 {
1016     int i;
1017 
1018     for (i = 0; i < ARRAY_SIZE(enetc_act_fwd); i++)
1019         if (acts == enetc_act_fwd[i].actions &&
1020             inputkeys & enetc_act_fwd[i].keys)
1021             return &enetc_act_fwd[i];
1022 
1023     return NULL;
1024 }
1025 
1026 static int enetc_psfp_policer_validate(const struct flow_action *action,
1027                        const struct flow_action_entry *act,
1028                        struct netlink_ext_ack *extack)
1029 {
1030     if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
1031         NL_SET_ERR_MSG_MOD(extack,
1032                    "Offload not supported when exceed action is not drop");
1033         return -EOPNOTSUPP;
1034     }
1035 
1036     if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
1037         act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
1038         NL_SET_ERR_MSG_MOD(extack,
1039                    "Offload not supported when conform action is not pipe or ok");
1040         return -EOPNOTSUPP;
1041     }
1042 
1043     if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
1044         !flow_action_is_last_entry(action, act)) {
1045         NL_SET_ERR_MSG_MOD(extack,
1046                    "Offload not supported when conform action is ok, but action is not last");
1047         return -EOPNOTSUPP;
1048     }
1049 
1050     if (act->police.peakrate_bytes_ps ||
1051         act->police.avrate || act->police.overhead) {
1052         NL_SET_ERR_MSG_MOD(extack,
1053                    "Offload not supported when peakrate/avrate/overhead is configured");
1054         return -EOPNOTSUPP;
1055     }
1056 
1057     if (act->police.rate_pkt_ps) {
1058         NL_SET_ERR_MSG_MOD(extack,
1059                    "QoS offload does not support packets-per-second policing");
1060         return -EOPNOTSUPP;
1061     }
1062 
1063     return 0;
1064 }
1065 
1066 static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
1067                       struct flow_cls_offload *f)
1068 {
1069     struct flow_action_entry *entryg = NULL, *entryp = NULL;
1070     struct flow_rule *rule = flow_cls_offload_flow_rule(f);
1071     struct netlink_ext_ack *extack = f->common.extack;
1072     struct enetc_stream_filter *filter, *old_filter;
1073     struct enetc_psfp_meter *fmi = NULL, *old_fmi;
1074     struct enetc_psfp_filter *sfi, *old_sfi;
1075     struct enetc_psfp_gate *sgi, *old_sgi;
1076     struct flow_action_entry *entry;
1077     struct action_gate_entry *e;
1078     u8 sfi_overwrite = 0;
1079     int entries_size;
1080     int i, err;
1081 
1082     if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
1083         NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
1084         return -ENOSPC;
1085     }
1086 
1087     flow_action_for_each(i, entry, &rule->action)
1088         if (entry->id == FLOW_ACTION_GATE)
1089             entryg = entry;
1090         else if (entry->id == FLOW_ACTION_POLICE)
1091             entryp = entry;
1092 
1093     /* A gate action is mandatory; reject rules without one */
1094     if (!entryg)
1095         return -EINVAL;
1096 
1097     filter = kzalloc(sizeof(*filter), GFP_KERNEL);
1098     if (!filter)
1099         return -ENOMEM;
1100 
1101     filter->sid.index = f->common.chain_index;
1102 
1103     if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1104         struct flow_match_eth_addrs match;
1105 
1106         flow_rule_match_eth_addrs(rule, &match);
1107 
1108         if (!is_zero_ether_addr(match.mask->dst) &&
1109             !is_zero_ether_addr(match.mask->src)) {
1110             NL_SET_ERR_MSG_MOD(extack,
1111                        "Cannot match on both source and destination MAC");
1112             err = -EINVAL;
1113             goto free_filter;
1114         }
1115 
1116         if (!is_zero_ether_addr(match.mask->dst)) {
1117             if (!is_broadcast_ether_addr(match.mask->dst)) {
1118                 NL_SET_ERR_MSG_MOD(extack,
1119                            "Masked matching on destination MAC not supported");
1120                 err = -EINVAL;
1121                 goto free_filter;
1122             }
1123             ether_addr_copy(filter->sid.dst_mac, match.key->dst);
1124             filter->sid.filtertype = STREAMID_TYPE_NULL;
1125         }
1126 
1127         if (!is_zero_ether_addr(match.mask->src)) {
1128             if (!is_broadcast_ether_addr(match.mask->src)) {
1129                 NL_SET_ERR_MSG_MOD(extack,
1130                            "Masked matching on source MAC not supported");
1131                 err = -EINVAL;
1132                 goto free_filter;
1133             }
1134             ether_addr_copy(filter->sid.src_mac, match.key->src);
1135             filter->sid.filtertype = STREAMID_TYPE_SMAC;
1136         }
1137     } else {
1138         NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS");
1139         err = -EINVAL;
1140         goto free_filter;
1141     }
1142 
1143     if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
1144         struct flow_match_vlan match;
1145 
1146         flow_rule_match_vlan(rule, &match);
1147         if (match.mask->vlan_priority) {
1148             if (match.mask->vlan_priority !=
1149                 (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)) {
1150                 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
1151                 err = -EINVAL;
1152                 goto free_filter;
1153             }
1154         }
1155 
1156         if (match.mask->vlan_id) {
1157             if (match.mask->vlan_id != VLAN_VID_MASK) {
1158                 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN id");
1159                 err = -EINVAL;
1160                 goto free_filter;
1161             }
1162 
1163             filter->sid.vid = match.key->vlan_id;
1164             if (!filter->sid.vid)
1165                 filter->sid.tagged = STREAMID_VLAN_UNTAGGED;
1166             else
1167                 filter->sid.tagged = STREAMID_VLAN_TAGGED;
1168         }
1169     } else {
1170         filter->sid.tagged = STREAMID_VLAN_ALL;
1171     }
1172 
1173     /* parsing gate action */
1174     if (entryg->hw_index >= priv->psfp_cap.max_psfp_gate) {
1175         NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
1176         err = -ENOSPC;
1177         goto free_filter;
1178     }
1179 
1180     if (entryg->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) {
1181         NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
1182         err = -ENOSPC;
1183         goto free_filter;
1184     }
1185 
1186     entries_size = struct_size(sgi, entries, entryg->gate.num_entries);
1187     sgi = kzalloc(entries_size, GFP_KERNEL);
1188     if (!sgi) {
1189         err = -ENOMEM;
1190         goto free_filter;
1191     }
1192 
1193     refcount_set(&sgi->refcount, 1);
1194     sgi->index = entryg->hw_index;
1195     sgi->init_ipv = entryg->gate.prio;
1196     sgi->basetime = entryg->gate.basetime;
1197     sgi->cycletime = entryg->gate.cycletime;
1198     sgi->num_entries = entryg->gate.num_entries;
1199 
1200     e = sgi->entries;
1201     for (i = 0; i < entryg->gate.num_entries; i++) {
1202         e[i].gate_state = entryg->gate.entries[i].gate_state;
1203         e[i].interval = entryg->gate.entries[i].interval;
1204         e[i].ipv = entryg->gate.entries[i].ipv;
1205         e[i].maxoctets = entryg->gate.entries[i].maxoctets;
1206     }
1207 
1208     filter->sgi_index = sgi->index;
1209 
1210     sfi = kzalloc(sizeof(*sfi), GFP_KERNEL);
1211     if (!sfi) {
1212         err = -ENOMEM;
1213         goto free_gate;
1214     }
1215 
1216     refcount_set(&sfi->refcount, 1);
1217     sfi->gate_id = sgi->index;
1218     sfi->meter_id = ENETC_PSFP_WILDCARD;
1219 
1220     /* Flow meter and max frame size */
1221     if (entryp) {
1222         err = enetc_psfp_policer_validate(&rule->action, entryp, extack);
1223         if (err)
1224             goto free_sfi;
1225 
1226         if (entryp->police.burst) {
1227             fmi = kzalloc(sizeof(*fmi), GFP_KERNEL);
1228             if (!fmi) {
1229                 err = -ENOMEM;
1230                 goto free_sfi;
1231             }
1232             refcount_set(&fmi->refcount, 1);
1233             fmi->cir = entryp->police.rate_bytes_ps;
1234             fmi->cbs = entryp->police.burst;
1235             fmi->index = entryp->hw_index;
1236             filter->flags |= ENETC_PSFP_FLAGS_FMI;
1237             filter->fmi_index = fmi->index;
1238             sfi->meter_id = fmi->index;
1239         }
1240 
1241         if (entryp->police.mtu)
1242             sfi->maxsdu = entryp->police.mtu;
1243     }
1244 
1245     /* prio refers to the flower filter priority */
1246     if (f->common.prio && f->common.prio <= BIT(3))
1247         sfi->prio = f->common.prio - 1;
1248     else
1249         sfi->prio = ENETC_PSFP_WILDCARD;
1250 
1251     old_sfi = enetc_psfp_check_sfi(sfi);
1252     if (!old_sfi) {
1253         int index;
1254 
1255         index = enetc_get_free_index(priv);
1256         if (index < 0) {
1257             NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
1258             err = -ENOSPC;
1259             goto free_fmi;
1260         }
1261 
1262         sfi->index = index;
1263         sfi->handle = index + HANDLE_OFFSET;
1264         /* Update the stream filter handle also */
1265         filter->sid.handle = sfi->handle;
1266         filter->sfi_index = sfi->index;
1267         sfi_overwrite = 0;
1268     } else {
1269         filter->sfi_index = old_sfi->index;
1270         filter->sid.handle = old_sfi->handle;
1271         sfi_overwrite = 1;
1272     }
1273 
1274     err = enetc_psfp_hw_set(priv, &filter->sid,
1275                 sfi_overwrite ? NULL : sfi, sgi, fmi);
1276     if (err)
1277         goto free_fmi;
1278 
1279     spin_lock(&epsfp.psfp_lock);
1280     if (filter->flags & ENETC_PSFP_FLAGS_FMI) {
1281         old_fmi = enetc_get_meter_by_index(filter->fmi_index);
1282         if (old_fmi) {
1283             fmi->refcount = old_fmi->refcount;
1284             refcount_set(&fmi->refcount,
1285                      refcount_read(&old_fmi->refcount) + 1);
1286             hlist_del(&old_fmi->node);
1287             kfree(old_fmi);
1288         }
1289         hlist_add_head(&fmi->node, &epsfp.psfp_meter_list);
1290     }
1291 
1292     /* Remove the old node if it exists and replace it with the new one */
1293     old_sgi = enetc_get_gate_by_index(filter->sgi_index);
1294     if (old_sgi) {
1295         refcount_set(&sgi->refcount,
1296                  refcount_read(&old_sgi->refcount) + 1);
1297         hlist_del(&old_sgi->node);
1298         kfree(old_sgi);
1299     }
1300 
1301     hlist_add_head(&sgi->node, &epsfp.psfp_gate_list);
1302 
1303     if (!old_sfi) {
1304         hlist_add_head(&sfi->node, &epsfp.psfp_filter_list);
1305         set_bit(sfi->index, epsfp.psfp_sfi_bitmap);
1306     } else {
1307         kfree(sfi);
1308         refcount_inc(&old_sfi->refcount);
1309     }
1310 
1311     old_filter = enetc_get_stream_by_index(filter->sid.index);
1312     if (old_filter)
1313         remove_one_chain(priv, old_filter);
1314 
1315     filter->stats.lastused = jiffies;
1316     hlist_add_head(&filter->node, &epsfp.stream_list);
1317 
1318     spin_unlock(&epsfp.psfp_lock);
1319 
1320     return 0;
1321 
1322 free_fmi:
1323     kfree(fmi);
1324 free_sfi:
1325     kfree(sfi);
1326 free_gate:
1327     kfree(sgi);
1328 free_filter:
1329     kfree(filter);
1330 
1331     return err;
1332 }
1333 
1334 static int enetc_config_clsflower(struct enetc_ndev_priv *priv,
1335                   struct flow_cls_offload *cls_flower)
1336 {
1337     struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
1338     struct netlink_ext_ack *extack = cls_flower->common.extack;
1339     struct flow_dissector *dissector = rule->match.dissector;
1340     struct flow_action *action = &rule->action;
1341     struct flow_action_entry *entry;
1342     struct actions_fwd *fwd;
1343     u64 actions = 0;
1344     int i, err;
1345 
1346     if (!flow_action_has_entries(action)) {
1347         NL_SET_ERR_MSG_MOD(extack, "At least one action is needed");
1348         return -EINVAL;
1349     }
1350 
1351     flow_action_for_each(i, entry, action)
1352         actions |= BIT(entry->id);
1353 
1354     fwd = enetc_check_flow_actions(actions, dissector->used_keys);
1355     if (!fwd) {
1356         NL_SET_ERR_MSG_MOD(extack, "Unsupported filter type!");
1357         return -EOPNOTSUPP;
1358     }
1359 
1360     if (fwd->output & FILTER_ACTION_TYPE_PSFP) {
1361         err = enetc_psfp_parse_clsflower(priv, cls_flower);
1362         if (err) {
1363             NL_SET_ERR_MSG_MOD(extack, "Invalid PSFP inputs");
1364             return err;
1365         }
1366     } else {
1367         NL_SET_ERR_MSG_MOD(extack, "Unsupported actions");
1368         return -EOPNOTSUPP;
1369     }
1370 
1371     return 0;
1372 }
1373 
1374 static int enetc_psfp_destroy_clsflower(struct enetc_ndev_priv *priv,
1375                     struct flow_cls_offload *f)
1376 {
1377     struct enetc_stream_filter *filter;
1378     struct netlink_ext_ack *extack = f->common.extack;
1379     int err;
1380 
1381     if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
1382         NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
1383         return -ENOSPC;
1384     }
1385 
1386     filter = enetc_get_stream_by_index(f->common.chain_index);
1387     if (!filter)
1388         return -EINVAL;
1389 
1390     err = enetc_streamid_hw_set(priv, &filter->sid, false);
1391     if (err)
1392         return err;
1393 
1394     remove_one_chain(priv, filter);
1395 
1396     return 0;
1397 }
1398 
1399 static int enetc_destroy_clsflower(struct enetc_ndev_priv *priv,
1400                    struct flow_cls_offload *f)
1401 {
1402     return enetc_psfp_destroy_clsflower(priv, f);
1403 }
1404 
1405 static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv,
1406                 struct flow_cls_offload *f)
1407 {
1408     struct psfp_streamfilter_counters counters = {};
1409     struct enetc_stream_filter *filter;
1410     struct flow_stats stats = {};
1411     int err;
1412 
1413     filter = enetc_get_stream_by_index(f->common.chain_index);
1414     if (!filter)
1415         return -EINVAL;
1416 
1417     err = enetc_streamcounter_hw_get(priv, filter->sfi_index, &counters);
1418     if (err)
1419         return -EINVAL;
1420 
1421     spin_lock(&epsfp.psfp_lock);
1422     stats.pkts = counters.matching_frames_count +
1423              counters.not_passing_sdu_count -
1424              filter->stats.pkts;
1425     stats.drops = counters.not_passing_frames_count +
1426               counters.not_passing_sdu_count +
1427               counters.red_frames_count -
1428               filter->stats.drops;
1429     stats.lastused = filter->stats.lastused;
1430     filter->stats.pkts += stats.pkts;
1431     filter->stats.drops += stats.drops;
1432     spin_unlock(&epsfp.psfp_lock);
1433 
1434     flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops,
1435               stats.lastused, FLOW_ACTION_HW_STATS_DELAYED);
1436 
1437     return 0;
1438 }
1439 
1440 static int enetc_setup_tc_cls_flower(struct enetc_ndev_priv *priv,
1441                      struct flow_cls_offload *cls_flower)
1442 {
1443     switch (cls_flower->command) {
1444     case FLOW_CLS_REPLACE:
1445         return enetc_config_clsflower(priv, cls_flower);
1446     case FLOW_CLS_DESTROY:
1447         return enetc_destroy_clsflower(priv, cls_flower);
1448     case FLOW_CLS_STATS:
1449         return enetc_psfp_get_stats(priv, cls_flower);
1450     default:
1451         return -EOPNOTSUPP;
1452     }
1453 }
1454 
1455 static inline void clean_psfp_sfi_bitmap(void)
1456 {
1457     bitmap_free(epsfp.psfp_sfi_bitmap);
1458     epsfp.psfp_sfi_bitmap = NULL;
1459 }
1460 
1461 static void clean_stream_list(void)
1462 {
1463     struct enetc_stream_filter *s;
1464     struct hlist_node *tmp;
1465 
1466     hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) {
1467         hlist_del(&s->node);
1468         kfree(s);
1469     }
1470 }
1471 
1472 static void clean_sfi_list(void)
1473 {
1474     struct enetc_psfp_filter *sfi;
1475     struct hlist_node *tmp;
1476 
1477     hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) {
1478         hlist_del(&sfi->node);
1479         kfree(sfi);
1480     }
1481 }
1482 
1483 static void clean_sgi_list(void)
1484 {
1485     struct enetc_psfp_gate *sgi;
1486     struct hlist_node *tmp;
1487 
1488     hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) {
1489         hlist_del(&sgi->node);
1490         kfree(sgi);
1491     }
1492 }
1493 
1494 static void clean_psfp_all(void)
1495 {
1496     /* Disable all list nodes and free all memory */
1497     clean_sfi_list();
1498     clean_sgi_list();
1499     clean_stream_list();
1500     epsfp.dev_bitmap = 0;
1501     clean_psfp_sfi_bitmap();
1502 }
1503 
1504 int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1505                 void *cb_priv)
1506 {
1507     struct net_device *ndev = cb_priv;
1508 
1509     if (!tc_can_offload(ndev))
1510         return -EOPNOTSUPP;
1511 
1512     switch (type) {
1513     case TC_SETUP_CLSFLOWER:
1514         return enetc_setup_tc_cls_flower(netdev_priv(ndev), type_data);
1515     default:
1516         return -EOPNOTSUPP;
1517     }
1518 }
1519 
1520 int enetc_set_psfp(struct net_device *ndev, bool en)
1521 {
1522     struct enetc_ndev_priv *priv = netdev_priv(ndev);
1523     int err;
1524 
1525     if (en) {
1526         err = enetc_psfp_enable(priv);
1527         if (err)
1528             return err;
1529 
1530         priv->active_offloads |= ENETC_F_QCI;
1531         return 0;
1532     }
1533 
1534     err = enetc_psfp_disable(priv);
1535     if (err)
1536         return err;
1537 
1538     priv->active_offloads &= ~ENETC_F_QCI;
1539 
1540     return 0;
1541 }
1542 
1543 int enetc_psfp_init(struct enetc_ndev_priv *priv)
1544 {
1545     if (epsfp.psfp_sfi_bitmap)
1546         return 0;
1547 
1548     epsfp.psfp_sfi_bitmap = bitmap_zalloc(priv->psfp_cap.max_psfp_filter,
1549                           GFP_KERNEL);
1550     if (!epsfp.psfp_sfi_bitmap)
1551         return -ENOMEM;
1552 
1553     spin_lock_init(&epsfp.psfp_lock);
1554 
1555     if (list_empty(&enetc_block_cb_list))
1556         epsfp.dev_bitmap = 0;
1557 
1558     return 0;
1559 }
1560 
1561 int enetc_psfp_clean(struct enetc_ndev_priv *priv)
1562 {
1563     if (!list_empty(&enetc_block_cb_list))
1564         return -EBUSY;
1565 
1566     clean_psfp_all();
1567 
1568     return 0;
1569 }
1570 
1571 int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
1572 {
1573     struct enetc_ndev_priv *priv = netdev_priv(ndev);
1574     struct flow_block_offload *f = type_data;
1575     int port, err;
1576 
1577     err = flow_block_cb_setup_simple(f, &enetc_block_cb_list,
1578                      enetc_setup_tc_block_cb,
1579                      ndev, ndev, true);
1580     if (err)
1581         return err;
1582 
1583     switch (f->command) {
1584     case FLOW_BLOCK_BIND:
1585         port = enetc_pf_to_port(priv->si->pdev);
1586         if (port < 0)
1587             return -EINVAL;
1588 
1589         set_bit(port, &epsfp.dev_bitmap);
1590         break;
1591     case FLOW_BLOCK_UNBIND:
1592         port = enetc_pf_to_port(priv->si->pdev);
1593         if (port < 0)
1594             return -EINVAL;
1595 
1596         clear_bit(port, &epsfp.dev_bitmap);
1597         if (!epsfp.dev_bitmap)
1598             clean_psfp_all();
1599         break;
1600     }
1601 
1602     return 0;
1603 }