Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Header Parser helpers for Marvell PPv2 Network Controller
0004  *
0005  * Copyright (C) 2014 Marvell
0006  *
0007  * Marcin Wojtas <mw@semihalf.com>
0008  */
0009 
0010 #include <linux/kernel.h>
0011 #include <linux/netdevice.h>
0012 #include <linux/etherdevice.h>
0013 #include <linux/platform_device.h>
0014 #include <uapi/linux/ppp_defs.h>
0015 #include <net/ip.h>
0016 #include <net/ipv6.h>
0017 
0018 #include "mvpp2.h"
0019 #include "mvpp2_prs.h"
0020 
0021 /* Update parser tcam and sram hw entries */
0022 static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
0023 {
0024     int i;
0025 
0026     if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
0027         return -EINVAL;
0028 
0029     /* Clear entry invalidation bit */
0030     pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
0031 
0032     /* Write sram index - indirect access */
0033     mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
0034     for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
0035         mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
0036 
0037     /* Write tcam index - indirect access */
0038     mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
0039     for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
0040         mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
0041 
0042     return 0;
0043 }
0044 
0045 /* Initialize tcam entry from hw */
0046 int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
0047                int tid)
0048 {
0049     int i;
0050 
0051     if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
0052         return -EINVAL;
0053 
0054     memset(pe, 0, sizeof(*pe));
0055     pe->index = tid;
0056 
0057     /* Write tcam index - indirect access */
0058     mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
0059 
0060     pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
0061                   MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
0062     if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
0063         return MVPP2_PRS_TCAM_ENTRY_INVALID;
0064 
0065     for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
0066         pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
0067 
0068     /* Write sram index - indirect access */
0069     mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
0070     for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
0071         pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
0072 
0073     return 0;
0074 }
0075 
0076 /* Invalidate tcam hw entry */
0077 static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
0078 {
0079     /* Write index - indirect access */
0080     mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
0081     mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
0082             MVPP2_PRS_TCAM_INV_MASK);
0083 }
0084 
0085 /* Enable shadow table entry and set its lookup ID */
0086 static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
0087 {
0088     priv->prs_shadow[index].valid = true;
0089     priv->prs_shadow[index].lu = lu;
0090 }
0091 
0092 /* Update ri fields in shadow table entry */
0093 static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
0094                     unsigned int ri, unsigned int ri_mask)
0095 {
0096     priv->prs_shadow[index].ri_mask = ri_mask;
0097     priv->prs_shadow[index].ri = ri;
0098 }
0099 
0100 /* Update lookup field in tcam sw entry */
0101 static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
0102 {
0103     pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
0104     pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
0105     pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
0106     pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
0107 }
0108 
0109 /* Update mask for single port in tcam sw entry */
0110 static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
0111                     unsigned int port, bool add)
0112 {
0113     if (add)
0114         pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port));
0115     else
0116         pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port));
0117 }
0118 
0119 /* Update port map in tcam sw entry */
0120 static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
0121                     unsigned int ports)
0122 {
0123     pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
0124     pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
0125     pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
0126 }
0127 
0128 /* Obtain port map from tcam sw entry */
0129 unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
0130 {
0131     return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
0132 }
0133 
0134 /* Set byte of data and its enable bits in tcam sw entry */
0135 static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
0136                      unsigned int offs, unsigned char byte,
0137                      unsigned char enable)
0138 {
0139     int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
0140 
0141     pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
0142     pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
0143     pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
0144     pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
0145 }
0146 
0147 /* Get byte of data and its enable bits from tcam sw entry */
0148 void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
0149                   unsigned int offs, unsigned char *byte,
0150                   unsigned char *enable)
0151 {
0152     int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
0153 
0154     *byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
0155     *enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
0156 }
0157 
0158 /* Compare tcam data bytes with a pattern */
0159 static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
0160                     u16 data)
0161 {
0162     u16 tcam_data;
0163 
0164     tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
0165     return tcam_data == data;
0166 }
0167 
0168 /* Update ai bits in tcam sw entry */
0169 static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
0170                      unsigned int bits, unsigned int enable)
0171 {
0172     int i;
0173 
0174     for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
0175         if (!(enable & BIT(i)))
0176             continue;
0177 
0178         if (bits & BIT(i))
0179             pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
0180         else
0181             pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
0182     }
0183 
0184     pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
0185 }
0186 
0187 /* Get ai bits from tcam sw entry */
0188 static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
0189 {
0190     return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
0191 }
0192 
/* Match a big-endian ethertype at @offset in the tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	/* High byte first (network order), both bytes fully enabled */
	mvpp2_prs_tcam_data_byte_set(pe, offset, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
0200 
/* Match a 12-bit VLAN ID at @offset in the tcam sw entry */
static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
				unsigned short vid)
{
	/* Upper nibble of the VID (only 4 bits enabled), then the low byte */
	mvpp2_prs_tcam_data_byte_set(pe, offset, (vid & 0xf00) >> 8, 0xf);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
}
0208 
0209 /* Set bits in sram sw entry */
0210 static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
0211                     u32 val)
0212 {
0213     pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num)));
0214 }
0215 
0216 /* Clear bits in sram sw entry */
0217 static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
0218                       u32 val)
0219 {
0220     pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num)));
0221 }
0222 
0223 /* Update ri bits in sram sw entry */
0224 static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
0225                      unsigned int bits, unsigned int mask)
0226 {
0227     unsigned int i;
0228 
0229     for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
0230         if (!(mask & BIT(i)))
0231             continue;
0232 
0233         if (bits & BIT(i))
0234             mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
0235                         1);
0236         else
0237             mvpp2_prs_sram_bits_clear(pe,
0238                           MVPP2_PRS_SRAM_RI_OFFS + i,
0239                           1);
0240 
0241         mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
0242     }
0243 }
0244 
0245 /* Obtain ri bits from sram sw entry */
0246 static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
0247 {
0248     return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
0249 }
0250 
0251 /* Update ai bits in sram sw entry */
0252 static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
0253                      unsigned int bits, unsigned int mask)
0254 {
0255     unsigned int i;
0256 
0257     for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
0258         if (!(mask & BIT(i)))
0259             continue;
0260 
0261         if (bits & BIT(i))
0262             mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
0263                         1);
0264         else
0265             mvpp2_prs_sram_bits_clear(pe,
0266                           MVPP2_PRS_SRAM_AI_OFFS + i,
0267                           1);
0268 
0269         mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
0270     }
0271 }
0272 
0273 /* Read ai bits from sram sw entry */
0274 static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
0275 {
0276     u8 bits;
0277     /* ai is stored on bits 90->97; so it spreads across two u32 */
0278     int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
0279     int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);
0280 
0281     bits = (pe->sram[ai_off] >> ai_shift) |
0282            (pe->sram[ai_off + 1] << (32 - ai_shift));
0283 
0284     return bits;
0285 }
0286 
0287 /* In sram sw entry set lookup ID field of the tcam key to be used in the next
0288  * lookup interation
0289  */
0290 static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
0291                        unsigned int lu)
0292 {
0293     int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
0294 
0295     mvpp2_prs_sram_bits_clear(pe, sram_next_off,
0296                   MVPP2_PRS_SRAM_NEXT_LU_MASK);
0297     mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
0298 }
0299 
0300 /* In the sram sw entry set sign and value of the next lookup offset
0301  * and the offset value generated to the classifier
0302  */
0303 static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
0304                      unsigned int op)
0305 {
0306     /* Set sign */
0307     if (shift < 0) {
0308         mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
0309         shift = 0 - shift;
0310     } else {
0311         mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
0312     }
0313 
0314     /* Set value */
0315     pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
0316         shift & MVPP2_PRS_SRAM_SHIFT_MASK;
0317 
0318     /* Reset and set operation */
0319     mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
0320                   MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
0321     mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
0322 
0323     /* Set base offset as current */
0324     mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
0325 }
0326 
0327 /* In the sram sw entry set sign and value of the user defined offset
0328  * generated to the classifier
0329  */
0330 static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
0331                       unsigned int type, int offset,
0332                       unsigned int op)
0333 {
0334     /* Set sign */
0335     if (offset < 0) {
0336         mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
0337         offset = 0 - offset;
0338     } else {
0339         mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
0340     }
0341 
0342     /* Set value */
0343     mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
0344                   MVPP2_PRS_SRAM_UDF_MASK);
0345     mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
0346                 offset & MVPP2_PRS_SRAM_UDF_MASK);
0347 
0348     /* Set offset type */
0349     mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
0350                   MVPP2_PRS_SRAM_UDF_TYPE_MASK);
0351     mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
0352 
0353     /* Set offset operation */
0354     mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
0355                   MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
0356     mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
0357                 op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
0358 
0359     /* Set base offset as current */
0360     mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
0361 }
0362 
0363 /* Find parser flow entry */
0364 static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
0365 {
0366     struct mvpp2_prs_entry pe;
0367     int tid;
0368 
0369     /* Go through the all entires with MVPP2_PRS_LU_FLOWS */
0370     for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
0371         u8 bits;
0372 
0373         if (!priv->prs_shadow[tid].valid ||
0374             priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
0375             continue;
0376 
0377         mvpp2_prs_init_from_hw(priv, &pe, tid);
0378         bits = mvpp2_prs_sram_ai_get(&pe);
0379 
0380         /* Sram store classification lookup ID in AI bits [5:0] */
0381         if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
0382             return tid;
0383     }
0384 
0385     return -ENOENT;
0386 }
0387 
0388 /* Return first free tcam index, seeking from start to end */
0389 static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
0390                      unsigned char end)
0391 {
0392     int tid;
0393 
0394     if (start > end)
0395         swap(start, end);
0396 
0397     for (tid = start; tid <= end; tid++) {
0398         if (!priv->prs_shadow[tid].valid)
0399             return tid;
0400     }
0401 
0402     return -EINVAL;
0403 }
0404 
0405 /* Drop flow control pause frames */
0406 static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
0407 {
0408     unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
0409     struct mvpp2_prs_entry pe;
0410     unsigned int len;
0411 
0412     memset(&pe, 0, sizeof(pe));
0413 
0414     /* For all ports - drop flow control frames */
0415     pe.index = MVPP2_PE_FC_DROP;
0416     mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
0417 
0418     /* Set match on DA */
0419     len = ETH_ALEN;
0420     while (len--)
0421         mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
0422 
0423     mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
0424                  MVPP2_PRS_RI_DROP_MASK);
0425 
0426     mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
0427     mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
0428 
0429     /* Mask all ports */
0430     mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
0431 
0432     /* Update shadow table and hw entry */
0433     mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
0434     mvpp2_prs_hw_write(priv, &pe);
0435 }
0436 
0437 /* Enable/disable dropping all mac da's */
0438 static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
0439 {
0440     struct mvpp2_prs_entry pe;
0441 
0442     if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
0443         /* Entry exist - update port only */
0444         mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
0445     } else {
0446         /* Entry doesn't exist - create new */
0447         memset(&pe, 0, sizeof(pe));
0448         mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
0449         pe.index = MVPP2_PE_DROP_ALL;
0450 
0451         /* Non-promiscuous mode for all ports - DROP unknown packets */
0452         mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
0453                      MVPP2_PRS_RI_DROP_MASK);
0454 
0455         mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
0456         mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
0457 
0458         /* Update shadow table */
0459         mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
0460 
0461         /* Mask all ports */
0462         mvpp2_prs_tcam_port_map_set(&pe, 0);
0463     }
0464 
0465     /* Update port mask */
0466     mvpp2_prs_tcam_port_set(&pe, port, add);
0467 
0468     mvpp2_prs_hw_write(priv, &pe);
0469 }
0470 
0471 /* Set port to unicast or multicast promiscuous mode */
0472 void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
0473                    enum mvpp2_prs_l2_cast l2_cast, bool add)
0474 {
0475     struct mvpp2_prs_entry pe;
0476     unsigned char cast_match;
0477     unsigned int ri;
0478     int tid;
0479 
0480     if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
0481         cast_match = MVPP2_PRS_UCAST_VAL;
0482         tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
0483         ri = MVPP2_PRS_RI_L2_UCAST;
0484     } else {
0485         cast_match = MVPP2_PRS_MCAST_VAL;
0486         tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
0487         ri = MVPP2_PRS_RI_L2_MCAST;
0488     }
0489 
0490     /* promiscuous mode - Accept unknown unicast or multicast packets */
0491     if (priv->prs_shadow[tid].valid) {
0492         mvpp2_prs_init_from_hw(priv, &pe, tid);
0493     } else {
0494         memset(&pe, 0, sizeof(pe));
0495         mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
0496         pe.index = tid;
0497 
0498         /* Continue - set next lookup */
0499         mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
0500 
0501         /* Set result info bits */
0502         mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);
0503 
0504         /* Match UC or MC addresses */
0505         mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
0506                          MVPP2_PRS_CAST_MASK);
0507 
0508         /* Shift to ethertype */
0509         mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
0510                      MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
0511 
0512         /* Mask all ports */
0513         mvpp2_prs_tcam_port_map_set(&pe, 0);
0514 
0515         /* Update shadow table */
0516         mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
0517     }
0518 
0519     /* Update port mask */
0520     mvpp2_prs_tcam_port_set(&pe, port, add);
0521 
0522     mvpp2_prs_hw_write(priv, &pe);
0523 }
0524 
0525 /* Set entry for dsa packets */
0526 static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
0527                   bool tagged, bool extend)
0528 {
0529     struct mvpp2_prs_entry pe;
0530     int tid, shift;
0531 
0532     if (extend) {
0533         tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
0534         shift = 8;
0535     } else {
0536         tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
0537         shift = 4;
0538     }
0539 
0540     if (priv->prs_shadow[tid].valid) {
0541         /* Entry exist - update port only */
0542         mvpp2_prs_init_from_hw(priv, &pe, tid);
0543     } else {
0544         /* Entry doesn't exist - create new */
0545         memset(&pe, 0, sizeof(pe));
0546         mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
0547         pe.index = tid;
0548 
0549         /* Update shadow table */
0550         mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
0551 
0552         if (tagged) {
0553             /* Set tagged bit in DSA tag */
0554             mvpp2_prs_tcam_data_byte_set(&pe, 0,
0555                          MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
0556                          MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
0557 
0558             /* Set ai bits for next iteration */
0559             if (extend)
0560                 mvpp2_prs_sram_ai_update(&pe, 1,
0561                             MVPP2_PRS_SRAM_AI_MASK);
0562             else
0563                 mvpp2_prs_sram_ai_update(&pe, 0,
0564                             MVPP2_PRS_SRAM_AI_MASK);
0565 
0566             /* Set result info bits to 'single vlan' */
0567             mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
0568                          MVPP2_PRS_RI_VLAN_MASK);
0569             /* If packet is tagged continue check vid filtering */
0570             mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
0571         } else {
0572             /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/
0573             mvpp2_prs_sram_shift_set(&pe, shift,
0574                     MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
0575 
0576             /* Set result info bits to 'no vlans' */
0577             mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
0578                          MVPP2_PRS_RI_VLAN_MASK);
0579             mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
0580         }
0581 
0582         /* Mask all ports */
0583         mvpp2_prs_tcam_port_map_set(&pe, 0);
0584     }
0585 
0586     /* Update port mask */
0587     mvpp2_prs_tcam_port_set(&pe, port, add);
0588 
0589     mvpp2_prs_hw_write(priv, &pe);
0590 }
0591 
0592 /* Set entry for dsa ethertype */
0593 static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
0594                         bool add, bool tagged, bool extend)
0595 {
0596     struct mvpp2_prs_entry pe;
0597     int tid, shift, port_mask;
0598 
0599     if (extend) {
0600         tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
0601               MVPP2_PE_ETYPE_EDSA_UNTAGGED;
0602         port_mask = 0;
0603         shift = 8;
0604     } else {
0605         tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
0606               MVPP2_PE_ETYPE_DSA_UNTAGGED;
0607         port_mask = MVPP2_PRS_PORT_MASK;
0608         shift = 4;
0609     }
0610 
0611     if (priv->prs_shadow[tid].valid) {
0612         /* Entry exist - update port only */
0613         mvpp2_prs_init_from_hw(priv, &pe, tid);
0614     } else {
0615         /* Entry doesn't exist - create new */
0616         memset(&pe, 0, sizeof(pe));
0617         mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
0618         pe.index = tid;
0619 
0620         /* Set ethertype */
0621         mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
0622         mvpp2_prs_match_etype(&pe, 2, 0);
0623 
0624         mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
0625                      MVPP2_PRS_RI_DSA_MASK);
0626         /* Shift ethertype + 2 byte reserved + tag*/
0627         mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
0628                      MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
0629 
0630         /* Update shadow table */
0631         mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
0632 
0633         if (tagged) {
0634             /* Set tagged bit in DSA tag */
0635             mvpp2_prs_tcam_data_byte_set(&pe,
0636                              MVPP2_ETH_TYPE_LEN + 2 + 3,
0637                          MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
0638                          MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
0639             /* Clear all ai bits for next iteration */
0640             mvpp2_prs_sram_ai_update(&pe, 0,
0641                          MVPP2_PRS_SRAM_AI_MASK);
0642             /* If packet is tagged continue check vlans */
0643             mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
0644         } else {
0645             /* Set result info bits to 'no vlans' */
0646             mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
0647                          MVPP2_PRS_RI_VLAN_MASK);
0648             mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
0649         }
0650         /* Mask/unmask all ports, depending on dsa type */
0651         mvpp2_prs_tcam_port_map_set(&pe, port_mask);
0652     }
0653 
0654     /* Update port mask */
0655     mvpp2_prs_tcam_port_set(&pe, port, add);
0656 
0657     mvpp2_prs_hw_write(priv, &pe);
0658 }
0659 
0660 /* Search for existing single/triple vlan entry */
0661 static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
0662 {
0663     struct mvpp2_prs_entry pe;
0664     int tid;
0665 
0666     /* Go through the all entries with MVPP2_PRS_LU_VLAN */
0667     for (tid = MVPP2_PE_FIRST_FREE_TID;
0668          tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
0669         unsigned int ri_bits, ai_bits;
0670         bool match;
0671 
0672         if (!priv->prs_shadow[tid].valid ||
0673             priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
0674             continue;
0675 
0676         mvpp2_prs_init_from_hw(priv, &pe, tid);
0677         match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
0678         if (!match)
0679             continue;
0680 
0681         /* Get vlan type */
0682         ri_bits = mvpp2_prs_sram_ri_get(&pe);
0683         ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
0684 
0685         /* Get current ai value from tcam */
0686         ai_bits = mvpp2_prs_tcam_ai_get(&pe);
0687         /* Clear double vlan bit */
0688         ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
0689 
0690         if (ai != ai_bits)
0691             continue;
0692 
0693         if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
0694             ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
0695             return tid;
0696     }
0697 
0698     return -ENOENT;
0699 }
0700 
0701 /* Add/update single/triple vlan entry */
0702 static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
0703                   unsigned int port_map)
0704 {
0705     struct mvpp2_prs_entry pe;
0706     int tid_aux, tid;
0707     int ret = 0;
0708 
0709     memset(&pe, 0, sizeof(pe));
0710 
0711     tid = mvpp2_prs_vlan_find(priv, tpid, ai);
0712 
0713     if (tid < 0) {
0714         /* Create new tcam entry */
0715         tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
0716                         MVPP2_PE_FIRST_FREE_TID);
0717         if (tid < 0)
0718             return tid;
0719 
0720         /* Get last double vlan tid */
0721         for (tid_aux = MVPP2_PE_LAST_FREE_TID;
0722              tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
0723             unsigned int ri_bits;
0724 
0725             if (!priv->prs_shadow[tid_aux].valid ||
0726                 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
0727                 continue;
0728 
0729             mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
0730             ri_bits = mvpp2_prs_sram_ri_get(&pe);
0731             if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
0732                 MVPP2_PRS_RI_VLAN_DOUBLE)
0733                 break;
0734         }
0735 
0736         if (tid <= tid_aux)
0737             return -EINVAL;
0738 
0739         memset(&pe, 0, sizeof(pe));
0740         pe.index = tid;
0741         mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
0742 
0743         mvpp2_prs_match_etype(&pe, 0, tpid);
0744 
0745         /* VLAN tag detected, proceed with VID filtering */
0746         mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
0747 
0748         /* Clear all ai bits for next iteration */
0749         mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
0750 
0751         if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
0752             mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
0753                          MVPP2_PRS_RI_VLAN_MASK);
0754         } else {
0755             ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
0756             mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
0757                          MVPP2_PRS_RI_VLAN_MASK);
0758         }
0759         mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);
0760 
0761         mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
0762     } else {
0763         mvpp2_prs_init_from_hw(priv, &pe, tid);
0764     }
0765     /* Update ports' mask */
0766     mvpp2_prs_tcam_port_map_set(&pe, port_map);
0767 
0768     mvpp2_prs_hw_write(priv, &pe);
0769 
0770     return ret;
0771 }
0772 
0773 /* Get first free double vlan ai number */
0774 static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
0775 {
0776     int i;
0777 
0778     for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
0779         if (!priv->prs_double_vlans[i])
0780             return i;
0781     }
0782 
0783     return -EINVAL;
0784 }
0785 
0786 /* Search for existing double vlan entry */
0787 static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
0788                       unsigned short tpid2)
0789 {
0790     struct mvpp2_prs_entry pe;
0791     int tid;
0792 
0793     /* Go through the all entries with MVPP2_PRS_LU_VLAN */
0794     for (tid = MVPP2_PE_FIRST_FREE_TID;
0795          tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
0796         unsigned int ri_mask;
0797         bool match;
0798 
0799         if (!priv->prs_shadow[tid].valid ||
0800             priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
0801             continue;
0802 
0803         mvpp2_prs_init_from_hw(priv, &pe, tid);
0804 
0805         match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
0806             mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);
0807 
0808         if (!match)
0809             continue;
0810 
0811         ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
0812         if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
0813             return tid;
0814     }
0815 
0816     return -ENOENT;
0817 }
0818 
/* Add or update double vlan entry
 *
 * Matches an outer (tpid1) + inner (tpid2) VLAN ethertype pair in the
 * parser TCAM.  If the pair already has an entry only the port map is
 * refreshed, otherwise a new TCAM entry and a free "ai" tag are
 * allocated for it.  Returns 0 on success or a negative errno.
 */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
                     unsigned short tpid2,
                     unsigned int port_map)
{
    int tid_aux, tid, ai, ret = 0;
    struct mvpp2_prs_entry pe;

    memset(&pe, 0, sizeof(pe));

    /* Reuse an existing entry for this tpid pair when there is one */
    tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

    if (tid < 0) {
        /* Create new tcam entry */
        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                MVPP2_PE_LAST_FREE_TID);
        if (tid < 0)
            return tid;

        /* Set ai value for new double vlan entry */
        ai = mvpp2_prs_double_vlan_ai_free_get(priv);
        if (ai < 0)
            return ai;

        /* Get first single/triple vlan tid */
        for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
             tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
            unsigned int ri_bits;

            if (!priv->prs_shadow[tid_aux].valid ||
                priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
                continue;

            mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
            ri_bits = mvpp2_prs_sram_ri_get(&pe);
            ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
            if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
                ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
                break;
        }

        /* The new double vlan entry must be placed before the first
         * existing single/triple vlan entry (i.e. get a lower tid);
         * otherwise bail out.
         */
        if (tid >= tid_aux)
            return -ERANGE;

        memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
        pe.index = tid;

        priv->prs_double_vlans[ai] = true;

        /* Outer tag ethertype at offset 0, inner tag 4 bytes in */
        mvpp2_prs_match_etype(&pe, 0, tpid1);
        mvpp2_prs_match_etype(&pe, 4, tpid2);

        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
        /* Shift 4 bytes - skip outer vlan tag */
        mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
                     MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
                     MVPP2_PRS_RI_VLAN_MASK);
        /* Tag the entry with its allocated ai plus the double-vlan
         * marker bit
         */
        mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
                     MVPP2_PRS_SRAM_AI_MASK);

        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
    } else {
        mvpp2_prs_init_from_hw(priv, &pe, tid);
    }

    /* Update ports' mask */
    mvpp2_prs_tcam_port_map_set(&pe, port_map);
    mvpp2_prs_hw_write(priv, &pe);

    return ret;
}
0892 
/* IPv4 header parsing for fragmentation and L4 offset
 *
 * Installs two MVPP2_PRS_LU_IP4 entries for @proto: one matching
 * non-fragmented packets (with the given ri/ri_mask plus cleared frag
 * bits) and one for fragmented packets (with MVPP2_PRS_RI_IP_FRAG_TRUE
 * set).  Returns 0 on success or a negative errno.
 */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
                   unsigned int ri, unsigned int ri_mask)
{
    struct mvpp2_prs_entry pe;
    int tid;

    /* Only TCP, UDP and IGMP are supported here */
    if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
        (proto != IPPROTO_IGMP))
        return -EINVAL;

    /* Not fragmented packet */
    tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                    MVPP2_PE_LAST_FREE_TID);
    if (tid < 0)
        return tid;

    memset(&pe, 0, sizeof(pe));
    mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
    pe.index = tid;

    /* Finished: go to flowid generation */
    mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
    mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

    /* Set L3 offset */
    mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
    mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
    /* Also cover the frag bits in the mask so they end up cleared for
     * the non-fragmented entry
     */
    mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

    /* Require bytes 2-3 to be zero (assumed to hold the IP fragment
     * flags/offset - consistent with the frag entry below which
     * unmasks them)
     */
    mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
                     MVPP2_PRS_TCAM_PROTO_MASK_L);
    mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
                     MVPP2_PRS_TCAM_PROTO_MASK);

    mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
    mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
                 MVPP2_PRS_IPV4_DIP_AI_BIT);
    /* Unmask all ports */
    mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

    /* Update shadow table and hw entry */
    mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
    mvpp2_prs_hw_write(priv, &pe);

    /* Fragmented packet */
    tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                    MVPP2_PE_LAST_FREE_TID);
    if (tid < 0)
        return tid;

    /* NOTE: pe is deliberately reused from the entry above; it keeps
     * the protocol match, ai bits and port map - only the fields
     * rewritten below change.
     */
    pe.index = tid;
    /* Clear ri before updating */
    pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
    pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
    mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

    mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
                 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

    /* Don't care about bytes 2-3 any more (zero mask) */
    mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
    mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);

    /* Update shadow table and hw entry */
    mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
    mvpp2_prs_hw_write(priv, &pe);

    return 0;
}
0963 
0964 /* IPv4 L3 multicast or broadcast */
0965 static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
0966 {
0967     struct mvpp2_prs_entry pe;
0968     int mask, tid;
0969 
0970     tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
0971                     MVPP2_PE_LAST_FREE_TID);
0972     if (tid < 0)
0973         return tid;
0974 
0975     memset(&pe, 0, sizeof(pe));
0976     mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
0977     pe.index = tid;
0978 
0979     switch (l3_cast) {
0980     case MVPP2_PRS_L3_MULTI_CAST:
0981         mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
0982                          MVPP2_PRS_IPV4_MC_MASK);
0983         mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
0984                      MVPP2_PRS_RI_L3_ADDR_MASK);
0985         break;
0986     case  MVPP2_PRS_L3_BROAD_CAST:
0987         mask = MVPP2_PRS_IPV4_BC_MASK;
0988         mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
0989         mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
0990         mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
0991         mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
0992         mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
0993                      MVPP2_PRS_RI_L3_ADDR_MASK);
0994         break;
0995     default:
0996         return -EINVAL;
0997     }
0998 
0999     /* Go again to ipv4 */
1000     mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1001 
1002     mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1003                  MVPP2_PRS_IPV4_DIP_AI_BIT);
1004 
1005     /* Shift back to IPv4 proto */
1006     mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1007 
1008     mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1009 
1010     /* Unmask all ports */
1011     mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1012 
1013     /* Update shadow table and hw entry */
1014     mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1015     mvpp2_prs_hw_write(priv, &pe);
1016 
1017     return 0;
1018 }
1019 
1020 /* Set entries for protocols over IPv6  */
1021 static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
1022                    unsigned int ri, unsigned int ri_mask)
1023 {
1024     struct mvpp2_prs_entry pe;
1025     int tid;
1026 
1027     if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
1028         (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
1029         return -EINVAL;
1030 
1031     tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1032                     MVPP2_PE_LAST_FREE_TID);
1033     if (tid < 0)
1034         return tid;
1035 
1036     memset(&pe, 0, sizeof(pe));
1037     mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1038     pe.index = tid;
1039 
1040     /* Finished: go to flowid generation */
1041     mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1042     mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1043     mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
1044     mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1045                   sizeof(struct ipv6hdr) - 6,
1046                   MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1047 
1048     mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
1049     mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1050                  MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1051     /* Unmask all ports */
1052     mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1053 
1054     /* Write HW */
1055     mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
1056     mvpp2_prs_hw_write(priv, &pe);
1057 
1058     return 0;
1059 }
1060 
1061 /* IPv6 L3 multicast entry */
1062 static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
1063 {
1064     struct mvpp2_prs_entry pe;
1065     int tid;
1066 
1067     if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
1068         return -EINVAL;
1069 
1070     tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1071                     MVPP2_PE_LAST_FREE_TID);
1072     if (tid < 0)
1073         return tid;
1074 
1075     memset(&pe, 0, sizeof(pe));
1076     mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1077     pe.index = tid;
1078 
1079     /* Finished: go to flowid generation */
1080     mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1081     mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
1082                  MVPP2_PRS_RI_L3_ADDR_MASK);
1083     mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1084                  MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1085     /* Shift back to IPv6 NH */
1086     mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1087 
1088     mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
1089                      MVPP2_PRS_IPV6_MC_MASK);
1090     mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1091     /* Unmask all ports */
1092     mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1093 
1094     /* Update shadow table and hw entry */
1095     mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
1096     mvpp2_prs_hw_write(priv, &pe);
1097 
1098     return 0;
1099 }
1100 
1101 /* Parser per-port initialization */
1102 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
1103                    int lu_max, int offset)
1104 {
1105     u32 val;
1106 
1107     /* Set lookup ID */
1108     val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
1109     val &= ~MVPP2_PRS_PORT_LU_MASK(port);
1110     val |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
1111     mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
1112 
1113     /* Set maximum number of loops for packet received from port */
1114     val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
1115     val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
1116     val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
1117     mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
1118 
1119     /* Set initial offset for packet header extraction for the first
1120      * searching loop
1121      */
1122     val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
1123     val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
1124     val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
1125     mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
1126 }
1127 
1128 /* Default flow entries initialization for all ports */
1129 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
1130 {
1131     struct mvpp2_prs_entry pe;
1132     int port;
1133 
1134     for (port = 0; port < MVPP2_MAX_PORTS; port++) {
1135         memset(&pe, 0, sizeof(pe));
1136         mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1137         pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
1138 
1139         /* Mask all ports */
1140         mvpp2_prs_tcam_port_map_set(&pe, 0);
1141 
1142         /* Set flow ID*/
1143         mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
1144         mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
1145 
1146         /* Update shadow table and hw entry */
1147         mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
1148         mvpp2_prs_hw_write(priv, &pe);
1149     }
1150 }
1151 
/* Set default entry for Marvell Header field
 *
 * Installs two MVPP2_PRS_LU_MH entries: the default one that skips the
 * Marvell Header and continues with the MAC lookup, and one that skips
 * the header and terminates the parse immediately.
 */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
    struct mvpp2_prs_entry pe;

    memset(&pe, 0, sizeof(pe));

    pe.index = MVPP2_PE_MH_DEFAULT;
    mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
    /* Skip the Marvell Header and continue with the MAC lookup */
    mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
    mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

    /* Unmask all ports */
    mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

    /* Update shadow table and hw entry */
    mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
    mvpp2_prs_hw_write(priv, &pe);

    /* Set MH entry that skip parser */
    /* NOTE: pe is deliberately reused (no memset); only the fields
     * below are rewritten on top of the previous entry.
     */
    pe.index = MVPP2_PE_MH_SKIP_PRS;
    mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
    mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
    /* Terminate the lookup: generate the flow ID right away */
    mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
    mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

    /* Mask all ports */
    mvpp2_prs_tcam_port_map_set(&pe, 0);

    /* Update shadow table and hw entry */
    mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
    mvpp2_prs_hw_write(priv, &pe);
}
1187 
1188 /* Set default entires (place holder) for promiscuous, non-promiscuous and
1189  * multicast MAC addresses
1190  */
1191 static void mvpp2_prs_mac_init(struct mvpp2 *priv)
1192 {
1193     struct mvpp2_prs_entry pe;
1194 
1195     memset(&pe, 0, sizeof(pe));
1196 
1197     /* Non-promiscuous mode for all ports - DROP unknown packets */
1198     pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
1199     mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1200 
1201     mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1202                  MVPP2_PRS_RI_DROP_MASK);
1203     mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1204     mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1205 
1206     /* Unmask all ports */
1207     mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1208 
1209     /* Update shadow table and hw entry */
1210     mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1211     mvpp2_prs_hw_write(priv, &pe);
1212 
1213     /* Create dummy entries for drop all and promiscuous modes */
1214     mvpp2_prs_drop_fc(priv);
1215     mvpp2_prs_mac_drop_all_set(priv, 0, false);
1216     mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
1217     mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
1218 }
1219 
/* Set default entries for various types of dsa packets
 *
 * Registers place-holder entries for every (E)DSA tag combination,
 * two real DSA-ethertype entries, and a default entry used when no
 * DSA/EDSA tag is found.
 */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
    struct mvpp2_prs_entry pe;

    /* None tagged EDSA entry - place holder */
    mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
                  MVPP2_PRS_EDSA);

    /* Tagged EDSA entry - place holder */
    mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

    /* None tagged DSA entry - place holder */
    mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
                  MVPP2_PRS_DSA);

    /* Tagged DSA entry - place holder */
    mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

    /* None tagged EDSA ethertype entry - place holder*/
    mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
                    MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

    /* Tagged EDSA ethertype entry - place holder*/
    mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
                    MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

    /* None tagged DSA ethertype entry */
    mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
                    MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

    /* Tagged DSA ethertype entry */
    mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
                    MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

    /* Set default entry, in case DSA or EDSA tag not found */
    memset(&pe, 0, sizeof(pe));
    mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
    pe.index = MVPP2_PE_DSA_DEFAULT;
    mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

    /* Shift 0 bytes */
    mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
    /* NOTE(review): the shadow lu is recorded as MAC although the tcam
     * lu is DSA - looks intentional, but worth confirming against the
     * lookup flow.
     */
    mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

    /* Clear all sram ai bits for next iteration */
    mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

    /* Unmask all ports */
    mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

    mvpp2_prs_hw_write(priv, &pe);
}
1273 
1274 /* Initialize parser entries for VID filtering */
1275 static void mvpp2_prs_vid_init(struct mvpp2 *priv)
1276 {
1277     struct mvpp2_prs_entry pe;
1278 
1279     memset(&pe, 0, sizeof(pe));
1280 
1281     /* Set default vid entry */
1282     pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
1283     mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
1284 
1285     mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);
1286 
1287     /* Skip VLAN header - Set offset to 4 bytes */
1288     mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
1289                  MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1290 
1291     /* Clear all ai bits for next iteration */
1292     mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1293 
1294     mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1295 
1296     /* Unmask all ports */
1297     mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1298 
1299     /* Update shadow table and hw entry */
1300     mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
1301     mvpp2_prs_hw_write(priv, &pe);
1302 
1303     /* Set default vid entry for extended DSA*/
1304     memset(&pe, 0, sizeof(pe));
1305 
1306     /* Set default vid entry */
1307     pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
1308     mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
1309 
1310     mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
1311                  MVPP2_PRS_EDSA_VID_AI_BIT);
1312 
1313     /* Skip VLAN header - Set offset to 8 bytes */
1314     mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
1315                  MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1316 
1317     /* Clear all ai bits for next iteration */
1318     mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1319 
1320     mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1321 
1322     /* Unmask all ports */
1323     mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1324 
1325     /* Update shadow table and hw entry */
1326     mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
1327     mvpp2_prs_hw_write(priv, &pe);
1328 }
1329 
/* Match basic ethertypes
 *
 * Installs one MVPP2_PRS_LU_L2 entry per recognized ethertype (PPPoE,
 * ARP, LBTD, IPv4 - one entry per allowed IHL value - and IPv6), plus
 * a default entry for unknown ethertypes.  Entry allocation order
 * matters: earlier entries get lower tcam indices.  Returns 0 on
 * success or a negative errno when no free tcam entry is available.
 */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
    struct mvpp2_prs_entry pe;
    int tid, ihl;

    /* Ethertype: PPPoE */
    tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                    MVPP2_PE_LAST_FREE_TID);
    if (tid < 0)
        return tid;

    memset(&pe, 0, sizeof(pe));
    mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
    pe.index = tid;

    mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

    /* Skip the PPPoE header and continue with the PPPoE lookup */
    mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
    mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
    mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
                 MVPP2_PRS_RI_PPPOE_MASK);

    /* Update shadow table and hw entry */
    mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
    priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
    priv->prs_shadow[pe.index].finish = false;
    mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
                MVPP2_PRS_RI_PPPOE_MASK);
    mvpp2_prs_hw_write(priv, &pe);

    /* Ethertype: ARP */
    tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                    MVPP2_PE_LAST_FREE_TID);
    if (tid < 0)
        return tid;

    memset(&pe, 0, sizeof(pe));
    mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
    pe.index = tid;

    mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

    /* Generate flow in the next iteration*/
    mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
    mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
    mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
                 MVPP2_PRS_RI_L3_PROTO_MASK);
    /* Set L3 offset */
    mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
                  MVPP2_ETH_TYPE_LEN,
                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

    /* Update shadow table and hw entry */
    mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
    priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
    priv->prs_shadow[pe.index].finish = true;
    mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
                MVPP2_PRS_RI_L3_PROTO_MASK);
    mvpp2_prs_hw_write(priv, &pe);

    /* Ethertype: LBTD */
    tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                    MVPP2_PE_LAST_FREE_TID);
    if (tid < 0)
        return tid;

    memset(&pe, 0, sizeof(pe));
    mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
    pe.index = tid;

    mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

    /* Generate flow in the next iteration*/
    mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
    mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
    mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
                 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
                 MVPP2_PRS_RI_CPU_CODE_MASK |
                 MVPP2_PRS_RI_UDF3_MASK);
    /* Set L3 offset */
    mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
                  MVPP2_ETH_TYPE_LEN,
                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

    /* Update shadow table and hw entry */
    mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
    priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
    priv->prs_shadow[pe.index].finish = true;
    mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
                MVPP2_PRS_RI_UDF3_RX_SPECIAL,
                MVPP2_PRS_RI_CPU_CODE_MASK |
                MVPP2_PRS_RI_UDF3_MASK);
    mvpp2_prs_hw_write(priv, &pe);

    /* Ethertype: IPv4 with header length >= 5 */
    for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                        MVPP2_PE_LAST_FREE_TID);
        if (tid < 0)
            return tid;

        memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
        pe.index = tid;

        /* Match the ethertype plus this exact IHL value in the
         * first IP header byte
         */
        mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
        mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
                         MVPP2_PRS_IPV4_HEAD | ihl,
                         MVPP2_PRS_IPV4_HEAD_MASK |
                         MVPP2_PRS_IPV4_IHL_MASK);

        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
                     MVPP2_PRS_RI_L3_PROTO_MASK);
        /* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
                     sizeof(struct iphdr) - 4,
                     MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
        /* Set L4 offset */
        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
                      MVPP2_ETH_TYPE_LEN + (ihl * 4),
                      MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

        /* Update shadow table and hw entry */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
        priv->prs_shadow[pe.index].finish = false;
        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
                    MVPP2_PRS_RI_L3_PROTO_MASK);
        mvpp2_prs_hw_write(priv, &pe);
    }

    /* Ethertype: IPv6 without options */
    tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                    MVPP2_PE_LAST_FREE_TID);
    if (tid < 0)
        return tid;

    memset(&pe, 0, sizeof(pe));
    mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
    pe.index = tid;

    mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

    /* Skip DIP of IPV6 header */
    mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
                 MVPP2_MAX_L3_ADDR_SIZE,
                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
    mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
    mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
                 MVPP2_PRS_RI_L3_PROTO_MASK);
    /* Set L3 offset */
    mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
                  MVPP2_ETH_TYPE_LEN,
                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

    mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
    priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
    priv->prs_shadow[pe.index].finish = false;
    mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
                MVPP2_PRS_RI_L3_PROTO_MASK);
    mvpp2_prs_hw_write(priv, &pe);

    /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
    memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
    mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
    pe.index = MVPP2_PE_ETH_TYPE_UN;

    /* Unmask all ports */
    mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

    /* Generate flow in the next iteration*/
    mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
    mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
    mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
                 MVPP2_PRS_RI_L3_PROTO_MASK);
    /* Set L3 offset even it's unknown L3 */
    mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
                  MVPP2_ETH_TYPE_LEN,
                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

    /* Update shadow table and hw entry */
    mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
    priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
    priv->prs_shadow[pe.index].finish = true;
    mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
                MVPP2_PRS_RI_L3_PROTO_MASK);
    mvpp2_prs_hw_write(priv, &pe);

    return 0;
}
1523 
1524 /* Configure vlan entries and detect up to 2 successive VLAN tags.
1525  * Possible options:
1526  * 0x8100, 0x88A8
1527  * 0x8100, 0x8100
1528  * 0x8100
1529  * 0x88A8
1530  */
1531 static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
1532 {
1533     struct mvpp2_prs_entry pe;
1534     int err;
1535 
1536     priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
1537                           MVPP2_PRS_DBL_VLANS_MAX,
1538                           GFP_KERNEL);
1539     if (!priv->prs_double_vlans)
1540         return -ENOMEM;
1541 
1542     /* Double VLAN: 0x8100, 0x88A8 */
1543     err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
1544                     MVPP2_PRS_PORT_MASK);
1545     if (err)
1546         return err;
1547 
1548     /* Double VLAN: 0x8100, 0x8100 */
1549     err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
1550                     MVPP2_PRS_PORT_MASK);
1551     if (err)
1552         return err;
1553 
1554     /* Single VLAN: 0x88a8 */
1555     err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
1556                  MVPP2_PRS_PORT_MASK);
1557     if (err)
1558         return err;
1559 
1560     /* Single VLAN: 0x8100 */
1561     err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
1562                  MVPP2_PRS_PORT_MASK);
1563     if (err)
1564         return err;
1565 
1566     /* Set default double vlan entry */
1567     memset(&pe, 0, sizeof(pe));
1568     mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1569     pe.index = MVPP2_PE_VLAN_DBL;
1570 
1571     mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
1572 
1573     /* Clear ai for next iterations */
1574     mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1575     mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
1576                  MVPP2_PRS_RI_VLAN_MASK);
1577 
1578     mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
1579                  MVPP2_PRS_DBL_VLAN_AI_BIT);
1580     /* Unmask all ports */
1581     mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1582 
1583     /* Update shadow table and hw entry */
1584     mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
1585     mvpp2_prs_hw_write(priv, &pe);
1586 
1587     /* Set default vlan none entry */
1588     memset(&pe, 0, sizeof(pe));
1589     mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1590     pe.index = MVPP2_PE_VLAN_NONE;
1591 
1592     mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1593     mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1594                  MVPP2_PRS_RI_VLAN_MASK);
1595 
1596     /* Unmask all ports */
1597     mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1598 
1599     /* Update shadow table and hw entry */
1600     mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
1601     mvpp2_prs_hw_write(priv, &pe);
1602 
1603     return 0;
1604 }
1605 
/* Set entries for PPPoE ethertype
 *
 * Installs PPPoE lookup entries for IPv4 (with and without IP
 * options), IPv6 and a catch-all for any other PPP protocol.
 * Returns 0 on success or a negative errno when no free tcam entry
 * is available.
 */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
    struct mvpp2_prs_entry pe;
    int tid;

    /* IPv4 over PPPoE with options */
    tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                    MVPP2_PE_LAST_FREE_TID);
    if (tid < 0)
        return tid;

    memset(&pe, 0, sizeof(pe));
    mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
    pe.index = tid;

    mvpp2_prs_match_etype(&pe, 0, PPP_IP);

    mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
    mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
                 MVPP2_PRS_RI_L3_PROTO_MASK);
    /* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
    mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
                 sizeof(struct iphdr) - 4,
                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
    /* Set L3 offset */
    mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
                  MVPP2_ETH_TYPE_LEN,
                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

    /* Update shadow table and hw entry */
    mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
    mvpp2_prs_hw_write(priv, &pe);

    /* IPv4 over PPPoE without options */
    tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                    MVPP2_PE_LAST_FREE_TID);
    if (tid < 0)
        return tid;

    /* NOTE: pe is deliberately reused from the entry above (no
     * memset) - it keeps the PPP_IP match, next-lu, shift and L3
     * offset; only the IHL match and ri bits change below.
     */
    pe.index = tid;

    mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
                     MVPP2_PRS_IPV4_HEAD |
                     MVPP2_PRS_IPV4_IHL_MIN,
                     MVPP2_PRS_IPV4_HEAD_MASK |
                     MVPP2_PRS_IPV4_IHL_MASK);

    /* Clear ri before updating */
    pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
    pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
    mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
                 MVPP2_PRS_RI_L3_PROTO_MASK);

    /* Update shadow table and hw entry */
    mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
    mvpp2_prs_hw_write(priv, &pe);

    /* IPv6 over PPPoE */
    tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                    MVPP2_PE_LAST_FREE_TID);
    if (tid < 0)
        return tid;

    memset(&pe, 0, sizeof(pe));
    mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
    pe.index = tid;

    mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

    mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
    mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
                 MVPP2_PRS_RI_L3_PROTO_MASK);
    /* Jump to DIP of IPV6 header */
    mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
                 MVPP2_MAX_L3_ADDR_SIZE,
                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
    /* Set L3 offset */
    mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
                  MVPP2_ETH_TYPE_LEN,
                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

    /* Update shadow table and hw entry */
    mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
    mvpp2_prs_hw_write(priv, &pe);

    /* Non-IP over PPPoE */
    tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                    MVPP2_PE_LAST_FREE_TID);
    if (tid < 0)
        return tid;

    memset(&pe, 0, sizeof(pe));
    mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
    pe.index = tid;

    /* No ethertype match: mark the L3 protocol as unknown */
    mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
                 MVPP2_PRS_RI_L3_PROTO_MASK);

    /* Finished: go to flowid generation */
    mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
    mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
    /* Set L3 offset even if it's unknown L3 */
    mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
                  MVPP2_ETH_TYPE_LEN,
                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

    /* Update shadow table and hw entry */
    mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
    mvpp2_prs_hw_write(priv, &pe);

    return 0;
}
1719 
/* Initialize entries for IPv4
 *
 * Installs the static IPv4 parser entries: per-L4-protocol entries
 * (TCP/UDP/IGMP), broadcast/multicast classification, and the two
 * default entries for unknown L4 protocols and for unicast addresses.
 * Returns 0 on success or a negative errno propagated from a helper
 * (e.g. no free TCAM entry).
 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	/* IGMP is marked with the special CPU code and UDF3 bits */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	/* Only match when the DIP ai bit is set (set by the unicast
	 * address entry below before looping back to the IP4 lookup)
	 */
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Go again to ipv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);

	/* Flag the second pass via the DIP ai bit */
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Shift back to IPv4 proto */
	mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	/* First pass only: DIP ai bit must still be clear */
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1807 
/* Initialize entries for IPv6
 *
 * Installs the static IPv6 parser entries: per-L4-protocol entries
 * (TCP/UDP/ICMPv6), the DS-Lite (IPv4-in-IPv6) marker, multicast
 * classification, a hop-limit-zero drop entry, and the default
 * entries for unknown protocols, unknown extension protocols and
 * unicast addresses.  Returns 0 or a negative errno from a helper.
 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	/* ICMPv6 is marked with the special CPU code and UDF3 bits */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Drop packets whose hop limit is zero */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	/* Match hop limit == 0 at header byte 1 */
	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	/* NOTE(review): shadow lu recorded as MVPP2_PRS_LU_IP4 for this
	 * IPv6 entry — looks like a copy-paste from ip4_init; confirm
	 * whether MVPP2_PRS_LU_IP6 was intended before changing it.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relatively to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Only match when no extension headers are present */
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	/* NOTE(review): lu recorded as LU_IP4 here too — see above */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	/* Only match when extension headers are present */
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	/* NOTE(review): lu recorded as LU_IP4 here too — see above */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	/* Flag the second pass: no extension headers seen so far */
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* First pass only: no-ext ai bit must still be clear */
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1942 
1943 /* Find tcam entry with matched pair <vid,port> */
1944 static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
1945 {
1946     unsigned char byte[2], enable[2];
1947     struct mvpp2_prs_entry pe;
1948     u16 rvid, rmask;
1949     int tid;
1950 
1951     /* Go through the all entries with MVPP2_PRS_LU_VID */
1952     for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
1953          tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
1954         if (!port->priv->prs_shadow[tid].valid ||
1955             port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
1956             continue;
1957 
1958         mvpp2_prs_init_from_hw(port->priv, &pe, tid);
1959 
1960         mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
1961         mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
1962 
1963         rvid = ((byte[0] & 0xf) << 8) + byte[1];
1964         rmask = ((enable[0] & 0xf) << 8) + enable[1];
1965 
1966         if (rvid != vid || rmask != mask)
1967             continue;
1968 
1969         return tid;
1970     }
1971 
1972     return -ENOENT;
1973 }
1974 
1975 /* Write parser entry for VID filtering */
1976 int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
1977 {
1978     unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
1979                  port->id * MVPP2_PRS_VLAN_FILT_MAX;
1980     unsigned int mask = 0xfff, reg_val, shift;
1981     struct mvpp2 *priv = port->priv;
1982     struct mvpp2_prs_entry pe;
1983     int tid;
1984 
1985     memset(&pe, 0, sizeof(pe));
1986 
1987     /* Scan TCAM and see if entry with this <vid,port> already exist */
1988     tid = mvpp2_prs_vid_range_find(port, vid, mask);
1989 
1990     reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
1991     if (reg_val & MVPP2_DSA_EXTENDED)
1992         shift = MVPP2_VLAN_TAG_EDSA_LEN;
1993     else
1994         shift = MVPP2_VLAN_TAG_LEN;
1995 
1996     /* No such entry */
1997     if (tid < 0) {
1998 
1999         /* Go through all entries from first to last in vlan range */
2000         tid = mvpp2_prs_tcam_first_free(priv, vid_start,
2001                         vid_start +
2002                         MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
2003 
2004         /* There isn't room for a new VID filter */
2005         if (tid < 0)
2006             return tid;
2007 
2008         mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2009         pe.index = tid;
2010 
2011         /* Mask all ports */
2012         mvpp2_prs_tcam_port_map_set(&pe, 0);
2013     } else {
2014         mvpp2_prs_init_from_hw(priv, &pe, tid);
2015     }
2016 
2017     /* Enable the current port */
2018     mvpp2_prs_tcam_port_set(&pe, port->id, true);
2019 
2020     /* Continue - set next lookup */
2021     mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2022 
2023     /* Skip VLAN header - Set offset to 4 or 8 bytes */
2024     mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2025 
2026     /* Set match on VID */
2027     mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);
2028 
2029     /* Clear all ai bits for next iteration */
2030     mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2031 
2032     /* Update shadow table */
2033     mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2034     mvpp2_prs_hw_write(priv, &pe);
2035 
2036     return 0;
2037 }
2038 
2039 /* Write parser entry for VID filtering */
2040 void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
2041 {
2042     struct mvpp2 *priv = port->priv;
2043     int tid;
2044 
2045     /* Scan TCAM and see if entry with this <vid,port> already exist */
2046     tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
2047 
2048     /* No such entry */
2049     if (tid < 0)
2050         return;
2051 
2052     mvpp2_prs_hw_inv(priv, tid);
2053     priv->prs_shadow[tid].valid = false;
2054 }
2055 
2056 /* Remove all existing VID filters on this port */
2057 void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
2058 {
2059     struct mvpp2 *priv = port->priv;
2060     int tid;
2061 
2062     for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
2063          tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
2064         if (priv->prs_shadow[tid].valid) {
2065             mvpp2_prs_hw_inv(priv, tid);
2066             priv->prs_shadow[tid].valid = false;
2067         }
2068     }
2069 }
2070 
2071 /* Remove VID filering entry for this port */
2072 void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
2073 {
2074     unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
2075     struct mvpp2 *priv = port->priv;
2076 
2077     /* Invalidate the guard entry */
2078     mvpp2_prs_hw_inv(priv, tid);
2079 
2080     priv->prs_shadow[tid].valid = false;
2081 }
2082 
/* Add guard entry that drops packets when no VID is matched on this port.
 * Idempotent: returns immediately if the guard entry already exists.
 */
void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;
	unsigned int reg_val, shift;
	struct mvpp2_prs_entry pe;

	/* Guard entry already installed - nothing to do */
	if (priv->prs_shadow[tid].valid)
		return;

	memset(&pe, 0, sizeof(pe));

	pe.index = tid;

	/* The tag length to skip depends on the port's DSA mode */
	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, 0);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Drop VLAN packets that don't belong to any VIDs on this port */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}
2129 
/* Parser default initialization
 *
 * Enables the TCAM, wipes and invalidates every TCAM/SRAM entry,
 * allocates the shadow table, then installs the static entries for
 * each lookup stage (MH, MAC, DSA, VID, ethertype, VLAN, PPPoE,
 * IPv6, IPv4).  Returns 0 on success, -ENOMEM if the shadow table
 * cannot be allocated, or a negative errno from an init helper.
 */
int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries (indirect access via IDX regs) */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	/* Shadow copy of the tcam; devm-managed, freed with the device */
	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(*priv->prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	/* Install the static entries for each lookup stage */
	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	mvpp2_prs_vid_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}
2196 
2197 /* Compare MAC DA with tcam entry data */
2198 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2199                        const u8 *da, unsigned char *mask)
2200 {
2201     unsigned char tcam_byte, tcam_mask;
2202     int index;
2203 
2204     for (index = 0; index < ETH_ALEN; index++) {
2205         mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2206         if (tcam_mask != mask[index])
2207             return false;
2208 
2209         if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2210             return false;
2211     }
2212 
2213     return true;
2214 }
2215 
2216 /* Find tcam entry with matched pair <MAC DA, port> */
2217 static int
2218 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2219                 unsigned char *mask, int udf_type)
2220 {
2221     struct mvpp2_prs_entry pe;
2222     int tid;
2223 
2224     /* Go through the all entires with MVPP2_PRS_LU_MAC */
2225     for (tid = MVPP2_PE_MAC_RANGE_START;
2226          tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
2227         unsigned int entry_pmap;
2228 
2229         if (!priv->prs_shadow[tid].valid ||
2230             (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2231             (priv->prs_shadow[tid].udf != udf_type))
2232             continue;
2233 
2234         mvpp2_prs_init_from_hw(priv, &pe, tid);
2235         entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
2236 
2237         if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
2238             entry_pmap == pmap)
2239             return tid;
2240     }
2241 
2242     return -ENOENT;
2243 }
2244 
/* Update parser's mac da entry
 *
 * Adds (@add == true) or removes (@add == false) @port from the MAC DA
 * entry matching @da, creating the entry when adding and none exists,
 * and invalidating it when the last port is removed.  Returns 0 on
 * success, a negative errno when no free TCAM entry is available, or
 * -EINVAL on an inconsistent add (see below).
 */
int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
{
	/* Exact-match: all six DA bytes fully masked */
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mvpp2 *priv = port->priv;
	unsigned int pmap, len, ri;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
	tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
					  MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (tid < 0) {
		/* Removing a non-existent entry is a successful no-op */
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Go through the all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv,
						MVPP2_PE_MAC_RANGE_START,
						MVPP2_PE_MAC_RANGE_END);
		if (tid < 0)
			return tid;

		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(&pe);
	if (pmap == 0) {
		/* An add that leaves the map empty should be impossible;
		 * treat it as an inconsistency rather than writing a dead
		 * entry.
		 */
		if (add)
			return -EINVAL;

		mvpp2_prs_hw_inv(priv, pe.index);
		priv->prs_shadow[pe.index].valid = false;
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	/* Set result info bits: classify the DA as bcast/mcast/ucast,
	 * and flag the port's own address with MAC_ME.
	 */
	if (is_broadcast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_BCAST;
	} else if (is_multicast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_MCAST;
	} else {
		ri = MVPP2_PRS_RI_L2_UCAST;

		if (ether_addr_equal(da, port->dev->dev_addr))
			ri |= MVPP2_PRS_RI_MAC_ME_MASK;
	}

	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
2333 
2334 int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
2335 {
2336     struct mvpp2_port *port = netdev_priv(dev);
2337     int err;
2338 
2339     /* Remove old parser entry */
2340     err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
2341     if (err)
2342         return err;
2343 
2344     /* Add new parser entry */
2345     err = mvpp2_prs_mac_da_accept(port, da, true);
2346     if (err)
2347         return err;
2348 
2349     /* Set addr in the device */
2350     eth_hw_addr_set(dev, da);
2351 
2352     return 0;
2353 }
2354 
2355 void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
2356 {
2357     struct mvpp2 *priv = port->priv;
2358     struct mvpp2_prs_entry pe;
2359     unsigned long pmap;
2360     int index, tid;
2361 
2362     for (tid = MVPP2_PE_MAC_RANGE_START;
2363          tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
2364         unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
2365 
2366         if (!priv->prs_shadow[tid].valid ||
2367             (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2368             (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
2369             continue;
2370 
2371         mvpp2_prs_init_from_hw(priv, &pe, tid);
2372 
2373         pmap = mvpp2_prs_tcam_port_map_get(&pe);
2374 
2375         /* We only want entries active on this port */
2376         if (!test_bit(port->id, &pmap))
2377             continue;
2378 
2379         /* Read mac addr from entry */
2380         for (index = 0; index < ETH_ALEN; index++)
2381             mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
2382                              &da_mask[index]);
2383 
2384         /* Special cases : Don't remove broadcast and port's own
2385          * address
2386          */
2387         if (is_broadcast_ether_addr(da) ||
2388             ether_addr_equal(da, port->dev->dev_addr))
2389             continue;
2390 
2391         /* Remove entry from TCAM */
2392         mvpp2_prs_mac_da_accept(port, da, false);
2393     }
2394 }
2395 
2396 int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
2397 {
2398     switch (type) {
2399     case MVPP2_TAG_TYPE_EDSA:
2400         /* Add port to EDSA entries */
2401         mvpp2_prs_dsa_tag_set(priv, port, true,
2402                       MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2403         mvpp2_prs_dsa_tag_set(priv, port, true,
2404                       MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2405         /* Remove port from DSA entries */
2406         mvpp2_prs_dsa_tag_set(priv, port, false,
2407                       MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2408         mvpp2_prs_dsa_tag_set(priv, port, false,
2409                       MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2410         break;
2411 
2412     case MVPP2_TAG_TYPE_DSA:
2413         /* Add port to DSA entries */
2414         mvpp2_prs_dsa_tag_set(priv, port, true,
2415                       MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2416         mvpp2_prs_dsa_tag_set(priv, port, true,
2417                       MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2418         /* Remove port from EDSA entries */
2419         mvpp2_prs_dsa_tag_set(priv, port, false,
2420                       MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2421         mvpp2_prs_dsa_tag_set(priv, port, false,
2422                       MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2423         break;
2424 
2425     case MVPP2_TAG_TYPE_MH:
2426     case MVPP2_TAG_TYPE_NONE:
2427         /* Remove port form EDSA and DSA entries */
2428         mvpp2_prs_dsa_tag_set(priv, port, false,
2429                       MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2430         mvpp2_prs_dsa_tag_set(priv, port, false,
2431                       MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2432         mvpp2_prs_dsa_tag_set(priv, port, false,
2433                       MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2434         mvpp2_prs_dsa_tag_set(priv, port, false,
2435                       MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2436         break;
2437 
2438     default:
2439         if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
2440             return -EINVAL;
2441     }
2442 
2443     return 0;
2444 }
2445 
2446 int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
2447 {
2448     struct mvpp2_prs_entry pe;
2449     u8 *ri_byte, *ri_byte_mask;
2450     int tid, i;
2451 
2452     memset(&pe, 0, sizeof(pe));
2453 
2454     tid = mvpp2_prs_tcam_first_free(priv,
2455                     MVPP2_PE_LAST_FREE_TID,
2456                     MVPP2_PE_FIRST_FREE_TID);
2457     if (tid < 0)
2458         return tid;
2459 
2460     pe.index = tid;
2461 
2462     ri_byte = (u8 *)&ri;
2463     ri_byte_mask = (u8 *)&ri_mask;
2464 
2465     mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK);
2466     mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2467 
2468     for (i = 0; i < 4; i++) {
2469         mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i],
2470                          ri_byte_mask[i]);
2471     }
2472 
2473     mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2474     mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2475     mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2476     mvpp2_prs_hw_write(priv, &pe);
2477 
2478     return 0;
2479 }
2480 
2481 /* Set prs flow for the port */
2482 int mvpp2_prs_def_flow(struct mvpp2_port *port)
2483 {
2484     struct mvpp2_prs_entry pe;
2485     int tid;
2486 
2487     memset(&pe, 0, sizeof(pe));
2488 
2489     tid = mvpp2_prs_flow_find(port->priv, port->id);
2490 
2491     /* Such entry not exist */
2492     if (tid < 0) {
2493         /* Go through the all entires from last to first */
2494         tid = mvpp2_prs_tcam_first_free(port->priv,
2495                         MVPP2_PE_LAST_FREE_TID,
2496                            MVPP2_PE_FIRST_FREE_TID);
2497         if (tid < 0)
2498             return tid;
2499 
2500         pe.index = tid;
2501 
2502         /* Set flow ID*/
2503         mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2504         mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2505 
2506         /* Update shadow table */
2507         mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
2508     } else {
2509         mvpp2_prs_init_from_hw(port->priv, &pe, tid);
2510     }
2511 
2512     mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2513     mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
2514     mvpp2_prs_hw_write(port->priv, &pe);
2515 
2516     return 0;
2517 }
2518 
2519 int mvpp2_prs_hits(struct mvpp2 *priv, int index)
2520 {
2521     u32 val;
2522 
2523     if (index > MVPP2_PRS_TCAM_SRAM_SIZE)
2524         return -EINVAL;
2525 
2526     mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
2527 
2528     val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
2529 
2530     val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
2531 
2532     return val;
2533 }