Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * RSS and Classifier helpers for Marvell PPv2 Network Controller
0004  *
0005  * Copyright (C) 2014 Marvell
0006  *
0007  * Marcin Wojtas <mw@semihalf.com>
0008  */
0009 
0010 #include "mvpp2.h"
0011 #include "mvpp2_cls.h"
0012 #include "mvpp2_prs.h"
0013 
/* Build one entry of the classifier flow table below: ties an ethtool-level
 * flow type and a HW flow id to the parser Result Info (ri/ri_mask) that
 * identifies the traffic, plus the hash options this flow may use for RSS.
 */
#define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask)	\
{								\
	.flow_type = _type,					\
	.flow_id = _id,						\
	.supported_hash_opts = _opts,				\
	.prs_ri = {						\
		.ri = _ri,					\
		.ri_mask = _ri_mask				\
	}							\
}
0024 
0025 static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
0026     /* TCP over IPv4 flows, Not fragmented, no vlan tag */
0027     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
0028                MVPP22_CLS_HEK_IP4_5T,
0029                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
0030                MVPP2_PRS_RI_L4_TCP,
0031                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0032 
0033     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
0034                MVPP22_CLS_HEK_IP4_5T,
0035                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
0036                MVPP2_PRS_RI_L4_TCP,
0037                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0038 
0039     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
0040                MVPP22_CLS_HEK_IP4_5T,
0041                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
0042                MVPP2_PRS_RI_L4_TCP,
0043                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0044 
0045     /* TCP over IPv4 flows, Not fragmented, with vlan tag */
0046     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
0047                MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
0048                MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
0049                MVPP2_PRS_IP_MASK),
0050 
0051     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
0052                MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
0053                MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
0054                MVPP2_PRS_IP_MASK),
0055 
0056     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
0057                MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
0058                MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
0059                MVPP2_PRS_IP_MASK),
0060 
0061     /* TCP over IPv4 flows, fragmented, no vlan tag */
0062     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
0063                MVPP22_CLS_HEK_IP4_2T,
0064                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
0065                MVPP2_PRS_RI_L4_TCP,
0066                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0067 
0068     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
0069                MVPP22_CLS_HEK_IP4_2T,
0070                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
0071                MVPP2_PRS_RI_L4_TCP,
0072                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0073 
0074     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
0075                MVPP22_CLS_HEK_IP4_2T,
0076                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
0077                MVPP2_PRS_RI_L4_TCP,
0078                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0079 
0080     /* TCP over IPv4 flows, fragmented, with vlan tag */
0081     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
0082                MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
0083                MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
0084                MVPP2_PRS_IP_MASK),
0085 
0086     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
0087                MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
0088                MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
0089                MVPP2_PRS_IP_MASK),
0090 
0091     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
0092                MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
0093                MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
0094                MVPP2_PRS_IP_MASK),
0095 
0096     /* UDP over IPv4 flows, Not fragmented, no vlan tag */
0097     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
0098                MVPP22_CLS_HEK_IP4_5T,
0099                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
0100                MVPP2_PRS_RI_L4_UDP,
0101                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0102 
0103     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
0104                MVPP22_CLS_HEK_IP4_5T,
0105                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
0106                MVPP2_PRS_RI_L4_UDP,
0107                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0108 
0109     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
0110                MVPP22_CLS_HEK_IP4_5T,
0111                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
0112                MVPP2_PRS_RI_L4_UDP,
0113                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0114 
0115     /* UDP over IPv4 flows, Not fragmented, with vlan tag */
0116     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
0117                MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
0118                MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
0119                MVPP2_PRS_IP_MASK),
0120 
0121     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
0122                MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
0123                MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
0124                MVPP2_PRS_IP_MASK),
0125 
0126     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
0127                MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
0128                MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
0129                MVPP2_PRS_IP_MASK),
0130 
0131     /* UDP over IPv4 flows, fragmented, no vlan tag */
0132     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
0133                MVPP22_CLS_HEK_IP4_2T,
0134                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
0135                MVPP2_PRS_RI_L4_UDP,
0136                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0137 
0138     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
0139                MVPP22_CLS_HEK_IP4_2T,
0140                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
0141                MVPP2_PRS_RI_L4_UDP,
0142                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0143 
0144     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
0145                MVPP22_CLS_HEK_IP4_2T,
0146                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
0147                MVPP2_PRS_RI_L4_UDP,
0148                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0149 
0150     /* UDP over IPv4 flows, fragmented, with vlan tag */
0151     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
0152                MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
0153                MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
0154                MVPP2_PRS_IP_MASK),
0155 
0156     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
0157                MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
0158                MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
0159                MVPP2_PRS_IP_MASK),
0160 
0161     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
0162                MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
0163                MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
0164                MVPP2_PRS_IP_MASK),
0165 
0166     /* TCP over IPv6 flows, not fragmented, no vlan tag */
0167     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
0168                MVPP22_CLS_HEK_IP6_5T,
0169                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
0170                MVPP2_PRS_RI_L4_TCP,
0171                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0172 
0173     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
0174                MVPP22_CLS_HEK_IP6_5T,
0175                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
0176                MVPP2_PRS_RI_L4_TCP,
0177                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0178 
0179     /* TCP over IPv6 flows, not fragmented, with vlan tag */
0180     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
0181                MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
0182                MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
0183                MVPP2_PRS_IP_MASK),
0184 
0185     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
0186                MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
0187                MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
0188                MVPP2_PRS_IP_MASK),
0189 
0190     /* TCP over IPv6 flows, fragmented, no vlan tag */
0191     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
0192                MVPP22_CLS_HEK_IP6_2T,
0193                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
0194                MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
0195                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0196 
0197     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
0198                MVPP22_CLS_HEK_IP6_2T,
0199                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
0200                MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
0201                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0202 
0203     /* TCP over IPv6 flows, fragmented, with vlan tag */
0204     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
0205                MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
0206                MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
0207                MVPP2_PRS_RI_L4_TCP,
0208                MVPP2_PRS_IP_MASK),
0209 
0210     MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
0211                MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
0212                MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
0213                MVPP2_PRS_RI_L4_TCP,
0214                MVPP2_PRS_IP_MASK),
0215 
0216     /* UDP over IPv6 flows, not fragmented, no vlan tag */
0217     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
0218                MVPP22_CLS_HEK_IP6_5T,
0219                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
0220                MVPP2_PRS_RI_L4_UDP,
0221                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0222 
0223     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
0224                MVPP22_CLS_HEK_IP6_5T,
0225                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
0226                MVPP2_PRS_RI_L4_UDP,
0227                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0228 
0229     /* UDP over IPv6 flows, not fragmented, with vlan tag */
0230     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
0231                MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
0232                MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
0233                MVPP2_PRS_IP_MASK),
0234 
0235     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
0236                MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
0237                MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
0238                MVPP2_PRS_IP_MASK),
0239 
0240     /* UDP over IPv6 flows, fragmented, no vlan tag */
0241     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
0242                MVPP22_CLS_HEK_IP6_2T,
0243                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
0244                MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
0245                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0246 
0247     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
0248                MVPP22_CLS_HEK_IP6_2T,
0249                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
0250                MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
0251                MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
0252 
0253     /* UDP over IPv6 flows, fragmented, with vlan tag */
0254     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
0255                MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
0256                MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
0257                MVPP2_PRS_RI_L4_UDP,
0258                MVPP2_PRS_IP_MASK),
0259 
0260     MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
0261                MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
0262                MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
0263                MVPP2_PRS_RI_L4_UDP,
0264                MVPP2_PRS_IP_MASK),
0265 
0266     /* IPv4 flows, no vlan tag */
0267     MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
0268                MVPP22_CLS_HEK_IP4_2T,
0269                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
0270                MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
0271     MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
0272                MVPP22_CLS_HEK_IP4_2T,
0273                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
0274                MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
0275     MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
0276                MVPP22_CLS_HEK_IP4_2T,
0277                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
0278                MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
0279 
0280     /* IPv4 flows, with vlan tag */
0281     MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
0282                MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
0283                MVPP2_PRS_RI_L3_IP4,
0284                MVPP2_PRS_RI_L3_PROTO_MASK),
0285     MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
0286                MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
0287                MVPP2_PRS_RI_L3_IP4_OPT,
0288                MVPP2_PRS_RI_L3_PROTO_MASK),
0289     MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
0290                MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
0291                MVPP2_PRS_RI_L3_IP4_OTHER,
0292                MVPP2_PRS_RI_L3_PROTO_MASK),
0293 
0294     /* IPv6 flows, no vlan tag */
0295     MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
0296                MVPP22_CLS_HEK_IP6_2T,
0297                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
0298                MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
0299     MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
0300                MVPP22_CLS_HEK_IP6_2T,
0301                MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
0302                MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
0303 
0304     /* IPv6 flows, with vlan tag */
0305     MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
0306                MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
0307                MVPP2_PRS_RI_L3_IP6,
0308                MVPP2_PRS_RI_L3_PROTO_MASK),
0309     MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
0310                MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
0311                MVPP2_PRS_RI_L3_IP6,
0312                MVPP2_PRS_RI_L3_PROTO_MASK),
0313 
0314     /* Non IP flow, no vlan tag */
0315     MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_UNTAG,
0316                0,
0317                MVPP2_PRS_RI_VLAN_NONE,
0318                MVPP2_PRS_RI_VLAN_MASK),
0319     /* Non IP flow, with vlan tag */
0320     MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
0321                MVPP22_CLS_HEK_OPT_VLAN,
0322                0, 0),
0323 };
0324 
0325 u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index)
0326 {
0327     mvpp2_write(priv, MVPP2_CTRS_IDX, index);
0328 
0329     return mvpp2_read(priv, MVPP2_CLS_FLOW_TBL_HIT_CTR);
0330 }
0331 
0332 void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
0333              struct mvpp2_cls_flow_entry *fe)
0334 {
0335     fe->index = index;
0336     mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index);
0337     fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG);
0338     fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG);
0339     fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG);
0340 }
0341 
/* Update classification flow table registers.
 * The index register must be written first: the TBLx data writes below
 * target whatever entry was last selected through it.
 */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
0351 
0352 u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
0353 {
0354     mvpp2_write(priv, MVPP2_CTRS_IDX, index);
0355 
0356     return mvpp2_read(priv, MVPP2_CLS_DEC_TBL_HIT_CTR);
0357 }
0358 
0359 void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
0360                struct mvpp2_cls_lookup_entry *le)
0361 {
0362     u32 val;
0363 
0364     val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid;
0365     mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
0366     le->way = way;
0367     le->lkpid = lkpid;
0368     le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG);
0369 }
0370 
0371 /* Update classification lookup table register */
0372 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
0373                    struct mvpp2_cls_lookup_entry *le)
0374 {
0375     u32 val;
0376 
0377     val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
0378     mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
0379     mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
0380 }
0381 
0382 /* Operations on flow entry */
0383 static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe)
0384 {
0385     return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
0386 }
0387 
0388 static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe,
0389                        int num_of_fields)
0390 {
0391     fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
0392     fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields);
0393 }
0394 
0395 static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe,
0396                   int field_index)
0397 {
0398     return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) &
0399         MVPP2_CLS_FLOW_TBL2_FLD_MASK;
0400 }
0401 
0402 static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe,
0403                    int field_index, int field_id)
0404 {
0405     fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index,
0406                         MVPP2_CLS_FLOW_TBL2_FLD_MASK);
0407     fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id);
0408 }
0409 
0410 static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe,
0411                    int engine)
0412 {
0413     fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK);
0414     fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine);
0415 }
0416 
0417 int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe)
0418 {
0419     return (fe->data[0] >> MVPP2_CLS_FLOW_TBL0_OFFS) &
0420         MVPP2_CLS_FLOW_TBL0_ENG_MASK;
0421 }
0422 
0423 static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe,
0424                        bool from_packet)
0425 {
0426     if (from_packet)
0427         fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
0428     else
0429         fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
0430 }
0431 
0432 static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
0433                     bool is_last)
0434 {
0435     fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST;
0436     fe->data[0] |= !!is_last;
0437 }
0438 
0439 static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio)
0440 {
0441     fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK);
0442     fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio);
0443 }
0444 
0445 static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
0446                     u32 port)
0447 {
0448     fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
0449 }
0450 
0451 static void mvpp2_cls_flow_port_remove(struct mvpp2_cls_flow_entry *fe,
0452                        u32 port)
0453 {
0454     fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
0455 }
0456 
0457 static void mvpp2_cls_flow_lu_type_set(struct mvpp2_cls_flow_entry *fe,
0458                        u8 lu_type)
0459 {
0460     fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK);
0461     fe->data[1] |= MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu_type);
0462 }
0463 
/* Initialize the parser entry for the given flow: program the parser so that
 * packets whose Result Info matches ri/ri_mask are tagged with this flow's id.
 */
static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
				    const struct mvpp2_cls_flow *flow)
{
	mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
			   flow->prs_ri.ri_mask);
}
0471 
0472 /* Initialize the Lookup Id table entry for the given flow */
0473 static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
0474                     const struct mvpp2_cls_flow *flow)
0475 {
0476     struct mvpp2_cls_lookup_entry le;
0477 
0478     le.way = 0;
0479     le.lkpid = flow->flow_id;
0480 
0481     /* The default RxQ for this port is set in the C2 lookup */
0482     le.data = 0;
0483 
0484     /* We point on the first lookup in the sequence for the flow, that is
0485      * the C2 lookup.
0486      */
0487     le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_CLS_FLT_FIRST(flow->flow_id));
0488 
0489     /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
0490     le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
0491 
0492     mvpp2_cls_lookup_write(priv, &le);
0493 }
0494 
0495 static void mvpp2_cls_c2_write(struct mvpp2 *priv,
0496                    struct mvpp2_cls_c2_entry *c2)
0497 {
0498     u32 val;
0499     mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
0500 
0501     val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
0502     if (c2->valid)
0503         val &= ~MVPP22_CLS_C2_TCAM_INV_BIT;
0504     else
0505         val |= MVPP22_CLS_C2_TCAM_INV_BIT;
0506     mvpp2_write(priv, MVPP22_CLS_C2_TCAM_INV, val);
0507 
0508     mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
0509 
0510     mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
0511     mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
0512     mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
0513     mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
0514 
0515     mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
0516     mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
0517     mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
0518     mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
0519     /* Writing TCAM_DATA4 flushes writes to TCAM_DATA0-4 and INV to HW */
0520     mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
0521 }
0522 
0523 void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
0524                struct mvpp2_cls_c2_entry *c2)
0525 {
0526     u32 val;
0527     mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
0528 
0529     c2->index = index;
0530 
0531     c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
0532     c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
0533     c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
0534     c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
0535     c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
0536 
0537     c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
0538 
0539     c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
0540     c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
0541     c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
0542     c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
0543 
0544     val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
0545     c2->valid = !(val & MVPP22_CLS_C2_TCAM_INV_BIT);
0546 }
0547 
0548 static int mvpp2_cls_ethtool_flow_to_type(int flow_type)
0549 {
0550     switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
0551     case ETHER_FLOW:
0552         return MVPP22_FLOW_ETHERNET;
0553     case TCP_V4_FLOW:
0554         return MVPP22_FLOW_TCP4;
0555     case TCP_V6_FLOW:
0556         return MVPP22_FLOW_TCP6;
0557     case UDP_V4_FLOW:
0558         return MVPP22_FLOW_UDP4;
0559     case UDP_V6_FLOW:
0560         return MVPP22_FLOW_UDP6;
0561     case IPV4_FLOW:
0562         return MVPP22_FLOW_IP4;
0563     case IPV6_FLOW:
0564         return MVPP22_FLOW_IP6;
0565     default:
0566         return -EOPNOTSUPP;
0567     }
0568 }
0569 
/* Translate a per-port rule location into a global C2 entry index.
 * Presumably each port owns a dedicated range of C2 entries for its RFS
 * rules (see MVPP22_CLS_C2_RFS_LOC) — confirm against mvpp2_cls.h.
 */
static int mvpp2_cls_c2_port_flow_index(struct mvpp2_port *port, int loc)
{
	return MVPP22_CLS_C2_RFS_LOC(port->id, loc);
}
0574 
/* Initialize the flow table entries for the given flow.
 *
 * Left unchanged: this is an ordered sequence of HW read-modify-write
 * operations on flow-table registers; restructuring it risks changing the
 * programmed state.
 */
static void mvpp2_cls_flow_init(struct mvpp2 *priv,
				const struct mvpp2_cls_flow *flow)
{
	struct mvpp2_cls_flow_entry fe;
	int i, pri = 0;

	/* Assign default values to all entries in the flow: each entry gets
	 * an increasing priority, and the final entry is flagged as the last
	 * lookup of the sequence.
	 */
	for (i = MVPP2_CLS_FLT_FIRST(flow->flow_id);
	     i <= MVPP2_CLS_FLT_LAST(flow->flow_id); i++) {
		memset(&fe, 0, sizeof(fe));
		fe.index = i;
		mvpp2_cls_flow_pri_set(&fe, pri++);

		if (i == MVPP2_CLS_FLT_LAST(flow->flow_id))
			mvpp2_cls_flow_last_set(&fe, 1);

		mvpp2_cls_flow_write(priv, &fe);
	}

	/* RSS config C2 lookup: read back the entry written above, then
	 * configure it for the C2 engine with all ports enabled.
	 */
	mvpp2_cls_flow_read(priv, MVPP2_CLS_FLT_C2_RSS_ENTRY(flow->flow_id),
			    &fe);

	mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
	mvpp2_cls_flow_port_id_sel(&fe, true);
	mvpp2_cls_flow_lu_type_set(&fe, MVPP22_CLS_LU_TYPE_ALL);

	/* Add all ports */
	for (i = 0; i < MVPP2_MAX_PORTS; i++)
		mvpp2_cls_flow_port_add(&fe, BIT(i));

	mvpp2_cls_flow_write(priv, &fe);

	/* C3Hx lookups: one hash entry per port, each matching only its own
	 * port bit.
	 */
	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
		mvpp2_cls_flow_read(priv,
				    MVPP2_CLS_FLT_HASH_ENTRY(i, flow->flow_id),
				    &fe);

		/* Set a default engine. Will be overwritten when setting the
		 * real HEK parameters
		 */
		mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C3HA);
		mvpp2_cls_flow_port_id_sel(&fe, true);
		mvpp2_cls_flow_port_add(&fe, BIT(i));

		mvpp2_cls_flow_write(priv, &fe);
	}
}
0625 
0626 /* Adds a field to the Header Extracted Key generation parameters*/
0627 static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe,
0628                     u32 field_id)
0629 {
0630     int nb_fields = mvpp2_cls_flow_hek_num_get(fe);
0631 
0632     if (nb_fields == MVPP2_FLOW_N_FIELDS)
0633         return -EINVAL;
0634 
0635     mvpp2_cls_flow_hek_set(fe, nb_fields, field_id);
0636 
0637     mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1);
0638 
0639     return 0;
0640 }
0641 
0642 static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
0643                      unsigned long hash_opts)
0644 {
0645     u32 field_id;
0646     int i;
0647 
0648     /* Clear old fields */
0649     mvpp2_cls_flow_hek_num_set(fe, 0);
0650     fe->data[2] = 0;
0651 
0652     for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
0653         switch (BIT(i)) {
0654         case MVPP22_CLS_HEK_OPT_MAC_DA:
0655             field_id = MVPP22_CLS_FIELD_MAC_DA;
0656             break;
0657         case MVPP22_CLS_HEK_OPT_VLAN:
0658             field_id = MVPP22_CLS_FIELD_VLAN;
0659             break;
0660         case MVPP22_CLS_HEK_OPT_VLAN_PRI:
0661             field_id = MVPP22_CLS_FIELD_VLAN_PRI;
0662             break;
0663         case MVPP22_CLS_HEK_OPT_IP4SA:
0664             field_id = MVPP22_CLS_FIELD_IP4SA;
0665             break;
0666         case MVPP22_CLS_HEK_OPT_IP4DA:
0667             field_id = MVPP22_CLS_FIELD_IP4DA;
0668             break;
0669         case MVPP22_CLS_HEK_OPT_IP6SA:
0670             field_id = MVPP22_CLS_FIELD_IP6SA;
0671             break;
0672         case MVPP22_CLS_HEK_OPT_IP6DA:
0673             field_id = MVPP22_CLS_FIELD_IP6DA;
0674             break;
0675         case MVPP22_CLS_HEK_OPT_L4SIP:
0676             field_id = MVPP22_CLS_FIELD_L4SIP;
0677             break;
0678         case MVPP22_CLS_HEK_OPT_L4DIP:
0679             field_id = MVPP22_CLS_FIELD_L4DIP;
0680             break;
0681         default:
0682             return -EINVAL;
0683         }
0684         if (mvpp2_flow_add_hek_field(fe, field_id))
0685             return -EINVAL;
0686     }
0687 
0688     return 0;
0689 }
0690 
0691 /* Returns the size, in bits, of the corresponding HEK field */
0692 static int mvpp2_cls_hek_field_size(u32 field)
0693 {
0694     switch (field) {
0695     case MVPP22_CLS_HEK_OPT_MAC_DA:
0696         return 48;
0697     case MVPP22_CLS_HEK_OPT_VLAN:
0698         return 12;
0699     case MVPP22_CLS_HEK_OPT_VLAN_PRI:
0700         return 3;
0701     case MVPP22_CLS_HEK_OPT_IP4SA:
0702     case MVPP22_CLS_HEK_OPT_IP4DA:
0703         return 32;
0704     case MVPP22_CLS_HEK_OPT_IP6SA:
0705     case MVPP22_CLS_HEK_OPT_IP6DA:
0706         return 128;
0707     case MVPP22_CLS_HEK_OPT_L4SIP:
0708     case MVPP22_CLS_HEK_OPT_L4DIP:
0709         return 16;
0710     default:
0711         return -1;
0712     }
0713 }
0714 
0715 const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
0716 {
0717     if (flow >= MVPP2_N_PRS_FLOWS)
0718         return NULL;
0719 
0720     return &cls_flows[flow];
0721 }
0722 
/* Set the hash generation options for the given traffic flow.
 * One traffic flow (in the ethtool sense) has multiple classification flows,
 * to handle specific cases such as fragmentation, or the presence of a
 * VLAN / DSA Tag.
 *
 * Each of these individual flows has different constraints, for example we
 * can't hash fragmented packets on L4 data (else we would risk having packet
 * re-ordering), so each classification flows masks the options with their
 * supported ones.
 *
 * Returns 0 on success, -EINVAL on an unknown flow id or when the requested
 * fields cannot be programmed into an entry.
 */
static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
					u16 requested_opts)
{
	const struct mvpp2_cls_flow *flow;
	struct mvpp2_cls_flow_entry fe;
	int i, engine, flow_index;
	u16 hash_opts;

	/* Visit every classification flow belonging to this traffic flow */
	for_each_cls_flow_id_with_type(i, flow_type) {
		flow = mvpp2_cls_flow_get(i);
		if (!flow)
			return -EINVAL;

		/* This port's hash entry for the flow */
		flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);

		mvpp2_cls_flow_read(port->priv, flow_index, &fe);

		/* Only keep the options this particular flow supports */
		hash_opts = flow->supported_hash_opts & requested_opts;

		/* Use C3HB engine to access L4 infos. This adds L4 infos to the
		 * hash parameters
		 */
		if (hash_opts & MVPP22_CLS_HEK_L4_OPTS)
			engine = MVPP22_CLS_ENGINE_C3HB;
		else
			engine = MVPP22_CLS_ENGINE_C3HA;

		if (mvpp2_flow_set_hek_fields(&fe, hash_opts))
			return -EINVAL;

		mvpp2_cls_flow_eng_set(&fe, engine);

		mvpp2_cls_flow_write(port->priv, &fe);
	}

	return 0;
}
0771 
0772 u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
0773 {
0774     u16 hash_opts = 0;
0775     int n_fields, i, field;
0776 
0777     n_fields = mvpp2_cls_flow_hek_num_get(fe);
0778 
0779     for (i = 0; i < n_fields; i++) {
0780         field = mvpp2_cls_flow_hek_get(fe, i);
0781 
0782         switch (field) {
0783         case MVPP22_CLS_FIELD_MAC_DA:
0784             hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
0785             break;
0786         case MVPP22_CLS_FIELD_VLAN:
0787             hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
0788             break;
0789         case MVPP22_CLS_FIELD_VLAN_PRI:
0790             hash_opts |= MVPP22_CLS_HEK_OPT_VLAN_PRI;
0791             break;
0792         case MVPP22_CLS_FIELD_L3_PROTO:
0793             hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
0794             break;
0795         case MVPP22_CLS_FIELD_IP4SA:
0796             hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA;
0797             break;
0798         case MVPP22_CLS_FIELD_IP4DA:
0799             hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA;
0800             break;
0801         case MVPP22_CLS_FIELD_IP6SA:
0802             hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA;
0803             break;
0804         case MVPP22_CLS_FIELD_IP6DA:
0805             hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA;
0806             break;
0807         case MVPP22_CLS_FIELD_L4SIP:
0808             hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
0809             break;
0810         case MVPP22_CLS_FIELD_L4DIP:
0811             hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
0812             break;
0813         default:
0814             break;
0815         }
0816     }
0817     return hash_opts;
0818 }
0819 
0820 /* Returns the hash opts for this flow. There are several classifier flows
0821  * for one traffic flow, this returns an aggregation of all configurations.
0822  */
0823 static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
0824 {
0825     const struct mvpp2_cls_flow *flow;
0826     struct mvpp2_cls_flow_entry fe;
0827     int i, flow_index;
0828     u16 hash_opts = 0;
0829 
0830     for_each_cls_flow_id_with_type(i, flow_type) {
0831         flow = mvpp2_cls_flow_get(i);
0832         if (!flow)
0833             return 0;
0834 
0835         flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
0836 
0837         mvpp2_cls_flow_read(port->priv, flow_index, &fe);
0838 
0839         hash_opts |= mvpp2_flow_get_hek_fields(&fe);
0840     }
0841 
0842     return hash_opts;
0843 }
0844 
0845 static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
0846 {
0847     const struct mvpp2_cls_flow *flow;
0848     int i;
0849 
0850     for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
0851         flow = mvpp2_cls_flow_get(i);
0852         if (!flow)
0853             break;
0854 
0855         mvpp2_cls_flow_prs_init(priv, flow);
0856         mvpp2_cls_flow_lkp_init(priv, flow);
0857         mvpp2_cls_flow_init(priv, flow);
0858     }
0859 }
0860 
/* Install the per-port C2 entry used for RSS: it matches all traffic from
 * this port and sets the default rx queue, while leaving the final queue
 * selection overridable by the RSS engine.
 */
static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
{
    struct mvpp2_cls_c2_entry c2;
    u8 qh, ql, pmap;

    memset(&c2, 0, sizeof(c2));

    /* One dedicated C2 RSS entry per port */
    c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id);

    /* Match only on traffic coming from this port */
    pmap = BIT(port->id);
    c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
    c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));

    /* Match on Lookup Type */
    c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
    c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP22_CLS_LU_TYPE_ALL);

    /* Update RSS status after matching this entry */
    c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);

    /* Mark packet as "forwarded to software", needed for RSS */
    c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);

    /* Configure the default rx queue : Update Queue Low and Queue High, but
     * don't lock, since the rx queue selection might be overridden by RSS
     */
    c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) |
           MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD);

    /* Split the port's first rx queue into the high/low parts the C2
     * attribute register expects.
     */
    qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
    ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;

    c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
              MVPP22_CLS_C2_ATTR0_QLOW(ql);

    c2.valid = true;

    mvpp2_cls_c2_write(port->priv, &c2);
}
0900 
/* Classifier default initialization: enable the classification unit and
 * bring the flow, lookup and C2 tables to a known empty state before the
 * static flow entries are installed.
 */
void mvpp2_cls_init(struct mvpp2 *priv)
{
    struct mvpp2_cls_lookup_entry le;
    struct mvpp2_cls_flow_entry fe;
    struct mvpp2_cls_c2_entry c2;
    int index;

    /* Enable classifier */
    mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

    /* Clear classifier flow table */
    memset(&fe.data, 0, sizeof(fe.data));
    for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
        fe.index = index;
        mvpp2_cls_flow_write(priv, &fe);
    }

    /* Clear classifier lookup table, for both lookup ways */
    le.data = 0;
    for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
        le.lkpid = index;
        le.way = 0;
        mvpp2_cls_lookup_write(priv, &le);

        le.way = 1;
        mvpp2_cls_lookup_write(priv, &le);
    }

    /* Clear C2 TCAM engine table */
    memset(&c2, 0, sizeof(c2));
    c2.valid = false;
    for (index = 0; index < MVPP22_CLS_C2_N_ENTRIES; index++) {
        c2.index = index;
        mvpp2_cls_c2_write(priv, &c2);
    }

    /* Disable the FIFO stages in C2 engine, which are only used in BIST
     * mode
     */
    mvpp2_write(priv, MVPP22_CLS_C2_TCAM_CTRL,
            MVPP22_CLS_C2_TCAM_BYPASS_FIFO);

    mvpp2_cls_port_init_flows(priv);
}
0946 
/* Per-port classifier configuration: select lookup way 0 for the port,
 * program its lookup ID table entry (initial rx queue, classification
 * engines disabled) and install the per-port C2 RSS entry.
 */
void mvpp2_cls_port_config(struct mvpp2_port *port)
{
    struct mvpp2_cls_lookup_entry le;
    u32 val;

    /* Set way for the port */
    val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
    val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
    mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

    /* Pick the entry to be accessed in lookup ID decoding table
     * according to the way and lkpid.
     */
    le.lkpid = port->id;
    le.way = 0;
    le.data = 0;

    /* Set initial CPU queue for receiving packets */
    le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
    le.data |= port->first_rxq;

    /* Disable classification engines */
    le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

    /* Update lookup ID table entry */
    mvpp2_cls_lookup_write(port->priv, &le);

    mvpp2_port_c2_cls_init(port);
}
0976 
/* Read the hit counter of a C2 TCAM entry: the entry is selected by writing
 * its index to the TCAM index register, then the counter is read back.
 */
u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index)
{
    mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2_index);

    return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
}
0983 
/* Enable RSS in the port's dedicated C2 entry and select the given global
 * RSS context through the entry's rx queue attribute.
 */
static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port, u32 ctx)
{
    struct mvpp2_cls_c2_entry c2;
    u8 qh, ql;

    mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);

    /* The RxQ number is used to select the RSS table. In that case, we set
     * it to be the ctx number.
     */
    qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
    ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;

    c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
             MVPP22_CLS_C2_ATTR0_QLOW(ql);

    /* Turn the RSS enable attribute on */
    c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;

    mvpp2_cls_c2_write(port->priv, &c2);
}
1004 
/* Disable RSS in the port's dedicated C2 entry and restore the port's first
 * rx queue as the default destination.
 */
static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
{
    struct mvpp2_cls_c2_entry c2;
    u8 qh, ql;

    mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);

    /* Reset the default destination RxQ to the port's first rx queue. */
    qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
    ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;

    c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
              MVPP22_CLS_C2_ATTR0_QLOW(ql);

    /* Turn the RSS enable attribute off */
    c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;

    mvpp2_cls_c2_write(port->priv, &c2);
}
1023 
/* Translate a port-local RSS context number into the global RSS context
 * number; a negative value means the slot is unused.
 */
static inline int mvpp22_rss_ctx(struct mvpp2_port *port, int port_rss_ctx)
{
    return port->rss_ctx[port_rss_ctx];
}
1028 
1029 int mvpp22_port_rss_enable(struct mvpp2_port *port)
1030 {
1031     if (mvpp22_rss_ctx(port, 0) < 0)
1032         return -EINVAL;
1033 
1034     mvpp2_rss_port_c2_enable(port, mvpp22_rss_ctx(port, 0));
1035 
1036     return 0;
1037 }
1038 
/* Disable RSS steering on the port, falling back to the default rx queue.
 * Returns -EINVAL when the port has no default RSS context.
 */
int mvpp22_port_rss_disable(struct mvpp2_port *port)
{
    if (mvpp22_rss_ctx(port, 0) < 0)
        return -EINVAL;

    mvpp2_rss_port_c2_disable(port);

    return 0;
}
1048 
/* Make a C2 entry stop matching traffic from this port by clearing the
 * port's bit in the entry's port map.
 */
static void mvpp22_port_c2_lookup_disable(struct mvpp2_port *port, int entry)
{
    struct mvpp2_cls_c2_entry c2;

    mvpp2_cls_c2_read(port->priv, entry, &c2);

    /* Clear the port map so that the entry doesn't match anymore */
    c2.tcam[4] &= ~(MVPP2_CLS_C2_PORT_ID(BIT(port->id)));

    mvpp2_cls_c2_write(port->priv, &c2);
}
1060 
/* Set CPU queue number for oversize packets */
void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
    u32 val;

    /* The oversize rx queue number is split across two registers: low bits
     * here, the remaining high bits just below.
     */
    mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
            port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

    mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
            (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

    /* Clear this port's bit in the software-forwarding port control mask */
    val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
    val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
    mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
1076 
1077 static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port,
1078                        struct mvpp2_rfs_rule *rule)
1079 {
1080     struct flow_action_entry *act;
1081     struct mvpp2_cls_c2_entry c2;
1082     u8 qh, ql, pmap;
1083     int index, ctx;
1084 
1085     if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
1086         return -EOPNOTSUPP;
1087 
1088     memset(&c2, 0, sizeof(c2));
1089 
1090     index = mvpp2_cls_c2_port_flow_index(port, rule->loc);
1091     if (index < 0)
1092         return -EINVAL;
1093     c2.index = index;
1094 
1095     act = &rule->flow->action.entries[0];
1096 
1097     rule->c2_index = c2.index;
1098 
1099     c2.tcam[3] = (rule->c2_tcam & 0xffff) |
1100              ((rule->c2_tcam_mask & 0xffff) << 16);
1101     c2.tcam[2] = ((rule->c2_tcam >> 16) & 0xffff) |
1102              (((rule->c2_tcam_mask >> 16) & 0xffff) << 16);
1103     c2.tcam[1] = ((rule->c2_tcam >> 32) & 0xffff) |
1104              (((rule->c2_tcam_mask >> 32) & 0xffff) << 16);
1105     c2.tcam[0] = ((rule->c2_tcam >> 48) & 0xffff) |
1106              (((rule->c2_tcam_mask >> 48) & 0xffff) << 16);
1107 
1108     pmap = BIT(port->id);
1109     c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
1110     c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
1111 
1112     /* Match on Lookup Type */
1113     c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
1114     c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(rule->loc);
1115 
1116     if (act->id == FLOW_ACTION_DROP) {
1117         c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_RED_LOCK);
1118     } else {
1119         /* We want to keep the default color derived from the Header
1120          * Parser drop entries, for VLAN and MAC filtering. This will
1121          * assign a default color of Green or Red, and we want matches
1122          * with a non-drop action to keep that color.
1123          */
1124         c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_NO_UPD_LOCK);
1125 
1126         /* Update RSS status after matching this entry */
1127         if (act->queue.ctx)
1128             c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
1129 
1130         /* Always lock the RSS_EN decision. We might have high prio
1131          * rules steering to an RXQ, and a lower one steering to RSS,
1132          * we don't want the low prio RSS rule overwriting this flag.
1133          */
1134         c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
1135 
1136         /* Mark packet as "forwarded to software", needed for RSS */
1137         c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
1138 
1139         c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD_LOCK) |
1140                MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD_LOCK);
1141 
1142         if (act->queue.ctx) {
1143             /* Get the global ctx number */
1144             ctx = mvpp22_rss_ctx(port, act->queue.ctx);
1145             if (ctx < 0)
1146                 return -EINVAL;
1147 
1148             qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
1149             ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
1150         } else {
1151             qh = ((act->queue.index + port->first_rxq) >> 3) &
1152                   MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
1153             ql = (act->queue.index + port->first_rxq) &
1154                   MVPP22_CLS_C2_ATTR0_QLOW_MASK;
1155         }
1156 
1157         c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
1158                   MVPP22_CLS_C2_ATTR0_QLOW(ql);
1159     }
1160 
1161     c2.valid = true;
1162 
1163     mvpp2_cls_c2_write(port->priv, &c2);
1164 
1165     return 0;
1166 }
1167 
/* Insert an RFS rule into the C2 engine. Thin wrapper around the TCAM rule
 * programming, kept for symmetry with the flow-table insertion path.
 */
static int mvpp2_port_c2_rfs_rule_insert(struct mvpp2_port *port,
                     struct mvpp2_rfs_rule *rule)
{
    return mvpp2_port_c2_tcam_rule_add(port, rule);
}
1173 
1174 static int mvpp2_port_cls_rfs_rule_remove(struct mvpp2_port *port,
1175                       struct mvpp2_rfs_rule *rule)
1176 {
1177     const struct mvpp2_cls_flow *flow;
1178     struct mvpp2_cls_flow_entry fe;
1179     int index, i;
1180 
1181     for_each_cls_flow_id_containing_type(i, rule->flow_type) {
1182         flow = mvpp2_cls_flow_get(i);
1183         if (!flow)
1184             return 0;
1185 
1186         index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);
1187 
1188         mvpp2_cls_flow_read(port->priv, index, &fe);
1189         mvpp2_cls_flow_port_remove(&fe, BIT(port->id));
1190         mvpp2_cls_flow_write(port->priv, &fe);
1191     }
1192 
1193     if (rule->c2_index >= 0)
1194         mvpp22_port_c2_lookup_disable(port, rule->c2_index);
1195 
1196     return 0;
1197 }
1198 
/* Insert an RFS rule: program the C2 TCAM entry, then enable the C2 lookup
 * in every flow table entry whose flow can extract all the fields the rule
 * matches on.
 *
 * Returns 0 on success, -EOPNOTSUPP for engines other than C2, or the error
 * from the C2 insertion.
 */
static int mvpp2_port_flt_rfs_rule_insert(struct mvpp2_port *port,
                      struct mvpp2_rfs_rule *rule)
{
    const struct mvpp2_cls_flow *flow;
    struct mvpp2 *priv = port->priv;
    struct mvpp2_cls_flow_entry fe;
    int index, ret, i;

    if (rule->engine != MVPP22_CLS_ENGINE_C2)
        return -EOPNOTSUPP;

    ret = mvpp2_port_c2_rfs_rule_insert(port, rule);
    if (ret)
        return ret;

    for_each_cls_flow_id_containing_type(i, rule->flow_type) {
        flow = mvpp2_cls_flow_get(i);
        if (!flow)
            return 0;

        /* Skip flows that can't extract every field this rule uses */
        if ((rule->hek_fields & flow->supported_hash_opts) != rule->hek_fields)
            continue;

        index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);

        mvpp2_cls_flow_read(priv, index, &fe);
        mvpp2_cls_flow_eng_set(&fe, rule->engine);
        mvpp2_cls_flow_port_id_sel(&fe, true);
        mvpp2_flow_set_hek_fields(&fe, rule->hek_fields);
        mvpp2_cls_flow_lu_type_set(&fe, rule->loc);
        mvpp2_cls_flow_port_add(&fe, 0xf);

        mvpp2_cls_flow_write(priv, &fe);
    }

    return 0;
}
1236 
/* Build the 64-bit C2 TCAM key and mask for an RFS rule from its flow
 * dissector matches, recording the fields used in rule->hek_fields.
 *
 * Returns 0 on success, -EOPNOTSUPP for matches the C2 engine can't
 * express (vlan DEI, or more fields than a flow entry can hold).
 */
static int mvpp2_cls_c2_build_match(struct mvpp2_rfs_rule *rule)
{
    struct flow_rule *flow = rule->flow;
    int offs = 0;

    /* The order of insertion in C2 tcam must match the order in which
     * the fields are found in the header
     */
    if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
        struct flow_match_vlan match;

        flow_rule_match_vlan(flow, &match);
        if (match.mask->vlan_id) {
            rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN;

            rule->c2_tcam |= ((u64)match.key->vlan_id) << offs;
            rule->c2_tcam_mask |= ((u64)match.mask->vlan_id) << offs;

            /* Don't update the offset yet */
        }

        if (match.mask->vlan_priority) {
            rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN_PRI;

            /* VLAN pri is always at offset 13 relative to the
             * current offset
             */
            rule->c2_tcam |= ((u64)match.key->vlan_priority) <<
                (offs + 13);
            rule->c2_tcam_mask |= ((u64)match.mask->vlan_priority) <<
                (offs + 13);
        }

        if (match.mask->vlan_dei)
            return -EOPNOTSUPP;

        /* vlan id and prio always seem to take a full 16-bit slot in
         * the Header Extracted Key.
         */
        offs += 16;
    }

    if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
        struct flow_match_ports match;

        flow_rule_match_ports(flow, &match);
        if (match.mask->src) {
            rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4SIP;

            rule->c2_tcam |= ((u64)ntohs(match.key->src)) << offs;
            rule->c2_tcam_mask |= ((u64)ntohs(match.mask->src)) << offs;
            offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4SIP);
        }

        if (match.mask->dst) {
            rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4DIP;

            rule->c2_tcam |= ((u64)ntohs(match.key->dst)) << offs;
            rule->c2_tcam_mask |= ((u64)ntohs(match.mask->dst)) << offs;
            offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4DIP);
        }
    }

    /* A flow entry can only extract a bounded number of header fields */
    if (hweight16(rule->hek_fields) > MVPP2_FLOW_N_FIELDS)
        return -EOPNOTSUPP;

    return 0;
}
1305 
1306 static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule)
1307 {
1308     struct flow_rule *flow = rule->flow;
1309     struct flow_action_entry *act;
1310 
1311     if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
1312         return -EOPNOTSUPP;
1313 
1314     act = &flow->action.entries[0];
1315     if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP)
1316         return -EOPNOTSUPP;
1317 
1318     /* When both an RSS context and an queue index are set, the index
1319      * is considered as an offset to be added to the indirection table
1320      * entries. We don't support this, so reject this rule.
1321      */
1322     if (act->queue.ctx && act->queue.index)
1323         return -EOPNOTSUPP;
1324 
1325     /* For now, only use the C2 engine which has a HEK size limited to 64
1326      * bits for TCAM matching.
1327      */
1328     rule->engine = MVPP22_CLS_ENGINE_C2;
1329 
1330     if (mvpp2_cls_c2_build_match(rule))
1331         return -EINVAL;
1332 
1333     return 0;
1334 }
1335 
1336 int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port,
1337                    struct ethtool_rxnfc *rxnfc)
1338 {
1339     struct mvpp2_ethtool_fs *efs;
1340 
1341     if (rxnfc->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
1342         return -EINVAL;
1343 
1344     efs = port->rfs_rules[rxnfc->fs.location];
1345     if (!efs)
1346         return -ENOENT;
1347 
1348     memcpy(rxnfc, &efs->rxnfc, sizeof(efs->rxnfc));
1349 
1350     return 0;
1351 }
1352 
/* Insert (or replace) an ethtool classification rule at info->fs.location.
 *
 * The ethtool flow spec is translated into a generic flow rule, parsed into
 * a C2 match, and then installed in hardware. On failure before any hardware
 * change, previously installed rules are untouched; when replacing a rule,
 * the old rule is removed before the new one is inserted.
 */
int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
                   struct ethtool_rxnfc *info)
{
    struct ethtool_rx_flow_spec_input input = {};
    struct ethtool_rx_flow_rule *ethtool_rule;
    struct mvpp2_ethtool_fs *efs, *old_efs;
    int ret = 0;

    if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
        return -EINVAL;

    efs = kzalloc(sizeof(*efs), GFP_KERNEL);
    if (!efs)
        return -ENOMEM;

    input.fs = &info->fs;

    /* We need to manually set the rss_ctx, since this info isn't present
     * in info->fs
     */
    if (info->fs.flow_type & FLOW_RSS)
        input.rss_ctx = info->rss_context;

    ethtool_rule = ethtool_rx_flow_rule_create(&input);
    if (IS_ERR(ethtool_rule)) {
        ret = PTR_ERR(ethtool_rule);
        goto clean_rule;
    }

    efs->rule.flow = ethtool_rule->rule;
    efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type);
    if (efs->rule.flow_type < 0) {
        ret = efs->rule.flow_type;
        goto clean_rule;
    }

    ret = mvpp2_cls_rfs_parse_rule(&efs->rule);
    if (ret)
        goto clean_eth_rule;

    efs->rule.loc = info->fs.location;

    /* Replace an already existing rule */
    if (port->rfs_rules[efs->rule.loc]) {
        old_efs = port->rfs_rules[efs->rule.loc];
        ret = mvpp2_port_cls_rfs_rule_remove(port, &old_efs->rule);
        if (ret)
            goto clean_eth_rule;
        kfree(old_efs);
        port->n_rfs_rules--;
    }

    ret = mvpp2_port_flt_rfs_rule_insert(port, &efs->rule);
    if (ret)
        goto clean_eth_rule;

    /* The intermediate flow rule representation is no longer needed once
     * the hardware is programmed.
     */
    ethtool_rx_flow_rule_destroy(ethtool_rule);
    efs->rule.flow = NULL;

    memcpy(&efs->rxnfc, info, sizeof(*info));
    port->rfs_rules[efs->rule.loc] = efs;
    port->n_rfs_rules++;

    return ret;

clean_eth_rule:
    ethtool_rx_flow_rule_destroy(ethtool_rule);
clean_rule:
    kfree(efs);
    return ret;
}
1424 
1425 int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
1426                    struct ethtool_rxnfc *info)
1427 {
1428     struct mvpp2_ethtool_fs *efs;
1429     int ret;
1430 
1431     if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
1432         return -EINVAL;
1433 
1434     efs = port->rfs_rules[info->fs.location];
1435     if (!efs)
1436         return -EINVAL;
1437 
1438     /* Remove the rule from the engines. */
1439     ret = mvpp2_port_cls_rfs_rule_remove(port, &efs->rule);
1440     if (ret)
1441         return ret;
1442 
1443     port->n_rfs_rules--;
1444     port->rfs_rules[info->fs.location] = NULL;
1445     kfree(efs);
1446 
1447     return 0;
1448 }
1449 
/* Translate an indirection table entry into a physical rx queue number,
 * spreading the queues across the possible CPUs.
 *
 * NOTE(review): nrxqs (port->nrxqs / num_possible_cpus()) is assumed to be
 * non-zero here — i.e. at least one rx queue per possible CPU — otherwise
 * the division below would be a divide-by-zero. Confirm against the port's
 * rx queue allocation logic.
 */
static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
{
    int nrxqs, cpu, cpus = num_possible_cpus();

    /* Number of RXQs per CPU */
    nrxqs = port->nrxqs / cpus;

    /* CPU that will handle this rx queue */
    cpu = rxq / nrxqs;

    /* Fall back to the port's first queue when that CPU is offline */
    if (!cpu_online(cpu))
        return port->first_rxq;

    /* Indirection to better distribute the packets on the CPUs when
     * configuring the RSS queues.
     */
    return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
}
1468 
1469 static void mvpp22_rss_fill_table(struct mvpp2_port *port,
1470                   struct mvpp2_rss_table *table,
1471                   u32 rss_ctx)
1472 {
1473     struct mvpp2 *priv = port->priv;
1474     int i;
1475 
1476     for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
1477         u32 sel = MVPP22_RSS_INDEX_TABLE(rss_ctx) |
1478               MVPP22_RSS_INDEX_TABLE_ENTRY(i);
1479         mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
1480 
1481         mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
1482                 mvpp22_rxfh_indir(port, table->indir[i]));
1483     }
1484 }
1485 
/* Allocate a free global RSS table and configure its hardware width and
 * queue-to-table pointer. On success the table number is returned through
 * @rss_ctx; the caller owns the allocated table (priv->rss_tables[ctx]).
 *
 * Returns 0 on success, -EINVAL when all tables are in use, -ENOMEM on
 * allocation failure.
 */
static int mvpp22_rss_context_create(struct mvpp2_port *port, u32 *rss_ctx)
{
    struct mvpp2 *priv = port->priv;
    u32 ctx;

    /* Find the first free RSS table */
    for (ctx = 0; ctx < MVPP22_N_RSS_TABLES; ctx++) {
        if (!priv->rss_tables[ctx])
            break;
    }

    if (ctx == MVPP22_N_RSS_TABLES)
        return -EINVAL;

    priv->rss_tables[ctx] = kzalloc(sizeof(*priv->rss_tables[ctx]),
                    GFP_KERNEL);
    if (!priv->rss_tables[ctx])
        return -ENOMEM;

    *rss_ctx = ctx;

    /* Set the table width: replace the whole classifier Rx queue number
     * with the ones configured in RSS table entries.
     */
    mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(ctx));
    mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);

    mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(ctx));
    mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE, MVPP22_RSS_TABLE_POINTER(ctx));

    return 0;
}
1518 
1519 int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *port_ctx)
1520 {
1521     u32 rss_ctx;
1522     int ret, i;
1523 
1524     ret = mvpp22_rss_context_create(port, &rss_ctx);
1525     if (ret)
1526         return ret;
1527 
1528     /* Find the first available context number in the port, starting from 1.
1529      * Context 0 on each port is reserved for the default context.
1530      */
1531     for (i = 1; i < MVPP22_N_RSS_TABLES; i++) {
1532         if (port->rss_ctx[i] < 0)
1533             break;
1534     }
1535 
1536     if (i == MVPP22_N_RSS_TABLES)
1537         return -EINVAL;
1538 
1539     port->rss_ctx[i] = rss_ctx;
1540     *port_ctx = i;
1541 
1542     return 0;
1543 }
1544 
1545 static struct mvpp2_rss_table *mvpp22_rss_table_get(struct mvpp2 *priv,
1546                             int rss_ctx)
1547 {
1548     if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
1549         return NULL;
1550 
1551     return priv->rss_tables[rss_ctx];
1552 }
1553 
/* Delete a port-local RSS context: remove any classification rule that
 * still steers to it, free the backing global table and mark both the
 * global and the port-local slot as unused.
 */
int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 port_ctx)
{
    struct mvpp2 *priv = port->priv;
    struct ethtool_rxnfc *rxnfc;
    int i, rss_ctx, ret;

    rss_ctx = mvpp22_rss_ctx(port, port_ctx);

    if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
        return -EINVAL;

    /* Invalidate any active classification rule that use this context */
    for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
        if (!port->rfs_rules[i])
            continue;

        rxnfc = &port->rfs_rules[i]->rxnfc;
        if (!(rxnfc->fs.flow_type & FLOW_RSS) ||
            rxnfc->rss_context != port_ctx)
            continue;

        /* Deletion failure is logged but doesn't abort the teardown */
        ret = mvpp2_ethtool_cls_rule_del(port, rxnfc);
        if (ret) {
            netdev_warn(port->dev,
                    "couldn't remove classification rule %d associated to this context",
                    rxnfc->fs.location);
        }
    }

    kfree(priv->rss_tables[rss_ctx]);

    priv->rss_tables[rss_ctx] = NULL;
    port->rss_ctx[port_ctx] = -1;

    return 0;
}
1590 
1591 int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 port_ctx,
1592                   const u32 *indir)
1593 {
1594     int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
1595     struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
1596                                  rss_ctx);
1597 
1598     if (!rss_table)
1599         return -EINVAL;
1600 
1601     memcpy(rss_table->indir, indir,
1602            MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));
1603 
1604     mvpp22_rss_fill_table(port, rss_table, rss_ctx);
1605 
1606     return 0;
1607 }
1608 
1609 int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx,
1610                   u32 *indir)
1611 {
1612     int rss_ctx =  mvpp22_rss_ctx(port, port_ctx);
1613     struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
1614                                  rss_ctx);
1615 
1616     if (!rss_table)
1617         return -EINVAL;
1618 
1619     memcpy(indir, rss_table->indir,
1620            MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));
1621 
1622     return 0;
1623 }
1624 
/* ethtool set-hash-fields entry point: translate the RXH_* bits requested by
 * the user into HEK options and apply them to every classification flow of
 * the given traffic type.
 */
int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{
    u16 hash_opts = 0;
    u32 flow_type;

    /* An unrecognized ethtool flow type maps to a value outside the cases
     * below and is rejected by the default branch.
     */
    flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);

    switch (flow_type) {
    case MVPP22_FLOW_TCP4:
    case MVPP22_FLOW_UDP4:
    case MVPP22_FLOW_TCP6:
    case MVPP22_FLOW_UDP6:
        /* L4 port hashing is only meaningful for TCP/UDP flows */
        if (info->data & RXH_L4_B_0_1)
            hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
        if (info->data & RXH_L4_B_2_3)
            hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
        fallthrough;
    case MVPP22_FLOW_IP4:
    case MVPP22_FLOW_IP6:
        if (info->data & RXH_L2DA)
            hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
        if (info->data & RXH_VLAN)
            hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
        if (info->data & RXH_L3_PROTO)
            hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
        if (info->data & RXH_IP_SRC)
            hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA |
                     MVPP22_CLS_HEK_OPT_IP6SA);
        if (info->data & RXH_IP_DST)
            hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA |
                     MVPP22_CLS_HEK_OPT_IP6DA);
        break;
    default: return -EOPNOTSUPP;
    }

    return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts);
}
1662 
/* ethtool get-hash-fields entry point: report the RXH_* hash fields in use
 * for the given traffic type, aggregated over all its classification flows.
 */
int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{
    unsigned long hash_opts;
    u32 flow_type;
    int i;

    flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);

    hash_opts = mvpp2_port_rss_hash_opts_get(port, flow_type);
    info->data = 0;

    /* Translate each HEK option bit back to its RXH_* equivalent */
    for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
        switch (BIT(i)) {
        case MVPP22_CLS_HEK_OPT_MAC_DA:
            info->data |= RXH_L2DA;
            break;
        case MVPP22_CLS_HEK_OPT_VLAN:
            info->data |= RXH_VLAN;
            break;
        case MVPP22_CLS_HEK_OPT_L3_PROTO:
            info->data |= RXH_L3_PROTO;
            break;
        case MVPP22_CLS_HEK_OPT_IP4SA:
        case MVPP22_CLS_HEK_OPT_IP6SA:
            info->data |= RXH_IP_SRC;
            break;
        case MVPP22_CLS_HEK_OPT_IP4DA:
        case MVPP22_CLS_HEK_OPT_IP6DA:
            info->data |= RXH_IP_DST;
            break;
        case MVPP22_CLS_HEK_OPT_L4SIP:
            info->data |= RXH_L4_B_0_1;
            break;
        case MVPP22_CLS_HEK_OPT_L4DIP:
            info->data |= RXH_L4_B_2_3;
            break;
        default:
            return -EINVAL;
        }
    }
    return 0;
}
1705 
/* Create the port's default RSS context (port context 0), fill its
 * indirection table to spread packets evenly across the port's rx queues,
 * and configure default hash options for the common IPv4/IPv6 and TCP/UDP
 * traffic flows.
 */
int mvpp22_port_rss_init(struct mvpp2_port *port)
{
    struct mvpp2_rss_table *table;
    u32 context = 0;
    int i, ret;

    /* Mark every port-local context slot as unused */
    for (i = 0; i < MVPP22_N_RSS_TABLES; i++)
        port->rss_ctx[i] = -1;

    ret = mvpp22_rss_context_create(port, &context);
    if (ret)
        return ret;

    table = mvpp22_rss_table_get(port->priv, context);
    if (!table)
        return -EINVAL;

    port->rss_ctx[0] = context;

    /* Configure the first table to evenly distribute the packets across
     * real Rx Queues. The table entries map a hash to a port Rx Queue.
     */
    for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
        table->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);

    mvpp22_rss_fill_table(port, table, mvpp22_rss_ctx(port, 0));

    /* Configure default flows */
    mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP4, MVPP22_CLS_HEK_IP4_2T);
    mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP6, MVPP22_CLS_HEK_IP6_2T);
    mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP4, MVPP22_CLS_HEK_IP4_5T);
    mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP6, MVPP22_CLS_HEK_IP6_5T);
    mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP4, MVPP22_CLS_HEK_IP4_5T);
    mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP6, MVPP22_CLS_HEK_IP6_5T);

    return 0;
}