// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

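/* Write the port membership of a VLAN to hardware: the SPX5_PORTS wide
 * bitmap is split into three 32-bit words, one per mask register.
 */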
static int sparx5_vlant_set_mask(struct sparx5 *sparx5, u16 vid)
{
    u32 mask[3];

    /* Divide up mask in 32 bit words */
    bitmap_to_arr32(mask, sparx5->vlan_mask[vid], SPX5_PORTS);

    /* Output mask to respective registers */
    spx5_wr(mask[0], sparx5, ANA_L3_VLAN_MASK_CFG(vid));
    spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid));
    spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid));

    return 0;
}

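/* Enable VLAN processing in the analyzer and set up a 1:1 VLAN to FID
 * mapping for all VIDs.
 */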
void sparx5_vlan_init(struct sparx5 *sparx5)
{
    u16 vid;

    spx5_rmw(ANA_L3_VLAN_CTRL_VLAN_ENA_SET(1),
         ANA_L3_VLAN_CTRL_VLAN_ENA,
         sparx5,
         ANA_L3_VLAN_CTRL);

    /* Map VLAN = FID */
    for (vid = NULL_VID; vid < VLAN_N_VID; vid++)
        spx5_rmw(ANA_L3_VLAN_CFG_VLAN_FID_SET(vid),
             ANA_L3_VLAN_CFG_VLAN_FID,
             sparx5,
             ANA_L3_VLAN_CFG(vid));
}

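/* Initial per-port setup: VLAN unaware classification using the port PVID */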
void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno)
{
    struct sparx5_port *port = sparx5->ports[portno];

    /* Configure PVID */
    spx5_rmw(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(0) |
         ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid),
         ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA |
         ANA_CL_VLAN_CTRL_PORT_VID,
         sparx5,
         ANA_CL_VLAN_CTRL(port->portno));
}

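/* Add the port to a VLAN, optionally as default ingress classification
 * (PVID) and/or as the untagged (native) egress VLAN.
 */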
int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
            bool untagged)
{
    struct sparx5 *sparx5 = port->sparx5;
    int ret;

    /* Untagged egress vlan classification */
    if (untagged && port->vid != vid) {
        if (port->vid) {
            netdev_err(port->ndev,
                   "Port already has a native VLAN: %d\n",
                   port->vid);
            return -EBUSY;
        }
        port->vid = vid;
    }

    /* Make the port a member of the VLAN */
    set_bit(port->portno, sparx5->vlan_mask[vid]);
    ret = sparx5_vlant_set_mask(sparx5, vid);
    if (ret)
        return ret;

    /* Default ingress vlan classification */
    if (pvid)
        port->pvid = vid;

    sparx5_vlan_port_apply(sparx5, port);

    return 0;
}

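/* Remove the port from a VLAN and clear PVID/native VID if they match */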
int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid)
{
    struct sparx5 *sparx5 = port->sparx5;
    int ret;

    /* 8021q removes VID 0 on module unload for all interfaces
     * with VLAN filtering feature. We need to keep it to receive
     * untagged traffic.
     */
    if (vid == 0)
        return 0;

    /* Stop the port from being a member of the vlan */
    clear_bit(port->portno, sparx5->vlan_mask[vid]);
    ret = sparx5_vlant_set_mask(sparx5, vid);
    if (ret)
        return ret;

    /* Ingress */
    if (port->pvid == vid)
        port->pvid = 0;

    /* Egress */
    if (port->vid == vid)
        port->vid = 0;

    sparx5_vlan_port_apply(sparx5, port);

    return 0;
}

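/* Enable or disable the port in the destination mask of a PGID entry */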
void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable)
{
    struct sparx5 *sparx5 = port->sparx5;
    u32 val, mask;

    /* mask is spread across 3 registers x 32 bit */
    if (port->portno < 32) {
        mask = BIT(port->portno);
        val = enable ? mask : 0;
        spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG(pgid));
    } else if (port->portno < 64) {
        mask = BIT(port->portno - 32);
        val = enable ? mask : 0;
        spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG1(pgid));
    } else if (port->portno < SPX5_PORTS) {
        mask = BIT(port->portno - 64);
        val = enable ? mask : 0;
        spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG2(pgid));
    } else {
        netdev_err(port->ndev, "Invalid port no: %d\n", port->portno);
    }
}

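/* Read the three 32-bit words of a PGID destination mask */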
void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3])
{
    portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid));
    portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid));
    portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid));
}

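/* Recalculate forwarding state from the bridge port masks: flood PGIDs,
 * per-port source masks (forward to all bridged ports except self) and
 * the automatic learning mask.
 */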
void sparx5_update_fwd(struct sparx5 *sparx5)
{
    DECLARE_BITMAP(workmask, SPX5_PORTS);
    u32 mask[3];
    int port;

    /* Divide up fwd mask in 32 bit words */
    bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS);

    /* Update flood masks */
    for (port = PGID_UC_FLOOD; port <= PGID_BCAST; port++) {
        spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port));
        spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port));
        spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port));
    }

    /* Update SRC masks */
    for (port = 0; port < SPX5_PORTS; port++) {
        if (test_bit(port, sparx5->bridge_fwd_mask)) {
            /* Allow to send to all bridged but self */
            bitmap_copy(workmask, sparx5->bridge_fwd_mask, SPX5_PORTS);
            clear_bit(port, workmask);
            bitmap_to_arr32(mask, workmask, SPX5_PORTS);
            spx5_wr(mask[0], sparx5, ANA_AC_SRC_CFG(port));
            spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port));
            spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port));
        } else {
            spx5_wr(0, sparx5, ANA_AC_SRC_CFG(port));
            spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port));
            spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port));
        }
    }

    /* Learning enabled only for bridged ports */
    bitmap_and(workmask, sparx5->bridge_fwd_mask,
           sparx5->bridge_lrn_mask, SPX5_PORTS);
    bitmap_to_arr32(mask, workmask, SPX5_PORTS);

    /* Apply learning mask */
    spx5_wr(mask[0], sparx5, ANA_L2_AUTO_LRN_CFG);
    spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1);
    spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2);
}

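/* Apply the port VLAN configuration to hardware: ingress awareness and
 * PVID, tag filtering, and egress tagging and port VID.
 */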
void sparx5_vlan_port_apply(struct sparx5 *sparx5,
                struct sparx5_port *port)
{
    u32 val;

    /* Configure PVID, vlan aware */
    val = ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(port->vlan_aware) |
        ANA_CL_VLAN_CTRL_VLAN_POP_CNT_SET(port->vlan_aware) |
        ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid);
    spx5_wr(val, sparx5, ANA_CL_VLAN_CTRL(port->portno));

    val = 0;
    if (port->vlan_aware && !port->pvid)
        /* If port is vlan-aware and tagged, drop untagged and
         * priority tagged frames.
         */
        val = ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(1) |
            ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_SET(1) |
            ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_SET(1);
    spx5_wr(val, sparx5,
        ANA_CL_VLAN_FILTER_CTRL(port->portno, 0));

    /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q */
    val = REW_TAG_CTRL_TAG_TPID_CFG_SET(0);
    if (port->vlan_aware) {
        if (port->vid)
            /* Tag all frames except when VID == DEFAULT_VLAN */
            val |= REW_TAG_CTRL_TAG_CFG_SET(1);
        else
            val |= REW_TAG_CTRL_TAG_CFG_SET(3);
    }
    spx5_wr(val, sparx5, REW_TAG_CTRL(port->portno));

    /* Egress VID */
    spx5_rmw(REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid),
         REW_PORT_VLAN_CFG_PORT_VID,
         sparx5,
         REW_PORT_VLAN_CFG(port->portno));
}