// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/if_vlan.h>
#include <linux/irq.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <net/gre.h>
#include <net/gro.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/geneve.h>

#include "hnae3.h"
#include "hns3_enet.h"
/* All hns3 tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "hns3_trace.h"

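/* hns3_set_field(): OR @val, shifted left by @shift bits, into @origin.
 * hns3_tx_bd_count(): number of tx BDs needed to hold S bytes, with each
 * BD covering at most HNS3_MAX_BD_SIZE bytes.
 */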
#define hns3_set_field(origin, shift, val)  ((origin) |= (val) << (shift))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)

#define hns3_rl_err(fmt, ...)                       \
    do {                                \
        if (net_ratelimit())                    \
            netdev_err(fmt, ##__VA_ARGS__);         \
    } while (0)
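/* Illustrative usage (not taken from this file): despite the parameter
 * name, the first argument must be the struct net_device pointer that
 * netdev_err() expects, e.g.
 *	hns3_rl_err(netdev, "alloc rx buffer failed: %d\n", ret);
 * net_ratelimit() drops the message when it would fire too often.
 */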

static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);

static const char hns3_driver_name[] = "hns3";
static const char hns3_driver_string[] =
            "Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Network interface message level setting");

static unsigned int tx_sgl = 1;
module_param(tx_sgl, uint, 0600);
MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");

static bool page_pool_enabled = true;
module_param(page_pool_enabled, bool, 0400);

#define HNS3_SGL_SIZE(nfrag)    (sizeof(struct scatterlist) * (nfrag) + \
                 sizeof(struct sg_table))
#define HNS3_MAX_SGL_SIZE   ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \
                      dma_get_cache_alignment())

#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
               NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)

#define HNS3_INNER_VLAN_TAG 1
#define HNS3_OUTER_VLAN_TAG 2

#define HNS3_MIN_TX_LEN     33U
#define HNS3_MIN_TUN_PKT_LEN    65U

/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
     HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
     HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
     HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
     HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
     HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA),
     HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
     HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
    /* required last entry */
    {0,}
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);

#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t) \
    {   ptype, \
        l, \
        CHECKSUM_##s, \
        HNS3_L3_TYPE_##t, \
        1 }

#define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \
        { ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0 }

static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
    HNS3_RX_PTYPE_UNUSED_ENTRY(0),
    HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP),
    HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP),
    HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP),
    HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL),
    HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL),
    HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL),
    HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM),
    HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL),
    HNS3_RX_PTYPE_UNUSED_ENTRY(9),
    HNS3_RX_PTYPE_UNUSED_ENTRY(10),
    HNS3_RX_PTYPE_UNUSED_ENTRY(11),
    HNS3_RX_PTYPE_UNUSED_ENTRY(12),
    HNS3_RX_PTYPE_UNUSED_ENTRY(13),
    HNS3_RX_PTYPE_UNUSED_ENTRY(14),
    HNS3_RX_PTYPE_UNUSED_ENTRY(15),
    HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL),
    HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4),
    HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4),
    HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4),
    HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4),
    HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4),
    HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4),
    HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4),
    HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4),
    HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4),
    HNS3_RX_PTYPE_UNUSED_ENTRY(26),
    HNS3_RX_PTYPE_UNUSED_ENTRY(27),
    HNS3_RX_PTYPE_UNUSED_ENTRY(28),
    HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL),
    HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL),
    HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4),
    HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4),
    HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4),
    HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4),
    HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4),
    HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4),
    HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4),
    HNS3_RX_PTYPE_UNUSED_ENTRY(38),
    HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6),
    HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6),
    HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6),
    HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6),
    HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6),
    HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6),
    HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6),
    HNS3_RX_PTYPE_UNUSED_ENTRY(46),
    HNS3_RX_PTYPE_UNUSED_ENTRY(47),
    HNS3_RX_PTYPE_UNUSED_ENTRY(48),
    HNS3_RX_PTYPE_UNUSED_ENTRY(49),
    HNS3_RX_PTYPE_UNUSED_ENTRY(50),
    HNS3_RX_PTYPE_UNUSED_ENTRY(51),
    HNS3_RX_PTYPE_UNUSED_ENTRY(52),
    HNS3_RX_PTYPE_UNUSED_ENTRY(53),
    HNS3_RX_PTYPE_UNUSED_ENTRY(54),
    HNS3_RX_PTYPE_UNUSED_ENTRY(55),
    HNS3_RX_PTYPE_UNUSED_ENTRY(56),
    HNS3_RX_PTYPE_UNUSED_ENTRY(57),
    HNS3_RX_PTYPE_UNUSED_ENTRY(58),
    HNS3_RX_PTYPE_UNUSED_ENTRY(59),
    HNS3_RX_PTYPE_UNUSED_ENTRY(60),
    HNS3_RX_PTYPE_UNUSED_ENTRY(61),
    HNS3_RX_PTYPE_UNUSED_ENTRY(62),
    HNS3_RX_PTYPE_UNUSED_ENTRY(63),
    HNS3_RX_PTYPE_UNUSED_ENTRY(64),
    HNS3_RX_PTYPE_UNUSED_ENTRY(65),
    HNS3_RX_PTYPE_UNUSED_ENTRY(66),
    HNS3_RX_PTYPE_UNUSED_ENTRY(67),
    HNS3_RX_PTYPE_UNUSED_ENTRY(68),
    HNS3_RX_PTYPE_UNUSED_ENTRY(69),
    HNS3_RX_PTYPE_UNUSED_ENTRY(70),
    HNS3_RX_PTYPE_UNUSED_ENTRY(71),
    HNS3_RX_PTYPE_UNUSED_ENTRY(72),
    HNS3_RX_PTYPE_UNUSED_ENTRY(73),
    HNS3_RX_PTYPE_UNUSED_ENTRY(74),
    HNS3_RX_PTYPE_UNUSED_ENTRY(75),
    HNS3_RX_PTYPE_UNUSED_ENTRY(76),
    HNS3_RX_PTYPE_UNUSED_ENTRY(77),
    HNS3_RX_PTYPE_UNUSED_ENTRY(78),
    HNS3_RX_PTYPE_UNUSED_ENTRY(79),
    HNS3_RX_PTYPE_UNUSED_ENTRY(80),
    HNS3_RX_PTYPE_UNUSED_ENTRY(81),
    HNS3_RX_PTYPE_UNUSED_ENTRY(82),
    HNS3_RX_PTYPE_UNUSED_ENTRY(83),
    HNS3_RX_PTYPE_UNUSED_ENTRY(84),
    HNS3_RX_PTYPE_UNUSED_ENTRY(85),
    HNS3_RX_PTYPE_UNUSED_ENTRY(86),
    HNS3_RX_PTYPE_UNUSED_ENTRY(87),
    HNS3_RX_PTYPE_UNUSED_ENTRY(88),
    HNS3_RX_PTYPE_UNUSED_ENTRY(89),
    HNS3_RX_PTYPE_UNUSED_ENTRY(90),
    HNS3_RX_PTYPE_UNUSED_ENTRY(91),
    HNS3_RX_PTYPE_UNUSED_ENTRY(92),
    HNS3_RX_PTYPE_UNUSED_ENTRY(93),
    HNS3_RX_PTYPE_UNUSED_ENTRY(94),
    HNS3_RX_PTYPE_UNUSED_ENTRY(95),
    HNS3_RX_PTYPE_UNUSED_ENTRY(96),
    HNS3_RX_PTYPE_UNUSED_ENTRY(97),
    HNS3_RX_PTYPE_UNUSED_ENTRY(98),
    HNS3_RX_PTYPE_UNUSED_ENTRY(99),
    HNS3_RX_PTYPE_UNUSED_ENTRY(100),
    HNS3_RX_PTYPE_UNUSED_ENTRY(101),
    HNS3_RX_PTYPE_UNUSED_ENTRY(102),
    HNS3_RX_PTYPE_UNUSED_ENTRY(103),
    HNS3_RX_PTYPE_UNUSED_ENTRY(104),
    HNS3_RX_PTYPE_UNUSED_ENTRY(105),
    HNS3_RX_PTYPE_UNUSED_ENTRY(106),
    HNS3_RX_PTYPE_UNUSED_ENTRY(107),
    HNS3_RX_PTYPE_UNUSED_ENTRY(108),
    HNS3_RX_PTYPE_UNUSED_ENTRY(109),
    HNS3_RX_PTYPE_UNUSED_ENTRY(110),
    HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6),
    HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6),
    HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6),
    HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6),
    HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6),
    HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6),
    HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6),
    HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6),
    HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6),
    HNS3_RX_PTYPE_UNUSED_ENTRY(120),
    HNS3_RX_PTYPE_UNUSED_ENTRY(121),
    HNS3_RX_PTYPE_UNUSED_ENTRY(122),
    HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL),
    HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL),
    HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4),
    HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4),
    HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4),
    HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4),
    HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4),
    HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4),
    HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4),
    HNS3_RX_PTYPE_UNUSED_ENTRY(132),
    HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6),
    HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6),
    HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6),
    HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6),
    HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6),
    HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6),
    HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6),
    HNS3_RX_PTYPE_UNUSED_ENTRY(140),
    HNS3_RX_PTYPE_UNUSED_ENTRY(141),
    HNS3_RX_PTYPE_UNUSED_ENTRY(142),
    HNS3_RX_PTYPE_UNUSED_ENTRY(143),
    HNS3_RX_PTYPE_UNUSED_ENTRY(144),
    HNS3_RX_PTYPE_UNUSED_ENTRY(145),
    HNS3_RX_PTYPE_UNUSED_ENTRY(146),
    HNS3_RX_PTYPE_UNUSED_ENTRY(147),
    HNS3_RX_PTYPE_UNUSED_ENTRY(148),
    HNS3_RX_PTYPE_UNUSED_ENTRY(149),
    HNS3_RX_PTYPE_UNUSED_ENTRY(150),
    HNS3_RX_PTYPE_UNUSED_ENTRY(151),
    HNS3_RX_PTYPE_UNUSED_ENTRY(152),
    HNS3_RX_PTYPE_UNUSED_ENTRY(153),
    HNS3_RX_PTYPE_UNUSED_ENTRY(154),
    HNS3_RX_PTYPE_UNUSED_ENTRY(155),
    HNS3_RX_PTYPE_UNUSED_ENTRY(156),
    HNS3_RX_PTYPE_UNUSED_ENTRY(157),
    HNS3_RX_PTYPE_UNUSED_ENTRY(158),
    HNS3_RX_PTYPE_UNUSED_ENTRY(159),
    HNS3_RX_PTYPE_UNUSED_ENTRY(160),
    HNS3_RX_PTYPE_UNUSED_ENTRY(161),
    HNS3_RX_PTYPE_UNUSED_ENTRY(162),
    HNS3_RX_PTYPE_UNUSED_ENTRY(163),
    HNS3_RX_PTYPE_UNUSED_ENTRY(164),
    HNS3_RX_PTYPE_UNUSED_ENTRY(165),
    HNS3_RX_PTYPE_UNUSED_ENTRY(166),
    HNS3_RX_PTYPE_UNUSED_ENTRY(167),
    HNS3_RX_PTYPE_UNUSED_ENTRY(168),
    HNS3_RX_PTYPE_UNUSED_ENTRY(169),
    HNS3_RX_PTYPE_UNUSED_ENTRY(170),
    HNS3_RX_PTYPE_UNUSED_ENTRY(171),
    HNS3_RX_PTYPE_UNUSED_ENTRY(172),
    HNS3_RX_PTYPE_UNUSED_ENTRY(173),
    HNS3_RX_PTYPE_UNUSED_ENTRY(174),
    HNS3_RX_PTYPE_UNUSED_ENTRY(175),
    HNS3_RX_PTYPE_UNUSED_ENTRY(176),
    HNS3_RX_PTYPE_UNUSED_ENTRY(177),
    HNS3_RX_PTYPE_UNUSED_ENTRY(178),
    HNS3_RX_PTYPE_UNUSED_ENTRY(179),
    HNS3_RX_PTYPE_UNUSED_ENTRY(180),
    HNS3_RX_PTYPE_UNUSED_ENTRY(181),
    HNS3_RX_PTYPE_UNUSED_ENTRY(182),
    HNS3_RX_PTYPE_UNUSED_ENTRY(183),
    HNS3_RX_PTYPE_UNUSED_ENTRY(184),
    HNS3_RX_PTYPE_UNUSED_ENTRY(185),
    HNS3_RX_PTYPE_UNUSED_ENTRY(186),
    HNS3_RX_PTYPE_UNUSED_ENTRY(187),
    HNS3_RX_PTYPE_UNUSED_ENTRY(188),
    HNS3_RX_PTYPE_UNUSED_ENTRY(189),
    HNS3_RX_PTYPE_UNUSED_ENTRY(190),
    HNS3_RX_PTYPE_UNUSED_ENTRY(191),
    HNS3_RX_PTYPE_UNUSED_ENTRY(192),
    HNS3_RX_PTYPE_UNUSED_ENTRY(193),
    HNS3_RX_PTYPE_UNUSED_ENTRY(194),
    HNS3_RX_PTYPE_UNUSED_ENTRY(195),
    HNS3_RX_PTYPE_UNUSED_ENTRY(196),
    HNS3_RX_PTYPE_UNUSED_ENTRY(197),
    HNS3_RX_PTYPE_UNUSED_ENTRY(198),
    HNS3_RX_PTYPE_UNUSED_ENTRY(199),
    HNS3_RX_PTYPE_UNUSED_ENTRY(200),
    HNS3_RX_PTYPE_UNUSED_ENTRY(201),
    HNS3_RX_PTYPE_UNUSED_ENTRY(202),
    HNS3_RX_PTYPE_UNUSED_ENTRY(203),
    HNS3_RX_PTYPE_UNUSED_ENTRY(204),
    HNS3_RX_PTYPE_UNUSED_ENTRY(205),
    HNS3_RX_PTYPE_UNUSED_ENTRY(206),
    HNS3_RX_PTYPE_UNUSED_ENTRY(207),
    HNS3_RX_PTYPE_UNUSED_ENTRY(208),
    HNS3_RX_PTYPE_UNUSED_ENTRY(209),
    HNS3_RX_PTYPE_UNUSED_ENTRY(210),
    HNS3_RX_PTYPE_UNUSED_ENTRY(211),
    HNS3_RX_PTYPE_UNUSED_ENTRY(212),
    HNS3_RX_PTYPE_UNUSED_ENTRY(213),
    HNS3_RX_PTYPE_UNUSED_ENTRY(214),
    HNS3_RX_PTYPE_UNUSED_ENTRY(215),
    HNS3_RX_PTYPE_UNUSED_ENTRY(216),
    HNS3_RX_PTYPE_UNUSED_ENTRY(217),
    HNS3_RX_PTYPE_UNUSED_ENTRY(218),
    HNS3_RX_PTYPE_UNUSED_ENTRY(219),
    HNS3_RX_PTYPE_UNUSED_ENTRY(220),
    HNS3_RX_PTYPE_UNUSED_ENTRY(221),
    HNS3_RX_PTYPE_UNUSED_ENTRY(222),
    HNS3_RX_PTYPE_UNUSED_ENTRY(223),
    HNS3_RX_PTYPE_UNUSED_ENTRY(224),
    HNS3_RX_PTYPE_UNUSED_ENTRY(225),
    HNS3_RX_PTYPE_UNUSED_ENTRY(226),
    HNS3_RX_PTYPE_UNUSED_ENTRY(227),
    HNS3_RX_PTYPE_UNUSED_ENTRY(228),
    HNS3_RX_PTYPE_UNUSED_ENTRY(229),
    HNS3_RX_PTYPE_UNUSED_ENTRY(230),
    HNS3_RX_PTYPE_UNUSED_ENTRY(231),
    HNS3_RX_PTYPE_UNUSED_ENTRY(232),
    HNS3_RX_PTYPE_UNUSED_ENTRY(233),
    HNS3_RX_PTYPE_UNUSED_ENTRY(234),
    HNS3_RX_PTYPE_UNUSED_ENTRY(235),
    HNS3_RX_PTYPE_UNUSED_ENTRY(236),
    HNS3_RX_PTYPE_UNUSED_ENTRY(237),
    HNS3_RX_PTYPE_UNUSED_ENTRY(238),
    HNS3_RX_PTYPE_UNUSED_ENTRY(239),
    HNS3_RX_PTYPE_UNUSED_ENTRY(240),
    HNS3_RX_PTYPE_UNUSED_ENTRY(241),
    HNS3_RX_PTYPE_UNUSED_ENTRY(242),
    HNS3_RX_PTYPE_UNUSED_ENTRY(243),
    HNS3_RX_PTYPE_UNUSED_ENTRY(244),
    HNS3_RX_PTYPE_UNUSED_ENTRY(245),
    HNS3_RX_PTYPE_UNUSED_ENTRY(246),
    HNS3_RX_PTYPE_UNUSED_ENTRY(247),
    HNS3_RX_PTYPE_UNUSED_ENTRY(248),
    HNS3_RX_PTYPE_UNUSED_ENTRY(249),
    HNS3_RX_PTYPE_UNUSED_ENTRY(250),
    HNS3_RX_PTYPE_UNUSED_ENTRY(251),
    HNS3_RX_PTYPE_UNUSED_ENTRY(252),
    HNS3_RX_PTYPE_UNUSED_ENTRY(253),
    HNS3_RX_PTYPE_UNUSED_ENTRY(254),
    HNS3_RX_PTYPE_UNUSED_ENTRY(255),
};

#define HNS3_INVALID_PTYPE \
        ARRAY_SIZE(hns3_rx_ptype_tbl)

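/* Per-vector interrupt handler: defer all real work to NAPI poll
 * context and count the event for interrupt statistics.
 */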
static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
    struct hns3_enet_tqp_vector *tqp_vector = vector;

    napi_schedule_irqoff(&tqp_vector->napi);
    tqp_vector->event_cnt++;

    return IRQ_HANDLED;
}

static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
    struct hns3_enet_tqp_vector *tqp_vectors;
    unsigned int i;

    for (i = 0; i < priv->vector_num; i++) {
        tqp_vectors = &priv->tqp_vector[i];

        if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
            continue;

        /* clear the affinity mask */
        irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);

        /* release the irq resource */
        free_irq(tqp_vectors->vector_irq, tqp_vectors);
        tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
    }
}

static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
    struct hns3_enet_tqp_vector *tqp_vectors;
    int txrx_int_idx = 0;
    int rx_int_idx = 0;
    int tx_int_idx = 0;
    unsigned int i;
    int ret;

    for (i = 0; i < priv->vector_num; i++) {
        tqp_vectors = &priv->tqp_vector[i];

        if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
            continue;

        if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
            snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
                 "%s-%s-%s-%d", hns3_driver_name,
                 pci_name(priv->ae_handle->pdev),
                 "TxRx", txrx_int_idx++);
            txrx_int_idx++;
        } else if (tqp_vectors->rx_group.ring) {
            snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
                 "%s-%s-%s-%d", hns3_driver_name,
                 pci_name(priv->ae_handle->pdev),
                 "Rx", rx_int_idx++);
        } else if (tqp_vectors->tx_group.ring) {
            snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
                 "%s-%s-%s-%d", hns3_driver_name,
                 pci_name(priv->ae_handle->pdev),
                 "Tx", tx_int_idx++);
        } else {
            /* Skip this unused q_vector */
            continue;
        }

        tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

        irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN);
        ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
                  tqp_vectors->name, tqp_vectors);
        if (ret) {
            netdev_err(priv->netdev, "request irq(%d) fail\n",
                   tqp_vectors->vector_irq);
            hns3_nic_uninit_irq(priv);
            return ret;
        }

        irq_set_affinity_hint(tqp_vectors->vector_irq,
                      &tqp_vectors->affinity_mask);

        tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
    }

    return 0;
}

static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
                 u32 mask_en)
{
    writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
    napi_enable(&tqp_vector->napi);
    enable_irq(tqp_vector->vector_irq);

    /* enable vector */
    hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
    /* disable vector */
    hns3_mask_vector_irq(tqp_vector, 0);

    disable_irq(tqp_vector->vector_irq);
    napi_disable(&tqp_vector->napi);
    cancel_work_sync(&tqp_vector->rx_group.dim.work);
    cancel_work_sync(&tqp_vector->tx_group.dim.work);
}

void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
                 u32 rl_value)
{
    u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

    /* This defines the configuration for RL (Interrupt Rate Limiter).
     * RL defines the rate of interrupts, i.e. the number of interrupts
     * per second. GL and RL are the two ways to achieve interrupt
     * coalescing.
     */
    if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable &&
        !tqp_vector->rx_group.coal.adapt_enable)
        /* According to the hardware, the range of rl_reg is
         * 0-59 and the unit is 4.
         */
        rl_reg |= HNS3_INT_RL_ENABLE_MASK;

    writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
                    u32 gl_value)
{
    u32 new_val;

    if (tqp_vector->rx_group.coal.unit_1us)
        new_val = gl_value | HNS3_INT_GL_1US;
    else
        new_val = hns3_gl_usec_to_reg(gl_value);

    writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
                    u32 gl_value)
{
    u32 new_val;

    if (tqp_vector->tx_group.coal.unit_1us)
        new_val = gl_value | HNS3_INT_GL_1US;
    else
        new_val = hns3_gl_usec_to_reg(gl_value);

    writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}

void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
                    u32 ql_value)
{
    writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET);
}

void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
                    u32 ql_value)
{
    writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET);
}

static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
                      struct hns3_nic_priv *priv)
{
    struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
    struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
    struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
    struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
    struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;

    tx_coal->adapt_enable = ptx_coal->adapt_enable;
    rx_coal->adapt_enable = prx_coal->adapt_enable;

    tx_coal->int_gl = ptx_coal->int_gl;
    rx_coal->int_gl = prx_coal->int_gl;

    rx_coal->flow_level = prx_coal->flow_level;
    tx_coal->flow_level = ptx_coal->flow_level;

    /* For device versions V3 and above, GL can be configured in 1us
     * units, so use the 1us unit.
     */
    if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
        tx_coal->unit_1us = 1;
        rx_coal->unit_1us = 1;
    }

    if (ae_dev->dev_specs.int_ql_max) {
        tx_coal->ql_enable = 1;
        rx_coal->ql_enable = 1;
        tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
        rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
        tx_coal->int_ql = ptx_coal->int_ql;
        rx_coal->int_ql = prx_coal->int_ql;
    }
}

static void
hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
                 struct hns3_nic_priv *priv)
{
    struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
    struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
    struct hnae3_handle *h = priv->ae_handle;

    hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl);
    hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl);
    hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);

    if (tx_coal->ql_enable)
        hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql);

    if (rx_coal->ql_enable)
        hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql);
}

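/* Propagate the TC and queue layout known by the hnae3 layer to the
 * netdev: map each TC to its TQP range and set the real number of
 * tx/rx queues used by the stack.
 */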
static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
    struct hnae3_handle *h = hns3_get_handle(netdev);
    struct hnae3_knic_private_info *kinfo = &h->kinfo;
    struct hnae3_tc_info *tc_info = &kinfo->tc_info;
    unsigned int queue_size = kinfo->num_tqps;
    int i, ret;

    if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) {
        netdev_reset_tc(netdev);
    } else {
        ret = netdev_set_num_tc(netdev, tc_info->num_tc);
        if (ret) {
            netdev_err(netdev,
                   "netdev_set_num_tc fail, ret=%d!\n", ret);
            return ret;
        }

        for (i = 0; i < tc_info->num_tc; i++)
            netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],
                        tc_info->tqp_offset[i]);
    }

    ret = netif_set_real_num_tx_queues(netdev, queue_size);
    if (ret) {
        netdev_err(netdev,
               "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
        return ret;
    }

    ret = netif_set_real_num_rx_queues(netdev, queue_size);
    if (ret) {
        netdev_err(netdev,
               "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
        return ret;
    }

    return 0;
}

u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
    u16 alloc_tqps, max_rss_size, rss_size;

    h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
    rss_size = alloc_tqps / h->kinfo.tc_info.num_tc;

    return min_t(u16, rss_size, max_rss_size);
}

static void hns3_tqp_enable(struct hnae3_queue *tqp)
{
    u32 rcb_reg;

    rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
    rcb_reg |= BIT(HNS3_RING_EN_B);
    hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

static void hns3_tqp_disable(struct hnae3_queue *tqp)
{
    u32 rcb_reg;

    rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
    rcb_reg &= ~BIT(HNS3_RING_EN_B);
    hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
{
#ifdef CONFIG_RFS_ACCEL
    free_irq_cpu_rmap(netdev->rx_cpu_rmap);
    netdev->rx_cpu_rmap = NULL;
#endif
}

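/* Build the CPU reverse map used by accelerated RFS so that rx
 * interrupts can be steered towards the CPU consuming the flow; this is
 * a no-op when CONFIG_RFS_ACCEL is disabled.
 */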
static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
{
#ifdef CONFIG_RFS_ACCEL
    struct hns3_nic_priv *priv = netdev_priv(netdev);
    struct hns3_enet_tqp_vector *tqp_vector;
    int i, ret;

    if (!netdev->rx_cpu_rmap) {
        netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
        if (!netdev->rx_cpu_rmap)
            return -ENOMEM;
    }

    for (i = 0; i < priv->vector_num; i++) {
        tqp_vector = &priv->tqp_vector[i];
        ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
                       tqp_vector->vector_irq);
        if (ret) {
            hns3_free_rx_cpu_rmap(netdev);
            return ret;
        }
    }
#endif
    return 0;
}

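/* Bring the data path up: reset all rings, enable the interrupt
 * vectors and the RCB hardware queues, then start the ae_dev; on
 * failure, unwind the queues and vectors in reverse order.
 */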
static int hns3_nic_net_up(struct net_device *netdev)
{
    struct hns3_nic_priv *priv = netdev_priv(netdev);
    struct hnae3_handle *h = priv->ae_handle;
    int i, j;
    int ret;

    ret = hns3_nic_reset_all_ring(h);
    if (ret)
        return ret;

    clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

    /* enable the vectors */
    for (i = 0; i < priv->vector_num; i++)
        hns3_vector_enable(&priv->tqp_vector[i]);

    /* enable rcb */
    for (j = 0; j < h->kinfo.num_tqps; j++)
        hns3_tqp_enable(h->kinfo.tqp[j]);

    /* start the ae_dev */
    ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
    if (ret) {
        set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
        while (j--)
            hns3_tqp_disable(h->kinfo.tqp[j]);

        for (j = i - 1; j >= 0; j--)
            hns3_vector_disable(&priv->tqp_vector[j]);
    }

    return ret;
}

static void hns3_config_xps(struct hns3_nic_priv *priv)
{
    int i;

    for (i = 0; i < priv->vector_num; i++) {
        struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
        struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;

        while (ring) {
            int ret;

            ret = netif_set_xps_queue(priv->netdev,
                          &tqp_vector->affinity_mask,
                          ring->tqp->tqp_index);
            if (ret)
                netdev_warn(priv->netdev,
                        "set xps queue failed: %d", ret);

            ring = ring->next;
        }
    }
}

static int hns3_nic_net_open(struct net_device *netdev)
{
    struct hns3_nic_priv *priv = netdev_priv(netdev);
    struct hnae3_handle *h = hns3_get_handle(netdev);
    struct hnae3_knic_private_info *kinfo;
    int i, ret;

    if (hns3_nic_resetting(netdev))
        return -EBUSY;

    if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
        netdev_warn(netdev, "net open repeatedly!\n");
        return 0;
    }

    netif_carrier_off(netdev);

    ret = hns3_nic_set_real_num_queue(netdev);
    if (ret)
        return ret;

    ret = hns3_nic_net_up(netdev);
    if (ret) {
        netdev_err(netdev, "net up fail, ret=%d!\n", ret);
        return ret;
    }

    kinfo = &h->kinfo;
    for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
        netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]);

    if (h->ae_algo->ops->set_timer_task)
        h->ae_algo->ops->set_timer_task(priv->ae_handle, true);

    hns3_config_xps(priv);

    netif_dbg(h, drv, netdev, "net open\n");

    return 0;
}

static void hns3_reset_tx_queue(struct hnae3_handle *h)
{
    struct net_device *ndev = h->kinfo.netdev;
    struct hns3_nic_priv *priv = netdev_priv(ndev);
    struct netdev_queue *dev_queue;
    u32 i;

    for (i = 0; i < h->kinfo.num_tqps; i++) {
        dev_queue = netdev_get_tx_queue(ndev,
                        priv->ring[i].queue_index);
        netdev_tx_reset_queue(dev_queue);
    }
}

static void hns3_nic_net_down(struct net_device *netdev)
{
    struct hns3_nic_priv *priv = netdev_priv(netdev);
    struct hnae3_handle *h = hns3_get_handle(netdev);
    const struct hnae3_ae_ops *ops;
    int i;

    /* disable vectors */
    for (i = 0; i < priv->vector_num; i++)
        hns3_vector_disable(&priv->tqp_vector[i]);

    /* disable rcb */
    for (i = 0; i < h->kinfo.num_tqps; i++)
        hns3_tqp_disable(h->kinfo.tqp[i]);

    /* stop ae_dev */
    ops = priv->ae_handle->ae_algo->ops;
    if (ops->stop)
        ops->stop(priv->ae_handle);

    /* Delay ring buffer clearing to hns3_reset_notify_uninit_enet
     * during the reset process, because the driver may not be able
     * to disable the ring through firmware when downing the netdev.
     */
    if (!hns3_nic_resetting(netdev))
        hns3_clear_all_ring(priv->ae_handle, false);

    hns3_reset_tx_queue(priv->ae_handle);
}

static int hns3_nic_net_stop(struct net_device *netdev)
{
    struct hns3_nic_priv *priv = netdev_priv(netdev);
    struct hnae3_handle *h = hns3_get_handle(netdev);

    if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
        return 0;

    netif_dbg(h, drv, netdev, "net stop\n");

    if (h->ae_algo->ops->set_timer_task)
        h->ae_algo->ops->set_timer_task(priv->ae_handle, false);

    netif_carrier_off(netdev);
    netif_tx_disable(netdev);

    hns3_nic_net_down(netdev);

    return 0;
}

static int hns3_nic_uc_sync(struct net_device *netdev,
                const unsigned char *addr)
{
    struct hnae3_handle *h = hns3_get_handle(netdev);

    if (h->ae_algo->ops->add_uc_addr)
        return h->ae_algo->ops->add_uc_addr(h, addr);

    return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
                  const unsigned char *addr)
{
    struct hnae3_handle *h = hns3_get_handle(netdev);

    /* Ignore requests to remove the device address, because the
     * device address and the other addresses of the uc list are all
     * stored in the function's mac filter list.
     */
    if (ether_addr_equal(addr, netdev->dev_addr))
        return 0;

    if (h->ae_algo->ops->rm_uc_addr)
        return h->ae_algo->ops->rm_uc_addr(h, addr);

    return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
                const unsigned char *addr)
{
    struct hnae3_handle *h = hns3_get_handle(netdev);

    if (h->ae_algo->ops->add_mc_addr)
        return h->ae_algo->ops->add_mc_addr(h, addr);

    return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
                  const unsigned char *addr)
{
    struct hnae3_handle *h = hns3_get_handle(netdev);

    if (h->ae_algo->ops->rm_mc_addr)
        return h->ae_algo->ops->rm_mc_addr(h, addr);

    return 0;
}

static u8 hns3_get_netdev_flags(struct net_device *netdev)
{
    u8 flags = 0;

    if (netdev->flags & IFF_PROMISC)
        flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
    else if (netdev->flags & IFF_ALLMULTI)
        flags = HNAE3_USER_MPE;

    return flags;
}

static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
    struct hnae3_handle *h = hns3_get_handle(netdev);
    u8 new_flags;

    new_flags = hns3_get_netdev_flags(netdev);

    __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
    __dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync);

    /* When the user enables promiscuous mode, VLAN filtering is
     * disabled to let all packets in.
     */
    h->netdev_flags = new_flags;
    hns3_request_update_promisc_mode(h);
}

void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
{
    const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

    if (ops->request_update_promisc_mode)
        ops->request_update_promisc_mode(handle);
}

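/* The tx spare buffer is a per-ring circular bounce area. next_to_use
 * is the producer index advanced in the xmit path, next_to_clean the
 * consumer index advanced when tx descriptors are cleaned, and
 * last_to_clean is the clean position published to the xmit path with
 * release/acquire semantics. The free space reported below is the
 * larger contiguous chunk, minus one byte so the producer can never
 * catch up with the published clean position.
 */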
static u32 hns3_tx_spare_space(struct hns3_enet_ring *ring)
{
    struct hns3_tx_spare *tx_spare = ring->tx_spare;
    u32 ntc, ntu;

    /* This smp_load_acquire() pairs with smp_store_release() in
     * hns3_tx_spare_update() called in tx desc cleaning process.
     */
    ntc = smp_load_acquire(&tx_spare->last_to_clean);
    ntu = tx_spare->next_to_use;

    if (ntc > ntu)
        return ntc - ntu - 1;

    /* The free tx buffer is divided into two parts, so pick the
     * larger one.
     */
    return max(ntc, tx_spare->len - ntu) - 1;
}

static void hns3_tx_spare_update(struct hns3_enet_ring *ring)
{
    struct hns3_tx_spare *tx_spare = ring->tx_spare;

    if (!tx_spare ||
        tx_spare->last_to_clean == tx_spare->next_to_clean)
        return;

    /* This smp_store_release() pairs with smp_load_acquire() in
     * hns3_tx_spare_space() called in xmit process.
     */
    smp_store_release(&tx_spare->last_to_clean,
              tx_spare->next_to_clean);
}

static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring,
                   struct sk_buff *skb,
                   u32 space)
{
    u32 len = skb->len <= ring->tx_copybreak ? skb->len :
                skb_headlen(skb);

    if (len > ring->tx_copybreak)
        return false;

    if (ALIGN(len, dma_get_cache_alignment()) > space) {
        hns3_ring_stats_update(ring, tx_spare_full);
        return false;
    }

    return true;
}

static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
                struct sk_buff *skb,
                u32 space)
{
    if (skb->len <= ring->tx_copybreak || !tx_sgl ||
        (!skb_has_frag_list(skb) &&
         skb_shinfo(skb)->nr_frags < tx_sgl))
        return false;

    if (space < HNS3_MAX_SGL_SIZE) {
        hns3_ring_stats_update(ring, tx_spare_full);
        return false;
    }

    return true;
}

static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
{
    u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
    struct hns3_tx_spare *tx_spare;
    struct page *page;
    dma_addr_t dma;
    int order;

    if (!alloc_size)
        return;

    order = get_order(alloc_size);
    if (order >= MAX_ORDER) {
        if (net_ratelimit())
            dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n");
        return;
    }

    tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare),
                GFP_KERNEL);
    if (!tx_spare) {
        /* The driver still works without the tx spare buffer */
        dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n");
        goto devm_kzalloc_error;
    }

    page = alloc_pages_node(dev_to_node(ring_to_dev(ring)),
                GFP_KERNEL, order);
    if (!page) {
        dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n");
        goto alloc_pages_error;
    }

    dma = dma_map_page(ring_to_dev(ring), page, 0,
               PAGE_SIZE << order, DMA_TO_DEVICE);
    if (dma_mapping_error(ring_to_dev(ring), dma)) {
        dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n");
        goto dma_mapping_error;
    }

    tx_spare->dma = dma;
    tx_spare->buf = page_address(page);
    tx_spare->len = PAGE_SIZE << order;
    ring->tx_spare = tx_spare;
    return;

dma_mapping_error:
    put_page(page);
alloc_pages_error:
    devm_kfree(ring_to_dev(ring), tx_spare);
devm_kzalloc_error:
    ring->tqp->handle->kinfo.tx_spare_buf_size = 0;
}

/* Callers must use hns3_tx_spare_space() to make sure there is enough
 * free space before calling the function below to allocate a tx buffer.
 */
static void *hns3_tx_spare_alloc(struct hns3_enet_ring *ring,
                 unsigned int size, dma_addr_t *dma,
                 u32 *cb_len)
{
    struct hns3_tx_spare *tx_spare = ring->tx_spare;
    u32 ntu = tx_spare->next_to_use;

    size = ALIGN(size, dma_get_cache_alignment());
    *cb_len = size;

    /* The tx spare buffer wraps around here because the free space
     * at the end of the buffer is not enough for the request.
     */
    if (ntu + size > tx_spare->len) {
        *cb_len += (tx_spare->len - ntu);
        ntu = 0;
    }

    tx_spare->next_to_use = ntu + size;
    if (tx_spare->next_to_use == tx_spare->len)
        tx_spare->next_to_use = 0;

    *dma = tx_spare->dma + ntu;

    return tx_spare->buf + ntu;
}

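/* Undo a previous hns3_tx_spare_alloc() of @len bytes; when the
 * allocation had wrapped, this also gives back the padding that was
 * consumed at the end of the buffer.
 */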
static void hns3_tx_spare_rollback(struct hns3_enet_ring *ring, u32 len)
{
    struct hns3_tx_spare *tx_spare = ring->tx_spare;

    if (len > tx_spare->next_to_use) {
        len -= tx_spare->next_to_use;
        tx_spare->next_to_use = tx_spare->len - len;
    } else {
        tx_spare->next_to_use -= len;
    }
}

static void hns3_tx_spare_reclaim_cb(struct hns3_enet_ring *ring,
                     struct hns3_desc_cb *cb)
{
    struct hns3_tx_spare *tx_spare = ring->tx_spare;
    u32 ntc = tx_spare->next_to_clean;
    u32 len = cb->length;

    tx_spare->next_to_clean += len;

    if (tx_spare->next_to_clean >= tx_spare->len) {
        tx_spare->next_to_clean -= tx_spare->len;

        if (tx_spare->next_to_clean) {
            ntc = 0;
            len = tx_spare->next_to_clean;
        }
    }

    /* This tx spare buffer is only really reclaimed after calling
     * hns3_tx_spare_update(), so it is still safe to use the info in
     * the tx buffer to do the dma sync or sg unmapping after
     * tx_spare->next_to_clean is moved forward.
     */
    if (cb->type & (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) {
        dma_addr_t dma = tx_spare->dma + ntc;

        dma_sync_single_for_cpu(ring_to_dev(ring), dma, len,
                    DMA_TO_DEVICE);
    } else {
        struct sg_table *sgt = tx_spare->buf + ntc;

        dma_unmap_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents,
                 DMA_TO_DEVICE);
    }
}

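/* Prepare a GSO skb for TSO offload: zero the IPv4 header checksum,
 * remove the payload length from the L4 pseudo checksum, and fill the
 * paylen/MSS descriptor fields. Returns 0 on success or a negative
 * errno.
 */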
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
            u16 *mss, u32 *type_cs_vlan_tso, u32 *send_bytes)
{
    u32 l4_offset, hdr_len;
    union l3_hdr_info l3;
    union l4_hdr_info l4;
    u32 l4_paylen;
    int ret;

    if (!skb_is_gso(skb))
        return 0;

    ret = skb_cow_head(skb, 0);
    if (unlikely(ret < 0))
        return ret;

    l3.hdr = skb_network_header(skb);
    l4.hdr = skb_transport_header(skb);

    /* Software should clear the IPv4's checksum field when tso is
     * needed.
     */
    if (l3.v4->version == 4)
        l3.v4->check = 0;

    /* tunnel packet */
    if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
                     SKB_GSO_GRE_CSUM |
                     SKB_GSO_UDP_TUNNEL |
                     SKB_GSO_UDP_TUNNEL_CSUM)) {
        /* reset l3&l4 pointers from outer to inner headers */
        l3.hdr = skb_inner_network_header(skb);
        l4.hdr = skb_inner_transport_header(skb);

        /* Software should clear the IPv4's checksum field when
         * tso is needed.
         */
        if (l3.v4->version == 4)
            l3.v4->check = 0;
    }

    /* normal or tunnel packet */
    l4_offset = l4.hdr - skb->data;

    /* remove payload length from inner pseudo checksum when tso */
    l4_paylen = skb->len - l4_offset;

    if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
        hdr_len = sizeof(*l4.udp) + l4_offset;
        csum_replace_by_diff(&l4.udp->check,
                     (__force __wsum)htonl(l4_paylen));
    } else {
        hdr_len = (l4.tcp->doff << 2) + l4_offset;
        csum_replace_by_diff(&l4.tcp->check,
                     (__force __wsum)htonl(l4_paylen));
    }

    *send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len;

    /* find the txbd field values */
    *paylen_fdop_ol4cs = skb->len - hdr_len;
    hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);

    /* offload outer UDP header checksum */
    if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
        hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1);

    /* get MSS for TSO */
    *mss = skb_shinfo(skb)->gso_size;

    trace_hns3_tso(skb);

    return 0;
}

static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
                u8 *il4_proto)
{
    union l3_hdr_info l3;
    unsigned char *l4_hdr;
    unsigned char *exthdr;
    u8 l4_proto_tmp;
    __be16 frag_off;

    /* find the outer header pointers */
    l3.hdr = skb_network_header(skb);
    l4_hdr = skb_transport_header(skb);

    if (skb->protocol == htons(ETH_P_IPV6)) {
        exthdr = l3.hdr + sizeof(*l3.v6);
        l4_proto_tmp = l3.v6->nexthdr;
        if (l4_hdr != exthdr)
            ipv6_skip_exthdr(skb, exthdr - skb->data,
                     &l4_proto_tmp, &frag_off);
    } else if (skb->protocol == htons(ETH_P_IP)) {
        l4_proto_tmp = l3.v4->protocol;
    } else {
        return -EINVAL;
    }

    *ol4_proto = l4_proto_tmp;

    /* not a tunnel packet, so there are no inner headers */
    if (!skb->encapsulation) {
        *il4_proto = 0;
        return 0;
    }

    /* find the inner header pointers */
    l3.hdr = skb_inner_network_header(skb);
    l4_hdr = skb_inner_transport_header(skb);

    if (l3.v6->version == 6) {
        exthdr = l3.hdr + sizeof(*l3.v6);
        l4_proto_tmp = l3.v6->nexthdr;
        if (l4_hdr != exthdr)
            ipv6_skip_exthdr(skb, exthdr - skb->data,
                     &l4_proto_tmp, &frag_off);
    } else if (l3.v4->version == 4) {
        l4_proto_tmp = l3.v4->protocol;
    }

    *il4_proto = l4_proto_tmp;

    return 0;
}

/* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL and
 * the skb is a udp packet with one of the IANA-assigned tunnel ports as
 * dest port, the hardware is expected to do the checksum offload, but
 * it will not do the checksum offload when the udp dest port is
 * 4789, 4790 or 6081.
 */
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
    struct hns3_nic_priv *priv = netdev_priv(skb->dev);
    struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
    union l4_hdr_info l4;

    /* For device versions V3 and above, the hardware can do this
     * checksum offload.
     */
    if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
        return false;

    l4.hdr = skb_transport_header(skb);

    if (!(!skb->encapsulation &&
          (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
          l4.udp->dest == htons(GENEVE_UDP_PORT) ||
          l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT))))
        return false;

    return true;
}

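/* Fill the outer L2/L3/L4 length and type fields of the tx descriptor
 * for an encapsulated packet. The hardware expects the lengths in
 * 2-byte units for L2 and 4-byte units for L3/L4.
 */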
static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
                  u32 *ol_type_vlan_len_msec)
{
    u32 l2_len, l3_len, l4_len;
    unsigned char *il2_hdr;
    union l3_hdr_info l3;
    union l4_hdr_info l4;

    l3.hdr = skb_network_header(skb);
    l4.hdr = skb_transport_header(skb);

    /* compute OL2 header size, defined in 2-byte units */
    l2_len = l3.hdr - skb->data;
    hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);

    /* compute OL3 header size, defined in 4-byte units */
    l3_len = l4.hdr - l3.hdr;
    hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);

    il2_hdr = skb_inner_mac_header(skb);
    /* compute OL4 header size, defined in 4-byte units */
    l4_len = il2_hdr - l4.hdr;
    hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);

    /* define outer network header type */
    if (skb->protocol == htons(ETH_P_IP)) {
        if (skb_is_gso(skb))
            hns3_set_field(*ol_type_vlan_len_msec,
                       HNS3_TXD_OL3T_S,
                       HNS3_OL3T_IPV4_CSUM);
        else
            hns3_set_field(*ol_type_vlan_len_msec,
                       HNS3_TXD_OL3T_S,
                       HNS3_OL3T_IPV4_NO_CSUM);
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
                   HNS3_OL3T_IPV6);
    }

    if (ol4_proto == IPPROTO_UDP)
        hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
                   HNS3_TUN_MAC_IN_UDP);
    else if (ol4_proto == IPPROTO_GRE)
        hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
                   HNS3_TUN_NVGRE);
}

static void hns3_set_l3_type(struct sk_buff *skb, union l3_hdr_info l3,
                 u32 *type_cs_vlan_tso)
{
    if (l3.v4->version == 4) {
        hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
                   HNS3_L3T_IPV4);

        /* the stack has already computed the IP header checksum;
         * the only time we need the hardware to recompute it is in
         * the case of TSO.
         */
        if (skb_is_gso(skb))
            hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
    } else if (l3.v6->version == 6) {
        hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
                   HNS3_L3T_IPV6);
    }
}

static int hns3_set_l4_csum_length(struct sk_buff *skb, union l4_hdr_info l4,
                   u32 l4_proto, u32 *type_cs_vlan_tso)
{
    /* compute inner(/normal) L4 header size, defined in 4-byte units */
    switch (l4_proto) {
    case IPPROTO_TCP:
        hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
        hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
                   HNS3_L4T_TCP);
        hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
                   l4.tcp->doff);
        break;
    case IPPROTO_UDP:
        if (hns3_tunnel_csum_bug(skb)) {
            int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);

            return ret ? ret : skb_checksum_help(skb);
        }

        hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
        hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
                   HNS3_L4T_UDP);
        hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
                   (sizeof(struct udphdr) >> 2));
        break;
    case IPPROTO_SCTP:
        hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
        hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
                   HNS3_L4T_SCTP);
        hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
                   (sizeof(struct sctphdr) >> 2));
        break;
    default:
        /* drop the tunnel skb if the hardware doesn't support it,
         * because the hardware can't calculate the csum with TSO.
         */
        if (skb_is_gso(skb))
            return -EDOM;

        /* the stack has already computed the IP checksum; fall back
         * to software for the l4 checksum when not doing TSO.
         */
        return skb_checksum_help(skb);
    }

    return 0;
}

static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
               u8 il4_proto, u32 *type_cs_vlan_tso,
               u32 *ol_type_vlan_len_msec)
{
    unsigned char *l2_hdr = skb->data;
    u32 l4_proto = ol4_proto;
    union l4_hdr_info l4;
    union l3_hdr_info l3;
    u32 l2_len, l3_len;

    l4.hdr = skb_transport_header(skb);
    l3.hdr = skb_network_header(skb);

    /* handle encapsulation skb */
    if (skb->encapsulation) {
        /* If this is not a UDP/GRE encapsulation skb */
        if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
            /* drop the tunnel skb if the hardware doesn't
             * support it, because the hardware can't calculate
             * the csum with TSO.
             */
            if (skb_is_gso(skb))
                return -EDOM;

            /* the stack has already computed the IP checksum;
             * fall back to software for the l4 checksum when not
             * doing TSO.
             */
            return skb_checksum_help(skb);
        }

        hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);

        /* switch to inner header */
        l2_hdr = skb_inner_mac_header(skb);
        l3.hdr = skb_inner_network_header(skb);
        l4.hdr = skb_inner_transport_header(skb);
        l4_proto = il4_proto;
    }

    hns3_set_l3_type(skb, l3, type_cs_vlan_tso);

    /* compute inner(/normal) L2 header size, defined in 2-byte units */
    l2_len = l3.hdr - l2_hdr;
    hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);

    /* compute inner(/normal) L3 header size, defined in 4-byte units */
    l3_len = l4.hdr - l3.hdr;
    hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);

    return hns3_set_l4_csum_length(skb, l4, l4_proto, type_cs_vlan_tso);
}

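/* Handle tx VLAN tags. Returns HNS3_INNER_VLAN_TAG or
 * HNS3_OUTER_VLAN_TAG to tell the caller which descriptor field should
 * carry the tag, 0 when no hardware tag insertion is needed, or a
 * negative errno on failure.
 */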
static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
                 struct sk_buff *skb)
{
    struct hnae3_handle *handle = tx_ring->tqp->handle;
    struct hnae3_ae_dev *ae_dev;
    struct vlan_ethhdr *vhdr;
    int rc;

    if (!(skb->protocol == htons(ETH_P_8021Q) ||
          skb_vlan_tag_present(skb)))
        return 0;

    /* Due to a HW limitation on HNAE3_DEVICE_VERSION_V2, if port-based
     * VLAN insertion is enabled, only one VLAN header is allowed in the
     * skb; more than one would cause a RAS error.
     */
    ae_dev = pci_get_drvdata(handle->pdev);
    if (unlikely(skb_vlan_tagged_multi(skb) &&
             ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
             handle->port_base_vlan_state ==
             HNAE3_PORT_BASE_VLAN_ENABLE))
        return -EINVAL;

    if (skb->protocol == htons(ETH_P_8021Q) &&
        !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
        /* When HW VLAN acceleration is turned off, and the stack
         * sets the protocol to 802.1q, the driver just needs to
         * set the protocol to the encapsulated ethertype.
         */
        skb->protocol = vlan_get_protocol(skb);
        return 0;
    }

    if (skb_vlan_tag_present(skb)) {
        /* Based on the hw strategy, use out_vtag in the double-tag
         * case and inner_vtag in the single-tag case.
         */
        if (skb->protocol == htons(ETH_P_8021Q) &&
            handle->port_base_vlan_state ==
            HNAE3_PORT_BASE_VLAN_DISABLE)
            rc = HNS3_OUTER_VLAN_TAG;
        else
            rc = HNS3_INNER_VLAN_TAG;

        skb->protocol = vlan_get_protocol(skb);
        return rc;
    }

    rc = skb_cow_head(skb, 0);
    if (unlikely(rc < 0))
        return rc;

    vhdr = (struct vlan_ethhdr *)skb->data;
    vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
                     & VLAN_PRIO_MASK);

    skb->protocol = vlan_get_protocol(skb);
    return 0;
}

/* check if the hardware is capable of checksum offloading */
static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
{
    struct hns3_nic_priv *priv = netdev_priv(skb->dev);

    /* Note that, to keep the TX descriptor backward compatible,
     * HW checksumming of non-IP packets and GSO packets is handled
     * at a different place in the following code.
     */
    if (skb_csum_is_sctp(skb) || skb_is_gso(skb) ||
        !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state))
        return false;

    return true;
}

struct hns3_desc_param {
    u32 paylen_ol4cs;
    u32 ol_type_vlan_len_msec;
    u32 type_cs_vlan_tso;
    u16 mss_hw_csum;
    u16 inner_vtag;
    u16 out_vtag;
};

static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa)
{
    pa->paylen_ol4cs = skb->len;
    pa->ol_type_vlan_len_msec = 0;
    pa->type_cs_vlan_tso = 0;
    pa->mss_hw_csum = 0;
    pa->inner_vtag = 0;
    pa->out_vtag = 0;
}
1575 
1576 static int hns3_handle_vlan_info(struct hns3_enet_ring *ring,
1577                  struct sk_buff *skb,
1578                  struct hns3_desc_param *param)
1579 {
1580     int ret;
1581 
1582     ret = hns3_handle_vtags(ring, skb);
1583     if (unlikely(ret < 0)) {
1584         hns3_ring_stats_update(ring, tx_vlan_err);
1585         return ret;
1586     } else if (ret == HNS3_INNER_VLAN_TAG) {
1587         param->inner_vtag = skb_vlan_tag_get(skb);
1588         param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
1589                 VLAN_PRIO_MASK;
1590         hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
1591     } else if (ret == HNS3_OUTER_VLAN_TAG) {
1592         param->out_vtag = skb_vlan_tag_get(skb);
1593         param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
1594                 VLAN_PRIO_MASK;
1595         hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
1596                    1);
1597     }
1598     return 0;
1599 }
1600 
1601 static int hns3_handle_csum_partial(struct hns3_enet_ring *ring,
1602                     struct sk_buff *skb,
1603                     struct hns3_desc_cb *desc_cb,
1604                     struct hns3_desc_param *param)
1605 {
1606     u8 ol4_proto, il4_proto;
1607     int ret;
1608 
1609     if (hns3_check_hw_tx_csum(skb)) {
1610         /* set checksum start and offset, in units of 2 bytes */
1611         hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
1612                    skb_checksum_start_offset(skb) >> 1);
1613         hns3_set_field(param->ol_type_vlan_len_msec,
1614                    HNS3_TXD_CSUM_OFFSET_S,
1615                    skb->csum_offset >> 1);
1616         param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
1617         return 0;
1618     }
1619 
1620     skb_reset_mac_len(skb);
1621 
1622     ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
1623     if (unlikely(ret < 0)) {
1624         hns3_ring_stats_update(ring, tx_l4_proto_err);
1625         return ret;
1626     }
1627 
1628     ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
1629                   &param->type_cs_vlan_tso,
1630                   &param->ol_type_vlan_len_msec);
1631     if (unlikely(ret < 0)) {
1632         hns3_ring_stats_update(ring, tx_l2l3l4_err);
1633         return ret;
1634     }
1635 
1636     ret = hns3_set_tso(skb, &param->paylen_ol4cs, &param->mss_hw_csum,
1637                &param->type_cs_vlan_tso, &desc_cb->send_bytes);
1638     if (unlikely(ret < 0)) {
1639         hns3_ring_stats_update(ring, tx_tso_err);
1640         return ret;
1641     }
1642     return 0;
1643 }
1644 
1645 static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
1646                   struct sk_buff *skb, struct hns3_desc *desc,
1647                   struct hns3_desc_cb *desc_cb)
1648 {
1649     struct hns3_desc_param param;
1650     int ret;
1651 
1652     hns3_init_desc_data(skb, &param);
1653     ret = hns3_handle_vlan_info(ring, skb, &param);
1654     if (unlikely(ret < 0))
1655         return ret;
1656 
1657     desc_cb->send_bytes = skb->len;
1658 
1659     if (skb->ip_summed == CHECKSUM_PARTIAL) {
1660         ret = hns3_handle_csum_partial(ring, skb, desc_cb, &param);
1661         if (ret)
1662             return ret;
1663     }
1664 
1665     /* Set txbd */
1666     desc->tx.ol_type_vlan_len_msec =
1667         cpu_to_le32(param.ol_type_vlan_len_msec);
1668     desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso);
1669     desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs);
1670     desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum);
1671     desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag);
1672     desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag);
1673 
1674     return 0;
1675 }
1676 
1677 static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma,
1678               unsigned int size)
1679 {
1680 #define HNS3_LIKELY_BD_NUM  1
1681 
1682     struct hns3_desc *desc = &ring->desc[ring->next_to_use];
1683     unsigned int frag_buf_num;
1684     int k, sizeoflast;
1685 
1686     if (likely(size <= HNS3_MAX_BD_SIZE)) {
1687         desc->addr = cpu_to_le64(dma);
1688         desc->tx.send_size = cpu_to_le16(size);
1689         desc->tx.bdtp_fe_sc_vld_ra_ri =
1690             cpu_to_le16(BIT(HNS3_TXD_VLD_B));
1691 
1692         trace_hns3_tx_desc(ring, ring->next_to_use);
1693         ring_ptr_move_fw(ring, next_to_use);
1694         return HNS3_LIKELY_BD_NUM;
1695     }
1696 
1697     frag_buf_num = hns3_tx_bd_count(size);
1698     sizeoflast = size % HNS3_MAX_BD_SIZE;
1699     sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
1700 
1701     /* When the frag size is bigger than the hardware limit, split this frag */
1702     for (k = 0; k < frag_buf_num; k++) {
1703         /* now, fill the descriptor */
1704         desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
1705         desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
1706                      (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
1707         desc->tx.bdtp_fe_sc_vld_ra_ri =
1708                 cpu_to_le16(BIT(HNS3_TXD_VLD_B));
1709 
1710         trace_hns3_tx_desc(ring, ring->next_to_use);
1711         /* move ring pointer to next */
1712         ring_ptr_move_fw(ring, next_to_use);
1713 
1714         desc = &ring->desc[ring->next_to_use];
1715     }
1716 
1717     return frag_buf_num;
1718 }
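
/* Illustrative sketch (not part of the driver): assuming HNS3_MAX_BD_SIZE
 * is 65535 (the largest value the 16-bit send_size field can hold), a
 * 100 KB (102400-byte) frag is split by the loop above into
 * hns3_tx_bd_count(102400) = DIV_ROUND_UP(102400, 65535) = 2 BDs: the
 * first carries 65535 bytes and the last carries
 * sizeoflast = 102400 % 65535 = 36865 bytes.
 */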
1719 
1720 static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
1721                   unsigned int type)
1722 {
1723     struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
1724     struct device *dev = ring_to_dev(ring);
1725     unsigned int size;
1726     dma_addr_t dma;
1727 
1728     if (type & (DESC_TYPE_FRAGLIST_SKB | DESC_TYPE_SKB)) {
1729         struct sk_buff *skb = (struct sk_buff *)priv;
1730 
1731         size = skb_headlen(skb);
1732         if (!size)
1733             return 0;
1734 
1735         dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1736     } else if (type & DESC_TYPE_BOUNCE_HEAD) {
1737         /* Head data has been filled in hns3_handle_tx_bounce(),
1738          * just return 0 here.
1739          */
1740         return 0;
1741     } else {
1742         skb_frag_t *frag = (skb_frag_t *)priv;
1743 
1744         size = skb_frag_size(frag);
1745         if (!size)
1746             return 0;
1747 
1748         dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1749     }
1750 
1751     if (unlikely(dma_mapping_error(dev, dma))) {
1752         hns3_ring_stats_update(ring, sw_err_cnt);
1753         return -ENOMEM;
1754     }
1755 
1756     desc_cb->priv = priv;
1757     desc_cb->length = size;
1758     desc_cb->dma = dma;
1759     desc_cb->type = type;
1760 
1761     return hns3_fill_desc(ring, dma, size);
1762 }
1763 
1764 static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
1765                     unsigned int bd_num)
1766 {
1767     unsigned int size;
1768     int i;
1769 
1770     size = skb_headlen(skb);
1771     while (size > HNS3_MAX_BD_SIZE) {
1772         bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
1773         size -= HNS3_MAX_BD_SIZE;
1774 
1775         if (bd_num > HNS3_MAX_TSO_BD_NUM)
1776             return bd_num;
1777     }
1778 
1779     if (size) {
1780         bd_size[bd_num++] = size;
1781         if (bd_num > HNS3_MAX_TSO_BD_NUM)
1782             return bd_num;
1783     }
1784 
1785     for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1786         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1787         size = skb_frag_size(frag);
1788         if (!size)
1789             continue;
1790 
1791         while (size > HNS3_MAX_BD_SIZE) {
1792             bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
1793             size -= HNS3_MAX_BD_SIZE;
1794 
1795             if (bd_num > HNS3_MAX_TSO_BD_NUM)
1796                 return bd_num;
1797         }
1798 
1799         bd_size[bd_num++] = size;
1800         if (bd_num > HNS3_MAX_TSO_BD_NUM)
1801             return bd_num;
1802     }
1803 
1804     return bd_num;
1805 }
1806 
1807 static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
1808                    u8 max_non_tso_bd_num, unsigned int bd_num,
1809                    unsigned int recursion_level)
1810 {
1811 #define HNS3_MAX_RECURSION_LEVEL    24
1812 
1813     struct sk_buff *frag_skb;
1814 
1815     /* If the total len is within the max bd limit */
1816     if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level &&
1817            !skb_has_frag_list(skb) &&
1818            skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
1819         return skb_shinfo(skb)->nr_frags + 1U;
1820 
1821     if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL))
1822         return UINT_MAX;
1823 
1824     bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
1825     if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM)
1826         return bd_num;
1827 
1828     skb_walk_frags(skb, frag_skb) {
1829         bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num,
1830                     bd_num, recursion_level + 1);
1831         if (bd_num > HNS3_MAX_TSO_BD_NUM)
1832             return bd_num;
1833     }
1834 
1835     return bd_num;
1836 }
1837 
1838 static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
1839 {
1840     if (!skb->encapsulation)
1841         return skb_tcp_all_headers(skb);
1842 
1843     return skb_inner_tcp_all_headers(skb);
1844 }
1845 
1846 /* HW needs every contiguous run of max_non_tso_bd_num buffers to hold
1847  * more data than the MSS. We simplify this by ensuring that skb_headlen
1848  * plus the first contiguous max_non_tso_bd_num - 1 frags is larger than
1849  * the gso header len + mss, and that every other contiguous run of
1850  * max_non_tso_bd_num - 1 frags is larger than the MSS, except the last
1851  * max_non_tso_bd_num - 1 frags.
1852  */
1852 static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
1853                      unsigned int bd_num, u8 max_non_tso_bd_num)
1854 {
1855     unsigned int tot_len = 0;
1856     int i;
1857 
1858     for (i = 0; i < max_non_tso_bd_num - 1U; i++)
1859         tot_len += bd_size[i];
1860 
1861     /* ensure the first max_non_tso_bd_num frags are greater than
1862      * mss + header
1863      */
1864     if (tot_len + bd_size[max_non_tso_bd_num - 1U] <
1865         skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb))
1866         return true;
1867 
1868     /* ensure every contiguous run of max_non_tso_bd_num - 1 buffers
1869      * is greater than the mss, except the last one.
1870      */
1871     for (i = 0; i < bd_num - max_non_tso_bd_num; i++) {
1872         tot_len -= bd_size[i];
1873         tot_len += bd_size[i + max_non_tso_bd_num - 1U];
1874 
1875         if (tot_len < skb_shinfo(skb)->gso_size)
1876             return true;
1877     }
1878 
1879     return false;
1880 }
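
/* Worked example (illustrative): assuming max_non_tso_bd_num = 8,
 * gso_size = 1448 and a 54-byte gso header, the first check requires
 * the first 8 BDs (bd_size[0..7]) to sum to more than 1448 + 54 = 1502
 * bytes. The loop then slides a 7-BD window one step at a time (drop
 * bd_size[i], add bd_size[i + 7], leaving bd_size[i + 1 .. i + 7]) and
 * requires each window to sum to more than 1448 bytes; any window that
 * falls short forces linearization.
 */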
1881 
1882 void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
1883 {
1884     int i;
1885 
1886     for (i = 0; i < MAX_SKB_FRAGS; i++)
1887         size[i] = skb_frag_size(&shinfo->frags[i]);
1888 }
1889 
1890 static int hns3_skb_linearize(struct hns3_enet_ring *ring,
1891                   struct sk_buff *skb,
1892                   unsigned int bd_num)
1893 {
1894     /* 'bd_num == UINT_MAX' means the skb's fraglist has a
1895      * recursion level of over HNS3_MAX_RECURSION_LEVEL.
1896      */
1897     if (bd_num == UINT_MAX) {
1898         hns3_ring_stats_update(ring, over_max_recursion);
1899         return -ENOMEM;
1900     }
1901 
1902     /* The skb->len has exceeded the hw limitation, linearization
1903      * will not help.
1904      */
1905     if (skb->len > HNS3_MAX_TSO_SIZE ||
1906         (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
1907         hns3_ring_stats_update(ring, hw_limitation);
1908         return -ENOMEM;
1909     }
1910 
1911     if (__skb_linearize(skb)) {
1912         hns3_ring_stats_update(ring, sw_err_cnt);
1913         return -ENOMEM;
1914     }
1915 
1916     return 0;
1917 }
1918 
1919 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
1920                   struct net_device *netdev,
1921                   struct sk_buff *skb)
1922 {
1923     struct hns3_nic_priv *priv = netdev_priv(netdev);
1924     u8 max_non_tso_bd_num = priv->max_non_tso_bd_num;
1925     unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
1926     unsigned int bd_num;
1927 
1928     bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0);
1929     if (unlikely(bd_num > max_non_tso_bd_num)) {
1930         if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
1931             !hns3_skb_need_linearized(skb, bd_size, bd_num,
1932                           max_non_tso_bd_num)) {
1933             trace_hns3_over_max_bd(skb);
1934             goto out;
1935         }
1936 
1937         if (hns3_skb_linearize(ring, skb, bd_num))
1938             return -ENOMEM;
1939 
1940         bd_num = hns3_tx_bd_count(skb->len);
1941 
1942         hns3_ring_stats_update(ring, tx_copy);
1943     }
1944 
1945 out:
1946     if (likely(ring_space(ring) >= bd_num))
1947         return bd_num;
1948 
1949     netif_stop_subqueue(netdev, ring->queue_index);
1950     smp_mb(); /* Memory barrier before checking ring_space */
1951 
1952     /* Start the queue in case hns3_clean_tx_ring has just made room
1953      * available and has not yet seen the stopped state set by
1954      * netif_stop_subqueue above.
1955      */
1956     if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) &&
1957         !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
1958         netif_start_subqueue(netdev, ring->queue_index);
1959         return bd_num;
1960     }
1961 
1962     hns3_ring_stats_update(ring, tx_busy);
1963 
1964     return -EBUSY;
1965 }
1966 
1967 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
1968 {
1969     struct device *dev = ring_to_dev(ring);
1970     unsigned int i;
1971 
1972     for (i = 0; i < ring->desc_num; i++) {
1973         struct hns3_desc *desc = &ring->desc[ring->next_to_use];
1974         struct hns3_desc_cb *desc_cb;
1975 
1976         memset(desc, 0, sizeof(*desc));
1977 
1978         /* check if this is where we started */
1979         if (ring->next_to_use == next_to_use_orig)
1980             break;
1981 
1982         /* rollback one */
1983         ring_ptr_move_bw(ring, next_to_use);
1984 
1985         desc_cb = &ring->desc_cb[ring->next_to_use];
1986 
1987         if (!desc_cb->dma)
1988             continue;
1989 
1990         /* unmap the descriptor dma address */
1991         if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
1992             dma_unmap_single(dev, desc_cb->dma, desc_cb->length,
1993                      DMA_TO_DEVICE);
1994         else if (desc_cb->type &
1995              (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL))
1996             hns3_tx_spare_rollback(ring, desc_cb->length);
1997         else if (desc_cb->length)
1998             dma_unmap_page(dev, desc_cb->dma, desc_cb->length,
1999                        DMA_TO_DEVICE);
2000 
2001         desc_cb->length = 0;
2002         desc_cb->dma = 0;
2003         desc_cb->type = DESC_TYPE_UNKNOWN;
2004     }
2005 }
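
/* hns3_clear_desc() above walks next_to_use backwards to the position
 * saved before filling started, zeroing each descriptor and undoing its
 * DMA mapping (or rolling back tx spare buffer space for bounce/SGL
 * descriptors), so a failed transmit leaves the ring clean.
 */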
2006 
2007 static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
2008                  struct sk_buff *skb, unsigned int type)
2009 {
2010     struct sk_buff *frag_skb;
2011     int i, ret, bd_num = 0;
2012 
2013     ret = hns3_map_and_fill_desc(ring, skb, type);
2014     if (unlikely(ret < 0))
2015         return ret;
2016 
2017     bd_num += ret;
2018 
2019     for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2020         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2021 
2022         ret = hns3_map_and_fill_desc(ring, frag, DESC_TYPE_PAGE);
2023         if (unlikely(ret < 0))
2024             return ret;
2025 
2026         bd_num += ret;
2027     }
2028 
2029     skb_walk_frags(skb, frag_skb) {
2030         ret = hns3_fill_skb_to_desc(ring, frag_skb,
2031                         DESC_TYPE_FRAGLIST_SKB);
2032         if (unlikely(ret < 0))
2033             return ret;
2034 
2035         bd_num += ret;
2036     }
2037 
2038     return bd_num;
2039 }
2040 
2041 static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num)
2042 {
2043 #define HNS3_BYTES_PER_64BIT        8
2044 
2045     struct hns3_desc desc[HNS3_MAX_PUSH_BD_NUM] = {};
2046     int offset = 0;
2047 
2048     /* make sure everything is visible to the device before
2049      * executing the tx push or updating the doorbell
2050      */
2051     dma_wmb();
2052 
2053     do {
2054         int idx = (ring->next_to_use - num + ring->desc_num) %
2055               ring->desc_num;
2056 
2057         u64_stats_update_begin(&ring->syncp);
2058         ring->stats.tx_push++;
2059         u64_stats_update_end(&ring->syncp);
2060         memcpy(&desc[offset], &ring->desc[idx],
2061                sizeof(struct hns3_desc));
2062         offset++;
2063     } while (--num);
2064 
2065     __iowrite64_copy(ring->tqp->mem_base, desc,
2066              (sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) /
2067              HNS3_BYTES_PER_64BIT);
2068 
2069     io_stop_wc();
2070 }
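
/* Illustrative note (not part of the driver): assuming
 * HNS3_MAX_PUSH_BD_NUM is 2 and sizeof(struct hns3_desc) is 32 bytes,
 * the __iowrite64_copy() above always writes (32 * 2) / 8 = 8 64-bit
 * words into the device's write-combining push area, i.e. the whole
 * fixed-size buffer, regardless of how many BDs were actually copied.
 */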
2071 
2072 static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
2073 {
2074 #define HNS3_MEM_DOORBELL_OFFSET    64
2075 
2076     __le64 bd_num = cpu_to_le64((u64)ring->pending_buf);
2077 
2078     /* make sure everything is visible to the device before
2079      * executing the tx push or updating the doorbell
2080      */
2081     dma_wmb();
2082 
2083     __iowrite64_copy(ring->tqp->mem_base + HNS3_MEM_DOORBELL_OFFSET,
2084              &bd_num, 1);
2085     u64_stats_update_begin(&ring->syncp);
2086     ring->stats.tx_mem_doorbell += ring->pending_buf;
2087     u64_stats_update_end(&ring->syncp);
2088 
2089     io_stop_wc();
2090 }
2091 
2092 static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
2093                  bool doorbell)
2094 {
2095     struct net_device *netdev = ring_to_netdev(ring);
2096     struct hns3_nic_priv *priv = netdev_priv(netdev);
2097 
2098     /* when tx push is enabled, a packet whose BD count does not
2099      * exceed HNS3_MAX_PUSH_BD_NUM can be pushed directly.
2100      */
2101     if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
2102         !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
2103         hns3_tx_push_bd(ring, num);
2104         WRITE_ONCE(ring->last_to_use, ring->next_to_use);
2105         return;
2106     }
2107 
2108     ring->pending_buf += num;
2109 
2110     if (!doorbell) {
2111         hns3_ring_stats_update(ring, tx_more);
2112         return;
2113     }
2114 
2115     if (ring->tqp->mem_base)
2116         hns3_tx_mem_doorbell(ring);
2117     else
2118         writel(ring->pending_buf,
2119                ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
2120 
2121     ring->pending_buf = 0;
2122     WRITE_ONCE(ring->last_to_use, ring->next_to_use);
2123 }
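
/* Doorbell batching: when the stack signals that more packets are
 * coming (netdev_xmit_more()), __netdev_tx_sent_queue() in
 * hns3_nic_net_xmit() usually returns false for 'doorbell', so BD
 * counts only accumulate in ring->pending_buf and a single doorbell
 * write later kicks the hardware for the whole batch.
 */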
2124 
2125 static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb,
2126               struct hns3_desc *desc)
2127 {
2128     struct hnae3_handle *h = hns3_get_handle(netdev);
2129 
2130     if (!(h->ae_algo->ops->set_tx_hwts_info &&
2131           h->ae_algo->ops->set_tx_hwts_info(h, skb)))
2132         return;
2133 
2134     desc->tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_TSYN_B));
2135 }
2136 
2137 static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
2138                  struct sk_buff *skb)
2139 {
2140     struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
2141     unsigned int type = DESC_TYPE_BOUNCE_HEAD;
2142     unsigned int size = skb_headlen(skb);
2143     dma_addr_t dma;
2144     int bd_num = 0;
2145     u32 cb_len;
2146     void *buf;
2147     int ret;
2148 
2149     if (skb->len <= ring->tx_copybreak) {
2150         size = skb->len;
2151         type = DESC_TYPE_BOUNCE_ALL;
2152     }
2153 
2154     /* hns3_can_use_tx_bounce() has been called to ensure the function
2155      * below can always return the tx buffer.
2156      */
2157     buf = hns3_tx_spare_alloc(ring, size, &dma, &cb_len);
2158 
2159     ret = skb_copy_bits(skb, 0, buf, size);
2160     if (unlikely(ret < 0)) {
2161         hns3_tx_spare_rollback(ring, cb_len);
2162         hns3_ring_stats_update(ring, copy_bits_err);
2163         return ret;
2164     }
2165 
2166     desc_cb->priv = skb;
2167     desc_cb->length = cb_len;
2168     desc_cb->dma = dma;
2169     desc_cb->type = type;
2170 
2171     bd_num += hns3_fill_desc(ring, dma, size);
2172 
2173     if (type == DESC_TYPE_BOUNCE_HEAD) {
2174         ret = hns3_fill_skb_to_desc(ring, skb,
2175                         DESC_TYPE_BOUNCE_HEAD);
2176         if (unlikely(ret < 0))
2177             return ret;
2178 
2179         bd_num += ret;
2180     }
2181 
2182     dma_sync_single_for_device(ring_to_dev(ring), dma, size,
2183                    DMA_TO_DEVICE);
2184 
2185     hns3_ring_stats_update(ring, tx_bounce);
2186 
2187     return bd_num;
2188 }
2189 
2190 static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
2191                   struct sk_buff *skb)
2192 {
2193     struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
2194     u32 nfrag = skb_shinfo(skb)->nr_frags + 1;
2195     struct sg_table *sgt;
2196     int i, bd_num = 0;
2197     dma_addr_t dma;
2198     u32 cb_len;
2199     int nents;
2200 
2201     if (skb_has_frag_list(skb))
2202         nfrag = HNS3_MAX_TSO_BD_NUM;
2203 
2204     /* hns3_can_use_tx_sgl() has been called to ensure the function
2205      * below can always return the tx buffer.
2206      */
2207     sgt = hns3_tx_spare_alloc(ring, HNS3_SGL_SIZE(nfrag),
2208                   &dma, &cb_len);
2209 
2210     /* the scatterlist follows the sg table */
2211     sgt->sgl = (struct scatterlist *)(sgt + 1);
2212     sg_init_table(sgt->sgl, nfrag);
2213     nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
2214     if (unlikely(nents < 0)) {
2215         hns3_tx_spare_rollback(ring, cb_len);
2216         hns3_ring_stats_update(ring, skb2sgl_err);
2217         return -ENOMEM;
2218     }
2219 
2220     sgt->orig_nents = nents;
2221     sgt->nents = dma_map_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents,
2222                 DMA_TO_DEVICE);
2223     if (unlikely(!sgt->nents)) {
2224         hns3_tx_spare_rollback(ring, cb_len);
2225         hns3_ring_stats_update(ring, map_sg_err);
2226         return -ENOMEM;
2227     }
2228 
2229     desc_cb->priv = skb;
2230     desc_cb->length = cb_len;
2231     desc_cb->dma = dma;
2232     desc_cb->type = DESC_TYPE_SGL_SKB;
2233 
2234     for (i = 0; i < sgt->nents; i++)
2235         bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i),
2236                      sg_dma_len(sgt->sgl + i));
2237     hns3_ring_stats_update(ring, tx_sgl);
2238 
2239     return bd_num;
2240 }
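
/* Illustrative note (not part of the driver): the tx spare buffer
 * allocated above holds a struct sg_table immediately followed by its
 * scatterlist entries, so an skb with 3 frags (nfrag = 4) needs
 * HNS3_SGL_SIZE(4) = sizeof(struct sg_table) +
 * 4 * sizeof(struct scatterlist) bytes.
 */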
2241 
2242 static int hns3_handle_desc_filling(struct hns3_enet_ring *ring,
2243                     struct sk_buff *skb)
2244 {
2245     u32 space;
2246 
2247     if (!ring->tx_spare)
2248         goto out;
2249 
2250     space = hns3_tx_spare_space(ring);
2251 
2252     if (hns3_can_use_tx_sgl(ring, skb, space))
2253         return hns3_handle_tx_sgl(ring, skb);
2254 
2255     if (hns3_can_use_tx_bounce(ring, skb, space))
2256         return hns3_handle_tx_bounce(ring, skb);
2257 
2258 out:
2259     return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
2260 }
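
/* TX mapping strategy, in order of preference when a tx spare buffer
 * exists: map the whole skb with a single dma_map_sg() call
 * (hns3_handle_tx_sgl), bounce-copy the head or the whole skb into the
 * spare buffer (hns3_handle_tx_bounce), or fall back to mapping the
 * head and each fragment individually (hns3_fill_skb_to_desc).
 */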
2261 
2262 static int hns3_handle_skb_desc(struct hns3_enet_ring *ring,
2263                 struct sk_buff *skb,
2264                 struct hns3_desc_cb *desc_cb,
2265                 int next_to_use_head)
2266 {
2267     int ret;
2268 
2269     ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
2270                  desc_cb);
2271     if (unlikely(ret < 0))
2272         goto fill_err;
2273 
2274     /* 'ret < 0' means a filling error, 'ret == 0' means skb->len is
2275      * zero (which is unlikely), and 'ret > 0' is the number of tx
2276      * descriptors that need to be notified to the hw.
2277      */
2278     ret = hns3_handle_desc_filling(ring, skb);
2279     if (likely(ret > 0))
2280         return ret;
2281 
2282 fill_err:
2283     hns3_clear_desc(ring, next_to_use_head);
2284     return ret;
2285 }
2286 
2287 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
2288 {
2289     struct hns3_nic_priv *priv = netdev_priv(netdev);
2290     struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
2291     struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
2292     struct netdev_queue *dev_queue;
2293     int pre_ntu, ret;
2294     bool doorbell;
2295 
2296     /* Hardware can only handle frames longer than 32 bytes, pad shorter ones */
2297     if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
2298         hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
2299 
2300         hns3_ring_stats_update(ring, sw_err_cnt);
2301 
2302         return NETDEV_TX_OK;
2303     }
2304 
2305     /* Prefetch the data used later */
2306     prefetch(skb->data);
2307 
2308     ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
2309     if (unlikely(ret <= 0)) {
2310         if (ret == -EBUSY) {
2311             hns3_tx_doorbell(ring, 0, true);
2312             return NETDEV_TX_BUSY;
2313         }
2314 
2315         hns3_rl_err(netdev, "xmit error: %d!\n", ret);
2316         goto out_err_tx_ok;
2317     }
2318 
2319     ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use);
2320     if (unlikely(ret <= 0))
2321         goto out_err_tx_ok;
2322 
2323     pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
2324                     (ring->desc_num - 1);
2325 
2326     if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
2327         hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]);
2328 
2329     ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
2330                 cpu_to_le16(BIT(HNS3_TXD_FE_B));
2331     trace_hns3_tx_desc(ring, pre_ntu);
2332 
2333     skb_tx_timestamp(skb);
2334 
2335     /* All packets have been translated into descriptors */
2336     dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
2337     doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes,
2338                       netdev_xmit_more());
2339     hns3_tx_doorbell(ring, ret, doorbell);
2340 
2341     return NETDEV_TX_OK;
2342 
2343 out_err_tx_ok:
2344     dev_kfree_skb_any(skb);
2345     hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
2346     return NETDEV_TX_OK;
2347 }
2348 
2349 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
2350 {
2351     char format_mac_addr_perm[HNAE3_FORMAT_MAC_ADDR_LEN];
2352     char format_mac_addr_sa[HNAE3_FORMAT_MAC_ADDR_LEN];
2353     struct hnae3_handle *h = hns3_get_handle(netdev);
2354     struct sockaddr *mac_addr = p;
2355     int ret;
2356 
2357     if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
2358         return -EADDRNOTAVAIL;
2359 
2360     if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
2361         hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data);
2362         netdev_info(netdev, "already using mac address %s\n",
2363                 format_mac_addr_sa);
2364         return 0;
2365     }
2366 
2367     /* For a VF device, if there is a perm_addr, the user is not
2368      * allowed to change the address.
2369      */
2370     if (!hns3_is_phys_func(h->pdev) &&
2371         !is_zero_ether_addr(netdev->perm_addr)) {
2372         hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr);
2373         hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data);
2374         netdev_err(netdev, "has permanent MAC %s, user MAC %s not allowed\n",
2375                format_mac_addr_perm, format_mac_addr_sa);
2376         return -EPERM;
2377     }
2378 
2379     ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
2380     if (ret) {
2381         netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
2382         return ret;
2383     }
2384 
2385     eth_hw_addr_set(netdev, mac_addr->sa_data);
2386 
2387     return 0;
2388 }
2389 
2390 static int hns3_nic_do_ioctl(struct net_device *netdev,
2391                  struct ifreq *ifr, int cmd)
2392 {
2393     struct hnae3_handle *h = hns3_get_handle(netdev);
2394 
2395     if (!netif_running(netdev))
2396         return -EINVAL;
2397 
2398     if (!h->ae_algo->ops->do_ioctl)
2399         return -EOPNOTSUPP;
2400 
2401     return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
2402 }
2403 
2404 static int hns3_nic_set_features(struct net_device *netdev,
2405                  netdev_features_t features)
2406 {
2407     netdev_features_t changed = netdev->features ^ features;
2408     struct hns3_nic_priv *priv = netdev_priv(netdev);
2409     struct hnae3_handle *h = priv->ae_handle;
2410     bool enable;
2411     int ret;
2412 
2413     if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
2414         enable = !!(features & NETIF_F_GRO_HW);
2415         ret = h->ae_algo->ops->set_gro_en(h, enable);
2416         if (ret)
2417             return ret;
2418     }
2419 
2420     if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
2421         h->ae_algo->ops->enable_hw_strip_rxvtag) {
2422         enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
2423         ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
2424         if (ret)
2425             return ret;
2426     }
2427 
2428     if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
2429         enable = !!(features & NETIF_F_NTUPLE);
2430         h->ae_algo->ops->enable_fd(h, enable);
2431     }
2432 
2433     if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
2434         h->ae_algo->ops->cls_flower_active(h)) {
2435         netdev_err(netdev,
2436                "there are offloaded TC filters active, cannot disable HW TC offload");
2437         return -EINVAL;
2438     }
2439 
2440     if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2441         h->ae_algo->ops->enable_vlan_filter) {
2442         enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
2443         ret = h->ae_algo->ops->enable_vlan_filter(h, enable);
2444         if (ret)
2445             return ret;
2446     }
2447 
2448     netdev->features = features;
2449     return 0;
2450 }
2451 
2452 static netdev_features_t hns3_features_check(struct sk_buff *skb,
2453                          struct net_device *dev,
2454                          netdev_features_t features)
2455 {
2456 #define HNS3_MAX_HDR_LEN    480U
2457 #define HNS3_MAX_L4_HDR_LEN 60U
2458 
2459     size_t len;
2460 
2461     if (skb->ip_summed != CHECKSUM_PARTIAL)
2462         return features;
2463 
2464     if (skb->encapsulation)
2465         len = skb_inner_transport_header(skb) - skb->data;
2466     else
2467         len = skb_transport_header(skb) - skb->data;
2468 
2469     /* Assume L4 is 60 bytes, as TCP is the only protocol with a
2470      * flexible header length, and its max len is 60 bytes.
2471      */
2472     len += HNS3_MAX_L4_HDR_LEN;
2473 
2474     /* Hardware only supports checksum on skbs with a max header
2475      * len of 480 bytes.
2476      */
2477     if (len > HNS3_MAX_HDR_LEN)
2478         features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2479 
2480     return features;
2481 }
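
/* Worked example (illustrative): for a VXLAN-encapsulated TCP packet,
 * the distance from skb->data to the inner transport header is
 * 14 (outer ETH) + 20 (outer IPv4) + 8 (UDP) + 8 (VXLAN) +
 * 14 (inner ETH) + 20 (inner IPv4) = 84 bytes; adding the assumed
 * 60-byte maximum L4 header gives 144 <= 480, so the checksum and GSO
 * features are kept for that skb.
 */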
2482 
2483 static void hns3_fetch_stats(struct rtnl_link_stats64 *stats,
2484                  struct hns3_enet_ring *ring, bool is_tx)
2485 {
2486     unsigned int start;
2487 
2488     do {
2489         start = u64_stats_fetch_begin_irq(&ring->syncp);
2490         if (is_tx) {
2491             stats->tx_bytes += ring->stats.tx_bytes;
2492             stats->tx_packets += ring->stats.tx_pkts;
2493             stats->tx_dropped += ring->stats.sw_err_cnt;
2494             stats->tx_dropped += ring->stats.tx_vlan_err;
2495             stats->tx_dropped += ring->stats.tx_l4_proto_err;
2496             stats->tx_dropped += ring->stats.tx_l2l3l4_err;
2497             stats->tx_dropped += ring->stats.tx_tso_err;
2498             stats->tx_dropped += ring->stats.over_max_recursion;
2499             stats->tx_dropped += ring->stats.hw_limitation;
2500             stats->tx_dropped += ring->stats.copy_bits_err;
2501             stats->tx_dropped += ring->stats.skb2sgl_err;
2502             stats->tx_dropped += ring->stats.map_sg_err;
2503             stats->tx_errors += ring->stats.sw_err_cnt;
2504             stats->tx_errors += ring->stats.tx_vlan_err;
2505             stats->tx_errors += ring->stats.tx_l4_proto_err;
2506             stats->tx_errors += ring->stats.tx_l2l3l4_err;
2507             stats->tx_errors += ring->stats.tx_tso_err;
2508             stats->tx_errors += ring->stats.over_max_recursion;
2509             stats->tx_errors += ring->stats.hw_limitation;
2510             stats->tx_errors += ring->stats.copy_bits_err;
2511             stats->tx_errors += ring->stats.skb2sgl_err;
2512             stats->tx_errors += ring->stats.map_sg_err;
2513         } else {
2514             stats->rx_bytes += ring->stats.rx_bytes;
2515             stats->rx_packets += ring->stats.rx_pkts;
2516             stats->rx_dropped += ring->stats.l2_err;
2517             stats->rx_errors += ring->stats.l2_err;
2518             stats->rx_errors += ring->stats.l3l4_csum_err;
2519             stats->rx_crc_errors += ring->stats.l2_err;
2520             stats->multicast += ring->stats.rx_multicast;
2521             stats->rx_length_errors += ring->stats.err_pkt_len;
2522         }
2523     } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
2524 }
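
/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair
 * above is a seqcount read loop: if a writer updates the ring counters
 * while they are being read, the loop retries, so the 64-bit per-ring
 * statistics are read consistently even on 32-bit systems.
 */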
2525 
2526 static void hns3_nic_get_stats64(struct net_device *netdev,
2527                  struct rtnl_link_stats64 *stats)
2528 {
2529     struct hns3_nic_priv *priv = netdev_priv(netdev);
2530     int queue_num = priv->ae_handle->kinfo.num_tqps;
2531     struct hnae3_handle *handle = priv->ae_handle;
2532     struct rtnl_link_stats64 ring_total_stats;
2533     struct hns3_enet_ring *ring;
2534     unsigned int idx;
2535 
2536     if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
2537         return;
2538 
2539     handle->ae_algo->ops->update_stats(handle, &netdev->stats);
2540 
2541     memset(&ring_total_stats, 0, sizeof(ring_total_stats));
2542     for (idx = 0; idx < queue_num; idx++) {
2543         /* fetch the tx stats */
2544         ring = &priv->ring[idx];
2545         hns3_fetch_stats(&ring_total_stats, ring, true);
2546 
2547         /* fetch the rx stats */
2548         ring = &priv->ring[idx + queue_num];
2549         hns3_fetch_stats(&ring_total_stats, ring, false);
2550     }
2551 
2552     stats->tx_bytes = ring_total_stats.tx_bytes;
2553     stats->tx_packets = ring_total_stats.tx_packets;
2554     stats->rx_bytes = ring_total_stats.rx_bytes;
2555     stats->rx_packets = ring_total_stats.rx_packets;
2556 
2557     stats->rx_errors = ring_total_stats.rx_errors;
2558     stats->multicast = ring_total_stats.multicast;
2559     stats->rx_length_errors = ring_total_stats.rx_length_errors;
2560     stats->rx_crc_errors = ring_total_stats.rx_crc_errors;
2561     stats->rx_missed_errors = netdev->stats.rx_missed_errors;
2562 
2563     stats->tx_errors = ring_total_stats.tx_errors;
2564     stats->rx_dropped = ring_total_stats.rx_dropped;
2565     stats->tx_dropped = ring_total_stats.tx_dropped;
2566     stats->collisions = netdev->stats.collisions;
2567     stats->rx_over_errors = netdev->stats.rx_over_errors;
2568     stats->rx_frame_errors = netdev->stats.rx_frame_errors;
2569     stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
2570     stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
2571     stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
2572     stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
2573     stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
2574     stats->tx_window_errors = netdev->stats.tx_window_errors;
2575     stats->rx_compressed = netdev->stats.rx_compressed;
2576     stats->tx_compressed = netdev->stats.tx_compressed;
2577 }
2578 
2579 static int hns3_setup_tc(struct net_device *netdev, void *type_data)
2580 {
2581     struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
2582     struct hnae3_knic_private_info *kinfo;
2583     u8 tc = mqprio_qopt->qopt.num_tc;
2584     u16 mode = mqprio_qopt->mode;
2585     u8 hw = mqprio_qopt->qopt.hw;
2586     struct hnae3_handle *h;
2587 
2588     if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
2589            mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
2590         return -EOPNOTSUPP;
2591 
2592     if (tc > HNAE3_MAX_TC)
2593         return -EINVAL;
2594 
2595     if (!netdev)
2596         return -EINVAL;
2597 
2598     h = hns3_get_handle(netdev);
2599     kinfo = &h->kinfo;
2600 
2601     netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
2602 
2603     return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
2604         kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP;
2605 }
2606 
2607 static int hns3_setup_tc_cls_flower(struct hns3_nic_priv *priv,
2608                     struct flow_cls_offload *flow)
2609 {
2610     int tc = tc_classid_to_hwtc(priv->netdev, flow->classid);
2611     struct hnae3_handle *h = hns3_get_handle(priv->netdev);
2612 
2613     switch (flow->command) {
2614     case FLOW_CLS_REPLACE:
2615         if (h->ae_algo->ops->add_cls_flower)
2616             return h->ae_algo->ops->add_cls_flower(h, flow, tc);
2617         break;
2618     case FLOW_CLS_DESTROY:
2619         if (h->ae_algo->ops->del_cls_flower)
2620             return h->ae_algo->ops->del_cls_flower(h, flow);
2621         break;
2622     default:
2623         break;
2624     }
2625 
2626     return -EOPNOTSUPP;
2627 }
2628 
2629 static int hns3_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2630                   void *cb_priv)
2631 {
2632     struct hns3_nic_priv *priv = cb_priv;
2633 
2634     if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
2635         return -EOPNOTSUPP;
2636 
2637     switch (type) {
2638     case TC_SETUP_CLSFLOWER:
2639         return hns3_setup_tc_cls_flower(priv, type_data);
2640     default:
2641         return -EOPNOTSUPP;
2642     }
2643 }
2644 
2645 static LIST_HEAD(hns3_block_cb_list);
2646 
2647 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
2648                  void *type_data)
2649 {
2650     struct hns3_nic_priv *priv = netdev_priv(dev);
2651     int ret;
2652 
2653     switch (type) {
2654     case TC_SETUP_QDISC_MQPRIO:
2655         ret = hns3_setup_tc(dev, type_data);
2656         break;
2657     case TC_SETUP_BLOCK:
2658         ret = flow_block_cb_setup_simple(type_data,
2659                          &hns3_block_cb_list,
2660                          hns3_setup_tc_block_cb,
2661                          priv, priv, true);
2662         break;
2663     default:
2664         return -EOPNOTSUPP;
2665     }
2666 
2667     return ret;
2668 }
2669 
2670 static int hns3_vlan_rx_add_vid(struct net_device *netdev,
2671                 __be16 proto, u16 vid)
2672 {
2673     struct hnae3_handle *h = hns3_get_handle(netdev);
2674     int ret = -EIO;
2675 
2676     if (h->ae_algo->ops->set_vlan_filter)
2677         ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
2678 
2679     return ret;
2680 }
2681 
2682 static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
2683                  __be16 proto, u16 vid)
2684 {
2685     struct hnae3_handle *h = hns3_get_handle(netdev);
2686     int ret = -EIO;
2687 
2688     if (h->ae_algo->ops->set_vlan_filter)
2689         ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
2690 
2691     return ret;
2692 }
2693 
2694 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
2695                 u8 qos, __be16 vlan_proto)
2696 {
2697     struct hnae3_handle *h = hns3_get_handle(netdev);
2698     int ret = -EIO;
2699 
2700     netif_dbg(h, drv, netdev,
2701           "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n",
2702           vf, vlan, qos, ntohs(vlan_proto));
2703 
2704     if (h->ae_algo->ops->set_vf_vlan_filter)
2705         ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
2706                               qos, vlan_proto);
2707 
2708     return ret;
2709 }
2710 
2711 static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2712 {
2713     struct hnae3_handle *handle = hns3_get_handle(netdev);
2714 
2715     if (hns3_nic_resetting(netdev))
2716         return -EBUSY;
2717 
2718     if (!handle->ae_algo->ops->set_vf_spoofchk)
2719         return -EOPNOTSUPP;
2720 
2721     return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable);
2722 }
2723 
2724 static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable)
2725 {
2726     struct hnae3_handle *handle = hns3_get_handle(netdev);
2727 
2728     if (!handle->ae_algo->ops->set_vf_trust)
2729         return -EOPNOTSUPP;
2730 
2731     return handle->ae_algo->ops->set_vf_trust(handle, vf, enable);
2732 }
2733 
2734 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
2735 {
2736     struct hnae3_handle *h = hns3_get_handle(netdev);
2737     int ret;
2738 
2739     if (hns3_nic_resetting(netdev))
2740         return -EBUSY;
2741 
2742     if (!h->ae_algo->ops->set_mtu)
2743         return -EOPNOTSUPP;
2744 
2745     netif_dbg(h, drv, netdev,
2746           "change mtu from %u to %d\n", netdev->mtu, new_mtu);
2747 
2748     ret = h->ae_algo->ops->set_mtu(h, new_mtu);
2749     if (ret)
2750         netdev_err(netdev, "failed to change MTU in hardware %d\n",
2751                ret);
2752     else
2753         netdev->mtu = new_mtu;
2754 
2755     return ret;
2756 }
2757 
2758 static int hns3_get_timeout_queue(struct net_device *ndev)
2759 {
2760     int i;
2761 
2762     /* Find the stopped queue the same way the stack does */
2763     for (i = 0; i < ndev->num_tx_queues; i++) {
2764         struct netdev_queue *q;
2765         unsigned long trans_start;
2766 
2767         q = netdev_get_tx_queue(ndev, i);
2768         trans_start = READ_ONCE(q->trans_start);
2769         if (netif_xmit_stopped(q) &&
2770             time_after(jiffies,
2771                    (trans_start + ndev->watchdog_timeo))) {
2772 #ifdef CONFIG_BQL
2773             struct dql *dql = &q->dql;
2774 
2775             netdev_info(ndev, "DQL info last_cnt: %u, queued: %u, adj_limit: %u, completed: %u\n",
2776                     dql->last_obj_cnt, dql->num_queued,
2777                     dql->adj_limit, dql->num_completed);
2778 #endif
2779             netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
2780                     q->state,
2781                     jiffies_to_msecs(jiffies - trans_start));
2782             break;
2783         }
2784     }
2785 
2786     return i;
2787 }
2788 
2789 static void hns3_dump_queue_stats(struct net_device *ndev,
2790                   struct hns3_enet_ring *tx_ring,
2791                   int timeout_queue)
2792 {
2793     struct napi_struct *napi = &tx_ring->tqp_vector->napi;
2794     struct hns3_nic_priv *priv = netdev_priv(ndev);
2795 
2796     netdev_info(ndev,
2797             "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
2798             priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
2799             tx_ring->next_to_clean, napi->state);
2800 
2801     netdev_info(ndev,
2802             "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n",
2803             tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
2804             tx_ring->stats.sw_err_cnt, tx_ring->pending_buf);
2805 
2806     netdev_info(ndev,
2807             "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
2808             tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
2809             tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
2810 
2811     netdev_info(ndev, "tx_push: %llu, tx_mem_doorbell: %llu\n",
2812             tx_ring->stats.tx_push, tx_ring->stats.tx_mem_doorbell);
2813 }
2814 
2815 static void hns3_dump_queue_reg(struct net_device *ndev,
2816                 struct hns3_enet_ring *tx_ring)
2817 {
2818     netdev_info(ndev,
2819             "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
2820             hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_NUM_REG),
2821             hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_HEAD_REG),
2822             hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TAIL_REG),
2823             hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_ERR_REG),
2824             readl(tx_ring->tqp_vector->mask_addr));
2825     netdev_info(ndev,
2826             "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
2827             hns3_tqp_read_reg(tx_ring, HNS3_RING_EN_REG),
2828             hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TC_REG),
2829             hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_FBDNUM_REG),
2830             hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_OFFSET_REG),
2831             hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_EBDNUM_REG),
2832             hns3_tqp_read_reg(tx_ring,
2833                       HNS3_RING_TX_RING_EBD_OFFSET_REG));
2834 }
2835 
2836 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
2837 {
2838     struct hns3_nic_priv *priv = netdev_priv(ndev);
2839     struct hnae3_handle *h = hns3_get_handle(ndev);
2840     struct hns3_enet_ring *tx_ring;
2841     int timeout_queue;
2842 
2843     timeout_queue = hns3_get_timeout_queue(ndev);
2844     if (timeout_queue >= ndev->num_tx_queues) {
2845         netdev_info(ndev,
2846                 "no netdev TX timeout queue found, timeout count: %llu\n",
2847                 priv->tx_timeout_count);
2848         return false;
2849     }
2850 
2851     priv->tx_timeout_count++;
2852 
2853     tx_ring = &priv->ring[timeout_queue];
2854     hns3_dump_queue_stats(ndev, tx_ring, timeout_queue);
2855 
2856     /* When the MAC receives many pause frames continuously, it is
2857      * unable to send packets, which may cause a tx timeout
2858      */
2859     if (h->ae_algo->ops->get_mac_stats) {
2860         struct hns3_mac_stats mac_stats;
2861 
2862         h->ae_algo->ops->get_mac_stats(h, &mac_stats);
2863         netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
2864                 mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt);
2865     }
2866 
2867     hns3_dump_queue_reg(ndev, tx_ring);
2868 
2869     return true;
2870 }
2871 
2872 static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
2873 {
2874     struct hns3_nic_priv *priv = netdev_priv(ndev);
2875     struct hnae3_handle *h = priv->ae_handle;
2876 
2877     if (!hns3_get_tx_timeo_queue_info(ndev))
2878         return;
2879 
2880     /* request the reset, and let hclge determine
2881      * which reset level should be done
2882      */
2883     if (h->ae_algo->ops->reset_event)
2884         h->ae_algo->ops->reset_event(h->pdev, h);
2885 }
2886 
2887 #ifdef CONFIG_RFS_ACCEL
2888 static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
2889                   u16 rxq_index, u32 flow_id)
2890 {
2891     struct hnae3_handle *h = hns3_get_handle(dev);
2892     struct flow_keys fkeys;
2893 
2894     if (!h->ae_algo->ops->add_arfs_entry)
2895         return -EOPNOTSUPP;
2896 
2897     if (skb->encapsulation)
2898         return -EPROTONOSUPPORT;
2899 
2900     if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
2901         return -EPROTONOSUPPORT;
2902 
2903     if ((fkeys.basic.n_proto != htons(ETH_P_IP) &&
2904          fkeys.basic.n_proto != htons(ETH_P_IPV6)) ||
2905         (fkeys.basic.ip_proto != IPPROTO_TCP &&
2906          fkeys.basic.ip_proto != IPPROTO_UDP))
2907         return -EPROTONOSUPPORT;
2908 
2909     return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys);
2910 }
2911 #endif
2912 
2913 static int hns3_nic_get_vf_config(struct net_device *ndev, int vf,
2914                   struct ifla_vf_info *ivf)
2915 {
2916     struct hnae3_handle *h = hns3_get_handle(ndev);
2917 
2918     if (!h->ae_algo->ops->get_vf_config)
2919         return -EOPNOTSUPP;
2920 
2921     return h->ae_algo->ops->get_vf_config(h, vf, ivf);
2922 }
2923 
2924 static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf,
2925                       int link_state)
2926 {
2927     struct hnae3_handle *h = hns3_get_handle(ndev);
2928 
2929     if (!h->ae_algo->ops->set_vf_link_state)
2930         return -EOPNOTSUPP;
2931 
2932     return h->ae_algo->ops->set_vf_link_state(h, vf, link_state);
2933 }
2934 
2935 static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf,
2936                 int min_tx_rate, int max_tx_rate)
2937 {
2938     struct hnae3_handle *h = hns3_get_handle(ndev);
2939 
2940     if (!h->ae_algo->ops->set_vf_rate)
2941         return -EOPNOTSUPP;
2942 
2943     return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate,
2944                         false);
2945 }
2946 
2947 static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2948 {
2949     struct hnae3_handle *h = hns3_get_handle(netdev);
2950     char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
2951 
2952     if (!h->ae_algo->ops->set_vf_mac)
2953         return -EOPNOTSUPP;
2954 
2955     if (is_multicast_ether_addr(mac)) {
2956         hnae3_format_mac_addr(format_mac_addr, mac);
2957         netdev_err(netdev,
2958                "Invalid MAC:%s specified. Could not set MAC\n",
2959                format_mac_addr);
2960         return -EINVAL;
2961     }
2962 
2963     return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
2964 }
2965 
2966 static const struct net_device_ops hns3_nic_netdev_ops = {
2967     .ndo_open       = hns3_nic_net_open,
2968     .ndo_stop       = hns3_nic_net_stop,
2969     .ndo_start_xmit     = hns3_nic_net_xmit,
2970     .ndo_tx_timeout     = hns3_nic_net_timeout,
2971     .ndo_set_mac_address    = hns3_nic_net_set_mac_address,
2972     .ndo_eth_ioctl      = hns3_nic_do_ioctl,
2973     .ndo_change_mtu     = hns3_nic_change_mtu,
2974     .ndo_set_features   = hns3_nic_set_features,
2975     .ndo_features_check = hns3_features_check,
2976     .ndo_get_stats64    = hns3_nic_get_stats64,
2977     .ndo_setup_tc       = hns3_nic_setup_tc,
2978     .ndo_set_rx_mode    = hns3_nic_set_rx_mode,
2979     .ndo_vlan_rx_add_vid    = hns3_vlan_rx_add_vid,
2980     .ndo_vlan_rx_kill_vid   = hns3_vlan_rx_kill_vid,
2981     .ndo_set_vf_vlan    = hns3_ndo_set_vf_vlan,
2982     .ndo_set_vf_spoofchk    = hns3_set_vf_spoofchk,
2983     .ndo_set_vf_trust   = hns3_set_vf_trust,
2984 #ifdef CONFIG_RFS_ACCEL
2985     .ndo_rx_flow_steer  = hns3_rx_flow_steer,
2986 #endif
2987     .ndo_get_vf_config  = hns3_nic_get_vf_config,
2988     .ndo_set_vf_link_state  = hns3_nic_set_vf_link_state,
2989     .ndo_set_vf_rate    = hns3_nic_set_vf_rate,
2990     .ndo_set_vf_mac     = hns3_nic_set_vf_mac,
2991 };
2992 
2993 bool hns3_is_phys_func(struct pci_dev *pdev)
2994 {
2995     u32 dev_id = pdev->device;
2996 
2997     switch (dev_id) {
2998     case HNAE3_DEV_ID_GE:
2999     case HNAE3_DEV_ID_25GE:
3000     case HNAE3_DEV_ID_25GE_RDMA:
3001     case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
3002     case HNAE3_DEV_ID_50GE_RDMA:
3003     case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
3004     case HNAE3_DEV_ID_100G_RDMA_MACSEC:
3005     case HNAE3_DEV_ID_200G_RDMA:
3006         return true;
3007     case HNAE3_DEV_ID_VF:
3008     case HNAE3_DEV_ID_RDMA_DCB_PFC_VF:
3009         return false;
3010     default:
3011         dev_warn(&pdev->dev, "unrecognized pci device-id %u",
3012              dev_id);
3013     }
3014 
3015     return false;
3016 }
3017 
3018 static void hns3_disable_sriov(struct pci_dev *pdev)
3019 {
3020     /* If our VFs are assigned we cannot shut down SR-IOV
3021      * without causing issues, so just leave the hardware
3022      * available but disabled
3023      */
3024     if (pci_vfs_assigned(pdev)) {
3025         dev_warn(&pdev->dev,
3026              "disabling driver while VFs are assigned\n");
3027         return;
3028     }
3029 
3030     pci_disable_sriov(pdev);
3031 }
3032 
3033 /* hns3_probe - Device initialization routine
3034  * @pdev: PCI device information struct
3035  * @ent: entry in hns3_pci_tbl
3036  *
3037  * hns3_probe initializes a PF identified by a pci_dev structure.
3038  * The OS initialization, configuring of the PF private structure,
3039  * and a hardware reset occur.
3040  *
3041  * Returns 0 on success, negative on failure
3042  */
3043 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3044 {
3045     struct hnae3_ae_dev *ae_dev;
3046     int ret;
3047 
3048     ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
3049     if (!ae_dev)
3050         return -ENOMEM;
3051 
3052     ae_dev->pdev = pdev;
3053     ae_dev->flag = ent->driver_data;
3054     pci_set_drvdata(pdev, ae_dev);
3055 
3056     ret = hnae3_register_ae_dev(ae_dev);
3057     if (ret)
3058         pci_set_drvdata(pdev, NULL);
3059 
3060     return ret;
3061 }
3062 
3063 /**
3064  * hns3_clean_vf_config
3065  * @pdev: pointer to a pci_dev structure
3066  * @num_vfs: number of VFs allocated
3067  *
3068  * Clean residual VF config after disabling SR-IOV
3069  **/
3070 static void hns3_clean_vf_config(struct pci_dev *pdev, int num_vfs)
3071 {
3072     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3073 
3074     if (ae_dev->ops->clean_vf_config)
3075         ae_dev->ops->clean_vf_config(ae_dev, num_vfs);
3076 }
3077 
3078 /* hns3_remove - Device removal routine
3079  * @pdev: PCI device information struct
3080  */
3081 static void hns3_remove(struct pci_dev *pdev)
3082 {
3083     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3084 
3085     if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
3086         hns3_disable_sriov(pdev);
3087 
3088     hnae3_unregister_ae_dev(ae_dev);
3089     pci_set_drvdata(pdev, NULL);
3090 }
3091 
3092 /**
3093  * hns3_pci_sriov_configure
3094  * @pdev: pointer to a pci_dev structure
3095  * @num_vfs: number of VFs to allocate
3096  *
3097  * Enable or change the number of VFs. Called when the user updates the number
3098  * of VFs in sysfs.
3099  **/
3100 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
3101 {
3102     int ret;
3103 
3104     if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
3105         dev_warn(&pdev->dev, "Cannot configure SR-IOV\n");
3106         return -EINVAL;
3107     }
3108 
3109     if (num_vfs) {
3110         ret = pci_enable_sriov(pdev, num_vfs);
3111         if (ret)
3112             dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
3113         else
3114             return num_vfs;
3115     } else if (!pci_vfs_assigned(pdev)) {
3116         int num_vfs_pre = pci_num_vf(pdev);
3117 
3118         pci_disable_sriov(pdev);
3119         hns3_clean_vf_config(pdev, num_vfs_pre);
3120     } else {
3121         dev_warn(&pdev->dev,
3122              "Unable to free VFs because some are assigned to VMs.\n");
3123     }
3124 
3125     return 0;
3126 }
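
/* .sriov_configure is invoked from the PCI core's sysfs handler; as an
 * illustrative example, "echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs"
 * enables 4 VFs on the PF, and writing 0 releases them again (unless
 * some VFs are still assigned to VMs, as handled above).
 */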
3127 
3128 static void hns3_shutdown(struct pci_dev *pdev)
3129 {
3130     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3131 
3132     hnae3_unregister_ae_dev(ae_dev);
3133     pci_set_drvdata(pdev, NULL);
3134 
3135     if (system_state == SYSTEM_POWER_OFF)
3136         pci_set_power_state(pdev, PCI_D3hot);
3137 }
3138 
3139 static int __maybe_unused hns3_suspend(struct device *dev)
3140 {
3141     struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev);
3142 
3143     if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) {
3144         dev_info(dev, "Begin to suspend.\n");
3145         if (ae_dev->ops && ae_dev->ops->reset_prepare)
3146             ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET);
3147     }
3148 
3149     return 0;
3150 }
3151 
3152 static int __maybe_unused hns3_resume(struct device *dev)
3153 {
3154     struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev);
3155 
3156     if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) {
3157         dev_info(dev, "Begin to resume.\n");
3158         if (ae_dev->ops && ae_dev->ops->reset_done)
3159             ae_dev->ops->reset_done(ae_dev);
3160     }
3161 
3162     return 0;
3163 }
3164 
3165 static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
3166                         pci_channel_state_t state)
3167 {
3168     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3169     pci_ers_result_t ret;
3170 
3171     dev_info(&pdev->dev, "PCI error detected, state = %u\n", state);
3172 
3173     if (state == pci_channel_io_perm_failure)
3174         return PCI_ERS_RESULT_DISCONNECT;
3175 
3176     if (!ae_dev || !ae_dev->ops) {
3177         dev_err(&pdev->dev,
3178             "Can't recover - error happened before device initialized\n");
3179         return PCI_ERS_RESULT_NONE;
3180     }
3181 
3182     if (ae_dev->ops->handle_hw_ras_error)
3183         ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
3184     else
3185         return PCI_ERS_RESULT_NONE;
3186 
3187     return ret;
3188 }
3189 
3190 static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
3191 {
3192     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3193     const struct hnae3_ae_ops *ops;
3194     enum hnae3_reset_type reset_type;
3195     struct device *dev = &pdev->dev;
3196 
3197     if (!ae_dev || !ae_dev->ops)
3198         return PCI_ERS_RESULT_NONE;
3199 
3200     ops = ae_dev->ops;
3201     /* request the reset */
3202     if (ops->reset_event && ops->get_reset_level &&
3203         ops->set_default_reset_request) {
3204         if (ae_dev->hw_err_reset_req) {
3205             reset_type = ops->get_reset_level(ae_dev,
3206                         &ae_dev->hw_err_reset_req);
3207             ops->set_default_reset_request(ae_dev, reset_type);
3208             dev_info(dev, "requesting reset due to PCI error\n");
3209             ops->reset_event(pdev, NULL);
3210         }
3211 
3212         return PCI_ERS_RESULT_RECOVERED;
3213     }
3214 
3215     return PCI_ERS_RESULT_DISCONNECT;
3216 }
3217 
3218 static void hns3_reset_prepare(struct pci_dev *pdev)
3219 {
3220     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3221 
3222     dev_info(&pdev->dev, "FLR prepare\n");
3223     if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare)
3224         ae_dev->ops->reset_prepare(ae_dev, HNAE3_FLR_RESET);
3225 }
3226 
3227 static void hns3_reset_done(struct pci_dev *pdev)
3228 {
3229     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3230 
3231     dev_info(&pdev->dev, "FLR done\n");
3232     if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done)
3233         ae_dev->ops->reset_done(ae_dev);
3234 }
3235 
3236 static const struct pci_error_handlers hns3_err_handler = {
3237     .error_detected = hns3_error_detected,
3238     .slot_reset     = hns3_slot_reset,
3239     .reset_prepare  = hns3_reset_prepare,
3240     .reset_done = hns3_reset_done,
3241 };
3242 
3243 static SIMPLE_DEV_PM_OPS(hns3_pm_ops, hns3_suspend, hns3_resume);
3244 
3245 static struct pci_driver hns3_driver = {
3246     .name     = hns3_driver_name,
3247     .id_table = hns3_pci_tbl,
3248     .probe    = hns3_probe,
3249     .remove   = hns3_remove,
3250     .shutdown = hns3_shutdown,
3251     .driver.pm  = &hns3_pm_ops,
3252     .sriov_configure = hns3_pci_sriov_configure,
3253     .err_handler    = &hns3_err_handler,
3254 };
3255 
3256 /* set default features for hns3 */
3257 static void hns3_set_default_feature(struct net_device *netdev)
3258 {
3259     struct hnae3_handle *h = hns3_get_handle(netdev);
3260     struct pci_dev *pdev = h->pdev;
3261     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3262 
3263     netdev->priv_flags |= IFF_UNICAST_FLT;
3264 
3265     netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
3266 
3267     netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
3268         NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
3269         NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
3270         NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
3271         NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
3272         NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
3273 
3274     if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3275         netdev->features |= NETIF_F_GRO_HW;
3276 
3277         if (!(h->flags & HNAE3_SUPPORT_VF))
3278             netdev->features |= NETIF_F_NTUPLE;
3279     }
3280 
3281     if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps))
3282         netdev->features |= NETIF_F_GSO_UDP_L4;
3283 
3284     if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
3285         netdev->features |= NETIF_F_HW_CSUM;
3286     else
3287         netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3288 
3289     if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps))
3290         netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
3291 
3292     if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps))
3293         netdev->features |= NETIF_F_HW_TC;
3294 
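         /* All the features enabled above are also user-controllable via
          * ethtool, except VLAN filtering when the hardware cannot modify
          * the VLAN filter state at runtime.
          */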
3295     netdev->hw_features |= netdev->features;
3296     if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
3297         netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
3298 
3299     netdev->vlan_features |= netdev->features &
3300         ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX |
3301           NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_GRO_HW | NETIF_F_NTUPLE |
3302           NETIF_F_HW_TC);
3303 
3304     netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID;
3305 }
3306 
3307 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
3308                  struct hns3_desc_cb *cb)
3309 {
3310     unsigned int order = hns3_page_order(ring);
3311     struct page *p;
3312 
3313     if (ring->page_pool) {
3314         p = page_pool_dev_alloc_frag(ring->page_pool,
3315                          &cb->page_offset,
3316                          hns3_buf_size(ring));
3317         if (unlikely(!p))
3318             return -ENOMEM;
3319 
3320         cb->priv = p;
3321         cb->buf = page_address(p);
3322         cb->dma = page_pool_get_dma_addr(p);
3323         cb->type = DESC_TYPE_PP_FRAG;
3324         cb->reuse_flag = 0;
3325         return 0;
3326     }
3327 
3328     p = dev_alloc_pages(order);
3329     if (!p)
3330         return -ENOMEM;
3331 
3332     cb->priv = p;
3333     cb->page_offset = 0;
3334     cb->reuse_flag = 0;
3335     cb->buf  = page_address(p);
3336     cb->length = hns3_page_size(ring);
3337     cb->type = DESC_TYPE_PAGE;
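         /* Take USHRT_MAX page references up front and track them in
          * pagecnt_bias, so reusing the page only costs a local decrement
          * instead of an atomic page-ref operation per rx buffer.
          */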
3338     page_ref_add(p, USHRT_MAX - 1);
3339     cb->pagecnt_bias = USHRT_MAX;
3340 
3341     return 0;
3342 }
3343 
3344 static void hns3_free_buffer(struct hns3_enet_ring *ring,
3345                  struct hns3_desc_cb *cb, int budget)
3346 {
3347     if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD |
3348             DESC_TYPE_BOUNCE_ALL | DESC_TYPE_SGL_SKB))
3349         napi_consume_skb(cb->priv, budget);
3350     else if (!HNAE3_IS_TX_RING(ring)) {
3351         if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias)
3352             __page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
3353         else if (cb->type & DESC_TYPE_PP_FRAG)
3354             page_pool_put_full_page(ring->page_pool, cb->priv,
3355                         false);
3356     }
3357     memset(cb, 0, sizeof(*cb));
3358 }
3359 
3360 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
3361 {
3362     cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
3363                    cb->length, ring_to_dma_dir(ring));
3364 
3365     if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
3366         return -EIO;
3367 
3368     return 0;
3369 }
3370 
3371 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
3372                   struct hns3_desc_cb *cb)
3373 {
3374     if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
3375         dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
3376                  ring_to_dma_dir(ring));
3377     else if ((cb->type & DESC_TYPE_PAGE) && cb->length)
3378         dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
3379                    ring_to_dma_dir(ring));
3380     else if (cb->type & (DESC_TYPE_BOUNCE_ALL | DESC_TYPE_BOUNCE_HEAD |
3381                  DESC_TYPE_SGL_SKB))
3382         hns3_tx_spare_reclaim_cb(ring, cb);
3383 }
3384 
3385 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
3386 {
3387     hns3_unmap_buffer(ring, &ring->desc_cb[i]);
3388     ring->desc[i].addr = 0;
3389     ring->desc_cb[i].refill = 0;
3390 }
3391 
3392 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
3393                     int budget)
3394 {
3395     struct hns3_desc_cb *cb = &ring->desc_cb[i];
3396 
3397     if (!ring->desc_cb[i].dma)
3398         return;
3399 
3400     hns3_buffer_detach(ring, i);
3401     hns3_free_buffer(ring, cb, budget);
3402 }
3403 
3404 static void hns3_free_buffers(struct hns3_enet_ring *ring)
3405 {
3406     int i;
3407 
3408     for (i = 0; i < ring->desc_num; i++)
3409         hns3_free_buffer_detach(ring, i, 0);
3410 }
3411 
3412 /* free desc along with its attached buffer */
3413 static void hns3_free_desc(struct hns3_enet_ring *ring)
3414 {
3415     int size = ring->desc_num * sizeof(ring->desc[0]);
3416 
3417     hns3_free_buffers(ring);
3418 
3419     if (ring->desc) {
3420         dma_free_coherent(ring_to_dev(ring), size,
3421                   ring->desc, ring->desc_dma_addr);
3422         ring->desc = NULL;
3423     }
3424 }
3425 
3426 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
3427 {
3428     int size = ring->desc_num * sizeof(ring->desc[0]);
3429 
3430     ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
3431                     &ring->desc_dma_addr, GFP_KERNEL);
3432     if (!ring->desc)
3433         return -ENOMEM;
3434 
3435     return 0;
3436 }
3437 
3438 static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring,
3439                    struct hns3_desc_cb *cb)
3440 {
3441     int ret;
3442 
3443     ret = hns3_alloc_buffer(ring, cb);
3444     if (ret || ring->page_pool)
3445         goto out;
3446 
3447     ret = hns3_map_buffer(ring, cb);
3448     if (ret)
3449         goto out_with_buf;
3450 
3451     return 0;
3452 
3453 out_with_buf:
3454     hns3_free_buffer(ring, cb, 0);
3455 out:
3456     return ret;
3457 }
3458 
3459 static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
3460 {
3461     int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]);
3462 
3463     if (ret)
3464         return ret;
3465 
3466     ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
3467                      ring->desc_cb[i].page_offset);
3468     ring->desc_cb[i].refill = 1;
3469 
3470     return 0;
3471 }
3472 
3473 /* Allocate buffers for raw packets and map them for DMA */
3474 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
3475 {
3476     int i, j, ret;
3477 
3478     for (i = 0; i < ring->desc_num; i++) {
3479         ret = hns3_alloc_and_attach_buffer(ring, i);
3480         if (ret)
3481             goto out_buffer_fail;
3482     }
3483 
3484     return 0;
3485 
3486 out_buffer_fail:
3487     for (j = i - 1; j >= 0; j--)
3488         hns3_free_buffer_detach(ring, j, 0);
3489     return ret;
3490 }
3491 
3492 /* detach an in-use buffer and replace it with a reserved one */
3493 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
3494                 struct hns3_desc_cb *res_cb)
3495 {
3496     hns3_unmap_buffer(ring, &ring->desc_cb[i]);
3497     ring->desc_cb[i] = *res_cb;
3498     ring->desc_cb[i].refill = 1;
3499     ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
3500                      ring->desc_cb[i].page_offset);
3501     ring->desc[i].rx.bd_base_info = 0;
3502 }
3503 
3504 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
3505 {
3506     ring->desc_cb[i].reuse_flag = 0;
3507     ring->desc_cb[i].refill = 1;
3508     ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
3509                      ring->desc_cb[i].page_offset);
3510     ring->desc[i].rx.bd_base_info = 0;
3511 
3512     dma_sync_single_for_device(ring_to_dev(ring),
3513             ring->desc_cb[i].dma + ring->desc_cb[i].page_offset,
3514             hns3_buf_size(ring),
3515             DMA_FROM_DEVICE);
3516 }
3517 
3518 static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
3519                   int *bytes, int *pkts, int budget)
3520 {
3521     /* Pairs with the ring->last_to_use update in hns3_tx_doorbell();
3522      * smp_store_release() is not used in hns3_tx_doorbell() because
3523      * the doorbell operation already has the needed barrier.
3524      */
3525     int ltu = smp_load_acquire(&ring->last_to_use);
3526     int ntc = ring->next_to_clean;
3527     struct hns3_desc_cb *desc_cb;
3528     bool reclaimed = false;
3529     struct hns3_desc *desc;
3530 
3531     while (ltu != ntc) {
3532         desc = &ring->desc[ntc];
3533 
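             /* A descriptor with the VLD bit still set has not been
              * completed by the hardware yet; stop reclaiming here.
              */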
3534         if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) &
3535                 BIT(HNS3_TXD_VLD_B))
3536             break;
3537 
3538         desc_cb = &ring->desc_cb[ntc];
3539 
3540         if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_ALL |
3541                      DESC_TYPE_BOUNCE_HEAD |
3542                      DESC_TYPE_SGL_SKB)) {
3543             (*pkts)++;
3544             (*bytes) += desc_cb->send_bytes;
3545         }
3546 
3547         /* desc_cb is cleaned by hns3_free_buffer_detach() below */
3548         hns3_free_buffer_detach(ring, ntc, budget);
3549 
3550         if (++ntc == ring->desc_num)
3551             ntc = 0;
3552 
3553         /* Issue prefetch for next Tx descriptor */
3554         prefetch(&ring->desc_cb[ntc]);
3555         reclaimed = true;
3556     }
3557 
3558     if (unlikely(!reclaimed))
3559         return false;
3560 
3561     /* This smp_store_release() pairs with smp_load_acquire() in
3562      * ring_space called by hns3_nic_net_xmit.
3563      */
3564     smp_store_release(&ring->next_to_clean, ntc);
3565 
3566     hns3_tx_spare_update(ring);
3567 
3568     return true;
3569 }
3570 
3571 void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
3572 {
3573     struct net_device *netdev = ring_to_netdev(ring);
3574     struct hns3_nic_priv *priv = netdev_priv(netdev);
3575     struct netdev_queue *dev_queue;
3576     int bytes, pkts;
3577 
3578     bytes = 0;
3579     pkts = 0;
3580 
3581     if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget)))
3582         return;
3583 
3584     ring->tqp_vector->tx_group.total_bytes += bytes;
3585     ring->tqp_vector->tx_group.total_packets += pkts;
3586 
3587     u64_stats_update_begin(&ring->syncp);
3588     ring->stats.tx_bytes += bytes;
3589     ring->stats.tx_pkts += pkts;
3590     u64_stats_update_end(&ring->syncp);
3591 
3592     dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
3593     netdev_tx_completed_queue(dev_queue, pkts, bytes);
3594 
3595     if (unlikely(netif_carrier_ok(netdev) &&
3596              ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) {
3597         /* Make sure that anybody stopping the queue after this
3598          * sees the new next_to_clean.
3599          */
3600         smp_mb();
3601         if (netif_tx_queue_stopped(dev_queue) &&
3602             !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
3603             netif_tx_wake_queue(dev_queue);
3604             ring->stats.restart_queue++;
3605         }
3606     }
3607 }
3608 
3609 static int hns3_desc_unused(struct hns3_enet_ring *ring)
3610 {
3611     int ntc = ring->next_to_clean;
3612     int ntu = ring->next_to_use;
3613 
3614     if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
3615         return ring->desc_num;
3616 
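         /* Count the unused descriptors, accounting for ring wrap-around:
          * e.g. with desc_num = 8, ntc = 2 and ntu = 6 this yields
          * ((2 >= 6) ? 0 : 8) + 2 - 6 = 4 unused descriptors.
          */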
3617     return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
3618 }
3619 
3620 /* Return true if there is any allocation failure */
3621 static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
3622                       int cleaned_count)
3623 {
3624     struct hns3_desc_cb *desc_cb;
3625     struct hns3_desc_cb res_cbs;
3626     int i, ret;
3627 
3628     for (i = 0; i < cleaned_count; i++) {
3629         desc_cb = &ring->desc_cb[ring->next_to_use];
3630         if (desc_cb->reuse_flag) {
3631             hns3_ring_stats_update(ring, reuse_pg_cnt);
3632 
3633             hns3_reuse_buffer(ring, ring->next_to_use);
3634         } else {
3635             ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
3636             if (ret) {
3637                 hns3_ring_stats_update(ring, sw_err_cnt);
3638 
3639                 hns3_rl_err(ring_to_netdev(ring),
3640                         "alloc rx buffer failed: %d\n",
3641                         ret);
3642 
3643                 writel(i, ring->tqp->io_base +
3644                        HNS3_RING_RX_RING_HEAD_REG);
3645                 return true;
3646             }
3647             hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
3648 
3649             hns3_ring_stats_update(ring, non_reuse_pg);
3650         }
3651 
3652         ring_ptr_move_fw(ring, next_to_use);
3653     }
3654 
3655     writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
3656     return false;
3657 }
3658 
3659 static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
3660 {
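         /* The page can be reused only when the driver holds all of its
          * references, i.e. the stack has released every frag it was given.
          */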
3661     return page_count(cb->priv) == cb->pagecnt_bias;
3662 }
3663 
3664 static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
3665                     struct hns3_enet_ring *ring,
3666                     int pull_len,
3667                     struct hns3_desc_cb *desc_cb)
3668 {
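         /* Copybreak path: small frags are copied into a freshly allocated
          * napi frag so the original rx page can be reused immediately.
          */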
3669     struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
3670     u32 frag_offset = desc_cb->page_offset + pull_len;
3671     int size = le16_to_cpu(desc->rx.size);
3672     u32 frag_size = size - pull_len;
3673     void *frag = napi_alloc_frag(frag_size);
3674 
3675     if (unlikely(!frag)) {
3676         hns3_ring_stats_update(ring, frag_alloc_err);
3677 
3678         hns3_rl_err(ring_to_netdev(ring),
3679                 "failed to allocate rx frag\n");
3680         return -ENOMEM;
3681     }
3682 
3683     desc_cb->reuse_flag = 1;
3684     memcpy(frag, desc_cb->buf + frag_offset, frag_size);
3685     skb_add_rx_frag(skb, i, virt_to_page(frag),
3686             offset_in_page(frag), frag_size, frag_size);
3687 
3688     hns3_ring_stats_update(ring, frag_alloc);
3689     return 0;
3690 }
3691 
3692 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
3693                 struct hns3_enet_ring *ring, int pull_len,
3694                 struct hns3_desc_cb *desc_cb)
3695 {
3696     struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
3697     u32 frag_offset = desc_cb->page_offset + pull_len;
3698     int size = le16_to_cpu(desc->rx.size);
3699     u32 truesize = hns3_buf_size(ring);
3700     u32 frag_size = size - pull_len;
3701     int ret = 0;
3702     bool reused;
3703 
3704     if (ring->page_pool) {
3705         skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset,
3706                 frag_size, truesize);
3707         return;
3708     }
3709 
3710     /* Avoid re-using a remote or pfmemalloc page */
3711     if (unlikely(!dev_page_is_reusable(desc_cb->priv)))
3712         goto out;
3713 
3714     reused = hns3_can_reuse_page(desc_cb);
3715 
3716     /* The rx page can be reused when:
3717      * 1. the page is owned only by the driver and page_offset is
3718      *    zero: the buffer at offset 0 (size truesize) is handed to
3719      *    the stack by skb_add_rx_frag(), and the rest of the page
3720      *    can be reused by the driver.
3721      * Or
3722      * 2. the page is owned only by the driver and page_offset is
3723      *    non-zero: the buffer at page_offset (size truesize) is
3724      *    handed to the stack, and the buffer at offset 0 can be
3725      *    reused by the driver.
3726      */
3727     if ((!desc_cb->page_offset && reused) ||
3728         ((desc_cb->page_offset + truesize + truesize) <=
3729          hns3_page_size(ring) && desc_cb->page_offset)) {
3730         desc_cb->page_offset += truesize;
3731         desc_cb->reuse_flag = 1;
3732     } else if (desc_cb->page_offset && reused) {
3733         desc_cb->page_offset = 0;
3734         desc_cb->reuse_flag = 1;
3735     } else if (frag_size <= ring->rx_copybreak) {
3736         ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb);
3737         if (ret)
3738             goto out;
3739     }
3740 
3741 out:
3742     desc_cb->pagecnt_bias--;
3743 
3744     if (unlikely(!desc_cb->pagecnt_bias)) {
3745         page_ref_add(desc_cb->priv, USHRT_MAX);
3746         desc_cb->pagecnt_bias = USHRT_MAX;
3747     }
3748 
3749     skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset,
3750             frag_size, truesize);
3751 
3752     if (unlikely(!desc_cb->reuse_flag))
3753         __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
3754 }
3755 
3756 static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
3757 {
3758     __be16 type = skb->protocol;
3759     struct tcphdr *th;
3760     int depth = 0;
3761 
3762     while (eth_type_vlan(type)) {
3763         struct vlan_hdr *vh;
3764 
3765         if ((depth + VLAN_HLEN) > skb_headlen(skb))
3766             return -EFAULT;
3767 
3768         vh = (struct vlan_hdr *)(skb->data + depth);
3769         type = vh->h_vlan_encapsulated_proto;
3770         depth += VLAN_HLEN;
3771     }
3772 
3773     skb_set_network_header(skb, depth);
3774 
3775     if (type == htons(ETH_P_IP)) {
3776         const struct iphdr *iph = ip_hdr(skb);
3777 
3778         depth += sizeof(struct iphdr);
3779         skb_set_transport_header(skb, depth);
3780         th = tcp_hdr(skb);
3781         th->check = ~tcp_v4_check(skb->len - depth, iph->saddr,
3782                       iph->daddr, 0);
3783     } else if (type == htons(ETH_P_IPV6)) {
3784         const struct ipv6hdr *iph = ipv6_hdr(skb);
3785 
3786         depth += sizeof(struct ipv6hdr);
3787         skb_set_transport_header(skb, depth);
3788         th = tcp_hdr(skb);
3789         th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
3790                       &iph->daddr, 0);
3791     } else {
3792         hns3_rl_err(skb->dev,
3793                 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
3794                 be16_to_cpu(type), depth);
3795         return -EFAULT;
3796     }
3797 
3798     skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
3799     if (th->cwr)
3800         skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
3801 
3802     if (l234info & BIT(HNS3_RXD_GRO_FIXID_B))
3803         skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
3804 
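         /* Only the pseudo-header checksum was filled in above; report
          * CHECKSUM_PARTIAL so the remaining checksum work is completed
          * later by the stack or the hardware.
          */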
3805     skb->csum_start = (unsigned char *)th - skb->head;
3806     skb->csum_offset = offsetof(struct tcphdr, check);
3807     skb->ip_summed = CHECKSUM_PARTIAL;
3808 
3809     trace_hns3_gro(skb);
3810 
3811     return 0;
3812 }
3813 
3814 static bool hns3_checksum_complete(struct hns3_enet_ring *ring,
3815                    struct sk_buff *skb, u32 ptype, u16 csum)
3816 {
3817     if (ptype == HNS3_INVALID_PTYPE ||
3818         hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE)
3819         return false;
3820 
3821     hns3_ring_stats_update(ring, csum_complete);
3822     skb->ip_summed = CHECKSUM_COMPLETE;
3823     skb->csum = csum_unfold((__force __sum16)csum);
3824 
3825     return true;
3826 }
3827 
3828 static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info,
3829                 u32 ol_info, u32 ptype)
3830 {
3831     int l3_type, l4_type;
3832     int ol4_type;
3833 
3834     if (ptype != HNS3_INVALID_PTYPE) {
3835         skb->csum_level = hns3_rx_ptype_tbl[ptype].csum_level;
3836         skb->ip_summed = hns3_rx_ptype_tbl[ptype].ip_summed;
3837 
3838         return;
3839     }
3840 
3841     ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
3842                    HNS3_RXD_OL4ID_S);
3843     switch (ol4_type) {
3844     case HNS3_OL4_TYPE_MAC_IN_UDP:
3845     case HNS3_OL4_TYPE_NVGRE:
3846         skb->csum_level = 1;
3847         fallthrough;
3848     case HNS3_OL4_TYPE_NO_TUN:
3849         l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
3850                       HNS3_RXD_L3ID_S);
3851         l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
3852                       HNS3_RXD_L4ID_S);
3853         /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
3854         if ((l3_type == HNS3_L3_TYPE_IPV4 ||
3855              l3_type == HNS3_L3_TYPE_IPV6) &&
3856             (l4_type == HNS3_L4_TYPE_UDP ||
3857              l4_type == HNS3_L4_TYPE_TCP ||
3858              l4_type == HNS3_L4_TYPE_SCTP))
3859             skb->ip_summed = CHECKSUM_UNNECESSARY;
3860         break;
3861     default:
3862         break;
3863     }
3864 }
3865 
3866 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
3867                  u32 l234info, u32 bd_base_info, u32 ol_info,
3868                  u16 csum)
3869 {
3870     struct net_device *netdev = ring_to_netdev(ring);
3871     struct hns3_nic_priv *priv = netdev_priv(netdev);
3872     u32 ptype = HNS3_INVALID_PTYPE;
3873 
3874     skb->ip_summed = CHECKSUM_NONE;
3875 
3876     skb_checksum_none_assert(skb);
3877 
3878     if (!(netdev->features & NETIF_F_RXCSUM))
3879         return;
3880 
3881     if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state))
3882         ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
3883                     HNS3_RXD_PTYPE_S);
3884 
3885     if (hns3_checksum_complete(ring, skb, ptype, csum))
3886         return;
3887 
3888     /* check if hardware has done the L3/L4 checksum */
3889     if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
3890         return;
3891 
3892     if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
3893                  BIT(HNS3_RXD_OL3E_B) |
3894                  BIT(HNS3_RXD_OL4E_B)))) {
3895         hns3_ring_stats_update(ring, l3l4_csum_err);
3896 
3897         return;
3898     }
3899 
3900     hns3_rx_handle_csum(skb, l234info, ol_info, ptype);
3901 }
3902 
3903 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
3904 {
3905     if (skb_has_frag_list(skb))
3906         napi_gro_flush(&ring->tqp_vector->napi, false);
3907 
3908     napi_gro_receive(&ring->tqp_vector->napi, skb);
3909 }
3910 
3911 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
3912                 struct hns3_desc *desc, u32 l234info,
3913                 u16 *vlan_tag)
3914 {
3915     struct hnae3_handle *handle = ring->tqp->handle;
3916     struct pci_dev *pdev = ring->tqp->handle->pdev;
3917     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3918 
3919     if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) {
3920         *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
3921         if (!(*vlan_tag & VLAN_VID_MASK))
3922             *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
3923 
3924         return (*vlan_tag != 0);
3925     }
3926 
3927 #define HNS3_STRP_OUTER_VLAN    0x1
3928 #define HNS3_STRP_INNER_VLAN    0x2
3929 #define HNS3_STRP_BOTH      0x3
3930 
3931     /* Hardware always inserts the VLAN tag into the RX descriptor
3932      * when it strips the tag from the packet; the driver needs to
3933      * determine which tag to report to the stack.
3934      */
3935     switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
3936                 HNS3_RXD_STRP_TAGP_S)) {
3937     case HNS3_STRP_OUTER_VLAN:
3938         if (handle->port_base_vlan_state !=
3939                 HNAE3_PORT_BASE_VLAN_DISABLE)
3940             return false;
3941 
3942         *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
3943         return true;
3944     case HNS3_STRP_INNER_VLAN:
3945         if (handle->port_base_vlan_state !=
3946                 HNAE3_PORT_BASE_VLAN_DISABLE)
3947             return false;
3948 
3949         *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
3950         return true;
3951     case HNS3_STRP_BOTH:
3952         if (handle->port_base_vlan_state ==
3953                 HNAE3_PORT_BASE_VLAN_DISABLE)
3954             *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
3955         else
3956             *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
3957 
3958         return true;
3959     default:
3960         return false;
3961     }
3962 }
3963 
3964 static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
3965 {
3966     ring->desc[ring->next_to_clean].rx.bd_base_info &=
3967         cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
3968     ring->desc_cb[ring->next_to_clean].refill = 0;
3969     ring->next_to_clean += 1;
3970 
3971     if (unlikely(ring->next_to_clean == ring->desc_num))
3972         ring->next_to_clean = 0;
3973 }
3974 
3975 static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
3976               unsigned char *va)
3977 {
3978     struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
3979     struct net_device *netdev = ring_to_netdev(ring);
3980     struct sk_buff *skb;
3981 
3982     ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
3983     skb = ring->skb;
3984     if (unlikely(!skb)) {
3985         hns3_rl_err(netdev, "alloc rx skb fail\n");
3986         hns3_ring_stats_update(ring, sw_err_cnt);
3987 
3988         return -ENOMEM;
3989     }
3990 
3991     trace_hns3_rx_desc(ring);
3992     prefetchw(skb->data);
3993 
3994     ring->pending_buf = 1;
3995     ring->frag_num = 0;
3996     ring->tail_skb = NULL;
3997     if (length <= HNS3_RX_HEAD_SIZE) {
3998         memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
3999 
4000         /* We can reuse buffer as-is, just make sure it is reusable */
4001         if (dev_page_is_reusable(desc_cb->priv))
4002             desc_cb->reuse_flag = 1;
4003         else if (desc_cb->type & DESC_TYPE_PP_FRAG)
4004             page_pool_put_full_page(ring->page_pool, desc_cb->priv,
4005                         false);
4006         else /* This page cannot be reused so discard it */
4007             __page_frag_cache_drain(desc_cb->priv,
4008                         desc_cb->pagecnt_bias);
4009 
4010         hns3_rx_ring_move_fw(ring);
4011         return 0;
4012     }
4013 
4014     if (ring->page_pool)
4015         skb_mark_for_recycle(skb);
4016 
4017     hns3_ring_stats_update(ring, seg_pkt_cnt);
4018 
4019     ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
4020     __skb_put(skb, ring->pull_len);
4021     hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
4022                 desc_cb);
4023     hns3_rx_ring_move_fw(ring);
4024 
4025     return 0;
4026 }
4027 
4028 static int hns3_add_frag(struct hns3_enet_ring *ring)
4029 {
4030     struct sk_buff *skb = ring->skb;
4031     struct sk_buff *head_skb = skb;
4032     struct sk_buff *new_skb;
4033     struct hns3_desc_cb *desc_cb;
4034     struct hns3_desc *desc;
4035     u32 bd_base_info;
4036 
4037     do {
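             /* Walk the remaining BDs of a multi-BD packet until the FE
              * (frame end) bit is seen, chaining extra skbs into a frag
              * list once MAX_SKB_FRAGS frags are used.
              */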
4038         desc = &ring->desc[ring->next_to_clean];
4039         desc_cb = &ring->desc_cb[ring->next_to_clean];
4040         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
4041         /* make sure the HW descriptor write has completed */
4042         dma_rmb();
4043         if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
4044             return -ENXIO;
4045 
4046         if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
4047             new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0);
4048             if (unlikely(!new_skb)) {
4049                 hns3_rl_err(ring_to_netdev(ring),
4050                         "alloc rx fraglist skb fail\n");
4051                 return -ENXIO;
4052             }
4053 
4054             if (ring->page_pool)
4055                 skb_mark_for_recycle(new_skb);
4056 
4057             ring->frag_num = 0;
4058 
4059             if (ring->tail_skb) {
4060                 ring->tail_skb->next = new_skb;
4061                 ring->tail_skb = new_skb;
4062             } else {
4063                 skb_shinfo(skb)->frag_list = new_skb;
4064                 ring->tail_skb = new_skb;
4065             }
4066         }
4067 
4068         if (ring->tail_skb) {
4069             head_skb->truesize += hns3_buf_size(ring);
4070             head_skb->data_len += le16_to_cpu(desc->rx.size);
4071             head_skb->len += le16_to_cpu(desc->rx.size);
4072             skb = ring->tail_skb;
4073         }
4074 
4075         dma_sync_single_for_cpu(ring_to_dev(ring),
4076                 desc_cb->dma + desc_cb->page_offset,
4077                 hns3_buf_size(ring),
4078                 DMA_FROM_DEVICE);
4079 
4080         hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
4081         trace_hns3_rx_desc(ring);
4082         hns3_rx_ring_move_fw(ring);
4083         ring->pending_buf++;
4084     } while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));
4085 
4086     return 0;
4087 }
4088 
4089 static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
4090                      struct sk_buff *skb, u32 l234info,
4091                      u32 bd_base_info, u32 ol_info, u16 csum)
4092 {
4093     struct net_device *netdev = ring_to_netdev(ring);
4094     struct hns3_nic_priv *priv = netdev_priv(netdev);
4095     u32 l3_type;
4096 
4097     skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
4098                             HNS3_RXD_GRO_SIZE_M,
4099                             HNS3_RXD_GRO_SIZE_S);
4100     /* if there is no HW GRO, do not set gro params */
4101     if (!skb_shinfo(skb)->gso_size) {
4102         hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info,
4103                  csum);
4104         return 0;
4105     }
4106 
4107     NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info,
4108                           HNS3_RXD_GRO_COUNT_M,
4109                           HNS3_RXD_GRO_COUNT_S);
4110 
4111     if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
4112         u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
4113                         HNS3_RXD_PTYPE_S);
4114 
4115         l3_type = hns3_rx_ptype_tbl[ptype].l3_type;
4116     } else {
4117         l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
4118                       HNS3_RXD_L3ID_S);
4119     }
4120 
4121     if (l3_type == HNS3_L3_TYPE_IPV4)
4122         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
4123     else if (l3_type == HNS3_L3_TYPE_IPV6)
4124         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
4125     else
4126         return -EFAULT;
4127 
4128     return hns3_gro_complete(skb, l234info);
4129 }
4130 
4131 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
4132                      struct sk_buff *skb, u32 rss_hash)
4133 {
4134     struct hnae3_handle *handle = ring->tqp->handle;
4135     enum pkt_hash_types rss_type;
4136 
4137     if (rss_hash)
4138         rss_type = handle->kinfo.rss_type;
4139     else
4140         rss_type = PKT_HASH_TYPE_NONE;
4141 
4142     skb_set_hash(skb, rss_hash, rss_type);
4143 }
4144 
4145 static void hns3_handle_rx_ts_info(struct net_device *netdev,
4146                    struct hns3_desc *desc, struct sk_buff *skb,
4147                    u32 bd_base_info)
4148 {
4149     if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) {
4150         struct hnae3_handle *h = hns3_get_handle(netdev);
4151         u32 nsec = le32_to_cpu(desc->ts_nsec);
4152         u32 sec = le32_to_cpu(desc->ts_sec);
4153 
4154         if (h->ae_algo->ops->get_rx_hwts)
4155             h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec);
4156     }
4157 }
4158 
4159 static void hns3_handle_rx_vlan_tag(struct hns3_enet_ring *ring,
4160                     struct hns3_desc *desc, struct sk_buff *skb,
4161                     u32 l234info)
4162 {
4163     struct net_device *netdev = ring_to_netdev(ring);
4164 
4165     /* Based on the hw strategy, the offloaded tag is stored in
4166      * ot_vlan_tag in the two-layer tag case, and in vlan_tag in
4167      * the one-layer tag case.
4168      */
4169     if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
4170         u16 vlan_tag;
4171 
4172         if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
4173             __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
4174                            vlan_tag);
4175     }
4176 }
4177 
4178 static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
4179 {
4180     struct net_device *netdev = ring_to_netdev(ring);
4181     enum hns3_pkt_l2t_type l2_frame_type;
4182     u32 bd_base_info, l234info, ol_info;
4183     struct hns3_desc *desc;
4184     unsigned int len;
4185     int pre_ntc, ret;
4186     u16 csum;
4187 
4188     /* The bdinfo handled below is only valid on the last BD of the
4189      * current packet; ring->next_to_clean already points at the first
4190      * descriptor of the next packet, hence the - 1 below.
4191      */
4192     pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
4193                     (ring->desc_num - 1);
4194     desc = &ring->desc[pre_ntc];
4195     bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
4196     l234info = le32_to_cpu(desc->rx.l234_info);
4197     ol_info = le32_to_cpu(desc->rx.ol_info);
4198     csum = le16_to_cpu(desc->csum);
4199 
4200     hns3_handle_rx_ts_info(netdev, desc, skb, bd_base_info);
4201 
4202     hns3_handle_rx_vlan_tag(ring, desc, skb, l234info);
4203 
4204     if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
4205                   BIT(HNS3_RXD_L2E_B))))) {
4206         u64_stats_update_begin(&ring->syncp);
4207         if (l234info & BIT(HNS3_RXD_L2E_B))
4208             ring->stats.l2_err++;
4209         else
4210             ring->stats.err_pkt_len++;
4211         u64_stats_update_end(&ring->syncp);
4212 
4213         return -EFAULT;
4214     }
4215 
4216     len = skb->len;
4217 
4218     /* Set the protocol before handing the skb to the IP stack */
4219     skb->protocol = eth_type_trans(skb, netdev);
4220 
4221     /* This is needed in order to enable forwarding support */
4222     ret = hns3_set_gro_and_checksum(ring, skb, l234info,
4223                     bd_base_info, ol_info, csum);
4224     if (unlikely(ret)) {
4225         hns3_ring_stats_update(ring, rx_err_cnt);
4226         return ret;
4227     }
4228 
4229     l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
4230                     HNS3_RXD_DMAC_S);
4231 
4232     u64_stats_update_begin(&ring->syncp);
4233     ring->stats.rx_pkts++;
4234     ring->stats.rx_bytes += len;
4235 
4236     if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
4237         ring->stats.rx_multicast++;
4238 
4239     u64_stats_update_end(&ring->syncp);
4240 
4241     ring->tqp_vector->rx_group.total_bytes += len;
4242 
4243     hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
4244     return 0;
4245 }
4246 
4247 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
4248 {
4249     struct sk_buff *skb = ring->skb;
4250     struct hns3_desc_cb *desc_cb;
4251     struct hns3_desc *desc;
4252     unsigned int length;
4253     u32 bd_base_info;
4254     int ret;
4255 
4256     desc = &ring->desc[ring->next_to_clean];
4257     desc_cb = &ring->desc_cb[ring->next_to_clean];
4258 
4259     prefetch(desc);
4260 
4261     if (!skb) {
4262         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
4263         /* Check valid BD */
4264         if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
4265             return -ENXIO;
4266 
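             /* Paired with the hardware's descriptor write: make sure the
              * fields read below are not speculatively loaded before the
              * valid bit was checked.
              */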
4267         dma_rmb();
4268         length = le16_to_cpu(desc->rx.size);
4269 
4270         ring->va = desc_cb->buf + desc_cb->page_offset;
4271 
4272         dma_sync_single_for_cpu(ring_to_dev(ring),
4273                 desc_cb->dma + desc_cb->page_offset,
4274                 hns3_buf_size(ring),
4275                 DMA_FROM_DEVICE);
4276 
4277         /* Prefetch the first cache line of the first page.
4278          * The idea is to cache a few bytes of the packet header.
4279          * Our L1 cache line size is 64B, so we need to prefetch twice
4280          * to cover 128B. CPUs with larger caches may have 128B Level 1
4281          * cache lines, in which case a single fetch suffices to cache
4282          * the relevant part of the header.
4283          */
4284         net_prefetch(ring->va);
4285 
4286         ret = hns3_alloc_skb(ring, length, ring->va);
4287         skb = ring->skb;
4288 
4289         if (ret < 0) /* alloc buffer fail */
4290             return ret;
4291         if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */
4292             ret = hns3_add_frag(ring);
4293             if (ret)
4294                 return ret;
4295         }
4296     } else {
4297         ret = hns3_add_frag(ring);
4298         if (ret)
4299             return ret;
4300     }
4301 
4302     /* As the head data may be changed when GRO is enabled, copy
4303      * the head data in after the rest of the packet has been received
4304      */
4305     if (skb->len > HNS3_RX_HEAD_SIZE)
4306         memcpy(skb->data, ring->va,
4307                ALIGN(ring->pull_len, sizeof(long)));
4308 
4309     ret = hns3_handle_bdinfo(ring, skb);
4310     if (unlikely(ret)) {
4311         dev_kfree_skb_any(skb);
4312         return ret;
4313     }
4314 
4315     skb_record_rx_queue(skb, ring->tqp->tqp_index);
4316     return 0;
4317 }
4318 
4319 int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
4320                void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
4321 {
4322 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
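     /* Refill rx buffers in batches of 16 so the head-pointer doorbell
      * write is amortized over several descriptors.
      */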
4323     int unused_count = hns3_desc_unused(ring);
4324     bool failure = false;
4325     int recv_pkts = 0;
4326     int err;
4327 
4328     unused_count -= ring->pending_buf;
4329 
4330     while (recv_pkts < budget) {
4331         /* Reuse or realloc buffers */
4332         if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
4333             failure = failure ||
4334                 hns3_nic_alloc_rx_buffers(ring, unused_count);
4335             unused_count = 0;
4336         }
4337 
4338         /* Poll one pkt */
4339         err = hns3_handle_rx_bd(ring);
4340         /* No FE seen for the packet yet, or skb allocation failed */
4341         if (unlikely(!ring->skb || err == -ENXIO)) {
4342             goto out;
4343         } else if (likely(!err)) {
4344             rx_fn(ring, ring->skb);
4345             recv_pkts++;
4346         }
4347 
4348         unused_count += ring->pending_buf;
4349         ring->skb = NULL;
4350         ring->pending_buf = 0;
4351     }
4352 
4353 out:
4354     /* sync the head pointer before exiting, since hardware calculates
4355      * the FBD number from the head pointer
4356      */
4357     if (unused_count > 0)
4358         failure = failure ||
4359               hns3_nic_alloc_rx_buffers(ring, unused_count);
4360 
4361     return failure ? budget : recv_pkts;
4362 }
4363 
4364 static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
4365 {
4366     struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
4367     struct dim_sample sample = {};
4368 
4369     if (!rx_group->coal.adapt_enable)
4370         return;
4371 
4372     dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets,
4373               rx_group->total_bytes, &sample);
4374     net_dim(&rx_group->dim, sample);
4375 }
4376 
4377 static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
4378 {
4379     struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
4380     struct dim_sample sample = {};
4381 
4382     if (!tx_group->coal.adapt_enable)
4383         return;
4384 
4385     dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets,
4386               tx_group->total_bytes, &sample);
4387     net_dim(&tx_group->dim, sample);
4388 }
4389 
4390 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
4391 {
4392     struct hns3_nic_priv *priv = netdev_priv(napi->dev);
4393     struct hns3_enet_ring *ring;
4394     int rx_pkt_total = 0;
4395 
4396     struct hns3_enet_tqp_vector *tqp_vector =
4397         container_of(napi, struct hns3_enet_tqp_vector, napi);
4398     bool clean_complete = true;
4399     int rx_budget = budget;
4400 
4401     if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
4402         napi_complete(napi);
4403         return 0;
4404     }
4405 
4406     /* Since the actual Tx work is minimal, we can give the Tx a larger
4407      * budget and be more aggressive about cleaning up the Tx descriptors.
4408      */
4409     hns3_for_each_ring(ring, tqp_vector->tx_group)
4410         hns3_clean_tx_ring(ring, budget);
4411 
4412     /* make sure the rx ring budget is not smaller than 1 */
4413     if (tqp_vector->num_tqps > 1)
4414         rx_budget = max(budget / tqp_vector->num_tqps, 1);
4415 
4416     hns3_for_each_ring(ring, tqp_vector->rx_group) {
4417         int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
4418                             hns3_rx_skb);
4419         if (rx_cleaned >= rx_budget)
4420             clean_complete = false;
4421 
4422         rx_pkt_total += rx_cleaned;
4423     }
4424 
4425     tqp_vector->rx_group.total_packets += rx_pkt_total;
4426 
4427     if (!clean_complete)
4428         return budget;
4429 
4430     if (napi_complete(napi) &&
4431         likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
4432         hns3_update_rx_int_coalesce(tqp_vector);
4433         hns3_update_tx_int_coalesce(tqp_vector);
4434 
4435         hns3_mask_vector_irq(tqp_vector, 1);
4436     }
4437 
4438     return rx_pkt_total;
4439 }
4440 
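     /* Build the linked list describing this vector's rings (tqp index,
      * ring type and GL index) that the ae layer uses to map the rings
      * to the vector's interrupt.
      */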
4441 static int hns3_create_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
4442                   struct hnae3_ring_chain_node **head,
4443                   bool is_tx)
4444 {
4445     u32 bit_value = is_tx ? HNAE3_RING_TYPE_TX : HNAE3_RING_TYPE_RX;
4446     u32 field_value = is_tx ? HNAE3_RING_GL_TX : HNAE3_RING_GL_RX;
4447     struct hnae3_ring_chain_node *cur_chain = *head;
4448     struct pci_dev *pdev = tqp_vector->handle->pdev;
4449     struct hnae3_ring_chain_node *chain;
4450     struct hns3_enet_ring *ring;
4451 
4452     ring = is_tx ? tqp_vector->tx_group.ring : tqp_vector->rx_group.ring;
4453 
4454     if (cur_chain) {
4455         while (cur_chain->next)
4456             cur_chain = cur_chain->next;
4457     }
4458 
4459     while (ring) {
4460         chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
4461         if (!chain)
4462             return -ENOMEM;
4463         if (cur_chain)
4464             cur_chain->next = chain;
4465         else
4466             *head = chain;
4467         chain->tqp_index = ring->tqp->tqp_index;
4468         hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
4469                 bit_value);
4470         hnae3_set_field(chain->int_gl_idx,
4471                 HNAE3_RING_GL_IDX_M,
4472                 HNAE3_RING_GL_IDX_S, field_value);
4473 
4474         cur_chain = chain;
4475 
4476         ring = ring->next;
4477     }
4478 
4479     return 0;
4480 }
4481 
4482 static struct hnae3_ring_chain_node *
4483 hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector)
4484 {
4485     struct pci_dev *pdev = tqp_vector->handle->pdev;
4486     struct hnae3_ring_chain_node *cur_chain = NULL;
4487     struct hnae3_ring_chain_node *chain;
4488 
4489     if (hns3_create_ring_chain(tqp_vector, &cur_chain, true))
4490         goto err_free_chain;
4491 
4492     if (hns3_create_ring_chain(tqp_vector, &cur_chain, false))
4493         goto err_free_chain;
4494 
4495     return cur_chain;
4496 
4497 err_free_chain:
4498     while (cur_chain) {
4499         chain = cur_chain->next;
4500         devm_kfree(&pdev->dev, cur_chain);
4501         cur_chain = chain;
4502     }
4503 
4504     return NULL;
4505 }
4506 
4507 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
4508                     struct hnae3_ring_chain_node *head)
4509 {
4510     struct pci_dev *pdev = tqp_vector->handle->pdev;
4511     struct hnae3_ring_chain_node *chain_tmp, *chain;
4512 
4513     chain = head;
4514 
4515     while (chain) {
4516         chain_tmp = chain->next;
4517         devm_kfree(&pdev->dev, chain);
4518         chain = chain_tmp;
4519     }
4520 }
4521 
4522 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
4523                    struct hns3_enet_ring *ring)
4524 {
4525     ring->next = group->ring;
4526     group->ring = ring;
4527 
4528     group->count++;
4529 }
4530 
4531 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
4532 {
4533     struct pci_dev *pdev = priv->ae_handle->pdev;
4534     struct hns3_enet_tqp_vector *tqp_vector;
4535     int num_vectors = priv->vector_num;
4536     int numa_node;
4537     int vector_i;
4538 
4539     numa_node = dev_to_node(&pdev->dev);
4540 
4541     for (vector_i = 0; vector_i < num_vectors; vector_i++) {
4542         tqp_vector = &priv->tqp_vector[vector_i];
4543         cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
4544                 &tqp_vector->affinity_mask);
4545     }
4546 }
4547 
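     /* net_dim workers: apply the interrupt moderation profile chosen by
      * the DIM algorithm to the vector's coalescing settings (GL and,
      * when supported, QL).
      */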
4548 static void hns3_rx_dim_work(struct work_struct *work)
4549 {
4550     struct dim *dim = container_of(work, struct dim, work);
4551     struct hns3_enet_ring_group *group = container_of(dim,
4552         struct hns3_enet_ring_group, dim);
4553     struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector;
4554     struct dim_cq_moder cur_moder =
4555         net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
4556 
4557     hns3_set_vector_coalesce_rx_gl(group->ring->tqp_vector, cur_moder.usec);
4558     tqp_vector->rx_group.coal.int_gl = cur_moder.usec;
4559 
4560     if (cur_moder.pkts < tqp_vector->rx_group.coal.int_ql_max) {
4561         hns3_set_vector_coalesce_rx_ql(tqp_vector, cur_moder.pkts);
4562         tqp_vector->rx_group.coal.int_ql = cur_moder.pkts;
4563     }
4564 
4565     dim->state = DIM_START_MEASURE;
4566 }
4567 
4568 static void hns3_tx_dim_work(struct work_struct *work)
4569 {
4570     struct dim *dim = container_of(work, struct dim, work);
4571     struct hns3_enet_ring_group *group = container_of(dim,
4572         struct hns3_enet_ring_group, dim);
4573     struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector;
4574     struct dim_cq_moder cur_moder =
4575         net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
4576 
4577     hns3_set_vector_coalesce_tx_gl(tqp_vector, cur_moder.usec);
4578     tqp_vector->tx_group.coal.int_gl = cur_moder.usec;
4579 
4580     if (cur_moder.pkts < tqp_vector->tx_group.coal.int_ql_max) {
4581         hns3_set_vector_coalesce_tx_ql(tqp_vector, cur_moder.pkts);
4582         tqp_vector->tx_group.coal.int_ql = cur_moder.pkts;
4583     }
4584 
4585     dim->state = DIM_START_MEASURE;
4586 }
4587 
4588 static void hns3_nic_init_dim(struct hns3_enet_tqp_vector *tqp_vector)
4589 {
4590     INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work);
4591     INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work);
4592 }
4593 
4594 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
4595 {
4596     struct hnae3_handle *h = priv->ae_handle;
4597     struct hns3_enet_tqp_vector *tqp_vector;
4598     int ret;
4599     int i;
4600 
4601     hns3_nic_set_cpumask(priv);
4602 
4603     for (i = 0; i < priv->vector_num; i++) {
4604         tqp_vector = &priv->tqp_vector[i];
4605         hns3_vector_coalesce_init_hw(tqp_vector, priv);
4606         tqp_vector->num_tqps = 0;
4607         hns3_nic_init_dim(tqp_vector);
4608     }
4609 
4610     for (i = 0; i < h->kinfo.num_tqps; i++) {
4611         u16 vector_i = i % priv->vector_num;
4612         u16 tqp_num = h->kinfo.num_tqps;
4613 
4614         tqp_vector = &priv->tqp_vector[vector_i];
4615 
4616         hns3_add_ring_to_group(&tqp_vector->tx_group,
4617                        &priv->ring[i]);
4618 
4619         hns3_add_ring_to_group(&tqp_vector->rx_group,
4620                        &priv->ring[i + tqp_num]);
4621 
4622         priv->ring[i].tqp_vector = tqp_vector;
4623         priv->ring[i + tqp_num].tqp_vector = tqp_vector;
4624         tqp_vector->num_tqps++;
4625     }
4626 
4627     for (i = 0; i < priv->vector_num; i++) {
4628         struct hnae3_ring_chain_node *vector_ring_chain;
4629 
4630         tqp_vector = &priv->tqp_vector[i];
4631 
4632         tqp_vector->rx_group.total_bytes = 0;
4633         tqp_vector->rx_group.total_packets = 0;
4634         tqp_vector->tx_group.total_bytes = 0;
4635         tqp_vector->tx_group.total_packets = 0;
4636         tqp_vector->handle = h;
4637 
4638         vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
4639         if (!vector_ring_chain) {
4640             ret = -ENOMEM;
4641             goto map_ring_fail;
4642         }
4643 
4644         ret = h->ae_algo->ops->map_ring_to_vector(h,
4645             tqp_vector->vector_irq, vector_ring_chain);
4646 
4647         hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);
4648 
4649         if (ret)
4650             goto map_ring_fail;
4651 
4652         netif_napi_add(priv->netdev, &tqp_vector->napi,
4653                    hns3_nic_common_poll, NAPI_POLL_WEIGHT);
4654     }
4655 
4656     return 0;
4657 
4658 map_ring_fail:
4659     while (i--)
4660         netif_napi_del(&priv->tqp_vector[i].napi);
4661 
4662     return ret;
4663 }
4664 
4665 static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
4666 {
4667     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
4668     struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
4669     struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
4670 
4671     /* initialize the configuration for interrupt coalescing.
4672      * 1. GL (Interrupt Gap Limiter)
4673      * 2. RL (Interrupt Rate Limiter)
4674      * 3. QL (Interrupt Quantity Limiter)
4675      *
4676      * Default: enable interrupt coalescing self-adaptive and GL
4677      */
4678     tx_coal->adapt_enable = 1;
4679     rx_coal->adapt_enable = 1;
4680 
4681     tx_coal->int_gl = HNS3_INT_GL_50K;
4682     rx_coal->int_gl = HNS3_INT_GL_50K;
4683 
4684     rx_coal->flow_level = HNS3_FLOW_LOW;
4685     tx_coal->flow_level = HNS3_FLOW_LOW;
4686 
4687     if (ae_dev->dev_specs.int_ql_max) {
4688         tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
4689         rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
4690     }
4691 }
4692 
4693 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
4694 {
4695     struct hnae3_handle *h = priv->ae_handle;
4696     struct hns3_enet_tqp_vector *tqp_vector;
4697     struct hnae3_vector_info *vector;
4698     struct pci_dev *pdev = h->pdev;
4699     u16 tqp_num = h->kinfo.num_tqps;
4700     u16 vector_num;
4701     int ret = 0;
4702     u16 i;
4703 
4704     /* The RSS size, the number of online CPUs and vector_num should
4705      * be the same; 2p/4p systems should be considered later. */
4706     vector_num = min_t(u16, num_online_cpus(), tqp_num);
4707 
4708     vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
4709                   GFP_KERNEL);
4710     if (!vector)
4711         return -ENOMEM;
4712 
4713     /* save the actual available vector number */
4714     vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
4715 
4716     priv->vector_num = vector_num;
4717     priv->tqp_vector = (struct hns3_enet_tqp_vector *)
4718         devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
4719                  GFP_KERNEL);
4720     if (!priv->tqp_vector) {
4721         ret = -ENOMEM;
4722         goto out;
4723     }
4724 
4725     for (i = 0; i < priv->vector_num; i++) {
4726         tqp_vector = &priv->tqp_vector[i];
4727         tqp_vector->idx = i;
4728         tqp_vector->mask_addr = vector[i].io_addr;
4729         tqp_vector->vector_irq = vector[i].vector;
4730         hns3_vector_coalesce_init(tqp_vector, priv);
4731     }
4732 
4733 out:
4734     devm_kfree(&pdev->dev, vector);
4735     return ret;
4736 }
4737 
4738 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
4739 {
4740     group->ring = NULL;
4741     group->count = 0;
4742 }
4743 
4744 static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
4745 {
4746     struct hnae3_ring_chain_node *vector_ring_chain;
4747     struct hnae3_handle *h = priv->ae_handle;
4748     struct hns3_enet_tqp_vector *tqp_vector;
4749     int i;
4750 
4751     for (i = 0; i < priv->vector_num; i++) {
4752         tqp_vector = &priv->tqp_vector[i];
4753 
4754         if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
4755             continue;
4756 
4757         /* Since the mapping can be overwritten, if we fail to get the
4758          * chain between the vector and the ring, we should carry on
4759          * with the rest of the teardown.
4760          */
4761         vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
4762         if (!vector_ring_chain)
4763             dev_warn(priv->dev, "failed to get ring chain\n");
4764 
4765         h->ae_algo->ops->unmap_ring_from_vector(h,
4766             tqp_vector->vector_irq, vector_ring_chain);
4767 
4768         hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);
4769 
4770         hns3_clear_ring_group(&tqp_vector->rx_group);
4771         hns3_clear_ring_group(&tqp_vector->tx_group);
4772         netif_napi_del(&priv->tqp_vector[i].napi);
4773     }
4774 }
4775 
4776 static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
4777 {
4778     struct hnae3_handle *h = priv->ae_handle;
4779     struct pci_dev *pdev = h->pdev;
4780     int i, ret;
4781 
4782     for (i = 0; i < priv->vector_num; i++) {
4783         struct hns3_enet_tqp_vector *tqp_vector;
4784 
4785         tqp_vector = &priv->tqp_vector[i];
4786         ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
4787         if (ret)
4788             return;
4789     }
4790 
4791     devm_kfree(&pdev->dev, priv->tqp_vector);
4792 }
4793 
4794 static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
4795                   unsigned int ring_type)
4796 {
4797     int queue_num = priv->ae_handle->kinfo.num_tqps;
4798     struct hns3_enet_ring *ring;
4799     int desc_num;
4800 
4801     if (ring_type == HNAE3_RING_TYPE_TX) {
4802         ring = &priv->ring[q->tqp_index];
4803         desc_num = priv->ae_handle->kinfo.num_tx_desc;
4804         ring->queue_index = q->tqp_index;
4805         ring->tx_copybreak = priv->tx_copybreak;
4806         ring->last_to_use = 0;
4807     } else {
4808         ring = &priv->ring[q->tqp_index + queue_num];
4809         desc_num = priv->ae_handle->kinfo.num_rx_desc;
4810         ring->queue_index = q->tqp_index;
4811         ring->rx_copybreak = priv->rx_copybreak;
4812     }
4813 
4814     hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
4815 
4816     ring->tqp = q;
4817     ring->desc = NULL;
4818     ring->desc_cb = NULL;
4819     ring->dev = priv->dev;
4820     ring->desc_dma_addr = 0;
4821     ring->buf_size = q->buf_size;
4822     ring->desc_num = desc_num;
4823     ring->next_to_use = 0;
4824     ring->next_to_clean = 0;
4825 }
4826 
4827 static void hns3_queue_to_ring(struct hnae3_queue *tqp,
4828                    struct hns3_nic_priv *priv)
4829 {
4830     hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
4831     hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
4832 }
4833 
4834 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
4835 {
4836     struct hnae3_handle *h = priv->ae_handle;
4837     struct pci_dev *pdev = h->pdev;
4838     int i;
4839 
4840     priv->ring = devm_kzalloc(&pdev->dev,
4841                   array3_size(h->kinfo.num_tqps,
4842                           sizeof(*priv->ring), 2),
4843                   GFP_KERNEL);
4844     if (!priv->ring)
4845         return -ENOMEM;
4846 
4847     for (i = 0; i < h->kinfo.num_tqps; i++)
4848         hns3_queue_to_ring(h->kinfo.tqp[i], priv);
4849 
4850     return 0;
4851 }
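/* Layout implied by the indexing in hns3_ring_get_cfg(): priv->ring
 * holds 2 * num_tqps entries, TX rings at [0, num_tqps) and RX rings
 * at [num_tqps, 2 * num_tqps), so a queue's two rings are reached as:
 *
 *    struct hns3_enet_ring *tx_ring = &priv->ring[i];
 *    struct hns3_enet_ring *rx_ring = &priv->ring[i + h->kinfo.num_tqps];
 */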
4852 
4853 static void hns3_put_ring_config(struct hns3_nic_priv *priv)
4854 {
4855     if (!priv->ring)
4856         return;
4857 
4858     devm_kfree(priv->dev, priv->ring);
4859     priv->ring = NULL;
4860 }
4861 
4862 static void hns3_alloc_page_pool(struct hns3_enet_ring *ring)
4863 {
4864     struct page_pool_params pp_params = {
4865         .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG |
4866                 PP_FLAG_DMA_SYNC_DEV,
4867         .order = hns3_page_order(ring),
4868         .pool_size = ring->desc_num * hns3_buf_size(ring) /
4869                 (PAGE_SIZE << hns3_page_order(ring)),
4870         .nid = dev_to_node(ring_to_dev(ring)),
4871         .dev = ring_to_dev(ring),
4872         .dma_dir = DMA_FROM_DEVICE,
4873         .offset = 0,
4874         .max_len = PAGE_SIZE << hns3_page_order(ring),
4875     };
4876 
4877     ring->page_pool = page_pool_create(&pp_params);
4878     if (IS_ERR(ring->page_pool)) {
4879         dev_warn(ring_to_dev(ring), "page pool creation failed: %ld\n",
4880              PTR_ERR(ring->page_pool));
4881         ring->page_pool = NULL;
4882     }
4883 }
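/* Worked example of the pool sizing above (illustrative numbers): with
 * desc_num = 1024, a 2048-byte buffer and order-0 4 KiB pages,
 * pool_size = 1024 * 2048 / 4096 = 512 pages. The failure path is
 * deliberately non-fatal: with page_pool left NULL, RX buffer
 * allocation falls back to ordinary page allocation.
 */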
4884 
4885 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
4886 {
4887     int ret;
4888 
4889     if (ring->desc_num <= 0 || ring->buf_size <= 0)
4890         return -EINVAL;
4891 
4892     ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
4893                      sizeof(ring->desc_cb[0]), GFP_KERNEL);
4894     if (!ring->desc_cb) {
4895         ret = -ENOMEM;
4896         goto out;
4897     }
4898 
4899     ret = hns3_alloc_desc(ring);
4900     if (ret)
4901         goto out_with_desc_cb;
4902 
4903     if (!HNAE3_IS_TX_RING(ring)) {
4904         if (page_pool_enabled)
4905             hns3_alloc_page_pool(ring);
4906 
4907         ret = hns3_alloc_ring_buffers(ring);
4908         if (ret)
4909             goto out_with_desc;
4910     } else {
4911         hns3_init_tx_spare_buffer(ring);
4912     }
4913 
4914     return 0;
4915 
4916 out_with_desc:
4917     hns3_free_desc(ring);
4918 out_with_desc_cb:
4919     devm_kfree(ring_to_dev(ring), ring->desc_cb);
4920     ring->desc_cb = NULL;
4921 out:
4922     return ret;
4923 }
4924 
4925 void hns3_fini_ring(struct hns3_enet_ring *ring)
4926 {
4927     hns3_free_desc(ring);
4928     devm_kfree(ring_to_dev(ring), ring->desc_cb);
4929     ring->desc_cb = NULL;
4930     ring->next_to_clean = 0;
4931     ring->next_to_use = 0;
4932     ring->last_to_use = 0;
4933     ring->pending_buf = 0;
4934     if (!HNAE3_IS_TX_RING(ring) && ring->skb) {
4935         dev_kfree_skb_any(ring->skb);
4936         ring->skb = NULL;
4937     } else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) {
4938         struct hns3_tx_spare *tx_spare = ring->tx_spare;
4939 
4940         dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len,
4941                    DMA_TO_DEVICE);
4942         free_pages((unsigned long)tx_spare->buf,
4943                get_order(tx_spare->len));
4944         devm_kfree(ring_to_dev(ring), tx_spare);
4945         ring->tx_spare = NULL;
4946     }
4947 
4948     if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) {
4949         page_pool_destroy(ring->page_pool);
4950         ring->page_pool = NULL;
4951     }
4952 }
4953 
4954 static int hns3_buf_size2type(u32 buf_size)
4955 {
4956     int bd_size_type;
4957 
4958     switch (buf_size) {
4959     case 512:
4960         bd_size_type = HNS3_BD_SIZE_512_TYPE;
4961         break;
4962     case 1024:
4963         bd_size_type = HNS3_BD_SIZE_1024_TYPE;
4964         break;
4965     case 2048:
4966         bd_size_type = HNS3_BD_SIZE_2048_TYPE;
4967         break;
4968     case 4096:
4969         bd_size_type = HNS3_BD_SIZE_4096_TYPE;
4970         break;
4971     default:
4972         bd_size_type = HNS3_BD_SIZE_2048_TYPE;
4973     }
4974 
4975     return bd_size_type;
4976 }
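/* Only the four power-of-two sizes above have dedicated BD size types;
 * any other buf_size silently falls back to the 2048-byte type rather
 * than failing.
 */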
4977 
4978 static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
4979 {
4980     dma_addr_t dma = ring->desc_dma_addr;
4981     struct hnae3_queue *q = ring->tqp;
4982 
4983     if (!HNAE3_IS_TX_RING(ring)) {
4984         hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
4985         hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
4986                    (u32)((dma >> 31) >> 1));
4987 
4988         hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
4989                    hns3_buf_size2type(ring->buf_size));
4990         hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
4991                    ring->desc_num / 8 - 1);
4992     } else {
4993         hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
4994                    (u32)dma);
4995         hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
4996                    (u32)((dma >> 31) >> 1));
4997 
4998         hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
4999                    ring->desc_num / 8 - 1);
5000     }
5001 }
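/* The (u32)((dma >> 31) >> 1) idiom above writes the upper 32 bits of
 * the descriptor base address. Splitting the shift avoids undefined
 * behaviour when dma_addr_t is only 32 bits wide (a single >> 32 would
 * shift by the full type width); it is equivalent to the kernel's
 * upper_32_bits() helper, e.g.:
 *
 *    hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
 *                   upper_32_bits(dma));
 */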
5002 
5003 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
5004 {
5005     struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
5006     struct hnae3_tc_info *tc_info = &kinfo->tc_info;
5007     int i;
5008 
5009     for (i = 0; i < tc_info->num_tc; i++) {
5010         int j;
5011 
5012         for (j = 0; j < tc_info->tqp_count[i]; j++) {
5013             struct hnae3_queue *q;
5014 
5015             q = priv->ring[tc_info->tqp_offset[i] + j].tqp;
5016             hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i);
5017         }
5018     }
5019 }
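/* Example of the mapping above (illustrative): with num_tc = 2,
 * tqp_count = {4, 4} and tqp_offset = {0, 4}, queues 0-3 are tagged
 * with TC 0 and queues 4-7 with TC 1 via HNS3_RING_TX_RING_TC_REG.
 */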
5020 
5021 int hns3_init_all_ring(struct hns3_nic_priv *priv)
5022 {
5023     struct hnae3_handle *h = priv->ae_handle;
5024     int ring_num = h->kinfo.num_tqps * 2;
5025     int i, j;
5026     int ret;
5027 
5028     for (i = 0; i < ring_num; i++) {
5029         ret = hns3_alloc_ring_memory(&priv->ring[i]);
5030         if (ret) {
5031             dev_err(priv->dev,
5032                 "Alloc ring memory fail! ret=%d\n", ret);
5033             goto out_when_alloc_ring_memory;
5034         }
5035 
5036         u64_stats_init(&priv->ring[i].syncp);
5037     }
5038 
5039     return 0;
5040 
5041 out_when_alloc_ring_memory:
5042     for (j = i - 1; j >= 0; j--)
5043         hns3_fini_ring(&priv->ring[j]);
5044 
5045     return -ENOMEM;
5046 }
5047 
5048 static void hns3_uninit_all_ring(struct hns3_nic_priv *priv)
5049 {
5050     struct hnae3_handle *h = priv->ae_handle;
5051     int i;
5052 
5053     for (i = 0; i < h->kinfo.num_tqps; i++) {
5054         hns3_fini_ring(&priv->ring[i]);
5055         hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]);
5056     }
5057 }
5058 
5059 /* Set the MAC address if it is configured, or leave it to the AE driver */
5060 static int hns3_init_mac_addr(struct net_device *netdev)
5061 {
5062     struct hns3_nic_priv *priv = netdev_priv(netdev);
5063     char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
5064     struct hnae3_handle *h = priv->ae_handle;
5065     u8 mac_addr_temp[ETH_ALEN];
5066     int ret = 0;
5067 
5068     if (h->ae_algo->ops->get_mac_addr)
5069         h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
5070 
5071     /* Check if the MAC address is valid; if not, use a random one */
5072     if (!is_valid_ether_addr(mac_addr_temp)) {
5073         eth_hw_addr_random(netdev);
5074         hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr);
5075         dev_warn(priv->dev, "using random MAC address %s\n",
5076              format_mac_addr);
5077     } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
5078         eth_hw_addr_set(netdev, mac_addr_temp);
5079         ether_addr_copy(netdev->perm_addr, mac_addr_temp);
5080     } else {
5081         return 0;
5082     }
5083 
5084     if (h->ae_algo->ops->set_mac_addr)
5085         ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
5086 
5087     return ret;
5088 }
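/* Decision table implied by the branches above: an invalid hardware MAC
 * yields a random address that must then be programmed back through
 * set_mac_addr(); a valid MAC that differs from the netdev's current
 * one is adopted and recorded as perm_addr; a MAC that already matches
 * needs no work, hence the early return 0.
 */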
5089 
5090 static int hns3_init_phy(struct net_device *netdev)
5091 {
5092     struct hnae3_handle *h = hns3_get_handle(netdev);
5093     int ret = 0;
5094 
5095     if (h->ae_algo->ops->mac_connect_phy)
5096         ret = h->ae_algo->ops->mac_connect_phy(h);
5097 
5098     return ret;
5099 }
5100 
5101 static void hns3_uninit_phy(struct net_device *netdev)
5102 {
5103     struct hnae3_handle *h = hns3_get_handle(netdev);
5104 
5105     if (h->ae_algo->ops->mac_disconnect_phy)
5106         h->ae_algo->ops->mac_disconnect_phy(h);
5107 }
5108 
5109 static int hns3_client_start(struct hnae3_handle *handle)
5110 {
5111     if (!handle->ae_algo->ops->client_start)
5112         return 0;
5113 
5114     return handle->ae_algo->ops->client_start(handle);
5115 }
5116 
5117 static void hns3_client_stop(struct hnae3_handle *handle)
5118 {
5119     if (!handle->ae_algo->ops->client_stop)
5120         return;
5121 
5122     handle->ae_algo->ops->client_stop(handle);
5123 }
5124 
5125 static void hns3_info_show(struct hns3_nic_priv *priv)
5126 {
5127     struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
5128     char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
5129 
5130     hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr);
5131     dev_info(priv->dev, "MAC address: %s\n", format_mac_addr);
5132     dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
5133     dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
5134     dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
5135     dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
5136     dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
5137     dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
5138     dev_info(priv->dev, "Total number of enabled TCs: %u\n",
5139          kinfo->tc_info.num_tc);
5140     dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu);
5141 }
5142 
5143 static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
5144                     enum dim_cq_period_mode mode, bool is_tx)
5145 {
5146     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
5147     struct hnae3_handle *handle = priv->ae_handle;
5148     int i;
5149 
5150     if (is_tx) {
5151         priv->tx_cqe_mode = mode;
5152 
5153         for (i = 0; i < priv->vector_num; i++)
5154             priv->tqp_vector[i].tx_group.dim.mode = mode;
5155     } else {
5156         priv->rx_cqe_mode = mode;
5157 
5158         for (i = 0; i < priv->vector_num; i++)
5159             priv->tqp_vector[i].rx_group.dim.mode = mode;
5160     }
5161 
5162     if (hnae3_ae_dev_cq_supported(ae_dev)) {
5163         u32 new_mode;
5164         u64 reg;
5165 
5166         new_mode = (mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) ?
5167             HNS3_CQ_MODE_CQE : HNS3_CQ_MODE_EQE;
5168         reg = is_tx ? HNS3_GL1_CQ_MODE_REG : HNS3_GL0_CQ_MODE_REG;
5169 
5170         writel(new_mode, handle->kinfo.io_base + reg);
5171     }
5172 }
5173 
5174 void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
5175                   enum dim_cq_period_mode tx_mode,
5176                   enum dim_cq_period_mode rx_mode)
5177 {
5178     hns3_set_cq_period_mode(priv, tx_mode, true);
5179     hns3_set_cq_period_mode(priv, rx_mode, false);
5180 }
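/* DIM CQ period modes (see <linux/dim.h>): in START_FROM_EQE mode the
 * coalescing period is counted from the event queue entry, while in
 * START_FROM_CQE mode the timer restarts at each new completion, so an
 * interrupt is raised only once the completion stream pauses.
 * hns3_client_init() below defaults both directions to EQE mode.
 */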
5181 
5182 static void hns3_state_init(struct hnae3_handle *handle)
5183 {
5184     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
5185     struct net_device *netdev = handle->kinfo.netdev;
5186     struct hns3_nic_priv *priv = netdev_priv(netdev);
5187 
5188     set_bit(HNS3_NIC_STATE_INITED, &priv->state);
5189 
5190     if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
5191         set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
5192 
5193     if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5194         set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
5195 
5196     if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
5197         set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);
5198 
5199     if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev))
5200         set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
5201 }
5202 
5203 static void hns3_state_uninit(struct hnae3_handle *handle)
5204 {
5205     struct hns3_nic_priv *priv  = handle->priv;
5206 
5207     clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
5208 }
5209 
5210 static int hns3_client_init(struct hnae3_handle *handle)
5211 {
5212     struct pci_dev *pdev = handle->pdev;
5213     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
5214     u16 alloc_tqps, max_rss_size;
5215     struct hns3_nic_priv *priv;
5216     struct net_device *netdev;
5217     int ret;
5218 
5219     handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
5220                             &max_rss_size);
5221     netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
5222     if (!netdev)
5223         return -ENOMEM;
5224 
5225     priv = netdev_priv(netdev);
5226     priv->dev = &pdev->dev;
5227     priv->netdev = netdev;
5228     priv->ae_handle = handle;
5229     priv->tx_timeout_count = 0;
5230     priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
5231     set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
5232 
5233     handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
5234 
5235     handle->kinfo.netdev = netdev;
5236     handle->priv = (void *)priv;
5237 
5238     hns3_init_mac_addr(netdev);
5239 
5240     hns3_set_default_feature(netdev);
5241 
5242     netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
5243     netdev->priv_flags |= IFF_UNICAST_FLT;
5244     netdev->netdev_ops = &hns3_nic_netdev_ops;
5245     SET_NETDEV_DEV(netdev, &pdev->dev);
5246     hns3_ethtool_set_ops(netdev);
5247 
5248     /* Carrier off reporting is important to ethtool even BEFORE open */
5249     netif_carrier_off(netdev);
5250 
5251     ret = hns3_get_ring_config(priv);
5252     if (ret) {
5253         ret = -ENOMEM;
5254         goto out_get_ring_cfg;
5255     }
5256 
5257     hns3_nic_init_coal_cfg(priv);
5258 
5259     ret = hns3_nic_alloc_vector_data(priv);
5260     if (ret) {
5261         ret = -ENOMEM;
5262         goto out_alloc_vector_data;
5263     }
5264 
5265     ret = hns3_nic_init_vector_data(priv);
5266     if (ret) {
5267         ret = -ENOMEM;
5268         goto out_init_vector_data;
5269     }
5270 
5271     ret = hns3_init_all_ring(priv);
5272     if (ret) {
5273         ret = -ENOMEM;
5274         goto out_init_ring;
5275     }
5276 
5277     hns3_cq_period_mode_init(priv, DIM_CQ_PERIOD_MODE_START_FROM_EQE,
5278                  DIM_CQ_PERIOD_MODE_START_FROM_EQE);
5279 
5280     ret = hns3_init_phy(netdev);
5281     if (ret)
5282         goto out_init_phy;
5283 
5284     /* the device can work without cpu rmap, only aRFS needs it */
5285     ret = hns3_set_rx_cpu_rmap(netdev);
5286     if (ret)
5287         dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);
5288 
5289     ret = hns3_nic_init_irq(priv);
5290     if (ret) {
5291         dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
5292         hns3_free_rx_cpu_rmap(netdev);
5293         goto out_init_irq_fail;
5294     }
5295 
5296     ret = hns3_client_start(handle);
5297     if (ret) {
5298         dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
5299         goto out_client_start;
5300     }
5301 
5302     hns3_dcbnl_setup(handle);
5303 
5304     ret = hns3_dbg_init(handle);
5305     if (ret) {
5306         dev_err(priv->dev, "failed to init debugfs, ret = %d\n",
5307             ret);
5308         goto out_client_start;
5309     }
5310 
5311     netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size);
5312 
5313     hns3_state_init(handle);
5314 
5315     ret = register_netdev(netdev);
5316     if (ret) {
5317         dev_err(priv->dev, "probe register netdev fail!\n");
5318         goto out_reg_netdev_fail;
5319     }
5320 
5321     if (netif_msg_drv(handle))
5322         hns3_info_show(priv);
5323 
5324     return ret;
5325 
5326 out_reg_netdev_fail:
5327     hns3_state_uninit(handle);
5328     hns3_dbg_uninit(handle);
5329     hns3_client_stop(handle);
5330 out_client_start:
5331     hns3_free_rx_cpu_rmap(netdev);
5332     hns3_nic_uninit_irq(priv);
5333 out_init_irq_fail:
5334     hns3_uninit_phy(netdev);
5335 out_init_phy:
5336     hns3_uninit_all_ring(priv);
5337 out_init_ring:
5338     hns3_nic_uninit_vector_data(priv);
5339 out_init_vector_data:
5340     hns3_nic_dealloc_vector_data(priv);
5341 out_alloc_vector_data:
5342     priv->ring = NULL;
5343 out_get_ring_cfg:
5344     priv->ae_handle = NULL;
5345     free_netdev(netdev);
5346     return ret;
5347 }
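/* The error ladder above unwinds strictly in reverse order of
 * initialization (netdev registration -> debugfs -> client start ->
 * IRQs -> PHY -> rings -> vectors -> ring config), the usual kernel
 * goto-unwind pattern: each label releases only what was acquired
 * before its jump site.
 */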
5348 
5349 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
5350 {
5351     struct net_device *netdev = handle->kinfo.netdev;
5352     struct hns3_nic_priv *priv = netdev_priv(netdev);
5353 
5354     if (netdev->reg_state != NETREG_UNINITIALIZED)
5355         unregister_netdev(netdev);
5356 
5357     hns3_client_stop(handle);
5358 
5359     hns3_uninit_phy(netdev);
5360 
5361     if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
5362         netdev_warn(netdev, "already uninitialized\n");
5363         goto out_netdev_free;
5364     }
5365 
5366     hns3_free_rx_cpu_rmap(netdev);
5367 
5368     hns3_nic_uninit_irq(priv);
5369 
5370     hns3_clear_all_ring(handle, true);
5371 
5372     hns3_nic_uninit_vector_data(priv);
5373 
5374     hns3_nic_dealloc_vector_data(priv);
5375 
5376     hns3_uninit_all_ring(priv);
5377 
5378     hns3_put_ring_config(priv);
5379 
5380 out_netdev_free:
5381     hns3_dbg_uninit(handle);
5382     free_netdev(netdev);
5383 }
5384 
5385 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
5386 {
5387     struct net_device *netdev = handle->kinfo.netdev;
5388 
5389     if (!netdev)
5390         return;
5391 
5392     if (linkup) {
5393         netif_tx_wake_all_queues(netdev);
5394         netif_carrier_on(netdev);
5395         if (netif_msg_link(handle))
5396             netdev_info(netdev, "link up\n");
5397     } else {
5398         netif_carrier_off(netdev);
5399         netif_tx_stop_all_queues(netdev);
5400         if (netif_msg_link(handle))
5401             netdev_info(netdev, "link down\n");
5402     }
5403 }
5404 
5405 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
5406 {
5407     while (ring->next_to_clean != ring->next_to_use) {
5408         ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
5409         hns3_free_buffer_detach(ring, ring->next_to_clean, 0);
5410         ring_ptr_move_fw(ring, next_to_clean);
5411     }
5412 
5413     ring->pending_buf = 0;
5414 }
5415 
5416 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
5417 {
5418     struct hns3_desc_cb res_cbs;
5419     int ret;
5420 
5421     while (ring->next_to_use != ring->next_to_clean) {
5422         /* When a buffer is not reused, its memory has been
5423          * freed in hns3_handle_rx_bd or will be freed by the
5424          * stack, so we need to replace the buffer here.
5425          */
5426         if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
5427             ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
5428             if (ret) {
5429                 hns3_ring_stats_update(ring, sw_err_cnt);
5430                 /* if allocating a new buffer fails, exit directly
5431                  * and re-clear it in the up flow.
5432                  */
5433                 netdev_warn(ring_to_netdev(ring),
5434                         "reserve buffer map failed, ret = %d\n",
5435                         ret);
5436                 return ret;
5437             }
5438             hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
5439         }
5440         ring_ptr_move_fw(ring, next_to_use);
5441     }
5442 
5443     /* Free the pending skb in rx ring */
5444     if (ring->skb) {
5445         dev_kfree_skb_any(ring->skb);
5446         ring->skb = NULL;
5447         ring->pending_buf = 0;
5448     }
5449 
5450     return 0;
5451 }
5452 
5453 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
5454 {
5455     while (ring->next_to_use != ring->next_to_clean) {
5456         /* When a buffer is not reused, its memory has been
5457          * freed in hns3_handle_rx_bd or will be freed by the
5458          * stack, so we only need to unmap the buffer here.
5459          */
5460         if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
5461             hns3_unmap_buffer(ring,
5462                       &ring->desc_cb[ring->next_to_use]);
5463             ring->desc_cb[ring->next_to_use].dma = 0;
5464         }
5465 
5466         ring_ptr_move_fw(ring, next_to_use);
5467     }
5468 }
5469 
5470 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
5471 {
5472     struct net_device *ndev = h->kinfo.netdev;
5473     struct hns3_nic_priv *priv = netdev_priv(ndev);
5474     u32 i;
5475 
5476     for (i = 0; i < h->kinfo.num_tqps; i++) {
5477         struct hns3_enet_ring *ring;
5478 
5479         ring = &priv->ring[i];
5480         hns3_clear_tx_ring(ring);
5481 
5482         ring = &priv->ring[i + h->kinfo.num_tqps];
5483         /* Continue to clear other rings even if clearing some
5484          * rings failed.
5485          */
5486         if (force)
5487             hns3_force_clear_rx_ring(ring);
5488         else
5489             hns3_clear_rx_ring(ring);
5490     }
5491 }
5492 
5493 int hns3_nic_reset_all_ring(struct hnae3_handle *h)
5494 {
5495     struct net_device *ndev = h->kinfo.netdev;
5496     struct hns3_nic_priv *priv = netdev_priv(ndev);
5497     struct hns3_enet_ring *rx_ring;
5498     int i, j;
5499     int ret;
5500 
5501     ret = h->ae_algo->ops->reset_queue(h);
5502     if (ret)
5503         return ret;
5504 
5505     for (i = 0; i < h->kinfo.num_tqps; i++) {
5506         hns3_init_ring_hw(&priv->ring[i]);
5507 
5508         /* We need to clear the tx ring here because the self test
5509          * uses the ring and does not run a down/up cycle first
5510          */
5511         hns3_clear_tx_ring(&priv->ring[i]);
5512         priv->ring[i].next_to_clean = 0;
5513         priv->ring[i].next_to_use = 0;
5514         priv->ring[i].last_to_use = 0;
5515 
5516         rx_ring = &priv->ring[i + h->kinfo.num_tqps];
5517         hns3_init_ring_hw(rx_ring);
5518         ret = hns3_clear_rx_ring(rx_ring);
5519         if (ret)
5520             return ret;
5521 
5522         /* We cannot know the hardware head and tail when this
5523          * function is called in the reset flow, so we reuse all descriptors.
5524          */
5525         for (j = 0; j < rx_ring->desc_num; j++)
5526             hns3_reuse_buffer(rx_ring, j);
5527 
5528         rx_ring->next_to_clean = 0;
5529         rx_ring->next_to_use = 0;
5530     }
5531 
5532     hns3_init_tx_ring_tc(priv);
5533 
5534     return 0;
5535 }
5536 
5537 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
5538 {
5539     struct hnae3_knic_private_info *kinfo = &handle->kinfo;
5540     struct net_device *ndev = kinfo->netdev;
5541     struct hns3_nic_priv *priv = netdev_priv(ndev);
5542 
5543     if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
5544         return 0;
5545 
5546     if (!netif_running(ndev))
5547         return 0;
5548 
5549     return hns3_nic_net_stop(ndev);
5550 }
5551 
5552 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
5553 {
5554     struct hnae3_knic_private_info *kinfo = &handle->kinfo;
5555     struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
5556     int ret = 0;
5557 
5558     if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
5559         netdev_err(kinfo->netdev, "device is not initialized yet\n");
5560         return -EFAULT;
5561     }
5562 
5563     clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
5564 
5565     if (netif_running(kinfo->netdev)) {
5566         ret = hns3_nic_net_open(kinfo->netdev);
5567         if (ret) {
5568             set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
5569             netdev_err(kinfo->netdev,
5570                    "net up fail, ret=%d!\n", ret);
5571             return ret;
5572         }
5573     }
5574 
5575     return ret;
5576 }
5577 
5578 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
5579 {
5580     struct net_device *netdev = handle->kinfo.netdev;
5581     struct hns3_nic_priv *priv = netdev_priv(netdev);
5582     int ret;
5583 
5584     /* Carrier off reporting is important to ethtool even BEFORE open */
5585     netif_carrier_off(netdev);
5586 
5587     ret = hns3_get_ring_config(priv);
5588     if (ret)
5589         return ret;
5590 
5591     ret = hns3_nic_alloc_vector_data(priv);
5592     if (ret)
5593         goto err_put_ring;
5594 
5595     ret = hns3_nic_init_vector_data(priv);
5596     if (ret)
5597         goto err_dealloc_vector;
5598 
5599     ret = hns3_init_all_ring(priv);
5600     if (ret)
5601         goto err_uninit_vector;
5602 
5603     hns3_cq_period_mode_init(priv, priv->tx_cqe_mode, priv->rx_cqe_mode);
5604 
5605     /* the device can work without cpu rmap, only aRFS needs it */
5606     ret = hns3_set_rx_cpu_rmap(netdev);
5607     if (ret)
5608         dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);
5609 
5610     ret = hns3_nic_init_irq(priv);
5611     if (ret) {
5612         dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
5613         hns3_free_rx_cpu_rmap(netdev);
5614         goto err_init_irq_fail;
5615     }
5616 
5617     if (!hns3_is_phys_func(handle->pdev))
5618         hns3_init_mac_addr(netdev);
5619 
5620     ret = hns3_client_start(handle);
5621     if (ret) {
5622         dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
5623         goto err_client_start_fail;
5624     }
5625 
5626     set_bit(HNS3_NIC_STATE_INITED, &priv->state);
5627 
5628     return ret;
5629 
5630 err_client_start_fail:
5631     hns3_free_rx_cpu_rmap(netdev);
5632     hns3_nic_uninit_irq(priv);
5633 err_init_irq_fail:
5634     hns3_uninit_all_ring(priv);
5635 err_uninit_vector:
5636     hns3_nic_uninit_vector_data(priv);
5637 err_dealloc_vector:
5638     hns3_nic_dealloc_vector_data(priv);
5639 err_put_ring:
5640     hns3_put_ring_config(priv);
5641 
5642     return ret;
5643 }
5644 
5645 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
5646 {
5647     struct net_device *netdev = handle->kinfo.netdev;
5648     struct hns3_nic_priv *priv = netdev_priv(netdev);
5649 
5650     if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
5651         netdev_warn(netdev, "already uninitialized\n");
5652         return 0;
5653     }
5654 
5655     hns3_free_rx_cpu_rmap(netdev);
5656     hns3_nic_uninit_irq(priv);
5657     hns3_clear_all_ring(handle, true);
5658     hns3_reset_tx_queue(priv->ae_handle);
5659 
5660     hns3_nic_uninit_vector_data(priv);
5661 
5662     hns3_nic_dealloc_vector_data(priv);
5663 
5664     hns3_uninit_all_ring(priv);
5665 
5666     hns3_put_ring_config(priv);
5667 
5668     return 0;
5669 }
5670 
5671 int hns3_reset_notify(struct hnae3_handle *handle,
5672               enum hnae3_reset_notify_type type)
5673 {
5674     int ret = 0;
5675 
5676     switch (type) {
5677     case HNAE3_UP_CLIENT:
5678         ret = hns3_reset_notify_up_enet(handle);
5679         break;
5680     case HNAE3_DOWN_CLIENT:
5681         ret = hns3_reset_notify_down_enet(handle);
5682         break;
5683     case HNAE3_INIT_CLIENT:
5684         ret = hns3_reset_notify_init_enet(handle);
5685         break;
5686     case HNAE3_UNINIT_CLIENT:
5687         ret = hns3_reset_notify_uninit_enet(handle);
5688         break;
5689     default:
5690         break;
5691     }
5692 
5693     return ret;
5694 }
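/* Typical full-reset sequence driven through this notifier, as used by
 * hns3_set_channels() below:
 *
 *    hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
 *    hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
 *    hns3_reset_notify(h, HNAE3_INIT_CLIENT);
 *    hns3_reset_notify(h, HNAE3_UP_CLIENT);
 */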
5695 
5696 static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num,
5697                 bool rxfh_configured)
5698 {
5699     int ret;
5700 
5701     ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num,
5702                          rxfh_configured);
5703     if (ret) {
5704         dev_err(&handle->pdev->dev,
5705             "Change tqp num(%u) fail.\n", new_tqp_num);
5706         return ret;
5707     }
5708 
5709     ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT);
5710     if (ret)
5711         return ret;
5712 
5713     ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT);
5714     if (ret)
5715         hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT);
5716 
5717     return ret;
5718 }
5719 
5720 int hns3_set_channels(struct net_device *netdev,
5721               struct ethtool_channels *ch)
5722 {
5723     struct hnae3_handle *h = hns3_get_handle(netdev);
5724     struct hnae3_knic_private_info *kinfo = &h->kinfo;
5725     bool rxfh_configured = netif_is_rxfh_configured(netdev);
5726     u32 new_tqp_num = ch->combined_count;
5727     u16 org_tqp_num;
5728     int ret;
5729 
5730     if (hns3_nic_resetting(netdev))
5731         return -EBUSY;
5732 
5733     if (ch->rx_count || ch->tx_count)
5734         return -EINVAL;
5735 
5736     if (kinfo->tc_info.mqprio_active) {
5737         dev_err(&netdev->dev,
5738             "it's not allowed to set channels via ethtool when MQPRIO mode is on\n");
5739         return -EINVAL;
5740     }
5741 
5742     if (new_tqp_num > hns3_get_max_available_channels(h) ||
5743         new_tqp_num < 1) {
5744         dev_err(&netdev->dev,
5745             "Change tqps fail, the tqp range is from 1 to %u",
5746             hns3_get_max_available_channels(h));
5747         return -EINVAL;
5748     }
5749 
5750     if (kinfo->rss_size == new_tqp_num)
5751         return 0;
5752 
5753     netif_dbg(h, drv, netdev,
5754           "set channels: tqp_num=%u, rxfh=%d\n",
5755           new_tqp_num, rxfh_configured);
5756 
5757     ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
5758     if (ret)
5759         return ret;
5760 
5761     ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
5762     if (ret)
5763         return ret;
5764 
5765     org_tqp_num = h->kinfo.num_tqps;
5766     ret = hns3_change_channels(h, new_tqp_num, rxfh_configured);
5767     if (ret) {
5768         int ret1;
5769 
5770         netdev_warn(netdev,
5771                 "Change channels fail, revert to old value\n");
5772         ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured);
5773         if (ret1) {
5774             netdev_err(netdev,
5775                    "revert to old channel fail\n");
5776             return ret1;
5777         }
5778 
5779         return ret;
5780     }
5781 
5782     return 0;
5783 }
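/* User-space entry point for the above (interface name assumed for
 * illustration):
 *
 *    ethtool -L eth0 combined 8
 *
 * requests 8 combined channels; separate rx/tx counts are rejected by
 * the ch->rx_count || ch->tx_count check, and a failed change is
 * reverted to the previous queue count.
 */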
5784 
5785 static const struct hns3_hw_error_info hns3_hw_err[] = {
5786     { .type = HNAE3_PPU_POISON_ERROR,
5787       .msg = "PPU poison" },
5788     { .type = HNAE3_CMDQ_ECC_ERROR,
5789       .msg = "IMP CMDQ error" },
5790     { .type = HNAE3_IMP_RD_POISON_ERROR,
5791       .msg = "IMP RD poison" },
5792     { .type = HNAE3_ROCEE_AXI_RESP_ERROR,
5793       .msg = "ROCEE AXI RESP error" },
5794 };
5795 
5796 static void hns3_process_hw_error(struct hnae3_handle *handle,
5797                   enum hnae3_hw_error_type type)
5798 {
5799     int i;
5800 
5801     for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
5802         if (hns3_hw_err[i].type == type) {
5803             dev_err(&handle->pdev->dev, "Detected %s!\n",
5804                 hns3_hw_err[i].msg);
5805             break;
5806         }
5807     }
5808 }
5809 
5810 static const struct hnae3_client_ops client_ops = {
5811     .init_instance = hns3_client_init,
5812     .uninit_instance = hns3_client_uninit,
5813     .link_status_change = hns3_link_status_change,
5814     .reset_notify = hns3_reset_notify,
5815     .process_hw_error = hns3_process_hw_error,
5816 };
5817 
5818 /* hns3_init_module - Driver registration routine
5819  * hns3_init_module is the first routine called when the driver is
5820  * loaded. It registers the hnae3 client and the PCI driver.
5821  */
5822 static int __init hns3_init_module(void)
5823 {
5824     int ret;
5825 
5826     pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
5827     pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
5828 
5829     client.type = HNAE3_CLIENT_KNIC;
5830     snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
5831          hns3_driver_name);
5832 
5833     client.ops = &client_ops;
5834 
5835     INIT_LIST_HEAD(&client.node);
5836 
5837     hns3_dbg_register_debugfs(hns3_driver_name);
5838 
5839     ret = hnae3_register_client(&client);
5840     if (ret)
5841         goto err_reg_client;
5842 
5843     ret = pci_register_driver(&hns3_driver);
5844     if (ret)
5845         goto err_reg_driver;
5846 
5847     return ret;
5848 
5849 err_reg_driver:
5850     hnae3_unregister_client(&client);
5851 err_reg_client:
5852     hns3_dbg_unregister_debugfs();
5853     return ret;
5854 }
5855 module_init(hns3_init_module);
5856 
5857 /* hns3_exit_module - Driver exit cleanup routine
5858  * hns3_exit_module is called just before the driver is removed
5859  * from memory.
5860  */
5861 static void __exit hns3_exit_module(void)
5862 {
5863     pci_unregister_driver(&hns3_driver);
5864     hnae3_unregister_client(&client);
5865     hns3_dbg_unregister_debugfs();
5866 }
5867 module_exit(hns3_exit_module);
5868 
5869 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
5870 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
5871 MODULE_LICENSE("GPL");
5872 MODULE_ALIAS("pci:hns-nic");