// SPDX-License-Identifier: GPL-2.0
/*
 * Keystone NetCP Core driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors: Sandeep Nair <sandeep_n@ti.com>
 *      Sandeep Paulraj <s-paulraj@ti.com>
 *      Cyril Chemparathy <cyril@ti.com>
 *      Santosh Shilimkar <santosh.shilimkar@ti.com>
 *      Murali Karicheri <m-karicheri2@ti.com>
 *      Wingman Kwok <w-kwok2@ti.com>
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/knav_qmss.h>
#include <linux/soc/ti/knav_dma.h>

#include "netcp.h"

#define NETCP_SOP_OFFSET    (NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_TX_TIMEOUT    (5 * HZ)
#define NETCP_PACKET_SIZE   (ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE   ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR    16

#define NETCP_EFUSE_REG_INDEX   0

#define NETCP_MOD_PROBE_SKIPPED 1
#define NETCP_MOD_PROBE_FAILED  2

#define NETCP_DEBUG (NETIF_MSG_HW   | NETIF_MSG_WOL     |   \
            NETIF_MSG_DRV   | NETIF_MSG_LINK    |   \
            NETIF_MSG_IFUP  | NETIF_MSG_INTR    |   \
            NETIF_MSG_PROBE | NETIF_MSG_TIMER   |   \
            NETIF_MSG_IFDOWN    | NETIF_MSG_RX_ERR  |   \
            NETIF_MSG_TX_ERR    | NETIF_MSG_TX_DONE |   \
            NETIF_MSG_PKTDATA   | NETIF_MSG_TX_QUEUED   |   \
            NETIF_MSG_RX_STATUS)

#define NETCP_EFUSE_ADDR_SWAP   2

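/* Thin wrappers around knav_queue_device_control(); the last argument is
 * unused by these commands, hence the (unsigned long)NULL.
 */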
#define knav_queue_get_id(q)    knav_queue_device_control(q, \
                KNAV_QUEUE_GET_ID, (unsigned long)NULL)

#define knav_queue_enable_notify(q) knav_queue_device_control(q,    \
                    KNAV_QUEUE_ENABLE_NOTIFY,   \
                    (unsigned long)NULL)

#define knav_queue_disable_notify(q) knav_queue_device_control(q,   \
                    KNAV_QUEUE_DISABLE_NOTIFY,  \
                    (unsigned long)NULL)

#define knav_queue_get_count(q) knav_queue_device_control(q, \
                KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)

#define for_each_netcp_module(module)           \
    list_for_each_entry(module, &netcp_modules, module_list)

#define for_each_netcp_device_module(netcp_device, inst_modpriv) \
    list_for_each_entry(inst_modpriv, \
        &((netcp_device)->modpriv_head), inst_list)

#define for_each_module(netcp, intf_modpriv)            \
    list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)

/* Module management structures */
struct netcp_device {
    struct list_head    device_list;
    struct list_head    interface_head;
    struct list_head    modpriv_head;
    struct device       *device;
};

struct netcp_inst_modpriv {
    struct netcp_device *netcp_device;
    struct netcp_module *netcp_module;
    struct list_head    inst_list;
    void            *module_priv;
};

struct netcp_intf_modpriv {
    struct netcp_intf   *netcp_priv;
    struct netcp_module *netcp_module;
    struct list_head    intf_list;
    void            *module_priv;
};

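/* Per-skb TX state, stored in skb->cb by netcp_tx_submit_skb() and used on
 * completion in netcp_process_tx_compl_packets() to deliver TX timestamps.
 */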
struct netcp_tx_cb {
    void    *ts_context;
    void    (*txtstamp)(void *context, struct sk_buff *skb);
};

static LIST_HEAD(netcp_devices);
static LIST_HEAD(netcp_modules);
static DEFINE_MUTEX(netcp_modules_lock);

static int netcp_debug_level = -1;
module_param(netcp_debug_level, int, 0);
MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");

/* Helper functions - Get/Set */
static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
             struct knav_dma_desc *desc)
{
    *buff_len = le32_to_cpu(desc->buff_len);
    *buff = le32_to_cpu(desc->buff);
    *ndesc = le32_to_cpu(desc->next_desc);
}

static void get_desc_info(u32 *desc_info, u32 *pkt_info,
              struct knav_dma_desc *desc)
{
    *desc_info = le32_to_cpu(desc->desc_info);
    *pkt_info = le32_to_cpu(desc->packet_info);
}

static u32 get_sw_data(int index, struct knav_dma_desc *desc)
{
    /* No Endian conversion needed as this data is untouched by hw */
    return desc->sw_data[index];
}

/* use these macros to get sw data */
#define GET_SW_DATA0(desc) get_sw_data(0, desc)
#define GET_SW_DATA1(desc) get_sw_data(1, desc)
#define GET_SW_DATA2(desc) get_sw_data(2, desc)
#define GET_SW_DATA3(desc) get_sw_data(3, desc)

static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
                 struct knav_dma_desc *desc)
{
    *buff = le32_to_cpu(desc->orig_buff);
    *buff_len = le32_to_cpu(desc->orig_len);
}

static void get_words(dma_addr_t *words, int num_words, __le32 *desc)
{
    int i;

    for (i = 0; i < num_words; i++)
        words[i] = le32_to_cpu(desc[i]);
}

static void set_pkt_info(dma_addr_t buff, u32 buff_len, u32 ndesc,
             struct knav_dma_desc *desc)
{
    desc->buff_len = cpu_to_le32(buff_len);
    desc->buff = cpu_to_le32(buff);
    desc->next_desc = cpu_to_le32(ndesc);
}

static void set_desc_info(u32 desc_info, u32 pkt_info,
              struct knav_dma_desc *desc)
{
    desc->desc_info = cpu_to_le32(desc_info);
    desc->packet_info = cpu_to_le32(pkt_info);
}

static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
{
    /* No Endian conversion needed as this data is untouched by hw */
    desc->sw_data[index] = data;
}

/* use these macros to set sw data */
#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)

static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
                 struct knav_dma_desc *desc)
{
    desc->orig_buff = cpu_to_le32(buff);
    desc->orig_len = cpu_to_le32(buff_len);
}

static void set_words(u32 *words, int num_words, __le32 *desc)
{
    int i;

    for (i = 0; i < num_words; i++)
        desc[i] = cpu_to_le32(words[i]);
}

/* Read the e-fuse value as 32 bit values to be endian independent */
static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
{
    unsigned int addr0, addr1;

    addr1 = readl(efuse_mac + 4);
    addr0 = readl(efuse_mac);

    switch (swap) {
    case NETCP_EFUSE_ADDR_SWAP:
        addr0 = addr1;
        addr1 = readl(efuse_mac);
        break;
    default:
        break;
    }

    x[0] = (addr1 & 0x0000ff00) >> 8;
    x[1] = addr1 & 0x000000ff;
    x[2] = (addr0 & 0xff000000) >> 24;
    x[3] = (addr0 & 0x00ff0000) >> 16;
    x[4] = (addr0 & 0x0000ff00) >> 8;
    x[5] = addr0 & 0x000000ff;

    return 0;
}

/* Module management routines */
static int netcp_register_interface(struct netcp_intf *netcp)
{
    int ret;

    ret = register_netdev(netcp->ndev);
    if (!ret)
        netcp->netdev_registered = true;
    return ret;
}

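/* Probe a module against a NetCP device: find the matching child node in
 * the "netcp-devices" DT subtree, call the module's probe, and, once a
 * primary module is present, attach the module to each interface and
 * register any not-yet-registered net devices.
 */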
static int netcp_module_probe(struct netcp_device *netcp_device,
                  struct netcp_module *module)
{
    struct device *dev = netcp_device->device;
    struct device_node *devices, *interface, *node = dev->of_node;
    struct device_node *child;
    struct netcp_inst_modpriv *inst_modpriv;
    struct netcp_intf *netcp_intf;
    struct netcp_module *tmp;
    bool primary_module_registered = false;
    int ret;

    /* Find this module in the sub-tree for this device */
    devices = of_get_child_by_name(node, "netcp-devices");
    if (!devices) {
        dev_err(dev, "could not find netcp-devices node\n");
        return NETCP_MOD_PROBE_SKIPPED;
    }

    for_each_available_child_of_node(devices, child) {
        const char *name;
        char node_name[32];

        if (of_property_read_string(child, "label", &name) < 0) {
            snprintf(node_name, sizeof(node_name), "%pOFn", child);
            name = node_name;
        }
        if (!strcasecmp(module->name, name))
            break;
    }

    of_node_put(devices);
    /* If module not used for this device, skip it */
    if (!child) {
        dev_warn(dev, "module(%s) not used for device\n", module->name);
        return NETCP_MOD_PROBE_SKIPPED;
    }

    inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
    if (!inst_modpriv) {
        of_node_put(child);
        return -ENOMEM;
    }

    inst_modpriv->netcp_device = netcp_device;
    inst_modpriv->netcp_module = module;
    list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);

    ret = module->probe(netcp_device, dev, child,
                &inst_modpriv->module_priv);
    of_node_put(child);
    if (ret) {
        dev_err(dev, "Probe of module(%s) failed with %d\n",
            module->name, ret);
        list_del(&inst_modpriv->inst_list);
        devm_kfree(dev, inst_modpriv);
        return NETCP_MOD_PROBE_FAILED;
    }

    /* Attach modules only if the primary module is probed */
    for_each_netcp_module(tmp) {
        if (tmp->primary)
            primary_module_registered = true;
    }

    if (!primary_module_registered)
        return 0;

    /* Attach module to interfaces */
    list_for_each_entry(netcp_intf, &netcp_device->interface_head,
                interface_list) {
        struct netcp_intf_modpriv *intf_modpriv;

        intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
                        GFP_KERNEL);
        if (!intf_modpriv)
            return -ENOMEM;

        interface = of_parse_phandle(netcp_intf->node_interface,
                         module->name, 0);

        if (!interface) {
            devm_kfree(dev, intf_modpriv);
            continue;
        }

        intf_modpriv->netcp_priv = netcp_intf;
        intf_modpriv->netcp_module = module;
        list_add_tail(&intf_modpriv->intf_list,
                  &netcp_intf->module_head);

        ret = module->attach(inst_modpriv->module_priv,
                     netcp_intf->ndev, interface,
                     &intf_modpriv->module_priv);
        of_node_put(interface);
        if (ret) {
            dev_dbg(dev, "Attach of module %s declined with %d\n",
                module->name, ret);
            list_del(&intf_modpriv->intf_list);
            devm_kfree(dev, intf_modpriv);
            continue;
        }
    }

    /* Now register the interface with netdev */
    list_for_each_entry(netcp_intf,
                &netcp_device->interface_head,
                interface_list) {
        /* If interface not registered then register now */
        if (!netcp_intf->netdev_registered) {
            ret = netcp_register_interface(netcp_intf);
            if (ret)
                return -ENODEV;
        }
    }
    return 0;
}

int netcp_register_module(struct netcp_module *module)
{
    struct netcp_device *netcp_device;
    struct netcp_module *tmp;
    int ret;

    if (!module->name) {
        WARN(1, "error registering netcp module: no name\n");
        return -EINVAL;
    }

    if (!module->probe) {
        WARN(1, "error registering netcp module: no probe\n");
        return -EINVAL;
    }

    mutex_lock(&netcp_modules_lock);

    for_each_netcp_module(tmp) {
        if (!strcasecmp(tmp->name, module->name)) {
            mutex_unlock(&netcp_modules_lock);
            return -EEXIST;
        }
    }
    list_add_tail(&module->module_list, &netcp_modules);

    list_for_each_entry(netcp_device, &netcp_devices, device_list) {
        ret = netcp_module_probe(netcp_device, module);
        if (ret < 0)
            goto fail;
    }
    mutex_unlock(&netcp_modules_lock);
    return 0;

fail:
    mutex_unlock(&netcp_modules_lock);
    netcp_unregister_module(module);
    return ret;
}
EXPORT_SYMBOL_GPL(netcp_register_module);
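/* A minimal registration sketch for a NetCP module such as the GBE switch
 * driver (names below are illustrative; see struct netcp_module in netcp.h
 * for the full set of fields and callbacks):
 *
 *    static struct netcp_module gbe_module = {
 *        .name    = "netcp-gbe",
 *        .primary = true,
 *        .probe   = gbe_probe,
 *    };
 *    netcp_register_module(&gbe_module);
 */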

static void netcp_release_module(struct netcp_device *netcp_device,
                 struct netcp_module *module)
{
    struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
    struct netcp_intf *netcp_intf, *netcp_tmp;
    struct device *dev = netcp_device->device;

    /* Release the module from each interface */
    list_for_each_entry_safe(netcp_intf, netcp_tmp,
                 &netcp_device->interface_head,
                 interface_list) {
        struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;

        list_for_each_entry_safe(intf_modpriv, intf_tmp,
                     &netcp_intf->module_head,
                     intf_list) {
            if (intf_modpriv->netcp_module == module) {
                module->release(intf_modpriv->module_priv);
                list_del(&intf_modpriv->intf_list);
                devm_kfree(dev, intf_modpriv);
                break;
            }
        }
    }

    /* Remove the module from each instance */
    list_for_each_entry_safe(inst_modpriv, inst_tmp,
                 &netcp_device->modpriv_head, inst_list) {
        if (inst_modpriv->netcp_module == module) {
            module->remove(netcp_device,
                       inst_modpriv->module_priv);
            list_del(&inst_modpriv->inst_list);
            devm_kfree(dev, inst_modpriv);
            break;
        }
    }
}

void netcp_unregister_module(struct netcp_module *module)
{
    struct netcp_device *netcp_device;
    struct netcp_module *module_tmp;

    mutex_lock(&netcp_modules_lock);

    list_for_each_entry(netcp_device, &netcp_devices, device_list) {
        netcp_release_module(netcp_device, module);
    }

    /* Remove the module from the module list */
    for_each_netcp_module(module_tmp) {
        if (module == module_tmp) {
            list_del(&module->module_list);
            break;
        }
    }

    mutex_unlock(&netcp_modules_lock);
}
EXPORT_SYMBOL_GPL(netcp_unregister_module);

void *netcp_module_get_intf_data(struct netcp_module *module,
                 struct netcp_intf *intf)
{
    struct netcp_intf_modpriv *intf_modpriv;

    list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
        if (intf_modpriv->netcp_module == module)
            return intf_modpriv->module_priv;
    return NULL;
}
EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);

/* Module TX and RX Hook management */
struct netcp_hook_list {
    struct list_head     list;
    netcp_hook_rtn      *hook_rtn;
    void            *hook_data;
    int          order;
};

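/* Hook lists are kept sorted by ascending 'order': registration walks to
 * the first entry with a higher order and inserts the new hook before it
 * via __list_add(), so lower-order hooks run first on each packet.
 */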
int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
              netcp_hook_rtn *hook_rtn, void *hook_data)
{
    struct netcp_hook_list *entry;
    struct netcp_hook_list *next;
    unsigned long flags;

    entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
    if (!entry)
        return -ENOMEM;

    entry->hook_rtn  = hook_rtn;
    entry->hook_data = hook_data;
    entry->order     = order;

    spin_lock_irqsave(&netcp_priv->lock, flags);
    list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
        if (next->order > order)
            break;
    }
    __list_add(&entry->list, next->list.prev, &next->list);
    spin_unlock_irqrestore(&netcp_priv->lock, flags);

    return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_txhook);

int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
                netcp_hook_rtn *hook_rtn, void *hook_data)
{
    struct netcp_hook_list *next, *n;
    unsigned long flags;

    spin_lock_irqsave(&netcp_priv->lock, flags);
    list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
        if ((next->order     == order) &&
            (next->hook_rtn  == hook_rtn) &&
            (next->hook_data == hook_data)) {
            list_del(&next->list);
            spin_unlock_irqrestore(&netcp_priv->lock, flags);
            devm_kfree(netcp_priv->dev, next);
            return 0;
        }
    }
    spin_unlock_irqrestore(&netcp_priv->lock, flags);
    return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_txhook);

int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
              netcp_hook_rtn *hook_rtn, void *hook_data)
{
    struct netcp_hook_list *entry;
    struct netcp_hook_list *next;
    unsigned long flags;

    entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
    if (!entry)
        return -ENOMEM;

    entry->hook_rtn  = hook_rtn;
    entry->hook_data = hook_data;
    entry->order     = order;

    spin_lock_irqsave(&netcp_priv->lock, flags);
    list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
        if (next->order > order)
            break;
    }
    __list_add(&entry->list, next->list.prev, &next->list);
    spin_unlock_irqrestore(&netcp_priv->lock, flags);

    return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_rxhook);

int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
                netcp_hook_rtn *hook_rtn, void *hook_data)
{
    struct netcp_hook_list *next, *n;
    unsigned long flags;

    spin_lock_irqsave(&netcp_priv->lock, flags);
    list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
        if ((next->order     == order) &&
            (next->hook_rtn  == hook_rtn) &&
            (next->hook_data == hook_data)) {
            list_del(&next->list);
            spin_unlock_irqrestore(&netcp_priv->lock, flags);
            devm_kfree(netcp_priv->dev, next);
            return 0;
        }
    }
    spin_unlock_irqrestore(&netcp_priv->lock, flags);

    return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_rxhook);

static void netcp_frag_free(bool is_frag, void *ptr)
{
    if (is_frag)
        skb_free_frag(ptr);
    else
        kfree(ptr);
}

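/* Walk an Rx descriptor chain via next_desc, unmapping and freeing the page
 * attached to each fragment descriptor, then free the primary buffer and
 * return the head descriptor to the pool.
 */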
static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
                     struct knav_dma_desc *desc)
{
    struct knav_dma_desc *ndesc;
    dma_addr_t dma_desc, dma_buf;
    unsigned int buf_len, dma_sz = sizeof(*ndesc);
    void *buf_ptr;
    u32 tmp;

    get_words(&dma_desc, 1, &desc->next_desc);

    while (dma_desc) {
        ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
        if (unlikely(!ndesc)) {
            dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
            break;
        }
        get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
        /* warning!!!! We are retrieving the virtual ptr in the sw_data
         * field as a 32bit value. Will not work on 64bit machines
         */
        buf_ptr = (void *)GET_SW_DATA0(ndesc);
        buf_len = (int)GET_SW_DATA1(ndesc);
        dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
        __free_page(buf_ptr);
        knav_pool_desc_put(netcp->rx_pool, ndesc);
    }
    /* warning!!!! We are retrieving the virtual ptr in the sw_data
     * field as a 32bit value. Will not work on 64bit machines
     */
    buf_ptr = (void *)GET_SW_DATA0(desc);
    buf_len = (int)GET_SW_DATA1(desc);

    if (buf_ptr)
        netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
    knav_pool_desc_put(netcp->rx_pool, desc);
}

static void netcp_empty_rx_queue(struct netcp_intf *netcp)
{
    struct netcp_stats *rx_stats = &netcp->stats;
    struct knav_dma_desc *desc;
    unsigned int dma_sz;
    dma_addr_t dma;

    for (; ;) {
        dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
        if (!dma)
            break;

        desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
        if (unlikely(!desc)) {
            dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
                __func__);
            rx_stats->rx_errors++;
            continue;
        }
        netcp_free_rx_desc_chain(netcp, desc);
        rx_stats->rx_dropped++;
    }
}

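/* Pop one completed Rx descriptor chain, build an sk_buff from the primary
 * buffer plus any page fragments, run the registered RX hooks, and hand the
 * packet to the stack. Returns non-zero only when the queue is empty.
 */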
static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
{
    struct netcp_stats *rx_stats = &netcp->stats;
    unsigned int dma_sz, buf_len, org_buf_len;
    struct knav_dma_desc *desc, *ndesc;
    unsigned int pkt_sz = 0, accum_sz;
    struct netcp_hook_list *rx_hook;
    dma_addr_t dma_desc, dma_buff;
    struct netcp_packet p_info;
    struct sk_buff *skb;
    void *org_buf_ptr;
    u32 tmp;

    dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
    if (!dma_desc)
        return -1;

    desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
    if (unlikely(!desc)) {
        dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
        return 0;
    }

    get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
    /* warning!!!! We are retrieving the virtual ptr in the sw_data
     * field as a 32bit value. Will not work on 64bit machines
     */
    org_buf_ptr = (void *)GET_SW_DATA0(desc);
    org_buf_len = (int)GET_SW_DATA1(desc);

    if (unlikely(!org_buf_ptr)) {
        dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
        goto free_desc;
    }

    pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK;
    accum_sz = buf_len;
    dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);

    /* Build a new sk_buff for the primary buffer */
    skb = build_skb(org_buf_ptr, org_buf_len);
    if (unlikely(!skb)) {
        dev_err(netcp->ndev_dev, "build_skb() failed\n");
        goto free_desc;
    }

    /* update data, tail and len */
    skb_reserve(skb, NETCP_SOP_OFFSET);
    __skb_put(skb, buf_len);

    /* Fill in the page fragment list */
    while (dma_desc) {
        struct page *page;

        ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
        if (unlikely(!ndesc)) {
            dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
            goto free_desc;
        }

        get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
        /* warning!!!! We are retrieving the virtual ptr in the sw_data
         * field as a 32bit value. Will not work on 64bit machines
         */
        page = (struct page *)GET_SW_DATA0(ndesc);

        if (likely(dma_buff && buf_len && page)) {
            dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
                       DMA_FROM_DEVICE);
        } else {
            dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%pad), len(%d), page(%p)\n",
                &dma_buff, buf_len, page);
            goto free_desc;
        }

        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                offset_in_page(dma_buff), buf_len, PAGE_SIZE);
        accum_sz += buf_len;

        /* Free the descriptor */
        knav_pool_desc_put(netcp->rx_pool, ndesc);
    }

    /* check for packet len and warn */
    if (unlikely(pkt_sz != accum_sz))
        dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
            pkt_sz, accum_sz);

    /* Newer version of the Ethernet switch can trim the Ethernet FCS
     * from the packet and is indicated in hw_cap. So trim it only for
     * older h/w
     */
    if (!(netcp->hw_cap & ETH_SW_CAN_REMOVE_ETH_FCS))
        __pskb_trim(skb, skb->len - ETH_FCS_LEN);

    /* Call each of the RX hooks */
    p_info.skb = skb;
    skb->dev = netcp->ndev;
    p_info.rxtstamp_complete = false;
    get_desc_info(&tmp, &p_info.eflags, desc);
    p_info.epib = desc->epib;
    p_info.psdata = (u32 __force *)desc->psdata;
    p_info.eflags = ((p_info.eflags >> KNAV_DMA_DESC_EFLAGS_SHIFT) &
             KNAV_DMA_DESC_EFLAGS_MASK);
    list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
        int ret;

        ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
                    &p_info);
        if (unlikely(ret)) {
            dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
                rx_hook->order, ret);
            /* Free the primary descriptor */
            rx_stats->rx_dropped++;
            knav_pool_desc_put(netcp->rx_pool, desc);
            dev_kfree_skb(skb);
            return 0;
        }
    }
    /* Free the primary descriptor */
    knav_pool_desc_put(netcp->rx_pool, desc);

    u64_stats_update_begin(&rx_stats->syncp_rx);
    rx_stats->rx_packets++;
    rx_stats->rx_bytes += skb->len;
    u64_stats_update_end(&rx_stats->syncp_rx);

    /* push skb up the stack */
    skb->protocol = eth_type_trans(skb, netcp->ndev);
    netif_receive_skb(skb);
    return 0;

free_desc:
    netcp_free_rx_desc_chain(netcp, desc);
    rx_stats->rx_errors++;
    return 0;
}

static int netcp_process_rx_packets(struct netcp_intf *netcp,
                    unsigned int budget)
{
    int i;

    for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
        ;
    return i;
}

/* Release descriptors and attached buffers from Rx FDQ */
static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
{
    struct knav_dma_desc *desc;
    unsigned int buf_len, dma_sz;
    dma_addr_t dma;
    void *buf_ptr;

    /* Drain the FDQ, unmapping and freeing each descriptor's buffer */
    while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
        desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
        if (unlikely(!desc)) {
            dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
            continue;
        }

        get_org_pkt_info(&dma, &buf_len, desc);
        /* warning!!!! We are retrieving the virtual ptr in the sw_data
         * field as a 32bit value. Will not work on 64bit machines
         */
        buf_ptr = (void *)GET_SW_DATA0(desc);

        if (unlikely(!dma)) {
            dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
            knav_pool_desc_put(netcp->rx_pool, desc);
            continue;
        }

        if (unlikely(!buf_ptr)) {
            dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
            knav_pool_desc_put(netcp->rx_pool, desc);
            continue;
        }

        if (fdq == 0) {
            dma_unmap_single(netcp->dev, dma, buf_len,
                     DMA_FROM_DEVICE);
            netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
        } else {
            dma_unmap_page(netcp->dev, dma, buf_len,
                       DMA_FROM_DEVICE);
            __free_page(buf_ptr);
        }

        knav_pool_desc_put(netcp->rx_pool, desc);
    }
}

static void netcp_rxpool_free(struct netcp_intf *netcp)
{
    int i;

    for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
         !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
        netcp_free_rx_buf(netcp, i);

    if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
        dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
            netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));

    knav_pool_destroy(netcp->rx_pool);
    netcp->rx_pool = NULL;
}

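/* Allocate one Rx buffer plus descriptor and push it to the given FDQ.
 * FDQ 0 supplies the primary (header) buffer allocated with
 * netdev_alloc_frag(); the remaining FDQs supply full pages for the
 * fragment list.
 */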
static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
{
    struct knav_dma_desc *hwdesc;
    unsigned int buf_len, dma_sz;
    u32 desc_info, pkt_info;
    struct page *page;
    dma_addr_t dma;
    void *bufptr;
    u32 sw_data[2];

    /* Allocate descriptor */
    hwdesc = knav_pool_desc_get(netcp->rx_pool);
    if (IS_ERR_OR_NULL(hwdesc)) {
        dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
        return -ENOMEM;
    }

    if (likely(fdq == 0)) {
        unsigned int primary_buf_len;
        /* Allocate a primary receive queue entry */
        buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
        primary_buf_len = SKB_DATA_ALIGN(buf_len) +
                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        bufptr = netdev_alloc_frag(primary_buf_len);
        sw_data[1] = primary_buf_len;

        if (unlikely(!bufptr)) {
            dev_warn_ratelimited(netcp->ndev_dev,
                         "Primary RX buffer alloc failed\n");
            goto fail;
        }
        dma = dma_map_single(netcp->dev, bufptr, buf_len,
                     DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(netcp->dev, dma)))
            goto fail;

        /* warning!!!! We are saving the virtual ptr in the sw_data
         * field as a 32bit value. Will not work on 64bit machines
         */
        sw_data[0] = (u32)bufptr;
    } else {
        /* Allocate a secondary receive queue entry */
        page = alloc_page(GFP_ATOMIC | GFP_DMA);
        if (unlikely(!page)) {
            dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
            goto fail;
        }
        buf_len = PAGE_SIZE;
        dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
        /* warning!!!! We are saving the virtual ptr in the sw_data
         * field as a 32bit value. Will not work on 64bit machines
         */
        sw_data[0] = (u32)page;
        sw_data[1] = 0;
    }

    desc_info =  KNAV_DMA_DESC_PS_INFO_IN_DESC;
    desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
    pkt_info =  KNAV_DMA_DESC_HAS_EPIB;
    pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
    pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
            KNAV_DMA_DESC_RETQ_SHIFT;
    set_org_pkt_info(dma, buf_len, hwdesc);
    SET_SW_DATA0(sw_data[0], hwdesc);
    SET_SW_DATA1(sw_data[1], hwdesc);
    set_desc_info(desc_info, pkt_info, hwdesc);

    /* Push to FDQs */
    knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
               &dma_sz);
    knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
    return 0;

fail:
    knav_pool_desc_put(netcp->rx_pool, hwdesc);
    return -ENOMEM;
}

/* Refill Rx FDQ with descriptors & attached buffers */
static void netcp_rxpool_refill(struct netcp_intf *netcp)
{
    u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
    int i, ret = 0;

    /* Calculate the FDQ deficit and refill */
    for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
        fdq_deficit[i] = netcp->rx_queue_depths[i] -
                 knav_queue_get_count(netcp->rx_fdq[i]);

        while (fdq_deficit[i]-- && !ret)
            ret = netcp_allocate_rx_buf(netcp, i);
    } /* end for fdqs */
}

/* NAPI poll */
static int netcp_rx_poll(struct napi_struct *napi, int budget)
{
    struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
                        rx_napi);
    unsigned int packets;

    packets = netcp_process_rx_packets(netcp, budget);

    netcp_rxpool_refill(netcp);
    if (packets < budget) {
        napi_complete_done(&netcp->rx_napi, packets);
        knav_queue_enable_notify(netcp->rx_queue);
    }

    return packets;
}

static void netcp_rx_notify(void *arg)
{
    struct netcp_intf *netcp = arg;

    knav_queue_disable_notify(netcp->rx_queue);
    napi_schedule(&netcp->rx_napi);
}

static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
                     struct knav_dma_desc *desc,
                     unsigned int desc_sz)
{
    struct knav_dma_desc *ndesc = desc;
    dma_addr_t dma_desc, dma_buf;
    unsigned int buf_len;

    while (ndesc) {
        get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);

        if (dma_buf && buf_len)
            dma_unmap_single(netcp->dev, dma_buf, buf_len,
                     DMA_TO_DEVICE);
        else
            dev_warn(netcp->ndev_dev, "bad Tx desc buf(%pad), len(%d)\n",
                 &dma_buf, buf_len);

        knav_pool_desc_put(netcp->tx_pool, ndesc);
        ndesc = NULL;
        if (dma_desc) {
            ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
                             desc_sz);
            if (!ndesc)
                dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
        }
    }
}

static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
                      unsigned int budget)
{
    struct netcp_stats *tx_stats = &netcp->stats;
    struct knav_dma_desc *desc;
    struct netcp_tx_cb *tx_cb;
    struct sk_buff *skb;
    unsigned int dma_sz;
    dma_addr_t dma;
    int pkts = 0;

    while (budget--) {
        dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
        if (!dma)
            break;
        desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
        if (unlikely(!desc)) {
            dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
            tx_stats->tx_errors++;
            continue;
        }

        /* warning!!!! We are retrieving the virtual ptr in the sw_data
         * field as a 32bit value. Will not work on 64bit machines
         */
        skb = (struct sk_buff *)GET_SW_DATA0(desc);
        netcp_free_tx_desc_chain(netcp, desc, dma_sz);
        if (!skb) {
            dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
            tx_stats->tx_errors++;
            continue;
        }

        tx_cb = (struct netcp_tx_cb *)skb->cb;
        if (tx_cb->txtstamp)
            tx_cb->txtstamp(tx_cb->ts_context, skb);

        if (netif_subqueue_stopped(netcp->ndev, skb) &&
            netif_running(netcp->ndev) &&
            (knav_pool_count(netcp->tx_pool) >
            netcp->tx_resume_threshold)) {
            u16 subqueue = skb_get_queue_mapping(skb);

            netif_wake_subqueue(netcp->ndev, subqueue);
        }

        u64_stats_update_begin(&tx_stats->syncp_tx);
        tx_stats->tx_packets++;
        tx_stats->tx_bytes += skb->len;
        u64_stats_update_end(&tx_stats->syncp_tx);
        dev_kfree_skb(skb);
        pkts++;
    }
    return pkts;
}

static int netcp_tx_poll(struct napi_struct *napi, int budget)
{
    int packets;
    struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
                        tx_napi);

    packets = netcp_process_tx_compl_packets(netcp, budget);
    if (packets < budget) {
        napi_complete(&netcp->tx_napi);
        knav_queue_enable_notify(netcp->tx_compl_q);
    }

    return packets;
}

static void netcp_tx_notify(void *arg)
{
    struct netcp_intf *netcp = arg;

    knav_queue_disable_notify(netcp->tx_compl_q);
    napi_schedule(&netcp->tx_napi);
}

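/* Map an skb into a chain of Tx host descriptors: one for the linear area
 * and one per page fragment, linked through next_desc. Returns the head
 * descriptor, or NULL on mapping/allocation failure.
 */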
static struct knav_dma_desc*
netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
{
    struct knav_dma_desc *desc, *ndesc, *pdesc;
    unsigned int pkt_len = skb_headlen(skb);
    struct device *dev = netcp->dev;
    dma_addr_t dma_addr;
    unsigned int dma_sz;
    int i;

    /* Map the linear buffer */
    dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
    if (unlikely(dma_mapping_error(dev, dma_addr))) {
        dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
        return NULL;
    }

    desc = knav_pool_desc_get(netcp->tx_pool);
    if (IS_ERR_OR_NULL(desc)) {
        dev_err(netcp->ndev_dev, "out of TX desc\n");
        dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
        return NULL;
    }

    set_pkt_info(dma_addr, pkt_len, 0, desc);
    if (skb_is_nonlinear(skb)) {
        prefetchw(skb_shinfo(skb));
    } else {
        desc->next_desc = 0;
        goto upd_pkt_len;
    }

    pdesc = desc;

    /* Handle the case where skb is fragmented in pages */
    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
        struct page *page = skb_frag_page(frag);
        u32 page_offset = skb_frag_off(frag);
        u32 buf_len = skb_frag_size(frag);
        dma_addr_t desc_dma;
        u32 desc_dma_32;

        dma_addr = dma_map_page(dev, page, page_offset, buf_len,
                    DMA_TO_DEVICE);
        if (unlikely(!dma_addr)) {
            dev_err(netcp->ndev_dev, "Failed to map skb page\n");
            goto free_descs;
        }

        ndesc = knav_pool_desc_get(netcp->tx_pool);
        if (IS_ERR_OR_NULL(ndesc)) {
            dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
            dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
            goto free_descs;
        }

        desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc);
        set_pkt_info(dma_addr, buf_len, 0, ndesc);
        desc_dma_32 = (u32)desc_dma;
        set_words(&desc_dma_32, 1, &pdesc->next_desc);
        pkt_len += buf_len;
        if (pdesc != desc)
            knav_pool_desc_map(netcp->tx_pool, pdesc,
                       sizeof(*pdesc), &desc_dma, &dma_sz);
        pdesc = ndesc;
    }
    if (pdesc != desc)
        knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
                   &dma_addr, &dma_sz);

    /* frag list based linkage is not supported for now. */
    if (skb_shinfo(skb)->frag_list) {
        dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
        goto free_descs;
    }

upd_pkt_len:
    WARN_ON(pkt_len != skb->len);

    pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
    set_words(&pkt_len, 1, &desc->desc_info);
    return desc;

free_descs:
    netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
    return NULL;
}

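/* Run the TX hooks so a module can claim the packet and select a tx_pipe,
 * fill in psdata/epib and the completion-queue info, then map the descriptor
 * and push it onto the pipe's hardware queue.
 */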
static int netcp_tx_submit_skb(struct netcp_intf *netcp,
                   struct sk_buff *skb,
                   struct knav_dma_desc *desc)
{
    struct netcp_tx_pipe *tx_pipe = NULL;
    struct netcp_hook_list *tx_hook;
    struct netcp_packet p_info;
    struct netcp_tx_cb *tx_cb;
    unsigned int dma_sz;
    dma_addr_t dma;
    u32 tmp = 0;
    int ret = 0;

    p_info.netcp = netcp;
    p_info.skb = skb;
    p_info.tx_pipe = NULL;
    p_info.psdata_len = 0;
    p_info.ts_context = NULL;
    p_info.txtstamp = NULL;
    p_info.epib = desc->epib;
    p_info.psdata = (u32 __force *)desc->psdata;
    memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(__le32));

    /* Find out where to inject the packet for transmission */
    list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
        ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
                    &p_info);
        if (unlikely(ret != 0)) {
            dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
                tx_hook->order, ret);
            ret = (ret < 0) ? ret : NETDEV_TX_OK;
            goto out;
        }
    }

    /* Make sure some TX hook claimed the packet */
    tx_pipe = p_info.tx_pipe;
    if (!tx_pipe) {
        dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
        ret = -ENXIO;
        goto out;
    }

    tx_cb = (struct netcp_tx_cb *)skb->cb;
    tx_cb->ts_context = p_info.ts_context;
    tx_cb->txtstamp = p_info.txtstamp;

    /* update descriptor */
    if (p_info.psdata_len) {
        /* psdata points to both native-endian and device-endian data */
        __le32 *psdata = (void __force *)p_info.psdata;

        set_words((u32 *)psdata +
              (KNAV_DMA_NUM_PS_WORDS - p_info.psdata_len),
              p_info.psdata_len, psdata);
        tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
            KNAV_DMA_DESC_PSLEN_SHIFT;
    }

    tmp |= KNAV_DMA_DESC_HAS_EPIB |
        ((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
        KNAV_DMA_DESC_RETQ_SHIFT);

    if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
        tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
            KNAV_DMA_DESC_PSFLAG_SHIFT);
    }

    set_words(&tmp, 1, &desc->packet_info);
    /* warning!!!! We are saving the virtual ptr in the sw_data
     * field as a 32bit value. Will not work on 64bit machines
     */
    SET_SW_DATA0((u32)skb, desc);

    if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
        tmp = tx_pipe->switch_to_port;
        set_words(&tmp, 1, &desc->tag_info);
    }

    /* submit packet descriptor */
    ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
                 &dma_sz);
    if (unlikely(ret)) {
        dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
        ret = -ENOMEM;
        goto out;
    }
    skb_tx_timestamp(skb);
    knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);

out:
    return ret;
}

/* Submit the packet */
static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
    struct netcp_intf *netcp = netdev_priv(ndev);
    struct netcp_stats *tx_stats = &netcp->stats;
    int subqueue = skb_get_queue_mapping(skb);
    struct knav_dma_desc *desc;
    int desc_count, ret = 0;

    if (unlikely(skb->len <= 0)) {
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
    }

    if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
        ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
        if (ret < 0) {
            /* If we get here, the skb has already been dropped */
            dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
                 ret);
            tx_stats->tx_dropped++;
            return ret;
        }
        skb->len = NETCP_MIN_PACKET_SIZE;
    }

    desc = netcp_tx_map_skb(skb, netcp);
    if (unlikely(!desc)) {
        netif_stop_subqueue(ndev, subqueue);
        ret = -ENOBUFS;
        goto drop;
    }

    ret = netcp_tx_submit_skb(netcp, skb, desc);
    if (ret)
        goto drop;

    /* Check Tx pool count & stop subqueue if needed */
    desc_count = knav_pool_count(netcp->tx_pool);
    if (desc_count < netcp->tx_pause_threshold) {
        dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
        netif_stop_subqueue(ndev, subqueue);
    }
    return NETDEV_TX_OK;

drop:
    tx_stats->tx_dropped++;
    if (desc)
        netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
    dev_kfree_skb(skb);
    return ret;
}

int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
{
    if (tx_pipe->dma_channel) {
        knav_dma_close_channel(tx_pipe->dma_channel);
        tx_pipe->dma_channel = NULL;
    }
    return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_close);

int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
{
    struct device *dev = tx_pipe->netcp_device->device;
    struct knav_dma_cfg config;
    int ret = 0;
    u8 name[16];

    memset(&config, 0, sizeof(config));
    config.direction = DMA_MEM_TO_DEV;
    config.u.tx.filt_einfo = false;
    config.u.tx.filt_pswords = false;
    config.u.tx.priority = DMA_PRIO_MED_L;

    tx_pipe->dma_channel = knav_dma_open_channel(dev,
                tx_pipe->dma_chan_name, &config);
    if (IS_ERR(tx_pipe->dma_channel)) {
        dev_err(dev, "failed opening tx chan(%s)\n",
            tx_pipe->dma_chan_name);
        ret = PTR_ERR(tx_pipe->dma_channel);
        goto err;
    }

    snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
    tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
                         KNAV_QUEUE_SHARED);
    if (IS_ERR(tx_pipe->dma_queue)) {
        dev_err(dev, "Could not open DMA queue for channel \"%s\": %pe\n",
            name, tx_pipe->dma_queue);
        ret = PTR_ERR(tx_pipe->dma_queue);
        goto err;
    }

    dev_dbg(dev, "opened tx pipe %s\n", name);
    return 0;

err:
    if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
        knav_dma_close_channel(tx_pipe->dma_channel);
    tx_pipe->dma_channel = NULL;
    return ret;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_open);

int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
              struct netcp_device *netcp_device,
              const char *dma_chan_name, unsigned int dma_queue_id)
{
    memset(tx_pipe, 0, sizeof(*tx_pipe));
    tx_pipe->netcp_device = netcp_device;
    tx_pipe->dma_chan_name = dma_chan_name;
    tx_pipe->dma_queue_id = dma_queue_id;
    return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_init);

static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
                      const u8 *addr,
                      enum netcp_addr_type type)
{
    struct netcp_addr *naddr;

    list_for_each_entry(naddr, &netcp->addr_list, node) {
        if (naddr->type != type)
            continue;
        if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
            continue;
        return naddr;
    }

    return NULL;
}

static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
                     const u8 *addr,
                     enum netcp_addr_type type)
{
    struct netcp_addr *naddr;

    naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
    if (!naddr)
        return NULL;

    naddr->type = type;
    naddr->flags = 0;
    naddr->netcp = netcp;
    if (addr)
        ether_addr_copy(naddr->addr, addr);
    else
        eth_zero_addr(naddr->addr);
    list_add_tail(&naddr->node, &netcp->addr_list);

    return naddr;
}

static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
{
    list_del(&naddr->node);
    devm_kfree(netcp->dev, naddr);
}

static void netcp_addr_clear_mark(struct netcp_intf *netcp)
{
    struct netcp_addr *naddr;

    list_for_each_entry(naddr, &netcp->addr_list, node)
        naddr->flags = 0;
}

static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
                enum netcp_addr_type type)
{
    struct netcp_addr *naddr;

    naddr = netcp_addr_find(netcp, addr, type);
    if (naddr) {
        naddr->flags |= ADDR_VALID;
        return;
    }

    naddr = netcp_addr_add(netcp, addr, type);
    if (!WARN_ON(!naddr))
        naddr->flags |= ADDR_NEW;
}

static void netcp_addr_sweep_del(struct netcp_intf *netcp)
{
    struct netcp_addr *naddr, *tmp;
    struct netcp_intf_modpriv *priv;
    struct netcp_module *module;
    int error;

    list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
        if (naddr->flags & (ADDR_VALID | ADDR_NEW))
            continue;
        dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
            naddr->addr, naddr->type);
        for_each_module(netcp, priv) {
            module = priv->netcp_module;
            if (!module->del_addr)
                continue;
            error = module->del_addr(priv->module_priv,
                         naddr);
            WARN_ON(error);
        }
        netcp_addr_del(netcp, naddr);
    }
}

static void netcp_addr_sweep_add(struct netcp_intf *netcp)
{
    struct netcp_addr *naddr, *tmp;
    struct netcp_intf_modpriv *priv;
    struct netcp_module *module;
    int error;

    list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
        if (!(naddr->flags & ADDR_NEW))
            continue;
        dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
            naddr->addr, naddr->type);

        for_each_module(netcp, priv) {
            module = priv->netcp_module;
            if (!module->add_addr)
                continue;
            error = module->add_addr(priv->module_priv, naddr);
            WARN_ON(error);
        }
    }
}

static int netcp_set_promiscuous(struct netcp_intf *netcp, bool promisc)
{
    struct netcp_intf_modpriv *priv;
    struct netcp_module *module;
    int error;

    for_each_module(netcp, priv) {
        module = priv->netcp_module;
        if (!module->set_rx_mode)
            continue;

        error = module->set_rx_mode(priv->module_priv, promisc);
        if (error)
            return error;
    }
    return 0;
}

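/* Synchronize the hardware address tables with the net_device address lists
 * using mark-and-sweep: clear all marks, mark every address that should stay
 * (adding new ones as ADDR_NEW), then delete unmarked entries and push new
 * ones out to the modules.
 */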
1515 static void netcp_set_rx_mode(struct net_device *ndev)
1516 {
1517     struct netcp_intf *netcp = netdev_priv(ndev);
1518     struct netdev_hw_addr *ndev_addr;
1519     bool promisc;
1520 
1521     promisc = (ndev->flags & IFF_PROMISC ||
1522            ndev->flags & IFF_ALLMULTI ||
1523            netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);
1524 
1525     spin_lock(&netcp->lock);
1526     /* first clear all marks */
1527     netcp_addr_clear_mark(netcp);
1528 
1529     /* next add new entries, mark existing ones */
1530     netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
1531     for_each_dev_addr(ndev, ndev_addr)
1532         netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
1533     netdev_for_each_uc_addr(ndev_addr, ndev)
1534         netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
1535     netdev_for_each_mc_addr(ndev_addr, ndev)
1536         netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);
1537 
1538     if (promisc)
1539         netcp_addr_add_mark(netcp, NULL, ADDR_ANY);
1540 
1541     /* finally sweep and callout into modules */
1542     netcp_addr_sweep_del(netcp);
1543     netcp_addr_sweep_add(netcp);
1544     netcp_set_promiscuous(netcp, promisc);
1545     spin_unlock(&netcp->lock);
1546 }
1547 
1548 static void netcp_free_navigator_resources(struct netcp_intf *netcp)
1549 {
1550     int i;
1551 
1552     if (netcp->rx_channel) {
1553         knav_dma_close_channel(netcp->rx_channel);
1554         netcp->rx_channel = NULL;
1555     }
1556 
1557     if (!IS_ERR_OR_NULL(netcp->rx_pool))
1558         netcp_rxpool_free(netcp);
1559 
1560     if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
1561         knav_queue_close(netcp->rx_queue);
1562         netcp->rx_queue = NULL;
1563     }
1564 
1565     for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
1566          !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
1567         knav_queue_close(netcp->rx_fdq[i]);
1568         netcp->rx_fdq[i] = NULL;
1569     }
1570 
1571     if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
1572         knav_queue_close(netcp->tx_compl_q);
1573         netcp->tx_compl_q = NULL;
1574     }
1575 
1576     if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
1577         knav_pool_destroy(netcp->tx_pool);
1578         netcp->tx_pool = NULL;
1579     }
1580 }
1581 
1582 static int netcp_setup_navigator_resources(struct net_device *ndev)
1583 {
1584     struct netcp_intf *netcp = netdev_priv(ndev);
1585     struct knav_queue_notify_config notify_cfg;
1586     struct knav_dma_cfg config;
1587     u32 last_fdq = 0;
1588     u8 name[16];
1589     int ret;
1590     int i;
1591 
1592     /* Create Rx/Tx descriptor pools */
1593     snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
1594     netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
1595                         netcp->rx_pool_region_id);
1596     if (IS_ERR_OR_NULL(netcp->rx_pool)) {
1597         dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
1598         ret = PTR_ERR(netcp->rx_pool);
1599         goto fail;
1600     }
1601 
1602     snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
1603     netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
1604                         netcp->tx_pool_region_id);
1605     if (IS_ERR_OR_NULL(netcp->tx_pool)) {
1606         dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
1607         ret = PTR_ERR(netcp->tx_pool) ?: -ENOMEM; /* NULL pool -> -ENOMEM */
1608         goto fail;
1609     }
1610 
1611     /* open Tx completion queue */
1612     snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
1613     netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
1614     if (IS_ERR(netcp->tx_compl_q)) {
1615         ret = PTR_ERR(netcp->tx_compl_q);
1616         goto fail;
1617     }
1618     netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);
1619 
1620     /* Set notification for Tx completion */
1621     notify_cfg.fn = netcp_tx_notify;
1622     notify_cfg.fn_arg = netcp;
1623     ret = knav_queue_device_control(netcp->tx_compl_q,
1624                     KNAV_QUEUE_SET_NOTIFIER,
1625                     (unsigned long)&notify_cfg);
1626     if (ret)
1627         goto fail;
1628 
1629     knav_queue_disable_notify(netcp->tx_compl_q);
1630 
1631     /* open Rx completion queue */
1632     snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
1633     netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
1634     if (IS_ERR(netcp->rx_queue)) {
1635         ret = PTR_ERR(netcp->rx_queue);
1636         goto fail;
1637     }
1638     netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);
1639 
1640     /* Set notification for Rx completion */
1641     notify_cfg.fn = netcp_rx_notify;
1642     notify_cfg.fn_arg = netcp;
1643     ret = knav_queue_device_control(netcp->rx_queue,
1644                     KNAV_QUEUE_SET_NOTIFIER,
1645                     (unsigned long)&notify_cfg);
1646     if (ret)
1647         goto fail;
1648 
1649     knav_queue_disable_notify(netcp->rx_queue);
1650 
1651     /* open Rx FDQs */
1652     for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
1653          ++i) {
1654         snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
1655         netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
1656         if (IS_ERR(netcp->rx_fdq[i])) {
1657             ret = PTR_ERR(netcp->rx_fdq[i]);
1658             goto fail;
1659         }
1660     }
1661 
1662     memset(&config, 0, sizeof(config));
1663     config.direction        = DMA_DEV_TO_MEM;
1664     config.u.rx.einfo_present   = true;
1665     config.u.rx.psinfo_present  = true;
1666     config.u.rx.err_mode        = DMA_DROP;
1667     config.u.rx.desc_type       = DMA_DESC_HOST;
1668     config.u.rx.psinfo_at_sop   = false;
1669     config.u.rx.sop_offset      = NETCP_SOP_OFFSET;
1670     config.u.rx.dst_q       = netcp->rx_queue_id;
1671     config.u.rx.thresh      = DMA_THRESH_NONE;
1672 
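    /* The loop below backfills every fdq[] slot the hardware expects:
     * slots beyond the FDQs actually opened repeat the id of the last
     * valid FDQ.  E.g. with two FDQs of ids 100 and 101 and four
     * slots, the table becomes {100, 101, 101, 101} rather than
     * leaving stale zeroes behind.
     */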
1673     for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
1674         if (netcp->rx_fdq[i])
1675             last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
1676         config.u.rx.fdq[i] = last_fdq;
1677     }
1678 
1679     netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
1680                     netcp->dma_chan_name, &config);
1681     if (IS_ERR(netcp->rx_channel)) {
1682         dev_err(netcp->ndev_dev, "failed opening rx chan(%s)\n",
1683             netcp->dma_chan_name);
1684         ret = PTR_ERR(netcp->rx_channel);
1685         goto fail;
1686     }
1687 
1688     dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
1689     return 0;
1690 
1691 fail:
1692     netcp_free_navigator_resources(netcp);
1693     return ret;
1694 }
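/* On any failure above, netcp_free_navigator_resources() unwinds
 * whatever was already set up; this is why the free routine must cope
 * with pointers that are NULL or ERR_PTR-encoded.
 */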
1695 
1696 /* Open the device */
1697 static int netcp_ndo_open(struct net_device *ndev)
1698 {
1699     struct netcp_intf *netcp = netdev_priv(ndev);
1700     struct netcp_intf_modpriv *intf_modpriv;
1701     struct netcp_module *module;
1702     int ret;
1703 
1704     netif_carrier_off(ndev);
1705     ret = netcp_setup_navigator_resources(ndev);
1706     if (ret) {
1707         dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
1708         goto fail;
1709     }
1710 
1711     for_each_module(netcp, intf_modpriv) {
1712         module = intf_modpriv->netcp_module;
1713         if (module->open) {
1714             ret = module->open(intf_modpriv->module_priv, ndev);
1715             if (ret != 0) {
1716                 dev_err(netcp->ndev_dev, "module open failed\n");
1717                 goto fail_open;
1718             }
1719         }
1720     }
1721 
1722     napi_enable(&netcp->rx_napi);
1723     napi_enable(&netcp->tx_napi);
1724     knav_queue_enable_notify(netcp->tx_compl_q);
1725     knav_queue_enable_notify(netcp->rx_queue);
1726     netcp_rxpool_refill(netcp);
1727     netif_tx_wake_all_queues(ndev);
1728     dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
1729     return 0;
1730 
1731 fail_open:
1732     for_each_module(netcp, intf_modpriv) {
1733         module = intf_modpriv->netcp_module;
1734         if (module->close)
1735             module->close(intf_modpriv->module_priv, ndev);
1736     }
1737 
1738 fail:
1739     netcp_free_navigator_resources(netcp);
1740     return ret;
1741 }
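/* Open ordering: navigator resources come up first so that module
 * open() callbacks can rely on queues and pools existing; NAPI and
 * the queue notifiers are enabled only after every module opened
 * successfully, so no completion notification can arrive before the
 * softirq side is ready to service it.
 */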
1742 
1743 /* Close the device */
1744 static int netcp_ndo_stop(struct net_device *ndev)
1745 {
1746     struct netcp_intf *netcp = netdev_priv(ndev);
1747     struct netcp_intf_modpriv *intf_modpriv;
1748     struct netcp_module *module;
1749     int err = 0;
1750 
1751     netif_tx_stop_all_queues(ndev);
1752     netif_carrier_off(ndev);
1753     netcp_addr_clear_mark(netcp);
1754     netcp_addr_sweep_del(netcp);
1755     knav_queue_disable_notify(netcp->rx_queue);
1756     knav_queue_disable_notify(netcp->tx_compl_q);
1757     napi_disable(&netcp->rx_napi);
1758     napi_disable(&netcp->tx_napi);
1759 
1760     for_each_module(netcp, intf_modpriv) {
1761         module = intf_modpriv->netcp_module;
1762         if (module->close) {
1763             err = module->close(intf_modpriv->module_priv, ndev);
1764             if (err != 0)
1765                 dev_err(netcp->ndev_dev, "Close failed\n");
1766         }
1767     }
1768 
1769     /* Recycle Rx descriptors from completion queue */
1770     netcp_empty_rx_queue(netcp);
1771 
1772     /* Recycle Tx descriptors from completion queue */
1773     netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
1774 
1775     if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
1776         dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
1777             netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));
1778 
1779     netcp_free_navigator_resources(netcp);
1780     dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
1781     return 0;
1782 }
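/* Stop is the mirror image: notifiers and NAPI are quiesced before
 * the module close() callbacks run, after which descriptors still
 * parked on the completion queues are recycled so the final
 * knav_pool_count() comparison can flag leaked Tx descriptors.
 */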
1783 
1784 static int netcp_ndo_ioctl(struct net_device *ndev,
1785                struct ifreq *req, int cmd)
1786 {
1787     struct netcp_intf *netcp = netdev_priv(ndev);
1788     struct netcp_intf_modpriv *intf_modpriv;
1789     struct netcp_module *module;
1790     int ret = -1, err = -EOPNOTSUPP;
1791 
1792     if (!netif_running(ndev))
1793         return -EINVAL;
1794 
1795     for_each_module(netcp, intf_modpriv) {
1796         module = intf_modpriv->netcp_module;
1797         if (!module->ioctl)
1798             continue;
1799 
1800         err = module->ioctl(intf_modpriv->module_priv, req, cmd);
1801         if ((err < 0) && (err != -EOPNOTSUPP)) {
1802             ret = err;
1803             goto out;
1804         }
1805         if (err == 0)
1806             ret = err;
1807     }
1808 
1809 out:
1810     return (ret == 0) ? 0 : err;
1811 }
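/* Return-value aggregation: ret starts at -1 ("no module handled the
 * command").  A module that handles it sets ret to 0; a hard error
 * (anything but -EOPNOTSUPP) aborts the walk and is returned via err.
 * If no module claims the command, the initial err of -EOPNOTSUPP is
 * reported.
 */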
1812 
1813 static void netcp_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
1814 {
1815     struct netcp_intf *netcp = netdev_priv(ndev);
1816     unsigned int descs = knav_pool_count(netcp->tx_pool);
1817 
1818     dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
1819     netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
1820     netif_trans_update(ndev);
1821     netif_tx_wake_all_queues(ndev);
1822 }
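/* The timeout handler does not reset the hardware: it assumes the
 * stall was a missed Tx completion, reclaims whatever is on the
 * completion queue, refreshes the trans_start stamp and restarts the
 * stopped queues.
 */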
1823 
1824 static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
1825 {
1826     struct netcp_intf *netcp = netdev_priv(ndev);
1827     struct netcp_intf_modpriv *intf_modpriv;
1828     struct netcp_module *module;
1829     unsigned long flags;
1830     int err = 0;
1831 
1832     dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);
1833 
1834     spin_lock_irqsave(&netcp->lock, flags);
1835     for_each_module(netcp, intf_modpriv) {
1836         module = intf_modpriv->netcp_module;
1837         if ((module->add_vid) && (vid != 0)) {
1838             err = module->add_vid(intf_modpriv->module_priv, vid);
1839             if (err != 0) {
1840                 dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
1841                     vid);
1842                 break;
1843             }
1844         }
1845     }
1846     spin_unlock_irqrestore(&netcp->lock, flags);
1847 
1848     return err;
1849 }
1850 
1851 static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
1852 {
1853     struct netcp_intf *netcp = netdev_priv(ndev);
1854     struct netcp_intf_modpriv *intf_modpriv;
1855     struct netcp_module *module;
1856     unsigned long flags;
1857     int err = 0;
1858 
1859     dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);
1860 
1861     spin_lock_irqsave(&netcp->lock, flags);
1862     for_each_module(netcp, intf_modpriv) {
1863         module = intf_modpriv->netcp_module;
1864         if (module->del_vid) {
1865             err = module->del_vid(intf_modpriv->module_priv, vid);
1866             if (err != 0) {
1867                 dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
1868                     vid);
1869                 break;
1870             }
1871         }
1872     }
1873     spin_unlock_irqrestore(&netcp->lock, flags);
1874     return err;
1875 }
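/* add_vid()/del_vid() are symmetric module fan-outs.  VID 0 is
 * skipped on add (priority-tagged frames need no filter entry) but
 * not on delete, presumably because the 8021q core also kills VID 0
 * and the modules can treat an unknown VID as a no-op.
 */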
1876 
1877 static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1878               void *type_data)
1879 {
1880     struct tc_mqprio_qopt *mqprio = type_data;
1881     u8 num_tc;
1882     int i;
1883 
1884     /* setup tc must be called under rtnl lock */
1885     ASSERT_RTNL();
1886 
1887     if (type != TC_SETUP_QDISC_MQPRIO)
1888         return -EOPNOTSUPP;
1889 
1890     mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
1891     num_tc = mqprio->num_tc;
1892 
1893     /* Sanity-check the number of traffic classes requested */
1894     if ((dev->real_num_tx_queues <= 1) ||
1895         (dev->real_num_tx_queues < num_tc))
1896         return -EINVAL;
1897 
1898     /* Configure traffic class to queue mappings */
1899     if (num_tc) {
1900         netdev_set_num_tc(dev, num_tc);
1901         for (i = 0; i < num_tc; i++)
1902             netdev_set_tc_queue(dev, i, 1, i);
1903     } else {
1904         netdev_reset_tc(dev);
1905     }
1906 
1907     return 0;
1908 }
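/* With the mapping above, traffic class i feeds exactly one Tx queue
 * (queue i).  An illustrative (not tested here) way to exercise this
 * from userspace, assuming a 4-queue interface named eth0:
 *
 *   # tc qdisc add dev eth0 root mqprio num_tc 4 \
 *         map 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 hw 1
 *
 * The sanity check above rejects the request unless
 * real_num_tx_queues is at least num_tc.
 */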
1909 
1910 static void
1911 netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
1912 {
1913     struct netcp_intf *netcp = netdev_priv(ndev);
1914     struct netcp_stats *p = &netcp->stats;
1915     u64 rxpackets, rxbytes, txpackets, txbytes;
1916     unsigned int start;
1917 
1918     do {
1919         start = u64_stats_fetch_begin_irq(&p->syncp_rx);
1920         rxpackets       = p->rx_packets;
1921         rxbytes         = p->rx_bytes;
1922     } while (u64_stats_fetch_retry_irq(&p->syncp_rx, start));
1923 
1924     do {
1925         start = u64_stats_fetch_begin_irq(&p->syncp_tx);
1926         txpackets       = p->tx_packets;
1927         txbytes         = p->tx_bytes;
1928     } while (u64_stats_fetch_retry_irq(&p->syncp_tx, start));
1929 
1930     stats->rx_packets = rxpackets;
1931     stats->rx_bytes = rxbytes;
1932     stats->tx_packets = txpackets;
1933     stats->tx_bytes = txbytes;
1934 
1935     /* The following are stored as 32 bit */
1936     stats->rx_errors = p->rx_errors;
1937     stats->rx_dropped = p->rx_dropped;
1938     stats->tx_dropped = p->tx_dropped;
1939 }
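/* The fetch_begin/fetch_retry pairs are seqcount reads: on 32-bit SMP
 * a writer updating the 64-bit counters bumps the sequence and a
 * racing reader simply retries, so each packet/byte pair is returned
 * consistently.  On 64-bit builds these helpers compile down to plain
 * loads.  The error counters are 32-bit and read directly.
 */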
1940 
1941 static const struct net_device_ops netcp_netdev_ops = {
1942     .ndo_open       = netcp_ndo_open,
1943     .ndo_stop       = netcp_ndo_stop,
1944     .ndo_start_xmit     = netcp_ndo_start_xmit,
1945     .ndo_set_rx_mode    = netcp_set_rx_mode,
1946     .ndo_eth_ioctl      = netcp_ndo_ioctl,
1947     .ndo_get_stats64    = netcp_get_stats,
1948     .ndo_set_mac_address    = eth_mac_addr,
1949     .ndo_validate_addr  = eth_validate_addr,
1950     .ndo_vlan_rx_add_vid    = netcp_rx_add_vid,
1951     .ndo_vlan_rx_kill_vid   = netcp_rx_kill_vid,
1952     .ndo_tx_timeout     = netcp_ndo_tx_timeout,
1953     .ndo_select_queue   = dev_pick_tx_zero,
1954     .ndo_setup_tc       = netcp_setup_tc,
1955 };
1956 
1957 static int netcp_create_interface(struct netcp_device *netcp_device,
1958                   struct device_node *node_interface)
1959 {
1960     struct device *dev = netcp_device->device;
1961     struct device_node *node = dev->of_node;
1962     struct netcp_intf *netcp;
1963     struct net_device *ndev;
1964     resource_size_t size;
1965     struct resource res;
1966     void __iomem *efuse = NULL;
1967     u32 efuse_mac = 0;
1968     u8 efuse_mac_addr[6];
1969     u32 temp[2];
1970     int ret = 0;
1971 
1972     ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
1973     if (!ndev) {
1974         dev_err(dev, "Error allocating netdev\n");
1975         return -ENOMEM;
1976     }
1977 
1978     ndev->features |= NETIF_F_SG;
1979     ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1980     ndev->hw_features = ndev->features;
1981     ndev->vlan_features |= NETIF_F_SG;
1982 
1983     /* MTU range: 68 - 9486 */
1984     ndev->min_mtu = ETH_MIN_MTU;
1985     ndev->max_mtu = NETCP_MAX_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
1986 
1987     netcp = netdev_priv(ndev);
1988     spin_lock_init(&netcp->lock);
1989     INIT_LIST_HEAD(&netcp->module_head);
1990     INIT_LIST_HEAD(&netcp->txhook_list_head);
1991     INIT_LIST_HEAD(&netcp->rxhook_list_head);
1992     INIT_LIST_HEAD(&netcp->addr_list);
1993     u64_stats_init(&netcp->stats.syncp_rx);
1994     u64_stats_init(&netcp->stats.syncp_tx);
1995     netcp->netcp_device = netcp_device;
1996     netcp->dev = netcp_device->device;
1997     netcp->ndev = ndev;
1998     netcp->ndev_dev  = &ndev->dev;
1999     netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
2000     netcp->tx_pause_threshold = MAX_SKB_FRAGS;
2001     netcp->tx_resume_threshold = netcp->tx_pause_threshold;
2002     netcp->node_interface = node_interface;
2003 
2004     ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
2005     if (efuse_mac) {
2006         if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
2007             dev_err(dev, "could not find efuse-mac reg resource\n");
2008             ret = -ENODEV;
2009             goto quit;
2010         }
2011         size = resource_size(&res);
2012 
2013         if (!devm_request_mem_region(dev, res.start, size,
2014                          dev_name(dev))) {
2015             dev_err(dev, "could not reserve resource\n");
2016             ret = -ENOMEM;
2017             goto quit;
2018         }
2019 
2020         efuse = devm_ioremap(dev, res.start, size);
2021         if (!efuse) {
2022             dev_err(dev, "could not map resource\n");
2023             devm_release_mem_region(dev, res.start, size);
2024             ret = -ENOMEM;
2025             goto quit;
2026         }
2027 
2028         emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
2029         if (is_valid_ether_addr(efuse_mac_addr))
2030             eth_hw_addr_set(ndev, efuse_mac_addr);
2031         else
2032             eth_hw_addr_random(ndev);
2033 
2034         devm_iounmap(dev, efuse);
2035         devm_release_mem_region(dev, res.start, size);
2036     } else {
2037         ret = of_get_ethdev_address(node_interface, ndev);
2038         if (ret)
2039             eth_hw_addr_random(ndev);
2040     }
2041 
2042     ret = of_property_read_string(node_interface, "rx-channel",
2043                       &netcp->dma_chan_name);
2044     if (ret < 0) {
2045         dev_err(dev, "missing \"rx-channel\" parameter\n");
2046         ret = -ENODEV;
2047         goto quit;
2048     }
2049 
2050     ret = of_property_read_u32(node_interface, "rx-queue",
2051                    &netcp->rx_queue_id);
2052     if (ret < 0) {
2053         dev_warn(dev, "missing \"rx-queue\" parameter\n");
2054         netcp->rx_queue_id = KNAV_QUEUE_QPEND;
2055     }
2056 
2057     ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
2058                      netcp->rx_queue_depths,
2059                      KNAV_DMA_FDQ_PER_CHAN);
2060     if (ret < 0) {
2061         dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
2062         netcp->rx_queue_depths[0] = 128;
2063     }
2064 
2065     ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
2066     if (ret < 0) {
2067         dev_err(dev, "missing \"rx-pool\" parameter\n");
2068         ret = -ENODEV;
2069         goto quit;
2070     }
2071     netcp->rx_pool_size = temp[0];
2072     netcp->rx_pool_region_id = temp[1];
2073 
2074     ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
2075     if (ret < 0) {
2076         dev_err(dev, "missing \"tx-pool\" parameter\n");
2077         ret = -ENODEV;
2078         goto quit;
2079     }
2080     netcp->tx_pool_size = temp[0];
2081     netcp->tx_pool_region_id = temp[1];
2082 
2083     if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
2084         dev_err(dev, "tx-pool size too small, must be at least %lu\n",
2085             (unsigned long)MAX_SKB_FRAGS);
2086         ret = -ENODEV;
2087         goto quit;
2088     }
2089 
2090     ret = of_property_read_u32(node_interface, "tx-completion-queue",
2091                    &netcp->tx_compl_qid);
2092     if (ret < 0) {
2093         dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
2094         netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
2095     }
2096 
2097     /* NAPI register */
2098     netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NAPI_POLL_WEIGHT);
2099     netif_napi_add_tx(ndev, &netcp->tx_napi, netcp_tx_poll);
2100 
2101     /* Register the network device */
2102     ndev->dev_id        = 0;
2103     ndev->watchdog_timeo    = NETCP_TX_TIMEOUT;
2104     ndev->netdev_ops    = &netcp_netdev_ops;
2105     SET_NETDEV_DEV(ndev, dev);
2106 
2107     list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
2108     return 0;
2109 
2110 quit:
2111     free_netdev(ndev);
2112     return ret;
2113 }
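/* An illustrative "netcp-interfaces" child node that this parser
 * accepts (property names match the of_property_read_* calls above;
 * the values are made up):
 *
 *	interface-0 {
 *		rx-channel = "netrx0";          required
 *		rx-pool = <1024 12>;            required: size, region id
 *		tx-pool = <1024 12>;            required: size, region id
 *		rx-queue-depth = <128 128 0 0>; falls back to <128> if absent
 *		rx-queue = <8704>;              optional, defaults to QPEND
 *		tx-completion-queue = <8706>;   optional, defaults to QPEND
 *		efuse-mac = <1>;                optional: read MAC from eFuse
 *	};
 */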
2114 
2115 static void netcp_delete_interface(struct netcp_device *netcp_device,
2116                    struct net_device *ndev)
2117 {
2118     struct netcp_intf_modpriv *intf_modpriv, *tmp;
2119     struct netcp_intf *netcp = netdev_priv(ndev);
2120     struct netcp_module *module;
2121 
2122     dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
2123         ndev->name);
2124 
2125     /* Notify each of the modules that the interface is going away */
2126     list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
2127                  intf_list) {
2128         module = intf_modpriv->netcp_module;
2129         dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
2130             module->name);
2131         if (module->release)
2132             module->release(intf_modpriv->module_priv);
2133         list_del(&intf_modpriv->intf_list);
2134     }
2135     WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
2136          ndev->name);
2137 
2138     list_del(&netcp->interface_list);
2139 
2140     of_node_put(netcp->node_interface);
2141     unregister_netdev(ndev);
2142     free_netdev(ndev);
2143 }
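/* Modules are detached before the netdev is unregistered so their
 * release() callbacks still see a valid interface; the WARN catches
 * anything left on (or re-added to) the module list afterwards.
 */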
2144 
2145 static int netcp_probe(struct platform_device *pdev)
2146 {
2147     struct device_node *node = pdev->dev.of_node;
2148     struct netcp_intf *netcp_intf, *netcp_tmp;
2149     struct device_node *child, *interfaces;
2150     struct netcp_device *netcp_device;
2151     struct device *dev = &pdev->dev;
2152     struct netcp_module *module;
2153     int ret;
2154 
2155     if (!knav_dma_device_ready() ||
2156         !knav_qmss_device_ready())
2157         return -EPROBE_DEFER;
2158 
2159     if (!node) {
2160         dev_err(dev, "could not find device info\n");
2161         return -ENODEV;
2162     }
2163 
2164     /* Allocate a new NETCP device instance */
2165     netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
2166     if (!netcp_device)
2167         return -ENOMEM;
2168 
2169     pm_runtime_enable(&pdev->dev);
2170     ret = pm_runtime_get_sync(&pdev->dev);
2171     if (ret < 0) {
2172         dev_err(dev, "Failed to enable NETCP power-domain\n");
2173         pm_runtime_disable(&pdev->dev);
2174         return ret;
2175     }
2176 
2177     /* Initialize the NETCP device instance */
2178     INIT_LIST_HEAD(&netcp_device->interface_head);
2179     INIT_LIST_HEAD(&netcp_device->modpriv_head);
2180     netcp_device->device = dev;
2181     platform_set_drvdata(pdev, netcp_device);
2182 
2183     /* create interfaces */
2184     interfaces = of_get_child_by_name(node, "netcp-interfaces");
2185     if (!interfaces) {
2186         dev_err(dev, "could not find netcp-interfaces node\n");
2187         ret = -ENODEV;
2188         goto probe_quit;
2189     }
2190 
2191     for_each_available_child_of_node(interfaces, child) {
2192         ret = netcp_create_interface(netcp_device, child);
2193         if (ret) {
2194             dev_err(dev, "could not create interface(%pOFn)\n",
2195                 child);
2196             goto probe_quit_interface;
2197         }
2198     }
2199 
2200     of_node_put(interfaces);
2201 
2202     /* Add the device instance to the list */
2203     list_add_tail(&netcp_device->device_list, &netcp_devices);
2204 
2205     /* Probe & attach any modules already registered */
2206     mutex_lock(&netcp_modules_lock);
2207     for_each_netcp_module(module) {
2208         ret = netcp_module_probe(netcp_device, module);
2209         if (ret < 0)
2210             dev_err(dev, "module(%s) probe failed\n", module->name);
2211     }
2212     mutex_unlock(&netcp_modules_lock);
2213     return 0;
2214 
2215 probe_quit_interface:
2216     list_for_each_entry_safe(netcp_intf, netcp_tmp,
2217                  &netcp_device->interface_head,
2218                  interface_list) {
2219         netcp_delete_interface(netcp_device, netcp_intf->ndev);
2220     }
2221 
2222     of_node_put(interfaces);
2223 
2224 probe_quit:
2225     pm_runtime_put_sync(&pdev->dev);
2226     pm_runtime_disable(&pdev->dev);
2227     platform_set_drvdata(pdev, NULL);
2228     return ret;
2229 }
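/* Probe ordering: both knav subsystems must already be up (otherwise
 * defer), the interfaces are created from the "netcp-interfaces" DT
 * children, and finally any modules that registered before this
 * device appeared are probed against it.  Modules loaded later attach
 * through netcp_register_module() instead.
 */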
2230 
2231 static int netcp_remove(struct platform_device *pdev)
2232 {
2233     struct netcp_device *netcp_device = platform_get_drvdata(pdev);
2234     struct netcp_intf *netcp_intf, *netcp_tmp;
2235     struct netcp_inst_modpriv *inst_modpriv, *tmp;
2236     struct netcp_module *module;
2237 
2238     list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
2239                  inst_list) {
2240         module = inst_modpriv->netcp_module;
2241         dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
2242         module->remove(netcp_device, inst_modpriv->module_priv);
2243         list_del(&inst_modpriv->inst_list);
2244     }
2245 
2246     /* now that all modules are removed, clean up the interfaces */
2247     list_for_each_entry_safe(netcp_intf, netcp_tmp,
2248                  &netcp_device->interface_head,
2249                  interface_list) {
2250         netcp_delete_interface(netcp_device, netcp_intf->ndev);
2251     }
2252 
2253     WARN(!list_empty(&netcp_device->interface_head),
2254          "%s interface list not empty!\n", pdev->name);
2255 
2256     pm_runtime_put_sync(&pdev->dev);
2257     pm_runtime_disable(&pdev->dev);
2258     platform_set_drvdata(pdev, NULL);
2259     return 0;
2260 }
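/* Removal is the inverse of probe: module instances are torn down
 * first (their remove() callbacks may still dereference interface
 * state), then the interfaces themselves, and finally the runtime-PM
 * reference taken in probe is dropped.
 */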
2261 
2262 static const struct of_device_id of_match[] = {
2263     { .compatible = "ti,netcp-1.0", },
2264     {},
2265 };
2266 MODULE_DEVICE_TABLE(of, of_match);
2267 
2268 static struct platform_driver netcp_driver = {
2269     .driver = {
2270         .name       = "netcp-1.0",
2271         .of_match_table = of_match,
2272     },
2273     .probe = netcp_probe,
2274     .remove = netcp_remove,
2275 };
2276 module_platform_driver(netcp_driver);
2277 
2278 MODULE_LICENSE("GPL v2");
2279 MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs");
2280 MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");