// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/ethernet/ibm/emac/mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
 *      David Gibson <hermes@gibson.dropbear.id.au>,
 *
 *      Armin Kuster <akuster@mvista.com>
 *      Copyright 2002 MontaVista Software Inc.
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of_irq.h>

#include "core.h"
#include <asm/dcr-regs.h>

static int mal_count;

int mal_register_commac(struct mal_instance *mal, struct mal_commac *commac)
{
    unsigned long flags;

    spin_lock_irqsave(&mal->lock, flags);

    MAL_DBG(mal, "reg(%08x, %08x)" NL,
        commac->tx_chan_mask, commac->rx_chan_mask);

    /* Don't let multiple commacs claim the same channel(s) */
    if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
        (mal->rx_chan_mask & commac->rx_chan_mask)) {
        spin_unlock_irqrestore(&mal->lock, flags);
        printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
               mal->index);
        return -EBUSY;
    }

    if (list_empty(&mal->list))
        napi_enable(&mal->napi);
    mal->tx_chan_mask |= commac->tx_chan_mask;
    mal->rx_chan_mask |= commac->rx_chan_mask;
    list_add(&commac->list, &mal->list);

    spin_unlock_irqrestore(&mal->lock, flags);

    return 0;
}
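
/*
 * Example: a MAC driver claims its channels by filling in a mal_commac and
 * registering it. A minimal sketch, assuming a hypothetical emac_example
 * structure and commac ops table (neither is part of this file):
 *
 *	struct emac_example {
 *		struct mal_commac commac;
 *		struct mal_instance *mal;
 *		int tx_chan, rx_chan;
 *	};
 *
 *	static int emac_example_attach(struct emac_example *dev)
 *	{
 *		dev->commac.ops = &emac_example_commac_ops;
 *		dev->commac.dev = dev;
 *		dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->tx_chan);
 *		dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->rx_chan);
 *		return mal_register_commac(dev->mal, &dev->commac);
 *	}
 *
 * A -EBUSY return means another commac already owns one of the channels.
 */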

void mal_unregister_commac(struct mal_instance *mal,
        struct mal_commac *commac)
{
    unsigned long flags;

    spin_lock_irqsave(&mal->lock, flags);

    MAL_DBG(mal, "unreg(%08x, %08x)" NL,
        commac->tx_chan_mask, commac->rx_chan_mask);

    mal->tx_chan_mask &= ~commac->tx_chan_mask;
    mal->rx_chan_mask &= ~commac->rx_chan_mask;
    list_del_init(&commac->list);
    if (list_empty(&mal->list))
        napi_disable(&mal->napi);

    spin_unlock_irqrestore(&mal->lock, flags);
}

int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
    BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
           size > MAL_MAX_RX_SIZE);

    MAL_DBG(mal, "set_rcbs(%d, %lu)" NL, channel, size);

    if (size & 0xf) {
        printk(KERN_WARNING
               "mal%d: incorrect RX size %lu for channel %d\n",
               mal->index, size, channel);
        return -EINVAL;
    }

    set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
    return 0;
}
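
/*
 * MAL_RCBS stores the receive buffer size in 16-byte units, which is why
 * the size must be a multiple of 16 and is written as size >> 4. For
 * example, a 1536-byte buffer is programmed as 1536 >> 4 == 96, while a
 * request for 1537 bytes has low bits set (1537 & 0xf == 1) and fails
 * with -EINVAL.
 */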

int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
    BUG_ON(channel < 0 || channel >= mal->num_tx_chans);

    return channel * NUM_TX_BUFF;
}

int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
    BUG_ON(channel < 0 || channel >= mal->num_rx_chans);
    return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}
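
/*
 * All buffer descriptors live in one contiguous DMA block: every TX ring
 * first, then every RX ring. For instance, with 2 TX channels and
 * (hypothetically) NUM_TX_BUFF == NUM_RX_BUFF == 64, channel 1's rings
 * begin at descriptor indices:
 *
 *	mal_tx_bd_offset(mal, 1) == 1 * 64          == 64
 *	mal_rx_bd_offset(mal, 1) == 2 * 64 + 1 * 64 == 192
 *
 * mal_probe() below scales these by sizeof(struct mal_descriptor) when
 * programming MAL_TXCTPR/MAL_RXCTPR.
 */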

void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
    unsigned long flags;

    spin_lock_irqsave(&mal->lock, flags);

    MAL_DBG(mal, "enable_tx(%d)" NL, channel);

    set_mal_dcrn(mal, MAL_TXCASR,
             get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));

    spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
    set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

    MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}

void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
    unsigned long flags;

    /*
     * On some 4xx PPCs (e.g. 460EX/GT), the RX channel number is a
     * multiple of 8, but the bitmask written to MAL_RXCASR expects
     * that number divided by 8
     */
    if (!(channel % 8))
        channel >>= 3;

    spin_lock_irqsave(&mal->lock, flags);

    MAL_DBG(mal, "enable_rx(%d)" NL, channel);

    set_mal_dcrn(mal, MAL_RXCASR,
             get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

    spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
    /*
     * On some 4xx PPCs (e.g. 460EX/GT), the RX channel number is a
     * multiple of 8, but the bitmask written to MAL_RXCARR expects
     * that number divided by 8
     */
    if (!(channel % 8))
        channel >>= 3;

    set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

    MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}
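
/*
 * Worked example of the 460EX/GT quirk handled above: RX channel 16 is a
 * multiple of 8, so it is scaled down to 16 >> 3 == 2 before
 * MAL_CHAN_MASK(2) is written; channel 3 is used as-is. Channel 0 also
 * satisfies !(0 % 8), but 0 >> 3 is still 0, so it is unaffected.
 */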

void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
    unsigned long flags;

    spin_lock_irqsave(&mal->lock, flags);

    MAL_DBG(mal, "poll_add(%p)" NL, commac);

    /* starts disabled */
    set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

    list_add_tail(&commac->poll_list, &mal->poll_list);

    spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
    unsigned long flags;

    spin_lock_irqsave(&mal->lock, flags);

    MAL_DBG(mal, "poll_del(%p)" NL, commac);

    list_del(&commac->poll_list);

    spin_unlock_irqrestore(&mal->lock, flags);
}

/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
    MAL_DBG2(mal, "enable_irq" NL);

    // XXX might want to cache MAL_CFG as the DCR read can be slooooow
    set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}

/* synchronized by NAPI state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
    // XXX might want to cache MAL_CFG as the DCR read can be slooooow
    set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

    MAL_DBG2(mal, "disable_irq" NL);
}
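
/*
 * One way to address the XXX notes above would be to shadow MAL_CFG in
 * software so the read-modify-write skips the slow DCR read. A sketch,
 * assuming a new cfg_shadow field in struct mal_instance (no such field
 * exists today) and the same synchronization rules as the originals:
 *
 *	static inline void mal_enable_eob_irq_cached(struct mal_instance *mal)
 *	{
 *		mal->cfg_shadow |= MAL_CFG_EOPIE;	// hypothetical field
 *		set_mal_dcrn(mal, MAL_CFG, mal->cfg_shadow);
 *	}
 *
 * The shadow would have to be refreshed at every other MAL_CFG write,
 * e.g. after mal_reset() and the probe-time configuration below.
 */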

static irqreturn_t mal_serr(int irq, void *dev_instance)
{
    struct mal_instance *mal = dev_instance;

    u32 esr = get_mal_dcrn(mal, MAL_ESR);

    /* Clear the error status register */
    set_mal_dcrn(mal, MAL_ESR, esr);

    MAL_DBG(mal, "SERR %08x" NL, esr);

    if (esr & MAL_ESR_EVB) {
        if (esr & MAL_ESR_DE) {
            /* Ignore the descriptor error; a TXDE or RXDE
             * interrupt will be generated anyway.
             */
            return IRQ_HANDLED;
        }

        if (esr & MAL_ESR_PEIN) {
            /* PLB error; it's probably buggy hardware or an
             * incorrect physical address in a BD (i.e. a bug)
             */
            if (net_ratelimit())
                printk(KERN_ERR
                       "mal%d: system error, "
                       "PLB (ESR = 0x%08x)\n",
                       mal->index, esr);
            return IRQ_HANDLED;
        }

        /* OPB error; it's probably buggy hardware or an incorrect
         * EBC setup
         */
        if (net_ratelimit())
            printk(KERN_ERR
                   "mal%d: system error, OPB (ESR = 0x%08x)\n",
                   mal->index, esr);
    }
    return IRQ_HANDLED;
}

static inline void mal_schedule_poll(struct mal_instance *mal)
{
    if (likely(napi_schedule_prep(&mal->napi))) {
        MAL_DBG2(mal, "schedule_poll" NL);
        spin_lock(&mal->lock);
        mal_disable_eob_irq(mal);
        spin_unlock(&mal->lock);
        __napi_schedule(&mal->napi);
    } else
        MAL_DBG2(mal, "already in poll" NL);
}

static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
    struct mal_instance *mal = dev_instance;

    u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

    MAL_DBG2(mal, "txeob %08x" NL, r);

    mal_schedule_poll(mal);
    set_mal_dcrn(mal, MAL_TXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
    if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
        mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
                (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
#endif

    return IRQ_HANDLED;
}

static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
    struct mal_instance *mal = dev_instance;

    u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

    MAL_DBG2(mal, "rxeob %08x" NL, r);

    mal_schedule_poll(mal);
    set_mal_dcrn(mal, MAL_RXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
    if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
        mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
                (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
#endif

    return IRQ_HANDLED;
}

static irqreturn_t mal_txde(int irq, void *dev_instance)
{
    struct mal_instance *mal = dev_instance;

    u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
    set_mal_dcrn(mal, MAL_TXDEIR, deir);

    MAL_DBG(mal, "txde %08x" NL, deir);

    if (net_ratelimit())
        printk(KERN_ERR
               "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
               mal->index, deir);

    return IRQ_HANDLED;
}

static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
    struct mal_instance *mal = dev_instance;
    struct list_head *l;

    u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

    MAL_DBG(mal, "rxde %08x" NL, deir);

    list_for_each(l, &mal->list) {
        struct mal_commac *mc = list_entry(l, struct mal_commac, list);
        if (deir & mc->rx_chan_mask) {
            set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
            mc->ops->rxde(mc->dev);
        }
    }

    mal_schedule_poll(mal);
    set_mal_dcrn(mal, MAL_RXDEIR, deir);

    return IRQ_HANDLED;
}

static irqreturn_t mal_int(int irq, void *dev_instance)
{
    struct mal_instance *mal = dev_instance;
    u32 esr = get_mal_dcrn(mal, MAL_ESR);

    if (esr & MAL_ESR_EVB) {
        /* descriptor error */
        if (esr & MAL_ESR_DE) {
            if (esr & MAL_ESR_CIDT)
                return mal_rxde(irq, dev_instance);
            else
                return mal_txde(irq, dev_instance);
        } else { /* SERR */
            return mal_serr(irq, dev_instance);
        }
    }
    return IRQ_HANDLED;
}

void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
    /* Spinlock-type semantics: only one caller may disable poll at a time */
    while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
        msleep(1);

    /* Synchronize with the MAL NAPI poller */
    napi_synchronize(&mal->napi);
}

void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
    smp_wmb();
    clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

    /* Feels better to trigger a poll here to catch up with events that
     * may have happened on this channel while disabled. It will most
     * probably be delayed until the next interrupt but that's mostly a
     * non-issue in the context where this is called.
     */
    napi_schedule(&mal->napi);
}
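
/*
 * mal_poll_disable()/mal_poll_enable() are meant to bracket channel
 * reconfiguration so the NAPI poller cannot touch it in between. A
 * hypothetical caller (field names are illustrative):
 *
 *	mal_poll_disable(dev->mal, &dev->commac);
 *	mal_disable_rx_channel(dev->mal, dev->rx_chan);
 *	// ... rewrite RX descriptors, resize buffers, etc. ...
 *	mal_enable_rx_channel(dev->mal, dev->rx_chan);
 *	mal_poll_enable(dev->mal, &dev->commac);
 *
 * The smp_wmb() above ensures those writes are visible before the poller
 * can observe the cleared MAL_COMMAC_POLL_DISABLED bit.
 */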

static int mal_poll(struct napi_struct *napi, int budget)
{
    struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
    struct list_head *l;
    int received = 0;
    unsigned long flags;

    MAL_DBG2(mal, "poll(%d)" NL, budget);

    /* Process TX skbs */
    list_for_each(l, &mal->poll_list) {
        struct mal_commac *mc =
            list_entry(l, struct mal_commac, poll_list);
        mc->ops->poll_tx(mc->dev);
    }

    /* Process RX skbs.
     *
     * We _might_ need something smarter here to enforce polling
     * fairness.
     */
    list_for_each(l, &mal->poll_list) {
        struct mal_commac *mc =
            list_entry(l, struct mal_commac, poll_list);
        int n;
        if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
            continue;
        n = mc->ops->poll_rx(mc->dev, budget - received);
        if (n) {
            received += n;
            if (received >= budget)
                return budget;
        }
    }

    if (napi_complete_done(napi, received)) {
        /* We need to disable IRQs to protect from RXDE IRQ here */
        spin_lock_irqsave(&mal->lock, flags);
        mal_enable_eob_irq(mal);
        spin_unlock_irqrestore(&mal->lock, flags);
    }

    /* Check for "rotting" packet(s) */
    list_for_each(l, &mal->poll_list) {
        struct mal_commac *mc =
            list_entry(l, struct mal_commac, poll_list);
        if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
            continue;
        if (unlikely(mc->ops->peek_rx(mc->dev) ||
                 test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
            MAL_DBG2(mal, "rotting packet" NL);
            if (!napi_reschedule(napi))
                goto more_work;

            spin_lock_irqsave(&mal->lock, flags);
            mal_disable_eob_irq(mal);
            spin_unlock_irqrestore(&mal->lock, flags);
        }
        mc->ops->poll_tx(mc->dev);
    }

 more_work:
    MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
    return received;
}
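
/*
 * mal_poll() follows the usual NAPI contract: when RX work fills the whole
 * budget it returns budget without completing, so the core keeps polling;
 * when it finishes early it calls napi_complete_done() and re-enables the
 * EOB interrupt. The "rotting packet" pass then catches frames that
 * arrived between the last poll_rx() and napi_complete_done(), which would
 * otherwise sit in the ring until the next interrupt.
 */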

static void mal_reset(struct mal_instance *mal)
{
    int n = 10;

    MAL_DBG(mal, "reset" NL);

    set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);

    /* Wait for reset to complete (1 system clock) */
    while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
        --n;

    if (unlikely(!n))
        printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
}

int mal_get_regs_len(struct mal_instance *mal)
{
    return sizeof(struct emac_ethtool_regs_subhdr) +
        sizeof(struct mal_regs);
}

void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
    struct emac_ethtool_regs_subhdr *hdr = buf;
    struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
    int i;

    hdr->version = mal->version;
    hdr->index = mal->index;

    regs->tx_count = mal->num_tx_chans;
    regs->rx_count = mal->num_rx_chans;

    regs->cfg = get_mal_dcrn(mal, MAL_CFG);
    regs->esr = get_mal_dcrn(mal, MAL_ESR);
    regs->ier = get_mal_dcrn(mal, MAL_IER);
    regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
    regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
    regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
    regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
    regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
    regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
    regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
    regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

    for (i = 0; i < regs->tx_count; ++i)
        regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));

    for (i = 0; i < regs->rx_count; ++i) {
        regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
        regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
    }
    return regs + 1;
}
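
/*
 * These two helpers compose into a larger ethtool register dump: the
 * caller sizes its buffer with mal_get_regs_len(), and mal_dump_regs()
 * returns the address just past what it wrote so further blocks can be
 * appended. A hypothetical caller (emac_len is illustrative):
 *
 *	void *buf = kzalloc(mal_get_regs_len(mal) + emac_len, GFP_KERNEL);
 *	void *next = mal_dump_regs(mal, buf);
 *	// ... append further register blocks at next ...
 */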

static int mal_probe(struct platform_device *ofdev)
{
    struct mal_instance *mal;
    int err = 0, i, bd_size;
    int index = mal_count++;
    unsigned int dcr_base;
    const u32 *prop;
    u32 cfg;
    unsigned long irqflags;
    irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;

    mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
    if (!mal)
        return -ENOMEM;

    mal->index = index;
    mal->ofdev = ofdev;
    mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;

    MAL_DBG(mal, "probe" NL);

    prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
    if (prop == NULL) {
        printk(KERN_ERR
               "mal%d: can't find MAL num-tx-chans property!\n",
               index);
        err = -ENODEV;
        goto fail;
    }
    mal->num_tx_chans = prop[0];

    prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
    if (prop == NULL) {
        printk(KERN_ERR
               "mal%d: can't find MAL num-rx-chans property!\n",
               index);
        err = -ENODEV;
        goto fail;
    }
    mal->num_rx_chans = prop[0];

    dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
    if (dcr_base == 0) {
        printk(KERN_ERR
               "mal%d: can't find DCR resource!\n", index);
        err = -ENODEV;
        goto fail;
    }
    mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
    if (!DCR_MAP_OK(mal->dcr_host)) {
        printk(KERN_ERR
               "mal%d: failed to map DCRs !\n", index);
        err = -ENODEV;
        goto fail;
    }

    if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
#if defined(CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT) && \
        defined(CONFIG_IBM_EMAC_MAL_COMMON_ERR)
        mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
                MAL_FTR_COMMON_ERR_INT);
#else
        printk(KERN_ERR "%pOF: Support for 405EZ not enabled!\n",
                ofdev->dev.of_node);
        err = -ENODEV;
        goto fail;
#endif
    }

    mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
    mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
    mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);

    if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
        mal->txde_irq = mal->rxde_irq = mal->serr_irq;
    } else {
        mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
        mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
    }

    if (!mal->txeob_irq || !mal->rxeob_irq || !mal->serr_irq ||
        !mal->txde_irq  || !mal->rxde_irq) {
        printk(KERN_ERR
               "mal%d: failed to map interrupts !\n", index);
        err = -ENODEV;
        goto fail_unmap;
    }

    INIT_LIST_HEAD(&mal->poll_list);
    INIT_LIST_HEAD(&mal->list);
    spin_lock_init(&mal->lock);

    init_dummy_netdev(&mal->dummy_dev);

    netif_napi_add_weight(&mal->dummy_dev, &mal->napi, mal_poll,
                  CONFIG_IBM_EMAC_POLL_WEIGHT);

    /* Load power-on reset defaults */
    mal_reset(mal);

    /* Set the MAL configuration register */
    cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
    cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;

    /* Current Axon is not happy with priority being non-0, it can
     * deadlock, fix it up here
     */
    if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
        cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);

    /* Apply configuration */
    set_mal_dcrn(mal, MAL_CFG, cfg);

    /* Allocate space for BD rings */
    BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
    BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);

    bd_size = sizeof(struct mal_descriptor) *
        (NUM_TX_BUFF * mal->num_tx_chans +
         NUM_RX_BUFF * mal->num_rx_chans);
    mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
                      GFP_KERNEL);
    if (mal->bd_virt == NULL) {
        err = -ENOMEM;
        goto fail_unmap;
    }

    for (i = 0; i < mal->num_tx_chans; ++i)
        set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
                 sizeof(struct mal_descriptor) *
                 mal_tx_bd_offset(mal, i));

    for (i = 0; i < mal->num_rx_chans; ++i)
        set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
                 sizeof(struct mal_descriptor) *
                 mal_rx_bd_offset(mal, i));

    if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
        irqflags = IRQF_SHARED;
        hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
    } else {
        irqflags = 0;
        hdlr_serr = mal_serr;
        hdlr_txde = mal_txde;
        hdlr_rxde = mal_rxde;
    }

    err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
    if (err)
        goto fail2;
    err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
    if (err)
        goto fail3;
    err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
    if (err)
        goto fail4;
    err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
    if (err)
        goto fail5;
    err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
    if (err)
        goto fail6;

    /* Enable all MAL SERR interrupt sources */
    set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);

    /* Enable EOB interrupt */
    mal_enable_eob_irq(mal);

    printk(KERN_INFO
           "MAL v%d %pOF, %d TX channels, %d RX channels\n",
           mal->version, ofdev->dev.of_node,
           mal->num_tx_chans, mal->num_rx_chans);

    /* Advertise this instance to the rest of the world */
    wmb();
    platform_set_drvdata(ofdev, mal);

    return 0;

 fail6:
    free_irq(mal->rxde_irq, mal);
 fail5:
    free_irq(mal->txeob_irq, mal);
 fail4:
    free_irq(mal->txde_irq, mal);
 fail3:
    free_irq(mal->serr_irq, mal);
 fail2:
    dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
 fail_unmap:
    dcr_unmap(mal->dcr_host, 0x100);
 fail:
    kfree(mal);

    return err;
}
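
/*
 * mal_probe() expects a device-tree node along these lines (all values
 * are illustrative; real boards define their own dcr-reg and interrupt
 * specifiers):
 *
 *	MAL0: mcmal {
 *		compatible = "ibm,mcmal2", "ibm,mcmal";
 *		dcr-reg = <0x180 0x062>;
 *		num-tx-chans = <2>;
 *		num-rx-chans = <2>;
 *		interrupts = <...>;	// TXEOB, RXEOB, SERR, TXDE, RXDE
 *	};
 *
 * Interrupt order matters: indices 0-4 are parsed as TXEOB, RXEOB, SERR,
 * TXDE and RXDE, with TXDE/RXDE sharing the SERR interrupt on 405EZ
 * (MAL_FTR_COMMON_ERR_INT).
 */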

static int mal_remove(struct platform_device *ofdev)
{
    struct mal_instance *mal = platform_get_drvdata(ofdev);

    MAL_DBG(mal, "remove" NL);

    /* Synchronize with scheduled polling */
    napi_disable(&mal->napi);

    if (!list_empty(&mal->list))
        /* This is *very* bad */
        WARN(1, KERN_EMERG
               "mal%d: commac list is not empty on remove!\n",
               mal->index);

    free_irq(mal->serr_irq, mal);
    free_irq(mal->txde_irq, mal);
    free_irq(mal->txeob_irq, mal);
    free_irq(mal->rxde_irq, mal);
    free_irq(mal->rxeob_irq, mal);

    mal_reset(mal);

    dma_free_coherent(&ofdev->dev,
              sizeof(struct mal_descriptor) *
              (NUM_TX_BUFF * mal->num_tx_chans +
               NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
              mal->bd_dma);
    kfree(mal);

    return 0;
}

static const struct of_device_id mal_platform_match[] =
{
    {
        .compatible = "ibm,mcmal",
    },
    {
        .compatible = "ibm,mcmal2",
    },
    /* Backward compat */
    {
        .type       = "mcmal-dma",
        .compatible = "ibm,mcmal",
    },
    {
        .type       = "mcmal-dma",
        .compatible = "ibm,mcmal2",
    },
    {},
};

static struct platform_driver mal_of_driver = {
    .driver = {
        .name = "mcmal",
        .of_match_table = mal_platform_match,
    },
    .probe = mal_probe,
    .remove = mal_remove,
};

int __init mal_init(void)
{
    return platform_driver_register(&mal_of_driver);
}

void mal_exit(void)
{
    platform_driver_unregister(&mal_of_driver);
}
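
/*
 * Note that mal_init()/mal_exit() are not module_init()/module_exit()
 * hooks of their own: the EMAC core driver is expected to call them from
 * its module init/exit path so the MAL platform driver is registered
 * together with the rest of the ibm_emac stack.
 */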