0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/module.h>
0010 #include <linux/kernel.h>
0011 #include <linux/dma-mapping.h>
0012 #include <linux/delay.h>
0013 #include <linux/rio.h>
0014 #include <linux/rio_drv.h>
0015 #include <linux/slab.h>
0016 #include <linux/rio_ids.h>
0017
0018 #include <linux/netdevice.h>
0019 #include <linux/etherdevice.h>
0020 #include <linux/skbuff.h>
0021 #include <linux/crc32.h>
0022 #include <linux/ethtool.h>
0023 #include <linux/reboot.h>
0024
0025 #define DRV_NAME "rionet"
0026 #define DRV_VERSION "0.3"
0027 #define DRV_AUTHOR "Matt Porter <mporter@kernel.crashing.org>"
0028 #define DRV_DESC "Ethernet over RapidIO"
0029
0030 MODULE_AUTHOR(DRV_AUTHOR);
0031 MODULE_DESCRIPTION(DRV_DESC);
0032 MODULE_LICENSE("GPL");
0033
0034 #define RIONET_DEFAULT_MSGLEVEL \
0035 (NETIF_MSG_DRV | \
0036 NETIF_MSG_LINK | \
0037 NETIF_MSG_RX_ERR | \
0038 NETIF_MSG_TX_ERR)
0039
0040 #define RIONET_DOORBELL_JOIN 0x1000
0041 #define RIONET_DOORBELL_LEAVE 0x1001
0042
0043 #define RIONET_MAILBOX 0
0044
0045 #define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE
0046 #define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE
0047 #define RIONET_MAX_NETS 8
0048 #define RIONET_MSG_SIZE RIO_MAX_MSG_SIZE
0049 #define RIONET_MAX_MTU (RIONET_MSG_SIZE - ETH_HLEN)
0050
/* Per-net_device state; one instance per RapidIO master port. */
struct rionet_private {
	struct rio_mport *mport;	/* underlying RapidIO master port */
	struct sk_buff *rx_skb[RIONET_RX_RING_SIZE];	/* inbound buffer ring */
	struct sk_buff *tx_skb[RIONET_TX_RING_SIZE];	/* in-flight outbound skbs */
	int rx_slot;		/* next rx slot to (re)fill */
	int tx_slot;		/* next free tx slot */
	int tx_cnt;		/* outbound messages currently in flight */
	int ack_slot;		/* oldest not-yet-completed tx slot */
	spinlock_t lock;	/* protects the rx ring */
	spinlock_t tx_lock;	/* protects the tx ring and counters */
	u32 msg_enable;		/* NETIF_MSG_* verbosity bitmask */
	bool open;		/* true between ndo_open and ndo_stop */
};

/* A remote RapidIO endpoint discovered on the bus. */
struct rionet_peer {
	struct list_head node;	/* link in rionet_net.peers */
	struct rio_dev *rdev;	/* the remote device */
	struct resource *res;	/* outbound doorbell range claimed for it */
};

/* Per-net (per-mport) bookkeeping. */
struct rionet_net {
	struct net_device *ndev;	/* netdev created for this net */
	struct list_head peers;		/* all discovered capable peers */
	spinlock_t lock;		/* protects peers/active/nact */
	struct rio_dev **active;	/* destid -> rio_dev of active peers */
	int nact;			/* number of active peers */
};

/* One slot per possible mport id. */
static struct rionet_net nets[RIONET_MAX_NETS];
0080
0081 #define is_rionet_capable(src_ops, dst_ops) \
0082 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
0083 (dst_ops & RIO_DST_OPS_DATA_MSG) && \
0084 (src_ops & RIO_SRC_OPS_DOORBELL) && \
0085 (dst_ops & RIO_DST_OPS_DOORBELL))
0086 #define dev_rionet_capable(dev) \
0087 is_rionet_capable(dev->src_ops, dev->dst_ops)
0088
0089 #define RIONET_MAC_MATCH(x) (!memcmp((x), "\00\01\00\01", 4))
0090 #define RIONET_GET_DESTID(x) ((*((u8 *)x + 4) << 8) | *((u8 *)x + 5))
0091
/*
 * Drain completed inbound messages into the network stack.
 *
 * Walks the rx ring starting at rnet->rx_slot, attaches each message
 * buffer returned by the inbound mailbox to its waiting skb, and hands
 * it to __netif_rx().  Stops when the mailbox runs dry or the ring
 * wraps.  Returns the index one past the last consumed slot so the
 * caller can refill [rx_slot, return) via rionet_rx_fill().
 *
 * Caller must hold rnet->lock (see rionet_inb_msg_event()).
 */
static int rionet_rx_clean(struct net_device *ndev)
{
	int i;
	int error = 0;
	struct rionet_private *rnet = netdev_priv(ndev);
	void *data;

	i = rnet->rx_slot;

	do {
		/* Slot was consumed earlier and not yet refilled. */
		if (!rnet->rx_skb[i])
			continue;

		/* No more completed inbound messages pending. */
		if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
			break;

		/* Point the skb at the buffer the mailbox just filled. */
		rnet->rx_skb[i]->data = data;
		/* NOTE(review): the full maximum message size is credited
		 * here, so rx_bytes overestimates short frames -- the real
		 * payload length is not visible at this layer; confirm. */
		skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
		rnet->rx_skb[i]->protocol =
		    eth_type_trans(rnet->rx_skb[i], ndev);
		error = __netif_rx(rnet->rx_skb[i]);

		if (error == NET_RX_DROP) {
			ndev->stats.rx_dropped++;
		} else {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += RIO_MAX_MSG_SIZE;
		}

	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);

	return i;
}
0125
0126 static void rionet_rx_fill(struct net_device *ndev, int end)
0127 {
0128 int i;
0129 struct rionet_private *rnet = netdev_priv(ndev);
0130
0131 i = rnet->rx_slot;
0132 do {
0133 rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);
0134
0135 if (!rnet->rx_skb[i])
0136 break;
0137
0138 rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
0139 rnet->rx_skb[i]->data);
0140 } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end);
0141
0142 rnet->rx_slot = i;
0143 }
0144
0145 static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
0146 struct rio_dev *rdev)
0147 {
0148 struct rionet_private *rnet = netdev_priv(ndev);
0149
0150 rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
0151 rnet->tx_skb[rnet->tx_slot] = skb;
0152
0153 ndev->stats.tx_packets++;
0154 ndev->stats.tx_bytes += skb->len;
0155
0156 if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
0157 netif_stop_queue(ndev);
0158
0159 ++rnet->tx_slot;
0160 rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);
0161
0162 if (netif_msg_tx_queued(rnet))
0163 printk(KERN_INFO "%s: queued skb len %8.8x\n", DRV_NAME,
0164 skb->len);
0165
0166 return 0;
0167 }
0168
/*
 * ndo_start_xmit handler.  Ethernet frames ride on RapidIO messages:
 * multicast/broadcast frames are replicated to every active peer, and
 * unicast frames to a 00:01:00:01:<destid> MAC (see RIONET_MAC_MATCH /
 * RIONET_GET_DESTID) are routed by the embedded destination ID.
 */
static netdev_tx_t rionet_start_xmit(struct sk_buff *skb,
				     struct net_device *ndev)
{
	int i;
	struct rionet_private *rnet = netdev_priv(ndev);
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	u16 destid;
	unsigned long flags;
	int add_num = 1;

	spin_lock_irqsave(&rnet->tx_lock, flags);

	/* A multicast frame consumes one tx slot per active peer. */
	if (is_multicast_ether_addr(eth->h_dest))
		add_num = nets[rnet->mport->id].nact;

	if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
		netif_stop_queue(ndev);
		spin_unlock_irqrestore(&rnet->tx_lock, flags);
		printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
		       ndev->name);
		return NETDEV_TX_BUSY;
	}

	if (is_multicast_ether_addr(eth->h_dest)) {
		int count = 0;

		/* Replicate to every active peer.  Each transmit beyond the
		 * first takes an extra reference on the skb so that the
		 * per-message free in the completion path balances out. */
		for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
		     i++)
			if (nets[rnet->mport->id].active[i]) {
				rionet_queue_tx_msg(skb, ndev,
					nets[rnet->mport->id].active[i]);
				if (count)
					refcount_inc(&skb->users);
				count++;
			}
	} else if (RIONET_MAC_MATCH(eth->h_dest)) {
		destid = RIONET_GET_DESTID(eth->h_dest);
		if (nets[rnet->mport->id].active[destid])
			rionet_queue_tx_msg(skb, ndev,
				nets[rnet->mport->id].active[destid]);
		else {
			/*
			 * Destination is not (or no longer) an active peer:
			 * report the packet as sent and drop it so the
			 * stack does not keep retrying a dead target.
			 *
			 * NOTE(review): a unicast frame whose destination
			 * MAC does not match the 00:01:00:01 prefix falls
			 * through both branches and is neither queued nor
			 * freed -- looks like an skb leak; confirm.
			 */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
			dev_kfree_skb_any(skb);
		}
	}

	spin_unlock_irqrestore(&rnet->tx_lock, flags);

	return NETDEV_TX_OK;
}
0226
0227 static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid,
0228 u16 info)
0229 {
0230 struct net_device *ndev = dev_id;
0231 struct rionet_private *rnet = netdev_priv(ndev);
0232 struct rionet_peer *peer;
0233 unsigned char netid = rnet->mport->id;
0234
0235 if (netif_msg_intr(rnet))
0236 printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
0237 DRV_NAME, sid, tid, info);
0238 if (info == RIONET_DOORBELL_JOIN) {
0239 if (!nets[netid].active[sid]) {
0240 spin_lock(&nets[netid].lock);
0241 list_for_each_entry(peer, &nets[netid].peers, node) {
0242 if (peer->rdev->destid == sid) {
0243 nets[netid].active[sid] = peer->rdev;
0244 nets[netid].nact++;
0245 }
0246 }
0247 spin_unlock(&nets[netid].lock);
0248
0249 rio_mport_send_doorbell(mport, sid,
0250 RIONET_DOORBELL_JOIN);
0251 }
0252 } else if (info == RIONET_DOORBELL_LEAVE) {
0253 spin_lock(&nets[netid].lock);
0254 if (nets[netid].active[sid]) {
0255 nets[netid].active[sid] = NULL;
0256 nets[netid].nact--;
0257 }
0258 spin_unlock(&nets[netid].lock);
0259 } else {
0260 if (netif_msg_intr(rnet))
0261 printk(KERN_WARNING "%s: unhandled doorbell\n",
0262 DRV_NAME);
0263 }
0264 }
0265
0266 static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
0267 {
0268 int n;
0269 struct net_device *ndev = dev_id;
0270 struct rionet_private *rnet = netdev_priv(ndev);
0271
0272 if (netif_msg_intr(rnet))
0273 printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
0274 DRV_NAME, mbox, slot);
0275
0276 spin_lock(&rnet->lock);
0277 if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
0278 rionet_rx_fill(ndev, n);
0279 spin_unlock(&rnet->lock);
0280 }
0281
/*
 * Outbound mailbox event callback.  Frees the skbs for every message
 * the mailbox has completed -- everything from ack_slot up to (but not
 * including) the reported @slot -- and wakes the queue once there is
 * ring space again.
 */
static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);

	spin_lock(&rnet->tx_lock);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO
		       "%s: outbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
		/* Release the completed slot; the ring size is a power of
		 * two, so masking wraps the index. */
		dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
		rnet->tx_skb[rnet->ack_slot] = NULL;
		++rnet->ack_slot;
		rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);
		rnet->tx_cnt--;
	}

	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
		netif_wake_queue(ndev);

	spin_unlock(&rnet->tx_lock);
}
0308
0309 static int rionet_open(struct net_device *ndev)
0310 {
0311 int i, rc = 0;
0312 struct rionet_peer *peer;
0313 struct rionet_private *rnet = netdev_priv(ndev);
0314 unsigned char netid = rnet->mport->id;
0315 unsigned long flags;
0316
0317 if (netif_msg_ifup(rnet))
0318 printk(KERN_INFO "%s: open\n", DRV_NAME);
0319
0320 if ((rc = rio_request_inb_dbell(rnet->mport,
0321 (void *)ndev,
0322 RIONET_DOORBELL_JOIN,
0323 RIONET_DOORBELL_LEAVE,
0324 rionet_dbell_event)) < 0)
0325 goto out;
0326
0327 if ((rc = rio_request_inb_mbox(rnet->mport,
0328 (void *)ndev,
0329 RIONET_MAILBOX,
0330 RIONET_RX_RING_SIZE,
0331 rionet_inb_msg_event)) < 0)
0332 goto out;
0333
0334 if ((rc = rio_request_outb_mbox(rnet->mport,
0335 (void *)ndev,
0336 RIONET_MAILBOX,
0337 RIONET_TX_RING_SIZE,
0338 rionet_outb_msg_event)) < 0)
0339 goto out;
0340
0341
0342 for (i = 0; i < RIONET_RX_RING_SIZE; i++)
0343 rnet->rx_skb[i] = NULL;
0344 rnet->rx_slot = 0;
0345 rionet_rx_fill(ndev, 0);
0346
0347 rnet->tx_slot = 0;
0348 rnet->tx_cnt = 0;
0349 rnet->ack_slot = 0;
0350
0351 netif_carrier_on(ndev);
0352 netif_start_queue(ndev);
0353
0354 spin_lock_irqsave(&nets[netid].lock, flags);
0355 list_for_each_entry(peer, &nets[netid].peers, node) {
0356
0357 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
0358 }
0359 spin_unlock_irqrestore(&nets[netid].lock, flags);
0360 rnet->open = true;
0361
0362 out:
0363 return rc;
0364 }
0365
0366 static int rionet_close(struct net_device *ndev)
0367 {
0368 struct rionet_private *rnet = netdev_priv(ndev);
0369 struct rionet_peer *peer;
0370 unsigned char netid = rnet->mport->id;
0371 unsigned long flags;
0372 int i;
0373
0374 if (netif_msg_ifup(rnet))
0375 printk(KERN_INFO "%s: close %s\n", DRV_NAME, ndev->name);
0376
0377 netif_stop_queue(ndev);
0378 netif_carrier_off(ndev);
0379 rnet->open = false;
0380
0381 for (i = 0; i < RIONET_RX_RING_SIZE; i++)
0382 kfree_skb(rnet->rx_skb[i]);
0383
0384 spin_lock_irqsave(&nets[netid].lock, flags);
0385 list_for_each_entry(peer, &nets[netid].peers, node) {
0386 if (nets[netid].active[peer->rdev->destid]) {
0387 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
0388 nets[netid].active[peer->rdev->destid] = NULL;
0389 }
0390 if (peer->res)
0391 rio_release_outb_dbell(peer->rdev, peer->res);
0392 }
0393 spin_unlock_irqrestore(&nets[netid].lock, flags);
0394
0395 rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
0396 RIONET_DOORBELL_LEAVE);
0397 rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
0398 rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);
0399
0400 return 0;
0401 }
0402
/*
 * subsys remove_dev callback: forget a departing RapidIO device.
 * Unlinks its peer entry under the net lock, rings a LEAVE doorbell if
 * the device is still reachable, then releases the peer's doorbell
 * resource and frees it outside the lock.
 */
static void rionet_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	struct rio_dev *rdev = to_rio_dev(dev);
	unsigned char netid = rdev->net->hport->id;
	struct rionet_peer *peer;
	int state, found = 0;
	unsigned long flags;

	/* Devices that were never rionet-capable were never peers. */
	if (!dev_rionet_capable(rdev))
		return;

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		if (peer->rdev == rdev) {
			list_del(&peer->node);
			if (nets[netid].active[rdev->destid]) {
				state = atomic_read(&rdev->state);
				/* Only ring the doorbell if the device is
				 * still present and fully initialized. */
				if (state != RIO_DEVICE_GONE &&
				    state != RIO_DEVICE_INITIALIZING) {
					rio_send_doorbell(rdev,
							  RIONET_DOORBELL_LEAVE);
				}
				nets[netid].active[rdev->destid] = NULL;
				nets[netid].nact--;
			}
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);

	if (found) {
		/* 'peer' was unlinked above while holding the lock, so it
		 * is safe to touch and free it here without the lock. */
		if (peer->res)
			rio_release_outb_dbell(rdev, peer->res);
		kfree(peer);
	}
}
0440
0441 static void rionet_get_drvinfo(struct net_device *ndev,
0442 struct ethtool_drvinfo *info)
0443 {
0444 struct rionet_private *rnet = netdev_priv(ndev);
0445
0446 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
0447 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
0448 strlcpy(info->fw_version, "n/a", sizeof(info->fw_version));
0449 strlcpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info));
0450 }
0451
0452 static u32 rionet_get_msglevel(struct net_device *ndev)
0453 {
0454 struct rionet_private *rnet = netdev_priv(ndev);
0455
0456 return rnet->msg_enable;
0457 }
0458
0459 static void rionet_set_msglevel(struct net_device *ndev, u32 value)
0460 {
0461 struct rionet_private *rnet = netdev_priv(ndev);
0462
0463 rnet->msg_enable = value;
0464 }
0465
/* ethtool operations: driver info, message level, and link state. */
static const struct ethtool_ops rionet_ethtool_ops = {
	.get_drvinfo = rionet_get_drvinfo,
	.get_msglevel = rionet_get_msglevel,
	.set_msglevel = rionet_set_msglevel,
	.get_link = ethtool_op_get_link,
};

/* Network device operations backing each rionet interface. */
static const struct net_device_ops rionet_netdev_ops = {
	.ndo_open = rionet_open,
	.ndo_stop = rionet_close,
	.ndo_start_xmit = rionet_start_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};
0480
0481 static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
0482 {
0483 int rc = 0;
0484 struct rionet_private *rnet;
0485 u8 addr[ETH_ALEN];
0486 u16 device_id;
0487 const size_t rionet_active_bytes = sizeof(void *) *
0488 RIO_MAX_ROUTE_ENTRIES(mport->sys_size);
0489
0490 nets[mport->id].active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
0491 get_order(rionet_active_bytes));
0492 if (!nets[mport->id].active) {
0493 rc = -ENOMEM;
0494 goto out;
0495 }
0496 memset((void *)nets[mport->id].active, 0, rionet_active_bytes);
0497
0498
0499 rnet = netdev_priv(ndev);
0500 rnet->mport = mport;
0501 rnet->open = false;
0502
0503
0504 device_id = rio_local_get_device_id(mport);
0505 addr[0] = 0x00;
0506 addr[1] = 0x01;
0507 addr[2] = 0x00;
0508 addr[3] = 0x01;
0509 addr[4] = device_id >> 8;
0510 addr[5] = device_id & 0xff;
0511 eth_hw_addr_set(ndev, addr);
0512
0513 ndev->netdev_ops = &rionet_netdev_ops;
0514 ndev->mtu = RIONET_MAX_MTU;
0515
0516 ndev->min_mtu = ETH_MIN_MTU;
0517 ndev->max_mtu = RIONET_MAX_MTU;
0518 ndev->features = NETIF_F_LLTX;
0519 SET_NETDEV_DEV(ndev, &mport->dev);
0520 ndev->ethtool_ops = &rionet_ethtool_ops;
0521
0522 spin_lock_init(&rnet->lock);
0523 spin_lock_init(&rnet->tx_lock);
0524
0525 rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;
0526
0527 rc = register_netdev(ndev);
0528 if (rc != 0) {
0529 free_pages((unsigned long)nets[mport->id].active,
0530 get_order(rionet_active_bytes));
0531 goto out;
0532 }
0533
0534 printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n",
0535 ndev->name,
0536 DRV_NAME,
0537 DRV_DESC,
0538 DRV_VERSION,
0539 ndev->dev_addr,
0540 mport->name);
0541
0542 out:
0543 return rc;
0544 }
0545
/*
 * subsys add_dev callback, invoked for every RapidIO device that
 * appears on the bus.  The first capable device on a net also triggers
 * creation of the local net_device; every rionet-capable remote device
 * is then recorded as a peer and, if the interface is already up,
 * invited with a JOIN doorbell.
 */
static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
{
	int rc = -ENODEV;
	u32 lsrc_ops, ldst_ops;
	struct rionet_peer *peer;
	struct net_device *ndev = NULL;
	struct rio_dev *rdev = to_rio_dev(dev);
	unsigned char netid = rdev->net->hport->id;

	if (netid >= RIONET_MAX_NETS)
		return rc;

	/*
	 * First device seen on this net: verify the local mport itself
	 * supports data messages and doorbells, then create and register
	 * the net_device.  Later probes on the same net skip this step.
	 */
	if (!nets[netid].ndev) {
		rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
					 &lsrc_ops);
		rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
					 &ldst_ops);
		if (!is_rionet_capable(lsrc_ops, ldst_ops)) {
			printk(KERN_ERR
			       "%s: local device %s is not network capable\n",
			       DRV_NAME, rdev->net->hport->name);
			goto out;
		}

		/* Allocate our net_device structure. */
		ndev = alloc_etherdev(sizeof(struct rionet_private));
		if (ndev == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		rc = rionet_setup_netdev(rdev->net->hport, ndev);
		if (rc) {
			printk(KERN_ERR "%s: failed to setup netdev (rc=%d)\n",
			       DRV_NAME, rc);
			free_netdev(ndev);
			goto out;
		}

		INIT_LIST_HEAD(&nets[netid].peers);
		spin_lock_init(&nets[netid].lock);
		nets[netid].nact = 0;
		nets[netid].ndev = ndev;
	}

	/* Record the remote device as a peer if it supports both data
	 * messages and doorbells. */
	if (dev_rionet_capable(rdev)) {
		struct rionet_private *rnet;
		unsigned long flags;

		rnet = netdev_priv(nets[netid].ndev);

		peer = kzalloc(sizeof(*peer), GFP_KERNEL);
		if (!peer) {
			rc = -ENOMEM;
			goto out;
		}
		peer->rdev = rdev;
		/* Claim the JOIN/LEAVE doorbell range toward this peer. */
		peer->res = rio_request_outb_dbell(peer->rdev,
						   RIONET_DOORBELL_JOIN,
						   RIONET_DOORBELL_LEAVE);
		if (!peer->res) {
			pr_err("%s: error requesting doorbells\n", DRV_NAME);
			kfree(peer);
			rc = -ENOMEM;
			goto out;
		}

		spin_lock_irqsave(&nets[netid].lock, flags);
		list_add_tail(&peer->node, &nets[netid].peers);
		spin_unlock_irqrestore(&nets[netid].lock, flags);
		pr_debug("%s: %s add peer %s\n",
			 DRV_NAME, __func__, rio_name(rdev));

		/* If the interface is already up, invite the new peer now. */
		if (rnet->open)
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
	}

	return 0;
out:
	return rc;
}
0637
0638 static int rionet_shutdown(struct notifier_block *nb, unsigned long code,
0639 void *unused)
0640 {
0641 struct rionet_peer *peer;
0642 unsigned long flags;
0643 int i;
0644
0645 pr_debug("%s: %s\n", DRV_NAME, __func__);
0646
0647 for (i = 0; i < RIONET_MAX_NETS; i++) {
0648 if (!nets[i].ndev)
0649 continue;
0650
0651 spin_lock_irqsave(&nets[i].lock, flags);
0652 list_for_each_entry(peer, &nets[i].peers, node) {
0653 if (nets[i].active[peer->rdev->destid]) {
0654 rio_send_doorbell(peer->rdev,
0655 RIONET_DOORBELL_LEAVE);
0656 nets[i].active[peer->rdev->destid] = NULL;
0657 }
0658 }
0659 spin_unlock_irqrestore(&nets[i].lock, flags);
0660 }
0661
0662 return NOTIFY_DONE;
0663 }
0664
/*
 * mport class remove callback: tear down the net_device created for
 * this master port and free its destid lookup table.
 */
static void rionet_remove_mport(struct device *dev,
				struct class_interface *class_intf)
{
	struct rio_mport *mport = to_rio_mport(dev);
	struct net_device *ndev;
	int id = mport->id;

	pr_debug("%s %s\n", __func__, mport->name);

	/* All peers should already have been removed by this point. */
	WARN(nets[id].nact, "%s called when connected to %d peers\n",
	     __func__, nets[id].nact);
	WARN(!nets[id].ndev, "%s called for mport without NDEV\n",
	     __func__);

	if (nets[id].ndev) {
		ndev = nets[id].ndev;
		netif_stop_queue(ndev);
		unregister_netdev(ndev);

		/* Size must match the allocation in rionet_setup_netdev(). */
		free_pages((unsigned long)nets[id].active,
			   get_order(sizeof(void *) *
				     RIO_MAX_ROUTE_ENTRIES(mport->sys_size)));
		nets[id].active = NULL;
		free_netdev(ndev);
		nets[id].ndev = NULL;
	}
}
0692
#ifdef MODULE
/* Match any RapidIO device; rionet capability is checked at probe time. */
static struct rio_device_id rionet_id_table[] = {
	{RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)},
	{ 0, }
};

MODULE_DEVICE_TABLE(rapidio, rionet_id_table);
#endif

/* Hooks invoked as RapidIO devices come and go on the bus. */
static struct subsys_interface rionet_interface = {
	.name = "rionet",
	.subsys = &rio_bus_type,
	.add_dev = rionet_add_dev,
	.remove_dev = rionet_remove_dev,
};

/* Sends LEAVE doorbells to all peers on reboot/shutdown. */
static struct notifier_block rionet_notifier = {
	.notifier_call = rionet_shutdown,
};

/* Cleans up per-mport state when an mport device is removed. */
static struct class_interface rio_mport_interface __refdata = {
	.class = &rio_mport_class,
	.add_dev = NULL,
	.remove_dev = rionet_remove_mport,
};
0719
0720 static int __init rionet_init(void)
0721 {
0722 int ret;
0723
0724 ret = register_reboot_notifier(&rionet_notifier);
0725 if (ret) {
0726 pr_err("%s: failed to register reboot notifier (err=%d)\n",
0727 DRV_NAME, ret);
0728 return ret;
0729 }
0730
0731 ret = class_interface_register(&rio_mport_interface);
0732 if (ret) {
0733 pr_err("%s: class_interface_register error: %d\n",
0734 DRV_NAME, ret);
0735 return ret;
0736 }
0737
0738 return subsys_interface_register(&rionet_interface);
0739 }
0740
/*
 * Module unload: stop listening for reboot events, then detach from the
 * RIO bus and the mport class, which triggers the remove callbacks for
 * any devices still present.
 */
static void __exit rionet_exit(void)
{
	unregister_reboot_notifier(&rionet_notifier);
	subsys_interface_unregister(&rionet_interface);
	class_interface_unregister(&rio_mport_interface);
}
0747
0748 late_initcall(rionet_init);
0749 module_exit(rionet_exit);