0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * IPv4 over IEEE 1394, per RFC 2734
0004  * IPv6 over IEEE 1394, per RFC 3146
0005  *
0006  * Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com>
0007  *
0008  * based on eth1394 by Ben Collins et al
0009  */
0010 
0011 #include <linux/bug.h>
0012 #include <linux/compiler.h>
0013 #include <linux/delay.h>
0014 #include <linux/device.h>
0015 #include <linux/ethtool.h>
0016 #include <linux/firewire.h>
0017 #include <linux/firewire-constants.h>
0018 #include <linux/highmem.h>
0019 #include <linux/in.h>
0020 #include <linux/ip.h>
0021 #include <linux/jiffies.h>
0022 #include <linux/mod_devicetable.h>
0023 #include <linux/module.h>
0024 #include <linux/moduleparam.h>
0025 #include <linux/mutex.h>
0026 #include <linux/netdevice.h>
0027 #include <linux/skbuff.h>
0028 #include <linux/slab.h>
0029 #include <linux/spinlock.h>
0030 
0031 #include <asm/unaligned.h>
0032 #include <net/arp.h>
0033 #include <net/firewire.h>
0034 
0035 /* rx limits */
0036 #define FWNET_MAX_FRAGMENTS     30 /* arbitrary, > TX queue depth */
0037 #define FWNET_ISO_PAGE_COUNT        (PAGE_SIZE < 16*1024 ? 4 : 2)
0038 
0039 /* tx limits */
0040 #define FWNET_MAX_QUEUED_DATAGRAMS  20 /* < 64 = number of tlabels */
0041 #define FWNET_MIN_QUEUED_DATAGRAMS  10 /* should keep AT DMA busy enough */
0042 #define FWNET_TX_QUEUE_LEN      FWNET_MAX_QUEUED_DATAGRAMS /* ? */
0043 
0044 #define IEEE1394_BROADCAST_CHANNEL  31
0045 #define IEEE1394_ALL_NODES      (0xffc0 | 0x003f)
0046 #define IEEE1394_MAX_PAYLOAD_S100   512
0047 #define FWNET_NO_FIFO_ADDR      (~0ULL)
0048 
0049 #define IANA_SPECIFIER_ID       0x00005eU
0050 #define RFC2734_SW_VERSION      0x000001U
0051 #define RFC3146_SW_VERSION      0x000002U
0052 
0053 #define IEEE1394_GASP_HDR_SIZE  8
0054 
0055 #define RFC2374_UNFRAG_HDR_SIZE 4
0056 #define RFC2374_FRAG_HDR_SIZE   8
0057 #define RFC2374_FRAG_OVERHEAD   4
0058 
0059 #define RFC2374_HDR_UNFRAG  0   /* unfragmented     */
0060 #define RFC2374_HDR_FIRSTFRAG   1   /* first fragment   */
0061 #define RFC2374_HDR_LASTFRAG    2   /* last fragment    */
0062 #define RFC2374_HDR_INTFRAG 3   /* interior fragment    */
0063 
0064 static bool fwnet_hwaddr_is_multicast(u8 *ha)
0065 {
0066     return !!(*ha & 1);
0067 }
0068 
0069 /* IPv4 and IPv6 encapsulation header */
0070 struct rfc2734_header {
0071     u32 w0;
0072     u32 w1;
0073 };
0074 
0075 #define fwnet_get_hdr_lf(h)     (((h)->w0 & 0xc0000000) >> 30)
0076 #define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
0077 #define fwnet_get_hdr_dg_size(h)    ((((h)->w0 & 0x0fff0000) >> 16) + 1)
0078 #define fwnet_get_hdr_fg_off(h)     (((h)->w0 & 0x00000fff))
0079 #define fwnet_get_hdr_dgl(h)        (((h)->w1 & 0xffff0000) >> 16)
0080 
0081 #define fwnet_set_hdr_lf(lf)        ((lf) << 30)
0082 #define fwnet_set_hdr_ether_type(et)    (et)
0083 #define fwnet_set_hdr_dg_size(dgs)  (((dgs) - 1) << 16)
0084 #define fwnet_set_hdr_fg_off(fgo)   (fgo)
0085 
0086 #define fwnet_set_hdr_dgl(dgl)      ((dgl) << 16)
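     /*
      * Layout packed/unpacked by the accessors above (RFC 2374 encapsulation
      * headers):
      *   w0: lf in bits 31..30; dg_size - 1 in bits 27..16 (fragments only);
      *       ether_type in bits 15..0 (unfragmented and first fragments) or
      *       fg_off in bits 11..0 (interior and last fragments)
      *   w1: dgl in bits 31..16 (fragments only)
      */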
0087 
0088 static inline void fwnet_make_uf_hdr(struct rfc2734_header *hdr,
0089         unsigned ether_type)
0090 {
0091     hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_UNFRAG)
0092           | fwnet_set_hdr_ether_type(ether_type);
0093 }
0094 
0095 static inline void fwnet_make_ff_hdr(struct rfc2734_header *hdr,
0096         unsigned ether_type, unsigned dg_size, unsigned dgl)
0097 {
0098     hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_FIRSTFRAG)
0099           | fwnet_set_hdr_dg_size(dg_size)
0100           | fwnet_set_hdr_ether_type(ether_type);
0101     hdr->w1 = fwnet_set_hdr_dgl(dgl);
0102 }
0103 
0104 static inline void fwnet_make_sf_hdr(struct rfc2734_header *hdr,
0105         unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl)
0106 {
0107     hdr->w0 = fwnet_set_hdr_lf(lf)
0108           | fwnet_set_hdr_dg_size(dg_size)
0109           | fwnet_set_hdr_fg_off(fg_off);
0110     hdr->w1 = fwnet_set_hdr_dgl(dgl);
0111 }
0112 
0113 /* This list keeps track of what parts of the datagram have been filled in */
0114 struct fwnet_fragment_info {
0115     struct list_head fi_link;
0116     u16 offset;
0117     u16 len;
0118 };
0119 
0120 struct fwnet_partial_datagram {
0121     struct list_head pd_link;
0122     struct list_head fi_list;
0123     struct sk_buff *skb;
0124     /* FIXME Why not use skb->data? */
0125     char *pbuf;
0126     u16 datagram_label;
0127     u16 ether_type;
0128     u16 datagram_size;
0129 };
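     /*
      * fi_list records which byte ranges of pbuf have already been copied in;
      * fwnet_frag_new() merges adjacent ranges, so a fully reassembled
      * datagram ends up as a single fwnet_fragment_info covering
      * datagram_size bytes, which is what fwnet_pd_is_complete() checks.
      */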
0130 
0131 static DEFINE_MUTEX(fwnet_device_mutex);
0132 static LIST_HEAD(fwnet_device_list);
0133 
0134 struct fwnet_device {
0135     struct list_head dev_link;
0136     spinlock_t lock;
0137     enum {
0138         FWNET_BROADCAST_ERROR,
0139         FWNET_BROADCAST_RUNNING,
0140         FWNET_BROADCAST_STOPPED,
0141     } broadcast_state;
0142     struct fw_iso_context *broadcast_rcv_context;
0143     struct fw_iso_buffer broadcast_rcv_buffer;
0144     void **broadcast_rcv_buffer_ptrs;
0145     unsigned broadcast_rcv_next_ptr;
0146     unsigned num_broadcast_rcv_ptrs;
0147     unsigned rcv_buffer_size;
0148     /*
0149      * Maximum unfragmented datagram size that can be broadcast by the
0150      * hardware.  The GASP header and the unfragmented encapsulation
0151      * header overhead have already been subtracted from it.
0152      */
0153     unsigned broadcast_xmt_max_payload;
0154     u16 broadcast_xmt_datagramlabel;
0155 
0156     /*
0157      * The CSR address that remote nodes must send datagrams to for us to
0158      * receive them.
0159      */
0160     struct fw_address_handler handler;
0161     u64 local_fifo;
0162 
0163     /* Number of tx datagrams that have been queued but not yet acked */
0164     int queued_datagrams;
0165 
0166     int peer_count;
0167     struct list_head peer_list;
0168     struct fw_card *card;
0169     struct net_device *netdev;
0170 };
0171 
0172 struct fwnet_peer {
0173     struct list_head peer_link;
0174     struct fwnet_device *dev;
0175     u64 guid;
0176 
0177     /* guarded by dev->lock */
0178     struct list_head pd_list; /* received partial datagrams */
0179     unsigned pdg_size;        /* pd_list size */
0180 
0181     u16 datagram_label;       /* outgoing datagram label */
0182     u16 max_payload;          /* includes RFC2374_FRAG_HDR_SIZE overhead */
0183     int node_id;
0184     int generation;
0185     unsigned speed;
0186 };
0187 
0188 /* This is our task struct. It's used for the packet complete callback.  */
0189 struct fwnet_packet_task {
0190     struct fw_transaction transaction;
0191     struct rfc2734_header hdr;
0192     struct sk_buff *skb;
0193     struct fwnet_device *dev;
0194 
0195     int outstanding_pkts;
0196     u64 fifo_addr;
0197     u16 dest_node;
0198     u16 max_payload;
0199     u8 generation;
0200     u8 speed;
0201     u8 enqueued;
0202 };
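     /*
      * One fwnet_packet_task describes one datagram in flight on the TX side.
      * outstanding_pkts counts the fragments still to be written; enqueued,
      * updated under dev->lock, lets fwnet_send_packet() and the completion
      * path decide which of them is the last user and must free the task.
      */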
0203 
0204 /*
0205  * saddr == NULL means use device source address.
0206  * daddr == NULL means leave destination address (e.g. unresolved ARP).
0207  */
0208 static int fwnet_header_create(struct sk_buff *skb, struct net_device *net,
0209             unsigned short type, const void *daddr,
0210             const void *saddr, unsigned len)
0211 {
0212     struct fwnet_header *h;
0213 
0214     h = skb_push(skb, sizeof(*h));
0215     put_unaligned_be16(type, &h->h_proto);
0216 
0217     if (net->flags & (IFF_LOOPBACK | IFF_NOARP)) {
0218         memset(h->h_dest, 0, net->addr_len);
0219 
0220         return net->hard_header_len;
0221     }
0222 
0223     if (daddr) {
0224         memcpy(h->h_dest, daddr, net->addr_len);
0225 
0226         return net->hard_header_len;
0227     }
0228 
0229     return -net->hard_header_len;
0230 }
0231 
0232 static int fwnet_header_cache(const struct neighbour *neigh,
0233                   struct hh_cache *hh, __be16 type)
0234 {
0235     struct net_device *net;
0236     struct fwnet_header *h;
0237 
0238     if (type == cpu_to_be16(ETH_P_802_3))
0239         return -1;
0240     net = neigh->dev;
0241     h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h)));
0242     h->h_proto = type;
0243     memcpy(h->h_dest, neigh->ha, net->addr_len);
0244 
0245     /* Pairs with the READ_ONCE() in neigh_resolve_output(),
0246      * neigh_hh_output() and neigh_update_hhs().
0247      */
0248     smp_store_release(&hh->hh_len, FWNET_HLEN);
0249 
0250     return 0;
0251 }
0252 
0253 /* Called by Address Resolution module to notify changes in address. */
0254 static void fwnet_header_cache_update(struct hh_cache *hh,
0255         const struct net_device *net, const unsigned char *haddr)
0256 {
0257     memcpy((u8 *)hh->hh_data + HH_DATA_OFF(FWNET_HLEN), haddr, net->addr_len);
0258 }
0259 
0260 static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr)
0261 {
0262     memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN);
0263 
0264     return FWNET_ALEN;
0265 }
0266 
0267 static const struct header_ops fwnet_header_ops = {
0268     .create         = fwnet_header_create,
0269     .cache      = fwnet_header_cache,
0270     .cache_update   = fwnet_header_cache_update,
0271     .parse          = fwnet_header_parse,
0272 };
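     /*
      * These header_ops present an Ethernet-like link layer to the stack:
      * fwnet_header_create() prepends the FWNET_HLEN-byte pseudo header
      * (16-byte hardware address plus ethertype), which fwnet_tx() strips
      * again before doing the RFC 2734/3146 encapsulation.
      */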
0273 
0274 /* FIXME: is this correct for all cases? */
0275 static bool fwnet_frag_overlap(struct fwnet_partial_datagram *pd,
0276                    unsigned offset, unsigned len)
0277 {
0278     struct fwnet_fragment_info *fi;
0279     unsigned end = offset + len;
0280 
0281     list_for_each_entry(fi, &pd->fi_list, fi_link)
0282         if (offset < fi->offset + fi->len && end > fi->offset)
0283             return true;
0284 
0285     return false;
0286 }
0287 
0288 /* Assumes that new fragment does not overlap any existing fragments */
0289 static struct fwnet_fragment_info *fwnet_frag_new(
0290     struct fwnet_partial_datagram *pd, unsigned offset, unsigned len)
0291 {
0292     struct fwnet_fragment_info *fi, *fi2, *new;
0293     struct list_head *list;
0294 
0295     list = &pd->fi_list;
0296     list_for_each_entry(fi, &pd->fi_list, fi_link) {
0297         if (fi->offset + fi->len == offset) {
0298             /* The new fragment can be tacked on to the end */
0299             /* Did the new fragment plug a hole? */
0300             fi2 = list_entry(fi->fi_link.next,
0301                      struct fwnet_fragment_info, fi_link);
0302             if (fi->offset + fi->len == fi2->offset) {
0303                 /* glue fragments together */
0304                 fi->len += len + fi2->len;
0305                 list_del(&fi2->fi_link);
0306                 kfree(fi2);
0307             } else {
0308                 fi->len += len;
0309             }
0310 
0311             return fi;
0312         }
0313         if (offset + len == fi->offset) {
0314             /* The new fragment can be tacked on to the beginning */
0315             /* Did the new fragment plug a hole? */
0316             fi2 = list_entry(fi->fi_link.prev,
0317                      struct fwnet_fragment_info, fi_link);
0318             if (fi2->offset + fi2->len == fi->offset) {
0319                 /* glue fragments together */
0320                 fi2->len += fi->len + len;
0321                 list_del(&fi->fi_link);
0322                 kfree(fi);
0323 
0324                 return fi2;
0325             }
0326             fi->offset = offset;
0327             fi->len += len;
0328 
0329             return fi;
0330         }
0331         if (offset > fi->offset + fi->len) {
0332             list = &fi->fi_link;
0333             break;
0334         }
0335         if (offset + len < fi->offset) {
0336             list = fi->fi_link.prev;
0337             break;
0338         }
0339     }
0340 
0341     new = kmalloc(sizeof(*new), GFP_ATOMIC);
0342     if (!new)
0343         return NULL;
0344 
0345     new->offset = offset;
0346     new->len = len;
0347     list_add(&new->fi_link, list);
0348 
0349     return new;
0350 }
0351 
0352 static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net,
0353         struct fwnet_peer *peer, u16 datagram_label, unsigned dg_size,
0354         void *frag_buf, unsigned frag_off, unsigned frag_len)
0355 {
0356     struct fwnet_partial_datagram *new;
0357     struct fwnet_fragment_info *fi;
0358 
0359     new = kmalloc(sizeof(*new), GFP_ATOMIC);
0360     if (!new)
0361         goto fail;
0362 
0363     INIT_LIST_HEAD(&new->fi_list);
0364     fi = fwnet_frag_new(new, frag_off, frag_len);
0365     if (fi == NULL)
0366         goto fail_w_new;
0367 
0368     new->datagram_label = datagram_label;
0369     new->datagram_size = dg_size;
0370     new->skb = dev_alloc_skb(dg_size + LL_RESERVED_SPACE(net));
0371     if (new->skb == NULL)
0372         goto fail_w_fi;
0373 
0374     skb_reserve(new->skb, LL_RESERVED_SPACE(net));
0375     new->pbuf = skb_put(new->skb, dg_size);
0376     memcpy(new->pbuf + frag_off, frag_buf, frag_len);
0377     list_add_tail(&new->pd_link, &peer->pd_list);
0378 
0379     return new;
0380 
0381 fail_w_fi:
0382     kfree(fi);
0383 fail_w_new:
0384     kfree(new);
0385 fail:
0386     return NULL;
0387 }
0388 
0389 static struct fwnet_partial_datagram *fwnet_pd_find(struct fwnet_peer *peer,
0390                             u16 datagram_label)
0391 {
0392     struct fwnet_partial_datagram *pd;
0393 
0394     list_for_each_entry(pd, &peer->pd_list, pd_link)
0395         if (pd->datagram_label == datagram_label)
0396             return pd;
0397 
0398     return NULL;
0399 }
0400 
0401 
0402 static void fwnet_pd_delete(struct fwnet_partial_datagram *old)
0403 {
0404     struct fwnet_fragment_info *fi, *n;
0405 
0406     list_for_each_entry_safe(fi, n, &old->fi_list, fi_link)
0407         kfree(fi);
0408 
0409     list_del(&old->pd_link);
0410     dev_kfree_skb_any(old->skb);
0411     kfree(old);
0412 }
0413 
0414 static bool fwnet_pd_update(struct fwnet_peer *peer,
0415         struct fwnet_partial_datagram *pd, void *frag_buf,
0416         unsigned frag_off, unsigned frag_len)
0417 {
0418     if (fwnet_frag_new(pd, frag_off, frag_len) == NULL)
0419         return false;
0420 
0421     memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
0422 
0423     /*
0424      * Move this entry to the tail so the least recently updated partial
0425      * datagrams percolate to the head, where they are evicted first
0426      */
0427     list_move_tail(&pd->pd_link, &peer->pd_list);
0428 
0429     return true;
0430 }
0431 
0432 static bool fwnet_pd_is_complete(struct fwnet_partial_datagram *pd)
0433 {
0434     struct fwnet_fragment_info *fi;
0435 
0436     fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link);
0437 
0438     return fi->len == pd->datagram_size;
0439 }
0440 
0441 /* caller must hold dev->lock */
0442 static struct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev,
0443                           u64 guid)
0444 {
0445     struct fwnet_peer *peer;
0446 
0447     list_for_each_entry(peer, &dev->peer_list, peer_link)
0448         if (peer->guid == guid)
0449             return peer;
0450 
0451     return NULL;
0452 }
0453 
0454 /* caller must hold dev->lock */
0455 static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev,
0456                         int node_id, int generation)
0457 {
0458     struct fwnet_peer *peer;
0459 
0460     list_for_each_entry(peer, &dev->peer_list, peer_link)
0461         if (peer->node_id    == node_id &&
0462             peer->generation == generation)
0463             return peer;
0464 
0465     return NULL;
0466 }
0467 
0468 /* See IEEE 1394-2008 table 6-4, table 8-8, table 16-18. */
0469 static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed)
0470 {
0471     max_rec = min(max_rec, speed + 8);
0472     max_rec = clamp(max_rec, 8U, 11U); /* 512...4096 */
0473 
0474     return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE;
0475 }
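     /*
      * Example: a peer advertising max_rec = 10, reachable at S400 (speed
      * code 2), gives min(10, 2 + 8) = 10, i.e. a 2^(10+1) = 2048 byte block
      * write limit; subtracting the 8-byte fragment header yields 2040.
      */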
0476 
0477 
0478 static int fwnet_finish_incoming_packet(struct net_device *net,
0479                     struct sk_buff *skb, u16 source_node_id,
0480                     bool is_broadcast, u16 ether_type)
0481 {
0482     int status;
0483 
0484     switch (ether_type) {
0485     case ETH_P_ARP:
0486     case ETH_P_IP:
0487 #if IS_ENABLED(CONFIG_IPV6)
0488     case ETH_P_IPV6:
0489 #endif
0490         break;
0491     default:
0492         goto err;
0493     }
0494 
0495     /* Write metadata, and then pass to the receive level */
0496     skb->dev = net;
0497     skb->ip_summed = CHECKSUM_NONE;
0498 
0499     /*
0500      * Parse the encapsulation header. This actually does the job of
0501      * converting to an ethernet-like pseudo frame header.
0502      */
0503     if (dev_hard_header(skb, net, ether_type,
0504                is_broadcast ? net->broadcast : net->dev_addr,
0505                NULL, skb->len) >= 0) {
0506         struct fwnet_header *eth;
0507         u16 *rawp;
0508         __be16 protocol;
0509 
0510         skb_reset_mac_header(skb);
0511         skb_pull(skb, sizeof(*eth));
0512         eth = (struct fwnet_header *)skb_mac_header(skb);
0513         if (fwnet_hwaddr_is_multicast(eth->h_dest)) {
0514             if (memcmp(eth->h_dest, net->broadcast,
0515                    net->addr_len) == 0)
0516                 skb->pkt_type = PACKET_BROADCAST;
0517 #if 0
0518             else
0519                 skb->pkt_type = PACKET_MULTICAST;
0520 #endif
0521         } else {
0522             if (memcmp(eth->h_dest, net->dev_addr, net->addr_len))
0523                 skb->pkt_type = PACKET_OTHERHOST;
0524         }
0525         if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
0526             protocol = eth->h_proto;
0527         } else {
0528             rawp = (u16 *)skb->data;
0529             if (*rawp == 0xffff)
0530                 protocol = htons(ETH_P_802_3);
0531             else
0532                 protocol = htons(ETH_P_802_2);
0533         }
0534         skb->protocol = protocol;
0535     }
0536     status = netif_rx(skb);
0537     if (status == NET_RX_DROP) {
0538         net->stats.rx_errors++;
0539         net->stats.rx_dropped++;
0540     } else {
0541         net->stats.rx_packets++;
0542         net->stats.rx_bytes += skb->len;
0543     }
0544 
0545     return 0;
0546 
0547  err:
0548     net->stats.rx_errors++;
0549     net->stats.rx_dropped++;
0550 
0551     dev_kfree_skb_any(skb);
0552 
0553     return -ENOENT;
0554 }
0555 
0556 static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
0557                  int source_node_id, int generation,
0558                  bool is_broadcast)
0559 {
0560     struct sk_buff *skb;
0561     struct net_device *net = dev->netdev;
0562     struct rfc2734_header hdr;
0563     unsigned lf;
0564     unsigned long flags;
0565     struct fwnet_peer *peer;
0566     struct fwnet_partial_datagram *pd;
0567     int fg_off;
0568     int dg_size;
0569     u16 datagram_label;
0570     int retval;
0571     u16 ether_type;
0572 
0573     if (len <= RFC2374_UNFRAG_HDR_SIZE)
0574         return 0;
0575 
0576     hdr.w0 = be32_to_cpu(buf[0]);
0577     lf = fwnet_get_hdr_lf(&hdr);
0578     if (lf == RFC2374_HDR_UNFRAG) {
0579         /*
0580          * An unfragmented datagram has been received by the ieee1394
0581          * bus. Build an skbuff around it so we can pass it to the
0582          * high level network layer.
0583          */
0584         ether_type = fwnet_get_hdr_ether_type(&hdr);
0585         buf++;
0586         len -= RFC2374_UNFRAG_HDR_SIZE;
0587 
0588         skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net));
0589         if (unlikely(!skb)) {
0590             net->stats.rx_dropped++;
0591 
0592             return -ENOMEM;
0593         }
0594         skb_reserve(skb, LL_RESERVED_SPACE(net));
0595         skb_put_data(skb, buf, len);
0596 
0597         return fwnet_finish_incoming_packet(net, skb, source_node_id,
0598                             is_broadcast, ether_type);
0599     }
0600 
0601     /* A datagram fragment has been received, now the fun begins. */
0602 
0603     if (len <= RFC2374_FRAG_HDR_SIZE)
0604         return 0;
0605 
0606     hdr.w1 = ntohl(buf[1]);
0607     buf += 2;
0608     len -= RFC2374_FRAG_HDR_SIZE;
0609     if (lf == RFC2374_HDR_FIRSTFRAG) {
0610         ether_type = fwnet_get_hdr_ether_type(&hdr);
0611         fg_off = 0;
0612     } else {
0613         ether_type = 0;
0614         fg_off = fwnet_get_hdr_fg_off(&hdr);
0615     }
0616     datagram_label = fwnet_get_hdr_dgl(&hdr);
0617     dg_size = fwnet_get_hdr_dg_size(&hdr);
0618 
0619     if (fg_off + len > dg_size)
0620         return 0;
0621 
0622     spin_lock_irqsave(&dev->lock, flags);
0623 
0624     peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
0625     if (!peer) {
0626         retval = -ENOENT;
0627         goto fail;
0628     }
0629 
0630     pd = fwnet_pd_find(peer, datagram_label);
0631     if (pd == NULL) {
0632         while (peer->pdg_size >= FWNET_MAX_FRAGMENTS) {
0633             /* remove the oldest */
0634             fwnet_pd_delete(list_first_entry(&peer->pd_list,
0635                 struct fwnet_partial_datagram, pd_link));
0636             peer->pdg_size--;
0637         }
0638         pd = fwnet_pd_new(net, peer, datagram_label,
0639                   dg_size, buf, fg_off, len);
0640         if (pd == NULL) {
0641             retval = -ENOMEM;
0642             goto fail;
0643         }
0644         peer->pdg_size++;
0645     } else {
0646         if (fwnet_frag_overlap(pd, fg_off, len) ||
0647             pd->datagram_size != dg_size) {
0648             /*
0649              * Differing datagram sizes or overlapping fragments,
0650              * discard old datagram and start a new one.
0651              */
0652             fwnet_pd_delete(pd);
0653             pd = fwnet_pd_new(net, peer, datagram_label,
0654                       dg_size, buf, fg_off, len);
0655             if (pd == NULL) {
0656                 peer->pdg_size--;
0657                 retval = -ENOMEM;
0658                 goto fail;
0659             }
0660         } else {
0661             if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
0662                 /*
0663                  * Couldn't save off fragment anyway
0664                  * so might as well obliterate the
0665                  * datagram now.
0666                  */
0667                 fwnet_pd_delete(pd);
0668                 peer->pdg_size--;
0669                 retval = -ENOMEM;
0670                 goto fail;
0671             }
0672         }
0673     } /* new datagram or add to existing one */
0674 
0675     if (lf == RFC2374_HDR_FIRSTFRAG)
0676         pd->ether_type = ether_type;
0677 
0678     if (fwnet_pd_is_complete(pd)) {
0679         ether_type = pd->ether_type;
0680         peer->pdg_size--;
0681         skb = skb_get(pd->skb);
0682         fwnet_pd_delete(pd);
0683 
0684         spin_unlock_irqrestore(&dev->lock, flags);
0685 
0686         return fwnet_finish_incoming_packet(net, skb, source_node_id,
0687                             false, ether_type);
0688     }
0689     /*
0690      * Datagram is not complete, we're done for the
0691      * moment.
0692      */
0693     retval = 0;
0694  fail:
0695     spin_unlock_irqrestore(&dev->lock, flags);
0696 
0697     return retval;
0698 }
0699 
0700 static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
0701         int tcode, int destination, int source, int generation,
0702         unsigned long long offset, void *payload, size_t length,
0703         void *callback_data)
0704 {
0705     struct fwnet_device *dev = callback_data;
0706     int rcode;
0707 
0708     if (destination == IEEE1394_ALL_NODES) {
0709         kfree(r);
0710 
0711         return;
0712     }
0713 
0714     if (offset != dev->handler.offset)
0715         rcode = RCODE_ADDRESS_ERROR;
0716     else if (tcode != TCODE_WRITE_BLOCK_REQUEST)
0717         rcode = RCODE_TYPE_ERROR;
0718     else if (fwnet_incoming_packet(dev, payload, length,
0719                        source, generation, false) != 0) {
0720         dev_err(&dev->netdev->dev, "incoming packet failure\n");
0721         rcode = RCODE_CONFLICT_ERROR;
0722     } else
0723         rcode = RCODE_COMPLETE;
0724 
0725     fw_send_response(card, r, rcode);
0726 }
0727 
0728 static int gasp_source_id(__be32 *p)
0729 {
0730     return be32_to_cpu(p[0]) >> 16;
0731 }
0732 
0733 static u32 gasp_specifier_id(__be32 *p)
0734 {
0735     return (be32_to_cpu(p[0]) & 0xffff) << 8 |
0736            (be32_to_cpu(p[1]) & 0xff000000) >> 24;
0737 }
0738 
0739 static u32 gasp_version(__be32 *p)
0740 {
0741     return be32_to_cpu(p[1]) & 0xffffff;
0742 }
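     /*
      * The two GASP header quadlets decoded above are laid out as
      *   quadlet 0: source_ID(16) | specifier_ID[23..8]
      *   quadlet 1: specifier_ID[7..0] | version(24)
      * RFC 2734/3146 streams carry the IANA specifier_ID 0x00005e with
      * version 0x000001 (IPv4) or 0x000002 (IPv6).
      */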
0743 
0744 static void fwnet_receive_broadcast(struct fw_iso_context *context,
0745         u32 cycle, size_t header_length, void *header, void *data)
0746 {
0747     struct fwnet_device *dev;
0748     struct fw_iso_packet packet;
0749     __be16 *hdr_ptr;
0750     __be32 *buf_ptr;
0751     int retval;
0752     u32 length;
0753     unsigned long offset;
0754     unsigned long flags;
0755 
0756     dev = data;
0757     hdr_ptr = header;
0758     length = be16_to_cpup(hdr_ptr);
0759 
0760     spin_lock_irqsave(&dev->lock, flags);
0761 
0762     offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr;
0763     buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++];
0764     if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs)
0765         dev->broadcast_rcv_next_ptr = 0;
0766 
0767     spin_unlock_irqrestore(&dev->lock, flags);
0768 
0769     if (length > IEEE1394_GASP_HDR_SIZE &&
0770         gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
0771         (gasp_version(buf_ptr) == RFC2734_SW_VERSION
0772 #if IS_ENABLED(CONFIG_IPV6)
0773          || gasp_version(buf_ptr) == RFC3146_SW_VERSION
0774 #endif
0775         ))
0776         fwnet_incoming_packet(dev, buf_ptr + 2,
0777                       length - IEEE1394_GASP_HDR_SIZE,
0778                       gasp_source_id(buf_ptr),
0779                       context->card->generation, true);
0780 
0781     packet.payload_length = dev->rcv_buffer_size;
0782     packet.interrupt = 1;
0783     packet.skip = 0;
0784     packet.tag = 3;
0785     packet.sy = 0;
0786     packet.header_length = IEEE1394_GASP_HDR_SIZE;
0787 
0788     spin_lock_irqsave(&dev->lock, flags);
0789 
0790     retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet,
0791                       &dev->broadcast_rcv_buffer, offset);
0792 
0793     spin_unlock_irqrestore(&dev->lock, flags);
0794 
0795     if (retval >= 0)
0796         fw_iso_context_queue_flush(dev->broadcast_rcv_context);
0797     else
0798         dev_err(&dev->netdev->dev, "requeue failed\n");
0799 }
0800 
0801 static struct kmem_cache *fwnet_packet_task_cache;
0802 
0803 static void fwnet_free_ptask(struct fwnet_packet_task *ptask)
0804 {
0805     dev_kfree_skb_any(ptask->skb);
0806     kmem_cache_free(fwnet_packet_task_cache, ptask);
0807 }
0808 
0809 /* Caller must hold dev->lock. */
0810 static void dec_queued_datagrams(struct fwnet_device *dev)
0811 {
0812     if (--dev->queued_datagrams == FWNET_MIN_QUEUED_DATAGRAMS)
0813         netif_wake_queue(dev->netdev);
0814 }
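     /*
      * TX flow control: fwnet_tx() calls netif_stop_queue() once
      * FWNET_MAX_QUEUED_DATAGRAMS datagrams are in flight; the queue is
      * woken again above when completions drain it back down to
      * FWNET_MIN_QUEUED_DATAGRAMS.
      */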
0815 
0816 static int fwnet_send_packet(struct fwnet_packet_task *ptask);
0817 
0818 static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
0819 {
0820     struct fwnet_device *dev = ptask->dev;
0821     struct sk_buff *skb = ptask->skb;
0822     unsigned long flags;
0823     bool free;
0824 
0825     spin_lock_irqsave(&dev->lock, flags);
0826 
0827     ptask->outstanding_pkts--;
0828 
0829     /* Check whether we or the networking TX soft-IRQ is last user. */
0830     free = (ptask->outstanding_pkts == 0 && ptask->enqueued);
0831     if (free)
0832         dec_queued_datagrams(dev);
0833 
0834     if (ptask->outstanding_pkts == 0) {
0835         dev->netdev->stats.tx_packets++;
0836         dev->netdev->stats.tx_bytes += skb->len;
0837     }
0838 
0839     spin_unlock_irqrestore(&dev->lock, flags);
0840 
0841     if (ptask->outstanding_pkts > 0) {
0842         u16 dg_size;
0843         u16 fg_off;
0844         u16 datagram_label;
0845         u16 lf;
0846 
0847         /* Update the ptask to point to the next fragment and send it */
0848         lf = fwnet_get_hdr_lf(&ptask->hdr);
0849         switch (lf) {
0850         case RFC2374_HDR_LASTFRAG:
0851         case RFC2374_HDR_UNFRAG:
0852         default:
0853             dev_err(&dev->netdev->dev,
0854                 "outstanding packet %x lf %x, header %x,%x\n",
0855                 ptask->outstanding_pkts, lf, ptask->hdr.w0,
0856                 ptask->hdr.w1);
0857             BUG();
0858 
0859         case RFC2374_HDR_FIRSTFRAG:
0860             /* Set frag type here for future interior fragments */
0861             dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
0862             fg_off = ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
0863             datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
0864             break;
0865 
0866         case RFC2374_HDR_INTFRAG:
0867             dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
0868             fg_off = fwnet_get_hdr_fg_off(&ptask->hdr)
0869                   + ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
0870             datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
0871             break;
0872         }
0873 
0874         if (ptask->dest_node == IEEE1394_ALL_NODES) {
0875             skb_pull(skb,
0876                  ptask->max_payload + IEEE1394_GASP_HDR_SIZE);
0877         } else {
0878             skb_pull(skb, ptask->max_payload);
0879         }
0880         if (ptask->outstanding_pkts > 1) {
0881             fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
0882                       dg_size, fg_off, datagram_label);
0883         } else {
0884             fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_LASTFRAG,
0885                       dg_size, fg_off, datagram_label);
0886             ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
0887         }
0888         fwnet_send_packet(ptask);
0889     }
0890 
0891     if (free)
0892         fwnet_free_ptask(ptask);
0893 }
0894 
0895 static void fwnet_transmit_packet_failed(struct fwnet_packet_task *ptask)
0896 {
0897     struct fwnet_device *dev = ptask->dev;
0898     unsigned long flags;
0899     bool free;
0900 
0901     spin_lock_irqsave(&dev->lock, flags);
0902 
0903     /* One fragment failed; don't try to send remaining fragments. */
0904     ptask->outstanding_pkts = 0;
0905 
0906     /* Check whether we or the networking TX soft-IRQ is last user. */
0907     free = ptask->enqueued;
0908     if (free)
0909         dec_queued_datagrams(dev);
0910 
0911     dev->netdev->stats.tx_dropped++;
0912     dev->netdev->stats.tx_errors++;
0913 
0914     spin_unlock_irqrestore(&dev->lock, flags);
0915 
0916     if (free)
0917         fwnet_free_ptask(ptask);
0918 }
0919 
0920 static void fwnet_write_complete(struct fw_card *card, int rcode,
0921                  void *payload, size_t length, void *data)
0922 {
0923     struct fwnet_packet_task *ptask = data;
0924     static unsigned long j;
0925     static int last_rcode, errors_skipped;
0926 
0927     if (rcode == RCODE_COMPLETE) {
0928         fwnet_transmit_packet_done(ptask);
0929     } else {
0930         if (printk_timed_ratelimit(&j,  1000) || rcode != last_rcode) {
0931             dev_err(&ptask->dev->netdev->dev,
0932                 "fwnet_write_complete failed: %x (skipped %d)\n",
0933                 rcode, errors_skipped);
0934 
0935             errors_skipped = 0;
0936             last_rcode = rcode;
0937         } else {
0938             errors_skipped++;
0939         }
0940         fwnet_transmit_packet_failed(ptask);
0941     }
0942 }
0943 
0944 static int fwnet_send_packet(struct fwnet_packet_task *ptask)
0945 {
0946     struct fwnet_device *dev;
0947     unsigned tx_len;
0948     struct rfc2734_header *bufhdr;
0949     unsigned long flags;
0950     bool free;
0951 
0952     dev = ptask->dev;
0953     tx_len = ptask->max_payload;
0954     switch (fwnet_get_hdr_lf(&ptask->hdr)) {
0955     case RFC2374_HDR_UNFRAG:
0956         bufhdr = skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE);
0957         put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
0958         break;
0959 
0960     case RFC2374_HDR_FIRSTFRAG:
0961     case RFC2374_HDR_INTFRAG:
0962     case RFC2374_HDR_LASTFRAG:
0963         bufhdr = skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE);
0964         put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
0965         put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1);
0966         break;
0967 
0968     default:
0969         BUG();
0970     }
0971     if (ptask->dest_node == IEEE1394_ALL_NODES) {
0972         u8 *p;
0973         int generation;
0974         int node_id;
0975         unsigned int sw_version;
0976 
0977         /* ptask->generation may not have been set yet */
0978         generation = dev->card->generation;
0979         smp_rmb();
0980         node_id = dev->card->node_id;
0981 
0982         switch (ptask->skb->protocol) {
0983         default:
0984             sw_version = RFC2734_SW_VERSION;
0985             break;
0986 #if IS_ENABLED(CONFIG_IPV6)
0987         case htons(ETH_P_IPV6):
0988             sw_version = RFC3146_SW_VERSION;
0989 #endif
0990         }
0991 
0992         p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE);
0993         put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
0994         put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
0995                         | sw_version, &p[4]);
0996 
0997         /* We should not transmit if broadcast_channel.valid == 0. */
0998         fw_send_request(dev->card, &ptask->transaction,
0999                 TCODE_STREAM_DATA,
1000                 fw_stream_packet_destination_id(3,
1001                         IEEE1394_BROADCAST_CHANNEL, 0),
1002                 generation, SCODE_100, 0ULL, ptask->skb->data,
1003                 tx_len + 8, fwnet_write_complete, ptask);
1004 
1005         spin_lock_irqsave(&dev->lock, flags);
1006 
1007         /* If the AT tasklet already ran, we may be last user. */
1008         free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
1009         if (!free)
1010             ptask->enqueued = true;
1011         else
1012             dec_queued_datagrams(dev);
1013 
1014         spin_unlock_irqrestore(&dev->lock, flags);
1015 
1016         goto out;
1017     }
1018 
1019     fw_send_request(dev->card, &ptask->transaction,
1020             TCODE_WRITE_BLOCK_REQUEST, ptask->dest_node,
1021             ptask->generation, ptask->speed, ptask->fifo_addr,
1022             ptask->skb->data, tx_len, fwnet_write_complete, ptask);
1023 
1024     spin_lock_irqsave(&dev->lock, flags);
1025 
1026     /* If the AT tasklet already ran, we may be last user. */
1027     free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
1028     if (!free)
1029         ptask->enqueued = true;
1030     else
1031         dec_queued_datagrams(dev);
1032 
1033     spin_unlock_irqrestore(&dev->lock, flags);
1034 
1035     netif_trans_update(dev->netdev);
1036  out:
1037     if (free)
1038         fwnet_free_ptask(ptask);
1039 
1040     return 0;
1041 }
1042 
1043 static void fwnet_fifo_stop(struct fwnet_device *dev)
1044 {
1045     if (dev->local_fifo == FWNET_NO_FIFO_ADDR)
1046         return;
1047 
1048     fw_core_remove_address_handler(&dev->handler);
1049     dev->local_fifo = FWNET_NO_FIFO_ADDR;
1050 }
1051 
1052 static int fwnet_fifo_start(struct fwnet_device *dev)
1053 {
1054     int retval;
1055 
1056     if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
1057         return 0;
1058 
1059     dev->handler.length = 4096;
1060     dev->handler.address_callback = fwnet_receive_packet;
1061     dev->handler.callback_data = dev;
1062 
1063     retval = fw_core_add_address_handler(&dev->handler,
1064                          &fw_high_memory_region);
1065     if (retval < 0)
1066         return retval;
1067 
1068     dev->local_fifo = dev->handler.offset;
1069 
1070     return 0;
1071 }
1072 
1073 static void __fwnet_broadcast_stop(struct fwnet_device *dev)
1074 {
1075     unsigned u;
1076 
1077     if (dev->broadcast_state != FWNET_BROADCAST_ERROR) {
1078         for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++)
1079             kunmap(dev->broadcast_rcv_buffer.pages[u]);
1080         fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
1081     }
1082     if (dev->broadcast_rcv_context) {
1083         fw_iso_context_destroy(dev->broadcast_rcv_context);
1084         dev->broadcast_rcv_context = NULL;
1085     }
1086     kfree(dev->broadcast_rcv_buffer_ptrs);
1087     dev->broadcast_rcv_buffer_ptrs = NULL;
1088     dev->broadcast_state = FWNET_BROADCAST_ERROR;
1089 }
1090 
1091 static void fwnet_broadcast_stop(struct fwnet_device *dev)
1092 {
1093     if (dev->broadcast_state == FWNET_BROADCAST_ERROR)
1094         return;
1095     fw_iso_context_stop(dev->broadcast_rcv_context);
1096     __fwnet_broadcast_stop(dev);
1097 }
1098 
1099 static int fwnet_broadcast_start(struct fwnet_device *dev)
1100 {
1101     struct fw_iso_context *context;
1102     int retval;
1103     unsigned num_packets;
1104     unsigned max_receive;
1105     struct fw_iso_packet packet;
1106     unsigned long offset;
1107     void **ptrptr;
1108     unsigned u;
1109 
1110     if (dev->broadcast_state != FWNET_BROADCAST_ERROR)
1111         return 0;
1112 
1113     max_receive = 1U << (dev->card->max_receive + 1);
1114     num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive;
1115 
1116     ptrptr = kmalloc_array(num_packets, sizeof(void *), GFP_KERNEL);
1117     if (!ptrptr) {
1118         retval = -ENOMEM;
1119         goto failed;
1120     }
1121     dev->broadcast_rcv_buffer_ptrs = ptrptr;
1122 
1123     context = fw_iso_context_create(dev->card, FW_ISO_CONTEXT_RECEIVE,
1124                     IEEE1394_BROADCAST_CHANNEL,
1125                     dev->card->link_speed, 8,
1126                     fwnet_receive_broadcast, dev);
1127     if (IS_ERR(context)) {
1128         retval = PTR_ERR(context);
1129         goto failed;
1130     }
1131 
1132     retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer, dev->card,
1133                     FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE);
1134     if (retval < 0)
1135         goto failed;
1136 
1137     dev->broadcast_state = FWNET_BROADCAST_STOPPED;
1138 
1139     for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) {
1140         void *ptr;
1141         unsigned v;
1142 
1143         ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
1144         for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++)
1145             *ptrptr++ = (void *) ((char *)ptr + v * max_receive);
1146     }
1147     dev->broadcast_rcv_context = context;
1148 
1149     packet.payload_length = max_receive;
1150     packet.interrupt = 1;
1151     packet.skip = 0;
1152     packet.tag = 3;
1153     packet.sy = 0;
1154     packet.header_length = IEEE1394_GASP_HDR_SIZE;
1155     offset = 0;
1156 
1157     for (u = 0; u < num_packets; u++) {
1158         retval = fw_iso_context_queue(context, &packet,
1159                 &dev->broadcast_rcv_buffer, offset);
1160         if (retval < 0)
1161             goto failed;
1162 
1163         offset += max_receive;
1164     }
1165     dev->num_broadcast_rcv_ptrs = num_packets;
1166     dev->rcv_buffer_size = max_receive;
1167     dev->broadcast_rcv_next_ptr = 0U;
1168     retval = fw_iso_context_start(context, -1, 0,
1169             FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */
1170     if (retval < 0)
1171         goto failed;
1172 
1173     /* FIXME: adjust it according to the min. speed of all known peers? */
1174     dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100
1175             - IEEE1394_GASP_HDR_SIZE - RFC2374_UNFRAG_HDR_SIZE;
1176     dev->broadcast_state = FWNET_BROADCAST_RUNNING;
1177 
1178     return 0;
1179 
1180  failed:
1181     __fwnet_broadcast_stop(dev);
1182     return retval;
1183 }
1184 
1185 static void set_carrier_state(struct fwnet_device *dev)
1186 {
1187     if (dev->peer_count > 1)
1188         netif_carrier_on(dev->netdev);
1189     else
1190         netif_carrier_off(dev->netdev);
1191 }
1192 
1193 /* ifup */
1194 static int fwnet_open(struct net_device *net)
1195 {
1196     struct fwnet_device *dev = netdev_priv(net);
1197     int ret;
1198 
1199     ret = fwnet_broadcast_start(dev);
1200     if (ret)
1201         return ret;
1202 
1203     netif_start_queue(net);
1204 
1205     spin_lock_irq(&dev->lock);
1206     set_carrier_state(dev);
1207     spin_unlock_irq(&dev->lock);
1208 
1209     return 0;
1210 }
1211 
1212 /* ifdown */
1213 static int fwnet_stop(struct net_device *net)
1214 {
1215     struct fwnet_device *dev = netdev_priv(net);
1216 
1217     netif_stop_queue(net);
1218     fwnet_broadcast_stop(dev);
1219 
1220     return 0;
1221 }
1222 
1223 static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
1224 {
1225     struct fwnet_header hdr_buf;
1226     struct fwnet_device *dev = netdev_priv(net);
1227     __be16 proto;
1228     u16 dest_node;
1229     unsigned max_payload;
1230     u16 dg_size;
1231     u16 *datagram_label_ptr;
1232     struct fwnet_packet_task *ptask;
1233     struct fwnet_peer *peer;
1234     unsigned long flags;
1235 
1236     spin_lock_irqsave(&dev->lock, flags);
1237 
1238     /* Can this happen? */
1239     if (netif_queue_stopped(dev->netdev)) {
1240         spin_unlock_irqrestore(&dev->lock, flags);
1241 
1242         return NETDEV_TX_BUSY;
1243     }
1244 
1245     ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC);
1246     if (ptask == NULL)
1247         goto fail;
1248 
1249     skb = skb_share_check(skb, GFP_ATOMIC);
1250     if (!skb)
1251         goto fail;
1252 
1253     /*
1254      * Make a copy of the driver-specific header.
1255      * We might need to rebuild the header on tx failure.
1256      */
1257     memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));
1258     proto = hdr_buf.h_proto;
1259 
1260     switch (proto) {
1261     case htons(ETH_P_ARP):
1262     case htons(ETH_P_IP):
1263 #if IS_ENABLED(CONFIG_IPV6)
1264     case htons(ETH_P_IPV6):
1265 #endif
1266         break;
1267     default:
1268         goto fail;
1269     }
1270 
1271     skb_pull(skb, sizeof(hdr_buf));
1272     dg_size = skb->len;
1273 
1274     /*
1275      * Set the transmission type for the packet.  ARP packets and IP
1276      * broadcast packets are sent via GASP.
1277      */
1278     if (fwnet_hwaddr_is_multicast(hdr_buf.h_dest)) {
1279         max_payload        = dev->broadcast_xmt_max_payload;
1280         datagram_label_ptr = &dev->broadcast_xmt_datagramlabel;
1281 
1282         ptask->fifo_addr   = FWNET_NO_FIFO_ADDR;
1283         ptask->generation  = 0;
1284         ptask->dest_node   = IEEE1394_ALL_NODES;
1285         ptask->speed       = SCODE_100;
1286     } else {
1287         union fwnet_hwaddr *ha = (union fwnet_hwaddr *)hdr_buf.h_dest;
1288         __be64 guid = get_unaligned(&ha->uc.uniq_id);
1289         u8 generation;
1290 
1291         peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid));
1292         if (!peer)
1293             goto fail;
1294 
1295         generation         = peer->generation;
1296         dest_node          = peer->node_id;
1297         max_payload        = peer->max_payload;
1298         datagram_label_ptr = &peer->datagram_label;
1299 
1300         ptask->fifo_addr   = get_unaligned_be48(ha->uc.fifo);
1301         ptask->generation  = generation;
1302         ptask->dest_node   = dest_node;
1303         ptask->speed       = peer->speed;
1304     }
1305 
1306     ptask->hdr.w0 = 0;
1307     ptask->hdr.w1 = 0;
1308     ptask->skb = skb;
1309     ptask->dev = dev;
1310 
1311     /* Does it all fit in one packet? */
1312     if (dg_size <= max_payload) {
1313         fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto));
1314         ptask->outstanding_pkts = 1;
1315         max_payload = dg_size + RFC2374_UNFRAG_HDR_SIZE;
1316     } else {
1317         u16 datagram_label;
1318 
1319         max_payload -= RFC2374_FRAG_OVERHEAD;
1320         datagram_label = (*datagram_label_ptr)++;
1321         fwnet_make_ff_hdr(&ptask->hdr, ntohs(proto), dg_size,
1322                   datagram_label);
1323         ptask->outstanding_pkts = DIV_ROUND_UP(dg_size, max_payload);
1324         max_payload += RFC2374_FRAG_HDR_SIZE;
1325     }
1326 
1327     if (++dev->queued_datagrams == FWNET_MAX_QUEUED_DATAGRAMS)
1328         netif_stop_queue(dev->netdev);
1329 
1330     spin_unlock_irqrestore(&dev->lock, flags);
1331 
1332     ptask->max_payload = max_payload;
1333     ptask->enqueued    = 0;
1334 
1335     fwnet_send_packet(ptask);
1336 
1337     return NETDEV_TX_OK;
1338 
1339  fail:
1340     spin_unlock_irqrestore(&dev->lock, flags);
1341 
1342     if (ptask)
1343         kmem_cache_free(fwnet_packet_task_cache, ptask);
1344 
1345     if (skb != NULL)
1346         dev_kfree_skb(skb);
1347 
1348     net->stats.tx_dropped++;
1349     net->stats.tx_errors++;
1350 
1351     /*
1352      * FIXME: According to a patch from 2003-02-26, "returning non-zero
1353      * causes serious problems" here, allegedly.  Before that patch,
1354      * -ERRNO was returned which is not appropriate under Linux 2.6.
1355      * Perhaps more needs to be done?  Stop the queue in serious
1356      * conditions and restart it elsewhere?
1357      */
1358     return NETDEV_TX_OK;
1359 }
1360 
1361 static const struct ethtool_ops fwnet_ethtool_ops = {
1362     .get_link   = ethtool_op_get_link,
1363 };
1364 
1365 static const struct net_device_ops fwnet_netdev_ops = {
1366     .ndo_open       = fwnet_open,
1367     .ndo_stop   = fwnet_stop,
1368     .ndo_start_xmit = fwnet_tx,
1369 };
1370 
1371 static void fwnet_init_dev(struct net_device *net)
1372 {
1373     net->header_ops     = &fwnet_header_ops;
1374     net->netdev_ops     = &fwnet_netdev_ops;
1375     net->watchdog_timeo = 2 * HZ;
1376     net->flags      = IFF_BROADCAST | IFF_MULTICAST;
1377     net->features       = NETIF_F_HIGHDMA;
1378     net->addr_len       = FWNET_ALEN;
1379     net->hard_header_len    = FWNET_HLEN;
1380     net->type       = ARPHRD_IEEE1394;
1381     net->tx_queue_len   = FWNET_TX_QUEUE_LEN;
1382     net->ethtool_ops    = &fwnet_ethtool_ops;
1383 }
1384 
1385 /* caller must hold fwnet_device_mutex */
1386 static struct fwnet_device *fwnet_dev_find(struct fw_card *card)
1387 {
1388     struct fwnet_device *dev;
1389 
1390     list_for_each_entry(dev, &fwnet_device_list, dev_link)
1391         if (dev->card == card)
1392             return dev;
1393 
1394     return NULL;
1395 }
1396 
1397 static int fwnet_add_peer(struct fwnet_device *dev,
1398               struct fw_unit *unit, struct fw_device *device)
1399 {
1400     struct fwnet_peer *peer;
1401 
1402     peer = kmalloc(sizeof(*peer), GFP_KERNEL);
1403     if (!peer)
1404         return -ENOMEM;
1405 
1406     dev_set_drvdata(&unit->device, peer);
1407 
1408     peer->dev = dev;
1409     peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
1410     INIT_LIST_HEAD(&peer->pd_list);
1411     peer->pdg_size = 0;
1412     peer->datagram_label = 0;
1413     peer->speed = device->max_speed;
1414     peer->max_payload = fwnet_max_payload(device->max_rec, peer->speed);
1415 
1416     peer->generation = device->generation;
1417     smp_rmb();
1418     peer->node_id = device->node_id;
1419 
1420     spin_lock_irq(&dev->lock);
1421     list_add_tail(&peer->peer_link, &dev->peer_list);
1422     dev->peer_count++;
1423     set_carrier_state(dev);
1424     spin_unlock_irq(&dev->lock);
1425 
1426     return 0;
1427 }
1428 
1429 static int fwnet_probe(struct fw_unit *unit,
1430                const struct ieee1394_device_id *id)
1431 {
1432     struct fw_device *device = fw_parent_device(unit);
1433     struct fw_card *card = device->card;
1434     struct net_device *net;
1435     bool allocated_netdev = false;
1436     struct fwnet_device *dev;
1437     union fwnet_hwaddr ha;
1438     int ret;
1439 
1440     mutex_lock(&fwnet_device_mutex);
1441 
1442     dev = fwnet_dev_find(card);
1443     if (dev) {
1444         net = dev->netdev;
1445         goto have_dev;
1446     }
1447 
1448     net = alloc_netdev(sizeof(*dev), "firewire%d", NET_NAME_UNKNOWN,
1449                fwnet_init_dev);
1450     if (net == NULL) {
1451         mutex_unlock(&fwnet_device_mutex);
1452         return -ENOMEM;
1453     }
1454 
1455     allocated_netdev = true;
1456     SET_NETDEV_DEV(net, card->device);
1457     dev = netdev_priv(net);
1458 
1459     spin_lock_init(&dev->lock);
1460     dev->broadcast_state = FWNET_BROADCAST_ERROR;
1461     dev->broadcast_rcv_context = NULL;
1462     dev->broadcast_xmt_max_payload = 0;
1463     dev->broadcast_xmt_datagramlabel = 0;
1464     dev->local_fifo = FWNET_NO_FIFO_ADDR;
1465     dev->queued_datagrams = 0;
1466     INIT_LIST_HEAD(&dev->peer_list);
1467     dev->card = card;
1468     dev->netdev = net;
1469 
1470     ret = fwnet_fifo_start(dev);
1471     if (ret < 0)
1472         goto out;
1473     dev->local_fifo = dev->handler.offset;
1474 
1475     /*
1476      * default MTU: RFC 2734 cl. 4, RFC 3146 cl. 4
1477      * maximum MTU: RFC 2734 cl. 4.2, fragment encapsulation header's
1478      *              maximum possible datagram_size + 1 = 0xfff + 1
1479      */
1480     net->mtu = 1500U;
1481     net->min_mtu = ETH_MIN_MTU;
1482     net->max_mtu = 4096U;
1483 
1484     /* Set our hardware address while we're at it */
1485     ha.uc.uniq_id = cpu_to_be64(card->guid);
1486     ha.uc.max_rec = dev->card->max_receive;
1487     ha.uc.sspd = dev->card->link_speed;
1488     put_unaligned_be48(dev->local_fifo, ha.uc.fifo);
1489     dev_addr_set(net, ha.u);
1490 
1491     memset(net->broadcast, -1, net->addr_len);
1492 
1493     ret = register_netdev(net);
1494     if (ret)
1495         goto out;
1496 
1497     list_add_tail(&dev->dev_link, &fwnet_device_list);
1498     dev_notice(&net->dev, "IP over IEEE 1394 on card %s\n",
1499            dev_name(card->device));
1500  have_dev:
1501     ret = fwnet_add_peer(dev, unit, device);
1502     if (ret && allocated_netdev) {
1503         unregister_netdev(net);
1504         list_del(&dev->dev_link);
1505  out:
1506         fwnet_fifo_stop(dev);
1507         free_netdev(net);
1508     }
1509 
1510     mutex_unlock(&fwnet_device_mutex);
1511 
1512     return ret;
1513 }
1514 
1515 /*
1516  * FIXME abort partially sent fragmented datagrams,
1517  * discard partially received fragmented datagrams
1518  */
1519 static void fwnet_update(struct fw_unit *unit)
1520 {
1521     struct fw_device *device = fw_parent_device(unit);
1522     struct fwnet_peer *peer = dev_get_drvdata(&unit->device);
1523     int generation;
1524 
1525     generation = device->generation;
1526 
1527     spin_lock_irq(&peer->dev->lock);
1528     peer->node_id    = device->node_id;
1529     peer->generation = generation;
1530     spin_unlock_irq(&peer->dev->lock);
1531 }
1532 
1533 static void fwnet_remove_peer(struct fwnet_peer *peer, struct fwnet_device *dev)
1534 {
1535     struct fwnet_partial_datagram *pd, *pd_next;
1536 
1537     spin_lock_irq(&dev->lock);
1538     list_del(&peer->peer_link);
1539     dev->peer_count--;
1540     set_carrier_state(dev);
1541     spin_unlock_irq(&dev->lock);
1542 
1543     list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link)
1544         fwnet_pd_delete(pd);
1545 
1546     kfree(peer);
1547 }
1548 
1549 static void fwnet_remove(struct fw_unit *unit)
1550 {
1551     struct fwnet_peer *peer = dev_get_drvdata(&unit->device);
1552     struct fwnet_device *dev = peer->dev;
1553     struct net_device *net;
1554     int i;
1555 
1556     mutex_lock(&fwnet_device_mutex);
1557 
1558     net = dev->netdev;
1559 
1560     fwnet_remove_peer(peer, dev);
1561 
1562     if (list_empty(&dev->peer_list)) {
1563         unregister_netdev(net);
1564 
1565         fwnet_fifo_stop(dev);
1566 
1567         for (i = 0; dev->queued_datagrams && i < 5; i++)
1568             ssleep(1);
1569         WARN_ON(dev->queued_datagrams);
1570         list_del(&dev->dev_link);
1571 
1572         free_netdev(net);
1573     }
1574 
1575     mutex_unlock(&fwnet_device_mutex);
1576 }
1577 
1578 static const struct ieee1394_device_id fwnet_id_table[] = {
1579     {
1580         .match_flags  = IEEE1394_MATCH_SPECIFIER_ID |
1581                 IEEE1394_MATCH_VERSION,
1582         .specifier_id = IANA_SPECIFIER_ID,
1583         .version      = RFC2734_SW_VERSION,
1584     },
1585 #if IS_ENABLED(CONFIG_IPV6)
1586     {
1587         .match_flags  = IEEE1394_MATCH_SPECIFIER_ID |
1588                 IEEE1394_MATCH_VERSION,
1589         .specifier_id = IANA_SPECIFIER_ID,
1590         .version      = RFC3146_SW_VERSION,
1591     },
1592 #endif
1593     { }
1594 };
1595 
1596 static struct fw_driver fwnet_driver = {
1597     .driver = {
1598         .owner  = THIS_MODULE,
1599         .name   = KBUILD_MODNAME,
1600         .bus    = &fw_bus_type,
1601     },
1602     .probe    = fwnet_probe,
1603     .update   = fwnet_update,
1604     .remove   = fwnet_remove,
1605     .id_table = fwnet_id_table,
1606 };
1607 
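     /*
      * These descriptors are added to the local node's configuration ROM in
      * fwnet_init() so the node advertises RFC 2734 (and, with IPv6, RFC
      * 3146) unit directories; remote nodes exposing the same directories
      * are matched by fwnet_id_table and bound by this driver.
      */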
1608 static const u32 rfc2374_unit_directory_data[] = {
1609     0x00040000, /* directory_length     */
1610     0x1200005e, /* unit_specifier_id: IANA  */
1611     0x81000003, /* textual descriptor offset    */
1612     0x13000001, /* unit_sw_version: RFC 2734    */
1613     0x81000005, /* textual descriptor offset    */
1614     0x00030000, /* descriptor_length        */
1615     0x00000000, /* text             */
1616     0x00000000, /* minimal ASCII, en        */
1617     0x49414e41, /* I A N A          */
1618     0x00030000, /* descriptor_length        */
1619     0x00000000, /* text             */
1620     0x00000000, /* minimal ASCII, en        */
1621     0x49507634, /* I P v 4          */
1622 };
1623 
1624 static struct fw_descriptor rfc2374_unit_directory = {
1625     .length = ARRAY_SIZE(rfc2374_unit_directory_data),
1626     .key    = (CSR_DIRECTORY | CSR_UNIT) << 24,
1627     .data   = rfc2374_unit_directory_data
1628 };
1629 
1630 #if IS_ENABLED(CONFIG_IPV6)
1631 static const u32 rfc3146_unit_directory_data[] = {
1632     0x00040000, /* directory_length     */
1633     0x1200005e, /* unit_specifier_id: IANA  */
1634     0x81000003, /* textual descriptor offset    */
1635     0x13000002, /* unit_sw_version: RFC 3146    */
1636     0x81000005, /* textual descriptor offset    */
1637     0x00030000, /* descriptor_length        */
1638     0x00000000, /* text             */
1639     0x00000000, /* minimal ASCII, en        */
1640     0x49414e41, /* I A N A          */
1641     0x00030000, /* descriptor_length        */
1642     0x00000000, /* text             */
1643     0x00000000, /* minimal ASCII, en        */
1644     0x49507636, /* I P v 6          */
1645 };
1646 
1647 static struct fw_descriptor rfc3146_unit_directory = {
1648     .length = ARRAY_SIZE(rfc3146_unit_directory_data),
1649     .key    = (CSR_DIRECTORY | CSR_UNIT) << 24,
1650     .data   = rfc3146_unit_directory_data
1651 };
1652 #endif
1653 
1654 static int __init fwnet_init(void)
1655 {
1656     int err;
1657 
1658     err = fw_core_add_descriptor(&rfc2374_unit_directory);
1659     if (err)
1660         return err;
1661 
1662 #if IS_ENABLED(CONFIG_IPV6)
1663     err = fw_core_add_descriptor(&rfc3146_unit_directory);
1664     if (err)
1665         goto out;
1666 #endif
1667 
1668     fwnet_packet_task_cache = kmem_cache_create("packet_task",
1669             sizeof(struct fwnet_packet_task), 0, 0, NULL);
1670     if (!fwnet_packet_task_cache) {
1671         err = -ENOMEM;
1672         goto out2;
1673     }
1674 
1675     err = driver_register(&fwnet_driver.driver);
1676     if (!err)
1677         return 0;
1678 
1679     kmem_cache_destroy(fwnet_packet_task_cache);
1680 out2:
1681 #if IS_ENABLED(CONFIG_IPV6)
1682     fw_core_remove_descriptor(&rfc3146_unit_directory);
1683 out:
1684 #endif
1685     fw_core_remove_descriptor(&rfc2374_unit_directory);
1686 
1687     return err;
1688 }
1689 module_init(fwnet_init);
1690 
1691 static void __exit fwnet_cleanup(void)
1692 {
1693     driver_unregister(&fwnet_driver.driver);
1694     kmem_cache_destroy(fwnet_packet_task_cache);
1695 #if IS_ENABLED(CONFIG_IPV6)
1696     fw_core_remove_descriptor(&rfc3146_unit_directory);
1697 #endif
1698     fw_core_remove_descriptor(&rfc2374_unit_directory);
1699 }
1700 module_exit(fwnet_cleanup);
1701 
1702 MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>");
1703 MODULE_DESCRIPTION("IP over IEEE1394 as per RFC 2734/3146");
1704 MODULE_LICENSE("GPL");
1705 MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table);