/*
 * Ethernet portion of the AoE (ATA over Ethernet) driver
 */

#include <linux/gfp.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include "aoe.h"

#define NECODES 5

static char *aoe_errlist[] =
{
	"no such error",
	"unrecognized command code",
	"bad argument parameter",
	"device unavailable",
	"config string present",
	"unsupported version"
};

enum {
	IFLISTSZ = 1024,
};

static char aoe_iflist[IFLISTSZ];
module_param_string(aoe_iflist, aoe_iflist, IFLISTSZ, 0600);
MODULE_PARM_DESC(aoe_iflist, "aoe_iflist=dev1[,dev2...]");

static wait_queue_head_t txwq;
static struct ktstate kts;

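/* When aoe is built into the kernel, the interface list can also be
 * given on the kernel command line as "aoe_iflist=dev1[,dev2...]".
 */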
#ifndef MODULE
static int __init aoe_iflist_setup(char *str)
{
	strncpy(aoe_iflist, str, IFLISTSZ);
	aoe_iflist[IFLISTSZ - 1] = '\0';
	return 1;
}

__setup("aoe_iflist=", aoe_iflist_setup);
#endif

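/* txlock protects skbtxq, the queue of outgoing skbs drained by the
 * aoe_tx kthread.
 */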
static spinlock_t txlock;
static struct sk_buff_head skbtxq;

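/* Work function of the aoe_tx kthread.  Called with txlock held, it
 * drains skbtxq, dropping the lock around each dev_queue_xmit() call.
 */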
static int
tx(int id) __must_hold(&txlock)
{
	struct sk_buff *skb;
	struct net_device *ifp;

	while ((skb = skb_dequeue(&skbtxq))) {
		spin_unlock_irq(&txlock);
		ifp = skb->dev;
		if (dev_queue_xmit(skb) == NET_XMIT_DROP && net_ratelimit())
			pr_warn("aoe: packet could not be sent on %s. %s\n",
				ifp ? ifp->name : "netif",
				"consider increasing tx_queue_len");
		spin_lock_irq(&txlock);
	}
	return 0;
}

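/* Return 1 if the interface may carry AoE traffic, i.e. aoe_iflist is
 * empty or contains the interface name; return 0 otherwise.
 */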
int
is_aoe_netif(struct net_device *ifp)
{
	register char *p, *q;
	register int len;

	if (aoe_iflist[0] == '\0')
		return 1;

	p = aoe_iflist + strspn(aoe_iflist, WHITESPACE);
	for (; *p; p = q + strspn(q, WHITESPACE)) {
		q = p + strcspn(p, WHITESPACE);
		if (q != p)
			len = q - p;
		else
			len = strlen(p); /* last token in aoe_iflist */

		if (strlen(ifp->name) == len && !strncmp(ifp->name, p, len))
			return 1;
		if (q == p)
			break;
	}

	return 0;
}

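/* Replace aoe_iflist with a NUL-terminated copy of a string supplied
 * from user space.
 */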
int
set_aoe_iflist(const char __user *user_str, size_t size)
{
	if (size >= IFLISTSZ)
		return -EINVAL;

	if (copy_from_user(aoe_iflist, user_str, size)) {
		printk(KERN_INFO "aoe: copy from user failed\n");
		return -EFAULT;
	}
	aoe_iflist[size] = 0x00;
	return 0;
}

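/* Move the skbs on the caller's queue onto skbtxq and wake the aoe_tx
 * kthread, which performs the actual transmission.
 */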
void
aoenet_xmit(struct sk_buff_head *queue)
{
	struct sk_buff *skb, *tmp;
	ulong flags;

	skb_queue_walk_safe(queue, skb, tmp) {
		__skb_unlink(skb, queue);
		spin_lock_irqsave(&txlock, flags);
		skb_queue_tail(&skbtxq, skb);
		spin_unlock_irqrestore(&txlock, flags);
		wake_up(&txwq);
	}
}

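/* Receive handler registered for ETH_P_AOE frames.  Verify that the
 * frame is an AoE response intended for this driver, report AoE error
 * packets, and hand ATA and config responses to aoecmd.
 */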
static int
aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
	   struct net_device *orig_dev)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	u32 n;
	int sn;

	if (dev_net(ifp) != &init_net)
		goto exit;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		return 0;
	if (!is_aoe_netif(ifp))
		goto exit;
	skb_push(skb, ETH_HLEN);	/* struct aoe_hdr starts with the Ethernet header */
	sn = sizeof(*h) + sizeof(*ah);
	if (skb->len >= sn) {
		sn -= skb_headlen(skb);
		if (sn > 0 && !__pskb_pull_tail(skb, sn))
			goto exit;
	}
	h = (struct aoe_hdr *) skb->data;
	n = get_unaligned_be32(&h->tag);
	if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
		goto exit;

	if (h->verfl & AOEFL_ERR) {
		n = h->err;
		if (n > NECODES)
			n = 0;
		if (net_ratelimit())
			printk(KERN_ERR
				"%s%d.%d@%s; ecode=%d '%s'\n",
				"aoe: error packet from ",
				get_unaligned_be16(&h->major),
				h->minor, skb->dev->name,
				h->err, aoe_errlist[n]);
		goto exit;
	}

	switch (h->cmd) {
	case AOECMD_ATA:
		/* ata_rsp may keep skb for later processing or give it back */
		skb = aoecmd_ata_rsp(skb);
		break;
	case AOECMD_CFG:
		aoecmd_cfg_rsp(skb);
		break;
	default:
		if (h->cmd >= AOECMD_VEND_MIN)
			break;	/* don't complain about vendor commands */
		pr_info("aoe: unknown AoE command type 0x%02x\n", h->cmd);
		break;
	}

	if (!skb)
		return 0;
exit:
	dev_kfree_skb(skb);
	return 0;
}

static struct packet_type aoe_pt __read_mostly = {
	.type = __constant_htons(ETH_P_AOE),
	.func = aoenet_rcv,
};

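/* Start the aoe_tx kthread and register the ETH_P_AOE packet handler. */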
int __init
aoenet_init(void)
{
	skb_queue_head_init(&skbtxq);
	init_waitqueue_head(&txwq);
	spin_lock_init(&txlock);
	kts.lock = &txlock;
	kts.fn = tx;
	kts.waitq = &txwq;
	kts.id = 0;
	snprintf(kts.name, sizeof(kts.name), "aoe_tx%d", kts.id);
	if (aoe_ktstart(&kts))
		return -EAGAIN;
	dev_add_pack(&aoe_pt);
	return 0;
}

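/* Undo aoenet_init: stop the aoe_tx kthread, free any skbs still queued
 * for transmission, and unregister the packet handler.
 */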
void
aoenet_exit(void)
{
	aoe_ktstop(&kts);
	skb_queue_purge(&skbtxq);
	dev_remove_pack(&aoe_pt);
}