0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017 #ifndef _LINUX_ETHERDEVICE_H
0018 #define _LINUX_ETHERDEVICE_H
0019
0020 #include <linux/if_ether.h>
0021 #include <linux/netdevice.h>
0022 #include <linux/random.h>
0023 #include <linux/crc32.h>
0024 #include <asm/unaligned.h>
0025 #include <asm/bitsperlong.h>
0026
#ifdef __KERNEL__
struct device;
struct fwnode_handle;

/* MAC address retrieval from platform data, firmware (DT/ACPI fwnode) or
 * an NVMEM cell.
 */
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
int platform_get_ethdev_address(struct device *dev, struct net_device *netdev);
unsigned char *arch_get_platform_mac_address(void);
int nvmem_get_mac_address(struct device *dev, void *addrbuf);
int device_get_mac_address(struct device *dev, char *addr);
int device_get_ethdev_address(struct device *dev, struct net_device *netdev);
int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr);

/* Ethernet frame parsing and the generic Ethernet header_ops callbacks. */
u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;

int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
	       const void *daddr, const void *saddr, unsigned len);
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
		     __be16 type);
void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
			     const unsigned char *haddr);
__be16 eth_header_parse_protocol(const struct sk_buff *skb);
int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
void eth_commit_mac_addr_change(struct net_device *dev, void *p);
int eth_mac_addr(struct net_device *dev, void *p);
int eth_validate_addr(struct net_device *dev);

/* Allocation of Ethernet net_devices with the given number of tx/rx queues. */
struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
				      unsigned int rxqs);
#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)

/* Device-managed variant: freed automatically on driver detach. */
struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
					   unsigned int txqs,
					   unsigned int rxqs);
#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)

/* GRO receive/complete handlers for the Ethernet protocol. */
struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb);
int eth_gro_complete(struct sk_buff *skb, int nhoff);
0068
0069
/* Reserved Ethernet Addresses per IEEE 802.1Q: 01:80:c2:00:00:00 is the base
 * of the link-local reserved block (also the STP bridge group address,
 * hence the eth_stp_addr alias).  Kept u16-aligned for the word-wise
 * comparison in is_link_local_ether_addr().
 */
static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
#define eth_stp_addr eth_reserved_addr_base
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
/**
 * is_link_local_ether_addr - Determine if given Ethernet address is link-local
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if address is link local reserved addr (01:80:c2:00:00:0X) per
 * IEEE 802.1Q 8.6.3 Frame filtering: the mask 0xfff0 on the last 16-bit word
 * ignores the low 4 bits, matching the whole 01:80:c2:00:00:00..0f block.
 *
 * Please note: addr must be aligned to u16.
 */
static inline bool is_link_local_ether_addr(const u8 *addr)
{
	__be16 *a = (__be16 *)addr;
	static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
	static const __be16 m = cpu_to_be16(0xfff0);

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* One 32-bit load for the first four bytes, then the masked tail. */
	return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
		(__force int)((a[2] ^ b[2]) & m)) == 0;
#else
	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
#endif
}
0096
0097
0098
0099
0100
0101
0102
0103
0104
/**
 * is_zero_ether_addr - Determine if given Ethernet address is all zeros.
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if the address is all zeroes.
 *
 * Please note: addr must be aligned to u16.
 */
static inline bool is_zero_ether_addr(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* One u32 + one u16 load cover all six bytes. */
	return ((*(const u32 *)addr) | (*(const u16 *)(addr + 4))) == 0;
#else
	return (*(const u16 *)(addr + 0) |
		*(const u16 *)(addr + 2) |
		*(const u16 *)(addr + 4)) == 0;
#endif
}
0115
0116
0117
0118
0119
0120
0121
0122
/**
 * is_multicast_ether_addr - Determine if the Ethernet address is a multicast.
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if the address is a multicast address, i.e. the I/G bit
 * (least-significant bit of the first octet) is set.  By definition the
 * broadcast address is also a multicast address.
 */
static inline bool is_multicast_ether_addr(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	u32 a = *(const u32 *)addr;
#else
	u16 a = *(const u16 *)addr;
#endif
#ifdef __BIG_ENDIAN
	/* On big endian the first octet sits in the top byte of the load. */
	return 0x01 & (a >> ((sizeof(a) * 8) - 8));
#else
	return 0x01 & a;
#endif
}
0136
/* Same test as is_multicast_ether_addr(), but using a single u64 load on
 * 64-bit machines with efficient unaligned access.  NOTE: this reads 8 bytes
 * starting at @addr, i.e. 2 bytes past the 6-byte MAC — only use it where
 * the address is known to be followed by at least 2 readable bytes.
 */
static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
#ifdef __BIG_ENDIAN
	return 0x01 & ((*(const u64 *)addr) >> 56);
#else
	return 0x01 & (*(const u64 *)addr);
#endif
#else
	return is_multicast_ether_addr(addr);
#endif
}
0149
0150
0151
0152
0153
0154
0155
0156 static inline bool is_local_ether_addr(const u8 *addr)
0157 {
0158 return 0x02 & addr[0];
0159 }
0160
0161
0162
0163
0164
0165
0166
0167
0168
0169 static inline bool is_broadcast_ether_addr(const u8 *addr)
0170 {
0171 return (*(const u16 *)(addr + 0) &
0172 *(const u16 *)(addr + 2) &
0173 *(const u16 *)(addr + 4)) == 0xffff;
0174 }
0175
0176
0177
0178
0179
0180
0181
0182 static inline bool is_unicast_ether_addr(const u8 *addr)
0183 {
0184 return !is_multicast_ether_addr(addr);
0185 }
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198 static inline bool is_valid_ether_addr(const u8 *addr)
0199 {
0200
0201
0202 return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
0203 }
0204
0205
0206
0207
0208
0209
0210
0211
0212
/**
 * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol
 * @proto: Ethertype/length value to be tested
 *
 * Check that the value from the Ethertype/length field is a valid Ethertype
 * (i.e. >= ETH_P_802_3_MIN, so not an 802.3 length field).
 *
 * Return true if the valid is an 802.3 supported Ethertype.
 */
static inline bool eth_proto_is_802_3(__be16 proto)
{
#ifndef __BIG_ENDIAN
	/* if CPU is little endian mask off bits representing LSB */
	proto &= htons(0xFF00);
#endif
	/* cast both to u16 and compare since LSB can be ignored */
	return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN);
}
0222
0223
0224
0225
0226
0227
0228
0229
0230 static inline void eth_random_addr(u8 *addr)
0231 {
0232 get_random_bytes(addr, ETH_ALEN);
0233 addr[0] &= 0xfe;
0234 addr[0] |= 0x02;
0235 }
0236
0237
0238
0239
0240
0241
0242
/**
 * eth_broadcast_addr - Assign broadcast address
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Assign the broadcast address (ff:ff:ff:ff:ff:ff) to the given address array.
 */
static inline void eth_broadcast_addr(u8 *addr)
{
	memset(addr, 0xff, ETH_ALEN);
}
0247
0248
0249
0250
0251
0252
0253
/**
 * eth_zero_addr - Assign zero address
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Assign the zero address (00:00:00:00:00:00) to the given address array.
 */
static inline void eth_zero_addr(u8 *addr)
{
	memset(addr, 0x00, ETH_ALEN);
}
0258
0259
0260
0261
0262
0263
0264
0265
0266
0267
/**
 * eth_hw_addr_random - Generate software assigned random Ethernet and
 * set device flag
 * @dev: pointer to net_device structure
 *
 * Generate a random Ethernet address (MAC) to be used by a net device
 * and set addr_assign_type so the state can be read by sysfs and be
 * used by userspace.
 */
static inline void eth_hw_addr_random(struct net_device *dev)
{
	u8 addr[ETH_ALEN];

	eth_random_addr(addr);
	__dev_addr_set(dev, addr, ETH_ALEN);
	dev->addr_assign_type = NET_ADDR_RANDOM;
}
0276
0277
0278
0279
0280
0281
0282
/**
 * eth_hw_addr_crc - Calculate CRC from netdev_hw_addr
 * @ha: pointer to hardware address
 *
 * Calculate CRC from a hardware address as basis for filter hashes.
 */
static inline u32 eth_hw_addr_crc(struct netdev_hw_addr *ha)
{
	return ether_crc(ETH_ALEN, ha->addr);
}
0287
0288
0289
0290
0291
0292
0293
0294
/**
 * ether_addr_copy - Copy an Ethernet address
 * @dst: Pointer to a six-byte array Ethernet address destination
 * @src: Pointer to a six-byte array Ethernet address source
 *
 * Please note: dst & src must both be aligned to u16.
 */
static inline void ether_addr_copy(u8 *dst, const u8 *src)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* One 32-bit plus one 16-bit store. */
	*(u32 *)dst = *(const u32 *)src;
	*(u16 *)(dst + 4) = *(const u16 *)(src + 4);
#else
	u16 *a = (u16 *)dst;
	const u16 *b = (const u16 *)src;

	a[0] = b[0];
	a[1] = b[1];
	a[2] = b[2];
#endif
}
0309
0310
0311
0312
0313
0314
0315
0316
/**
 * eth_hw_addr_set - Assign Ethernet address to a net_device
 * @dev: pointer to net_device structure
 * @addr: address to assign
 *
 * Assign given address to the net_device, addr_assign_type is not changed.
 */
static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
{
	__dev_addr_set(dev, addr, ETH_ALEN);
}
0321
0322
0323
0324
0325
0326
0327
0328
0329
/**
 * eth_hw_addr_inherit - Copy dev_addr from another net_device
 * @dst: pointer to net_device to copy dev_addr to
 * @src: pointer to net_device to copy dev_addr from
 *
 * Copy the Ethernet address from one net_device to another along with
 * the address attributes (addr_assign_type).
 */
static inline void eth_hw_addr_inherit(struct net_device *dst,
				       struct net_device *src)
{
	dst->addr_assign_type = src->addr_assign_type;
	eth_hw_addr_set(dst, src->dev_addr);
}
0336
0337
0338
0339
0340
0341
0342
0343
0344
0345
/**
 * ether_addr_equal - Compare two Ethernet addresses
 * @addr1: Pointer to a six-byte array containing the Ethernet address
 * @addr2: Pointer other six-byte array containing the Ethernet address
 *
 * Compare two Ethernet addresses, returns true if equal.
 *
 * Please note: addr1 & addr2 must both be aligned to u16.
 */
static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* XOR of all six bytes folded into one word: zero iff equal. */
	u32 fold = ((*(const u32 *)addr1) ^ (*(const u32 *)addr2)) |
		   ((*(const u16 *)(addr1 + 4)) ^ (*(const u16 *)(addr2 + 4)));

	return fold == 0;
#else
	const u16 *a = (const u16 *)addr1;
	const u16 *b = (const u16 *)addr2;

	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
#endif
}
0360
0361
0362
0363
0364
0365
0366
0367
0368
0369
0370
0371
0372
0373
0374
/**
 * ether_addr_equal_64bits - Compare two Ethernet addresses
 * @addr1: Pointer to an array of 8 bytes
 * @addr2: Pointer to an other array of 8 bytes
 *
 * Compare two Ethernet addresses, returns true if equal, false otherwise.
 *
 * The function doesn't need any conditional branches and possibly uses
 * word memory accesses on CPU allowing cheap unaligned memory reads.
 * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 }
 *
 * Please note that alignment of addr1 & addr2 are only guaranteed to be
 * 16 bits, and this reads a full 8 bytes from each: the caller must
 * ensure 2 bytes of padding after each 6-byte address are readable
 * (their contents are masked off by the 16-bit shift).
 */
static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);

#ifdef __BIG_ENDIAN
	/* Discard the two low (padding) bytes of the fold. */
	return (fold >> 16) == 0;
#else
	/* Discard the two high (padding) bytes of the fold. */
	return (fold << 16) == 0;
#endif
#else
	return ether_addr_equal(addr1, addr2);
#endif
}
0389
0390
0391
0392
0393
0394
0395
0396
0397
0398
/**
 * ether_addr_equal_unaligned - Compare two not u16 aligned Ethernet addresses
 * @addr1: Pointer to a six-byte array containing the Ethernet address
 * @addr2: Pointer other six-byte array containing the Ethernet address
 *
 * Compare two Ethernet addresses, returns true if equal.
 *
 * Please note: Use only when any Ethernet address may not be u16 aligned.
 */
static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return ether_addr_equal(addr1, addr2);
#else
	return memcmp(addr1, addr2, ETH_ALEN) == 0;
#endif
}
0407
0408
0409
0410
0411
0412
0413
0414
0415
0416
0417
0418 static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
0419 const u8 *mask)
0420 {
0421 int i;
0422
0423 for (i = 0; i < ETH_ALEN; i++) {
0424 if ((addr1[i] ^ addr2[i]) & mask[i])
0425 return false;
0426 }
0427
0428 return true;
0429 }
0430
0431
0432
0433
0434
0435
0436
0437 static inline u64 ether_addr_to_u64(const u8 *addr)
0438 {
0439 u64 u = 0;
0440 int i;
0441
0442 for (i = 0; i < ETH_ALEN; i++)
0443 u = u << 8 | addr[i];
0444
0445 return u;
0446 }
0447
0448
0449
0450
0451
0452
0453 static inline void u64_to_ether_addr(u64 u, u8 *addr)
0454 {
0455 int i;
0456
0457 for (i = ETH_ALEN - 1; i >= 0; i--) {
0458 addr[i] = u & 0xff;
0459 u = u >> 8;
0460 }
0461 }
0462
0463
0464
0465
0466
0467
0468 static inline void eth_addr_dec(u8 *addr)
0469 {
0470 u64 u = ether_addr_to_u64(addr);
0471
0472 u--;
0473 u64_to_ether_addr(u, addr);
0474 }
0475
0476
0477
0478
0479
0480 static inline void eth_addr_inc(u8 *addr)
0481 {
0482 u64 u = ether_addr_to_u64(addr);
0483
0484 u++;
0485 u64_to_ether_addr(u, addr);
0486 }
0487
0488
0489
0490
0491
0492
0493
0494
0495
0496
0497
0498
/**
 * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
 * @dev: Pointer to a device structure
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Compare passed address with all addresses of the device. Return true if the
 * address if one of the device addresses.
 *
 * Note that this function calls ether_addr_equal_64bits() so take care of
 * the right padding: @addr must point to a buffer of at least 8 bytes
 * (hence the 6 + 2 in the prototype).
 */
static inline bool is_etherdev_addr(const struct net_device *dev,
				    const u8 addr[6 + 2])
{
	struct netdev_hw_addr *ha;
	bool res = false;

	/* The device address list is RCU-protected. */
	rcu_read_lock();
	for_each_dev_addr(dev, ha) {
		res = ether_addr_equal_64bits(addr, ha->addr);
		if (res)
			break;
	}
	rcu_read_unlock();
	return res;
}
0514 #endif
0515
0516
0517
0518
0519
0520
0521
0522
0523
0524
0525
0526
0527
/**
 * compare_ether_header - Compare two Ethernet headers
 * @a: Pointer to Ethernet header
 * @b: Pointer to Ethernet header
 *
 * Compare two Ethernet headers, returns 0 if equal.
 * This assumes that the network header (i.e., IP header) is 4-byte
 * aligned OR the platform can handle unaligned access.  This is the
 * case for all packets coming into netif_receive_skb or similar
 * entry points.
 */
static inline unsigned long compare_ether_header(const void *a, const void *b)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	unsigned long fold;

	/*
	 * We want to compare 14 bytes:
	 *  [a0 ... a13] ^ [b0 ... b13]
	 * Use two long XOR, ORed together, with an overlap of two bytes.
	 *  [a0 ... a7] ^ [b0 ... b7] |
	 *  [a6 ... a13] ^ [b6 ... b13]
	 * This means the [a6 a7] ^ [b6 b7] part is done two times.
	 */
	fold = *(unsigned long *)a ^ *(unsigned long *)b;
	fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6);
	return fold;
#else
	/* 16-bit load for the first two bytes, then three 32-bit loads. */
	u32 *a32 = (u32 *)((u8 *)a + 2);
	u32 *b32 = (u32 *)((u8 *)b + 2);

	return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
	       (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
#endif
}
0552
0553
0554
0555
0556
0557
0558
0559
0560
0561
0562
0563 static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
0564 unsigned int id)
0565 {
0566 u64 u = ether_addr_to_u64(base_addr);
0567 u8 addr[ETH_ALEN];
0568
0569 u += id;
0570 u64_to_ether_addr(u, addr);
0571 eth_hw_addr_set(dev, addr);
0572 }
0573
0574
0575
0576
0577
0578
0579
0580
/**
 * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
 * @skb: Buffer to pad
 *
 * An Ethernet frame should have a minimum size of 60 bytes (ETH_ZLEN).
 * This function takes short frames and pads them via skb_put_padto().
 */
static inline int eth_skb_pad(struct sk_buff *skb)
{
	return skb_put_padto(skb, ETH_ZLEN);
}
0585
0586 #endif