// SPDX-License-Identifier: GPL-2.0
/*
 * Networking over Thunderbolt cables using the ThunderboltIP protocol.
 *
 * Authors: Amir Levy <amir.jer.levy@intel.com>
 *          Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */
#include <linux/atomic.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/sizes.h>
#include <linux/thunderbolt.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include <net/ip6_checksum.h>

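/* Protocol timeouts in ms */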
#define TBNET_LOGIN_DELAY	4500
#define TBNET_LOGIN_TIMEOUT	500
#define TBNET_LOGOUT_TIMEOUT	1000

#define TBNET_RING_SIZE		256
#define TBNET_LOGIN_RETRIES	60
#define TBNET_LOGOUT_RETRIES	10
#define TBNET_MATCH_FRAGS_ID	BIT(1)
#define TBNET_64K_FRAMES	BIT(2)
#define TBNET_MAX_MTU		SZ_64K
#define TBNET_FRAME_SIZE	SZ_4K
#define TBNET_MAX_PAYLOAD_SIZE	\
	(TBNET_FRAME_SIZE - sizeof(struct thunderbolt_ip_frame_header))

#define TBNET_RX_MAX_SIZE	\
	(TBNET_FRAME_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define TBNET_RX_PAGE_ORDER	get_order(TBNET_RX_MAX_SIZE)
#define TBNET_RX_PAGE_SIZE	(PAGE_SIZE << TBNET_RX_PAGE_ORDER)

#define TBNET_L0_PORT_NUM(route) ((route) & GENMASK(5, 0))
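/**
 * struct thunderbolt_ip_frame_header - Header for each Thunderbolt frame
 * @frame_size: Size of the data carried by the frame
 * @frame_index: Running index of the frame within a packet
 * @frame_id: ID used to match frames belonging to the same packet
 * @frame_count: Total number of frames assembling a full packet
 *
 * Each data frame passed to the high-speed DMA ring starts with this
 * header. When %TBNET_MATCH_FRAGS_ID is announced in the network
 * directory, @frame_id is used to match the fragments of a long packet
 * together.
 */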
struct thunderbolt_ip_frame_header {
	u32 frame_size;
	u16 frame_index;
	u16 frame_id;
	u32 frame_count;
};

enum thunderbolt_ip_frame_pdf {
	TBIP_PDF_FRAME_START = 1,
	TBIP_PDF_FRAME_END,
};

enum thunderbolt_ip_type {
	TBIP_LOGIN,
	TBIP_LOGIN_RESPONSE,
	TBIP_LOGOUT,
	TBIP_STATUS,
};

struct thunderbolt_ip_header {
	u32 route_hi;
	u32 route_lo;
	u32 length_sn;
	uuid_t uuid;
	uuid_t initiator_uuid;
	uuid_t target_uuid;
	u32 type;
	u32 command_id;
};

#define TBIP_HDR_LENGTH_MASK		GENMASK(5, 0)
#define TBIP_HDR_SN_MASK		GENMASK(28, 27)
#define TBIP_HDR_SN_SHIFT		27

struct thunderbolt_ip_login {
	struct thunderbolt_ip_header hdr;
	u32 proto_version;
	u32 transmit_path;
	u32 reserved[4];
};

#define TBIP_LOGIN_PROTO_VERSION	1

struct thunderbolt_ip_login_response {
	struct thunderbolt_ip_header hdr;
	u32 status;
	u32 receiver_mac[2];
	u32 receiver_mac_len;
	u32 reserved[4];
};

struct thunderbolt_ip_logout {
	struct thunderbolt_ip_header hdr;
};

struct thunderbolt_ip_status {
	struct thunderbolt_ip_header hdr;
	u32 status;
};

struct tbnet_stats {
	u64 tx_packets;
	u64 rx_packets;
	u64 tx_bytes;
	u64 rx_bytes;
	u64 rx_errors;
	u64 tx_errors;
	u64 rx_length_errors;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_missed_errors;
};

struct tbnet_frame {
	struct net_device *dev;
	struct page *page;
	struct ring_frame frame;
};

struct tbnet_ring {
	struct tbnet_frame frames[TBNET_RING_SIZE];
	unsigned int cons;
	unsigned int prod;
	struct tb_ring *ring;
};

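/**
 * struct tbnet - ThunderboltIP network driver private data
 * @svc: XDomain service the driver is bound to
 * @xd: XDomain the service belongs to
 * @handler: ThunderboltIP configuration protocol handler
 * @dev: Networking device
 * @napi: NAPI structure for Rx polling
 * @stats: Network statistics
 * @skb: Network packet that is currently processed on Rx path
 * @command_id: ID used for next configuration protocol packet
 * @login_sent: ThunderboltIP login message successfully sent
 * @login_received: ThunderboltIP login message received from the remote
 *		    host
 * @local_transmit_path: HopID we are using to send out packets
 * @remote_transmit_path: HopID the other end uses to receive packets
 * @connection_lock: Lock serializing access to @login_sent,
 *		     @login_received and @remote_transmit_path
 * @login_retries: Number of login retries currently done
 * @login_work: Worker to send ThunderboltIP login packets
 * @connected_work: Worker that finalizes the ThunderboltIP connection
 *		    setup and enables DMA paths for high speed data
 *		    transfers
 * @disconnect_work: Worker that handles tearing down the ThunderboltIP
 *		     connection
 * @rx_hdr: Copy of the currently processed Rx frame header (in CPU byte
 *	    order), used when a network packet consists of multiple
 *	    Thunderbolt frames
 * @rx_ring: Software ring holding Rx frames
 * @frame_id: Frame ID used for the next Tx packet
 *	      (if %TBNET_MATCH_FRAGS_ID is supported in both ends)
 * @tx_ring: Software ring holding Tx frames
 */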
struct tbnet {
	const struct tb_service *svc;
	struct tb_xdomain *xd;
	struct tb_protocol_handler handler;
	struct net_device *dev;
	struct napi_struct napi;
	struct tbnet_stats stats;
	struct sk_buff *skb;
	atomic_t command_id;
	bool login_sent;
	bool login_received;
	int local_transmit_path;
	int remote_transmit_path;
	struct mutex connection_lock;
	int login_retries;
	struct delayed_work login_work;
	struct work_struct connected_work;
	struct work_struct disconnect_work;
	struct thunderbolt_ip_frame_header rx_hdr;
	struct tbnet_ring rx_ring;
	atomic_t frame_id;
	struct tbnet_ring tx_ring;
};

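/* Network property directory UUID: c66189ca-1cce-4195-bdb8-49592e5f5a4f */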
static const uuid_t tbnet_dir_uuid =
	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);

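/* ThunderboltIP protocol UUID: 798f589e-3616-8a47-97c6-5664a920c8dd */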
static const uuid_t tbnet_svc_uuid =
	UUID_INIT(0x798f589e, 0x3616, 0x8a47,
		  0x97, 0xc6, 0x56, 0x64, 0xa9, 0x20, 0xc8, 0xdd);

static struct tb_property_dir *tbnet_dir;

static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
			      u8 sequence, const uuid_t *initiator_uuid,
			      const uuid_t *target_uuid,
			      enum thunderbolt_ip_type type, size_t size,
			      u32 command_id)
{
	u32 length_sn;

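	/* Length does not include route_hi/lo and length_sn fields */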
	length_sn = (size - 3 * 4) / 4;
	length_sn |= (sequence << TBIP_HDR_SN_SHIFT) & TBIP_HDR_SN_MASK;

	hdr->route_hi = upper_32_bits(route);
	hdr->route_lo = lower_32_bits(route);
	hdr->length_sn = length_sn;
	uuid_copy(&hdr->uuid, &tbnet_svc_uuid);
	uuid_copy(&hdr->initiator_uuid, initiator_uuid);
	uuid_copy(&hdr->target_uuid, target_uuid);
	hdr->type = type;
	hdr->command_id = command_id;
}

static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence,
				u32 command_id)
{
	struct thunderbolt_ip_login_response reply;
	struct tb_xdomain *xd = net->xd;

	memset(&reply, 0, sizeof(reply));
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply),
			  command_id);
	memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN);
	reply.receiver_mac_len = ETH_ALEN;

	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}

static int tbnet_login_request(struct tbnet *net, u8 sequence)
{
	struct thunderbolt_ip_login_response reply;
	struct thunderbolt_ip_login request;
	struct tb_xdomain *xd = net->xd;

	memset(&request, 0, sizeof(request));
	tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN, sizeof(request),
			  atomic_inc_return(&net->command_id));

	request.proto_version = TBIP_LOGIN_PROTO_VERSION;
	request.transmit_path = net->local_transmit_path;

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGIN_TIMEOUT);
}

static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence,
				 u32 command_id)
{
	struct thunderbolt_ip_status reply;
	struct tb_xdomain *xd = net->xd;

	memset(&reply, 0, sizeof(reply));
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_STATUS, sizeof(reply),
			  atomic_inc_return(&net->command_id));
	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}

static int tbnet_logout_request(struct tbnet *net)
{
	struct thunderbolt_ip_logout request;
	struct thunderbolt_ip_status reply;
	struct tb_xdomain *xd = net->xd;

	memset(&request, 0, sizeof(request));
	tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGOUT, sizeof(request),
			  atomic_inc_return(&net->command_id));

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGOUT_TIMEOUT);
}

static void start_login(struct tbnet *net)
{
	mutex_lock(&net->connection_lock);
	net->login_sent = false;
	net->login_received = false;
	mutex_unlock(&net->connection_lock);

	queue_delayed_work(system_long_wq, &net->login_work,
			   msecs_to_jiffies(1000));
}

static void stop_login(struct tbnet *net)
{
	cancel_delayed_work_sync(&net->login_work);
	cancel_work_sync(&net->connected_work);
}

static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
{
	return tf->frame.size ? : TBNET_FRAME_SIZE;
}

static void tbnet_free_buffers(struct tbnet_ring *ring)
{
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
		struct tbnet_frame *tf = &ring->frames[i];
		enum dma_data_direction dir;
		unsigned int order;
		size_t size;

		if (!tf->page)
			continue;

		if (ring->ring->is_tx) {
			dir = DMA_TO_DEVICE;
			order = 0;
			size = TBNET_FRAME_SIZE;
		} else {
			dir = DMA_FROM_DEVICE;
			order = TBNET_RX_PAGE_ORDER;
			size = TBNET_RX_PAGE_SIZE;
		}

		if (tf->frame.buffer_phy)
			dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
				       dir);

		__free_pages(tf->page, order);
		tf->page = NULL;
	}

	ring->cons = 0;
	ring->prod = 0;
}

static void tbnet_tear_down(struct tbnet *net, bool send_logout)
{
	netif_carrier_off(net->dev);
	netif_stop_queue(net->dev);

	stop_login(net);

	mutex_lock(&net->connection_lock);

	if (net->login_sent && net->login_received) {
		int ret, retries = TBNET_LOGOUT_RETRIES;

		while (send_logout && retries-- > 0) {
			ret = tbnet_logout_request(net);
			if (ret != -ETIMEDOUT)
				break;
		}

		tb_ring_stop(net->rx_ring.ring);
		tb_ring_stop(net->tx_ring.ring);
		tbnet_free_buffers(&net->rx_ring);
		tbnet_free_buffers(&net->tx_ring);

		ret = tb_xdomain_disable_paths(net->xd,
					       net->local_transmit_path,
					       net->rx_ring.ring->hop,
					       net->remote_transmit_path,
					       net->tx_ring.ring->hop);
		if (ret)
			netdev_warn(net->dev, "failed to disable DMA paths\n");

		tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
		net->remote_transmit_path = 0;
	}

	net->login_retries = 0;
	net->login_sent = false;
	net->login_received = false;

	mutex_unlock(&net->connection_lock);
}

static int tbnet_handle_packet(const void *buf, size_t size, void *data)
{
	const struct thunderbolt_ip_login *pkg = buf;
	struct tbnet *net = data;
	u32 command_id;
	int ret = 0;
	u32 sequence;
	u64 route;

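	/* Make sure the packet is for us */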
	if (size < sizeof(struct thunderbolt_ip_header))
		return 0;
	if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid))
		return 0;
	if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid))
		return 0;

	route = ((u64)pkg->hdr.route_hi << 32) | pkg->hdr.route_lo;
	route &= ~BIT_ULL(63);
	if (route != net->xd->route)
		return 0;

	sequence = pkg->hdr.length_sn & TBIP_HDR_SN_MASK;
	sequence >>= TBIP_HDR_SN_SHIFT;
	command_id = pkg->hdr.command_id;

	switch (pkg->hdr.type) {
	case TBIP_LOGIN:
		if (!netif_running(net->dev))
			break;

		ret = tbnet_login_response(net, route, sequence,
					   pkg->hdr.command_id);
		if (!ret) {
			mutex_lock(&net->connection_lock);
			net->login_received = true;
			net->remote_transmit_path = pkg->transmit_path;

			if (net->login_retries >= TBNET_LOGIN_RETRIES ||
			    !net->login_sent) {
				net->login_retries = 0;
				queue_delayed_work(system_long_wq,
						   &net->login_work, 0);
			}
			mutex_unlock(&net->connection_lock);

			queue_work(system_long_wq, &net->connected_work);
		}
		break;

	case TBIP_LOGOUT:
		ret = tbnet_logout_response(net, route, sequence, command_id);
		if (!ret)
			queue_work(system_long_wq, &net->disconnect_work);
		break;

	default:
		return 0;
	}

	if (ret)
		netdev_warn(net->dev, "failed to send ThunderboltIP response\n");

	return 1;
}

static unsigned int tbnet_available_buffers(const struct tbnet_ring *ring)
{
	return ring->prod - ring->cons;
}

static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
{
	struct tbnet_ring *ring = &net->rx_ring;
	int ret;

	while (nbuffers--) {
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
		unsigned int index = ring->prod & (TBNET_RING_SIZE - 1);
		struct tbnet_frame *tf = &ring->frames[index];
		dma_addr_t dma_addr;

		if (tf->page)
			break;

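		/* Allocate page (order > 0) so that it can hold maximum
		 * ThunderboltIP frame (4kB) and the additional room for
		 * SKB shared info required by build_skb().
		 */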
		tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
		if (!tf->page) {
			ret = -ENOMEM;
			goto err_free;
		}

		dma_addr = dma_map_page(dma_dev, tf->page, 0,
					TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			ret = -ENOMEM;
			goto err_free;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->dev = net->dev;

		tb_ring_rx(ring->ring, &tf->frame);

		ring->prod++;
	}

	return 0;

err_free:
	tbnet_free_buffers(ring);
	return ret;
}

static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
{
	struct tbnet_ring *ring = &net->tx_ring;
	struct device *dma_dev = tb_ring_dma_device(ring->ring);
	struct tbnet_frame *tf;
	unsigned int index;

	if (!tbnet_available_buffers(ring))
		return NULL;

	index = ring->cons++ & (TBNET_RING_SIZE - 1);

	tf = &ring->frames[index];
	tf->frame.size = 0;

	dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
				tbnet_frame_size(tf), DMA_TO_DEVICE);

	return tf;
}

static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			      bool canceled)
{
	struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
	struct tbnet *net = netdev_priv(tf->dev);

	net->tx_ring.prod++;

	if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
		netif_wake_queue(net->dev);
}

static int tbnet_alloc_tx_buffers(struct tbnet *net)
{
	struct tbnet_ring *ring = &net->tx_ring;
	struct device *dma_dev = tb_ring_dma_device(ring->ring);
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct tbnet_frame *tf = &ring->frames[i];
		dma_addr_t dma_addr;

		tf->page = alloc_page(GFP_KERNEL);
		if (!tf->page) {
			tbnet_free_buffers(ring);
			return -ENOMEM;
		}

		dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			__free_page(tf->page);
			tf->page = NULL;
			tbnet_free_buffers(ring);
			return -ENOMEM;
		}

		tf->dev = net->dev;
		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = tbnet_tx_callback;
		tf->frame.sof = TBIP_PDF_FRAME_START;
		tf->frame.eof = TBIP_PDF_FRAME_END;
	}

	ring->cons = 0;
	ring->prod = TBNET_RING_SIZE - 1;

	return 0;
}

static void tbnet_connected_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), connected_work);
	bool connected;
	int ret;

	if (netif_carrier_ok(net->dev))
		return;

	mutex_lock(&net->connection_lock);
	connected = net->login_sent && net->login_received;
	mutex_unlock(&net->connection_lock);

	if (!connected)
		return;

	ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path);
	if (ret != net->remote_transmit_path) {
		netdev_err(net->dev, "failed to allocate Rx HopID\n");
		return;
	}

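	/* Both logins successful so enable the high-speed DMA paths and
	 * start the network device queue.
	 */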
	ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
				      net->rx_ring.ring->hop,
				      net->remote_transmit_path,
				      net->tx_ring.ring->hop);
	if (ret) {
		netdev_err(net->dev, "failed to enable DMA paths\n");
		return;
	}

	tb_ring_start(net->tx_ring.ring);
	tb_ring_start(net->rx_ring.ring);

	ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE);
	if (ret)
		goto err_stop_rings;

	ret = tbnet_alloc_tx_buffers(net);
	if (ret)
		goto err_free_rx_buffers;

	netif_carrier_on(net->dev);
	netif_start_queue(net->dev);
	return;

err_free_rx_buffers:
	tbnet_free_buffers(&net->rx_ring);
err_stop_rings:
	tb_ring_stop(net->rx_ring.ring);
	tb_ring_stop(net->tx_ring.ring);
	tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
}

static void tbnet_login_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), login_work.work);
	unsigned long delay = msecs_to_jiffies(TBNET_LOGIN_DELAY);
	int ret;

	if (netif_carrier_ok(net->dev))
		return;

	ret = tbnet_login_request(net, net->login_retries % 4);
	if (ret) {
		if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
			queue_delayed_work(system_long_wq, &net->login_work,
					   delay);
		} else {
			netdev_info(net->dev, "ThunderboltIP login timed out\n");
		}
	} else {
		net->login_retries = 0;

		mutex_lock(&net->connection_lock);
		net->login_sent = true;
		mutex_unlock(&net->connection_lock);

		queue_work(system_long_wq, &net->connected_work);
	}
}

static void tbnet_disconnect_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), disconnect_work);

	tbnet_tear_down(net, false);
}

static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
			      const struct thunderbolt_ip_frame_header *hdr)
{
	u32 frame_id, frame_count, frame_size, frame_index;
	unsigned int size;

	if (tf->frame.flags & RING_DESC_CRC_ERROR) {
		net->stats.rx_crc_errors++;
		return false;
	} else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) {
		net->stats.rx_over_errors++;
		return false;
	}

	size = tbnet_frame_size(tf);
	if (size <= sizeof(*hdr)) {
		net->stats.rx_length_errors++;
		return false;
	}

	frame_count = le32_to_cpu(hdr->frame_count);
	frame_size = le32_to_cpu(hdr->frame_size);
	frame_index = le16_to_cpu(hdr->frame_index);
	frame_id = le16_to_cpu(hdr->frame_id);

	if ((frame_size > size - sizeof(*hdr)) || !frame_size) {
		net->stats.rx_length_errors++;
		return false;
	}

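	/* In case we're in the middle of a packet, validate the frame
	 * header based on the first fragment of the packet.
	 */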
	if (net->skb && net->rx_hdr.frame_count) {
		if (frame_count != net->rx_hdr.frame_count) {
			net->stats.rx_length_errors++;
			return false;
		}

		if (frame_index != net->rx_hdr.frame_index + 1 ||
		    frame_id != net->rx_hdr.frame_id) {
			net->stats.rx_missed_errors++;
			return false;
		}

		if (net->skb->len + frame_size > TBNET_MAX_MTU) {
			net->stats.rx_length_errors++;
			return false;
		}

		return true;
	}

	if (frame_count == 0 || frame_count > TBNET_RING_SIZE / 4) {
		net->stats.rx_length_errors++;
		return false;
	}
	if (frame_index != 0) {
		net->stats.rx_missed_errors++;
		return false;
	}

	return true;
}

static int tbnet_poll(struct napi_struct *napi, int budget)
{
	struct tbnet *net = container_of(napi, struct tbnet, napi);
	unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring);
	struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring);
	unsigned int rx_packets = 0;

	while (rx_packets < budget) {
		const struct thunderbolt_ip_frame_header *hdr;
		unsigned int hdr_size = sizeof(*hdr);
		struct sk_buff *skb = NULL;
		struct ring_frame *frame;
		struct tbnet_frame *tf;
		struct page *page;
		bool last = true;
		u32 frame_size;

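		/* Return some buffers to hardware, one at a time is too
		 * slow so allocate MAX_SKB_FRAGS buffers at the same
		 * time.
		 */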
		if (cleaned_count >= MAX_SKB_FRAGS) {
			tbnet_alloc_rx_buffers(net, cleaned_count);
			cleaned_count = 0;
		}

		frame = tb_ring_poll(net->rx_ring.ring);
		if (!frame)
			break;

		dma_unmap_page(dma_dev, frame->buffer_phy,
			       TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);

		tf = container_of(frame, typeof(*tf), frame);

		page = tf->page;
		tf->page = NULL;
		net->rx_ring.cons++;
		cleaned_count++;

		hdr = page_address(page);
		if (!tbnet_check_frame(net, tf, hdr)) {
			__free_pages(page, TBNET_RX_PAGE_ORDER);
			dev_kfree_skb_any(net->skb);
			net->skb = NULL;
			continue;
		}

		frame_size = le32_to_cpu(hdr->frame_size);

		skb = net->skb;
		if (!skb) {
			skb = build_skb(page_address(page),
					TBNET_RX_PAGE_SIZE);
			if (!skb) {
				__free_pages(page, TBNET_RX_PAGE_ORDER);
				net->stats.rx_errors++;
				break;
			}

			skb_reserve(skb, hdr_size);
			skb_put(skb, frame_size);

			net->skb = skb;
		} else {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					page, hdr_size, frame_size,
					TBNET_RX_PAGE_SIZE - hdr_size);
		}

		net->rx_hdr.frame_size = frame_size;
		net->rx_hdr.frame_count = le32_to_cpu(hdr->frame_count);
		net->rx_hdr.frame_index = le16_to_cpu(hdr->frame_index);
		net->rx_hdr.frame_id = le16_to_cpu(hdr->frame_id);
		last = net->rx_hdr.frame_index == net->rx_hdr.frame_count - 1;

		rx_packets++;
		net->stats.rx_bytes += frame_size;

		if (last) {
			skb->protocol = eth_type_trans(skb, net->dev);
			napi_gro_receive(&net->napi, skb);
			net->skb = NULL;
		}
	}

	net->stats.rx_packets += rx_packets;

	if (cleaned_count)
		tbnet_alloc_rx_buffers(net, cleaned_count);

	if (rx_packets >= budget)
		return budget;

	napi_complete_done(napi, rx_packets);

	tb_ring_poll_complete(net->rx_ring.ring);

	return rx_packets;
}

static void tbnet_start_poll(void *data)
{
	struct tbnet *net = data;

	napi_schedule(&net->napi);
}

static int tbnet_open(struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);
	struct tb_xdomain *xd = net->xd;
	u16 sof_mask, eof_mask;
	struct tb_ring *ring;
	int hopid;

	netif_carrier_off(dev);

	ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
				RING_FLAG_FRAME);
	if (!ring) {
		netdev_err(dev, "failed to allocate Tx ring\n");
		return -ENOMEM;
	}
	net->tx_ring.ring = ring;

	hopid = tb_xdomain_alloc_out_hopid(xd, -1);
	if (hopid < 0) {
		netdev_err(dev, "failed to allocate Tx HopID\n");
		tb_ring_free(net->tx_ring.ring);
		net->tx_ring.ring = NULL;
		return hopid;
	}
	net->local_transmit_path = hopid;

	sof_mask = BIT(TBIP_PDF_FRAME_START);
	eof_mask = BIT(TBIP_PDF_FRAME_END);

	ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
				RING_FLAG_FRAME, 0, sof_mask, eof_mask,
				tbnet_start_poll, net);
	if (!ring) {
		netdev_err(dev, "failed to allocate Rx ring\n");
		tb_ring_free(net->tx_ring.ring);
		net->tx_ring.ring = NULL;
		return -ENOMEM;
	}
	net->rx_ring.ring = ring;

	napi_enable(&net->napi);
	start_login(net);

	return 0;
}

static int tbnet_stop(struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);

	napi_disable(&net->napi);

	cancel_work_sync(&net->disconnect_work);
	tbnet_tear_down(net, true);

	tb_ring_free(net->rx_ring.ring);
	net->rx_ring.ring = NULL;

	tb_xdomain_release_out_hopid(net->xd, net->local_transmit_path);
	tb_ring_free(net->tx_ring.ring);
	net->tx_ring.ring = NULL;

	return 0;
}

static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
				    struct tbnet_frame **frames, u32 frame_count)
{
	struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page);
	struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring);
	__wsum wsum = htonl(skb->len - skb_transport_offset(skb));
	unsigned int i, len, offset = skb_transport_offset(skb);
	__be16 protocol = skb->protocol;
	void *data = skb->data;
	void *dest = hdr + 1;
	__sum16 *tucso;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
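		/* No need to calculate checksum so we just update the
		 * total frame count and sync the frames for DMA.
		 */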
		for (i = 0; i < frame_count; i++) {
			hdr = page_address(frames[i]->page);
			hdr->frame_count = cpu_to_le32(frame_count);
			dma_sync_single_for_device(dma_dev,
				frames[i]->frame.buffer_phy,
				tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
		}

		return true;
	}

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, vh;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(vh), &vh);
		if (!vhdr)
			return false;

		protocol = vhdr->h_vlan_encapsulated_proto;
	}

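	/* The packet headers sit in the first frame right after the
	 * frame header. Locate the IP (ipcso) and TCP/UDP (tucso)
	 * checksum fields inside that frame so they can be updated in
	 * place.
	 */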
	if (protocol == htons(ETH_P_IP)) {
		__sum16 *ipcso = dest + ((void *)&(ip_hdr(skb)->check) - data);

		*ipcso = 0;
		*ipcso = ip_fast_csum(dest + skb_network_offset(skb),
				      ip_hdr(skb)->ihl);

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tucso = dest + ((void *)&(udp_hdr(skb)->check) - data);
		else
			return false;

		*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					    ip_hdr(skb)->daddr, 0,
					    ip_hdr(skb)->protocol, 0);
	} else if (skb_is_gso_v6(skb)) {
		tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  IPPROTO_TCP, 0);
		return false;
	} else if (protocol == htons(ETH_P_IPV6)) {
		tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  ipv6_hdr(skb)->nexthdr, 0);
	} else {
		return false;
	}

	for (i = 0; i < frame_count; i++) {
		hdr = page_address(frames[i]->page);
		dest = (void *)(hdr + 1) + offset;
		len = le32_to_cpu(hdr->frame_size) - offset;
		wsum = csum_partial(dest, len, wsum);
		hdr->frame_count = cpu_to_le32(frame_count);

		offset = 0;
	}

	*tucso = csum_fold(wsum);

	for (i = 0; i < frame_count; i++) {
		dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
			tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
	}

	return true;
}

static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
			     unsigned int *len)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];

	*len = skb_frag_size(frag);
	return kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
}

static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);
	struct tbnet_frame *frames[MAX_SKB_FRAGS];
	u16 frame_id = atomic_read(&net->frame_id);
	struct thunderbolt_ip_frame_header *hdr;
	unsigned int len = skb_headlen(skb);
	unsigned int data_len = skb->len;
	unsigned int nframes, i;
	unsigned int frag = 0;
	void *src = skb->data;
	u32 frame_index = 0;
	bool unmap = false;
	void *dest;

	nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
	if (tbnet_available_buffers(&net->tx_ring) < nframes) {
		netif_stop_queue(net->dev);
		return NETDEV_TX_BUSY;
	}

	frames[frame_index] = tbnet_get_tx_buffer(net);
	if (!frames[frame_index])
		goto err_drop;

	hdr = page_address(frames[frame_index]->page);
	dest = hdr + 1;

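	/* If overall packet is bigger than the frame data size */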
	while (data_len > TBNET_MAX_PAYLOAD_SIZE) {
		unsigned int size_left = TBNET_MAX_PAYLOAD_SIZE;

		hdr->frame_size = cpu_to_le32(TBNET_MAX_PAYLOAD_SIZE);
		hdr->frame_index = cpu_to_le16(frame_index);
		hdr->frame_id = cpu_to_le16(frame_id);

		do {
			if (len > size_left) {
				memcpy(dest, src, size_left);
				len -= size_left;
				dest += size_left;
				src += size_left;
				break;
			}

			memcpy(dest, src, len);
			size_left -= len;
			dest += len;

			if (unmap) {
				kunmap_atomic(src);
				unmap = false;
			}

			if (frag < skb_shinfo(skb)->nr_frags) {
				src = tbnet_kmap_frag(skb, frag++, &len);
				unmap = true;
			} else if (unlikely(size_left > 0)) {
				goto err_drop;
			}
		} while (size_left > 0);

		data_len -= TBNET_MAX_PAYLOAD_SIZE;
		frame_index++;

		frames[frame_index] = tbnet_get_tx_buffer(net);
		if (!frames[frame_index])
			goto err_drop;

		hdr = page_address(frames[frame_index]->page);
		dest = hdr + 1;
	}

	hdr->frame_size = cpu_to_le32(data_len);
	hdr->frame_index = cpu_to_le16(frame_index);
	hdr->frame_id = cpu_to_le16(frame_id);

	frames[frame_index]->frame.size = data_len + sizeof(*hdr);

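	/* Copy the remaining fragments into the last, possibly partial frame */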
	while (len < data_len) {
		memcpy(dest, src, len);
		data_len -= len;
		dest += len;

		if (unmap) {
			kunmap_atomic(src);
			unmap = false;
		}

		if (frag < skb_shinfo(skb)->nr_frags) {
			src = tbnet_kmap_frag(skb, frag++, &len);
			unmap = true;
		} else if (unlikely(data_len > 0)) {
			goto err_drop;
		}
	}

	memcpy(dest, src, data_len);

	if (unmap)
		kunmap_atomic(src);

	if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
		goto err_drop;

	for (i = 0; i < frame_index + 1; i++)
		tb_ring_tx(net->tx_ring.ring, &frames[i]->frame);

	if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID)
		atomic_inc(&net->frame_id);

	net->stats.tx_packets++;
	net->stats.tx_bytes += skb->len;

	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;

err_drop:
	net->tx_ring.cons -= frame_index;

	dev_kfree_skb_any(skb);
	net->stats.tx_errors++;

	return NETDEV_TX_OK;
}

static void tbnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats)
{
	struct tbnet *net = netdev_priv(dev);

	stats->tx_packets = net->stats.tx_packets;
	stats->rx_packets = net->stats.rx_packets;
	stats->tx_bytes = net->stats.tx_bytes;
	stats->rx_bytes = net->stats.rx_bytes;
	stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors +
		net->stats.rx_over_errors + net->stats.rx_crc_errors +
		net->stats.rx_missed_errors;
	stats->tx_errors = net->stats.tx_errors;
	stats->rx_length_errors = net->stats.rx_length_errors;
	stats->rx_over_errors = net->stats.rx_over_errors;
	stats->rx_crc_errors = net->stats.rx_crc_errors;
	stats->rx_missed_errors = net->stats.rx_missed_errors;
}

static const struct net_device_ops tbnet_netdev_ops = {
	.ndo_open = tbnet_open,
	.ndo_stop = tbnet_stop,
	.ndo_start_xmit = tbnet_start_xmit,
	.ndo_get_stats64 = tbnet_get_stats64,
};

static void tbnet_generate_mac(struct net_device *dev)
{
	const struct tbnet *net = netdev_priv(dev);
	const struct tb_xdomain *xd = net->xd;
	u8 addr[ETH_ALEN];
	u8 phy_port;
	u32 hash;

	phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route));

	addr[0] = phy_port << 4 | 0x02;
	hash = jhash2((u32 *)xd->local_uuid, 4, 0);
	memcpy(addr + 1, &hash, sizeof(hash));
	hash = jhash2((u32 *)xd->local_uuid, 4, hash);
	addr[5] = hash & 0xff;
	eth_hw_addr_set(dev, addr);
}

static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
{
	struct tb_xdomain *xd = tb_service_parent(svc);
	struct net_device *dev;
	struct tbnet *net;
	int ret;

	dev = alloc_etherdev(sizeof(*net));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &svc->dev);

	net = netdev_priv(dev);
	INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
	INIT_WORK(&net->connected_work, tbnet_connected_work);
	INIT_WORK(&net->disconnect_work, tbnet_disconnect_work);
	mutex_init(&net->connection_lock);
	atomic_set(&net->command_id, 0);
	atomic_set(&net->frame_id, 0);
	net->svc = svc;
	net->dev = dev;
	net->xd = xd;

	tbnet_generate_mac(dev);

	strcpy(dev->name, "thunderbolt%d");
	dev->netdev_ops = &tbnet_netdev_ops;

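	/* ThunderboltIP can take advantage of TSO packets. Instead of
	 * segmenting them we just split the packet into Thunderbolt
	 * frames (maximum payload size of each frame is 4084 bytes) and
	 * calculate checksum over the whole packet here.
	 */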
	dev->hw_features = NETIF_F_SG | NETIF_F_ALL_TSO | NETIF_F_GRO |
			   NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header);

	netif_napi_add(dev, &net->napi, tbnet_poll, NAPI_POLL_WEIGHT);

	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN;

	net->handler.uuid = &tbnet_svc_uuid;
	net->handler.callback = tbnet_handle_packet;
	net->handler.data = net;
	tb_register_protocol_handler(&net->handler);

	tb_service_set_drvdata(svc, net);

	ret = register_netdev(dev);
	if (ret) {
		tb_unregister_protocol_handler(&net->handler);
		free_netdev(dev);
		return ret;
	}

	return 0;
}

static void tbnet_remove(struct tb_service *svc)
{
	struct tbnet *net = tb_service_get_drvdata(svc);

	unregister_netdev(net->dev);
	tb_unregister_protocol_handler(&net->handler);
	free_netdev(net->dev);
}

static void tbnet_shutdown(struct tb_service *svc)
{
	tbnet_tear_down(tb_service_get_drvdata(svc), true);
}

static int __maybe_unused tbnet_suspend(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tbnet *net = tb_service_get_drvdata(svc);

	stop_login(net);
	if (netif_running(net->dev)) {
		netif_device_detach(net->dev);
		tbnet_tear_down(net, true);
	}

	tb_unregister_protocol_handler(&net->handler);
	return 0;
}

static int __maybe_unused tbnet_resume(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tbnet *net = tb_service_get_drvdata(svc);

	tb_register_protocol_handler(&net->handler);

	netif_carrier_off(net->dev);
	if (netif_running(net->dev)) {
		netif_device_attach(net->dev);
		start_login(net);
	}

	return 0;
}

static const struct dev_pm_ops tbnet_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tbnet_suspend, tbnet_resume)
};

static const struct tb_service_id tbnet_ids[] = {
	{ TB_SERVICE("network", 1) },
	{ },
};
MODULE_DEVICE_TABLE(tbsvc, tbnet_ids);

static struct tb_service_driver tbnet_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "thunderbolt-net",
		.pm = &tbnet_pm_ops,
	},
	.probe = tbnet_probe,
	.remove = tbnet_remove,
	.shutdown = tbnet_shutdown,
	.id_table = tbnet_ids,
};

static int __init tbnet_init(void)
{
	int ret;

	tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
	if (!tbnet_dir)
		return -ENOMEM;

	tb_property_add_immediate(tbnet_dir, "prtcid", 1);
	tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
	tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
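	/* Advertise support for matching fragment IDs and 64k frames
	 * (%TBNET_MATCH_FRAGS_ID and %TBNET_64K_FRAMES).
	 */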
	tb_property_add_immediate(tbnet_dir, "prtcstns",
				  TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES);

	ret = tb_register_property_dir("network", tbnet_dir);
	if (ret) {
		tb_property_free_dir(tbnet_dir);
		return ret;
	}

	return tb_register_service_driver(&tbnet_driver);
}
module_init(tbnet_init);

static void __exit tbnet_exit(void)
{
	tb_unregister_service_driver(&tbnet_driver);
	tb_unregister_property_dir("network", tbnet_dir);
	tb_property_free_dir(tbnet_dir);
}
module_exit(tbnet_exit);

MODULE_AUTHOR("Amir Levy <amir.jer.levy@intel.com>");
MODULE_AUTHOR("Michael Jamet <michael.jamet@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Thunderbolt network driver");
MODULE_LICENSE("GPL v2");