/* octeon_network.h: structure and macro definitions used by the
 * LiquidIO host NIC driver module.
 */

#ifndef __OCTEON_NETWORK_H__
#define __OCTEON_NETWORK_H__

#include <linux/ptp_clock_kernel.h>

#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE ETH_MIN_MTU

/* Bit mask values for lio->ifstate */
#define LIO_IFSTATE_DROQ_OPS             0x01
#define LIO_IFSTATE_REGISTERED           0x02
#define LIO_IFSTATE_RUNNING              0x04
#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
#define LIO_IFSTATE_RESETTING            0x10

/* Response to an interface configuration request. */
struct liquidio_if_cfg_resp {
        u64 rh;
        struct liquidio_if_cfg_info cfg_info;
        u64 status;
};

#define LIO_IFCFG_WAIT_TIME              3000 /* In milliseconds */
#define LIQUIDIO_NDEV_STATS_POLL_TIME_MS 200

/* A node in the list of gather components maintained per network device. */
struct octnic_gather {
        /* List manipulation: next and prev pointers. */
        struct list_head list;

        /* Size in bytes of the gather component at sg. */
        int sg_size;

        /* Adjustment (in bytes) applied to align sg. */
        int adjust;

        /* Gather component that can accommodate the largest fragment list
         * received from the network stack.
         */
        struct octeon_sg_entry *sg;

        dma_addr_t sg_dma_ptr;
};

/* Response to a request for NIC port statistics. */
struct oct_nic_stats_resp {
        u64 rh;
        struct oct_link_stats stats;
        u64 status;
};

/* Response to a request for per-VF statistics. */
struct oct_nic_vf_stats_resp {
        u64 rh;
        u64 spoofmac_cnt;
        u64 status;
};

struct oct_nic_stats_ctrl {
        struct completion complete;
        struct net_device *netdev;
};

/* Response to a speed or FEC setting request. */
struct oct_nic_seapi_resp {
        u64 rh;
        union {
                u32 fec_setting;
                u32 speed;
        };
        u64 status;
};

/* Per-interface (per-netdev) private data for the LiquidIO driver. */
struct lio {
        /* Flags indicating the state of the interface (LIO_IFSTATE_*). */
        atomic_t ifstate;

        /* Octeon interface index number for this device. */
        int ifidx;

        /* Octeon instruction (Tx) queue index for this device. */
        int txq;

        /* Octeon output (Rx) queue index for this device. */
        int rxq;

        /* Guards each gather list. */
        spinlock_t *glist_lock;

        /* Array of gather component linked lists. */
        struct list_head *glist;
        void **glists_virt_base;
        dma_addr_t *glists_dma_base;
        u32 glist_entry_size;

        /* NIC properties of the Octeon device this interface belongs to. */
        struct octdev_props *octprops;

        /* Pointer to the Octeon device structure. */
        struct octeon_device *oct_dev;

        struct net_device *netdev;

        /* Link information reported by the core application. */
        struct oct_link_info linfo;

        /* Counter of link state changes. */
        u64 link_changes;

        /* Size of the Tx queue for this device. */
        u32 tx_qsize;

        /* Size of the Rx queue for this device. */
        u32 rx_qsize;

        /* MTU of this interface. */
        u32 mtu;

        /* Per-interface message level (netif_msg) flags. */
        u32 msg_enable;

        /* Copy of interface capabilities (TSO, TSO6, LRO, checksums). */
        u64 dev_capability;

        /* Copy of transmit encapsulation capabilities
         * (TSO, TSO6, checksums) for this device.
         */
        u64 enc_dev_capability;

        /* Copy of the PHY beacon register. */
        u32 phy_beacon_val;

        /* Copy of the PHY LED control register. */
        u32 led_ctrl_val;

        /* PTP clock information. */
        struct ptp_clock_info ptp_info;
        struct ptp_clock *ptp_clock;
        s64 ptp_adjust;

        /* Serializes access to the Octeon PTP registers and data. */
        spinlock_t ptp_lock;

        /* Non-zero while the interface is open. */
        u32 intf_open;

        /* Work queue for Tx queue status. */
        struct cavium_wq txq_status_wq;

        /* Work queues for Rx out-of-memory polling, one per output queue. */
        struct cavium_wq rxq_status_wq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES];

        /* Work queue for link status updates. */
        struct cavium_wq link_status_wq;

        /* Work queue that periodically syncs host time to the firmware. */
        struct cavium_wq sync_octeon_time_wq;

        int netdev_uc_count;
        struct cavium_wk stats_wk;
};

#define LIO_SIZE        (sizeof(struct lio))
#define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev))

#define LIO_MAX_CORES   16
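
/* Illustrative sketch (not part of the driver): GET_LIO() assumes struct lio
 * is stored in the netdev private area, so a netdev callback would typically
 * begin like this (liquidio_example_open is a hypothetical name):
 *
 *      static int liquidio_example_open(struct net_device *netdev)
 *      {
 *              struct lio *lio = GET_LIO(netdev);
 *              struct octeon_device *oct = lio->oct_dev;
 *              ...
 *      }
 */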

/* Enable or disable a feature on the NIC; cmd only requires acknowledgment. */
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);

int setup_rx_oom_poll_fn(struct net_device *netdev);

void cleanup_rx_oom_poll_fn(struct net_device *netdev);

/* Completion callback invoked when a control packet sent to the core
 * application completes; nctrl_ptr holds a copy of the command and data.
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);

int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
                             u32 num_iqs, u32 num_oqs);

irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)),
                                       void *dev);

int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs);

void lio_fetch_stats(struct work_struct *work);

int lio_wait_for_clean_oq(struct octeon_device *oct);

void liquidio_set_ethtool_ops(struct net_device *netdev);

void lio_delete_glists(struct lio *lio);

int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs);

int liquidio_get_speed(struct lio *lio);
int liquidio_set_speed(struct lio *lio, int speed);
int liquidio_get_fec(struct lio *lio);
int liquidio_set_fec(struct lio *lio, int on_off);

int liquidio_change_mtu(struct net_device *netdev, int new_mtu);
#define LIO_CHANGE_MTU_SUCCESS 1
#define LIO_CHANGE_MTU_FAIL    2

/* Alignment applied to the start of receive SKB data. */
#define SKB_ADJ_MASK 0x3F
#define SKB_ADJ      (SKB_ADJ_MASK + 1)

#define MIN_SKB_SIZE    256
#define LIO_RXBUFFER_SZ 2048
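
/* RX buffer management below: recv_buffer_alloc() maps a full page for DMA
 * and hands out a LIO_RXBUFFER_SZ-sized chunk of it; recv_buffer_recycle()
 * flips between the two halves of the page (taking a page reference) when the
 * page is still exclusively owned and local to this NUMA node; and
 * recv_buffer_reuse() attaches a recycled chunk to a freshly allocated SKB.
 */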

static inline void *recv_buffer_alloc(struct octeon_device *oct,
                                      struct octeon_skb_page_info *pg_info)
{
        struct page *page;
        struct sk_buff *skb;
        struct octeon_skb_page_info *skb_pg_info;

        page = alloc_page(GFP_ATOMIC);
        if (unlikely(!page))
                return NULL;

        skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
        if (unlikely(!skb)) {
                __free_page(page);
                pg_info->page = NULL;
                return NULL;
        }

        /* Align the start of SKB data to SKB_ADJ. */
        if ((unsigned long)skb->data & SKB_ADJ_MASK) {
                u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

                skb_reserve(skb, r);
        }

        skb_pg_info = (struct octeon_skb_page_info *)(skb->cb);
        /* Map the whole page so both halves can be handed to the hardware. */
        pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
                                    PAGE_SIZE, DMA_FROM_DEVICE);

        if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
                __free_page(page);
                dev_kfree_skb_any(skb);
                pg_info->page = NULL;
                return NULL;
        }

        pg_info->page = page;
        pg_info->page_offset = 0;
        skb_pg_info->page = page;
        skb_pg_info->page_offset = 0;
        skb_pg_info->dma = pg_info->dma;

        return (void *)skb;
}

static inline void *recv_buffer_fast_alloc(u32 size)
{
        struct sk_buff *skb;
        struct octeon_skb_page_info *skb_pg_info;

        skb = dev_alloc_skb(size + SKB_ADJ);
        if (unlikely(!skb))
                return NULL;

        if ((unsigned long)skb->data & SKB_ADJ_MASK) {
                u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

                skb_reserve(skb, r);
        }

        skb_pg_info = (struct octeon_skb_page_info *)(skb->cb);
        skb_pg_info->page = NULL;
        skb_pg_info->page_offset = 0;
        skb_pg_info->dma = 0;

        return skb;
}

static inline int recv_buffer_recycle(struct octeon_device *oct, void *buf)
{
        struct octeon_skb_page_info *pg_info = buf;

        if (!pg_info->page) {
                dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
                        __func__);
                return -ENOMEM;
        }

        /* The page can only be recycled if we still hold the sole reference
         * and it is local to this NUMA node; otherwise unmap and drop it.
         */
        if (unlikely(page_count(pg_info->page) != 1) ||
            unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
                dma_unmap_page(&oct->pci_dev->dev,
                               pg_info->dma, PAGE_SIZE,
                               DMA_FROM_DEVICE);
                pg_info->dma = 0;
                pg_info->page = NULL;
                pg_info->page_offset = 0;
                return -ENOMEM;
        }

        /* Flip to the other half of the page. */
        if (pg_info->page_offset == 0)
                pg_info->page_offset = LIO_RXBUFFER_SZ;
        else
                pg_info->page_offset = 0;
        page_ref_inc(pg_info->page);

        return 0;
}

static inline void *recv_buffer_reuse(struct octeon_device *oct, void *buf)
{
        struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
        struct sk_buff *skb;

        skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
        if (unlikely(!skb)) {
                dma_unmap_page(&oct->pci_dev->dev,
                               pg_info->dma, PAGE_SIZE,
                               DMA_FROM_DEVICE);
                return NULL;
        }

        if ((unsigned long)skb->data & SKB_ADJ_MASK) {
                u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

                skb_reserve(skb, r);
        }

        skb_pg_info = (struct octeon_skb_page_info *)(skb->cb);
        skb_pg_info->page = pg_info->page;
        skb_pg_info->page_offset = pg_info->page_offset;
        skb_pg_info->dma = pg_info->dma;

        return skb;
}

static inline void
recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
{
        struct sk_buff *skb = (struct sk_buff *)buffer;

        put_page(pg_info->page);
        pg_info->dma = 0;
        pg_info->page = NULL;
        pg_info->page_offset = 0;

        if (skb)
                dev_kfree_skb_any(skb);
}

static inline void recv_buffer_free(void *buffer)
{
        struct sk_buff *skb = (struct sk_buff *)buffer;
        struct octeon_skb_page_info *pg_info;

        pg_info = (struct octeon_skb_page_info *)(skb->cb);

        if (pg_info->page) {
                put_page(pg_info->page);
                pg_info->dma = 0;
                pg_info->page = NULL;
                pg_info->page_offset = 0;
        }

        dev_kfree_skb_any(skb);
}

static inline void recv_buffer_fast_free(void *buffer)
{
        dev_kfree_skb_any((struct sk_buff *)buffer);
}

static inline void tx_buffer_free(void *buffer)
{
        dev_kfree_skb_any((struct sk_buff *)buffer);
}

#define lio_dma_alloc(oct, size, dma_addr) \
        dma_alloc_coherent(&(oct)->pci_dev->dev, size, dma_addr, GFP_KERNEL)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
        dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
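
/* Illustrative sketch (not part of the driver): the wrappers above take the
 * DMA handle by pointer, mirroring dma_alloc_coherent(). For example:
 *
 *      dma_addr_t dma;
 *      void *virt = lio_dma_alloc(oct, PAGE_SIZE, &dma);
 *
 *      if (virt)
 *              lio_dma_free(oct, PAGE_SIZE, virt, dma);
 */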

/* Return a pointer to the receive buffer data backing this SKB. */
static inline void *get_rbd(struct sk_buff *skb)
{
        struct octeon_skb_page_info *pg_info;
        unsigned char *va;

        pg_info = (struct octeon_skb_page_info *)(skb->cb);
        va = page_address(pg_info->page) + pg_info->page_offset;

        return va;
}

static inline u64 lio_map_ring(void *buf)
{
        dma_addr_t dma_addr;
        struct sk_buff *skb = (struct sk_buff *)buf;
        struct octeon_skb_page_info *pg_info;

        pg_info = (struct octeon_skb_page_info *)(skb->cb);
        if (!pg_info->page) {
                pr_err("%s: pg_info->page NULL\n", __func__);
                WARN_ON(1);
        }

        /* The page was mapped at allocation time; just add the offset. */
        dma_addr = pg_info->dma;
        if (!pg_info->dma) {
                pr_err("%s: ERROR it should be already available\n",
                       __func__);
                WARN_ON(1);
        }
        dma_addr += pg_info->page_offset;

        return (u64)dma_addr;
}

static inline void lio_unmap_ring(struct pci_dev *pci_dev, u64 buf_ptr)
{
        dma_unmap_page(&pci_dev->dev, buf_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
}

static inline void *octeon_fast_packet_alloc(u32 size)
{
        return recv_buffer_fast_alloc(size);
}

static inline void octeon_fast_packet_next(struct octeon_droq *droq,
                                           struct sk_buff *nicbuf,
                                           int copy_len,
                                           int idx)
{
        skb_put_data(nicbuf, get_rbd(droq->recv_buf_list[idx].buffer),
                     copy_len);
}

/* Check the interface state against state_flag (LIO_IFSTATE_*). */
static inline int ifstate_check(struct lio *lio, int state_flag)
{
        return atomic_read(&lio->ifstate) & state_flag;
}

/* Set state_flag in the interface state. */
static inline void ifstate_set(struct lio *lio, int state_flag)
{
        atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
}

/* Clear state_flag from the interface state. */
static inline void ifstate_reset(struct lio *lio, int state_flag)
{
        atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}
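
/* Illustrative sketch (not part of the driver): a typical sequence using the
 * helpers above might look like the following. Note that the read-modify-write
 * in ifstate_set()/ifstate_reset() is not atomic as a whole, so callers are
 * expected to serialize updates themselves.
 *
 *      ifstate_set(lio, LIO_IFSTATE_RUNNING);
 *      if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
 *              netif_info(lio, ifup, lio->netdev, "interface running\n");
 *      ifstate_reset(lio, LIO_IFSTATE_RUNNING);
 */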

/* Wait for pending outbound requests to complete; returns 1 if requests are
 * still pending after the bounded wait, 0 otherwise.
 */
static inline int wait_for_pending_requests(struct octeon_device *oct)
{
        int i, pcount = 0;

        for (i = 0; i < MAX_IO_PENDING_PKT_COUNT; i++) {
                pcount = atomic_read(
                        &oct->response_list[OCTEON_ORDERED_SC_LIST]
                        .pending_req_count);
                if (pcount)
                        schedule_timeout_uninterruptible(HZ / 10);
                else
                        break;
        }

        if (pcount)
                return 1;

        return 0;
}

/* Stop all Tx queues of the network device. */
static inline void stop_txqs(struct net_device *netdev)
{
        int i;

        for (i = 0; i < netdev->real_num_tx_queues; i++)
                netif_stop_subqueue(netdev, i);
}

/* Wake all stopped Tx queues of the network device. */
static inline void wake_txqs(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        int i, qno;

        for (i = 0; i < netdev->real_num_tx_queues; i++) {
                qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no;

                if (__netif_subqueue_stopped(netdev, i)) {
                        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
                                                  tx_restart, 1);
                        netif_wake_subqueue(netdev, i);
                }
        }
}

/* Start all Tx queues of the network device if the link is up. */
static inline void start_txqs(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        int i;

        if (lio->linfo.link.s.link_up) {
                for (i = 0; i < netdev->real_num_tx_queues; i++)
                        netif_start_subqueue(netdev, i);
        }
}

/* Map an SKB's queue mapping to an Octeon instruction queue index. */
static inline int skb_iq(struct octeon_device *oct, struct sk_buff *skb)
{
        return skb->queue_mapping % oct->num_iqs;
}

/* Remove and return the first node of a list, or NULL if the list is empty.
 * Callers must serialize access to the list themselves (e.g. via glist_lock).
 */
static inline struct list_head *lio_list_delete_head(struct list_head *root)
{
        struct list_head *node;

        if (list_empty_careful(root))
                node = NULL;
        else
                node = root->next;

        if (node)
                list_del(node);

        return node;
}

#endif /* __OCTEON_NETWORK_H__ */