Back to home page

OSCL-LXR

 
 

    


0001 /**********************************************************************
0002  * Author: Cavium, Inc.
0003  *
0004  * Contact: support@cavium.com
0005  *          Please include "LiquidIO" in the subject.
0006  *
0007  * Copyright (c) 2003-2016 Cavium, Inc.
0008  *
0009  * This file is free software; you can redistribute it and/or modify
0010  * it under the terms of the GNU General Public License, Version 2, as
0011  * published by the Free Software Foundation.
0012  *
0013  * This file is distributed in the hope that it will be useful, but
0014  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
0015  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
0016  * NONINFRINGEMENT.  See the GNU General Public License for more details.
0017  ***********************************************************************/
0018 #include <linux/pci.h>
0019 #include <linux/netdevice.h>
0020 #include <linux/vmalloc.h>
0021 #include "liquidio_common.h"
0022 #include "octeon_droq.h"
0023 #include "octeon_iq.h"
0024 #include "response_manager.h"
0025 #include "octeon_device.h"
0026 #include "octeon_main.h"
0027 #include "octeon_network.h"
0028 #include "cn66xx_regs.h"
0029 #include "cn66xx_device.h"
0030 #include "cn23xx_pf_device.h"
0031 #include "cn23xx_vf_device.h"
0032 
0033 /** Default configuration
0034  *  for CN66XX OCTEON Models.
0035  */
0036 static struct octeon_config default_cn66xx_conf = {
0037     .card_type                              = LIO_210SV,
0038     .card_name                              = LIO_210SV_NAME,
0039 
0040     /** IQ attributes */
0041     .iq                 = {
0042         .max_iqs            = CN6XXX_CFG_IO_QUEUES,
0043         .pending_list_size      =
0044             (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
0045         .instr_type         = OCTEON_64BYTE_INSTR,
0046         .db_min             = CN6XXX_DB_MIN,
0047         .db_timeout         = CN6XXX_DB_TIMEOUT,
0048     }
0049     ,
0050 
0051     /** OQ attributes */
0052     .oq                 = {
0053         .max_oqs            = CN6XXX_CFG_IO_QUEUES,
0054         .refill_threshold       = CN6XXX_OQ_REFIL_THRESHOLD,
0055         .oq_intr_pkt            = CN6XXX_OQ_INTR_PKT,
0056         .oq_intr_time           = CN6XXX_OQ_INTR_TIME,
0057         .pkts_per_intr          = CN6XXX_OQ_PKTSPER_INTR,
0058     }
0059     ,
0060 
0061     .num_nic_ports              = DEFAULT_NUM_NIC_PORTS_66XX,
0062     .num_def_rx_descs           = CN6XXX_MAX_OQ_DESCRIPTORS,
0063     .num_def_tx_descs           = CN6XXX_MAX_IQ_DESCRIPTORS,
0064     .def_rx_buf_size            = CN6XXX_OQ_BUF_SIZE,
0065 
0066     /* For ethernet interface 0:  Port cfg Attributes */
0067     .nic_if_cfg[0] = {
0068         /* Max Txqs: Half for each of the two ports :max_iq/2 */
0069         .max_txqs           = MAX_TXQS_PER_INTF,
0070 
0071         /* Actual configured value. Range could be: 1...max_txqs */
0072         .num_txqs           = DEF_TXQS_PER_INTF,
0073 
0074         /* Max Rxqs: Half for each of the two ports :max_oq/2  */
0075         .max_rxqs           = MAX_RXQS_PER_INTF,
0076 
0077         /* Actual configured value. Range could be: 1...max_rxqs */
0078         .num_rxqs           = DEF_RXQS_PER_INTF,
0079 
0080         /* Num of desc for rx rings */
0081         .num_rx_descs           = CN6XXX_MAX_OQ_DESCRIPTORS,
0082 
0083         /* Num of desc for tx rings */
0084         .num_tx_descs           = CN6XXX_MAX_IQ_DESCRIPTORS,
0085 
0086         /* SKB size, We need not change buf size even for Jumbo frames.
0087          * Octeon can send jumbo frames in 4 consecutive descriptors,
0088          */
0089         .rx_buf_size            = CN6XXX_OQ_BUF_SIZE,
0090 
0091         .base_queue         = BASE_QUEUE_NOT_REQUESTED,
0092 
0093         .gmx_port_id            = 0,
0094     },
0095 
0096     .nic_if_cfg[1] = {
0097         /* Max Txqs: Half for each of the two ports :max_iq/2 */
0098         .max_txqs           = MAX_TXQS_PER_INTF,
0099 
0100         /* Actual configured value. Range could be: 1...max_txqs */
0101         .num_txqs           = DEF_TXQS_PER_INTF,
0102 
0103         /* Max Rxqs: Half for each of the two ports :max_oq/2  */
0104         .max_rxqs           = MAX_RXQS_PER_INTF,
0105 
0106         /* Actual configured value. Range could be: 1...max_rxqs */
0107         .num_rxqs           = DEF_RXQS_PER_INTF,
0108 
0109         /* Num of desc for rx rings */
0110         .num_rx_descs           = CN6XXX_MAX_OQ_DESCRIPTORS,
0111 
0112         /* Num of desc for tx rings */
0113         .num_tx_descs           = CN6XXX_MAX_IQ_DESCRIPTORS,
0114 
0115         /* SKB size, We need not change buf size even for Jumbo frames.
0116          * Octeon can send jumbo frames in 4 consecutive descriptors,
0117          */
0118         .rx_buf_size            = CN6XXX_OQ_BUF_SIZE,
0119 
0120         .base_queue         = BASE_QUEUE_NOT_REQUESTED,
0121 
0122         .gmx_port_id            = 1,
0123     },
0124 
0125     /** Miscellaneous attributes */
0126     .misc                   = {
0127         /* Host driver link query interval */
0128         .oct_link_query_interval    = 100,
0129 
0130         /* Octeon link query interval */
0131         .host_link_query_interval   = 500,
0132 
0133         .enable_sli_oq_bp       = 0,
0134 
0135         /* Control queue group */
0136         .ctrlq_grp          = 1,
0137     }
0138     ,
0139 };
0140 
0141 /** Default configuration
0142  *  for CN68XX OCTEON Model.
0143  */
0144 
0145 static struct octeon_config default_cn68xx_conf = {
0146     .card_type                              = LIO_410NV,
0147     .card_name                              = LIO_410NV_NAME,
0148 
0149     /** IQ attributes */
0150     .iq                 = {
0151         .max_iqs            = CN6XXX_CFG_IO_QUEUES,
0152         .pending_list_size      =
0153             (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
0154         .instr_type         = OCTEON_64BYTE_INSTR,
0155         .db_min             = CN6XXX_DB_MIN,
0156         .db_timeout         = CN6XXX_DB_TIMEOUT,
0157     }
0158     ,
0159 
0160     /** OQ attributes */
0161     .oq                 = {
0162         .max_oqs            = CN6XXX_CFG_IO_QUEUES,
0163         .refill_threshold       = CN6XXX_OQ_REFIL_THRESHOLD,
0164         .oq_intr_pkt            = CN6XXX_OQ_INTR_PKT,
0165         .oq_intr_time           = CN6XXX_OQ_INTR_TIME,
0166         .pkts_per_intr          = CN6XXX_OQ_PKTSPER_INTR,
0167     }
0168     ,
0169 
0170     .num_nic_ports              = DEFAULT_NUM_NIC_PORTS_68XX,
0171     .num_def_rx_descs           = CN6XXX_MAX_OQ_DESCRIPTORS,
0172     .num_def_tx_descs           = CN6XXX_MAX_IQ_DESCRIPTORS,
0173     .def_rx_buf_size            = CN6XXX_OQ_BUF_SIZE,
0174 
0175     .nic_if_cfg[0] = {
0176         /* Max Txqs: Half for each of the two ports :max_iq/2 */
0177         .max_txqs           = MAX_TXQS_PER_INTF,
0178 
0179         /* Actual configured value. Range could be: 1...max_txqs */
0180         .num_txqs           = DEF_TXQS_PER_INTF,
0181 
0182         /* Max Rxqs: Half for each of the two ports :max_oq/2  */
0183         .max_rxqs           = MAX_RXQS_PER_INTF,
0184 
0185         /* Actual configured value. Range could be: 1...max_rxqs */
0186         .num_rxqs           = DEF_RXQS_PER_INTF,
0187 
0188         /* Num of desc for rx rings */
0189         .num_rx_descs           = CN6XXX_MAX_OQ_DESCRIPTORS,
0190 
0191         /* Num of desc for tx rings */
0192         .num_tx_descs           = CN6XXX_MAX_IQ_DESCRIPTORS,
0193 
0194         /* SKB size, We need not change buf size even for Jumbo frames.
0195          * Octeon can send jumbo frames in 4 consecutive descriptors,
0196          */
0197         .rx_buf_size            = CN6XXX_OQ_BUF_SIZE,
0198 
0199         .base_queue         = BASE_QUEUE_NOT_REQUESTED,
0200 
0201         .gmx_port_id            = 0,
0202     },
0203 
0204     .nic_if_cfg[1] = {
0205         /* Max Txqs: Half for each of the two ports :max_iq/2 */
0206         .max_txqs           = MAX_TXQS_PER_INTF,
0207 
0208         /* Actual configured value. Range could be: 1...max_txqs */
0209         .num_txqs           = DEF_TXQS_PER_INTF,
0210 
0211         /* Max Rxqs: Half for each of the two ports :max_oq/2  */
0212         .max_rxqs           = MAX_RXQS_PER_INTF,
0213 
0214         /* Actual configured value. Range could be: 1...max_rxqs */
0215         .num_rxqs           = DEF_RXQS_PER_INTF,
0216 
0217         /* Num of desc for rx rings */
0218         .num_rx_descs           = CN6XXX_MAX_OQ_DESCRIPTORS,
0219 
0220         /* Num of desc for tx rings */
0221         .num_tx_descs           = CN6XXX_MAX_IQ_DESCRIPTORS,
0222 
0223         /* SKB size, We need not change buf size even for Jumbo frames.
0224          * Octeon can send jumbo frames in 4 consecutive descriptors,
0225          */
0226         .rx_buf_size            = CN6XXX_OQ_BUF_SIZE,
0227 
0228         .base_queue         = BASE_QUEUE_NOT_REQUESTED,
0229 
0230         .gmx_port_id            = 1,
0231     },
0232 
0233     .nic_if_cfg[2] = {
0234         /* Max Txqs: Half for each of the two ports :max_iq/2 */
0235         .max_txqs           = MAX_TXQS_PER_INTF,
0236 
0237         /* Actual configured value. Range could be: 1...max_txqs */
0238         .num_txqs           = DEF_TXQS_PER_INTF,
0239 
0240         /* Max Rxqs: Half for each of the two ports :max_oq/2  */
0241         .max_rxqs           = MAX_RXQS_PER_INTF,
0242 
0243         /* Actual configured value. Range could be: 1...max_rxqs */
0244         .num_rxqs           = DEF_RXQS_PER_INTF,
0245 
0246         /* Num of desc for rx rings */
0247         .num_rx_descs           = CN6XXX_MAX_OQ_DESCRIPTORS,
0248 
0249         /* Num of desc for tx rings */
0250         .num_tx_descs           = CN6XXX_MAX_IQ_DESCRIPTORS,
0251 
0252         /* SKB size, We need not change buf size even for Jumbo frames.
0253          * Octeon can send jumbo frames in 4 consecutive descriptors,
0254          */
0255         .rx_buf_size            = CN6XXX_OQ_BUF_SIZE,
0256 
0257         .base_queue         = BASE_QUEUE_NOT_REQUESTED,
0258 
0259         .gmx_port_id            = 2,
0260     },
0261 
0262     .nic_if_cfg[3] = {
0263         /* Max Txqs: Half for each of the two ports :max_iq/2 */
0264         .max_txqs           = MAX_TXQS_PER_INTF,
0265 
0266         /* Actual configured value. Range could be: 1...max_txqs */
0267         .num_txqs           = DEF_TXQS_PER_INTF,
0268 
0269         /* Max Rxqs: Half for each of the two ports :max_oq/2  */
0270         .max_rxqs           = MAX_RXQS_PER_INTF,
0271 
0272         /* Actual configured value. Range could be: 1...max_rxqs */
0273         .num_rxqs           = DEF_RXQS_PER_INTF,
0274 
0275         /* Num of desc for rx rings */
0276         .num_rx_descs           = CN6XXX_MAX_OQ_DESCRIPTORS,
0277 
0278         /* Num of desc for tx rings */
0279         .num_tx_descs           = CN6XXX_MAX_IQ_DESCRIPTORS,
0280 
0281         /* SKB size, We need not change buf size even for Jumbo frames.
0282          * Octeon can send jumbo frames in 4 consecutive descriptors,
0283          */
0284         .rx_buf_size            = CN6XXX_OQ_BUF_SIZE,
0285 
0286         .base_queue         = BASE_QUEUE_NOT_REQUESTED,
0287 
0288         .gmx_port_id            = 3,
0289     },
0290 
0291     /** Miscellaneous attributes */
0292     .misc                   = {
0293         /* Host driver link query interval */
0294         .oct_link_query_interval    = 100,
0295 
0296         /* Octeon link query interval */
0297         .host_link_query_interval   = 500,
0298 
0299         .enable_sli_oq_bp       = 0,
0300 
0301         /* Control queue group */
0302         .ctrlq_grp          = 1,
0303     }
0304     ,
0305 };
0306 
0307 /** Default configuration
0308  *  for CN68XX OCTEON Model.
0309  */
0310 static struct octeon_config default_cn68xx_210nv_conf = {
0311     .card_type                              = LIO_210NV,
0312     .card_name                              = LIO_210NV_NAME,
0313 
0314     /** IQ attributes */
0315 
0316     .iq                 = {
0317         .max_iqs            = CN6XXX_CFG_IO_QUEUES,
0318         .pending_list_size      =
0319             (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
0320         .instr_type         = OCTEON_64BYTE_INSTR,
0321         .db_min             = CN6XXX_DB_MIN,
0322         .db_timeout         = CN6XXX_DB_TIMEOUT,
0323     }
0324     ,
0325 
0326     /** OQ attributes */
0327     .oq                 = {
0328         .max_oqs            = CN6XXX_CFG_IO_QUEUES,
0329         .refill_threshold       = CN6XXX_OQ_REFIL_THRESHOLD,
0330         .oq_intr_pkt            = CN6XXX_OQ_INTR_PKT,
0331         .oq_intr_time           = CN6XXX_OQ_INTR_TIME,
0332         .pkts_per_intr          = CN6XXX_OQ_PKTSPER_INTR,
0333     }
0334     ,
0335 
0336     .num_nic_ports          = DEFAULT_NUM_NIC_PORTS_68XX_210NV,
0337     .num_def_rx_descs       = CN6XXX_MAX_OQ_DESCRIPTORS,
0338     .num_def_tx_descs       = CN6XXX_MAX_IQ_DESCRIPTORS,
0339     .def_rx_buf_size        = CN6XXX_OQ_BUF_SIZE,
0340 
0341     .nic_if_cfg[0] = {
0342         /* Max Txqs: Half for each of the two ports :max_iq/2 */
0343         .max_txqs           = MAX_TXQS_PER_INTF,
0344 
0345         /* Actual configured value. Range could be: 1...max_txqs */
0346         .num_txqs           = DEF_TXQS_PER_INTF,
0347 
0348         /* Max Rxqs: Half for each of the two ports :max_oq/2  */
0349         .max_rxqs           = MAX_RXQS_PER_INTF,
0350 
0351         /* Actual configured value. Range could be: 1...max_rxqs */
0352         .num_rxqs           = DEF_RXQS_PER_INTF,
0353 
0354         /* Num of desc for rx rings */
0355         .num_rx_descs           = CN6XXX_MAX_OQ_DESCRIPTORS,
0356 
0357         /* Num of desc for tx rings */
0358         .num_tx_descs           = CN6XXX_MAX_IQ_DESCRIPTORS,
0359 
0360         /* SKB size, We need not change buf size even for Jumbo frames.
0361          * Octeon can send jumbo frames in 4 consecutive descriptors,
0362          */
0363         .rx_buf_size            = CN6XXX_OQ_BUF_SIZE,
0364 
0365         .base_queue         = BASE_QUEUE_NOT_REQUESTED,
0366 
0367         .gmx_port_id            = 0,
0368     },
0369 
0370     .nic_if_cfg[1] = {
0371         /* Max Txqs: Half for each of the two ports :max_iq/2 */
0372         .max_txqs           = MAX_TXQS_PER_INTF,
0373 
0374         /* Actual configured value. Range could be: 1...max_txqs */
0375         .num_txqs           = DEF_TXQS_PER_INTF,
0376 
0377         /* Max Rxqs: Half for each of the two ports :max_oq/2  */
0378         .max_rxqs           = MAX_RXQS_PER_INTF,
0379 
0380         /* Actual configured value. Range could be: 1...max_rxqs */
0381         .num_rxqs           = DEF_RXQS_PER_INTF,
0382 
0383         /* Num of desc for rx rings */
0384         .num_rx_descs           = CN6XXX_MAX_OQ_DESCRIPTORS,
0385 
0386         /* Num of desc for tx rings */
0387         .num_tx_descs           = CN6XXX_MAX_IQ_DESCRIPTORS,
0388 
0389         /* SKB size, We need not change buf size even for Jumbo frames.
0390          * Octeon can send jumbo frames in 4 consecutive descriptors,
0391          */
0392         .rx_buf_size            = CN6XXX_OQ_BUF_SIZE,
0393 
0394         .base_queue         = BASE_QUEUE_NOT_REQUESTED,
0395 
0396         .gmx_port_id            = 1,
0397     },
0398 
0399     /** Miscellaneous attributes */
0400     .misc                   = {
0401         /* Host driver link query interval */
0402         .oct_link_query_interval    = 100,
0403 
0404         /* Octeon link query interval */
0405         .host_link_query_interval   = 500,
0406 
0407         .enable_sli_oq_bp       = 0,
0408 
0409         /* Control queue group */
0410         .ctrlq_grp          = 1,
0411     }
0412     ,
0413 };
0414 
0415 static struct octeon_config default_cn23xx_conf = {
0416     .card_type                              = LIO_23XX,
0417     .card_name                              = LIO_23XX_NAME,
0418     /** IQ attributes */
0419     .iq = {
0420         .max_iqs        = CN23XX_CFG_IO_QUEUES,
0421         .pending_list_size  = (CN23XX_DEFAULT_IQ_DESCRIPTORS *
0422                        CN23XX_CFG_IO_QUEUES),
0423         .instr_type     = OCTEON_64BYTE_INSTR,
0424         .db_min         = CN23XX_DB_MIN,
0425         .db_timeout     = CN23XX_DB_TIMEOUT,
0426         .iq_intr_pkt        = CN23XX_DEF_IQ_INTR_THRESHOLD,
0427     },
0428 
0429     /** OQ attributes */
0430     .oq = {
0431         .max_oqs        = CN23XX_CFG_IO_QUEUES,
0432         .pkts_per_intr  = CN23XX_OQ_PKTSPER_INTR,
0433         .refill_threshold   = CN23XX_OQ_REFIL_THRESHOLD,
0434         .oq_intr_pkt    = CN23XX_OQ_INTR_PKT,
0435         .oq_intr_time   = CN23XX_OQ_INTR_TIME,
0436     },
0437 
0438     .num_nic_ports              = DEFAULT_NUM_NIC_PORTS_23XX,
0439     .num_def_rx_descs           = CN23XX_DEFAULT_OQ_DESCRIPTORS,
0440     .num_def_tx_descs           = CN23XX_DEFAULT_IQ_DESCRIPTORS,
0441     .def_rx_buf_size            = CN23XX_OQ_BUF_SIZE,
0442 
0443     /* For ethernet interface 0:  Port cfg Attributes */
0444     .nic_if_cfg[0] = {
0445         /* Max Txqs: Half for each of the two ports :max_iq/2 */
0446         .max_txqs           = MAX_TXQS_PER_INTF,
0447 
0448         /* Actual configured value. Range could be: 1...max_txqs */
0449         .num_txqs           = DEF_TXQS_PER_INTF,
0450 
0451         /* Max Rxqs: Half for each of the two ports :max_oq/2  */
0452         .max_rxqs           = MAX_RXQS_PER_INTF,
0453 
0454         /* Actual configured value. Range could be: 1...max_rxqs */
0455         .num_rxqs           = DEF_RXQS_PER_INTF,
0456 
0457         /* Num of desc for rx rings */
0458         .num_rx_descs           = CN23XX_DEFAULT_OQ_DESCRIPTORS,
0459 
0460         /* Num of desc for tx rings */
0461         .num_tx_descs           = CN23XX_DEFAULT_IQ_DESCRIPTORS,
0462 
0463         /* SKB size, We need not change buf size even for Jumbo frames.
0464          * Octeon can send jumbo frames in 4 consecutive descriptors,
0465          */
0466         .rx_buf_size            = CN23XX_OQ_BUF_SIZE,
0467 
0468         .base_queue         = BASE_QUEUE_NOT_REQUESTED,
0469 
0470         .gmx_port_id            = 0,
0471     },
0472 
0473     .nic_if_cfg[1] = {
0474         /* Max Txqs: Half for each of the two ports :max_iq/2 */
0475         .max_txqs           = MAX_TXQS_PER_INTF,
0476 
0477         /* Actual configured value. Range could be: 1...max_txqs */
0478         .num_txqs           = DEF_TXQS_PER_INTF,
0479 
0480         /* Max Rxqs: Half for each of the two ports :max_oq/2  */
0481         .max_rxqs           = MAX_RXQS_PER_INTF,
0482 
0483         /* Actual configured value. Range could be: 1...max_rxqs */
0484         .num_rxqs           = DEF_RXQS_PER_INTF,
0485 
0486         /* Num of desc for rx rings */
0487         .num_rx_descs           = CN23XX_DEFAULT_OQ_DESCRIPTORS,
0488 
0489         /* Num of desc for tx rings */
0490         .num_tx_descs           = CN23XX_DEFAULT_IQ_DESCRIPTORS,
0491 
0492         /* SKB size, We need not change buf size even for Jumbo frames.
0493          * Octeon can send jumbo frames in 4 consecutive descriptors,
0494          */
0495         .rx_buf_size            = CN23XX_OQ_BUF_SIZE,
0496 
0497         .base_queue         = BASE_QUEUE_NOT_REQUESTED,
0498 
0499         .gmx_port_id            = 1,
0500     },
0501 
0502     .misc                   = {
0503         /* Host driver link query interval */
0504         .oct_link_query_interval    = 100,
0505 
0506         /* Octeon link query interval */
0507         .host_link_query_interval   = 500,
0508 
0509         .enable_sli_oq_bp       = 0,
0510 
0511         /* Control queue group */
0512         .ctrlq_grp          = 1,
0513     }
0514 };
0515 
0516 static struct octeon_config_ptr {
0517     u32 conf_type;
0518 } oct_conf_info[MAX_OCTEON_DEVICES] = {
0519     {
0520         OCTEON_CONFIG_TYPE_DEFAULT,
0521     }, {
0522         OCTEON_CONFIG_TYPE_DEFAULT,
0523     }, {
0524         OCTEON_CONFIG_TYPE_DEFAULT,
0525     }, {
0526         OCTEON_CONFIG_TYPE_DEFAULT,
0527     },
0528 };
0529 
/* Human-readable name for each OCT_DEV_* state value; the extra final
 * entry ("INVALID") is returned for out-of-range states.
 */
static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
	"BEGIN", "PCI-ENABLE-DONE", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
	"IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
	"DROQ-INIT-DONE", "MBOX-SETUP-DONE", "MSIX-ALLOC-VECTOR-DONE",
	"INTR-SET-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
	"HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
	"INVALID"
};

/* Human-readable name for each CVM_DRV_* application mode, plus a final
 * "UNKNOWN" entry for invalid modes.
 */
static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = {
	"BASE", "NIC", "UNKNOWN"};

/* Table of all known octeon devices; slots are NULL when unused. */
static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES];
/* Reference count and firmware state are shared per-adapter (see
 * octeon_register_device()); both arrays are indexed by octeon_id.
 */
static atomic_t adapter_refcounts[MAX_OCTEON_DEVICES];
static atomic_t adapter_fw_states[MAX_OCTEON_DEVICES];

/* Number of live entries in octeon_device[] */
static u32 octeon_device_count;
/* locks device array (i.e. octeon_device[]) */
static DEFINE_SPINLOCK(octeon_devices_lock);

/* Per-device core (firmware) setup information. */
static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
0551 
0552 static void oct_set_config_info(int oct_id, int conf_type)
0553 {
0554     if (conf_type < 0 || conf_type > (NUM_OCTEON_CONFS - 1))
0555         conf_type = OCTEON_CONFIG_TYPE_DEFAULT;
0556     oct_conf_info[oct_id].conf_type = conf_type;
0557 }
0558 
0559 void octeon_init_device_list(int conf_type)
0560 {
0561     int i;
0562 
0563     memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
0564     for (i = 0; i <  MAX_OCTEON_DEVICES; i++)
0565         oct_set_config_info(i, conf_type);
0566 }
0567 
0568 static void *__retrieve_octeon_config_info(struct octeon_device *oct,
0569                        u16 card_type)
0570 {
0571     u32 oct_id = oct->octeon_id;
0572     void *ret = NULL;
0573 
0574     switch (oct_conf_info[oct_id].conf_type) {
0575     case OCTEON_CONFIG_TYPE_DEFAULT:
0576         if (oct->chip_id == OCTEON_CN66XX) {
0577             ret = &default_cn66xx_conf;
0578         } else if ((oct->chip_id == OCTEON_CN68XX) &&
0579                (card_type == LIO_210NV)) {
0580             ret = &default_cn68xx_210nv_conf;
0581         } else if ((oct->chip_id == OCTEON_CN68XX) &&
0582                (card_type == LIO_410NV)) {
0583             ret = &default_cn68xx_conf;
0584         } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
0585             ret = &default_cn23xx_conf;
0586         } else if (oct->chip_id == OCTEON_CN23XX_VF_VID) {
0587             ret = &default_cn23xx_conf;
0588         }
0589         break;
0590     default:
0591         break;
0592     }
0593     return ret;
0594 }
0595 
0596 static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
0597 {
0598     switch (oct->chip_id) {
0599     case OCTEON_CN66XX:
0600     case OCTEON_CN68XX:
0601         return lio_validate_cn6xxx_config_info(oct, conf);
0602     case OCTEON_CN23XX_PF_VID:
0603     case OCTEON_CN23XX_VF_VID:
0604         return 0;
0605     default:
0606         break;
0607     }
0608 
0609     return 1;
0610 }
0611 
0612 void *oct_get_config_info(struct octeon_device *oct, u16 card_type)
0613 {
0614     void *conf = NULL;
0615 
0616     conf = __retrieve_octeon_config_info(oct, card_type);
0617     if (!conf)
0618         return NULL;
0619 
0620     if (__verify_octeon_config_info(oct, conf)) {
0621         dev_err(&oct->pci_dev->dev, "Configuration verification failed\n");
0622         return NULL;
0623     }
0624 
0625     return conf;
0626 }
0627 
0628 char *lio_get_state_string(atomic_t *state_ptr)
0629 {
0630     s32 istate = (s32)atomic_read(state_ptr);
0631 
0632     if (istate > OCT_DEV_STATES || istate < 0)
0633         return oct_dev_state_str[OCT_DEV_STATE_INVALID];
0634     return oct_dev_state_str[istate];
0635 }
0636 
0637 static char *get_oct_app_string(u32 app_mode)
0638 {
0639     if (app_mode <= CVM_DRV_APP_END)
0640         return oct_dev_app_str[app_mode - CVM_DRV_APP_START];
0641     return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
0642 }
0643 
0644 void octeon_free_device_mem(struct octeon_device *oct)
0645 {
0646     int i;
0647 
0648     for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
0649         if (oct->io_qmask.oq & BIT_ULL(i))
0650             vfree(oct->droq[i]);
0651     }
0652 
0653     for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
0654         if (oct->io_qmask.iq & BIT_ULL(i))
0655             vfree(oct->instr_queue[i]);
0656     }
0657 
0658     i = oct->octeon_id;
0659     vfree(oct);
0660 
0661     octeon_device[i] = NULL;
0662     octeon_device_count--;
0663 }
0664 
0665 static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
0666                             u32 priv_size)
0667 {
0668     struct octeon_device *oct;
0669     u8 *buf = NULL;
0670     u32 octdevsize = 0, configsize = 0, size;
0671 
0672     switch (pci_id) {
0673     case OCTEON_CN68XX:
0674     case OCTEON_CN66XX:
0675         configsize = sizeof(struct octeon_cn6xxx);
0676         break;
0677 
0678     case OCTEON_CN23XX_PF_VID:
0679         configsize = sizeof(struct octeon_cn23xx_pf);
0680         break;
0681     case OCTEON_CN23XX_VF_VID:
0682         configsize = sizeof(struct octeon_cn23xx_vf);
0683         break;
0684     default:
0685         pr_err("%s: Unknown PCI Device: 0x%x\n",
0686                __func__,
0687                pci_id);
0688         return NULL;
0689     }
0690 
0691     if (configsize & 0x7)
0692         configsize += (8 - (configsize & 0x7));
0693 
0694     octdevsize = sizeof(struct octeon_device);
0695     if (octdevsize & 0x7)
0696         octdevsize += (8 - (octdevsize & 0x7));
0697 
0698     if (priv_size & 0x7)
0699         priv_size += (8 - (priv_size & 0x7));
0700 
0701     size = octdevsize + priv_size + configsize +
0702         (sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);
0703 
0704     buf = vzalloc(size);
0705     if (!buf)
0706         return NULL;
0707 
0708     oct = (struct octeon_device *)buf;
0709     oct->priv = (void *)(buf + octdevsize);
0710     oct->chip = (void *)(buf + octdevsize + priv_size);
0711     oct->dispatch.dlist = (struct octeon_dispatch *)
0712         (buf + octdevsize + priv_size + configsize);
0713 
0714     return oct;
0715 }
0716 
0717 struct octeon_device *octeon_allocate_device(u32 pci_id,
0718                          u32 priv_size)
0719 {
0720     u32 oct_idx = 0;
0721     struct octeon_device *oct = NULL;
0722 
0723     spin_lock(&octeon_devices_lock);
0724 
0725     for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++)
0726         if (!octeon_device[oct_idx])
0727             break;
0728 
0729     if (oct_idx < MAX_OCTEON_DEVICES) {
0730         oct = octeon_allocate_device_mem(pci_id, priv_size);
0731         if (oct) {
0732             octeon_device_count++;
0733             octeon_device[oct_idx] = oct;
0734         }
0735     }
0736 
0737     spin_unlock(&octeon_devices_lock);
0738     if (!oct)
0739         return NULL;
0740 
0741     spin_lock_init(&oct->pci_win_lock);
0742     spin_lock_init(&oct->mem_access_lock);
0743 
0744     oct->octeon_id = oct_idx;
0745     snprintf(oct->device_name, sizeof(oct->device_name),
0746          "LiquidIO%d", (oct->octeon_id));
0747 
0748     return oct;
0749 }
0750 
/** Register a device's bus location at initialization time.
 *  @param octeon_dev - pointer to the octeon device structure.
 *  @param bus        - PCIe bus #
 *  @param dev        - PCIe device #
 *  @param func       - PCIe function #
 *  @param is_pf      - TRUE for PF, FALSE for VF
 *  @return reference count of device's adapter
 */
int octeon_register_device(struct octeon_device *oct,
			   int bus, int dev, int func, int is_pf)
{
	int idx, refcount;

	oct->loc.bus = bus;
	oct->loc.dev = dev;
	oct->loc.func = func;

	/* Start with this device's own per-slot counter; it may be
	 * replaced below by a sibling function's counter.
	 */
	oct->adapter_refcount = &adapter_refcounts[oct->octeon_id];
	atomic_set(oct->adapter_refcount, 0);

	/* Like the reference count, the f/w state is shared 'per-adapter' */
	oct->adapter_fw_state = &adapter_fw_states[oct->octeon_id];
	atomic_set(oct->adapter_fw_state, FW_NEEDS_TO_BE_LOADED);

	/* Scan previously-registered devices (lower slot indices) for one
	 * on the same bus/dev — i.e. another function of the same adapter.
	 */
	spin_lock(&octeon_devices_lock);
	for (idx = (int)oct->octeon_id - 1; idx >= 0; idx--) {
		if (!octeon_device[idx]) {
			/* Slots below ours should all be populated; a hole
			 * indicates inconsistent driver state, so give up
			 * and report a refcount of 1 (just this device).
			 */
			dev_err(&oct->pci_dev->dev,
				"%s: Internal driver error, missing dev",
				__func__);
			spin_unlock(&octeon_devices_lock);
			atomic_inc(oct->adapter_refcount);
			return 1; /* here, refcount is guaranteed to be 1 */
		}
		/* If another device is at same bus/dev, use its refcounter
		 * (and f/w state variable).
		 */
		if ((octeon_device[idx]->loc.bus == bus) &&
		    (octeon_device[idx]->loc.dev == dev)) {
			oct->adapter_refcount =
				octeon_device[idx]->adapter_refcount;
			oct->adapter_fw_state =
				octeon_device[idx]->adapter_fw_state;
			break;
		}
	}
	spin_unlock(&octeon_devices_lock);

	/* Count this device against whichever adapter counter we settled on */
	atomic_inc(oct->adapter_refcount);
	refcount = atomic_read(oct->adapter_refcount);

	dev_dbg(&oct->pci_dev->dev, "%s: %02x:%02x:%d refcount %u", __func__,
		oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);

	return refcount;
}
0807 
0808 /** Deregister a device at de-initialization time.
0809  *  @param octeon_dev - pointer to the octeon device structure.
0810  *  @return reference count of device's adapter
0811  */
0812 int octeon_deregister_device(struct octeon_device *oct)
0813 {
0814     int refcount;
0815 
0816     atomic_dec(oct->adapter_refcount);
0817     refcount = atomic_read(oct->adapter_refcount);
0818 
0819     dev_dbg(&oct->pci_dev->dev, "%s: %04d:%02d:%d refcount %u", __func__,
0820         oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);
0821 
0822     return refcount;
0823 }
0824 
0825 int
0826 octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs)
0827 {
0828     struct octeon_ioq_vector *ioq_vector;
0829     int cpu_num;
0830     int size;
0831     int i;
0832 
0833     size = sizeof(struct octeon_ioq_vector) * num_ioqs;
0834 
0835     oct->ioq_vector = vzalloc(size);
0836     if (!oct->ioq_vector)
0837         return -1;
0838     for (i = 0; i < num_ioqs; i++) {
0839         ioq_vector      = &oct->ioq_vector[i];
0840         ioq_vector->oct_dev = oct;
0841         ioq_vector->iq_index    = i;
0842         ioq_vector->droq_index  = i;
0843         ioq_vector->mbox    = oct->mbox[i];
0844 
0845         cpu_num = i % num_online_cpus();
0846         cpumask_set_cpu(cpu_num, &ioq_vector->affinity_mask);
0847 
0848         if (oct->chip_id == OCTEON_CN23XX_PF_VID)
0849             ioq_vector->ioq_num = i + oct->sriov_info.pf_srn;
0850         else
0851             ioq_vector->ioq_num = i;
0852     }
0853 
0854     return 0;
0855 }
0856 
0857 void
0858 octeon_free_ioq_vector(struct octeon_device *oct)
0859 {
0860     vfree(oct->ioq_vector);
0861 }
0862 
0863 /* this function is only for setting up the first queue */
0864 int octeon_setup_instr_queues(struct octeon_device *oct)
0865 {
0866     u32 num_descs = 0;
0867     u32 iq_no = 0;
0868     union oct_txpciq txpciq;
0869     int numa_node = dev_to_node(&oct->pci_dev->dev);
0870 
0871     if (OCTEON_CN6XXX(oct))
0872         num_descs =
0873             CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn6xxx));
0874     else if (OCTEON_CN23XX_PF(oct))
0875         num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_pf));
0876     else if (OCTEON_CN23XX_VF(oct))
0877         num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_vf));
0878 
0879     oct->num_iqs = 0;
0880 
0881     oct->instr_queue[0] = vzalloc_node(sizeof(*oct->instr_queue[0]),
0882                 numa_node);
0883     if (!oct->instr_queue[0])
0884         oct->instr_queue[0] =
0885             vzalloc(sizeof(struct octeon_instr_queue));
0886     if (!oct->instr_queue[0])
0887         return 1;
0888     memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue));
0889     oct->instr_queue[0]->q_index = 0;
0890     oct->instr_queue[0]->app_ctx = (void *)(size_t)0;
0891     oct->instr_queue[0]->ifidx = 0;
0892     txpciq.u64 = 0;
0893     txpciq.s.q_no = iq_no;
0894     txpciq.s.pkind = oct->pfvf_hsword.pkind;
0895     txpciq.s.use_qpg = 0;
0896     txpciq.s.qpg = 0;
0897     if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
0898         /* prevent memory leak */
0899         vfree(oct->instr_queue[0]);
0900         oct->instr_queue[0] = NULL;
0901         return 1;
0902     }
0903 
0904     oct->num_iqs++;
0905     return 0;
0906 }
0907 
0908 int octeon_setup_output_queues(struct octeon_device *oct)
0909 {
0910     u32 num_descs = 0;
0911     u32 desc_size = 0;
0912     u32 oq_no = 0;
0913     int numa_node = dev_to_node(&oct->pci_dev->dev);
0914 
0915     if (OCTEON_CN6XXX(oct)) {
0916         num_descs =
0917             CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn6xxx));
0918         desc_size =
0919             CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn6xxx));
0920     } else if (OCTEON_CN23XX_PF(oct)) {
0921         num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_pf));
0922         desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_pf));
0923     } else if (OCTEON_CN23XX_VF(oct)) {
0924         num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_vf));
0925         desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_vf));
0926     }
0927     oct->num_oqs = 0;
0928     oct->droq[0] = vzalloc_node(sizeof(*oct->droq[0]), numa_node);
0929     if (!oct->droq[0])
0930         oct->droq[0] = vzalloc(sizeof(*oct->droq[0]));
0931     if (!oct->droq[0])
0932         return 1;
0933 
0934     if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL)) {
0935         vfree(oct->droq[oq_no]);
0936         oct->droq[oq_no] = NULL;
0937         return 1;
0938     }
0939     oct->num_oqs++;
0940 
0941     return 0;
0942 }
0943 
/* Disable all I/O queues on the device.
 * CN6XXX: clear the global input/output enable registers.
 * CN23XX VF: the IOQs are expected to already be in reset; for each
 * ring, wait for QUIET while RST is set, then clear RST and read back
 * to verify it took effect.
 * Returns 0 on success, -1 if a VF queue fails to leave reset.
 */
int octeon_set_io_queues_off(struct octeon_device *oct)
{
    /* NOTE(review): this poll budget is shared across ALL rings in the
     * VF loop below (not reset per ring) -- confirm that is intended.
     */
    int loop = BUSY_READING_REG_VF_LOOP_COUNT;

    if (OCTEON_CN6XXX(oct)) {
        octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
        octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
    } else if (oct->chip_id == OCTEON_CN23XX_VF_VID) {
        u32 q_no;

        /* IOQs will already be in reset.
         * If RST bit is set, wait for quiet bit to be set.
         * Once quiet bit is set, clear the RST bit.
         */
        for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {
            /* NOTE(review): the initial read uses the VF-relative
             * CN23XX_VF_SLI_IQ_PKT_CONTROL64 macro while the re-reads
             * below use CN23XX_SLI_IQ_PKT_CONTROL64 -- presumably both
             * resolve to the same offset for a VF; verify against the
             * register definitions.
             */
            u64 reg_val = octeon_read_csr64(
                oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));

            while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
                   !(reg_val &  CN23XX_PKT_INPUT_CTL_QUIET) &&
                   loop) {
                reg_val = octeon_read_csr64(
                    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
                loop--;
            }
            if (!loop) {
                dev_err(&oct->pci_dev->dev,
                    "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
                    q_no);
                return -1;
            }

            /* Queue is quiet: drop RST and confirm it sticks. */
            reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
            octeon_write_csr64(oct,
                       CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
                       reg_val);

            reg_val = octeon_read_csr64(
                    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
            if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
                dev_err(&oct->pci_dev->dev,
                    "unable to reset qno %u\n", q_no);
                return -1;
            }
        }
    }
    return 0;
}
0992 
0993 void octeon_set_droq_pkt_op(struct octeon_device *oct,
0994                 u32 q_no,
0995                 u32 enable)
0996 {
0997     u32 reg_val = 0;
0998 
0999     /* Disable the i/p and o/p queues for this Octeon. */
1000     if (OCTEON_CN6XXX(oct)) {
1001         reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
1002 
1003         if (enable)
1004             reg_val = reg_val | (1 << q_no);
1005         else
1006             reg_val = reg_val & (~(1 << q_no));
1007 
1008         octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
1009     }
1010 }
1011 
1012 int octeon_init_dispatch_list(struct octeon_device *oct)
1013 {
1014     u32 i;
1015 
1016     oct->dispatch.count = 0;
1017 
1018     for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
1019         oct->dispatch.dlist[i].opcode = 0;
1020         INIT_LIST_HEAD(&oct->dispatch.dlist[i].list);
1021     }
1022 
1023     for (i = 0; i <= REQTYPE_LAST; i++)
1024         octeon_register_reqtype_free_fn(oct, i, NULL);
1025 
1026     spin_lock_init(&oct->dispatch.lock);
1027 
1028     return 0;
1029 }
1030 
1031 void octeon_delete_dispatch_list(struct octeon_device *oct)
1032 {
1033     u32 i;
1034     struct list_head freelist, *temp, *tmp2;
1035 
1036     INIT_LIST_HEAD(&freelist);
1037 
1038     spin_lock_bh(&oct->dispatch.lock);
1039 
1040     for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
1041         struct list_head *dispatch;
1042 
1043         dispatch = &oct->dispatch.dlist[i].list;
1044         while (dispatch->next != dispatch) {
1045             temp = dispatch->next;
1046             list_move_tail(temp, &freelist);
1047         }
1048 
1049         oct->dispatch.dlist[i].opcode = 0;
1050     }
1051 
1052     oct->dispatch.count = 0;
1053 
1054     spin_unlock_bh(&oct->dispatch.lock);
1055 
1056     list_for_each_safe(temp, tmp2, &freelist) {
1057         list_del(temp);
1058         kfree(temp);
1059     }
1060 }
1061 
1062 octeon_dispatch_fn_t
1063 octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
1064             u16 subcode)
1065 {
1066     u32 idx;
1067     struct list_head *dispatch;
1068     octeon_dispatch_fn_t fn = NULL;
1069     u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
1070 
1071     idx = combined_opcode & OCTEON_OPCODE_MASK;
1072 
1073     spin_lock_bh(&octeon_dev->dispatch.lock);
1074 
1075     if (octeon_dev->dispatch.count == 0) {
1076         spin_unlock_bh(&octeon_dev->dispatch.lock);
1077         return NULL;
1078     }
1079 
1080     if (!(octeon_dev->dispatch.dlist[idx].opcode)) {
1081         spin_unlock_bh(&octeon_dev->dispatch.lock);
1082         return NULL;
1083     }
1084 
1085     if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
1086         fn = octeon_dev->dispatch.dlist[idx].dispatch_fn;
1087     } else {
1088         list_for_each(dispatch,
1089                   &octeon_dev->dispatch.dlist[idx].list) {
1090             if (((struct octeon_dispatch *)dispatch)->opcode ==
1091                 combined_opcode) {
1092                 fn = ((struct octeon_dispatch *)
1093                       dispatch)->dispatch_fn;
1094                 break;
1095             }
1096         }
1097     }
1098 
1099     spin_unlock_bh(&octeon_dev->dispatch.lock);
1100     return fn;
1101 }
1102 
1103 /* octeon_register_dispatch_fn
1104  * Parameters:
1105  *   octeon_id - id of the octeon device.
1106  *   opcode    - opcode for which driver should call the registered function
1107  *   subcode   - subcode for which driver should call the registered function
1108  *   fn        - The function to call when a packet with "opcode" arrives in
1109  *        octeon output queues.
1110  *   fn_arg    - The argument to be passed when calling function "fn".
1111  * Description:
1112  *   Registers a function and its argument to be called when a packet
1113  *   arrives in Octeon output queues with "opcode".
1114  * Returns:
1115  *   Success: 0
1116  *   Failure: 1
1117  * Locks:
1118  *   No locks are held.
1119  */
int
octeon_register_dispatch_fn(struct octeon_device *oct,
                u16 opcode,
                u16 subcode,
                octeon_dispatch_fn_t fn, void *fn_arg)
{
    u32 idx;
    octeon_dispatch_fn_t pfn;
    /* opcode/subcode fold into one value; the low bits select the
     * first-level table slot.
     */
    u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

    idx = combined_opcode & OCTEON_OPCODE_MASK;

    spin_lock_bh(&oct->dispatch.lock);
    /* Add dispatch function to first level of lookup table */
    if (oct->dispatch.dlist[idx].opcode == 0) {
        oct->dispatch.dlist[idx].opcode = combined_opcode;
        oct->dispatch.dlist[idx].dispatch_fn = fn;
        oct->dispatch.dlist[idx].arg = fn_arg;
        oct->dispatch.count++;
        spin_unlock_bh(&oct->dispatch.lock);
        return 0;
    }

    spin_unlock_bh(&oct->dispatch.lock);

    /* Check if there was a function already registered for this
     * opcode/subcode.
     * NOTE(review): the lock is dropped here and re-taken below, so two
     * concurrent registrations of the same opcode could each see "not
     * registered" and both chain an entry -- confirm registration is
     * serialized by the callers.
     */
    pfn = octeon_get_dispatch(oct, opcode, subcode);
    if (!pfn) {
        struct octeon_dispatch *dispatch;

        dev_dbg(&oct->pci_dev->dev,
            "Adding opcode to dispatch list linked list\n");
        dispatch = kmalloc(sizeof(*dispatch), GFP_KERNEL);
        if (!dispatch)
            return 1;

        dispatch->opcode = combined_opcode;
        dispatch->dispatch_fn = fn;
        dispatch->arg = fn_arg;

        /* Add dispatch function to linked list of fn ptrs
         * at the hashed index.
         */
        spin_lock_bh(&oct->dispatch.lock);
        list_add(&dispatch->list, &oct->dispatch.dlist[idx].list);
        oct->dispatch.count++;
        spin_unlock_bh(&oct->dispatch.lock);

    } else {
        /* Re-registering the identical fn/arg pair is a no-op. */
        if (pfn == fn &&
            octeon_get_dispatch_arg(oct, opcode, subcode) == fn_arg)
            return 0;

        dev_err(&oct->pci_dev->dev,
            "Found previously registered dispatch fn for opcode/subcode: %x/%x\n",
            opcode, subcode);
        return 1;
    }

    return 0;
}
1183 
1184 int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
1185 {
1186     u32 i;
1187     char app_name[16];
1188     struct octeon_device *oct = (struct octeon_device *)buf;
1189     struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
1190     struct octeon_core_setup *cs = NULL;
1191     u32 num_nic_ports = 0;
1192 
1193     if (OCTEON_CN6XXX(oct))
1194         num_nic_ports =
1195             CFG_GET_NUM_NIC_PORTS(CHIP_CONF(oct, cn6xxx));
1196     else if (OCTEON_CN23XX_PF(oct))
1197         num_nic_ports =
1198             CFG_GET_NUM_NIC_PORTS(CHIP_CONF(oct, cn23xx_pf));
1199 
1200     if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
1201         dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
1202             atomic_read(&oct->status));
1203         goto core_drv_init_err;
1204     }
1205 
1206     strncpy(app_name,
1207         get_oct_app_string(
1208         (u32)recv_pkt->rh.r_core_drv_init.app_mode),
1209         sizeof(app_name) - 1);
1210     oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
1211     if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) {
1212         oct->fw_info.max_nic_ports =
1213             (u32)recv_pkt->rh.r_core_drv_init.max_nic_ports;
1214         oct->fw_info.num_gmx_ports =
1215             (u32)recv_pkt->rh.r_core_drv_init.num_gmx_ports;
1216     }
1217 
1218     if (oct->fw_info.max_nic_ports < num_nic_ports) {
1219         dev_err(&oct->pci_dev->dev,
1220             "Config has more ports than firmware allows (%d > %d).\n",
1221             num_nic_ports, oct->fw_info.max_nic_ports);
1222         goto core_drv_init_err;
1223     }
1224     oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags;
1225     oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
1226     oct->pfvf_hsword.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
1227 
1228     oct->pfvf_hsword.pkind = recv_pkt->rh.r_core_drv_init.pkind;
1229 
1230     for (i = 0; i < oct->num_iqs; i++)
1231         oct->instr_queue[i]->txpciq.s.pkind = oct->pfvf_hsword.pkind;
1232 
1233     atomic_set(&oct->status, OCT_DEV_CORE_OK);
1234 
1235     cs = &core_setup[oct->octeon_id];
1236 
1237     if (recv_pkt->buffer_size[0] != (sizeof(*cs) + OCT_DROQ_INFO_SIZE)) {
1238         dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n",
1239             (u32)sizeof(*cs),
1240             recv_pkt->buffer_size[0]);
1241     }
1242 
1243     memcpy(cs, get_rbd(
1244            recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE, sizeof(*cs));
1245 
1246     strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
1247     strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
1248         OCT_SERIAL_LEN);
1249 
1250     octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3));
1251 
1252     oct->boardinfo.major = cs->board_rev_major;
1253     oct->boardinfo.minor = cs->board_rev_minor;
1254 
1255     dev_info(&oct->pci_dev->dev,
1256          "Running %s (%llu Hz)\n",
1257          app_name, CVM_CAST64(cs->corefreq));
1258 
1259 core_drv_init_err:
1260     for (i = 0; i < recv_pkt->buffer_count; i++)
1261         recv_buffer_free(recv_pkt->buffer_ptr[i]);
1262     octeon_free_recv_info(recv_info);
1263     return 0;
1264 }
1265 
1266 int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
1267 
1268 {
1269     if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) &&
1270         (oct->io_qmask.iq & BIT_ULL(q_no)))
1271         return oct->instr_queue[q_no]->max_count;
1272 
1273     return -1;
1274 }
1275 
1276 int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
1277 {
1278     if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) &&
1279         (oct->io_qmask.oq & BIT_ULL(q_no)))
1280         return oct->droq[q_no]->max_count;
1281     return -1;
1282 }
1283 
1284 /* Retruns the host firmware handshake OCTEON specific configuration */
1285 struct octeon_config *octeon_get_conf(struct octeon_device *oct)
1286 {
1287     struct octeon_config *default_oct_conf = NULL;
1288 
1289     /* check the OCTEON Device model & return the corresponding octeon
1290      * configuration
1291      */
1292 
1293     if (OCTEON_CN6XXX(oct)) {
1294         default_oct_conf =
1295             (struct octeon_config *)(CHIP_CONF(oct, cn6xxx));
1296     } else if (OCTEON_CN23XX_PF(oct)) {
1297         default_oct_conf = (struct octeon_config *)
1298             (CHIP_CONF(oct, cn23xx_pf));
1299     } else if (OCTEON_CN23XX_VF(oct)) {
1300         default_oct_conf = (struct octeon_config *)
1301             (CHIP_CONF(oct, cn23xx_vf));
1302     }
1303     return default_oct_conf;
1304 }
1305 
1306 /* scratch register address is same in all the OCT-II and CN70XX models */
1307 #define CNXX_SLI_SCRATCH1   0x3C0
1308 
1309 /* Get the octeon device pointer.
1310  *  @param octeon_id  - The id for which the octeon device pointer is required.
1311  *  @return Success: Octeon device pointer.
1312  *  @return Failure: NULL.
1313  */
1314 struct octeon_device *lio_get_device(u32 octeon_id)
1315 {
1316     if (octeon_id >= MAX_OCTEON_DEVICES)
1317         return NULL;
1318     else
1319         return octeon_device[octeon_id];
1320 }
1321 
/* Read a 64-bit value from the Octeon's internal address space through
 * the PCI window registers.  The window (addr-hi, addr-lo, data) is
 * shared device-wide state, so the whole sequence is serialized by
 * pci_win_lock with interrupts disabled.
 */
u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
{
    u64 val64;
    unsigned long flags;
    u32 addrhi;

    spin_lock_irqsave(&oct->pci_win_lock, flags);

    /* The windowed read happens when the LSB of the addr is written.
     * So write MSB first
     */
    addrhi = (addr >> 32);
    /* NOTE(review): 0x00060000 is OR'ed into the high address word on
     * CN66XX/CN68XX/CN23XX-PF -- presumably chip-specific address-space
     * select bits; confirm against the hardware reference manual.
     */
    if ((oct->chip_id == OCTEON_CN66XX) ||
        (oct->chip_id == OCTEON_CN68XX) ||
        (oct->chip_id == OCTEON_CN23XX_PF_VID))
        addrhi |= 0x00060000;
    writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);

    /* Read back to preserve ordering of writes */
    readl(oct->reg_list.pci_win_rd_addr_hi);

    /* Writing the low word triggers the actual windowed read. */
    writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
    readl(oct->reg_list.pci_win_rd_addr_lo);

    val64 = readq(oct->reg_list.pci_win_rd_data);

    spin_unlock_irqrestore(&oct->pci_win_lock, flags);

    return val64;
}
1352 
/* Write a 64-bit value into the Octeon's internal address space through
 * the PCI window registers.  Serialized by pci_win_lock because the
 * window registers are shared device-wide state.
 */
void lio_pci_writeq(struct octeon_device *oct,
            u64 val,
            u64 addr)
{
    unsigned long flags;

    spin_lock_irqsave(&oct->pci_win_lock, flags);

    writeq(addr, oct->reg_list.pci_win_wr_addr);

    /* The write happens when the LSB is written. So write MSB first. */
    writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
    /* Read the MSB to ensure ordering of writes. */
    readl(oct->reg_list.pci_win_wr_data_hi);

    /* Writing the low word commits the windowed write. */
    writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);

    spin_unlock_irqrestore(&oct->pci_win_lock, flags);
}
1372 
1373 int octeon_mem_access_ok(struct octeon_device *oct)
1374 {
1375     u64 access_okay = 0;
1376     u64 lmc0_reset_ctl;
1377 
1378     /* Check to make sure a DDR interface is enabled */
1379     if (OCTEON_CN23XX_PF(oct)) {
1380         lmc0_reset_ctl = lio_pci_readq(oct, CN23XX_LMC0_RESET_CTL);
1381         access_okay =
1382             (lmc0_reset_ctl & CN23XX_LMC0_RESET_CTL_DDR3RST_MASK);
1383     } else {
1384         lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
1385         access_okay =
1386             (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
1387     }
1388 
1389     return access_okay ? 0 : 1;
1390 }
1391 
/* Poll until DDR is accessible or the caller-supplied timeout expires.
 * @timeout: pointer to the timeout value; *timeout == 0 means wait
 *           forever.  A NULL pointer returns failure immediately.
 * Returns 0 once octeon_mem_access_ok() succeeds, non-zero on timeout.
 */
int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
{
    int ret = 1;
    u32 ms;

    if (!timeout)
        return ret;

    /* NOTE(review): the accumulator advances by HZ/10 (jiffies) per
     * ~100 ms sleep, so the comparison against *timeout only matches
     * milliseconds when HZ == 1000 -- confirm what unit *timeout is
     * expressed in.
     */
    for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
         ms += HZ / 10) {
        ret = octeon_mem_access_ok(oct);

        /* wait 100 ms */
        if (ret)
            schedule_timeout_uninterruptible(HZ / 10);
    }

    return ret;
}
1411 
1412 /* Get the octeon id assigned to the octeon device passed as argument.
1413  *  This function is exported to other modules.
1414  *  @param dev - octeon device pointer passed as a void *.
1415  *  @return octeon device id
1416  */
1417 int lio_get_device_id(void *dev)
1418 {
1419     struct octeon_device *octeon_dev = (struct octeon_device *)dev;
1420     u32 i;
1421 
1422     for (i = 0; i < MAX_OCTEON_DEVICES; i++)
1423         if (octeon_device[i] == octeon_dev)
1424             return octeon_dev->octeon_id;
1425     return -1;
1426 }
1427 
/* Re-arm interrupts for a DROQ (rx) and/or an IQ (tx) after processing:
 * acknowledge the counts the host has consumed, then on CN23XX write
 * RESEND so any still-pending work raises a fresh interrupt.
 */
void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
{
    u64 instr_cnt;
    u32 pkts_pend;
    struct octeon_device *oct = NULL;

    /* the whole thing needs to be atomic, ideally */
    if (droq) {
        pkts_pend = (u32)atomic_read(&droq->pkts_pending);
        /* Acknowledge the packets already handed up the stack. */
        writel(droq->pkt_count - pkts_pend, droq->pkts_sent_reg);
        droq->pkt_count = pkts_pend;
        oct = droq->oct_dev;
    }
    if (iq) {
        spin_lock_bh(&iq->lock);
        writel(iq->pkts_processed, iq->inst_cnt_reg);
        iq->pkt_in_done -= iq->pkts_processed;
        iq->pkts_processed = 0;
        /* this write needs to be flushed before we release the lock */
        spin_unlock_bh(&iq->lock);
        oct = iq->oct_dev;
    }
    /*write resend. Writing RESEND in SLI_PKTX_CNTS should be enough
     *to trigger tx interrupts as well, if they are pending.
     */
    if (oct && (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))) {
        if (droq)
            writeq(CN23XX_INTR_RESEND, droq->pkts_sent_reg);
        /* we race with firmware here: read and write the IN_DONE_CNTS */
        else if (iq) {
            instr_cnt =  readq(iq->inst_cnt_reg);
            /* Preserve the upper count bits; set only RESEND. */
            writeq(((instr_cnt & 0xFFFFFFFF00000000ULL) |
                CN23XX_INTR_RESEND),
                   iq->inst_cnt_reg);
        }
    }
}