// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "mcdi_functions.h"
#include "mcdi.h"
#include "mcdi_pcol.h"

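/* Release every VI allocated to this function via MC_CMD_FREE_VIS. */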
int efx_mcdi_free_vis(struct efx_nic *efx)
{
    MCDI_DECLARE_BUF_ERR(outbuf);
    size_t outlen;
    int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
                    outbuf, sizeof(outbuf), &outlen);

    /* -EALREADY means nothing to free, so ignore */
    if (rc == -EALREADY)
        rc = 0;
    if (rc)
        efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
                       rc);
    return rc;
}

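/* Ask the MC for between min_vis and max_vis virtual interfaces.  On
 * success the base VI number and the count actually granted are returned
 * through vi_base and allocated_vis (either pointer may be NULL).
 */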
int efx_mcdi_alloc_vis(struct efx_nic *efx, unsigned int min_vis,
               unsigned int max_vis, unsigned int *vi_base,
               unsigned int *allocated_vis)
{
    MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
    MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
    size_t outlen;
    int rc;

    MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
    MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
    rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
              outbuf, sizeof(outbuf), &outlen);
    if (rc != 0)
        return rc;

    if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
        return -EIO;

    netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
          MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

    if (vi_base)
        *vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
    if (allocated_vis)
        *allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
    return 0;
}
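
/* Illustrative only (not part of this file): a hypothetical caller would
 * typically pair the two helpers above, e.g.
 *
 *	rc = efx_mcdi_alloc_vis(efx, min_vis, max_vis, &vi_base, &n_vis);
 *	if (rc)
 *		return rc;
 *	...
 *	efx_mcdi_free_vis(efx);
 */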

int efx_mcdi_ev_probe(struct efx_channel *channel)
{
    return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
                    (channel->eventq_mask + 1) *
                    sizeof(efx_qword_t),
                    GFP_KERNEL);
}

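/* Create a channel's event queue with MC_CMD_INIT_EVQ.  With v2 firmware
 * the queue type is left to the firmware (AUTO); otherwise RX/TX event
 * merging and optional cut-through are requested explicitly.  Returns the
 * MCDI return code.
 */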
int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2)
{
    MCDI_DECLARE_BUF(inbuf,
             MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
                           EFX_BUF_SIZE));
    MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
    size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
    struct efx_nic *efx = channel->efx;
    size_t inlen, outlen;
    dma_addr_t dma_addr;
    int rc, i;

    /* Fill event queue with all ones (i.e. empty events) */
    memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

    MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
    MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
    /* INIT_EVQ expects index in vector table, not absolute */
    MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
    MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
               MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
    MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
    MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
    MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
               MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
    MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);

    if (v2) {
        /* Use the new generic approach to specifying event queue
         * configuration, requesting lower latency or higher throughput.
         * The options that actually get used appear in the output.
         */
        MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
                      INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
                      INIT_EVQ_V2_IN_FLAG_TYPE,
                      MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
    } else {
        MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
                      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
                      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
                      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
                      INIT_EVQ_IN_FLAG_CUT_THRU, v1_cut_thru);
    }

    dma_addr = channel->eventq.buf.dma_addr;
    for (i = 0; i < entries; ++i) {
        MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
        dma_addr += EFX_BUF_SIZE;
    }

    inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);

    rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
              outbuf, sizeof(outbuf), &outlen);

    if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
        netif_dbg(efx, drv, efx->net_dev,
              "Channel %d using event queue flags %08x\n",
              channel->channel,
              MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));

    return rc;
}

void efx_mcdi_ev_remove(struct efx_channel *channel)
{
    efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
}

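/* Tear down a channel's event queue with MC_CMD_FINI_EVQ.  -EALREADY means
 * the queue is already gone and is ignored.
 */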
void efx_mcdi_ev_fini(struct efx_channel *channel)
{
    MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
    MCDI_DECLARE_BUF_ERR(outbuf);
    struct efx_nic *efx = channel->efx;
    size_t outlen;
    int rc;

    MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);

    rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
                outbuf, sizeof(outbuf), &outlen);

    if (rc && rc != -EALREADY)
        goto fail;

    return;

fail:
    efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
                   outbuf, outlen, rc);
}

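/* Push a TX queue's descriptor ring to the MC with MC_CMD_INIT_TXQ,
 * configuring checksum offload, timestamping and TSOv2.  If the firmware
 * has run out of TSOv2 contexts (-ENOSPC), the request is retried with
 * TSOv2 disabled.
 */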
int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue)
{
    MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
                               EFX_BUF_SIZE));
    bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
    bool inner_csum = tx_queue->type & EFX_TXQ_TYPE_INNER_CSUM;
    size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
    struct efx_channel *channel = tx_queue->channel;
    struct efx_nic *efx = tx_queue->efx;
    dma_addr_t dma_addr;
    size_t inlen;
    int rc, i;

    BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);

    MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
    MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
    MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->label);
    MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
    MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
    MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, efx->vport_id);

    dma_addr = tx_queue->txd.buf.dma_addr;

    netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
          tx_queue->queue, entries, (u64)dma_addr);

    for (i = 0; i < entries; ++i) {
        MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
        dma_addr += EFX_BUF_SIZE;
    }

    inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);

    do {
        bool tso_v2 = tx_queue->tso_version == 2;

        /* TSOv2 implies IP header checksum offload for TSO frames,
         * so we can safely disable IP header checksum offload for
         * everything else.  If we don't have TSOv2, then we have to
         * enable IP header checksum offload, which is strictly
         * incorrect but better than breaking TSO.
         */
        MCDI_POPULATE_DWORD_6(inbuf, INIT_TXQ_IN_FLAGS,
                /* This flag was removed from mcdi_pcol.h for
                 * the non-_EXT version of INIT_TXQ.  However,
                 * firmware still honours it.
                 */
                INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
                INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !(csum_offload && tso_v2),
                INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
                INIT_TXQ_EXT_IN_FLAG_TIMESTAMP, tx_queue->timestamping,
                INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN, inner_csum && !tso_v2,
                INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN, inner_csum);

        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
                    NULL, 0, NULL);
        if (rc == -ENOSPC && tso_v2) {
            /* Retry without TSOv2 if we're short on contexts. */
            tx_queue->tso_version = 0;
            netif_warn(efx, probe, efx->net_dev,
                   "TSOv2 context not available to segment in "
                   "hardware. TCP performance may be reduced.\n"
                   );
        } else if (rc) {
            efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
                           MC_CMD_INIT_TXQ_EXT_IN_LEN,
                           NULL, 0, rc);
            goto fail;
        }
    } while (rc);

    return 0;

fail:
    return rc;
}

void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue)
{
    efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
}

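/* Tear down a TX queue with MC_CMD_FINI_TXQ; as with the event queue,
 * -EALREADY is ignored.
 */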
void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue)
{
    MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
    MCDI_DECLARE_BUF_ERR(outbuf);
    struct efx_nic *efx = tx_queue->efx;
    size_t outlen;
    int rc;

    MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
               tx_queue->queue);

    rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
                outbuf, sizeof(outbuf), &outlen);

    if (rc && rc != -EALREADY)
        goto fail;

    return;

fail:
    efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
                   outbuf, outlen, rc);
}

int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue)
{
    return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
                    (rx_queue->ptr_mask + 1) *
                    sizeof(efx_qword_t),
                    GFP_KERNEL);
}

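/* Push an RX queue to the MC with MC_CMD_INIT_RXQ, requesting the RX packet
 * prefix and timestamps.  On EF100 the receive buffer size is passed
 * explicitly; on other NICs it is left as zero.
 */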
void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
{
    struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
    size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
    MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_V4_IN_LEN);
    struct efx_nic *efx = rx_queue->efx;
    unsigned int buffer_size;
    dma_addr_t dma_addr;
    int rc;
    int i;
    BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);

    rx_queue->scatter_n = 0;
    rx_queue->scatter_len = 0;
    if (efx->type->revision == EFX_REV_EF100)
        buffer_size = efx->rx_page_buf_step;
    else
        buffer_size = 0;

    MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
    MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
    MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
    MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
               efx_rx_queue_index(rx_queue));
    MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
                  INIT_RXQ_IN_FLAG_PREFIX, 1,
                  INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
    MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
    MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, efx->vport_id);
    MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, buffer_size);

    dma_addr = rx_queue->rxd.buf.dma_addr;

    netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
          efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);

    for (i = 0; i < entries; ++i) {
        MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
        dma_addr += EFX_BUF_SIZE;
    }

    rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, sizeof(inbuf),
              NULL, 0, NULL);
    if (rc)
        netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
                efx_rx_queue_index(rx_queue));
}

void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue)
{
    efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
}

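/* Tear down an RX queue with MC_CMD_FINI_RXQ; -EALREADY is ignored. */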
void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue)
{
    MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
    MCDI_DECLARE_BUF_ERR(outbuf);
    struct efx_nic *efx = rx_queue->efx;
    size_t outlen;
    int rc;

    MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
               efx_rx_queue_index(rx_queue));

    rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
                outbuf, sizeof(outbuf), &outlen);

    if (rc && rc != -EALREADY)
        goto fail;

    return;

fail:
    efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
                   outbuf, outlen, rc);
}

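/* Flush and tear down every RX and TX queue, then wait up to
 * EFX_MAX_FLUSH_TIME for the active queue count to drop to zero.  The NIC
 * is not touched during EEH recovery, and if the MC has just rebooted
 * (must_realloc_vis) the queues are already gone, so only the count is
 * cleared.
 */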
int efx_fini_dmaq(struct efx_nic *efx)
{
    struct efx_tx_queue *tx_queue;
    struct efx_rx_queue *rx_queue;
    struct efx_channel *channel;
    int pending;

    /* If the MC has just rebooted, the TX/RX queues will have already been
     * torn down, but efx->active_queues needs to be set to zero.
     */
    if (efx->must_realloc_vis) {
        atomic_set(&efx->active_queues, 0);
        return 0;
    }

    /* Do not attempt to write to the NIC during EEH recovery */
    if (efx->state != STATE_RECOVERY) {
        efx_for_each_channel(channel, efx) {
            efx_for_each_channel_rx_queue(rx_queue, channel)
                efx_mcdi_rx_fini(rx_queue);
            efx_for_each_channel_tx_queue(tx_queue, channel)
                efx_mcdi_tx_fini(tx_queue);
        }

        wait_event_timeout(efx->flush_wq,
                   atomic_read(&efx->active_queues) == 0,
                   msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
        pending = atomic_read(&efx->active_queues);
        if (pending) {
            netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
                  pending);
            return -ETIMEDOUT;
        }
    }

    return 0;
}

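/* Convert the VI window mode reported by MC_CMD_GET_CAPABILITIES into a VI
 * stride in bytes and record it in efx->vi_stride.
 */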
int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode)
{
    switch (vi_window_mode) {
    case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
        efx->vi_stride = 8192;
        break;
    case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
        efx->vi_stride = 16384;
        break;
    case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
        efx->vi_stride = 65536;
        break;
    default:
        netif_err(efx, probe, efx->net_dev,
              "Unrecognised VI window mode %d\n",
              vi_window_mode);
        return -EIO;
    }
    netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
          efx->vi_stride);
    return 0;
}

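/* Look up this function's PF index with MC_CMD_GET_FUNCTION_INFO. */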
int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index)
{
    MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
    size_t outlen;
    int rc;

    rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
              sizeof(outbuf), &outlen);
    if (rc)
        return rc;
    if (outlen < sizeof(outbuf))
        return -EIO;

    *pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
    return 0;
}