/*
 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"

enum {
    CMD_IF_REV = 5,
};

enum {
    CMD_MODE_POLLING,
    CMD_MODE_EVENTS
};

enum {
    MLX5_CMD_DELIVERY_STAT_OK                   = 0x0,
    MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR           = 0x1,
    MLX5_CMD_DELIVERY_STAT_TOK_ERR              = 0x2,
    MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR      = 0x3,
    MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR    = 0x4,
    MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR     = 0x5,
    MLX5_CMD_DELIVERY_STAT_FW_ERR               = 0x6,
    MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR        = 0x7,
    MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR       = 0x8,
    MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR  = 0x9,
    MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR        = 0x10,
};

static struct mlx5_cmd_work_ent *
cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
          struct mlx5_cmd_msg *out, void *uout, int uout_size,
          mlx5_cmd_cbk_t cbk, void *context, int page_queue)
{
    gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
    struct mlx5_cmd_work_ent *ent;

    ent = kzalloc(sizeof(*ent), alloc_flags);
    if (!ent)
        return ERR_PTR(-ENOMEM);

    ent->idx        = -EINVAL;
    ent->in         = in;
    ent->out        = out;
    ent->uout       = uout;
    ent->uout_size  = uout_size;
    ent->callback   = cbk;
    ent->context    = context;
    ent->cmd        = cmd;
    ent->page_queue = page_queue;
    refcount_set(&ent->refcnt, 1);

    return ent;
}

static void cmd_free_ent(struct mlx5_cmd_work_ent *ent)
{
    kfree(ent);
}

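/* Tokens are written into the command descriptor and into every mailbox
 * block, so completions and mailboxes can be matched against the command
 * that issued them. Zero is never handed out; the counter simply skips it
 * when it wraps.
 */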
static u8 alloc_token(struct mlx5_cmd *cmd)
{
    u8 token;

    spin_lock(&cmd->token_lock);
    cmd->token++;
    if (cmd->token == 0)
        cmd->token++;
    token = cmd->token;
    spin_unlock(&cmd->token_lock);

    return token;
}

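/* Command slots are tracked in a bitmask of free entries; a set bit means
 * the slot is free. Returns -ENOMEM when all max_reg_cmds regular slots
 * are in flight.
 */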
static int cmd_alloc_index(struct mlx5_cmd *cmd)
{
    unsigned long flags;
    int ret;

    spin_lock_irqsave(&cmd->alloc_lock, flags);
    ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
    if (ret < cmd->max_reg_cmds)
        clear_bit(ret, &cmd->bitmask);
    spin_unlock_irqrestore(&cmd->alloc_lock, flags);

    return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
    lockdep_assert_held(&cmd->alloc_lock);
    set_bit(idx, &cmd->bitmask);
}

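/* Each work entry is reference counted: the submitter holds one reference,
 * and additional references are taken for the timeout work and for the
 * real FW completion event. The last put releases the command slot, ups
 * the corresponding semaphore and frees the entry.
 */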
static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
{
    refcount_inc(&ent->refcnt);
}

static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
{
    struct mlx5_cmd *cmd = ent->cmd;
    unsigned long flags;

    spin_lock_irqsave(&cmd->alloc_lock, flags);
    if (!refcount_dec_and_test(&ent->refcnt))
        goto out;

    if (ent->idx >= 0) {
        cmd_free_index(cmd, ent->idx);
        up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
    }

    cmd_free_ent(ent);
out:
    spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
    return cmd->cmd_buf + (idx << cmd->log_stride);
}

static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
{
    int size = msg->len;
    int blen = size - min_t(int, sizeof(msg->first.data), size);

    return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE);
}

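/* Signatures are an XOR-8 checksum: the signature byte is written as the
 * complement of the XOR of all other covered bytes, so a valid descriptor
 * or mailbox block XORs to 0xff in its entirety.
 */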
static u8 xor8_buf(void *buf, size_t offset, int len)
{
    u8 *ptr = buf;
    u8 sum = 0;
    int i;
    int end = len + offset;

    for (i = offset; i < end; i++)
        sum ^= ptr[i];

    return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
    size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
    int xor_len = sizeof(*block) - sizeof(block->data) - 1;

    if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
        return -EHWPOISON;

    if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
        return -EHWPOISON;

    return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
    int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
    size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);

    block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
    block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}

static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
    struct mlx5_cmd_mailbox *next = msg->next;
    int n = mlx5_calc_cmd_blocks(msg);
    int i = 0;

    for (i = 0; i < n && next; i++) {
        calc_block_sig(next->buf);
        next = next->next;
    }
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
    ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
    if (csum) {
        calc_chain_sig(ent->in);
        calc_chain_sig(ent->out);
    }
}

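/* Busy-poll the descriptor ownership byte until FW clears CMD_OWNER_HW,
 * allowing the full FW command timeout plus a one second grace period
 * before giving up with -ETIMEDOUT.
 */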
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
    struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, cmd);
    u64 cmd_to_ms = mlx5_tout_ms(dev, CMD);
    unsigned long poll_end;
    u8 own;

    poll_end = jiffies + msecs_to_jiffies(cmd_to_ms + 1000);

    do {
        own = READ_ONCE(ent->lay->status_own);
        if (!(own & CMD_OWNER_HW)) {
            ent->ret = 0;
            return;
        }
        cond_resched();
    } while (time_before(jiffies, poll_end));

    ent->ret = -ETIMEDOUT;
}

static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
    struct mlx5_cmd_mailbox *next = ent->out->next;
    int n = mlx5_calc_cmd_blocks(ent->out);
    int err;
    u8 sig;
    int i = 0;

    sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
    if (sig != 0xff)
        return -EHWPOISON;

    for (i = 0; i < n && next; i++) {
        err = verify_block_sig(next->buf);
        if (err)
            return -EHWPOISON;

        next = next->next;
    }

    return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset, int idx)
{
    __be32 *p = buf;
    int i;

    for (i = 0; i < size; i += 16) {
        pr_debug("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, offset,
             be32_to_cpu(p[0]), be32_to_cpu(p[1]),
             be32_to_cpu(p[2]), be32_to_cpu(p[3]));
        p += 4;
        offset += 16;
    }
    if (!data_only)
        pr_debug("\n");
}

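/* When the device is in internal error the driver emulates a completion
 * instead of talking to dead hardware: teardown/destroy/dealloc commands
 * are reported as successful so cleanup flows can make progress, while
 * the remaining known opcodes fail with MLX5_DRIVER_STATUS_ABORTED and
 * -ENOLINK.
 */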
static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
                       u32 *synd, u8 *status)
{
    *synd = 0;
    *status = 0;

    switch (op) {
    case MLX5_CMD_OP_TEARDOWN_HCA:
    case MLX5_CMD_OP_DISABLE_HCA:
    case MLX5_CMD_OP_MANAGE_PAGES:
    case MLX5_CMD_OP_DESTROY_MKEY:
    case MLX5_CMD_OP_DESTROY_EQ:
    case MLX5_CMD_OP_DESTROY_CQ:
    case MLX5_CMD_OP_DESTROY_QP:
    case MLX5_CMD_OP_DESTROY_PSV:
    case MLX5_CMD_OP_DESTROY_SRQ:
    case MLX5_CMD_OP_DESTROY_XRC_SRQ:
    case MLX5_CMD_OP_DESTROY_XRQ:
    case MLX5_CMD_OP_DESTROY_DCT:
    case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
    case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
    case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
    case MLX5_CMD_OP_DEALLOC_PD:
    case MLX5_CMD_OP_DEALLOC_UAR:
    case MLX5_CMD_OP_DETACH_FROM_MCG:
    case MLX5_CMD_OP_DEALLOC_XRCD:
    case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
    case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
    case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
    case MLX5_CMD_OP_DESTROY_LAG:
    case MLX5_CMD_OP_DESTROY_VPORT_LAG:
    case MLX5_CMD_OP_DESTROY_TIR:
    case MLX5_CMD_OP_DESTROY_SQ:
    case MLX5_CMD_OP_DESTROY_RQ:
    case MLX5_CMD_OP_DESTROY_RMP:
    case MLX5_CMD_OP_DESTROY_TIS:
    case MLX5_CMD_OP_DESTROY_RQT:
    case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
    case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
    case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
    case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
    case MLX5_CMD_OP_2ERR_QP:
    case MLX5_CMD_OP_2RST_QP:
    case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
    case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
    case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
    case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
    case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT:
    case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
    case MLX5_CMD_OP_FPGA_DESTROY_QP:
    case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
    case MLX5_CMD_OP_DEALLOC_MEMIC:
    case MLX5_CMD_OP_PAGE_FAULT_RESUME:
    case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
    case MLX5_CMD_OP_DEALLOC_SF:
    case MLX5_CMD_OP_DESTROY_UCTX:
    case MLX5_CMD_OP_DESTROY_UMEM:
    case MLX5_CMD_OP_MODIFY_RQT:
        return MLX5_CMD_STAT_OK;

    case MLX5_CMD_OP_QUERY_HCA_CAP:
    case MLX5_CMD_OP_QUERY_ADAPTER:
    case MLX5_CMD_OP_INIT_HCA:
    case MLX5_CMD_OP_ENABLE_HCA:
    case MLX5_CMD_OP_QUERY_PAGES:
    case MLX5_CMD_OP_SET_HCA_CAP:
    case MLX5_CMD_OP_QUERY_ISSI:
    case MLX5_CMD_OP_SET_ISSI:
    case MLX5_CMD_OP_CREATE_MKEY:
    case MLX5_CMD_OP_QUERY_MKEY:
    case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
    case MLX5_CMD_OP_CREATE_EQ:
    case MLX5_CMD_OP_QUERY_EQ:
    case MLX5_CMD_OP_GEN_EQE:
    case MLX5_CMD_OP_CREATE_CQ:
    case MLX5_CMD_OP_QUERY_CQ:
    case MLX5_CMD_OP_MODIFY_CQ:
    case MLX5_CMD_OP_CREATE_QP:
    case MLX5_CMD_OP_RST2INIT_QP:
    case MLX5_CMD_OP_INIT2RTR_QP:
    case MLX5_CMD_OP_RTR2RTS_QP:
    case MLX5_CMD_OP_RTS2RTS_QP:
    case MLX5_CMD_OP_SQERR2RTS_QP:
    case MLX5_CMD_OP_QUERY_QP:
    case MLX5_CMD_OP_SQD_RTS_QP:
    case MLX5_CMD_OP_INIT2INIT_QP:
    case MLX5_CMD_OP_CREATE_PSV:
    case MLX5_CMD_OP_CREATE_SRQ:
    case MLX5_CMD_OP_QUERY_SRQ:
    case MLX5_CMD_OP_ARM_RQ:
    case MLX5_CMD_OP_CREATE_XRC_SRQ:
    case MLX5_CMD_OP_QUERY_XRC_SRQ:
    case MLX5_CMD_OP_ARM_XRC_SRQ:
    case MLX5_CMD_OP_CREATE_XRQ:
    case MLX5_CMD_OP_QUERY_XRQ:
    case MLX5_CMD_OP_ARM_XRQ:
    case MLX5_CMD_OP_CREATE_DCT:
    case MLX5_CMD_OP_DRAIN_DCT:
    case MLX5_CMD_OP_QUERY_DCT:
    case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
    case MLX5_CMD_OP_QUERY_VPORT_STATE:
    case MLX5_CMD_OP_MODIFY_VPORT_STATE:
    case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
    case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
    case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
    case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
    case MLX5_CMD_OP_SET_ROCE_ADDRESS:
    case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
    case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
    case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
    case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
    case MLX5_CMD_OP_QUERY_VNIC_ENV:
    case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
    case MLX5_CMD_OP_ALLOC_Q_COUNTER:
    case MLX5_CMD_OP_QUERY_Q_COUNTER:
    case MLX5_CMD_OP_SET_MONITOR_COUNTER:
    case MLX5_CMD_OP_ARM_MONITOR_COUNTER:
    case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
    case MLX5_CMD_OP_QUERY_RATE_LIMIT:
    case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
    case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
    case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
    case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
    case MLX5_CMD_OP_ALLOC_PD:
    case MLX5_CMD_OP_ALLOC_UAR:
    case MLX5_CMD_OP_CONFIG_INT_MODERATION:
    case MLX5_CMD_OP_ACCESS_REG:
    case MLX5_CMD_OP_ATTACH_TO_MCG:
    case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
    case MLX5_CMD_OP_MAD_IFC:
    case MLX5_CMD_OP_QUERY_MAD_DEMUX:
    case MLX5_CMD_OP_SET_MAD_DEMUX:
    case MLX5_CMD_OP_NOP:
    case MLX5_CMD_OP_ALLOC_XRCD:
    case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
    case MLX5_CMD_OP_QUERY_CONG_STATUS:
    case MLX5_CMD_OP_MODIFY_CONG_STATUS:
    case MLX5_CMD_OP_QUERY_CONG_PARAMS:
    case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
    case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
    case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
    case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
    case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
    case MLX5_CMD_OP_CREATE_LAG:
    case MLX5_CMD_OP_MODIFY_LAG:
    case MLX5_CMD_OP_QUERY_LAG:
    case MLX5_CMD_OP_CREATE_VPORT_LAG:
    case MLX5_CMD_OP_CREATE_TIR:
    case MLX5_CMD_OP_MODIFY_TIR:
    case MLX5_CMD_OP_QUERY_TIR:
    case MLX5_CMD_OP_CREATE_SQ:
    case MLX5_CMD_OP_MODIFY_SQ:
    case MLX5_CMD_OP_QUERY_SQ:
    case MLX5_CMD_OP_CREATE_RQ:
    case MLX5_CMD_OP_MODIFY_RQ:
    case MLX5_CMD_OP_QUERY_RQ:
    case MLX5_CMD_OP_CREATE_RMP:
    case MLX5_CMD_OP_MODIFY_RMP:
    case MLX5_CMD_OP_QUERY_RMP:
    case MLX5_CMD_OP_CREATE_TIS:
    case MLX5_CMD_OP_MODIFY_TIS:
    case MLX5_CMD_OP_QUERY_TIS:
    case MLX5_CMD_OP_CREATE_RQT:
    case MLX5_CMD_OP_QUERY_RQT:

    case MLX5_CMD_OP_CREATE_FLOW_TABLE:
    case MLX5_CMD_OP_QUERY_FLOW_TABLE:
    case MLX5_CMD_OP_CREATE_FLOW_GROUP:
    case MLX5_CMD_OP_QUERY_FLOW_GROUP:
    case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
    case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
    case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
    case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
    case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
    case MLX5_CMD_OP_FPGA_CREATE_QP:
    case MLX5_CMD_OP_FPGA_MODIFY_QP:
    case MLX5_CMD_OP_FPGA_QUERY_QP:
    case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
    case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
    case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
    case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
    case MLX5_CMD_OP_CREATE_UCTX:
    case MLX5_CMD_OP_CREATE_UMEM:
    case MLX5_CMD_OP_ALLOC_MEMIC:
    case MLX5_CMD_OP_MODIFY_XRQ:
    case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
    case MLX5_CMD_OP_QUERY_VHCA_STATE:
    case MLX5_CMD_OP_MODIFY_VHCA_STATE:
    case MLX5_CMD_OP_ALLOC_SF:
    case MLX5_CMD_OP_SUSPEND_VHCA:
    case MLX5_CMD_OP_RESUME_VHCA:
    case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE:
    case MLX5_CMD_OP_SAVE_VHCA_STATE:
    case MLX5_CMD_OP_LOAD_VHCA_STATE:
        *status = MLX5_DRIVER_STATUS_ABORTED;
        *synd = MLX5_DRIVER_SYND;
        return -ENOLINK;
    default:
        mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
        return -EINVAL;
    }
}

const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

    switch (command) {
    MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
    MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
    MLX5_COMMAND_STR_CASE(INIT_HCA);
    MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
    MLX5_COMMAND_STR_CASE(ENABLE_HCA);
    MLX5_COMMAND_STR_CASE(DISABLE_HCA);
    MLX5_COMMAND_STR_CASE(QUERY_PAGES);
    MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
    MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
    MLX5_COMMAND_STR_CASE(QUERY_ISSI);
    MLX5_COMMAND_STR_CASE(SET_ISSI);
    MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION);
    MLX5_COMMAND_STR_CASE(CREATE_MKEY);
    MLX5_COMMAND_STR_CASE(QUERY_MKEY);
    MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
    MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
    MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
    MLX5_COMMAND_STR_CASE(CREATE_EQ);
    MLX5_COMMAND_STR_CASE(DESTROY_EQ);
    MLX5_COMMAND_STR_CASE(QUERY_EQ);
    MLX5_COMMAND_STR_CASE(GEN_EQE);
    MLX5_COMMAND_STR_CASE(CREATE_CQ);
    MLX5_COMMAND_STR_CASE(DESTROY_CQ);
    MLX5_COMMAND_STR_CASE(QUERY_CQ);
    MLX5_COMMAND_STR_CASE(MODIFY_CQ);
    MLX5_COMMAND_STR_CASE(CREATE_QP);
    MLX5_COMMAND_STR_CASE(DESTROY_QP);
    MLX5_COMMAND_STR_CASE(RST2INIT_QP);
    MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
    MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
    MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
    MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
    MLX5_COMMAND_STR_CASE(2ERR_QP);
    MLX5_COMMAND_STR_CASE(2RST_QP);
    MLX5_COMMAND_STR_CASE(QUERY_QP);
    MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
    MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
    MLX5_COMMAND_STR_CASE(CREATE_PSV);
    MLX5_COMMAND_STR_CASE(DESTROY_PSV);
    MLX5_COMMAND_STR_CASE(CREATE_SRQ);
    MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
    MLX5_COMMAND_STR_CASE(QUERY_SRQ);
    MLX5_COMMAND_STR_CASE(ARM_RQ);
    MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
    MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
    MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
    MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
    MLX5_COMMAND_STR_CASE(CREATE_DCT);
    MLX5_COMMAND_STR_CASE(DESTROY_DCT);
    MLX5_COMMAND_STR_CASE(DRAIN_DCT);
    MLX5_COMMAND_STR_CASE(QUERY_DCT);
    MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
    MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
    MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
    MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
    MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
    MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
    MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
    MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
    MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
    MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
    MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
    MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
    MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
    MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV);
    MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
    MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
    MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
    MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
    MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER);
    MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER);
    MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
    MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
    MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
    MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
    MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
    MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
    MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
    MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
    MLX5_COMMAND_STR_CASE(ALLOC_PD);
    MLX5_COMMAND_STR_CASE(DEALLOC_PD);
    MLX5_COMMAND_STR_CASE(ALLOC_UAR);
    MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
    MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
    MLX5_COMMAND_STR_CASE(ACCESS_REG);
    MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
    MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
    MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
    MLX5_COMMAND_STR_CASE(MAD_IFC);
    MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
    MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
    MLX5_COMMAND_STR_CASE(NOP);
    MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
    MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
    MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
    MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
    MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
    MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
    MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
    MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
    MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
    MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
    MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
    MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
    MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
    MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
    MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
    MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
    MLX5_COMMAND_STR_CASE(CREATE_LAG);
    MLX5_COMMAND_STR_CASE(MODIFY_LAG);
    MLX5_COMMAND_STR_CASE(QUERY_LAG);
    MLX5_COMMAND_STR_CASE(DESTROY_LAG);
    MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
    MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
    MLX5_COMMAND_STR_CASE(CREATE_TIR);
    MLX5_COMMAND_STR_CASE(MODIFY_TIR);
    MLX5_COMMAND_STR_CASE(DESTROY_TIR);
    MLX5_COMMAND_STR_CASE(QUERY_TIR);
    MLX5_COMMAND_STR_CASE(CREATE_SQ);
    MLX5_COMMAND_STR_CASE(MODIFY_SQ);
    MLX5_COMMAND_STR_CASE(DESTROY_SQ);
    MLX5_COMMAND_STR_CASE(QUERY_SQ);
    MLX5_COMMAND_STR_CASE(CREATE_RQ);
    MLX5_COMMAND_STR_CASE(MODIFY_RQ);
    MLX5_COMMAND_STR_CASE(DESTROY_RQ);
    MLX5_COMMAND_STR_CASE(QUERY_RQ);
    MLX5_COMMAND_STR_CASE(CREATE_RMP);
    MLX5_COMMAND_STR_CASE(MODIFY_RMP);
    MLX5_COMMAND_STR_CASE(DESTROY_RMP);
    MLX5_COMMAND_STR_CASE(QUERY_RMP);
    MLX5_COMMAND_STR_CASE(CREATE_TIS);
    MLX5_COMMAND_STR_CASE(MODIFY_TIS);
    MLX5_COMMAND_STR_CASE(DESTROY_TIS);
    MLX5_COMMAND_STR_CASE(QUERY_TIS);
    MLX5_COMMAND_STR_CASE(CREATE_RQT);
    MLX5_COMMAND_STR_CASE(MODIFY_RQT);
    MLX5_COMMAND_STR_CASE(DESTROY_RQT);
    MLX5_COMMAND_STR_CASE(QUERY_RQT);
    MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
    MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
    MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
    MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
    MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
    MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
    MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
    MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
    MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
    MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
    MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
    MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
    MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
    MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
    MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT);
    MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT);
    MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
    MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
    MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
    MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP);
    MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
    MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
    MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
    MLX5_COMMAND_STR_CASE(CREATE_XRQ);
    MLX5_COMMAND_STR_CASE(DESTROY_XRQ);
    MLX5_COMMAND_STR_CASE(QUERY_XRQ);
    MLX5_COMMAND_STR_CASE(ARM_XRQ);
    MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT);
    MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT);
    MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
    MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
    MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
    MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
    MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
    MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS);
    MLX5_COMMAND_STR_CASE(CREATE_UCTX);
    MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
    MLX5_COMMAND_STR_CASE(CREATE_UMEM);
    MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
    MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
    MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
    MLX5_COMMAND_STR_CASE(QUERY_VHCA_STATE);
    MLX5_COMMAND_STR_CASE(MODIFY_VHCA_STATE);
    MLX5_COMMAND_STR_CASE(ALLOC_SF);
    MLX5_COMMAND_STR_CASE(DEALLOC_SF);
    MLX5_COMMAND_STR_CASE(SUSPEND_VHCA);
    MLX5_COMMAND_STR_CASE(RESUME_VHCA);
    MLX5_COMMAND_STR_CASE(QUERY_VHCA_MIGRATION_STATE);
    MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE);
    MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE);
    default: return "unknown command opcode";
    }
}

static const char *cmd_status_str(u8 status)
{
    switch (status) {
    case MLX5_CMD_STAT_OK:
        return "OK";
    case MLX5_CMD_STAT_INT_ERR:
        return "internal error";
    case MLX5_CMD_STAT_BAD_OP_ERR:
        return "bad operation";
    case MLX5_CMD_STAT_BAD_PARAM_ERR:
        return "bad parameter";
    case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
        return "bad system state";
    case MLX5_CMD_STAT_BAD_RES_ERR:
        return "bad resource";
    case MLX5_CMD_STAT_RES_BUSY:
        return "resource busy";
    case MLX5_CMD_STAT_LIM_ERR:
        return "limits exceeded";
    case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
        return "bad resource state";
    case MLX5_CMD_STAT_IX_ERR:
        return "bad index";
    case MLX5_CMD_STAT_NO_RES_ERR:
        return "no resources";
    case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
        return "bad input length";
    case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
        return "bad output length";
    case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
        return "bad QP state";
    case MLX5_CMD_STAT_BAD_PKT_ERR:
        return "bad packet (discarded)";
    case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
        return "bad size too many outstanding CQEs";
    default:
        return "unknown status";
    }
}

static int cmd_status_to_err(u8 status)
{
    switch (status) {
    case MLX5_CMD_STAT_OK:                      return 0;
    case MLX5_CMD_STAT_INT_ERR:                 return -EIO;
    case MLX5_CMD_STAT_BAD_OP_ERR:              return -EINVAL;
    case MLX5_CMD_STAT_BAD_PARAM_ERR:           return -EINVAL;
    case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:       return -EIO;
    case MLX5_CMD_STAT_BAD_RES_ERR:             return -EINVAL;
    case MLX5_CMD_STAT_RES_BUSY:                return -EBUSY;
    case MLX5_CMD_STAT_LIM_ERR:                 return -ENOMEM;
    case MLX5_CMD_STAT_BAD_RES_STATE_ERR:       return -EINVAL;
    case MLX5_CMD_STAT_IX_ERR:                  return -EINVAL;
    case MLX5_CMD_STAT_NO_RES_ERR:              return -EAGAIN;
    case MLX5_CMD_STAT_BAD_INP_LEN_ERR:         return -EIO;
    case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:        return -EIO;
    case MLX5_CMD_STAT_BAD_QP_STATE_ERR:        return -EINVAL;
    case MLX5_CMD_STAT_BAD_PKT_ERR:             return -EINVAL;
    case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:  return -EINVAL;
    default:                                    return -EIO;
    }
}

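/* Minimal mlx5_ifc-style layouts for the header shared by all command
 * inboxes and outboxes; field widths are in bits, and the fields are
 * accessed through the MLX5_GET()/MLX5_SET() macros.
 */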
struct mlx5_ifc_mbox_out_bits {
    u8         status[0x8];
    u8         reserved_at_8[0x18];

    u8         syndrome[0x20];

    u8         reserved_at_40[0x40];
};

struct mlx5_ifc_mbox_in_bits {
    u8         opcode[0x10];
    u8         uid[0x10];

    u8         reserved_at_20[0x10];
    u8         op_mod[0x10];

    u8         reserved_at_40[0x40];
};

void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
{
    u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
    u8 status = MLX5_GET(mbox_out, out, status);

    mlx5_core_err_rl(dev,
             "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
             mlx5_command_str(opcode), opcode, op_mod,
             cmd_status_str(status), status, syndrome, cmd_status_to_err(status));
}
EXPORT_SYMBOL(mlx5_cmd_out_err);

static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
{
    u16 opcode, op_mod;
    u32 syndrome;
    u8  status;
    u16 uid;
    int err;

    syndrome = MLX5_GET(mbox_out, out, syndrome);
    status = MLX5_GET(mbox_out, out, status);

    opcode = MLX5_GET(mbox_in, in, opcode);
    op_mod = MLX5_GET(mbox_in, in, op_mod);
    uid    = MLX5_GET(mbox_in, in, uid);

    err = cmd_status_to_err(status);

    if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
        mlx5_cmd_out_err(dev, opcode, op_mod, out);
    else
        mlx5_core_dbg(dev,
            "%s(0x%x) op_mod(0x%x) uid(%d) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
            mlx5_command_str(opcode), opcode, op_mod, uid,
            cmd_status_str(status), status, syndrome, err);
}

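/* Translate a command result for callers: emulate the return status on
 * -ENXIO, pass driver/delivery errors through, otherwise convert the FW
 * outbox status to an errno and log failures.
 */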
int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
{
    /* aborted due to a PCI error or by the reset flow via mlx5_cmd_trigger_completions() */
    if (err == -ENXIO) {
        u16 opcode = MLX5_GET(mbox_in, in, opcode);
        u32 syndrome;
        u8 status;

        /* PCI error: emulate the command return status for a smooth reset */
        err = mlx5_internal_err_ret_value(dev, opcode, &syndrome, &status);
        MLX5_SET(mbox_out, out, status, status);
        MLX5_SET(mbox_out, out, syndrome, syndrome);
        if (!err)
            return 0;
    }

    /* driver or FW delivery error */
    if (err != -EREMOTEIO && err)
        return err;

    /* check outbox status */
    err = cmd_status_to_err(MLX5_GET(mbox_out, out, status));
    if (err)
        cmd_status_print(dev, in, out);

    return err;
}
EXPORT_SYMBOL(mlx5_cmd_check);

static void dump_command(struct mlx5_core_dev *dev,
             struct mlx5_cmd_work_ent *ent, int input)
{
    struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
    u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
    struct mlx5_cmd_mailbox *next = msg->next;
    int n = mlx5_calc_cmd_blocks(msg);
    int data_only;
    u32 offset = 0;
    int dump_len;
    int i;

    mlx5_core_dbg(dev, "cmd[%d]: start dump\n", ent->idx);
    data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

    if (data_only)
        mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
                   "cmd[%d]: dump command data %s(0x%x) %s\n",
                   ent->idx, mlx5_command_str(op), op,
                   input ? "INPUT" : "OUTPUT");
    else
        mlx5_core_dbg(dev, "cmd[%d]: dump command %s(0x%x) %s\n",
                  ent->idx, mlx5_command_str(op), op,
                  input ? "INPUT" : "OUTPUT");

    if (data_only) {
        if (input) {
            dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset, ent->idx);
            offset += sizeof(ent->lay->in);
        } else {
            dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset, ent->idx);
            offset += sizeof(ent->lay->out);
        }
    } else {
        dump_buf(ent->lay, sizeof(*ent->lay), 0, offset, ent->idx);
        offset += sizeof(*ent->lay);
    }

    for (i = 0; i < n && next; i++) {
        if (data_only) {
            dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
            dump_buf(next->buf, dump_len, 1, offset, ent->idx);
            offset += MLX5_CMD_DATA_BLOCK_SIZE;
        } else {
            mlx5_core_dbg(dev, "cmd[%d]: command block:\n", ent->idx);
            dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset,
                 ent->idx);
            offset += sizeof(struct mlx5_cmd_prot_block);
        }
        next = next->next;
    }

    if (data_only)
        pr_debug("\n");

    mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx);
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
    return MLX5_GET(mbox_in, in->first.data, opcode);
}

static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);

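/* Timeout path for commands issued with a callback: first try to recover
 * lost EQEs, and only if the entry is still pending force a completion
 * with -ETIMEDOUT.
 */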
static void cb_timeout_handler(struct work_struct *work)
{
    struct delayed_work *dwork = container_of(work, struct delayed_work,
                          work);
    struct mlx5_cmd_work_ent *ent = container_of(dwork,
                             struct mlx5_cmd_work_ent,
                             cb_timeout_work);
    struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
                         cmd);

    mlx5_cmd_eq_recover(dev);

    /* Maybe it was already handled by EQ recovery? */
    if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) {
        mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx,
                   mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
        goto out; /* phew, already handled */
    }

    ent->ret = -ETIMEDOUT;
    mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
               ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
    mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);

out:
    cmd_ent_put(ent); /* for the cmd_ent_get() taken when the delayed work was scheduled */
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
                  struct mlx5_cmd_msg *msg);

static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
{
    if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL)
        return true;

    return cmd->allowed_opcode == opcode;
}

bool mlx5_cmd_is_down(struct mlx5_core_dev *dev)
{
    return pci_channel_offline(dev->pdev) ||
           dev->cmd.state != MLX5_CMDIF_STATE_UP ||
           dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR;
}

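/* The submission path: runs on the command workqueue (or synchronously
 * for page-queue entries), grabs a semaphore and a command slot, builds
 * the HW descriptor, rings the doorbell and, in polling mode, waits for
 * FW to hand ownership back.
 */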
static void cmd_work_handler(struct work_struct *work)
{
    struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
    struct mlx5_cmd *cmd = ent->cmd;
    bool poll_cmd = ent->polling;
    struct mlx5_cmd_layout *lay;
    struct mlx5_core_dev *dev;
    unsigned long cb_timeout;
    struct semaphore *sem;
    unsigned long flags;
    int alloc_ret;
    int cmd_mode;

    dev = container_of(cmd, struct mlx5_core_dev, cmd);
    cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));

    complete(&ent->handling);
    sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
    down(sem);
    if (!ent->page_queue) {
        alloc_ret = cmd_alloc_index(cmd);
        if (alloc_ret < 0) {
            mlx5_core_err_rl(dev, "failed to allocate command entry\n");
            if (ent->callback) {
                ent->callback(-EAGAIN, ent->context);
                mlx5_free_cmd_msg(dev, ent->out);
                free_msg(dev, ent->in);
                cmd_ent_put(ent);
            } else {
                ent->ret = -EAGAIN;
                complete(&ent->done);
            }
            up(sem);
            return;
        }
        ent->idx = alloc_ret;
    } else {
        ent->idx = cmd->max_reg_cmds;
        spin_lock_irqsave(&cmd->alloc_lock, flags);
        clear_bit(ent->idx, &cmd->bitmask);
        spin_unlock_irqrestore(&cmd->alloc_lock, flags);
    }

    cmd->ent_arr[ent->idx] = ent;
    lay = get_inst(cmd, ent->idx);
    ent->lay = lay;
    memset(lay, 0, sizeof(*lay));
    memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
    ent->op = be32_to_cpu(lay->in[0]) >> 16;
    if (ent->in->next)
        lay->in_ptr = cpu_to_be64(ent->in->next->dma);
    lay->inlen = cpu_to_be32(ent->in->len);
    if (ent->out->next)
        lay->out_ptr = cpu_to_be64(ent->out->next->dma);
    lay->outlen = cpu_to_be32(ent->out->len);
    lay->type = MLX5_PCI_CMD_XPORT;
    lay->token = ent->token;
    lay->status_own = CMD_OWNER_HW;
    set_signature(ent, !cmd->checksum_disabled);
    dump_command(dev, ent, 1);
    ent->ts1 = ktime_get_ns();
    cmd_mode = cmd->mode;

    if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout))
        cmd_ent_get(ent);
    set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);

    /* Skip sending command to fw if internal error */
    if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
        ent->ret = -ENXIO;
        mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
        return;
    }

    cmd_ent_get(ent); /* for the _real_ FW event on completion */
    /* ring doorbell after the descriptor is valid */
    mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
    wmb();
    iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
    /* if not in polling don't use ent after this point */
    if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
        poll_timeout(ent);
        /* make sure we read the descriptor after ownership is SW */
        rmb();
        mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
    }
}

static int deliv_status_to_err(u8 status)
{
    switch (status) {
    case MLX5_CMD_DELIVERY_STAT_OK:
    case MLX5_DRIVER_STATUS_ABORTED:
        return 0;
    case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
    case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
        return -EBADR;
    case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
    case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
    case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
        return -EFAULT; /* Bad address */
    case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
    case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
    case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
    case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
        return -ENOMSG;
    case MLX5_CMD_DELIVERY_STAT_FW_ERR:
        return -EIO;
    default:
        return -EINVAL;
    }
}

static const char *deliv_status_to_str(u8 status)
{
    switch (status) {
    case MLX5_CMD_DELIVERY_STAT_OK:
        return "no errors";
    case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
        return "signature error";
    case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
        return "token error";
    case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
        return "bad block number";
    case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
        return "output pointer not aligned to block size";
    case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
        return "input pointer not aligned to block size";
    case MLX5_CMD_DELIVERY_STAT_FW_ERR:
        return "firmware internal error";
    case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
        return "command input length error";
    case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
        return "command output length error";
    case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
        return "reserved fields not cleared";
    case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
        return "bad command descriptor type";
    default:
        return "unknown status code";
    }
}

enum {
    MLX5_CMD_TIMEOUT_RECOVER_MSEC   = 5 * 1000,
};

static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
                      struct mlx5_cmd_work_ent *ent)
{
    unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_RECOVER_MSEC);

    mlx5_cmd_eq_recover(dev);

    /* Re-wait on the ent->done after executing the recovery flow. If the
     * recovery flow (or any other recovery flow running simultaneously)
     * has recovered an EQE, it should cause the entry to be completed by
     * the command interface.
     */
    if (wait_for_completion_timeout(&ent->done, timeout)) {
        mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
                   mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
        return;
    }

    mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
               mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));

    ent->ret = -ETIMEDOUT;
    mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
}

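/* Blocking wait for a submitted entry: if the work never even started,
 * cancel it and fail with -ECANCELED; in polling mode wait indefinitely,
 * in events mode fall back to EQ recovery after the FW timeout expires.
 */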
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
    unsigned long timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
    struct mlx5_cmd *cmd = &dev->cmd;
    int err;

    if (!wait_for_completion_timeout(&ent->handling, timeout) &&
        cancel_work_sync(&ent->work)) {
        ent->ret = -ECANCELED;
        goto out_err;
    }
    if (cmd->mode == CMD_MODE_POLLING || ent->polling)
        wait_for_completion(&ent->done);
    else if (!wait_for_completion_timeout(&ent->done, timeout))
        wait_func_handle_exec_timeout(dev, ent);

out_err:
    err = ent->ret;

    if (err == -ETIMEDOUT) {
        mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
                   mlx5_command_str(msg_to_opcode(ent->in)),
                   msg_to_opcode(ent->in));
    } else if (err == -ECANCELED) {
        mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
                   mlx5_command_str(msg_to_opcode(ent->in)),
                   msg_to_opcode(ent->in));
    }
    mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
              err, deliv_status_to_str(ent->status), ent->status);

    return err;
}

/*  Notes:
 *    1. Callback functions may not sleep
 *    2. Page queue commands do not support asynchronous completion
 *
 * return value in case (!callback):
 *  ret < 0 : Command execution couldn't be submitted by driver
 *  ret > 0 : Command execution couldn't be performed by firmware
 *  ret == 0: Command was executed by FW, Caller must check FW outbox status.
 *
 * return value in case (callback):
 *  ret < 0 : Command execution couldn't be submitted by driver
 *  ret == 0: Command will be submitted to FW for execution
 *        and the callback will be called for further status updates
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
               struct mlx5_cmd_msg *out, void *uout, int uout_size,
               mlx5_cmd_cbk_t callback,
               void *context, int page_queue,
               u8 token, bool force_polling)
{
    struct mlx5_cmd *cmd = &dev->cmd;
    struct mlx5_cmd_work_ent *ent;
    struct mlx5_cmd_stats *stats;
    u8 status = 0;
    int err = 0;
    s64 ds;
    u16 op;

    if (callback && page_queue)
        return -EINVAL;

    ent = cmd_alloc_ent(cmd, in, out, uout, uout_size,
                callback, context, page_queue);
    if (IS_ERR(ent))
        return PTR_ERR(ent);

    /* put for this ent is when consumed, depending on the use case
     * 1) (!callback) blocking flow: by caller after wait_func completes
     * 2) (callback) flow: by mlx5_cmd_comp_handler() when ent is handled
     */

    ent->token = token;
    ent->polling = force_polling;

    init_completion(&ent->handling);
    if (!callback)
        init_completion(&ent->done);

    INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
    INIT_WORK(&ent->work, cmd_work_handler);
    if (page_queue) {
        cmd_work_handler(&ent->work);
    } else if (!queue_work(cmd->wq, &ent->work)) {
        mlx5_core_warn(dev, "failed to queue work\n");
        err = -EALREADY;
        goto out_free;
    }

    if (callback)
        return 0; /* mlx5_cmd_comp_handler() will put(ent) */

    err = wait_func(dev, ent);
    if (err == -ETIMEDOUT || err == -ECANCELED)
        goto out_free;

    ds = ent->ts2 - ent->ts1;
    op = MLX5_GET(mbox_in, in->first.data, opcode);
    if (op < MLX5_CMD_OP_MAX) {
        stats = &cmd->stats[op];
        spin_lock_irq(&stats->lock);
        stats->sum += ds;
        ++stats->n;
        spin_unlock_irq(&stats->lock);
    }
    mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
               "fw exec time for %s is %lld nsec\n",
               mlx5_command_str(op), ds);

out_free:
    status = ent->status;
    cmd_ent_put(ent);
    return err ? : status;
}

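/* For illustration only, a rough sketch of how a blocking caller reaches
 * this routine through the exported mlx5_cmd_exec() wrapper (simplified,
 * not the exact call chain):
 *
 *    u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
 *    u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
 *    int err;
 *
 *    MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *    err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 *
 * A zero return means FW executed the command; the caller still checks
 * the outbox status, e.g. with mlx5_cmd_check().
 */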
static ssize_t dbg_write(struct file *filp, const char __user *buf,
             size_t count, loff_t *pos)
{
    struct mlx5_core_dev *dev = filp->private_data;
    struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
    char lbuf[3];
    int err;

    if (!dbg->in_msg || !dbg->out_msg)
        return -ENOMEM;

    if (count < sizeof(lbuf) - 1)
        return -EINVAL;

    if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
        return -EFAULT;

    lbuf[sizeof(lbuf) - 1] = 0;

    if (strcmp(lbuf, "go"))
        return -EINVAL;

    err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

    return err ? err : count;
}

static const struct file_operations fops = {
    .owner  = THIS_MODULE,
    .open   = simple_open,
    .write  = dbg_write,
};

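/* Command payloads are split across the message: the first bytes live
 * inline in msg->first.data, and anything beyond that is scattered or
 * gathered across the chain of MLX5_CMD_DATA_BLOCK_SIZE mailbox blocks.
 */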
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
                u8 token)
{
    struct mlx5_cmd_prot_block *block;
    struct mlx5_cmd_mailbox *next;
    int copy;

    if (!to || !from)
        return -ENOMEM;

    copy = min_t(int, size, sizeof(to->first.data));
    memcpy(to->first.data, from, copy);
    size -= copy;
    from += copy;

    next = to->next;
    while (size) {
        if (!next) {
            /* this is a BUG */
            return -ENOMEM;
        }

        copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
        block = next->buf;
        memcpy(block->data, from, copy);
        from += copy;
        size -= copy;
        block->token = token;
        next = next->next;
    }

    return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
    struct mlx5_cmd_prot_block *block;
    struct mlx5_cmd_mailbox *next;
    int copy;

    if (!to || !from)
        return -ENOMEM;

    copy = min_t(int, size, sizeof(from->first.data));
    memcpy(to, from->first.data, copy);
    size -= copy;
    to += copy;

    next = from->next;
    while (size) {
        if (!next) {
            /* this is a BUG */
            return -ENOMEM;
        }

        copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
        block = next->buf;

        memcpy(to, block->data, copy);
        to += copy;
        size -= copy;
        next = next->next;
    }

    return 0;
}

static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
                          gfp_t flags)
{
    struct mlx5_cmd_mailbox *mailbox;

    mailbox = kmalloc(sizeof(*mailbox), flags);
    if (!mailbox)
        return ERR_PTR(-ENOMEM);

    mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags,
                       &mailbox->dma);
    if (!mailbox->buf) {
        mlx5_core_dbg(dev, "failed allocation\n");
        kfree(mailbox);
        return ERR_PTR(-ENOMEM);
    }
    mailbox->next = NULL;

    return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
             struct mlx5_cmd_mailbox *mailbox)
{
    dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
    kfree(mailbox);
}

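/* The mailbox chain is built back to front: each new block is linked in
 * front of the previous head, so its DMA next pointer and descending
 * block_num are both known at the time it is initialized.
 */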
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
                           gfp_t flags, int size,
                           u8 token)
{
    struct mlx5_cmd_mailbox *tmp, *head = NULL;
    struct mlx5_cmd_prot_block *block;
    struct mlx5_cmd_msg *msg;
    int err;
    int n;
    int i;

    msg = kzalloc(sizeof(*msg), flags);
    if (!msg)
        return ERR_PTR(-ENOMEM);

    msg->len = size;
    n = mlx5_calc_cmd_blocks(msg);

    for (i = 0; i < n; i++) {
        tmp = alloc_cmd_box(dev, flags);
        if (IS_ERR(tmp)) {
            mlx5_core_warn(dev, "failed allocating block\n");
            err = PTR_ERR(tmp);
            goto err_alloc;
        }

        block = tmp->buf;
        tmp->next = head;
        block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
        block->block_num = cpu_to_be32(n - i - 1);
        block->token = token;
        head = tmp;
    }
    msg->next = head;
    return msg;

err_alloc:
    while (head) {
        tmp = head->next;
        free_cmd_box(dev, head);
        head = tmp;
    }
    kfree(msg);

    return ERR_PTR(err);
}

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
                  struct mlx5_cmd_msg *msg)
{
    struct mlx5_cmd_mailbox *head = msg->next;
    struct mlx5_cmd_mailbox *next;

    while (head) {
        next = head->next;
        free_cmd_box(dev, head);
        head = next;
    }
    kfree(msg);
}

1431 static ssize_t data_write(struct file *filp, const char __user *buf,
1432               size_t count, loff_t *pos)
1433 {
1434     struct mlx5_core_dev *dev = filp->private_data;
1435     struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1436     void *ptr;
1437 
1438     if (*pos != 0)
1439         return -EINVAL;
1440 
1441     kfree(dbg->in_msg);
1442     dbg->in_msg = NULL;
1443     dbg->inlen = 0;
1444     ptr = memdup_user(buf, count);
1445     if (IS_ERR(ptr))
1446         return PTR_ERR(ptr);
1447     dbg->in_msg = ptr;
1448     dbg->inlen = count;
1449 
1450     *pos = count;
1451 
1452     return count;
1453 }
1454 
1455 static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
1456              loff_t *pos)
1457 {
1458     struct mlx5_core_dev *dev = filp->private_data;
1459     struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1460 
1461     if (!dbg->out_msg)
1462         return -ENOMEM;
1463 
1464     return simple_read_from_buffer(buf, count, pos, dbg->out_msg,
1465                        dbg->outlen);
1466 }
1467 
1468 static const struct file_operations dfops = {
1469     .owner  = THIS_MODULE,
1470     .open   = simple_open,
1471     .write  = data_write,
1472     .read   = data_read,
1473 };
1474 
1475 static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
1476                loff_t *pos)
1477 {
1478     struct mlx5_core_dev *dev = filp->private_data;
1479     struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1480     char outlen[8];
1481     int err;
1482 
1483     err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
1484     if (err < 0)
1485         return err;
1486 
1487     return simple_read_from_buffer(buf, count, pos, outlen, err);
1488 }
1489 
1490 static ssize_t outlen_write(struct file *filp, const char __user *buf,
1491                 size_t count, loff_t *pos)
1492 {
1493     struct mlx5_core_dev *dev = filp->private_data;
1494     struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1495     char outlen_str[8] = {0};
1496     int outlen;
1497     void *ptr;
1498     int err;
1499 
1500     if (*pos != 0 || count > 6)
1501         return -EINVAL;
1502 
1503     kfree(dbg->out_msg);
1504     dbg->out_msg = NULL;
1505     dbg->outlen = 0;
1506 
1507     if (copy_from_user(outlen_str, buf, count))
1508         return -EFAULT;
1509 
1510     err = sscanf(outlen_str, "%d", &outlen);
1511     if (err < 0)
1512         return err;
1513 
1514     ptr = kzalloc(outlen, GFP_KERNEL);
1515     if (!ptr)
1516         return -ENOMEM;
1517 
1518     dbg->out_msg = ptr;
1519     dbg->outlen = outlen;
1520 
1521     *pos = count;
1522 
1523     return count;
1524 }
1525 
1526 static const struct file_operations olfops = {
1527     .owner  = THIS_MODULE,
1528     .open   = simple_open,
1529     .write  = outlen_write,
1530     .read   = outlen_read,
1531 };
1532 
1533 static void set_wqname(struct mlx5_core_dev *dev)
1534 {
1535     struct mlx5_cmd *cmd = &dev->cmd;
1536 
1537     snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
1538          dev_name(dev->device));
1539 }
1540 
1541 static void clean_debug_files(struct mlx5_core_dev *dev)
1542 {
1543     struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1544 
1545     if (!mlx5_debugfs_root)
1546         return;
1547 
1548     mlx5_cmdif_debugfs_cleanup(dev);
1549     debugfs_remove_recursive(dbg->dbg_root);
1550 }
1551 
1552 static void create_debugfs_files(struct mlx5_core_dev *dev)
1553 {
1554     struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1555 
1556     dbg->dbg_root = debugfs_create_dir("cmd", mlx5_debugfs_get_dev_root(dev));
1557 
1558     debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
1559     debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
1560     debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops);
1561     debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status);
1562     debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
1563 
1564     mlx5_cmdif_debugfs_init(dev);
1565 }
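/*
 * Editor's sketch (hedged; the debugfs path is an assumption): the files
 * created above form a small raw-command ABI, typically rooted at
 * /sys/kernel/debug/mlx5/<pci-id>/cmd/. Userspace would drive it roughly
 * like this:
 *
 *	fd = open(".../cmd/in", O_WRONLY);
 *	write(fd, inbox, inbox_len);        // stored by data_write() above
 *	// write the expected outbox size to "out_len" (olfops),
 *	// trigger execution via "run" (fops, defined earlier in this file),
 *	// then read the outbox back from "out" (data_read()).
 */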
1566 
1567 void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
1568 {
1569     struct mlx5_cmd *cmd = &dev->cmd;
1570     int i;
1571 
1572     for (i = 0; i < cmd->max_reg_cmds; i++)
1573         down(&cmd->sem);
1574     down(&cmd->pages_sem);
1575 
1576     cmd->allowed_opcode = opcode;
1577 
1578     up(&cmd->pages_sem);
1579     for (i = 0; i < cmd->max_reg_cmds; i++)
1580         up(&cmd->sem);
1581 }
1582 
1583 static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
1584 {
1585     struct mlx5_cmd *cmd = &dev->cmd;
1586     int i;
1587 
1588     for (i = 0; i < cmd->max_reg_cmds; i++)
1589         down(&cmd->sem);
1590     down(&cmd->pages_sem);
1591 
1592     cmd->mode = mode;
1593 
1594     up(&cmd->pages_sem);
1595     for (i = 0; i < cmd->max_reg_cmds; i++)
1596         up(&cmd->sem);
1597 }
1598 
1599 static int cmd_comp_notifier(struct notifier_block *nb,
1600                  unsigned long type, void *data)
1601 {
1602     struct mlx5_core_dev *dev;
1603     struct mlx5_cmd *cmd;
1604     struct mlx5_eqe *eqe;
1605 
1606     cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb);
1607     dev = container_of(cmd, struct mlx5_core_dev, cmd);
1608     eqe = data;
1609 
1610     mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
1611 
1612     return NOTIFY_OK;
1613 }

1614 void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
1615 {
1616     MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
1617     mlx5_eq_notifier_register(dev, &dev->cmd.nb);
1618     mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
1619 }
1620 
1621 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
1622 {
1623     mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
1624     mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
1625 }
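/*
 * Editor's sketch (hypothetical call ordering, hedged): the driver starts
 * in polling mode and is expected to flip to event-driven completions only
 * once the command-completion EQ exists, and back again before teardown:
 *
 *	mlx5_cmd_init(dev);		// cmd->mode == CMD_MODE_POLLING
 *	// ... create EQs ...
 *	mlx5_cmd_use_events(dev);	// completions via cmd_comp_notifier()
 *	// ... on teardown, before destroying the EQs ...
 *	mlx5_cmd_use_polling(dev);
 */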
1626 
1627 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
1628 {
1629     unsigned long flags;
1630 
1631     if (msg->parent) {
1632         spin_lock_irqsave(&msg->parent->lock, flags);
1633         list_add_tail(&msg->list, &msg->parent->head);
1634         spin_unlock_irqrestore(&msg->parent->lock, flags);
1635     } else {
1636         mlx5_free_cmd_msg(dev, msg);
1637     }
1638 }
1639 
1640 static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
1641 {
1642     struct mlx5_cmd *cmd = &dev->cmd;
1643     struct mlx5_cmd_work_ent *ent;
1644     mlx5_cmd_cbk_t callback;
1645     void *context;
1646     int err;
1647     int i;
1648     s64 ds;
1649     struct mlx5_cmd_stats *stats;
1650     unsigned long flags;
1651     unsigned long vector;
1652 
1653     /* there can be at most 32 command queue entries */
1654     vector = vec & 0xffffffff;
1655     for (i = 0; i < (1 << cmd->log_sz); i++) {
1656         if (test_bit(i, &vector)) {
1657             ent = cmd->ent_arr[i];
1658 
1659             /* if we already completed the command, ignore it */
1660             if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
1661                         &ent->state)) {
1662                 /* only real completion can free the cmd slot */
1663                 if (!forced) {
1664                     mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
1665                               ent->idx);
1666                     cmd_ent_put(ent);
1667                 }
1668                 continue;
1669             }
1670 
1671             if (ent->callback && cancel_delayed_work(&ent->cb_timeout_work))
1672                 cmd_ent_put(ent); /* timeout work was canceled */
1673 
1674             if (!forced || /* Real FW completion */
1675                 pci_channel_offline(dev->pdev) || /* FW is inaccessible */
1676                 dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
1677                 cmd_ent_put(ent);
1678 
1679             ent->ts2 = ktime_get_ns();
1680             memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
1681             dump_command(dev, ent, 0);
1682 
1683             if (vec & MLX5_TRIGGERED_CMD_COMP)
1684                 ent->ret = -ENXIO;
1685 
1686             if (!ent->ret) { /* Command completed by FW */
1687                 if (!cmd->checksum_disabled)
1688                     ent->ret = verify_signature(ent);
1689 
1690                 ent->status = ent->lay->status_own >> 1;
1691 
1692                 mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
1693                           ent->ret, deliv_status_to_str(ent->status), ent->status);
1694             }
1695 
1696             if (ent->callback) {
1697                 ds = ent->ts2 - ent->ts1;
1698                 if (ent->op < MLX5_CMD_OP_MAX) {
1699                     stats = &cmd->stats[ent->op];
1700                     spin_lock_irqsave(&stats->lock, flags);
1701                     stats->sum += ds;
1702                     ++stats->n;
1703                     spin_unlock_irqrestore(&stats->lock, flags);
1704                 }
1705 
1706                 callback = ent->callback;
1707                 context = ent->context;
1708                 err = ent->ret ? : ent->status;
1709                 if (err > 0) /* Failed in FW, command didn't execute */
1710                     err = deliv_status_to_err(err);
1711 
1712                 if (!err)
1713                     err = mlx5_copy_from_msg(ent->uout,
1714                                  ent->out,
1715                                  ent->uout_size);
1716 
1717                 mlx5_free_cmd_msg(dev, ent->out);
1718                 free_msg(dev, ent->in);
1719 
1720                 /* final consumer is done, release ent */
1721                 cmd_ent_put(ent);
1722                 callback(err, context);
1723             } else {
1724                 /* release wait_func() so mlx5_cmd_invoke()
1725                  * can make the final ent_put()
1726                  */
1727                 complete(&ent->done);
1728             }
1729         }
1730     }
1731 }
1732 
1733 static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
1734 {
1735     struct mlx5_cmd *cmd = &dev->cmd;
1736     unsigned long bitmask;
1737     unsigned long flags;
1738     u64 vector;
1739     int i;
1740 
1741     /* wait for pending handlers to complete */
1742     mlx5_eq_synchronize_cmd_irq(dev);
1743     spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
1744     vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
1745     if (!vector)
1746         goto no_trig;
1747 
1748     bitmask = vector;
1749     /* we must increment the allocated entries refcount before triggering the completions
1750      * to guarantee pending commands will not get freed in the meantime.
1751      * For that reason, it also has to be done inside the alloc_lock.
1752      */
1753     for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
1754         cmd_ent_get(cmd->ent_arr[i]);
1755     vector |= MLX5_TRIGGERED_CMD_COMP;
1756     spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
1757 
1758     mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
1759     mlx5_cmd_comp_handler(dev, vector, true);
1760     for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
1761         cmd_ent_put(cmd->ent_arr[i]);
1762     return;
1763 
1764 no_trig:
1765     spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
1766 }
1767 
1768 void mlx5_cmd_flush(struct mlx5_core_dev *dev)
1769 {
1770     struct mlx5_cmd *cmd = &dev->cmd;
1771     int i;
1772 
1773     for (i = 0; i < cmd->max_reg_cmds; i++)
1774         while (down_trylock(&cmd->sem))
1775             mlx5_cmd_trigger_completions(dev);
1776 
1777     while (down_trylock(&cmd->pages_sem))
1778         mlx5_cmd_trigger_completions(dev);
1779 
1780     /* Unlock cmdif */
1781     up(&cmd->pages_sem);
1782     for (i = 0; i < cmd->max_reg_cmds; i++)
1783         up(&cmd->sem);
1784 }
1785 
1786 static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
1787                       gfp_t gfp)
1788 {
1789     struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
1790     struct cmd_msg_cache *ch = NULL;
1791     struct mlx5_cmd *cmd = &dev->cmd;
1792     int i;
1793 
1794     if (in_size <= 16)
1795         goto cache_miss;
1796 
1797     for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
1798         ch = &cmd->cache[i];
1799         if (in_size > ch->max_inbox_size)
1800             continue;
1801         spin_lock_irq(&ch->lock);
1802         if (list_empty(&ch->head)) {
1803             spin_unlock_irq(&ch->lock);
1804             continue;
1805         }
1806         msg = list_entry(ch->head.next, typeof(*msg), list);
1807         /* For cached lists, we must explicitly state
1808          * the real size of the message
1809          */
1810         msg->len = in_size;
1811         list_del(&msg->list);
1812         spin_unlock_irq(&ch->lock);
1813         break;
1814     }
1815 
1816     if (!IS_ERR(msg))
1817         return msg;
1818 
1819 cache_miss:
1820     msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
1821     return msg;
1822 }
1823 
1824 static int is_manage_pages(void *in)
1825 {
1826     return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
1827 }
1828 
1829 /*  Notes:
1830  *    1. Callback functions may not sleep
1831  *    2. Page queue commands do not support asynchronous completion
1832  */
1833 static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
1834             int out_size, mlx5_cmd_cbk_t callback, void *context,
1835             bool force_polling)
1836 {
1837     u16 opcode = MLX5_GET(mbox_in, in, opcode);
1838     struct mlx5_cmd_msg *inb, *outb;
1839     int pages_queue;
1840     gfp_t gfp;
1841     u8 token;
1842     int err;
1843 
1844     if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
1845         return -ENXIO;
1846 
1847     pages_queue = is_manage_pages(in);
1848     gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
1849 
1850     inb = alloc_msg(dev, in_size, gfp);
1851     if (IS_ERR(inb)) {
1852         err = PTR_ERR(inb);
1853         return err;
1854     }
1855 
1856     token = alloc_token(&dev->cmd);
1857 
1858     err = mlx5_copy_to_msg(inb, in, in_size, token);
1859     if (err) {
1860         mlx5_core_warn(dev, "err %d\n", err);
1861         goto out_in;
1862     }
1863 
1864     outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
1865     if (IS_ERR(outb)) {
1866         err = PTR_ERR(outb);
1867         goto out_in;
1868     }
1869 
1870     err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
1871                   pages_queue, token, force_polling);
1872     if (callback)
1873         return err;
1874 
1875     if (err > 0) /* Failed in FW, command didn't execute */
1876         err = deliv_status_to_err(err);
1877 
1878     if (err)
1879         goto out_out;
1880 
1881     /* command completed by FW */
1882     err = mlx5_copy_from_msg(out, outb, out_size);
1883 out_out:
1884     mlx5_free_cmd_msg(dev, outb);
1885 out_in:
1886     free_msg(dev, inb);
1887     return err;
1888 }
1889 
1890 static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
1891                u32 syndrome, int err)
1892 {
1893     struct mlx5_cmd_stats *stats;
1894 
1895     if (!err)
1896         return;
1897 
1898     stats = &dev->cmd.stats[opcode];
1899     spin_lock_irq(&stats->lock);
1900     stats->failed++;
1901     if (err < 0)
1902         stats->last_failed_errno = -err;
1903     if (err == -EREMOTEIO) {
1904         stats->failed_mbox_status++;
1905         stats->last_failed_mbox_status = status;
1906         stats->last_failed_syndrome = syndrome;
1907     }
1908     spin_unlock_irq(&stats->lock);
1909 }
1910 
1911 /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
1912 static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *out)
1913 {
1914     u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
1915     u8 status = MLX5_GET(mbox_out, out, status);
1916 
1917     if (err == -EREMOTEIO) /* -EREMOTEIO is reserved for outbox.status != OK */
1918         err = -EIO;
1919 
1920     if (!err && status != MLX5_CMD_STAT_OK)
1921         err = -EREMOTEIO;
1922 
1923     cmd_status_log(dev, opcode, status, syndrome, err);
1924     return err;
1925 }
1926 
1927 /**
1928  * mlx5_cmd_do - Executes a FW command and waits for completion.
1929  * Unlike mlx5_cmd_exec, this function will not translate or intercept
1930  * outbox.status and will return -EREMOTEIO when
1931  * outbox.status != MLX5_CMD_STAT_OK
1932  *
1933  * @dev: mlx5 core device
1934  * @in: inbox mlx5_ifc command buffer
1935  * @in_size: inbox buffer size
1936  * @out: outbox mlx5_ifc buffer
1937  * @out_size: outbox size
1938  *
1939  * @return:
1940  * -EREMOTEIO : Command executed by FW, outbox.status != MLX5_CMD_STAT_OK.
1941  *              Caller must check FW outbox status.
1942  *   0 : Command execution successful, outbox.status == MLX5_CMD_STAT_OK.
1943  * < 0 : Command execution couldn't be performed by firmware or driver
1944  */
1945 int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size)
1946 {
1947     int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
1948     u16 opcode = MLX5_GET(mbox_in, in, opcode);
1949 
1950     err = cmd_status_err(dev, err, opcode, out);
1951     return err;
1952 }
1953 EXPORT_SYMBOL(mlx5_cmd_do);
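/*
 * Editor's sketch (hedged illustration, not driver code): a typical
 * mlx5_cmd_do() call site built with the mlx5_ifc accessors. ENABLE_HCA
 * is only an example opcode; the -EREMOTEIO branch is where the caller
 * owns interpreting the raw FW status and syndrome.
 */
static int example_enable_hca(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
	int err;

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	err = mlx5_cmd_do(dev, in, sizeof(in), out, sizeof(out));
	if (err == -EREMOTEIO)	/* FW executed, outbox.status != OK */
		mlx5_core_warn(dev, "status 0x%x, syndrome 0x%x\n",
			       MLX5_GET(mbox_out, out, status),
			       MLX5_GET(mbox_out, out, syndrome));
	return err;
}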
1954 
1955 /**
1956  * mlx5_cmd_exec - Executes a FW command and waits for completion
1957  *
1958  * @dev: mlx5 core device
1959  * @in: inbox mlx5_ifc command buffer
1960  * @in_size: inbox buffer size
1961  * @out: outbox mlx5_ifc buffer
1962  * @out_size: outbox size
1963  *
1964  * @return: 0 if no error, FW command execution was successful
1965  *          and outbox status is ok.
1966  */
1967 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
1968           int out_size)
1969 {
1970     int err = mlx5_cmd_do(dev, in, in_size, out, out_size);
1971 
1972     return mlx5_cmd_check(dev, err, in, out);
1973 }
1974 EXPORT_SYMBOL(mlx5_cmd_exec);
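/*
 * Editor's note (hedged): mlx5_cmd_exec() has the same call shape as
 * mlx5_cmd_do() in the sketch above, but mlx5_cmd_check() has already
 * folded a bad outbox.status into the errno, so callers only test for
 * a negative return:
 *
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 *	if (err)
 *		return err;
 */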
1975 
1976 /**
1977  * mlx5_cmd_exec_polling - Executes a FW command and polls for completion.
1978  *  Needed for driver force teardown, when the command completion EQ
1979  *  is not available to complete the command.
1980  *
1981  * @dev: mlx5 core device
1982  * @in: inbox mlx5_ifc command buffer
1983  * @in_size: inbox buffer size
1984  * @out: outbox mlx5_ifc buffer
1985  * @out_size: outbox size
1986  *
1987  * @return: 0 if no error, FW command execution was successful
1988  *          and outbox status is ok.
1989  */
1990 int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
1991               void *out, int out_size)
1992 {
1993     int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
1994     u16 opcode = MLX5_GET(mbox_in, in, opcode);
1995 
1996     err = cmd_status_err(dev, err, opcode, out);
1997     return mlx5_cmd_check(dev, err, in, out);
1998 }
1999 EXPORT_SYMBOL(mlx5_cmd_exec_polling);
2000 
2001 void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
2002                  struct mlx5_async_ctx *ctx)
2003 {
2004     ctx->dev = dev;
2005     /* Starts at 1 to avoid doing wake_up if we are not cleaning up */
2006     atomic_set(&ctx->num_inflight, 1);
2007     init_waitqueue_head(&ctx->wait);
2008 }
2009 EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
2010 
2011 /**
2012  * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
2013  * @ctx: The ctx to clean
2014  *
2015  * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
2016  * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
2017  * the call to mlx5_cmd_cleanup_async_ctx().
2018  */
2019 void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
2020 {
2021     atomic_dec(&ctx->num_inflight);
2022     wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
2023 }
2024 EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
2025 
2026 static void mlx5_cmd_exec_cb_handler(int status, void *_work)
2027 {
2028     struct mlx5_async_work *work = _work;
2029     struct mlx5_async_ctx *ctx;
2030 
2031     ctx = work->ctx;
2032     status = cmd_status_err(ctx->dev, status, work->opcode, work->out);
2033     work->user_callback(status, work);
2034     if (atomic_dec_and_test(&ctx->num_inflight))
2035         wake_up(&ctx->wait);
2036 }
2037 
2038 int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
2039              void *out, int out_size, mlx5_async_cbk_t callback,
2040              struct mlx5_async_work *work)
2041 {
2042     int ret;
2043 
2044     work->ctx = ctx;
2045     work->user_callback = callback;
2046     work->opcode = MLX5_GET(mbox_in, in, opcode);
2047     work->out = out;
2048     if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
2049         return -EIO;
2050     ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
2051                mlx5_cmd_exec_cb_handler, work, false);
2052     if (ret && atomic_dec_and_test(&ctx->num_inflight))
2053         wake_up(&ctx->wait);
2054 
2055     return ret;
2056 }
2057 EXPORT_SYMBOL(mlx5_cmd_exec_cb);
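/*
 * Editor's sketch (hedged; the example_* names are hypothetical): the
 * async API brackets a batch of mlx5_cmd_exec_cb() calls between init and
 * cleanup of one mlx5_async_ctx; cleanup blocks until every callback has
 * run. Callbacks run in completion context and may not sleep (see the
 * Notes above cmd_exec()).
 */
static void example_done(int status, struct mlx5_async_work *work)
{
	/* recover the containing request with container_of(work, ...) */
}

static int example_async_batch(struct mlx5_core_dev *dev, void *in,
			       int in_size, void *out, int out_size,
			       struct mlx5_async_work *work)
{
	struct mlx5_async_ctx ctx;
	int err;

	mlx5_cmd_init_async_ctx(dev, &ctx);
	err = mlx5_cmd_exec_cb(&ctx, in, in_size, out, out_size,
			       example_done, work);
	/* on submission failure, example_done() is never invoked */
	mlx5_cmd_cleanup_async_ctx(&ctx);	/* waits for example_done() */
	return err;
}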
2058 
2059 static void destroy_msg_cache(struct mlx5_core_dev *dev)
2060 {
2061     struct cmd_msg_cache *ch;
2062     struct mlx5_cmd_msg *msg;
2063     struct mlx5_cmd_msg *n;
2064     int i;
2065 
2066     for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
2067         ch = &dev->cmd.cache[i];
2068         list_for_each_entry_safe(msg, n, &ch->head, list) {
2069             list_del(&msg->list);
2070             mlx5_free_cmd_msg(dev, msg);
2071         }
2072     }
2073 }
2074 
2075 static unsigned int cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
2076     512, 32, 16, 8, 2
2077 };
2078 
2079 static unsigned int cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
2080     16 + MLX5_CMD_DATA_BLOCK_SIZE,
2081     16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
2082     16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
2083     16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
2084     16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
2085 };
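/*
 * Editor's note (worked example): each tier is 16 inline bytes plus a
 * whole number of MLX5_CMD_DATA_BLOCK_SIZE mailbox blocks. With 512-byte
 * blocks, a 700-byte inbox skips tier 0 (16 + 512 = 528) and is served
 * from tier 1 (16 + 1024 = 1040); anything larger than the last tier
 * always takes the cache_miss path in alloc_msg().
 */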
2086 
2087 static void create_msg_cache(struct mlx5_core_dev *dev)
2088 {
2089     struct mlx5_cmd *cmd = &dev->cmd;
2090     struct cmd_msg_cache *ch;
2091     struct mlx5_cmd_msg *msg;
2092     int i;
2093     int k;
2094 
2095     /* Initialize and fill the caches with initial entries */
2096     for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
2097         ch = &cmd->cache[k];
2098         spin_lock_init(&ch->lock);
2099         INIT_LIST_HEAD(&ch->head);
2100         ch->num_ent = cmd_cache_num_ent[k];
2101         ch->max_inbox_size = cmd_cache_ent_size[k];
2102         for (i = 0; i < ch->num_ent; i++) {
2103             msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
2104                          ch->max_inbox_size, 0);
2105             if (IS_ERR(msg))
2106                 break;
2107             msg->parent = ch;
2108             list_add_tail(&msg->list, &ch->head);
2109         }
2110     }
2111 }
2112 
2113 static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
2114 {
2115     cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE,
2116                         &cmd->alloc_dma, GFP_KERNEL);
2117     if (!cmd->cmd_alloc_buf)
2118         return -ENOMEM;
2119 
2120     /* use the buffer as is if it is already 4K aligned */
2121     if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
2122         cmd->cmd_buf = cmd->cmd_alloc_buf;
2123         cmd->dma = cmd->alloc_dma;
2124         cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
2125         return 0;
2126     }
2127 
2128     dma_free_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
2129               cmd->alloc_dma);
2130     cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev),
2131                         2 * MLX5_ADAPTER_PAGE_SIZE - 1,
2132                         &cmd->alloc_dma, GFP_KERNEL);
2133     if (!cmd->cmd_alloc_buf)
2134         return -ENOMEM;
2135 
2136     cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
2137     cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
2138     cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
2139     return 0;
2140 }
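/*
 * Editor's note (worked example of the fallback above): allocating
 * 2 * MLX5_ADAPTER_PAGE_SIZE - 1 bytes guarantees an aligned 4K window.
 * E.g. if the allocation lands at 0x...1200, PTR_ALIGN() yields 0x...2000,
 * and 0x...2000 + 0x1000 still fits within 0x...1200 + 0x1fff, so the
 * aligned buffer can never overrun the allocation.
 */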
2141 
2142 static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
2143 {
2144     dma_free_coherent(mlx5_core_dma_dev(dev), cmd->alloc_size, cmd->cmd_alloc_buf,
2145               cmd->alloc_dma);
2146 }
2147 
2148 static u16 cmdif_rev(struct mlx5_core_dev *dev)
2149 {
2150     return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
2151 }
2152 
2153 int mlx5_cmd_init(struct mlx5_core_dev *dev)
2154 {
2155     int size = sizeof(struct mlx5_cmd_prot_block);
2156     int align = roundup_pow_of_two(size);
2157     struct mlx5_cmd *cmd = &dev->cmd;
2158     u32 cmd_h, cmd_l;
2159     u16 cmd_if_rev;
2160     int err;
2161     int i;
2162 
2163     memset(cmd, 0, sizeof(*cmd));
2164     cmd_if_rev = cmdif_rev(dev);
2165     if (cmd_if_rev != CMD_IF_REV) {
2166         mlx5_core_err(dev,
2167                   "Driver cmdif rev(%d) differs from firmware's(%d)\n",
2168                   CMD_IF_REV, cmd_if_rev);
2169         return -EINVAL;
2170     }
2171 
2172     cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL);
2173     if (!cmd->stats)
2174         return -ENOMEM;
2175 
2176     cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
2177     if (!cmd->pool) {
2178         err = -ENOMEM;
2179         goto dma_pool_err;
2180     }
2181 
2182     err = alloc_cmd_page(dev, cmd);
2183     if (err)
2184         goto err_free_pool;
2185 
2186     cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
2187     cmd->log_sz = cmd_l >> 4 & 0xf;
2188     cmd->log_stride = cmd_l & 0xf;
2189     if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
2190         mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
2191                   1 << cmd->log_sz);
2192         err = -EINVAL;
2193         goto err_free_page;
2194     }
2195 
2196     if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
2197         mlx5_core_err(dev, "command queue size overflow\n");
2198         err = -EINVAL;
2199         goto err_free_page;
2200     }
2201 
2202     cmd->state = MLX5_CMDIF_STATE_DOWN;
2203     cmd->checksum_disabled = 1;
2204     cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
2205     cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
2206 
2207     cmd->cmdif_rev = cmdif_rev(dev);
2208     if (cmd->cmdif_rev > CMD_IF_REV) {
2209         mlx5_core_err(dev, "driver does not support the firmware's command interface version. driver %d, firmware %d\n",
2210                   CMD_IF_REV, cmd->cmdif_rev);
2211         err = -EOPNOTSUPP;
2212         goto err_free_page;
2213     }
2214 
2215     spin_lock_init(&cmd->alloc_lock);
2216     spin_lock_init(&cmd->token_lock);
2217     for (i = 0; i < MLX5_CMD_OP_MAX; i++)
2218         spin_lock_init(&cmd->stats[i].lock);
2219 
2220     sema_init(&cmd->sem, cmd->max_reg_cmds);
2221     sema_init(&cmd->pages_sem, 1);
2222 
2223     cmd_h = (u32)((u64)(cmd->dma) >> 32);
2224     cmd_l = (u32)(cmd->dma);
2225     if (cmd_l & 0xfff) {
2226         mlx5_core_err(dev, "invalid command queue address\n");
2227         err = -ENOMEM;
2228         goto err_free_page;
2229     }
2230 
2231     iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
2232     iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);
2233 
2234     /* Make sure firmware sees the complete address before we proceed */
2235     wmb();
2236 
2237     mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
2238 
2239     cmd->mode = CMD_MODE_POLLING;
2240     cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;
2241 
2242     create_msg_cache(dev);
2243 
2244     set_wqname(dev);
2245     cmd->wq = create_singlethread_workqueue(cmd->wq_name);
2246     if (!cmd->wq) {
2247         mlx5_core_err(dev, "failed to create command workqueue\n");
2248         err = -ENOMEM;
2249         goto err_cache;
2250     }
2251 
2252     create_debugfs_files(dev);
2253 
2254     return 0;
2255 
2256 err_cache:
2257     destroy_msg_cache(dev);
2258 
2259 err_free_page:
2260     free_cmd_page(dev, cmd);
2261 
2262 err_free_pool:
2263     dma_pool_destroy(cmd->pool);
2264 dma_pool_err:
2265     kvfree(cmd->stats);
2266     return err;
2267 }
2268 
2269 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
2270 {
2271     struct mlx5_cmd *cmd = &dev->cmd;
2272 
2273     clean_debug_files(dev);
2274     destroy_workqueue(cmd->wq);
2275     destroy_msg_cache(dev);
2276     free_cmd_page(dev, cmd);
2277     dma_pool_destroy(cmd->pool);
2278     kvfree(cmd->stats);
2279 }
2280 
2281 void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
2282             enum mlx5_cmdif_state cmdif_state)
2283 {
2284     dev->cmd.state = cmdif_state;
2285 }
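/*
 * Editor's sketch (hypothetical probe-path ordering, hedged): the command
 * interface comes up DOWN; a caller is expected to pair init/cleanup and
 * flip the cmdif state once firmware is ready:
 *
 *	err = mlx5_cmd_init(dev);
 *	if (err)
 *		return err;
 *	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
 *	// ... device lifetime ...
 *	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
 *	mlx5_cmd_cleanup(dev);
 */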