0001
0002
0003
0004 #include <linux/mlx5/driver.h>
0005 #include "lib/tout.h"
0006
/* Per-device timeout table, indexed by enum mlx5_timeouts_types.
 * All entries hold a timeout value in milliseconds.
 */
struct mlx5_timeouts {
	u64 to[MAX_TIMEOUT_TYPES];
};
0010
/* Software default for every timeout type, in milliseconds.
 * These seed the per-device table at init time; some entries are later
 * overridden by firmware-provided values (see mlx5_tout_query_iseg() and
 * mlx5_tout_query_dtor() below).
 */
static const u32 tout_def_sw_val[MAX_TIMEOUT_TYPES] = {
	[MLX5_TO_FW_PRE_INIT_TIMEOUT_MS] = 120000,
	[MLX5_TO_FW_PRE_INIT_ON_RECOVERY_TIMEOUT_MS] = 7200000,
	[MLX5_TO_FW_PRE_INIT_WARN_MESSAGE_INTERVAL_MS] = 20000,
	[MLX5_TO_FW_PRE_INIT_WAIT_MS] = 2,
	[MLX5_TO_FW_INIT_MS] = 2000,
	[MLX5_TO_CMD_MS] = 60000,
	[MLX5_TO_PCI_TOGGLE_MS] = 2000,
	[MLX5_TO_HEALTH_POLL_INTERVAL_MS] = 2000,
	[MLX5_TO_FULL_CRDUMP_MS] = 60000,
	[MLX5_TO_FW_RESET_MS] = 60000,
	[MLX5_TO_FLUSH_ON_ERROR_MS] = 2000,
	[MLX5_TO_PCI_SYNC_UPDATE_MS] = 5000,
	[MLX5_TO_TEARDOWN_MS] = 3000,
	[MLX5_TO_FSM_REACTIVATE_MS] = 5000,
	[MLX5_TO_RECLAIM_PAGES_MS] = 5000,
	[MLX5_TO_RECLAIM_VFS_PAGES_MS] = 120000
};
0029
0030 static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_types type)
0031 {
0032 dev->timeouts->to[type] = val;
0033 }
0034
0035 int mlx5_tout_init(struct mlx5_core_dev *dev)
0036 {
0037 int i;
0038
0039 dev->timeouts = kmalloc(sizeof(*dev->timeouts), GFP_KERNEL);
0040 if (!dev->timeouts)
0041 return -ENOMEM;
0042
0043 for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
0044 tout_set(dev, tout_def_sw_val[i], i);
0045
0046 return 0;
0047 }
0048
/* Free the timeout table allocated by mlx5_tout_init(). */
void mlx5_tout_cleanup(struct mlx5_core_dev *dev)
{
	kfree(dev->timeouts);
}
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064 static u64 tout_convert_reg_field_to_ms(u32 to_mul, u32 to_val)
0065 {
0066 u64 msec = to_val;
0067
0068 to_mul &= 0x3;
0069
0070 if (to_mul)
0071 msec *= 1000 * int_pow(60, to_mul - 1);
0072
0073 return msec;
0074 }
0075
0076 static u64 tout_convert_iseg_to_ms(u32 iseg_to)
0077 {
0078 return tout_convert_reg_field_to_ms(iseg_to >> 29, iseg_to & 0xfffff);
0079 }
0080
0081 static bool tout_is_supported(struct mlx5_core_dev *dev)
0082 {
0083 return !!ioread32be(&dev->iseg->cmd_q_init_to);
0084 }
0085
0086 void mlx5_tout_query_iseg(struct mlx5_core_dev *dev)
0087 {
0088 u32 to;
0089
0090 if (!tout_is_supported(dev))
0091 return;
0092
0093 to = ioread32be(&dev->iseg->cmd_q_init_to);
0094 tout_set(dev, tout_convert_iseg_to_ms(to), MLX5_TO_FW_INIT_MS);
0095
0096 to = ioread32be(&dev->iseg->cmd_exec_to);
0097 tout_set(dev, tout_convert_iseg_to_ms(to), MLX5_TO_CMD_MS);
0098 }
0099
/* Return the current timeout of kind @type for @dev, in milliseconds. */
u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type)
{
	return dev->timeouts->to[type];
}
0104
/* Extract timeout field @fld from a DTOR register query output @reg_out
 * and evaluate to its value converted to milliseconds (u64).
 */
#define MLX5_TIMEOUT_QUERY(fld, reg_out) \
	({ \
	struct mlx5_ifc_default_timeout_bits *time_field; \
	u32 to_multi, to_value; \
	u64 to_val_ms; \
	\
	time_field = MLX5_ADDR_OF(dtor_reg, reg_out, fld); \
	to_multi = MLX5_GET(default_timeout, time_field, to_multiplier); \
	to_value = MLX5_GET(default_timeout, time_field, to_value); \
	to_val_ms = tout_convert_reg_field_to_ms(to_multi, to_value); \
	to_val_ms; \
	})
0117
/* Read timeout field @fld from @reg_out, store it (plus @to_extra ms of
 * headroom) as @dev's timeout of kind @to_type, and evaluate to the raw
 * firmware value in milliseconds so callers can feed it into a
 * subsequent fill as extra headroom.
 */
#define MLX5_TIMEOUT_FILL(fld, reg_out, dev, to_type, to_extra) \
	({ \
	u64 fw_to = MLX5_TIMEOUT_QUERY(fld, reg_out); \
	tout_set(dev, fw_to + (to_extra), to_type); \
	fw_to; \
	})
0124
/* Query the DTOR (default timeouts register) from firmware and override
 * the software defaults with the firmware-provided values.
 *
 * Return: 0 on success, or the error from the register access command.
 */
static int tout_query_dtor(struct mlx5_core_dev *dev)
{
	u64 pcie_toggle_to_val, tear_down_to_val;
	u32 out[MLX5_ST_SZ_DW(dtor_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(dtor_reg)] = {};
	int err;

	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_DTOR, 0, 0);
	if (err)
		return err;

	/* FW reset waits for a PCI toggle internally, so pad the reset
	 * timeout with the PCI toggle timeout.
	 */
	pcie_toggle_to_val = MLX5_TIMEOUT_FILL(pcie_toggle_to, out, dev, MLX5_TO_PCI_TOGGLE_MS, 0);
	MLX5_TIMEOUT_FILL(fw_reset_to, out, dev, MLX5_TO_FW_RESET_MS, pcie_toggle_to_val);

	/* PCI sync update may trigger a teardown, so pad it with the
	 * teardown timeout.
	 */
	tear_down_to_val = MLX5_TIMEOUT_FILL(tear_down_to, out, dev, MLX5_TO_TEARDOWN_MS, 0);
	MLX5_TIMEOUT_FILL(pci_sync_update_to, out, dev, MLX5_TO_PCI_SYNC_UPDATE_MS,
			  tear_down_to_val);

	MLX5_TIMEOUT_FILL(health_poll_to, out, dev, MLX5_TO_HEALTH_POLL_INTERVAL_MS, 0);
	MLX5_TIMEOUT_FILL(full_crdump_to, out, dev, MLX5_TO_FULL_CRDUMP_MS, 0);
	MLX5_TIMEOUT_FILL(flush_on_err_to, out, dev, MLX5_TO_FLUSH_ON_ERROR_MS, 0);
	MLX5_TIMEOUT_FILL(fsm_reactivate_to, out, dev, MLX5_TO_FSM_REACTIVATE_MS, 0);
	MLX5_TIMEOUT_FILL(reclaim_pages_to, out, dev, MLX5_TO_RECLAIM_PAGES_MS, 0);
	MLX5_TIMEOUT_FILL(reclaim_vfs_pages_to, out, dev, MLX5_TO_RECLAIM_VFS_PAGES_MS, 0);

	return 0;
}
0152
/* Load firmware timeout overrides from the DTOR register when the
 * firmware supports them; otherwise keep the software defaults.
 *
 * Return: 0 on success (including the unsupported case), negative errno
 * on register access failure.
 */
int mlx5_tout_query_dtor(struct mlx5_core_dev *dev)
{
	if (!tout_is_supported(dev))
		return 0;

	return tout_query_dtor(dev);
}