0001
0002
0003
0004
0005
0006
0007 #include <linux/types.h>
0008 #include <linux/io.h>
0009 #include <linux/delay.h>
0010 #include <linux/errno.h>
0011 #include <linux/kernel.h>
0012 #include <linux/slab.h>
0013 #include <linux/string.h>
0014 #include "qed.h"
0015 #include "qed_hsi.h"
0016 #include "qed_hw.h"
0017 #include "qed_init_ops.h"
0018 #include "qed_iro_hsi.h"
0019 #include "qed_reg_addr.h"
0020 #include "qed_sriov.h"
0021
0022 #define QED_INIT_MAX_POLL_COUNT 100
0023 #define QED_INIT_POLL_PERIOD_US 500
0024
0025 static u32 pxp_global_win[] = {
0026 0,
0027 0,
0028 0x1c02,
0029 0x1c80,
0030 0x1d00,
0031 0x1d01,
0032 0x1d02,
0033 0x1d80,
0034 0x1d81,
0035 0x1d82,
0036 0x1e00,
0037 0x1e01,
0038 0x1e80,
0039 0x1f00,
0040 0x1c08,
0041 0,
0042 0,
0043 0,
0044 0,
0045 };
0046
0047
/* Firmware IRO (internal RAM offsets) table.  Published to the rest of
 * the driver through cdev->iro_arr (see qed_init_iro_array()), offset by
 * E4_IRO_ARR_OFFSET.  Each row appears to be a {base, stride, size}
 * triplet -- TODO confirm against the firmware tools.  This table is
 * firmware-generated; do not edit entries by hand.
 */
static const u32 iro_arr[] = {
	0x00000000, 0x00000000, 0x00080000,
	0x00004478, 0x00000008, 0x00080000,
	0x00003288, 0x00000088, 0x00880000,
	0x000058a8, 0x00000020, 0x00200000,
	0x00003188, 0x00000008, 0x00080000,
	0x00000b00, 0x00000008, 0x00040000,
	0x00000a80, 0x00000008, 0x00040000,
	0x00000000, 0x00000008, 0x00020000,
	0x00000080, 0x00000008, 0x00040000,
	0x00000084, 0x00000008, 0x00020000,
	0x00005798, 0x00000004, 0x00040000,
	0x00004e50, 0x00000000, 0x00780000,
	0x00003e40, 0x00000000, 0x00780000,
	0x00004500, 0x00000000, 0x00780000,
	0x00003210, 0x00000000, 0x00780000,
	0x00003b50, 0x00000000, 0x00780000,
	0x00007f58, 0x00000000, 0x00780000,
	0x00005fd8, 0x00000000, 0x00080000,
	0x00007100, 0x00000000, 0x00080000,
	0x0000af20, 0x00000000, 0x00080000,
	0x00004398, 0x00000000, 0x00080000,
	0x0000a5a0, 0x00000000, 0x00080000,
	0x0000bde8, 0x00000000, 0x00080000,
	0x00000020, 0x00000004, 0x00040000,
	0x00005688, 0x00000010, 0x00100000,
	0x0000c210, 0x00000030, 0x00300000,
	0x0000b108, 0x00000038, 0x00380000,
	0x00003d20, 0x00000080, 0x00400000,
	0x0000bf60, 0x00000000, 0x00040000,
	0x00004560, 0x00040080, 0x00040000,
	0x000001f8, 0x00000004, 0x00040000,
	0x00003d60, 0x00000080, 0x00200000,
	0x00008960, 0x00000040, 0x00300000,
	0x0000e840, 0x00000060, 0x00600000,
	0x00004698, 0x00000080, 0x00380000,
	0x000107b8, 0x000000c0, 0x00c00000,
	0x000001f8, 0x00000002, 0x00020000,
	0x0000a260, 0x00000000, 0x01080000,
	0x0000a368, 0x00000008, 0x00080000,
	0x000001c0, 0x00000008, 0x00080000,
	0x000001f8, 0x00000008, 0x00080000,
	0x00000ac0, 0x00000008, 0x00080000,
	0x00002578, 0x00000008, 0x00080000,
	0x000024f8, 0x00000008, 0x00080000,
	0x00000280, 0x00000008, 0x00080000,
	0x00000680, 0x00080018, 0x00080000,
	0x00000b78, 0x00080018, 0x00020000,
	0x0000c600, 0x00000058, 0x003c0000,
	0x00012038, 0x00000020, 0x00100000,
	0x00011b00, 0x00000048, 0x00180000,
	0x00009650, 0x00000050, 0x00200000,
	0x00008b10, 0x00000040, 0x00280000,
	0x000116c0, 0x00000018, 0x00100000,
	0x0000c808, 0x00000048, 0x00380000,
	0x00011790, 0x00000020, 0x00200000,
	0x000046d0, 0x00000080, 0x00100000,
	0x00003618, 0x00000010, 0x00100000,
	0x0000a9e8, 0x00000008, 0x00010000,
	0x000097a0, 0x00000008, 0x00010000,
	0x00011a10, 0x00000008, 0x00010000,
	0x0000e9f8, 0x00000008, 0x00010000,
	0x00012648, 0x00000008, 0x00010000,
	0x000121c8, 0x00000008, 0x00010000,
	0x0000af08, 0x00000030, 0x00100000,
	0x0000d748, 0x00000028, 0x00280000,
	0x00009e68, 0x00000018, 0x00180000,
	0x00009fe8, 0x00000008, 0x00080000,
	0x00013ea8, 0x00000008, 0x00080000,
	0x00012f18, 0x00000018, 0x00180000,
	0x0000dfe8, 0x00500288, 0x00100000,
	0x000131a0, 0x00000138, 0x00280000,
};
0121
0122 void qed_init_iro_array(struct qed_dev *cdev)
0123 {
0124 cdev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET;
0125 }
0126
0127 void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
0128 {
0129 if (rt_offset >= RUNTIME_ARRAY_SIZE) {
0130 DP_ERR(p_hwfn,
0131 "Avoid storing %u in rt_data at index %u!\n",
0132 val, rt_offset);
0133 return;
0134 }
0135
0136 p_hwfn->rt_data.init_val[rt_offset] = val;
0137 p_hwfn->rt_data.b_valid[rt_offset] = true;
0138 }
0139
0140 void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
0141 u32 rt_offset, u32 *p_val, size_t size)
0142 {
0143 size_t i;
0144
0145 if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
0146 DP_ERR(p_hwfn,
0147 "Avoid storing values in rt_data at indices %u-%u!\n",
0148 rt_offset,
0149 (u32)(rt_offset + size - 1));
0150 return;
0151 }
0152
0153 for (i = 0; i < size / sizeof(u32); i++) {
0154 p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
0155 p_hwfn->rt_data.b_valid[rt_offset + i] = true;
0156 }
0157 }
0158
/* Flush the valid portion of the runtime array [rt_offset, rt_offset+size)
 * into the chip at @addr, either register-by-register or via DMAE for
 * wide-bus registers.  Entries are invalidated as they are written.
 */
static int qed_init_rt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 addr, u16 rt_offset, u16 size, bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, j, segment;
	int rc = 0;

	/* Not every runtime entry was filled in; only flush the ones that
	 * were marked valid by qed_init_store_rt_reg()/_agg().
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* When DMAE is not mandated (non-wide-bus), a plain
		 * register write per entry is sufficient.
		 */
		if (!b_must_dmae) {
			qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
			p_valid[i] = false;
			continue;
		}

		/* Extend the segment to the last consecutive valid entry */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(p_init_val + i),
				       addr + (i << 2), segment, NULL);
		if (rc)
			return rc;

		/* Invalidate everything that was just written */
		for (j = i; j < (u32)(i + segment); j++)
			p_valid[j] = false;

		/* Skip the segment; the loop's i++ also skips the invalid
		 * entry that terminated it.
		 */
		i += segment;
	}

	return rc;
}
0205
0206 int qed_init_alloc(struct qed_hwfn *p_hwfn)
0207 {
0208 struct qed_rt_data *rt_data = &p_hwfn->rt_data;
0209
0210 if (IS_VF(p_hwfn->cdev))
0211 return 0;
0212
0213 rt_data->b_valid = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(bool),
0214 GFP_KERNEL);
0215 if (!rt_data->b_valid)
0216 return -ENOMEM;
0217
0218 rt_data->init_val = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(u32),
0219 GFP_KERNEL);
0220 if (!rt_data->init_val) {
0221 kfree(rt_data->b_valid);
0222 rt_data->b_valid = NULL;
0223 return -ENOMEM;
0224 }
0225
0226 return 0;
0227 }
0228
0229 void qed_init_free(struct qed_hwfn *p_hwfn)
0230 {
0231 kfree(p_hwfn->rt_data.init_val);
0232 p_hwfn->rt_data.init_val = NULL;
0233 kfree(p_hwfn->rt_data.b_valid);
0234 p_hwfn->rt_data.b_valid = NULL;
0235 }
0236
0237 static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
0238 struct qed_ptt *p_ptt,
0239 u32 addr,
0240 u32 dmae_data_offset,
0241 u32 size,
0242 const u32 *buf,
0243 bool b_must_dmae,
0244 bool b_can_dmae)
0245 {
0246 int rc = 0;
0247
0248
0249 if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
0250 const u32 *data = buf + dmae_data_offset;
0251 u32 i;
0252
0253 for (i = 0; i < size; i++)
0254 qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
0255 } else {
0256 rc = qed_dmae_host2grc(p_hwfn, p_ptt,
0257 (uintptr_t)(buf + dmae_data_offset),
0258 addr, size, NULL);
0259 }
0260
0261 return rc;
0262 }
0263
0264 static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
0265 struct qed_ptt *p_ptt,
0266 u32 addr, u32 fill_count)
0267 {
0268 static u32 zero_buffer[DMAE_MAX_RW_SIZE];
0269 struct qed_dmae_params params = {};
0270
0271 memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
0272
0273
0274
0275
0276
0277
0278
0279 SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1);
0280 return qed_dmae_host2grc(p_hwfn, p_ptt,
0281 (uintptr_t)(&zero_buffer[0]),
0282 addr, fill_count, ¶ms);
0283 }
0284
0285 static void qed_init_fill(struct qed_hwfn *p_hwfn,
0286 struct qed_ptt *p_ptt,
0287 u32 addr, u32 fill, u32 fill_count)
0288 {
0289 u32 i;
0290
0291 for (i = 0; i < fill_count; i++, addr += sizeof(u32))
0292 qed_wr(p_hwfn, p_ptt, addr, fill);
0293 }
0294
/* Execute an INIT_SRC_ARRAY write command: the payload lives in the
 * firmware's init-value array and may be zipped, a repeated pattern, or
 * a plain dword run.  Returns 0 on success or a negative errno.
 */
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct init_write_op *cmd,
			      bool b_must_dmae, bool b_can_dmae)
{
	u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
	u32 data = le32_to_cpu(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;

	u32 offset, output_len, input_len, max_size;
	struct qed_dev *cdev = p_hwfn->cdev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	int rc = 0;
	u32 size;

	array_data = cdev->fw_data->arr_data;

	/* First dword at the array offset is the array header */
	hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
	data = le32_to_cpu(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
		/* Payload follows the header; decompress into the
		 * per-hwfn unzip buffer before writing to the chip.
		 */
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data,
				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		memset(p_hwfn->unzip_buf, 0, max_size);

		output_len = qed_unzip_data(p_hwfn, input_len,
					    (u8 *)&array_data[offset],
					    max_size, (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						 output_len,
						 p_hwfn->unzip_buf,
						 b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
			rc = -EINVAL;
		}
		break;
	case INIT_ARR_PATTERN:
	{
		/* A pattern of 'size' dwords written 'repeats' times to
		 * consecutive destination ranges.
		 */
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
						 dmae_array_offset + 1,
						 size, array_data,
						 b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		/* Plain run of 'size' dwords following the header */
		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
					 dmae_array_offset + 1,
					 size, array_data,
					 b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}
0365
0366
0367 static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
0368 struct qed_ptt *p_ptt,
0369 struct init_write_op *p_cmd, bool b_can_dmae)
0370 {
0371 u32 data = le32_to_cpu(p_cmd->data);
0372 bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
0373 u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
0374 union init_write_args *arg = &p_cmd->args;
0375 int rc = 0;
0376
0377
0378 if (b_must_dmae && !b_can_dmae) {
0379 DP_NOTICE(p_hwfn,
0380 "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
0381 addr);
0382 return -EINVAL;
0383 }
0384
0385 switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
0386 case INIT_SRC_INLINE:
0387 data = le32_to_cpu(p_cmd->args.inline_val);
0388 qed_wr(p_hwfn, p_ptt, addr, data);
0389 break;
0390 case INIT_SRC_ZEROS:
0391 data = le32_to_cpu(p_cmd->args.zeros_count);
0392 if (b_must_dmae || (b_can_dmae && (data >= 64)))
0393 rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, data);
0394 else
0395 qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
0396 break;
0397 case INIT_SRC_ARRAY:
0398 rc = qed_init_cmd_array(p_hwfn, p_ptt, p_cmd,
0399 b_must_dmae, b_can_dmae);
0400 break;
0401 case INIT_SRC_RUNTIME:
0402 qed_init_rt(p_hwfn, p_ptt, addr,
0403 le16_to_cpu(arg->runtime.offset),
0404 le16_to_cpu(arg->runtime.size),
0405 b_must_dmae);
0406 break;
0407 }
0408
0409 return rc;
0410 }
0411
0412 static inline bool comp_eq(u32 val, u32 expected_val)
0413 {
0414 return val == expected_val;
0415 }
0416
0417 static inline bool comp_and(u32 val, u32 expected_val)
0418 {
0419 return (val & expected_val) == expected_val;
0420 }
0421
0422 static inline bool comp_or(u32 val, u32 expected_val)
0423 {
0424 return (val | expected_val) > 0;
0425 }
0426
0427
0428 static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
0429 struct qed_ptt *p_ptt, struct init_read_op *cmd)
0430 {
0431 bool (*comp_check)(u32 val, u32 expected_val);
0432 u32 delay = QED_INIT_POLL_PERIOD_US, val;
0433 u32 data, addr, poll;
0434 int i;
0435
0436 data = le32_to_cpu(cmd->op_data);
0437 addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
0438 poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
0439
0440 val = qed_rd(p_hwfn, p_ptt, addr);
0441
0442 if (poll == INIT_POLL_NONE)
0443 return;
0444
0445 switch (poll) {
0446 case INIT_POLL_EQ:
0447 comp_check = comp_eq;
0448 break;
0449 case INIT_POLL_OR:
0450 comp_check = comp_or;
0451 break;
0452 case INIT_POLL_AND:
0453 comp_check = comp_and;
0454 break;
0455 default:
0456 DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
0457 cmd->op_data);
0458 return;
0459 }
0460
0461 data = le32_to_cpu(cmd->expected_val);
0462 for (i = 0;
0463 i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data);
0464 i++) {
0465 udelay(delay);
0466 val = qed_rd(p_hwfn, p_ptt, addr);
0467 }
0468
0469 if (i == QED_INIT_MAX_POLL_COUNT) {
0470 DP_ERR(p_hwfn,
0471 "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
0472 addr, le32_to_cpu(cmd->expected_val),
0473 val, le32_to_cpu(cmd->op_data));
0474 }
0475 }
0476
0477
0478 static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
0479 struct qed_ptt *p_ptt,
0480 struct init_callback_op *p_cmd)
0481 {
0482 int rc;
0483
0484 switch (p_cmd->callback_id) {
0485 case DMAE_READY_CB:
0486 rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
0487 break;
0488 default:
0489 DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
0490 p_cmd->callback_id);
0491 return -EINVAL;
0492 }
0493
0494 return rc;
0495 }
0496
0497 static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
0498 u16 *p_offset, int modes)
0499 {
0500 struct qed_dev *cdev = p_hwfn->cdev;
0501 const u8 *modes_tree_buf;
0502 u8 arg1, arg2, tree_val;
0503
0504 modes_tree_buf = cdev->fw_data->modes_tree_buf;
0505 tree_val = modes_tree_buf[(*p_offset)++];
0506 switch (tree_val) {
0507 case INIT_MODE_OP_NOT:
0508 return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
0509 case INIT_MODE_OP_OR:
0510 arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
0511 arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
0512 return arg1 | arg2;
0513 case INIT_MODE_OP_AND:
0514 arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
0515 arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
0516 return arg1 & arg2;
0517 default:
0518 tree_val -= MAX_INIT_MODE_OPS;
0519 return (modes & BIT(tree_val)) ? 1 : 0;
0520 }
0521 }
0522
0523 static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
0524 struct init_if_mode_op *p_cmd, int modes)
0525 {
0526 u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
0527
0528 if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
0529 return 0;
0530 else
0531 return GET_FIELD(le32_to_cpu(p_cmd->op_data),
0532 INIT_IF_MODE_OP_CMD_OFFSET);
0533 }
0534
0535 static u32 qed_init_cmd_phase(struct init_if_phase_op *p_cmd,
0536 u32 phase, u32 phase_id)
0537 {
0538 u32 data = le32_to_cpu(p_cmd->phase_data);
0539 u32 op_data = le32_to_cpu(p_cmd->op_data);
0540
0541 if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
0542 (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
0543 GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
0544 return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
0545 else
0546 return 0;
0547 }
0548
/* Run the firmware init-op program for the given @phase/@phase_id under
 * the @modes bitmask.  Interprets WRITE/READ/IF_MODE/IF_PHASE/DELAY/
 * CALLBACK ops sequentially; IF ops skip forward over commands.
 * Returns 0 on success or the first failing op's negative errno.
 */
int qed_init_run(struct qed_hwfn *p_hwfn,
		 struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
	/* DMAE itself is only brought up during the engine phase, so it
	 * cannot be used until its readiness callback fires (see below).
	 */
	bool b_dmae = (phase != PHASE_ENGINE);
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	int rc = 0;

	num_init_ops = cdev->fw_data->init_ops_size;
	init_ops = cdev->fw_data->init_ops;

	/* Scratch buffer for INIT_ARR_ZIPPED decompression */
	p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
	if (!p_hwfn->unzip_buf)
		return -ENOMEM;

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = le32_to_cpu(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					     b_dmae);
			break;
		case INIT_OP_READ:
			qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;
		case INIT_OP_IF_MODE:
			/* Returns the number of commands to skip (0 on
			 * match); cmd_num++ then steps past the IF op.
			 */
			cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
						     modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += qed_init_cmd_phase(&cmd->if_phase,
						      phase, phase_id);
			break;
		case INIT_OP_DELAY:
			/* Delay value is in microseconds */
			udelay(le32_to_cpu(cmd->delay.delay));
			break;

		case INIT_OP_CALLBACK:
			rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			/* DMAE becomes usable once its sanity callback
			 * has run during the engine phase.
			 */
			if (phase == PHASE_ENGINE &&
			    cmd->callback.callback_id == DMAE_READY_CB)
				b_dmae = true;
			break;
		}

		if (rc)
			break;
	}

	kfree(p_hwfn->unzip_buf);
	p_hwfn->unzip_buf = NULL;
	return rc;
}
0608
0609 void qed_gtt_init(struct qed_hwfn *p_hwfn)
0610 {
0611 u32 gtt_base;
0612 u32 i;
0613
0614
0615 gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
0616
0617 for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
0618 if (pxp_global_win[i])
0619 REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
0620 pxp_global_win[i]);
0621 }
0622
0623 int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
0624 {
0625 struct qed_fw_data *fw = cdev->fw_data;
0626 struct bin_buffer_hdr *buf_hdr;
0627 u32 offset, len;
0628
0629 if (!data) {
0630 DP_NOTICE(cdev, "Invalid fw data\n");
0631 return -EINVAL;
0632 }
0633
0634
0635 buf_hdr = (struct bin_buffer_hdr *)data;
0636
0637 offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
0638 fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
0639
0640 offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
0641 fw->init_ops = (union init_op *)(data + offset);
0642
0643 offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
0644 fw->arr_data = (u32 *)(data + offset);
0645
0646 offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
0647 fw->modes_tree_buf = (u8 *)(data + offset);
0648 len = buf_hdr[BIN_BUF_INIT_CMD].length;
0649 fw->init_ops_size = len / sizeof(struct init_raw_op);
0650
0651 offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset;
0652 fw->fw_overlays = (u32 *)(data + offset);
0653 len = buf_hdr[BIN_BUF_INIT_OVERLAYS].length;
0654 fw->fw_overlays_len = len;
0655
0656 return 0;
0657 }