#include "netxen_nic_hw.h"
#include "netxen_nic.h"

#define NXHAL_VERSION	1

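/*
 * netxen_poll_rsp() - poll the CDRP response register until the firmware
 * posts a response, sleeping 1 ms between reads.  Gives up after
 * NX_OS_CRB_RETRY_COUNT attempts and returns NX_CDRP_RSP_TIMEOUT.
 */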
static u32
netxen_poll_rsp(struct netxen_adapter *adapter)
{
	u32 rsp = NX_CDRP_RSP_OK;
	int timeout = 0;

	do {
		msleep(1);

		if (++timeout > NX_OS_CRB_RETRY_COUNT)
			return NX_CDRP_RSP_TIMEOUT;

		rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
	} while (!NX_CDRP_IS_RSP(rsp));

	return rsp;
}

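/*
 * netxen_issue_cmd() - issue a CDRP command to the firmware.  Takes the API
 * lock, writes the signature, the three request arguments and the command
 * code into the CRB mailbox, then polls for the card's response.  Response
 * arguments are read back only for the cmd->rsp fields the caller pre-set to
 * a non-zero value.  A typical caller (see nx_fw_cmd_query_phy() below) fills
 * a zeroed struct netxen_cmd_args, sets req.cmd and req.arg1..3, marks the
 * rsp fields it wants read back, and checks the return code against
 * NX_RCODE_SUCCESS.
 */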
static u32
netxen_issue_cmd(struct netxen_adapter *adapter, struct netxen_cmd_args *cmd)
{
	u32 rsp;
	u32 signature = 0;
	u32 rcode = NX_RCODE_SUCCESS;

	signature = NX_CDRP_SIGNATURE_MAKE(adapter->ahw.pci_func,
					   NXHAL_VERSION);

	if (netxen_api_lock(adapter))
		return NX_RCODE_TIMEOUT;

	NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);

	NXWR32(adapter, NX_ARG1_CRB_OFFSET, cmd->req.arg1);

	NXWR32(adapter, NX_ARG2_CRB_OFFSET, cmd->req.arg2);

	NXWR32(adapter, NX_ARG3_CRB_OFFSET, cmd->req.arg3);

	NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd->req.cmd));

	rsp = netxen_poll_rsp(adapter);

	if (rsp == NX_CDRP_RSP_TIMEOUT) {
		printk(KERN_ERR "%s: card response timeout.\n",
				netxen_nic_driver_name);

		rcode = NX_RCODE_TIMEOUT;
	} else if (rsp == NX_CDRP_RSP_FAIL) {
		rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);

		printk(KERN_ERR "%s: failed card response code:0x%x\n",
				netxen_nic_driver_name, rcode);
	} else if (rsp == NX_CDRP_RSP_OK) {
		cmd->rsp.cmd = NX_RCODE_SUCCESS;
		if (cmd->rsp.arg2)
			cmd->rsp.arg2 = NXRD32(adapter, NX_ARG2_CRB_OFFSET);
		if (cmd->rsp.arg3)
			cmd->rsp.arg3 = NXRD32(adapter, NX_ARG3_CRB_OFFSET);
	}

	if (cmd->rsp.arg1)
		cmd->rsp.arg1 = NXRD32(adapter, NX_ARG1_CRB_OFFSET);

	netxen_api_unlock(adapter);

	return rcode;
}

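/*
 * Query the firmware for the size and version of the minidump template and
 * cache the results in adapter->mdump.
 */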
static int
netxen_get_minidump_template_size(struct netxen_adapter *adapter)
{
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = NX_CDRP_CMD_TEMP_SIZE;
	memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
	netxen_issue_cmd(adapter, &cmd);
	if (cmd.rsp.cmd != NX_RCODE_SUCCESS) {
		dev_info(&adapter->pdev->dev,
			"Can't get template size %d\n", cmd.rsp.cmd);
		return -EIO;
	}
	adapter->mdump.md_template_size = cmd.rsp.arg2;
	adapter->mdump.md_template_ver = cmd.rsp.arg3;
	return 0;
}

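/*
 * Fetch the minidump template from the firmware.  The template is DMA'd into
 * a temporary coherent buffer and then copied into
 * adapter->mdump.md_template.
 */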
static int
netxen_get_minidump_template(struct netxen_adapter *adapter)
{
	dma_addr_t md_template_addr;
	void *addr;
	u32 size;
	struct netxen_cmd_args cmd;

	size = adapter->mdump.md_template_size;

	if (size == 0) {
		dev_err(&adapter->pdev->dev,
			"Cannot capture minidump template: invalid template size\n");
		return NX_RCODE_INVALID_ARGS;
	}

	addr = dma_alloc_coherent(&adapter->pdev->dev, size,
				  &md_template_addr, GFP_KERNEL);
	if (!addr) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate DMA-able memory for template\n");
		return -ENOMEM;
	}

	memset(&cmd, 0, sizeof(cmd));
	memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
	cmd.req.cmd = NX_CDRP_CMD_GET_TEMP_HDR;
	cmd.req.arg1 = LSD(md_template_addr);
	cmd.req.arg2 = MSD(md_template_addr);
	cmd.req.arg3 |= size;
	netxen_issue_cmd(adapter, &cmd);

	if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) {
		memcpy(adapter->mdump.md_template, addr, size);
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get minidump template, err_code : %d, requested_size : %d, actual_size : %d\n",
			cmd.rsp.cmd, size, cmd.rsp.arg2);
	}
	dma_free_coherent(&adapter->pdev->dev, size, addr, md_template_addr);
	return 0;
}

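/*
 * Verify the minidump template: sum all 32-bit words, fold the carries back
 * in, and return the one's complement.  The caller treats a non-zero result
 * as a corrupt template.
 */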
static u32
netxen_check_template_checksum(struct netxen_adapter *adapter)
{
	u64 sum = 0;
	u32 *buff = adapter->mdump.md_template;
	int count = adapter->mdump.md_template_size / sizeof(uint32_t);

	while (count-- > 0)
		sum += *buff++;
	while (sum >> 32)
		sum = (sum & 0xFFFFFFFF) + (sum >> 32);

	return ~sum;
}

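/*
 * netxen_setup_minidump() - set up firmware minidump support: query the
 * template size, allocate a host buffer, download and checksum the template,
 * and convert it from little-endian to host byte order.
 */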
int
netxen_setup_minidump(struct netxen_adapter *adapter)
{
	int err = 0, i;
	u32 *template, *tmp_buf;

	err = netxen_get_minidump_template_size(adapter);
	if (err) {
		adapter->mdump.fw_supports_md = 0;
		if ((err == NX_RCODE_CMD_INVALID) ||
		    (err == NX_RCODE_CMD_NOT_IMPL)) {
			dev_info(&adapter->pdev->dev,
				"Flashed firmware version does not support minidump, minimum version required is [ %u.%u.%u ]\n",
				NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR,
				NX_MD_SUPPORT_SUBVERSION);
		}
		return err;
	}

	if (!adapter->mdump.md_template_size) {
		dev_err(&adapter->pdev->dev,
			"Invalid template size, should be non-zero\n");
		return -EIO;
	}

	adapter->mdump.md_template =
		kmalloc(adapter->mdump.md_template_size, GFP_KERNEL);
	if (!adapter->mdump.md_template)
		return -ENOMEM;

	err = netxen_get_minidump_template(adapter);
	if (err) {
		if (err == NX_RCODE_CMD_NOT_IMPL)
			adapter->mdump.fw_supports_md = 0;
		goto free_template;
	}

	if (netxen_check_template_checksum(adapter)) {
		dev_err(&adapter->pdev->dev, "Minidump template checksum error\n");
		err = -EIO;
		goto free_template;
	}

	adapter->mdump.md_capture_mask = NX_DUMP_MASK_DEF;

	/* convert the template from little-endian to host byte order */
	tmp_buf = (u32 *) adapter->mdump.md_template;
	template = (u32 *) adapter->mdump.md_template;
	for (i = 0; i < adapter->mdump.md_template_size / sizeof(u32); i++)
		*template++ = __le32_to_cpu(*tmp_buf++);

	adapter->mdump.md_capture_buff = NULL;
	adapter->mdump.fw_supports_md = 1;
	adapter->mdump.md_enabled = 0;

	return err;

free_template:
	kfree(adapter->mdump.md_template);
	adapter->mdump.md_template = NULL;
	return err;
}

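/*
 * Tell the firmware the new MTU for this receive context.  Only issued when
 * the context is active; returns -EIO on firmware failure.
 */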
int
nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
{
	u32 rcode = NX_RCODE_SUCCESS;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = NX_CDRP_CMD_SET_MTU;
	cmd.req.arg1 = recv_ctx->context_id;
	cmd.req.arg2 = mtu;
	cmd.req.arg3 = 0;

	if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
		rcode = netxen_issue_cmd(adapter, &cmd);

	if (rcode != NX_RCODE_SUCCESS)
		return -EIO;

	return 0;
}

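/* Configure speed, duplex and autonegotiation for a GbE port via firmware. */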
int
nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
		       u32 speed, u32 duplex, u32 autoneg)
{
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = NX_CDRP_CMD_CONFIG_GBE_PORT;
	cmd.req.arg1 = speed;
	cmd.req.arg2 = duplex;
	cmd.req.arg3 = autoneg;
	return netxen_issue_cmd(adapter, &cmd);
}

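/*
 * nx_fw_cmd_create_rx_ctx() - create the receive context in firmware.
 * Builds a host request describing all RDS (receive descriptor) and SDS
 * (status descriptor) rings in a DMA buffer, issues
 * NX_CDRP_CMD_CREATE_RX_CTX, and maps the producer/consumer/interrupt CRB
 * registers returned in the card's response.
 */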
static int
nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
{
	void *addr;
	nx_hostrq_rx_ctx_t *prq;
	nx_cardrsp_rx_ctx_t *prsp;
	nx_hostrq_rds_ring_t *prq_rds;
	nx_hostrq_sds_ring_t *prq_sds;
	nx_cardrsp_rds_ring_t *prsp_rds;
	nx_cardrsp_sds_ring_t *prsp_sds;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct netxen_cmd_args cmd;

	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	u64 phys_addr;

	int i, nrds_rings, nsds_rings;
	size_t rq_size, rsp_size;
	u32 cap, reg, val;

	int err;

	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->max_sds_rings;

	rq_size =
		SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
	rsp_size =
		SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);

	addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
				  &hostrq_phys_addr, GFP_KERNEL);
	if (addr == NULL)
		return -ENOMEM;
	prq = addr;

	addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
				  &cardrsp_phys_addr, GFP_KERNEL);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = addr;

	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
	cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);

	if (adapter->flags & NETXEN_FW_MSS_CAP)
		cap |= NX_CAP0_HW_LRO_MSS;

	prq->capabilities[0] = cpu_to_le32(cap);
	prq->host_int_crb_mode =
		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
	prq->host_rds_crb_mode =
		cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = cpu_to_le32(0);

	val = le32_to_cpu(prq->rds_ring_offset) +
		(sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
			le32_to_cpu(prq->rds_ring_offset));

	for (i = 0; i < nrds_rings; i++) {

		rds_ring = &recv_ctx->rds_rings[i];

		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
			le32_to_cpu(prq->sds_ring_offset));

	for (i = 0; i < nsds_rings; i++) {

		sds_ring = &recv_ctx->sds_rings[i];

		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = (u32)(phys_addr >> 32);
	cmd.req.arg2 = (u32)(phys_addr & 0xffffffff);
	cmd.req.arg3 = rq_size;
	cmd.req.cmd = NX_CDRP_CMD_CREATE_RX_CTX;
	err = netxen_issue_cmd(adapter, &cmd);
	if (err) {
		printk(KERN_WARNING
			"Failed to create rx ctx in firmware, err %d\n", err);
		goto out_free_rsp;
	}

	prsp_rds = ((nx_cardrsp_rds_ring_t *)
			&prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(reg - 0x200));
	}

	prsp_sds = ((nx_cardrsp_sds_ring_t *)
			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(reg - 0x200));

		reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
		sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(reg - 0x200));
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
	dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
			  cardrsp_phys_addr);
out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
	return err;
}

static void
nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = recv_ctx->context_id;
	cmd.req.arg2 = NX_DESTROY_CTX_RESET;
	cmd.req.arg3 = 0;
	cmd.req.cmd = NX_CDRP_CMD_DESTROY_RX_CTX;

	if (netxen_issue_cmd(adapter, &cmd)) {
		printk(KERN_WARNING
			"%s: Failed to destroy rx ctx in firmware\n",
			netxen_nic_driver_name);
	}
}

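/*
 * nx_fw_cmd_create_tx_ctx() - create the transmit context in firmware and
 * map the command-ring producer register returned by the card.
 */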
static int
nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
{
	nx_hostrq_tx_ctx_t *prq;
	nx_hostrq_cds_ring_t *prq_cds;
	nx_cardrsp_tx_ctx_t *prsp;
	void *rq_addr, *rsp_addr;
	size_t rq_size, rsp_size;
	u32 temp;
	int err = 0;
	u64 offset, phys_addr;
	dma_addr_t rq_phys_addr, rsp_phys_addr;
	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct netxen_cmd_args cmd;

	rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
	rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
				     &rq_phys_addr, GFP_KERNEL);
	if (!rq_addr)
		return -ENOMEM;

	rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
	rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
				      &rsp_phys_addr, GFP_KERNEL);
	if (!rsp_addr) {
		err = -ENOMEM;
		goto out_free_rq;
	}

	prq = rq_addr;

	prsp = rsp_addr;

	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

	temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
	prq->capabilities[0] = cpu_to_le32(temp);

	prq->host_int_crb_mode =
		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);

	prq->interrupt_ctl = 0;
	prq->msi_index = 0;

	prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);

	offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx);
	prq->cmd_cons_dma_addr = cpu_to_le64(offset);

	prq_cds = &prq->cds_ring;

	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

	phys_addr = rq_phys_addr;
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = (u32)(phys_addr >> 32);
	cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
	cmd.req.arg3 = rq_size;
	cmd.req.cmd = NX_CDRP_CMD_CREATE_TX_CTX;
	err = netxen_issue_cmd(adapter, &cmd);

	if (err == NX_RCODE_SUCCESS) {
		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
		tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(temp - 0x200));
#if 0
		adapter->tx_state =
			le32_to_cpu(prsp->host_ctx_state);
#endif
		adapter->tx_context_id =
			le16_to_cpu(prsp->context_id);
	} else {
		printk(KERN_WARNING
			"Failed to create tx ctx in firmware, err %d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
			  rsp_phys_addr);

out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);

	return err;
}

static void
nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
{
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = adapter->tx_context_id;
	cmd.req.arg2 = NX_DESTROY_CTX_RESET;
	cmd.req.arg3 = 0;
	cmd.req.cmd = NX_CDRP_CMD_DESTROY_TX_CTX;
	if (netxen_issue_cmd(adapter, &cmd)) {
		printk(KERN_WARNING
			"%s: Failed to destroy tx ctx in firmware\n",
			netxen_nic_driver_name);
	}
}

int
nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val)
{
	u32 rcode;
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = reg;
	cmd.req.arg2 = 0;
	cmd.req.arg3 = 0;
	cmd.req.cmd = NX_CDRP_CMD_READ_PHY;
	cmd.rsp.arg1 = 1;
	rcode = netxen_issue_cmd(adapter, &cmd);
	if (rcode != NX_RCODE_SUCCESS)
		return -EIO;

	if (val == NULL)
		return -EIO;

	*val = cmd.rsp.arg1;
	return 0;
}

int
nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val)
{
	u32 rcode;
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = reg;
	cmd.req.arg2 = val;
	cmd.req.arg3 = 0;
	cmd.req.cmd = NX_CDRP_CMD_WRITE_PHY;
	rcode = netxen_issue_cmd(adapter, &cmd);
	if (rcode != NX_RCODE_SUCCESS)
		return -EIO;

	return 0;
}

static u64 ctx_addr_sig_regs[][3] = {
	{NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
	{NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
	{NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
	{NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
};

#define CRB_CTX_ADDR_REG_LO(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][0])
#define CRB_CTX_ADDR_REG_HI(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][2])
#define CRB_CTX_SIGNATURE_REG(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][1])

#define lower32(x)	((u32)((x) & 0xffffffff))
#define upper32(x)	((u32)(((u64)(x) >> 32) & 0xffffffff))

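/*
 * Per-port receive CRB register map (producer, status consumer and interrupt
 * mask registers for each ring), used when the rings are set up through the
 * legacy context interface on P2 hardware.
 */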
static struct netxen_recv_crb recv_crb_registers[] = {
	/* Instance 0 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x100),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x110),
			/* LRO */
			NETXEN_NIC_REG(0x120)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x138),
			NETXEN_NIC_REG_2(0x000),
			NETXEN_NIC_REG_2(0x004),
			NETXEN_NIC_REG_2(0x008),
		},
		/* sw_int_mask: */
		{
			CRB_SW_INT_MASK_0,
			NETXEN_NIC_REG_2(0x044),
			NETXEN_NIC_REG_2(0x048),
			NETXEN_NIC_REG_2(0x04c),
		},
	},
	/* Instance 1 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x144),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x154),
			/* LRO */
			NETXEN_NIC_REG(0x164)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x17c),
			NETXEN_NIC_REG_2(0x020),
			NETXEN_NIC_REG_2(0x024),
			NETXEN_NIC_REG_2(0x028),
		},
		/* sw_int_mask: */
		{
			CRB_SW_INT_MASK_1,
			NETXEN_NIC_REG_2(0x064),
			NETXEN_NIC_REG_2(0x068),
			NETXEN_NIC_REG_2(0x06c),
		},
	},
	/* Instance 2 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x1d8),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x1f8),
			/* LRO */
			NETXEN_NIC_REG(0x208)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x220),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
		/* sw_int_mask: */
		{
			CRB_SW_INT_MASK_2,
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
	},
	/* Instance 3 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x22c),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x23c),
			/* LRO */
			NETXEN_NIC_REG(0x24c)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x264),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
		/* sw_int_mask: */
		{
			CRB_SW_INT_MASK_3,
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
	},
};

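/*
 * netxen_init_old_ctx() - legacy context setup for P2 hardware: fill the
 * host ring-context structure and hand its DMA address to the card through
 * the per-port context address/signature CRB registers.
 */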
static int
netxen_init_old_ctx(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;
	int ring;
	int port = adapter->portnum;
	struct netxen_ring_ctx *hwctx;
	u32 signature;

	tx_ring = adapter->tx_ring;
	recv_ctx = &adapter->recv_ctx;
	hwctx = recv_ctx->hwctx;

	hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
	hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		hwctx->rcv_rings[ring].addr =
			cpu_to_le64(rds_ring->phys_addr);
		hwctx->rcv_rings[ring].size =
			cpu_to_le32(rds_ring->num_desc);
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (ring == 0) {
			hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
			hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
		}
		hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
		hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
		hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
	}
	hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings);

	signature = (adapter->max_sds_rings > 1) ?
		NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;

	NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
			lower32(recv_ctx->phys_addr));
	NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
			upper32(recv_ctx->phys_addr));
	NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
			signature | port);
	return 0;
}

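/*
 * netxen_alloc_hw_resources() - allocate the DMA-coherent ring context and
 * the transmit, receive and status descriptor rings, then create the
 * contexts either through firmware commands or the legacy register interface
 * on P2 hardware.
 */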
int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
{
	void *addr;
	int err = 0;
	int ring;
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;

	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int port = adapter->portnum;

	recv_ctx = &adapter->recv_ctx;
	tx_ring = adapter->tx_ring;

	addr = dma_alloc_coherent(&pdev->dev,
			sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
			&recv_ctx->phys_addr, GFP_KERNEL);
	if (addr == NULL) {
		dev_err(&pdev->dev, "failed to allocate hw context\n");
		return -ENOMEM;
	}

	recv_ctx->hwctx = addr;
	recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
	recv_ctx->hwctx->cmd_consumer_offset =
		cpu_to_le64(recv_ctx->phys_addr +
			sizeof(struct netxen_ring_ctx));
	tx_ring->hw_consumer =
		(__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));

	addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
			&tx_ring->phys_addr, GFP_KERNEL);
	if (addr == NULL) {
		dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
				netdev->name);
		err = -ENOMEM;
		goto err_out_free;
	}

	tx_ring->desc_head = addr;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		addr = dma_alloc_coherent(&adapter->pdev->dev,
				RCV_DESC_RINGSIZE(rds_ring),
				&rds_ring->phys_addr, GFP_KERNEL);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"%s: failed to allocate rds ring [%d]\n",
				netdev->name, ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		rds_ring->desc_head = addr;

		if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
			rds_ring->crb_rcv_producer =
				netxen_get_ioaddr(adapter,
			recv_crb_registers[port].crb_rcv_producer[ring]);
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		addr = dma_alloc_coherent(&adapter->pdev->dev,
				STATUS_DESC_RINGSIZE(sds_ring),
				&sds_ring->phys_addr, GFP_KERNEL);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"%s: failed to allocate sds ring [%d]\n",
				netdev->name, ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		sds_ring->desc_head = addr;

		if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
			sds_ring->crb_sts_consumer =
				netxen_get_ioaddr(adapter,
			recv_crb_registers[port].crb_sts_consumer[ring]);

			sds_ring->crb_intr_mask =
				netxen_get_ioaddr(adapter,
			recv_crb_registers[port].sw_int_mask[ring]);
		}
	}

	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
			goto done;
		err = nx_fw_cmd_create_rx_ctx(adapter);
		if (err)
			goto err_out_free;
		err = nx_fw_cmd_create_tx_ctx(adapter);
		if (err)
			goto err_out_free;
	} else {
		err = netxen_init_old_ctx(adapter);
		if (err)
			goto err_out_free;
	}

done:
	return 0;

err_out_free:
	netxen_free_hw_resources(adapter);
	return err;
}

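/*
 * netxen_free_hw_resources() - tear down the firmware or legacy contexts and
 * free the descriptor rings and ring-context structure allocated by
 * netxen_alloc_hw_resources().
 */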
void netxen_free_hw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;
	int ring;

	int port = adapter->portnum;

	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		if (!test_and_clear_bit(__NX_FW_ATTACHED, &adapter->state))
			goto done;

		nx_fw_cmd_destroy_rx_ctx(adapter);
		nx_fw_cmd_destroy_tx_ctx(adapter);
	} else {
		netxen_api_lock(adapter);
		NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
				NETXEN_CTX_D3_RESET | port);
		netxen_api_unlock(adapter);
	}

	/* Allow DMA queues to drain after the context reset */
	msleep(20);

done:
	recv_ctx = &adapter->recv_ctx;

	if (recv_ctx->hwctx != NULL) {
		dma_free_coherent(&adapter->pdev->dev,
				sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
				recv_ctx->hwctx, recv_ctx->phys_addr);
		recv_ctx->hwctx = NULL;
	}

	tx_ring = adapter->tx_ring;
	if (tx_ring->desc_head != NULL) {
		dma_free_coherent(&adapter->pdev->dev,
				TX_DESC_RINGSIZE(tx_ring),
				tx_ring->desc_head, tx_ring->phys_addr);
		tx_ring->desc_head = NULL;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		if (rds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					RCV_DESC_RINGSIZE(rds_ring),
					rds_ring->desc_head,
					rds_ring->phys_addr);
			rds_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (sds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					STATUS_DESC_RINGSIZE(sds_ring),
					sds_ring->desc_head,
					sds_ring->phys_addr);
			sds_ring->desc_head = NULL;
		}
	}
}