0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035 #include <linux/completion.h>
0036 #include <linux/pci.h>
0037 #include <linux/errno.h>
0038 #include <linux/sched.h>
0039 #include <linux/module.h>
0040 #include <linux/slab.h>
0041 #include <asm/io.h>
0042 #include <rdma/ib_mad.h>
0043
0044 #include "mthca_dev.h"
0045 #include "mthca_config_reg.h"
0046 #include "mthca_cmd.h"
0047 #include "mthca_memfree.h"
0048
0049 #define CMD_POLL_TOKEN 0xffff
0050
/*
 * Layout of the HCR (Host Command Register): byte offsets of the
 * fields within the register block, plus bit positions inside the
 * status/opcode dword at HCR_STATUS_OFFSET.
 */
enum {
	HCR_IN_PARAM_OFFSET    = 0x00,	/* 64-bit input parameter */
	HCR_IN_MODIFIER_OFFSET = 0x08,	/* 32-bit input modifier */
	HCR_OUT_PARAM_OFFSET   = 0x0c,	/* 64-bit output parameter */
	HCR_TOKEN_OFFSET       = 0x14,	/* command token (upper 16 bits) */
	HCR_STATUS_OFFSET      = 0x18,	/* status / opcode / go dword */

	HCR_OPMOD_SHIFT        = 12,	/* opcode-modifier field position */
	HCA_E_BIT              = 22,	/* event bit: request EQE on completion */
	HCR_GO_BIT             = 23	/* go bit: HCR currently owned by HW */
};
0062
/* Firmware command opcodes. */
enum {
	/* initialization, teardown and general commands */
	CMD_SYS_EN          = 0x1,
	CMD_SYS_DIS         = 0x2,
	CMD_MAP_FA          = 0xfff,
	CMD_UNMAP_FA        = 0xffe,
	CMD_RUN_FW          = 0xff6,
	CMD_MOD_STAT_CFG    = 0x34,
	CMD_QUERY_DEV_LIM   = 0x3,
	CMD_QUERY_FW        = 0x4,
	CMD_ENABLE_LAM      = 0xff8,
	CMD_DISABLE_LAM     = 0xff7,
	CMD_QUERY_DDR       = 0x5,
	CMD_QUERY_ADAPTER   = 0x6,
	CMD_INIT_HCA        = 0x7,
	CMD_CLOSE_HCA       = 0x8,
	CMD_INIT_IB         = 0x9,
	CMD_CLOSE_IB        = 0xa,
	CMD_QUERY_HCA       = 0xb,
	CMD_SET_IB          = 0xc,
	CMD_ACCESS_DDR      = 0x2e,
	CMD_MAP_ICM         = 0xffa,
	CMD_UNMAP_ICM       = 0xff9,
	CMD_MAP_ICM_AUX     = 0xffc,
	CMD_UNMAP_ICM_AUX   = 0xffb,
	CMD_SET_ICM_SIZE    = 0xffd,

	/* TPT (memory translation and protection) commands */
	CMD_SW2HW_MPT       = 0xd,
	CMD_QUERY_MPT       = 0xe,
	CMD_HW2SW_MPT       = 0xf,
	CMD_READ_MTT        = 0x10,
	CMD_WRITE_MTT       = 0x11,
	CMD_SYNC_TPT        = 0x2f,

	/* EQ (event queue) commands */
	CMD_MAP_EQ          = 0x12,
	CMD_SW2HW_EQ        = 0x13,
	CMD_HW2SW_EQ        = 0x14,
	CMD_QUERY_EQ        = 0x15,

	/* CQ (completion queue) commands */
	CMD_SW2HW_CQ        = 0x16,
	CMD_HW2SW_CQ        = 0x17,
	CMD_QUERY_CQ        = 0x18,
	CMD_RESIZE_CQ       = 0x2c,

	/* SRQ (shared receive queue) commands */
	CMD_SW2HW_SRQ       = 0x35,
	CMD_HW2SW_SRQ       = 0x36,
	CMD_QUERY_SRQ       = 0x37,
	CMD_ARM_SRQ         = 0x40,

	/* QP/EE state-transition commands */
	CMD_RST2INIT_QPEE   = 0x19,
	CMD_INIT2RTR_QPEE   = 0x1a,
	CMD_RTR2RTS_QPEE    = 0x1b,
	CMD_RTS2RTS_QPEE    = 0x1c,
	CMD_SQERR2RTS_QPEE  = 0x1d,
	CMD_2ERR_QPEE       = 0x1e,
	CMD_RTS2SQD_QPEE    = 0x1f,
	CMD_SQD2SQD_QPEE    = 0x38,
	CMD_SQD2RTS_QPEE    = 0x20,
	CMD_ERR2RST_QPEE    = 0x21,
	CMD_QUERY_QPEE      = 0x22,
	CMD_INIT2INIT_QPEE  = 0x2d,
	CMD_SUSPEND_QPEE    = 0x32,
	CMD_UNSUSPEND_QPEE  = 0x33,

	/* special QP and management commands */
	CMD_CONF_SPECIAL_QP = 0x23,
	CMD_MAD_IFC         = 0x24,

	/* multicast commands */
	CMD_READ_MGM        = 0x25,
	CMD_WRITE_MGM       = 0x26,
	CMD_MGID_HASH       = 0x27,

	/* miscellaneous commands */
	CMD_DIAG_RPRT       = 0x30,
	CMD_NOP             = 0x31,

	/* debug commands */
	CMD_QUERY_DEBUG_MSG = 0x2a,
	CMD_SET_DEBUG_MSG   = 0x2b,
};
0148
0149
0150
0151
0152
0153
#if 0
/*
 * Nominal per-class command timeouts (in jiffies), kept for
 * reference but not compiled in.
 */
enum {
	CMD_TIME_CLASS_A = (HZ + 999) / 1000 + 1,
	CMD_TIME_CLASS_B = (HZ + 99) / 100 + 1,
	CMD_TIME_CLASS_C = (HZ + 9) / 10 + 1,
	CMD_TIME_CLASS_D = 60 * HZ
};
#else
/*
 * A generous 60-second timeout is used for every class instead of the
 * nominal values above.
 */
enum {
	CMD_TIME_CLASS_A = 60 * HZ,
	CMD_TIME_CLASS_B = 60 * HZ,
	CMD_TIME_CLASS_C = 60 * HZ,
	CMD_TIME_CLASS_D = 60 * HZ
};
#endif

/* How long to busy-wait for the HCR go bit to clear before giving up. */
enum {
	GO_BIT_TIMEOUT = HZ * 10
};
0177
/* Per-command state for one outstanding event-driven firmware command. */
struct mthca_cmd_context {
	struct completion done;		/* completed by mthca_cmd_event() */
	int               result;	/* result code set on completion */
	int               next;		/* next free-list index; -1 terminates the list */
	u64               out_param;	/* immediate output parameter from the event */
	u16               token;	/* token matched against the completion event */
	u8                status;	/* firmware status byte from the event */
};
0186
/*
 * When nonzero (and the firmware advertises support), post commands
 * through the doorbell page instead of the HCR.
 */
static int fw_cmd_doorbell = 0;
module_param(fw_cmd_doorbell, int, 0644);
MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero "
		 "(and supported by FW)");
0191
0192 static inline int go_bit(struct mthca_dev *dev)
0193 {
0194 return readl(dev->hcr + HCR_STATUS_OFFSET) &
0195 swab32(1 << HCR_GO_BIT);
0196 }
0197
/*
 * Post a command by writing an HCR image through the firmware's
 * command doorbell page.  Each dword goes to the offset the firmware
 * reported in QUERY_FW; a wmb() after every write keeps the device
 * from seeing them out of order.  The go + event dword is written
 * second to last, and offs[7] is cleared last.  Only used in event
 * mode, so no completion status is read here.
 */
static void mthca_cmd_post_dbell(struct mthca_dev *dev,
				 u64 in_param,
				 u64 out_param,
				 u32 in_modifier,
				 u8 op_modifier,
				 u16 op,
				 u16 token)
{
	void __iomem *ptr = dev->cmd.dbell_map;
	u16 *offs = dev->cmd.dbell_offsets;

	__raw_writel((__force u32) cpu_to_be32(in_param >> 32), ptr + offs[0]);
	wmb();
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), ptr + offs[1]);
	wmb();
	__raw_writel((__force u32) cpu_to_be32(in_modifier), ptr + offs[2]);
	wmb();
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32), ptr + offs[3]);
	wmb();
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), ptr + offs[4]);
	wmb();
	__raw_writel((__force u32) cpu_to_be32(token << 16), ptr + offs[5]);
	wmb();
	/* go + event bits: hand the command to the hardware */
	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
					       (1 << HCA_E_BIT)  |
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op), ptr + offs[6]);
	wmb();
	__raw_writel((__force u32) 0, ptr + offs[7]);
	wmb();
}
0229
/*
 * Post a command through the HCR.  In event mode we may have to wait
 * for the go bit left set by a previous command; in polling mode the
 * caller holds poll_sem, so the HCR must already be free.  Returns
 * -EAGAIN if the HCR never becomes available.
 */
static int mthca_cmd_post_hcr(struct mthca_dev *dev,
			      u64 in_param,
			      u64 out_param,
			      u32 in_modifier,
			      u8 op_modifier,
			      u16 op,
			      u16 token,
			      int event)
{
	if (event) {
		unsigned long end = jiffies + GO_BIT_TIMEOUT;

		/* busy-wait (yielding the CPU) for the previous command to be taken */
		while (go_bit(dev) && time_before(jiffies, end)) {
			set_current_state(TASK_RUNNING);
			schedule();
		}
	}

	if (go_bit(dev))
		return -EAGAIN;

	/*
	 * Write the six parameter dwords first, then make sure they are
	 * globally visible before setting the go bit that hands the HCR
	 * to the hardware.
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),           dev->hcr + 0 * 4);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  dev->hcr + 1 * 4);
	__raw_writel((__force u32) cpu_to_be32(in_modifier),              dev->hcr + 2 * 4);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),          dev->hcr + 3 * 4);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16),              dev->hcr + 5 * 4);

	/* __raw_writel may not order writes. */
	wmb();

	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
					       (event ? (1 << HCA_E_BIT) : 0)   |
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op),                       dev->hcr + 6 * 4);

	return 0;
}
0274
0275 static int mthca_cmd_post(struct mthca_dev *dev,
0276 u64 in_param,
0277 u64 out_param,
0278 u32 in_modifier,
0279 u8 op_modifier,
0280 u16 op,
0281 u16 token,
0282 int event)
0283 {
0284 int err = 0;
0285
0286 mutex_lock(&dev->cmd.hcr_mutex);
0287
0288 if (event && dev->cmd.flags & MTHCA_CMD_POST_DOORBELLS && fw_cmd_doorbell)
0289 mthca_cmd_post_dbell(dev, in_param, out_param, in_modifier,
0290 op_modifier, op, token);
0291 else
0292 err = mthca_cmd_post_hcr(dev, in_param, out_param, in_modifier,
0293 op_modifier, op, token, event);
0294
0295 mutex_unlock(&dev->cmd.hcr_mutex);
0296 return err;
0297 }
0298
0299
0300 static int mthca_status_to_errno(u8 status)
0301 {
0302 static const int trans_table[] = {
0303 [MTHCA_CMD_STAT_INTERNAL_ERR] = -EIO,
0304 [MTHCA_CMD_STAT_BAD_OP] = -EPERM,
0305 [MTHCA_CMD_STAT_BAD_PARAM] = -EINVAL,
0306 [MTHCA_CMD_STAT_BAD_SYS_STATE] = -ENXIO,
0307 [MTHCA_CMD_STAT_BAD_RESOURCE] = -EBADF,
0308 [MTHCA_CMD_STAT_RESOURCE_BUSY] = -EBUSY,
0309 [MTHCA_CMD_STAT_DDR_MEM_ERR] = -ENOMEM,
0310 [MTHCA_CMD_STAT_EXCEED_LIM] = -ENOMEM,
0311 [MTHCA_CMD_STAT_BAD_RES_STATE] = -EBADF,
0312 [MTHCA_CMD_STAT_BAD_INDEX] = -EBADF,
0313 [MTHCA_CMD_STAT_BAD_NVMEM] = -EFAULT,
0314 [MTHCA_CMD_STAT_BAD_QPEE_STATE] = -EINVAL,
0315 [MTHCA_CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
0316 [MTHCA_CMD_STAT_REG_BOUND] = -EBUSY,
0317 [MTHCA_CMD_STAT_LAM_NOT_PRE] = -EAGAIN,
0318 [MTHCA_CMD_STAT_BAD_PKT] = -EBADMSG,
0319 [MTHCA_CMD_STAT_BAD_SIZE] = -ENOMEM,
0320 };
0321
0322 if (status >= ARRAY_SIZE(trans_table) ||
0323 (status != MTHCA_CMD_STAT_OK
0324 && trans_table[status] == 0))
0325 return -EINVAL;
0326
0327 return trans_table[status];
0328 }
0329
0330
/*
 * Execute a command in polling mode: post it with the fixed poll
 * token, spin (yielding) until the go bit clears or the timeout
 * expires, then read the immediate result and status straight out of
 * the HCR.  poll_sem serializes pollers, so only one command is in
 * flight at a time.
 */
static int mthca_cmd_poll(struct mthca_dev *dev,
			  u64 in_param,
			  u64 *out_param,
			  int out_is_imm,
			  u32 in_modifier,
			  u8 op_modifier,
			  u16 op,
			  unsigned long timeout)
{
	int err = 0;
	unsigned long end;
	u8 status;

	down(&dev->cmd.poll_sem);

	err = mthca_cmd_post(dev, in_param,
			     out_param ? *out_param : 0,
			     in_modifier, op_modifier,
			     op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out;

	/* wait for the hardware to clear the go bit */
	end = timeout + jiffies;
	while (go_bit(dev) && time_before(jiffies, end)) {
		set_current_state(TASK_RUNNING);
		schedule();
	}

	if (go_bit(dev)) {
		err = -EBUSY;
		goto out;
	}

	/* immediate output is read back from the HCR's out-param dwords */
	if (out_is_imm && out_param) {
		*out_param =
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4));
	} else if (out_is_imm) {
		/* caller asked for an immediate result but gave us nowhere to put it */
		err = -EINVAL;
		goto out;
	}

	/* firmware status lives in the top byte of the status dword */
	status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24;
	if (status) {
		mthca_dbg(dev, "Command %02x completed with status %02x\n",
			  op, status);
		err = mthca_status_to_errno(status);
	}

out:
	up(&dev->cmd.poll_sem);
	return err;
}
0386
0387 void mthca_cmd_event(struct mthca_dev *dev,
0388 u16 token,
0389 u8 status,
0390 u64 out_param)
0391 {
0392 struct mthca_cmd_context *context =
0393 &dev->cmd.context[token & dev->cmd.token_mask];
0394
0395
0396 if (token != context->token)
0397 return;
0398
0399 context->result = 0;
0400 context->status = status;
0401 context->out_param = out_param;
0402
0403 complete(&context->done);
0404 }
0405
/*
 * Execute a command in event mode: grab a command context off the
 * free list (event_sem bounds concurrency to max_cmds), post the
 * command with the context's token, and sleep until mthca_cmd_event()
 * completes it or the timeout expires.  The context is always
 * returned to the free list before the function exits.
 */
static int mthca_cmd_wait(struct mthca_dev *dev,
			  u64 in_param,
			  u64 *out_param,
			  int out_is_imm,
			  u32 in_modifier,
			  u8 op_modifier,
			  u16 op,
			  unsigned long timeout)
{
	int err = 0;
	struct mthca_cmd_context *context;

	down(&dev->cmd.event_sem);

	spin_lock(&dev->cmd.context_lock);
	BUG_ON(dev->cmd.free_head < 0);
	context = &dev->cmd.context[dev->cmd.free_head];
	/* bump the token past token_mask so stale events don't match */
	context->token += dev->cmd.token_mask + 1;
	dev->cmd.free_head = context->next;
	spin_unlock(&dev->cmd.context_lock);

	init_completion(&context->done);

	err = mthca_cmd_post(dev, in_param,
			     out_param ? *out_param : 0,
			     in_modifier, op_modifier,
			     op, context->token, 1);
	if (err)
		goto out;

	if (!wait_for_completion_timeout(&context->done, timeout)) {
		err = -EBUSY;
		goto out;
	}

	err = context->result;
	if (err)
		goto out;

	if (context->status) {
		mthca_dbg(dev, "Command %02x completed with status %02x\n",
			  op, context->status);
		err = mthca_status_to_errno(context->status);
	}

	/* immediate output was delivered in the completion event */
	if (out_is_imm && out_param) {
		*out_param = context->out_param;
	} else if (out_is_imm) {
		err = -EINVAL;
		goto out;
	}

out:
	/* put the context back at the head of the free list */
	spin_lock(&dev->cmd.context_lock);
	context->next = dev->cmd.free_head;
	dev->cmd.free_head = context - dev->cmd.context;
	spin_unlock(&dev->cmd.context_lock);

	up(&dev->cmd.event_sem);
	return err;
}
0467
0468
0469 static int mthca_cmd_box(struct mthca_dev *dev,
0470 u64 in_param,
0471 u64 out_param,
0472 u32 in_modifier,
0473 u8 op_modifier,
0474 u16 op,
0475 unsigned long timeout)
0476 {
0477 if (dev->cmd.flags & MTHCA_CMD_USE_EVENTS)
0478 return mthca_cmd_wait(dev, in_param, &out_param, 0,
0479 in_modifier, op_modifier, op,
0480 timeout);
0481 else
0482 return mthca_cmd_poll(dev, in_param, &out_param, 0,
0483 in_modifier, op_modifier, op,
0484 timeout);
0485 }
0486
0487
0488 static int mthca_cmd(struct mthca_dev *dev,
0489 u64 in_param,
0490 u32 in_modifier,
0491 u8 op_modifier,
0492 u16 op,
0493 unsigned long timeout)
0494 {
0495 return mthca_cmd_box(dev, in_param, 0, in_modifier,
0496 op_modifier, op, timeout);
0497 }
0498
0499
0500
0501
0502
0503
0504 static int mthca_cmd_imm(struct mthca_dev *dev,
0505 u64 in_param,
0506 u64 *out_param,
0507 u32 in_modifier,
0508 u8 op_modifier,
0509 u16 op,
0510 unsigned long timeout)
0511 {
0512 if (dev->cmd.flags & MTHCA_CMD_USE_EVENTS)
0513 return mthca_cmd_wait(dev, in_param, out_param, 1,
0514 in_modifier, op_modifier, op,
0515 timeout);
0516 else
0517 return mthca_cmd_poll(dev, in_param, out_param, 1,
0518 in_modifier, op_modifier, op,
0519 timeout);
0520 }
0521
0522 int mthca_cmd_init(struct mthca_dev *dev)
0523 {
0524 mutex_init(&dev->cmd.hcr_mutex);
0525 sema_init(&dev->cmd.poll_sem, 1);
0526 dev->cmd.flags = 0;
0527
0528 dev->hcr = ioremap(pci_resource_start(dev->pdev, 0) + MTHCA_HCR_BASE,
0529 MTHCA_HCR_SIZE);
0530 if (!dev->hcr) {
0531 mthca_err(dev, "Couldn't map command register.");
0532 return -ENOMEM;
0533 }
0534
0535 dev->cmd.pool = dma_pool_create("mthca_cmd", &dev->pdev->dev,
0536 MTHCA_MAILBOX_SIZE,
0537 MTHCA_MAILBOX_SIZE, 0);
0538 if (!dev->cmd.pool) {
0539 iounmap(dev->hcr);
0540 return -ENOMEM;
0541 }
0542
0543 return 0;
0544 }
0545
0546 void mthca_cmd_cleanup(struct mthca_dev *dev)
0547 {
0548 dma_pool_destroy(dev->cmd.pool);
0549 iounmap(dev->hcr);
0550 if (dev->cmd.flags & MTHCA_CMD_POST_DOORBELLS)
0551 iounmap(dev->cmd.dbell_map);
0552 }
0553
0554
0555
0556
0557
0558 int mthca_cmd_use_events(struct mthca_dev *dev)
0559 {
0560 int i;
0561
0562 dev->cmd.context = kmalloc_array(dev->cmd.max_cmds,
0563 sizeof(struct mthca_cmd_context),
0564 GFP_KERNEL);
0565 if (!dev->cmd.context)
0566 return -ENOMEM;
0567
0568 for (i = 0; i < dev->cmd.max_cmds; ++i) {
0569 dev->cmd.context[i].token = i;
0570 dev->cmd.context[i].next = i + 1;
0571 }
0572
0573 dev->cmd.context[dev->cmd.max_cmds - 1].next = -1;
0574 dev->cmd.free_head = 0;
0575
0576 sema_init(&dev->cmd.event_sem, dev->cmd.max_cmds);
0577 spin_lock_init(&dev->cmd.context_lock);
0578
0579 for (dev->cmd.token_mask = 1;
0580 dev->cmd.token_mask < dev->cmd.max_cmds;
0581 dev->cmd.token_mask <<= 1)
0582 ;
0583 --dev->cmd.token_mask;
0584
0585 dev->cmd.flags |= MTHCA_CMD_USE_EVENTS;
0586
0587 down(&dev->cmd.poll_sem);
0588
0589 return 0;
0590 }
0591
0592
0593
0594
0595 void mthca_cmd_use_polling(struct mthca_dev *dev)
0596 {
0597 int i;
0598
0599 dev->cmd.flags &= ~MTHCA_CMD_USE_EVENTS;
0600
0601 for (i = 0; i < dev->cmd.max_cmds; ++i)
0602 down(&dev->cmd.event_sem);
0603
0604 kfree(dev->cmd.context);
0605
0606 up(&dev->cmd.poll_sem);
0607 }
0608
0609 struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
0610 gfp_t gfp_mask)
0611 {
0612 struct mthca_mailbox *mailbox;
0613
0614 mailbox = kmalloc(sizeof *mailbox, gfp_mask);
0615 if (!mailbox)
0616 return ERR_PTR(-ENOMEM);
0617
0618 mailbox->buf = dma_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma);
0619 if (!mailbox->buf) {
0620 kfree(mailbox);
0621 return ERR_PTR(-ENOMEM);
0622 }
0623
0624 return mailbox;
0625 }
0626
0627 void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
0628 {
0629 if (!mailbox)
0630 return;
0631
0632 dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
0633 kfree(mailbox);
0634 }
0635
0636 int mthca_SYS_EN(struct mthca_dev *dev)
0637 {
0638 u64 out;
0639 int ret;
0640
0641 ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D);
0642
0643 if (ret == -ENOMEM)
0644 mthca_warn(dev, "SYS_EN DDR error: syn=%x, sock=%d, "
0645 "sladdr=%d, SPD source=%s\n",
0646 (int) (out >> 6) & 0xf, (int) (out >> 4) & 3,
0647 (int) (out >> 1) & 7, (int) out & 1 ? "NVMEM" : "DIMM");
0648
0649 return ret;
0650 }
0651
0652 int mthca_SYS_DIS(struct mthca_dev *dev)
0653 {
0654 return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C);
0655 }
0656
/*
 * Issue a MAP_FA / MAP_ICM / MAP_ICM_AUX command for every chunk of
 * the given ICM, batching (virtual, physical) page entries into a
 * mailbox and flushing it to the firmware whenever it fills up.
 * virt == -1 means the firmware chooses the placement (MAP_FA /
 * MAP_ICM_AUX); otherwise pages are mapped starting at virt.
 */
static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
			 u64 virt)
{
	struct mthca_mailbox *mailbox;
	struct mthca_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;
	int i;
	int err = 0;
	int ts = 0, tc = 0;	/* total size in KB / total chunk count, for logging */

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	memset(mailbox->buf, 0, MTHCA_MAILBOX_SIZE);
	pages = mailbox->buf;

	for (mthca_icm_first(icm, &iter);
	     !mthca_icm_last(&iter);
	     mthca_icm_next(&iter)) {
		/*
		 * Largest power-of-two page size that both the chunk's
		 * address and size are aligned to; must be at least the
		 * ICM page size.
		 */
		lg = ffs(mthca_icm_addr(&iter) | mthca_icm_size(&iter)) - 1;
		if (lg < MTHCA_ICM_PAGE_SHIFT) {
			mthca_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
				   MTHCA_ICM_PAGE_SIZE,
				   (unsigned long long) mthca_icm_addr(&iter),
				   mthca_icm_size(&iter));
			err = -EINVAL;
			goto out;
		}
		for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
				virt += 1ULL << lg;
			}

			/* low bits of the physical entry encode log2(page size) */
			pages[nent * 2 + 1] =
				cpu_to_be64((mthca_icm_addr(&iter) + (i << lg)) |
					    (lg - MTHCA_ICM_PAGE_SHIFT));
			ts += 1 << (lg - 10);	/* accumulate size in KB */
			++tc;

			/* flush the mailbox when it is full of entries */
			if (++nent == MTHCA_MAILBOX_SIZE / 16) {
				err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
						CMD_TIME_CLASS_B);
				if (err)
					goto out;
				nent = 0;
			}
		}
	}

	/* flush any remaining partial batch */
	if (nent)
		err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
				CMD_TIME_CLASS_B);

	switch (op) {
	case CMD_MAP_FA:
		mthca_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
		break;
	case CMD_MAP_ICM_AUX:
		mthca_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
		break;
	case CMD_MAP_ICM:
		mthca_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
			  tc, ts, (unsigned long long) virt - (ts << 10));
		break;
	}

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
0735
0736 int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm)
0737 {
0738 return mthca_map_cmd(dev, CMD_MAP_FA, icm, -1);
0739 }
0740
0741 int mthca_UNMAP_FA(struct mthca_dev *dev)
0742 {
0743 return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B);
0744 }
0745
0746 int mthca_RUN_FW(struct mthca_dev *dev)
0747 {
0748 return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A);
0749 }
0750
0751 static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
0752 {
0753 phys_addr_t addr;
0754 u16 max_off = 0;
0755 int i;
0756
0757 for (i = 0; i < 8; ++i)
0758 max_off = max(max_off, dev->cmd.dbell_offsets[i]);
0759
0760 if ((base & PAGE_MASK) != ((base + max_off) & PAGE_MASK)) {
0761 mthca_warn(dev, "Firmware doorbell region at 0x%016llx, "
0762 "length 0x%x crosses a page boundary\n",
0763 (unsigned long long) base, max_off);
0764 return;
0765 }
0766
0767 addr = pci_resource_start(dev->pdev, 2) +
0768 ((pci_resource_len(dev->pdev, 2) - 1) & base);
0769 dev->cmd.dbell_map = ioremap(addr, max_off + sizeof(u32));
0770 if (!dev->cmd.dbell_map)
0771 return;
0772
0773 dev->cmd.flags |= MTHCA_CMD_POST_DOORBELLS;
0774 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
0775 }
0776
/*
 * QUERY_FW: read firmware version, command limits, catastrophic-error
 * buffer location, optional command-doorbell support, and the
 * firmware memory layout (ICM pages on memfree HCAs, a DDR range on
 * Tavor).  Results are parsed out of the output mailbox with
 * MTHCA_GET at the offsets #defined below.
 */
int mthca_QUERY_FW(struct mthca_dev *dev)
{
	struct mthca_mailbox *mailbox;
	u32 *outbox;
	u64 base;
	u32 tmp;
	int err = 0;
	u8 lg;
	int i;

#define QUERY_FW_OUT_SIZE             0x100
#define QUERY_FW_VER_OFFSET            0x00
#define QUERY_FW_MAX_CMD_OFFSET        0x0f
#define QUERY_FW_ERR_START_OFFSET      0x30
#define QUERY_FW_ERR_SIZE_OFFSET       0x38

#define QUERY_FW_CMD_DB_EN_OFFSET      0x10
#define QUERY_FW_CMD_DB_OFFSET         0x50
#define QUERY_FW_CMD_DB_BASE           0x60

#define QUERY_FW_START_OFFSET          0x20
#define QUERY_FW_END_OFFSET            0x28

#define QUERY_FW_SIZE_OFFSET           0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
#define QUERY_FW_EQ_ARM_BASE_OFFSET    0x40
#define QUERY_FW_EQ_SET_CI_BASE_OFFSET 0x48

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW,
			    CMD_TIME_CLASS_A);

	if (err)
		goto out;

	MTHCA_GET(dev->fw_ver, outbox, QUERY_FW_VER_OFFSET);
	/*
	 * Swap the 16-bit minor and subminor fields so the version
	 * reads naturally as major.minor.subminor.
	 */
	dev->fw_ver = (dev->fw_ver & 0xffff00000000ull) |
		((dev->fw_ver & 0xffff0000ull) >> 16) |
		((dev->fw_ver & 0x0000ffffull) << 16);

	/* max outstanding commands is reported as a log2 */
	MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	dev->cmd.max_cmds = 1 << lg;

	mthca_dbg(dev, "FW version %012llx, max commands %d\n",
		  (unsigned long long) dev->fw_ver, dev->cmd.max_cmds);

	MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET);
	MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);

	mthca_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x\n",
		  (unsigned long long) dev->catas_err.addr, dev->catas_err.size);

	/* optional: firmware may support posting commands via doorbells */
	MTHCA_GET(tmp, outbox, QUERY_FW_CMD_DB_EN_OFFSET);
	if (tmp & 0x1) {
		mthca_dbg(dev, "FW supports commands through doorbells\n");

		MTHCA_GET(base, outbox, QUERY_FW_CMD_DB_BASE);
		for (i = 0; i < MTHCA_CMD_NUM_DBELL_DWORDS; ++i)
			MTHCA_GET(dev->cmd.dbell_offsets[i], outbox,
				  QUERY_FW_CMD_DB_OFFSET + (i << 1));

		mthca_setup_cmd_doorbells(dev, base);
	}

	if (mthca_is_memfree(dev)) {
		MTHCA_GET(dev->fw.arbel.fw_pages,       outbox, QUERY_FW_SIZE_OFFSET);
		MTHCA_GET(dev->fw.arbel.clr_int_base,   outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
		MTHCA_GET(dev->fw.arbel.eq_arm_base,    outbox, QUERY_FW_EQ_ARM_BASE_OFFSET);
		MTHCA_GET(dev->fw.arbel.eq_set_ci_base, outbox, QUERY_FW_EQ_SET_CI_BASE_OFFSET);
		mthca_dbg(dev, "FW size %d KB\n", dev->fw.arbel.fw_pages << 2);

		/*
		 * Convert the ICM page count into kernel pages, rounding
		 * up so partial pages are fully covered.
		 */
		dev->fw.arbel.fw_pages =
			ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
				(PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT);

		mthca_dbg(dev, "Clear int @ %llx, EQ arm @ %llx, EQ set CI @ %llx\n",
			  (unsigned long long) dev->fw.arbel.clr_int_base,
			  (unsigned long long) dev->fw.arbel.eq_arm_base,
			  (unsigned long long) dev->fw.arbel.eq_set_ci_base);
	} else {
		MTHCA_GET(dev->fw.tavor.fw_start, outbox, QUERY_FW_START_OFFSET);
		MTHCA_GET(dev->fw.tavor.fw_end,   outbox, QUERY_FW_END_OFFSET);

		mthca_dbg(dev, "FW size %d KB (start %llx, end %llx)\n",
			  (int) ((dev->fw.tavor.fw_end - dev->fw.tavor.fw_start) >> 10),
			  (unsigned long long) dev->fw.tavor.fw_start,
			  (unsigned long long) dev->fw.tavor.fw_end);
	}

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
0882
/*
 * ENABLE_LAM: enable HCA-attached local memory and read back its
 * address range and info byte.  Warns if the firmware's notion of
 * "hidden" memory disagrees with what we detected from PCI config.
 */
int mthca_ENABLE_LAM(struct mthca_dev *dev)
{
	struct mthca_mailbox *mailbox;
	u8 info;
	u32 *outbox;
	int err = 0;

#define ENABLE_LAM_OUT_SIZE         0x100
#define ENABLE_LAM_START_OFFSET     0x00
#define ENABLE_LAM_END_OFFSET       0x08
#define ENABLE_LAM_INFO_OFFSET      0x13

#define ENABLE_LAM_INFO_HIDDEN_FLAG (1 << 4)
#define ENABLE_LAM_INFO_ECC_MASK    0x3

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM,
			    CMD_TIME_CLASS_C);

	if (err)
		goto out;

	MTHCA_GET(dev->ddr_start, outbox, ENABLE_LAM_START_OFFSET);
	MTHCA_GET(dev->ddr_end,   outbox, ENABLE_LAM_END_OFFSET);
	MTHCA_GET(info,           outbox, ENABLE_LAM_INFO_OFFSET);

	/* firmware hidden flag should match what we saw in PCI config */
	if (!!(info & ENABLE_LAM_INFO_HIDDEN_FLAG) !=
	    !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
		mthca_info(dev, "FW reports that HCA-attached memory "
			   "is %s hidden; does not match PCI config\n",
			   (info & ENABLE_LAM_INFO_HIDDEN_FLAG) ?
			   "" : "not");
	}
	if (info & ENABLE_LAM_INFO_HIDDEN_FLAG)
		mthca_dbg(dev, "HCA-attached memory is hidden.\n");

	mthca_dbg(dev, "HCA memory size %d KB (start %llx, end %llx)\n",
		  (int) ((dev->ddr_end - dev->ddr_start) >> 10),
		  (unsigned long long) dev->ddr_start,
		  (unsigned long long) dev->ddr_end);

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
0932
0933 int mthca_DISABLE_LAM(struct mthca_dev *dev)
0934 {
0935 return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C);
0936 }
0937
/*
 * QUERY_DDR: read the HCA-attached memory address range and info
 * byte without enabling it (the read-only counterpart of
 * ENABLE_LAM).  Warns if the firmware's hidden flag disagrees with
 * PCI config.
 */
int mthca_QUERY_DDR(struct mthca_dev *dev)
{
	struct mthca_mailbox *mailbox;
	u8 info;
	u32 *outbox;
	int err = 0;

#define QUERY_DDR_OUT_SIZE         0x100
#define QUERY_DDR_START_OFFSET     0x00
#define QUERY_DDR_END_OFFSET       0x08
#define QUERY_DDR_INFO_OFFSET      0x13

#define QUERY_DDR_INFO_HIDDEN_FLAG (1 << 4)
#define QUERY_DDR_INFO_ECC_MASK    0x3

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR,
			    CMD_TIME_CLASS_A);

	if (err)
		goto out;

	MTHCA_GET(dev->ddr_start, outbox, QUERY_DDR_START_OFFSET);
	MTHCA_GET(dev->ddr_end,   outbox, QUERY_DDR_END_OFFSET);
	MTHCA_GET(info,           outbox, QUERY_DDR_INFO_OFFSET);

	/* firmware hidden flag should match what we saw in PCI config */
	if (!!(info & QUERY_DDR_INFO_HIDDEN_FLAG) !=
	    !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
		mthca_info(dev, "FW reports that HCA-attached memory "
			   "is %s hidden; does not match PCI config\n",
			   (info & QUERY_DDR_INFO_HIDDEN_FLAG) ?
			   "" : "not");
	}
	if (info & QUERY_DDR_INFO_HIDDEN_FLAG)
		mthca_dbg(dev, "HCA-attached memory is hidden.\n");

	mthca_dbg(dev, "HCA memory size %d KB (start %llx, end %llx)\n",
		  (int) ((dev->ddr_end - dev->ddr_start) >> 10),
		  (unsigned long long) dev->ddr_start,
		  (unsigned long long) dev->ddr_end);

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
0987
0988 int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
0989 struct mthca_dev_lim *dev_lim)
0990 {
0991 struct mthca_mailbox *mailbox;
0992 u32 *outbox;
0993 u8 field;
0994 u16 size;
0995 u16 stat_rate;
0996 int err;
0997
0998 #define QUERY_DEV_LIM_OUT_SIZE 0x100
0999 #define QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET 0x10
1000 #define QUERY_DEV_LIM_MAX_QP_SZ_OFFSET 0x11
1001 #define QUERY_DEV_LIM_RSVD_QP_OFFSET 0x12
1002 #define QUERY_DEV_LIM_MAX_QP_OFFSET 0x13
1003 #define QUERY_DEV_LIM_RSVD_SRQ_OFFSET 0x14
1004 #define QUERY_DEV_LIM_MAX_SRQ_OFFSET 0x15
1005 #define QUERY_DEV_LIM_RSVD_EEC_OFFSET 0x16
1006 #define QUERY_DEV_LIM_MAX_EEC_OFFSET 0x17
1007 #define QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET 0x19
1008 #define QUERY_DEV_LIM_RSVD_CQ_OFFSET 0x1a
1009 #define QUERY_DEV_LIM_MAX_CQ_OFFSET 0x1b
1010 #define QUERY_DEV_LIM_MAX_MPT_OFFSET 0x1d
1011 #define QUERY_DEV_LIM_RSVD_EQ_OFFSET 0x1e
1012 #define QUERY_DEV_LIM_MAX_EQ_OFFSET 0x1f
1013 #define QUERY_DEV_LIM_RSVD_MTT_OFFSET 0x20
1014 #define QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET 0x21
1015 #define QUERY_DEV_LIM_RSVD_MRW_OFFSET 0x22
1016 #define QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET 0x23
1017 #define QUERY_DEV_LIM_MAX_AV_OFFSET 0x27
1018 #define QUERY_DEV_LIM_MAX_REQ_QP_OFFSET 0x29
1019 #define QUERY_DEV_LIM_MAX_RES_QP_OFFSET 0x2b
1020 #define QUERY_DEV_LIM_MAX_RDMA_OFFSET 0x2f
1021 #define QUERY_DEV_LIM_RSZ_SRQ_OFFSET 0x33
1022 #define QUERY_DEV_LIM_ACK_DELAY_OFFSET 0x35
1023 #define QUERY_DEV_LIM_MTU_WIDTH_OFFSET 0x36
1024 #define QUERY_DEV_LIM_VL_PORT_OFFSET 0x37
1025 #define QUERY_DEV_LIM_MAX_GID_OFFSET 0x3b
1026 #define QUERY_DEV_LIM_RATE_SUPPORT_OFFSET 0x3c
1027 #define QUERY_DEV_LIM_MAX_PKEY_OFFSET 0x3f
1028 #define QUERY_DEV_LIM_FLAGS_OFFSET 0x44
1029 #define QUERY_DEV_LIM_RSVD_UAR_OFFSET 0x48
1030 #define QUERY_DEV_LIM_UAR_SZ_OFFSET 0x49
1031 #define QUERY_DEV_LIM_PAGE_SZ_OFFSET 0x4b
1032 #define QUERY_DEV_LIM_MAX_SG_OFFSET 0x51
1033 #define QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET 0x52
1034 #define QUERY_DEV_LIM_MAX_SG_RQ_OFFSET 0x55
1035 #define QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET 0x56
1036 #define QUERY_DEV_LIM_MAX_QP_MCG_OFFSET 0x61
1037 #define QUERY_DEV_LIM_RSVD_MCG_OFFSET 0x62
1038 #define QUERY_DEV_LIM_MAX_MCG_OFFSET 0x63
1039 #define QUERY_DEV_LIM_RSVD_PD_OFFSET 0x64
1040 #define QUERY_DEV_LIM_MAX_PD_OFFSET 0x65
1041 #define QUERY_DEV_LIM_RSVD_RDD_OFFSET 0x66
1042 #define QUERY_DEV_LIM_MAX_RDD_OFFSET 0x67
1043 #define QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET 0x80
1044 #define QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET 0x82
1045 #define QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET 0x84
1046 #define QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET 0x86
1047 #define QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET 0x88
1048 #define QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET 0x8a
1049 #define QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET 0x8c
1050 #define QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET 0x8e
1051 #define QUERY_DEV_LIM_MTT_ENTRY_SZ_OFFSET 0x90
1052 #define QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET 0x92
1053 #define QUERY_DEV_LIM_PBL_SZ_OFFSET 0x96
1054 #define QUERY_DEV_LIM_BMME_FLAGS_OFFSET 0x97
1055 #define QUERY_DEV_LIM_RSVD_LKEY_OFFSET 0x98
1056 #define QUERY_DEV_LIM_LAMR_OFFSET 0x9f
1057 #define QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET 0xa0
1058
1059 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1060 if (IS_ERR(mailbox))
1061 return PTR_ERR(mailbox);
1062 outbox = mailbox->buf;
1063
1064 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM,
1065 CMD_TIME_CLASS_A);
1066
1067 if (err)
1068 goto out;
1069
1070 MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET);
1071 dev_lim->reserved_qps = 1 << (field & 0xf);
1072 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET);
1073 dev_lim->max_qps = 1 << (field & 0x1f);
1074 MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_SRQ_OFFSET);
1075 dev_lim->reserved_srqs = 1 << (field >> 4);
1076 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_OFFSET);
1077 dev_lim->max_srqs = 1 << (field & 0x1f);
1078 MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EEC_OFFSET);
1079 dev_lim->reserved_eecs = 1 << (field & 0xf);
1080 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EEC_OFFSET);
1081 dev_lim->max_eecs = 1 << (field & 0x1f);
1082 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET);
1083 dev_lim->max_cq_sz = 1 << field;
1084 MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_CQ_OFFSET);
1085 dev_lim->reserved_cqs = 1 << (field & 0xf);
1086 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_OFFSET);
1087 dev_lim->max_cqs = 1 << (field & 0x1f);
1088 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MPT_OFFSET);
1089 dev_lim->max_mpts = 1 << (field & 0x3f);
1090 MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EQ_OFFSET);
1091 dev_lim->reserved_eqs = 1 << (field & 0xf);
1092 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EQ_OFFSET);
1093 dev_lim->max_eqs = 1 << (field & 0x7);
1094 MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET);
1095 if (mthca_is_memfree(dev))
1096 dev_lim->reserved_mtts = ALIGN((1 << (field >> 4)) * sizeof(u64),
1097 dev->limits.mtt_seg_size) / dev->limits.mtt_seg_size;
1098 else
1099 dev_lim->reserved_mtts = 1 << (field >> 4);
1100 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET);
1101 dev_lim->max_mrw_sz = 1 << field;
1102 MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MRW_OFFSET);
1103 dev_lim->reserved_mrws = 1 << (field & 0xf);
1104 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET);
1105 dev_lim->max_mtt_seg = 1 << (field & 0x3f);
1106 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_REQ_QP_OFFSET);
1107 dev_lim->max_requester_per_qp = 1 << (field & 0x3f);
1108 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RES_QP_OFFSET);
1109 dev_lim->max_responder_per_qp = 1 << (field & 0x3f);
1110 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDMA_OFFSET);
1111 dev_lim->max_rdma_global = 1 << (field & 0x3f);
1112 MTHCA_GET(field, outbox, QUERY_DEV_LIM_ACK_DELAY_OFFSET);
1113 dev_lim->local_ca_ack_delay = field & 0x1f;
1114 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MTU_WIDTH_OFFSET);
1115 dev_lim->max_mtu = field >> 4;
1116 dev_lim->max_port_width = field & 0xf;
1117 MTHCA_GET(field, outbox, QUERY_DEV_LIM_VL_PORT_OFFSET);
1118 dev_lim->max_vl = field >> 4;
1119 dev_lim->num_ports = field & 0xf;
1120 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_GID_OFFSET);
1121 dev_lim->max_gids = 1 << (field & 0xf);
1122 MTHCA_GET(stat_rate, outbox, QUERY_DEV_LIM_RATE_SUPPORT_OFFSET);
1123 dev_lim->stat_rate_support = stat_rate;
1124 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PKEY_OFFSET);
1125 dev_lim->max_pkeys = 1 << (field & 0xf);
1126 MTHCA_GET(dev_lim->flags, outbox, QUERY_DEV_LIM_FLAGS_OFFSET);
1127 MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_UAR_OFFSET);
1128 dev_lim->reserved_uars = field >> 4;
1129 MTHCA_GET(field, outbox, QUERY_DEV_LIM_UAR_SZ_OFFSET);
1130 dev_lim->uar_size = 1 << ((field & 0x3f) + 20);
1131 MTHCA_GET(field, outbox, QUERY_DEV_LIM_PAGE_SZ_OFFSET);
1132 dev_lim->min_page_sz = 1 << field;
1133 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_OFFSET);
1134 dev_lim->max_sg = field;
1135
1136 MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET);
1137 dev_lim->max_desc_sz = size;
1138
1139 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_MCG_OFFSET);
1140 dev_lim->max_qp_per_mcg = 1 << field;
1141 MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MCG_OFFSET);
1142 dev_lim->reserved_mgms = field & 0xf;
1143 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MCG_OFFSET);
1144 dev_lim->max_mcgs = 1 << field;
1145 MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_PD_OFFSET);
1146 dev_lim->reserved_pds = field >> 4;
1147 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PD_OFFSET);
1148 dev_lim->max_pds = 1 << (field & 0x3f);
1149 MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_RDD_OFFSET);
1150 dev_lim->reserved_rdds = field >> 4;
1151 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDD_OFFSET);
1152 dev_lim->max_rdds = 1 << (field & 0x3f);
1153
1154 MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET);
1155 dev_lim->eec_entry_sz = size;
1156 MTHCA_GET(size, outbox, QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET);
1157 dev_lim->qpc_entry_sz = size;
1158 MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET);
1159 dev_lim->eeec_entry_sz = size;
1160 MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET);
1161 dev_lim->eqpc_entry_sz = size;
1162 MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET);
1163 dev_lim->eqc_entry_sz = size;
1164 MTHCA_GET(size, outbox, QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET);
1165 dev_lim->cqc_entry_sz = size;
1166 MTHCA_GET(size, outbox, QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET);
1167 dev_lim->srq_entry_sz = size;
1168 MTHCA_GET(size, outbox, QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET);
1169 dev_lim->uar_scratch_entry_sz = size;
1170
1171 if (mthca_is_memfree(dev)) {
1172 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
1173 dev_lim->max_srq_sz = 1 << field;
1174 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
1175 dev_lim->max_qp_sz = 1 << field;
1176 MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSZ_SRQ_OFFSET);
1177 dev_lim->hca.arbel.resize_srq = field & 1;
1178 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET);
1179 dev_lim->max_sg = min_t(int, field, dev_lim->max_sg);
1180 MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET);
1181 dev_lim->max_desc_sz = min_t(int, size, dev_lim->max_desc_sz);
1182 MTHCA_GET(size, outbox, QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET);
1183 dev_lim->mpt_entry_sz = size;
1184 MTHCA_GET(field, outbox, QUERY_DEV_LIM_PBL_SZ_OFFSET);
1185 dev_lim->hca.arbel.max_pbl_sz = 1 << (field & 0x3f);
1186 MTHCA_GET(dev_lim->hca.arbel.bmme_flags, outbox,
1187 QUERY_DEV_LIM_BMME_FLAGS_OFFSET);
1188 MTHCA_GET(dev_lim->hca.arbel.reserved_lkey, outbox,
1189 QUERY_DEV_LIM_RSVD_LKEY_OFFSET);
1190 MTHCA_GET(field, outbox, QUERY_DEV_LIM_LAMR_OFFSET);
1191 dev_lim->hca.arbel.lam_required = field & 1;
1192 MTHCA_GET(dev_lim->hca.arbel.max_icm_sz, outbox,
1193 QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET);
1194
1195 if (dev_lim->hca.arbel.bmme_flags & 1)
1196 mthca_dbg(dev, "Base MM extensions: yes "
1197 "(flags %d, max PBL %d, rsvd L_Key %08x)\n",
1198 dev_lim->hca.arbel.bmme_flags,
1199 dev_lim->hca.arbel.max_pbl_sz,
1200 dev_lim->hca.arbel.reserved_lkey);
1201 else
1202 mthca_dbg(dev, "Base MM extensions: no\n");
1203
1204 mthca_dbg(dev, "Max ICM size %lld MB\n",
1205 (unsigned long long) dev_lim->hca.arbel.max_icm_sz >> 20);
1206 } else {
1207 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
1208 dev_lim->max_srq_sz = (1 << field) - 1;
1209 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
1210 dev_lim->max_qp_sz = (1 << field) - 1;
1211 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_AV_OFFSET);
1212 dev_lim->hca.tavor.max_avs = 1 << (field & 0x3f);
1213 dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE;
1214 }
1215
1216 mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
1217 dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz);
1218 mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
1219 dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz);
1220 mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
1221 dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz);
1222 mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
1223 dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz);
1224 mthca_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
1225 dev_lim->reserved_mrws, dev_lim->reserved_mtts);
1226 mthca_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
1227 dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars);
1228 mthca_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
1229 dev_lim->max_pds, dev_lim->reserved_mgms);
1230 mthca_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
1231 dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz);
1232
1233 mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags);
1234
1235 out:
1236 mthca_free_mailbox(dev, mailbox);
1237 return err;
1238 }
1239
/*
 * get_board_id() - extract the adapter board ID string from the VSD
 * (vendor-specific data) section of the QUERY_ADAPTER output.
 *
 * @vsd:      pointer to the start of the VSD block inside the outbox
 * @board_id: destination buffer of MTHCA_BOARD_ID_LEN bytes; zeroed
 *            first, so the result is always NUL-terminated
 */
static void get_board_id(void *vsd, char *board_id)
{
	int i;

#define VSD_OFFSET_SIG1		0x00
#define VSD_OFFSET_SIG2		0xde
#define VSD_OFFSET_MLX_BOARD_ID	0xd0
#define VSD_OFFSET_TS_BOARD_ID	0x20

#define VSD_SIGNATURE_TOPSPIN	0x5ad

	memset(board_id, 0, MTHCA_BOARD_ID_LEN);

	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
		/*
		 * Topspin/Cisco format: a plain string at a fixed offset.
		 * NOTE(review): strlcpy is deprecated in newer kernels in
		 * favour of strscpy; behavior here is unchanged.
		 */
		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN);
	} else {
		/*
		 * Mellanox format: the board ID lives at a different offset
		 * and is stored as four 32-bit words that must each be
		 * byte-swapped (swab32) to recover the string.  Only 16
		 * bytes are written; the memset above supplies the NUL
		 * terminator and padding.
		 */
		for (i = 0; i < 4; ++i)
			((u32 *) board_id)[i] =
				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
	}
}
1267
/*
 * mthca_QUERY_ADAPTER() - read adapter identification via the
 * QUERY_ADAPTER firmware command.
 *
 * Fills @adapter with the vendor/device/revision IDs (Tavor-family
 * only), the INTA pin, and the board ID string parsed from the VSD.
 * Returns 0 on success or a negative errno.
 */
int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
			struct mthca_adapter *adapter)
{
	struct mthca_mailbox *mailbox;
	u32 *outbox;
	int err;

#define QUERY_ADAPTER_OUT_SIZE             0x100
#define QUERY_ADAPTER_VENDOR_ID_OFFSET     0x00
#define QUERY_ADAPTER_DEVICE_ID_OFFSET     0x04
#define QUERY_ADAPTER_REVISION_ID_OFFSET   0x08
#define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
#define QUERY_ADAPTER_VSD_OFFSET           0x20

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER,
			    CMD_TIME_CLASS_A);

	if (err)
		goto out;

	/*
	 * Mem-free (Arbel-family) firmware does not report the IDs in
	 * this outbox -- presumably the caller gets them from PCI config
	 * space instead; TODO(review) confirm.
	 */
	if (!mthca_is_memfree(dev)) {
		MTHCA_GET(adapter->vendor_id, outbox,
			  QUERY_ADAPTER_VENDOR_ID_OFFSET);
		MTHCA_GET(adapter->device_id, outbox,
			  QUERY_ADAPTER_DEVICE_ID_OFFSET);
		MTHCA_GET(adapter->revision_id, outbox,
			  QUERY_ADAPTER_REVISION_ID_OFFSET);
	}
	MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);

	/* VSD offset is in bytes; outbox is a u32 array, hence the / 4. */
	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
		     adapter->board_id);

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
1310
/*
 * mthca_INIT_HCA() - build the INIT_HCA parameter block in a mailbox
 * and issue the INIT_HCA firmware command.
 *
 * Lays out the context table base addresses/sizes, multicast table
 * parameters, TPT (translation and protection table) parameters and
 * UAR parameters from @param at the firmware-defined byte offsets
 * below.  Returns 0 on success or a negative errno.
 */
int mthca_INIT_HCA(struct mthca_dev *dev,
		   struct mthca_init_hca_param *param)
{
	struct mthca_mailbox *mailbox;
	__be32 *inbox;
	int err;

#define INIT_HCA_IN_SIZE		0x200
#define INIT_HCA_FLAGS1_OFFSET		0x00c
#define INIT_HCA_FLAGS2_OFFSET		0x014
#define INIT_HCA_QPC_OFFSET		0x020
#define INIT_HCA_QPC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x10)
#define INIT_HCA_LOG_QP_OFFSET		(INIT_HCA_QPC_OFFSET + 0x17)
#define INIT_HCA_EEC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x20)
#define INIT_HCA_LOG_EEC_OFFSET		(INIT_HCA_QPC_OFFSET + 0x27)
#define INIT_HCA_SRQC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x28)
#define INIT_HCA_LOG_SRQ_OFFSET		(INIT_HCA_QPC_OFFSET + 0x2f)
#define INIT_HCA_CQC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x30)
#define INIT_HCA_LOG_CQ_OFFSET		(INIT_HCA_QPC_OFFSET + 0x37)
#define INIT_HCA_EQPC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x40)
#define INIT_HCA_EEEC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x50)
#define INIT_HCA_EQC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x60)
#define INIT_HCA_LOG_EQ_OFFSET		(INIT_HCA_QPC_OFFSET + 0x67)
#define INIT_HCA_RDB_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x70)
#define INIT_HCA_UDAV_OFFSET		0x0b0
#define INIT_HCA_UDAV_LKEY_OFFSET	(INIT_HCA_UDAV_OFFSET + 0x0)
#define INIT_HCA_UDAV_PD_OFFSET		(INIT_HCA_UDAV_OFFSET + 0x4)
#define INIT_HCA_MCAST_OFFSET		0x0c0
#define INIT_HCA_MC_BASE_OFFSET		(INIT_HCA_MCAST_OFFSET + 0x00)
#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET	(INIT_HCA_MCAST_OFFSET + 0x12)
#define INIT_HCA_MC_HASH_SZ_OFFSET	(INIT_HCA_MCAST_OFFSET + 0x16)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET	(INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_TPT_OFFSET		0x0f0
#define INIT_HCA_MPT_BASE_OFFSET	(INIT_HCA_TPT_OFFSET + 0x00)
#define INIT_HCA_MTT_SEG_SZ_OFFSET	(INIT_HCA_TPT_OFFSET + 0x09)
#define INIT_HCA_LOG_MPT_SZ_OFFSET	(INIT_HCA_TPT_OFFSET + 0x0b)
#define INIT_HCA_MTT_BASE_OFFSET	(INIT_HCA_TPT_OFFSET + 0x10)
#define INIT_HCA_UAR_OFFSET		0x120
#define INIT_HCA_UAR_BASE_OFFSET	(INIT_HCA_UAR_OFFSET + 0x00)
#define INIT_HCA_UARC_SZ_OFFSET		(INIT_HCA_UAR_OFFSET + 0x09)
#define INIT_HCA_LOG_UAR_SZ_OFFSET	(INIT_HCA_UAR_OFFSET + 0x0a)
#define INIT_HCA_UAR_PAGE_SZ_OFFSET	(INIT_HCA_UAR_OFFSET + 0x0b)
#define INIT_HCA_UAR_SCATCH_BASE_OFFSET	(INIT_HCA_UAR_OFFSET + 0x10)
#define INIT_HCA_UAR_CTX_BASE_OFFSET	(INIT_HCA_UAR_OFFSET + 0x18)

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	memset(inbox, 0, INIT_HCA_IN_SIZE);

	/* Sinai performance-optimization flag */
	if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		MTHCA_PUT(inbox, 0x1, INIT_HCA_FLAGS1_OFFSET);

	/*
	 * Bit 1 of FLAGS2 tells the firmware the host byte order:
	 * cleared on little-endian hosts, set on big-endian hosts.
	 */
#if defined(__LITTLE_ENDIAN)
	*(inbox + INIT_HCA_FLAGS2_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
	*(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif

	/* Check port for UD address vector: */
	*(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(1);

	/* Enable IPoIB checksumming if we can: */
	if (dev->device_cap_flags & IB_DEVICE_UD_IP_CSUM)
		*(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(7 << 3);

	/*
	 * QPC/EEC/CQC/EQC/RDB attributes: context table base addresses
	 * and log2 table sizes.
	 */
	MTHCA_PUT(inbox, param->qpc_base,     INIT_HCA_QPC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->log_num_qps,  INIT_HCA_LOG_QP_OFFSET);
	MTHCA_PUT(inbox, param->eec_base,     INIT_HCA_EEC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->log_num_eecs, INIT_HCA_LOG_EEC_OFFSET);
	MTHCA_PUT(inbox, param->srqc_base,    INIT_HCA_SRQC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
	MTHCA_PUT(inbox, param->cqc_base,     INIT_HCA_CQC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->log_num_cqs,  INIT_HCA_LOG_CQ_OFFSET);
	MTHCA_PUT(inbox, param->eqpc_base,    INIT_HCA_EQPC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->eeec_base,    INIT_HCA_EEEC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->eqc_base,     INIT_HCA_EQC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->log_num_eqs,  INIT_HCA_LOG_EQ_OFFSET);
	MTHCA_PUT(inbox, param->rdb_base,     INIT_HCA_RDB_BASE_OFFSET);

	/* multicast attributes */
	MTHCA_PUT(inbox, param->mc_base,         INIT_HCA_MC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
	MTHCA_PUT(inbox, param->mc_hash_sz,      INIT_HCA_MC_HASH_SZ_OFFSET);
	MTHCA_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);

	/* TPT attributes; MTT segment size is only programmed on
	 * non-mem-free (Tavor-family) devices. */
	MTHCA_PUT(inbox, param->mpt_base,   INIT_HCA_MPT_BASE_OFFSET);
	if (!mthca_is_memfree(dev))
		MTHCA_PUT(inbox, param->mtt_seg_sz, INIT_HCA_MTT_SEG_SZ_OFFSET);
	MTHCA_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MTHCA_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);

	/* UAR attributes: page size encoded as log2 relative to 4 KB */
	{
		u8 uar_page_sz = PAGE_SHIFT - 12;
		MTHCA_PUT(inbox, uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	}

	MTHCA_PUT(inbox, param->uar_scratch_base, INIT_HCA_UAR_SCATCH_BASE_OFFSET);

	/* Extra UAR context parameters exist only on mem-free devices. */
	if (mthca_is_memfree(dev)) {
		MTHCA_PUT(inbox, param->log_uarc_sz, INIT_HCA_UARC_SZ_OFFSET);
		MTHCA_PUT(inbox, param->log_uar_sz,  INIT_HCA_LOG_UAR_SZ_OFFSET);
		MTHCA_PUT(inbox, param->uarc_base,   INIT_HCA_UAR_CTX_BASE_OFFSET);
	}

	err = mthca_cmd(dev, mailbox->dma, 0, 0,
			CMD_INIT_HCA, CMD_TIME_CLASS_D);

	mthca_free_mailbox(dev, mailbox);
	return err;
}
1435
1436 int mthca_INIT_IB(struct mthca_dev *dev,
1437 struct mthca_init_ib_param *param,
1438 int port)
1439 {
1440 struct mthca_mailbox *mailbox;
1441 u32 *inbox;
1442 int err;
1443 u32 flags;
1444
1445 #define INIT_IB_IN_SIZE 56
1446 #define INIT_IB_FLAGS_OFFSET 0x00
1447 #define INIT_IB_FLAG_SIG (1 << 18)
1448 #define INIT_IB_FLAG_NG (1 << 17)
1449 #define INIT_IB_FLAG_G0 (1 << 16)
1450 #define INIT_IB_VL_SHIFT 4
1451 #define INIT_IB_PORT_WIDTH_SHIFT 8
1452 #define INIT_IB_MTU_SHIFT 12
1453 #define INIT_IB_MAX_GID_OFFSET 0x06
1454 #define INIT_IB_MAX_PKEY_OFFSET 0x0a
1455 #define INIT_IB_GUID0_OFFSET 0x10
1456 #define INIT_IB_NODE_GUID_OFFSET 0x18
1457 #define INIT_IB_SI_GUID_OFFSET 0x20
1458
1459 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1460 if (IS_ERR(mailbox))
1461 return PTR_ERR(mailbox);
1462 inbox = mailbox->buf;
1463
1464 memset(inbox, 0, INIT_IB_IN_SIZE);
1465
1466 flags = 0;
1467 flags |= param->set_guid0 ? INIT_IB_FLAG_G0 : 0;
1468 flags |= param->set_node_guid ? INIT_IB_FLAG_NG : 0;
1469 flags |= param->set_si_guid ? INIT_IB_FLAG_SIG : 0;
1470 flags |= param->vl_cap << INIT_IB_VL_SHIFT;
1471 flags |= param->port_width << INIT_IB_PORT_WIDTH_SHIFT;
1472 flags |= param->mtu_cap << INIT_IB_MTU_SHIFT;
1473 MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET);
1474
1475 MTHCA_PUT(inbox, param->gid_cap, INIT_IB_MAX_GID_OFFSET);
1476 MTHCA_PUT(inbox, param->pkey_cap, INIT_IB_MAX_PKEY_OFFSET);
1477 MTHCA_PUT(inbox, param->guid0, INIT_IB_GUID0_OFFSET);
1478 MTHCA_PUT(inbox, param->node_guid, INIT_IB_NODE_GUID_OFFSET);
1479 MTHCA_PUT(inbox, param->si_guid, INIT_IB_SI_GUID_OFFSET);
1480
1481 err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB,
1482 CMD_TIME_CLASS_A);
1483
1484 mthca_free_mailbox(dev, mailbox);
1485 return err;
1486 }
1487
/* Shut down IB @port via the CLOSE_IB firmware command. */
int mthca_CLOSE_IB(struct mthca_dev *dev, int port)
{
	return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, CMD_TIME_CLASS_A);
}
1492
/*
 * Shut the HCA down via the CLOSE_HCA firmware command.  @panic is
 * passed as the op_modifier -- presumably a "forced/emergency close"
 * mode used on the panic path; TODO(review) confirm against the PRM.
 */
int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic)
{
	return mthca_cmd(dev, 0, 0, panic, CMD_CLOSE_HCA, CMD_TIME_CLASS_C);
}
1497
1498 int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
1499 int port)
1500 {
1501 struct mthca_mailbox *mailbox;
1502 u32 *inbox;
1503 int err;
1504 u32 flags = 0;
1505
1506 #define SET_IB_IN_SIZE 0x40
1507 #define SET_IB_FLAGS_OFFSET 0x00
1508 #define SET_IB_FLAG_SIG (1 << 18)
1509 #define SET_IB_FLAG_RQK (1 << 0)
1510 #define SET_IB_CAP_MASK_OFFSET 0x04
1511 #define SET_IB_SI_GUID_OFFSET 0x08
1512
1513 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1514 if (IS_ERR(mailbox))
1515 return PTR_ERR(mailbox);
1516 inbox = mailbox->buf;
1517
1518 memset(inbox, 0, SET_IB_IN_SIZE);
1519
1520 flags |= param->set_si_guid ? SET_IB_FLAG_SIG : 0;
1521 flags |= param->reset_qkey_viol ? SET_IB_FLAG_RQK : 0;
1522 MTHCA_PUT(inbox, flags, SET_IB_FLAGS_OFFSET);
1523
1524 MTHCA_PUT(inbox, param->cap_mask, SET_IB_CAP_MASK_OFFSET);
1525 MTHCA_PUT(inbox, param->si_guid, SET_IB_SI_GUID_OFFSET);
1526
1527 err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB,
1528 CMD_TIME_CLASS_B);
1529
1530 mthca_free_mailbox(dev, mailbox);
1531 return err;
1532 }
1533
/* Map the chunks of @icm into the firmware's ICM space at virtual
 * address @virt, using the generic multi-page map helper. */
int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt)
{
	return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt);
}
1538
1539 int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt)
1540 {
1541 struct mthca_mailbox *mailbox;
1542 __be64 *inbox;
1543 int err;
1544
1545 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1546 if (IS_ERR(mailbox))
1547 return PTR_ERR(mailbox);
1548 inbox = mailbox->buf;
1549
1550 inbox[0] = cpu_to_be64(virt);
1551 inbox[1] = cpu_to_be64(dma_addr);
1552
1553 err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM,
1554 CMD_TIME_CLASS_B);
1555
1556 mthca_free_mailbox(dev, mailbox);
1557
1558 if (!err)
1559 mthca_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
1560 (unsigned long long) dma_addr, (unsigned long long) virt);
1561
1562 return err;
1563 }
1564
/* Unmap @page_count ICM pages starting at firmware virtual address
 * @virt (UNMAP_ICM firmware command). */
int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count)
{
	mthca_dbg(dev, "Unmapping %d pages at %llx from ICM.\n",
		  page_count, (unsigned long long) virt);

	return mthca_cmd(dev, virt, page_count, 0,
			 CMD_UNMAP_ICM, CMD_TIME_CLASS_B);
}
1573
/* Map the ICM auxiliary area; the -1 virtual address marks the aux
 * space (it has no caller-chosen ICM address). */
int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm)
{
	return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, -1);
}
1578
/* Unmap the entire ICM auxiliary area (no parameters needed). */
int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev)
{
	return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B);
}
1583
/*
 * mthca_SET_ICM_SIZE() - tell firmware the total ICM size and learn
 * how much auxiliary ICM it needs.
 *
 * On success *@aux_pages holds the requirement converted from
 * firmware ICM pages (MTHCA_ICM_PAGE_SIZE) to system pages.
 * Returns 0 on success or a negative errno.
 */
int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages)
{
	int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0,
				0, CMD_SET_ICM_SIZE, CMD_TIME_CLASS_A);

	if (ret)
		return ret;

	/*
	 * Round up number of ICM pages to a multiple of the number that
	 * fit in a system page before converting to system pages (the
	 * shift is by PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT), so a partial
	 * system page is still counted.
	 */
	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT);

	return 0;
}
1601
/* Install the MPT entry held in @mailbox at @mpt_index (SW2HW_MPT
 * firmware command, i.e. hand the entry over to hardware). */
int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		    int mpt_index)
{
	return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT,
			 CMD_TIME_CLASS_B);
}
1608
/*
 * Take the MPT entry at @mpt_index back from hardware (HW2SW_MPT).
 * @mailbox may be NULL; then no outbox is supplied and op_modifier is
 * set to 1 -- presumably telling firmware not to return the entry
 * contents; TODO(review) confirm against the PRM.
 */
int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		    int mpt_index)
{
	return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			     !mailbox, CMD_HW2SW_MPT,
			     CMD_TIME_CLASS_B);
}
1616
/* Write @num_mtt MTT entries from @mailbox into the device's MTT
 * table (WRITE_MTT firmware command). */
int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		    int num_mtt)
{
	return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
			 CMD_TIME_CLASS_B);
}
1623
/* Issue SYNC_TPT to flush/synchronize the device's TPT caches after
 * MPT/MTT updates. */
int mthca_SYNC_TPT(struct mthca_dev *dev)
{
	return mthca_cmd(dev, 0, 0, 0, CMD_SYNC_TPT, CMD_TIME_CLASS_B);
}
1628
/*
 * Map (or, with @unmap set, clear) the async events in @event_mask
 * onto event queue @eq_num.  Bit 31 of the in_modifier selects
 * unmap vs. map.
 */
int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
		 int eq_num)
{
	mthca_dbg(dev, "%s mask %016llx for eqn %d\n",
		  unmap ? "Clearing" : "Setting",
		  (unsigned long long) event_mask, eq_num);
	return mthca_cmd(dev, event_mask, (unmap << 31) | eq_num,
			 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
}
1638
/* Hand the EQ context in @mailbox to hardware for EQ @eq_num. */
int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		   int eq_num)
{
	return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
			 CMD_TIME_CLASS_A);
}
1645
/* Take EQ @eq_num back from hardware; firmware writes the context
 * into @mailbox. */
int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		   int eq_num)
{
	return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0,
			     CMD_HW2SW_EQ,
			     CMD_TIME_CLASS_A);
}
1653
/* Hand the CQ context in @mailbox to hardware for CQ @cq_num. */
int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		   int cq_num)
{
	return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ,
			 CMD_TIME_CLASS_A);
}
1660
/* Take CQ @cq_num back from hardware; firmware writes the context
 * into @mailbox. */
int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		   int cq_num)
{
	return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0,
			     CMD_HW2SW_CQ,
			     CMD_TIME_CLASS_A);
}
1668
/*
 * mthca_RESIZE_CQ() - resize CQ @cq_num to 2^@log_size entries backed
 * by the memory region @lkey (RESIZE_CQ firmware command, op_mod 1).
 *
 * Returns 0 on success or a negative errno.
 */
int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size)
{
	struct mthca_mailbox *mailbox;
	__be32 *inbox;
	int err;

#define RESIZE_CQ_IN_SIZE		0x40
#define RESIZE_CQ_LOG_SIZE_OFFSET	0x0c
#define RESIZE_CQ_LKEY_OFFSET		0x1c

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	memset(inbox, 0, RESIZE_CQ_IN_SIZE);

	/*
	 * The start-address fields are deliberately left zeroed by the
	 * memset above -- presumably MRs for CQs always start at virtual
	 * address 0 in this driver; TODO(review) confirm.
	 */
	MTHCA_PUT(inbox, log_size, RESIZE_CQ_LOG_SIZE_OFFSET);
	MTHCA_PUT(inbox, lkey, RESIZE_CQ_LKEY_OFFSET);

	err = mthca_cmd(dev, mailbox->dma, cq_num, 1, CMD_RESIZE_CQ,
			CMD_TIME_CLASS_B);

	mthca_free_mailbox(dev, mailbox);
	return err;
}
1698
/* Hand the SRQ context in @mailbox to hardware for SRQ @srq_num. */
int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		    int srq_num)
{
	return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ,
			 CMD_TIME_CLASS_A);
}
1705
/* Take SRQ @srq_num back from hardware; firmware writes the context
 * into @mailbox. */
int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		    int srq_num)
{
	return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0,
			     CMD_HW2SW_SRQ,
			     CMD_TIME_CLASS_A);
}
1713
/* Read the current context of SRQ @num into @mailbox. */
int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
		    struct mthca_mailbox *mailbox)
{
	return mthca_cmd_box(dev, 0, mailbox->dma, num, 0,
			     CMD_QUERY_SRQ, CMD_TIME_CLASS_A);
}
1720
/* Arm SRQ @srq_num with watermark @limit (passed as the command's
 * input parameter) so it raises a limit event when reached. */
int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit)
{
	return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
			 CMD_TIME_CLASS_B);
}
1726
/*
 * mthca_MODIFY_QP() - drive a QP (or EE context if @is_ee) through a
 * state transition by issuing the firmware command chosen for the
 * (current, next) state pair.
 *
 * @cur/@next: IB QP states; the op[][] table below maps the pair to a
 *             firmware opcode.  Unlisted pairs yield opcode 0 --
 *             callers are expected to request only legal transitions.
 * @num:       QP/EE number; bit 24 of the in_modifier flags EE.
 * @mailbox:   context mailbox for the transition.  May be NULL for
 *             the any->reset transition.
 * @optmask:   optional-parameter mask, OR'ed into the in_modifier for
 *             non-reset transitions.
 *
 * Returns 0 on success or a negative errno.
 */
int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
		    enum ib_qp_state next, u32 num, int is_ee,
		    struct mthca_mailbox *mailbox, u32 optmask)
{
	/* Firmware opcode for each supported (current, next) state pair. */
	static const u16 op[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
		[IB_QPS_RESET] = {
			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
			[IB_QPS_INIT]	= CMD_RST2INIT_QPEE,
		},
		[IB_QPS_INIT]  = {
			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
			[IB_QPS_INIT]	= CMD_INIT2INIT_QPEE,
			[IB_QPS_RTR]	= CMD_INIT2RTR_QPEE,
		},
		[IB_QPS_RTR]   = {
			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
			[IB_QPS_RTS]	= CMD_RTR2RTS_QPEE,
		},
		[IB_QPS_RTS]   = {
			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
			[IB_QPS_RTS]	= CMD_RTS2RTS_QPEE,
			[IB_QPS_SQD]	= CMD_RTS2SQD_QPEE,
		},
		[IB_QPS_SQD] = {
			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
			[IB_QPS_RTS]	= CMD_SQD2RTS_QPEE,
			[IB_QPS_SQD]	= CMD_SQD2SQD_QPEE,
		},
		[IB_QPS_SQE] = {
			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
			[IB_QPS_RTS]	= CMD_SQERR2RTS_QPEE,
		},
		[IB_QPS_ERR] = {
			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
		}
	};

	u8 op_mod = 0;
	int my_mailbox = 0;	/* set when we allocated the mailbox here */
	int err;

	if (op[cur][next] == CMD_ERR2RST_QPEE) {
		op_mod = 3;	/* any->reset, don't write outbox */

		/*
		 * For debugging: if the caller gave no mailbox, try to
		 * allocate one so firmware can return the context being
		 * torn down.  Allocation failure is non-fatal -- we just
		 * run the command without an outbox.
		 */
		if (!mailbox) {
			mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
			if (!IS_ERR(mailbox)) {
				my_mailbox = 1;
				op_mod     = 2;	/* any->reset, write outbox */
			} else
				mailbox = NULL;
		}

		err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
				    (!!is_ee << 24) | num, op_mod,
				    op[cur][next], CMD_TIME_CLASS_C);

		/* Dead debug dump, compiled out by the `0 &&`. */
		if (0 && mailbox) {
			int i;
			mthca_dbg(dev, "Dumping QP context:\n");
			printk(" %08x\n", be32_to_cpup(mailbox->buf));
			for (i = 0; i < 0x100 / 4; ++i) {
				if (i % 8 == 0)
					printk("[%02x] ", i * 4);
				printk(" %08x",
				       be32_to_cpu(((__be32 *) mailbox->buf)[i + 2]));
				if ((i + 1) % 8 == 0)
					printk("\n");
			}
		}

		if (my_mailbox)
			mthca_free_mailbox(dev, mailbox);
	} else {
		/* Dead debug dump, compiled out by the `if (0)`. */
		if (0) {
			int i;
			mthca_dbg(dev, "Dumping QP context:\n");
			printk(" opt param mask: %08x\n", be32_to_cpup(mailbox->buf));
			for (i = 0; i < 0x100 / 4; ++i) {
				if (i % 8 == 0)
					printk(" [%02x] ", i * 4);
				printk(" %08x",
				       be32_to_cpu(((__be32 *) mailbox->buf)[i + 2]));
				if ((i + 1) % 8 == 0)
					printk("\n");
			}
		}

		err = mthca_cmd(dev, mailbox->dma, optmask | (!!is_ee << 24) | num,
				op_mod, op[cur][next], CMD_TIME_CLASS_C);
	}

	return err;
}
1829
/* Read the context of QP (or EE, if @is_ee) @num into @mailbox;
 * bit 24 of the in_modifier flags EE. */
int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
		   struct mthca_mailbox *mailbox)
{
	return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0,
			     CMD_QUERY_QPEE, CMD_TIME_CLASS_A);
}
1836
1837 int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
1838 {
1839 u8 op_mod;
1840
1841 switch (type) {
1842 case IB_QPT_SMI:
1843 op_mod = 0;
1844 break;
1845 case IB_QPT_GSI:
1846 op_mod = 1;
1847 break;
1848 case IB_QPT_RAW_IPV6:
1849 op_mod = 2;
1850 break;
1851 case IB_QPT_RAW_ETHERTYPE:
1852 op_mod = 3;
1853 break;
1854 default:
1855 return -EINVAL;
1856 }
1857
1858 return mthca_cmd(dev, 0, qpn, op_mod, CMD_CONF_SPECIAL_QP,
1859 CMD_TIME_CLASS_B);
1860 }
1861
/*
 * mthca_MAD_IFC() - execute a management datagram (MAD) through the
 * MAD_IFC firmware command.
 *
 * The 256-byte @in_mad is copied into the inbox; on success the
 * firmware's 256-byte reply is copied to @response_mad.  When @in_wc
 * is supplied, work-completion details (and optionally @in_grh) are
 * marshalled after the MAD so firmware can perform full validation.
 * Returns 0 on success or a negative errno.
 */
int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
		  int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		  const void *in_mad, void *response_mad)
{
	struct mthca_mailbox *inmailbox, *outmailbox;
	void *inbox;
	int err;
	u32 in_modifier = port;
	u8 op_modifier = 0;

#define MAD_IFC_BOX_SIZE      0x400
#define MAD_IFC_MY_QPN_OFFSET 0x100
#define MAD_IFC_RQPN_OFFSET   0x108
#define MAD_IFC_SL_OFFSET     0x10c
#define MAD_IFC_G_PATH_OFFSET 0x10d
#define MAD_IFC_RLID_OFFSET   0x10e
#define MAD_IFC_PKEY_OFFSET   0x112
#define MAD_IFC_GRH_OFFSET    0x140

	inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	outmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(outmailbox)) {
		mthca_free_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	memcpy(inbox, in_mad, 256);

	/*
	 * Tell firmware to skip MKey/BKey validation when asked to, and
	 * also whenever no work completion is available (without the WC
	 * there is no source information for key checking).
	 * op_modifier bit 0 = ignore MKey, bit 1 = ignore BKey.
	 */
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;

	if (in_wc) {
		u8 val;

		/* Work-completion info goes in the second 256 bytes. */
		memset(inbox + 256, 0, 256);

		MTHCA_PUT(inbox, in_wc->qp->qp_num, MAD_IFC_MY_QPN_OFFSET);
		MTHCA_PUT(inbox, in_wc->src_qp,     MAD_IFC_RQPN_OFFSET);

		/* Service level sits in the high nibble. */
		val = in_wc->sl << 4;
		MTHCA_PUT(inbox, val, MAD_IFC_SL_OFFSET);

		/* DLID path bits, with bit 7 flagging a present GRH. */
		val = in_wc->dlid_path_bits |
			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
		MTHCA_PUT(inbox, val, MAD_IFC_G_PATH_OFFSET);

		MTHCA_PUT(inbox, ib_lid_cpu16(in_wc->slid), MAD_IFC_RLID_OFFSET);
		MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET);

		if (in_grh)
			memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40);

		/* op_modifier bit 2 tells firmware the WC block is valid. */
		op_modifier |= 0x4;

		/* Source LID rides in the upper 16 bits of the in_modifier. */
		in_modifier |= ib_lid_cpu16(in_wc->slid) << 16;
	}

	err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
			    in_modifier, op_modifier,
			    CMD_MAD_IFC, CMD_TIME_CLASS_C);

	if (!err)
		memcpy(response_mad, outmailbox->buf, 256);

	mthca_free_mailbox(dev, inmailbox);
	mthca_free_mailbox(dev, outmailbox);
	return err;
}
1940
/* Read multicast group entry @index into @mailbox (READ_MGM). */
int mthca_READ_MGM(struct mthca_dev *dev, int index,
		   struct mthca_mailbox *mailbox)
{
	return mthca_cmd_box(dev, 0, mailbox->dma, index, 0,
			     CMD_READ_MGM, CMD_TIME_CLASS_A);
}
1947
/* Write the multicast group entry in @mailbox to @index (WRITE_MGM). */
int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
		    struct mthca_mailbox *mailbox)
{
	return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM,
			 CMD_TIME_CLASS_A);
}
1954
1955 int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1956 u16 *hash)
1957 {
1958 u64 imm;
1959 int err;
1960
1961 err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
1962 CMD_TIME_CLASS_A);
1963
1964 *hash = imm;
1965 return err;
1966 }
1967
/*
 * Issue a NOP command with a short 100 ms timeout -- used to check
 * that the command interface (and its interrupt) is alive.  The 0x1f
 * in_modifier is passed straight to firmware; its meaning is not
 * evident here (NOTE(review): confirm against the PRM).
 */
int mthca_NOP(struct mthca_dev *dev)
{
	return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, msecs_to_jiffies(100));
}