#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_config_reg.h"

enum {
	MTHCA_NUM_ASYNC_EQE = 0x80,
	MTHCA_NUM_CMD_EQE   = 0x80,
	MTHCA_NUM_SPARE_EQE = 0x80,
	MTHCA_EQ_ENTRY_SIZE = 0x20
};
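
/*
 * MTHCA_NUM_SPARE_EQE gives the driver some headroom: mthca_eq_int()
 * only writes the consumer index back to the HCA after processing this
 * many entries (the caller does the final update), so each EQ is sized
 * with the spare entries added on top of the expected event count.
 */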

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_eq_context {
	__be32 flags;
	__be64 start;
	__be32 logsize_usrpage;
	__be32 tavor_pd;
	u8     reserved1[3];
	u8     intr;
	__be32 arbel_pd;
	__be32 lkey;
	u32    reserved2[2];
	__be32 consumer_index;
	__be32 producer_index;
	u32    reserved3[4];
} __packed;

#define MTHCA_EQ_STATUS_OK          ( 0 << 28)
#define MTHCA_EQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_EQ_OWNER_SW           ( 0 << 24)
#define MTHCA_EQ_OWNER_HW           ( 1 << 24)
#define MTHCA_EQ_FLAG_TR            ( 1 << 18)
#define MTHCA_EQ_FLAG_OI            ( 1 << 17)
#define MTHCA_EQ_STATE_ARMED        ( 1 << 8)
#define MTHCA_EQ_STATE_FIRED        ( 2 << 8)
#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 << 8)
#define MTHCA_EQ_STATE_ARBEL        ( 8 << 8)

enum {
	MTHCA_EVENT_TYPE_COMP               = 0x00,
	MTHCA_EVENT_TYPE_PATH_MIG           = 0x01,
	MTHCA_EVENT_TYPE_COMM_EST           = 0x02,
	MTHCA_EVENT_TYPE_SQ_DRAINED         = 0x03,
	MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE    = 0x13,
	MTHCA_EVENT_TYPE_SRQ_LIMIT          = 0x14,
	MTHCA_EVENT_TYPE_CQ_ERROR           = 0x04,
	MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,
	MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,
	MTHCA_EVENT_TYPE_PATH_MIG_FAILED    = 0x07,
	MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR    = 0x11,
	MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR    = 0x12,
	MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
	MTHCA_EVENT_TYPE_PORT_CHANGE        = 0x09,
	MTHCA_EVENT_TYPE_EQ_OVERFLOW        = 0x0f,
	MTHCA_EVENT_TYPE_ECC_DETECT         = 0x0e,
	MTHCA_EVENT_TYPE_CMD                = 0x0a
};

#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG)           | \
				(1ULL << MTHCA_EVENT_TYPE_COMM_EST)           | \
				(1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
				(1ULL << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
				(1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
				(1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
				(1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
				(1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
#define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)

#define MTHCA_EQ_DB_INC_CI     (1 << 24)
#define MTHCA_EQ_DB_REQ_NOT    (2 << 24)
#define MTHCA_EQ_DB_DISARM_CQ  (3 << 24)
#define MTHCA_EQ_DB_SET_CI     (4 << 24)
#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)
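
/*
 * EQ doorbells: on Tavor these commands are written through a single
 * 64-bit EQ doorbell in the kernel access region (dev->kar), with the
 * command and EQN in the high word and the parameter in the low word
 * (see mthca_write64() calls below).  Memfree (Arbel) HCAs use the
 * per-EQ set-CI and arm registers mapped in mthca_map_eq_regs() instead.
 */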

struct mthca_eqe {
	u8 reserved1;
	u8 type;
	u8 reserved2;
	u8 subtype;
	union {
		u32 raw[6];
		struct {
			__be32 cqn;
		} __packed comp;
		struct {
			u16    reserved1;
			__be16 token;
			u32    reserved2;
			u8     reserved3[3];
			u8     status;
			__be64 out_param;
		} __packed cmd;
		struct {
			__be32 qpn;
		} __packed qp;
		struct {
			__be32 srqn;
		} __packed srq;
		struct {
			__be32 cqn;
			u32    reserved1;
			u8     reserved2[3];
			u8     syndrome;
		} __packed cq_err;
		struct {
			u32    reserved1[2];
			__be32 port;
		} __packed port_change;
	} event;
	u8 reserved3[3];
	u8 owner;
} __packed;

#define MTHCA_EQ_ENTRY_OWNER_SW (0 << 7)
#define MTHCA_EQ_ENTRY_OWNER_HW (1 << 7)
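
/*
 * An EQE is valid for software only while its MTHCA_EQ_ENTRY_OWNER_HW
 * bit is clear (see next_eqe_sw() below); after processing an entry,
 * mthca_eq_int() hands it back to the HCA with set_eqe_hw().
 */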

static inline u64 async_mask(struct mthca_dev *dev)
{
	return dev->mthca_flags & MTHCA_FLAG_SRQ ?
		MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :
		MTHCA_ASYNC_EVENT_MASK;
}

static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	/*
	 * This barrier makes sure that all updates to ownership bits
	 * done by set_eqe_hw() hit memory before the consumer index
	 * is updated.  set_eq_ci() allows the HCA to possibly write
	 * more EQ entries, and we want to avoid the exceedingly
	 * unlikely possibility of the HCA writing an EQ entry and
	 * then having set_eqe_hw() overwrite the owner field.
	 */
	wmb();
	mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1),
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	/* See comment in tavor_set_eq_ci() above. */
	wmb();
	__raw_writel((__force u32) cpu_to_be32(ci),
		     dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
	/* We still want ordering, just not swabbing, so add a barrier. */
	mb();
}

static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	if (mthca_is_memfree(dev))
		arbel_set_eq_ci(dev, eq, ci);
	else
		tavor_set_eq_ci(dev, eq, ci);
}

static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
{
	mthca_write64(MTHCA_EQ_DB_REQ_NOT | eqn, 0,
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
{
	writel(eqn_mask, dev->eq_regs.arbel.eq_arm);
}

static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
{
	if (!mthca_is_memfree(dev)) {
		mthca_write64(MTHCA_EQ_DB_DISARM_CQ | eqn, cqn,
			      dev->kar + MTHCA_EQ_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}
}

static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;
	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
{
	struct mthca_eqe *eqe;
	eqe = get_eqe(eq, eq->cons_index);
	return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
}

static inline void set_eqe_hw(struct mthca_eqe *eqe)
{
	eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW;
}

static void port_change(struct mthca_dev *dev, int port, int active)
{
	struct ib_event record;

	mthca_dbg(dev, "Port change to %s for port %d\n",
		  active ? "active" : "down", port);

	record.device = &dev->ib_dev;
	record.event = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	record.element.port_num = port;

	ib_dispatch_event(&record);
}

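/*
 * mthca_eq_int() drains one EQ: it walks software-owned EQEs starting
 * at eq->cons_index, dispatches each event to the CQ/QP/SRQ/command
 * handlers, and gives the entry back to the HCA.  The consumer index
 * is written back to the HCA only every MTHCA_NUM_SPARE_EQE entries;
 * the interrupt handlers below do the final set_eq_ci() and re-arm.
 */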
static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
{
	struct mthca_eqe *eqe;
	int disarm_cqn;
	int eqes_found = 0;
	int set_ci = 0;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MTHCA_EVENT_TYPE_COMP:
			disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			disarm_cq(dev, eq->eqn, disarm_cqn);
			mthca_cq_completion(dev, disarm_cqn);
			break;

		case MTHCA_EVENT_TYPE_PATH_MIG:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_PATH_MIG);
			break;

		case MTHCA_EVENT_TYPE_COMM_EST:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_COMM_EST);
			break;

		case MTHCA_EVENT_TYPE_SQ_DRAINED:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_SQ_DRAINED);
			break;

		case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_LAST_WQE_REACHED);
			break;

		case MTHCA_EVENT_TYPE_SRQ_LIMIT:
			mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
					IB_EVENT_SRQ_LIMIT_REACHED);
			break;

		case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_FATAL);
			break;

		case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_PATH_MIG_ERR);
			break;

		case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_REQ_ERR);
			break;

		case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_ACCESS_ERR);
			break;

		case MTHCA_EVENT_TYPE_CMD:
			mthca_cmd_event(dev,
					be16_to_cpu(eqe->event.cmd.token),
					eqe->event.cmd.status,
					be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MTHCA_EVENT_TYPE_PORT_CHANGE:
			port_change(dev,
				    (be32_to_cpu(eqe->event.port_change.port) >> 28) & 3,
				    eqe->subtype == 0x4);
			break;

		case MTHCA_EVENT_TYPE_CQ_ERROR:
			mthca_warn(dev, "CQ %s on CQN %06x\n",
				   eqe->event.cq_err.syndrome == 1 ?
				   "overrun" : "access violation",
				   be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
				       IB_EVENT_CQ_ERR);
			break;

		case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
			mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_ECC_DETECT:
		default:
			mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
				   eqe->type, eqe->subtype, eq->eqn);
			break;
		}

		set_eqe_hw(eqe);
		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * make sure we're far enough ahead of the queue that
		 * entries don't get reused before we tell the HCA
		 * we've caught up, by updating the consumer index in
		 * batches of MTHCA_NUM_SPARE_EQE.
		 */
		if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
			/*
			 * Conditional on hca_type is OK here because
			 * this is a rare case, not the fast path.
			 */
			set_eq_ci(dev, eq, eq->cons_index);
			set_ci = 0;
		}
	}

	/*
	 * Rely on the caller to update the consumer index so that we
	 * don't have to test hca_type in our interrupt handling fast
	 * path.
	 */
	return eqes_found;
}

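/*
 * Interrupt handlers: Tavor INTx interrupts report pending EQs through
 * the ECR register, which we read and then clear; Arbel simply polls
 * every EQ.  The MSI-X variants get a single EQ as their context, so
 * they skip the loop.  In every case the handler updates the consumer
 * index and re-arms the EQ after mthca_eq_int() has drained it.
 */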
static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr)
{
	struct mthca_dev *dev = dev_ptr;
	u32 ecr;
	int i;

	if (dev->eq_table.clr_mask)
		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

	ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
	if (!ecr)
		return IRQ_NONE;

	writel(ecr, dev->eq_regs.tavor.ecr_base +
	       MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (ecr & dev->eq_table.eq[i].eqn_mask) {
			if (mthca_eq_int(dev, &dev->eq_table.eq[i]))
				tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
						dev->eq_table.eq[i].cons_index);
			tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
		}

	return IRQ_HANDLED;
}

static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mthca_eq  *eq  = eq_ptr;
	struct mthca_dev *dev = eq->dev;

	mthca_eq_int(dev, eq);
	tavor_set_eq_ci(dev, eq, eq->cons_index);
	tavor_eq_req_not(dev, eq->eqn);

	/* MSI-X vectors always belong to us (no shared interrupts). */
	return IRQ_HANDLED;
}

static irqreturn_t mthca_arbel_interrupt(int irq, void *dev_ptr)
{
	struct mthca_dev *dev = dev_ptr;
	int work = 0;
	int i;

	if (dev->eq_table.clr_mask)
		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (mthca_eq_int(dev, &dev->eq_table.eq[i])) {
			work = 1;
			arbel_set_eq_ci(dev, &dev->eq_table.eq[i],
					dev->eq_table.eq[i].cons_index);
		}

	arbel_eq_req_not(dev, dev->eq_table.arm_mask);

	return IRQ_RETVAL(work);
}

static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mthca_eq  *eq  = eq_ptr;
	struct mthca_dev *dev = eq->dev;

	mthca_eq_int(dev, eq);
	arbel_set_eq_ci(dev, eq, eq->cons_index);
	arbel_eq_req_not(dev, eq->eqn_mask);

	/* MSI-X vectors always belong to us (no shared interrupts). */
	return IRQ_HANDLED;
}

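/*
 * mthca_create_eq() builds one EQ: it allocates page-sized coherent
 * buffers for the entries, marks every entry hardware-owned, registers
 * the buffer pages as a memory region, fills in an EQ context, and
 * hands the EQ to the HCA with the SW2HW_EQ firmware command.
 */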
static int mthca_create_eq(struct mthca_dev *dev,
			   int nent,
			   u8 intr,
			   struct mthca_eq *eq)
{
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	struct mthca_mailbox *mailbox;
	struct mthca_eq_context *eq_context;
	int err = -ENOMEM;
	int i;

	eq->dev  = dev;
	eq->nent = roundup_pow_of_two(max(nent, 2));
	npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;

	eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list),
				      GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		dma_unmap_addr_set(&eq->page_list[i], mapping, t);

		clear_page(eq->page_list[i].buf);
	}

	for (i = 0; i < eq->nent; ++i)
		set_eqe_hw(get_eqe(eq, i));

	eq->eqn = mthca_alloc(&dev->eq_table.alloc);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
				  dma_list, PAGE_SHIFT, npages,
				  0, npages * PAGE_SIZE,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &eq->mr);
	if (err)
		goto err_out_free_eq;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK   |
					MTHCA_EQ_OWNER_HW    |
					MTHCA_EQ_STATE_ARMED |
					MTHCA_EQ_FLAG_TR);
	if (mthca_is_memfree(dev))
		eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);

	eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
	if (mthca_is_memfree(dev)) {
		eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
	} else {
		eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
		eq_context->tavor_pd         = cpu_to_be32(dev->driver_pd.pd_num);
	}
	eq_context->intr = intr;
	eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey);

	err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mthca_warn(dev, "SW2HW_EQ returned %d\n", err);
		goto err_out_free_mr;
	}

	kfree(dma_list);
	mthca_free_mailbox(dev, mailbox);

	eq->eqn_mask   = swab32(1 << eq->eqn);
	eq->cons_index = 0;

	dev->eq_table.arm_mask |= eq->eqn_mask;

	mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
		  eq->eqn, eq->nent);

	return err;

err_out_free_mr:
	mthca_free_mr(dev, &eq->mr);

err_out_free_eq:
	mthca_free(&dev->eq_table.alloc, eq->eqn);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  dma_unmap_addr(&eq->page_list[i],
							 mapping));

	mthca_free_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}

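/*
 * mthca_free_eq() is the inverse: it reclaims the EQ from the HCA with
 * HW2SW_EQ, drops it from the arm mask, and frees the MR and the
 * coherent pages backing the entries.
 */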
static void mthca_free_eq(struct mthca_dev *dev,
			  struct mthca_eq *eq)
{
	struct mthca_mailbox *mailbox;
	int err;
	int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
		PAGE_SIZE;
	int i;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return;

	err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
		mthca_warn(dev, "HW2SW_EQ returned %d\n", err);

	dev->eq_table.arm_mask &= ~eq->eqn_mask;

	/* Disabled debugging dump of the EQ context returned by HW2SW_EQ. */
	if (0) {
		mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	mthca_free_mr(dev, &eq->mr);
	for (i = 0; i < npages; ++i)
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
				  eq->page_list[i].buf,
				  dma_unmap_addr(&eq->page_list[i], mapping));

	kfree(eq->page_list);
	mthca_free_mailbox(dev, mailbox);
}

static void mthca_free_irqs(struct mthca_dev *dev)
{
	int i;

	if (dev->eq_table.have_irq)
		free_irq(dev->pdev->irq, dev);
	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (dev->eq_table.eq[i].have_irq) {
			free_irq(dev->eq_table.eq[i].msi_x_vector,
				 dev->eq_table.eq + i);
			dev->eq_table.eq[i].have_irq = 0;
		}
}

static int mthca_map_reg(struct mthca_dev *dev,
			 unsigned long offset, unsigned long size,
			 void __iomem **map)
{
	phys_addr_t base = pci_resource_start(dev->pdev, 0);

	*map = ioremap(base + offset, size);
	if (!*map)
		return -ENOMEM;

	return 0;
}

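/*
 * All of the EQ-related registers live in BAR 0.  Memfree (Arbel) HCAs
 * report the offsets of the interrupt-clear, EQ arm, and EQ set-CI
 * registers via firmware (stashed in dev->fw.arbel); Tavor uses the
 * fixed MTHCA_CLR_INT_BASE and MTHCA_ECR_BASE offsets.
 */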
static int mthca_map_eq_regs(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev)) {
		/*
		 * We assume the register addresses firmware gives us
		 * fall within the first BAR: they are valid on the
		 * HCA's side of the PCI bus, so keep only the offset
		 * within the BAR by masking with its length - 1.
		 */
		if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				  dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
				  &dev->clr_base)) {
			mthca_err(dev, "Couldn't map interrupt clear register, "
				  "aborting.\n");
			return -ENOMEM;
		}

		/*
		 * Map only the word of the EQ arm register that
		 * arbel_eq_req_not() actually writes (hence the +4).
		 */
		if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
					dev->fw.arbel.eq_arm_base) + 4, 4,
				  &dev->eq_regs.arbel.eq_arm)) {
			mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
			iounmap(dev->clr_base);
			return -ENOMEM;
		}

		if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				  dev->fw.arbel.eq_set_ci_base,
				  MTHCA_EQ_SET_CI_SIZE,
				  &dev->eq_regs.arbel.eq_set_ci_base)) {
			mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
			iounmap(dev->eq_regs.arbel.eq_arm);
			iounmap(dev->clr_base);
			return -ENOMEM;
		}
	} else {
		if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
				  &dev->clr_base)) {
			mthca_err(dev, "Couldn't map interrupt clear register, "
				  "aborting.\n");
			return -ENOMEM;
		}

		if (mthca_map_reg(dev, MTHCA_ECR_BASE,
				  MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
				  &dev->eq_regs.tavor.ecr_base)) {
			mthca_err(dev, "Couldn't map ecr register, "
				  "aborting.\n");
			iounmap(dev->clr_base);
			return -ENOMEM;
		}
	}

	return 0;
}

static void mthca_unmap_eq_regs(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev)) {
		iounmap(dev->eq_regs.arbel.eq_set_ci_base);
		iounmap(dev->eq_regs.arbel.eq_arm);
		iounmap(dev->clr_base);
	} else {
		iounmap(dev->eq_regs.tavor.ecr_base);
		iounmap(dev->clr_base);
	}
}

int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
	int ret;

	/*
	 * We assume that mapping one page of ICM is enough for the
	 * whole EQ context table: we use only a handful of EQs and
	 * each context is small, so they fit comfortably in a page.
	 */
	dev->eq_table.icm_virt = icm_virt;
	dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
	if (!dev->eq_table.icm_page)
		return -ENOMEM;
	dev->eq_table.icm_dma =
		dma_map_page(&dev->pdev->dev, dev->eq_table.icm_page, 0,
			     PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, dev->eq_table.icm_dma)) {
		__free_page(dev->eq_table.icm_page);
		return -ENOMEM;
	}

	ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt);
	if (ret) {
		dma_unmap_page(&dev->pdev->dev, dev->eq_table.icm_dma,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		__free_page(dev->eq_table.icm_page);
	}

	return ret;
}

void mthca_unmap_eq_icm(struct mthca_dev *dev)
{
	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1);
	dma_unmap_page(&dev->pdev->dev, dev->eq_table.icm_dma, PAGE_SIZE,
		       DMA_BIDIRECTIONAL);
	__free_page(dev->eq_table.icm_page);
}

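/*
 * mthca_init_eq_table() brings up the whole EQ machinery: it maps the
 * EQ registers, creates the completion, async, and command EQs,
 * requests either one MSI-X vector per EQ or a single shared INTx
 * interrupt, maps the async and command event classes onto their EQs
 * with MAP_EQ, and finally arms every EQ.
 */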
int mthca_init_eq_table(struct mthca_dev *dev)
{
	int err;
	u8 intr;
	int i;

	err = mthca_alloc_init(&dev->eq_table.alloc,
			       dev->limits.num_eqs,
			       dev->limits.num_eqs - 1,
			       dev->limits.reserved_eqs);
	if (err)
		return err;

	err = mthca_map_eq_regs(dev);
	if (err)
		goto err_out_free;

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		dev->eq_table.clr_mask = 0;
	} else {
		dev->eq_table.clr_mask =
			swab32(1 << (dev->eq_table.inta_pin & 31));
		dev->eq_table.clr_int  = dev->clr_base +
			(dev->eq_table.inta_pin < 32 ? 4 : 0);
	}

	dev->eq_table.arm_mask = 0;

	intr = dev->eq_table.inta_pin;

	err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_COMP]);
	if (err)
		goto err_out_unmap;

	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
	if (err)
		goto err_out_comp;

	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_CMD]);
	if (err)
		goto err_out_async;

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		static const char *eq_name[] = {
			[MTHCA_EQ_COMP]  = DRV_NAME "-comp",
			[MTHCA_EQ_ASYNC] = DRV_NAME "-async",
			[MTHCA_EQ_CMD]   = DRV_NAME "-cmd"
		};

		for (i = 0; i < MTHCA_NUM_EQ; ++i) {
			snprintf(dev->eq_table.eq[i].irq_name,
				 IB_DEVICE_NAME_MAX,
				 "%s@pci:%s", eq_name[i],
				 pci_name(dev->pdev));
			err = request_irq(dev->eq_table.eq[i].msi_x_vector,
					  mthca_is_memfree(dev) ?
					  mthca_arbel_msi_x_interrupt :
					  mthca_tavor_msi_x_interrupt,
					  0, dev->eq_table.eq[i].irq_name,
					  dev->eq_table.eq + i);
			if (err)
				goto err_out_cmd;
			dev->eq_table.eq[i].have_irq = 1;
		}
	} else {
		snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX,
			 DRV_NAME "@pci:%s", pci_name(dev->pdev));
		err = request_irq(dev->pdev->irq,
				  mthca_is_memfree(dev) ?
				  mthca_arbel_interrupt :
				  mthca_tavor_interrupt,
				  IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev);
		if (err)
			goto err_out_cmd;
		dev->eq_table.have_irq = 1;
	}

	err = mthca_MAP_EQ(dev, async_mask(dev),
			   0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	if (err)
		mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);

	err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
			   0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);
	if (err)
		mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (mthca_is_memfree(dev))
			arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
		else
			tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);

	return 0;

err_out_cmd:
	mthca_free_irqs(dev);
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);

err_out_async:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);

err_out_comp:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);

err_out_unmap:
	mthca_unmap_eq_regs(dev);

err_out_free:
	mthca_alloc_cleanup(&dev->eq_table.alloc);
	return err;
}

void mthca_cleanup_eq_table(struct mthca_dev *dev)
{
	int i;

	mthca_free_irqs(dev);

	mthca_MAP_EQ(dev, async_mask(dev),
		     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
		     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		mthca_free_eq(dev, &dev->eq_table.eq[i]);

	mthca_unmap_eq_regs(dev);

	mthca_alloc_cleanup(&dev->eq_table.alloc);
}