// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Adaptec AAC series RAID controller driver
 *
 * Module Name:
 *  rx.c
 *
 * Abstract: Hardware miniport for Drawbridge specific hardware functions.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>

#include <scsi/scsi_host.h>

#include "aacraid.h"

static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
{
	struct aac_dev *dev = dev_id;
	unsigned long bellbits;
	u8 intstat = rx_readb(dev, MUnit.OISR);

	/*
	 *	Read mask and invert because drawbridge is reversed.
	 *	This allows us to only service interrupts that have
	 *	been enabled.
	 *	Check to see if this is our interrupt.  If it isn't just return
	 */
	if (likely(intstat & ~(dev->OIMR))) {
		bellbits = rx_readl(dev, OutboundDoorbellReg);
		if (unlikely(bellbits & DoorBellPrintfReady)) {
			aac_printf(dev, readl(&dev->IndexRegs->Mailbox[5]));
			rx_writel(dev, MUnit.ODR, DoorBellPrintfReady);
			rx_writel(dev, InboundDoorbellReg, DoorBellPrintfDone);
		}
		else if (unlikely(bellbits & DoorBellAdapterNormCmdReady)) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
		}
		else if (likely(bellbits & DoorBellAdapterNormRespReady)) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
		}
		else if (unlikely(bellbits & DoorBellAdapterNormCmdNotFull)) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
		}
		else if (unlikely(bellbits & DoorBellAdapterNormRespNotFull)) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
		}
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
{
	int isAif, isFastResponse, isSpecial;
	struct aac_dev *dev = dev_id;
	u32 Index = rx_readl(dev, MUnit.OutboundQueue);
	if (unlikely(Index == 0xFFFFFFFFL))
		Index = rx_readl(dev, MUnit.OutboundQueue);
	if (likely(Index != 0xFFFFFFFFL)) {
		do {
			isAif = isFastResponse = isSpecial = 0;
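			/*
			 *	Decode the outbound queue entry: bit 1 marks an
			 *	AIF (0xFFFFFFFE being a special entry with no
			 *	fib attached); otherwise bit 0 flags a fast
			 *	response and the upper bits form the fib handle
			 *	passed to aac_intr_normal().
			 */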
			if (Index & 0x00000002L) {
				isAif = 1;
				if (Index == 0xFFFFFFFEL)
					isSpecial = 1;
				Index &= ~0x00000002L;
			} else {
				if (Index & 0x00000001L)
					isFastResponse = 1;
				Index >>= 2;
			}
			if (!isSpecial) {
				if (unlikely(aac_intr_normal(dev,
						Index, isAif,
						isFastResponse, NULL))) {
					rx_writel(dev,
						MUnit.OutboundQueue,
						Index);
					rx_writel(dev,
						MUnit.ODR,
						DoorBellAdapterNormRespReady);
				}
			}
			Index = rx_readl(dev, MUnit.OutboundQueue);
		} while (Index != 0xFFFFFFFFL);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

/**
 *	aac_rx_disable_interrupt	-	Disable interrupts
 *	@dev: Adapter
 */
static void aac_rx_disable_interrupt(struct aac_dev *dev)
{
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
}

/**
 *	aac_rx_enable_interrupt_producer	-	Enable interrupts (producer mode)
 *	@dev: Adapter
 */
static void aac_rx_enable_interrupt_producer(struct aac_dev *dev)
{
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
}

/**
 *	aac_rx_enable_interrupt_message	-	Enable interrupts (message mode)
 *	@dev: Adapter
 */
static void aac_rx_enable_interrupt_message(struct aac_dev *dev)
{
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
}

/**
 *	rx_sync_cmd	-	send a command and wait
 *	@dev: Adapter
 *	@command: Command to execute
 *	@p1: first parameter
 *	@p2: second parameter
 *	@p3: third parameter
 *	@p4: fourth parameter
 *	@p5: fifth parameter
 *	@p6: sixth parameter
 *	@status: adapter status
 *	@r1: first return value
 *	@r2: second return value
 *	@r3: third return value
 *	@r4: fourth return value
 *
 *	This routine will send a synchronous command to the adapter and wait
 *	for its completion.
 */
static int rx_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
	u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
{
	unsigned long start;
	int ok;
	/*
	 *	Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 *	Write the parameters into Mailboxes 1 - 4
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);
	/*
	 *	Clear the synch command doorbell to start on a clean slate.
	 */
	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
	/*
	 *	Disable doorbell interrupts
	 */
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
	/*
	 *	Force the completion of the mask register write before issuing
	 *	the interrupt.
	 */
	rx_readb(dev, MUnit.OIMR);
	/*
	 *	Signal that there is a new synch command
	 */
	rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);

	ok = 0;
	start = jiffies;

	/*
	 *	Wait up to 30 seconds
	 */
	while (time_before(jiffies, start+30*HZ))
	{
		udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
		/*
		 *	Mon960 will set doorbell0 bit when it has completed the command.
		 */
		if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
			/*
			 *	Clear the doorbell.
			 */
			rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
			ok = 1;
			break;
		}
		/*
		 *	Yield the processor in case we are slow
		 */
		msleep(1);
	}
	if (unlikely(ok != 1)) {
		/*
		 *	Restore interrupt mask even though we timed out
		 */
		aac_adapter_enable_int(dev);
		return -ETIMEDOUT;
	}
	/*
	 *	Pull the synch status from Mailbox 0.
	 */
	if (status)
		*status = readl(&dev->IndexRegs->Mailbox[0]);
	if (r1)
		*r1 = readl(&dev->IndexRegs->Mailbox[1]);
	if (r2)
		*r2 = readl(&dev->IndexRegs->Mailbox[2]);
	if (r3)
		*r3 = readl(&dev->IndexRegs->Mailbox[3]);
	if (r4)
		*r4 = readl(&dev->IndexRegs->Mailbox[4]);
	/*
	 *	Clear the synch command doorbell.
	 */
	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
	/*
	 *	Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;
}

/**
 *	aac_rx_interrupt_adapter	-	interrupt adapter
 *	@dev: Adapter
 *
 *	Send an interrupt to the i960 and breakpoint it.
 */
static void aac_rx_interrupt_adapter(struct aac_dev *dev)
{
	rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}

/**
 *	aac_rx_notify_adapter		-	send an event to the adapter
 *	@dev: Adapter
 *	@event: Event to send
 *
 *	Notify the i960 that something it probably cares about has
 *	happened.
 */
static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
{
	switch (event) {

	case AdapNormCmdQue:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_1);
		break;
	case HostNormRespNotFull:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_4);
		break;
	case AdapNormRespQue:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_2);
		break;
	case HostNormCmdNotFull:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_3);
		break;
	case HostShutdown:
		break;
	case FastIo:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_6);
		break;
	case AdapPrintfDone:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_5);
		break;
	default:
		BUG();
		break;
	}
}

/**
 *	aac_rx_start_adapter		-	activate adapter
 *	@dev: Adapter
 *
 *	Start up processing on an i960 based AAC adapter
 */
static void aac_rx_start_adapter(struct aac_dev *dev)
{
	union aac_init *init;

	init = dev->init;
	init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds());
	/* We can only use a 32 bit address here */
	rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
		0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}

/**
 *	aac_rx_check_health
 *	@dev: device to check if healthy
 *
 *	Will attempt to determine if the specified adapter is alive and
 *	capable of handling requests, returning 0 if alive.
 */
static int aac_rx_check_health(struct aac_dev *dev)
{
	u32 status = rx_readl(dev, MUnit.OMRx[0]);

	/*
	 *	Check to see if the board failed any self tests.
	 */
	if (unlikely(status & SELF_TEST_FAILED))
		return -1;
	/*
	 *	Check to see if the board panic'd.
	 */
	if (unlikely(status & KERNEL_PANIC)) {
		char *buffer;
		struct POSTSTATUS {
			__le32 Post_Command;
			__le32 Post_Address;
		} *post;
		dma_addr_t paddr, baddr;
		int ret;

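		/*
		 *	A 0xBCxxxxxx status encodes the panic reason directly
		 *	in bits 23:16; otherwise ask the monitor for the POST
		 *	results via a synchronous command into a DMA buffer.
		 */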
		if (likely((status & 0xFF000000L) == 0xBC000000L))
			return (status >> 16) & 0xFF;
		buffer = dma_alloc_coherent(&dev->pdev->dev, 512, &baddr,
					    GFP_KERNEL);
		ret = -2;
		if (unlikely(buffer == NULL))
			return ret;
		post = dma_alloc_coherent(&dev->pdev->dev,
					  sizeof(struct POSTSTATUS), &paddr,
					  GFP_KERNEL);
		if (unlikely(post == NULL)) {
			dma_free_coherent(&dev->pdev->dev, 512, buffer, baddr);
			return ret;
		}
		memset(buffer, 0, 512);
		post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
		post->Post_Address = cpu_to_le32(baddr);
		rx_writel(dev, MUnit.IMRx[0], paddr);
		rx_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0,
			NULL, NULL, NULL, NULL, NULL);
		dma_free_coherent(&dev->pdev->dev, sizeof(struct POSTSTATUS),
				  post, paddr);
		if (likely((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X')))) {
			ret = (hex_to_bin(buffer[2]) << 4) +
				hex_to_bin(buffer[3]);
		}
		dma_free_coherent(&dev->pdev->dev, 512, buffer, baddr);
		return ret;
	}
	/*
	 *	Wait for the adapter to be up and running.
	 */
	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
		return -3;
	/*
	 *	Everything is OK
	 */
	return 0;
}

/**
 *	aac_rx_deliver_producer
 *	@fib: fib to issue
 *
 *	Will send a fib, returning 0 if successful.
 */
int aac_rx_deliver_producer(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 Index;
	unsigned long nointr = 0;

	aac_queue_get(dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr);

	atomic_inc(&q->numpending);
	*(q->headers.producer) = cpu_to_le32(Index + 1);
	if (!(nointr & aac_config.irq_mod))
		aac_adapter_notify(dev, AdapNormCmdQueue);

	return 0;
}

/**
 *	aac_rx_deliver_message
 *	@fib: fib to issue
 *
 *	Will send a fib, returning 0 if successful.
 */
static int aac_rx_deliver_message(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 Index;
	u64 addr;
	volatile void __iomem *device;

	unsigned long count = 10000000L;
	atomic_inc(&q->numpending);
	for(;;) {
		Index = rx_readl(dev, MUnit.InboundQueue);
		if (unlikely(Index == 0xFFFFFFFFL))
			Index = rx_readl(dev, MUnit.InboundQueue);
		if (likely(Index != 0xFFFFFFFFL))
			break;
		if (--count == 0) {
			atomic_dec(&q->numpending);
			return -ETIMEDOUT;
		}
		udelay(5);
	}
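	/*
	 *	The inbound queue returned an offset into the mapped register
	 *	space; write the 64-bit fib address and its size there, then
	 *	post the entry back to hand the fib to the adapter.
	 */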
	device = dev->base + Index;
	addr = fib->hw_fib_pa;
	writel((u32)(addr & 0xffffffff), device);
	device += sizeof(u32);
	writel((u32)(addr >> 32), device);
	device += sizeof(u32);
	writel(le16_to_cpu(fib->hw_fib_va->header.Size), device);
	rx_writel(dev, MUnit.InboundQueue, Index);
	return 0;
}

/**
 *	aac_rx_ioremap
 *	@dev: Adapter
 *	@size: mapping resize request
 *
 *	A size of zero unmaps the adapter registers.
 */
static int aac_rx_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.rx);
		return 0;
	}
	dev->base = dev->regs.rx = ioremap(dev->base_start, size);
	if (dev->base == NULL)
		return -1;
	dev->IndexRegs = &dev->regs.rx->IndexRegs;
	return 0;
}

static int aac_rx_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
{
	u32 var = 0;

	if (!(dev->supplement_adapter_info.supported_options2 &
	    AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
		if (bled)
			printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
				dev->name, dev->id, bled);
		else {
			bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
				0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
			if (!bled && (var != 0x00000001) && (var != 0x3803000F))
				bled = -EINVAL;
		}
		if (bled && (bled != -ETIMEDOUT))
			bled = aac_adapter_sync_cmd(dev, IOP_RESET,
				0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);

		if (bled && (bled != -ETIMEDOUT))
			return -EINVAL;
	}
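	/*
	 *	A 0x3803000F status asks the host to use the alternate reset
	 *	method: poke MUnit.reserved2 and give the adapter time to
	 *	come back before treating the reset as successful.
	 */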
	if (bled && (var == 0x3803000F)) {
		rx_writel(dev, MUnit.reserved2, 3);
		msleep(5000); /* Delay 5 seconds */
		var = 0x00000001;
	}
	if (bled && (var != 0x00000001))
		return -EINVAL;
	ssleep(5);
	if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
		return -ENODEV;
	if (startup_timeout < 300)
		startup_timeout = 300;
	return 0;
}

/**
 *	aac_rx_select_comm	-	Select communications method
 *	@dev: Adapter
 *	@comm: communications method
 */
int aac_rx_select_comm(struct aac_dev *dev, int comm)
{
	switch (comm) {
	case AAC_COMM_PRODUCER:
		dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer;
		dev->a_ops.adapter_intr = aac_rx_intr_producer;
		dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
		break;
	case AAC_COMM_MESSAGE:
		dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message;
		dev->a_ops.adapter_intr = aac_rx_intr_message;
		dev->a_ops.adapter_deliver = aac_rx_deliver_message;
		break;
	default:
		return 1;
	}
	return 0;
}

/**
 *	_aac_rx_init	-	initialize an i960 based AAC card
 *	@dev: device to configure
 *
 *	Allocate and set up resources for the i960 based AAC variants. The
 *	device_interface in the commregion will be allocated and linked
 *	to the comm region.
 */
int _aac_rx_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
	dev->OIMR = status = rx_readb(dev, MUnit.OIMR);

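	/*
	 *	If the expected interrupt-mask bits are not set, or a reset was
	 *	explicitly requested, soft-reset the adapter and then drain any
	 *	stale entries from the hardware FIFO.
	 */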
	if (((status & 0x0c) != 0x0c) || dev->init_reset) {
		dev->init_reset = false;
		if (!aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) {
			/* Make sure the Hardware FIFO is empty */
			while ((++restart < 512) &&
			       (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
		}
	}

	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & KERNEL_PANIC) {
		if (aac_rx_restart_adapter(dev,
			aac_rx_check_health(dev), IOP_HWSOFT_RESET))
			goto error_iounmap;
		++restart;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running.
	 */
	while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING))
	{
		if ((restart &&
		    (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		    time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		    ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		    time_after(jiffies, start + HZ *
		    ((startup_timeout > 60)
		      ? (startup_timeout - 60)
		      : (startup_timeout / 2))))) {
			if (likely(!aac_rx_restart_adapter(dev,
				aac_rx_check_health(dev), IOP_HWSOFT_RESET)))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
	dev->a_ops.adapter_notify = aac_rx_notify_adapter;
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_check_health = aac_rx_check_health;
	dev->a_ops.adapter_restart = aac_rx_restart_adapter;
	dev->a_ops.adapter_start = aac_rx_start_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the one's that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_PRODUCER);
	aac_adapter_disable_int(dev);
	rx_writel(dev, MUnit.ODR, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	aac_adapter_comm(dev, dev->comm_interface);
	dev->sync_mode = 0;
	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
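	/*
	 *	Hook the interrupt handler chosen by aac_rx_select_comm();
	 *	if the line cannot be claimed, back out MSI before failing.
	 */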
	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid", dev) < 0) {
		if (dev->msi)
			pci_disable_msi(dev->pdev);
		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = dev->base_start;
	dev->dbg_base_mapped = dev->base;
	dev->dbg_size = dev->base_size;

	aac_adapter_enable_int(dev);
	/*
	 *	Tell the adapter that all is configured, and it can
	 *	start accepting requests
	 */
	aac_rx_start_adapter(dev);

	return 0;

error_iounmap:

	return -1;
}

int aac_rx_init(struct aac_dev *dev)
{
	/*
	 *	Fill in the function dispatch table.
	 */
	dev->a_ops.adapter_ioremap = aac_rx_ioremap;
	dev->a_ops.adapter_comm = aac_rx_select_comm;

	return _aac_rx_init(dev);
}