0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020 #include <linux/kernel.h>
0021 #include <linux/init.h>
0022 #include <linux/types.h>
0023 #include <linux/pci.h>
0024 #include <linux/spinlock.h>
0025 #include <linux/slab.h>
0026 #include <linux/blkdev.h>
0027 #include <linux/delay.h>
0028 #include <linux/completion.h>
0029 #include <linux/mm.h>
0030 #include <scsi/scsi_host.h>
0031 #include <scsi/scsi_device.h>
0032 #include <scsi/scsi_cmnd.h>
0033
0034 #include "aacraid.h"
0035
/*
 * Driver-wide shared state (struct aac_common, see aacraid.h).
 * irq_mod defaults to 1 — presumably interrupt moderation enabled;
 * confirm against the users of aac_config.irq_mod.
 */
struct aac_common aac_config = {
	.irq_mod = 1
};
0039
0040 static inline int aac_is_msix_mode(struct aac_dev *dev)
0041 {
0042 u32 status = 0;
0043
0044 if (aac_is_src(dev))
0045 status = src_readl(dev, MUnit.OMR);
0046 return (status & AAC_INT_MODE_MSIX);
0047 }
0048
/*
 * aac_change_to_intx - switch an SRC adapter back to legacy INTx interrupts
 * @dev: adapter to reprogram
 *
 * Disables MSI-X delivery in the device registers first, then enables
 * INTx via the same device-register access helper.
 */
static inline void aac_change_to_intx(struct aac_dev *dev)
{
	aac_src_access_devreg(dev, AAC_DISABLE_MSIX);
	aac_src_access_devreg(dev, AAC_ENABLE_INTX);
}
0054
/**
 * aac_alloc_comm - allocate and lay out the adapter communication region
 * @dev:	adapter being initialised
 * @commaddr:	out: virtual address of the comm-queue area inside the region
 * @commsize:	bytes required for the comm queues
 * @commalign:	alignment required for the comm-queue area (power of two)
 *
 * Makes one DMA-coherent allocation and carves it into, in order:
 * adapter FIB area | host RRQ (message-type interfaces only) |
 * firmware init struct | padding to @commalign | comm queues |
 * printf buffer.  Also fills in the init structure handed to firmware.
 *
 * Returns 1 on success, 0 on allocation failure (boolean, not errno).
 */
static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
{
	unsigned char *base;
	unsigned long size, align;
	const unsigned long fibsize = dev->max_fib_size;
	const unsigned long printfbufsiz = 256;
	unsigned long host_rrq_size, aac_init_size;
	union aac_init *init;
	dma_addr_t phys;
	unsigned long aac_max_hostphysmempages;

	/*
	 * Size the host response ring (RRQ) and the init struct per comm
	 * interface: TYPE3 with SA firmware gets one ring per possible
	 * MSI-X vector plus extended rrq descriptors in the init struct.
	 */
	if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) ||
	    (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) ||
	    (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 &&
	    !dev->sa_firmware)) {
		host_rrq_size =
			(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)
				* sizeof(u32);
		aac_init_size = sizeof(union aac_init);
	} else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 &&
		dev->sa_firmware) {
		host_rrq_size = (dev->scsi_host_ptr->can_queue
			+ AAC_NUM_MGT_FIB) * sizeof(u32) * AAC_MAX_MSIX;
		aac_init_size = sizeof(union aac_init) +
			(AAC_MAX_HRRQ - 1) * sizeof(struct _rrq);
	} else {
		host_rrq_size = 0;
		aac_init_size = sizeof(union aac_init);
	}
	/* commalign is added so the queue area can be padded into place. */
	size = fibsize + aac_init_size + commsize + commalign +
			printfbufsiz + host_rrq_size;

	base = dma_alloc_coherent(&dev->pdev->dev, size, &phys, GFP_KERNEL);
	if (base == NULL) {
		printk(KERN_ERR "aacraid: unable to create mapping.\n");
		return 0;
	}

	dev->comm_addr = (void *)base;
	dev->comm_phys = phys;
	dev->comm_size = size;

	/*
	 * For the message-type interfaces the host RRQ sits immediately
	 * after the FIB area and must start out zeroed.
	 */
	if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) ||
	    (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) ||
	    (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)) {
		dev->host_rrq = (u32 *)(base + fibsize);
		dev->host_rrq_pa = phys + fibsize;
		memset(dev->host_rrq, 0, host_rrq_size);
	}

	/* Init struct follows the FIB area and (optional) RRQ. */
	dev->init = (union aac_init *)(base + fibsize + host_rrq_size);
	dev->init_pa = phys + fibsize + host_rrq_size;

	init = dev->init;

	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
		int i;
		u64 addr;

		/* Revision-8 init struct for the TYPE3 interface. */
		init->r8.init_struct_revision =
			cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_8);
		init->r8.init_flags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
					INITFLAGS_DRIVER_USES_UTC_TIME |
					INITFLAGS_DRIVER_SUPPORTS_PM);
		init->r8.init_flags |=
				cpu_to_le32(INITFLAGS_DRIVER_SUPPORTS_HBA_MODE);
		init->r8.rr_queue_count = cpu_to_le32(dev->max_msix);
		init->r8.max_io_size =
			cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
		init->r8.max_num_aif = init->r8.reserved1 =
			init->r8.reserved2 = 0;

		/*
		 * Describe one response ring per MSI-X vector; each ring
		 * holds up to dev->vector_cap 32-bit entries.
		 */
		for (i = 0; i < dev->max_msix; i++) {
			addr = (u64)dev->host_rrq_pa + dev->vector_cap * i *
					sizeof(u32);
			init->r8.rrq[i].host_addr_high = cpu_to_le32(
						upper_32_bits(addr));
			init->r8.rrq[i].host_addr_low = cpu_to_le32(
						lower_32_bits(addr));
			init->r8.rrq[i].msix_id = i;
			init->r8.rrq[i].element_count = cpu_to_le16(
						(u16)dev->vector_cap);
			init->r8.rrq[i].comp_thresh =
						init->r8.rrq[i].unused = 0;
		}

		pr_warn("aacraid: Comm Interface type3 enabled\n");
	} else {
		/* Legacy revision-7 init struct for all other interfaces. */
		init->r7.init_struct_revision =
			cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
		if (dev->max_fib_size != sizeof(struct hw_fib))
			init->r7.init_struct_revision =
				cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
		init->r7.no_of_msix_vectors = cpu_to_le32(SA_MINIPORT_REVISION);
		init->r7.fsrev = cpu_to_le32(dev->fsrev);

		/*
		 *	Adapter Fibs are the first thing allocated so that they
		 *	start page aligned
		 */
		dev->aif_base_va = (struct hw_fib *)base;

		init->r7.adapter_fibs_virtual_address = 0;
		init->r7.adapter_fibs_physical_address = cpu_to_le32((u32)phys);
		init->r7.adapter_fibs_size = cpu_to_le32(fibsize);
		init->r7.adapter_fib_align = cpu_to_le32(sizeof(struct hw_fib));

		/*
		 * Number of 4k pages of host physical memory.  The firmware
		 * wants this to stay below 4GB worth of pages; older firmware
		 * had trouble with the math overflowing past 32 bits, so the
		 * field must be capped.
		 */
		aac_max_hostphysmempages =
				dma_get_required_mask(&dev->pdev->dev) >> 12;
		if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES)
			init->r7.host_phys_mem_pages =
					cpu_to_le32(aac_max_hostphysmempages);
		else
			init->r7.host_phys_mem_pages =
					cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);

		init->r7.init_flags =
			cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
			INITFLAGS_DRIVER_SUPPORTS_PM);
		init->r7.max_io_commands =
			cpu_to_le32(dev->scsi_host_ptr->can_queue +
					AAC_NUM_MGT_FIB);
		/* max_sectors << 9: convert 512-byte sectors to bytes. */
		init->r7.max_io_size =
			cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
		init->r7.max_fib_size = cpu_to_le32(dev->max_fib_size);
		init->r7.max_num_aif = cpu_to_le32(dev->max_num_aif);

		if (dev->comm_interface == AAC_COMM_MESSAGE) {
			init->r7.init_flags |=
				cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
			pr_warn("aacraid: Comm Interface enabled\n");
		} else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
			init->r7.init_struct_revision =
				cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6);
			init->r7.init_flags |=
				cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
				INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
				INITFLAGS_FAST_JBOD_SUPPORTED);
			init->r7.host_rrq_addr_high =
				cpu_to_le32(upper_32_bits(dev->host_rrq_pa));
			init->r7.host_rrq_addr_low =
				cpu_to_le32(lower_32_bits(dev->host_rrq_pa));
			pr_warn("aacraid: Comm Interface type1 enabled\n");
		} else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
			init->r7.init_struct_revision =
				cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7);
			init->r7.init_flags |=
				cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
				INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
				INITFLAGS_FAST_JBOD_SUPPORTED);
			init->r7.host_rrq_addr_high =
				cpu_to_le32(upper_32_bits(dev->host_rrq_pa));
			init->r7.host_rrq_addr_low =
				cpu_to_le32(lower_32_bits(dev->host_rrq_pa));
			init->r7.no_of_msix_vectors =
				cpu_to_le32(dev->max_msix);

			pr_warn("aacraid: Comm Interface type2 enabled\n");
		}
	}

	/*
	 *	Increment the base address by the amount already used
	 */
	base = base + fibsize + host_rrq_size + aac_init_size;
	phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size +
			aac_init_size);

	/*
	 *	Align the beginning of Headers to commalign.  NOTE: when base
	 *	is already aligned this still skips a full commalign bytes —
	 *	harmless because size budgeted commalign bytes of padding.
	 */
	align = (commalign - ((uintptr_t)(base) & (commalign - 1)));
	base = base + align;
	phys = phys + align;

	/*
	 *	Fill in addresses of the Comm Area Headers and Queues
	 */
	*commaddr = base;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
		init->r7.comm_header_address = cpu_to_le32((u32)phys);

	/*
	 *	Increment the base address by the amount already used
	 */
	base = base + commsize;
	phys = phys + commsize;

	/*
	 *	Place the Printf buffer area after the Fib with the Comm Area.
	 */
	dev->printfbuf = (void *)base;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3) {
		init->r7.printfbuf = cpu_to_le32(phys);
		init->r7.printfbufsiz = cpu_to_le32(printfbufsiz);
	}
	memset(base, 0, printfbufsiz);
	return 1;
}
0258
0259 static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
0260 {
0261 atomic_set(&q->numpending, 0);
0262 q->dev = dev;
0263 init_waitqueue_head(&q->cmdready);
0264 INIT_LIST_HEAD(&q->cmdq);
0265 init_waitqueue_head(&q->qfull);
0266 spin_lock_init(&q->lockdata);
0267 q->lock = &q->lockdata;
0268 q->headers.producer = (__le32 *)mem;
0269 q->headers.consumer = (__le32 *)(mem+1);
0270 *(q->headers.producer) = cpu_to_le32(qsize);
0271 *(q->headers.consumer) = cpu_to_le32(qsize);
0272 q->entries = qsize;
0273 }
0274
0275 static bool wait_for_io_iter(struct scsi_cmnd *cmd, void *data)
0276 {
0277 int *active = data;
0278
0279 if (aac_priv(cmd)->owner == AAC_OWNER_FIRMWARE)
0280 *active = *active + 1;
0281 return true;
0282 }
0283 static void aac_wait_for_io_completion(struct aac_dev *aac)
0284 {
0285 int i = 0, active;
0286
0287 for (i = 60; i; --i) {
0288
0289 active = 0;
0290 scsi_host_busy_iter(aac->scsi_host_ptr,
0291 wait_for_io_iter, &active);
0292
0293
0294
0295 if (active == 0)
0296 break;
0297 dev_info(&aac->pdev->dev,
0298 "Wait for %d commands to complete\n", active);
0299 ssleep(1);
0300 }
0301 if (active)
0302 dev_err(&aac->pdev->dev,
0303 "%d outstanding commands during shutdown\n", active);
0304 }
0305
0306
0307
0308
0309
0310
0311
0312
/**
 *	aac_send_shutdown	-	shutdown an adapter
 *	@dev: Adapter to shutdown
 *
 *	Sends a VM_CloseAll container command to the firmware after
 *	draining outstanding I/O.  Returns 0, a negative errno, or the
 *	aac_fib_send() status.
 */
int aac_send_shutdown(struct aac_dev * dev)
{
	struct fib * fibctx;
	struct aac_close *cmd;
	int status = 0;

	/* Don't bother talking to firmware that reports unhealthy. */
	if (aac_adapter_check_health(dev))
		return status;

	/* Flag shutdown under the ioctl mutex so new ioctls see it. */
	if (!dev->adapter_shutdown) {
		mutex_lock(&dev->ioctl_mutex);
		dev->adapter_shutdown = 1;
		mutex_unlock(&dev->ioctl_mutex);
	}

	/* Give in-flight firmware-owned commands up to 60s to drain. */
	aac_wait_for_io_completion(dev);

	fibctx = aac_fib_alloc(dev);
	if (!fibctx)
		return -ENOMEM;
	aac_fib_init(fibctx);

	cmd = (struct aac_close *) fib_data(fibctx);
	cmd->command = cpu_to_le32(VM_CloseAll);
	/* Wildcard cid — presumably "all containers"; pairs with VM_CloseAll. */
	cmd->cid = cpu_to_le32(0xfffffffe);

	/* Synchronous send; -2 requests wait with silent timeout. */
	status = aac_fib_send(ContainerCommand,
	  fibctx,
	  sizeof(struct aac_close),
	  FsaNormal,
	  -2 , 1,
	  NULL, NULL);

	if (status >= 0)
		aac_fib_complete(fibctx);
	/* FIB should be freed only after getting the response from the F/W */
	if (status != -ERESTARTSYS)
		aac_fib_free(fibctx);
	/* SRC adapters using MSI drop back to INTx interrupt mode here. */
	if (aac_is_src(dev) &&
	    dev->msi_enabled)
		aac_set_intx_mode(dev);
	return status;
}
0356
0357
0358
0359
0360
0361
0362
0363
0364
0365
0366
0367
/**
 *	aac_comm_init	-	Initialise FSA data structures
 *	@dev:	Adapter to initialise
 *
 *	Allocates the shared comm region and carves it into the eight
 *	host/adapter command and response queues.  Returns 0 on success
 *	or -ENOMEM if the comm region cannot be allocated.
 */
static int aac_comm_init(struct aac_dev * dev)
{
	/* Two u32 headers (producer/consumer) per queue. */
	unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2;
	unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES;
	u32 *headers;
	struct aac_entry * queues;
	unsigned long size;
	struct aac_queue_block * comm = dev->queues;

	/* Lock protecting the FIB structures. */
	spin_lock_init(&dev->fib_lock);

	/*
	 *	Allocate the physically contiguous space for the commq
	 *	queue headers followed by the queue entries themselves.
	 */
	size = hdrsize + queuesize;

	if (!aac_alloc_comm(dev, (void * *)&headers, size, QUEUE_ALIGNMENT))
		return -ENOMEM;

	/* Queue entries start right after all the header pairs. */
	queues = (struct aac_entry *)(((ulong)headers) + hdrsize);

	/* Adapter to Host normal priority Command queue */
	comm->queue[HostNormCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
	queues += HOST_NORM_CMD_ENTRIES;
	headers += 2;

	/* Adapter to Host high priority command queue */
	comm->queue[HostHighCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES);

	queues += HOST_HIGH_CMD_ENTRIES;
	headers +=2;

	/* Host to adapter normal priority command queue */
	comm->queue[AdapNormCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES);

	queues += ADAP_NORM_CMD_ENTRIES;
	headers += 2;

	/* Host to adapter high priority command queue */
	comm->queue[AdapHighCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES);

	queues += ADAP_HIGH_CMD_ENTRIES;
	headers += 2;

	/* Adapter to host normal priority response queue */
	comm->queue[HostNormRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);
	queues += HOST_NORM_RESP_ENTRIES;
	headers += 2;

	/* Adapter to host high priority response queue */
	comm->queue[HostHighRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES);

	queues += HOST_HIGH_RESP_ENTRIES;
	headers += 2;

	/* Host to adapter normal priority response queue */
	comm->queue[AdapNormRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES);

	queues += ADAP_NORM_RESP_ENTRIES;
	headers += 2;

	/* Host to adapter high priority response queue */
	comm->queue[AdapHighRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES);

	/* Each adapter-side queue shares the lock of its paired host queue. */
	comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock;
	comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock;
	comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock;
	comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock;

	return 0;
}
0454
/*
 * aac_define_int_mode - choose INTx vs MSI-X and size the vector capacity
 * @dev: adapter being initialised
 *
 * Decides how many MSI-X vectors to request (bounded by online CPUs and
 * AAC_MAX_MSIX), allocates them, and computes dev->vector_cap — the
 * number of command slots each vector's response queue must cover.
 */
void aac_define_int_mode(struct aac_dev *dev)
{
	int i, msi_count, min_msix;

	msi_count = i = 0;

	/* Adapters that can't do MSI-X (no vectors reported, S6 family,
	 * or sync mode) get one vector sized for the full queue depth. */
	if (dev->max_msix == 0 ||
	    dev->pdev->device == PMC_DEVICE_S6 ||
	    dev->sync_mode) {
		dev->max_msix = 1;
		dev->vector_cap =
			dev->scsi_host_ptr->can_queue +
			AAC_NUM_MGT_FIB;
		return;
	}

	/* Don't bother allocating more MSI-X vectors than cpus */
	msi_count = min(dev->max_msix,
		(unsigned int)num_online_cpus());

	dev->max_msix = msi_count;

	if (msi_count > AAC_MAX_MSIX)
		msi_count = AAC_MAX_MSIX;

	if (msi_count > 1 &&
	    pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
		min_msix = 2;
		/* pci_alloc_irq_vectors returns the number granted or a
		 * negative errno; accept any count >= min_msix. */
		i = pci_alloc_irq_vectors(dev->pdev,
			    min_msix, msi_count,
			    PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
		if (i > 0) {
			dev->msi_enabled = 1;
			msi_count = i;
		} else {
			dev->msi_enabled = 0;
			dev_err(&dev->pdev->dev,
			"MSIX not supported!! Will try INTX 0x%x.\n", i);
		}
	}

	if (!dev->msi_enabled)
		dev->max_msix = msi_count = 1;
	else {
		if (dev->max_msix > msi_count)
			dev->max_msix = msi_count;
	}
	/* SA firmware gives every vector the full queue depth; otherwise
	 * the queue depth is split evenly across the vectors. */
	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 && dev->sa_firmware)
		dev->vector_cap = dev->scsi_host_ptr->can_queue +
				AAC_NUM_MGT_FIB;
	else
		dev->vector_cap = (dev->scsi_host_ptr->can_queue +
				AAC_NUM_MGT_FIB) / msi_count;

}
/**
 *	aac_init_adapter	-	initialise an adapter
 *	@dev:	Adapter to initialise
 *
 *	Negotiates the comm interface with firmware, sizes FIBs, SG tables
 *	and queue depth from the adapter's preferred settings, then sets up
 *	the communication subsystem and FIB pools.  Returns @dev on success
 *	or NULL on failure.
 */
struct aac_dev *aac_init_adapter(struct aac_dev *dev)
{
	u32 status[5];
	struct Scsi_Host * host = dev->scsi_host_ptr;
	extern int aac_sync_mode;

	/*
	 *	Default comm settings derived from the FIB size; these are
	 *	refined below from the adapter's reported capabilities.
	 */
	dev->management_fib_count = 0;
	spin_lock_init(&dev->manage_lock);
	spin_lock_init(&dev->sync_lock);
	spin_lock_init(&dev->iq_lock);
	dev->max_fib_size = sizeof(struct hw_fib);
	dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
		- sizeof(struct aac_fibhdr)
		- sizeof(struct aac_write) + sizeof(struct sgentry))
			/ sizeof(struct sgentry);
	dev->comm_interface = AAC_COMM_PRODUCER;
	dev->raw_io_interface = dev->raw_io_64 = 0;

	/*
	 *	If firmware was left in MSI-X mode (e.g. after a kdump/kexec
	 *	— TODO confirm), force it back to INTX before talking to it.
	 */
	if (aac_is_msix_mode(dev)) {
		aac_change_to_intx(dev);
		dev_info(&dev->pdev->dev, "Changed firmware to INTX mode");
	}

	if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
		0, 0, 0, 0, 0, 0,
		status+0, status+1, status+2, status+3, status+4)) &&
		(status[0] == 0x00000001)) {
		dev->doorbell_mask = status[3];
		if (status[1] & AAC_OPT_NEW_COMM_64)
			dev->raw_io_64 = 1;
		dev->sync_mode = aac_sync_mode;
		if (dev->a_ops.adapter_comm &&
		    (status[1] & AAC_OPT_NEW_COMM)) {
			dev->comm_interface = AAC_COMM_MESSAGE;
			dev->raw_io_interface = 1;
			if ((status[1] & AAC_OPT_NEW_COMM_TYPE1)) {
				/* driver supports TYPE1 (Tupelo) */
				dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
			} else if (status[1] & AAC_OPT_NEW_COMM_TYPE2) {
				/* driver supports TYPE2 (Denali) */
				dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
			} else if (status[1] & AAC_OPT_NEW_COMM_TYPE3) {
				/* driver supports TYPE3 (Smart Family) */
				dev->comm_interface = AAC_COMM_MESSAGE_TYPE3;
			} else if (status[1] & AAC_OPT_NEW_COMM_TYPE4) {
				/* not supported TYPE - switch to sync. mode */
				dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
				dev->sync_mode = 1;
			}
		}
		if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) &&
			(status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE)))
			dev->sa_firmware = 1;
		else
			dev->sa_firmware = 0;

		if (status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET))
			dev->soft_reset_support = 1;
		else
			dev->soft_reset_support = 0;

		/* Remap the BAR if firmware needs a larger footprint. */
		if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
		    (status[2] > dev->base_size)) {
			aac_adapter_ioremap(dev, 0);
			dev->base_size = status[2];
			if (aac_adapter_ioremap(dev, status[2])) {
				/* remap failed: fall back to the producer
				 * interface with the minimum footprint. */
				dev->comm_interface = AAC_COMM_PRODUCER;
				if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) {
					printk(KERN_WARNING
					  "aacraid: unable to map adapter.\n");
					return NULL;
				}
			}
		}
	}
	dev->max_msix = 0;
	dev->msi_enabled = 0;
	dev->adapter_shutdown = 0;
	if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
	  0, 0, 0, 0, 0, 0,
	  status+0, status+1, status+2, status+3, status+4))
	 && (status[0] == 0x00000001)) {
		/*
		 *	status[1] >> 16	 maximum command size (in KB, by use)
		 *	status[1] & 0xFFE0 maximum FIB size (32-byte granular)
		 *	status[2] >> 16	 host-side SG table size
		 *	status[2] & 0xFFFF driver-side SG table size
		 *	status[3]	 queue depth (high half on SRC)
		 *	status[4] & 0xFFFF maximum number of AIFs
		 */
		host->max_sectors = (status[1] >> 16) << 1;
		dev->max_fib_size = status[1] & 0xFFE0;
		host->sg_tablesize = status[2] >> 16;
		dev->sg_tablesize = status[2] & 0xFFFF;
		/* Clamp can_queue, reserving AAC_NUM_MGT_FIB slots for
		 * management FIBs. */
		if (aac_is_src(dev)) {
			if (host->can_queue > (status[3] >> 16) -
					AAC_NUM_MGT_FIB)
				host->can_queue = (status[3] >> 16) -
					AAC_NUM_MGT_FIB;
		} else if (host->can_queue > (status[3] & 0xFFFF) -
				AAC_NUM_MGT_FIB)
			host->can_queue = (status[3] & 0xFFFF) -
				AAC_NUM_MGT_FIB;

		dev->max_num_aif = status[4] & 0xFFFF;
	}
	/* Module parameter override: only allowed to shrink the queue. */
	if (numacb > 0) {
		if (numacb < host->can_queue)
			host->can_queue = numacb;
		else
			pr_warn("numacb=%d ignored\n", numacb);
	}

	if (aac_is_src(dev))
		aac_define_int_mode(dev);

	/*
	 *	Ok now init the communication subsystem
	 */
	dev->queues = kzalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
	if (dev->queues == NULL) {
		printk(KERN_ERR "Error could not allocate comm region.\n");
		return NULL;
	}

	if (aac_comm_init(dev)<0){
		kfree(dev->queues);
		return NULL;
	}

	/*
	 *	Initialize the list of fibs
	 */
	if (aac_fib_setup(dev) < 0) {
		kfree(dev->queues);
		return NULL;
	}

	INIT_LIST_HEAD(&dev->fib_list);
	INIT_LIST_HEAD(&dev->sync_fib_list);

	return dev;
}
0660