// SPDX-License-Identifier: GPL-2.0
/*
 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
 *
 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
 *
 * Based on the original DAC960 driver,
 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
 * Portions Copyright 2002 by Mika Kuoppala <miku@iki.fi>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/raid_class.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include "myrb.h"

static struct raid_template *myrb_raid_template;

static void myrb_monitor(struct work_struct *work);
static inline void myrb_translate_devstate(void *DeviceState);

static inline int myrb_logical_channel(struct Scsi_Host *shost)
{
	return shost->max_channel - 1;
}

static struct myrb_devstate_name_entry {
	enum myrb_devstate state;
	const char *name;
} myrb_devstate_name_list[] = {
	{ MYRB_DEVICE_DEAD, "Dead" },
	{ MYRB_DEVICE_WO, "WriteOnly" },
	{ MYRB_DEVICE_ONLINE, "Online" },
	{ MYRB_DEVICE_CRITICAL, "Critical" },
	{ MYRB_DEVICE_STANDBY, "Standby" },
	{ MYRB_DEVICE_OFFLINE, "Offline" },
};

static const char *myrb_devstate_name(enum myrb_devstate state)
{
	struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
		if (entry[i].state == state)
			return entry[i].name;
	}
	return "Unknown";
}

static struct myrb_raidlevel_name_entry {
	enum myrb_raidlevel level;
	const char *name;
} myrb_raidlevel_name_list[] = {
	{ MYRB_RAID_LEVEL0, "RAID0" },
	{ MYRB_RAID_LEVEL1, "RAID1" },
	{ MYRB_RAID_LEVEL3, "RAID3" },
	{ MYRB_RAID_LEVEL5, "RAID5" },
	{ MYRB_RAID_LEVEL6, "RAID6" },
	{ MYRB_RAID_JBOD, "JBOD" },
};

static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
{
	struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
		if (entry[i].level == level)
			return entry[i].name;
	}
	return NULL;
}

/*
 * myrb_create_mempools - allocates auxiliary data structures
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
{
	size_t elem_size, elem_align;

	elem_align = sizeof(struct myrb_sge);
	elem_size = cb->host->sg_tablesize * elem_align;
	cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
				      elem_size, elem_align, 0);
	if (cb->sg_pool == NULL) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate SG pool\n");
		return false;
	}

	cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
					sizeof(struct myrb_dcdb),
					sizeof(unsigned int), 0);
	if (!cb->dcdb_pool) {
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate DCDB pool\n");
		return false;
	}

	snprintf(cb->work_q_name, sizeof(cb->work_q_name),
		 "myrb_wq_%d", cb->host->host_no);
	cb->work_q = create_singlethread_workqueue(cb->work_q_name);
	if (!cb->work_q) {
		dma_pool_destroy(cb->dcdb_pool);
		cb->dcdb_pool = NULL;
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to create workqueue\n");
		return false;
	}

	/*
	 * Initialize the Monitoring Timer.
	 */
	INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
	queue_delayed_work(cb->work_q, &cb->monitor_work, 1);

	return true;
}

/*
 * myrb_destroy_mempools - tears down the memory pools for the controller
 */
static void myrb_destroy_mempools(struct myrb_hba *cb)
{
	cancel_delayed_work_sync(&cb->monitor_work);
	destroy_workqueue(cb->work_q);

	dma_pool_destroy(cb->sg_pool);
	dma_pool_destroy(cb->dcdb_pool);
}

/*
 * myrb_reset_cmd - reset command block
 */
static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	memset(mbox, 0, sizeof(union myrb_cmd_mbox));
	cmd_blk->status = 0;
}

/*
 * myrb_qcmd - queues command block for execution
 */
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;

	cb->write_cmd_mbox(next_mbox, mbox);
	if (cb->prev_cmd_mbox1->words[0] == 0 ||
	    cb->prev_cmd_mbox2->words[0] == 0)
		cb->get_cmd_mbox(base);
	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
	cb->prev_cmd_mbox1 = next_mbox;
	if (++next_mbox > cb->last_cmd_mbox)
		next_mbox = cb->first_cmd_mbox;
	cb->next_cmd_mbox = next_mbox;
}
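
/*
 * Submission sketch (derived from myrb_qcmd() above): the command
 * mailboxes form a ring. write_cmd_mbox() copies the new command into
 * the next free slot; a slot whose words[0] is zero has already been
 * consumed by the controller. If either of the two most recently
 * submitted slots is already free, the controller may have gone idle,
 * so get_cmd_mbox() - the "new command" doorbell - is used to make it
 * rescan the ring.
 */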

/*
 * myrb_exec_cmd - executes command block and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
		struct myrb_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(cmpl);
	unsigned long flags;

	cmd_blk->completion = &cmpl;

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	wait_for_completion(&cmpl);
	return cmd_blk->status;
}
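
/*
 * Note: myrb_exec_cmd() sleeps in wait_for_completion(), so it and the
 * myrb_exec_* helpers built on it may only be called from process
 * context (probe, sysfs, the monitor workqueue), never from interrupt
 * context.
 */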

/*
 * myrb_exec_type3 - executes a type 3 command and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, dma_addr_t addr)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_DCMD_TAG;
	mbox->type3.opcode = op;
	mbox->type3.addr = addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	return status;
}

/*
 * myrb_exec_type3D - executes a type 3D command and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, struct scsi_device *sdev,
		struct myrb_pdev_state *pdev_info)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	dma_addr_t pdev_info_addr;

	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
					sizeof(struct myrb_pdev_state),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
		return MYRB_STATUS_SUBSYS_FAILED;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.opcode = op;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.addr = pdev_info_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
	if (status == MYRB_STATUS_SUCCESS &&
	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
		myrb_translate_devstate(pdev_info);

	return status;
}

static char *myrb_event_msg[] = {
	"killed because write recovery failed",
	"killed because of SCSI bus reset failure",
	"killed because of double check condition",
	"killed because it was removed",
	"killed because of gross error on SCSI chip",
	"killed because of bad tag returned from drive",
	"killed because of timeout on SCSI command",
	"killed because of reset SCSI command issued from system",
	"killed because busy or parity error count exceeded limit",
	"killed because of 'kill drive' command from system",
	"killed because of selection timeout",
	"killed due to SCSI phase sequence error",
	"killed due to unknown status",
};

/*
 * myrb_get_event - get event log entry
 * @cb: pointer to the hba structure
 * @event: number of the event
 *
 * Execute a type 3E command and log the event message.
 */
static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_log_entry *ev_buf;
	dma_addr_t ev_addr;
	unsigned short status;

	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
				    sizeof(struct myrb_log_entry),
				    &ev_addr, GFP_KERNEL);
	if (!ev_buf)
		return;

	myrb_reset_cmd(cmd_blk);
	mbox->type3E.id = MYRB_MCMD_TAG;
	mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
	mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
	mbox->type3E.opqual = 1;
	mbox->type3E.ev_seq = event;
	mbox->type3E.addr = ev_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status != MYRB_STATUS_SUCCESS)
		shost_printk(KERN_INFO, cb->host,
			     "Failed to get event log %d, status %04x\n",
			     event, status);
	else if (ev_buf->seq_num == event) {
		struct scsi_sense_hdr sshdr;

		memset(&sshdr, 0, sizeof(sshdr));
		scsi_normalize_sense(ev_buf->sense, 32, &sshdr);

		if (sshdr.sense_key == VENDOR_SPECIFIC &&
		    sshdr.asc == 0x80 &&
		    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: %s\n",
				     ev_buf->channel, ev_buf->target,
				     myrb_event_msg[sshdr.ascq]);
		else
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
				     ev_buf->channel, ev_buf->target,
				     sshdr.sense_key, sshdr.asc, sshdr.ascq);
	}

	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
			  ev_buf, ev_addr);
}

/*
 * myrb_get_errtable - retrieves the error table from the controller
 *
 * Executes a type 3 command and logs changed error counters.
 */
static void myrb_get_errtable(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];

	memcpy(&old_table, cb->err_table, sizeof(old_table));

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
	mbox->type3.addr = cb->err_table_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		struct myrb_error_entry *table = cb->err_table;
		struct myrb_error_entry *new, *old;
		size_t err_table_offset;
		struct scsi_device *sdev;

		shost_for_each_device(sdev, cb->host) {
			if (sdev->channel >= myrb_logical_channel(cb->host))
				continue;
			err_table_offset = sdev->channel * MYRB_MAX_TARGETS
					   + sdev->id;
			new = table + err_table_offset;
			old = &old_table[err_table_offset];
			if (new->parity_err == old->parity_err &&
			    new->soft_err == old->soft_err &&
			    new->hard_err == old->hard_err &&
			    new->misc_err == old->misc_err)
				continue;
			sdev_printk(KERN_CRIT, sdev,
				    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
				    new->parity_err, new->soft_err,
				    new->hard_err, new->misc_err);
		}
	}
}

/*
 * myrb_get_ldev_info - retrieves the logical device table from the controller
 *
 * Executes a type 3 command and updates the logical device table.
 *
 * Return: command status
 */
static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
{
	unsigned short status;
	int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
	struct Scsi_Host *shost = cb->host;

	status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
				 cb->ldev_info_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
		struct myrb_ldev_info *old = NULL;
		struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
					  ldev_num, 0);
		if (!sdev) {
			if (new->state == MYRB_DEVICE_OFFLINE)
				continue;
			shost_printk(KERN_INFO, shost,
				     "Adding Logical Drive %d in state %s\n",
				     ldev_num, myrb_devstate_name(new->state));
			scsi_add_device(shost, myrb_logical_channel(shost),
					ldev_num, 0);
			continue;
		}
		old = sdev->hostdata;
		if (new->state != old->state)
			shost_printk(KERN_INFO, shost,
				     "Logical Drive %d is now %s\n",
				     ldev_num, myrb_devstate_name(new->state));
		if (new->wb_enabled != old->wb_enabled)
			sdev_printk(KERN_INFO, sdev,
				    "Logical Drive is now WRITE %s\n",
				    (new->wb_enabled ? "BACK" : "THRU"));
		memcpy(old, new, sizeof(*new));
		scsi_device_put(sdev);
	}
	return status;
}

/*
 * myrb_get_rbld_progress - get rebuild progress information
 *
 * Executes a type 3 command and returns the rebuild progress
 * information.
 *
 * Return: command status
 */
static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
		struct myrb_rbld_progress *rbld)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf)
		return MYRB_STATUS_RBLD_NOT_CHECKED;

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (rbld)
		memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
	return status;
}

/*
 * myrb_update_rbld_progress - updates the rebuild status
 *
 * Updates the rebuild status for the attached logical devices.
 */
static void myrb_update_rbld_progress(struct myrb_hba *cb)
{
	struct myrb_rbld_progress rbld_buf;
	unsigned short status;

	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
		status = MYRB_STATUS_RBLD_SUCCESS;
	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
		unsigned int blocks_done =
			rbld_buf.ldev_size - rbld_buf.blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  rbld_buf.ldev_num, 0);
		if (!sdev)
			return;

		switch (status) {
		case MYRB_STATUS_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild in Progress, %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (rbld_buf.ldev_size >> 7));
			break;
		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Logical Drive Failure\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Completed Successfully\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Successfully Terminated\n");
			break;
		default:
			break;
		}
		scsi_device_put(sdev);
	}
	cb->last_rbld_status = status;
}

/*
 * myrb_get_cc_progress - retrieve the rebuild status
 *
 * Execute a type 3 command and fetch the rebuild / consistency check
 * status.
 */
static void myrb_get_cc_progress(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf) {
		cb->need_cc_status = true;
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		unsigned int ldev_num = rbld_buf->ldev_num;
		unsigned int ldev_size = rbld_buf->ldev_size;
		unsigned int blocks_done =
			ldev_size - rbld_buf->blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  ldev_num, 0);
		if (sdev) {
			sdev_printk(KERN_INFO, sdev,
				    "Consistency Check in Progress: %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (ldev_size >> 7));
			scsi_device_put(sdev);
		}
	}
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
}

/*
 * myrb_bgi_control - updates background initialisation status
 *
 * Executes a type 3B command and updates the background initialisation status.
 */
static void myrb_bgi_control(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_bgi_status *bgi, *last_bgi;
	dma_addr_t bgi_addr;
	struct scsi_device *sdev = NULL;
	unsigned short status;

	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
				 &bgi_addr, GFP_KERNEL);
	if (!bgi) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate bgi memory\n");
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3B.id = MYRB_DCMD_TAG;
	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
	mbox->type3B.optype = 0x20;
	mbox->type3B.addr = bgi_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	last_bgi = &cb->bgi_status;
	sdev = scsi_device_lookup(cb->host,
				  myrb_logical_channel(cb->host),
				  bgi->ldev_num, 0);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		switch (bgi->status) {
		case MYRB_BGI_INVALID:
			break;
		case MYRB_BGI_STARTED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Started\n");
			break;
		case MYRB_BGI_INPROGRESS:
			if (!sdev)
				break;
			if (bgi->blocks_done == last_bgi->blocks_done &&
			    bgi->ldev_num == last_bgi->ldev_num)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization in Progress: %d%% completed\n",
				    (100 * (bgi->blocks_done >> 7))
				    / (bgi->ldev_size >> 7));
			break;
		case MYRB_BGI_SUSPENDED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Suspended\n");
			break;
		case MYRB_BGI_CANCELLED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Cancelled\n");
			break;
		}
		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
		break;
	case MYRB_STATUS_BGI_SUCCESS:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Completed Successfully\n");
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	case MYRB_STATUS_BGI_ABORTED:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Aborted\n");
		fallthrough;
	case MYRB_STATUS_NO_BGI_INPROGRESS:
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	}
	if (sdev)
		scsi_device_put(sdev);
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
			  bgi, bgi_addr);
}

/*
 * myrb_hba_enquiry - updates the controller status
 *
 * Executes a DAC_V1_Enquiry command and updates the controller status.
 *
 * Return: command status
 */
static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
{
	struct myrb_enquiry old, *new;
	unsigned short status;

	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	new = cb->enquiry;
	if (new->ldev_count > old.ldev_count) {
		int ldev_num = old.ldev_count - 1;

		while (++ldev_num < new->ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d Now Exists\n",
				     ldev_num);
	}
	if (new->ldev_count < old.ldev_count) {
		int ldev_num = new->ldev_count - 1;

		while (++ldev_num < old.ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d No Longer Exists\n",
				     ldev_num);
	}
	if (new->status.deferred != old.status.deferred)
		shost_printk(KERN_CRIT, cb->host,
			     "Deferred Write Error Flag is now %s\n",
			     (new->status.deferred ? "TRUE" : "FALSE"));
	if (new->ev_seq != old.ev_seq) {
		cb->new_ev_seq = new->ev_seq;
		cb->need_err_info = true;
		shost_printk(KERN_INFO, cb->host,
			     "Event log %d/%d (%d/%d) available\n",
			     cb->old_ev_seq, cb->new_ev_seq,
			     old.ev_seq, new->ev_seq);
	}
	if ((new->ldev_critical > 0 &&
	     new->ldev_critical != old.ldev_critical) ||
	    (new->ldev_offline > 0 &&
	     new->ldev_offline != old.ldev_offline) ||
	    (new->ldev_count != old.ldev_count)) {
		shost_printk(KERN_INFO, cb->host,
			     "Logical drive count changed (%d/%d/%d)\n",
			     new->ldev_critical,
			     new->ldev_offline,
			     new->ldev_count);
		cb->need_ldev_info = true;
	}
	if (new->pdev_dead > 0 ||
	    new->pdev_dead != old.pdev_dead ||
	    time_after_eq(jiffies, cb->secondary_monitor_time
			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
		cb->need_bgi_status = cb->bgi_status_supported;
		cb->secondary_monitor_time = jiffies;
	}
	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
		cb->need_rbld = true;
		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
	}
	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
		switch (new->rbld) {
		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed Successfully\n");
			break;
		case MYRB_STDBY_RBLD_IN_PROGRESS:
		case MYRB_BG_RBLD_IN_PROGRESS:
			break;
		case MYRB_BG_CHECK_IN_PROGRESS:
			cb->need_cc_status = true;
			break;
		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed with Error\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Physical Device Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Logical Drive Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Other Causes\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Successfully Terminated\n");
			break;
		}
	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
		cb->need_cc_status = true;

	return MYRB_STATUS_SUCCESS;
}

/*
 * myrb_set_pdev_state - sets the device state for a physical device
 *
 * Return: command status
 */
static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
		struct scsi_device *sdev, enum myrb_devstate state)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.state = state & 0x1F;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);

	return status;
}

/*
 * myrb_enable_mmio - enables the Memory Mailbox Interface
 *
 * PD and P controller types have no memory mailbox, but still need the
 * other dma mapped memory.
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
{
	void __iomem *base = cb->io_base;
	struct pci_dev *pdev = cb->pdev;
	size_t err_table_size;
	size_t ldev_info_size;
	union myrb_cmd_mbox *cmd_mbox_mem;
	struct myrb_stat_mbox *stat_mbox_mem;
	union myrb_cmd_mbox mbox;
	unsigned short status;

	memset(&mbox, 0, sizeof(union myrb_cmd_mbox));

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "DMA mask out of range\n");
		return false;
	}

	cb->enquiry = dma_alloc_coherent(&pdev->dev,
					 sizeof(struct myrb_enquiry),
					 &cb->enquiry_addr, GFP_KERNEL);
	if (!cb->enquiry)
		return false;

	err_table_size = sizeof(struct myrb_error_entry) *
			 MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
	cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
					   &cb->err_table_addr, GFP_KERNEL);
	if (!cb->err_table)
		return false;

	ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
	cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
					       &cb->ldev_info_addr, GFP_KERNEL);
	if (!cb->ldev_info_buf)
		return false;

	/* Skip mailbox initialisation for PD and P controllers */
	if (!mmio_init_fn)
		return true;

	/* These are the base addresses for the command memory mailbox array */
	cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
	cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
						cb->cmd_mbox_size,
						&cb->cmd_mbox_addr,
						GFP_KERNEL);
	if (!cb->first_cmd_mbox)
		return false;

	cmd_mbox_mem = cb->first_cmd_mbox;
	cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
	cb->last_cmd_mbox = cmd_mbox_mem;
	cb->next_cmd_mbox = cb->first_cmd_mbox;
	cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
	cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;

	/* These are the base addresses for the status memory mailbox array */
	cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
			     sizeof(struct myrb_stat_mbox);
	cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
						 cb->stat_mbox_size,
						 &cb->stat_mbox_addr,
						 GFP_KERNEL);
	if (!cb->first_stat_mbox)
		return false;

	stat_mbox_mem = cb->first_stat_mbox;
	stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
	cb->last_stat_mbox = stat_mbox_mem;
	cb->next_stat_mbox = cb->first_stat_mbox;

	/* Enable the Memory Mailbox Interface. */
	cb->dual_mode_interface = true;
	mbox.typeX.opcode = 0x2B;
	mbox.typeX.id = 0;
	mbox.typeX.opcode2 = 0x14;
	mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
	mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;

	status = mmio_init_fn(pdev, base, &mbox);
	if (status != MYRB_STATUS_SUCCESS) {
		cb->dual_mode_interface = false;
		mbox.typeX.opcode2 = 0x10;
		status = mmio_init_fn(pdev, base, &mbox);
		if (status != MYRB_STATUS_SUCCESS) {
			dev_err(&pdev->dev,
0895 "Failed to enable mailbox, statux %02X\n",
				status);
			return false;
		}
	}
	return true;
}
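
/*
 * The mailbox initialisation above first tries dual-mode operation
 * (opcode2 0x14) and falls back to single-mode (opcode2 0x10) if the
 * firmware rejects it; dual_mode_interface records which variant the
 * status-handling code has to decode. The opcode values are taken from
 * the code itself, not from a published specification.
 */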

/*
 * myrb_get_hba_config - reads the configuration information
 *
 * Reads the configuration information from the controller and
 * initializes the controller structure.
 *
 * Return: 0 on success, errno otherwise
 */
static int myrb_get_hba_config(struct myrb_hba *cb)
{
	struct myrb_enquiry2 *enquiry2;
	dma_addr_t enquiry2_addr;
	struct myrb_config2 *config2;
	dma_addr_t config2_addr;
	struct Scsi_Host *shost = cb->host;
	struct pci_dev *pdev = cb->pdev;
	int pchan_max = 0, pchan_cur = 0;
	unsigned short status;
	int ret = -ENODEV, memsize = 0;

	enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				      &enquiry2_addr, GFP_KERNEL);
	if (!enquiry2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 enquiry2 memory\n");
		return -ENOMEM;
	}
	config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
				     &config2_addr, GFP_KERNEL);
	if (!config2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 config2 memory\n");
		dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				  enquiry2, enquiry2_addr);
		return -ENOMEM;
	}
	mutex_lock(&cb->dma_mutex);
	status = myrb_hba_enquiry(cb);
	mutex_unlock(&cb->dma_mutex);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
0944 "Failed it issue V1 Enquiry\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry2\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue ReadConfig2\n");
		goto out_free;
	}

	status = myrb_get_ldev_info(cb);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to get logical drive information\n");
		goto out_free;
	}

	/*
	 * Initialize the Controller Model Name and Full Model Name fields.
	 */
	switch (enquiry2->hw.sub_model) {
	case DAC960_V1_P_PD_PU:
		if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
			strcpy(cb->model_name, "DAC960PU");
		else
			strcpy(cb->model_name, "DAC960PD");
		break;
	case DAC960_V1_PL:
		strcpy(cb->model_name, "DAC960PL");
		break;
	case DAC960_V1_PG:
		strcpy(cb->model_name, "DAC960PG");
		break;
	case DAC960_V1_PJ:
		strcpy(cb->model_name, "DAC960PJ");
		break;
	case DAC960_V1_PR:
		strcpy(cb->model_name, "DAC960PR");
		break;
	case DAC960_V1_PT:
		strcpy(cb->model_name, "DAC960PT");
		break;
	case DAC960_V1_PTL0:
		strcpy(cb->model_name, "DAC960PTL0");
		break;
	case DAC960_V1_PRL:
		strcpy(cb->model_name, "DAC960PRL");
		break;
	case DAC960_V1_PTL1:
		strcpy(cb->model_name, "DAC960PTL1");
		break;
	case DAC960_V1_1164P:
		strcpy(cb->model_name, "eXtremeRAID 1100");
		break;
	default:
		shost_printk(KERN_WARNING, cb->host,
			     "Unknown Model %X\n",
			     enquiry2->hw.sub_model);
		goto out;
	}

	/*
	 * Initialize the Controller Firmware Version field and verify that
	 * it is a supported firmware version.
	 * The supported firmware versions are:
	 *
	 * DAC1164P		    5.06 and above
	 * DAC960PTL/PRL/PJ/PG	    4.06 and above
	 * DAC960PU/PD/PL	    3.51 and above
	 * DAC960PU/PD/PL/P	    2.73 and above
	 */
#if defined(CONFIG_ALPHA)
	/*
	 * DEC Alpha machines were often equipped with DAC960 cards that
	 * were OEMed from Mylex with custom firmware; 2.70 was the last
	 * firmware revision released by DEC for those controllers, and it
	 * is known to work well with this driver.
	 */
# define FIRMWARE_27X	"2.70"
#else
# define FIRMWARE_27X	"2.73"
#endif

	if (enquiry2->fw.major_version == 0) {
		enquiry2->fw.major_version = cb->enquiry->fw_major_version;
		enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
		enquiry2->fw.firmware_type = '0';
		enquiry2->fw.turn_id = 0;
	}
	snprintf(cb->fw_version, sizeof(cb->fw_version),
		 "%u.%02u-%c-%02u",
		 enquiry2->fw.major_version,
		 enquiry2->fw.minor_version,
		 enquiry2->fw.firmware_type,
		 enquiry2->fw.turn_id);
	if (!((enquiry2->fw.major_version == 5 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 4 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 3 &&
	       enquiry2->fw.minor_version >= 51) ||
	      (enquiry2->fw.major_version == 2 &&
	       strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
		shost_printk(KERN_WARNING, cb->host,
			     "Firmware Version '%s' unsupported\n",
			     cb->fw_version);
		goto out;
	}

	/*
	 * Initialize the Channels, Targets, Memory Size, and SAF-TE
	 * Enclosure Management Enabled fields.
	 */
	switch (enquiry2->hw.model) {
	case MYRB_5_CHANNEL_BOARD:
		pchan_max = 5;
		break;
	case MYRB_3_CHANNEL_BOARD:
	case MYRB_3_CHANNEL_ASIC_DAC:
		pchan_max = 3;
		break;
	case MYRB_2_CHANNEL_BOARD:
		pchan_max = 2;
		break;
	default:
		pchan_max = enquiry2->cfg_chan;
		break;
	}
	pchan_cur = enquiry2->cur_chan;
	if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
		cb->bus_width = 32;
	else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
		cb->bus_width = 16;
	else
		cb->bus_width = 8;
	cb->ldev_block_size = enquiry2->ldev_block_size;
	shost->max_channel = pchan_cur;
	shost->max_id = enquiry2->max_targets;
	memsize = enquiry2->mem_size >> 20;
	cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);

	/*
	 * Initialize the Controller Queue Depth, Driver Queue Depth,
	 * Logical Drive Count, Maximum Blocks per Command, Controller
	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
	 * The Driver Queue Depth must be at most one less than the
	 * Controller Queue Depth to allow for an automatic drive
	 * rebuild operation.
	 */
	shost->can_queue = cb->enquiry->max_tcq;
	if (shost->can_queue < 3)
		shost->can_queue = enquiry2->max_cmds;
	if (shost->can_queue < 3)
		/* Play safe and disable TCQ */
		shost->can_queue = 1;

	if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
		shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
	shost->max_sectors = enquiry2->max_sectors;
	shost->sg_tablesize = enquiry2->max_sge;
	if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
		shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;

	/*
	 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
	 */
	cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
			  >> (10 - MYRB_BLKSIZE_BITS);
	cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
			   >> (10 - MYRB_BLKSIZE_BITS);
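
	/*
	 * Worked example (values illustrative): with MYRB_BLKSIZE_BITS == 9
	 * (512-byte blocks) the shift above is 1, so 128 blocks_per_stripe
	 * at block_factor 1 gives 128 * 1 >> 1 = 64, i.e. a 64KB stripe.
	 */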

	cb->ldev_geom_heads = 255;
	cb->ldev_geom_sectors = 63;
	if (config2->drive_geometry) {
		cb->ldev_geom_heads = 128;
		cb->ldev_geom_sectors = 32;
	}

	/*
	 * Initialize the Background Initialization Status.
	 */
	if ((cb->fw_version[0] == '4' &&
	     strcmp(cb->fw_version, "4.08") >= 0) ||
	    (cb->fw_version[0] == '5' &&
	     strcmp(cb->fw_version, "5.08") >= 0)) {
		cb->bgi_status_supported = true;
		myrb_bgi_control(cb);
	}
	cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
	ret = 0;

out:
	shost_printk(KERN_INFO, cb->host,
		     "Configuring %s PCI RAID Controller\n", cb->model_name);
	shost_printk(KERN_INFO, cb->host,
		     " Firmware Version: %s, Memory Size: %dMB\n",
		     cb->fw_version, memsize);
	if (cb->io_addr == 0)
		shost_printk(KERN_INFO, cb->host,
			     " I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
			     (unsigned long)cb->pci_addr, cb->irq);
	else
		shost_printk(KERN_INFO, cb->host,
			     " I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
			     (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
			     cb->irq);
	shost_printk(KERN_INFO, cb->host,
		     " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
		     cb->host->can_queue, cb->host->max_sectors);
	shost_printk(KERN_INFO, cb->host,
		     " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
		     cb->host->can_queue, cb->host->sg_tablesize,
		     MYRB_SCATTER_GATHER_LIMIT);
	shost_printk(KERN_INFO, cb->host,
		     " Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
		     cb->stripe_size, cb->segment_size,
		     cb->ldev_geom_heads, cb->ldev_geom_sectors,
		     cb->safte_enabled ?
		     "  SAF-TE Enclosure Management Enabled" : "");
	shost_printk(KERN_INFO, cb->host,
		     " Physical: %d/%d channels %d/%d/%d devices\n",
		     pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
		     cb->host->max_id);

	shost_printk(KERN_INFO, cb->host,
		     " Logical: 1/1 channels, %d/%d disks\n",
		     cb->enquiry->ldev_count, MYRB_MAX_LDEVS);

out_free:
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
			  enquiry2, enquiry2_addr);
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
			  config2, config2_addr);

	return ret;
}

/*
 * myrb_unmap - unmaps controller structures
 */
static void myrb_unmap(struct myrb_hba *cb)
{
	if (cb->ldev_info_buf) {
		size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
					MYRB_MAX_LDEVS;

		dma_free_coherent(&cb->pdev->dev, ldev_info_size,
				  cb->ldev_info_buf, cb->ldev_info_addr);
		cb->ldev_info_buf = NULL;
	}
	if (cb->err_table) {
		size_t err_table_size = sizeof(struct myrb_error_entry) *
					MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;

		dma_free_coherent(&cb->pdev->dev, err_table_size,
				  cb->err_table, cb->err_table_addr);
		cb->err_table = NULL;
	}
	if (cb->enquiry) {
		dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
				  cb->enquiry, cb->enquiry_addr);
		cb->enquiry = NULL;
	}
	if (cb->first_stat_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
				  cb->first_stat_mbox, cb->stat_mbox_addr);
		cb->first_stat_mbox = NULL;
	}
	if (cb->first_cmd_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
				  cb->first_cmd_mbox, cb->cmd_mbox_addr);
		cb->first_cmd_mbox = NULL;
	}
}

/*
 * myrb_cleanup - cleanup controller structures
 */
static void myrb_cleanup(struct myrb_hba *cb)
{
	struct pci_dev *pdev = cb->pdev;

	/* Free the memory mailbox, status, and related structures */
	myrb_unmap(cb);

	if (cb->mmio_base) {
		if (cb->disable_intr)
			cb->disable_intr(cb->io_base);
		iounmap(cb->mmio_base);
	}
	if (cb->irq)
		free_irq(cb->irq, cb);
	if (cb->io_addr)
		release_region(cb->io_addr, 0x80);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	scsi_host_put(cb->host);
}

static int myrb_host_reset(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *shost = scmd->device->host;
	struct myrb_hba *cb = shost_priv(shost);

	cb->reset(cb->io_base);
	return SUCCESS;
}

static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct request *rq = scsi_cmd_to_rq(scmd);
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_dcdb *dcdb;
	dma_addr_t dcdb_addr;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	int nsge;

	myrb_reset_cmd(cmd_blk);
	dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
	if (!dcdb)
		return SCSI_MLQUEUE_HOST_BUSY;
	nsge = scsi_dma_map(scmd);
	if (nsge > 1) {
		dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
		scmd->result = (DID_ERROR << 16);
		scsi_done(scmd);
		return 0;
	}

	mbox->type3.opcode = MYRB_CMD_DCDB;
	mbox->type3.id = rq->tag + 3;
	mbox->type3.addr = dcdb_addr;
	dcdb->channel = sdev->channel;
	dcdb->target = sdev->id;
	switch (scmd->sc_data_direction) {
	case DMA_NONE:
		dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
		break;
	case DMA_TO_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
		break;
	case DMA_FROM_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
		break;
	default:
		dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
		break;
	}
	dcdb->early_status = false;
	if (rq->timeout <= 10)
		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
	else if (rq->timeout <= 60)
		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
	else if (rq->timeout <= 600)
		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
	else
		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
	dcdb->no_autosense = false;
	dcdb->allow_disconnect = true;
	sgl = scsi_sglist(scmd);
	dcdb->dma_addr = sg_dma_address(sgl);
	if (sg_dma_len(sgl) > USHRT_MAX) {
		dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
		dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
	} else {
		dcdb->xfer_len_lo = sg_dma_len(sgl);
		dcdb->xfer_len_hi4 = 0;
	}
	dcdb->cdb_len = scmd->cmd_len;
	dcdb->sense_len = sizeof(dcdb->sense);
	memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return 0;
}
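
/*
 * Note on the DID_ERROR path above: a DCDB carries exactly one data
 * segment (dma_addr plus a 20-bit length split into xfer_len_lo and
 * xfer_len_hi4), so pass-through requests that map to more than one
 * scatter/gather element cannot be represented and are failed.
 */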

static void myrb_inquiry(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	unsigned char inq[36] = {
		0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
		0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20,
	};

	if (cb->bus_width > 16)
		inq[7] |= 1 << 6;
	if (cb->bus_width > 8)
		inq[7] |= 1 << 5;
	memcpy(&inq[16], cb->model_name, 16);
	memcpy(&inq[32], cb->fw_version, 1);
	memcpy(&inq[33], &cb->fw_version[2], 2);
	memcpy(&inq[35], &cb->fw_version[7], 1);

	scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
}

static void
myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char modes[32], *mode_pg;
	bool dbd;
	size_t mode_len;

	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
	if (dbd) {
		mode_len = 24;
		mode_pg = &modes[4];
	} else {
		mode_len = 32;
		mode_pg = &modes[12];
	}
	memset(modes, 0, sizeof(modes));
	modes[0] = mode_len - 1;
	if (!dbd) {
		unsigned char *block_desc = &modes[4];

		modes[3] = 8;
		put_unaligned_be32(ldev_info->size, &block_desc[0]);
		/* 24-bit block length occupies bytes 5-7 of the descriptor */
		put_unaligned_be32(cb->ldev_block_size, &block_desc[4]);
	}
	mode_pg[0] = 0x08;
	mode_pg[1] = 0x12;
	if (ldev_info->wb_enabled)
		mode_pg[2] |= 0x04;
	if (cb->segment_size) {
		mode_pg[2] |= 0x08;
		put_unaligned_be16(cb->segment_size, &mode_pg[14]);
	}

	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
}
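
/*
 * Resulting MODE SENSE layout (derived from the code above): a 4-byte
 * parameter header, an 8-byte block descriptor unless DBD is set, and
 * the 20-byte caching page (page code 0x08, page length 0x12) - hence
 * mode_len of 32 or 24 bytes respectively.
 */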

static void myrb_request_sense(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	scsi_build_sense(scmd, 0, NO_SENSE, 0, 0);
	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
				 SCSI_SENSE_BUFFERSIZE);
}

static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char data[8];

	dev_dbg(&scmd->device->sdev_gendev,
		"Capacity %u, blocksize %u\n",
		ldev_info->size, cb->ldev_block_size);
	put_unaligned_be32(ldev_info->size - 1, &data[0]);
	put_unaligned_be32(cb->ldev_block_size, &data[4]);
	scsi_sg_copy_from_buffer(scmd, data, 8);
}

static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_ldev_info *ldev_info;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	u64 lba;
	u32 block_cnt;
	int nsge;

	ldev_info = sdev->hostdata;
	if (!ldev_info ||
	    (ldev_info->state != MYRB_DEVICE_ONLINE &&
	     ldev_info->state != MYRB_DEVICE_WO)) {
		dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
			sdev->id, ldev_info ? ldev_info->state : 0xff);
		scmd->result = (DID_BAD_TARGET << 16);
		scsi_done(scmd);
		return 0;
	}
	switch (scmd->cmnd[0]) {
	case TEST_UNIT_READY:
		scmd->result = (DID_OK << 16);
		scsi_done(scmd);
		return 0;
	case INQUIRY:
		if (scmd->cmnd[1] & 1) {
			/* Illegal request */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
		} else {
			myrb_inquiry(cb, scmd);
			scmd->result = (DID_OK << 16);
		}
		scsi_done(scmd);
		return 0;
	case SYNCHRONIZE_CACHE:
		scmd->result = (DID_OK << 16);
		scsi_done(scmd);
		return 0;
	case MODE_SENSE:
		if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
		    (scmd->cmnd[2] & 0x3F) != 0x08) {
			/* Illegal request */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
		} else {
			myrb_mode_sense(cb, scmd, ldev_info);
			scmd->result = (DID_OK << 16);
		}
		scsi_done(scmd);
		return 0;
	case READ_CAPACITY:
		if ((scmd->cmnd[1] & 1) ||
		    (scmd->cmnd[8] & 1)) {
			/* Illegal request */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
			scsi_done(scmd);
			return 0;
		}
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		if (lba) {
			/* Illegal request */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
			scsi_done(scmd);
			return 0;
		}
		myrb_read_capacity(cb, scmd, ldev_info);
		scsi_done(scmd);
		return 0;
	case REQUEST_SENSE:
		myrb_request_sense(cb, scmd);
		scmd->result = (DID_OK << 16);
		scsi_done(scmd);
		return 0;
	case SEND_DIAGNOSTIC:
		if (scmd->cmnd[1] != 0x04) {
			/* Illegal request */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
		} else {
			/* Assume good status */
			scmd->result = (DID_OK << 16);
		}
		scsi_done(scmd);
		return 0;
	case READ_6:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect */
			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
			scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_6:
		lba = (((scmd->cmnd[1] & 0x1F) << 16) |
		       (scmd->cmnd[2] << 8) |
		       scmd->cmnd[3]);
		block_cnt = scmd->cmnd[4];
		break;
	case READ_10:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect */
			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
			scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_10:
	case VERIFY:
	case WRITE_VERIFY:
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
		break;
	case READ_12:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect */
			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
			scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_12:
	case VERIFY_12:
	case WRITE_VERIFY_12:
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
		break;
	default:
		/* Illegal request */
		scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0);
		scsi_done(scmd);
		return 0;
	}

	myrb_reset_cmd(cmd_blk);
	mbox->type5.id = scsi_cmd_to_rq(scmd)->tag + 3;
	if (scmd->sc_data_direction == DMA_NONE)
		goto submit;
	nsge = scsi_dma_map(scmd);
	if (nsge == 1) {
		sgl = scsi_sglist(scmd);
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			mbox->type5.opcode = MYRB_CMD_READ;
		else
			mbox->type5.opcode = MYRB_CMD_WRITE;

		mbox->type5.ld.xfer_len = block_cnt;
		mbox->type5.ld.ldev_num = sdev->id;
		mbox->type5.lba = lba;
		mbox->type5.addr = (u32)sg_dma_address(sgl);
	} else {
		struct myrb_sge *hw_sgl;
		dma_addr_t hw_sgl_addr;
		int i;

		hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
		if (!hw_sgl)
			return SCSI_MLQUEUE_HOST_BUSY;

		cmd_blk->sgl = hw_sgl;
		cmd_blk->sgl_addr = hw_sgl_addr;

		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			mbox->type5.opcode = MYRB_CMD_READ_SG;
		else
			mbox->type5.opcode = MYRB_CMD_WRITE_SG;

		mbox->type5.ld.xfer_len = block_cnt;
		mbox->type5.ld.ldev_num = sdev->id;
		mbox->type5.lba = lba;
		mbox->type5.addr = hw_sgl_addr;
		mbox->type5.sg_count = nsge;

		scsi_for_each_sg(scmd, sgl, nsge, i) {
			hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
			hw_sgl->sge_count = (u32)sg_dma_len(sgl);
			hw_sgl++;
		}
	}
submit:
	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	return 0;
}
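
/*
 * Read/write sketch: single-segment requests use the plain
 * MYRB_CMD_READ/MYRB_CMD_WRITE opcodes with the buffer address placed
 * directly in the mailbox; larger requests switch to the _SG variants
 * and build a hardware scatter/gather list from sg_pool (stored in
 * cmd_blk->sgl for release on completion).
 */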

static int myrb_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;

	if (sdev->channel > myrb_logical_channel(shost)) {
		scmd->result = (DID_BAD_TARGET << 16);
		scsi_done(scmd);
		return 0;
	}
	if (sdev->channel == myrb_logical_channel(shost))
		return myrb_ldev_queuecommand(shost, scmd);

	return myrb_pthru_queuecommand(shost, scmd);
}

static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
{
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_ldev_info *ldev_info;
	unsigned short ldev_num = sdev->id;
	enum raid_level level;

	ldev_info = cb->ldev_info_buf + ldev_num;
	if (!ldev_info)
		return -ENXIO;

	sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
	if (!sdev->hostdata)
		return -ENOMEM;
	dev_dbg(&sdev->sdev_gendev,
		"slave alloc ldev %d state %x\n",
		ldev_num, ldev_info->state);
	memcpy(sdev->hostdata, ldev_info,
	       sizeof(*ldev_info));
	switch (ldev_info->raid_level) {
	case MYRB_RAID_LEVEL0:
		level = RAID_LEVEL_LINEAR;
		break;
	case MYRB_RAID_LEVEL1:
		level = RAID_LEVEL_1;
		break;
	case MYRB_RAID_LEVEL3:
		level = RAID_LEVEL_3;
		break;
	case MYRB_RAID_LEVEL5:
		level = RAID_LEVEL_5;
		break;
	case MYRB_RAID_LEVEL6:
		level = RAID_LEVEL_6;
		break;
	case MYRB_RAID_JBOD:
		level = RAID_LEVEL_JBOD;
		break;
	default:
		level = RAID_LEVEL_UNKNOWN;
		break;
	}
	raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
	return 0;
}

static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
{
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_pdev_state *pdev_info;
	unsigned short status;

	if (sdev->id >= MYRB_MAX_TARGETS)
		return -ENXIO;

	pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
	if (!pdev_info)
		return -ENOMEM;

	status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
				  sdev, pdev_info);
	if (status != MYRB_STATUS_SUCCESS) {
		dev_dbg(&sdev->sdev_gendev,
			"Failed to get device state, status %x\n",
			status);
		kfree(pdev_info);
		return -ENXIO;
	}
	if (!pdev_info->present) {
		dev_dbg(&sdev->sdev_gendev,
			"device not present, skip\n");
		kfree(pdev_info);
		return -ENXIO;
	}
	dev_dbg(&sdev->sdev_gendev,
		"slave alloc pdev %d:%d state %x\n",
		sdev->channel, sdev->id, pdev_info->state);
	sdev->hostdata = pdev_info;

	return 0;
}

static int myrb_slave_alloc(struct scsi_device *sdev)
{
	if (sdev->channel > myrb_logical_channel(sdev->host))
		return -ENXIO;

	if (sdev->lun > 0)
		return -ENXIO;

	if (sdev->channel == myrb_logical_channel(sdev->host))
		return myrb_ldev_slave_alloc(sdev);

	return myrb_pdev_slave_alloc(sdev);
}

static int myrb_slave_configure(struct scsi_device *sdev)
{
	struct myrb_ldev_info *ldev_info;

	if (sdev->channel > myrb_logical_channel(sdev->host))
		return -ENXIO;

	if (sdev->channel < myrb_logical_channel(sdev->host)) {
		sdev->no_uld_attach = 1;
		return 0;
	}
	if (sdev->lun != 0)
		return -ENXIO;

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	if (ldev_info->state != MYRB_DEVICE_ONLINE)
		sdev_printk(KERN_INFO, sdev,
			    "Logical drive is %s\n",
			    myrb_devstate_name(ldev_info->state));

	sdev->tagged_supported = 1;
	return 0;
}

static void myrb_slave_destroy(struct scsi_device *sdev)
{
	kfree(sdev->hostdata);
}

static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		sector_t capacity, int geom[])
{
	struct myrb_hba *cb = shost_priv(sdev->host);

	geom[0] = cb->ldev_geom_heads;
	geom[1] = cb->ldev_geom_sectors;
	sector_div(capacity, geom[0] * geom[1]);
	geom[2] = (int)capacity;

	return 0;
}
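
/*
 * Worked example (illustrative): a 4194304-sector (2GB at 512 bytes)
 * logical drive with the default 255-head/63-sector translation yields
 * 4194304 / (255 * 63) = 261 cylinders; with the alternate 128/32
 * geometry it would be 4194304 / (128 * 32) = 1024 cylinders.
 */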

static ssize_t raid_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	int ret;

	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel == myrb_logical_channel(sdev->host)) {
		struct myrb_ldev_info *ldev_info = sdev->hostdata;
		const char *name;

		name = myrb_devstate_name(ldev_info->state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       ldev_info->state);
	} else {
		struct myrb_pdev_state *pdev_info = sdev->hostdata;
		unsigned short status;
		const char *name;

		status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
					  sdev, pdev_info);
		if (status != MYRB_STATUS_SUCCESS)
			sdev_printk(KERN_INFO, sdev,
				    "Failed to get device state, status %x\n",
				    status);

		if (!pdev_info->present)
			name = "Removed";
		else
			name = myrb_devstate_name(pdev_info->state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       pdev_info->state);
	}
	return ret;
}

static ssize_t raid_state_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_pdev_state *pdev_info;
	enum myrb_devstate new_state;
	unsigned short status;

	if (!strncmp(buf, "kill", 4) ||
	    !strncmp(buf, "offline", 7))
		new_state = MYRB_DEVICE_DEAD;
	else if (!strncmp(buf, "online", 6))
		new_state = MYRB_DEVICE_ONLINE;
	else if (!strncmp(buf, "standby", 7))
		new_state = MYRB_DEVICE_STANDBY;
	else
		return -EINVAL;

	pdev_info = sdev->hostdata;
	if (!pdev_info) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed - no physical device information\n");
		return -ENXIO;
	}
	if (!pdev_info->present) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed - device not present\n");
		return -ENXIO;
	}

	if (pdev_info->state == new_state)
		return count;

	status = myrb_set_pdev_state(cb, sdev, new_state);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		break;
	case MYRB_STATUS_START_DEVICE_FAILED:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Unable to Start Device\n");
		count = -EAGAIN;
		break;
	case MYRB_STATUS_NO_DEVICE:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - No Device at Address\n");
		count = -ENODEV;
		break;
	case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Invalid Channel or Target or Modifier\n");
		count = -EINVAL;
		break;
	case MYRB_STATUS_CHANNEL_BUSY:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Channel Busy\n");
		count = -EBUSY;
		break;
	default:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Unexpected Status %04X\n", status);
		count = -EIO;
		break;
	}
	return count;
}
static DEVICE_ATTR_RW(raid_state);
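
/*
 * Usage sketch (path illustrative): raid_state appears in the
 * scsi_device sysfs directory, e.g.
 *
 *   cat /sys/bus/scsi/devices/<h:c:t:l>/raid_state
 *   echo online > /sys/bus/scsi/devices/<h:c:t:l>/raid_state
 *
 * Accepted values are "kill"/"offline", "online" and "standby"; writes
 * act on physical devices via myrb_set_pdev_state().
 */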

static ssize_t raid_level_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	if (sdev->channel == myrb_logical_channel(sdev->host)) {
		struct myrb_ldev_info *ldev_info = sdev->hostdata;
		const char *name;

		if (!ldev_info)
			return -ENXIO;

		name = myrb_raidlevel_name(ldev_info->raid_level);
		if (!name)
			return snprintf(buf, 32, "Invalid (%02X)\n",
					ldev_info->state);
		return snprintf(buf, 32, "%s\n", name);
	}
	return snprintf(buf, 32, "Physical Drive\n");
}
static DEVICE_ATTR_RO(raid_level);

static ssize_t rebuild_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_rbld_progress rbld_buf;
	unsigned short status;

	if (sdev->channel < myrb_logical_channel(sdev->host))
		return snprintf(buf, 32, "physical device - not rebuilding\n");

	status = myrb_get_rbld_progress(cb, &rbld_buf);

	if (rbld_buf.ldev_num != sdev->id ||
	    status != MYRB_STATUS_SUCCESS)
		return snprintf(buf, 32, "not rebuilding\n");

	return snprintf(buf, 32, "rebuilding block %u of %u\n",
			rbld_buf.ldev_size - rbld_buf.blocks_left,
			rbld_buf.ldev_size);
}

static ssize_t rebuild_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_cmdblk *cmd_blk;
	union myrb_cmd_mbox *mbox;
	unsigned short status;
	int rc, start;
	const char *msg;

	rc = kstrtoint(buf, 0, &start);
	if (rc)
		return rc;

	if (sdev->channel >= myrb_logical_channel(sdev->host))
		return -ENXIO;

	status = myrb_get_rbld_progress(cb, NULL);
	if (start) {
		if (status == MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Not Initiated; already in progress\n");
			return -EALREADY;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
		mbox->type3D.id = MYRB_DCMD_TAG;
		mbox->type3D.channel = sdev->channel;
		mbox->type3D.target = sdev->id;
		status = myrb_exec_cmd(cb, cmd_blk);
		mutex_unlock(&cb->dcmd_mutex);
	} else {
		struct pci_dev *pdev = cb->pdev;
		unsigned char *rate;
		dma_addr_t rate_addr;

		if (status != MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Not Cancelled; not in progress\n");
			return count;
		}

		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
					  &rate_addr, GFP_KERNEL);
		if (rate == NULL) {
			sdev_printk(KERN_INFO, sdev,
				    "Cancellation of Rebuild Failed - Out of Memory\n");
			return -ENOMEM;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
		mbox->type3R.id = MYRB_DCMD_TAG;
		mbox->type3R.rbld_rate = 0xFF;
		mbox->type3R.addr = rate_addr;
		status = myrb_exec_cmd(cb, cmd_blk);
		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
		mutex_unlock(&cb->dcmd_mutex);
	}
	if (status == MYRB_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
			    start ? "Initiated" : "Cancelled");
		return count;
	}
	if (!start) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not Cancelled, status 0x%x\n",
			    status);
		return -EIO;
	}

	switch (status) {
	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
		msg = "Attempt to Rebuild Online or Unresponsive Drive";
		break;
	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
		msg = "New Disk Failed During Rebuild";
		break;
	case MYRB_STATUS_INVALID_ADDRESS:
		msg = "Invalid Device Address";
		break;
	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
		msg = "Already in Progress";
		break;
	default:
		msg = NULL;
		break;
	}
	if (msg)
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Failed - %s\n", msg);
	else
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Failed, status 0x%x\n", status);

	return -EIO;
}
static DEVICE_ATTR_RW(rebuild);
2023
2024 static ssize_t consistency_check_store(struct device *dev,
2025 struct device_attribute *attr, const char *buf, size_t count)
2026 {
2027 struct scsi_device *sdev = to_scsi_device(dev);
2028 struct myrb_hba *cb = shost_priv(sdev->host);
2029 struct myrb_rbld_progress rbld_buf;
2030 struct myrb_cmdblk *cmd_blk;
2031 union myrb_cmd_mbox *mbox;
2032 unsigned short ldev_num = 0xFFFF;
2033 unsigned short status;
2034 int rc, start;
2035 const char *msg;
2036
2037 rc = kstrtoint(buf, 0, &start);
2038 if (rc)
2039 return rc;
2040
2041 if (sdev->channel < myrb_logical_channel(sdev->host))
2042 return -ENXIO;
2043
	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_STATUS_SUCCESS)
		ldev_num = rbld_buf.ldev_num;
2045 if (start) {
2046 if (status == MYRB_STATUS_SUCCESS) {
2047 sdev_printk(KERN_INFO, sdev,
2048 "Check Consistency Not Initiated; already in progress\n");
2049 return -EALREADY;
2050 }
2051 mutex_lock(&cb->dcmd_mutex);
2052 cmd_blk = &cb->dcmd_blk;
2053 myrb_reset_cmd(cmd_blk);
2054 mbox = &cmd_blk->mbox;
2055 mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
2056 mbox->type3C.id = MYRB_DCMD_TAG;
2057 mbox->type3C.ldev_num = sdev->id;
2058 mbox->type3C.auto_restore = true;
2059
2060 status = myrb_exec_cmd(cb, cmd_blk);
2061 mutex_unlock(&cb->dcmd_mutex);
2062 } else {
2063 struct pci_dev *pdev = cb->pdev;
2064 unsigned char *rate;
2065 dma_addr_t rate_addr;
2066
		if (ldev_num != sdev->id) {
			sdev_printk(KERN_INFO, sdev,
				    "Check Consistency Not Cancelled; not in progress\n");
			return count;
		}
2072 rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
2073 &rate_addr, GFP_KERNEL);
2074 if (rate == NULL) {
2075 sdev_printk(KERN_INFO, sdev,
2076 "Cancellation of Check Consistency Failed - Out of Memory\n");
2077 return -ENOMEM;
2078 }
2079 mutex_lock(&cb->dcmd_mutex);
2080 cmd_blk = &cb->dcmd_blk;
2081 myrb_reset_cmd(cmd_blk);
2082 mbox = &cmd_blk->mbox;
2083 mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2084 mbox->type3R.id = MYRB_DCMD_TAG;
2085 mbox->type3R.rbld_rate = 0xFF;
2086 mbox->type3R.addr = rate_addr;
2087 status = myrb_exec_cmd(cb, cmd_blk);
2088 dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2089 mutex_unlock(&cb->dcmd_mutex);
2090 }
2091 if (status == MYRB_STATUS_SUCCESS) {
2092 sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
2093 start ? "Initiated" : "Cancelled");
2094 return count;
2095 }
2096 if (!start) {
2097 sdev_printk(KERN_INFO, sdev,
2098 "Check Consistency Not Cancelled, status 0x%x\n",
2099 status);
2100 return -EIO;
2101 }
2102
2103 switch (status) {
2104 case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2105 msg = "Dependent Physical Device is DEAD";
2106 break;
2107 case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2108 msg = "New Disk Failed During Rebuild";
2109 break;
2110 case MYRB_STATUS_INVALID_ADDRESS:
2111 msg = "Invalid or Nonredundant Logical Drive";
2112 break;
2113 case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2114 msg = "Already in Progress";
2115 break;
2116 default:
2117 msg = NULL;
2118 break;
2119 }
2120 if (msg)
2121 sdev_printk(KERN_INFO, sdev,
2122 "Check Consistency Failed - %s\n", msg);
2123 else
2124 sdev_printk(KERN_INFO, sdev,
2125 "Check Consistency Failed, status 0x%x\n", status);
2126
2127 return -EIO;
2128 }
2129
2130 static ssize_t consistency_check_show(struct device *dev,
2131 struct device_attribute *attr, char *buf)
2132 {
2133 return rebuild_show(dev, attr, buf);
2134 }
2135 static DEVICE_ATTR_RW(consistency_check);
2136
2137 static ssize_t ctlr_num_show(struct device *dev,
2138 struct device_attribute *attr, char *buf)
2139 {
2140 struct Scsi_Host *shost = class_to_shost(dev);
2141 struct myrb_hba *cb = shost_priv(shost);
2142
2143 return snprintf(buf, 20, "%u\n", cb->ctlr_num);
2144 }
2145 static DEVICE_ATTR_RO(ctlr_num);
2146
2147 static ssize_t firmware_show(struct device *dev,
2148 struct device_attribute *attr, char *buf)
2149 {
2150 struct Scsi_Host *shost = class_to_shost(dev);
2151 struct myrb_hba *cb = shost_priv(shost);
2152
2153 return snprintf(buf, 16, "%s\n", cb->fw_version);
2154 }
2155 static DEVICE_ATTR_RO(firmware);
2156
2157 static ssize_t model_show(struct device *dev,
2158 struct device_attribute *attr, char *buf)
2159 {
2160 struct Scsi_Host *shost = class_to_shost(dev);
2161 struct myrb_hba *cb = shost_priv(shost);
2162
2163 return snprintf(buf, 16, "%s\n", cb->model_name);
2164 }
2165 static DEVICE_ATTR_RO(model);
2166
2167 static ssize_t flush_cache_store(struct device *dev,
2168 struct device_attribute *attr, const char *buf, size_t count)
2169 {
2170 struct Scsi_Host *shost = class_to_shost(dev);
2171 struct myrb_hba *cb = shost_priv(shost);
2172 unsigned short status;
2173
2174 status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2175 if (status == MYRB_STATUS_SUCCESS) {
2176 shost_printk(KERN_INFO, shost,
2177 "Cache Flush Completed\n");
2178 return count;
2179 }
2180 shost_printk(KERN_INFO, shost,
2181 "Cache Flush Failed, status %x\n", status);
2182 return -EIO;
2183 }
2184 static DEVICE_ATTR_WO(flush_cache);
2185
2186 static struct attribute *myrb_sdev_attrs[] = {
2187 &dev_attr_rebuild.attr,
2188 &dev_attr_consistency_check.attr,
2189 &dev_attr_raid_state.attr,
2190 &dev_attr_raid_level.attr,
2191 NULL,
2192 };
2193
2194 ATTRIBUTE_GROUPS(myrb_sdev);
2195
2196 static struct attribute *myrb_shost_attrs[] = {
2197 &dev_attr_ctlr_num.attr,
2198 &dev_attr_model.attr,
2199 &dev_attr_firmware.attr,
2200 &dev_attr_flush_cache.attr,
2201 NULL,
2202 };
2203
2204 ATTRIBUTE_GROUPS(myrb_shost);
2205
2206 static struct scsi_host_template myrb_template = {
2207 .module = THIS_MODULE,
2208 .name = "DAC960",
2209 .proc_name = "myrb",
2210 .queuecommand = myrb_queuecommand,
2211 .eh_host_reset_handler = myrb_host_reset,
2212 .slave_alloc = myrb_slave_alloc,
2213 .slave_configure = myrb_slave_configure,
2214 .slave_destroy = myrb_slave_destroy,
2215 .bios_param = myrb_biosparam,
2216 .cmd_size = sizeof(struct myrb_cmdblk),
2217 .shost_groups = myrb_shost_groups,
2218 .sdev_groups = myrb_sdev_groups,
2219 .this_id = -1,
};

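/**
 * myrb_is_raid - return boolean indicating device is raid volume
 * @dev: the device struct object
 */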
2226 static int myrb_is_raid(struct device *dev)
2227 {
2228 struct scsi_device *sdev = to_scsi_device(dev);
2229
2230 return sdev->channel == myrb_logical_channel(sdev->host);
}

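/**
 * myrb_get_resync - get raid volume resync percent complete
 * @dev: the device struct object
 */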
2237 static void myrb_get_resync(struct device *dev)
2238 {
2239 struct scsi_device *sdev = to_scsi_device(dev);
2240 struct myrb_hba *cb = shost_priv(sdev->host);
2241 struct myrb_rbld_progress rbld_buf;
2242 unsigned int percent_complete = 0;
2243 unsigned short status;
2244 unsigned int ldev_size = 0, remaining = 0;
2245
2246 if (sdev->channel < myrb_logical_channel(sdev->host))
2247 return;
2248 status = myrb_get_rbld_progress(cb, &rbld_buf);
2249 if (status == MYRB_STATUS_SUCCESS) {
2250 if (rbld_buf.ldev_num == sdev->id) {
2251 ldev_size = rbld_buf.ldev_size;
2252 remaining = rbld_buf.blocks_left;
2253 }
2254 }
2255 if (remaining && ldev_size)
2256 percent_complete = (ldev_size - remaining) * 100 / ldev_size;
2257 raid_set_resync(myrb_raid_template, dev, percent_complete);
}

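/**
 * myrb_get_state - get raid volume status
 * @dev: the device struct object
 */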
2264 static void myrb_get_state(struct device *dev)
2265 {
2266 struct scsi_device *sdev = to_scsi_device(dev);
2267 struct myrb_hba *cb = shost_priv(sdev->host);
2268 struct myrb_ldev_info *ldev_info = sdev->hostdata;
2269 enum raid_state state = RAID_STATE_UNKNOWN;
2270 unsigned short status;
2271
	if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info) {
		state = RAID_STATE_UNKNOWN;
	} else {
		status = myrb_get_rbld_progress(cb, NULL);
		if (status == MYRB_STATUS_SUCCESS) {
			state = RAID_STATE_RESYNCING;
		} else {
2279 switch (ldev_info->state) {
2280 case MYRB_DEVICE_ONLINE:
2281 state = RAID_STATE_ACTIVE;
2282 break;
2283 case MYRB_DEVICE_WO:
2284 case MYRB_DEVICE_CRITICAL:
2285 state = RAID_STATE_DEGRADED;
2286 break;
2287 default:
2288 state = RAID_STATE_OFFLINE;
2289 }
2290 }
2291 }
2292 raid_set_state(myrb_raid_template, dev, state);
2293 }
2294
2295 static struct raid_function_template myrb_raid_functions = {
2296 .cookie = &myrb_template,
2297 .is_raid = myrb_is_raid,
2298 .get_resync = myrb_get_resync,
2299 .get_state = myrb_get_state,
2300 };
2301
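/*
 * myrb_handle_scsi - completes a SCSI command by translating the
 * controller status into a SCSI result and releasing the per-command
 * DCDB and scatter/gather resources.
 */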
2302 static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
2303 struct scsi_cmnd *scmd)
2304 {
2305 unsigned short status;
2306
2307 if (!cmd_blk)
2308 return;
2309
2310 scsi_dma_unmap(scmd);
2311
2312 if (cmd_blk->dcdb) {
2313 memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
2314 dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
2315 cmd_blk->dcdb_addr);
2316 cmd_blk->dcdb = NULL;
2317 }
2318 if (cmd_blk->sgl) {
2319 dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
2320 cmd_blk->sgl = NULL;
2321 cmd_blk->sgl_addr = 0;
2322 }
2323 status = cmd_blk->status;
2324 switch (status) {
2325 case MYRB_STATUS_SUCCESS:
2326 case MYRB_STATUS_DEVICE_BUSY:
2327 scmd->result = (DID_OK << 16) | status;
2328 break;
2329 case MYRB_STATUS_BAD_DATA:
2330 dev_dbg(&scmd->device->sdev_gendev,
2331 "Bad Data Encountered\n");
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			/* Unrecovered read error */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0);
		else
			/* Write error */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0);
2338 break;
2339 case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
2340 scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			/* Unrecovered read error, auto-reallocation failed */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0x04);
		else
			/* Write error, auto-reallocation failed */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0x02);
2347 break;
2348 case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
		dev_dbg(&scmd->device->sdev_gendev,
			"Logical Drive Nonexistent or Offline\n");
2351 scmd->result = (DID_BAD_TARGET << 16);
2352 break;
2353 case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
		dev_dbg(&scmd->device->sdev_gendev,
			"Attempt to Access Beyond End of Logical Drive\n");
		/* Logical block address out of range */
		scsi_build_sense(scmd, 0, NOT_READY, 0x21, 0);
2358 break;
2359 case MYRB_STATUS_DEVICE_NONRESPONSIVE:
2360 dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
2361 scmd->result = (DID_BAD_TARGET << 16);
2362 break;
2363 default:
		scmd_printk(KERN_ERR, scmd,
			    "Unexpected Error Status %04X\n", status);
2366 scmd->result = (DID_ERROR << 16);
2367 break;
2368 }
2369 scsi_done(scmd);
2370 }
2371
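/*
 * myrb_handle_cmdblk - completes a driver-internal command block.
 */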
2372 static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2373 {
2374 if (!cmd_blk)
2375 return;
2376
2377 if (cmd_blk->completion) {
2378 complete(cmd_blk->completion);
2379 cmd_blk->completion = NULL;
2380 }
2381 }
2382
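/*
 * myrb_monitor - periodic monitoring work; requeues itself on the
 * driver workqueue.
 */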
2383 static void myrb_monitor(struct work_struct *work)
2384 {
2385 struct myrb_hba *cb = container_of(work,
2386 struct myrb_hba, monitor_work.work);
2387 struct Scsi_Host *shost = cb->host;
2388 unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
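	/* monitoring interval, in jiffies */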
2389
2390 dev_dbg(&shost->shost_gendev, "monitor tick\n");
2391
2392 if (cb->new_ev_seq > cb->old_ev_seq) {
2393 int event = cb->old_ev_seq;
2394
2395 dev_dbg(&shost->shost_gendev,
2396 "get event log no %d/%d\n",
2397 cb->new_ev_seq, event);
2398 myrb_get_event(cb, event);
2399 cb->old_ev_seq = event + 1;
2400 interval = 10;
2401 } else if (cb->need_err_info) {
2402 cb->need_err_info = false;
2403 dev_dbg(&shost->shost_gendev, "get error table\n");
2404 myrb_get_errtable(cb);
2405 interval = 10;
2406 } else if (cb->need_rbld && cb->rbld_first) {
2407 cb->need_rbld = false;
2408 dev_dbg(&shost->shost_gendev,
2409 "get rebuild progress\n");
2410 myrb_update_rbld_progress(cb);
2411 interval = 10;
2412 } else if (cb->need_ldev_info) {
2413 cb->need_ldev_info = false;
2414 dev_dbg(&shost->shost_gendev,
2415 "get logical drive info\n");
2416 myrb_get_ldev_info(cb);
2417 interval = 10;
2418 } else if (cb->need_rbld) {
2419 cb->need_rbld = false;
2420 dev_dbg(&shost->shost_gendev,
2421 "get rebuild progress\n");
2422 myrb_update_rbld_progress(cb);
2423 interval = 10;
2424 } else if (cb->need_cc_status) {
2425 cb->need_cc_status = false;
2426 dev_dbg(&shost->shost_gendev,
2427 "get consistency check progress\n");
2428 myrb_get_cc_progress(cb);
2429 interval = 10;
2430 } else if (cb->need_bgi_status) {
2431 cb->need_bgi_status = false;
2432 dev_dbg(&shost->shost_gendev, "get background init status\n");
2433 myrb_bgi_control(cb);
2434 interval = 10;
2435 } else {
2436 dev_dbg(&shost->shost_gendev, "new enquiry\n");
2437 mutex_lock(&cb->dma_mutex);
2438 myrb_hba_enquiry(cb);
2439 mutex_unlock(&cb->dma_mutex);
2440 if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
2441 cb->need_err_info || cb->need_rbld ||
2442 cb->need_ldev_info || cb->need_cc_status ||
2443 cb->need_bgi_status) {
2444 dev_dbg(&shost->shost_gendev,
2445 "reschedule monitor\n");
2446 interval = 0;
2447 }
2448 }
2449 if (interval > 1)
2450 cb->primary_monitor_time = jiffies;
2451 queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
}

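/*
 * myrb_err_status - reports controller BIOS messages
 *
 * Controller BIOS messages are passed through the Error Status Register
 * when the driver performs the BIOS handshaking.
 *
 * Return: true for fatal errors and false otherwise.
 */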
2462 static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
2463 unsigned char parm0, unsigned char parm1)
2464 {
2465 struct pci_dev *pdev = cb->pdev;
2466
2467 switch (error) {
2468 case 0x00:
2469 dev_info(&pdev->dev,
2470 "Physical Device %d:%d Not Responding\n",
2471 parm1, parm0);
2472 break;
2473 case 0x08:
2474 dev_notice(&pdev->dev, "Spinning Up Drives\n");
2475 break;
2476 case 0x30:
2477 dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2478 break;
2479 case 0x60:
2480 dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2481 break;
2482 case 0x70:
2483 dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2484 break;
2485 case 0x90:
2486 dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2487 parm1, parm0);
2488 break;
2489 case 0xA0:
2490 dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2491 break;
2492 case 0xB0:
2493 dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2494 break;
2495 case 0xD0:
2496 dev_notice(&pdev->dev, "New Controller Configuration Found\n");
2497 break;
2498 case 0xF0:
2499 dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2500 return true;
2501 default:
2502 dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2503 error);
2504 return true;
2505 }
2506 return false;
}

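/*
 * Hardware-specific functions for the DAC960 LA Series Controllers.
 */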
2517 static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
2518 {
2519 writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2520 }
2521
2522 static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
2523 {
2524 writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
2525 }
2526
2527 static inline void DAC960_LA_reset_ctrl(void __iomem *base)
2528 {
2529 writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
2530 }
2531
2532 static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
2533 {
2534 writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2535 }
2536
2537 static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
2538 {
2539 unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2540
2541 return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
2542 }
2543
2544 static inline bool DAC960_LA_init_in_progress(void __iomem *base)
2545 {
2546 unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2547
2548 return !(idb & DAC960_LA_IDB_INIT_DONE);
2549 }
2550
2551 static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
2552 {
2553 writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2554 }
2555
2556 static inline void DAC960_LA_ack_intr(void __iomem *base)
2557 {
2558 writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
2559 base + DAC960_LA_ODB_OFFSET);
2560 }
2561
2562 static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
2563 {
2564 unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2565
2566 return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
2567 }
2568
2569 static inline void DAC960_LA_enable_intr(void __iomem *base)
2570 {
	unsigned char imask = 0xFF;

	imask &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
2575 }
2576
2577 static inline void DAC960_LA_disable_intr(void __iomem *base)
2578 {
	unsigned char imask = 0xFF;

	imask |= DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
2583 }
2584
2585 static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2586 union myrb_cmd_mbox *mbox)
2587 {
2588 mem_mbox->words[1] = mbox->words[1];
2589 mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
2596 }
2597
2598 static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
2599 union myrb_cmd_mbox *mbox)
2600 {
2601 writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
2602 writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
2603 writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
2604 writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
2605 }
2606
2607 static inline unsigned short DAC960_LA_read_status(void __iomem *base)
2608 {
2609 return readw(base + DAC960_LA_STS_OFFSET);
2610 }
2611
2612 static inline bool
2613 DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
2614 unsigned char *param0, unsigned char *param1)
2615 {
2616 unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);
2617
2618 if (!(errsts & DAC960_LA_ERRSTS_PENDING))
2619 return false;
2620 errsts &= ~DAC960_LA_ERRSTS_PENDING;
2621
2622 *error = errsts;
2623 *param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
2624 *param1 = readb(base + DAC960_LA_CMDID_OFFSET);
2625 writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
2626 return true;
2627 }
2628
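/*
 * DAC960_LA_mbox_init - passes a command through the hardware mailbox,
 * polling until the controller accepts it and returns a status.
 */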
2629 static inline unsigned short
2630 DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
2631 union myrb_cmd_mbox *mbox)
2632 {
2633 unsigned short status;
2634 int timeout = 0;
2635
2636 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2637 if (!DAC960_LA_hw_mbox_is_full(base))
2638 break;
2639 udelay(10);
2640 timeout++;
2641 }
2642 if (DAC960_LA_hw_mbox_is_full(base)) {
2643 dev_err(&pdev->dev,
2644 "Timeout waiting for empty mailbox\n");
2645 return MYRB_STATUS_SUBSYS_TIMEOUT;
2646 }
2647 DAC960_LA_write_hw_mbox(base, mbox);
2648 DAC960_LA_hw_mbox_new_cmd(base);
2649 timeout = 0;
2650 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2651 if (DAC960_LA_hw_mbox_status_available(base))
2652 break;
2653 udelay(10);
2654 timeout++;
2655 }
2656 if (!DAC960_LA_hw_mbox_status_available(base)) {
2657 dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
2658 return MYRB_STATUS_SUBSYS_TIMEOUT;
2659 }
2660 status = DAC960_LA_read_status(base);
2661 DAC960_LA_ack_hw_mbox_intr(base);
2662 DAC960_LA_ack_hw_mbox_status(base);
2663
2664 return status;
2665 }
2666
2667 static int DAC960_LA_hw_init(struct pci_dev *pdev,
2668 struct myrb_hba *cb, void __iomem *base)
2669 {
2670 int timeout = 0;
2671 unsigned char error, parm0, parm1;
2672
2673 DAC960_LA_disable_intr(base);
2674 DAC960_LA_ack_hw_mbox_status(base);
2675 udelay(1000);
2676 while (DAC960_LA_init_in_progress(base) &&
2677 timeout < MYRB_MAILBOX_TIMEOUT) {
2678 if (DAC960_LA_read_error_status(base, &error,
2679 &parm0, &parm1) &&
2680 myrb_err_status(cb, error, parm0, parm1))
2681 return -ENODEV;
2682 udelay(10);
2683 timeout++;
2684 }
2685 if (timeout == MYRB_MAILBOX_TIMEOUT) {
2686 dev_err(&pdev->dev,
2687 "Timeout waiting for Controller Initialisation\n");
2688 return -ETIMEDOUT;
2689 }
2690 if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
2691 dev_err(&pdev->dev,
2692 "Unable to Enable Memory Mailbox Interface\n");
2693 DAC960_LA_reset_ctrl(base);
2694 return -ENODEV;
2695 }
2696 DAC960_LA_enable_intr(base);
2697 cb->qcmd = myrb_qcmd;
2698 cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
2699 if (cb->dual_mode_interface)
2700 cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
2701 else
2702 cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
2703 cb->disable_intr = DAC960_LA_disable_intr;
2704 cb->reset = DAC960_LA_reset_ctrl;
2705
2706 return 0;
2707 }
2708
2709 static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
2710 {
2711 struct myrb_hba *cb = arg;
2712 void __iomem *base = cb->io_base;
2713 struct myrb_stat_mbox *next_stat_mbox;
2714 unsigned long flags;
2715
2716 spin_lock_irqsave(&cb->queue_lock, flags);
2717 DAC960_LA_ack_intr(base);
2718 next_stat_mbox = cb->next_stat_mbox;
2719 while (next_stat_mbox->valid) {
2720 unsigned char id = next_stat_mbox->id;
2721 struct scsi_cmnd *scmd = NULL;
2722 struct myrb_cmdblk *cmd_blk = NULL;
2723
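		/*
		 * Tags MYRB_DCMD_TAG and MYRB_MCMD_TAG are reserved for the
		 * driver-internal command blocks; all other identifiers map
		 * to SCSI command tags offset by 3.
		 */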
2724 if (id == MYRB_DCMD_TAG)
2725 cmd_blk = &cb->dcmd_blk;
2726 else if (id == MYRB_MCMD_TAG)
2727 cmd_blk = &cb->mcmd_blk;
2728 else {
2729 scmd = scsi_host_find_tag(cb->host, id - 3);
2730 if (scmd)
2731 cmd_blk = scsi_cmd_priv(scmd);
2732 }
2733 if (cmd_blk)
2734 cmd_blk->status = next_stat_mbox->status;
2735 else
2736 dev_err(&cb->pdev->dev,
2737 "Unhandled command completion %d\n", id);
2738
2739 memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
2740 if (++next_stat_mbox > cb->last_stat_mbox)
2741 next_stat_mbox = cb->first_stat_mbox;
2742
2743 if (cmd_blk) {
2744 if (id < 3)
2745 myrb_handle_cmdblk(cb, cmd_blk);
2746 else
2747 myrb_handle_scsi(cb, cmd_blk, scmd);
2748 }
2749 }
2750 cb->next_stat_mbox = next_stat_mbox;
2751 spin_unlock_irqrestore(&cb->queue_lock, flags);
2752 return IRQ_HANDLED;
2753 }
2754
2755 static struct myrb_privdata DAC960_LA_privdata = {
2756 .hw_init = DAC960_LA_hw_init,
2757 .irq_handler = DAC960_LA_intr_handler,
2758 .mmio_size = DAC960_LA_mmio_size,
};

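/*
 * DAC960 PG Series Controllers.
 */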
2764 static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
2765 {
2766 writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2767 }
2768
2769 static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
2770 {
2771 writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
2772 }
2773
2774 static inline void DAC960_PG_reset_ctrl(void __iomem *base)
2775 {
2776 writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
2777 }
2778
2779 static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
2780 {
2781 writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2782 }
2783
2784 static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
2785 {
	unsigned int idb = readl(base + DAC960_PG_IDB_OFFSET);
2787
2788 return idb & DAC960_PG_IDB_HWMBOX_FULL;
2789 }
2790
2791 static inline bool DAC960_PG_init_in_progress(void __iomem *base)
2792 {
	unsigned int idb = readl(base + DAC960_PG_IDB_OFFSET);
2794
2795 return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
2796 }
2797
2798 static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
2799 {
2800 writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2801 }
2802
2803 static inline void DAC960_PG_ack_intr(void __iomem *base)
2804 {
2805 writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
2806 base + DAC960_PG_ODB_OFFSET);
2807 }
2808
2809 static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
2810 {
	unsigned int odb = readl(base + DAC960_PG_ODB_OFFSET);
2812
2813 return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
2814 }
2815
2816 static inline void DAC960_PG_enable_intr(void __iomem *base)
2817 {
2818 unsigned int imask = (unsigned int)-1;
2819
2820 imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
2821 writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2822 }
2823
2824 static inline void DAC960_PG_disable_intr(void __iomem *base)
2825 {
2826 unsigned int imask = (unsigned int)-1;
2827
2828 writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2829 }
2830
2831 static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2832 union myrb_cmd_mbox *mbox)
2833 {
2834 mem_mbox->words[1] = mbox->words[1];
2835 mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
2842 }
2843
2844 static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
2845 union myrb_cmd_mbox *mbox)
2846 {
2847 writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
2848 writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
2849 writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
2850 writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
2851 }
2852
2853 static inline unsigned short
2854 DAC960_PG_read_status(void __iomem *base)
2855 {
2856 return readw(base + DAC960_PG_STS_OFFSET);
2857 }
2858
2859 static inline bool
2860 DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
2861 unsigned char *param0, unsigned char *param1)
2862 {
2863 unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);
2864
2865 if (!(errsts & DAC960_PG_ERRSTS_PENDING))
2866 return false;
2867 errsts &= ~DAC960_PG_ERRSTS_PENDING;
2868 *error = errsts;
2869 *param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
2870 *param1 = readb(base + DAC960_PG_CMDID_OFFSET);
2871 writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
2872 return true;
2873 }
2874
2875 static inline unsigned short
2876 DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
2877 union myrb_cmd_mbox *mbox)
2878 {
2879 unsigned short status;
2880 int timeout = 0;
2881
2882 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2883 if (!DAC960_PG_hw_mbox_is_full(base))
2884 break;
2885 udelay(10);
2886 timeout++;
2887 }
2888 if (DAC960_PG_hw_mbox_is_full(base)) {
2889 dev_err(&pdev->dev,
2890 "Timeout waiting for empty mailbox\n");
2891 return MYRB_STATUS_SUBSYS_TIMEOUT;
2892 }
2893 DAC960_PG_write_hw_mbox(base, mbox);
2894 DAC960_PG_hw_mbox_new_cmd(base);
2895
2896 timeout = 0;
2897 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2898 if (DAC960_PG_hw_mbox_status_available(base))
2899 break;
2900 udelay(10);
2901 timeout++;
2902 }
2903 if (!DAC960_PG_hw_mbox_status_available(base)) {
2904 dev_err(&pdev->dev,
2905 "Timeout waiting for mailbox status\n");
2906 return MYRB_STATUS_SUBSYS_TIMEOUT;
2907 }
2908 status = DAC960_PG_read_status(base);
2909 DAC960_PG_ack_hw_mbox_intr(base);
2910 DAC960_PG_ack_hw_mbox_status(base);
2911
2912 return status;
2913 }
2914
2915 static int DAC960_PG_hw_init(struct pci_dev *pdev,
2916 struct myrb_hba *cb, void __iomem *base)
2917 {
2918 int timeout = 0;
2919 unsigned char error, parm0, parm1;
2920
2921 DAC960_PG_disable_intr(base);
2922 DAC960_PG_ack_hw_mbox_status(base);
2923 udelay(1000);
2924 while (DAC960_PG_init_in_progress(base) &&
2925 timeout < MYRB_MAILBOX_TIMEOUT) {
2926 if (DAC960_PG_read_error_status(base, &error,
2927 &parm0, &parm1) &&
2928 myrb_err_status(cb, error, parm0, parm1))
2929 return -EIO;
2930 udelay(10);
2931 timeout++;
2932 }
2933 if (timeout == MYRB_MAILBOX_TIMEOUT) {
2934 dev_err(&pdev->dev,
2935 "Timeout waiting for Controller Initialisation\n");
2936 return -ETIMEDOUT;
2937 }
2938 if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
2939 dev_err(&pdev->dev,
2940 "Unable to Enable Memory Mailbox Interface\n");
2941 DAC960_PG_reset_ctrl(base);
2942 return -ENODEV;
2943 }
2944 DAC960_PG_enable_intr(base);
2945 cb->qcmd = myrb_qcmd;
2946 cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
2947 if (cb->dual_mode_interface)
2948 cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
2949 else
2950 cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
2951 cb->disable_intr = DAC960_PG_disable_intr;
2952 cb->reset = DAC960_PG_reset_ctrl;
2953
2954 return 0;
2955 }
2956
2957 static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
2958 {
2959 struct myrb_hba *cb = arg;
2960 void __iomem *base = cb->io_base;
2961 struct myrb_stat_mbox *next_stat_mbox;
2962 unsigned long flags;
2963
2964 spin_lock_irqsave(&cb->queue_lock, flags);
2965 DAC960_PG_ack_intr(base);
2966 next_stat_mbox = cb->next_stat_mbox;
2967 while (next_stat_mbox->valid) {
2968 unsigned char id = next_stat_mbox->id;
2969 struct scsi_cmnd *scmd = NULL;
2970 struct myrb_cmdblk *cmd_blk = NULL;
2971
2972 if (id == MYRB_DCMD_TAG)
2973 cmd_blk = &cb->dcmd_blk;
2974 else if (id == MYRB_MCMD_TAG)
2975 cmd_blk = &cb->mcmd_blk;
2976 else {
2977 scmd = scsi_host_find_tag(cb->host, id - 3);
2978 if (scmd)
2979 cmd_blk = scsi_cmd_priv(scmd);
2980 }
2981 if (cmd_blk)
2982 cmd_blk->status = next_stat_mbox->status;
2983 else
2984 dev_err(&cb->pdev->dev,
2985 "Unhandled command completion %d\n", id);
2986
2987 memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
2988 if (++next_stat_mbox > cb->last_stat_mbox)
2989 next_stat_mbox = cb->first_stat_mbox;
2990
2991 if (id < 3)
2992 myrb_handle_cmdblk(cb, cmd_blk);
2993 else
2994 myrb_handle_scsi(cb, cmd_blk, scmd);
2995 }
2996 cb->next_stat_mbox = next_stat_mbox;
2997 spin_unlock_irqrestore(&cb->queue_lock, flags);
2998 return IRQ_HANDLED;
2999 }
3000
3001 static struct myrb_privdata DAC960_PG_privdata = {
3002 .hw_init = DAC960_PG_hw_init,
3003 .irq_handler = DAC960_PG_intr_handler,
3004 .mmio_size = DAC960_PG_mmio_size,
};

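/*
 * DAC960 PD Series Controllers.
 */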
3012 static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
3013 {
3014 writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
3015 }
3016
3017 static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
3018 {
3019 writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
3020 }
3021
3022 static inline void DAC960_PD_reset_ctrl(void __iomem *base)
3023 {
3024 writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
3025 }
3026
3027 static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
3028 {
3029 unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3030
3031 return idb & DAC960_PD_IDB_HWMBOX_FULL;
3032 }
3033
3034 static inline bool DAC960_PD_init_in_progress(void __iomem *base)
3035 {
3036 unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3037
3038 return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
3039 }
3040
3041 static inline void DAC960_PD_ack_intr(void __iomem *base)
3042 {
3043 writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
3044 }
3045
3046 static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
3047 {
3048 unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
3049
3050 return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
3051 }
3052
3053 static inline void DAC960_PD_enable_intr(void __iomem *base)
3054 {
3055 writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
3056 }
3057
3058 static inline void DAC960_PD_disable_intr(void __iomem *base)
3059 {
3060 writeb(0, base + DAC960_PD_IRQEN_OFFSET);
3061 }
3062
3063 static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
3064 union myrb_cmd_mbox *mbox)
3065 {
3066 writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
3067 writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
3068 writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
3069 writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
3070 }
3071
3072 static inline unsigned char
3073 DAC960_PD_read_status_cmd_ident(void __iomem *base)
3074 {
3075 return readb(base + DAC960_PD_STSID_OFFSET);
3076 }
3077
3078 static inline unsigned short
3079 DAC960_PD_read_status(void __iomem *base)
3080 {
3081 return readw(base + DAC960_PD_STS_OFFSET);
3082 }
3083
3084 static inline bool
3085 DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
3086 unsigned char *param0, unsigned char *param1)
3087 {
3088 unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);
3089
3090 if (!(errsts & DAC960_PD_ERRSTS_PENDING))
3091 return false;
3092 errsts &= ~DAC960_PD_ERRSTS_PENDING;
3093 *error = errsts;
3094 *param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
3095 *param1 = readb(base + DAC960_PD_CMDID_OFFSET);
3096 writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
3097 return true;
3098 }
3099
3100 static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3101 {
3102 void __iomem *base = cb->io_base;
3103 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3104
3105 while (DAC960_PD_hw_mbox_is_full(base))
3106 udelay(1);
3107 DAC960_PD_write_cmd_mbox(base, mbox);
3108 DAC960_PD_hw_mbox_new_cmd(base);
3109 }
3110
3111 static int DAC960_PD_hw_init(struct pci_dev *pdev,
3112 struct myrb_hba *cb, void __iomem *base)
3113 {
3114 int timeout = 0;
3115 unsigned char error, parm0, parm1;
3116
3117 if (!request_region(cb->io_addr, 0x80, "myrb")) {
3118 dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3119 (unsigned long)cb->io_addr);
3120 return -EBUSY;
3121 }
3122 DAC960_PD_disable_intr(base);
3123 DAC960_PD_ack_hw_mbox_status(base);
3124 udelay(1000);
3125 while (DAC960_PD_init_in_progress(base) &&
3126 timeout < MYRB_MAILBOX_TIMEOUT) {
3127 if (DAC960_PD_read_error_status(base, &error,
3128 &parm0, &parm1) &&
3129 myrb_err_status(cb, error, parm0, parm1))
3130 return -EIO;
3131 udelay(10);
3132 timeout++;
3133 }
3134 if (timeout == MYRB_MAILBOX_TIMEOUT) {
3135 dev_err(&pdev->dev,
3136 "Timeout waiting for Controller Initialisation\n");
3137 return -ETIMEDOUT;
3138 }
3139 if (!myrb_enable_mmio(cb, NULL)) {
3140 dev_err(&pdev->dev,
3141 "Unable to Enable Memory Mailbox Interface\n");
3142 DAC960_PD_reset_ctrl(base);
3143 return -ENODEV;
3144 }
3145 DAC960_PD_enable_intr(base);
3146 cb->qcmd = DAC960_PD_qcmd;
3147 cb->disable_intr = DAC960_PD_disable_intr;
3148 cb->reset = DAC960_PD_reset_ctrl;
3149
3150 return 0;
3151 }
3152
3153 static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
3154 {
3155 struct myrb_hba *cb = arg;
3156 void __iomem *base = cb->io_base;
3157 unsigned long flags;
3158
3159 spin_lock_irqsave(&cb->queue_lock, flags);
3160 while (DAC960_PD_hw_mbox_status_available(base)) {
3161 unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3162 struct scsi_cmnd *scmd = NULL;
3163 struct myrb_cmdblk *cmd_blk = NULL;
3164
3165 if (id == MYRB_DCMD_TAG)
3166 cmd_blk = &cb->dcmd_blk;
3167 else if (id == MYRB_MCMD_TAG)
3168 cmd_blk = &cb->mcmd_blk;
3169 else {
3170 scmd = scsi_host_find_tag(cb->host, id - 3);
3171 if (scmd)
3172 cmd_blk = scsi_cmd_priv(scmd);
3173 }
3174 if (cmd_blk)
3175 cmd_blk->status = DAC960_PD_read_status(base);
3176 else
3177 dev_err(&cb->pdev->dev,
3178 "Unhandled command completion %d\n", id);
3179
3180 DAC960_PD_ack_intr(base);
3181 DAC960_PD_ack_hw_mbox_status(base);
3182
3183 if (id < 3)
3184 myrb_handle_cmdblk(cb, cmd_blk);
3185 else
3186 myrb_handle_scsi(cb, cmd_blk, scmd);
3187 }
3188 spin_unlock_irqrestore(&cb->queue_lock, flags);
3189 return IRQ_HANDLED;
3190 }
3191
3192 static struct myrb_privdata DAC960_PD_privdata = {
3193 .hw_init = DAC960_PD_hw_init,
3194 .irq_handler = DAC960_PD_intr_handler,
3195 .mmio_size = DAC960_PD_mmio_size,
};

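/*
 * DAC960 P Series Controllers.
 *
 * Similar to the DAC960 PD Series Controllers, but enquiry, device state
 * and read/write commands have to be translated between the old and new
 * mailbox formats.
 */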
3206 static inline void myrb_translate_enquiry(void *enq)
3207 {
3208 memcpy(enq + 132, enq + 36, 64);
3209 memset(enq + 36, 0, 96);
3210 }
3211
3212 static inline void myrb_translate_devstate(void *state)
3213 {
3214 memcpy(state + 2, state + 3, 1);
3215 memmove(state + 4, state + 5, 2);
3216 memmove(state + 6, state + 8, 4);
3217 }
3218
3219 static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
3220 {
3221 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3222 int ldev_num = mbox->type5.ld.ldev_num;
3223
3224 mbox->bytes[3] &= 0x7;
3225 mbox->bytes[3] |= mbox->bytes[7] << 6;
3226 mbox->bytes[7] = ldev_num;
3227 }
3228
3229 static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
3230 {
3231 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3232 int ldev_num = mbox->bytes[7];
3233
3234 mbox->bytes[7] = mbox->bytes[3] >> 6;
3235 mbox->bytes[3] &= 0x7;
3236 mbox->bytes[3] |= ldev_num << 3;
3237 }
3238
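/*
 * DAC960_P_qcmd - queues a command after rewriting new-style opcodes to
 * their old-style equivalents and adjusting the read/write mailbox layout.
 */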
3239 static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3240 {
3241 void __iomem *base = cb->io_base;
3242 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3243
3244 switch (mbox->common.opcode) {
3245 case MYRB_CMD_ENQUIRY:
3246 mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
3247 break;
3248 case MYRB_CMD_GET_DEVICE_STATE:
3249 mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
3250 break;
3251 case MYRB_CMD_READ:
3252 mbox->common.opcode = MYRB_CMD_READ_OLD;
3253 myrb_translate_to_rw_command(cmd_blk);
3254 break;
3255 case MYRB_CMD_WRITE:
3256 mbox->common.opcode = MYRB_CMD_WRITE_OLD;
3257 myrb_translate_to_rw_command(cmd_blk);
3258 break;
3259 case MYRB_CMD_READ_SG:
3260 mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
3261 myrb_translate_to_rw_command(cmd_blk);
3262 break;
3263 case MYRB_CMD_WRITE_SG:
3264 mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
3265 myrb_translate_to_rw_command(cmd_blk);
3266 break;
3267 default:
3268 break;
3269 }
3270 while (DAC960_PD_hw_mbox_is_full(base))
3271 udelay(1);
3272 DAC960_PD_write_cmd_mbox(base, mbox);
3273 DAC960_PD_hw_mbox_new_cmd(base);
3274 }
3275
3276
3277 static int DAC960_P_hw_init(struct pci_dev *pdev,
3278 struct myrb_hba *cb, void __iomem *base)
3279 {
3280 int timeout = 0;
3281 unsigned char error, parm0, parm1;
3282
3283 if (!request_region(cb->io_addr, 0x80, "myrb")) {
3284 dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3285 (unsigned long)cb->io_addr);
3286 return -EBUSY;
3287 }
3288 DAC960_PD_disable_intr(base);
3289 DAC960_PD_ack_hw_mbox_status(base);
3290 udelay(1000);
3291 while (DAC960_PD_init_in_progress(base) &&
3292 timeout < MYRB_MAILBOX_TIMEOUT) {
3293 if (DAC960_PD_read_error_status(base, &error,
3294 &parm0, &parm1) &&
3295 myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
3297 udelay(10);
3298 timeout++;
3299 }
3300 if (timeout == MYRB_MAILBOX_TIMEOUT) {
3301 dev_err(&pdev->dev,
3302 "Timeout waiting for Controller Initialisation\n");
3303 return -ETIMEDOUT;
3304 }
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PD_reset_ctrl(base);
		return -ENODEV;
	}
3311 DAC960_PD_enable_intr(base);
3312 cb->qcmd = DAC960_P_qcmd;
3313 cb->disable_intr = DAC960_PD_disable_intr;
3314 cb->reset = DAC960_PD_reset_ctrl;
3315
3316 return 0;
3317 }
3318
3319 static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
3320 {
3321 struct myrb_hba *cb = arg;
3322 void __iomem *base = cb->io_base;
3323 unsigned long flags;
3324
3325 spin_lock_irqsave(&cb->queue_lock, flags);
3326 while (DAC960_PD_hw_mbox_status_available(base)) {
3327 unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3328 struct scsi_cmnd *scmd = NULL;
3329 struct myrb_cmdblk *cmd_blk = NULL;
3330 union myrb_cmd_mbox *mbox;
		enum myrb_cmd_opcode op;

3334 if (id == MYRB_DCMD_TAG)
3335 cmd_blk = &cb->dcmd_blk;
3336 else if (id == MYRB_MCMD_TAG)
3337 cmd_blk = &cb->mcmd_blk;
3338 else {
3339 scmd = scsi_host_find_tag(cb->host, id - 3);
3340 if (scmd)
3341 cmd_blk = scsi_cmd_priv(scmd);
3342 }
3343 if (cmd_blk)
3344 cmd_blk->status = DAC960_PD_read_status(base);
3345 else
3346 dev_err(&cb->pdev->dev,
3347 "Unhandled command completion %d\n", id);
3348
3349 DAC960_PD_ack_intr(base);
3350 DAC960_PD_ack_hw_mbox_status(base);
3351
3352 if (!cmd_blk)
3353 continue;
3354
3355 mbox = &cmd_blk->mbox;
3356 op = mbox->common.opcode;
3357 switch (op) {
3358 case MYRB_CMD_ENQUIRY_OLD:
3359 mbox->common.opcode = MYRB_CMD_ENQUIRY;
3360 myrb_translate_enquiry(cb->enquiry);
3361 break;
3362 case MYRB_CMD_READ_OLD:
3363 mbox->common.opcode = MYRB_CMD_READ;
3364 myrb_translate_from_rw_command(cmd_blk);
3365 break;
3366 case MYRB_CMD_WRITE_OLD:
3367 mbox->common.opcode = MYRB_CMD_WRITE;
3368 myrb_translate_from_rw_command(cmd_blk);
3369 break;
3370 case MYRB_CMD_READ_SG_OLD:
3371 mbox->common.opcode = MYRB_CMD_READ_SG;
3372 myrb_translate_from_rw_command(cmd_blk);
3373 break;
3374 case MYRB_CMD_WRITE_SG_OLD:
3375 mbox->common.opcode = MYRB_CMD_WRITE_SG;
3376 myrb_translate_from_rw_command(cmd_blk);
3377 break;
3378 default:
3379 break;
3380 }
3381 if (id < 3)
3382 myrb_handle_cmdblk(cb, cmd_blk);
3383 else
3384 myrb_handle_scsi(cb, cmd_blk, scmd);
3385 }
3386 spin_unlock_irqrestore(&cb->queue_lock, flags);
3387 return IRQ_HANDLED;
3388 }
3389
3390 static struct myrb_privdata DAC960_P_privdata = {
3391 .hw_init = DAC960_P_hw_init,
3392 .irq_handler = DAC960_P_intr_handler,
3393 .mmio_size = DAC960_PD_mmio_size,
3394 };
3395
3396 static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
3397 const struct pci_device_id *entry)
3398 {
3399 struct myrb_privdata *privdata =
3400 (struct myrb_privdata *)entry->driver_data;
3401 irq_handler_t irq_handler = privdata->irq_handler;
3402 unsigned int mmio_size = privdata->mmio_size;
3403 struct Scsi_Host *shost;
3404 struct myrb_hba *cb = NULL;
3405
3406 shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
3407 if (!shost) {
3408 dev_err(&pdev->dev, "Unable to allocate Controller\n");
3409 return NULL;
3410 }
3411 shost->max_cmd_len = 12;
3412 shost->max_lun = 256;
3413 cb = shost_priv(shost);
3414 mutex_init(&cb->dcmd_mutex);
3415 mutex_init(&cb->dma_mutex);
3416 cb->pdev = pdev;
3417 cb->host = shost;
3418
3419 if (pci_enable_device(pdev)) {
3420 dev_err(&pdev->dev, "Failed to enable PCI device\n");
3421 scsi_host_put(shost);
3422 return NULL;
3423 }
3424
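	/*
	 * The PD and P series use an I/O port window (BAR 0) in addition
	 * to the memory-mapped registers (BAR 1); the other variants are
	 * purely memory-mapped via BAR 0.
	 */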
	if (privdata->hw_init == DAC960_PD_hw_init ||
	    privdata->hw_init == DAC960_P_hw_init) {
		cb->io_addr = pci_resource_start(pdev, 0);
		cb->pci_addr = pci_resource_start(pdev, 1);
	} else {
		cb->pci_addr = pci_resource_start(pdev, 0);
	}
3431
3432 pci_set_drvdata(pdev, cb);
3433 spin_lock_init(&cb->queue_lock);
3434 if (mmio_size < PAGE_SIZE)
3435 mmio_size = PAGE_SIZE;
3436 cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
3437 if (cb->mmio_base == NULL) {
3438 dev_err(&pdev->dev,
3439 "Unable to map Controller Register Window\n");
3440 goto failure;
3441 }
3442
3443 cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
3444 if (privdata->hw_init(pdev, cb, cb->io_base))
3445 goto failure;
3446
3447 if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
3448 dev_err(&pdev->dev,
3449 "Unable to acquire IRQ Channel %d\n", pdev->irq);
3450 goto failure;
3451 }
3452 cb->irq = pdev->irq;
3453 return cb;
3454
3455 failure:
3456 dev_err(&pdev->dev,
3457 "Failed to initialize Controller\n");
3458 myrb_cleanup(cb);
3459 return NULL;
3460 }
3461
3462 static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
3463 {
3464 struct myrb_hba *cb;
3465 int ret;
3466
3467 cb = myrb_detect(dev, entry);
3468 if (!cb)
3469 return -ENODEV;
3470
3471 ret = myrb_get_hba_config(cb);
3472 if (ret < 0) {
3473 myrb_cleanup(cb);
3474 return ret;
3475 }
3476
3477 if (!myrb_create_mempools(dev, cb)) {
3478 ret = -ENOMEM;
3479 goto failed;
3480 }
3481
3482 ret = scsi_add_host(cb->host, &dev->dev);
3483 if (ret) {
3484 dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3485 myrb_destroy_mempools(cb);
3486 goto failed;
3487 }
3488 scsi_scan_host(cb->host);
3489 return 0;
3490 failed:
3491 myrb_cleanup(cb);
3492 return ret;
}

3496 static void myrb_remove(struct pci_dev *pdev)
3497 {
3498 struct myrb_hba *cb = pci_get_drvdata(pdev);
3499
	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...\n");
3501 myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
3502 myrb_cleanup(cb);
3503 myrb_destroy_mempools(cb);
}

3507 static const struct pci_device_id myrb_id_table[] = {
3508 {
3509 PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
3510 PCI_DEVICE_ID_DEC_21285,
3511 PCI_VENDOR_ID_MYLEX,
3512 PCI_DEVICE_ID_MYLEX_DAC960_LA),
3513 .driver_data = (unsigned long) &DAC960_LA_privdata,
3514 },
3515 {
3516 PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
3517 },
3518 {
3519 PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
3520 },
3521 {
3522 PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
3523 },
3524 {0, },
3525 };
3526
3527 MODULE_DEVICE_TABLE(pci, myrb_id_table);
3528
3529 static struct pci_driver myrb_pci_driver = {
3530 .name = "myrb",
3531 .id_table = myrb_id_table,
3532 .probe = myrb_probe,
3533 .remove = myrb_remove,
3534 };
3535
3536 static int __init myrb_init_module(void)
3537 {
3538 int ret;
3539
3540 myrb_raid_template = raid_class_attach(&myrb_raid_functions);
3541 if (!myrb_raid_template)
3542 return -ENODEV;
3543
3544 ret = pci_register_driver(&myrb_pci_driver);
3545 if (ret)
3546 raid_class_release(myrb_raid_template);
3547
3548 return ret;
3549 }
3550
3551 static void __exit myrb_cleanup_module(void)
3552 {
3553 pci_unregister_driver(&myrb_pci_driver);
3554 raid_class_release(myrb_raid_template);
3555 }
3556
3557 module_init(myrb_init_module);
3558 module_exit(myrb_cleanup_module);
3559
3560 MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
3561 MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
3562 MODULE_LICENSE("GPL");