// SPDX-License-Identifier: GPL-2.0-only
/* esp_scsi.c: ESP SCSI driver.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/irqreturn.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"2.000"
#define DRV_MODULE_RELDATE	"April 19, 2007"

/* SCSI bus reset settle time in seconds.  */
static int esp_bus_reset_settle = 3;

static u32 esp_debug;
#define ESP_DEBUG_INTR		0x00000001
#define ESP_DEBUG_SCSICMD	0x00000002
#define ESP_DEBUG_RESET		0x00000004
#define ESP_DEBUG_MSGIN		0x00000008
#define ESP_DEBUG_MSGOUT	0x00000010
#define ESP_DEBUG_CMDDONE	0x00000020
#define ESP_DEBUG_DISCONNECT	0x00000040
#define ESP_DEBUG_DATASTART	0x00000080
#define ESP_DEBUG_DATADONE	0x00000100
#define ESP_DEBUG_RECONNECT	0x00000200
#define ESP_DEBUG_AUTOSENSE	0x00000400
#define ESP_DEBUG_EVENT		0x00000800
#define ESP_DEBUG_COMMAND	0x00001000

#define esp_log_intr(f, a...) \
do {	if (esp_debug & ESP_DEBUG_INTR) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RESET) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGIN) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATASTART) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATADONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_event(f, a...) \
do {	if (esp_debug & ESP_DEBUG_EVENT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_command(f, a...) \
do {	if (esp_debug & ESP_DEBUG_COMMAND) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)

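/* Each chip command and driver state transition is recorded in a small
 * ring buffer (esp->esp_event_log) together with a snapshot of the chip
 * registers, so esp_dump_cmd_log() can reconstruct what led up to a
 * failure.
 */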
static void esp_log_fill_regs(struct esp *esp,
			      struct esp_event_ent *p)
{
	p->sreg = esp->sreg;
	p->seqreg = esp->seqreg;
	p->sreg2 = esp->sreg2;
	p->ireg = esp->ireg;
	p->select_state = esp->select_state;
	p->event = esp->event;
}

void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_log_command("cmd[%02x]\n", val);
	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);

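/* Transfer the command block to the chip: either byte-by-byte through
 * the FIFO when ESP_FLAG_USE_FIFO is set, or via the host's DMA engine
 * otherwise.  FASHME wants the FIFO flushed before a DMA command.
 */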
static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
{
	if (esp->flags & ESP_FLAG_USE_FIFO) {
		int i;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		for (i = 0; i < len; i++)
			esp_write8(esp->command_block[i], ESP_FDATA);
		scsi_esp_cmd(esp, cmd);
	} else {
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		cmd |= ESP_CMD_DMA;
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       len, max_len, 0, cmd);
	}
}

static void esp_event(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_EVENT;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp->event = val;
}

static void esp_dump_cmd_log(struct esp *esp)
{
	int idx = esp->esp_event_cur;
	int stop = idx;

	shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
	do {
		struct esp_event_ent *p = &esp->esp_event_log[idx];

		shost_printk(KERN_INFO, esp->host,
			     "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
			     "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
			     idx,
			     p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
			     p->val, p->sreg, p->seqreg,
			     p->sreg2, p->ireg, p->select_state, p->event);

		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
	} while (idx != stop);
}

static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				shost_printk(KERN_ALERT, esp->host,
					     "ESP_FF_FBYTES will not clear!\n");
				break;
			}
			udelay(1);
		}
	}
}

static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}

static void esp_set_all_config3(struct esp *esp, u8 val)
{
	int i;

	for (i = 0; i < ESP_MAX_TARGET; i++)
		esp->target[i].esp_config3 = val;
}

/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID-code for the fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		u8 family_code = ESP_FAMILY(esp_read8(ESP_UID));

		if (family_code == ESP_UID_F236) {
			esp->rev = FAS236;
		} else if (family_code == ESP_UID_HME) {
			esp->rev = FASHME;
		} else if (family_code == ESP_UID_FSC) {
			esp->rev = FSC;
			/* Enable Active Negation */
			esp_write8(ESP_CONFIG4_RADE, ESP_CFG4);
		} else {
			esp->rev = FAS100A;
		}
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	if (esp->rev == FAS236) {
		/*
		 * The AM53c974 chip returns the same ID as FAS236;
		 * try to configure glitch eater.
		 */
		u8 config4 = ESP_CONFIG4_GE1;

		esp_write8(config4, ESP_CFG4);
		config4 = esp_read8(ESP_CFG4);
		if (config4 & ESP_CONFIG4_GE1) {
			esp->rev = PCSCSI;
			esp_write8(esp->config4, ESP_CFG4);
		}
	}
	esp->max_period = (esp->max_period + 3) >> 2;
	esp->min_period = (esp->min_period + 3) >> 2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		fallthrough;

	case FAS236:
	case PCSCSI:
	case FSC:
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}

static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int total = 0, i;
	struct scatterlist *s;

	if (cmd->sc_data_direction == DMA_NONE)
		return;

	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		/*
		 * For pseudo DMA and PIO we need the virtual address instead
		 * of a dma address, so perform an identity mapping.
		 */
		spriv->num_sg = scsi_sg_count(cmd);

		scsi_for_each_sg(cmd, s, spriv->num_sg, i) {
			s->dma_address = (uintptr_t)sg_virt(s);
			total += sg_dma_len(s);
		}
	} else {
		spriv->num_sg = scsi_dma_map(cmd);
		scsi_for_each_sg(cmd, s, spriv->num_sg, i)
			total += sg_dma_len(s);
	}
	spriv->cur_residue = sg_dma_len(sg);
	spriv->prv_sg = NULL;
	spriv->cur_sg = sg;
	spriv->tot_residue = total;
}

static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
				   struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return ent->sense_dma +
			(ent->sense_ptr - cmd->sense_buffer);
	}

	return sg_dma_address(p->cur_sg) +
		(sg_dma_len(p->cur_sg) -
		 p->cur_residue);
}

static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
				    struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return SCSI_SENSE_BUFFERSIZE -
			(ent->sense_ptr - cmd->sense_buffer);
	}
	return p->cur_residue;
}

static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		shost_printk(KERN_ERR, esp->host,
			     "Data transfer overflow.\n");
		shost_printk(KERN_ERR, esp->host,
			     "cur_residue[%d] tot_residue[%d] len[%u]\n",
			     p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	if (!p->cur_residue && p->tot_residue) {
		p->prv_sg = p->cur_sg;
		p->cur_sg = sg_next(p->cur_sg);
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}

static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
		scsi_dma_unmap(cmd);
}

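/* SCSI SAVE DATA POINTER / RESTORE POINTERS support: a disconnecting
 * target expects the initiator to checkpoint and later roll back its
 * data pointer, so a saved copy of the scatterlist position is kept in
 * the command entry.
 */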
static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->saved_sense_ptr = ent->sense_ptr;
		return;
	}
	ent->saved_cur_residue = spriv->cur_residue;
	ent->saved_prv_sg = spriv->prv_sg;
	ent->saved_cur_sg = spriv->cur_sg;
	ent->saved_tot_residue = spriv->tot_residue;
}

static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr = ent->saved_sense_ptr;
		return;
	}
	spriv->cur_residue = ent->saved_cur_residue;
	spriv->prv_sg = ent->saved_prv_sg;
	spriv->cur_sg = ent->saved_cur_sg;
	spriv->tot_residue = ent->saved_tot_residue;
}

static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
	if (esp->rev > ESP100A) {
		u8 val = esp->target[tgt].esp_config3;

		if (val != esp->prev_cfg3) {
			esp->prev_cfg3 = val;
			esp_write8(val, ESP_CFG3);
		}
	}
}

static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
	u8 off = esp->target[tgt].esp_offset;
	u8 per = esp->target[tgt].esp_period;

	if (off != esp->prev_soff) {
		esp->prev_soff = off;
		esp_write8(off, ESP_SOFF);
	}
	if (per != esp->prev_stp) {
		esp->prev_stp = per;
		esp_write8(per, ESP_STP);
	}
}

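/* Clamp a DMA request to what the chip's transfer counter and the
 * attached DMA engine can actually handle.
 */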
static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	if (esp->rev == FASHME) {
		/* Arbitrary segment boundaries, 24-bit counts.  */
		if (dma_len > (1U << 24))
			dma_len = (1U << 24);
	} else {
		u32 base, end;

		/* ESP chip limits other variants by 16-bits of transfer
		 * count.  Actually on FAS100A and FAS236 we could get
		 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
		 * in the ESP_CFG2 register but that causes other unwanted
		 * changes so we don't use it currently.
		 */
		if (dma_len > (1U << 16))
			dma_len = (1U << 16);

		/* All of the DMA variants hooked up to these chips
		 * cannot handle crossing a 24-bit address boundary.
		 */
		base = dma_addr & ((1U << 24) - 1U);
		end = base + dma_len;
		if (end > (1U << 24))
			end = (1U << 24);
		dma_len = end - base;
	}
	return dma_len;
}

static int esp_need_to_nego_wide(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	return spi_width(target) != tp->nego_goal_width;
}

static int esp_need_to_nego_sync(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	/* When offset is zero, period is "don't care".  */
	if (!spi_offset(target) && !tp->nego_goal_offset)
		return 0;

	if (spi_offset(target) == tp->nego_goal_offset &&
	    spi_period(target) == tp->nego_goal_period)
		return 0;

	return 1;
}

static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken?  */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	}

	/* Tagged command.  Check that it isn't blocked by a non-tagged one. */
	if (lp->non_tagged_cmd || lp->hold)
		return -EBUSY;

	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}

static void esp_free_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (ent->orig_tag[0]) {
		BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
		lp->tagged_cmds[ent->orig_tag[1]] = NULL;
		lp->num_tagged--;
	} else {
		BUG_ON(lp->non_tagged_cmd != ent);
		lp->non_tagged_cmd = NULL;
	}
}

static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
	ent->sense_ptr = ent->cmd->sense_buffer;
	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		ent->sense_dma = (uintptr_t)ent->sense_ptr;
		return;
	}

	ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
					SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}

static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
		dma_unmap_single(esp->dev, ent->sense_dma,
				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
	ent->sense_ptr = NULL;
}

/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);
		esp_map_sense(esp, ent);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}

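/* Walk the issue queue and return the first command whose tag (or lack
 * of one) can be allocated against its LUN right now; autosense
 * requests are issued untagged and bypass tag allocation entirely.
 */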
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}

static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	bool select_and_stop = false;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
		select_and_stop = true;

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this.  */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* A pending negotiation message forces select-and-stop
		 * so the message can be sent during selection.
		 */
		if (esp->msg_out_len)
			select_and_stop = true;
	}

build_identify:
	*p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		select_and_stop = true;
	}

	if (select_and_stop) {
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	} else {
		start_cmd = ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	esp_send_dma_cmd(esp, val, 16, start_cmd);
}

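/* Command entries are recycled through a free list to avoid allocator
 * traffic in the I/O path; esp_get_ent() falls back to a GFP_ATOMIC
 * allocation only when the pool is empty.
 */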
static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
{
	struct list_head *head = &esp->esp_cmd_pool;
	struct esp_cmd_entry *ret;

	if (list_empty(head)) {
		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
	} else {
		ret = list_entry(head->next, struct esp_cmd_entry, list);
		list_del(&ret->list);
		memset(ret, 0, sizeof(*ret));
	}
	return ret;
}

static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}

static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned char host_byte)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = 0;
	set_host_byte(cmd, host_byte);
	if (host_byte == DID_OK)
		set_status_byte(cmd, ent->status);

	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_unmap_sense(esp, ent);

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = SAM_STAT_CHECK_CONDITION;

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}

static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_device *dev = ent->cmd->device;
	struct esp_lun_data *lp = dev->hostdata;

	scsi_track_queue_full(dev, lp->num_tagged - 1);
}

static int esp_queuecommand_lck(struct scsi_cmnd *cmd)
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	spriv = ESP_CMD_PRIV(cmd);
	spriv->num_sg = 0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}

static DEF_SCSI_QCMD(esp_queuecommand)

static int esp_check_gross_error(struct esp *esp)
{
	if (esp->sreg & ESP_STAT_SPAM) {
		/* Gross Error, could be one of:
		 * - top of fifo overwritten
		 * - top of command register overwritten
		 * - DMA programmed with wrong direction
		 * - improper phase change
		 */
		shost_printk(KERN_ERR, esp->host,
			     "Gross error sreg[%02x]\n", esp->sreg);
		/* XXX Reset the chip. XXX */
		return 1;
	}
	return 0;
}

static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register
		 * cannot be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				shost_printk(KERN_ERR, esp->host,
					     "Spurious irq, sreg=%02x.\n",
					     esp->sreg);
				return -1;
			}

			shost_printk(KERN_ERR, esp->host, "DMA error\n");

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}

static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("esp_schedule_reset() from %ps\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}

/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message.  */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}

static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* The reselection ID byte in the fifo must have exactly
		 * two bits set: our own ID plus the ID of the reselecting
		 * target.  Anything else means arbitration went wrong and
		 * the only sane recovery is a bus reset.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}

static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;

	/* No longer selecting.  */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, DID_ERROR);
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state
		 * (such as message out and command pointers).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp_unmap_sense(esp, ent);
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, DID_BAD_TARGET);
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a Select And Stop command, negotiation,
		 * etc. we'll do the right thing as we transition to the
		 * next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected selection completion ireg[%x]\n", esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}

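/* Figure out how many bytes actually moved during the last data phase
 * by combining the programmed DMA length, the chip's transfer counter,
 * and any stray bytes left in the FIFO.  A negative return means the
 * ESP100 stray-byte chip bug was detected and the caller must treat
 * the transfer as failed.
 */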
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
		if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
			ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;
	bytes_sent -= esp->send_cmd_residual;

	/*
	 * The am53c974 has a DMA 'peculiarity'. The doc states:
	 * In some odd byte conditions, one residual byte will
	 * be left in the SCSI FIFO, and the FIFO Flags will
	 * never count to '0'. When this happens, the residual
	 * byte should be retrieved via PIO following completion
	 * of the BLAST operation.
	 */
	if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
		size_t count = 1;
		size_t offset = bytes_sent;
		u8 bval = esp_read8(ESP_FDATA);

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
			ent->sense_ptr[bytes_sent] = bval;
		else {
			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
			u8 *ptr;

			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
						  &offset, &count);
			if (likely(ptr)) {
				*(ptr + offset) = bval;
				scsi_kunmap_atomic_sg(ptr);
			}
		}
		bytes_sent += fifo_cnt;
		ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
	}
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* No synchronous offset programmed, i.e. asynchronous
		 * transfer: always flush the fifo.
		 */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug in synchronous data phase:
			 * a final long REQ pulse from the target can be
			 * mistaken for an extra data byte.
			 *
			 * To detect it, resample the status register and
			 * fifo flags; if we are still in a data phase with
			 * bytes pending, report failure to the caller
			 * instead of computing a bogus residual.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}

static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}

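/* The target rejected our last message.  Back off wide negotiation to
 * narrow (possibly falling through to sync negotiation), or back off
 * sync to async; an unexpected reject forces a bus reset.
 */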
static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n");
	esp_schedule_reset(esp);
}

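/* Handle an incoming SDTR message: validate the target's period and
 * offset against what the chip can do, program the transfer registers,
 * counter-propose, or reject the agreement.
 */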
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

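/* Handle an incoming WDTR message: only FASHME can do wide transfers,
 * and a successful wide agreement is followed by sync negotiation if
 * one is still pending.
 */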
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_extended(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt = cmd->device->id;

	tp = &esp->target[tgt];
	if (esp->msg_in[2] == EXTENDED_SDTR) {
		esp_msgin_sdtr(esp, tp);
		return;
	}
	if (esp->msg_in[2] == EXTENDED_WDTR) {
		esp_msgin_wdtr(esp, tp);
		return;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected extended msg type %x\n", esp->msg_in[2]);

	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg = spriv->prv_sg;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}

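/* The driver's central state machine.  Dispatches on esp->event, which
 * is advanced by interrupts and by the phase bits of the status
 * register.  Returns zero when another pass through the machine is
 * needed and non-zero when interrupt processing is complete.
 */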
static int esp_process_event(struct esp *esp)
{
	int write, i;

again:
	write = 0;
	esp_log_event("process event %d phase %x\n",
		      esp->event, esp->sreg & ESP_STAT_PMASK);
	switch (esp->event) {
	case ESP_EVENT_CHECK_PHASE:
		switch (esp->sreg & ESP_STAT_PMASK) {
		case ESP_DOP:
			esp_event(esp, ESP_EVENT_DATA_OUT);
			break;
		case ESP_DIP:
			esp_event(esp, ESP_EVENT_DATA_IN);
			break;
		case ESP_STATP:
			esp_flush_fifo(esp);
			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
			esp_event(esp, ESP_EVENT_STATUS);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;

		case ESP_MOP:
			esp_event(esp, ESP_EVENT_MSGOUT);
			break;

		case ESP_MIP:
			esp_event(esp, ESP_EVENT_MSGIN);
			break;

		case ESP_CMDP:
			esp_event(esp, ESP_EVENT_CMD_START);
			break;

		default:
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected phase, sreg=%02x\n",
				     esp->sreg);
			esp_schedule_reset(esp);
			return 0;
		}
		goto again;

	case ESP_EVENT_DATA_IN:
		write = 1;
		fallthrough;

	case ESP_EVENT_DATA_OUT: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
		unsigned int dma_len = esp_cur_dma_len(ent, cmd);

		if (esp->rev == ESP100)
			scsi_esp_cmd(esp, ESP_CMD_NULL);

		if (write)
			ent->flags |= ESP_CMD_FLAG_WRITE;
		else
			ent->flags &= ~ESP_CMD_FLAG_WRITE;

		if (esp->ops->dma_length_limit)
			dma_len = esp->ops->dma_length_limit(esp, dma_addr,
							     dma_len);
		else
			dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);

		esp->data_dma_len = dma_len;

		if (!dma_len) {
			shost_printk(KERN_ERR, esp->host,
				     "DMA length is zero!\n");
			shost_printk(KERN_ERR, esp->host,
				     "cur adr[%08llx] len[%08x]\n",
				     (unsigned long long)esp_cur_dma_addr(ent, cmd),
				     esp_cur_dma_len(ent, cmd));
			esp_schedule_reset(esp);
			return 0;
		}

		esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
				  (unsigned long long)dma_addr, dma_len, write);

		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
				       write, ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_DATA_DONE);
		break;
	}
	case ESP_EVENT_DATA_DONE: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		int bytes_sent;

		if (esp->ops->dma_error(esp)) {
			shost_printk(KERN_INFO, esp->host,
				     "data done, DMA error, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		if (ent->flags & ESP_CMD_FLAG_WRITE) {
			/* XXX parity errors, etc. XXX */
			esp->ops->dma_drain(esp);
		}
		esp->ops->dma_invalidate(esp);

		if (esp->ireg != ESP_INTR_BSERV) {
			/* We should always see exactly a bus-service
			 * interrupt at the end of a successful transfer.
			 */
			shost_printk(KERN_INFO, esp->host,
				     "data done, not BSERV, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);

		esp_log_datadone("data done flgs[%x] sent[%d]\n",
				 ent->flags, bytes_sent);

		if (bytes_sent < 0) {
			/* XXX force sync mode for this target XXX */
			esp_schedule_reset(esp);
			return 0;
		}

		esp_advance_dma(esp, ent, cmd, bytes_sent);
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	}

	case ESP_EVENT_STATUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		if (esp->ireg & ESP_INTR_FDONE) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = esp_read8(ESP_FDATA);
			scsi_esp_cmd(esp, ESP_CMD_MOK);
		} else if (esp->ireg == ESP_INTR_BSERV) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = 0xff;
			esp_event(esp, ESP_EVENT_MSGIN);
			return 0;
		}

		if (ent->message != COMMAND_COMPLETE) {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in status\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}

		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_FREE_BUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;

		if (ent->message == COMMAND_COMPLETE ||
		    ent->message == DISCONNECT)
			scsi_esp_cmd(esp, ESP_CMD_ESEL);

		if (ent->message == COMMAND_COMPLETE) {
			esp_log_cmddone("Command done status[%x] message[%x]\n",
					ent->status, ent->message);
			if (ent->status == SAM_STAT_TASK_SET_FULL)
				esp_event_queue_full(esp, ent);

			if (ent->status == SAM_STAT_CHECK_CONDITION &&
			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
				esp_autosense(esp, ent);
			} else {
				esp_cmd_is_done(esp, ent, cmd, DID_OK);
			}
		} else if (ent->message == DISCONNECT) {
			esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
					   cmd->device->id,
					   ent->tag[0], ent->tag[1]);

			esp->active_cmd = NULL;
			esp_maybe_execute_command(esp);
		} else {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in freebus\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}
		if (esp->active_cmd)
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_MSGOUT: {
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (esp_debug & ESP_DEBUG_MSGOUT) {
			int i;
			printk("ESP: Sending message [ ");
			for (i = 0; i < esp->msg_out_len; i++)
				printk("%02x ", esp->msg_out[i]);
			printk("]\n");
		}

		if (esp->rev == FASHME) {
			int i;

			/* Always use the fifo.  */
			for (i = 0; i < esp->msg_out_len; i++) {
				esp_write8(esp->msg_out[i], ESP_FDATA);
				esp_write8(0, ESP_FDATA);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
		} else {
			if (esp->msg_out_len == 1) {
				esp_write8(esp->msg_out[0], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else if (esp->flags & ESP_FLAG_USE_FIFO) {
				for (i = 0; i < esp->msg_out_len; i++)
					esp_write8(esp->msg_out[i], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else {
				/* Use DMA. */
				memcpy(esp->command_block,
				       esp->msg_out,
				       esp->msg_out_len);

				esp->ops->send_dma_cmd(esp,
						       esp->command_block_dma,
						       esp->msg_out_len,
						       esp->msg_out_len,
						       0,
						       ESP_CMD_DMA|ESP_CMD_TI);
			}
		}
		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
		break;
	}
	case ESP_EVENT_MSGOUT_DONE:
		if (esp->rev == FASHME) {
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		} else {
			if (esp->msg_out_len > 1)
				esp->ops->dma_invalidate(esp);

			/* XXX if the chip went into disconnected mode,
			 * we can't run the phase state machine anyway.
			 */
			if (!(esp->ireg & ESP_INTR_DC))
				scsi_esp_cmd(esp, ESP_CMD_NULL);
		}

		esp->msg_out_len = 0;

		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	case ESP_EVENT_MSGIN:
		if (esp->ireg & ESP_INTR_BSERV) {
			if (esp->rev == FASHME) {
				if (!(esp_read8(ESP_STATUS2) &
				      ESP_STAT2_FEMPTY))
					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
			} else {
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
				if (esp->rev == ESP100)
					scsi_esp_cmd(esp, ESP_CMD_NULL);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;
		}
		if (esp->ireg & ESP_INTR_FDONE) {
			u8 val;

			if (esp->rev == FASHME)
				val = esp->fifo[0];
			else
				val = esp_read8(ESP_FDATA);
			esp->msg_in[esp->msg_in_len++] = val;

			esp_log_msgin("Got msgin byte %x\n", val);

			if (!esp_msgin_process(esp))
				esp->msg_in_len = 0;

			if (esp->rev == FASHME)
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);

			scsi_esp_cmd(esp, ESP_CMD_MOK);

			/* Check whether a bus reset is to be done next */
			if (esp->event == ESP_EVENT_RESET)
				return 0;

			if (esp->event != ESP_EVENT_FREE_BUS)
				esp_event(esp, ESP_EVENT_CHECK_PHASE);
		} else {
			shost_printk(KERN_INFO, esp->host,
				     "MSGIN neither BSERV nor FDONE, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}
		break;
	case ESP_EVENT_CMD_START:
		memcpy(esp->command_block, esp->cmd_bytes_ptr,
		       esp->cmd_bytes_left);
		esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_CMD_DONE);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	case ESP_EVENT_CMD_DONE:
		esp->ops->dma_invalidate(esp);
		if (esp->ireg & ESP_INTR_BSERV) {
			esp_event(esp, ESP_EVENT_CHECK_PHASE);
			goto again;
		}
		esp_schedule_reset(esp);
		return 0;

	case ESP_EVENT_RESET:
		scsi_esp_cmd(esp, ESP_CMD_RS);
		break;

	default:
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected event %x, resetting\n", esp->event);
		esp_schedule_reset(esp);
		return 0;
	}
	return 1;
}

static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
		esp_unmap_sense(esp, ent);

	scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}

static void esp_clear_hold(struct scsi_device *dev, void *data)
{
	struct esp_lun_data *lp = dev->hostdata;

	BUG_ON(lp->num_tagged);
	lp->hold = 0;
}

static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers.  */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}

/* Runs under host->lock */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	/*
	 * Once INTRPT is read STATUS and SSTEP are cleared.
	 */
	esp->sreg = esp_read8(ESP_STATUS);
	esp->seqreg = esp_read8(ESP_SSTEP);
	esp->ireg = esp_read8(ESP_INTRPT);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		shost_printk(KERN_INFO, esp->host,
			     "unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (esp->ireg & ESP_INTR_RSEL) {
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		} else {
			/* Some combination of FDONE, BSERV, DC. */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		}
	}
	while (!intr_done)
		intr_done = esp_process_event(esp);
}

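/* Shared interrupt handler entry point for all the ESP front-ends.
 * Runs the state machine under host_lock, and briefly re-polls for a
 * pending interrupt (the QUICKIRQ check) to catch back-to-back events
 * without a full hardirq round trip.
 */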
irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);

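/* Probe the chip revision by poking the config registers: ESP100 has
 * no CFG2, ESP100A has CFG2 but no CFG3, and the faster variants are
 * told apart later in esp_reset_esp() via the unique-ID register.
 */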
static void esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	if (esp->config2 == 0) {
		esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
		esp_write8(esp->config2, ESP_CFG2);

		val = esp_read8(ESP_CFG2);
		val &= ~ESP_CONFIG2_MAGIC;

		esp->config2 = 0;
		if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
			/*
			 * If what we write to cfg2 does not come back,
			 * cfg2 is not implemented.
			 * Therefore this must be a plain esp100.
			 */
			esp->rev = ESP100;
			return;
		}
	}

	esp_set_all_config3(esp, 5);
	esp->prev_cfg3 = 5;
	esp_write8(esp->config2, ESP_CFG2);
	esp_write8(0, ESP_CFG3);
	esp_write8(esp->prev_cfg3, ESP_CFG3);

	val = esp_read8(ESP_CFG3);
	if (val != 5) {
		/* The cfg2 register is implemented, however
		 * cfg3 is not, must be esp100a.
		 */
		esp->rev = ESP100A;
	} else {
		esp_set_all_config3(esp, 0);
		esp->prev_cfg3 = 0;
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		/* All of cfg{1,2,3} implemented, must be one of
		 * the fas variants, figure out which one.
		 */
		if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
			esp->rev = FAST;
			esp->sync_defp = SYNC_DEFP_FAST;
		} else {
			esp->rev = ESP236;
		}
	}
}

static void esp_init_swstate(struct esp *esp)
{
	int i;

	INIT_LIST_HEAD(&esp->queued_cmds);
	INIT_LIST_HEAD(&esp->active_cmds);
	INIT_LIST_HEAD(&esp->esp_cmd_pool);

	/* Start with a clear state, domain validation (via ->slave_configure,
	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
	 * queues.
	 */
	for (i = 0 ; i < ESP_MAX_TARGET; i++) {
		esp->target[i].flags = 0;
		esp->target[i].nego_goal_period = 0;
		esp->target[i].nego_goal_offset = 0;
		esp->target[i].nego_goal_width = 0;
		esp->target[i].nego_goal_tags = 0;
	}
}

/* This places the ESP into a known state at boot time. */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}

static void esp_set_clock_params(struct esp *esp)
{
	int fhz;
	u8 ccf;

	/* Two pieces of chip timing state are derived from the input
	 * crystal frequency:
	 *
	 * a) Clock Conversion Factor
	 *
	 *    A representation of the input clock rate going into the
	 *    chip.  Any operation whose timing is longer than 400ns
	 *    depends on this value being correct.
	 *
	 * b) Selection Time-Out
	 *
	 *    The chip will arbitrate for the bus and try to select a
	 *    target forever if you let it.  This value tells it when
	 *    it has taken too long and should interrupt the CPU.  Per
	 *    the NCR/Symbios chip docs:
	 *
	 *          (Time Out Period) * (Input Clock)
	 *    STO = ---------------------------------
	 *          (8192) * (Clock Conversion Factor)
	 */
	fhz = esp->cfreq;

	ccf = ((fhz / 1000000) + 4) / 5;
	if (ccf == 1)
		ccf = 2;

	/* If we can't find anything reasonable, just assume 20MHZ.
	 * This is the clock frequency of the older sun4c's where I've
	 * been unable to find the clock-frequency PROM property.  All
	 * other machines provide useful values it seems.
	 */
	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
		fhz = 20000000;
		ccf = 4;
	}

	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fhz;
	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
	esp->sync_defp = SYNC_DEFP_SLOW;
}

/* Indexed by the esp->rev value probed in esp_get_revision(). */
static const char *esp_chip_names[] = {
        "ESP100",
        "ESP100A",
        "ESP236",
        "FAS236",
        "AM53C974",
        "53CF9x-2",
        "FAS100A",
        "FAST",
        "FASHME",
};

static struct scsi_transport_template *esp_transport_template;

int scsi_esp_register(struct esp *esp)
{
        static int instance;
        int err;

        if (!esp->num_tags)
                esp->num_tags = ESP_DEFAULT_TAGS;
        esp->host->transportt = esp_transport_template;
        esp->host->max_lun = ESP_MAX_LUN;
        esp->host->cmd_per_lun = 2;
        esp->host->unique_id = instance;

        esp_set_clock_params(esp);

        esp_get_revision(esp);

        esp_init_swstate(esp);

        esp_bootup_reset(esp);

        dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
                   esp->host->unique_id, esp->regs, esp->dma_regs,
                   esp->host->irq);
        dev_printk(KERN_INFO, esp->dev,
                   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
                   esp->host->unique_id, esp_chip_names[esp->rev],
                   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

        /* Let the SCSI bus reset settle before scanning. */
        ssleep(esp_bus_reset_settle);

        err = scsi_add_host(esp->host, esp->dev);
        if (err)
                return err;

        instance++;

        scsi_scan_host(esp->host);

        return 0;
}
EXPORT_SYMBOL(scsi_esp_register);
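
/* A bus front-end allocates its host with scsi_esp_template, fills in
 * the struct esp it wraps, and then calls scsi_esp_register().  A
 * minimal sketch, with illustrative values and a hypothetical
 * my_esp_ops providing the bus-specific register/DMA callbacks:
 *
 *      struct Scsi_Host *host;
 *      struct esp *esp;
 *
 *      host = scsi_host_alloc(&scsi_esp_template, sizeof(struct esp));
 *      esp = shost_priv(host);
 *      esp->host = host;
 *      esp->dev = dev;
 *      esp->regs = regs;
 *      esp->ops = &my_esp_ops;
 *      esp->scsi_id = 7;
 *      esp->host->this_id = esp->scsi_id;
 *      esp->scsi_id_mask = (1 << esp->scsi_id);
 *      esp->cfreq = 25000000;
 *      err = scsi_esp_register(esp);
 *
 * scsi_esp_unregister() below only undoes the midlayer registration;
 * the front-end remains responsible for its IRQ and for the final
 * scsi_host_put().
 */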

void scsi_esp_unregister(struct esp *esp)
{
        scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);

static int esp_target_alloc(struct scsi_target *starget)
{
        struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
        struct esp_target_data *tp = &esp->target[starget->id];

        tp->starget = starget;

        return 0;
}

static void esp_target_destroy(struct scsi_target *starget)
{
        struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
        struct esp_target_data *tp = &esp->target[starget->id];

        tp->starget = NULL;
}

static int esp_slave_alloc(struct scsi_device *dev)
{
        struct esp *esp = shost_priv(dev->host);
        struct esp_target_data *tp = &esp->target[dev->id];
        struct esp_lun_data *lp;

        lp = kzalloc(sizeof(*lp), GFP_KERNEL);
        if (!lp)
                return -ENOMEM;
        dev->hostdata = lp;

        spi_min_period(tp->starget) = esp->min_period;
        spi_max_offset(tp->starget) = 15;

        if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
                spi_max_width(tp->starget) = 1;
        else
                spi_max_width(tp->starget) = 0;

        return 0;
}

static int esp_slave_configure(struct scsi_device *dev)
{
        struct esp *esp = shost_priv(dev->host);
        struct esp_target_data *tp = &esp->target[dev->id];

        if (dev->tagged_supported)
                scsi_change_queue_depth(dev, esp->num_tags);

        tp->flags |= ESP_TGT_DISCONNECT;

        /* Kick off domain validation once per target; it exercises
         * the sync/wide limits advertised in esp_slave_alloc().
         */
        if (!spi_initial_dv(dev->sdev_target))
                spi_dv_device(dev);

        return 0;
}

static void esp_slave_destroy(struct scsi_device *dev)
{
        struct esp_lun_data *lp = dev->hostdata;

        kfree(lp);
        dev->hostdata = NULL;
}

static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
        struct esp *esp = shost_priv(cmd->device->host);
        struct esp_cmd_entry *ent, *tmp;
        struct completion eh_done;
        unsigned long flags;

        /* Dump the driver's command state first; this is invaluable
         * when working out why the abort fired.
         */
        spin_lock_irqsave(esp->host->host_lock, flags);
        shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
                     cmd, cmd->cmnd[0]);
        ent = esp->active_cmd;
        if (ent)
                shost_printk(KERN_ERR, esp->host,
                             "Current command [%p:%02x]\n",
                             ent->cmd, ent->cmd->cmnd[0]);
        list_for_each_entry(ent, &esp->queued_cmds, list) {
                shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
                             ent->cmd, ent->cmd->cmnd[0]);
        }
        list_for_each_entry(ent, &esp->active_cmds, list) {
                shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
                             ent->cmd, ent->cmd->cmnd[0]);
        }
        esp_dump_cmd_log(esp);
        spin_unlock_irqrestore(esp->host->host_lock, flags);

        spin_lock_irqsave(esp->host->host_lock, flags);

        ent = NULL;
        list_for_each_entry(tmp, &esp->queued_cmds, list) {
                if (tmp->cmd == cmd) {
                        ent = tmp;
                        break;
                }
        }

        if (ent) {
                /* Easiest case: the command has not been issued to
                 * the chip yet, so aborting it is just unlinking it
                 * from the queue.
                 */
                list_del(&ent->list);

                cmd->result = DID_ABORT << 16;
                scsi_done(cmd);

                esp_put_ent(esp, ent);

                goto out_success;
        }

        init_completion(&eh_done);

        ent = esp->active_cmd;
        if (ent && ent->cmd == cmd) {
                /* The command is currently active on the bus.  If we
                 * already have an outgoing message pending, there is
                 * nothing we can do.
                 */
                if (esp->msg_out_len)
                        goto out_failure;

                /* Send an abort message, asserting ATN to push the
                 * target into MSGOUT phase.
                 */
                esp->msg_out[0] = ABORT_TASK_SET;
                esp->msg_out_len = 1;
                ent->eh_done = &eh_done;

                scsi_esp_cmd(esp, ESP_CMD_SATN);
        } else {
                /* The command is disconnected, which is not easy to
                 * abort from here.  Fail and let the SCSI error
                 * handling layer escalate to a deeper reset.
                 */
                goto out_failure;
        }

        spin_unlock_irqrestore(esp->host->host_lock, flags);

        if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
                spin_lock_irqsave(esp->host->host_lock, flags);
                ent->eh_done = NULL;
                spin_unlock_irqrestore(esp->host->host_lock, flags);

                return FAILED;
        }

        return SUCCESS;

out_success:
        spin_unlock_irqrestore(esp->host->host_lock, flags);
        return SUCCESS;

out_failure:
        /* We know exactly which target/lun is causing trouble here,
         * so this would be a reasonable place to mark it broken.
         */
        spin_unlock_irqrestore(esp->host->host_lock, flags);
        return FAILED;
}

static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
        struct esp *esp = shost_priv(cmd->device->host);
        struct completion eh_reset;
        unsigned long flags;

        init_completion(&eh_reset);

        spin_lock_irqsave(esp->host->host_lock, flags);

        esp->eh_reset = &eh_reset;

        /* Flag the reset as driver-initiated so that the interrupt
         * path cleans up and completes eh_reset when the reset
         * interrupt arrives, then issue the SCSI bus reset command.
         */
        esp->flags |= ESP_FLAG_RESETTING;
        scsi_esp_cmd(esp, ESP_CMD_RS);

        spin_unlock_irqrestore(esp->host->host_lock, flags);

        ssleep(esp_bus_reset_settle);

        if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
                spin_lock_irqsave(esp->host->host_lock, flags);
                esp->eh_reset = NULL;
                spin_unlock_irqrestore(esp->host->host_lock, flags);

                return FAILED;
        }

        return SUCCESS;
}

/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
        struct esp *esp = shost_priv(cmd->device->host);
        unsigned long flags;

        spin_lock_irqsave(esp->host->host_lock, flags);
        esp_bootup_reset(esp);
        esp_reset_cleanup(esp);
        spin_unlock_irqrestore(esp->host->host_lock, flags);

        ssleep(esp_bus_reset_settle);

        return SUCCESS;
}

static const char *esp_info(struct Scsi_Host *host)
{
        return "esp";
}

struct scsi_host_template scsi_esp_template = {
        .module                 = THIS_MODULE,
        .name                   = "esp",
        .info                   = esp_info,
        .queuecommand           = esp_queuecommand,
        .target_alloc           = esp_target_alloc,
        .target_destroy         = esp_target_destroy,
        .slave_alloc            = esp_slave_alloc,
        .slave_configure        = esp_slave_configure,
        .slave_destroy          = esp_slave_destroy,
        .eh_abort_handler       = esp_eh_abort_handler,
        .eh_bus_reset_handler   = esp_eh_bus_reset_handler,
        .eh_host_reset_handler  = esp_eh_host_reset_handler,
        .can_queue              = 7,
        .this_id                = 7,
        .sg_tablesize           = SG_ALL,
        .max_sectors            = 0xffff,
        .skip_settle_delay      = 1,
        .cmd_size               = sizeof(struct esp_cmd_priv),
};
EXPORT_SYMBOL(scsi_esp_template);

static void esp_get_signalling(struct Scsi_Host *host)
{
        struct esp *esp = shost_priv(host);
        enum spi_signal_type type;

        if (esp->flags & ESP_FLAG_DIFFERENTIAL)
                type = SPI_SIGNAL_HVD;
        else
                type = SPI_SIGNAL_SE;

        spi_signalling(host) = type;
}

static void esp_set_offset(struct scsi_target *target, int offset)
{
        struct Scsi_Host *host = dev_to_shost(target->dev.parent);
        struct esp *esp = shost_priv(host);
        struct esp_target_data *tp = &esp->target[target->id];

        if (esp->flags & ESP_FLAG_DISABLE_SYNC)
                tp->nego_goal_offset = 0;
        else
                tp->nego_goal_offset = offset;
        tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_period(struct scsi_target *target, int period)
{
        struct Scsi_Host *host = dev_to_shost(target->dev.parent);
        struct esp *esp = shost_priv(host);
        struct esp_target_data *tp = &esp->target[target->id];

        tp->nego_goal_period = period;
        tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_width(struct scsi_target *target, int width)
{
        struct Scsi_Host *host = dev_to_shost(target->dev.parent);
        struct esp *esp = shost_priv(host);
        struct esp_target_data *tp = &esp->target[target->id];

        tp->nego_goal_width = (width ? 1 : 0);
        tp->flags |= ESP_TGT_CHECK_NEGO;
}

static struct spi_function_template esp_transport_ops = {
        .set_offset             = esp_set_offset,
        .show_offset            = 1,
        .set_period             = esp_set_period,
        .show_period            = 1,
        .set_width              = esp_set_width,
        .show_width             = 1,
        .get_signalling         = esp_get_signalling,
};
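
/* The SPI transport class calls these setters when userspace writes
 * the transport attributes in sysfs; the exact path below is
 * illustrative:
 *
 *      # request a narrow transfer goal for target 1 on host 0
 *      echo 0 > /sys/class/spi_transport/target0:0:1/width
 *
 * The setters only record the goal and raise ESP_TGT_CHECK_NEGO, so
 * the change takes effect at the next (re)negotiation.
 */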

static int __init esp_init(void)
{
        esp_transport_template = spi_attach_transport(&esp_transport_ops);
        if (!esp_transport_template)
                return -ENODEV;

        return 0;
}

static void __exit esp_exit(void)
{
        spi_release_transport(esp_transport_template);
}

MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
                 "ESP SCSI bus reset delay in seconds");

module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
"ESP bitmapped debugging message enable value:\n"
"       0x00000001      Log interrupt events\n"
"       0x00000002      Log SCSI commands\n"
"       0x00000004      Log resets\n"
"       0x00000008      Log message in events\n"
"       0x00000010      Log message out events\n"
"       0x00000020      Log command completion\n"
"       0x00000040      Log disconnects\n"
"       0x00000080      Log data start\n"
"       0x00000100      Log data done\n"
"       0x00000200      Log reconnects\n"
"       0x00000400      Log auto-sense data\n"
"       0x00000800      Log driver events\n"
"       0x00001000      Log ESP chip commands\n"
);
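
/* The parameters have no sysfs permissions, so debugging can only be
 * enabled at load time.  Assuming the core is built as esp_scsi.ko
 * (the module name depends on the build), logging interrupts and
 * resets would look like:
 *
 *      modprobe esp_scsi esp_debug=0x5
 *
 * or esp_scsi.esp_debug=0x5 on the kernel command line when built in.
 */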

module_init(esp_init);
module_exit(esp_exit);

#ifdef CONFIG_SCSI_ESP_PIO
static inline unsigned int esp_wait_for_fifo(struct esp *esp)
{
        int i = 500000;

        do {
                unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

                if (fbytes)
                        return fbytes;

                udelay(1);
        } while (--i);

        shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
                     esp_read8(ESP_STATUS));
        return 0;
}

static inline int esp_wait_for_intr(struct esp *esp)
{
        int i = 500000;

        do {
                esp->sreg = esp_read8(ESP_STATUS);
                if (esp->sreg & ESP_STAT_INTR)
                        return 0;

                udelay(1);
        } while (--i);

        shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
                     esp->sreg);
        return 1;
}

#define ESP_FIFO_SIZE 16

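/* Move data through the chip FIFO with programmed I/O instead of DMA.
 * Despite the u32 type, addr is a CPU virtual address of the data
 * buffer; write is nonzero when data flows from the chip into memory
 * (a data-in or message-in phase).  Any untransferred residue is
 * reported in esp->send_cmd_residual for the caller.
 */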
void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
                      u32 dma_count, int write, u8 cmd)
{
        u8 phase = esp->sreg & ESP_STAT_PMASK;

        cmd &= ~ESP_CMD_DMA;
        esp->send_cmd_error = 0;

        if (write) {
                u8 *dst = (u8 *)addr;
                u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);

                scsi_esp_cmd(esp, cmd);

                while (1) {
                        if (!esp_wait_for_fifo(esp))
                                break;

                        *dst++ = readb(esp->fifo_reg);
                        --esp_count;

                        if (!esp_count)
                                break;

                        if (esp_wait_for_intr(esp)) {
                                esp->send_cmd_error = 1;
                                break;
                        }

                        if ((esp->sreg & ESP_STAT_PMASK) != phase)
                                break;

                        esp->ireg = esp_read8(ESP_INTRPT);
                        if (esp->ireg & mask) {
                                esp->send_cmd_error = 1;
                                break;
                        }

                        if (phase == ESP_MIP)
                                esp_write8(ESP_CMD_MOK, ESP_CMD);

                        esp_write8(ESP_CMD_TI, ESP_CMD);
                }
        } else {
                unsigned int n = ESP_FIFO_SIZE;
                u8 *src = (u8 *)addr;

                scsi_esp_cmd(esp, ESP_CMD_FLUSH);

                if (n > esp_count)
                        n = esp_count;
                writesb(esp->fifo_reg, src, n);
                src += n;
                esp_count -= n;

                scsi_esp_cmd(esp, cmd);

                while (esp_count) {
                        if (esp_wait_for_intr(esp)) {
                                esp->send_cmd_error = 1;
                                break;
                        }

                        if ((esp->sreg & ESP_STAT_PMASK) != phase)
                                break;

                        esp->ireg = esp_read8(ESP_INTRPT);
                        if (esp->ireg & ~ESP_INTR_BSERV) {
                                esp->send_cmd_error = 1;
                                break;
                        }

                        /* Refill as much of the FIFO as is free. */
                        n = ESP_FIFO_SIZE -
                            (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);

                        if (n > esp_count)
                                n = esp_count;
                        writesb(esp->fifo_reg, src, n);
                        src += n;
                        esp_count -= n;

                        esp_write8(ESP_CMD_TI, ESP_CMD);
                }
        }

        esp->send_cmd_residual = esp_count;
}
EXPORT_SYMBOL(esp_send_pio_cmd);
#endif