0049 #include <linux/module.h>
0050 #include <linux/moduleparam.h>
0051 #include <linux/delay.h>
0052 #include <linux/ctype.h>
0053 #include <linux/blkdev.h>
0054 #include <linux/interrupt.h>
0055 #include <linux/init.h>
0056 #include <linux/spinlock.h>
0057 #include <linux/pci.h>
0058 #include <linux/list.h>
0059 #include <linux/vmalloc.h>
0060 #include <linux/slab.h>
0061 #include <asm/io.h>
0062
0063 #include <scsi/scsi.h>
0064 #include <scsi/scsi_cmnd.h>
0065 #include <scsi/scsi_device.h>
0066 #include <scsi/scsi_host.h>
0067 #include <scsi/scsi_transport_spi.h>
0068
0069 #include "dc395x.h"
0070
0071 #define DC395X_NAME "dc395x"
0072 #define DC395X_BANNER "Tekram DC395(U/UW/F), DC315(U) - ASIC TRM-S1040"
0073 #define DC395X_VERSION "v2.05, 2004/03/08"
0074
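/* Debug categories: bits tested against DEBUG_MASK by dprintkdbg(). */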
0092 #define DBG_KG 0x0001
0093 #define DBG_0 0x0002
0094 #define DBG_1 0x0004
0095 #define DBG_SG 0x0020
0096 #define DBG_FIFO 0x0040
0097 #define DBG_PIO 0x0080
0098
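/*
 * dprintkl() always logs, prefixing the message with the driver name.
 * dprintkdbg() logs only when the given category bit is set in the
 * compile-time DEBUG_MASK, and compiles away to nothing otherwise.
 */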
0112 #define dprintkl(level, format, arg...) \
0113 printk(level DC395X_NAME ": " format , ## arg)
0114
0115
0116 #ifdef DEBUG_MASK
0117
0118
0119
0120
0121
0122
0123 #define dprintkdbg(type, format, arg...) \
0124 do { \
0125 if ((type) & (DEBUG_MASK)) \
0126 dprintkl(KERN_DEBUG , format , ## arg); \
0127 } while (0)
0128
0129
0130
0131
0132 #define debug_enabled(type) ((DEBUG_MASK) & (type))
0133
0134 #else
0135
0136
0137
0138 #define dprintkdbg(type, format, arg...) \
0139 do {} while (0)
0140 #define debug_enabled(type) (0)
0141
0142 #endif
0143
0144
0145 #ifndef PCI_VENDOR_ID_TEKRAM
0146 #define PCI_VENDOR_ID_TEKRAM 0x1DE1
0147 #endif
0148 #ifndef PCI_DEVICE_ID_TEKRAM_TRMS1040
0149 #define PCI_DEVICE_ID_TEKRAM_TRMS1040 0x0391
0150 #endif
0151
0152
0153 #define DC395x_LOCK_IO(dev,flags) spin_lock_irqsave(((struct Scsi_Host *)dev)->host_lock, flags)
0154 #define DC395x_UNLOCK_IO(dev,flags) spin_unlock_irqrestore(((struct Scsi_Host *)dev)->host_lock, flags)
0155
0156 #define DC395x_read8(acb,address) (u8)(inb(acb->io_port_base + (address)))
0157 #define DC395x_read16(acb,address) (u16)(inw(acb->io_port_base + (address)))
0158 #define DC395x_read32(acb,address) (u32)(inl(acb->io_port_base + (address)))
0159 #define DC395x_write8(acb,address,value) outb((value), acb->io_port_base + (address))
0160 #define DC395x_write16(acb,address,value) outw((value), acb->io_port_base + (address))
0161 #define DC395x_write32(acb,address,value) outl((value), acb->io_port_base + (address))
0162
0163 #define TAG_NONE 255
0164
0165
0166
0167
0168
0169
0170 #define SEGMENTX_LEN (sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)
0171
0172
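/*
 * Scatter/gather descriptor in the format handed to the TRM-S1040 DMA
 * engine: a 32-bit bus address plus a 32-bit length. Each request owns
 * a table of DC395x_MAX_SG_LISTENTRY of these (SEGMENTX_LEN bytes),
 * which is itself DMA-mapped before a transfer is started.
 */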
0173 struct SGentry {
0174 u32 address;
0175 u32 length;
0176 };
0177
0178
0179 struct NVRamTarget {
0180 u8 cfg0;
0181 u8 period;
0182 u8 cfg2;
0183 u8 cfg3;
0184 };
0185
0186 struct NvRamType {
0187 u8 sub_vendor_id[2];
0188 u8 sub_sys_id[2];
0189 u8 sub_class;
0190 u8 vendor_id[2];
0191 u8 device_id[2];
0192 u8 reserved;
0193 struct NVRamTarget target[DC395x_MAX_SCSI_ID];
0194
0195
0196
0197
0198
0199
0200 u8 scsi_id;
0201 u8 channel_cfg;
0202 u8 delay_time;
0203 u8 max_tag;
0204 u8 reserved0;
0205 u8 boot_target;
0206 u8 boot_lun;
0207 u8 reserved1;
0208 u16 reserved2[22];
0209 u16 cksum;
0210 };
0211
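/*
 * Per-command request block. Tracks one SCSI command from queuecommand
 * to completion: its scatter/gather table and DMA mapping, remaining
 * and requested transfer lengths, the assigned tag, and the message,
 * status and bus-phase bookkeeping used by the interrupt state machine.
 */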
0212 struct ScsiReqBlk {
0213 struct list_head list;
0214 struct DeviceCtlBlk *dcb;
0215 struct scsi_cmnd *cmd;
0216
0217 struct SGentry *segment_x;
0218 dma_addr_t sg_bus_addr;
0219
0220 u8 sg_count;
0221 u8 sg_index;
0222 size_t total_xfer_length;
0223 size_t request_length;
0224
0225
0226
0227
0228
0229
0230
0231
0232
0233 size_t xferred;
0234
0235 u16 state;
0236
0237 u8 msgin_buf[6];
0238 u8 msgout_buf[6];
0239
0240 u8 adapter_status;
0241 u8 target_status;
0242 u8 msg_count;
0243 u8 end_message;
0244
0245 u8 tag_number;
0246 u8 status;
0247 u8 retry_count;
0248 u8 flag;
0249
0250 u8 scsi_phase;
0251 };
0252
0253 struct DeviceCtlBlk {
0254 struct list_head list;
0255 struct AdapterCtlBlk *acb;
0256 struct list_head srb_going_list;
0257 struct list_head srb_waiting_list;
0258
0259 struct ScsiReqBlk *active_srb;
0260 u32 tag_mask;
0261
0262 u16 max_command;
0263
0264 u8 target_id;
0265 u8 target_lun;
0266 u8 identify_msg;
0267 u8 dev_mode;
0268
0269 u8 inquiry7;
0270 u8 sync_mode;
0271 u8 min_nego_period;
0272 u8 sync_period;
0273
0274 u8 sync_offset;
0275 u8 flag;
0276 u8 dev_type;
0277 u8 init_tcq_flag;
0278 };
0279
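/*
 * Per-adapter state: the TRM-S1040 I/O port window, the list of device
 * control blocks with the round-robin cursor, the free SRB pool, the
 * command-waiting and selection-timeout timers, and a copy of the
 * card's EEPROM configuration.
 */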
0280 struct AdapterCtlBlk {
0281 struct Scsi_Host *scsi_host;
0282
0283 unsigned long io_port_base;
0284 unsigned long io_port_len;
0285
0286 struct list_head dcb_list;
0287 struct DeviceCtlBlk *dcb_run_robin;
0288 struct DeviceCtlBlk *active_dcb;
0289
0290 struct list_head srb_free_list;
0291 struct ScsiReqBlk *tmp_srb;
0292 struct timer_list waiting_timer;
0293 struct timer_list selto_timer;
0294
0295 unsigned long last_reset;
0296
0297 u16 srb_count;
0298
0299 u8 sel_timeout;
0300
0301 unsigned int irq_level;
0302 u8 tag_max_num;
0303 u8 acb_flag;
0304 u8 gmode2;
0305
0306 u8 config;
0307 u8 lun_chk;
0308 u8 scan_devices;
0309 u8 hostid_bit;
0310
0311 u8 dcb_map[DC395x_MAX_SCSI_ID];
0312 struct DeviceCtlBlk *children[DC395x_MAX_SCSI_ID][32];
0313
0314 struct pci_dev *dev;
0315
0316 u8 msg_len;
0317
0318 struct ScsiReqBlk srb_array[DC395x_MAX_SRB_CNT];
0319 struct ScsiReqBlk srb;
0320
0321 struct NvRamType eeprom;
0322 };
0323
0324
0325
0326
0327
0328 static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
0329 u16 *pscsi_status);
0330 static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
0331 u16 *pscsi_status);
0332 static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
0333 u16 *pscsi_status);
0334 static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
0335 u16 *pscsi_status);
0336 static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
0337 u16 *pscsi_status);
0338 static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
0339 u16 *pscsi_status);
0340 static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
0341 u16 *pscsi_status);
0342 static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
0343 u16 *pscsi_status);
0344 static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
0345 u16 *pscsi_status);
0346 static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
0347 u16 *pscsi_status);
0348 static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
0349 u16 *pscsi_status);
0350 static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
0351 u16 *pscsi_status);
0352 static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
0353 u16 *pscsi_status);
0354 static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
0355 u16 *pscsi_status);
0356 static void set_basic_config(struct AdapterCtlBlk *acb);
0357 static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
0358 struct ScsiReqBlk *srb);
0359 static void reset_scsi_bus(struct AdapterCtlBlk *acb);
0360 static void data_io_transfer(struct AdapterCtlBlk *acb,
0361 struct ScsiReqBlk *srb, u16 io_dir);
0362 static void disconnect(struct AdapterCtlBlk *acb);
0363 static void reselect(struct AdapterCtlBlk *acb);
0364 static u8 start_scsi(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
0365 struct ScsiReqBlk *srb);
0366 static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
0367 struct ScsiReqBlk *srb);
0368 static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
0369 struct ScsiReqBlk *srb);
0370 static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_code,
0371 struct scsi_cmnd *cmd, u8 force);
0372 static void scsi_reset_detect(struct AdapterCtlBlk *acb);
0373 static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb);
0374 static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
0375 struct ScsiReqBlk *srb);
0376 static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
0377 struct ScsiReqBlk *srb);
0378 static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
0379 struct ScsiReqBlk *srb);
0380 static void set_xfer_rate(struct AdapterCtlBlk *acb,
0381 struct DeviceCtlBlk *dcb);
0382 static void waiting_timeout(struct timer_list *t);
0383
0384
0385
0386
0387
0388 static u16 current_sync_offset = 0;
0389
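/*
 * Handler tables for the interrupt state machine, indexed by SCSI bus
 * phase (0 data out, 1 data in, 2 command, 3 status, 6 message out,
 * 7 message in; the reserved phases 4 and 5 map to the nop handlers).
 * The phase0 routines wind up the phase that has just ended, the
 * phase1 routines set the chip up for the phase being entered.
 */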
0390 static void *dc395x_scsi_phase0[] = {
0391 data_out_phase0,
0392 data_in_phase0,
0393 command_phase0,
0394 status_phase0,
0395 nop0,
0396 nop0,
0397 msgout_phase0,
0398 msgin_phase0,
0399 };
0400
0401 static void *dc395x_scsi_phase1[] = {
0402 data_out_phase1,
0403 data_in_phase1,
0404 command_phase1,
0405 status_phase1,
0406 nop1,
0407 nop1,
0408 msgout_phase1,
0409 msgin_phase1,
0410 };
0411
0433
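/*
 * Translation tables for the 3-bit EEPROM "period" index. clock_period[]
 * appears to hold the minimum synchronous period as an SDTR transfer
 * period factor (units of 4 ns), and clock_speed[] the matching bus
 * frequency in units of 100 kHz (200 = 20.0 MHz ... 40 = 4.0 MHz),
 * which lines up with the speeds listed for the max_speed parameter.
 */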
0434 static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 };
0435 static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 };
0436
0451 #define CFG_ADAPTER_ID 0
0452 #define CFG_MAX_SPEED 1
0453 #define CFG_DEV_MODE 2
0454 #define CFG_ADAPTER_MODE 3
0455 #define CFG_TAGS 4
0456 #define CFG_RESET_DELAY 5
0457
0458 #define CFG_NUM 6
0459
0460
0461
0462
0463
0464
0465 #define CFG_PARAM_UNSET -1
0466
0467
0468
0469
0470
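/*
 * Module parameter bookkeeping: the value given on the command line
 * (or CFG_PARAM_UNSET), the permitted min/max range, the default used
 * by fix_settings() when the value is out of range, and the
 * conservative value chosen when the "safe" option is set.
 */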
0471 struct ParameterData {
0472 int value;
0473 int min;
0474 int max;
0475 int def;
0476 int safe;
0477 };
0478 static struct ParameterData cfg_data[] = {
0479 {
0480 CFG_PARAM_UNSET,
0481 0,
0482 15,
0483 7,
0484 7
0485 },
0486 {
0487 CFG_PARAM_UNSET,
0488 0,
0489 7,
0490 1,
0491 4,
0492 },
0493 {
0494 CFG_PARAM_UNSET,
0495 0,
0496 0x3f,
0497 NTC_DO_PARITY_CHK | NTC_DO_DISCONNECT | NTC_DO_SYNC_NEGO |
0498 NTC_DO_WIDE_NEGO | NTC_DO_TAG_QUEUEING |
0499 NTC_DO_SEND_START,
0500 NTC_DO_PARITY_CHK | NTC_DO_SEND_START
0501 },
0502 {
0503 CFG_PARAM_UNSET,
0504 0,
0505 0x2f,
0506 NAC_SCANLUN |
0507 NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET
0508 ,
0509 NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 0x08
0510 },
0511 {
0512 CFG_PARAM_UNSET,
0513 0,
0514 5,
0515 3,
0516 2,
0517 },
0518 {
0519 CFG_PARAM_UNSET,
0520 0,
0521 180,
0522 1,
0523 10,
0524 }
0525 };
0526
0527
0528
0529
0530
0531
0532
static bool use_safe_settings = false;
0534 module_param_named(safe, use_safe_settings, bool, 0);
0535 MODULE_PARM_DESC(safe, "Use safe and slow settings only. Default: false");
0536
0537
0538 module_param_named(adapter_id, cfg_data[CFG_ADAPTER_ID].value, int, 0);
0539 MODULE_PARM_DESC(adapter_id, "Adapter SCSI ID. Default 7 (0-15)");
0540
0541 module_param_named(max_speed, cfg_data[CFG_MAX_SPEED].value, int, 0);
0542 MODULE_PARM_DESC(max_speed, "Maximum bus speed. Default 1 (0-7) Speeds: 0=20, 1=13.3, 2=10, 3=8, 4=6.7, 5=5.8, 6=5, 7=4 Mhz");
0543
0544 module_param_named(dev_mode, cfg_data[CFG_DEV_MODE].value, int, 0);
0545 MODULE_PARM_DESC(dev_mode, "Device mode.");
0546
0547 module_param_named(adapter_mode, cfg_data[CFG_ADAPTER_MODE].value, int, 0);
0548 MODULE_PARM_DESC(adapter_mode, "Adapter mode.");
0549
0550 module_param_named(tags, cfg_data[CFG_TAGS].value, int, 0);
0551 MODULE_PARM_DESC(tags, "Number of tags (1<<x). Default 3 (0-5)");
0552
0553 module_param_named(reset_delay, cfg_data[CFG_RESET_DELAY].value, int, 0);
0554 MODULE_PARM_DESC(reset_delay, "Reset delay in seconds. Default 1 (0-180)");
0555
0556
0557
0558
0559
0560
0561 static void set_safe_settings(void)
0562 {
0563 if (use_safe_settings)
0564 {
0565 int i;
0566
0567 dprintkl(KERN_INFO, "Using safe settings.\n");
0568 for (i = 0; i < CFG_NUM; i++)
0569 {
0570 cfg_data[i].value = cfg_data[i].safe;
0571 }
0572 }
0573 }
0574
0575
0576
0577
0578
0579
0580 static void fix_settings(void)
0581 {
0582 int i;
0583
0584 dprintkdbg(DBG_1,
0585 "setup: AdapterId=%08x MaxSpeed=%08x DevMode=%08x "
0586 "AdapterMode=%08x Tags=%08x ResetDelay=%08x\n",
0587 cfg_data[CFG_ADAPTER_ID].value,
0588 cfg_data[CFG_MAX_SPEED].value,
0589 cfg_data[CFG_DEV_MODE].value,
0590 cfg_data[CFG_ADAPTER_MODE].value,
0591 cfg_data[CFG_TAGS].value,
0592 cfg_data[CFG_RESET_DELAY].value);
0593 for (i = 0; i < CFG_NUM; i++)
0594 {
0595 if (cfg_data[i].value < cfg_data[i].min
0596 || cfg_data[i].value > cfg_data[i].max)
0597 cfg_data[i].value = cfg_data[i].def;
0598 }
0599 }
0600
0601
0602
0603
0604
0605
0606
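/*
 * The EEPROM stores the power-on reset delay as an index into this
 * table of seconds. eeprom_index_to_delay() converts the stored index
 * to seconds in place; delay_to_eeprom_index() returns the smallest
 * index whose delay is at least the requested one, capping at 7 (120s).
 */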
0607 static char eeprom_index_to_delay_map[] =
0608 { 1, 3, 5, 10, 16, 30, 60, 120 };
0609
0610
0611
0612
0613
0614
0615
0616
0617 static void eeprom_index_to_delay(struct NvRamType *eeprom)
0618 {
0619 eeprom->delay_time = eeprom_index_to_delay_map[eeprom->delay_time];
0620 }
0621
0622
0623
0624
0625
0626
0627
0628
0629
0630 static int delay_to_eeprom_index(int delay)
0631 {
0632 u8 idx = 0;
0633 while (idx < 7 && eeprom_index_to_delay_map[idx] < delay)
0634 idx++;
0635 return idx;
0636 }
0637
0638
0639
0640
0641
0642
0643
0644
0645
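/*
 * Overlay any module parameters that were explicitly set onto the
 * values read from the EEPROM: host SCSI id, adapter mode, reset delay
 * and tag depth for the adapter, plus device mode and maximum speed
 * for every target.
 */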
0646 static void eeprom_override(struct NvRamType *eeprom)
0647 {
0648 u8 id;
0649
0650
0651 if (cfg_data[CFG_ADAPTER_ID].value != CFG_PARAM_UNSET)
0652 eeprom->scsi_id = (u8)cfg_data[CFG_ADAPTER_ID].value;
0653
0654 if (cfg_data[CFG_ADAPTER_MODE].value != CFG_PARAM_UNSET)
0655 eeprom->channel_cfg = (u8)cfg_data[CFG_ADAPTER_MODE].value;
0656
0657 if (cfg_data[CFG_RESET_DELAY].value != CFG_PARAM_UNSET)
0658 eeprom->delay_time = delay_to_eeprom_index(
0659 cfg_data[CFG_RESET_DELAY].value);
0660
0661 if (cfg_data[CFG_TAGS].value != CFG_PARAM_UNSET)
0662 eeprom->max_tag = (u8)cfg_data[CFG_TAGS].value;
0663
0664
0665 for (id = 0; id < DC395x_MAX_SCSI_ID; id++) {
0666 if (cfg_data[CFG_DEV_MODE].value != CFG_PARAM_UNSET)
0667 eeprom->target[id].cfg0 =
0668 (u8)cfg_data[CFG_DEV_MODE].value;
0669
0670 if (cfg_data[CFG_MAX_SPEED].value != CFG_PARAM_UNSET)
0671 eeprom->target[id].period =
0672 (u8)cfg_data[CFG_MAX_SPEED].value;
0673
0674 }
0675 }
0676
0677
0678
0679
0680
0681 static unsigned int list_size(struct list_head *head)
0682 {
0683 unsigned int count = 0;
0684 struct list_head *pos;
0685 list_for_each(pos, head)
0686 count++;
0687 return count;
0688 }
0689
0690
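/*
 * Return the device control block following @pos in @head, wrapping to
 * the first entry when @pos is the last one (or is not on the list).
 * Used for the round-robin walk over devices with waiting commands.
 */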
0691 static struct DeviceCtlBlk *dcb_get_next(struct list_head *head,
0692 struct DeviceCtlBlk *pos)
0693 {
0694 int use_next = 0;
0695 struct DeviceCtlBlk* next = NULL;
0696 struct DeviceCtlBlk* i;
0697
0698 if (list_empty(head))
0699 return NULL;
0700
0701
0702 list_for_each_entry(i, head, list)
0703 if (use_next) {
0704 next = i;
0705 break;
0706 } else if (i == pos) {
0707 use_next = 1;
0708 }
0709
0710 if (!next)
0711 list_for_each_entry(i, head, list) {
0712 next = i;
0713 break;
0714 }
0715
0716 return next;
0717 }
0718
0719
0720 static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
0721 {
0722 if (srb->tag_number < 255) {
0723 dcb->tag_mask &= ~(1 << srb->tag_number);
0724 srb->tag_number = 255;
0725 }
0726 }
0727
0728
0729
0730 static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
0731 struct list_head *head)
0732 {
0733 struct ScsiReqBlk *i;
0734 list_for_each_entry(i, head, list)
0735 if (i->cmd == cmd)
0736 return i;
0737 return NULL;
0738 }
0739
0740
0741 static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
0742 {
0743 if (timer_pending(&acb->waiting_timer))
0744 return;
0745 if (time_before(jiffies + to, acb->last_reset - HZ / 2))
0746 acb->waiting_timer.expires =
0747 acb->last_reset - HZ / 2 + 1;
0748 else
0749 acb->waiting_timer.expires = jiffies + to + 1;
0750 add_timer(&acb->waiting_timer);
0751 }
0752
0753
0754
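/*
 * Try to start the next waiting command. Beginning at the round-robin
 * cursor, walk the device list once; for the first device that has a
 * waiting SRB and is below its max_command limit, attempt to start the
 * command. On success the SRB moves to the going list; if the bus was
 * busy the waiting timer is re-armed and we try again later.
 */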
0755 static void waiting_process_next(struct AdapterCtlBlk *acb)
0756 {
0757 struct DeviceCtlBlk *start = NULL;
0758 struct DeviceCtlBlk *pos;
0759 struct DeviceCtlBlk *dcb;
0760 struct ScsiReqBlk *srb;
0761 struct list_head *dcb_list_head = &acb->dcb_list;
0762
0763 if (acb->active_dcb
0764 || (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV)))
0765 return;
0766
0767 if (timer_pending(&acb->waiting_timer))
0768 del_timer(&acb->waiting_timer);
0769
0770 if (list_empty(dcb_list_head))
0771 return;
0772
0773
0774
0775
0776
0777 list_for_each_entry(dcb, dcb_list_head, list)
0778 if (dcb == acb->dcb_run_robin) {
0779 start = dcb;
0780 break;
0781 }
0782 if (!start) {
0783
0784 start = list_entry(dcb_list_head->next, typeof(*start), list);
0785 acb->dcb_run_robin = start;
0786 }
0787
0788
0789
0790
0791
0792
0793 pos = start;
0794 do {
0795 struct list_head *waiting_list_head = &pos->srb_waiting_list;
0796
0797
0798 acb->dcb_run_robin = dcb_get_next(dcb_list_head,
0799 acb->dcb_run_robin);
0800
0801 if (list_empty(waiting_list_head) ||
0802 pos->max_command <= list_size(&pos->srb_going_list)) {
0803
0804 pos = dcb_get_next(dcb_list_head, pos);
0805 } else {
0806 srb = list_entry(waiting_list_head->next,
0807 struct ScsiReqBlk, list);
0808
0809
0810 if (!start_scsi(acb, pos, srb))
0811 list_move(&srb->list, &pos->srb_going_list);
0812 else
0813 waiting_set_timer(acb, HZ/50);
0814 break;
0815 }
0816 } while (pos != start);
0817 }
0818
0819
0820
0821 static void waiting_timeout(struct timer_list *t)
0822 {
0823 unsigned long flags;
0824 struct AdapterCtlBlk *acb = from_timer(acb, t, waiting_timer);
0825 dprintkdbg(DBG_1,
0826 "waiting_timeout: Queue woken up by timer. acb=%p\n", acb);
0827 DC395x_LOCK_IO(acb->scsi_host, flags);
0828 waiting_process_next(acb);
0829 DC395x_UNLOCK_IO(acb->scsi_host, flags);
0830 }
0831
0832
0833
0834 static struct DeviceCtlBlk *find_dcb(struct AdapterCtlBlk *acb, u8 id, u8 lun)
0835 {
0836 return acb->children[id][lun];
0837 }
0838
0839
0840
0841 static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
0842 {
0843 struct DeviceCtlBlk *dcb = srb->dcb;
0844
0845 if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
0846 acb->active_dcb ||
0847 (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
0848 list_add_tail(&srb->list, &dcb->srb_waiting_list);
0849 waiting_process_next(acb);
0850 return;
0851 }
0852
0853 if (!start_scsi(acb, dcb, srb)) {
0854 list_add_tail(&srb->list, &dcb->srb_going_list);
0855 } else {
0856 list_add(&srb->list, &dcb->srb_waiting_list);
0857 waiting_set_timer(acb, HZ / 50);
0858 }
0859 }
0860
0861
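/*
 * Fill in a ScsiReqBlk for @cmd: reset the bookkeeping fields, DMA-map
 * the command's scatter/gather list and copy it into the adapter's
 * SGentry format, trim the last entry if the segments cover more than
 * the request length, pad to an even count for wide transfers, and
 * finally map the SG table itself so the chip can fetch it.
 */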
0862 static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
0863 struct ScsiReqBlk *srb)
0864 {
0865 int nseg;
0866 enum dma_data_direction dir = cmd->sc_data_direction;
0867 dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n",
0868 cmd, dcb->target_id, dcb->target_lun);
0869
0870 srb->dcb = dcb;
0871 srb->cmd = cmd;
0872 srb->sg_count = 0;
0873 srb->total_xfer_length = 0;
0874 srb->sg_bus_addr = 0;
0875 srb->sg_index = 0;
0876 srb->adapter_status = 0;
0877 srb->target_status = 0;
0878 srb->msg_count = 0;
0879 srb->status = 0;
0880 srb->flag = 0;
0881 srb->state = 0;
0882 srb->retry_count = 0;
0883 srb->tag_number = TAG_NONE;
0884 srb->scsi_phase = PH_BUS_FREE;
0885 srb->end_message = 0;
0886
0887 nseg = scsi_dma_map(cmd);
0888 BUG_ON(nseg < 0);
0889
0890 if (dir == DMA_NONE || !nseg) {
0891 dprintkdbg(DBG_0,
0892 "build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
			scsi_bufflen(cmd), scsi_sglist(cmd), scsi_sg_count(cmd),
0894 srb->segment_x[0].address);
0895 } else {
0896 int i;
0897 u32 reqlen = scsi_bufflen(cmd);
0898 struct scatterlist *sg;
0899 struct SGentry *sgp = srb->segment_x;
0900
0901 srb->sg_count = nseg;
0902
0903 dprintkdbg(DBG_0,
0904 "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
0905 reqlen, scsi_sglist(cmd), scsi_sg_count(cmd),
0906 srb->sg_count);
0907
0908 scsi_for_each_sg(cmd, sg, srb->sg_count, i) {
0909 u32 busaddr = (u32)sg_dma_address(sg);
0910 u32 seglen = (u32)sg->length;
0911 sgp[i].address = busaddr;
0912 sgp[i].length = seglen;
0913 srb->total_xfer_length += seglen;
0914 }
0915 sgp += srb->sg_count - 1;
0916
0917
0918
0919
0920
0921 if (srb->total_xfer_length > reqlen) {
0922 sgp->length -= (srb->total_xfer_length - reqlen);
0923 srb->total_xfer_length = reqlen;
0924 }
0925
0926
0927 if (dcb->sync_period & WIDE_SYNC &&
0928 srb->total_xfer_length % 2) {
0929 srb->total_xfer_length++;
0930 sgp->length++;
0931 }
0932
0933 srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev,
0934 srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE);
0935
0936 dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
0937 srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
0938 }
0939
0940 srb->request_length = srb->total_xfer_length;
0941 }
0961
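/*
 * queuecommand entry point, called via DEF_SCSI_QCMD with the host
 * lock held. Checks the target/lun against the adapter limits and the
 * dcb_map, grabs a free SRB and either queues it behind other waiting
 * commands for the device or sends it straight away. Commands for
 * unknown devices complete with DID_BAD_TARGET; if no SRB is free the
 * function bails out with a nonzero return value.
 */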
0962 static int dc395x_queue_command_lck(struct scsi_cmnd *cmd)
0963 {
0964 void (*done)(struct scsi_cmnd *) = scsi_done;
0965 struct DeviceCtlBlk *dcb;
0966 struct ScsiReqBlk *srb;
0967 struct AdapterCtlBlk *acb =
0968 (struct AdapterCtlBlk *)cmd->device->host->hostdata;
0969 dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n",
0970 cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]);
0971
0972
0973 set_host_byte(cmd, DID_BAD_TARGET);
0974
0975
0976 if (cmd->device->id >= acb->scsi_host->max_id ||
0977 cmd->device->lun >= acb->scsi_host->max_lun ||
	    cmd->device->lun > 31) {
0979 goto complete;
0980 }
0981
0982
0983 if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) {
0984 dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n",
0985 cmd->device->id, (u8)cmd->device->lun);
0986 goto complete;
0987 }
0988
0989
0990 dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
0991 if (!dcb) {
0992
0993 dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>",
0994 cmd->device->id, (u8)cmd->device->lun);
0995 goto complete;
0996 }
0997
0998 set_host_byte(cmd, DID_OK);
0999 set_status_byte(cmd, SAM_STAT_GOOD);
1000
1001 srb = list_first_entry_or_null(&acb->srb_free_list,
1002 struct ScsiReqBlk, list);
1003 if (!srb) {
1004
1005
1006
1007
1008 dprintkdbg(DBG_0, "queue_command: No free srb's\n");
1009 return 1;
1010 }
1011 list_del(&srb->list);
1012
1013 build_srb(cmd, dcb, srb);
1014
1015 if (!list_empty(&dcb->srb_waiting_list)) {
1016
1017 list_add_tail(&srb->list, &dcb->srb_waiting_list);
1018 waiting_process_next(acb);
1019 } else {
1020
1021 send_srb(acb, srb);
1022 }
1023 dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd);
1024 return 0;
1025
1026 complete:
1027
1028
1029
1030
1031
1032
1033 done(cmd);
1034 return 0;
1035 }
1036
1037 static DEF_SCSI_QCMD(dc395x_queue_command)
1038
1039 static void dump_register_info(struct AdapterCtlBlk *acb,
1040 struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
1041 {
1042 u16 pstat;
1043 struct pci_dev *dev = acb->dev;
1044 pci_read_config_word(dev, PCI_STATUS, &pstat);
1045 if (!dcb)
1046 dcb = acb->active_dcb;
1047 if (!srb && dcb)
1048 srb = dcb->active_srb;
1049 if (srb) {
1050 if (!srb->cmd)
1051 dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n",
1052 srb, srb->cmd);
1053 else
1054 dprintkl(KERN_INFO, "dump: srb=%p cmd=%p "
1055 "cmnd=0x%02x <%02i-%i>\n",
1056 srb, srb->cmd,
1057 srb->cmd->cmnd[0], srb->cmd->device->id,
1058 (u8)srb->cmd->device->lun);
1059 printk(" sglist=%p cnt=%i idx=%i len=%zu\n",
1060 srb->segment_x, srb->sg_count, srb->sg_index,
1061 srb->total_xfer_length);
1062 printk(" state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n",
1063 srb->state, srb->status, srb->scsi_phase,
1064 (acb->active_dcb) ? "" : "not");
1065 }
1066 dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x "
1067 "signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x "
1068 "rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x "
1069 "config2=0x%02x cmd=0x%02x selto=0x%02x}\n",
1070 DC395x_read16(acb, TRM_S1040_SCSI_STATUS),
1071 DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
1072 DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL),
1073 DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS),
1074 DC395x_read8(acb, TRM_S1040_SCSI_SYNC),
1075 DC395x_read8(acb, TRM_S1040_SCSI_TARGETID),
1076 DC395x_read8(acb, TRM_S1040_SCSI_IDMSG),
1077 DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
1078 DC395x_read8(acb, TRM_S1040_SCSI_INTEN),
1079 DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0),
1080 DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2),
1081 DC395x_read8(acb, TRM_S1040_SCSI_COMMAND),
1082 DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT));
1083 dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x "
1084 "irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x "
1085 "ctctr=0x%08x addr=0x%08x:0x%08x}\n",
1086 DC395x_read16(acb, TRM_S1040_DMA_COMMAND),
1087 DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
1088 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
1089 DC395x_read8(acb, TRM_S1040_DMA_STATUS),
1090 DC395x_read8(acb, TRM_S1040_DMA_INTEN),
1091 DC395x_read16(acb, TRM_S1040_DMA_CONFIG),
1092 DC395x_read32(acb, TRM_S1040_DMA_XCNT),
1093 DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
1094 DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR),
1095 DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR));
1096 dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} "
1097 "pci{status=0x%04x}\n",
1098 DC395x_read8(acb, TRM_S1040_GEN_CONTROL),
1099 DC395x_read8(acb, TRM_S1040_GEN_STATUS),
1100 DC395x_read8(acb, TRM_S1040_GEN_TIMER),
1101 pstat);
1102 }
1103
1104
1105 static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt)
1106 {
1107 #if debug_enabled(DBG_FIFO)
1108 u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
1109 u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
1110 if (!(fifocnt & 0x40))
1111 dprintkdbg(DBG_FIFO,
1112 "clear_fifo: (%i bytes) on phase %02x in %s\n",
1113 fifocnt & 0x3f, lines, txt);
1114 #endif
1115 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO);
1116 }
1117
1118
1119 static void reset_dev_param(struct AdapterCtlBlk *acb)
1120 {
1121 struct DeviceCtlBlk *dcb;
1122 struct NvRamType *eeprom = &acb->eeprom;
1123 dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb);
1124
1125 list_for_each_entry(dcb, &acb->dcb_list, list) {
1126 u8 period_index;
1127
1128 dcb->sync_mode &= ~(SYNC_NEGO_DONE + WIDE_NEGO_DONE);
1129 dcb->sync_period = 0;
1130 dcb->sync_offset = 0;
1131
1132 dcb->dev_mode = eeprom->target[dcb->target_id].cfg0;
1133 period_index = eeprom->target[dcb->target_id].period & 0x07;
1134 dcb->min_nego_period = clock_period[period_index];
1135 if (!(dcb->dev_mode & NTC_DO_WIDE_NEGO)
1136 || !(acb->config & HCC_WIDE_CARD))
1137 dcb->sync_mode &= ~WIDE_NEGO_ENABLE;
1138 }
1139 }
1140
1141
1142
1143
1144
1145
1146
1147 static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
1148 {
1149 struct AdapterCtlBlk *acb =
1150 (struct AdapterCtlBlk *)cmd->device->host->hostdata;
1151 dprintkl(KERN_INFO,
1152 "eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n",
1153 cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
1154
1155 if (timer_pending(&acb->waiting_timer))
1156 del_timer(&acb->waiting_timer);
1157
1158
1159
1160
1161 DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
1162 DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
1163 DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
1164 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
1165
1166 reset_scsi_bus(acb);
1167 udelay(500);
1168
1169
1170 acb->last_reset =
1171 jiffies + 3 * HZ / 2 +
1172 HZ * acb->eeprom.delay_time;
1173
1174
1175
1176
1177
1178 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
1179 clear_fifo(acb, "eh_bus_reset");
1180
1181 DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
1182 set_basic_config(acb);
1183
1184 reset_dev_param(acb);
1185 doing_srb_done(acb, DID_RESET, cmd, 0);
1186 acb->active_dcb = NULL;
1187 acb->acb_flag = 0;
1188 waiting_process_next(acb);
1189
1190 return SUCCESS;
1191 }
1192
1193 static int dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
1194 {
1195 int rc;
1196
1197 spin_lock_irq(cmd->device->host->host_lock);
1198 rc = __dc395x_eh_bus_reset(cmd);
1199 spin_unlock_irq(cmd->device->host->host_lock);
1200
1201 return rc;
1202 }
1203
1204
1205
1206
1207
1208
1209 static int dc395x_eh_abort(struct scsi_cmnd *cmd)
1210 {
1211
1212
1213
1214
1215 struct AdapterCtlBlk *acb =
1216 (struct AdapterCtlBlk *)cmd->device->host->hostdata;
1217 struct DeviceCtlBlk *dcb;
1218 struct ScsiReqBlk *srb;
1219 dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n",
1220 cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
1221
1222 dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
1223 if (!dcb) {
1224 dprintkl(KERN_DEBUG, "eh_abort: No such device\n");
1225 return FAILED;
1226 }
1227
1228 srb = find_cmd(cmd, &dcb->srb_waiting_list);
1229 if (srb) {
1230 list_del(&srb->list);
1231 pci_unmap_srb_sense(acb, srb);
1232 pci_unmap_srb(acb, srb);
1233 free_tag(dcb, srb);
1234 list_add_tail(&srb->list, &acb->srb_free_list);
1235 dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
1236 set_host_byte(cmd, DID_ABORT);
1237 return SUCCESS;
1238 }
1239 srb = find_cmd(cmd, &dcb->srb_going_list);
1240 if (srb) {
1241 dprintkl(KERN_DEBUG, "eh_abort: Command in progress\n");
1242
1243 } else {
1244 dprintkl(KERN_DEBUG, "eh_abort: Command not found\n");
1245 }
1246 return FAILED;
1247 }
1248
1249
1250
1251 static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
1252 struct ScsiReqBlk *srb)
1253 {
1254 u8 *ptr = srb->msgout_buf + srb->msg_count;
1255 if (srb->msg_count > 1) {
1256 dprintkl(KERN_INFO,
1257 "build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n",
1258 srb->msg_count, srb->msgout_buf[0],
1259 srb->msgout_buf[1]);
1260 return;
1261 }
1262 if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) {
1263 dcb->sync_offset = 0;
1264 dcb->min_nego_period = 200 >> 2;
1265 } else if (dcb->sync_offset == 0)
1266 dcb->sync_offset = SYNC_NEGO_OFFSET;
1267
1268 srb->msg_count += spi_populate_sync_msg(ptr, dcb->min_nego_period,
1269 dcb->sync_offset);
1270 srb->state |= SRB_DO_SYNC_NEGO;
1271 }
1272
1273
1274
1275 static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
1276 struct ScsiReqBlk *srb)
1277 {
1278 u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) &
1279 (acb->config & HCC_WIDE_CARD)) ? 1 : 0;
1280 u8 *ptr = srb->msgout_buf + srb->msg_count;
1281 if (srb->msg_count > 1) {
1282 dprintkl(KERN_INFO,
1283 "build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n",
1284 srb->msg_count, srb->msgout_buf[0],
1285 srb->msgout_buf[1]);
1286 return;
1287 }
1288 srb->msg_count += spi_populate_width_msg(ptr, wide);
1289 srb->state |= SRB_DO_WIDE_NEGO;
1290 }
1291
1292
1293 #if 0
1294
1295
1296 void selection_timeout_missed(unsigned long ptr);
1297
1298 static void selto_timer(struct AdapterCtlBlk *acb)
1299 {
1300 if (timer_pending(&acb->selto_timer))
1301 return;
1302 acb->selto_timer.function = selection_timeout_missed;
1303 acb->selto_timer.data = (unsigned long) acb;
1304 if (time_before
1305 (jiffies + HZ, acb->last_reset + HZ / 2))
1306 acb->selto_timer.expires =
1307 acb->last_reset + HZ / 2 + 1;
1308 else
1309 acb->selto_timer.expires = jiffies + HZ + 1;
1310 add_timer(&acb->selto_timer);
1311 }
1312
1313
1314 void selection_timeout_missed(unsigned long ptr)
1315 {
1316 unsigned long flags;
1317 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
1318 struct ScsiReqBlk *srb;
1319 dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n");
1320 if (!acb->active_dcb || !acb->active_dcb->active_srb) {
1321 dprintkl(KERN_DEBUG, "... but no cmd pending? Oops!\n");
1322 return;
1323 }
1324 DC395x_LOCK_IO(acb->scsi_host, flags);
1325 srb = acb->active_dcb->active_srb;
1326 disconnect(acb);
1327 DC395x_UNLOCK_IO(acb->scsi_host, flags);
1328 }
1329 #endif
1330
1331
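/*
 * Try to win the bus and send @srb to its target. Returns 0 if the
 * selection was started (the SRB becomes the active one) or 1 if the
 * command cannot be started right now (bus busy, another command
 * active, still within the reset delay, or no free tag) and the caller
 * should retry later. Depending on the negotiation state this either
 * queues SDTR/WDTR messages for SEL_ATNSTOP or loads the identify
 * message, optional tag and CDB into the FIFO for SEL_ATN/SEL_ATN3.
 */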
1332 static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1333 struct ScsiReqBlk* srb)
1334 {
1335 u16 __maybe_unused s_stat2, return_code;
1336 u8 s_stat, scsicommand, i, identify_message;
1337 u8 *ptr;
	dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n",
		srb->cmd, dcb->target_id, dcb->target_lun, srb);
1340
1341 srb->tag_number = TAG_NONE;
1342
1343 s_stat = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
	s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
1346 #if 1
1347 if (s_stat & 0x20 ) {
		dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n",
			srb->cmd, s_stat, s_stat2);
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361 return 1;
1362 }
1363 #endif
1364 if (acb->active_dcb) {
		dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a "
			"command while another command (0x%p) is active.\n",
			srb->cmd,
			acb->active_dcb->active_srb ?
			acb->active_dcb->active_srb->cmd : NULL);
1370 return 1;
1371 }
1372 if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
1373 dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd);
1374 return 1;
1375 }
1376
1377
1378 if (time_before(jiffies, acb->last_reset - HZ / 2)) {
1379 dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
1380 return 1;
1381 }
1382
1383
1384 clear_fifo(acb, "start_scsi");
1385 DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
1386 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
1387 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
1388 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
1389 srb->scsi_phase = PH_BUS_FREE;
1390
1391 identify_message = dcb->identify_msg;
1392
1393
1394 if (srb->flag & AUTO_REQSENSE)
1395 identify_message &= 0xBF;
1396
1397 if (((srb->cmd->cmnd[0] == INQUIRY)
1398 || (srb->cmd->cmnd[0] == REQUEST_SENSE)
1399 || (srb->flag & AUTO_REQSENSE))
1400 && (((dcb->sync_mode & WIDE_NEGO_ENABLE)
1401 && !(dcb->sync_mode & WIDE_NEGO_DONE))
1402 || ((dcb->sync_mode & SYNC_NEGO_ENABLE)
1403 && !(dcb->sync_mode & SYNC_NEGO_DONE)))
1404 && (dcb->target_lun == 0)) {
1405 srb->msgout_buf[0] = identify_message;
1406 srb->msg_count = 1;
1407 scsicommand = SCMD_SEL_ATNSTOP;
1408 srb->state = SRB_MSGOUT;
1409 #ifndef SYNC_FIRST
1410 if (dcb->sync_mode & WIDE_NEGO_ENABLE
1411 && dcb->inquiry7 & SCSI_INQ_WBUS16) {
1412 build_wdtr(acb, dcb, srb);
1413 goto no_cmd;
1414 }
1415 #endif
1416 if (dcb->sync_mode & SYNC_NEGO_ENABLE
1417 && dcb->inquiry7 & SCSI_INQ_SYNC) {
1418 build_sdtr(acb, dcb, srb);
1419 goto no_cmd;
1420 }
1421 if (dcb->sync_mode & WIDE_NEGO_ENABLE
1422 && dcb->inquiry7 & SCSI_INQ_WBUS16) {
1423 build_wdtr(acb, dcb, srb);
1424 goto no_cmd;
1425 }
1426 srb->msg_count = 0;
1427 }
1428
1429 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, identify_message);
1430
1431 scsicommand = SCMD_SEL_ATN;
1432 srb->state = SRB_START_;
1433 #ifndef DC395x_NO_TAGQ
1434 if ((dcb->sync_mode & EN_TAG_QUEUEING)
1435 && (identify_message & 0xC0)) {
1436
1437 u32 tag_mask = 1;
1438 u8 tag_number = 0;
1439 while (tag_mask & dcb->tag_mask
1440 && tag_number < dcb->max_command) {
1441 tag_mask = tag_mask << 1;
1442 tag_number++;
1443 }
1444 if (tag_number >= dcb->max_command) {
1445 dprintkl(KERN_WARNING, "start_scsi: (0x%p) "
1446 "Out of tags target=<%02i-%i>)\n",
1447 srb->cmd, srb->cmd->device->id,
1448 (u8)srb->cmd->device->lun);
1449 srb->state = SRB_READY;
1450 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
1451 DO_HWRESELECT);
1452 return 1;
1453 }
1454
1455 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SIMPLE_QUEUE_TAG);
1456 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, tag_number);
1457 dcb->tag_mask |= tag_mask;
1458 srb->tag_number = tag_number;
1459 scsicommand = SCMD_SEL_ATN3;
1460 srb->state = SRB_START_;
1461 }
1462 #endif
1463
1464
1465 dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n",
1466 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
1467 srb->cmd->cmnd[0], srb->tag_number);
1468 if (srb->flag & AUTO_REQSENSE) {
1469 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
1470 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
1471 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1472 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1473 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
1474 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1475 } else {
1476 ptr = (u8 *)srb->cmd->cmnd;
1477 for (i = 0; i < srb->cmd->cmd_len; i++)
1478 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
1479 }
1480 no_cmd:
1481 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
1482 DO_HWRESELECT | DO_DATALATCH);
1483 if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
1484
1485
1486
1487
1488
1489 dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n",
1490 srb->cmd, dcb->target_id, dcb->target_lun);
1491 srb->state = SRB_READY;
1492 free_tag(dcb, srb);
1493 srb->msg_count = 0;
1494 return_code = 1;
1495
1496 } else {
1497
1498
1499
1500
1501 srb->scsi_phase = PH_BUS_FREE;
1502 dcb->active_srb = srb;
1503 acb->active_dcb = dcb;
1504 return_code = 0;
1505
1506 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
1507 DO_DATALATCH | DO_HWRESELECT);
1508
1509 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, scsicommand);
1510 }
1511 return return_code;
1512 }
1513
1514
1515 #define DC395x_ENABLE_MSGOUT \
1516 DC395x_write16 (acb, TRM_S1040_SCSI_CONTROL, DO_SETATN); \
1517 srb->state |= SRB_MSGOUT
1518
1519
1520
1521 static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
1522 struct ScsiReqBlk *srb)
1523 {
1524 srb->msgout_buf[0] = ABORT;
1525 srb->msg_count = 1;
1526 DC395x_ENABLE_MSGOUT;
1527 srb->state &= ~SRB_MSGIN;
1528 srb->state |= SRB_MSGOUT;
1529 }
1530
1531
1532
1533
1534
1535
1536
1537
1538
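/*
 * Core of the interrupt handler. Reads the SCSI interrupt status,
 * handles selection timeout, disconnect, reselection and bus reset
 * directly, and for bus-service/command-done interrupts runs the
 * two-stage state machine: the phase0 routine for the phase that just
 * ended, then the phase1 routine for the phase in the new scsi_status.
 */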
1539 static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb,
1540 u16 scsi_status)
1541 {
1542 struct DeviceCtlBlk *dcb;
1543 struct ScsiReqBlk *srb;
1544 u16 phase;
1545 u8 scsi_intstatus;
1546 unsigned long flags;
1547 void (*dc395x_statev)(struct AdapterCtlBlk *, struct ScsiReqBlk *,
1548 u16 *);
1549
1550 DC395x_LOCK_IO(acb->scsi_host, flags);
1551
1552
1553 scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
1554 if ((scsi_status & 0x2007) == 0x2002)
1555 dprintkl(KERN_DEBUG,
1556 "COP after COP completed? %04x\n", scsi_status);
1557 if (debug_enabled(DBG_KG)) {
1558 if (scsi_intstatus & INT_SELTIMEOUT)
1559 dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n");
1560 }
1561
1562
1563 if (timer_pending(&acb->selto_timer))
1564 del_timer(&acb->selto_timer);
1565
1566 if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) {
1567 disconnect(acb);
1568 goto out_unlock;
1569 }
1570 if (scsi_intstatus & INT_RESELECTED) {
1571 reselect(acb);
1572 goto out_unlock;
1573 }
1574 if (scsi_intstatus & INT_SELECT) {
1575 dprintkl(KERN_INFO, "Host does not support target mode!\n");
1576 goto out_unlock;
1577 }
1578 if (scsi_intstatus & INT_SCSIRESET) {
1579 scsi_reset_detect(acb);
1580 goto out_unlock;
1581 }
1582 if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) {
1583 dcb = acb->active_dcb;
1584 if (!dcb) {
1585 dprintkl(KERN_DEBUG,
1586 "Oops: BusService (%04x %02x) w/o ActiveDCB!\n",
1587 scsi_status, scsi_intstatus);
1588 goto out_unlock;
1589 }
1590 srb = dcb->active_srb;
1591 if (dcb->flag & ABORT_DEV_) {
1592 dprintkdbg(DBG_0, "MsgOut Abort Device.....\n");
1593 enable_msgout_abort(acb, srb);
1594 }
1595
1596
1597 phase = (u16)srb->scsi_phase;
1611
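		/*
		 * Stage 1: run the phase0 handler for the phase the chip
		 * was in when the interrupt was latched, to finish it off.
		 */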
1612 dc395x_statev = dc395x_scsi_phase0[phase];
1613 dc395x_statev(acb, srb, &scsi_status);
1614
1615
1616
1617
1618
1619
1620 srb->scsi_phase = scsi_status & PHASEMASK;
1621 phase = (u16)scsi_status & PHASEMASK;
1634
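		/*
		 * Stage 2: srb->scsi_phase now holds the phase the bus has
		 * moved to; run the matching phase1 handler to prepare it.
		 */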
1635 dc395x_statev = dc395x_scsi_phase1[phase];
1636 dc395x_statev(acb, srb, &scsi_status);
1637 }
1638 out_unlock:
1639 DC395x_UNLOCK_IO(acb->scsi_host, flags);
1640 }
1641
1642
1643 static irqreturn_t dc395x_interrupt(int irq, void *dev_id)
1644 {
1645 struct AdapterCtlBlk *acb = dev_id;
1646 u16 scsi_status;
1647 u8 dma_status;
1648 irqreturn_t handled = IRQ_NONE;
1649
1650
1651
1652
1653 scsi_status = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
1654 dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
1655 if (scsi_status & SCSIINTERRUPT) {
1656
1657 dc395x_handle_interrupt(acb, scsi_status);
1658 handled = IRQ_HANDLED;
1659 }
1660 else if (dma_status & 0x20) {
1661
1662 dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status);
1663 #if 0
1664 dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n");
1665 if (acb->active_dcb) {
1666 acb->active_dcb-> flag |= ABORT_DEV_;
1667 if (acb->active_dcb->active_srb)
1668 enable_msgout_abort(acb, acb->active_dcb->active_srb);
1669 }
1670 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, ABORTXFER | CLRXFIFO);
1671 #else
1672 dprintkl(KERN_INFO, "Ignoring DMA error (probably a bad thing) ...\n");
1673 acb = NULL;
1674 #endif
1675 handled = IRQ_HANDLED;
1676 }
1677
1678 return handled;
1679 }
1680
1681
1682 static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1683 u16 *pscsi_status)
1684 {
1685 dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd);
1686 if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT))
1687 *pscsi_status = PH_BUS_FREE;
1688
1689 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1690 srb->state &= ~SRB_MSGOUT;
1691 }
1692
1693
1694 static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1695 u16 *pscsi_status)
1696 {
1697 u16 i;
1698 u8 *ptr;
1699 dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd);
1700
1701 clear_fifo(acb, "msgout_phase1");
1702 if (!(srb->state & SRB_MSGOUT)) {
1703 srb->state |= SRB_MSGOUT;
1704 dprintkl(KERN_DEBUG,
1705 "msgout_phase1: (0x%p) Phase unexpected\n",
1706 srb->cmd);
1707 }
1708 if (!srb->msg_count) {
1709 dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n",
1710 srb->cmd);
1711 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, NOP);
1712 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1713
1714 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
1715 return;
1716 }
1717 ptr = (u8 *)srb->msgout_buf;
1718 for (i = 0; i < srb->msg_count; i++)
1719 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
1720 srb->msg_count = 0;
1721 if (srb->msgout_buf[0] == ABORT_TASK_SET)
1722 srb->state = SRB_ABORT_SENT;
1723
1724 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
1725 }
1726
1727
1728 static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1729 u16 *pscsi_status)
1730 {
1731 dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd);
1732 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1733 }
1734
1735
1736 static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1737 u16 *pscsi_status)
1738 {
1739 struct DeviceCtlBlk *dcb;
1740 u8 *ptr;
1741 u16 i;
1742 dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd);
1743
1744 clear_fifo(acb, "command_phase1");
1745 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN);
1746 if (!(srb->flag & AUTO_REQSENSE)) {
1747 ptr = (u8 *)srb->cmd->cmnd;
1748 for (i = 0; i < srb->cmd->cmd_len; i++) {
1749 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr);
1750 ptr++;
1751 }
1752 } else {
1753 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
1754 dcb = acb->active_dcb;
1755
1756 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
1757 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1758 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1759 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
1760 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1761 }
1762 srb->state |= SRB_COMMAND;
1763
1764 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1765
1766 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
1767 }
1768
1769
1770
1771
1772
1773
1774 static void sg_verify_length(struct ScsiReqBlk *srb)
1775 {
1776 if (debug_enabled(DBG_SG)) {
1777 unsigned len = 0;
1778 unsigned idx = srb->sg_index;
1779 struct SGentry *psge = srb->segment_x + idx;
1780 for (; idx < srb->sg_count; psge++, idx++)
1781 len += psge->length;
1782 if (len != srb->total_xfer_length)
1783 dprintkdbg(DBG_SG,
1784 "Inconsistent SRB S/G lengths (Tot=%i, Count=%i) !!\n",
1785 srb->total_xfer_length, len);
1786 }
1787 }
1788
1789
1790
1791
1792
1793
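/*
 * Account for a transfer that left @left bytes outstanding: walk the
 * SG table from the current index, skip entries that were sent
 * completely and adjust address/length of the partially transferred
 * one, syncing the DMA-mapped table around the update so the chip sees
 * the modified entry.
 */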
1794 static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
1795 {
1796 u8 idx;
1797 u32 xferred = srb->total_xfer_length - left;
1798 struct SGentry *psge = srb->segment_x + srb->sg_index;
1799
1800 dprintkdbg(DBG_0,
1801 "sg_update_list: Transferred %i of %i bytes, %i remain\n",
1802 xferred, srb->total_xfer_length, left);
1803 if (xferred == 0) {
1804
1805 return;
1806 }
1807
1808 sg_verify_length(srb);
1809 srb->total_xfer_length = left;
1810 for (idx = srb->sg_index; idx < srb->sg_count; idx++) {
1811 if (xferred >= psge->length) {
1812
1813 xferred -= psge->length;
1814 } else {
1815
1816 dma_sync_single_for_cpu(&srb->dcb->acb->dev->dev,
1817 srb->sg_bus_addr, SEGMENTX_LEN,
1818 DMA_TO_DEVICE);
1819 psge->length -= xferred;
1820 psge->address += xferred;
1821 srb->sg_index = idx;
1822 dma_sync_single_for_device(&srb->dcb->acb->dev->dev,
1823 srb->sg_bus_addr, SEGMENTX_LEN,
1824 DMA_TO_DEVICE);
1825 break;
1826 }
1827 psge++;
1828 }
1829 sg_verify_length(srb);
1830 }
1831
1832
1833
1834
1835
1836
1837
1838
1839 static void sg_subtract_one(struct ScsiReqBlk *srb)
1840 {
1841 sg_update_list(srb, srb->total_xfer_length - 1);
1842 }
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853 static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
1854 struct ScsiReqBlk *srb)
1855 {
1856
1857 if (DC395x_read16(acb, TRM_S1040_DMA_COMMAND) & 0x0001) {
1858 if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
1859 clear_fifo(acb, "cleanup/in");
1860 if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
1861 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
1862 } else {
1863 if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
1864 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
1865 if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
1866 clear_fifo(acb, "cleanup/out");
1867 }
1868 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1869 }
1870
1871
1872
1873
1874
1875
1876 #define DC395x_LASTPIO 4
1877
1878
1879 static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1880 u16 *pscsi_status)
1881 {
1882 struct DeviceCtlBlk *dcb = srb->dcb;
1883 u16 scsi_status = *pscsi_status;
1884 u32 d_left_counter = 0;
1885 dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n",
1886 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900 dprintkdbg(DBG_PIO, "data_out_phase0: "
1901 "DMA{fifocnt=0x%02x fifostat=0x%02x} "
1902 "SCSI{fifocnt=0x%02x cnt=0x%06x status=0x%04x} total=0x%06x\n",
1903 DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
1904 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
1905 DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
1906 DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), scsi_status,
1907 srb->total_xfer_length);
1908 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, STOPDMAXFER | CLRXFIFO);
1909
1910 if (!(srb->state & SRB_XFERPAD)) {
1911 if (scsi_status & PARITYERROR)
1912 srb->status |= PARITY_ERROR;
1913
1914
1915
1916
1917
1918
1919
1920 if (!(scsi_status & SCSIXFERDONE)) {
1921
1922
1923
1924
1925 d_left_counter =
1926 (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
1927 0x1F);
1928 if (dcb->sync_period & WIDE_SYNC)
1929 d_left_counter <<= 1;
1930
1931 dprintkdbg(DBG_KG, "data_out_phase0: FIFO contains %i %s\n"
1932 "SCSI{fifocnt=0x%02x cnt=0x%08x} "
1933 "DMA{fifocnt=0x%04x cnt=0x%02x ctr=0x%08x}\n",
1934 DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
1935 (dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
1936 DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
1937 DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
1938 DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
1939 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
1940 DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
1941 }
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951 if (srb->total_xfer_length > DC395x_LASTPIO)
1952 d_left_counter +=
1953 DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);
1954
1955
1956
1957
1958 if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
1959 && scsi_bufflen(srb->cmd) % 2) {
1960 d_left_counter = 0;
1961 dprintkl(KERN_INFO,
1962 "data_out_phase0: Discard 1 byte (0x%02x)\n",
1963 scsi_status);
1964 }
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975 if (d_left_counter == 0) {
1976 srb->total_xfer_length = 0;
1977 } else {
1978
1979
1980
1981
1982
1983 long oldxferred =
1984 srb->total_xfer_length - d_left_counter;
1985 const int diff =
1986 (dcb->sync_period & WIDE_SYNC) ? 2 : 1;
1987 sg_update_list(srb, d_left_counter);
1988
1989 if ((srb->segment_x[srb->sg_index].length ==
1990 diff && scsi_sg_count(srb->cmd))
1991 || ((oldxferred & ~PAGE_MASK) ==
1992 (PAGE_SIZE - diff))
1993 ) {
1994 dprintkl(KERN_INFO, "data_out_phase0: "
1995 "Work around chip bug (%i)?\n", diff);
1996 d_left_counter =
1997 srb->total_xfer_length - diff;
1998 sg_update_list(srb, d_left_counter);
1999
2000
2001
2002
2003 }
2004 }
2005 }
2006 if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) {
2007 cleanup_after_transfer(acb, srb);
2008 }
2009 }
2010
2011
2012 static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2013 u16 *pscsi_status)
2014 {
2015 dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n",
2016 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2017 clear_fifo(acb, "data_out_phase1");
2018
2019 data_io_transfer(acb, srb, XFERDATAOUT);
2020 }
2021
2022 static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2023 u16 *pscsi_status)
2024 {
2025 u16 scsi_status = *pscsi_status;
2026
2027 dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n",
2028 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043 if (!(srb->state & SRB_XFERPAD)) {
2044 u32 d_left_counter;
2045 unsigned int sc, fc;
2046
2047 if (scsi_status & PARITYERROR) {
2048 dprintkl(KERN_INFO, "data_in_phase0: (0x%p) "
2049 "Parity Error\n", srb->cmd);
2050 srb->status |= PARITY_ERROR;
2051 }
2052
2053
2054
2055
2056
2057
2058 if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) {
2059 #if 0
2060 int ctr = 6000000;
2061 dprintkl(KERN_DEBUG,
2062 "DIP0: Wait for DMA FIFO to flush ...\n");
2063
2064
2065
			while (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) &
				 0x80) && --ctr)
				;
			if (ctr < 6000000 - 1)
				dprintkl(KERN_DEBUG,
					"DIP0: Had to wait for DMA ...\n");
2072 if (!ctr)
2073 dprintkl(KERN_ERR,
2074 "Deadlock in DIP0 waiting for DMA FIFO empty!!\n");
2075
2076 #endif
2077 dprintkdbg(DBG_KG, "data_in_phase0: "
2078 "DMA{fifocnt=0x%02x fifostat=0x%02x}\n",
2079 DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
2080 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT));
2081 }
2082
2083 sc = DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);
2084 fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
2085 d_left_counter = sc + ((fc & 0x1f)
2086 << ((srb->dcb->sync_period & WIDE_SYNC) ? 1 :
2087 0));
2088 dprintkdbg(DBG_KG, "data_in_phase0: "
2089 "SCSI{fifocnt=0x%02x%s ctr=0x%08x} "
2090 "DMA{fifocnt=0x%02x fifostat=0x%02x ctr=0x%08x} "
2091 "Remain{totxfer=%i scsi_fifo+ctr=%i}\n",
2092 fc,
2093 (srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
2094 sc,
2095 fc,
2096 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
2097 DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
2098 srb->total_xfer_length, d_left_counter);
2099 #if DC395x_LASTPIO
2100
2101 if (d_left_counter
2102 && srb->total_xfer_length <= DC395x_LASTPIO) {
2103 size_t left_io = srb->total_xfer_length;
2104
2105
2106
2107 dprintkdbg(DBG_PIO, "data_in_phase0: PIO (%i %s) "
2108 "for remaining %i bytes:",
2109 fc & 0x1f,
2110 (srb->dcb->sync_period & WIDE_SYNC) ?
2111 "words" : "bytes",
2112 srb->total_xfer_length);
2113 if (srb->dcb->sync_period & WIDE_SYNC)
2114 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
2115 CFG2_WIDEFIFO);
2116 while (left_io) {
2117 unsigned char *virt, *base = NULL;
2118 unsigned long flags = 0;
2119 size_t len = left_io;
2120 size_t offset = srb->request_length - left_io;
2121
2122 local_irq_save(flags);
2123
2124
2125 base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
2126 srb->sg_count, &offset, &len);
2127 virt = base + offset;
2128
2129 left_io -= len;
2130
2131 while (len) {
2132 u8 byte;
2133 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2134 *virt++ = byte;
2135
2136 if (debug_enabled(DBG_PIO))
2137 printk(" %02x", byte);
2138
2139 d_left_counter--;
2140 sg_subtract_one(srb);
2141
2142 len--;
2143
2144 fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
2145
2146 if (fc == 0x40) {
2147 left_io = 0;
2148 break;
2149 }
2150 }
2151
2152 WARN_ON((fc != 0x40) == !d_left_counter);
2153
2154 if (fc == 0x40 && (srb->dcb->sync_period & WIDE_SYNC)) {
2155
2156 if (srb->total_xfer_length > 0) {
2157 u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2158
2159 *virt++ = byte;
2160 srb->total_xfer_length--;
2161 if (debug_enabled(DBG_PIO))
2162 printk(" %02x", byte);
2163 }
2164
2165 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
2166 }
2167
2168 scsi_kunmap_atomic_sg(base);
2169 local_irq_restore(flags);
2170 }
2171
2172
2173 if (debug_enabled(DBG_PIO))
2174 printk("\n");
2175 }
2176 #endif
2177
2178 #if 0
2179
2180
2181
2182
2183 if (!(scsi_status & SCSIXFERDONE)) {
2184
2185
2186
2187
2188 d_left_counter =
2189 (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
2190 0x1F);
2191 if (srb->dcb->sync_period & WIDE_SYNC)
2192 d_left_counter <<= 1;
2193
2194
2195
2196
2197
2198 }
2199 #endif
2200
2201 if (d_left_counter == 0
2202 || (scsi_status & SCSIXFERCNT_2_ZERO)) {
2203 #if 0
2204 int ctr = 6000000;
2205 u8 TempDMAstatus;
2206 do {
2207 TempDMAstatus =
2208 DC395x_read8(acb, TRM_S1040_DMA_STATUS);
2209 } while (!(TempDMAstatus & DMAXFERCOMP) && --ctr);
2210 if (!ctr)
2211 dprintkl(KERN_ERR,
2212 "Deadlock in DataInPhase0 waiting for DMA!!\n");
2213 srb->total_xfer_length = 0;
2214 #endif
2215 srb->total_xfer_length = d_left_counter;
2216 } else {
2217
2218
2219
2220
2221
2222
2223
2224
2225 sg_update_list(srb, d_left_counter);
2226 }
2227 }
2228
2229 if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) {
2230 cleanup_after_transfer(acb, srb);
2231 }
2232 }
2233
2234
2235 static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2236 u16 *pscsi_status)
2237 {
2238 dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n",
2239 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2240 data_io_transfer(acb, srb, XFERDATAIN);
2241 }
2242
2243
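/*
 * Program the data transfer for the current data phase. Transfers
 * larger than DC395x_LASTPIO bytes go through the DMA engine, fed with
 * either the mapped SG table or a single segment; the last few bytes
 * are pushed by PIO through the SCSI FIFO; and when nothing is left to
 * send but the target apparently still expects data, dummy bytes are
 * transferred with SRB_XFERPAD set and an overrun is recorded if an
 * SG list had been set up.
 */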
2244 static void data_io_transfer(struct AdapterCtlBlk *acb,
2245 struct ScsiReqBlk *srb, u16 io_dir)
2246 {
2247 struct DeviceCtlBlk *dcb = srb->dcb;
2248 u8 bval;
2249 dprintkdbg(DBG_0,
2250 "data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n",
2251 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
2252 ((io_dir & DMACMD_DIR) ? 'r' : 'w'),
2253 srb->total_xfer_length, srb->sg_index, srb->sg_count);
2254 if (srb == acb->tmp_srb)
2255 dprintkl(KERN_ERR, "data_io_transfer: Using tmp_srb!\n");
2256 if (srb->sg_index >= srb->sg_count) {
2257
2258 return;
2259 }
2260
2261 if (srb->total_xfer_length > DC395x_LASTPIO) {
2262 u8 dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
2263
2264
2265
2266
2267 if (dma_status & XFERPENDING) {
2268 dprintkl(KERN_DEBUG, "data_io_transfer: Xfer pending! "
2269 "Expect trouble!\n");
2270 dump_register_info(acb, dcb, srb);
2271 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
2272 }
2273
2274
2275
2276
2277
2278 srb->state |= SRB_DATA_XFER;
2279 DC395x_write32(acb, TRM_S1040_DMA_XHIGHADDR, 0);
2280 if (scsi_sg_count(srb->cmd)) {
2281 io_dir |= DMACMD_SG;
2282 DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
2283 srb->sg_bus_addr +
2284 sizeof(struct SGentry) *
2285 srb->sg_index);
2286
2287 DC395x_write32(acb, TRM_S1040_DMA_XCNT,
2288 ((u32)(srb->sg_count -
2289 srb->sg_index) << 3));
2290 } else {
2291 io_dir &= ~DMACMD_SG;
2292 DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
2293 srb->segment_x[0].address);
2294 DC395x_write32(acb, TRM_S1040_DMA_XCNT,
2295 srb->segment_x[0].length);
2296 }
2297
2298 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
2299 srb->total_xfer_length);
2300 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2301 if (io_dir & DMACMD_DIR) {
2302 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
2303 SCMD_DMA_IN);
2304 DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
2305 } else {
2306 DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
2307 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
2308 SCMD_DMA_OUT);
2309 }
2310
2311 }
2312 #if DC395x_LASTPIO
2313 else if (srb->total_xfer_length > 0) {
2314
2315
2316
2317
2318 srb->state |= SRB_DATA_XFER;
2319
2320 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
2321 srb->total_xfer_length);
2322 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2323 if (io_dir & DMACMD_DIR) {
2324 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
2325 SCMD_FIFO_IN);
2326 } else {
2327 int ln = srb->total_xfer_length;
2328 size_t left_io = srb->total_xfer_length;
2329
2330 if (srb->dcb->sync_period & WIDE_SYNC)
2331 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
2332 CFG2_WIDEFIFO);
2333
2334 while (left_io) {
2335 unsigned char *virt, *base = NULL;
2336 unsigned long flags = 0;
2337 size_t len = left_io;
2338 size_t offset = srb->request_length - left_io;
2339
2340 local_irq_save(flags);
2341
2342 base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
2343 srb->sg_count, &offset, &len);
2344 virt = base + offset;
2345
2346 left_io -= len;
2347
2348 while (len--) {
2349 if (debug_enabled(DBG_PIO))
2350 printk(" %02x", *virt);
2351
2352 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *virt++);
2353
2354 sg_subtract_one(srb);
2355 }
2356
2357 scsi_kunmap_atomic_sg(base);
2358 local_irq_restore(flags);
2359 }
2360 if (srb->dcb->sync_period & WIDE_SYNC) {
2361 if (ln % 2) {
2362 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
2363 if (debug_enabled(DBG_PIO))
2364 printk(" |00");
2365 }
2366 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
2367 }
2368
2369 if (debug_enabled(DBG_PIO))
2370 printk("\n");
2371 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
2372 SCMD_FIFO_OUT);
2373 }
2374 }
2375 #endif
2376 else {
2377 if (srb->sg_count) {
2378 srb->adapter_status = H_OVER_UNDER_RUN;
2379 srb->status |= OVER_RUN;
2380 }
2381
2382
2383
2384
2385
2386 if (dcb->sync_period & WIDE_SYNC) {
2387 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 2);
2388 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
2389 CFG2_WIDEFIFO);
2390 if (io_dir & DMACMD_DIR) {
2391 DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2392 DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2393 } else {
2394
2395
2396
2397 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
2398 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'G');
2399 }
2400 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
2401 } else {
2402 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
2403
2404
2405 if (io_dir & DMACMD_DIR)
2406 DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2407 else
2408 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
2409 }
2410 srb->state |= SRB_XFERPAD;
2411 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2412
2413 bval = (io_dir & DMACMD_DIR) ? SCMD_FIFO_IN : SCMD_FIFO_OUT;
2414 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, bval);
2415 }
2416 }
2417
2418
2419 static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2420 u16 *pscsi_status)
2421 {
2422 dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n",
2423 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2424 srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2425 srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2426 srb->state = SRB_COMPLETED;
2427 *pscsi_status = PH_BUS_FREE;
2428 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2429 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
2430 }
2431
2432
2433 static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2434 u16 *pscsi_status)
2435 {
2436 dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n",
2437 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2438 srb->state = SRB_STATUS;
2439 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2440 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP);
2441 }
2442
2443
2444
2445 static inline u8 msgin_completed(u8 *msgbuf, u32 len)
2446 {
2447 if (*msgbuf == EXTENDED_MESSAGE) {
2448 if (len < 2)
2449 return 0;
2450 if (len < msgbuf[1] + 2)
2451 return 0;
2452 } else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f)
2453 if (len < 2)
2454 return 0;
2455 return 1;
2456 }
2457
2458
2459 static inline void msgin_reject(struct AdapterCtlBlk *acb,
2460 struct ScsiReqBlk *srb)
2461 {
2462 srb->msgout_buf[0] = MESSAGE_REJECT;
2463 srb->msg_count = 1;
2464 DC395x_ENABLE_MSGOUT;
2465 srb->state &= ~SRB_MSGIN;
2466 srb->state |= SRB_MSGOUT;
2467 dprintkl(KERN_INFO, "msgin_reject: 0x%02x <%02i-%i>\n",
2468 srb->msgin_buf[0],
2469 srb->dcb->target_id, srb->dcb->target_lun);
2470 }
2471
2472
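/*
 * A queue tag message arrived after reselection: find the disconnected
 * request on the going list that carries this tag and make it the
 * active SRB.  If no suitable SRB exists, answer with ABORT TASK using
 * the temporary SRB.
 */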
2473 static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
2474 struct DeviceCtlBlk *dcb, u8 tag)
2475 {
2476 struct ScsiReqBlk *srb = NULL;
2477 struct ScsiReqBlk *i;
2478 dprintkdbg(DBG_0, "msgin_qtag: tag=%i\n", tag);
2480
2481 if (!(dcb->tag_mask & (1 << tag)))
2482 dprintkl(KERN_DEBUG,
2483 "msgin_qtag: tag_mask=0x%08x does not reserve tag %i!\n",
2484 dcb->tag_mask, tag);
2485
2486 if (list_empty(&dcb->srb_going_list))
2487 goto mingx0;
2488 list_for_each_entry(i, &dcb->srb_going_list, list) {
2489 if (i->tag_number == tag) {
2490 srb = i;
2491 break;
2492 }
2493 }
2494 if (!srb)
2495 goto mingx0;
2496
2497 dprintkdbg(DBG_0, "msgin_qtag: (0x%p) <%02i-%i>\n",
2498 srb->cmd, srb->dcb->target_id, srb->dcb->target_lun);
2499 if (dcb->flag & ABORT_DEV_) {
2500
2501 enable_msgout_abort(acb, srb);
2502 }
2503
2504 if (!(srb->state & SRB_DISCONNECT))
2505 goto mingx0;
2506
2507 memcpy(srb->msgin_buf, dcb->active_srb->msgin_buf, acb->msg_len);
2508 srb->state |= dcb->active_srb->state;
2509 srb->state |= SRB_DATA_XFER;
2510 dcb->active_srb = srb;
2511
2512 return srb;
2513
2514 mingx0:
2515 srb = acb->tmp_srb;
2516 srb->state = SRB_UNEXPECT_RESEL;
2517 dcb->active_srb = srb;
2518 srb->msgout_buf[0] = ABORT_TASK;
2519 srb->msg_count = 1;
2520 DC395x_ENABLE_MSGOUT;
2521 dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag);
2522 return srb;
2523 }
2524
2525
2526 static inline void reprogram_regs(struct AdapterCtlBlk *acb,
2527 struct DeviceCtlBlk *dcb)
2528 {
2529 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
2530 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
2531 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
2532 set_xfer_rate(acb, dcb);
2533 }
2534
2535
2536
2537 static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
2538 {
2539 struct DeviceCtlBlk *dcb = srb->dcb;
2540 dprintkl(KERN_DEBUG, "msgin_set_async: No sync transfers <%02i-%i>\n",
2541 dcb->target_id, dcb->target_lun);
2542
2543 dcb->sync_mode &= ~(SYNC_NEGO_ENABLE);
2544 dcb->sync_mode |= SYNC_NEGO_DONE;
2545
2546 dcb->sync_offset = 0;
2547 dcb->min_nego_period = 200 >> 2;
2548 srb->state &= ~SRB_DO_SYNC_NEGO;
2549 reprogram_regs(acb, dcb);
2550 if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
2551 && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
2552 build_wdtr(acb, dcb, srb);
2553 DC395x_ENABLE_MSGOUT;
2554 dprintkdbg(DBG_0, "msgin_set_async(rej): Try WDTR anyway\n");
2555 }
2556 }
2557
2558
2559
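/*
 * Handle an incoming SDTR: clamp the offered offset and period to the
 * adapter limits, program the sync registers, and if the target (not
 * we) started the negotiation, echo the adjusted SDTR back as the
 * answer.
 */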
2560 static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
2561 {
2562 struct DeviceCtlBlk *dcb = srb->dcb;
2563 u8 bval;
2564 int fact;
2565 dprintkdbg(DBG_1, "msgin_set_sync: <%02i> Sync: %ins "
2566 "(%02i.%01i MHz) Offset %i\n",
2567 dcb->target_id, srb->msgin_buf[3] << 2,
2568 (250 / srb->msgin_buf[3]),
2569 ((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3],
2570 srb->msgin_buf[4]);
2571
2572 if (srb->msgin_buf[4] > 15)
2573 srb->msgin_buf[4] = 15;
2574 if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO))
2575 dcb->sync_offset = 0;
2576 else if (dcb->sync_offset == 0)
2577 dcb->sync_offset = srb->msgin_buf[4];
2578 if (srb->msgin_buf[4] > dcb->sync_offset)
2579 srb->msgin_buf[4] = dcb->sync_offset;
2580 else
2581 dcb->sync_offset = srb->msgin_buf[4];
2582 bval = 0;
2583 while (bval < 7 && (srb->msgin_buf[3] > clock_period[bval]
2584 || dcb->min_nego_period >
2585 clock_period[bval]))
2586 bval++;
2587 if (srb->msgin_buf[3] < clock_period[bval])
2588 dprintkl(KERN_INFO,
2589 "msgin_set_sync: Increase sync nego period to %ins\n",
2590 clock_period[bval] << 2);
2591 srb->msgin_buf[3] = clock_period[bval];
2592 dcb->sync_period &= 0xf0;
2593 dcb->sync_period |= ALT_SYNC | bval;
2594 dcb->min_nego_period = srb->msgin_buf[3];
2595
2596 if (dcb->sync_period & WIDE_SYNC)
2597 fact = 500;
2598 else
2599 fact = 250;
2600
2601 dprintkl(KERN_INFO,
2602 "Target %02i: %s Sync: %ins Offset %i (%02i.%01i MB/s)\n",
2603 dcb->target_id, (fact == 500) ? "Wide16" : "",
2604 dcb->min_nego_period << 2, dcb->sync_offset,
2605 (fact / dcb->min_nego_period),
2606 ((fact % dcb->min_nego_period) * 10 +
2607 dcb->min_nego_period / 2) / dcb->min_nego_period);
2608
2609 if (!(srb->state & SRB_DO_SYNC_NEGO)) {
2610
2611 dprintkl(KERN_DEBUG, "msgin_set_sync: answer w/%ins %i\n",
2612 srb->msgin_buf[3] << 2, srb->msgin_buf[4]);
2613
2614 memcpy(srb->msgout_buf, srb->msgin_buf, 5);
2615 srb->msg_count = 5;
2616 DC395x_ENABLE_MSGOUT;
2617 dcb->sync_mode |= SYNC_NEGO_DONE;
2618 } else {
2619 if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
2620 && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
2621 build_wdtr(acb, dcb, srb);
2622 DC395x_ENABLE_MSGOUT;
2623 dprintkdbg(DBG_0, "msgin_set_sync: Also try WDTR\n");
2624 }
2625 }
2626 srb->state &= ~SRB_DO_SYNC_NEGO;
2627 dcb->sync_mode |= SYNC_NEGO_DONE | SYNC_NEGO_ENABLE;
2628
2629 reprogram_regs(acb, dcb);
2630 }
2631
2632
2633 static inline void msgin_set_nowide(struct AdapterCtlBlk *acb,
2634 struct ScsiReqBlk *srb)
2635 {
2636 struct DeviceCtlBlk *dcb = srb->dcb;
2637 dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id);
2638
2639 dcb->sync_period &= ~WIDE_SYNC;
2640 dcb->sync_mode &= ~(WIDE_NEGO_ENABLE);
2641 dcb->sync_mode |= WIDE_NEGO_DONE;
2642 srb->state &= ~SRB_DO_WIDE_NEGO;
2643 reprogram_regs(acb, dcb);
2644 if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
2645 && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
2646 build_sdtr(acb, dcb, srb);
2647 DC395x_ENABLE_MSGOUT;
2648 dprintkdbg(DBG_0, "msgin_set_nowide: Rejected. Try SDTR anyway\n");
2649 }
2650 }
2651
2652 static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
2653 {
2654 struct DeviceCtlBlk *dcb = srb->dcb;
2655 u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO
2656 && acb->config & HCC_WIDE_CARD) ? 1 : 0;
2657 dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id);
2658
2659 if (srb->msgin_buf[3] > wide)
2660 srb->msgin_buf[3] = wide;
2661
2662 if (!(srb->state & SRB_DO_WIDE_NEGO)) {
2663 dprintkl(KERN_DEBUG,
2664 "msgin_set_wide: Wide nego initiated <%02i>\n",
2665 dcb->target_id);
2666 memcpy(srb->msgout_buf, srb->msgin_buf, 4);
2667 srb->msg_count = 4;
2668 srb->state |= SRB_DO_WIDE_NEGO;
2669 DC395x_ENABLE_MSGOUT;
2670 }
2671
2672 dcb->sync_mode |= (WIDE_NEGO_ENABLE | WIDE_NEGO_DONE);
2673 if (srb->msgin_buf[3] > 0)
2674 dcb->sync_period |= WIDE_SYNC;
2675 else
2676 dcb->sync_period &= ~WIDE_SYNC;
2677 srb->state &= ~SRB_DO_WIDE_NEGO;
2678
2679 dprintkdbg(DBG_1,
2680 "msgin_set_wide: Wide (%i bit) negotiated <%02i>\n",
2681 (8 << srb->msgin_buf[3]), dcb->target_id);
2682 reprogram_regs(acb, dcb);
2683 if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
2684 && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
2685 build_sdtr(acb, dcb, srb);
2686 DC395x_ENABLE_MSGOUT;
2687 dprintkdbg(DBG_0, "msgin_set_wide: Also try SDTR.\n");
2688 }
2689 }
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
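/*
 * Message-in phase: collect message bytes from the SCSI FIFO until a
 * complete message has arrived, then act on it (disconnect, queue tag,
 * message reject, SDTR/WDTR, save/restore pointers, abort, ...), and
 * finally acknowledge the message.
 */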
2704 static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2705 u16 *pscsi_status)
2706 {
2707 struct DeviceCtlBlk *dcb = acb->active_dcb;
2708 dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd);
2709
2710 srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2711 if (msgin_completed(srb->msgin_buf, acb->msg_len)) {
2712
2713 switch (srb->msgin_buf[0]) {
2714 case DISCONNECT:
2715 srb->state = SRB_DISCONNECT;
2716 break;
2717
2718 case SIMPLE_QUEUE_TAG:
2719 case HEAD_OF_QUEUE_TAG:
2720 case ORDERED_QUEUE_TAG:
2721 srb = msgin_qtag(acb, dcb, srb->msgin_buf[1]);
2724 break;
2725
2726 case MESSAGE_REJECT:
2727 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
2728 DO_CLRATN | DO_DATALATCH);
2729
2730 if (srb->state & SRB_DO_SYNC_NEGO) {
2731 msgin_set_async(acb, srb);
2732 break;
2733 }
2734
2735 if (srb->state & SRB_DO_WIDE_NEGO) {
2736 msgin_set_nowide(acb, srb);
2737 break;
2738 }
2739 enable_msgout_abort(acb, srb);
2740
2741 break;
2742
2743 case EXTENDED_MESSAGE:
2744
2745 if (srb->msgin_buf[1] == 3
2746 && srb->msgin_buf[2] == EXTENDED_SDTR) {
2747 msgin_set_sync(acb, srb);
2748 break;
2749 }
2750
2751 if (srb->msgin_buf[1] == 2
2752 && srb->msgin_buf[2] == EXTENDED_WDTR
2753 && srb->msgin_buf[3] <= 2) {
2754 msgin_set_wide(acb, srb);
2755 break;
2756 }
2757 msgin_reject(acb, srb);
2758 break;
2759
2760 case IGNORE_WIDE_RESIDUE:
2761
2762 dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n");
2763 break;
2764
2765 case COMMAND_COMPLETE:
2766
2767 break;
2768
2769 case SAVE_POINTERS:
2770
2771
2772
2773
2774 dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
2775 "SAVE POINTER rem=%i Ignore\n",
2776 srb->cmd, srb->total_xfer_length);
2777 break;
2778
2779 case RESTORE_POINTERS:
2780 dprintkdbg(DBG_0, "msgin_phase0: RESTORE POINTER. Ignore\n");
2781 break;
2782
2783 case ABORT:
2784 dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
2785 "<%02i-%i> ABORT msg\n",
2786 srb->cmd, dcb->target_id,
2787 dcb->target_lun);
2788 dcb->flag |= ABORT_DEV_;
2789 enable_msgout_abort(acb, srb);
2790 break;
2791
2792 default:
2793
2794 if (srb->msgin_buf[0] & IDENTIFY_BASE) {
2795 dprintkdbg(DBG_0, "msgin_phase0: Identify msg\n");
2796 srb->msg_count = 1;
2797 srb->msgout_buf[0] = dcb->identify_msg;
2798 DC395x_ENABLE_MSGOUT;
2799 srb->state |= SRB_MSGOUT;
2800
2801 }
2802 msgin_reject(acb, srb);
2803 }
2804
2805
2806 srb->state &= ~SRB_MSGIN;
2807 acb->msg_len = 0;
2808 }
2809 *pscsi_status = PH_BUS_FREE;
2810 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2811 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
2812 }
2813
2814
2815 static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2816 u16 *pscsi_status)
2817 {
2818 dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd);
2819 clear_fifo(acb, "msgin_phase1");
2820 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
2821 if (!(srb->state & SRB_MSGIN)) {
2822 srb->state &= ~SRB_DISCONNECT;
2823 srb->state |= SRB_MSGIN;
2824 }
2825 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2826
2827 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_IN);
2828 }
2829
2830
2831 static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2832 u16 *pscsi_status)
2833 {
2834 }
2835
2836
2837 static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2838 u16 *pscsi_status)
2839 {
2840 }
2841
2842
2843 static void set_xfer_rate(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb)
2844 {
2845 struct DeviceCtlBlk *i;
2846
2847
2848 if (dcb->identify_msg & 0x07)
2849 return;
2850
2851 if (acb->scan_devices) {
2852 current_sync_offset = dcb->sync_offset;
2853 return;
2854 }
2855
2856 list_for_each_entry(i, &acb->dcb_list, list)
2857 if (i->target_id == dcb->target_id) {
2858 i->sync_period = dcb->sync_period;
2859 i->sync_offset = dcb->sync_offset;
2860 i->sync_mode = dcb->sync_mode;
2861 i->min_nego_period = dcb->min_nego_period;
2862 }
2863 }
2864
2865
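/*
 * The target released the SCSI bus.  Depending on the SRB state this
 * is a normal disconnect, a completed command, a selection timeout or
 * something unexpected; the request is requeued, retried or completed
 * accordingly and hardware reselection is re-armed.
 */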
2866 static void disconnect(struct AdapterCtlBlk *acb)
2867 {
2868 struct DeviceCtlBlk *dcb = acb->active_dcb;
2869 struct ScsiReqBlk *srb;
2870
2871 if (!dcb) {
2872 dprintkl(KERN_ERR, "disconnect: No such device\n");
2873 udelay(500);
2874
2875 acb->last_reset =
2876 jiffies + HZ / 2 +
2877 HZ * acb->eeprom.delay_time;
2878 clear_fifo(acb, "disconnectEx");
2879 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
2880 return;
2881 }
2882 srb = dcb->active_srb;
2883 acb->active_dcb = NULL;
2884 dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd);
2885
2886 srb->scsi_phase = PH_BUS_FREE;
2887 clear_fifo(acb, "disconnect");
2888 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
2889 if (srb->state & SRB_UNEXPECT_RESEL) {
2890 dprintkl(KERN_ERR,
2891 "disconnect: Unexpected reselection <%02i-%i>\n",
2892 dcb->target_id, dcb->target_lun);
2893 srb->state = 0;
2894 waiting_process_next(acb);
2895 } else if (srb->state & SRB_ABORT_SENT) {
2896 dcb->flag &= ~ABORT_DEV_;
2897 acb->last_reset = jiffies + HZ / 2 + 1;
2898 dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");
2899 doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
2900 waiting_process_next(acb);
2901 } else {
2902 if ((srb->state & (SRB_START_ + SRB_MSGOUT))
2903 || !(srb->state & (SRB_DISCONNECT | SRB_COMPLETED))) {
2905
2906
2907
2908
2909
2910 if (srb->state != SRB_START_
2911 && srb->state != SRB_MSGOUT) {
2912 srb->state = SRB_READY;
2913 dprintkl(KERN_DEBUG,
2914 "disconnect: (0x%p) Unexpected\n",
2915 srb->cmd);
2916 srb->target_status = SCSI_STAT_SEL_TIMEOUT;
2917 goto disc1;
2918 } else {
2919
2920 dprintkdbg(DBG_KG, "disconnect: (0x%p) "
2921 "<%02i-%i> SelTO\n", srb->cmd,
2922 dcb->target_id, dcb->target_lun);
2923 if (srb->retry_count++ > DC395x_MAX_RETRIES
2924 || acb->scan_devices) {
2925 srb->target_status =
2926 SCSI_STAT_SEL_TIMEOUT;
2927 goto disc1;
2928 }
2929 free_tag(dcb, srb);
2930 list_move(&srb->list, &dcb->srb_waiting_list);
2931 dprintkdbg(DBG_KG,
2932 "disconnect: (0x%p) Retry\n",
2933 srb->cmd);
2934 waiting_set_timer(acb, HZ / 20);
2935 }
2936 } else if (srb->state & SRB_DISCONNECT) {
2937 u8 bval = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
2938
2939
2940
2941 if (bval & 0x40) {
2942 dprintkdbg(DBG_0, "disconnect: SCSI bus stat "
2943 " 0x%02x: ACK set! Other controllers?\n",
2944 bval);
2945
2946 } else
2947 waiting_process_next(acb);
2948 } else if (srb->state & SRB_COMPLETED) {
2949 disc1:
2950
2951
2952
2953 free_tag(dcb, srb);
2954 dcb->active_srb = NULL;
2955 srb->state = SRB_FREE;
2956 srb_done(acb, dcb, srb);
2957 }
2958 }
2959 }
2960
2961
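/*
 * We have been reselected by a target.  Decode target and LUN from the
 * reselection ID, look up the matching DCB and its disconnected SRB
 * (or the temporary SRB when tagged queueing is active, since the
 * following queue tag message picks the real one), and reprogram the
 * sync/offset registers for that device.
 */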
2962 static void reselect(struct AdapterCtlBlk *acb)
2963 {
2964 struct DeviceCtlBlk *dcb = acb->active_dcb;
2965 struct ScsiReqBlk *srb = NULL;
2966 u16 rsel_tar_lun_id;
2967 u8 id, lun;
2968 dprintkdbg(DBG_0, "reselect: acb=%p\n", acb);
2969
2970 clear_fifo(acb, "reselect");
2971
2972
2973 rsel_tar_lun_id = DC395x_read16(acb, TRM_S1040_SCSI_TARGETID);
2974 if (dcb) {
2975 srb = dcb->active_srb;
2976 if (!srb) {
2977 dprintkl(KERN_DEBUG, "reselect: Arb lost Resel won, "
2978 "but active_srb == NULL\n");
2979 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2980 return;
2981 }
2982
2983 if (!acb->scan_devices) {
2984 dprintkdbg(DBG_KG, "reselect: (0x%p) <%02i-%i> "
2985 "Arb lost but Resel win rsel=%i stat=0x%04x\n",
2986 srb->cmd, dcb->target_id,
2987 dcb->target_lun, rsel_tar_lun_id,
2988 DC395x_read16(acb, TRM_S1040_SCSI_STATUS));
2989
2990
2991 srb->state = SRB_READY;
2992 free_tag(dcb, srb);
2993 list_move(&srb->list, &dcb->srb_waiting_list);
2994 waiting_set_timer(acb, HZ / 20);
2995
2996
2997 }
2998 }
2999
3000 if (!(rsel_tar_lun_id & (IDENTIFY_BASE << 8)))
3001 dprintkl(KERN_DEBUG, "reselect: Expects identify msg. "
3002 "Got %i!\n", rsel_tar_lun_id);
3003 id = rsel_tar_lun_id & 0xff;
3004 lun = (rsel_tar_lun_id >> 8) & 7;
3005 dcb = find_dcb(acb, id, lun);
3006 if (!dcb) {
3007 dprintkl(KERN_ERR, "reselect: From non existent device "
3008 "<%02i-%i>\n", id, lun);
3009 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
3010 return;
3011 }
3012 acb->active_dcb = dcb;
3013
3014 if (!(dcb->dev_mode & NTC_DO_DISCONNECT))
3015 dprintkl(KERN_DEBUG, "reselect: in spite of forbidden "
3016 "disconnection? <%02i-%i>\n",
3017 dcb->target_id, dcb->target_lun);
3018
3019 if (dcb->sync_mode & EN_TAG_QUEUEING) {
3020 srb = acb->tmp_srb;
3021 dcb->active_srb = srb;
3022 } else {
3023
3024 srb = dcb->active_srb;
3025 if (!srb || !(srb->state & SRB_DISCONNECT)) {
3026
3027
3028
3029 dprintkl(KERN_DEBUG,
3030 "reselect: w/o disconnected cmds <%02i-%i>\n",
3031 dcb->target_id, dcb->target_lun);
3032 srb = acb->tmp_srb;
3033 srb->state = SRB_UNEXPECT_RESEL;
3034 dcb->active_srb = srb;
3035 enable_msgout_abort(acb, srb);
3036 } else {
3037 if (dcb->flag & ABORT_DEV_) {
3038
3039 enable_msgout_abort(acb, srb);
3040 } else
3041 srb->state = SRB_DATA_XFER;
3042
3043 }
3044 }
3045 srb->scsi_phase = PH_BUS_FREE;
3046
3047
3048 dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id);
3049 DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
3050 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
3051 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
3052 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
3053 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
3054
3055 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
3056 }
3057
3058
3059 static inline u8 tagq_blacklist(char *name)
3060 {
3061 #ifndef DC395x_NO_TAGQ
3062 #if 0
3063 u8 i;
3064 for (i = 0; i < BADDEVCNT; i++)
3065 if (memcmp(name, DC395x_baddevname1[i], 28) == 0)
3066 return 1;
3067 #endif
3068 return 0;
3069 #else
3070 return 1;
3071 #endif
3072 }
3073
3074
3075 static void disc_tagq_set(struct DeviceCtlBlk *dcb, struct ScsiInqData *ptr)
3076 {
3077
3078 if ((ptr->Vers & 0x07) >= 2 || (ptr->RDF & 0x0F) == 2) {
3079 if ((ptr->Flags & SCSI_INQ_CMDQUEUE)
3080 && (dcb->dev_mode & NTC_DO_TAG_QUEUEING) &&
3081
3082
3083
3084 !tagq_blacklist(((char *)ptr) + 8)) {
3085 if (dcb->max_command == 1)
3086 dcb->max_command =
3087 dcb->acb->tag_max_num;
3088 dcb->sync_mode |= EN_TAG_QUEUEING;
3089
3090 } else
3091 dcb->max_command = 1;
3092 }
3093 }
3094
3095
3096 static void add_dev(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3097 struct ScsiInqData *ptr)
3098 {
3099 u8 bval1 = ptr->DevType & SCSI_DEVTYPE;
3100 dcb->dev_type = bval1;
3101
3102 disc_tagq_set(dcb, ptr);
3103 }
3104
3105
3106
3107 static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
3108 {
3109 struct scsi_cmnd *cmd = srb->cmd;
3110 enum dma_data_direction dir = cmd->sc_data_direction;
3111
3112 if (scsi_sg_count(cmd) && dir != DMA_NONE) {
3113
3114 dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
3115 srb->sg_bus_addr, SEGMENTX_LEN);
3116 dma_unmap_single(&acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN,
3117 DMA_TO_DEVICE);
3118 dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
3119 scsi_sg_count(cmd), scsi_bufflen(cmd));
3120
3121 scsi_dma_unmap(cmd);
3122 }
3123 }
3124
3125
3126
3127 static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
3128 struct ScsiReqBlk *srb)
3129 {
3130 if (!(srb->flag & AUTO_REQSENSE))
3131 return;
3132
3133 dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
3134 srb->segment_x[0].address);
3135 dma_unmap_single(&acb->dev->dev, srb->segment_x[0].address,
3136 srb->segment_x[0].length, DMA_FROM_DEVICE);
3137
3138 srb->total_xfer_length = srb->xferred;
3139 srb->segment_x[0].address =
3140 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address;
3141 srb->segment_x[0].length =
3142 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length;
3143 }
3144
3145
3146
3147
3148
3149
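/*
 * Complete a request: evaluate target and adapter status, kick off an
 * automatic REQUEST SENSE on CHECK CONDITION, shrink the device's
 * command limit on QUEUE FULL, unmap DMA buffers, peek at INQUIRY
 * results to set up tagged queueing, and hand the command back to the
 * midlayer.
 */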
3150 static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3151 struct ScsiReqBlk *srb)
3152 {
3153 u8 tempcnt, status;
3154 struct scsi_cmnd *cmd = srb->cmd;
3155 enum dma_data_direction dir = cmd->sc_data_direction;
3156 int ckc_only = 1;
3157
3158 dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd,
3159 srb->cmd->device->id, (u8)srb->cmd->device->lun);
3160 dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
3161 srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
3162 scsi_sglist(cmd));
3163 status = srb->target_status;
3164 set_host_byte(cmd, DID_OK);
3165 set_status_byte(cmd, SAM_STAT_GOOD);
3166 if (srb->flag & AUTO_REQSENSE) {
3167 dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
3168 pci_unmap_srb_sense(acb, srb);
3169
3170
3171
3172 srb->flag &= ~AUTO_REQSENSE;
3173 srb->adapter_status = 0;
3174 srb->target_status = SAM_STAT_CHECK_CONDITION;
3175 if (debug_enabled(DBG_1)) {
3176 switch (cmd->sense_buffer[2] & 0x0f) {
3177 case NOT_READY:
3178 dprintkl(KERN_DEBUG,
3179 "ReqSense: NOT_READY cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3180 cmd->cmnd[0], dcb->target_id,
3181 dcb->target_lun, status, acb->scan_devices);
3182 break;
3183 case UNIT_ATTENTION:
3184 dprintkl(KERN_DEBUG,
3185 "ReqSense: UNIT_ATTENTION cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3186 cmd->cmnd[0], dcb->target_id,
3187 dcb->target_lun, status, acb->scan_devices);
3188 break;
3189 case ILLEGAL_REQUEST:
3190 dprintkl(KERN_DEBUG,
3191 "ReqSense: ILLEGAL_REQUEST cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3192 cmd->cmnd[0], dcb->target_id,
3193 dcb->target_lun, status, acb->scan_devices);
3194 break;
3195 case MEDIUM_ERROR:
3196 dprintkl(KERN_DEBUG,
3197 "ReqSense: MEDIUM_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3198 cmd->cmnd[0], dcb->target_id,
3199 dcb->target_lun, status, acb->scan_devices);
3200 break;
3201 case HARDWARE_ERROR:
3202 dprintkl(KERN_DEBUG,
3203 "ReqSense: HARDWARE_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3204 cmd->cmnd[0], dcb->target_id,
3205 dcb->target_lun, status, acb->scan_devices);
3206 break;
3207 }
3208 if (cmd->sense_buffer[7] >= 6)
3209 printk("sense=0x%02x ASC=0x%02x ASCQ=0x%02x "
3210 "(0x%08x 0x%08x)\n",
3211 cmd->sense_buffer[2], cmd->sense_buffer[12],
3212 cmd->sense_buffer[13],
3213 *((unsigned int *)(cmd->sense_buffer + 3)),
3214 *((unsigned int *)(cmd->sense_buffer + 8)));
3215 else
3216 printk("sense=0x%02x No ASC/ASCQ (0x%08x)\n",
3217 cmd->sense_buffer[2],
3218 *((unsigned int *)(cmd->sense_buffer + 3)));
3219 }
3220
3221 if (status == SAM_STAT_CHECK_CONDITION) {
3222 set_host_byte(cmd, DID_BAD_TARGET);
3223 goto ckc_e;
3224 }
3225 dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");
3226
3227 set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);
3228
3229 goto ckc_e;
3230 }
3231
3232
3233 if (status) {
3234
3235
3236
3237 if (status == SAM_STAT_CHECK_CONDITION) {
3238 request_sense(acb, dcb, srb);
3239 return;
3240 } else if (status == SAM_STAT_TASK_SET_FULL) {
3241 tempcnt = (u8)list_size(&dcb->srb_going_list);
3242 dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
3243 dcb->target_id, dcb->target_lun, tempcnt);
3244 if (tempcnt > 1)
3245 tempcnt--;
3246 dcb->max_command = tempcnt;
3247 free_tag(dcb, srb);
3248 list_move(&srb->list, &dcb->srb_waiting_list);
3249 waiting_set_timer(acb, HZ / 20);
3250 srb->adapter_status = 0;
3251 srb->target_status = 0;
3252 return;
3253 } else if (status == SCSI_STAT_SEL_TIMEOUT) {
3254 srb->adapter_status = H_SEL_TIMEOUT;
3255 srb->target_status = 0;
3256 set_host_byte(cmd, DID_NO_CONNECT);
3257 } else {
3258 srb->adapter_status = 0;
3259 set_host_byte(cmd, DID_ERROR);
3260 set_status_byte(cmd, status);
3261 }
3262 } else {
3263
3264
3265
3266 status = srb->adapter_status;
3267 if (status & H_OVER_UNDER_RUN) {
3268 srb->target_status = 0;
3269 scsi_msg_to_host_byte(cmd, srb->end_message);
3270 } else if (srb->status & PARITY_ERROR) {
3271 set_host_byte(cmd, DID_PARITY);
3272 } else {
3273
3274 srb->adapter_status = 0;
3275 srb->target_status = 0;
3276 }
3277 }
3278
3279 ckc_only = 0;
3280
3281 ckc_e:
3282
3283 pci_unmap_srb(acb, srb);
3284
3285 if (cmd->cmnd[0] == INQUIRY) {
3286 unsigned char *base = NULL;
3287 struct ScsiInqData *ptr;
3288 unsigned long flags = 0;
3289 struct scatterlist* sg = scsi_sglist(cmd);
3290 size_t offset = 0, len = sizeof(struct ScsiInqData);
3291
3292 local_irq_save(flags);
3293 base = scsi_kmap_atomic_sg(sg, scsi_sg_count(cmd), &offset, &len);
3294 ptr = (struct ScsiInqData *)(base + offset);
3295
3296 if (!ckc_only && get_host_byte(cmd) == DID_OK
3297 && cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
3298 && dir != DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
3299 dcb->inquiry7 = ptr->Flags;
3300
3301
3302
3303 if ((get_host_byte(cmd) == DID_OK) ||
3304 (get_status_byte(cmd) == SAM_STAT_CHECK_CONDITION)) {
3305 if (!dcb->init_tcq_flag) {
3306 add_dev(acb, dcb, ptr);
3307 dcb->init_tcq_flag = 1;
3308 }
3309 }
3310
3311 scsi_kunmap_atomic_sg(base);
3312 local_irq_restore(flags);
3313 }
3314
3315
3316 scsi_set_resid(cmd, srb->total_xfer_length);
3317 if (debug_enabled(DBG_KG)) {
3318 if (srb->total_xfer_length)
3319 dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> "
3320 "cmnd=0x%02x Missed %i bytes\n",
3321 cmd, cmd->device->id, (u8)cmd->device->lun,
3322 cmd->cmnd[0], srb->total_xfer_length);
3323 }
3324
3325 if (srb != acb->tmp_srb) {
3326
3327 dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
3328 cmd, cmd->result);
3329 list_move_tail(&srb->list, &acb->srb_free_list);
3330 } else {
3331 dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
3332 }
3333
3334 scsi_done(cmd);
3335 waiting_process_next(acb);
3336 }
3337
3338
3339
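/*
 * Flush the going and waiting queues of every device, finishing each
 * request with the given host byte; commands are only returned to the
 * midlayer when 'force' is set (used by the reset and abort handlers).
 */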
3340 static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
3341 struct scsi_cmnd *cmd, u8 force)
3342 {
3343 struct DeviceCtlBlk *dcb;
3344 dprintkl(KERN_INFO, "doing_srb_done: pids ");
3345
3346 list_for_each_entry(dcb, &acb->dcb_list, list) {
3347 struct ScsiReqBlk *srb;
3348 struct ScsiReqBlk *tmp;
3349 struct scsi_cmnd *p;
3350
3351 list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
3352 p = srb->cmd;
3353 printk("G:%p(%02i-%i) ", p,
3354 p->device->id, (u8)p->device->lun);
3355 list_del(&srb->list);
3356 free_tag(dcb, srb);
3357 list_add_tail(&srb->list, &acb->srb_free_list);
3358 set_host_byte(p, did_flag);
3359 set_status_byte(p, SAM_STAT_GOOD);
3360 pci_unmap_srb_sense(acb, srb);
3361 pci_unmap_srb(acb, srb);
3362 if (force) {
3363
3364
3365 scsi_done(p);
3366 }
3367 }
3368 if (!list_empty(&dcb->srb_going_list))
3369 dprintkl(KERN_DEBUG,
3370 "How could the ML send cmnds to the Going queue? <%02i-%i>\n",
3371 dcb->target_id, dcb->target_lun);
3372 if (dcb->tag_mask)
3373 dprintkl(KERN_DEBUG,
3374 "tag_mask for <%02i-%i> should be empty, is %08x!\n",
3375 dcb->target_id, dcb->target_lun,
3376 dcb->tag_mask);
3377
3378
3379 list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
3380 p = srb->cmd;
3381
3382 printk("W:%p<%02i-%i>", p, p->device->id,
3383 (u8)p->device->lun);
3384 list_move_tail(&srb->list, &acb->srb_free_list);
3385 set_host_byte(p, did_flag);
3386 set_status_byte(p, SAM_STAT_GOOD);
3387 pci_unmap_srb_sense(acb, srb);
3388 pci_unmap_srb(acb, srb);
3389 if (force) {
3390
3391
3392 scsi_done(p);
3393 }
3394 }
3395 if (!list_empty(&dcb->srb_waiting_list))
3396 dprintkl(KERN_DEBUG, "ML queued %i cmnds again to <%02i-%i>\n",
3397 list_size(&dcb->srb_waiting_list), dcb->target_id,
3398 dcb->target_lun);
3399 dcb->flag &= ~ABORT_DEV_;
3400 }
3401 printk("\n");
3402 }
3403
3404
3405 static void reset_scsi_bus(struct AdapterCtlBlk *acb)
3406 {
3407 dprintkdbg(DBG_0, "reset_scsi_bus: acb=%p\n", acb);
3408 acb->acb_flag |= RESET_DEV;
3409 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
3410
3411 while (!(DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET))
3412 ;
3413 }
3414
3415
3416 static void set_basic_config(struct AdapterCtlBlk *acb)
3417 {
3418 u8 bval;
3419 u16 wval;
3420 DC395x_write8(acb, TRM_S1040_SCSI_TIMEOUT, acb->sel_timeout);
3421 if (acb->config & HCC_PARITY)
3422 bval = PHASELATCH | INITIATOR | BLOCKRST | PARITYCHECK;
3423 else
3424 bval = PHASELATCH | INITIATOR | BLOCKRST;
3425
3426 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG0, bval);
3427
3428
3429 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG1, 0x03);
3430
3431 DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
3432
3433 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, 0x00);
3434
3435 wval = DC395x_read16(acb, TRM_S1040_GEN_CONTROL) & 0x7F;
3436 DC395x_write16(acb, TRM_S1040_GEN_CONTROL, wval);
3437
3438 wval = DC395x_read16(acb, TRM_S1040_DMA_CONFIG) & ~DMA_FIFO_CTRL;
3439 wval |=
3440 DMA_FIFO_HALF_HALF | DMA_ENHANCE ;
3441 DC395x_write16(acb, TRM_S1040_DMA_CONFIG, wval);
3442
3443 DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
3444
3445 DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x7F);
3446 DC395x_write8(acb, TRM_S1040_DMA_INTEN, EN_SCSIINTR | EN_DMAXFERERROR
3447
3448 );
3449 }
3450
3451
3452 static void scsi_reset_detect(struct AdapterCtlBlk *acb)
3453 {
3454 dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb);
3455
3456 if (timer_pending(&acb->waiting_timer))
3457 del_timer(&acb->waiting_timer);
3458
3459 DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
3460 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
3461
3462 udelay(500);
3463
3464 acb->last_reset =
3465 jiffies + 5 * HZ / 2 +
3466 HZ * acb->eeprom.delay_time;
3467
3468 clear_fifo(acb, "scsi_reset_detect");
3469 set_basic_config(acb);
3470
3471
3472
3473 if (acb->acb_flag & RESET_DEV) {
3474 acb->acb_flag |= RESET_DONE;
3475 } else {
3476 acb->acb_flag |= RESET_DETECT;
3477 reset_dev_param(acb);
3478 doing_srb_done(acb, DID_RESET, NULL, 1);
3479
3480 acb->active_dcb = NULL;
3481 acb->acb_flag = 0;
3482 waiting_process_next(acb);
3483 }
3484 }
3485
3486
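/*
 * Start an automatic REQUEST SENSE for a command that returned CHECK
 * CONDITION: the current transfer state is parked in the last SG slot,
 * the sense buffer is mapped for DMA and the command is restarted with
 * the AUTO_REQSENSE flag set.
 */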
3487 static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3488 struct ScsiReqBlk *srb)
3489 {
3490 struct scsi_cmnd *cmd = srb->cmd;
3491 dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n",
3492 cmd, cmd->device->id, (u8)cmd->device->lun);
3493
3494 srb->flag |= AUTO_REQSENSE;
3495 srb->adapter_status = 0;
3496 srb->target_status = 0;
3497
3498
3499 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3500
3501
3502 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address =
3503 srb->segment_x[0].address;
3504 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length =
3505 srb->segment_x[0].length;
3506 srb->xferred = srb->total_xfer_length;
3507
3508 srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
3509 srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
3510
3511 srb->segment_x[0].address = dma_map_single(&acb->dev->dev,
3512 cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
3513 DMA_FROM_DEVICE);
3514 dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
3515 cmd->sense_buffer, srb->segment_x[0].address,
3516 SCSI_SENSE_BUFFERSIZE);
3517 srb->sg_count = 1;
3518 srb->sg_index = 0;
3519
3520 if (start_scsi(acb, dcb, srb)) {
3521 dprintkl(KERN_DEBUG,
3522 "request_sense: (0x%p) failed <%02i-%i>\n",
3523 srb->cmd, dcb->target_id, dcb->target_lun);
3524 list_move(&srb->list, &dcb->srb_waiting_list);
3525 waiting_set_timer(acb, HZ / 100);
3526 }
3527 }
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
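/*
 * Allocate and initialise a device control block for <target,lun>
 * using the EEPROM settings.  For LUNs other than 0 the negotiated
 * transfer parameters are copied from an existing DCB of the same
 * target (and allocation fails if none exists yet).
 */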
3543 static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
3544 u8 target, u8 lun)
3545 {
3546 struct NvRamType *eeprom = &acb->eeprom;
3547 u8 period_index = eeprom->target[target].period & 0x07;
3548 struct DeviceCtlBlk *dcb;
3549
3550 dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC);
3551 dprintkdbg(DBG_0, "device_alloc: <%02i-%i>\n", target, lun);
3552 if (!dcb)
3553 return NULL;
3554 dcb->acb = NULL;
3555 INIT_LIST_HEAD(&dcb->srb_going_list);
3556 INIT_LIST_HEAD(&dcb->srb_waiting_list);
3557 dcb->active_srb = NULL;
3558 dcb->tag_mask = 0;
3559 dcb->max_command = 1;
3560 dcb->target_id = target;
3561 dcb->target_lun = lun;
3562 dcb->dev_mode = eeprom->target[target].cfg0;
3563 #ifndef DC395x_NO_DISCONNECT
3564 dcb->identify_msg =
3565 IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
3566 #else
3567 dcb->identify_msg = IDENTIFY(0, lun);
3568 #endif
3569 dcb->inquiry7 = 0;
3570 dcb->sync_mode = 0;
3571 dcb->min_nego_period = clock_period[period_index];
3572 dcb->sync_period = 0;
3573 dcb->sync_offset = 0;
3574 dcb->flag = 0;
3575
3576 #ifndef DC395x_NO_WIDE
3577 if ((dcb->dev_mode & NTC_DO_WIDE_NEGO)
3578 && (acb->config & HCC_WIDE_CARD))
3579 dcb->sync_mode |= WIDE_NEGO_ENABLE;
3580 #endif
3581 #ifndef DC395x_NO_SYNC
3582 if (dcb->dev_mode & NTC_DO_SYNC_NEGO)
3583 if (!(lun) || current_sync_offset)
3584 dcb->sync_mode |= SYNC_NEGO_ENABLE;
3585 #endif
3586 if (dcb->target_lun != 0) {
3587
3588 struct DeviceCtlBlk *p = NULL, *iter;
3589
3590 list_for_each_entry(iter, &acb->dcb_list, list)
3591 if (iter->target_id == dcb->target_id) {
3592 p = iter;
3593 break;
3594 }
3595
3596 if (!p) {
3597 kfree(dcb);
3598 return NULL;
3599 }
3600
3601 dprintkdbg(DBG_1,
3602 "device_alloc: <%02i-%i> copy from <%02i-%i>\n",
3603 dcb->target_id, dcb->target_lun,
3604 p->target_id, p->target_lun);
3605 dcb->sync_mode = p->sync_mode;
3606 dcb->sync_period = p->sync_period;
3607 dcb->min_nego_period = p->min_nego_period;
3608 dcb->sync_offset = p->sync_offset;
3609 dcb->inquiry7 = p->inquiry7;
3610 }
3611 return dcb;
3612 }
3613
3614
3615
3616
3617
3618
3619
3620
3621 static void adapter_add_device(struct AdapterCtlBlk *acb,
3622 struct DeviceCtlBlk *dcb)
3623 {
3624
3625 dcb->acb = acb;
3626
3627
3628 if (list_empty(&acb->dcb_list))
3629 acb->dcb_run_robin = dcb;
3630
3631
3632 list_add_tail(&dcb->list, &acb->dcb_list);
3633
3634
3635 acb->dcb_map[dcb->target_id] |= (1 << dcb->target_lun);
3636 acb->children[dcb->target_id][dcb->target_lun] = dcb;
3637 }
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649 static void adapter_remove_device(struct AdapterCtlBlk *acb,
3650 struct DeviceCtlBlk *dcb)
3651 {
3652 struct DeviceCtlBlk *i;
3653 struct DeviceCtlBlk *tmp;
3654 dprintkdbg(DBG_0, "adapter_remove_device: <%02i-%i>\n",
3655 dcb->target_id, dcb->target_lun);
3656
3657
3658 if (acb->active_dcb == dcb)
3659 acb->active_dcb = NULL;
3660 if (acb->dcb_run_robin == dcb)
3661 acb->dcb_run_robin = dcb_get_next(&acb->dcb_list, dcb);
3662
3663
3664 list_for_each_entry_safe(i, tmp, &acb->dcb_list, list)
3665 if (dcb == i) {
3666 list_del(&i->list);
3667 break;
3668 }
3669
3670
3671 acb->dcb_map[dcb->target_id] &= ~(1 << dcb->target_lun);
3672 acb->children[dcb->target_id][dcb->target_lun] = NULL;
3673 dcb->acb = NULL;
3674 }
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684 static void adapter_remove_and_free_device(struct AdapterCtlBlk *acb,
3685 struct DeviceCtlBlk *dcb)
3686 {
3687 if (list_size(&dcb->srb_going_list) > 1) {
3688 dprintkdbg(DBG_1, "adapter_remove_and_free_device: <%02i-%i> "
3689 "Won't remove because of %i active requests.\n",
3690 dcb->target_id, dcb->target_lun,
3691 list_size(&dcb->srb_going_list));
3692 return;
3693 }
3694 adapter_remove_device(acb, dcb);
3695 kfree(dcb);
3696 }
3697
3698
3699
3700
3701
3702
3703
3704
3705 static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb)
3706 {
3707 struct DeviceCtlBlk *dcb;
3708 struct DeviceCtlBlk *tmp;
3709 dprintkdbg(DBG_1, "adapter_remove_and_free_all_devices: num=%i\n",
3710 list_size(&acb->dcb_list));
3711
3712 list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list)
3713 adapter_remove_and_free_device(acb, dcb);
3714 }
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724 static int dc395x_slave_alloc(struct scsi_device *scsi_device)
3725 {
3726 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
3727 struct DeviceCtlBlk *dcb;
3728
3729 dcb = device_alloc(acb, scsi_device->id, scsi_device->lun);
3730 if (!dcb)
3731 return -ENOMEM;
3732 adapter_add_device(acb, dcb);
3733
3734 return 0;
3735 }
3736
3737
3738
3739
3740
3741
3742
3743
3744 static void dc395x_slave_destroy(struct scsi_device *scsi_device)
3745 {
3746 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
3747 struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun);
3748 if (dcb)
3749 adapter_remove_and_free_device(acb, dcb);
3750 }
3751
3752
3753
3754
3755
3756
3757
3758
3759
3760
3761
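/*
 * Busy-wait on the chip's general purpose timer (roughly 30us, as the
 * function name suggests).
 */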
3762 static void trms1040_wait_30us(unsigned long io_port)
3763 {
3764
3765 outb(5, io_port + TRM_S1040_GEN_TIMER);
3766 while (!(inb(io_port + TRM_S1040_GEN_STATUS) & GTIMEOUT))
3767 ;
3768 }
3769
3770
3771
3772
3773
3774
3775
3776
3777
3778
3779 static void trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr)
3780 {
3781 int i;
3782 u8 send_data;
3783
3784
3785 for (i = 0; i < 3; i++, cmd <<= 1) {
3786 send_data = NVR_SELECT;
3787 if (cmd & 0x04)
3788 send_data |= NVR_BITOUT;
3789
3790 outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
3791 trms1040_wait_30us(io_port);
3792 outb((send_data | NVR_CLOCK),
3793 io_port + TRM_S1040_GEN_NVRAM);
3794 trms1040_wait_30us(io_port);
3795 }
3796
3797
3798 for (i = 0; i < 7; i++, addr <<= 1) {
3799 send_data = NVR_SELECT;
3800 if (addr & 0x40)
3801 send_data |= NVR_BITOUT;
3802
3803 outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
3804 trms1040_wait_30us(io_port);
3805 outb((send_data | NVR_CLOCK),
3806 io_port + TRM_S1040_GEN_NVRAM);
3807 trms1040_wait_30us(io_port);
3808 }
3809 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
3810 trms1040_wait_30us(io_port);
3811 }
3812
3813
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824 static void trms1040_set_data(unsigned long io_port, u8 addr, u8 byte)
3825 {
3826 int i;
3827 u8 send_data;
3828
3829
3830 trms1040_write_cmd(io_port, 0x05, addr);
3831
3832
3833 for (i = 0; i < 8; i++, byte <<= 1) {
3834 send_data = NVR_SELECT;
3835 if (byte & 0x80)
3836 send_data |= NVR_BITOUT;
3837
3838 outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
3839 trms1040_wait_30us(io_port);
3840 outb((send_data | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
3841 trms1040_wait_30us(io_port);
3842 }
3843 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
3844 trms1040_wait_30us(io_port);
3845
3846
3847 outb(0, io_port + TRM_S1040_GEN_NVRAM);
3848 trms1040_wait_30us(io_port);
3849
3850 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
3851 trms1040_wait_30us(io_port);
3852
3853
3854 while (1) {
3855 outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
3856 trms1040_wait_30us(io_port);
3857
3858 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
3859 trms1040_wait_30us(io_port);
3860
3861 if (inb(io_port + TRM_S1040_GEN_NVRAM) & NVR_BITIN)
3862 break;
3863 }
3864
3865
3866 outb(0, io_port + TRM_S1040_GEN_NVRAM);
3867 }
3868
3869
3870
3871
3872
3873
3874
3875
3876
3877
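/*
 * Write the 128 byte NVRam image to the serial EEPROM: enable EEPROM
 * access, issue the enable command (0x04/0xFF), program every byte,
 * then issue the disable command (0x04/0x00).
 */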
3878 static void trms1040_write_all(struct NvRamType *eeprom, unsigned long io_port)
3879 {
3880 u8 *b_eeprom = (u8 *)eeprom;
3881 u8 addr;
3882
3883
3884 outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
3885 io_port + TRM_S1040_GEN_CONTROL);
3886
3887
3888 trms1040_write_cmd(io_port, 0x04, 0xFF);
3889 outb(0, io_port + TRM_S1040_GEN_NVRAM);
3890 trms1040_wait_30us(io_port);
3891
3892
3893 for (addr = 0; addr < 128; addr++, b_eeprom++)
3894 trms1040_set_data(io_port, addr, *b_eeprom);
3895
3896
3897 trms1040_write_cmd(io_port, 0x04, 0x00);
3898 outb(0, io_port + TRM_S1040_GEN_NVRAM);
3899 trms1040_wait_30us(io_port);
3900
3901
3902 outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
3903 io_port + TRM_S1040_GEN_CONTROL);
3904 }
3905
3906
3907
3908
3909
3910
3911
3912
3913
3914
3915
3916
3917
3918 static u8 trms1040_get_data(unsigned long io_port, u8 addr)
3919 {
3920 int i;
3921 u8 read_byte;
3922 u8 result = 0;
3923
3924
3925 trms1040_write_cmd(io_port, 0x06, addr);
3926
3927
3928 for (i = 0; i < 8; i++) {
3929 outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
3930 trms1040_wait_30us(io_port);
3931 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
3932
3933
3934 read_byte = inb(io_port + TRM_S1040_GEN_NVRAM);
3935 result <<= 1;
3936 if (read_byte & NVR_BITIN)
3937 result |= 1;
3938
3939 trms1040_wait_30us(io_port);
3940 }
3941
3942
3943 outb(0, io_port + TRM_S1040_GEN_NVRAM);
3944 return result;
3945 }
3946
3947
3948
3949
3950
3951
3952
3953
3954
3955
3956 static void trms1040_read_all(struct NvRamType *eeprom, unsigned long io_port)
3957 {
3958 u8 *b_eeprom = (u8 *)eeprom;
3959 u8 addr;
3960
3961
3962 outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
3963 io_port + TRM_S1040_GEN_CONTROL);
3964
3965
3966 for (addr = 0; addr < 128; addr++, b_eeprom++)
3967 *b_eeprom = trms1040_get_data(io_port, addr);
3968
3969
3970 outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
3971 io_port + TRM_S1040_GEN_CONTROL);
3972 }
3973
3974
3975
3976
3977
3978
3979
3980
3981
3982
3983
3984
3985
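/*
 * Read the EEPROM and verify that its 16-bit words sum to 0x1234.  On
 * a checksum error, fill in default settings, apply the module option
 * overrides and write the corrected image back; otherwise convert the
 * delay index and apply the overrides.
 */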
3986 static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
3987 {
3988 u16 *w_eeprom = (u16 *)eeprom;
3989 u16 w_addr;
3990 u16 cksum;
3991 u32 d_addr;
3992 u32 *d_eeprom;
3993
3994 trms1040_read_all(eeprom, io_port);
3995
3996 cksum = 0;
3997 for (w_addr = 0, w_eeprom = (u16 *)eeprom; w_addr < 64;
3998 w_addr++, w_eeprom++)
3999 cksum += *w_eeprom;
4000 if (cksum != 0x1234) {
4001
4002
4003
4004
4005 dprintkl(KERN_WARNING,
4006 "EEProm checksum error: using default values and options.\n");
4007 eeprom->sub_vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
4008 eeprom->sub_vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
4009 eeprom->sub_sys_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
4010 eeprom->sub_sys_id[1] =
4011 (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
4012 eeprom->sub_class = 0x00;
4013 eeprom->vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
4014 eeprom->vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
4015 eeprom->device_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
4016 eeprom->device_id[1] =
4017 (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
4018 eeprom->reserved = 0x00;
4019
4020 for (d_addr = 0, d_eeprom = (u32 *)eeprom->target;
4021 d_addr < 16; d_addr++, d_eeprom++)
4022 *d_eeprom = 0x00000077;
4023
4024 *d_eeprom++ = 0x04000F07;
4025 *d_eeprom++ = 0x00000015;
4026 for (d_addr = 0; d_addr < 12; d_addr++, d_eeprom++)
4027 *d_eeprom = 0x00;
4028
4029
4030 set_safe_settings();
4031 fix_settings();
4032 eeprom_override(eeprom);
4033
4034 eeprom->cksum = 0x00;
4035 for (w_addr = 0, cksum = 0, w_eeprom = (u16 *)eeprom;
4036 w_addr < 63; w_addr++, w_eeprom++)
4037 cksum += *w_eeprom;
4038
4039 *w_eeprom = 0x1234 - cksum;
4040 trms1040_write_all(eeprom, io_port);
4041 eeprom->delay_time = cfg_data[CFG_RESET_DELAY].value;
4042 } else {
4043 set_safe_settings();
4044 eeprom_index_to_delay(eeprom);
4045 eeprom_override(eeprom);
4046 }
4047 }
4048
4049
4050
4051
4052
4053
4054
4055
4056 static void print_eeprom_settings(struct NvRamType *eeprom)
4057 {
4058 dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n",
4059 eeprom->scsi_id,
4060 eeprom->target[0].period,
4061 clock_speed[eeprom->target[0].period] / 10,
4062 clock_speed[eeprom->target[0].period] % 10,
4063 eeprom->target[0].cfg0);
4064 dprintkl(KERN_INFO, " AdaptMode=0x%02x, Tags=%i(%02i), DelayReset=%is\n",
4065 eeprom->channel_cfg, eeprom->max_tag,
4066 1 << eeprom->max_tag, eeprom->delay_time);
4067 }
4068
4069
4070
4071 static void adapter_sg_tables_free(struct AdapterCtlBlk *acb)
4072 {
4073 int i;
4074 const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
4075
4076 for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page)
4077 kfree(acb->srb_array[i].segment_x);
4078 }
4079
4080
4081
4082
4083
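/*
 * Allocate the scatter/gather descriptor tables for all SRBs, packing
 * as many SEGMENTX_LEN sized tables into each page as fit; a spare
 * slot on the last page is used for the adapter's internal SRB.
 * Returns 0 on success, 1 on allocation failure.
 */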
4084 static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
4085 {
4086 const unsigned mem_needed = (DC395x_MAX_SRB_CNT+1)
4087 *SEGMENTX_LEN;
4088 int pages = (mem_needed+(PAGE_SIZE-1))/PAGE_SIZE;
4089 const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
4090 int srb_idx = 0;
4091 unsigned i = 0;
4092 struct SGentry *ptr;
4093
4094 for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
4095 acb->srb_array[i].segment_x = NULL;
4096
4097 dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
4098 while (pages--) {
4099 ptr = kmalloc(PAGE_SIZE, GFP_KERNEL);
4100 if (!ptr) {
4101 adapter_sg_tables_free(acb);
4102 return 1;
4103 }
4104 dprintkdbg(DBG_1, "Allocate %li bytes at %p for SG segments %i\n",
4105 PAGE_SIZE, ptr, srb_idx);
4106 i = 0;
4107 while (i < srbs_per_page && srb_idx < DC395x_MAX_SRB_CNT)
4108 acb->srb_array[srb_idx++].segment_x =
4109 ptr + (i++ * DC395x_MAX_SG_LISTENTRY);
4110 }
4111 if (i < srbs_per_page)
4112 acb->srb.segment_x =
4113 ptr + (i * DC395x_MAX_SG_LISTENTRY);
4114 else
4115 dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");
4116 return 0;
4117 }
4118
4119
4120
4121
4122
4123
4124
4125
4126
4127
4128
4129
4130 static void adapter_print_config(struct AdapterCtlBlk *acb)
4131 {
4132 u8 bval;
4133
4134 bval = DC395x_read8(acb, TRM_S1040_GEN_STATUS);
4135 dprintkl(KERN_INFO, "%sConnectors: ",
4136 ((bval & WIDESCSI) ? "(Wide) " : ""));
4137 if (!(bval & CON5068))
4138 printk("ext%s ", !(bval & EXT68HIGH) ? "68" : "50");
4139 if (!(bval & CON68))
4140 printk("int68%s ", !(bval & INT68HIGH) ? "" : "(50)");
4141 if (!(bval & CON50))
4142 printk("int50 ");
4143 if ((bval & (CON5068 | CON50 | CON68)) == 0)
4145 printk(" Oops! (All 3?) ");
4146 bval = DC395x_read8(acb, TRM_S1040_GEN_CONTROL);
4147 printk(" Termination: ");
4148 if (bval & DIS_TERM)
4149 printk("Disabled\n");
4150 else {
4151 if (bval & AUTOTERM)
4152 printk("Auto ");
4153 if (bval & LOW8TERM)
4154 printk("Low ");
4155 if (bval & UP8TERM)
4156 printk("High ");
4157 printk("\n");
4158 }
4159 }
4160
4161
4162
4163
4164
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174 static void adapter_init_params(struct AdapterCtlBlk *acb)
4175 {
4176 struct NvRamType *eeprom = &acb->eeprom;
4177 int i;
4178
4179
4180
4181
4182
4183 INIT_LIST_HEAD(&acb->dcb_list);
4184 acb->dcb_run_robin = NULL;
4185 acb->active_dcb = NULL;
4186
4187 INIT_LIST_HEAD(&acb->srb_free_list);
4188
4189 acb->tmp_srb = &acb->srb;
4190 timer_setup(&acb->waiting_timer, waiting_timeout, 0);
4191 timer_setup(&acb->selto_timer, NULL, 0);
4192
4193 acb->srb_count = DC395x_MAX_SRB_CNT;
4194
4195 acb->sel_timeout = DC395x_SEL_TIMEOUT;
4196
4197
4198 acb->tag_max_num = 1 << eeprom->max_tag;
4199 if (acb->tag_max_num > 30)
4200 acb->tag_max_num = 30;
4201
4202 acb->acb_flag = 0;
4203 acb->gmode2 = eeprom->channel_cfg;
4204 acb->config = 0;
4205
4206 if (eeprom->channel_cfg & NAC_SCANLUN)
4207 acb->lun_chk = 1;
4208 acb->scan_devices = 1;
4209
4210 acb->scsi_host->this_id = eeprom->scsi_id;
4211 acb->hostid_bit = (1 << acb->scsi_host->this_id);
4212
4213 for (i = 0; i < DC395x_MAX_SCSI_ID; i++)
4214 acb->dcb_map[i] = 0;
4215
4216 acb->msg_len = 0;
4217
4218
4219 for (i = 0; i < acb->srb_count - 1; i++)
4220 list_add_tail(&acb->srb_array[i].list, &acb->srb_free_list);
4221 }
4222
4223
4224
4225
4226
4227
4228
4229
4230
4231
4232
4233
4234
4235
4236 static void adapter_init_scsi_host(struct Scsi_Host *host)
4237 {
4238 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
4239 struct NvRamType *eeprom = &acb->eeprom;
4240
4241 host->max_cmd_len = 24;
4242 host->can_queue = DC395x_MAX_CMD_QUEUE;
4243 host->cmd_per_lun = DC395x_MAX_CMD_PER_LUN;
4244 host->this_id = (int)eeprom->scsi_id;
4245 host->io_port = acb->io_port_base;
4246 host->n_io_port = acb->io_port_len;
4247 host->dma_channel = -1;
4248 host->unique_id = acb->io_port_base;
4249 host->irq = acb->irq_level;
4250 acb->last_reset = jiffies;
4251
4252 host->max_id = 16;
4253 if (host->max_id - 1 == eeprom->scsi_id)
4254 host->max_id--;
4255
4256 if (eeprom->channel_cfg & NAC_SCANLUN)
4257 host->max_lun = 8;
4258 else
4259 host->max_lun = 1;
4260 }
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270
4271
4272 static void adapter_init_chip(struct AdapterCtlBlk *acb)
4273 {
4274 struct NvRamType *eeprom = &acb->eeprom;
4275
4276
4277 DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
4278 DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
4279
4280
4281 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
4282
4283
4284 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
4285 udelay(20);
4286
4287
4288 acb->config = HCC_AUTOTERM | HCC_PARITY;
4289 if (DC395x_read8(acb, TRM_S1040_GEN_STATUS) & WIDESCSI)
4290 acb->config |= HCC_WIDE_CARD;
4291
4292 if (eeprom->channel_cfg & NAC_POWERON_SCSI_RESET)
4293 acb->config |= HCC_SCSI_RESET;
4294
4295 if (acb->config & HCC_SCSI_RESET) {
4296 dprintkl(KERN_INFO, "Performing initial SCSI bus reset\n");
4297 DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
4298
4299
4300
4301 udelay(500);
4302
4303 acb->last_reset =
4304 jiffies + HZ / 2 +
4305 HZ * acb->eeprom.delay_time;
4306
4307
4308 }
4309 }
4310
4311
4312
4313
4314
4315
4316
4317
4318
4319
4320
4321
4322
4323
4324
4325
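/*
 * Bring up one adapter instance: reserve the I/O region, register the
 * interrupt handler, read and check the EEPROM, initialise the driver
 * structures, allocate SG tables and program the chip.  Returns 0 on
 * success, 1 on failure (anything acquired so far is released again).
 */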
4326 static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port,
4327 u32 io_port_len, unsigned int irq)
4328 {
4329 if (!request_region(io_port, io_port_len, DC395X_NAME)) {
4330 dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port);
4331 goto failed;
4332 }
4333
4334 acb->io_port_base = io_port;
4335 acb->io_port_len = io_port_len;
4336
4337 if (request_irq(irq, dc395x_interrupt, IRQF_SHARED, DC395X_NAME, acb)) {
4338
4339 dprintkl(KERN_INFO, "Failed to register IRQ\n");
4340 goto failed;
4341 }
4342
4343 acb->irq_level = irq;
4344
4345
4346 check_eeprom(&acb->eeprom, io_port);
4347 print_eeprom_settings(&acb->eeprom);
4348
4349
4350 adapter_init_params(acb);
4351
4352
4353 adapter_print_config(acb);
4354
4355 if (adapter_sg_tables_alloc(acb)) {
4356 dprintkl(KERN_DEBUG, "Memory allocation for SG tables failed\n");
4357 goto failed;
4358 }
4359 adapter_init_scsi_host(acb->scsi_host);
4360 adapter_init_chip(acb);
4361 set_basic_config(acb);
4362
4363 dprintkdbg(DBG_0,
4364 "adapter_init: acb=%p, pdcb_map=%p psrb_array=%p "
4365 "size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n",
4366 acb, acb->dcb_map, acb->srb_array, sizeof(struct AdapterCtlBlk),
4367 sizeof(struct DeviceCtlBlk), sizeof(struct ScsiReqBlk));
4368 return 0;
4369
4370 failed:
4371 if (acb->irq_level)
4372 free_irq(acb->irq_level, acb);
4373 if (acb->io_port_base)
4374 release_region(acb->io_port_base, acb->io_port_len);
4375 adapter_sg_tables_free(acb);
4376
4377 return 1;
4378 }
4379
4380
4381
4382
4383
4384
4385
4386
4387
4388 static void adapter_uninit_chip(struct AdapterCtlBlk *acb)
4389 {
4390
4391 DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0);
4392 DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0);
4393
4394
4395 if (acb->config & HCC_SCSI_RESET)
4396 reset_scsi_bus(acb);
4397
4398
4399 DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
4400 }
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411 static void adapter_uninit(struct AdapterCtlBlk *acb)
4412 {
4413 unsigned long flags;
4414 DC395x_LOCK_IO(acb->scsi_host, flags);
4415
4416
4417 if (timer_pending(&acb->waiting_timer))
4418 del_timer(&acb->waiting_timer);
4419 if (timer_pending(&acb->selto_timer))
4420 del_timer(&acb->selto_timer);
4421
4422 adapter_uninit_chip(acb);
4423 adapter_remove_and_free_all_devices(acb);
4424 DC395x_UNLOCK_IO(acb->scsi_host, flags);
4425
4426 if (acb->irq_level)
4427 free_irq(acb->irq_level, acb);
4428 if (acb->io_port_base)
4429 release_region(acb->io_port_base, acb->io_port_len);
4430
4431 adapter_sg_tables_free(acb);
4432 }
4433
4434
4435 #undef YESNO
4436 #define YESNO(YN) \
4437 if (YN) seq_printf(m, " Yes ");\
4438 else seq_printf(m, " No ")
4439
static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
	int spd, spd1;
	struct DeviceCtlBlk *dcb;
	unsigned long flags;
	int dev;

	seq_puts(m, DC395X_BANNER " PCI SCSI Host Adapter\n"
		 " Driver Version " DC395X_VERSION "\n");

	DC395x_LOCK_IO(acb->scsi_host, flags);

	seq_printf(m, "SCSI Host Nr %i, ", host->host_no);
	seq_printf(m, "DC395U/UW/F DC315/U %s\n",
		   (acb->config & HCC_WIDE_CARD) ? "Wide" : "");
	seq_printf(m, "io_port_base 0x%04lx, ", acb->io_port_base);
	seq_printf(m, "irq_level 0x%04x, ", acb->irq_level);
	seq_printf(m, " SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000);

	seq_printf(m, "MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun);
	seq_printf(m, "AdapterID %i\n", host->this_id);

	seq_printf(m, "tag_max_num %i", acb->tag_max_num);

	seq_printf(m, ", FilterCfg 0x%02x",
		   DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1));
	seq_printf(m, ", DelayReset %is\n", acb->eeprom.delay_time);

	seq_printf(m, "Nr of DCBs: %i\n", list_size(&acb->dcb_list));
	seq_printf(m, "Map of attached LUNs: %8ph\n", &acb->dcb_map[0]);
	seq_printf(m, " %8ph\n", &acb->dcb_map[8]);

	seq_puts(m,
		 "Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");

	dev = 0;
	list_for_each_entry(dcb, &acb->dcb_list, list) {
		int nego_period;
		seq_printf(m, "%02i %02i %02i ", dev, dcb->target_id,
			   dcb->target_lun);
		YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK);
		YESNO(dcb->sync_offset);
		YESNO(dcb->sync_period & WIDE_SYNC);
		YESNO(dcb->dev_mode & NTC_DO_DISCONNECT);
		YESNO(dcb->dev_mode & NTC_DO_SEND_START);
		YESNO(dcb->sync_mode & EN_TAG_QUEUEING);
		nego_period = clock_period[dcb->sync_period & 0x07] << 2;
		if (dcb->sync_offset)
			seq_printf(m, " %03i ns ", nego_period);
		else
			seq_printf(m, " (%03i ns)", (dcb->min_nego_period << 2));

		if (dcb->sync_offset & 0x0f) {
			spd = 1000 / (nego_period);
			spd1 = 1000 % (nego_period);
			spd1 = (spd1 * 10 + nego_period / 2) / (nego_period);
			seq_printf(m, " %2i.%1i M %02i ", spd, spd1,
				   (dcb->sync_offset & 0x0f));
		} else
			seq_puts(m, " ");

		seq_printf(m, " %02i\n", dcb->max_command);
		dev++;
	}

	if (timer_pending(&acb->waiting_timer))
		seq_puts(m, "Waiting queue timer running\n");
	else
		seq_putc(m, '\n');

	list_for_each_entry(dcb, &acb->dcb_list, list) {
		struct ScsiReqBlk *srb;
		if (!list_empty(&dcb->srb_waiting_list))
			seq_printf(m, "DCB (%02i-%i): Waiting: %i:",
				   dcb->target_id, dcb->target_lun,
				   list_size(&dcb->srb_waiting_list));
		list_for_each_entry(srb, &dcb->srb_waiting_list, list)
			seq_printf(m, " %p", srb->cmd);
		if (!list_empty(&dcb->srb_going_list))
			seq_printf(m, "\nDCB (%02i-%i): Going : %i:",
				   dcb->target_id, dcb->target_lun,
				   list_size(&dcb->srb_going_list));
		list_for_each_entry(srb, &dcb->srb_going_list, list)
			seq_printf(m, " %p", srb->cmd);
		if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list))
			seq_putc(m, '\n');
	}

	if (debug_enabled(DBG_1)) {
		seq_printf(m, "DCB list for ACB %p:\n", acb);
		list_for_each_entry(dcb, &acb->dcb_list, list) {
			seq_printf(m, "%p -> ", dcb);
		}
		seq_puts(m, "END\n");
	}

	DC395x_UNLOCK_IO(acb->scsi_host, flags);
	return 0;
}


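/*
 * SCSI host template: hooks this driver's entry points (queuecommand,
 * error handlers, slave alloc/destroy and the /proc show routine) into
 * the SCSI midlayer.
 */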
static struct scsi_host_template dc395x_driver_template = {
	.module = THIS_MODULE,
	.proc_name = DC395X_NAME,
	.show_info = dc395x_show_info,
	.name = DC395X_BANNER " " DC395X_VERSION,
	.queuecommand = dc395x_queue_command,
	.slave_alloc = dc395x_slave_alloc,
	.slave_destroy = dc395x_slave_destroy,
	.can_queue = DC395x_MAX_CAN_QUEUE,
	.this_id = 7,
	.sg_tablesize = DC395x_MAX_SG_TABLESIZE,
	.cmd_per_lun = DC395x_MAX_CMD_PER_LUN,
	.eh_abort_handler = dc395x_eh_abort,
	.eh_bus_reset_handler = dc395x_eh_bus_reset,
	.dma_boundary = PAGE_SIZE - 1,
};


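/* Print the driver banner once, when the first adapter is probed. */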
static void banner_display(void)
{
	static int banner_done = 0;

	if (!banner_done) {
		dprintkl(KERN_INFO, "%s %s\n", DC395X_BANNER, DC395X_VERSION);
		banner_done = 1;
	}
}


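/*
 * dc395x_init_one - PCI probe callback, called once for each TRM-S1040
 * based adapter found. Enables the PCI device, allocates the Scsi_Host
 * with an embedded AdapterCtlBlk, initialises the adapter hardware and
 * registers the host with the SCSI midlayer.
 *
 * Returns 0 on success or -ENODEV on any failure.
 */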
static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct Scsi_Host *scsi_host = NULL;
	struct AdapterCtlBlk *acb = NULL;
	unsigned long io_port_base;
	unsigned int io_port_len;
	unsigned int irq;

	dprintkdbg(DBG_0, "Init one instance (%s)\n", pci_name(dev));
	banner_display();

	if (pci_enable_device(dev)) {
		dprintkl(KERN_INFO, "PCI Enable device failed.\n");
		return -ENODEV;
	}
	io_port_base = pci_resource_start(dev, 0) & PCI_BASE_ADDRESS_IO_MASK;
	io_port_len = pci_resource_len(dev, 0);
	irq = dev->irq;
	dprintkdbg(DBG_0, "IO_PORT=0x%04lx, IRQ=0x%x\n", io_port_base, dev->irq);

	/* Allocate the Scsi_Host with our AdapterCtlBlk embedded in hostdata. */
	scsi_host = scsi_host_alloc(&dc395x_driver_template,
				    sizeof(struct AdapterCtlBlk));
	if (!scsi_host) {
		dprintkl(KERN_INFO, "scsi_host_alloc failed\n");
		goto fail;
	}
	acb = (struct AdapterCtlBlk *)scsi_host->hostdata;
	acb->scsi_host = scsi_host;
	acb->dev = dev;

	/* Set up the adapter hardware and the driver's internal state. */
	if (adapter_init(acb, io_port_base, io_port_len, irq)) {
		dprintkl(KERN_INFO, "adapter init failed\n");
		acb = NULL;
		goto fail;
	}

	pci_set_master(dev);

	/* Register with the SCSI midlayer and scan the bus for devices. */
	if (scsi_add_host(scsi_host, &dev->dev)) {
		dprintkl(KERN_ERR, "scsi_add_host failed\n");
		goto fail;
	}
	pci_set_drvdata(dev, scsi_host);
	scsi_scan_host(scsi_host);

	return 0;

fail:
	if (acb != NULL)
		adapter_uninit(acb);
	if (scsi_host != NULL)
		scsi_host_put(scsi_host);
	pci_disable_device(dev);
	return -ENODEV;
}


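/*
 * dc395x_remove_one - PCI remove callback. Unregisters the Scsi_Host,
 * shuts the adapter down and releases its resources.
 */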
static void dc395x_remove_one(struct pci_dev *dev)
{
	struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata);

	dprintkdbg(DBG_0, "dc395x_remove_one: acb=%p\n", acb);

	scsi_remove_host(scsi_host);
	adapter_uninit(acb);
	pci_disable_device(dev);
	scsi_host_put(scsi_host);
}


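/* PCI IDs handled by this driver: any subsystem of the Tekram TRM-S1040. */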
static struct pci_device_id dc395x_pci_table[] = {
	{
		.vendor = PCI_VENDOR_ID_TEKRAM,
		.device = PCI_DEVICE_ID_TEKRAM_TRMS1040,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{}
};
MODULE_DEVICE_TABLE(pci, dc395x_pci_table);


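/* PCI driver glue: ties the probe/remove callbacks to the ID table above. */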
static struct pci_driver dc395x_driver = {
	.name = DC395X_NAME,
	.id_table = dc395x_pci_table,
	.probe = dc395x_init_one,
	.remove = dc395x_remove_one,
};
module_pci_driver(dc395x_driver);

MODULE_AUTHOR("C.L. Huang / Erich Chen / Kurt Garloff");
MODULE_DESCRIPTION("SCSI host adapter driver for Tekram TRM-S1040 based adapters: Tekram DC395 and DC315 series");
MODULE_LICENSE("GPL");