0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164 #include <asm/io.h>
0165 #include <asm/byteorder.h>
0166 #include <asm/page.h>
0167 #include <linux/stddef.h>
0168 #include <linux/string.h>
0169 #include <linux/errno.h>
0170 #include <linux/kernel.h>
0171 #include <linux/ioport.h>
0172 #include <linux/slab.h>
0173 #include <linux/delay.h>
0174 #include <linux/pci.h>
0175 #include <linux/proc_fs.h>
0176 #include <linux/reboot.h>
0177 #include <linux/interrupt.h>
0178
0179 #include <linux/blkdev.h>
0180 #include <linux/types.h>
0181 #include <linux/dma-mapping.h>
0182
0183 #include <scsi/scsi.h>
0184 #include <scsi/scsi_cmnd.h>
0185 #include <scsi/scsi_device.h>
0186 #include <scsi/scsi_eh.h>
0187 #include <scsi/scsi_host.h>
0188 #include <scsi/scsi_tcq.h>
0189 #include <scsi/sg.h>
0190
0191 #include "ips.h"
0192
0193 #include <linux/module.h>
0194
0195 #include <linux/stat.h>
0196
0197 #include <linux/spinlock.h>
0198 #include <linux/init.h>
0199
0200 #include <linux/smp.h>
0201
0202 #ifdef MODULE
0203 static char *ips = NULL;
0204 module_param(ips, charp, 0);
0205 #endif
0206
0207
0208
0209
0210 #define IPS_VERSION_HIGH IPS_VER_MAJOR_STRING "." IPS_VER_MINOR_STRING
0211 #define IPS_VERSION_LOW "." IPS_VER_BUILD_STRING " "
0212
0213 #define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
0214 DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
0215 DMA_BIDIRECTIONAL : \
0216 scb->scsi_cmd->sc_data_direction)
0217
0218 #ifdef IPS_DEBUG
0219 #define METHOD_TRACE(s, i) if (ips_debug >= (i+10)) printk(KERN_NOTICE s "\n");
0220 #define DEBUG(i, s) if (ips_debug >= i) printk(KERN_NOTICE s "\n");
0221 #define DEBUG_VAR(i, s, v...) if (ips_debug >= i) printk(KERN_NOTICE s "\n", v);
0222 #else
0223 #define METHOD_TRACE(s, i)
0224 #define DEBUG(i, s)
0225 #define DEBUG_VAR(i, s, v...)
0226 #endif
0227
0228
0229
0230
0231 static int ips_eh_abort(struct scsi_cmnd *);
0232 static int ips_eh_reset(struct scsi_cmnd *);
0233 static int ips_queue(struct Scsi_Host *, struct scsi_cmnd *);
0234 static const char *ips_info(struct Scsi_Host *);
0235 static irqreturn_t do_ipsintr(int, void *);
0236 static int ips_hainit(ips_ha_t *);
0237 static int ips_map_status(ips_ha_t *, ips_scb_t *, ips_stat_t *);
0238 static int ips_send_wait(ips_ha_t *, ips_scb_t *, int, int);
0239 static int ips_send_cmd(ips_ha_t *, ips_scb_t *);
0240 static int ips_online(ips_ha_t *, ips_scb_t *);
0241 static int ips_inquiry(ips_ha_t *, ips_scb_t *);
0242 static int ips_rdcap(ips_ha_t *, ips_scb_t *);
0243 static int ips_msense(ips_ha_t *, ips_scb_t *);
0244 static int ips_reqsen(ips_ha_t *, ips_scb_t *);
0245 static int ips_deallocatescbs(ips_ha_t *, int);
0246 static int ips_allocatescbs(ips_ha_t *);
0247 static int ips_reset_copperhead(ips_ha_t *);
0248 static int ips_reset_copperhead_memio(ips_ha_t *);
0249 static int ips_reset_morpheus(ips_ha_t *);
0250 static int ips_issue_copperhead(ips_ha_t *, ips_scb_t *);
0251 static int ips_issue_copperhead_memio(ips_ha_t *, ips_scb_t *);
0252 static int ips_issue_i2o(ips_ha_t *, ips_scb_t *);
0253 static int ips_issue_i2o_memio(ips_ha_t *, ips_scb_t *);
0254 static int ips_isintr_copperhead(ips_ha_t *);
0255 static int ips_isintr_copperhead_memio(ips_ha_t *);
0256 static int ips_isintr_morpheus(ips_ha_t *);
0257 static int ips_wait(ips_ha_t *, int, int);
0258 static int ips_write_driver_status(ips_ha_t *, int);
0259 static int ips_read_adapter_status(ips_ha_t *, int);
0260 static int ips_read_subsystem_parameters(ips_ha_t *, int);
0261 static int ips_read_config(ips_ha_t *, int);
0262 static int ips_clear_adapter(ips_ha_t *, int);
0263 static int ips_readwrite_page5(ips_ha_t *, int, int);
0264 static int ips_init_copperhead(ips_ha_t *);
0265 static int ips_init_copperhead_memio(ips_ha_t *);
0266 static int ips_init_morpheus(ips_ha_t *);
0267 static int ips_isinit_copperhead(ips_ha_t *);
0268 static int ips_isinit_copperhead_memio(ips_ha_t *);
0269 static int ips_isinit_morpheus(ips_ha_t *);
0270 static int ips_erase_bios(ips_ha_t *);
0271 static int ips_program_bios(ips_ha_t *, char *, uint32_t, uint32_t);
0272 static int ips_verify_bios(ips_ha_t *, char *, uint32_t, uint32_t);
0273 static int ips_erase_bios_memio(ips_ha_t *);
0274 static int ips_program_bios_memio(ips_ha_t *, char *, uint32_t, uint32_t);
0275 static int ips_verify_bios_memio(ips_ha_t *, char *, uint32_t, uint32_t);
0276 static int ips_flash_copperhead(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
0277 static int ips_flash_bios(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
0278 static int ips_flash_firmware(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
0279 static void ips_free_flash_copperhead(ips_ha_t * ha);
0280 static void ips_get_bios_version(ips_ha_t *, int);
0281 static void ips_identify_controller(ips_ha_t *);
0282 static void ips_chkstatus(ips_ha_t *, IPS_STATUS *);
0283 static void ips_enable_int_copperhead(ips_ha_t *);
0284 static void ips_enable_int_copperhead_memio(ips_ha_t *);
0285 static void ips_enable_int_morpheus(ips_ha_t *);
0286 static int ips_intr_copperhead(ips_ha_t *);
0287 static int ips_intr_morpheus(ips_ha_t *);
0288 static void ips_next(ips_ha_t *, int);
0289 static void ipsintr_blocking(ips_ha_t *, struct ips_scb *);
0290 static void ipsintr_done(ips_ha_t *, struct ips_scb *);
0291 static void ips_done(ips_ha_t *, ips_scb_t *);
0292 static void ips_free(ips_ha_t *);
0293 static void ips_init_scb(ips_ha_t *, ips_scb_t *);
0294 static void ips_freescb(ips_ha_t *, ips_scb_t *);
0295 static void ips_setup_funclist(ips_ha_t *);
0296 static void ips_statinit(ips_ha_t *);
0297 static void ips_statinit_memio(ips_ha_t *);
0298 static void ips_fix_ffdc_time(ips_ha_t *, ips_scb_t *, time64_t);
0299 static void ips_ffdc_reset(ips_ha_t *, int);
0300 static void ips_ffdc_time(ips_ha_t *);
0301 static uint32_t ips_statupd_copperhead(ips_ha_t *);
0302 static uint32_t ips_statupd_copperhead_memio(ips_ha_t *);
0303 static uint32_t ips_statupd_morpheus(ips_ha_t *);
0304 static ips_scb_t *ips_getscb(ips_ha_t *);
0305 static void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *);
0306 static void ips_putq_wait_tail(ips_wait_queue_entry_t *, struct scsi_cmnd *);
0307 static void ips_putq_copp_tail(ips_copp_queue_t *,
0308 ips_copp_wait_item_t *);
0309 static ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *);
0310 static ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *);
0311 static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *);
0312 static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *,
0313 struct scsi_cmnd *);
0314 static ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *,
0315 ips_copp_wait_item_t *);
0316 static ips_copp_wait_item_t *ips_removeq_copp_head(ips_copp_queue_t *);
0317
0318 static int ips_is_passthru(struct scsi_cmnd *);
0319 static int ips_make_passthru(ips_ha_t *, struct scsi_cmnd *, ips_scb_t *, int);
0320 static int ips_usrcmd(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
0321 static void ips_cleanup_passthru(ips_ha_t *, ips_scb_t *);
0322 static void ips_scmd_buf_write(struct scsi_cmnd * scmd, void *data,
0323 unsigned int count);
0324 static void ips_scmd_buf_read(struct scsi_cmnd * scmd, void *data,
0325 unsigned int count);
0326
0327 static int ips_write_info(struct Scsi_Host *, char *, int);
0328 static int ips_show_info(struct seq_file *, struct Scsi_Host *);
0329 static int ips_host_info(ips_ha_t *, struct seq_file *);
0330 static int ips_abort_init(ips_ha_t * ha, int index);
0331 static int ips_init_phase2(int index);
0332
0333 static int ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr);
0334 static int ips_register_scsi(int index);
0335
0336 static int ips_poll_for_flush_complete(ips_ha_t * ha);
0337 static void ips_flush_and_reset(ips_ha_t *ha);
0338
0339
0340
0341
0342 static const char ips_name[] = "ips";
0343 static struct Scsi_Host *ips_sh[IPS_MAX_ADAPTERS];
0344 static ips_ha_t *ips_ha[IPS_MAX_ADAPTERS];
0345 static unsigned int ips_next_controller;
0346 static unsigned int ips_num_controllers;
0347 static unsigned int ips_released_controllers;
0348 static int ips_hotplug;
0349 static int ips_cmd_timeout = 60;
0350 static int ips_reset_timeout = 60 * 5;
0351 static int ips_force_memio = 1;
0352 static int ips_force_i2o = 1;
0353 static int ips_ioctlsize = IPS_IOCTL_SIZE;
0354 static int ips_cd_boot;
0355 static char *ips_FlashData = NULL;
0356 static dma_addr_t ips_flashbusaddr;
0357 static long ips_FlashDataInUse;
0358 static uint32_t MaxLiteCmds = 32;
0359 static struct scsi_host_template ips_driver_template = {
0360 .info = ips_info,
0361 .queuecommand = ips_queue,
0362 .eh_abort_handler = ips_eh_abort,
0363 .eh_host_reset_handler = ips_eh_reset,
0364 .proc_name = "ips",
0365 .show_info = ips_show_info,
0366 .write_info = ips_write_info,
0367 .slave_configure = ips_slave_configure,
0368 .bios_param = ips_biosparam,
0369 .this_id = -1,
0370 .sg_tablesize = IPS_MAX_SG,
0371 .cmd_per_lun = 3,
0372 .no_write_same = 1,
0373 };
0374
0375
0376
0377 static struct pci_device_id ips_pci_table[] = {
0378 { 0x1014, 0x002E, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
0379 { 0x1014, 0x01BD, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
0380 { 0x9005, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
0381 { 0, }
0382 };
0383
0384 MODULE_DEVICE_TABLE( pci, ips_pci_table );
0385
0386 static char ips_hot_plug_name[] = "ips";
0387
0388 static int ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent);
0389 static void ips_remove_device(struct pci_dev *pci_dev);
0390
0391 static struct pci_driver ips_pci_driver = {
0392 .name = ips_hot_plug_name,
0393 .id_table = ips_pci_table,
0394 .probe = ips_insert_device,
0395 .remove = ips_remove_device,
0396 };
0397
0398
0399
0400
0401
0402 static int ips_halt(struct notifier_block *nb, ulong event, void *buf);
0403
0404 #define MAX_ADAPTER_NAME 15
0405
0406 static char ips_adapter_name[][30] = {
0407 "ServeRAID",
0408 "ServeRAID II",
0409 "ServeRAID on motherboard",
0410 "ServeRAID on motherboard",
0411 "ServeRAID 3H",
0412 "ServeRAID 3L",
0413 "ServeRAID 4H",
0414 "ServeRAID 4M",
0415 "ServeRAID 4L",
0416 "ServeRAID 4Mx",
0417 "ServeRAID 4Lx",
0418 "ServeRAID 5i",
0419 "ServeRAID 5i",
0420 "ServeRAID 6M",
0421 "ServeRAID 6i",
0422 "ServeRAID 7t",
0423 "ServeRAID 7k",
0424 "ServeRAID 7M"
0425 };
0426
0427 static struct notifier_block ips_notifier = {
0428 ips_halt, NULL, 0
0429 };
0430
0431
0432
0433
0434 static char ips_command_direction[] = {
0435 IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT,
0436 IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK,
0437 IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0438 IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT,
0439 IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_OUT,
0440 IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT,
0441 IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_IN,
0442 IPS_DATA_UNK, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK,
0443 IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_UNK,
0444 IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT,
0445 IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE,
0446 IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT,
0447 IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT,
0448 IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_NONE,
0449 IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK,
0450 IPS_DATA_NONE, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK,
0451 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0452 IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0453 IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0454 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0455 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0456 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0457 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0458 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0459 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0460 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0461 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0462 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0463 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0464 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0465 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0466 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0467 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0468 IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_NONE,
0469 IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_OUT,
0470 IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_NONE,
0471 IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN,
0472 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0473 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0474 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0475 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0476 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0477 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0478 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0479 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0480 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0481 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_OUT,
0482 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0483 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0484 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
0485 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK
0486 };
0487
0488
0489
0490
0491
0492
0493
0494
0495
0496
0497
0498 static int
0499 ips_setup(char *ips_str)
0500 {
0501
0502 int i;
0503 char *key;
0504 char *value;
0505 static const IPS_OPTION options[] = {
0506 {"noi2o", &ips_force_i2o, 0},
0507 {"nommap", &ips_force_memio, 0},
0508 {"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE},
0509 {"cdboot", &ips_cd_boot, 0},
0510 {"maxcmds", &MaxLiteCmds, 32},
0511 };
0512
0513
0514
0515 while ((key = strsep(&ips_str, ",."))) {
0516 if (!*key)
0517 continue;
0518 value = strchr(key, ':');
0519 if (value)
0520 *value++ = '\0';
0521
0522
0523
0524
0525 for (i = 0; i < ARRAY_SIZE(options); i++) {
0526 if (strncasecmp
0527 (key, options[i].option_name,
0528 strlen(options[i].option_name)) == 0) {
0529 if (value)
0530 *options[i].option_flag =
0531 simple_strtoul(value, NULL, 0);
0532 else
0533 *options[i].option_flag =
0534 options[i].option_value;
0535 break;
0536 }
0537 }
0538 }
0539
0540 return (1);
0541 }
0542
0543 __setup("ips=", ips_setup);
0544
0545
0546
0547
0548
0549
0550
0551
0552
0553
0554
0555
0556 static int
0557 ips_detect(struct scsi_host_template * SHT)
0558 {
0559 int i;
0560
0561 METHOD_TRACE("ips_detect", 1);
0562
0563 #ifdef MODULE
0564 if (ips)
0565 ips_setup(ips);
0566 #endif
0567
0568 for (i = 0; i < ips_num_controllers; i++) {
0569 if (ips_register_scsi(i))
0570 ips_free(ips_ha[i]);
0571 ips_released_controllers++;
0572 }
0573 ips_hotplug = 1;
0574 return (ips_num_controllers);
0575 }
0576
0577
0578
0579
0580
0581 static void
0582 ips_setup_funclist(ips_ha_t * ha)
0583 {
0584
0585
0586
0587
0588 if (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) {
0589
0590 ha->func.isintr = ips_isintr_morpheus;
0591 ha->func.isinit = ips_isinit_morpheus;
0592 ha->func.issue = ips_issue_i2o_memio;
0593 ha->func.init = ips_init_morpheus;
0594 ha->func.statupd = ips_statupd_morpheus;
0595 ha->func.reset = ips_reset_morpheus;
0596 ha->func.intr = ips_intr_morpheus;
0597 ha->func.enableint = ips_enable_int_morpheus;
0598 } else if (IPS_USE_MEMIO(ha)) {
0599
0600 ha->func.isintr = ips_isintr_copperhead_memio;
0601 ha->func.isinit = ips_isinit_copperhead_memio;
0602 ha->func.init = ips_init_copperhead_memio;
0603 ha->func.statupd = ips_statupd_copperhead_memio;
0604 ha->func.statinit = ips_statinit_memio;
0605 ha->func.reset = ips_reset_copperhead_memio;
0606 ha->func.intr = ips_intr_copperhead;
0607 ha->func.erasebios = ips_erase_bios_memio;
0608 ha->func.programbios = ips_program_bios_memio;
0609 ha->func.verifybios = ips_verify_bios_memio;
0610 ha->func.enableint = ips_enable_int_copperhead_memio;
0611 if (IPS_USE_I2O_DELIVER(ha))
0612 ha->func.issue = ips_issue_i2o_memio;
0613 else
0614 ha->func.issue = ips_issue_copperhead_memio;
0615 } else {
0616
0617 ha->func.isintr = ips_isintr_copperhead;
0618 ha->func.isinit = ips_isinit_copperhead;
0619 ha->func.init = ips_init_copperhead;
0620 ha->func.statupd = ips_statupd_copperhead;
0621 ha->func.statinit = ips_statinit;
0622 ha->func.reset = ips_reset_copperhead;
0623 ha->func.intr = ips_intr_copperhead;
0624 ha->func.erasebios = ips_erase_bios;
0625 ha->func.programbios = ips_program_bios;
0626 ha->func.verifybios = ips_verify_bios;
0627 ha->func.enableint = ips_enable_int_copperhead;
0628
0629 if (IPS_USE_I2O_DELIVER(ha))
0630 ha->func.issue = ips_issue_i2o;
0631 else
0632 ha->func.issue = ips_issue_copperhead;
0633 }
0634 }
0635
0636
0637
0638
0639
0640
0641
0642
0643
0644
0645 static void ips_release(struct Scsi_Host *sh)
0646 {
0647 ips_scb_t *scb;
0648 ips_ha_t *ha;
0649 int i;
0650
0651 METHOD_TRACE("ips_release", 1);
0652
0653 scsi_remove_host(sh);
0654
0655 for (i = 0; i < IPS_MAX_ADAPTERS && ips_sh[i] != sh; i++) ;
0656
0657 if (i == IPS_MAX_ADAPTERS) {
0658 printk(KERN_WARNING
0659 "(%s) release, invalid Scsi_Host pointer.\n", ips_name);
0660 BUG();
0661 }
0662
0663 ha = IPS_HA(sh);
0664
0665 if (!ha)
0666 return;
0667
0668
0669 scb = &ha->scbs[ha->max_cmds - 1];
0670
0671 ips_init_scb(ha, scb);
0672
0673 scb->timeout = ips_cmd_timeout;
0674 scb->cdb[0] = IPS_CMD_FLUSH;
0675
0676 scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
0677 scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
0678 scb->cmd.flush_cache.state = IPS_NORM_STATE;
0679 scb->cmd.flush_cache.reserved = 0;
0680 scb->cmd.flush_cache.reserved2 = 0;
0681 scb->cmd.flush_cache.reserved3 = 0;
0682 scb->cmd.flush_cache.reserved4 = 0;
0683
0684 IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Cache.\n");
0685
0686
0687 if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == IPS_FAILURE)
0688 IPS_PRINTK(KERN_WARNING, ha->pcidev, "Incomplete Flush.\n");
0689
0690 IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Complete.\n");
0691
0692 ips_sh[i] = NULL;
0693 ips_ha[i] = NULL;
0694
0695
0696 ips_free(ha);
0697
0698
0699 free_irq(ha->pcidev->irq, ha);
0700
0701 scsi_host_put(sh);
0702
0703 ips_released_controllers++;
0704 }
0705
0706
0707
0708
0709
0710
0711
0712
0713
0714
0715 static int
0716 ips_halt(struct notifier_block *nb, ulong event, void *buf)
0717 {
0718 ips_scb_t *scb;
0719 ips_ha_t *ha;
0720 int i;
0721
0722 if ((event != SYS_RESTART) && (event != SYS_HALT) &&
0723 (event != SYS_POWER_OFF))
0724 return (NOTIFY_DONE);
0725
0726 for (i = 0; i < ips_next_controller; i++) {
0727 ha = (ips_ha_t *) ips_ha[i];
0728
0729 if (!ha)
0730 continue;
0731
0732 if (!ha->active)
0733 continue;
0734
0735
0736 scb = &ha->scbs[ha->max_cmds - 1];
0737
0738 ips_init_scb(ha, scb);
0739
0740 scb->timeout = ips_cmd_timeout;
0741 scb->cdb[0] = IPS_CMD_FLUSH;
0742
0743 scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
0744 scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
0745 scb->cmd.flush_cache.state = IPS_NORM_STATE;
0746 scb->cmd.flush_cache.reserved = 0;
0747 scb->cmd.flush_cache.reserved2 = 0;
0748 scb->cmd.flush_cache.reserved3 = 0;
0749 scb->cmd.flush_cache.reserved4 = 0;
0750
0751 IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Cache.\n");
0752
0753
0754 if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) ==
0755 IPS_FAILURE)
0756 IPS_PRINTK(KERN_WARNING, ha->pcidev,
0757 "Incomplete Flush.\n");
0758 else
0759 IPS_PRINTK(KERN_WARNING, ha->pcidev,
0760 "Flushing Complete.\n");
0761 }
0762
0763 return (NOTIFY_OK);
0764 }
0765
0766
0767
0768
0769
0770
0771
0772
0773
0774
0775 int ips_eh_abort(struct scsi_cmnd *SC)
0776 {
0777 ips_ha_t *ha;
0778 ips_copp_wait_item_t *item;
0779 int ret;
0780 struct Scsi_Host *host;
0781
0782 METHOD_TRACE("ips_eh_abort", 1);
0783
0784 if (!SC)
0785 return (FAILED);
0786
0787 host = SC->device->host;
0788 ha = (ips_ha_t *) SC->device->host->hostdata;
0789
0790 if (!ha)
0791 return (FAILED);
0792
0793 if (!ha->active)
0794 return (FAILED);
0795
0796 spin_lock(host->host_lock);
0797
0798
0799 item = ha->copp_waitlist.head;
0800 while ((item) && (item->scsi_cmd != SC))
0801 item = item->next;
0802
0803 if (item) {
0804
0805 ips_removeq_copp(&ha->copp_waitlist, item);
0806 ret = (SUCCESS);
0807
0808
0809 } else if (ips_removeq_wait(&ha->scb_waitlist, SC)) {
0810
0811 ret = (SUCCESS);
0812 } else {
0813
0814 ret = (FAILED);
0815 }
0816
0817 spin_unlock(host->host_lock);
0818 return ret;
0819 }
0820
0821
0822
0823
0824
0825
0826
0827
0828
0829
0830
0831
0832 static int __ips_eh_reset(struct scsi_cmnd *SC)
0833 {
0834 int ret;
0835 int i;
0836 ips_ha_t *ha;
0837 ips_scb_t *scb;
0838 ips_copp_wait_item_t *item;
0839
0840 METHOD_TRACE("ips_eh_reset", 1);
0841
0842 #ifdef NO_IPS_RESET
0843 return (FAILED);
0844 #else
0845
0846 if (!SC) {
0847 DEBUG(1, "Reset called with NULL scsi command");
0848
0849 return (FAILED);
0850 }
0851
0852 ha = (ips_ha_t *) SC->device->host->hostdata;
0853
0854 if (!ha) {
0855 DEBUG(1, "Reset called with NULL ha struct");
0856
0857 return (FAILED);
0858 }
0859
0860 if (!ha->active)
0861 return (FAILED);
0862
0863
0864 item = ha->copp_waitlist.head;
0865 while ((item) && (item->scsi_cmd != SC))
0866 item = item->next;
0867
0868 if (item) {
0869
0870 ips_removeq_copp(&ha->copp_waitlist, item);
0871 return (SUCCESS);
0872 }
0873
0874
0875 if (ips_removeq_wait(&ha->scb_waitlist, SC)) {
0876
0877 return (SUCCESS);
0878 }
0879
0880
0881
0882
0883
0884
0885
0886
0887
0888
0889
0890 if (ha->ioctl_reset == 0) {
0891 scb = &ha->scbs[ha->max_cmds - 1];
0892
0893 ips_init_scb(ha, scb);
0894
0895 scb->timeout = ips_cmd_timeout;
0896 scb->cdb[0] = IPS_CMD_FLUSH;
0897
0898 scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
0899 scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
0900 scb->cmd.flush_cache.state = IPS_NORM_STATE;
0901 scb->cmd.flush_cache.reserved = 0;
0902 scb->cmd.flush_cache.reserved2 = 0;
0903 scb->cmd.flush_cache.reserved3 = 0;
0904 scb->cmd.flush_cache.reserved4 = 0;
0905
0906
0907 ret = ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_IORL);
0908 if (ret == IPS_SUCCESS) {
0909 IPS_PRINTK(KERN_NOTICE, ha->pcidev,
0910 "Reset Request - Flushed Cache\n");
0911 return (SUCCESS);
0912 }
0913 }
0914
0915
0916
0917
0918 ha->ioctl_reset = 0;
0919
0920
0921
0922
0923
0924 IPS_PRINTK(KERN_NOTICE, ha->pcidev, "Resetting controller.\n");
0925 ret = (*ha->func.reset) (ha);
0926
0927 if (!ret) {
0928 struct scsi_cmnd *scsi_cmd;
0929
0930 IPS_PRINTK(KERN_NOTICE, ha->pcidev,
0931 "Controller reset failed - controller now offline.\n");
0932
0933
0934 DEBUG_VAR(1, "(%s%d) Failing active commands",
0935 ips_name, ha->host_num);
0936
0937 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
0938 scb->scsi_cmd->result = DID_ERROR << 16;
0939 scsi_done(scb->scsi_cmd);
0940 ips_freescb(ha, scb);
0941 }
0942
0943
0944 DEBUG_VAR(1, "(%s%d) Failing pending commands",
0945 ips_name, ha->host_num);
0946
0947 while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
0948 scsi_cmd->result = DID_ERROR;
0949 scsi_done(scsi_cmd);
0950 }
0951
0952 ha->active = false;
0953 return (FAILED);
0954 }
0955
0956 if (!ips_clear_adapter(ha, IPS_INTR_IORL)) {
0957 struct scsi_cmnd *scsi_cmd;
0958
0959 IPS_PRINTK(KERN_NOTICE, ha->pcidev,
0960 "Controller reset failed - controller now offline.\n");
0961
0962
0963 DEBUG_VAR(1, "(%s%d) Failing active commands",
0964 ips_name, ha->host_num);
0965
0966 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
0967 scb->scsi_cmd->result = DID_ERROR << 16;
0968 scsi_done(scb->scsi_cmd);
0969 ips_freescb(ha, scb);
0970 }
0971
0972
0973 DEBUG_VAR(1, "(%s%d) Failing pending commands",
0974 ips_name, ha->host_num);
0975
0976 while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
0977 scsi_cmd->result = DID_ERROR << 16;
0978 scsi_done(scsi_cmd);
0979 }
0980
0981 ha->active = false;
0982 return (FAILED);
0983 }
0984
0985
0986 if (le32_to_cpu(ha->subsys->param[3]) & 0x300000) {
0987 ha->last_ffdc = ktime_get_real_seconds();
0988 ha->reset_count++;
0989 ips_ffdc_reset(ha, IPS_INTR_IORL);
0990 }
0991
0992
0993 DEBUG_VAR(1, "(%s%d) Failing active commands", ips_name, ha->host_num);
0994
0995 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
0996 scb->scsi_cmd->result = DID_RESET << 16;
0997 scsi_done(scb->scsi_cmd);
0998 ips_freescb(ha, scb);
0999 }
1000
1001
1002 for (i = 1; i < ha->nbus; i++)
1003 ha->dcdb_active[i - 1] = 0;
1004
1005
1006 ha->num_ioctl = 0;
1007
1008 ips_next(ha, IPS_INTR_IORL);
1009
1010 return (SUCCESS);
1011 #endif
1012
1013 }
1014
1015 static int ips_eh_reset(struct scsi_cmnd *SC)
1016 {
1017 int rc;
1018
1019 spin_lock_irq(SC->device->host->host_lock);
1020 rc = __ips_eh_reset(SC);
1021 spin_unlock_irq(SC->device->host->host_lock);
1022
1023 return rc;
1024 }
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038 static int ips_queue_lck(struct scsi_cmnd *SC)
1039 {
1040 void (*done)(struct scsi_cmnd *) = scsi_done;
1041 ips_ha_t *ha;
1042 ips_passthru_t *pt;
1043
1044 METHOD_TRACE("ips_queue", 1);
1045
1046 ha = (ips_ha_t *) SC->device->host->hostdata;
1047
1048 if (!ha)
1049 goto out_error;
1050
1051 if (!ha->active)
1052 goto out_error;
1053
1054 if (ips_is_passthru(SC)) {
1055 if (ha->copp_waitlist.count == IPS_MAX_IOCTL_QUEUE) {
1056 SC->result = DID_BUS_BUSY << 16;
1057 done(SC);
1058
1059 return (0);
1060 }
1061 } else if (ha->scb_waitlist.count == IPS_MAX_QUEUE) {
1062 SC->result = DID_BUS_BUSY << 16;
1063 done(SC);
1064
1065 return (0);
1066 }
1067
1068 DEBUG_VAR(2, "(%s%d): ips_queue: cmd 0x%X (%d %d %d)",
1069 ips_name,
1070 ha->host_num,
1071 SC->cmnd[0],
1072 SC->device->channel, SC->device->id, SC->device->lun);
1073
1074
1075 if ((scmd_channel(SC) > 0)
1076 && (scmd_id(SC) == ha->ha_id[scmd_channel(SC)])) {
1077 SC->result = DID_NO_CONNECT << 16;
1078 done(SC);
1079
1080 return (0);
1081 }
1082
1083 if (ips_is_passthru(SC)) {
1084
1085 ips_copp_wait_item_t *scratch;
1086
1087
1088
1089
1090 pt = (ips_passthru_t *) scsi_sglist(SC);
1091 if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) &&
1092 (pt->CoppCP.cmd.reset.adapter_flag == 1)) {
1093 if (ha->scb_activelist.count != 0) {
1094 SC->result = DID_BUS_BUSY << 16;
1095 done(SC);
1096 return (0);
1097 }
1098 ha->ioctl_reset = 1;
1099 __ips_eh_reset(SC);
1100 SC->result = DID_OK << 16;
1101 scsi_done(SC);
1102 return (0);
1103 }
1104
1105
1106 scratch = kmalloc(sizeof (ips_copp_wait_item_t), GFP_ATOMIC);
1107
1108 if (!scratch) {
1109 SC->result = DID_ERROR << 16;
1110 done(SC);
1111
1112 return (0);
1113 }
1114
1115 scratch->scsi_cmd = SC;
1116 scratch->next = NULL;
1117
1118 ips_putq_copp_tail(&ha->copp_waitlist, scratch);
1119 } else {
1120 ips_putq_wait_tail(&ha->scb_waitlist, SC);
1121 }
1122
1123 ips_next(ha, IPS_INTR_IORL);
1124
1125 return (0);
1126 out_error:
1127 SC->result = DID_ERROR << 16;
1128 done(SC);
1129
1130 return (0);
1131 }
1132
1133 static DEF_SCSI_QCMD(ips_queue)
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144 static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1145 sector_t capacity, int geom[])
1146 {
1147 ips_ha_t *ha = (ips_ha_t *) sdev->host->hostdata;
1148 int heads;
1149 int sectors;
1150 int cylinders;
1151
1152 METHOD_TRACE("ips_biosparam", 1);
1153
1154 if (!ha)
1155
1156 return (0);
1157
1158 if (!ha->active)
1159 return (0);
1160
1161 if (!ips_read_adapter_status(ha, IPS_INTR_ON))
1162
1163 return (0);
1164
1165 if ((capacity > 0x400000) && ((ha->enq->ucMiscFlag & 0x8) == 0)) {
1166 heads = IPS_NORM_HEADS;
1167 sectors = IPS_NORM_SECTORS;
1168 } else {
1169 heads = IPS_COMP_HEADS;
1170 sectors = IPS_COMP_SECTORS;
1171 }
1172
1173 cylinders = (unsigned long) capacity / (heads * sectors);
1174
1175 DEBUG_VAR(2, "Geometry: heads: %d, sectors: %d, cylinders: %d",
1176 heads, sectors, cylinders);
1177
1178 geom[0] = heads;
1179 geom[1] = sectors;
1180 geom[2] = cylinders;
1181
1182 return (0);
1183 }
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194 static int
1195 ips_slave_configure(struct scsi_device * SDptr)
1196 {
1197 ips_ha_t *ha;
1198 int min;
1199
1200 ha = IPS_HA(SDptr->host);
1201 if (SDptr->tagged_supported && SDptr->type == TYPE_DISK) {
1202 min = ha->max_cmds / 2;
1203 if (ha->enq->ucLogDriveCount <= 2)
1204 min = ha->max_cmds - 1;
1205 scsi_change_queue_depth(SDptr, min);
1206 }
1207
1208 SDptr->skip_ms_page_8 = 1;
1209 SDptr->skip_ms_page_3f = 1;
1210 return 0;
1211 }
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222 static irqreturn_t
1223 do_ipsintr(int irq, void *dev_id)
1224 {
1225 ips_ha_t *ha;
1226 struct Scsi_Host *host;
1227 int irqstatus;
1228
1229 METHOD_TRACE("do_ipsintr", 2);
1230
1231 ha = (ips_ha_t *) dev_id;
1232 if (!ha)
1233 return IRQ_NONE;
1234 host = ips_sh[ha->host_num];
1235
1236 if (!host) {
1237 (*ha->func.intr) (ha);
1238 return IRQ_HANDLED;
1239 }
1240
1241 spin_lock(host->host_lock);
1242
1243 if (!ha->active) {
1244 spin_unlock(host->host_lock);
1245 return IRQ_HANDLED;
1246 }
1247
1248 irqstatus = (*ha->func.intr) (ha);
1249
1250 spin_unlock(host->host_lock);
1251
1252
1253 ips_next(ha, IPS_INTR_ON);
1254 return IRQ_RETVAL(irqstatus);
1255 }
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268 int
1269 ips_intr_copperhead(ips_ha_t * ha)
1270 {
1271 ips_stat_t *sp;
1272 ips_scb_t *scb;
1273 IPS_STATUS cstatus;
1274 int intrstatus;
1275
1276 METHOD_TRACE("ips_intr", 2);
1277
1278 if (!ha)
1279 return 0;
1280
1281 if (!ha->active)
1282 return 0;
1283
1284 intrstatus = (*ha->func.isintr) (ha);
1285
1286 if (!intrstatus) {
1287
1288
1289
1290
1291 return 0;
1292 }
1293
1294 while (true) {
1295 sp = &ha->sp;
1296
1297 intrstatus = (*ha->func.isintr) (ha);
1298
1299 if (!intrstatus)
1300 break;
1301 else
1302 cstatus.value = (*ha->func.statupd) (ha);
1303
1304 if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) {
1305
1306 continue;
1307 }
1308
1309 ips_chkstatus(ha, &cstatus);
1310 scb = (ips_scb_t *) sp->scb_addr;
1311
1312
1313
1314
1315
1316 (*scb->callback) (ha, scb);
1317 }
1318 return 1;
1319 }
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332 int
1333 ips_intr_morpheus(ips_ha_t * ha)
1334 {
1335 ips_stat_t *sp;
1336 ips_scb_t *scb;
1337 IPS_STATUS cstatus;
1338 int intrstatus;
1339
1340 METHOD_TRACE("ips_intr_morpheus", 2);
1341
1342 if (!ha)
1343 return 0;
1344
1345 if (!ha->active)
1346 return 0;
1347
1348 intrstatus = (*ha->func.isintr) (ha);
1349
1350 if (!intrstatus) {
1351
1352
1353
1354
1355 return 0;
1356 }
1357
1358 while (true) {
1359 sp = &ha->sp;
1360
1361 intrstatus = (*ha->func.isintr) (ha);
1362
1363 if (!intrstatus)
1364 break;
1365 else
1366 cstatus.value = (*ha->func.statupd) (ha);
1367
1368 if (cstatus.value == 0xffffffff)
1369
1370 break;
1371
1372 if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) {
1373 IPS_PRINTK(KERN_WARNING, ha->pcidev,
1374 "Spurious interrupt; no ccb.\n");
1375
1376 continue;
1377 }
1378
1379 ips_chkstatus(ha, &cstatus);
1380 scb = (ips_scb_t *) sp->scb_addr;
1381
1382
1383
1384
1385
1386 (*scb->callback) (ha, scb);
1387 }
1388 return 1;
1389 }
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400 static const char *
1401 ips_info(struct Scsi_Host *SH)
1402 {
1403 static char buffer[256];
1404 char *bp;
1405 ips_ha_t *ha;
1406
1407 METHOD_TRACE("ips_info", 1);
1408
1409 ha = IPS_HA(SH);
1410
1411 if (!ha)
1412 return (NULL);
1413
1414 bp = &buffer[0];
1415 memset(bp, 0, sizeof (buffer));
1416
1417 sprintf(bp, "%s%s%s Build %d", "IBM PCI ServeRAID ",
1418 IPS_VERSION_HIGH, IPS_VERSION_LOW, IPS_BUILD_IDENT);
1419
1420 if (ha->ad_type > 0 && ha->ad_type <= MAX_ADAPTER_NAME) {
1421 strcat(bp, " <");
1422 strcat(bp, ips_adapter_name[ha->ad_type - 1]);
1423 strcat(bp, ">");
1424 }
1425
1426 return (bp);
1427 }
1428
1429 static int
1430 ips_write_info(struct Scsi_Host *host, char *buffer, int length)
1431 {
1432 int i;
1433 ips_ha_t *ha = NULL;
1434
1435
1436 for (i = 0; i < ips_next_controller; i++) {
1437 if (ips_sh[i]) {
1438 if (ips_sh[i] == host) {
1439 ha = (ips_ha_t *) ips_sh[i]->hostdata;
1440 break;
1441 }
1442 }
1443 }
1444
1445 if (!ha)
1446 return (-EINVAL);
1447
1448 return 0;
1449 }
1450
1451 static int
1452 ips_show_info(struct seq_file *m, struct Scsi_Host *host)
1453 {
1454 int i;
1455 ips_ha_t *ha = NULL;
1456
1457
1458 for (i = 0; i < ips_next_controller; i++) {
1459 if (ips_sh[i]) {
1460 if (ips_sh[i] == host) {
1461 ha = (ips_ha_t *) ips_sh[i]->hostdata;
1462 break;
1463 }
1464 }
1465 }
1466
1467 if (!ha)
1468 return (-EINVAL);
1469
1470 return ips_host_info(ha, m);
1471 }
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486 static int ips_is_passthru(struct scsi_cmnd *SC)
1487 {
1488 unsigned long flags;
1489
1490 METHOD_TRACE("ips_is_passthru", 1);
1491
1492 if (!SC)
1493 return (0);
1494
1495 if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) &&
1496 (SC->device->channel == 0) &&
1497 (SC->device->id == IPS_ADAPTER_ID) &&
1498 (SC->device->lun == 0) && scsi_sglist(SC)) {
1499 struct scatterlist *sg = scsi_sglist(SC);
1500 char *buffer;
1501
1502
1503
1504 local_irq_save(flags);
1505 buffer = kmap_atomic(sg_page(sg)) + sg->offset;
1506 if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
1507 buffer[2] == 'P' && buffer[3] == 'P') {
1508 kunmap_atomic(buffer - sg->offset);
1509 local_irq_restore(flags);
1510 return 1;
1511 }
1512 kunmap_atomic(buffer - sg->offset);
1513 local_irq_restore(flags);
1514 }
1515 return 0;
1516 }
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526 static int
1527 ips_alloc_passthru_buffer(ips_ha_t * ha, int length)
1528 {
1529 void *bigger_buf;
1530 dma_addr_t dma_busaddr;
1531
1532 if (ha->ioctl_data && length <= ha->ioctl_len)
1533 return 0;
1534
1535 bigger_buf = dma_alloc_coherent(&ha->pcidev->dev, length, &dma_busaddr,
1536 GFP_KERNEL);
1537 if (bigger_buf) {
1538
1539 dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
1540 ha->ioctl_data, ha->ioctl_busaddr);
1541
1542 ha->ioctl_data = (char *) bigger_buf;
1543 ha->ioctl_len = length;
1544 ha->ioctl_busaddr = dma_busaddr;
1545 } else {
1546 return -1;
1547 }
1548 return 0;
1549 }
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560 static int
1561 ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
1562 {
1563 ips_passthru_t *pt;
1564 int length = 0;
1565 int i, ret;
1566 struct scatterlist *sg = scsi_sglist(SC);
1567
1568 METHOD_TRACE("ips_make_passthru", 1);
1569
1570 scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
1571 length += sg->length;
1572
1573 if (length < sizeof (ips_passthru_t)) {
1574
1575 DEBUG_VAR(1, "(%s%d) Passthru structure wrong size",
1576 ips_name, ha->host_num);
1577 return (IPS_FAILURE);
1578 }
1579 if (ips_alloc_passthru_buffer(ha, length)) {
1580
1581
1582 if (ha->ioctl_data) {
1583 pt = (ips_passthru_t *) ha->ioctl_data;
1584 ips_scmd_buf_read(SC, pt, sizeof (ips_passthru_t));
1585 pt->BasicStatus = 0x0B;
1586 pt->ExtendedStatus = 0x00;
1587 ips_scmd_buf_write(SC, pt, sizeof (ips_passthru_t));
1588 }
1589 return IPS_FAILURE;
1590 }
1591 ha->ioctl_datasize = length;
1592
1593 ips_scmd_buf_read(SC, ha->ioctl_data, ha->ioctl_datasize);
1594 pt = (ips_passthru_t *) ha->ioctl_data;
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606 switch (pt->CoppCmd) {
1607 case IPS_NUMCTRLS:
1608 memcpy(ha->ioctl_data + sizeof (ips_passthru_t),
1609 &ips_num_controllers, sizeof (int));
1610 ips_scmd_buf_write(SC, ha->ioctl_data,
1611 sizeof (ips_passthru_t) + sizeof (int));
1612 SC->result = DID_OK << 16;
1613
1614 return (IPS_SUCCESS_IMM);
1615
1616 case IPS_COPPUSRCMD:
1617 case IPS_COPPIOCCMD:
1618 if (SC->cmnd[0] == IPS_IOCTL_COMMAND) {
1619 if (length < (sizeof (ips_passthru_t) + pt->CmdBSize)) {
1620
1621 DEBUG_VAR(1,
1622 "(%s%d) Passthru structure wrong size",
1623 ips_name, ha->host_num);
1624
1625 return (IPS_FAILURE);
1626 }
1627
1628 if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
1629 pt->CoppCP.cmd.flashfw.op_code ==
1630 IPS_CMD_RW_BIOSFW) {
1631 ret = ips_flash_copperhead(ha, pt, scb);
1632 ips_scmd_buf_write(SC, ha->ioctl_data,
1633 sizeof (ips_passthru_t));
1634 return ret;
1635 }
1636 if (ips_usrcmd(ha, pt, scb))
1637 return (IPS_SUCCESS);
1638 else
1639 return (IPS_FAILURE);
1640 }
1641
1642 break;
1643
1644 }
1645
1646 return (IPS_FAILURE);
1647 }
1648
1649
1650
1651
1652
1653
1654 static int
1655 ips_flash_copperhead(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
1656 {
1657 int datasize;
1658
1659
1660
1661 if (IPS_IS_TROMBONE(ha) && pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE) {
1662 if (ips_usrcmd(ha, pt, scb))
1663 return IPS_SUCCESS;
1664 else
1665 return IPS_FAILURE;
1666 }
1667 pt->BasicStatus = 0x0B;
1668 pt->ExtendedStatus = 0;
1669 scb->scsi_cmd->result = DID_OK << 16;
1670
1671
1672 if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
1673 pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) {
1674 pt->BasicStatus = 0;
1675 return ips_flash_bios(ha, pt, scb);
1676 } else if (pt->CoppCP.cmd.flashfw.packet_num == 0) {
1677 if (ips_FlashData && !test_and_set_bit(0, &ips_FlashDataInUse)){
1678 ha->flash_data = ips_FlashData;
1679 ha->flash_busaddr = ips_flashbusaddr;
1680 ha->flash_len = PAGE_SIZE << 7;
1681 ha->flash_datasize = 0;
1682 } else if (!ha->flash_data) {
1683 datasize = pt->CoppCP.cmd.flashfw.total_packets *
1684 pt->CoppCP.cmd.flashfw.count;
1685 ha->flash_data = dma_alloc_coherent(&ha->pcidev->dev,
1686 datasize, &ha->flash_busaddr, GFP_KERNEL);
1687 if (!ha->flash_data){
1688 printk(KERN_WARNING "Unable to allocate a flash buffer\n");
1689 return IPS_FAILURE;
1690 }
1691 ha->flash_datasize = 0;
1692 ha->flash_len = datasize;
1693 } else
1694 return IPS_FAILURE;
1695 } else {
1696 if (pt->CoppCP.cmd.flashfw.count + ha->flash_datasize >
1697 ha->flash_len) {
1698 ips_free_flash_copperhead(ha);
1699 IPS_PRINTK(KERN_WARNING, ha->pcidev,
1700 "failed size sanity check\n");
1701 return IPS_FAILURE;
1702 }
1703 }
1704 if (!ha->flash_data)
1705 return IPS_FAILURE;
1706 pt->BasicStatus = 0;
1707 memcpy(&ha->flash_data[ha->flash_datasize], pt + 1,
1708 pt->CoppCP.cmd.flashfw.count);
1709 ha->flash_datasize += pt->CoppCP.cmd.flashfw.count;
1710 if (pt->CoppCP.cmd.flashfw.packet_num ==
1711 pt->CoppCP.cmd.flashfw.total_packets - 1) {
1712 if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE)
1713 return ips_flash_bios(ha, pt, scb);
1714 else if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE)
1715 return ips_flash_firmware(ha, pt, scb);
1716 }
1717 return IPS_SUCCESS_IMM;
1718 }
1719
1720
1721
1722
1723
1724
1725 static int
1726 ips_flash_bios(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
1727 {
1728
1729 if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
1730 pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_BIOS) {
1731 if ((!ha->func.programbios) || (!ha->func.erasebios) ||
1732 (!ha->func.verifybios))
1733 goto error;
1734 if ((*ha->func.erasebios) (ha)) {
1735 DEBUG_VAR(1,
1736 "(%s%d) flash bios failed - unable to erase flash",
1737 ips_name, ha->host_num);
1738 goto error;
1739 } else
1740 if ((*ha->func.programbios) (ha,
1741 ha->flash_data +
1742 IPS_BIOS_HEADER,
1743 ha->flash_datasize -
1744 IPS_BIOS_HEADER, 0)) {
1745 DEBUG_VAR(1,
1746 "(%s%d) flash bios failed - unable to flash",
1747 ips_name, ha->host_num);
1748 goto error;
1749 } else
1750 if ((*ha->func.verifybios) (ha,
1751 ha->flash_data +
1752 IPS_BIOS_HEADER,
1753 ha->flash_datasize -
1754 IPS_BIOS_HEADER, 0)) {
1755 DEBUG_VAR(1,
1756 "(%s%d) flash bios failed - unable to verify flash",
1757 ips_name, ha->host_num);
1758 goto error;
1759 }
1760 ips_free_flash_copperhead(ha);
1761 return IPS_SUCCESS_IMM;
1762 } else if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
1763 pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) {
1764 if (!ha->func.erasebios)
1765 goto error;
1766 if ((*ha->func.erasebios) (ha)) {
1767 DEBUG_VAR(1,
1768 "(%s%d) flash bios failed - unable to erase flash",
1769 ips_name, ha->host_num);
1770 goto error;
1771 }
1772 return IPS_SUCCESS_IMM;
1773 }
1774 error:
1775 pt->BasicStatus = 0x0B;
1776 pt->ExtendedStatus = 0x00;
1777 ips_free_flash_copperhead(ha);
1778 return IPS_FAILURE;
1779 }
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789 static int
1790 ips_fill_scb_sg_single(ips_ha_t * ha, dma_addr_t busaddr,
1791 ips_scb_t * scb, int indx, unsigned int e_len)
1792 {
1793
1794 int ret_val = 0;
1795
1796 if ((scb->data_len + e_len) > ha->max_xfer) {
1797 e_len = ha->max_xfer - scb->data_len;
1798 scb->breakup = indx;
1799 ++scb->sg_break;
1800 ret_val = -1;
1801 } else {
1802 scb->breakup = 0;
1803 scb->sg_break = 0;
1804 }
1805 if (IPS_USE_ENH_SGLIST(ha)) {
1806 scb->sg_list.enh_list[indx].address_lo =
1807 cpu_to_le32(lower_32_bits(busaddr));
1808 scb->sg_list.enh_list[indx].address_hi =
1809 cpu_to_le32(upper_32_bits(busaddr));
1810 scb->sg_list.enh_list[indx].length = cpu_to_le32(e_len);
1811 } else {
1812 scb->sg_list.std_list[indx].address =
1813 cpu_to_le32(lower_32_bits(busaddr));
1814 scb->sg_list.std_list[indx].length = cpu_to_le32(e_len);
1815 }
1816
1817 ++scb->sg_len;
1818 scb->data_len += e_len;
1819 return ret_val;
1820 }
1821
1822
1823
1824
1825
1826
1827 static int
1828 ips_flash_firmware(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
1829 {
1830 IPS_SG_LIST sg_list;
1831 uint32_t cmd_busaddr;
1832
1833 if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE &&
1834 pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_FW) {
1835 memset(&pt->CoppCP.cmd, 0, sizeof (IPS_HOST_COMMAND));
1836 pt->CoppCP.cmd.flashfw.op_code = IPS_CMD_DOWNLOAD;
1837 pt->CoppCP.cmd.flashfw.count = cpu_to_le32(ha->flash_datasize);
1838 } else {
1839 pt->BasicStatus = 0x0B;
1840 pt->ExtendedStatus = 0x00;
1841 ips_free_flash_copperhead(ha);
1842 return IPS_FAILURE;
1843 }
1844
1845 sg_list.list = scb->sg_list.list;
1846 cmd_busaddr = scb->scb_busaddr;
1847
1848 memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD));
1849
1850 scb->sg_list.list = sg_list.list;
1851 scb->scb_busaddr = cmd_busaddr;
1852 scb->bus = scb->scsi_cmd->device->channel;
1853 scb->target_id = scb->scsi_cmd->device->id;
1854 scb->lun = scb->scsi_cmd->device->lun;
1855 scb->sg_len = 0;
1856 scb->data_len = 0;
1857 scb->flags = 0;
1858 scb->op_code = 0;
1859 scb->callback = ipsintr_done;
1860 scb->timeout = ips_cmd_timeout;
1861
1862 scb->data_len = ha->flash_datasize;
1863 scb->data_busaddr =
1864 dma_map_single(&ha->pcidev->dev, ha->flash_data, scb->data_len,
1865 IPS_DMA_DIR(scb));
1866 scb->flags |= IPS_SCB_MAP_SINGLE;
1867 scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
1868 scb->cmd.flashfw.buffer_addr = cpu_to_le32(scb->data_busaddr);
1869 if (pt->TimeOut)
1870 scb->timeout = pt->TimeOut;
1871 scb->scsi_cmd->result = DID_OK << 16;
1872 return IPS_SUCCESS;
1873 }
1874
1875
1876
1877
1878
1879
1880 static void
1881 ips_free_flash_copperhead(ips_ha_t * ha)
1882 {
1883 if (ha->flash_data == ips_FlashData)
1884 test_and_clear_bit(0, &ips_FlashDataInUse);
1885 else if (ha->flash_data)
1886 dma_free_coherent(&ha->pcidev->dev, ha->flash_len,
1887 ha->flash_data, ha->flash_busaddr);
1888 ha->flash_data = NULL;
1889 }
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900 static int
1901 ips_usrcmd(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
1902 {
1903 IPS_SG_LIST sg_list;
1904 uint32_t cmd_busaddr;
1905
1906 METHOD_TRACE("ips_usrcmd", 1);
1907
1908 if ((!scb) || (!pt) || (!ha))
1909 return (0);
1910
1911
1912 sg_list.list = scb->sg_list.list;
1913 cmd_busaddr = scb->scb_busaddr;
1914
1915 memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD));
1916 memcpy(&scb->dcdb, &pt->CoppCP.dcdb, sizeof (IPS_DCDB_TABLE));
1917
1918
1919 scb->sg_list.list = sg_list.list;
1920 scb->scb_busaddr = cmd_busaddr;
1921 scb->bus = scb->scsi_cmd->device->channel;
1922 scb->target_id = scb->scsi_cmd->device->id;
1923 scb->lun = scb->scsi_cmd->device->lun;
1924 scb->sg_len = 0;
1925 scb->data_len = 0;
1926 scb->flags = 0;
1927 scb->op_code = 0;
1928 scb->callback = ipsintr_done;
1929 scb->timeout = ips_cmd_timeout;
1930 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
1931
1932
1933 if ((scb->cmd.basic_io.op_code == IPS_CMD_READ_SG) ||
1934 (scb->cmd.basic_io.op_code == IPS_CMD_WRITE_SG) ||
1935 (scb->cmd.basic_io.op_code == IPS_CMD_DCDB_SG))
1936 return (0);
1937
1938 if (pt->CmdBSize) {
1939 scb->data_len = pt->CmdBSize;
1940 scb->data_busaddr = ha->ioctl_busaddr + sizeof (ips_passthru_t);
1941 } else {
1942 scb->data_busaddr = 0L;
1943 }
1944
1945 if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
1946 scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr +
1947 (unsigned long) &scb->
1948 dcdb -
1949 (unsigned long) scb);
1950
1951 if (pt->CmdBSize) {
1952 if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
1953 scb->dcdb.buffer_pointer =
1954 cpu_to_le32(scb->data_busaddr);
1955 else
1956 scb->cmd.basic_io.sg_addr =
1957 cpu_to_le32(scb->data_busaddr);
1958 }
1959
1960
1961 if (pt->TimeOut) {
1962 scb->timeout = pt->TimeOut;
1963
1964 if (pt->TimeOut <= 10)
1965 scb->dcdb.cmd_attribute |= IPS_TIMEOUT10;
1966 else if (pt->TimeOut <= 60)
1967 scb->dcdb.cmd_attribute |= IPS_TIMEOUT60;
1968 else
1969 scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M;
1970 }
1971
1972
1973 scb->scsi_cmd->result = DID_OK << 16;
1974
1975
1976 return (1);
1977 }
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988 static void
1989 ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb)
1990 {
1991 ips_passthru_t *pt;
1992
1993 METHOD_TRACE("ips_cleanup_passthru", 1);
1994
1995 if ((!scb) || (!scb->scsi_cmd) || (!scsi_sglist(scb->scsi_cmd))) {
1996 DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru",
1997 ips_name, ha->host_num);
1998
1999 return;
2000 }
2001 pt = (ips_passthru_t *) ha->ioctl_data;
2002
2003
2004 if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
2005 memcpy(&pt->CoppCP.dcdb, &scb->dcdb, sizeof (IPS_DCDB_TABLE));
2006
2007 pt->BasicStatus = scb->basic_status;
2008 pt->ExtendedStatus = scb->extended_status;
2009 pt->AdapterType = ha->ad_type;
2010
2011 if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
2012 (scb->cmd.flashfw.op_code == IPS_CMD_DOWNLOAD ||
2013 scb->cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW))
2014 ips_free_flash_copperhead(ha);
2015
2016 ips_scmd_buf_write(scb->scsi_cmd, ha->ioctl_data, ha->ioctl_datasize);
2017 }
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028 static int
2029 ips_host_info(ips_ha_t *ha, struct seq_file *m)
2030 {
2031 METHOD_TRACE("ips_host_info", 1);
2032
2033 seq_puts(m, "\nIBM ServeRAID General Information:\n\n");
2034
2035 if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) &&
2036 (le16_to_cpu(ha->nvram->adapter_type) != 0))
2037 seq_printf(m, "\tController Type : %s\n",
2038 ips_adapter_name[ha->ad_type - 1]);
2039 else
2040 seq_puts(m, "\tController Type : Unknown\n");
2041
2042 if (ha->io_addr)
2043 seq_printf(m,
2044 "\tIO region : 0x%x (%d bytes)\n",
2045 ha->io_addr, ha->io_len);
2046
2047 if (ha->mem_addr) {
2048 seq_printf(m,
2049 "\tMemory region : 0x%x (%d bytes)\n",
2050 ha->mem_addr, ha->mem_len);
2051 seq_printf(m,
2052 "\tShared memory address : 0x%lx\n",
2053 (unsigned long)ha->mem_ptr);
2054 }
2055
2056 seq_printf(m, "\tIRQ number : %d\n", ha->pcidev->irq);
2057
2058
2059
2060
2061 if (le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) {
2062 if (ha->nvram->bios_low[3] == 0) {
2063 seq_printf(m,
2064 "\tBIOS Version : %c%c%c%c%c%c%c\n",
2065 ha->nvram->bios_high[0], ha->nvram->bios_high[1],
2066 ha->nvram->bios_high[2], ha->nvram->bios_high[3],
2067 ha->nvram->bios_low[0], ha->nvram->bios_low[1],
2068 ha->nvram->bios_low[2]);
2069
2070 } else {
2071 seq_printf(m,
2072 "\tBIOS Version : %c%c%c%c%c%c%c%c\n",
2073 ha->nvram->bios_high[0], ha->nvram->bios_high[1],
2074 ha->nvram->bios_high[2], ha->nvram->bios_high[3],
2075 ha->nvram->bios_low[0], ha->nvram->bios_low[1],
2076 ha->nvram->bios_low[2], ha->nvram->bios_low[3]);
2077 }
2078
2079 }
2080
2081 if (ha->enq->CodeBlkVersion[7] == 0) {
2082 seq_printf(m,
2083 "\tFirmware Version : %c%c%c%c%c%c%c\n",
2084 ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1],
2085 ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3],
2086 ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5],
2087 ha->enq->CodeBlkVersion[6]);
2088 } else {
2089 seq_printf(m,
2090 "\tFirmware Version : %c%c%c%c%c%c%c%c\n",
2091 ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1],
2092 ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3],
2093 ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5],
2094 ha->enq->CodeBlkVersion[6], ha->enq->CodeBlkVersion[7]);
2095 }
2096
2097 if (ha->enq->BootBlkVersion[7] == 0) {
2098 seq_printf(m,
2099 "\tBoot Block Version : %c%c%c%c%c%c%c\n",
2100 ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1],
2101 ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3],
2102 ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5],
2103 ha->enq->BootBlkVersion[6]);
2104 } else {
2105 seq_printf(m,
2106 "\tBoot Block Version : %c%c%c%c%c%c%c%c\n",
2107 ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1],
2108 ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3],
2109 ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5],
2110 ha->enq->BootBlkVersion[6], ha->enq->BootBlkVersion[7]);
2111 }
2112
2113 seq_printf(m, "\tDriver Version : %s%s\n",
2114 IPS_VERSION_HIGH, IPS_VERSION_LOW);
2115
2116 seq_printf(m, "\tDriver Build : %d\n",
2117 IPS_BUILD_IDENT);
2118
2119 seq_printf(m, "\tMax Physical Devices : %d\n",
2120 ha->enq->ucMaxPhysicalDevices);
2121 seq_printf(m, "\tMax Active Commands : %d\n",
2122 ha->max_cmds);
2123 seq_printf(m, "\tCurrent Queued Commands : %d\n",
2124 ha->scb_waitlist.count);
2125 seq_printf(m, "\tCurrent Active Commands : %d\n",
2126 ha->scb_activelist.count - ha->num_ioctl);
2127 seq_printf(m, "\tCurrent Queued PT Commands : %d\n",
2128 ha->copp_waitlist.count);
2129 seq_printf(m, "\tCurrent Active PT Commands : %d\n",
2130 ha->num_ioctl);
2131
2132 seq_putc(m, '\n');
2133
2134 return 0;
2135 }
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146 static void
2147 ips_identify_controller(ips_ha_t * ha)
2148 {
2149 METHOD_TRACE("ips_identify_controller", 1);
2150
2151 switch (ha->pcidev->device) {
2152 case IPS_DEVICEID_COPPERHEAD:
2153 if (ha->pcidev->revision <= IPS_REVID_SERVERAID) {
2154 ha->ad_type = IPS_ADTYPE_SERVERAID;
2155 } else if (ha->pcidev->revision == IPS_REVID_SERVERAID2) {
2156 ha->ad_type = IPS_ADTYPE_SERVERAID2;
2157 } else if (ha->pcidev->revision == IPS_REVID_NAVAJO) {
2158 ha->ad_type = IPS_ADTYPE_NAVAJO;
2159 } else if ((ha->pcidev->revision == IPS_REVID_SERVERAID2)
2160 && (ha->slot_num == 0)) {
2161 ha->ad_type = IPS_ADTYPE_KIOWA;
2162 } else if ((ha->pcidev->revision >= IPS_REVID_CLARINETP1) &&
2163 (ha->pcidev->revision <= IPS_REVID_CLARINETP3)) {
2164 if (ha->enq->ucMaxPhysicalDevices == 15)
2165 ha->ad_type = IPS_ADTYPE_SERVERAID3L;
2166 else
2167 ha->ad_type = IPS_ADTYPE_SERVERAID3;
2168 } else if ((ha->pcidev->revision >= IPS_REVID_TROMBONE32) &&
2169 (ha->pcidev->revision <= IPS_REVID_TROMBONE64)) {
2170 ha->ad_type = IPS_ADTYPE_SERVERAID4H;
2171 }
2172 break;
2173
2174 case IPS_DEVICEID_MORPHEUS:
2175 switch (ha->pcidev->subsystem_device) {
2176 case IPS_SUBDEVICEID_4L:
2177 ha->ad_type = IPS_ADTYPE_SERVERAID4L;
2178 break;
2179
2180 case IPS_SUBDEVICEID_4M:
2181 ha->ad_type = IPS_ADTYPE_SERVERAID4M;
2182 break;
2183
2184 case IPS_SUBDEVICEID_4MX:
2185 ha->ad_type = IPS_ADTYPE_SERVERAID4MX;
2186 break;
2187
2188 case IPS_SUBDEVICEID_4LX:
2189 ha->ad_type = IPS_ADTYPE_SERVERAID4LX;
2190 break;
2191
2192 case IPS_SUBDEVICEID_5I2:
2193 ha->ad_type = IPS_ADTYPE_SERVERAID5I2;
2194 break;
2195
2196 case IPS_SUBDEVICEID_5I1:
2197 ha->ad_type = IPS_ADTYPE_SERVERAID5I1;
2198 break;
2199 }
2200
2201 break;
2202
2203 case IPS_DEVICEID_MARCO:
2204 switch (ha->pcidev->subsystem_device) {
2205 case IPS_SUBDEVICEID_6M:
2206 ha->ad_type = IPS_ADTYPE_SERVERAID6M;
2207 break;
2208 case IPS_SUBDEVICEID_6I:
2209 ha->ad_type = IPS_ADTYPE_SERVERAID6I;
2210 break;
2211 case IPS_SUBDEVICEID_7k:
2212 ha->ad_type = IPS_ADTYPE_SERVERAID7k;
2213 break;
2214 case IPS_SUBDEVICEID_7M:
2215 ha->ad_type = IPS_ADTYPE_SERVERAID7M;
2216 break;
2217 }
2218 break;
2219 }
2220 }
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231 static void
2232 ips_get_bios_version(ips_ha_t * ha, int intr)
2233 {
2234 ips_scb_t *scb;
2235 int ret;
2236 uint8_t major;
2237 uint8_t minor;
2238 uint8_t subminor;
2239 uint8_t *buffer;
2240
2241 METHOD_TRACE("ips_get_bios_version", 1);
2242
2243 major = 0;
2244 minor = 0;
2245
2246 memcpy(ha->bios_version, " ?", 8);
2247
2248 if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) {
2249 if (IPS_USE_MEMIO(ha)) {
2250
2251
2252
2253 writel(0, ha->mem_ptr + IPS_REG_FLAP);
2254 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2255 udelay(25);
2256
2257 if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
2258 return;
2259
2260 writel(1, ha->mem_ptr + IPS_REG_FLAP);
2261 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2262 udelay(25);
2263
2264 if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
2265 return;
2266
2267
2268 writel(0x1FF, ha->mem_ptr + IPS_REG_FLAP);
2269 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2270 udelay(25);
2271
2272 major = readb(ha->mem_ptr + IPS_REG_FLDP);
2273
2274
2275 writel(0x1FE, ha->mem_ptr + IPS_REG_FLAP);
2276 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2277 udelay(25);
2278 minor = readb(ha->mem_ptr + IPS_REG_FLDP);
2279
2280
2281 writel(0x1FD, ha->mem_ptr + IPS_REG_FLAP);
2282 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2283 udelay(25);
2284 subminor = readb(ha->mem_ptr + IPS_REG_FLDP);
2285
2286 } else {
2287
2288
2289
2290 outl(0, ha->io_addr + IPS_REG_FLAP);
2291 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2292 udelay(25);
2293
2294 if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
2295 return;
2296
2297 outl(1, ha->io_addr + IPS_REG_FLAP);
2298 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2299 udelay(25);
2300
2301 if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
2302 return;
2303
2304
2305 outl(0x1FF, ha->io_addr + IPS_REG_FLAP);
2306 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2307 udelay(25);
2308
2309 major = inb(ha->io_addr + IPS_REG_FLDP);
2310
2311
2312 outl(0x1FE, ha->io_addr + IPS_REG_FLAP);
2313 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2314 udelay(25);
2315
2316 minor = inb(ha->io_addr + IPS_REG_FLDP);
2317
2318
2319 outl(0x1FD, ha->io_addr + IPS_REG_FLAP);
2320 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2321 udelay(25);
2322
2323 subminor = inb(ha->io_addr + IPS_REG_FLDP);
2324
2325 }
2326 } else {
2327
2328
2329 buffer = ha->ioctl_data;
2330
2331 memset(buffer, 0, 0x1000);
2332
2333 scb = &ha->scbs[ha->max_cmds - 1];
2334
2335 ips_init_scb(ha, scb);
2336
2337 scb->timeout = ips_cmd_timeout;
2338 scb->cdb[0] = IPS_CMD_RW_BIOSFW;
2339
2340 scb->cmd.flashfw.op_code = IPS_CMD_RW_BIOSFW;
2341 scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
2342 scb->cmd.flashfw.type = 1;
2343 scb->cmd.flashfw.direction = 0;
2344 scb->cmd.flashfw.count = cpu_to_le32(0x800);
2345 scb->cmd.flashfw.total_packets = 1;
2346 scb->cmd.flashfw.packet_num = 0;
2347 scb->data_len = 0x1000;
2348 scb->cmd.flashfw.buffer_addr = ha->ioctl_busaddr;
2349
2350
2351 if (((ret =
2352 ips_send_wait(ha, scb, ips_cmd_timeout,
2353 intr)) == IPS_FAILURE)
2354 || (ret == IPS_SUCCESS_IMM)
2355 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {
2356
2357
2358 return;
2359 }
2360
2361 if ((buffer[0xC0] == 0x55) && (buffer[0xC1] == 0xAA)) {
2362 major = buffer[0x1ff + 0xC0];
2363 minor = buffer[0x1fe + 0xC0];
2364 subminor = buffer[0x1fd + 0xC0];
2365 } else {
2366 return;
2367 }
2368 }
2369
2370 ha->bios_version[0] = hex_asc_upper_hi(major);
2371 ha->bios_version[1] = '.';
2372 ha->bios_version[2] = hex_asc_upper_lo(major);
2373 ha->bios_version[3] = hex_asc_upper_lo(subminor);
2374 ha->bios_version[4] = '.';
2375 ha->bios_version[5] = hex_asc_upper_hi(minor);
2376 ha->bios_version[6] = hex_asc_upper_lo(minor);
2377 ha->bios_version[7] = 0;
2378 }
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
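/*
 * ips_hainit - bring a controller to a usable state after reset: program the
 * status queue, enable interrupts, read the configuration, enquiry and
 * subsystem data, write the driver info back, then derive the bus/target
 * limits, the maximum transfer size (from the stripe size of logical drive 0)
 * and the maximum number of outstanding commands.  Returns 1 on success,
 * 0 on failure.
 */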
2391 static int
2392 ips_hainit(ips_ha_t * ha)
2393 {
2394 int i;
2395
2396 METHOD_TRACE("ips_hainit", 1);
2397
2398 if (!ha)
2399 return (0);
2400
2401 if (ha->func.statinit)
2402 (*ha->func.statinit) (ha);
2403
2404 if (ha->func.enableint)
2405 (*ha->func.enableint) (ha);
2406
2407
2408 ha->reset_count = 1;
2409 ha->last_ffdc = ktime_get_real_seconds();
2410 ips_ffdc_reset(ha, IPS_INTR_IORL);
2411
2412 if (!ips_read_config(ha, IPS_INTR_IORL)) {
2413 IPS_PRINTK(KERN_WARNING, ha->pcidev,
2414 "unable to read config from controller.\n");
2415
2416 return (0);
2417 }
2418
2419 if (!ips_read_adapter_status(ha, IPS_INTR_IORL)) {
2420 IPS_PRINTK(KERN_WARNING, ha->pcidev,
2421 "unable to read controller status.\n");
2422
2423 return (0);
2424 }
2425
2426
2427 ips_identify_controller(ha);
2428
2429 if (!ips_read_subsystem_parameters(ha, IPS_INTR_IORL)) {
2430 IPS_PRINTK(KERN_WARNING, ha->pcidev,
2431 "unable to read subsystem parameters.\n");
2432
2433 return (0);
2434 }
2435
2436
2437 if (!ips_write_driver_status(ha, IPS_INTR_IORL)) {
2438 IPS_PRINTK(KERN_WARNING, ha->pcidev,
2439 "unable to write driver info to controller.\n");
2440
2441 return (0);
2442 }
2443
2444
2445 if ((ha->conf->ucLogDriveCount > 0) && (ha->requires_esl == 1))
2446 ips_clear_adapter(ha, IPS_INTR_IORL);
2447
2448
2449 ha->ntargets = IPS_MAX_TARGETS + 1;
2450 ha->nlun = 1;
2451 ha->nbus = (ha->enq->ucMaxPhysicalDevices / IPS_MAX_TARGETS) + 1;
2452
2453 switch (ha->conf->logical_drive[0].ucStripeSize) {
2454 case 4:
2455 ha->max_xfer = 0x10000;
2456 break;
2457
2458 case 5:
2459 ha->max_xfer = 0x20000;
2460 break;
2461
2462 case 6:
2463 ha->max_xfer = 0x40000;
2464 break;
2465
2466 case 7:
2467 default:
2468 ha->max_xfer = 0x80000;
2469 break;
2470 }
2471
2472
2473 if (le32_to_cpu(ha->subsys->param[4]) & 0x1) {
2474
2475 ha->max_cmds = ha->enq->ucConcurrentCmdCount;
2476 } else {
2477
2478 switch (ha->conf->logical_drive[0].ucStripeSize) {
2479 case 4:
2480 ha->max_cmds = 32;
2481 break;
2482
2483 case 5:
2484 ha->max_cmds = 16;
2485 break;
2486
2487 case 6:
2488 ha->max_cmds = 8;
2489 break;
2490
2491 case 7:
2492 default:
2493 ha->max_cmds = 4;
2494 break;
2495 }
2496 }
2497
2498
2499 if ((ha->ad_type == IPS_ADTYPE_SERVERAID3L) ||
2500 (ha->ad_type == IPS_ADTYPE_SERVERAID4L) ||
2501 (ha->ad_type == IPS_ADTYPE_SERVERAID4LX)) {
2502 if ((ha->max_cmds > MaxLiteCmds) && (MaxLiteCmds))
2503 ha->max_cmds = MaxLiteCmds;
2504 }
2505
2506
2507 ha->ha_id[0] = IPS_ADAPTER_ID;
2508 for (i = 1; i < ha->nbus; i++) {
2509 ha->ha_id[i] = ha->conf->init_id[i - 1] & 0x1f;
2510 ha->dcdb_active[i - 1] = 0;
2511 }
2512
2513 return (1);
2514 }
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
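/*
 * ips_next - push as many queued commands to the controller as resources
 * allow.  Three things happen here: an FFDC time update is issued if the
 * controller has been idle past the 8-hour mark, passthru (ioctl) requests
 * are drained from copp_waitlist, and normal SCSI commands are taken from
 * scb_waitlist, mapped for DMA and issued.  When called with IPS_INTR_ON the
 * host lock is dropped around the slower mapping and passthru work.
 */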
2525 static void
2526 ips_next(ips_ha_t * ha, int intr)
2527 {
2528 ips_scb_t *scb;
2529 struct scsi_cmnd *SC;
2530 struct scsi_cmnd *p;
2531 struct scsi_cmnd *q;
2532 ips_copp_wait_item_t *item;
2533 int ret;
2534 struct Scsi_Host *host;
2535 METHOD_TRACE("ips_next", 1);
2536
2537 if (!ha)
2538 return;
2539 host = ips_sh[ha->host_num];
2540
2541
2542
2543
2544 if (intr == IPS_INTR_ON)
2545 spin_lock(host->host_lock);
2546
2547 if ((ha->subsys->param[3] & 0x300000)
2548 && (ha->scb_activelist.count == 0)) {
2549 time64_t now = ktime_get_real_seconds();
2550 if (now - ha->last_ffdc > IPS_SECS_8HOURS) {
2551 ha->last_ffdc = now;
2552 ips_ffdc_time(ha);
2553 }
2554 }
2555
2556
2557
2558
2559
2560
2561
2562
2563 while ((ha->num_ioctl < IPS_MAX_IOCTL) &&
2564 (ha->copp_waitlist.head) && (scb = ips_getscb(ha))) {
2565
2566 item = ips_removeq_copp_head(&ha->copp_waitlist);
2567 ha->num_ioctl++;
2568 if (intr == IPS_INTR_ON)
2569 spin_unlock(host->host_lock);
2570 scb->scsi_cmd = item->scsi_cmd;
2571 kfree(item);
2572
2573 ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr);
2574
2575 if (intr == IPS_INTR_ON)
2576 spin_lock(host->host_lock);
2577 switch (ret) {
2578 case IPS_FAILURE:
2579 if (scb->scsi_cmd) {
2580 scb->scsi_cmd->result = DID_ERROR << 16;
2581 scsi_done(scb->scsi_cmd);
2582 }
2583
2584 ips_freescb(ha, scb);
2585 break;
2586 case IPS_SUCCESS_IMM:
2587 if (scb->scsi_cmd) {
2588 scb->scsi_cmd->result = DID_OK << 16;
2589 scsi_done(scb->scsi_cmd);
2590 }
2591
2592 ips_freescb(ha, scb);
2593 break;
2594 default:
2595 break;
2596 }
2597
2598 if (ret != IPS_SUCCESS) {
2599 ha->num_ioctl--;
2600 continue;
2601 }
2602
2603 ret = ips_send_cmd(ha, scb);
2604
2605 if (ret == IPS_SUCCESS)
2606 ips_putq_scb_head(&ha->scb_activelist, scb);
2607 else
2608 ha->num_ioctl--;
2609
2610 switch (ret) {
2611 case IPS_FAILURE:
2612 if (scb->scsi_cmd) {
2613 scb->scsi_cmd->result = DID_ERROR << 16;
2614 }
2615
2616 ips_freescb(ha, scb);
2617 break;
2618 case IPS_SUCCESS_IMM:
2619 ips_freescb(ha, scb);
2620 break;
2621 default:
2622 break;
2623 }
2624
2625 }
2626
2627
2628
2629
2630
2631 p = ha->scb_waitlist.head;
2632 while ((p) && (scb = ips_getscb(ha))) {
2633 if ((scmd_channel(p) > 0)
2634 && (ha->dcdb_active[scmd_channel(p) - 1] & (1 << scmd_id(p)))) {
2637 ips_freescb(ha, scb);
2638 p = (struct scsi_cmnd *) p->host_scribble;
2639 continue;
2640 }
2641
2642 q = p;
2643 SC = ips_removeq_wait(&ha->scb_waitlist, q);
2644
2645 if (intr == IPS_INTR_ON)
2646 spin_unlock(host->host_lock);
2647
2648 SC->result = DID_OK;
2649 SC->host_scribble = NULL;
2650
2651 scb->target_id = SC->device->id;
2652 scb->lun = SC->device->lun;
2653 scb->bus = SC->device->channel;
2654 scb->scsi_cmd = SC;
2655 scb->breakup = 0;
2656 scb->data_len = 0;
2657 scb->callback = ipsintr_done;
2658 scb->timeout = ips_cmd_timeout;
2659 memset(&scb->cmd, 0, 16);
2660
2661
2662 memcpy(scb->cdb, SC->cmnd, SC->cmd_len);
2663
2664 scb->sg_count = scsi_dma_map(SC);
2665 BUG_ON(scb->sg_count < 0);
2666 if (scb->sg_count) {
2667 struct scatterlist *sg;
2668 int i;
2669
2670 scb->flags |= IPS_SCB_MAP_SG;
2671
2672 scsi_for_each_sg(SC, sg, scb->sg_count, i) {
2673 if (ips_fill_scb_sg_single
2674 (ha, sg_dma_address(sg), scb, i,
2675 sg_dma_len(sg)) < 0)
2676 break;
2677 }
2678 scb->dcdb.transfer_length = scb->data_len;
2679 } else {
2680 scb->data_busaddr = 0L;
2681 scb->sg_len = 0;
2682 scb->data_len = 0;
2683 scb->dcdb.transfer_length = 0;
2684 }
2685
2686 scb->dcdb.cmd_attribute =
2687 ips_command_direction[scb->scsi_cmd->cmnd[0]];
2688
2689
2690
2691 if ((scb->scsi_cmd->cmnd[0] == WRITE_BUFFER) &&
2692 (scb->data_len == 0))
2693 scb->dcdb.cmd_attribute = 0;
2694
2695 if (!(scb->dcdb.cmd_attribute & 0x3))
2696 scb->dcdb.transfer_length = 0;
2697
2698 if (scb->data_len >= IPS_MAX_XFER) {
2699 scb->dcdb.cmd_attribute |= IPS_TRANSFER64K;
2700 scb->dcdb.transfer_length = 0;
2701 }
2702 if (intr == IPS_INTR_ON)
2703 spin_lock(host->host_lock);
2704
2705 ret = ips_send_cmd(ha, scb);
2706
2707 switch (ret) {
2708 case IPS_SUCCESS:
2709 ips_putq_scb_head(&ha->scb_activelist, scb);
2710 break;
2711 case IPS_FAILURE:
2712 if (scb->scsi_cmd) {
2713 scb->scsi_cmd->result = DID_ERROR << 16;
2714 scsi_done(scb->scsi_cmd);
2715 }
2716
2717 if (scb->bus)
2718 ha->dcdb_active[scb->bus - 1] &=
2719 ~(1 << scb->target_id);
2720
2721 ips_freescb(ha, scb);
2722 break;
2723 case IPS_SUCCESS_IMM:
2724 if (scb->scsi_cmd)
2725 scsi_done(scb->scsi_cmd);
2726
2727 if (scb->bus)
2728 ha->dcdb_active[scb->bus - 1] &=
2729 ~(1 << scb->target_id);
2730
2731 ips_freescb(ha, scb);
2732 break;
2733 default:
2734 break;
2735 }
2736
2737 p = (struct scsi_cmnd *) p->host_scribble;
2738
2739 }
2740
2741 if (intr == IPS_INTR_ON)
2742 spin_unlock(host->host_lock);
2743 }
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
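/*
 * Simple singly-linked queue helpers.  Three flavours are used by the
 * driver: SCB queues chained through scb->q_next, wait queues of struct
 * scsi_cmnd chained through host_scribble, and passthru (copp) wait queues
 * chained through item->next.  Callers are expected to hold the host
 * adapter lock around these routines.
 */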
2756 static void
2757 ips_putq_scb_head(ips_scb_queue_t * queue, ips_scb_t * item)
2758 {
2759 METHOD_TRACE("ips_putq_scb_head", 1);
2760
2761 if (!item)
2762 return;
2763
2764 item->q_next = queue->head;
2765 queue->head = item;
2766
2767 if (!queue->tail)
2768 queue->tail = item;
2769
2770 queue->count++;
2771 }
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784 static ips_scb_t *
2785 ips_removeq_scb_head(ips_scb_queue_t * queue)
2786 {
2787 ips_scb_t *item;
2788
2789 METHOD_TRACE("ips_removeq_scb_head", 1);
2790
2791 item = queue->head;
2792
2793 if (!item) {
2794 return (NULL);
2795 }
2796
2797 queue->head = item->q_next;
2798 item->q_next = NULL;
2799
2800 if (queue->tail == item)
2801 queue->tail = NULL;
2802
2803 queue->count--;
2804
2805 return (item);
2806 }
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819 static ips_scb_t *
2820 ips_removeq_scb(ips_scb_queue_t * queue, ips_scb_t * item)
2821 {
2822 ips_scb_t *p;
2823
2824 METHOD_TRACE("ips_removeq_scb", 1);
2825
2826 if (!item)
2827 return (NULL);
2828
2829 if (item == queue->head) {
2830 return (ips_removeq_scb_head(queue));
2831 }
2832
2833 p = queue->head;
2834
2835 while ((p) && (item != p->q_next))
2836 p = p->q_next;
2837
2838 if (p) {
2839
2840 p->q_next = item->q_next;
2841
2842 if (!item->q_next)
2843 queue->tail = p;
2844
2845 item->q_next = NULL;
2846 queue->count--;
2847
2848 return (item);
2849 }
2850
2851 return (NULL);
2852 }
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865 static void ips_putq_wait_tail(ips_wait_queue_entry_t *queue, struct scsi_cmnd *item)
2866 {
2867 METHOD_TRACE("ips_putq_wait_tail", 1);
2868
2869 if (!item)
2870 return;
2871
2872 item->host_scribble = NULL;
2873
2874 if (queue->tail)
2875 queue->tail->host_scribble = (char *) item;
2876
2877 queue->tail = item;
2878
2879 if (!queue->head)
2880 queue->head = item;
2881
2882 queue->count++;
2883 }
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896 static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *queue)
2897 {
2898 struct scsi_cmnd *item;
2899
2900 METHOD_TRACE("ips_removeq_wait_head", 1);
2901
2902 item = queue->head;
2903
2904 if (!item) {
2905 return (NULL);
2906 }
2907
2908 queue->head = (struct scsi_cmnd *) item->host_scribble;
2909 item->host_scribble = NULL;
2910
2911 if (queue->tail == item)
2912 queue->tail = NULL;
2913
2914 queue->count--;
2915
2916 return (item);
2917 }
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930 static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *queue,
2931 struct scsi_cmnd *item)
2932 {
2933 struct scsi_cmnd *p;
2934
2935 METHOD_TRACE("ips_removeq_wait", 1);
2936
2937 if (!item)
2938 return (NULL);
2939
2940 if (item == queue->head) {
2941 return (ips_removeq_wait_head(queue));
2942 }
2943
2944 p = queue->head;
2945
2946 while ((p) && (item != (struct scsi_cmnd *) p->host_scribble))
2947 p = (struct scsi_cmnd *) p->host_scribble;
2948
2949 if (p) {
2950
2951 p->host_scribble = item->host_scribble;
2952
2953 if (!item->host_scribble)
2954 queue->tail = p;
2955
2956 item->host_scribble = NULL;
2957 queue->count--;
2958
2959 return (item);
2960 }
2961
2962 return (NULL);
2963 }
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976 static void
2977 ips_putq_copp_tail(ips_copp_queue_t * queue, ips_copp_wait_item_t * item)
2978 {
2979 METHOD_TRACE("ips_putq_copp_tail", 1);
2980
2981 if (!item)
2982 return;
2983
2984 item->next = NULL;
2985
2986 if (queue->tail)
2987 queue->tail->next = item;
2988
2989 queue->tail = item;
2990
2991 if (!queue->head)
2992 queue->head = item;
2993
2994 queue->count++;
2995 }
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008 static ips_copp_wait_item_t *
3009 ips_removeq_copp_head(ips_copp_queue_t * queue)
3010 {
3011 ips_copp_wait_item_t *item;
3012
3013 METHOD_TRACE("ips_removeq_copp_head", 1);
3014
3015 item = queue->head;
3016
3017 if (!item) {
3018 return (NULL);
3019 }
3020
3021 queue->head = item->next;
3022 item->next = NULL;
3023
3024 if (queue->tail == item)
3025 queue->tail = NULL;
3026
3027 queue->count--;
3028
3029 return (item);
3030 }
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043 static ips_copp_wait_item_t *
3044 ips_removeq_copp(ips_copp_queue_t * queue, ips_copp_wait_item_t * item)
3045 {
3046 ips_copp_wait_item_t *p;
3047
3048 METHOD_TRACE("ips_removeq_copp", 1);
3049
3050 if (!item)
3051 return (NULL);
3052
3053 if (item == queue->head) {
3054 return (ips_removeq_copp_head(queue));
3055 }
3056
3057 p = queue->head;
3058
3059 while ((p) && (item != p->next))
3060 p = p->next;
3061
3062 if (p) {
3063
3064 p->next = item->next;
3065
3066 if (!item->next)
3067 queue->tail = p;
3068
3069 item->next = NULL;
3070 queue->count--;
3071
3072 return (item);
3073 }
3074
3075 return (NULL);
3076 }
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
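/* ipsintr_blocking - completion callback used by ips_send_wait(): frees the
 * SCB and clears ha->waitflag so the synchronous wait can finish. */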
3087 static void
3088 ipsintr_blocking(ips_ha_t * ha, ips_scb_t * scb)
3089 {
3090 METHOD_TRACE("ipsintr_blocking", 2);
3091
3092 ips_freescb(ha, scb);
3093 if (ha->waitflag && ha->cmd_in_progress == scb->cdb[0]) {
3094 ha->waitflag = false;
3095
3096 return;
3097 }
3098 }
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109 static void
3110 ipsintr_done(ips_ha_t * ha, ips_scb_t * scb)
3111 {
3112 METHOD_TRACE("ipsintr_done", 2);
3113
3114 if (!scb) {
3115 IPS_PRINTK(KERN_WARNING, ha->pcidev,
3116 "Spurious interrupt; scb NULL.\n");
3117
3118 return;
3119 }
3120
3121 if (scb->scsi_cmd == NULL) {
3122
3123 IPS_PRINTK(KERN_WARNING, ha->pcidev,
3124 "Spurious interrupt; scsi_cmd not set.\n");
3125
3126 return;
3127 }
3128
3129 ips_done(ha, scb);
3130 }
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
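/*
 * ips_done - finish a completed command.  Passthru commands get their ioctl
 * cleanup; commands that were broken up because they exceeded the adapter's
 * scatter-gather limits are re-issued with the remaining segments; everything
 * else is completed back to the SCSI midlayer and its SCB freed.
 */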
3141 static void
3142 ips_done(ips_ha_t * ha, ips_scb_t * scb)
3143 {
3144 int ret;
3145
3146 METHOD_TRACE("ips_done", 1);
3147
3148 if (!scb)
3149 return;
3150
3151 if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) {
3152 ips_cleanup_passthru(ha, scb);
3153 ha->num_ioctl--;
3154 } else {
3155
3156
3157
3158
3159
3160 if ((scb->breakup) || (scb->sg_break)) {
3161 struct scatterlist *sg;
3162 int i, sg_dma_index, ips_sg_index = 0;
3163
3164
3165 scb->data_len = 0;
3166
3167 sg = scsi_sglist(scb->scsi_cmd);
3168
3169
3170 sg_dma_index = scb->breakup;
3171 for (i = 0; i < scb->breakup; i++)
3172 sg = sg_next(sg);
3173
3174
3175 ips_fill_scb_sg_single(ha,
3176 sg_dma_address(sg),
3177 scb, ips_sg_index++,
3178 sg_dma_len(sg));
3179
3180 for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
3181 sg_dma_index++, sg = sg_next(sg)) {
3182 if (ips_fill_scb_sg_single
3183 (ha,
3184 sg_dma_address(sg),
3185 scb, ips_sg_index++,
3186 sg_dma_len(sg)) < 0)
3187 break;
3188 }
3189
3190 scb->dcdb.transfer_length = scb->data_len;
3191 scb->dcdb.cmd_attribute |=
3192 ips_command_direction[scb->scsi_cmd->cmnd[0]];
3193
3194 if (!(scb->dcdb.cmd_attribute & 0x3))
3195 scb->dcdb.transfer_length = 0;
3196
3197 if (scb->data_len >= IPS_MAX_XFER) {
3198 scb->dcdb.cmd_attribute |= IPS_TRANSFER64K;
3199 scb->dcdb.transfer_length = 0;
3200 }
3201
3202 ret = ips_send_cmd(ha, scb);
3203
3204 switch (ret) {
3205 case IPS_FAILURE:
3206 if (scb->scsi_cmd) {
3207 scb->scsi_cmd->result = DID_ERROR << 16;
3208 scsi_done(scb->scsi_cmd);
3209 }
3210
3211 ips_freescb(ha, scb);
3212 break;
3213 case IPS_SUCCESS_IMM:
3214 if (scb->scsi_cmd) {
3215 scb->scsi_cmd->result = DID_ERROR << 16;
3216 scsi_done(scb->scsi_cmd);
3217 }
3218
3219 ips_freescb(ha, scb);
3220 break;
3221 default:
3222 break;
3223 }
3224
3225 return;
3226 }
3227 }
3228
3229 if (scb->bus) {
3230 ha->dcdb_active[scb->bus - 1] &= ~(1 << scb->target_id);
3231 }
3232
3233 scsi_done(scb->scsi_cmd);
3234
3235 ips_freescb(ha, scb);
3236 }
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
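/*
 * ips_map_status - translate the adapter's basic/extended status codes for a
 * failed command into a SCSI midlayer result (DID_* in the upper 16 bits,
 * with a CHECK CONDITION status and copied sense data where appropriate).
 */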
3247 static int
3248 ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp)
3249 {
3250 int errcode;
3251 int device_error;
3252 uint32_t transfer_len;
3253 IPS_DCDB_TABLE_TAPE *tapeDCDB;
3254 IPS_SCSI_INQ_DATA inquiryData;
3255
3256 METHOD_TRACE("ips_map_status", 1);
3257
3258 if (scb->bus) {
3259 DEBUG_VAR(2,
3260 "(%s%d) Physical device error (%d %d %d): %x %x, Sense Key: %x, ASC: %x, ASCQ: %x",
3261 ips_name, ha->host_num,
3262 scb->scsi_cmd->device->channel,
3263 scb->scsi_cmd->device->id, scb->scsi_cmd->device->lun,
3264 scb->basic_status, scb->extended_status,
3265 scb->extended_status ==
3266 IPS_ERR_CKCOND ? scb->dcdb.sense_info[2] & 0xf : 0,
3267 scb->extended_status ==
3268 IPS_ERR_CKCOND ? scb->dcdb.sense_info[12] : 0,
3269 scb->extended_status ==
3270 IPS_ERR_CKCOND ? scb->dcdb.sense_info[13] : 0);
3271 }
3272
3273
3274 errcode = DID_ERROR;
3275 device_error = 0;
3276
3277 switch (scb->basic_status & IPS_GSC_STATUS_MASK) {
3278 case IPS_CMD_TIMEOUT:
3279 errcode = DID_TIME_OUT;
3280 break;
3281
3282 case IPS_INVAL_OPCO:
3283 case IPS_INVAL_CMD_BLK:
3284 case IPS_INVAL_PARM_BLK:
3285 case IPS_LD_ERROR:
3286 case IPS_CMD_CMPLT_WERROR:
3287 break;
3288
3289 case IPS_PHYS_DRV_ERROR:
3290 switch (scb->extended_status) {
3291 case IPS_ERR_SEL_TO:
3292 if (scb->bus)
3293 errcode = DID_NO_CONNECT;
3294
3295 break;
3296
3297 case IPS_ERR_OU_RUN:
3298 if ((scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB) ||
3299 (scb->cmd.dcdb.op_code ==
3300 IPS_CMD_EXTENDED_DCDB_SG)) {
3301 tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
3302 transfer_len = tapeDCDB->transfer_length;
3303 } else {
3304 transfer_len =
3305 (uint32_t) scb->dcdb.transfer_length;
3306 }
3307
3308 if ((scb->bus) && (transfer_len < scb->data_len)) {
3309
3310 errcode = DID_OK;
3311
3312
3313 if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
3314 ips_scmd_buf_read(scb->scsi_cmd,
3315 &inquiryData, sizeof (inquiryData));
3316 if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK) {
3317 errcode = DID_TIME_OUT;
3318 break;
3319 }
3320 }
3321 } else
3322 errcode = DID_ERROR;
3323
3324 break;
3325
3326 case IPS_ERR_RECOVERY:
3327
3328 if (scb->bus)
3329 errcode = DID_OK;
3330
3331 break;
3332
3333 case IPS_ERR_HOST_RESET:
3334 case IPS_ERR_DEV_RESET:
3335 errcode = DID_RESET;
3336 break;
3337
3338 case IPS_ERR_CKCOND:
3339 if (scb->bus) {
3340 if ((scb->cmd.dcdb.op_code ==
3341 IPS_CMD_EXTENDED_DCDB)
3342 || (scb->cmd.dcdb.op_code ==
3343 IPS_CMD_EXTENDED_DCDB_SG)) {
3344 tapeDCDB =
3345 (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
3346 memcpy_and_pad(scb->scsi_cmd->sense_buffer,
3347 SCSI_SENSE_BUFFERSIZE,
3348 tapeDCDB->sense_info,
3349 sizeof(tapeDCDB->sense_info), 0);
3350 } else {
3351 memcpy_and_pad(scb->scsi_cmd->sense_buffer,
3352 SCSI_SENSE_BUFFERSIZE,
3353 scb->dcdb.sense_info,
3354 sizeof(scb->dcdb.sense_info), 0);
3355 }
3356 device_error = 2;
3357 }
3358
3359 errcode = DID_OK;
3360
3361 break;
3362
3363 default:
3364 errcode = DID_ERROR;
3365 break;
3366
3367 }
3368 }
3369
3370 scb->scsi_cmd->result = device_error | (errcode << 16);
3371
3372 return (1);
3373 }
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384
3385
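/* ips_send_wait - issue a command and poll for its completion; FFDC time
 * stamp updates use the same path but do not wait.  Used by the
 * initialization and recovery paths in this file. */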
3386 static int
3387 ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr)
3388 {
3389 int ret;
3390
3391 METHOD_TRACE("ips_send_wait", 1);
3392
3393 if (intr != IPS_FFDC) {
3394 ha->waitflag = true;
3395 ha->cmd_in_progress = scb->cdb[0];
3396 }
3397 scb->callback = ipsintr_blocking;
3398 ret = ips_send_cmd(ha, scb);
3399
3400 if ((ret == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM))
3401 return (ret);
3402
3403 if (intr != IPS_FFDC)
3404 ret = ips_wait(ha, timeout, intr);
3405
3406 return (ret);
3407 }
3408
3409
3410
3411
3412
3413
3414
3415
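/* ips_scmd_buf_write / ips_scmd_buf_read - copy a flat buffer into or out of
 * a command's scatter-gather list with local interrupts disabled. */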
3416 static void
3417 ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
3418 {
3419 unsigned long flags;
3420
3421 local_irq_save(flags);
3422 scsi_sg_copy_from_buffer(scmd, data, count);
3423 local_irq_restore(flags);
3424 }
3425
3426
3427
3428
3429
3430
3431
3432
3433 static void
3434 ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
3435 {
3436 unsigned long flags;
3437
3438 local_irq_save(flags);
3439 scsi_sg_copy_to_buffer(scmd, data, count);
3440 local_irq_restore(flags);
3441 }
3442
3443
3444
3445
3446
3447
3448
3449
3450
3451
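/*
 * ips_send_cmd - build the adapter command for an SCB and issue it.  Bus 0
 * targets are logical drives: most CDBs are either emulated in place
 * (INQUIRY, MODE SENSE, READ CAPACITY, REQUEST SENSE, ...) or converted to
 * the adapter's READ/WRITE commands.  Targets on bus 1 and above are physical
 * devices and are wrapped in a DCDB (or extended DCDB) passthrough block.
 * Returns IPS_SUCCESS, IPS_SUCCESS_IMM for commands completed in place, or
 * IPS_FAILURE.
 */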
3452 static int
3453 ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
3454 {
3455 int ret;
3456 char *sp;
3457 int device_error;
3458 IPS_DCDB_TABLE_TAPE *tapeDCDB;
3459 int TimeOut;
3460
3461 METHOD_TRACE("ips_send_cmd", 1);
3462
3463 ret = IPS_SUCCESS;
3464
3465 if (!scb->scsi_cmd) {
3466
3467
3468 if (scb->bus > 0) {
3469
3470
3471 if (ha->waitflag && ha->cmd_in_progress == scb->cdb[0])
3472 ha->waitflag = false;
3473
3474 return (1);
3475 }
3476 } else if ((scb->bus == 0) && (!ips_is_passthru(scb->scsi_cmd))) {
3477
3478 ret = IPS_SUCCESS_IMM;
3479
3480 switch (scb->scsi_cmd->cmnd[0]) {
3481 case ALLOW_MEDIUM_REMOVAL:
3482 case REZERO_UNIT:
3483 case ERASE:
3484 case WRITE_FILEMARKS:
3485 case SPACE:
3486 scb->scsi_cmd->result = DID_ERROR << 16;
3487 break;
3488
3489 case START_STOP:
3490 scb->scsi_cmd->result = DID_OK << 16;
3491 break;
3492
3493 case TEST_UNIT_READY:
3494 case INQUIRY:
3495 if (scb->target_id == IPS_ADAPTER_ID) {
3496
3497
3498
3499
3500 if (scb->scsi_cmd->cmnd[0] == TEST_UNIT_READY)
3501 scb->scsi_cmd->result = DID_OK << 16;
3502
3503 if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
3504 IPS_SCSI_INQ_DATA inquiry;
3505
3506 memset(&inquiry, 0,
3507 sizeof (IPS_SCSI_INQ_DATA));
3508
3509 inquiry.DeviceType =
3510 IPS_SCSI_INQ_TYPE_PROCESSOR;
3511 inquiry.DeviceTypeQualifier =
3512 IPS_SCSI_INQ_LU_CONNECTED;
3513 inquiry.Version = IPS_SCSI_INQ_REV2;
3514 inquiry.ResponseDataFormat =
3515 IPS_SCSI_INQ_RD_REV2;
3516 inquiry.AdditionalLength = 31;
3517 inquiry.Flags[0] =
3518 IPS_SCSI_INQ_Address16;
3519 inquiry.Flags[1] =
3520 IPS_SCSI_INQ_WBus16 |
3521 IPS_SCSI_INQ_Sync;
3522 memcpy(inquiry.VendorId, "IBM     ", 8);
3523 memcpy(inquiry.ProductId, "SERVERAID       ", 16);
3526 memcpy(inquiry.ProductRevisionLevel,
3527 "1.00", 4);
3528
3529 ips_scmd_buf_write(scb->scsi_cmd,
3530 &inquiry,
3531 sizeof (inquiry));
3532
3533 scb->scsi_cmd->result = DID_OK << 16;
3534 }
3535 } else {
3536 scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO;
3537 scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb);
3538 scb->cmd.logical_info.reserved = 0;
3539 scb->cmd.logical_info.reserved2 = 0;
3540 scb->data_len = sizeof (IPS_LD_INFO);
3541 scb->data_busaddr = ha->logical_drive_info_dma_addr;
3542 scb->flags = 0;
3543 scb->cmd.logical_info.buffer_addr = scb->data_busaddr;
3544 ret = IPS_SUCCESS;
3545 }
3546
3547 break;
3548
3549 case REQUEST_SENSE:
3550 ips_reqsen(ha, scb);
3551 scb->scsi_cmd->result = DID_OK << 16;
3552 break;
3553
3554 case READ_6:
3555 case WRITE_6:
3556 if (!scb->sg_len) {
3557 scb->cmd.basic_io.op_code =
3558 (scb->scsi_cmd->cmnd[0] ==
3559 READ_6) ? IPS_CMD_READ : IPS_CMD_WRITE;
3560 scb->cmd.basic_io.enhanced_sg = 0;
3561 scb->cmd.basic_io.sg_addr =
3562 cpu_to_le32(scb->data_busaddr);
3563 } else {
3564 scb->cmd.basic_io.op_code =
3565 (scb->scsi_cmd->cmnd[0] ==
3566 READ_6) ? IPS_CMD_READ_SG :
3567 IPS_CMD_WRITE_SG;
3568 scb->cmd.basic_io.enhanced_sg =
3569 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3570 scb->cmd.basic_io.sg_addr =
3571 cpu_to_le32(scb->sg_busaddr);
3572 }
3573
3574 scb->cmd.basic_io.segment_4G = 0;
3575 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
3576 scb->cmd.basic_io.log_drv = scb->target_id;
3577 scb->cmd.basic_io.sg_count = scb->sg_len;
3578
3579 if (scb->cmd.basic_io.lba)
3580 le32_add_cpu(&scb->cmd.basic_io.lba,
3581 le16_to_cpu(scb->cmd.basic_io.sector_count));
3583 else
3584 scb->cmd.basic_io.lba =
3585 (((scb->scsi_cmd->cmnd[1] & 0x1f) << 16) |
3586 (scb->scsi_cmd->cmnd[2] << 8) |
3587 (scb->scsi_cmd->cmnd[3]));
3589
3590 scb->cmd.basic_io.sector_count =
3591 cpu_to_le16(scb->data_len / IPS_BLKSIZE);
3592
3593 if (le16_to_cpu(scb->cmd.basic_io.sector_count) == 0)
3594 scb->cmd.basic_io.sector_count =
3595 cpu_to_le16(256);
3596
3597 ret = IPS_SUCCESS;
3598 break;
3599
3600 case READ_10:
3601 case WRITE_10:
3602 if (!scb->sg_len) {
3603 scb->cmd.basic_io.op_code =
3604 (scb->scsi_cmd->cmnd[0] ==
3605 READ_10) ? IPS_CMD_READ : IPS_CMD_WRITE;
3606 scb->cmd.basic_io.enhanced_sg = 0;
3607 scb->cmd.basic_io.sg_addr =
3608 cpu_to_le32(scb->data_busaddr);
3609 } else {
3610 scb->cmd.basic_io.op_code =
3611 (scb->scsi_cmd->cmnd[0] ==
3612 READ_10) ? IPS_CMD_READ_SG :
3613 IPS_CMD_WRITE_SG;
3614 scb->cmd.basic_io.enhanced_sg =
3615 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3616 scb->cmd.basic_io.sg_addr =
3617 cpu_to_le32(scb->sg_busaddr);
3618 }
3619
3620 scb->cmd.basic_io.segment_4G = 0;
3621 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
3622 scb->cmd.basic_io.log_drv = scb->target_id;
3623 scb->cmd.basic_io.sg_count = scb->sg_len;
3624
3625 if (scb->cmd.basic_io.lba)
3626 le32_add_cpu(&scb->cmd.basic_io.lba,
3627 le16_to_cpu(scb->cmd.basic_io.sector_count));
3629 else
3630 scb->cmd.basic_io.lba =
3631 ((scb->scsi_cmd->cmnd[2] << 24) |
3632 (scb->scsi_cmd->cmnd[3] << 16) |
3633 (scb->scsi_cmd->cmnd[4] << 8) |
3634 scb->scsi_cmd->cmnd[5]);
3637
3638 scb->cmd.basic_io.sector_count =
3639 cpu_to_le16(scb->data_len / IPS_BLKSIZE);
3640
3641 if (le16_to_cpu(scb->cmd.basic_io.sector_count) == 0) {
3642
3643
3644
3645
3646
3647 scb->scsi_cmd->result = DID_OK << 16;
3648 } else
3649 ret = IPS_SUCCESS;
3650
3651 break;
3652
3653 case RESERVE:
3654 case RELEASE:
3655 scb->scsi_cmd->result = DID_OK << 16;
3656 break;
3657
3658 case MODE_SENSE:
3659 scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY;
3660 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
3661 scb->cmd.basic_io.segment_4G = 0;
3662 scb->cmd.basic_io.enhanced_sg = 0;
3663 scb->data_len = sizeof (*ha->enq);
3664 scb->cmd.basic_io.sg_addr = ha->enq_busaddr;
3665 ret = IPS_SUCCESS;
3666 break;
3667
3668 case READ_CAPACITY:
3669 scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO;
3670 scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb);
3671 scb->cmd.logical_info.reserved = 0;
3672 scb->cmd.logical_info.reserved2 = 0;
3673 scb->cmd.logical_info.reserved3 = 0;
3674 scb->data_len = sizeof (IPS_LD_INFO);
3675 scb->data_busaddr = ha->logical_drive_info_dma_addr;
3676 scb->flags = 0;
3677 scb->cmd.logical_info.buffer_addr = scb->data_busaddr;
3678 ret = IPS_SUCCESS;
3679 break;
3680
3681 case SEND_DIAGNOSTIC:
3682 case REASSIGN_BLOCKS:
3683 case FORMAT_UNIT:
3684 case SEEK_10:
3685 case VERIFY:
3686 case READ_DEFECT_DATA:
3687 case READ_BUFFER:
3688 case WRITE_BUFFER:
3689 scb->scsi_cmd->result = DID_OK << 16;
3690 break;
3691
3692 default:
3693
3694
3695
3696 sp = (char *) scb->scsi_cmd->sense_buffer;
3697
3698 sp[0] = 0x70;
3699 sp[2] = ILLEGAL_REQUEST;
3700 sp[7] = 0x0A;
3701 sp[12] = 0x20;
3702 sp[13] = 0x00;
3703
3704 device_error = 2;
3705 scb->scsi_cmd->result = device_error | (DID_OK << 16);
3706 break;
3707 }
3708 }
3709
3710 if (ret == IPS_SUCCESS_IMM)
3711 return (ret);
3712
3713
3714 if (scb->bus > 0) {
3715
3716
3717
3718 if (ha->conf->dev[scb->bus - 1][scb->target_id].ucState == 0) {
3719 scb->scsi_cmd->result = DID_NO_CONNECT << 16;
3720 return (IPS_SUCCESS_IMM);
3721 }
3722
3723 ha->dcdb_active[scb->bus - 1] |= (1 << scb->target_id);
3724 scb->cmd.dcdb.command_id = IPS_COMMAND_ID(ha, scb);
3725 scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr +
3726 (unsigned long) &scb->dcdb -
3727 (unsigned long) scb);
3729 scb->cmd.dcdb.reserved = 0;
3730 scb->cmd.dcdb.reserved2 = 0;
3731 scb->cmd.dcdb.reserved3 = 0;
3732 scb->cmd.dcdb.segment_4G = 0;
3733 scb->cmd.dcdb.enhanced_sg = 0;
3734
3735 TimeOut = scsi_cmd_to_rq(scb->scsi_cmd)->timeout;
3736
3737 if (ha->subsys->param[4] & 0x00100000) {
3738 if (!scb->sg_len) {
3739 scb->cmd.dcdb.op_code = IPS_CMD_EXTENDED_DCDB;
3740 } else {
3741 scb->cmd.dcdb.op_code =
3742 IPS_CMD_EXTENDED_DCDB_SG;
3743 scb->cmd.dcdb.enhanced_sg =
3744 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3745 }
3746
3747 tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
3748 tapeDCDB->device_address =
3749 ((scb->bus - 1) << 4) | scb->target_id;
3750 tapeDCDB->cmd_attribute |= IPS_DISCONNECT_ALLOWED;
3751 tapeDCDB->cmd_attribute &= ~IPS_TRANSFER64K;
3752
3753 if (TimeOut) {
3754 if (TimeOut < (10 * HZ))
3755 tapeDCDB->cmd_attribute |= IPS_TIMEOUT10;
3756 else if (TimeOut < (60 * HZ))
3757 tapeDCDB->cmd_attribute |= IPS_TIMEOUT60;
3758 else if (TimeOut < (1200 * HZ))
3759 tapeDCDB->cmd_attribute |= IPS_TIMEOUT20M;
3760 }
3761
3762 tapeDCDB->cdb_length = scb->scsi_cmd->cmd_len;
3763 tapeDCDB->reserved_for_LUN = 0;
3764 tapeDCDB->transfer_length = scb->data_len;
3765 if (scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG)
3766 tapeDCDB->buffer_pointer =
3767 cpu_to_le32(scb->sg_busaddr);
3768 else
3769 tapeDCDB->buffer_pointer =
3770 cpu_to_le32(scb->data_busaddr);
3771 tapeDCDB->sg_count = scb->sg_len;
3772 tapeDCDB->sense_length = sizeof (tapeDCDB->sense_info);
3773 tapeDCDB->scsi_status = 0;
3774 tapeDCDB->reserved = 0;
3775 memcpy(tapeDCDB->scsi_cdb, scb->scsi_cmd->cmnd,
3776 scb->scsi_cmd->cmd_len);
3777 } else {
3778 if (!scb->sg_len) {
3779 scb->cmd.dcdb.op_code = IPS_CMD_DCDB;
3780 } else {
3781 scb->cmd.dcdb.op_code = IPS_CMD_DCDB_SG;
3782 scb->cmd.dcdb.enhanced_sg =
3783 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3784 }
3785
3786 scb->dcdb.device_address =
3787 ((scb->bus - 1) << 4) | scb->target_id;
3788 scb->dcdb.cmd_attribute |= IPS_DISCONNECT_ALLOWED;
3789
3790 if (TimeOut) {
3791 if (TimeOut < (10 * HZ))
3792 scb->dcdb.cmd_attribute |= IPS_TIMEOUT10;
3793 else if (TimeOut < (60 * HZ))
3794 scb->dcdb.cmd_attribute |= IPS_TIMEOUT60;
3795 else if (TimeOut < (1200 * HZ))
3796 scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M;
3797 }
3798
3799 scb->dcdb.transfer_length = scb->data_len;
3800 if (scb->dcdb.cmd_attribute & IPS_TRANSFER64K)
3801 scb->dcdb.transfer_length = 0;
3802 if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB_SG)
3803 scb->dcdb.buffer_pointer =
3804 cpu_to_le32(scb->sg_busaddr);
3805 else
3806 scb->dcdb.buffer_pointer =
3807 cpu_to_le32(scb->data_busaddr);
3808 scb->dcdb.cdb_length = scb->scsi_cmd->cmd_len;
3809 scb->dcdb.sense_length = sizeof (scb->dcdb.sense_info);
3810 scb->dcdb.sg_count = scb->sg_len;
3811 scb->dcdb.reserved = 0;
3812 memcpy(scb->dcdb.scsi_cdb, scb->scsi_cmd->cmnd,
3813 scb->scsi_cmd->cmd_len);
3814 scb->dcdb.scsi_status = 0;
3815 scb->dcdb.reserved2[0] = 0;
3816 scb->dcdb.reserved2[1] = 0;
3817 scb->dcdb.reserved2[2] = 0;
3818 }
3819 }
3820
3821 return ((*ha->func.issue) (ha, scb));
3822 }
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
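/*
 * ips_chkstatus - process one completion picked off the status queue: look
 * up the SCB by command id, remove it from the active list and map the
 * adapter status onto a SCSI result, doing any post-processing the emulated
 * commands need (inquiry data, capacity, mode pages, ...).
 */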
3833 static void
3834 ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus)
3835 {
3836 ips_scb_t *scb;
3837 ips_stat_t *sp;
3838 uint8_t basic_status;
3839 uint8_t ext_status;
3840 int errcode;
3841 IPS_SCSI_INQ_DATA inquiryData;
3842
3843 METHOD_TRACE("ips_chkstatus", 1);
3844
3845 scb = &ha->scbs[pstatus->fields.command_id];
3846 scb->basic_status = basic_status =
3847 pstatus->fields.basic_status & IPS_BASIC_STATUS_MASK;
3848 scb->extended_status = ext_status = pstatus->fields.extended_status;
3849
3850 sp = &ha->sp;
3851 sp->residue_len = 0;
3852 sp->scb_addr = (void *) scb;
3853
3854
3855 ips_removeq_scb(&ha->scb_activelist, scb);
3856
3857 if (!scb->scsi_cmd)
3858
3859 return;
3860
3861 DEBUG_VAR(2, "(%s%d) ips_chkstatus: cmd 0x%X id %d (%d %d %d)",
3862 ips_name,
3863 ha->host_num,
3864 scb->cdb[0],
3865 scb->cmd.basic_io.command_id,
3866 scb->bus, scb->target_id, scb->lun);
3867
3868 if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd)))
3869
3870 return;
3871
3872 errcode = DID_OK;
3873
3874 if (((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_SUCCESS) ||
3875 ((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_RECOVERED_ERROR)) {
3876
3877 if (scb->bus == 0) {
3878 if ((basic_status & IPS_GSC_STATUS_MASK) ==
3879 IPS_CMD_RECOVERED_ERROR) {
3880 DEBUG_VAR(1,
3881 "(%s%d) Recovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x",
3882 ips_name, ha->host_num,
3883 scb->cmd.basic_io.op_code,
3884 basic_status, ext_status);
3885 }
3886
3887 switch (scb->scsi_cmd->cmnd[0]) {
3888 case ALLOW_MEDIUM_REMOVAL:
3889 case REZERO_UNIT:
3890 case ERASE:
3891 case WRITE_FILEMARKS:
3892 case SPACE:
3893 errcode = DID_ERROR;
3894 break;
3895
3896 case START_STOP:
3897 break;
3898
3899 case TEST_UNIT_READY:
3900 if (!ips_online(ha, scb)) {
3901 errcode = DID_TIME_OUT;
3902 }
3903 break;
3904
3905 case INQUIRY:
3906 if (ips_online(ha, scb)) {
3907 ips_inquiry(ha, scb);
3908 } else {
3909 errcode = DID_TIME_OUT;
3910 }
3911 break;
3912
3913 case REQUEST_SENSE:
3914 ips_reqsen(ha, scb);
3915 break;
3916
3917 case READ_6:
3918 case WRITE_6:
3919 case READ_10:
3920 case WRITE_10:
3921 case RESERVE:
3922 case RELEASE:
3923 break;
3924
3925 case MODE_SENSE:
3926 if (!ips_online(ha, scb)
3927 || !ips_msense(ha, scb)) {
3928 errcode = DID_ERROR;
3929 }
3930 break;
3931
3932 case READ_CAPACITY:
3933 if (ips_online(ha, scb))
3934 ips_rdcap(ha, scb);
3935 else {
3936 errcode = DID_TIME_OUT;
3937 }
3938 break;
3939
3940 case SEND_DIAGNOSTIC:
3941 case REASSIGN_BLOCKS:
3942 break;
3943
3944 case FORMAT_UNIT:
3945 errcode = DID_ERROR;
3946 break;
3947
3948 case SEEK_10:
3949 case VERIFY:
3950 case READ_DEFECT_DATA:
3951 case READ_BUFFER:
3952 case WRITE_BUFFER:
3953 break;
3954
3955 default:
3956 errcode = DID_ERROR;
3957 }
3958
3959 scb->scsi_cmd->result = errcode << 16;
3960 } else {
3961
3962 if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
3963 ips_scmd_buf_read(scb->scsi_cmd,
3964 &inquiryData, sizeof (inquiryData));
3965 if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK)
3966 scb->scsi_cmd->result = DID_TIME_OUT << 16;
3967 }
3968 }
3969 } else {
3970 if (scb->bus == 0) {
3971 DEBUG_VAR(1,
3972 "(%s%d) Unrecovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x",
3973 ips_name, ha->host_num,
3974 scb->cmd.basic_io.op_code, basic_status,
3975 ext_status);
3976 }
3977
3978 ips_map_status(ha, scb, sp);
3979 }
3980 }
3981
3982
3983
3984
3985
3986
3987
3988
3989
3990
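/* ips_online - return 1 if the logical drive addressed by the SCB is in a
 * usable state (not offline, free, CRS or SYS) according to the most recent
 * logical drive info. */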
3991 static int
3992 ips_online(ips_ha_t * ha, ips_scb_t * scb)
3993 {
3994 METHOD_TRACE("ips_online", 1);
3995
3996 if (scb->target_id >= IPS_MAX_LD)
3997 return (0);
3998
3999 if ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1) {
4000 memset(ha->logical_drive_info, 0, sizeof (IPS_LD_INFO));
4001 return (0);
4002 }
4003
4004 if (ha->logical_drive_info->drive_info[scb->target_id].state !=
4005 IPS_LD_OFFLINE
4006 && ha->logical_drive_info->drive_info[scb->target_id].state !=
4007 IPS_LD_FREE
4008 && ha->logical_drive_info->drive_info[scb->target_id].state !=
4009 IPS_LD_CRS
4010 && ha->logical_drive_info->drive_info[scb->target_id].state !=
4011 IPS_LD_SYS)
4012 return (1);
4013 else
4014 return (0);
4015 }
4016
4017
4018
4019
4020
4021
4022
4023
4024
4025
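/* ips_inquiry - emulate a SCSI INQUIRY for a logical drive, reporting a
 * connected direct-access device. */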
4026 static int
4027 ips_inquiry(ips_ha_t * ha, ips_scb_t * scb)
4028 {
4029 IPS_SCSI_INQ_DATA inquiry;
4030
4031 METHOD_TRACE("ips_inquiry", 1);
4032
4033 memset(&inquiry, 0, sizeof (IPS_SCSI_INQ_DATA));
4034
4035 inquiry.DeviceType = IPS_SCSI_INQ_TYPE_DASD;
4036 inquiry.DeviceTypeQualifier = IPS_SCSI_INQ_LU_CONNECTED;
4037 inquiry.Version = IPS_SCSI_INQ_REV2;
4038 inquiry.ResponseDataFormat = IPS_SCSI_INQ_RD_REV2;
4039 inquiry.AdditionalLength = 31;
4040 inquiry.Flags[0] = IPS_SCSI_INQ_Address16;
4041 inquiry.Flags[1] =
4042 IPS_SCSI_INQ_WBus16 | IPS_SCSI_INQ_Sync | IPS_SCSI_INQ_CmdQue;
4043 memcpy(inquiry.VendorId, "IBM     ", 8);
4044 memcpy(inquiry.ProductId, "SERVERAID       ", 16);
4045 memcpy(inquiry.ProductRevisionLevel, "1.00", 4);
4046
4047 ips_scmd_buf_write(scb->scsi_cmd, &inquiry, sizeof (inquiry));
4048
4049 return (1);
4050 }
4051
4052
4053
4054
4055
4056
4057
4058
4059
4060
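/* ips_rdcap - emulate READ CAPACITY(10) for a logical drive: the last LBA
 * and the block size, both big-endian. */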
4061 static int
4062 ips_rdcap(ips_ha_t * ha, ips_scb_t * scb)
4063 {
4064 IPS_SCSI_CAPACITY cap;
4065
4066 METHOD_TRACE("ips_rdcap", 1);
4067
4068 if (scsi_bufflen(scb->scsi_cmd) < 8)
4069 return (0);
4070
4071 cap.lba =
4072 cpu_to_be32(le32_to_cpu
4073 (ha->logical_drive_info->
4074 drive_info[scb->target_id].sector_count) - 1);
4075 cap.len = cpu_to_be32((uint32_t) IPS_BLKSIZE);
4076
4077 ips_scmd_buf_write(scb->scsi_cmd, &cap, sizeof (cap));
4078
4079 return (1);
4080 }
4081
4082
4083
4084
4085
4086
4087
4088
4089
4090
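/* ips_msense - emulate MODE SENSE for a logical drive.  A synthetic geometry
 * is chosen from the drive size and controller flags, and pages 3 (format),
 * 4 (rigid disk geometry) and 8 (caching) are supported. */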
4091 static int
4092 ips_msense(ips_ha_t * ha, ips_scb_t * scb)
4093 {
4094 uint16_t heads;
4095 uint16_t sectors;
4096 uint32_t cylinders;
4097 IPS_SCSI_MODE_PAGE_DATA mdata;
4098
4099 METHOD_TRACE("ips_msense", 1);
4100
4101 if (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) > 0x400000 &&
4102 (ha->enq->ucMiscFlag & 0x8) == 0) {
4103 heads = IPS_NORM_HEADS;
4104 sectors = IPS_NORM_SECTORS;
4105 } else {
4106 heads = IPS_COMP_HEADS;
4107 sectors = IPS_COMP_SECTORS;
4108 }
4109
4110 cylinders =
4111 (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) -
4112 1) / (heads * sectors);
4113
4114 memset(&mdata, 0, sizeof (IPS_SCSI_MODE_PAGE_DATA));
4115
4116 mdata.hdr.BlockDescLength = 8;
4117
4118 switch (scb->scsi_cmd->cmnd[2] & 0x3f) {
4119 case 0x03:
4120 mdata.pdata.pg3.PageCode = 3;
4121 mdata.pdata.pg3.PageLength = sizeof (IPS_SCSI_MODE_PAGE3);
4122 mdata.hdr.DataLength =
4123 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg3.PageLength;
4124 mdata.pdata.pg3.TracksPerZone = 0;
4125 mdata.pdata.pg3.AltSectorsPerZone = 0;
4126 mdata.pdata.pg3.AltTracksPerZone = 0;
4127 mdata.pdata.pg3.AltTracksPerVolume = 0;
4128 mdata.pdata.pg3.SectorsPerTrack = cpu_to_be16(sectors);
4129 mdata.pdata.pg3.BytesPerSector = cpu_to_be16(IPS_BLKSIZE);
4130 mdata.pdata.pg3.Interleave = cpu_to_be16(1);
4131 mdata.pdata.pg3.TrackSkew = 0;
4132 mdata.pdata.pg3.CylinderSkew = 0;
4133 mdata.pdata.pg3.flags = IPS_SCSI_MP3_SoftSector;
4134 break;
4135
4136 case 0x4:
4137 mdata.pdata.pg4.PageCode = 4;
4138 mdata.pdata.pg4.PageLength = sizeof (IPS_SCSI_MODE_PAGE4);
4139 mdata.hdr.DataLength =
4140 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg4.PageLength;
4141 mdata.pdata.pg4.CylindersHigh =
4142 cpu_to_be16((cylinders >> 8) & 0xFFFF);
4143 mdata.pdata.pg4.CylindersLow = (cylinders & 0xFF);
4144 mdata.pdata.pg4.Heads = heads;
4145 mdata.pdata.pg4.WritePrecompHigh = 0;
4146 mdata.pdata.pg4.WritePrecompLow = 0;
4147 mdata.pdata.pg4.ReducedWriteCurrentHigh = 0;
4148 mdata.pdata.pg4.ReducedWriteCurrentLow = 0;
4149 mdata.pdata.pg4.StepRate = cpu_to_be16(1);
4150 mdata.pdata.pg4.LandingZoneHigh = 0;
4151 mdata.pdata.pg4.LandingZoneLow = 0;
4152 mdata.pdata.pg4.flags = 0;
4153 mdata.pdata.pg4.RotationalOffset = 0;
4154 mdata.pdata.pg4.MediumRotationRate = 0;
4155 break;
4156 case 0x8:
4157 mdata.pdata.pg8.PageCode = 8;
4158 mdata.pdata.pg8.PageLength = sizeof (IPS_SCSI_MODE_PAGE8);
4159 mdata.hdr.DataLength =
4160 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg8.PageLength;
4161
4162 break;
4163
4164 default:
4165 return (0);
4166 }
4167
4168 ips_scmd_buf_write(scb->scsi_cmd, &mdata, sizeof (mdata));
4169
4170 return (1);
4171 }
4172
4173
4174
4175
4176
4177
4178
4179
4180
4181
4182 static int
4183 ips_reqsen(ips_ha_t * ha, ips_scb_t * scb)
4184 {
4185 IPS_SCSI_REQSEN reqsen;
4186
4187 METHOD_TRACE("ips_reqsen", 1);
4188
4189 memset(&reqsen, 0, sizeof (IPS_SCSI_REQSEN));
4190
4191 reqsen.ResponseCode =
4192 IPS_SCSI_REQSEN_VALID | IPS_SCSI_REQSEN_CURRENT_ERR;
4193 reqsen.AdditionalLength = 10;
4194 reqsen.AdditionalSenseCode = IPS_SCSI_REQSEN_NO_SENSE;
4195 reqsen.AdditionalSenseCodeQual = IPS_SCSI_REQSEN_NO_SENSE;
4196
4197 ips_scmd_buf_write(scb->scsi_cmd, &reqsen, sizeof (reqsen));
4198
4199 return (1);
4200 }
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210
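/* ips_free - release all per-adapter resources: DMA-coherent buffers,
 * configuration structures, the SCB pool and the memory-mapped register
 * window. */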
4211 static void
4212 ips_free(ips_ha_t * ha)
4213 {
4214
4215 METHOD_TRACE("ips_free", 1);
4216
4217 if (ha) {
4218 if (ha->enq) {
4219 dma_free_coherent(&ha->pcidev->dev, sizeof(IPS_ENQ),
4220 ha->enq, ha->enq_busaddr);
4221 ha->enq = NULL;
4222 }
4223
4224 kfree(ha->conf);
4225 ha->conf = NULL;
4226
4227 if (ha->adapt) {
4228 dma_free_coherent(&ha->pcidev->dev,
4229 sizeof (IPS_ADAPTER) +
4230 sizeof (IPS_IO_CMD), ha->adapt,
4231 ha->adapt->hw_status_start);
4232 ha->adapt = NULL;
4233 }
4234
4235 if (ha->logical_drive_info) {
4236 dma_free_coherent(&ha->pcidev->dev,
4237 sizeof (IPS_LD_INFO),
4238 ha->logical_drive_info,
4239 ha->logical_drive_info_dma_addr);
4240 ha->logical_drive_info = NULL;
4241 }
4242
4243 kfree(ha->nvram);
4244 ha->nvram = NULL;
4245
4246 kfree(ha->subsys);
4247 ha->subsys = NULL;
4248
4249 if (ha->ioctl_data) {
4250 dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
4251 ha->ioctl_data, ha->ioctl_busaddr);
4252 ha->ioctl_data = NULL;
4253 ha->ioctl_datasize = 0;
4254 ha->ioctl_len = 0;
4255 }
4256 ips_deallocatescbs(ha, ha->max_cmds);
4257
4258
4259 if (ha->mem_ptr) {
4260 iounmap(ha->ioremap_ptr);
4261 ha->ioremap_ptr = NULL;
4262 ha->mem_ptr = NULL;
4263 }
4264
4265 ha->mem_addr = 0;
4266
4267 }
4268 }
4269
4270
4271
4272
4273
4274
4275
4276
4277
4278
4279 static int
4280 ips_deallocatescbs(ips_ha_t * ha, int cmds)
4281 {
4282 if (ha->scbs) {
4283 dma_free_coherent(&ha->pcidev->dev,
4284 IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds,
4285 ha->scbs->sg_list.list,
4286 ha->scbs->sg_busaddr);
4287 dma_free_coherent(&ha->pcidev->dev, sizeof (ips_scb_t) * cmds,
4288 ha->scbs, ha->scbs->scb_busaddr);
4289 ha->scbs = NULL;
4290 }
4291 return 1;
4292 }
4293
4294
4295
4296
4297
4298
4299
4300
4301
4302
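/* ips_allocatescbs - allocate the SCB array and its scatter-gather lists
 * from DMA-coherent memory, carve per-command SG space out of one large
 * allocation and thread the SCBs onto the free list. */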
4303 static int
4304 ips_allocatescbs(ips_ha_t * ha)
4305 {
4306 ips_scb_t *scb_p;
4307 IPS_SG_LIST ips_sg;
4308 int i;
4309 dma_addr_t command_dma, sg_dma;
4310
4311 METHOD_TRACE("ips_allocatescbs", 1);
4312
4313
4314 ha->scbs = dma_alloc_coherent(&ha->pcidev->dev,
4315 ha->max_cmds * sizeof (ips_scb_t),
4316 &command_dma, GFP_KERNEL);
4317 if (ha->scbs == NULL)
4318 return 0;
4319 ips_sg.list = dma_alloc_coherent(&ha->pcidev->dev,
4320 IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * ha->max_cmds,
4321 &sg_dma, GFP_KERNEL);
4322 if (ips_sg.list == NULL) {
4323 dma_free_coherent(&ha->pcidev->dev,
4324 ha->max_cmds * sizeof (ips_scb_t), ha->scbs,
4325 command_dma);
4326 return 0;
4327 }
4328
4329 memset(ha->scbs, 0, ha->max_cmds * sizeof (ips_scb_t));
4330
4331 for (i = 0; i < ha->max_cmds; i++) {
4332 scb_p = &ha->scbs[i];
4333 scb_p->scb_busaddr = command_dma + sizeof (ips_scb_t) * i;
4334
4335 if (IPS_USE_ENH_SGLIST(ha)) {
4336 scb_p->sg_list.enh_list =
4337 ips_sg.enh_list + i * IPS_MAX_SG;
4338 scb_p->sg_busaddr =
4339 sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i;
4340 } else {
4341 scb_p->sg_list.std_list =
4342 ips_sg.std_list + i * IPS_MAX_SG;
4343 scb_p->sg_busaddr =
4344 sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i;
4345 }
4346
4347
4348 if (i < ha->max_cmds - 1) {
4349 scb_p->q_next = ha->scb_freelist;
4350 ha->scb_freelist = scb_p;
4351 }
4352 }
4353
4354
4355 return (1);
4356 }
4357
4358
4359
4360
4361
4362
4363
4364
4365
4366
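/* ips_init_scb - reset an SCB to a known state while preserving its bus
 * addresses and SG list pointer, and refresh the adapter's dummy command
 * block. */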
4367 static void
4368 ips_init_scb(ips_ha_t * ha, ips_scb_t * scb)
4369 {
4370 IPS_SG_LIST sg_list;
4371 uint32_t cmd_busaddr, sg_busaddr;
4372 METHOD_TRACE("ips_init_scb", 1);
4373
4374 if (scb == NULL)
4375 return;
4376
4377 sg_list.list = scb->sg_list.list;
4378 cmd_busaddr = scb->scb_busaddr;
4379 sg_busaddr = scb->sg_busaddr;
4380
4381 memset(scb, 0, sizeof (ips_scb_t));
4382 memset(ha->dummy, 0, sizeof (IPS_IO_CMD));
4383
4384
4385 ha->dummy->op_code = 0xFF;
4386 ha->dummy->ccsar = cpu_to_le32(ha->adapt->hw_status_start
4387 + sizeof (IPS_ADAPTER));
4388 ha->dummy->command_id = IPS_MAX_CMDS;
4389
4390
4391 scb->scb_busaddr = cmd_busaddr;
4392 scb->sg_busaddr = sg_busaddr;
4393 scb->sg_list.list = sg_list.list;
4394
4395
4396 scb->cmd.basic_io.cccr = cpu_to_le32((uint32_t) IPS_BIT_ILE);
4397 scb->cmd.basic_io.ccsar = cpu_to_le32(ha->adapt->hw_status_start
4398 + sizeof (IPS_ADAPTER));
4399 }
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412 static ips_scb_t *
4413 ips_getscb(ips_ha_t * ha)
4414 {
4415 ips_scb_t *scb;
4416
4417 METHOD_TRACE("ips_getscb", 1);
4418
4419 if ((scb = ha->scb_freelist) == NULL) {
4420
4421 return (NULL);
4422 }
4423
4424 ha->scb_freelist = scb->q_next;
4425 scb->flags = 0;
4426 scb->q_next = NULL;
4427
4428 ips_init_scb(ha, scb);
4429
4430 return (scb);
4431 }
4432
4433
4434
4435
4436
4437
4438
4439
4440
4441
4442
4443
4444 static void
4445 ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
4446 {
4447
4448 METHOD_TRACE("ips_freescb", 1);
4449 if (scb->flags & IPS_SCB_MAP_SG)
4450 scsi_dma_unmap(scb->scsi_cmd);
4451 else if (scb->flags & IPS_SCB_MAP_SINGLE)
4452 dma_unmap_single(&ha->pcidev->dev, scb->data_busaddr,
4453 scb->data_len, IPS_DMA_DIR(scb));
4454
4455
4456 if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) {
4457 scb->q_next = ha->scb_freelist;
4458 ha->scb_freelist = scb;
4459 }
4460 }
4461
4462
4463
4464
4465
4466
4467
4468
4469
4470
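/* ips_isinit_* - return 1 if the adapter already appears initialized: for
 * Copperhead, interrupts enabled and bus mastering on; for Morpheus, a
 * non-zero POST code and a quiet I2O doorbell. */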
4471 static int
4472 ips_isinit_copperhead(ips_ha_t * ha)
4473 {
4474 uint8_t scpr;
4475 uint8_t isr;
4476
4477 METHOD_TRACE("ips_isinit_copperhead", 1);
4478
4479 isr = inb(ha->io_addr + IPS_REG_HISR);
4480 scpr = inb(ha->io_addr + IPS_REG_SCPR);
4481
4482 if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0))
4483 return (0);
4484 else
4485 return (1);
4486 }
4487
4488
4489
4490
4491
4492
4493
4494
4495
4496
4497 static int
4498 ips_isinit_copperhead_memio(ips_ha_t * ha)
4499 {
4500 uint8_t isr = 0;
4501 uint8_t scpr;
4502
4503 METHOD_TRACE("ips_is_init_copperhead_memio", 1);
4504
4505 isr = readb(ha->mem_ptr + IPS_REG_HISR);
4506 scpr = readb(ha->mem_ptr + IPS_REG_SCPR);
4507
4508 if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0))
4509 return (0);
4510 else
4511 return (1);
4512 }
4513
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523 static int
4524 ips_isinit_morpheus(ips_ha_t * ha)
4525 {
4526 uint32_t post;
4527 uint32_t bits;
4528
4529 METHOD_TRACE("ips_is_init_morpheus", 1);
4530
4531 if (ips_isintr_morpheus(ha))
4532 ips_flush_and_reset(ha);
4533
4534 post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
4535 bits = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
4536
4537 if (post == 0)
4538 return (0);
4539 else if (bits & 0x3)
4540 return (0);
4541 else
4542 return (1);
4543 }
4544
4545
4546
4547
4548
4549
4550
4551
4552
4553
4554
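/* ips_flush_and_reset - issue a cache flush with a throw-away SCB, give it
 * up to 60 seconds to complete, then hard-reset the controller through
 * ha->func.reset. */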
4555 static void
4556 ips_flush_and_reset(ips_ha_t *ha)
4557 {
4558 ips_scb_t *scb;
4559 int ret;
4560 int time;
4561 int done;
4562 dma_addr_t command_dma;
4563
4564
4565 scb = dma_alloc_coherent(&ha->pcidev->dev, sizeof(ips_scb_t),
4566 &command_dma, GFP_KERNEL);
4567 if (scb) {
4568 memset(scb, 0, sizeof(ips_scb_t));
4569 ips_init_scb(ha, scb);
4570 scb->scb_busaddr = command_dma;
4571
4572 scb->timeout = ips_cmd_timeout;
4573 scb->cdb[0] = IPS_CMD_FLUSH;
4574
4575 scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
4576 scb->cmd.flush_cache.command_id = IPS_MAX_CMDS;
4577 scb->cmd.flush_cache.state = IPS_NORM_STATE;
4578 scb->cmd.flush_cache.reserved = 0;
4579 scb->cmd.flush_cache.reserved2 = 0;
4580 scb->cmd.flush_cache.reserved3 = 0;
4581 scb->cmd.flush_cache.reserved4 = 0;
4582
4583 ret = ips_send_cmd(ha, scb);
4584
4585 if (ret == IPS_SUCCESS) {
4586 time = 60 * IPS_ONE_SEC;
4587 done = 0;
4588
4589 while ((time > 0) && (!done)) {
4590 done = ips_poll_for_flush_complete(ha);
4591
4592 udelay(1000);
4593 time--;
4594 }
4595 }
4596 }
4597
4598
4599 (*ha->func.reset) (ha);
4600
4601 if (scb)
4602 dma_free_coherent(&ha->pcidev->dev, sizeof(ips_scb_t), scb, command_dma);
4603 }
4604
4605
4606
4607
4608
4609
4610
4611
4612
4613
4614
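/* ips_poll_for_flush_complete - spin on the status port until the flush
 * command (command id IPS_MAX_CMDS) completes, or the port reads back as
 * empty (0xffffffff). */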
4615 static int
4616 ips_poll_for_flush_complete(ips_ha_t * ha)
4617 {
4618 IPS_STATUS cstatus;
4619
4620 while (true) {
4621 cstatus.value = (*ha->func.statupd) (ha);
4622
4623 if (cstatus.value == 0xffffffff)
4624 break;
4625
4626
4627 if (cstatus.fields.command_id == IPS_MAX_CMDS)
4628 return 1;
4629 }
4630
4631 return 0;
4632 }
4633
4634
4635
4636
4637
4638
4639
4640
4641
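/* ips_enable_int_* - turn adapter interrupts back on (the EI bit for
 * Copperhead, clearing bit 3 of the outbound interrupt mask for Morpheus);
 * the trailing read flushes the write. */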
4642 static void
4643 ips_enable_int_copperhead(ips_ha_t * ha)
4644 {
4645 METHOD_TRACE("ips_enable_int_copperhead", 1);
4646
4647 outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR);
4648 inb(ha->io_addr + IPS_REG_HISR);
4649 }
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659 static void
4660 ips_enable_int_copperhead_memio(ips_ha_t * ha)
4661 {
4662 METHOD_TRACE("ips_enable_int_copperhead_memio", 1);
4663
4664 writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR);
4665 readb(ha->mem_ptr + IPS_REG_HISR);
4666 }
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676 static void
4677 ips_enable_int_morpheus(ips_ha_t * ha)
4678 {
4679 uint32_t Oimr;
4680
4681 METHOD_TRACE("ips_enable_int_morpheus", 1);
4682
4683 Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR);
4684 Oimr &= ~0x08;
4685 writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR);
4686 readl(ha->mem_ptr + IPS_REG_I960_OIMR);
4687 }
4688
4689
4690
4691
4692
4693
4694
4695
4696
4697
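/*
 * ips_init_copperhead - wait for the adapter's POST and configuration bytes,
 * check the POST status, wait for the command buffer to drain, then enable
 * bus mastering and interrupts.  The _memio variant is identical except that
 * it uses the memory-mapped register window.
 */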
4698 static int
4699 ips_init_copperhead(ips_ha_t * ha)
4700 {
4701 uint8_t Isr;
4702 uint8_t Cbsp;
4703 uint8_t PostByte[IPS_MAX_POST_BYTES];
4704 int i, j;
4705
4706 METHOD_TRACE("ips_init_copperhead", 1);
4707
4708 for (i = 0; i < IPS_MAX_POST_BYTES; i++) {
4709 for (j = 0; j < 45; j++) {
4710 Isr = inb(ha->io_addr + IPS_REG_HISR);
4711 if (Isr & IPS_BIT_GHI)
4712 break;
4713
4714
4715 MDELAY(IPS_ONE_SEC);
4716 }
4717
4718 if (j >= 45)
4719
4720 return (0);
4721
4722 PostByte[i] = inb(ha->io_addr + IPS_REG_ISPR);
4723 outb(Isr, ha->io_addr + IPS_REG_HISR);
4724 }
4725
4726 if (PostByte[0] < IPS_GOOD_POST_STATUS) {
4727 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4728 "reset controller fails (post status %x %x).\n",
4729 PostByte[0], PostByte[1]);
4730
4731 return (0);
4732 }
4733
4734 for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) {
4735 for (j = 0; j < 240; j++) {
4736 Isr = inb(ha->io_addr + IPS_REG_HISR);
4737 if (Isr & IPS_BIT_GHI)
4738 break;
4739
4740
4741 MDELAY(IPS_ONE_SEC);
4742 }
4743
4744 if (j >= 240)
4745
4746 return (0);
4747
4748 inb(ha->io_addr + IPS_REG_ISPR);
4749 outb(Isr, ha->io_addr + IPS_REG_HISR);
4750 }
4751
4752 for (i = 0; i < 240; i++) {
4753 Cbsp = inb(ha->io_addr + IPS_REG_CBSP);
4754
4755 if ((Cbsp & IPS_BIT_OP) == 0)
4756 break;
4757
4758
4759 MDELAY(IPS_ONE_SEC);
4760 }
4761
4762 if (i >= 240)
4763
4764 return (0);
4765
4766
4767 outl(0x1010, ha->io_addr + IPS_REG_CCCR);
4768
4769
4770 outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR);
4771
4772 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
4773
4774 outl(0, ha->io_addr + IPS_REG_NDAE);
4775
4776
4777 outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR);
4778
4779 return (1);
4780 }
4781
4782
4783
4784
4785
4786
4787
4788
4789
4790
4791 static int
4792 ips_init_copperhead_memio(ips_ha_t * ha)
4793 {
4794 uint8_t Isr = 0;
4795 uint8_t Cbsp;
4796 uint8_t PostByte[IPS_MAX_POST_BYTES];
4797 int i, j;
4798
4799 METHOD_TRACE("ips_init_copperhead_memio", 1);
4800
4801 for (i = 0; i < IPS_MAX_POST_BYTES; i++) {
4802 for (j = 0; j < 45; j++) {
4803 Isr = readb(ha->mem_ptr + IPS_REG_HISR);
4804 if (Isr & IPS_BIT_GHI)
4805 break;
4806
4807
4808 MDELAY(IPS_ONE_SEC);
4809 }
4810
4811 if (j >= 45)
4812
4813 return (0);
4814
4815 PostByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR);
4816 writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
4817 }
4818
4819 if (PostByte[0] < IPS_GOOD_POST_STATUS) {
4820 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4821 "reset controller fails (post status %x %x).\n",
4822 PostByte[0], PostByte[1]);
4823
4824 return (0);
4825 }
4826
4827 for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) {
4828 for (j = 0; j < 240; j++) {
4829 Isr = readb(ha->mem_ptr + IPS_REG_HISR);
4830 if (Isr & IPS_BIT_GHI)
4831 break;
4832
4833
4834 MDELAY(IPS_ONE_SEC);
4835 }
4836
4837 if (j >= 240)
4838
4839 return (0);
4840
4841 readb(ha->mem_ptr + IPS_REG_ISPR);
4842 writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
4843 }
4844
4845 for (i = 0; i < 240; i++) {
4846 Cbsp = readb(ha->mem_ptr + IPS_REG_CBSP);
4847
4848 if ((Cbsp & IPS_BIT_OP) == 0)
4849 break;
4850
4851
4852 MDELAY(IPS_ONE_SEC);
4853 }
4854
4855 if (i >= 240)
4856
4857 return (0);
4858
4859
4860 writel(0x1010, ha->mem_ptr + IPS_REG_CCCR);
4861
4862
4863 writeb(IPS_BIT_EBM, ha->mem_ptr + IPS_REG_SCPR);
4864
4865 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
4866
4867 writel(0, ha->mem_ptr + IPS_REG_NDAE);
4868
4869
4870 writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR);
4871
4872
4873 return (1);
4874 }
4875
4876
4877
4878
4879
4880
4881
4882
4883
4884
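/*
 * ips_init_morpheus - wait for the i960's POST and config messages, handle
 * the battery PIC flash code (0x4F00), verify the POST status, unmask the
 * outbound doorbell interrupt, and note in requires_esl whether the adapter
 * must be cleared during init (see ips_hainit).
 */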
4885 static int
4886 ips_init_morpheus(ips_ha_t * ha)
4887 {
4888 uint32_t Post;
4889 uint32_t Config;
4890 uint32_t Isr;
4891 uint32_t Oimr;
4892 int i;
4893
4894 METHOD_TRACE("ips_init_morpheus", 1);
4895
4896
4897 for (i = 0; i < 45; i++) {
4898 Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
4899
4900 if (Isr & IPS_BIT_I960_MSG0I)
4901 break;
4902
4903
4904 MDELAY(IPS_ONE_SEC);
4905 }
4906
4907 if (i >= 45) {
4908
4909 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4910 "timeout waiting for post.\n");
4911
4912 return (0);
4913 }
4914
4915 Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
4916
4917 if (Post == 0x4F00) {
4918 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4919 "Flashing Battery PIC, Please wait ...\n");
4920
4921
4922 Isr = (uint32_t) IPS_BIT_I960_MSG0I;
4923 writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);
4924
4925 for (i = 0; i < 120; i++) {
4926 Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
4927 if (Post != 0x4F00)
4928 break;
4929
4930 MDELAY(IPS_ONE_SEC);
4931 }
4932
4933 if (i >= 120) {
4934 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4935 "timeout waiting for Battery PIC Flash\n");
4936 return (0);
4937 }
4938
4939 }
4940
4941
4942 Isr = (uint32_t) IPS_BIT_I960_MSG0I;
4943 writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);
4944
4945 if (Post < (IPS_GOOD_POST_STATUS << 8)) {
4946 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4947 "reset controller fails (post status %x).\n", Post);
4948
4949 return (0);
4950 }
4951
4952
4953 for (i = 0; i < 240; i++) {
4954 Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
4955
4956 if (Isr & IPS_BIT_I960_MSG1I)
4957 break;
4958
4959
4960 MDELAY(IPS_ONE_SEC);
4961 }
4962
4963 if (i >= 240) {
4964
4965 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4966 "timeout waiting for config.\n");
4967
4968 return (0);
4969 }
4970
4971 Config = readl(ha->mem_ptr + IPS_REG_I960_MSG1);
4972
4973
4974 Isr = (uint32_t) IPS_BIT_I960_MSG1I;
4975 writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);
4976
4977
4978 Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR);
4979 Oimr &= ~0x8;
4980 writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR);
4981
4982
4983
4984
4985 if (Post == 0xEF10) {
4986 if ((Config == 0x000F) || (Config == 0x0009))
4987 ha->requires_esl = 1;
4988 }
4989
4990 return (1);
4991 }
4992
4993
4994
4995
4996
4997
4998
4999
5000
5001
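/* ips_reset_* - hard-reset the controller (RST bit toggle for Copperhead,
 * doorbell write plus PCI config read for Morpheus) and re-run the init
 * routine, allowing one retry before giving up. */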
5002 static int
5003 ips_reset_copperhead(ips_ha_t * ha)
5004 {
5005 int reset_counter;
5006
5007 METHOD_TRACE("ips_reset_copperhead", 1);
5008
5009 DEBUG_VAR(1, "(%s%d) ips_reset_copperhead: io addr: %x, irq: %d",
5010 ips_name, ha->host_num, ha->io_addr, ha->pcidev->irq);
5011
5012 reset_counter = 0;
5013
5014 while (reset_counter < 2) {
5015 reset_counter++;
5016
5017 outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR);
5018
5019
5020 MDELAY(IPS_ONE_SEC);
5021
5022 outb(0, ha->io_addr + IPS_REG_SCPR);
5023
5024
5025 MDELAY(IPS_ONE_SEC);
5026
5027 if ((*ha->func.init) (ha))
5028 break;
5029 else if (reset_counter >= 2) {
5030
5031 return (0);
5032 }
5033 }
5034
5035 return (1);
5036 }
5037
5038
5039
5040
5041
5042
5043
5044
5045
5046
5047 static int
5048 ips_reset_copperhead_memio(ips_ha_t * ha)
5049 {
5050 int reset_counter;
5051
5052 METHOD_TRACE("ips_reset_copperhead_memio", 1);
5053
5054 DEBUG_VAR(1, "(%s%d) ips_reset_copperhead_memio: mem addr: %x, irq: %d",
5055 ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);
5056
5057 reset_counter = 0;
5058
5059 while (reset_counter < 2) {
5060 reset_counter++;
5061
5062 writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR);
5063
5064
5065 MDELAY(IPS_ONE_SEC);
5066
5067 writeb(0, ha->mem_ptr + IPS_REG_SCPR);
5068
5069
5070 MDELAY(IPS_ONE_SEC);
5071
5072 if ((*ha->func.init) (ha))
5073 break;
5074 else if (reset_counter >= 2) {
5075
5076 return (0);
5077 }
5078 }
5079
5080 return (1);
5081 }
5082
5083
5084
5085
5086
5087
5088
5089
5090
5091
5092 static int
5093 ips_reset_morpheus(ips_ha_t * ha)
5094 {
5095 int reset_counter;
5096 uint8_t junk;
5097
5098 METHOD_TRACE("ips_reset_morpheus", 1);
5099
5100 DEBUG_VAR(1, "(%s%d) ips_reset_morpheus: mem addr: %x, irq: %d",
5101 ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);
5102
5103 reset_counter = 0;
5104
5105 while (reset_counter < 2) {
5106 reset_counter++;
5107
5108 writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR);
5109
5110
5111 MDELAY(5 * IPS_ONE_SEC);
5112
5113
5114 pci_read_config_byte(ha->pcidev, 4, &junk);
5115
5116 if ((*ha->func.init) (ha))
5117 break;
5118 else if (reset_counter >= 2) {
5119
5120 return (0);
5121 }
5122 }
5123
5124 return (1);
5125 }
5126
5127
5128
5129
5130
5131
5132
5133
5134
5135
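/* ips_statinit / ips_statinit_memio - set up the status queue: point the
 * adapter's queue start/end/head/tail registers at the DMA address of the
 * status array and reset the driver's tail pointers. */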
5136 static void
5137 ips_statinit(ips_ha_t * ha)
5138 {
5139 uint32_t phys_status_start;
5140
5141 METHOD_TRACE("ips_statinit", 1);
5142
5143 ha->adapt->p_status_start = ha->adapt->status;
5144 ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS;
5145 ha->adapt->p_status_tail = ha->adapt->status;
5146
5147 phys_status_start = ha->adapt->hw_status_start;
5148 outl(phys_status_start, ha->io_addr + IPS_REG_SQSR);
5149 outl(phys_status_start + IPS_STATUS_Q_SIZE,
5150 ha->io_addr + IPS_REG_SQER);
5151 outl(phys_status_start + IPS_STATUS_SIZE,
5152 ha->io_addr + IPS_REG_SQHR);
5153 outl(phys_status_start, ha->io_addr + IPS_REG_SQTR);
5154
5155 ha->adapt->hw_status_tail = phys_status_start;
5156 }
5157
5158
5159
5160
5161
5162
5163
5164
5165
5166
5167 static void
5168 ips_statinit_memio(ips_ha_t * ha)
5169 {
5170 uint32_t phys_status_start;
5171
5172 METHOD_TRACE("ips_statinit_memio", 1);
5173
5174 ha->adapt->p_status_start = ha->adapt->status;
5175 ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS;
5176 ha->adapt->p_status_tail = ha->adapt->status;
5177
5178 phys_status_start = ha->adapt->hw_status_start;
5179 writel(phys_status_start, ha->mem_ptr + IPS_REG_SQSR);
5180 writel(phys_status_start + IPS_STATUS_Q_SIZE,
5181 ha->mem_ptr + IPS_REG_SQER);
5182 writel(phys_status_start + IPS_STATUS_SIZE, ha->mem_ptr + IPS_REG_SQHR);
5183 writel(phys_status_start, ha->mem_ptr + IPS_REG_SQTR);
5184
5185 ha->adapt->hw_status_tail = phys_status_start;
5186 }
5187
5188
5189
5190
5191 /*
5192  * ips_statupd_copperhead
5193  *   Advance the status-queue tail and return the next completion status.
5194  */
5195
5196
5197 static uint32_t
5198 ips_statupd_copperhead(ips_ha_t * ha)
5199 {
5200 METHOD_TRACE("ips_statupd_copperhead", 1);
5201
5202 if (ha->adapt->p_status_tail != ha->adapt->p_status_end) {
5203 ha->adapt->p_status_tail++;
5204 ha->adapt->hw_status_tail += sizeof (IPS_STATUS);
5205 } else {
5206 ha->adapt->p_status_tail = ha->adapt->p_status_start;
5207 ha->adapt->hw_status_tail = ha->adapt->hw_status_start;
5208 }
5209
5210 outl(ha->adapt->hw_status_tail,
5211 ha->io_addr + IPS_REG_SQTR);
5212
5213 return (ha->adapt->p_status_tail->value);
5214 }
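/*
 * Editor's note (illustrative, not part of the original driver): the
 * copperhead status area behaves as a ring of IPS_MAX_CMDS + 1 entries.
 * Assuming sizeof(IPS_STATUS) is 4 bytes, the two tails advance in step as
 * statuses are consumed:
 *
 *     p_status_tail:  status[0] -> status[1] -> ... -> status[IPS_MAX_CMDS]
 *     hw_status_tail: start     -> start+4   -> ... -> wraps to start
 *
 * After every advance the new hardware tail is written to IPS_REG_SQTR so
 * the adapter knows how far the driver has read.
 */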
5215
5216
5217
5218
5219
5220
5221
5222
5223
5224
5225 static uint32_t
5226 ips_statupd_copperhead_memio(ips_ha_t * ha)
5227 {
5228 METHOD_TRACE("ips_statupd_copperhead_memio", 1);
5229
5230 if (ha->adapt->p_status_tail != ha->adapt->p_status_end) {
5231 ha->adapt->p_status_tail++;
5232 ha->adapt->hw_status_tail += sizeof (IPS_STATUS);
5233 } else {
5234 ha->adapt->p_status_tail = ha->adapt->p_status_start;
5235 ha->adapt->hw_status_tail = ha->adapt->hw_status_start;
5236 }
5237
5238 writel(ha->adapt->hw_status_tail, ha->mem_ptr + IPS_REG_SQTR);
5239
5240 return (ha->adapt->p_status_tail->value);
5241 }
5242
5243
5244
5245
5246
5247
5248
5249
5250
5251
5252 static uint32_t
5253 ips_statupd_morpheus(ips_ha_t * ha)
5254 {
5255 uint32_t val;
5256
5257 METHOD_TRACE("ips_statupd_morpheus", 1);
5258
5259 val = readl(ha->mem_ptr + IPS_REG_I2O_OUTMSGQ);
5260
5261 return (val);
5262 }
5263
5264
5265
5266
5267 /*
5268  * ips_issue_copperhead
5269  *   Send a command to a copperhead controller through the port I/O path.
5270  */
5271
5272
5273 static int
5274 ips_issue_copperhead(ips_ha_t * ha, ips_scb_t * scb)
5275 {
5276 uint32_t TimeOut;
5277 uint32_t val;
5278
5279 METHOD_TRACE("ips_issue_copperhead", 1);
5280
5281 if (scb->scsi_cmd) {
5282 DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
5283 ips_name,
5284 ha->host_num,
5285 scb->cdb[0],
5286 scb->cmd.basic_io.command_id,
5287 scb->bus, scb->target_id, scb->lun);
5288 } else {
5289 DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
5290 ips_name, ha->host_num, scb->cmd.basic_io.command_id);
5291 }
5292
5293 TimeOut = 0;
5294
5295 while ((val =
5296 le32_to_cpu(inl(ha->io_addr + IPS_REG_CCCR))) & IPS_BIT_SEM) {
5297 udelay(1000);
5298
5299 if (++TimeOut >= IPS_SEM_TIMEOUT) {
5300 if (!(val & IPS_BIT_START_STOP))
5301 break;
5302
5303 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5304 "ips_issue val [0x%x].\n", val);
5305 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5306 "ips_issue semaphore chk timeout.\n");
5307
5308 return (IPS_FAILURE);
5309 }
5310 }
5311
5312 outl(scb->scb_busaddr, ha->io_addr + IPS_REG_CCSAR);
5313 outw(IPS_BIT_START_CMD, ha->io_addr + IPS_REG_CCCR);
5314
5315 return (IPS_SUCCESS);
5316 }
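/*
 * Editor's note (illustrative, not from the original sources): issuing a
 * command on copperhead is a two-register handshake.  The polling loop
 * above waits ~1 ms per pass for the CCCR semaphore bit to clear, so
 * IPS_SEM_TIMEOUT is effectively a millisecond count.  Once the channel is
 * free, the physical SCB address is posted and the command is started:
 *
 *     outl(scb->scb_busaddr, ha->io_addr + IPS_REG_CCSAR);
 *     outw(IPS_BIT_START_CMD, ha->io_addr + IPS_REG_CCCR);
 *
 * If the semaphore never clears but IPS_BIT_START_STOP is also clear, the
 * adapter is treated as idle and the command is posted anyway.
 */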
5317
5318
5319
5320
5321
5322
5323
5324
5325
5326
5327 static int
5328 ips_issue_copperhead_memio(ips_ha_t * ha, ips_scb_t * scb)
5329 {
5330 uint32_t TimeOut;
5331 uint32_t val;
5332
5333 METHOD_TRACE("ips_issue_copperhead_memio", 1);
5334
5335 if (scb->scsi_cmd) {
5336 DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
5337 ips_name,
5338 ha->host_num,
5339 scb->cdb[0],
5340 scb->cmd.basic_io.command_id,
5341 scb->bus, scb->target_id, scb->lun);
5342 } else {
5343 DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
5344 ips_name, ha->host_num, scb->cmd.basic_io.command_id);
5345 }
5346
5347 TimeOut = 0;
5348
5349 while ((val = readl(ha->mem_ptr + IPS_REG_CCCR)) & IPS_BIT_SEM) {
5350 udelay(1000);
5351
5352 if (++TimeOut >= IPS_SEM_TIMEOUT) {
5353 if (!(val & IPS_BIT_START_STOP))
5354 break;
5355
5356 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5357 "ips_issue val [0x%x].\n", val);
5358 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5359 "ips_issue semaphore chk timeout.\n");
5360
5361 return (IPS_FAILURE);
5362 }
5363 }
5364
5365 writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_CCSAR);
5366 writel(IPS_BIT_START_CMD, ha->mem_ptr + IPS_REG_CCCR);
5367
5368 return (IPS_SUCCESS);
5369 }
5370
5371
5372
5373
5374 /*
5375  * ips_issue_i2o
5376  *   Send a command to an i960/I2O-style controller via the inbound queue.
5377  */
5378
5379
5380 static int
5381 ips_issue_i2o(ips_ha_t * ha, ips_scb_t * scb)
5382 {
5383
5384 METHOD_TRACE("ips_issue_i2o", 1);
5385
5386 if (scb->scsi_cmd) {
5387 DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
5388 ips_name,
5389 ha->host_num,
5390 scb->cdb[0],
5391 scb->cmd.basic_io.command_id,
5392 scb->bus, scb->target_id, scb->lun);
5393 } else {
5394 DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
5395 ips_name, ha->host_num, scb->cmd.basic_io.command_id);
5396 }
5397
5398 outl(scb->scb_busaddr, ha->io_addr + IPS_REG_I2O_INMSGQ);
5399
5400 return (IPS_SUCCESS);
5401 }
5402
5403
5404
5405
5406
5407
5408
5409
5410
5411
5412 static int
5413 ips_issue_i2o_memio(ips_ha_t * ha, ips_scb_t * scb)
5414 {
5415
5416 METHOD_TRACE("ips_issue_i2o_memio", 1);
5417
5418 if (scb->scsi_cmd) {
5419 DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
5420 ips_name,
5421 ha->host_num,
5422 scb->cdb[0],
5423 scb->cmd.basic_io.command_id,
5424 scb->bus, scb->target_id, scb->lun);
5425 } else {
5426 DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
5427 ips_name, ha->host_num, scb->cmd.basic_io.command_id);
5428 }
5429
5430 writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_I2O_INMSGQ);
5431
5432 return (IPS_SUCCESS);
5433 }
5434
5435
5436
5437
5438 /*
5439  * ips_isintr_copperhead
5440  *   Test whether the adapter is interrupting; spurious sources are acked.
5441  */
5442
5443
5444 static int
5445 ips_isintr_copperhead(ips_ha_t * ha)
5446 {
5447 uint8_t Isr;
5448
5449 METHOD_TRACE("ips_isintr_copperhead", 2);
5450
5451 Isr = inb(ha->io_addr + IPS_REG_HISR);
5452
5453 if (Isr == 0xFF)
5454 /* 0xFF means nothing is there (adapter missing or removed) */
5455 return (0);
5456
5457 if (Isr & IPS_BIT_SCE)
5458 return (1);
5459 else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) {
5460
5461
5462 outb(Isr, ha->io_addr + IPS_REG_HISR);
5463 }
5464
5465 return (0);
5466 }
5467
5468
5469
5470
5471
5472
5473
5474
5475
5476
5477 static int
5478 ips_isintr_copperhead_memio(ips_ha_t * ha)
5479 {
5480 uint8_t Isr;
5481
5482 METHOD_TRACE("ips_isintr_memio", 2);
5483
5484 Isr = readb(ha->mem_ptr + IPS_REG_HISR);
5485
5486 if (Isr == 0xFF)
5487 /* 0xFF means nothing is there (adapter missing or removed) */
5488 return (0);
5489
5490 if (Isr & IPS_BIT_SCE)
5491 return (1);
5492 else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) {
5493
5494
5495 writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
5496 }
5497
5498 return (0);
5499 }
5500
5501
5502
5503
5504
5505
5506
5507
5508
5509
5510 static int
5511 ips_isintr_morpheus(ips_ha_t * ha)
5512 {
5513 uint32_t Isr;
5514
5515 METHOD_TRACE("ips_isintr_morpheus", 2);
5516
5517 Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
5518
5519 if (Isr & IPS_BIT_I2O_OPQI)
5520 return (1);
5521 else
5522 return (0);
5523 }
5524
5525
5526
5527
5528 /*
5529  * ips_wait
5530  *   Poll for command completion for up to "time" seconds.
5531  */
5532
5533
5534 static int
5535 ips_wait(ips_ha_t * ha, int time, int intr)
5536 {
5537 int ret;
5538 int done;
5539
5540 METHOD_TRACE("ips_wait", 1);
5541
5542 ret = IPS_FAILURE;
5543 done = false;
5544
5545 time *= IPS_ONE_SEC;
5546
5547 while ((time > 0) && (!done)) {
5548 if (intr == IPS_INTR_ON) {
5549 if (!ha->waitflag) {
5550 ret = IPS_SUCCESS;
5551 done = true;
5552 break;
5553 }
5554 } else if (intr == IPS_INTR_IORL) {
5555 if (!ha->waitflag) {
5556
5557
5558
5559
5560
5561 ret = IPS_SUCCESS;
5562 done = true;
5563 break;
5564 }
5565
5566
5567
5568
5569
5570
5571
5572 (*ha->func.intr) (ha);
5573 }
5574
5575
5576 udelay(1000);
5577 time--;
5578 }
5579
5580 return (ret);
5581 }
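/*
 * Editor's note (illustrative sketch; the calling pattern shown is roughly
 * what ips_send_wait() does, not new driver code): the "time" argument is
 * in seconds and is multiplied by IPS_ONE_SEC, which is assumed here to be
 * 1000 so that each udelay(1000) pass burns about one millisecond.  With
 * IPS_INTR_IORL the host lock is already held, so the routine polls the
 * adapter's own interrupt service routine instead of waiting for the IRQ:
 *
 *     ha->waitflag = true;
 *     ips_send_cmd(ha, scb);
 *     if (ips_wait(ha, scb->timeout, IPS_INTR_IORL) == IPS_SUCCESS)
 *             ... the command completed within the timeout ...
 */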
5582
5583
5584
5585
5586 /*
5587  * ips_write_driver_status
5588  *   Update NVRAM page 5 with the driver/BIOS versions and the OS type.
5589  */
5590
5591
5592 static int
5593 ips_write_driver_status(ips_ha_t * ha, int intr)
5594 {
5595 METHOD_TRACE("ips_write_driver_status", 1);
5596
5597 if (!ips_readwrite_page5(ha, false, intr)) {
5598 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5599 "unable to read NVRAM page 5.\n");
5600
5601 return (0);
5602 }
5603
5604
5605
5606 if (le32_to_cpu(ha->nvram->signature) != IPS_NVRAM_P5_SIG) {
5607 DEBUG_VAR(1,
5608 "(%s%d) NVRAM page 5 has an invalid signature: %X.",
5609 ips_name, ha->host_num, ha->nvram->signature);
5610 ha->nvram->signature = IPS_NVRAM_P5_SIG;
5611 }
5612
5613 DEBUG_VAR(2,
5614 "(%s%d) Ad Type: %d, Ad Slot: %d, BIOS: %c%c%c%c %c%c%c%c.",
5615 ips_name, ha->host_num, le16_to_cpu(ha->nvram->adapter_type),
5616 ha->nvram->adapter_slot, ha->nvram->bios_high[0],
5617 ha->nvram->bios_high[1], ha->nvram->bios_high[2],
5618 ha->nvram->bios_high[3], ha->nvram->bios_low[0],
5619 ha->nvram->bios_low[1], ha->nvram->bios_low[2],
5620 ha->nvram->bios_low[3]);
5621
5622 ips_get_bios_version(ha, intr);
5623
5624
5625 ha->nvram->operating_system = IPS_OS_LINUX;
5626 ha->nvram->adapter_type = ha->ad_type;
5627 memcpy((char *) ha->nvram->driver_high, IPS_VERSION_HIGH, 4);
5628 memcpy((char *) ha->nvram->driver_low, IPS_VERSION_LOW, 4);
5629 memcpy((char *) ha->nvram->bios_high, ha->bios_version, 4);
5630 memcpy((char *) ha->nvram->bios_low, ha->bios_version + 4, 4);
5631
5632 ha->nvram->versioning = 0;
5633
5634
5635 if (!ips_readwrite_page5(ha, true, intr)) {
5636 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5637 "unable to write NVRAM page 5.\n");
5638
5639 return (0);
5640 }
5641
5642
5643 ha->slot_num = ha->nvram->adapter_slot;
5644
5645 return (1);
5646 }
5647
5648
5649
5650
5651 /*
5652  * ips_read_adapter_status
5653  *   Issue an ENQUIRY command to read the adapter's basic status block.
5654  */
5655
5656
5657 static int
5658 ips_read_adapter_status(ips_ha_t * ha, int intr)
5659 {
5660 ips_scb_t *scb;
5661 int ret;
5662
5663 METHOD_TRACE("ips_read_adapter_status", 1);
5664
5665 scb = &ha->scbs[ha->max_cmds - 1];
5666
5667 ips_init_scb(ha, scb);
5668
5669 scb->timeout = ips_cmd_timeout;
5670 scb->cdb[0] = IPS_CMD_ENQUIRY;
5671
5672 scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY;
5673 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
5674 scb->cmd.basic_io.sg_count = 0;
5675 scb->cmd.basic_io.lba = 0;
5676 scb->cmd.basic_io.sector_count = 0;
5677 scb->cmd.basic_io.log_drv = 0;
5678 scb->data_len = sizeof (*ha->enq);
5679 scb->cmd.basic_io.sg_addr = ha->enq_busaddr;
5680
5681
5682 if (((ret =
5683 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5684 || (ret == IPS_SUCCESS_IMM)
5685 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
5686 return (0);
5687
5688 return (1);
5689 }
5690
5691
5692
5693
5694 /*
5695  * ips_read_subsystem_parameters
5696  *   Read the subsystem parameter block from the adapter.
5697  */
5698
5699
5700 static int
5701 ips_read_subsystem_parameters(ips_ha_t * ha, int intr)
5702 {
5703 ips_scb_t *scb;
5704 int ret;
5705
5706 METHOD_TRACE("ips_read_subsystem_parameters", 1);
5707
5708 scb = &ha->scbs[ha->max_cmds - 1];
5709
5710 ips_init_scb(ha, scb);
5711
5712 scb->timeout = ips_cmd_timeout;
5713 scb->cdb[0] = IPS_CMD_GET_SUBSYS;
5714
5715 scb->cmd.basic_io.op_code = IPS_CMD_GET_SUBSYS;
5716 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
5717 scb->cmd.basic_io.sg_count = 0;
5718 scb->cmd.basic_io.lba = 0;
5719 scb->cmd.basic_io.sector_count = 0;
5720 scb->cmd.basic_io.log_drv = 0;
5721 scb->data_len = sizeof (*ha->subsys);
5722 scb->cmd.basic_io.sg_addr = ha->ioctl_busaddr;
5723
5724
5725 if (((ret =
5726 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5727 || (ret == IPS_SUCCESS_IMM)
5728 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
5729 return (0);
5730
5731 memcpy(ha->subsys, ha->ioctl_data, sizeof(*ha->subsys));
5732 return (1);
5733 }
5734
5735
5736
5737
5738 /*
5739  * ips_read_config
5740  *   Read the adapter configuration; fall back to defaults on failure.
5741  */
5742
5743
5744 static int
5745 ips_read_config(ips_ha_t * ha, int intr)
5746 {
5747 ips_scb_t *scb;
5748 int i;
5749 int ret;
5750
5751 METHOD_TRACE("ips_read_config", 1);
5752
5753
5754 for (i = 0; i < 4; i++)
5755 ha->conf->init_id[i] = 7;
5756
5757 scb = &ha->scbs[ha->max_cmds - 1];
5758
5759 ips_init_scb(ha, scb);
5760
5761 scb->timeout = ips_cmd_timeout;
5762 scb->cdb[0] = IPS_CMD_READ_CONF;
5763
5764 scb->cmd.basic_io.op_code = IPS_CMD_READ_CONF;
5765 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
5766 scb->data_len = sizeof (*ha->conf);
5767 scb->cmd.basic_io.sg_addr = ha->ioctl_busaddr;
5768
5769
5770 if (((ret =
5771 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5772 || (ret == IPS_SUCCESS_IMM)
5773 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {
5774
5775 memset(ha->conf, 0, sizeof (IPS_CONF));
5776
5777
5778 for (i = 0; i < 4; i++)
5779 ha->conf->init_id[i] = 7;
5780
5781
5782 if ((scb->basic_status & IPS_GSC_STATUS_MASK) ==
5783 IPS_CMD_CMPLT_WERROR)
5784 return (1);
5785
5786 return (0);
5787 }
5788
5789 memcpy(ha->conf, ha->ioctl_data, sizeof(*ha->conf));
5790 return (1);
5791 }
5792
5793
5794
5795
5796 /*
5797  * ips_readwrite_page5
5798  *   Read or write NVRAM page 5 through the IOCTL DMA buffer.
5799  */
5800
5801
5802 static int
5803 ips_readwrite_page5(ips_ha_t * ha, int write, int intr)
5804 {
5805 ips_scb_t *scb;
5806 int ret;
5807
5808 METHOD_TRACE("ips_readwrite_page5", 1);
5809
5810 scb = &ha->scbs[ha->max_cmds - 1];
5811
5812 ips_init_scb(ha, scb);
5813
5814 scb->timeout = ips_cmd_timeout;
5815 scb->cdb[0] = IPS_CMD_RW_NVRAM_PAGE;
5816
5817 scb->cmd.nvram.op_code = IPS_CMD_RW_NVRAM_PAGE;
5818 scb->cmd.nvram.command_id = IPS_COMMAND_ID(ha, scb);
5819 scb->cmd.nvram.page = 5;
5820 scb->cmd.nvram.write = write;
5821 scb->cmd.nvram.reserved = 0;
5822 scb->cmd.nvram.reserved2 = 0;
5823 scb->data_len = sizeof (*ha->nvram);
5824 scb->cmd.nvram.buffer_addr = ha->ioctl_busaddr;
5825 if (write)
5826 memcpy(ha->ioctl_data, ha->nvram, sizeof(*ha->nvram));
5827
5828
5829 if (((ret =
5830 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5831 || (ret == IPS_SUCCESS_IMM)
5832 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {
5833
5834 memset(ha->nvram, 0, sizeof (IPS_NVRAM_P5));
5835
5836 return (0);
5837 }
5838 if (!write)
5839 memcpy(ha->nvram, ha->ioctl_data, sizeof(*ha->nvram));
5840 return (1);
5841 }
5842
5843
5844
5845
5846 /*
5847  * ips_clear_adapter
5848  *   Send a configuration sync, then unlock the stripe lock table.
5849  */
5850
5851
5852 static int
5853 ips_clear_adapter(ips_ha_t * ha, int intr)
5854 {
5855 ips_scb_t *scb;
5856 int ret;
5857
5858 METHOD_TRACE("ips_clear_adapter", 1);
5859
5860 scb = &ha->scbs[ha->max_cmds - 1];
5861
5862 ips_init_scb(ha, scb);
5863
5864 scb->timeout = ips_reset_timeout;
5865 scb->cdb[0] = IPS_CMD_CONFIG_SYNC;
5866
5867 scb->cmd.config_sync.op_code = IPS_CMD_CONFIG_SYNC;
5868 scb->cmd.config_sync.command_id = IPS_COMMAND_ID(ha, scb);
5869 scb->cmd.config_sync.channel = 0;
5870 scb->cmd.config_sync.source_target = IPS_POCL;
5871 scb->cmd.config_sync.reserved = 0;
5872 scb->cmd.config_sync.reserved2 = 0;
5873 scb->cmd.config_sync.reserved3 = 0;
5874
5875
5876 if (((ret =
5877 ips_send_wait(ha, scb, ips_reset_timeout, intr)) == IPS_FAILURE)
5878 || (ret == IPS_SUCCESS_IMM)
5879 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
5880 return (0);
5881
5882
5883 ips_init_scb(ha, scb);
5884
5885 scb->cdb[0] = IPS_CMD_ERROR_TABLE;
5886 scb->timeout = ips_reset_timeout;
5887
5888 scb->cmd.unlock_stripe.op_code = IPS_CMD_ERROR_TABLE;
5889 scb->cmd.unlock_stripe.command_id = IPS_COMMAND_ID(ha, scb);
5890 scb->cmd.unlock_stripe.log_drv = 0;
5891 scb->cmd.unlock_stripe.control = IPS_CSL;
5892 scb->cmd.unlock_stripe.reserved = 0;
5893 scb->cmd.unlock_stripe.reserved2 = 0;
5894 scb->cmd.unlock_stripe.reserved3 = 0;
5895
5896
5897 if (((ret =
5898 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5899 || (ret == IPS_SUCCESS_IMM)
5900 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
5901 return (0);
5902
5903 return (1);
5904 }
5905
5906
5907
5908
5909 /*
5910  * ips_ffdc_reset
5911  *   Send a First Failure Data Capture (FFDC) reset-notification command.
5912  */
5913
5914
5915 static void
5916 ips_ffdc_reset(ips_ha_t * ha, int intr)
5917 {
5918 ips_scb_t *scb;
5919
5920 METHOD_TRACE("ips_ffdc_reset", 1);
5921
5922 scb = &ha->scbs[ha->max_cmds - 1];
5923
5924 ips_init_scb(ha, scb);
5925
5926 scb->timeout = ips_cmd_timeout;
5927 scb->cdb[0] = IPS_CMD_FFDC;
5928 scb->cmd.ffdc.op_code = IPS_CMD_FFDC;
5929 scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb);
5930 scb->cmd.ffdc.reset_count = ha->reset_count;
5931 scb->cmd.ffdc.reset_type = 0x80;
5932
5933
5934 ips_fix_ffdc_time(ha, scb, ha->last_ffdc);
5935
5936
5937 ips_send_wait(ha, scb, ips_cmd_timeout, intr);
5938 }
5939
5940
5941
5942
5943 /*
5944  * ips_ffdc_time
5945  *   Send the current time to the adapter for FFDC bookkeeping.
5946  */
5947
5948
5949 static void
5950 ips_ffdc_time(ips_ha_t * ha)
5951 {
5952 ips_scb_t *scb;
5953
5954 METHOD_TRACE("ips_ffdc_time", 1);
5955
5956 DEBUG_VAR(1, "(%s%d) Sending time update.", ips_name, ha->host_num);
5957
5958 scb = &ha->scbs[ha->max_cmds - 1];
5959
5960 ips_init_scb(ha, scb);
5961
5962 scb->timeout = ips_cmd_timeout;
5963 scb->cdb[0] = IPS_CMD_FFDC;
5964 scb->cmd.ffdc.op_code = IPS_CMD_FFDC;
5965 scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb);
5966 scb->cmd.ffdc.reset_count = 0;
5967 scb->cmd.ffdc.reset_type = 0;
5968
5969
5970 ips_fix_ffdc_time(ha, scb, ha->last_ffdc);
5971
5972
5973 ips_send_wait(ha, scb, ips_cmd_timeout, IPS_FFDC);
5974 }
5975
5976
5977
5978 /*
5979  * ips_fix_ffdc_time
5980  *   Encode a time64_t into the split date/time fields of an FFDC command.
5981  */
5982
5983
5984 static void
5985 ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time64_t current_time)
5986 {
5987 struct tm tm;
5988
5989 METHOD_TRACE("ips_fix_ffdc_time", 1);
5990
5991 time64_to_tm(current_time, 0, &tm);
5992
5993 scb->cmd.ffdc.hour = tm.tm_hour;
5994 scb->cmd.ffdc.minute = tm.tm_min;
5995 scb->cmd.ffdc.second = tm.tm_sec;
5996 scb->cmd.ffdc.yearH = (tm.tm_year + 1900) / 100;
5997 scb->cmd.ffdc.yearL = tm.tm_year % 100;
5998 scb->cmd.ffdc.month = tm.tm_mon + 1;
5999 scb->cmd.ffdc.day = tm.tm_mday;
6000 }
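/*
 * Editor's note (worked example, values are illustrative only): the FFDC
 * timestamp is sent as split decimal fields.  For a current_time that
 * decodes to 2021-06-05 14:30:09 UTC, time64_to_tm() yields tm_year = 121,
 * tm_mon = 5 and tm_mday = 5, so the command fields become:
 *
 *     yearH  = (121 + 1900) / 100 = 20
 *     yearL  = 121 % 100          = 21
 *     month  = tm_mon + 1         = 6
 *     day    = 5,  hour = 14,  minute = 30,  second = 9
 */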
6001
6002
6003
6004
6005
6006
6007
6008
6009 /*
6010  * ips_erase_bios
6011  *   Erase the adapter's flash BIOS region (port I/O flash interface).
6012  */
6013
6014 static int
6015 ips_erase_bios(ips_ha_t * ha)
6016 {
6017 int timeout;
6018 uint8_t status = 0;
6019
6020 METHOD_TRACE("ips_erase_bios", 1);
6021
6022 status = 0;
6023
6024
6025 outl(0, ha->io_addr + IPS_REG_FLAP);
6026 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6027 udelay(25);
6028
6029 outb(0x50, ha->io_addr + IPS_REG_FLDP);
6030 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6031 udelay(25);
6032
6033
6034 outb(0x20, ha->io_addr + IPS_REG_FLDP);
6035 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6036 udelay(25);
6037
6038
6039 outb(0xD0, ha->io_addr + IPS_REG_FLDP);
6040 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6041 udelay(25);
6042
6043
6044 outb(0x70, ha->io_addr + IPS_REG_FLDP);
6045 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6046 udelay(25);
6047
6048 timeout = 80000;
6049
6050 while (timeout > 0) {
6051 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6052 outl(0, ha->io_addr + IPS_REG_FLAP);
6053 udelay(25);
6054 }
6055
6056 status = inb(ha->io_addr + IPS_REG_FLDP);
6057
6058 if (status & 0x80)
6059 break;
6060
6061 MDELAY(1);
6062 timeout--;
6063 }
6064
6065
6066 if (timeout <= 0) {
6067
6068 /* the erase timed out -- try to suspend it */
6069
6070 outb(0xB0, ha->io_addr + IPS_REG_FLDP);
6071 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6072 udelay(25);
6073
6074
6075 timeout = 10000;
6076 while (timeout > 0) {
6077 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6078 outl(0, ha->io_addr + IPS_REG_FLAP);
6079 udelay(25);
6080 }
6081
6082 status = inb(ha->io_addr + IPS_REG_FLDP);
6083
6084 if (status & 0xC0)
6085 break;
6086
6087 MDELAY(1);
6088 timeout--;
6089 }
6090
6091 return (1);
6092 }
6093
6094
6095 if (status & 0x08)
6096
6097 return (1);
6098
6099
6100 if (status & 0x30)
6101
6102 return (1);
6103
6104
6105
6106 outb(0x50, ha->io_addr + IPS_REG_FLDP);
6107 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6108 udelay(25);
6109
6110
6111 outb(0xFF, ha->io_addr + IPS_REG_FLDP);
6112 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6113 udelay(25);
6114
6115 return (0);
6116 }
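/*
 * Editor's note: the magic bytes written to IPS_REG_FLDP above follow the
 * common Intel-style bulk flash command set (stated here as background, not
 * taken from the original sources): 0x20/0xD0 are block-erase setup/confirm,
 * 0x70 is read-status (bit 0x80 set when the part is ready, bits 0x30 flag
 * erase/program errors, bit 0x08 flags a VPP fault), 0xB0 is erase suspend,
 * 0x50 clears the status register and 0xFF returns to read-array mode.
 */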
6117
6118
6119
6120
6121
6122
6123
6124
6125
6126 static int
6127 ips_erase_bios_memio(ips_ha_t * ha)
6128 {
6129 int timeout;
6130 uint8_t status;
6131
6132 METHOD_TRACE("ips_erase_bios_memio", 1);
6133
6134 status = 0;
6135
6136
6137 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6138 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6139 udelay(25);
6140
6141 writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
6142 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6143 udelay(25);
6144
6145
6146 writeb(0x20, ha->mem_ptr + IPS_REG_FLDP);
6147 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6148 udelay(25);
6149
6150
6151 writeb(0xD0, ha->mem_ptr + IPS_REG_FLDP);
6152 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6153 udelay(25);
6154
6155
6156 writeb(0x70, ha->mem_ptr + IPS_REG_FLDP);
6157 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6158 udelay(25);
6159
6160 timeout = 80000;
6161
6162 while (timeout > 0) {
6163 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6164 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6165 udelay(25);
6166 }
6167
6168 status = readb(ha->mem_ptr + IPS_REG_FLDP);
6169
6170 if (status & 0x80)
6171 break;
6172
6173 MDELAY(1);
6174 timeout--;
6175 }
6176
6177
6178 if (timeout <= 0) {
6179
6180 /* the erase timed out -- try to suspend it */
6181
6182 writeb(0xB0, ha->mem_ptr + IPS_REG_FLDP);
6183 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6184 udelay(25);
6185
6186
6187 timeout = 10000;
6188 while (timeout > 0) {
6189 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6190 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6191 udelay(25);
6192 }
6193
6194 status = readb(ha->mem_ptr + IPS_REG_FLDP);
6195
6196 if (status & 0xC0)
6197 break;
6198
6199 MDELAY(1);
6200 timeout--;
6201 }
6202
6203 return (1);
6204 }
6205
6206
6207 if (status & 0x08)
6208
6209 return (1);
6210
6211
6212 if (status & 0x30)
6213
6214 return (1);
6215
6216
6217
6218 writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
6219 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6220 udelay(25);
6221
6222
6223 writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
6224 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6225 udelay(25);
6226
6227 return (0);
6228 }
6229
6230
6231
6232 /*
6233  * ips_program_bios
6234  *   Write a BIOS image buffer into adapter flash, one byte at a time.
6235  */
6236
6237
6238 static int
6239 ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
6240 uint32_t offset)
6241 {
6242 int i;
6243 int timeout;
6244 uint8_t status = 0;
6245
6246 METHOD_TRACE("ips_program_bios", 1);
6247
6248 status = 0;
6249
6250 for (i = 0; i < buffersize; i++) {
6251
6252 outl(i + offset, ha->io_addr + IPS_REG_FLAP);
6253 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6254 udelay(25);
6255
6256 outb(0x40, ha->io_addr + IPS_REG_FLDP);
6257 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6258 udelay(25);
6259
6260 outb(buffer[i], ha->io_addr + IPS_REG_FLDP);
6261 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6262 udelay(25);
6263
6264
6265 timeout = 1000;
6266 while (timeout > 0) {
6267 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6268 outl(0, ha->io_addr + IPS_REG_FLAP);
6269 udelay(25);
6270 }
6271
6272 status = inb(ha->io_addr + IPS_REG_FLDP);
6273
6274 if (status & 0x80)
6275 break;
6276
6277 MDELAY(1);
6278 timeout--;
6279 }
6280
6281 if (timeout == 0) {
6282
6283 outl(0, ha->io_addr + IPS_REG_FLAP);
6284 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6285 udelay(25);
6286
6287 outb(0xFF, ha->io_addr + IPS_REG_FLDP);
6288 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6289 udelay(25);
6290
6291 return (1);
6292 }
6293
6294
6295 if (status & 0x18) {
6296
6297 outl(0, ha->io_addr + IPS_REG_FLAP);
6298 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6299 udelay(25);
6300
6301 outb(0xFF, ha->io_addr + IPS_REG_FLDP);
6302 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6303 udelay(25);
6304
6305 return (1);
6306 }
6307 }
6308
6309
6310 outl(0, ha->io_addr + IPS_REG_FLAP);
6311 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6312 udelay(25);
6313
6314 outb(0xFF, ha->io_addr + IPS_REG_FLDP);
6315 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6316 udelay(25);
6317
6318 return (0);
6319 }
6320
6321
6322
6323
6324
6325
6326
6327
6328
6329 static int
6330 ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
6331 uint32_t offset)
6332 {
6333 int i;
6334 int timeout;
6335 uint8_t status = 0;
6336
6337 METHOD_TRACE("ips_program_bios_memio", 1);
6338
6339 status = 0;
6340
6341 for (i = 0; i < buffersize; i++) {
6342
6343 writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
6344 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6345 udelay(25);
6346
6347 writeb(0x40, ha->mem_ptr + IPS_REG_FLDP);
6348 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6349 udelay(25);
6350
6351 writeb(buffer[i], ha->mem_ptr + IPS_REG_FLDP);
6352 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6353 udelay(25);
6354
6355
6356 timeout = 1000;
6357 while (timeout > 0) {
6358 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6359 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6360 udelay(25);
6361 }
6362
6363 status = readb(ha->mem_ptr + IPS_REG_FLDP);
6364
6365 if (status & 0x80)
6366 break;
6367
6368 MDELAY(1);
6369 timeout--;
6370 }
6371
6372 if (timeout == 0) {
6373
6374 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6375 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6376 udelay(25);
6377
6378 writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
6379 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6380 udelay(25);
6381
6382 return (1);
6383 }
6384
6385
6386 if (status & 0x18) {
6387
6388 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6389 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6390 udelay(25);
6391
6392 writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
6393 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6394 udelay(25);
6395
6396 return (1);
6397 }
6398 }
6399
6400
6401 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6402 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6403 udelay(25);
6404
6405 writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
6406 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6407 udelay(25);
6408
6409 return (0);
6410 }
6411
6412
6413
6414 /*
6415  * ips_verify_bios
6416  *   Verify the flashed BIOS: check the 0x55AA signature and the checksum.
6417  */
6418
6419
6420 static int
6421 ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
6422 uint32_t offset)
6423 {
6424 uint8_t checksum;
6425 int i;
6426
6427 METHOD_TRACE("ips_verify_bios", 1);
6428
6429
6430 outl(0, ha->io_addr + IPS_REG_FLAP);
6431 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6432 udelay(25);
6433
6434 if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
6435 return (1);
6436
6437 outl(1, ha->io_addr + IPS_REG_FLAP);
6438 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6439 udelay(25);
6440 if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
6441 return (1);
6442
6443 checksum = 0xff;
6444 for (i = 2; i < buffersize; i++) {
6445
6446 outl(i + offset, ha->io_addr + IPS_REG_FLAP);
6447 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6448 udelay(25);
6449
6450 checksum = (uint8_t) checksum + inb(ha->io_addr + IPS_REG_FLDP);
6451 }
6452
6453 if (checksum != 0)
6454
6455 return (1);
6456 else
6457
6458 return (0);
6459 }
6460
6461
6462
6463
6464
6465
6466
6467
6468
6469 static int
6470 ips_verify_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
6471 uint32_t offset)
6472 {
6473 uint8_t checksum;
6474 int i;
6475
6476 METHOD_TRACE("ips_verify_bios_memio", 1);
6477
6478
6479 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6480 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6481 udelay(25);
6482
6483 if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
6484 return (1);
6485
6486 writel(1, ha->mem_ptr + IPS_REG_FLAP);
6487 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6488 udelay(25);
6489 if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
6490 return (1);
6491
6492 checksum = 0xff;
6493 for (i = 2; i < buffersize; i++) {
6494
6495 writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
6496 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6497 udelay(25);
6498
6499 checksum =
6500 (uint8_t) checksum + readb(ha->mem_ptr + IPS_REG_FLDP);
6501 }
6502
6503 if (checksum != 0)
6504
6505 return (1);
6506 else
6507
6508 return (0);
6509 }
6510
6511
6512 /*
6513  * ips_abort_init
6514  *   Tear down a partially initialized controller and free its slot.
6515  */
6516
6517
6518 static int
6519 ips_abort_init(ips_ha_t * ha, int index)
6520 {
6521 ha->active = 0;
6522 ips_free(ha);
6523 ips_ha[index] = NULL;
6524 ips_sh[index] = NULL;
6525 return -1;
6526 }
6527
6528
6529 /*
6530  * ips_shift_controllers
6531  *   Rotate the controller at highindex down to lowindex, renumbering hosts.
6532  */
6533
6534
6535 static void
6536 ips_shift_controllers(int lowindex, int highindex)
6537 {
6538 ips_ha_t *ha_sav = ips_ha[highindex];
6539 struct Scsi_Host *sh_sav = ips_sh[highindex];
6540 int i;
6541
6542 for (i = highindex; i > lowindex; i--) {
6543 ips_ha[i] = ips_ha[i - 1];
6544 ips_sh[i] = ips_sh[i - 1];
6545 ips_ha[i]->host_num = i;
6546 }
6547 ha_sav->host_num = lowindex;
6548 ips_ha[lowindex] = ha_sav;
6549 ips_sh[lowindex] = sh_sav;
6550 }
6551
6552
6553
6554 /*
6555  * ips_order_controllers
6556  *   Sort controllers into the boot order stored in NVRAM page 5 (if any).
6557  */
6558
6559 static void
6560 ips_order_controllers(void)
6561 {
6562 int i, j, tmp, position = 0;
6563 IPS_NVRAM_P5 *nvram;
6564 if (!ips_ha[0])
6565 return;
6566 nvram = ips_ha[0]->nvram;
6567
6568 if (nvram->adapter_order[0]) {
6569 for (i = 1; i <= nvram->adapter_order[0]; i++) {
6570 for (j = position; j < ips_num_controllers; j++) {
6571 switch (ips_ha[j]->ad_type) {
6572 case IPS_ADTYPE_SERVERAID6M:
6573 case IPS_ADTYPE_SERVERAID7M:
6574 if (nvram->adapter_order[i] == 'M') {
6575 ips_shift_controllers(position,
6576 j);
6577 position++;
6578 }
6579 break;
6580 case IPS_ADTYPE_SERVERAID4L:
6581 case IPS_ADTYPE_SERVERAID4M:
6582 case IPS_ADTYPE_SERVERAID4MX:
6583 case IPS_ADTYPE_SERVERAID4LX:
6584 if (nvram->adapter_order[i] == 'N') {
6585 ips_shift_controllers(position,
6586 j);
6587 position++;
6588 }
6589 break;
6590 case IPS_ADTYPE_SERVERAID6I:
6591 case IPS_ADTYPE_SERVERAID5I2:
6592 case IPS_ADTYPE_SERVERAID5I1:
6593 case IPS_ADTYPE_SERVERAID7k:
6594 if (nvram->adapter_order[i] == 'S') {
6595 ips_shift_controllers(position,
6596 j);
6597 position++;
6598 }
6599 break;
6600 case IPS_ADTYPE_SERVERAID:
6601 case IPS_ADTYPE_SERVERAID2:
6602 case IPS_ADTYPE_NAVAJO:
6603 case IPS_ADTYPE_KIOWA:
6604 case IPS_ADTYPE_SERVERAID3L:
6605 case IPS_ADTYPE_SERVERAID3:
6606 case IPS_ADTYPE_SERVERAID4H:
6607 if (nvram->adapter_order[i] == 'A') {
6608 ips_shift_controllers(position,
6609 j);
6610 position++;
6611 }
6612 break;
6613 default:
6614 break;
6615 }
6616 }
6617 }
6618
6619 return;
6620 }
6621
6622 tmp = 0;
6623 for (i = position; i < ips_num_controllers; i++) {
6624 if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I2 ||
6625 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I1) {
6626 ips_shift_controllers(position, i);
6627 position++;
6628 tmp = 1;
6629 }
6630 }
6631
6632 if (!tmp)
6633 return;
6634 for (i = position; i < ips_num_controllers; i++) {
6635 if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4L ||
6636 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4M ||
6637 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4LX ||
6638 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4MX) {
6639 ips_shift_controllers(position, i);
6640 position++;
6641 }
6642 }
6643
6644 return;
6645 }
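/*
 * Editor's note (illustrative example): nvram->adapter_order[0] holds the
 * number of ordering letters that follow, each selecting an adapter family:
 * 'M' = ServeRAID 6M/7M, 'S' = onboard 5i/6i/7k, 'N' = the ServeRAID 4
 * series, 'A' = the earlier ServeRAID, 2, 3 and 4H adapters.  An order of
 * { 3, 'M', 'S', 'N' } would therefore register all 6M/7M controllers first,
 * then the onboard ones, then the 4 series, leaving anything else in probe
 * order.  Without a stored order, any onboard 5i controllers are moved to
 * the front, followed (only if some were found) by the 4L/4M/4Lx/4Mx boards.
 */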
6646
6647
6648 /*
6649  * ips_register_scsi
6650  *   Allocate a Scsi_Host, move the ha state into it and register the host.
6651  */
6652
6653
6654 static int
6655 ips_register_scsi(int index)
6656 {
6657 struct Scsi_Host *sh;
6658 ips_ha_t *ha, *oldha = ips_ha[index];
6659 sh = scsi_host_alloc(&ips_driver_template, sizeof (ips_ha_t));
6660 if (!sh) {
6661 IPS_PRINTK(KERN_WARNING, oldha->pcidev,
6662 "Unable to register controller with SCSI subsystem\n");
6663 return -1;
6664 }
6665 ha = IPS_HA(sh);
6666 memcpy(ha, oldha, sizeof (ips_ha_t));
6667 free_irq(oldha->pcidev->irq, oldha);
6668
6669 if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
6670 IPS_PRINTK(KERN_WARNING, ha->pcidev,
6671 "Unable to install interrupt handler\n");
6672 goto err_out_sh;
6673 }
6674
6675 kfree(oldha);
6676
6677
6678 sh->unique_id = (ha->io_addr) ? ha->io_addr : ha->mem_addr;
6679 sh->sg_tablesize = sh->hostt->sg_tablesize;
6680 sh->can_queue = sh->hostt->can_queue;
6681 sh->cmd_per_lun = sh->hostt->cmd_per_lun;
6682 sh->max_sectors = 128;
6683
6684 sh->max_id = ha->ntargets;
6685 sh->max_lun = ha->nlun;
6686 sh->max_channel = ha->nbus - 1;
6687 sh->can_queue = ha->max_cmds - 1;
6688
6689 if (scsi_add_host(sh, &ha->pcidev->dev))
6690 goto err_out;
6691
6692 ips_sh[index] = sh;
6693 ips_ha[index] = ha;
6694
6695 scsi_scan_host(sh);
6696
6697 return 0;
6698
6699 err_out:
6700 free_irq(ha->pcidev->irq, ha);
6701 err_out_sh:
6702 scsi_host_put(sh);
6703 return -1;
6704 }
6705
6706
6707 /*
6708  * ips_remove_device
6709  *   PCI remove callback: release the host and the PCI resources.
6710  */
6711
6712 static void
6713 ips_remove_device(struct pci_dev *pci_dev)
6714 {
6715 struct Scsi_Host *sh = pci_get_drvdata(pci_dev);
6716
6717 pci_set_drvdata(pci_dev, NULL);
6718
6719 ips_release(sh);
6720
6721 pci_release_regions(pci_dev);
6722 pci_disable_device(pci_dev);
6723 }
6724
6725
6726
6727 /*
6728  * ips_module_init
6729  *   Module entry point: register the PCI driver and the reboot notifier.
6730  */
6731
6732 static int __init
6733 ips_module_init(void)
6734 {
6735 #if !defined(__i386__) && !defined(__ia64__) && !defined(__x86_64__)
6736 printk(KERN_ERR "ips: This driver has only been tested on the x86/ia64/x86_64 platforms\n");
6737 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
6738 #endif
6739
6740 if (pci_register_driver(&ips_pci_driver) < 0)
6741 return -ENODEV;
6742 ips_driver_template.module = THIS_MODULE;
6743 ips_order_controllers();
6744 if (!ips_detect(&ips_driver_template)) {
6745 pci_unregister_driver(&ips_pci_driver);
6746 return -ENODEV;
6747 }
6748 register_reboot_notifier(&ips_notifier);
6749 return 0;
6750 }
6751
6752
6753
6754
6755
6756
6757
6758
6759 static void __exit
6760 ips_module_exit(void)
6761 {
6762 pci_unregister_driver(&ips_pci_driver);
6763 unregister_reboot_notifier(&ips_notifier);
6764 }
6765
6766 module_init(ips_module_init);
6767 module_exit(ips_module_exit);
6768
6769
6770
6771 /*
6772  * ips_insert_device
6773  *   PCI probe callback: claim resources and run both initialization phases.
6774  */
6775
6776
6777
6778 static int
6779 ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent)
6780 {
6781 int index = -1;
6782 int rc;
6783
6784 METHOD_TRACE("ips_insert_device", 1);
6785 rc = pci_enable_device(pci_dev);
6786 if (rc)
6787 return rc;
6788
6789 rc = pci_request_regions(pci_dev, "ips");
6790 if (rc)
6791 goto err_out;
6792
6793 rc = ips_init_phase1(pci_dev, &index);
6794 if (rc == SUCCESS)
6795 rc = ips_init_phase2(index);
6796
6797 if (ips_hotplug)
6798 if (ips_register_scsi(index)) {
6799 ips_free(ips_ha[index]);
6800 rc = -1;
6801 }
6802
6803 if (rc == SUCCESS)
6804 ips_num_controllers++;
6805
6806 ips_next_controller = ips_num_controllers;
6807
6808 if (rc < 0) {
6809 rc = -ENODEV;
6810 goto err_out_regions;
6811 }
6812
6813 pci_set_drvdata(pci_dev, ips_sh[index]);
6814 return 0;
6815
6816 err_out_regions:
6817 pci_release_regions(pci_dev);
6818 err_out:
6819 pci_disable_device(pci_dev);
6820 return rc;
6821 }
6822
6823
6824
6825 /*
6826  * ips_init_phase1
6827  *   Map the adapter's I/O regions, allocate DMA buffers and reset/init it.
6828  */
6829
6830
6831
6832 static int
6833 ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
6834 {
6835 ips_ha_t *ha;
6836 uint32_t io_addr;
6837 uint32_t mem_addr;
6838 uint32_t io_len;
6839 uint32_t mem_len;
6840 int j;
6841 int index;
6842 dma_addr_t dma_address;
6843 char __iomem *ioremap_ptr;
6844 char __iomem *mem_ptr;
6845 uint32_t IsDead;
6846
6847 METHOD_TRACE("ips_init_phase1", 1);
6848 index = IPS_MAX_ADAPTERS;
6849 for (j = 0; j < IPS_MAX_ADAPTERS; j++) {
6850 if (ips_ha[j] == NULL) {
6851 index = j;
6852 break;
6853 }
6854 }
6855
6856 if (index >= IPS_MAX_ADAPTERS)
6857 return -1;
6858
6859
6860 mem_addr = 0;
6861 io_addr = 0;
6862 mem_len = 0;
6863 io_len = 0;
6864
6865 for (j = 0; j < 2; j++) {
6866 if (!pci_resource_start(pci_dev, j))
6867 break;
6868
6869 if (pci_resource_flags(pci_dev, j) & IORESOURCE_IO) {
6870 io_addr = pci_resource_start(pci_dev, j);
6871 io_len = pci_resource_len(pci_dev, j);
6872 } else {
6873 mem_addr = pci_resource_start(pci_dev, j);
6874 mem_len = pci_resource_len(pci_dev, j);
6875 }
6876 }
6877
6878
6879 if (mem_addr) {
6880 uint32_t base;
6881 uint32_t offs;
6882
6883 base = mem_addr & PAGE_MASK;
6884 offs = mem_addr - base;
6885 ioremap_ptr = ioremap(base, PAGE_SIZE);
6886 if (!ioremap_ptr)
6887 return -1;
6888 mem_ptr = ioremap_ptr + offs;
6889 } else {
6890 ioremap_ptr = NULL;
6891 mem_ptr = NULL;
6892 }
6893
6894
6895 ha = kzalloc(sizeof (ips_ha_t), GFP_KERNEL);
6896 if (ha == NULL) {
6897 IPS_PRINTK(KERN_WARNING, pci_dev,
6898 "Unable to allocate temporary ha struct\n");
6899 return -1;
6900 }
6901
6902 ips_sh[index] = NULL;
6903 ips_ha[index] = ha;
6904 ha->active = 1;
6905
6906
6907 ha->io_addr = io_addr;
6908 ha->io_len = io_len;
6909 ha->mem_addr = mem_addr;
6910 ha->mem_len = mem_len;
6911 ha->mem_ptr = mem_ptr;
6912 ha->ioremap_ptr = ioremap_ptr;
6913 ha->host_num = (uint32_t) index;
6914 ha->slot_num = PCI_SLOT(pci_dev->devfn);
6915 ha->pcidev = pci_dev;
6916
6917
6918
6919
6920
6921
6922
6923 if (sizeof(dma_addr_t) > 4 && IPS_HAS_ENH_SGLIST(ha) &&
6924 !dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(64))) {
6925 (ha)->flags |= IPS_HA_ENH_SG;
6926 } else {
6927 if (dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(32)) != 0) {
6928 printk(KERN_WARNING "Unable to set DMA Mask\n");
6929 return ips_abort_init(ha, index);
6930 }
6931 }
6932 if (ips_cd_boot && !ips_FlashData) {
6933 ips_FlashData = dma_alloc_coherent(&pci_dev->dev,
6934 PAGE_SIZE << 7, &ips_flashbusaddr, GFP_KERNEL);
6935 }
6936
6937 ha->enq = dma_alloc_coherent(&pci_dev->dev, sizeof (IPS_ENQ),
6938 &ha->enq_busaddr, GFP_KERNEL);
6939 if (!ha->enq) {
6940 IPS_PRINTK(KERN_WARNING, pci_dev,
6941 "Unable to allocate host inquiry structure\n");
6942 return ips_abort_init(ha, index);
6943 }
6944
6945 ha->adapt = dma_alloc_coherent(&pci_dev->dev,
6946 sizeof (IPS_ADAPTER) + sizeof (IPS_IO_CMD),
6947 &dma_address, GFP_KERNEL);
6948 if (!ha->adapt) {
6949 IPS_PRINTK(KERN_WARNING, pci_dev,
6950 "Unable to allocate host adapt & dummy structures\n");
6951 return ips_abort_init(ha, index);
6952 }
6953 ha->adapt->hw_status_start = dma_address;
6954 ha->dummy = (void *) (ha->adapt + 1);
6955
6956
6957
6958 ha->logical_drive_info = dma_alloc_coherent(&pci_dev->dev,
6959 sizeof (IPS_LD_INFO), &dma_address, GFP_KERNEL);
6960 if (!ha->logical_drive_info) {
6961 IPS_PRINTK(KERN_WARNING, pci_dev,
6962 "Unable to allocate logical drive info structure\n");
6963 return ips_abort_init(ha, index);
6964 }
6965 ha->logical_drive_info_dma_addr = dma_address;
6966
6967
6968 ha->conf = kmalloc(sizeof (IPS_CONF), GFP_KERNEL);
6969
6970 if (!ha->conf) {
6971 IPS_PRINTK(KERN_WARNING, pci_dev,
6972 "Unable to allocate host conf structure\n");
6973 return ips_abort_init(ha, index);
6974 }
6975
6976 ha->nvram = kmalloc(sizeof (IPS_NVRAM_P5), GFP_KERNEL);
6977
6978 if (!ha->nvram) {
6979 IPS_PRINTK(KERN_WARNING, pci_dev,
6980 "Unable to allocate host NVRAM structure\n");
6981 return ips_abort_init(ha, index);
6982 }
6983
6984 ha->subsys = kmalloc(sizeof (IPS_SUBSYS), GFP_KERNEL);
6985
6986 if (!ha->subsys) {
6987 IPS_PRINTK(KERN_WARNING, pci_dev,
6988 "Unable to allocate host subsystem structure\n");
6989 return ips_abort_init(ha, index);
6990 }
6991
6992
6993
6994 if (ips_ioctlsize < PAGE_SIZE)
6995 ips_ioctlsize = PAGE_SIZE;
6996
6997 ha->ioctl_data = dma_alloc_coherent(&pci_dev->dev, ips_ioctlsize,
6998 &ha->ioctl_busaddr, GFP_KERNEL);
6999 ha->ioctl_len = ips_ioctlsize;
7000 if (!ha->ioctl_data) {
7001 IPS_PRINTK(KERN_WARNING, pci_dev,
7002 "Unable to allocate IOCTL data\n");
7003 return ips_abort_init(ha, index);
7004 }
7005
7006
7007
7008
7009 ips_setup_funclist(ha);
7010
7011 if ((IPS_IS_MORPHEUS(ha)) || (IPS_IS_MARCO(ha))) {
7012
7013 IsDead = readl(ha->mem_ptr + IPS_REG_I960_MSG1);
7014 if (IsDead == 0xDEADBEEF) {
7015 ips_reset_morpheus(ha);
7016 }
7017 }
7018
7019
7020
7021
7022
7023 if (!(*ha->func.isinit) (ha)) {
7024 if (!(*ha->func.init) (ha)) {
7025
7026
7027
7028 IPS_PRINTK(KERN_WARNING, pci_dev,
7029 "Unable to initialize controller\n");
7030 return ips_abort_init(ha, index);
7031 }
7032 }
7033
7034 *indexPtr = index;
7035 return SUCCESS;
7036 }
7037
7038
7039
7040
7041 /*
7042  * ips_init_phase2
7043  *   Hook the IRQ, bring the adapter up with one CCB, then allocate them all.
7044  */
7045
7046
7047 static int
7048 ips_init_phase2(int index)
7049 {
7050 ips_ha_t *ha;
7051
7052 ha = ips_ha[index];
7053
7054 METHOD_TRACE("ips_init_phase2", 1);
7055 if (!ha->active) {
7056 ips_ha[index] = NULL;
7057 return -1;
7058 }
7059
7060
7061 if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
7062 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7063 "Unable to install interrupt handler\n");
7064 return ips_abort_init(ha, index);
7065 }
7066
7067
7068
7069
7070 ha->max_cmds = 1;
7071 if (!ips_allocatescbs(ha)) {
7072 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7073 "Unable to allocate a CCB\n");
7074 free_irq(ha->pcidev->irq, ha);
7075 return ips_abort_init(ha, index);
7076 }
7077
7078 if (!ips_hainit(ha)) {
7079 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7080 "Unable to initialize controller\n");
7081 free_irq(ha->pcidev->irq, ha);
7082 return ips_abort_init(ha, index);
7083 }
7084
7085 ips_deallocatescbs(ha, 1);
7086
7087
7088 if (!ips_allocatescbs(ha)) {
7089 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7090 "Unable to allocate CCBs\n");
7091 free_irq(ha->pcidev->irq, ha);
7092 return ips_abort_init(ha, index);
7093 }
7094
7095 return SUCCESS;
7096 }
7097
7098 MODULE_LICENSE("GPL");
7099 MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING);
7100 MODULE_VERSION(IPS_VER_STRING);