0001 
0002 /*
0003  * Adaptec AIC7xxx device driver for Linux.
0004  *
0005  * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.c#235 $
0006  *
0007  * Copyright (c) 1994 John Aycock
0008  *   The University of Calgary Department of Computer Science.
0009  *
0010  * This program is free software; you can redistribute it and/or modify
0011  * it under the terms of the GNU General Public License as published by
0012  * the Free Software Foundation; either version 2, or (at your option)
0013  * any later version.
0014  *
0015  * This program is distributed in the hope that it will be useful,
0016  * but WITHOUT ANY WARRANTY; without even the implied warranty of
0017  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
0018  * GNU General Public License for more details.
0019  *
0020  * You should have received a copy of the GNU General Public License
0021  * along with this program; see the file COPYING.  If not, write to
0022  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
0023  *
0024  * Sources include the Adaptec 1740 driver (aha1740.c), the Ultrastor 24F
0025  * driver (ultrastor.c), various Linux kernel source, the Adaptec EISA
0026  * config file (!adp7771.cfg), the Adaptec AHA-2740A Series User's Guide,
0027  * the Linux Kernel Hacker's Guide, Writing a SCSI Device Driver for Linux,
0028  * the Adaptec 1542 driver (aha1542.c), the Adaptec EISA overlay file
0029  * (adp7770.ovl), the Adaptec AHA-2740 Series Technical Reference Manual,
0030  * the Adaptec AIC-7770 Data Book, the ANSI SCSI specification, the
0031  * ANSI SCSI-2 specification (draft 10c), ...
0032  *
0033  * --------------------------------------------------------------------------
0034  *
0035  *  Modifications by Daniel M. Eischen (deischen@iworks.InterWorks.org):
0036  *
0037  *  Substantially modified to include support for wide and twin bus
0038  *  adapters, DMAing of SCBs, tagged queueing, IRQ sharing, bug fixes,
0039  *  SCB paging, and other rework of the code.
0040  *
0041  * --------------------------------------------------------------------------
0042  * Copyright (c) 1994-2000 Justin T. Gibbs.
0043  * Copyright (c) 2000-2001 Adaptec Inc.
0044  * All rights reserved.
0045  *
0046  * Redistribution and use in source and binary forms, with or without
0047  * modification, are permitted provided that the following conditions
0048  * are met:
0049  * 1. Redistributions of source code must retain the above copyright
0050  *    notice, this list of conditions, and the following disclaimer,
0051  *    without modification.
0052  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
0053  *    substantially similar to the "NO WARRANTY" disclaimer below
0054  *    ("Disclaimer") and any redistribution must be conditioned upon
0055  *    including a substantially similar Disclaimer requirement for further
0056  *    binary redistribution.
0057  * 3. Neither the names of the above-listed copyright holders nor the names
0058  *    of any contributors may be used to endorse or promote products derived
0059  *    from this software without specific prior written permission.
0060  *
0061  * Alternatively, this software may be distributed under the terms of the
0062  * GNU General Public License ("GPL") version 2 as published by the Free
0063  * Software Foundation.
0064  *
0065  * NO WARRANTY
0066  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
0067  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
0068  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
0069  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
0070  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
0071  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
0072  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
0073  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
0074  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
0075  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0076  * POSSIBILITY OF SUCH DAMAGES.
0077  *
0078  *---------------------------------------------------------------------------
0079  *
0080  *  Thanks also go to (in alphabetical order) the following:
0081  *
0082  *    Rory Bolt     - Sequencer bug fixes
0083  *    Jay Estabrook - Initial DEC Alpha support
0084  *    Doug Ledford  - Much needed abort/reset bug fixes
0085  *    Kai Makisara  - DMAing of SCBs
0086  *
0087  *  A boot-time option was also added to skip resetting the SCSI bus.
0088  *
0089  *    Form:  aic7xxx=extended
0090  *           aic7xxx=no_reset
0091  *           aic7xxx=verbose
0092  *
0093  *  Daniel M. Eischen, deischen@iworks.InterWorks.org, 1/23/97
0094  *
0095  *  Id: aic7xxx.c,v 4.1 1997/06/12 08:23:42 deang Exp
0096  */
0097 
0098 /*
0099  * Further driver modifications made by Doug Ledford <dledford@redhat.com>
0100  *
0101  * Copyright (c) 1997-1999 Doug Ledford
0102  *
0103  * These changes are released under the same licensing terms as the FreeBSD
0104  * driver written by Justin Gibbs.  Please see his Copyright notice above
0105  * for the exact terms and conditions covering my changes as well as the
0106  * warranty statement.
0107  *
0108  * Modifications made to the aic7xxx.c,v 4.1 driver from Dan Eischen include
0109  * but are not limited to:
0110  *
0111  *  1: Import of the latest FreeBSD sequencer code for this driver
0112  *  2: Modification of kernel code to accommodate different sequencer semantics
0113  *  3: Extensive changes throughout kernel portion of driver to improve
0114  *     abort/reset processing and error handling
0115  *  4: Other work contributed by various people on the Internet
0116  *  5: Changes to printk information and verbosity selection code
0117  *  6: General reliability related changes, especially in IRQ management
0118  *  7: Modifications to the default probe/attach order for supported cards
0119  *  8: SMP friendliness has been improved
0120  *
0121  */
0122 
0123 #include "aic7xxx_osm.h"
0124 #include "aic7xxx_inline.h"
0125 #include <scsi/scsicam.h>
0126 
0127 static struct scsi_transport_template *ahc_linux_transport_template = NULL;
0128 
0129 #include <linux/init.h>     /* __setup */
0130 #include <linux/mm.h>       /* For fetching system memory size */
0131 #include <linux/blkdev.h>       /* For block_size() */
0132 #include <linux/delay.h>    /* For ssleep/msleep */
0133 #include <linux/slab.h>
0134 
0135 
0136 /*
0137  * Set this to the delay in milliseconds after SCSI bus reset.
0138  * Note, we honor this only for the initial bus reset.
0139  * The scsi error recovery code performs its own bus settle
0140  * delay handling for error recovery actions.
0141  */
0142 #ifdef CONFIG_AIC7XXX_RESET_DELAY_MS
0143 #define AIC7XXX_RESET_DELAY CONFIG_AIC7XXX_RESET_DELAY_MS
0144 #else
0145 #define AIC7XXX_RESET_DELAY 5000
0146 #endif
0147 
0148 /*
0149  * To change the default number of tagged transactions allowed per-device,
0150  * add a line to the lilo.conf file like:
0151  * append="aic7xxx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}"
0152  * which will result in the first four devices on the first two
0153  * controllers being set to a tagged queue depth of 32.
0154  *
0155  * tag_commands is an array of 16 entries to allow for wide and twin adapters.
0156  * Twin adapters will use indexes 0-7 for channel 0, and indexes 8-15
0157  * for channel 1.
0158  */
0159 typedef struct {
0160     uint8_t tag_commands[16];   /* Allow for wide/twin adapters. */
0161 } adapter_tag_info_t;
0162 
0163 /*
0164  * Modify this as you see fit for your system.
0165  *
0166  * 0            tagged queuing disabled
0167  * 1 <= n <= 253    n == max tags ever dispatched.
0168  *
0169  * The driver will throttle the number of commands dispatched to a
0170  * device if it returns queue full.  For devices with a fixed maximum
0171  * queue depth, the driver will eventually determine this depth and
0172  * lock it in (a console message is printed to indicate that a lock
0173  * has occurred).  On some devices, queue full is returned for a temporary
0174  * resource shortage.  These devices will return queue full at varying
0175  * depths.  The driver will throttle back when the queue fulls occur and
0176  * attempt to slowly increase the depth over time as the device recovers
0177  * from the resource shortage.
0178  *
0179  * In this example, the first line will disable tagged queueing for all
0180  * the devices on the first probed aic7xxx adapter.
0181  *
0182  * The second line enables tagged queueing with 4 commands/LUN for IDs
0183  * (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the
0184  * driver to attempt to use up to 64 tags for ID 1.
0185  *
0186  * The third line is the same as the first line.
0187  *
0188  * The fourth line disables tagged queueing for devices 0 and 3.  It
0189  * enables tagged queueing for the other IDs, with 16 commands/LUN
0190  * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for
0191  * IDs 2, 5-7, and 9-15.
0192  */
0193 
0194 /*
0195  * NOTE: The structure below is for reference only; the actual structure
0196  *       to modify in order to change things is just below this comment block.
0197 adapter_tag_info_t aic7xxx_tag_info[] =
0198 {
0199     {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
0200     {{4, 64, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4}},
0201     {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
0202     {{0, 16, 4, 0, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}}
0203 };
0204 */
0205 
0206 #ifdef CONFIG_AIC7XXX_CMDS_PER_DEVICE
0207 #define AIC7XXX_CMDS_PER_DEVICE CONFIG_AIC7XXX_CMDS_PER_DEVICE
0208 #else
0209 #define AIC7XXX_CMDS_PER_DEVICE AHC_MAX_QUEUE
0210 #endif
0211 
0212 #define AIC7XXX_CONFIGED_TAG_COMMANDS {                 \
0213     AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,       \
0214     AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,       \
0215     AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,       \
0216     AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,       \
0217     AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,       \
0218     AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,       \
0219     AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,       \
0220     AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE        \
0221 }
0222 
0223 /*
0224  * By default, use the number of commands specified by
0225  * the user's kernel configuration.
0226  */
0227 static adapter_tag_info_t aic7xxx_tag_info[] =
0228 {
0229     {AIC7XXX_CONFIGED_TAG_COMMANDS},
0230     {AIC7XXX_CONFIGED_TAG_COMMANDS},
0231     {AIC7XXX_CONFIGED_TAG_COMMANDS},
0232     {AIC7XXX_CONFIGED_TAG_COMMANDS},
0233     {AIC7XXX_CONFIGED_TAG_COMMANDS},
0234     {AIC7XXX_CONFIGED_TAG_COMMANDS},
0235     {AIC7XXX_CONFIGED_TAG_COMMANDS},
0236     {AIC7XXX_CONFIGED_TAG_COMMANDS},
0237     {AIC7XXX_CONFIGED_TAG_COMMANDS},
0238     {AIC7XXX_CONFIGED_TAG_COMMANDS},
0239     {AIC7XXX_CONFIGED_TAG_COMMANDS},
0240     {AIC7XXX_CONFIGED_TAG_COMMANDS},
0241     {AIC7XXX_CONFIGED_TAG_COMMANDS},
0242     {AIC7XXX_CONFIGED_TAG_COMMANDS},
0243     {AIC7XXX_CONFIGED_TAG_COMMANDS},
0244     {AIC7XXX_CONFIGED_TAG_COMMANDS}
0245 };
0246 
0247 /*
0248  * There should be a specific return value for this in scsi.h, but
0249  * it seems that most drivers ignore it.
0250  */
0251 #define DID_UNDERFLOW   DID_ERROR
0252 
0253 void
0254 ahc_print_path(struct ahc_softc *ahc, struct scb *scb)
0255 {
0256     printk("(scsi%d:%c:%d:%d): ",
0257            ahc->platform_data->host->host_no,
0258            scb != NULL ? SCB_GET_CHANNEL(ahc, scb) : 'X',
0259            scb != NULL ? SCB_GET_TARGET(ahc, scb) : -1,
0260            scb != NULL ? SCB_GET_LUN(scb) : -1);
0261 }
0262 
0263 /*
0264  * XXX - these options apply unilaterally to _all_ 274x/284x/294x
0265  *       cards in the system.  This should be fixed.  Exceptions to this
0266  *       rule are noted in the comments.
0267  */
0268 
0269 /*
0270  * Skip the SCSI bus reset.  A non-zero value makes us skip the reset at startup.  This
0271  * has no effect on any later resets that might occur due to things like
0272  * SCSI bus timeouts.
0273  */
0274 static uint32_t aic7xxx_no_reset;
0275 
0276 /*
0277  * Should we force EXTENDED translation on a controller.
0278  *     0 == Use whatever is in the SEEPROM or default to off
0279  *     1 == Use whatever is in the SEEPROM or default to on
0280  */
0281 static uint32_t aic7xxx_extended;
0282 
0283 /*
0284  * PCI bus parity checking of the Adaptec controllers.  This is somewhat
0285  * dubious at best.  To my knowledge, this option has never actually
0286  * solved a PCI parity problem, but on certain machines with broken PCI
0287  * chipset configurations where stray PCI transactions with bad parity are
0288  * the norm rather than the exception, the error messages can be overwhelming.
0289  * It's included in the driver for completeness.
0290  *   0     = Shut off PCI parity check
0291  *   non-0 = reverse polarity pci parity checking
0292  */
0293 static uint32_t aic7xxx_pci_parity = ~0;
0294 
0295 /*
0296  * There are lots of broken chipsets in the world.  Some of them will
0297  * violate the PCI spec when we issue byte sized memory writes to our
0298  * controller.  I/O mapped register access, if allowed by the given
0299  * platform, will work in almost all cases.
0300  */
0301 uint32_t aic7xxx_allow_memio = ~0;
0302 
0303 /*
0304  * Controls how long each device is given to respond to selection (the selection timeout).
0305  * The table of values goes like this:
0306  *   0 - 256ms
0307  *   1 - 128ms
0308  *   2 - 64ms
0309  *   3 - 32ms
0310  * We default to 256ms because some older devices need a longer time
0311  * to respond to initial selection.
0312  */
0313 static uint32_t aic7xxx_seltime;
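/*
 * The parsed value is consumed in ahc_platform_alloc() below, where it is
 * masked to two bits and shifted into the adapter's selection-timeout
 * field: ahc->seltime = (aic7xxx_seltime & 0x3) << 4.
 */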
0314 
0315 /*
0316  * Certain devices do not perform any aging on commands.  Should the
0317  * device be saturated by commands in one portion of the disk, it is
0318  * possible for transactions on far away sectors to never be serviced.
0319  * To handle these devices, we can periodically send an ordered tag to
0320  * force all outstanding transactions to be serviced prior to a new
0321  * transaction.
0322  */
0323 static uint32_t aic7xxx_periodic_otag;
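/*
 * When this option is enabled, ahc_linux_run_command() below replaces the
 * usual SIMPLE_QUEUE_TAG with an ORDERED_QUEUE_TAG once a device's
 * commands_since_idle_or_otag counter reaches AHC_OTAG_THRESH, forcing
 * older transactions to be serviced before newer ones.
 */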
0324 
0325 /*
0326  * Module information and settable options.
0327  */
0328 static char *aic7xxx = NULL;
0329 
0330 MODULE_AUTHOR("Maintainer: Hannes Reinecke <hare@suse.de>");
0331 MODULE_DESCRIPTION("Adaptec AIC77XX/78XX SCSI Host Bus Adapter driver");
0332 MODULE_LICENSE("Dual BSD/GPL");
0333 MODULE_VERSION(AIC7XXX_DRIVER_VERSION);
0334 module_param(aic7xxx, charp, 0444);
0335 MODULE_PARM_DESC(aic7xxx,
0336 "period-delimited options string:\n"
0337 "   verbose         Enable verbose/diagnostic logging\n"
0338 "   allow_memio     Allow device registers to be memory mapped\n"
0339 "   debug           Bitmask of debug values to enable\n"
0340 "   no_probe        Toggle EISA/VLB controller probing\n"
0341 "   probe_eisa_vl       Toggle EISA/VLB controller probing\n"
0342 "   no_reset        Suppress initial bus resets\n"
0343 "   extended        Enable extended geometry on all controllers\n"
0344 "   periodic_otag       Send an ordered tagged transaction\n"
0345 "               periodically to prevent tag starvation.\n"
0346 "               This may be required by some older disk\n"
0347 "               drives or RAID arrays.\n"
0348 "   tag_info:<tag_str>  Set per-target tag depth\n"
0349 "   global_tag_depth:<int>  Global tag depth for every target\n"
0350 "               on every bus\n"
0351 "   seltime:<int>       Selection Timeout\n"
0352 "               (0/256ms,1/128ms,2/64ms,3/32ms)\n"
0353 "\n"
0354 "   Sample modprobe configuration file:\n"
0355 "   #   Toggle EISA/VLB probing\n"
0356 "   #   Set tag depth on Controller 1/Target 1 to 10 tags\n"
0357 "   #   Shorten the selection timeout to 128ms\n"
0358 "\n"
0359 "   options aic7xxx 'aic7xxx=probe_eisa_vl.tag_info:{{}.{.10}}.seltime:1'\n"
0360 );
0361 
0362 static void ahc_linux_handle_scsi_status(struct ahc_softc *,
0363                      struct scsi_device *,
0364                      struct scb *);
0365 static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc,
0366                      struct scsi_cmnd *cmd);
0367 static void ahc_linux_freeze_simq(struct ahc_softc *ahc);
0368 static void ahc_linux_release_simq(struct ahc_softc *ahc);
0369 static int  ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag);
0370 static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc);
0371 static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc,
0372                      struct ahc_devinfo *devinfo);
0373 static void ahc_linux_device_queue_depth(struct scsi_device *);
0374 static int ahc_linux_run_command(struct ahc_softc*,
0375                  struct ahc_linux_device *,
0376                  struct scsi_cmnd *);
0377 static void ahc_linux_setup_tag_info_global(char *p);
0378 static int  aic7xxx_setup(char *s);
0379 
0380 static int ahc_linux_unit;
0381 
0382 
0383 /************************** OS Utility Wrappers *******************************/
0384 void
0385 ahc_delay(long usec)
0386 {
0387     /*
0388      * udelay on Linux can have problems for
0389      * multi-millisecond waits.  Wait at most
0390      * 1024us per call.
0391      */
0392     while (usec > 0) {
0393         /* Cap each call at 1024us rather than delaying the remainder. */
0394         udelay(usec > 1024 ? 1024 : usec);
0394         usec -= 1024;
0395     }
0396 }
0397 
0398 /***************************** Low Level I/O **********************************/
0399 uint8_t
0400 ahc_inb(struct ahc_softc * ahc, long port)
0401 {
0402     uint8_t x;
0403 
0404     if (ahc->tag == BUS_SPACE_MEMIO) {
0405         x = readb(ahc->bsh.maddr + port);
0406     } else {
0407         x = inb(ahc->bsh.ioport + port);
0408     }
0409     mb();
0410     return (x);
0411 }
0412 
0413 void
0414 ahc_outb(struct ahc_softc * ahc, long port, uint8_t val)
0415 {
0416     if (ahc->tag == BUS_SPACE_MEMIO) {
0417         writeb(val, ahc->bsh.maddr + port);
0418     } else {
0419         outb(val, ahc->bsh.ioport + port);
0420     }
0421     mb();
0422 }
0423 
0424 void
0425 ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
0426 {
0427     int i;
0428 
0429     /*
0430      * There is probably a more efficient way to do this on Linux
0431      * but we don't use this for anything speed critical and this
0432      * should work.
0433      */
0434     for (i = 0; i < count; i++)
0435         ahc_outb(ahc, port, *array++);
0436 }
0437 
0438 void
0439 ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
0440 {
0441     int i;
0442 
0443     /*
0444      * There is probably a more efficient way to do this on Linux
0445      * but we don't use this for anything speed critical and this
0446      * should work.
0447      */
0448     for (i = 0; i < count; i++)
0449         *array++ = ahc_inb(ahc, port);
0450 }
0451 
0452 /********************************* Inlines ************************************/
0453 static void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
0454 
0455 static int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
0456                       struct ahc_dma_seg *sg,
0457                       dma_addr_t addr, bus_size_t len);
0458 
0459 static void
0460 ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
0461 {
0462     struct scsi_cmnd *cmd;
0463 
0464     cmd = scb->io_ctx;
0465     ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE);
0466 
0467     scsi_dma_unmap(cmd);
0468 }
0469 
0470 static int
0471 ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
0472           struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len)
0473 {
0474     int  consumed;
0475 
0476     if ((scb->sg_count + 1) > AHC_NSEG)
0477         panic("Too few segs for dma mapping.  "
0478               "Increase AHC_NSEG\n");
0479 
0480     consumed = 1;
0481     sg->addr = ahc_htole32(addr & 0xFFFFFFFF);
0482     scb->platform_data->xfer_len += len;
0483 
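    /*
     * On controllers using 39-bit addressing, address bits above 32 are
     * folded into the upper bits of the segment length below
     * (AHC_SG_HIGH_ADDR_MASK); the address field written above only holds
     * the low 32 bits.
     */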
0484     if (sizeof(dma_addr_t) > 4
0485      && (ahc->flags & AHC_39BIT_ADDRESSING) != 0)
0486         len |= (addr >> 8) & AHC_SG_HIGH_ADDR_MASK;
0487 
0488     sg->len = ahc_htole32(len);
0489     return (consumed);
0490 }
0491 
0492 /*
0493  * Return a string describing the driver.
0494  */
0495 static const char *
0496 ahc_linux_info(struct Scsi_Host *host)
0497 {
0498     static char buffer[512];
0499     char    ahc_info[256];
0500     char   *bp;
0501     struct ahc_softc *ahc;
0502 
0503     bp = &buffer[0];
0504     ahc = *(struct ahc_softc **)host->hostdata;
0505     memset(bp, 0, sizeof(buffer));
0506     strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev " AIC7XXX_DRIVER_VERSION "\n"
0507             "        <");
0508     strcat(bp, ahc->description);
0509     strcat(bp, ">\n"
0510             "        ");
0511     ahc_controller_info(ahc, ahc_info);
0512     strcat(bp, ahc_info);
0513     strcat(bp, "\n");
0514 
0515     return (bp);
0516 }
0517 
0518 /*
0519  * Queue an SCB to the controller.
0520  */
0521 static int ahc_linux_queue_lck(struct scsi_cmnd *cmd)
0522 {
0523     struct   ahc_softc *ahc;
0524     struct   ahc_linux_device *dev = scsi_transport_device_data(cmd->device);
0525     int rtn = SCSI_MLQUEUE_HOST_BUSY;
0526     unsigned long flags;
0527 
0528     ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
0529 
0530     ahc_lock(ahc, &flags);
0531     if (ahc->platform_data->qfrozen == 0) {
0532         cmd->result = CAM_REQ_INPROG << 16;
0533         rtn = ahc_linux_run_command(ahc, dev, cmd);
0534     }
0535     ahc_unlock(ahc, &flags);
0536 
0537     return rtn;
0538 }
0539 
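/*
 * DEF_SCSI_QCMD() generates the ahc_linux_queue() wrapper installed as
 * .queuecommand in aic7xxx_driver_template below; the wrapper supplies the
 * standard queuecommand entry-point boilerplate and then calls
 * ahc_linux_queue_lck() above.
 */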
0540 static DEF_SCSI_QCMD(ahc_linux_queue)
0541 
0542 static inline struct scsi_target **
0543 ahc_linux_target_in_softc(struct scsi_target *starget)
0544 {
0545     struct  ahc_softc *ahc =
0546         *((struct ahc_softc **)dev_to_shost(&starget->dev)->hostdata);
0547     unsigned int target_offset;
0548 
0549     target_offset = starget->id;
0550     if (starget->channel != 0)
0551         target_offset += 8;
0552 
0553     return &ahc->platform_data->starget[target_offset];
0554 }
0555 
0556 static int
0557 ahc_linux_target_alloc(struct scsi_target *starget)
0558 {
0559     struct  ahc_softc *ahc =
0560         *((struct ahc_softc **)dev_to_shost(&starget->dev)->hostdata);
0561     struct seeprom_config *sc = ahc->seep_config;
0562     unsigned long flags;
0563     struct scsi_target **ahc_targp = ahc_linux_target_in_softc(starget);
0564     unsigned short scsirate;
0565     struct ahc_devinfo devinfo;
0566     char channel = starget->channel + 'A';
0567     unsigned int our_id = ahc->our_id;
0568     unsigned int target_offset;
0569 
0570     target_offset = starget->id;
0571     if (starget->channel != 0)
0572         target_offset += 8;
0573 
0574     if (starget->channel)
0575         our_id = ahc->our_id_b;
0576 
0577     ahc_lock(ahc, &flags);
0578 
0579     BUG_ON(*ahc_targp != NULL);
0580 
0581     *ahc_targp = starget;
0582 
0583     if (sc) {
0584         int maxsync = AHC_SYNCRATE_DT;
0585         int ultra = 0;
0586         int flags = sc->device_flags[target_offset];
0587 
0588         if (ahc->flags & AHC_NEWEEPROM_FMT) {
0589             if (flags & CFSYNCHISULTRA)
0590             ultra = 1;
0591         } else if (flags & CFULTRAEN)
0592             ultra = 1;
0593         /* AIC nutcase; 10MHz appears as ultra = 1, CFXFER = 0x04
0594          * change it to ultra=0, CFXFER = 0 */
0595         if(ultra && (flags & CFXFER) == 0x04) {
0596             ultra = 0;
0597             flags &= ~CFXFER;
0598         }
0599 
0600         if ((ahc->features & AHC_ULTRA2) != 0) {
0601             scsirate = (flags & CFXFER) | (ultra ? 0x8 : 0);
0602         } else {
0603             scsirate = (flags & CFXFER) << 4;
0604             maxsync = ultra ? AHC_SYNCRATE_ULTRA :
0605                 AHC_SYNCRATE_FAST;
0606         }
0607         spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0;
0608         if (!(flags & CFSYNCH))
0609             spi_max_offset(starget) = 0;
0610         spi_min_period(starget) =
0611             ahc_find_period(ahc, scsirate, maxsync);
0612     }
0613     ahc_compile_devinfo(&devinfo, our_id, starget->id,
0614                 CAM_LUN_WILDCARD, channel,
0615                 ROLE_INITIATOR);
0616     ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0,
0617              AHC_TRANS_GOAL, /*paused*/FALSE);
0618     ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
0619               AHC_TRANS_GOAL, /*paused*/FALSE);
0620     ahc_unlock(ahc, &flags);
0621 
0622     return 0;
0623 }
0624 
0625 static void
0626 ahc_linux_target_destroy(struct scsi_target *starget)
0627 {
0628     struct scsi_target **ahc_targp = ahc_linux_target_in_softc(starget);
0629 
0630     *ahc_targp = NULL;
0631 }
0632 
0633 static int
0634 ahc_linux_slave_alloc(struct scsi_device *sdev)
0635 {
0636     struct  ahc_softc *ahc =
0637         *((struct ahc_softc **)sdev->host->hostdata);
0638     struct scsi_target *starget = sdev->sdev_target;
0639     struct ahc_linux_device *dev;
0640 
0641     if (bootverbose)
0642         printk("%s: Slave Alloc %d\n", ahc_name(ahc), sdev->id);
0643 
0644     dev = scsi_transport_device_data(sdev);
0645     memset(dev, 0, sizeof(*dev));
0646 
0647     /*
0648      * We start out life using untagged
0649      * transactions of which we allow one.
0650      */
0651     dev->openings = 1;
0652 
0653     /*
0654      * Set maxtags to 0.  This will be changed if we
0655      * later determine that we are dealing with
0656      * a tagged queuing capable device.
0657      */
0658     dev->maxtags = 0;
0659 
0660     spi_period(starget) = 0;
0661 
0662     return 0;
0663 }
0664 
0665 static int
0666 ahc_linux_slave_configure(struct scsi_device *sdev)
0667 {
0668     if (bootverbose)
0669         sdev_printk(KERN_INFO, sdev, "Slave Configure\n");
0670 
0671     ahc_linux_device_queue_depth(sdev);
0672 
0673     /* Initial Domain Validation */
0674     if (!spi_initial_dv(sdev->sdev_target))
0675         spi_dv_device(sdev);
0676 
0677     return 0;
0678 }
0679 
0680 #if defined(__i386__)
0681 /*
0682  * Return the disk geometry for the given SCSI device.
0683  */
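/*
 * A partition table found by scsi_partsize() takes precedence.  Otherwise
 * a 64-head/32-sector translation is assumed, switching to the extended
 * 255-head/63-sector translation when extended translation is enabled for
 * the channel and the default mapping would yield 1024 or more cylinders.
 */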
0684 static int
0685 ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
0686             sector_t capacity, int geom[])
0687 {
0688     int  heads;
0689     int  sectors;
0690     int  cylinders;
0691     int  extended;
0692     struct   ahc_softc *ahc;
0693     u_int    channel;
0694 
0695     ahc = *((struct ahc_softc **)sdev->host->hostdata);
0696     channel = sdev_channel(sdev);
0697 
0698     if (scsi_partsize(bdev, capacity, geom))
0699         return 0;
0700 
0701     heads = 64;
0702     sectors = 32;
0703     cylinders = aic_sector_div(capacity, heads, sectors);
0704 
0705     if (aic7xxx_extended != 0)
0706         extended = 1;
0707     else if (channel == 0)
0708         extended = (ahc->flags & AHC_EXTENDED_TRANS_A) != 0;
0709     else
0710         extended = (ahc->flags & AHC_EXTENDED_TRANS_B) != 0;
0711     if (extended && cylinders >= 1024) {
0712         heads = 255;
0713         sectors = 63;
0714         cylinders = aic_sector_div(capacity, heads, sectors);
0715     }
0716     geom[0] = heads;
0717     geom[1] = sectors;
0718     geom[2] = cylinders;
0719     return (0);
0720 }
0721 #endif
0722 
0723 /*
0724  * Abort the current SCSI command(s).
0725  */
0726 static int
0727 ahc_linux_abort(struct scsi_cmnd *cmd)
0728 {
0729     int error;
0730 
0731     error = ahc_linux_queue_recovery_cmd(cmd, SCB_ABORT);
0732     if (error != SUCCESS)
0733         printk("aic7xxx_abort returns 0x%x\n", error);
0734     return (error);
0735 }
0736 
0737 /*
0738  * Attempt to send a target reset message to the device that timed out.
0739  */
0740 static int
0741 ahc_linux_dev_reset(struct scsi_cmnd *cmd)
0742 {
0743     int error;
0744 
0745     error = ahc_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET);
0746     if (error != SUCCESS)
0747         printk("aic7xxx_dev_reset returns 0x%x\n", error);
0748     return (error);
0749 }
0750 
0751 /*
0752  * Reset the SCSI bus.
0753  */
0754 static int
0755 ahc_linux_bus_reset(struct scsi_cmnd *cmd)
0756 {
0757     struct ahc_softc *ahc;
0758     int    found;
0759     unsigned long flags;
0760 
0761     ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
0762 
0763     ahc_lock(ahc, &flags);
0764     found = ahc_reset_channel(ahc, scmd_channel(cmd) + 'A',
0765                   /*initiate reset*/TRUE);
0766     ahc_unlock(ahc, &flags);
0767 
0768     if (bootverbose)
0769         printk("%s: SCSI bus reset delivered. "
0770                "%d SCBs aborted.\n", ahc_name(ahc), found);
0771 
0772     return SUCCESS;
0773 }
0774 
0775 struct scsi_host_template aic7xxx_driver_template = {
0776     .module         = THIS_MODULE,
0777     .name           = "aic7xxx",
0778     .proc_name      = "aic7xxx",
0779     .show_info      = ahc_linux_show_info,
0780     .write_info     = ahc_proc_write_seeprom,
0781     .info           = ahc_linux_info,
0782     .queuecommand       = ahc_linux_queue,
0783     .eh_abort_handler   = ahc_linux_abort,
0784     .eh_device_reset_handler = ahc_linux_dev_reset,
0785     .eh_bus_reset_handler   = ahc_linux_bus_reset,
0786 #if defined(__i386__)
0787     .bios_param     = ahc_linux_biosparam,
0788 #endif
0789     .can_queue      = AHC_MAX_QUEUE,
0790     .this_id        = -1,
0791     .max_sectors        = 8192,
0792     .cmd_per_lun        = 2,
0793     .slave_alloc        = ahc_linux_slave_alloc,
0794     .slave_configure    = ahc_linux_slave_configure,
0795     .target_alloc       = ahc_linux_target_alloc,
0796     .target_destroy     = ahc_linux_target_destroy,
0797 };
0798 
0799 /**************************** Tasklet Handler *********************************/
0800 
0801 /******************************** Macros **************************************/
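/*
 * BUILD_SCSIID() composes the SCSIID byte for a command: the target ID in
 * the TID field, our own initiator ID (our_id for channel A, our_id_b for
 * channel B), and the TWIN_CHNLB flag when the command is destined for
 * channel B of a twin-bus adapter.
 */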
0802 #define BUILD_SCSIID(ahc, cmd)                          \
0803     ((((cmd)->device->id << TID_SHIFT) & TID)               \
0804     | (((cmd)->device->channel == 0) ? (ahc)->our_id : (ahc)->our_id_b) \
0805     | (((cmd)->device->channel == 0) ? 0 : TWIN_CHNLB))
0806 
0807 /******************************** Bus DMA *************************************/
0808 int
0809 ahc_dma_tag_create(struct ahc_softc *ahc, bus_dma_tag_t parent,
0810            bus_size_t alignment, bus_size_t boundary,
0811            dma_addr_t lowaddr, dma_addr_t highaddr,
0812            bus_dma_filter_t *filter, void *filterarg,
0813            bus_size_t maxsize, int nsegments,
0814            bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag)
0815 {
0816     bus_dma_tag_t dmat;
0817 
0818     dmat = kmalloc(sizeof(*dmat), GFP_ATOMIC);
0819     if (dmat == NULL)
0820         return (ENOMEM);
0821 
0822     /*
0823      * Linux is very simplistic about DMA memory.  For now don't
0824      * maintain all specification information.  Once Linux supplies
0825      * better facilities for doing these operations, or the
0826      * needs of this particular driver change, we might need to do
0827      * more here.
0828      */
0829     dmat->alignment = alignment;
0830     dmat->boundary = boundary;
0831     dmat->maxsize = maxsize;
0832     *ret_tag = dmat;
0833     return (0);
0834 }
0835 
0836 void
0837 ahc_dma_tag_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat)
0838 {
0839     kfree(dmat);
0840 }
0841 
0842 int
0843 ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr,
0844          int flags, bus_dmamap_t *mapp)
0845 {
0846     /* XXX: check if we really need the GFP_ATOMIC and unwind this mess! */
0847     *vaddr = dma_alloc_coherent(ahc->dev, dmat->maxsize, mapp, GFP_ATOMIC);
0848     if (*vaddr == NULL)
0849         return ENOMEM;
0850     return 0;
0851 }
0852 
0853 void
0854 ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat,
0855         void* vaddr, bus_dmamap_t map)
0856 {
0857     dma_free_coherent(ahc->dev, dmat->maxsize, vaddr, map);
0858 }
0859 
0860 int
0861 ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map,
0862         void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
0863         void *cb_arg, int flags)
0864 {
0865     /*
0866      * Assume for now that this will only be used during
0867      * initialization and not for per-transaction buffer mapping.
0868      */
0869     bus_dma_segment_t stack_sg;
0870 
0871     stack_sg.ds_addr = map;
0872     stack_sg.ds_len = dmat->maxsize;
0873     cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
0874     return (0);
0875 }
0876 
0877 void
0878 ahc_dmamap_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
0879 {
0880 }
0881 
0882 int
0883 ahc_dmamap_unload(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
0884 {
0885     /* Nothing to do */
0886     return (0);
0887 }
0888 
0889 static void
0890 ahc_linux_setup_tag_info_global(char *p)
0891 {
0892     int tags, i, j;
0893 
0894     tags = simple_strtoul(p + 1, NULL, 0) & 0xff;
0895     printk("Setting Global Tags= %d\n", tags);
0896 
0897     for (i = 0; i < ARRAY_SIZE(aic7xxx_tag_info); i++) {
0898         for (j = 0; j < AHC_NUM_TARGETS; j++) {
0899             aic7xxx_tag_info[i].tag_commands[j] = tags;
0900         }
0901     }
0902 }
0903 
0904 static void
0905 ahc_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
0906 {
0907 
0908     if ((instance >= 0) && (targ >= 0)
0909      && (instance < ARRAY_SIZE(aic7xxx_tag_info))
0910      && (targ < AHC_NUM_TARGETS)) {
0911         aic7xxx_tag_info[instance].tag_commands[targ] = value & 0xff;
0912         if (bootverbose)
0913             printk("tag_info[%d:%d] = %d\n", instance, targ, value);
0914     }
0915 }
0916 
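/*
 * Parse a brace-delimited option argument such as
 * "tag_info:{{}.{.10}}" (see the sample modprobe line above), invoking the
 * callback with (instance, target, value) for each numeric entry.  Braces
 * open a controller-instance or target scope, while '.' and ',' advance to
 * the next instance or target within the current scope.
 */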
0917 static char *
0918 ahc_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
0919                void (*callback)(u_long, int, int, int32_t),
0920                u_long callback_arg)
0921 {
0922     char    *tok_end;
0923     char    *tok_end2;
0924     int      i;
0925     int      instance;
0926     int  targ;
0927     int  done;
0928     char     tok_list[] = {'.', ',', '{', '}', '\0'};
0929 
0930     /* All options use a ':' name/arg separator */
0931     if (*opt_arg != ':')
0932         return (opt_arg);
0933     opt_arg++;
0934     instance = -1;
0935     targ = -1;
0936     done = FALSE;
0937     /*
0938      * Restore separator that may be in
0939      * the middle of our option argument.
0940      */
0941     tok_end = strchr(opt_arg, '\0');
0942     if (tok_end < end)
0943         *tok_end = ',';
0944     while (!done) {
0945         switch (*opt_arg) {
0946         case '{':
0947             if (instance == -1) {
0948                 instance = 0;
0949             } else {
0950                 if (depth > 1) {
0951                     if (targ == -1)
0952                         targ = 0;
0953                 } else {
0954                     printk("Malformed Option %s\n",
0955                            opt_name);
0956                     done = TRUE;
0957                 }
0958             }
0959             opt_arg++;
0960             break;
0961         case '}':
0962             if (targ != -1)
0963                 targ = -1;
0964             else if (instance != -1)
0965                 instance = -1;
0966             opt_arg++;
0967             break;
0968         case ',':
0969         case '.':
0970             if (instance == -1)
0971                 done = TRUE;
0972             else if (targ >= 0)
0973                 targ++;
0974             else if (instance >= 0)
0975                 instance++;
0976             opt_arg++;
0977             break;
0978         case '\0':
0979             done = TRUE;
0980             break;
0981         default:
0982             tok_end = end;
0983             for (i = 0; tok_list[i]; i++) {
0984                 tok_end2 = strchr(opt_arg, tok_list[i]);
0985                 if ((tok_end2) && (tok_end2 < tok_end))
0986                     tok_end = tok_end2;
0987             }
0988             callback(callback_arg, instance, targ,
0989                  simple_strtol(opt_arg, NULL, 0));
0990             opt_arg = tok_end;
0991             break;
0992         }
0993     }
0994     return (opt_arg);
0995 }
0996 
0997 /*
0998  * Handle Linux boot parameters. This routine allows for assigning a value
0999  * to a parameter with a ':' between the parameter and the value.
1000  * ie. aic7xxx=stpwlev:1,extended
1001  */
1002 static int
1003 aic7xxx_setup(char *s)
1004 {
1005     int i, n;
1006     char   *p;
1007     char   *end;
1008 
1009     static const struct {
1010         const char *name;
1011         uint32_t *flag;
1012     } options[] = {
1013         { "extended", &aic7xxx_extended },
1014         { "no_reset", &aic7xxx_no_reset },
1015         { "verbose", &aic7xxx_verbose },
1016         { "allow_memio", &aic7xxx_allow_memio},
1017 #ifdef AHC_DEBUG
1018         { "debug", &ahc_debug },
1019 #endif
1020         { "periodic_otag", &aic7xxx_periodic_otag },
1021         { "pci_parity", &aic7xxx_pci_parity },
1022         { "seltime", &aic7xxx_seltime },
1023         { "tag_info", NULL },
1024         { "global_tag_depth", NULL },
1025         { "dv", NULL }
1026     };
1027 
1028     end = strchr(s, '\0');
1029 
1030     /*
1031      * XXX ia64 gcc isn't smart enough to know that ARRAY_SIZE
1032      * will never be 0 in this case.
1033      */
1034     n = 0;
1035 
1036     while ((p = strsep(&s, ",.")) != NULL) {
1037         if (*p == '\0')
1038             continue;
1039         for (i = 0; i < ARRAY_SIZE(options); i++) {
1040 
1041             n = strlen(options[i].name);
1042             if (strncmp(options[i].name, p, n) == 0)
1043                 break;
1044         }
1045         if (i == ARRAY_SIZE(options))
1046             continue;
1047 
1048         if (strncmp(p, "global_tag_depth", n) == 0) {
1049             ahc_linux_setup_tag_info_global(p + n);
1050         } else if (strncmp(p, "tag_info", n) == 0) {
1051             s = ahc_parse_brace_option("tag_info", p + n, end,
1052                 2, ahc_linux_setup_tag_info, 0);
1053         } else if (p[n] == ':') {
1054             *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
1055         } else if (strncmp(p, "verbose", n) == 0) {
1056             *(options[i].flag) = 1;
1057         } else {
1058             *(options[i].flag) ^= 0xFFFFFFFF;
1059         }
1060     }
1061     return 1;
1062 }
1063 
1064 __setup("aic7xxx=", aic7xxx_setup);
1065 
1066 uint32_t aic7xxx_verbose;
1067 
1068 int
1069 ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *template)
1070 {
1071     char    buf[80];
1072     struct  Scsi_Host *host;
1073     char    *new_name;
1074     u_long  s;
1075     int retval;
1076 
1077     template->name = ahc->description;
1078     host = scsi_host_alloc(template, sizeof(struct ahc_softc *));
1079     if (host == NULL)
1080         return (ENOMEM);
1081 
1082     *((struct ahc_softc **)host->hostdata) = ahc;
1083     ahc->platform_data->host = host;
1084     host->can_queue = AHC_MAX_QUEUE;
1085     host->cmd_per_lun = 2;
1086     /* XXX No way to communicate the ID for multiple channels */
1087     host->this_id = ahc->our_id;
1088     host->irq = ahc->platform_data->irq;
1089     host->max_id = (ahc->features & AHC_WIDE) ? 16 : 8;
1090     host->max_lun = AHC_NUM_LUNS;
1091     host->max_channel = (ahc->features & AHC_TWIN) ? 1 : 0;
1092     host->sg_tablesize = AHC_NSEG;
1093     ahc_lock(ahc, &s);
1094     ahc_set_unit(ahc, ahc_linux_unit++);
1095     ahc_unlock(ahc, &s);
1096     sprintf(buf, "scsi%d", host->host_no);
1097     new_name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
1098     if (new_name != NULL) {
1099         strcpy(new_name, buf);
1100         ahc_set_name(ahc, new_name);
1101     }
1102     host->unique_id = ahc->unit;
1103     ahc_linux_initialize_scsi_bus(ahc);
1104     ahc_intr_enable(ahc, TRUE);
1105 
1106     host->transportt = ahc_linux_transport_template;
1107 
1108     retval = scsi_add_host(host, ahc->dev);
1109     if (retval) {
1110         printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n");
1111         scsi_host_put(host);
1112         return retval;
1113     }
1114 
1115     scsi_scan_host(host);
1116     return 0;
1117 }
1118 
1119 /*
1120  * Place the SCSI bus into a known state by either resetting it,
1121  * or forcing transfer negotiations on the next command to any
1122  * target.
1123  */
1124 static void
1125 ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc)
1126 {
1127     int i;
1128     int numtarg;
1129     unsigned long s;
1130 
1131     i = 0;
1132     numtarg = 0;
1133 
1134     ahc_lock(ahc, &s);
1135 
1136     if (aic7xxx_no_reset != 0)
1137         ahc->flags &= ~(AHC_RESET_BUS_A|AHC_RESET_BUS_B);
1138 
1139     if ((ahc->flags & AHC_RESET_BUS_A) != 0)
1140         ahc_reset_channel(ahc, 'A', /*initiate_reset*/TRUE);
1141     else
1142         numtarg = (ahc->features & AHC_WIDE) ? 16 : 8;
1143 
1144     if ((ahc->features & AHC_TWIN) != 0) {
1145 
1146         if ((ahc->flags & AHC_RESET_BUS_B) != 0) {
1147             ahc_reset_channel(ahc, 'B', /*initiate_reset*/TRUE);
1148         } else {
1149             if (numtarg == 0)
1150                 i = 8;
1151             numtarg += 8;
1152         }
1153     }
1154 
1155     /*
1156      * Force negotiation to async for all targets that
1157      * will not see an initial bus reset.
1158      */
1159     for (; i < numtarg; i++) {
1160         struct ahc_devinfo devinfo;
1161         struct ahc_initiator_tinfo *tinfo;
1162         struct ahc_tmode_tstate *tstate;
1163         u_int our_id;
1164         u_int target_id;
1165         char channel;
1166 
1167         channel = 'A';
1168         our_id = ahc->our_id;
1169         target_id = i;
1170         if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
1171             channel = 'B';
1172             our_id = ahc->our_id_b;
1173             target_id = i % 8;
1174         }
1175         tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
1176                         target_id, &tstate);
1177         ahc_compile_devinfo(&devinfo, our_id, target_id,
1178                     CAM_LUN_WILDCARD, channel, ROLE_INITIATOR);
1179         ahc_update_neg_request(ahc, &devinfo, tstate,
1180                        tinfo, AHC_NEG_ALWAYS);
1181     }
1182     ahc_unlock(ahc, &s);
1183     /* Give the bus some time to recover */
1184     if ((ahc->flags & (AHC_RESET_BUS_A|AHC_RESET_BUS_B)) != 0) {
1185         ahc_linux_freeze_simq(ahc);
1186         msleep(AIC7XXX_RESET_DELAY);
1187         ahc_linux_release_simq(ahc);
1188     }
1189 }
1190 
1191 int
1192 ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
1193 {
1194 
1195     ahc->platform_data =
1196         kzalloc(sizeof(struct ahc_platform_data), GFP_ATOMIC);
1197     if (ahc->platform_data == NULL)
1198         return (ENOMEM);
1199     ahc->platform_data->irq = AHC_LINUX_NOIRQ;
1200     ahc_lockinit(ahc);
1201     ahc->seltime = (aic7xxx_seltime & 0x3) << 4;
1202     ahc->seltime_b = (aic7xxx_seltime & 0x3) << 4;
1203     if (aic7xxx_pci_parity == 0)
1204         ahc->flags |= AHC_DISABLE_PCI_PERR;
1205 
1206     return (0);
1207 }
1208 
1209 void
1210 ahc_platform_free(struct ahc_softc *ahc)
1211 {
1212     struct scsi_target *starget;
1213     int i;
1214 
1215     if (ahc->platform_data != NULL) {
1216         /* destroy all of the device and target objects */
1217         for (i = 0; i < AHC_NUM_TARGETS; i++) {
1218             starget = ahc->platform_data->starget[i];
1219             if (starget != NULL) {
1220                 ahc->platform_data->starget[i] = NULL;
1221             }
1222         }
1223 
1224         if (ahc->platform_data->irq != AHC_LINUX_NOIRQ)
1225             free_irq(ahc->platform_data->irq, ahc);
1226         if (ahc->tag == BUS_SPACE_PIO
1227          && ahc->bsh.ioport != 0)
1228             release_region(ahc->bsh.ioport, 256);
1229         if (ahc->tag == BUS_SPACE_MEMIO
1230          && ahc->bsh.maddr != NULL) {
1231             iounmap(ahc->bsh.maddr);
1232             release_mem_region(ahc->platform_data->mem_busaddr,
1233                        0x1000);
1234         }
1235 
1236         if (ahc->platform_data->host)
1237             scsi_host_put(ahc->platform_data->host);
1238 
1239         kfree(ahc->platform_data);
1240     }
1241 }
1242 
1243 void
1244 ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
1245 {
1246     ahc_platform_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
1247                 SCB_GET_CHANNEL(ahc, scb),
1248                 SCB_GET_LUN(scb), SCB_LIST_NULL,
1249                 ROLE_UNKNOWN, CAM_REQUEUE_REQ);
1250 }
1251 
1252 void
1253 ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
1254               struct ahc_devinfo *devinfo, ahc_queue_alg alg)
1255 {
1256     struct ahc_linux_device *dev;
1257     int was_queuing;
1258     int now_queuing;
1259 
1260     if (sdev == NULL)
1261         return;
1262     dev = scsi_transport_device_data(sdev);
1263 
1264     was_queuing = dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED);
1265     switch (alg) {
1266     default:
1267     case AHC_QUEUE_NONE:
1268         now_queuing = 0;
1269         break;
1270     case AHC_QUEUE_BASIC:
1271         now_queuing = AHC_DEV_Q_BASIC;
1272         break;
1273     case AHC_QUEUE_TAGGED:
1274         now_queuing = AHC_DEV_Q_TAGGED;
1275         break;
1276     }
1277     if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) == 0
1278      && (was_queuing != now_queuing)
1279      && (dev->active != 0)) {
1280         dev->flags |= AHC_DEV_FREEZE_TIL_EMPTY;
1281         dev->qfrozen++;
1282     }
1283 
1284     dev->flags &= ~(AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED|AHC_DEV_PERIODIC_OTAG);
1285     if (now_queuing) {
1286         u_int usertags;
1287 
1288         usertags = ahc_linux_user_tagdepth(ahc, devinfo);
1289         if (!was_queuing) {
1290             /*
1291              * Start out aggressively and allow our
1292              * dynamic queue depth algorithm to take
1293              * care of the rest.
1294              */
1295             dev->maxtags = usertags;
1296             dev->openings = dev->maxtags - dev->active;
1297         }
1298         if (dev->maxtags == 0) {
1299             /*
1300              * Queueing is disabled by the user.
1301              */
1302             dev->openings = 1;
1303         } else if (alg == AHC_QUEUE_TAGGED) {
1304             dev->flags |= AHC_DEV_Q_TAGGED;
1305             if (aic7xxx_periodic_otag != 0)
1306                 dev->flags |= AHC_DEV_PERIODIC_OTAG;
1307         } else
1308             dev->flags |= AHC_DEV_Q_BASIC;
1309     } else {
1310         /* We can only have one opening. */
1311         dev->maxtags = 0;
1312         dev->openings =  1 - dev->active;
1313     }
1314     switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) {
1315     case AHC_DEV_Q_BASIC:
1316     case AHC_DEV_Q_TAGGED:
1317         scsi_change_queue_depth(sdev,
1318                 dev->openings + dev->active);
1319         break;
1320     default:
1321         /*
1322          * We allow the OS to queue 2 untagged transactions to
1323          * us at any time even though we can only execute them
1324          * serially on the controller/device.  This should
1325          * remove some latency.
1326          */
1327         scsi_change_queue_depth(sdev, 2);
1328         break;
1329     }
1330 }
1331 
1332 int
1333 ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel,
1334             int lun, u_int tag, role_t role, uint32_t status)
1335 {
1336     return 0;
1337 }
1338 
1339 static u_int
1340 ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
1341 {
1342     static int warned_user;
1343     u_int tags;
1344 
1345     tags = 0;
1346     if ((ahc->user_discenable & devinfo->target_mask) != 0) {
1347         if (ahc->unit >= ARRAY_SIZE(aic7xxx_tag_info)) {
1348             if (warned_user == 0) {
1349 
1350                 printk(KERN_WARNING
1351 "aic7xxx: WARNING: Insufficient tag_info instances\n"
1352 "aic7xxx: for installed controllers. Using defaults\n"
1353 "aic7xxx: Please update the aic7xxx_tag_info array in\n"
1354 "aic7xxx: the aic7xxx_osm..c source file.\n");
1355                 warned_user++;
1356             }
1357             tags = AHC_MAX_QUEUE;
1358         } else {
1359             adapter_tag_info_t *tag_info;
1360 
1361             tag_info = &aic7xxx_tag_info[ahc->unit];
1362             tags = tag_info->tag_commands[devinfo->target_offset];
1363             if (tags > AHC_MAX_QUEUE)
1364                 tags = AHC_MAX_QUEUE;
1365         }
1366     }
1367     return (tags);
1368 }
1369 
1370 /*
1371  * Determines the queue depth for a given device.
1372  */
1373 static void
1374 ahc_linux_device_queue_depth(struct scsi_device *sdev)
1375 {
1376     struct  ahc_devinfo devinfo;
1377     u_int   tags;
1378     struct ahc_softc *ahc = *((struct ahc_softc **)sdev->host->hostdata);
1379 
1380     ahc_compile_devinfo(&devinfo,
1381                 sdev->sdev_target->channel == 0
1382               ? ahc->our_id : ahc->our_id_b,
1383                 sdev->sdev_target->id, sdev->lun,
1384                 sdev->sdev_target->channel == 0 ? 'A' : 'B',
1385                 ROLE_INITIATOR);
1386     tags = ahc_linux_user_tagdepth(ahc, &devinfo);
1387     if (tags != 0 && sdev->tagged_supported != 0) {
1388 
1389         ahc_platform_set_tags(ahc, sdev, &devinfo, AHC_QUEUE_TAGGED);
1390         ahc_send_async(ahc, devinfo.channel, devinfo.target,
1391                    devinfo.lun, AC_TRANSFER_NEG);
1392         ahc_print_devinfo(ahc, &devinfo);
1393         printk("Tagged Queuing enabled.  Depth %d\n", tags);
1394     } else {
1395         ahc_platform_set_tags(ahc, sdev, &devinfo, AHC_QUEUE_NONE);
1396         ahc_send_async(ahc, devinfo.channel, devinfo.target,
1397                    devinfo.lun, AC_TRANSFER_NEG);
1398     }
1399 }
1400 
1401 static int
1402 ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
1403               struct scsi_cmnd *cmd)
1404 {
1405     struct   scb *scb;
1406     struct   hardware_scb *hscb;
1407     struct   ahc_initiator_tinfo *tinfo;
1408     struct   ahc_tmode_tstate *tstate;
1409     uint16_t mask;
1410     struct scb_tailq *untagged_q = NULL;
1411     int nseg;
1412 
1413     /*
1414      * Schedule us to run later.  The only reason we are not
1415      * running is because the whole controller Q is frozen.
1416      */
1417     if (ahc->platform_data->qfrozen != 0)
1418         return SCSI_MLQUEUE_HOST_BUSY;
1419 
1420     /*
1421      * We only allow one untagged transaction
1422      * per target in the initiator role unless
1423      * we are storing a full busy target *lun*
1424      * table in SCB space.
1425      */
1426     if (!(cmd->flags & SCMD_TAGGED)
1427         && (ahc->features & AHC_SCB_BTT) == 0) {
1428         int target_offset;
1429 
1430         target_offset = cmd->device->id + cmd->device->channel * 8;
1431         untagged_q = &(ahc->untagged_queues[target_offset]);
1432         if (!TAILQ_EMPTY(untagged_q))
1433             /* If we're already executing an untagged command
1434              * to this target, we cannot accept another until it completes. */
1435             return SCSI_MLQUEUE_DEVICE_BUSY;
1436     }
1437 
1438     nseg = scsi_dma_map(cmd);
1439     if (nseg < 0)
1440         return SCSI_MLQUEUE_HOST_BUSY;
1441 
1442     /*
1443      * Get an scb to use.
1444      */
1445     scb = ahc_get_scb(ahc);
1446     if (!scb) {
1447         scsi_dma_unmap(cmd);
1448         return SCSI_MLQUEUE_HOST_BUSY;
1449     }
1450 
1451     scb->io_ctx = cmd;
1452     scb->platform_data->dev = dev;
1453     hscb = scb->hscb;
1454     cmd->host_scribble = (char *)scb;
1455 
1456     /*
1457      * Fill out basics of the HSCB.
1458      */
1459     hscb->control = 0;
1460     hscb->scsiid = BUILD_SCSIID(ahc, cmd);
1461     hscb->lun = cmd->device->lun;
1462     mask = SCB_GET_TARGET_MASK(ahc, scb);
1463     tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb),
1464                     SCB_GET_OUR_ID(scb),
1465                     SCB_GET_TARGET(ahc, scb), &tstate);
1466     hscb->scsirate = tinfo->scsirate;
1467     hscb->scsioffset = tinfo->curr.offset;
1468     if ((tstate->ultraenb & mask) != 0)
1469         hscb->control |= ULTRAENB;
1470 
1471     if ((ahc->user_discenable & mask) != 0)
1472         hscb->control |= DISCENB;
1473 
1474     if ((tstate->auto_negotiate & mask) != 0) {
1475         scb->flags |= SCB_AUTO_NEGOTIATE;
1476         scb->hscb->control |= MK_MESSAGE;
1477     }
1478 
1479     if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) {
1480         if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
1481                 && (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
1482             hscb->control |= ORDERED_QUEUE_TAG;
1483             dev->commands_since_idle_or_otag = 0;
1484         } else {
1485             hscb->control |= SIMPLE_QUEUE_TAG;
1486         }
1487     }
1488 
1489     hscb->cdb_len = cmd->cmd_len;
1490     if (hscb->cdb_len <= 12) {
1491         memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len);
1492     } else {
1493         memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len);
1494         scb->flags |= SCB_CDB32_PTR;
1495     }
1496 
1497     scb->platform_data->xfer_len = 0;
1498     ahc_set_residual(scb, 0);
1499     ahc_set_sense_residual(scb, 0);
1500     scb->sg_count = 0;
1501 
1502     if (nseg > 0) {
1503         struct  ahc_dma_seg *sg;
1504         struct  scatterlist *cur_seg;
1505         int i;
1506 
1507         /* Copy the segments into the SG list. */
1508         sg = scb->sg_list;
1509         /*
1510          * The sg_count may be larger than nseg if
1511          * a transfer crosses a 32bit page.
1512          */
1513         scsi_for_each_sg(cmd, cur_seg, nseg, i) {
1514             dma_addr_t addr;
1515             bus_size_t len;
1516             int consumed;
1517 
1518             addr = sg_dma_address(cur_seg);
1519             len = sg_dma_len(cur_seg);
1520             consumed = ahc_linux_map_seg(ahc, scb,
1521                              sg, addr, len);
1522             sg += consumed;
1523             scb->sg_count += consumed;
1524         }
1525         sg--;
1526         sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
1527 
1528         /*
1529          * Reset the sg list pointer.
1530          */
1531         scb->hscb->sgptr =
1532             ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
1533 
1534         /*
1535          * Copy the first SG into the "current"
1536          * data pointer area.
1537          */
1538         scb->hscb->dataptr = scb->sg_list->addr;
1539         scb->hscb->datacnt = scb->sg_list->len;
1540     } else {
1541         scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
1542         scb->hscb->dataptr = 0;
1543         scb->hscb->datacnt = 0;
1544         scb->sg_count = 0;
1545     }
1546 
1547     LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
1548     dev->openings--;
1549     dev->active++;
1550     dev->commands_issued++;
1551     if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0)
1552         dev->commands_since_idle_or_otag++;
1553 
1554     scb->flags |= SCB_ACTIVE;
1555     if (untagged_q) {
1556         TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
1557         scb->flags |= SCB_UNTAGGEDQ;
1558     }
1559     ahc_queue_scb(ahc, scb);
1560     return 0;
1561 }
1562 
1563 /*
1564  * SCSI controller interrupt handler.
1565  */
1566 irqreturn_t
1567 ahc_linux_isr(int irq, void *dev_id)
1568 {
1569     struct  ahc_softc *ahc;
1570     u_long  flags;
1571     int ours;
1572 
1573     ahc = (struct ahc_softc *) dev_id;
1574     ahc_lock(ahc, &flags);
1575     ours = ahc_intr(ahc);
1576     ahc_unlock(ahc, &flags);
1577     return IRQ_RETVAL(ours);
1578 }
1579 
1580 void
1581 ahc_platform_flushwork(struct ahc_softc *ahc)
1582 {
1583 
1584 }
1585 
1586 void
1587 ahc_send_async(struct ahc_softc *ahc, char channel,
1588            u_int target, u_int lun, ac_code code)
1589 {
1590     switch (code) {
1591     case AC_TRANSFER_NEG:
1592     {
1593         struct  scsi_target *starget;
1594         struct  ahc_initiator_tinfo *tinfo;
1595         struct  ahc_tmode_tstate *tstate;
1596         int target_offset;
1597         unsigned int target_ppr_options;
1598 
1599         BUG_ON(target == CAM_TARGET_WILDCARD);
1600 
1601         tinfo = ahc_fetch_transinfo(ahc, channel,
1602                         channel == 'A' ? ahc->our_id
1603                                    : ahc->our_id_b,
1604                         target, &tstate);
1605 
1606         /*
1607          * Don't bother reporting results while
1608          * negotiations are still pending.
1609          */
1610         if (tinfo->curr.period != tinfo->goal.period
1611          || tinfo->curr.width != tinfo->goal.width
1612          || tinfo->curr.offset != tinfo->goal.offset
1613          || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
1614             if (bootverbose == 0)
1615                 break;
1616 
1617         /*
1618          * Don't bother reporting results that
1619          * are identical to those last reported.
1620          */
1621         target_offset = target;
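             /*
              * Targets on channel B of a twin-channel adapter occupy
              * the upper half (offsets 8-15) of the starget array.
              */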
1622         if (channel == 'B')
1623             target_offset += 8;
1624         starget = ahc->platform_data->starget[target_offset];
1625         if (starget == NULL)
1626             break;
1627 
1628         target_ppr_options =
1629             (spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0)
1630             + (spi_qas(starget) ? MSG_EXT_PPR_QAS_REQ : 0)
1631             + (spi_iu(starget) ?  MSG_EXT_PPR_IU_REQ : 0);
1632 
1633         if (tinfo->curr.period == spi_period(starget)
1634             && tinfo->curr.width == spi_width(starget)
1635             && tinfo->curr.offset == spi_offset(starget)
1636          && tinfo->curr.ppr_options == target_ppr_options)
1637             if (bootverbose == 0)
1638                 break;
1639 
1640         spi_period(starget) = tinfo->curr.period;
1641         spi_width(starget) = tinfo->curr.width;
1642         spi_offset(starget) = tinfo->curr.offset;
1643         spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0;
1644         spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0;
1645         spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 1 : 0;
1646         spi_display_xfer_agreement(starget);
1647         break;
1648     }
1649     case AC_SENT_BDR:
1650     {
1651         WARN_ON(lun != CAM_LUN_WILDCARD);
1652         scsi_report_device_reset(ahc->platform_data->host,
1653                      channel - 'A', target);
1654         break;
1655     }
1656     case AC_BUS_RESET:
1657         if (ahc->platform_data->host != NULL) {
1658             scsi_report_bus_reset(ahc->platform_data->host,
1659                           channel - 'A');
1660         }
1661         break;
1662     default:
1663         panic("ahc_send_async: Unexpected async event");
1664     }
1665 }
1666 
1667 /*
1668  * Calls the higher level scsi done function and frees the scb.
1669  */
1670 void
1671 ahc_done(struct ahc_softc *ahc, struct scb *scb)
1672 {
1673     struct scsi_cmnd *cmd;
1674     struct     ahc_linux_device *dev;
1675 
1676     LIST_REMOVE(scb, pending_links);
1677     if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
1678         struct scb_tailq *untagged_q;
1679         int target_offset;
1680 
1681         target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
1682         untagged_q = &(ahc->untagged_queues[target_offset]);
1683         TAILQ_REMOVE(untagged_q, scb, links.tqe);
1684         BUG_ON(!TAILQ_EMPTY(untagged_q));
1685     } else if ((scb->flags & SCB_ACTIVE) == 0) {
1686         /*
1687          * Transactions aborted from the untagged queue may
1688          * not have been dispatched to the controller, so
1689          * only check the SCB_ACTIVE flag for tagged transactions.
1690          */
1691         printk("SCB %d done'd twice\n", scb->hscb->tag);
1692         ahc_dump_card_state(ahc);
1693         panic("Stopping for safety");
1694     }
1695     cmd = scb->io_ctx;
1696     dev = scb->platform_data->dev;
1697     dev->active--;
1698     dev->openings++;
1699     if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
1700         cmd->result &= ~(CAM_DEV_QFRZN << 16);
1701         dev->qfrozen--;
1702     }
1703     ahc_linux_unmap_scb(ahc, scb);
1704 
1705     /*
1706      * Guard against stale sense data.
1707      * The Linux mid-layer assumes that sense
1708      * was retrieved anytime the first byte of
1709      * the sense buffer looks "sane".
1710      */
1711     cmd->sense_buffer[0] = 0;
1712     if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
1713 #ifdef AHC_REPORT_UNDERFLOWS
1714         uint32_t amount_xferred;
1715 
1716         amount_xferred =
1717             ahc_get_transfer_length(scb) - ahc_get_residual(scb);
1718 #endif
1719         if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) {
1720 #ifdef AHC_DEBUG
1721             if ((ahc_debug & AHC_SHOW_MISC) != 0) {
1722                 ahc_print_path(ahc, scb);
1723                 printk("Set CAM_UNCOR_PARITY\n");
1724             }
1725 #endif
1726             ahc_set_transaction_status(scb, CAM_UNCOR_PARITY);
1727 #ifdef AHC_REPORT_UNDERFLOWS
1728         /*
1729          * This code is disabled by default as some
1730          * clients of the SCSI system do not properly
1731          * initialize the underflow parameter.  This
1732          * results in spurious termination of commands
1733          * that complete as expected (e.g. an underflow is
1734          * allowed because the command can return variable
1735          * amounts of data).
1736          */
1737         } else if (amount_xferred < scb->io_ctx->underflow) {
1738             u_int i;
1739 
1740             ahc_print_path(ahc, scb);
1741             printk("CDB:");
1742             for (i = 0; i < scb->io_ctx->cmd_len; i++)
1743                 printk(" 0x%x", scb->io_ctx->cmnd[i]);
1744             printk("\n");
1745             ahc_print_path(ahc, scb);
1746             printk("Saw underflow (%ld of %ld bytes). "
1747                    "Treated as error\n",
1748                 ahc_get_residual(scb),
1749                 ahc_get_transfer_length(scb));
1750             ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
1751 #endif
1752         } else {
1753             ahc_set_transaction_status(scb, CAM_REQ_CMP);
1754         }
1755     } else if (ahc_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
1756         ahc_linux_handle_scsi_status(ahc, cmd->device, scb);
1757     }
1758 
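         /*
          * Track successful completions while we are down to a single
          * opening; the throttle-recovery check below uses this count.
          */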
1759     if (dev->openings == 1
1760      && ahc_get_transaction_status(scb) == CAM_REQ_CMP
1761      && ahc_get_scsi_status(scb) != SAM_STAT_TASK_SET_FULL)
1762         dev->tag_success_count++;
1763     /*
1764      * Some devices deal with temporary internal resource
1765      * shortages by returning queue full.  When the queue
1766      * full occurs, we throttle back and then slowly try to
1767      * get back to our previous queue depth.
1768      */
1769     if ((dev->openings + dev->active) < dev->maxtags
1770      && dev->tag_success_count > AHC_TAG_SUCCESS_INTERVAL) {
1771         dev->tag_success_count = 0;
1772         dev->openings++;
1773     }
1774 
1775     if (dev->active == 0)
1776         dev->commands_since_idle_or_otag = 0;
1777 
1778     if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
1779         printk("Recovery SCB completes\n");
1780         if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
1781          || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
1782             ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
1783 
1784         if (ahc->platform_data->eh_done)
1785             complete(ahc->platform_data->eh_done);
1786     }
1787 
1788     ahc_free_scb(ahc, scb);
1789     ahc_linux_queue_cmd_complete(ahc, cmd);
1790 }
1791 
1792 static void
1793 ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
1794                  struct scsi_device *sdev, struct scb *scb)
1795 {
1796     struct  ahc_devinfo devinfo;
1797     struct ahc_linux_device *dev = scsi_transport_device_data(sdev);
1798 
1799     ahc_compile_devinfo(&devinfo,
1800                 ahc->our_id,
1801                 sdev->sdev_target->id, sdev->lun,
1802                 sdev->sdev_target->channel == 0 ? 'A' : 'B',
1803                 ROLE_INITIATOR);
1804 
1805     /*
1806      * We don't currently trust the mid-layer to
1807      * properly deal with queue full or busy.  So,
1808      * when one occurs, we tell the mid-layer to
1809      * unconditionally requeue the command to us
1810      * so that we can retry it ourselves.  We also
1811      * implement our own throttling mechanism so
1812      * we don't clobber the device with too many
1813      * commands.
1814      */
1815     switch (ahc_get_scsi_status(scb)) {
1816     default:
1817         break;
1818     case SAM_STAT_CHECK_CONDITION:
1819     case SAM_STAT_COMMAND_TERMINATED:
1820     {
1821         struct scsi_cmnd *cmd;
1822 
1823         /*
1824          * Copy sense information to the OS's cmd
1825          * structure if it is available.
1826          */
1827         cmd = scb->io_ctx;
1828         if (scb->flags & SCB_SENSE) {
1829             u_int sense_size;
1830 
1831             sense_size = min(sizeof(struct scsi_sense_data)
1832                        - ahc_get_sense_residual(scb),
1833                      (u_long)SCSI_SENSE_BUFFERSIZE);
1834             memcpy(cmd->sense_buffer,
1835                    ahc_get_sense_buf(ahc, scb), sense_size);
1836             if (sense_size < SCSI_SENSE_BUFFERSIZE)
1837                 memset(&cmd->sense_buffer[sense_size], 0,
1838                        SCSI_SENSE_BUFFERSIZE - sense_size);
1839 #ifdef AHC_DEBUG
1840             if (ahc_debug & AHC_SHOW_SENSE) {
1841                 int i;
1842 
1843                 printk("Copied %d bytes of sense data:",
1844                        sense_size);
1845                 for (i = 0; i < sense_size; i++) {
1846                     if ((i & 0xF) == 0)
1847                         printk("\n");
1848                     printk("0x%x ", cmd->sense_buffer[i]);
1849                 }
1850                 printk("\n");
1851             }
1852 #endif
1853         }
1854         break;
1855     }
1856     case SAM_STAT_TASK_SET_FULL:
1857     {
1858         /*
1859          * By the time the core driver has returned this
1860          * command, all other commands that were queued
1861          * to us but not the device have been returned.
1862          * This ensures that dev->active is equal to
1863          * the number of commands actually queued to
1864          * the device.
1865          */
1866         dev->tag_success_count = 0;
1867         if (dev->active != 0) {
1868             /*
1869              * Drop our opening count to the number
1870              * of commands currently outstanding.
1871              */
1872             dev->openings = 0;
1873 /*
1874             ahc_print_path(ahc, scb);
1875             printk("Dropping tag count to %d\n", dev->active);
1876  */
1877             if (dev->active == dev->tags_on_last_queuefull) {
1878 
1879                 dev->last_queuefull_same_count++;
1880                 /*
1881                  * If we repeatedly see a queue full
1882                  * at the same queue depth, this
1883                  * device has a fixed number of tag
1884                  * slots.  Lock in this tag depth
1885                  * so we stop seeing queue fulls from
1886                  * this device.
1887                  */
1888                 if (dev->last_queuefull_same_count
1889                  == AHC_LOCK_TAGS_COUNT) {
1890                     dev->maxtags = dev->active;
1891                     ahc_print_path(ahc, scb);
1892                     printk("Locking max tag count at %d\n",
1893                            dev->active);
1894                 }
1895             } else {
1896                 dev->tags_on_last_queuefull = dev->active;
1897                 dev->last_queuefull_same_count = 0;
1898             }
1899             ahc_set_transaction_status(scb, CAM_REQUEUE_REQ);
1900             ahc_set_scsi_status(scb, SAM_STAT_GOOD);
1901             ahc_platform_set_tags(ahc, sdev, &devinfo,
1902                      (dev->flags & AHC_DEV_Q_BASIC)
1903                    ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
1904             break;
1905         }
1906         /*
1907          * Drop down to a single opening, and treat this
1908          * as if the target returned BUSY SCSI status.
1909          */
1910         dev->openings = 1;
1911         ahc_set_scsi_status(scb, SAM_STAT_BUSY);
1912         ahc_platform_set_tags(ahc, sdev, &devinfo,
1913                  (dev->flags & AHC_DEV_Q_BASIC)
1914                ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
1915         break;
1916     }
1917     }
1918 }
1919 
1920 static void
1921 ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, struct scsi_cmnd *cmd)
1922 {
1923     /*
1924      * Map CAM error codes into Linux error codes.  The
1925      * conversion is deferred until this point so that code
1926      * making state change decisions (historically the DV
1927      * code) still has the full CAM status available.
1928      */
1929     {
1930         u_int new_status;
1931 
1932         switch (ahc_cmd_get_transaction_status(cmd)) {
1933         case CAM_REQ_INPROG:
1934         case CAM_REQ_CMP:
1935         case CAM_SCSI_STATUS_ERROR:
1936             new_status = DID_OK;
1937             break;
1938         case CAM_REQ_ABORTED:
1939             new_status = DID_ABORT;
1940             break;
1941         case CAM_BUSY:
1942             new_status = DID_BUS_BUSY;
1943             break;
1944         case CAM_REQ_INVALID:
1945         case CAM_PATH_INVALID:
1946             new_status = DID_BAD_TARGET;
1947             break;
1948         case CAM_SEL_TIMEOUT:
1949             new_status = DID_NO_CONNECT;
1950             break;
1951         case CAM_SCSI_BUS_RESET:
1952         case CAM_BDR_SENT:
1953             new_status = DID_RESET;
1954             break;
1955         case CAM_UNCOR_PARITY:
1956             new_status = DID_PARITY;
1957             break;
1958         case CAM_CMD_TIMEOUT:
1959             new_status = DID_TIME_OUT;
1960             break;
1961         case CAM_UA_ABORT:
1962         case CAM_REQ_CMP_ERR:
1963         case CAM_AUTOSENSE_FAIL:
1964         case CAM_NO_HBA:
1965         case CAM_DATA_RUN_ERR:
1966         case CAM_UNEXP_BUSFREE:
1967         case CAM_SEQUENCE_FAIL:
1968         case CAM_CCB_LEN_ERR:
1969         case CAM_PROVIDE_FAIL:
1970         case CAM_REQ_TERMIO:
1971         case CAM_UNREC_HBA_ERROR:
1972         case CAM_REQ_TOO_BIG:
1973             new_status = DID_ERROR;
1974             break;
1975         case CAM_REQUEUE_REQ:
1976             new_status = DID_REQUEUE;
1977             break;
1978         default:
1979             /* We should never get here */
1980             new_status = DID_ERROR;
1981             break;
1982         }
1983 
1984         ahc_cmd_set_transaction_status(cmd, new_status);
1985     }
1986 
1987     scsi_done(cmd);
1988 }
1989 
1990 static void
1991 ahc_linux_freeze_simq(struct ahc_softc *ahc)
1992 {
1993     unsigned long s;
1994 
1995     ahc_lock(ahc, &s);
1996     ahc->platform_data->qfrozen++;
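         /*
          * Only the first freeze blocks the midlayer queue; nested
          * freezes simply bump the count.
          */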
1997     if (ahc->platform_data->qfrozen == 1) {
1998         scsi_block_requests(ahc->platform_data->host);
1999 
2000         /* XXX What about Twin channels? */
2001         ahc_platform_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
2002                     CAM_LUN_WILDCARD, SCB_LIST_NULL,
2003                     ROLE_INITIATOR, CAM_REQUEUE_REQ);
2004     }
2005     ahc_unlock(ahc, &s);
2006 }
2007 
2008 static void
2009 ahc_linux_release_simq(struct ahc_softc *ahc)
2010 {
2011     u_long s;
2012     int    unblock_reqs;
2013 
2014     unblock_reqs = 0;
2015     ahc_lock(ahc, &s);
2016     if (ahc->platform_data->qfrozen > 0)
2017         ahc->platform_data->qfrozen--;
2018     if (ahc->platform_data->qfrozen == 0)
2019         unblock_reqs = 1;
2020     ahc_unlock(ahc, &s);
2021     /*
2022      * There is still a race here.  The mid-layer
2023      * should keep its own freeze count and use
2024      * a bottom half handler to run the queues
2025      * so we can unblock with our own lock held.
2026      */
2027     if (unblock_reqs)
2028         scsi_unblock_requests(ahc->platform_data->host);
2029 }
2030 
2031 static int
2032 ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
2033 {
2034     struct ahc_softc *ahc;
2035     struct ahc_linux_device *dev;
2036     struct scb *pending_scb;
2037     u_int  saved_scbptr;
2038     u_int  active_scb_index;
2039     u_int  last_phase;
2040     u_int  saved_scsiid;
2041     u_int  cdb_byte;
2042     int    retval;
2043     int    was_paused;
2044     int    paused;
2045     int    wait;
2046     int    disconnected;
2047     unsigned long flags;
2048 
2049     pending_scb = NULL;
2050     paused = FALSE;
2051     wait = FALSE;
2052     ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
2053 
2054     scmd_printk(KERN_INFO, cmd, "Attempting to queue a%s message\n",
2055            flag == SCB_ABORT ? "n ABORT" : " TARGET RESET");
2056 
2057     printk("CDB:");
2058     for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
2059         printk(" 0x%x", cmd->cmnd[cdb_byte]);
2060     printk("\n");
2061 
2062     ahc_lock(ahc, &flags);
2063 
2064     /*
2065      * First determine if we currently own this command.
2066      * Start by searching the device queue.  If not found
2067      * there, check the pending_scb list.  If not found
2068      * at all, and the system wanted us to just abort the
2069      * command, return success.
2070      */
2071     dev = scsi_transport_device_data(cmd->device);
2072 
2073     if (dev == NULL) {
2074         /*
2075          * No target device for this command exists,
2076          * so we must not still own the command.
2077          */
2078         printk("%s:%d:%d:%d: Is not an active device\n",
2079                ahc_name(ahc), cmd->device->channel, cmd->device->id,
2080                (u8)cmd->device->lun);
2081         retval = SUCCESS;
2082         goto no_cmd;
2083     }
2084 
2085     if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0
2086      && ahc_search_untagged_queues(ahc, cmd, cmd->device->id,
2087                        cmd->device->channel + 'A',
2088                        (u8)cmd->device->lun,
2089                        CAM_REQ_ABORTED, SEARCH_COMPLETE) != 0) {
2090         printk("%s:%d:%d:%d: Command found on untagged queue\n",
2091                ahc_name(ahc), cmd->device->channel, cmd->device->id,
2092                (u8)cmd->device->lun);
2093         retval = SUCCESS;
2094         goto done;
2095     }
2096 
2097     /*
2098      * See if we can find a matching cmd in the pending list.
2099      */
2100     LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
2101         if (pending_scb->io_ctx == cmd)
2102             break;
2103     }
2104 
2105     if (pending_scb == NULL && flag == SCB_DEVICE_RESET) {
2106 
2107         /* Any SCB for this device will do for a target reset */
2108         LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
2109             if (ahc_match_scb(ahc, pending_scb, scmd_id(cmd),
2110                       scmd_channel(cmd) + 'A',
2111                       CAM_LUN_WILDCARD,
2112                       SCB_LIST_NULL, ROLE_INITIATOR))
2113                 break;
2114         }
2115     }
2116 
2117     if (pending_scb == NULL) {
2118         scmd_printk(KERN_INFO, cmd, "Command not found\n");
2119         goto no_cmd;
2120     }
2121 
2122     if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) {
2123         /*
2124          * We can't queue two recovery actions using the same SCB
2125          */
2126         retval = FAILED;
2127         goto  done;
2128     }
2129 
2130     /*
2131      * Ensure that the card doesn't do anything
2132      * behind our back and that we didn't "just" miss
2133      * an interrupt that would affect this cmd.
2134      */
2135     was_paused = ahc_is_paused(ahc);
2136     ahc_pause_and_flushwork(ahc);
2137     paused = TRUE;
2138 
2139     if ((pending_scb->flags & SCB_ACTIVE) == 0) {
2140         scmd_printk(KERN_INFO, cmd, "Command already completed\n");
2141         goto no_cmd;
2142     }
2143 
2144     printk("%s: At time of recovery, card was %spaused\n",
2145            ahc_name(ahc), was_paused ? "" : "not ");
2146     ahc_dump_card_state(ahc);
2147 
2148     disconnected = TRUE;
2149     if (flag == SCB_ABORT) {
2150         if (ahc_search_qinfifo(ahc, cmd->device->id,
2151                        cmd->device->channel + 'A',
2152                        cmd->device->lun,
2153                        pending_scb->hscb->tag,
2154                        ROLE_INITIATOR, CAM_REQ_ABORTED,
2155                        SEARCH_COMPLETE) > 0) {
2156             printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
2157                    ahc_name(ahc), cmd->device->channel,
2158                    cmd->device->id, (u8)cmd->device->lun);
2159             retval = SUCCESS;
2160             goto done;
2161         }
2162     } else if (ahc_search_qinfifo(ahc, cmd->device->id,
2163                       cmd->device->channel + 'A',
2164                       cmd->device->lun,
2165                       pending_scb->hscb->tag,
2166                       ROLE_INITIATOR, /*status*/0,
2167                       SEARCH_COUNT) > 0) {
2168         disconnected = FALSE;
2169     }
2170 
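         /*
          * If the sequencer has identified a connection, see whether
          * the SCB we are recovering is the one currently on the bus.
          */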
2171     if (disconnected && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) {
2172         struct scb *bus_scb;
2173 
2174         bus_scb = ahc_lookup_scb(ahc, ahc_inb(ahc, SCB_TAG));
2175         if (bus_scb == pending_scb)
2176             disconnected = FALSE;
2177         else if (flag != SCB_ABORT
2178               && ahc_inb(ahc, SAVED_SCSIID) == pending_scb->hscb->scsiid
2179               && ahc_inb(ahc, SAVED_LUN) == SCB_GET_LUN(pending_scb))
2180             disconnected = FALSE;
2181     }
2182 
2183     /*
2184      * At this point, pending_scb is the scb associated with the
2185      * passed in command.  That command is currently active on the
2186      * bus, is in the disconnected state, or we're hoping to find
2187      * a command for the same target active on the bus to abuse to
2188      * send a BDR.  Queue the appropriate message based on which of
2189      * these states we are in.
2190      */
2191     last_phase = ahc_inb(ahc, LASTPHASE);
2192     saved_scbptr = ahc_inb(ahc, SCBPTR);
2193     active_scb_index = ahc_inb(ahc, SCB_TAG);
2194     saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
2195     if (last_phase != P_BUSFREE
2196      && (pending_scb->hscb->tag == active_scb_index
2197       || (flag == SCB_DEVICE_RESET
2198        && SCSIID_TARGET(ahc, saved_scsiid) == scmd_id(cmd)))) {
2199 
2200         /*
2201          * We're active on the bus, so assert ATN
2202          * and hope that the target responds.
2203          */
2204         pending_scb = ahc_lookup_scb(ahc, active_scb_index);
2205         pending_scb->flags |= SCB_RECOVERY_SCB|flag;
2206         ahc_outb(ahc, MSG_OUT, HOST_MSG);
2207         ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
2208         scmd_printk(KERN_INFO, cmd, "Device is active, asserting ATN\n");
2209         wait = TRUE;
2210     } else if (disconnected) {
2211 
2212         /*
2213          * Actually re-queue this SCB in an attempt
2214          * to select the device before it reconnects.
2215          * In either case (selection or reselection),
2216          * we will now issue the appropriate message
2217          * to the timed-out device.
2218          *
2219          * Set the MK_MESSAGE control bit indicating
2220          * that we desire to send a message.  We
2221          * also set the disconnected flag since
2222          * in the paging case there is no guarantee
2223          * that our SCB control byte matches the
2224          * version on the card.  We don't want the
2225          * sequencer to abort the command thinking
2226          * an unsolicited reselection occurred.
2227          */
2228         pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
2229         pending_scb->flags |= SCB_RECOVERY_SCB|flag;
2230 
2231         /*
2232          * Remove any cached copy of this SCB in the
2233          * disconnected list in preparation for the
2234          * queuing of our abort SCB.  We use the
2235          * same element in the SCB, SCB_NEXT, for
2236          * both the qinfifo and the disconnected list.
2237          */
2238         ahc_search_disc_list(ahc, cmd->device->id,
2239                      cmd->device->channel + 'A',
2240                      cmd->device->lun, pending_scb->hscb->tag,
2241                      /*stop_on_first*/TRUE,
2242                      /*remove*/TRUE,
2243                      /*save_state*/FALSE);
2244 
2245         /*
2246          * In the non-paging case, the sequencer will
2247          * never re-reference the in-core SCB.
2248          * To make sure we are notified during
2249          * reselection, set the MK_MESSAGE flag in
2250          * the card's copy of the SCB.
2251          */
2252         if ((ahc->flags & AHC_PAGESCBS) == 0) {
2253             ahc_outb(ahc, SCBPTR, pending_scb->hscb->tag);
2254             ahc_outb(ahc, SCB_CONTROL,
2255                  ahc_inb(ahc, SCB_CONTROL)|MK_MESSAGE);
2256         }
2257 
2258         /*
2259          * Clear out any entries in the QINFIFO first
2260          * so we are the next SCB for this target
2261          * to run.
2262          */
2263         ahc_search_qinfifo(ahc, cmd->device->id,
2264                    cmd->device->channel + 'A',
2265                    cmd->device->lun, SCB_LIST_NULL,
2266                    ROLE_INITIATOR, CAM_REQUEUE_REQ,
2267                    SEARCH_COMPLETE);
2268         ahc_qinfifo_requeue_tail(ahc, pending_scb);
2269         ahc_outb(ahc, SCBPTR, saved_scbptr);
2270         ahc_print_path(ahc, pending_scb);
2271         printk("Device is disconnected, re-queuing SCB\n");
2272         wait = TRUE;
2273     } else {
2274         scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n");
2275         retval = FAILED;
2276         goto done;
2277     }
2278 
2279 no_cmd:
2280     /*
2281      * Our assumption is that if we don't have the command, no
2282      * recovery action was required, so we return success.  Again,
2283      * the semantics of the mid-layer recovery engine are not
2284      * well defined, so this may change in time.
2285      */
2286     retval = SUCCESS;
2287 done:
2288     if (paused)
2289         ahc_unpause(ahc);
2290     if (wait) {
2291         DECLARE_COMPLETION_ONSTACK(done);
2292 
2293         ahc->platform_data->eh_done = &done;
2294         ahc_unlock(ahc, &flags);
2295 
2296         printk("Recovery code sleeping\n");
2297         if (!wait_for_completion_timeout(&done, 5 * HZ)) {
2298             ahc_lock(ahc, &flags);
2299             ahc->platform_data->eh_done = NULL;
2300             ahc_unlock(ahc, &flags);
2301 
2302             printk("Timer Expired\n");
2303             retval = FAILED;
2304         }
2305         printk("Recovery code awake\n");
2306     } else
2307         ahc_unlock(ahc, &flags);
2308     return (retval);
2309 }
2310 
2311 static void ahc_linux_set_width(struct scsi_target *starget, int width)
2312 {
2313     struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2314     struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
2315     struct ahc_devinfo devinfo;
2316     unsigned long flags;
2317 
2318     ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2319                 starget->channel + 'A', ROLE_INITIATOR);
2320     ahc_lock(ahc, &flags);
2321     ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);
2322     ahc_unlock(ahc, &flags);
2323 }
2324 
2325 static void ahc_linux_set_period(struct scsi_target *starget, int period)
2326 {
2327     struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2328     struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
2329     struct ahc_tmode_tstate *tstate;
2330     struct ahc_initiator_tinfo *tinfo
2331         = ahc_fetch_transinfo(ahc,
2332                       starget->channel + 'A',
2333                       shost->this_id, starget->id, &tstate);
2334     struct ahc_devinfo devinfo;
2335     unsigned int ppr_options = tinfo->goal.ppr_options;
2336     unsigned long flags;
2337     unsigned long offset = tinfo->goal.offset;
2338     const struct ahc_syncrate *syncrate;
2339 
2340     if (offset == 0)
2341         offset = MAX_OFFSET;
2342 
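         /*
          * 'period' is the SPI transport period factor
          * (9 = 12.5 ns, 10 = 25 ns; larger values are slower).
          */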
2343     if (period < 9)
2344         period = 9; /* 12.5ns is our minimum */
2345     if (period == 9) {
2346         if (spi_max_width(starget))
2347             ppr_options |= MSG_EXT_PPR_DT_REQ;
2348         else
2349             /* need wide for DT and need DT for 12.5 ns */
2350             period = 10;
2351     }
2352 
2353     ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2354                 starget->channel + 'A', ROLE_INITIATOR);
2355 
2356     /* all PPR requests apart from QAS require wide transfers */
2357     if (ppr_options & ~MSG_EXT_PPR_QAS_REQ) {
2358         if (spi_width(starget) == 0)
2359             ppr_options &= MSG_EXT_PPR_QAS_REQ;
2360     }
2361 
2362     syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
2363                      AHC_SYNCRATE_DT);
2364     ahc_lock(ahc, &flags);
2365     ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
2366              ppr_options, AHC_TRANS_GOAL, FALSE);
2367     ahc_unlock(ahc, &flags);
2368 }
2369 
2370 static void ahc_linux_set_offset(struct scsi_target *starget, int offset)
2371 {
2372     struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2373     struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
2374     struct ahc_tmode_tstate *tstate;
2375     struct ahc_initiator_tinfo *tinfo
2376         = ahc_fetch_transinfo(ahc,
2377                       starget->channel + 'A',
2378                       shost->this_id, starget->id, &tstate);
2379     struct ahc_devinfo devinfo;
2380     unsigned int ppr_options = 0;
2381     unsigned int period = 0;
2382     unsigned long flags;
2383     const struct ahc_syncrate *syncrate = NULL;
2384 
2385     ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2386                 starget->channel + 'A', ROLE_INITIATOR);
2387     if (offset != 0) {
2388         period = tinfo->goal.period;
2389         ppr_options = tinfo->goal.ppr_options;
2390         syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
2391                          AHC_SYNCRATE_DT);
2392     }
2393     ahc_lock(ahc, &flags);
2394     ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
2395              ppr_options, AHC_TRANS_GOAL, FALSE);
2396     ahc_unlock(ahc, &flags);
2397 }
2398 
2399 static void ahc_linux_set_dt(struct scsi_target *starget, int dt)
2400 {
2401     struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2402     struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
2403     struct ahc_tmode_tstate *tstate;
2404     struct ahc_initiator_tinfo *tinfo
2405         = ahc_fetch_transinfo(ahc,
2406                       starget->channel + 'A',
2407                       shost->this_id, starget->id, &tstate);
2408     struct ahc_devinfo devinfo;
2409     unsigned int ppr_options = tinfo->goal.ppr_options
2410         & ~MSG_EXT_PPR_DT_REQ;
2411     unsigned int period = tinfo->goal.period;
2412     unsigned int width = tinfo->goal.width;
2413     unsigned long flags;
2414     const struct ahc_syncrate *syncrate;
2415 
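         /*
          * DT requires wide transfers, so widen the goal if the
          * target supports wide but is currently narrow.
          */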
2416     if (dt && spi_max_width(starget)) {
2417         ppr_options |= MSG_EXT_PPR_DT_REQ;
2418         if (!width)
2419             ahc_linux_set_width(starget, 1);
2420     } else if (period == 9)
2421         period = 10;    /* if resetting DT, period must be >= 25ns */
2422 
2423     ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2424                 starget->channel + 'A', ROLE_INITIATOR);
2425     syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
2426                      AHC_SYNCRATE_DT);
2427     ahc_lock(ahc, &flags);
2428     ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
2429              ppr_options, AHC_TRANS_GOAL, FALSE);
2430     ahc_unlock(ahc, &flags);
2431 }
2432 
2433 #if 0
2434 /* FIXME: This code claims to support IU and QAS.  However, the actual
2435  * sequencer code and aic7xxx_core have no support for these parameters and
2436  * will get into a bad state if they're negotiated.  Do not enable this
2437  * unless you know what you're doing. */
2438 static void ahc_linux_set_qas(struct scsi_target *starget, int qas)
2439 {
2440     struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2441     struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
2442     struct ahc_tmode_tstate *tstate;
2443     struct ahc_initiator_tinfo *tinfo
2444         = ahc_fetch_transinfo(ahc,
2445                       starget->channel + 'A',
2446                       shost->this_id, starget->id, &tstate);
2447     struct ahc_devinfo devinfo;
2448     unsigned int ppr_options = tinfo->goal.ppr_options
2449         & ~MSG_EXT_PPR_QAS_REQ;
2450     unsigned int period = tinfo->goal.period;
2451     unsigned long flags;
2452     struct ahc_syncrate *syncrate;
2453 
2454     if (qas)
2455         ppr_options |= MSG_EXT_PPR_QAS_REQ;
2456 
2457     ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2458                 starget->channel + 'A', ROLE_INITIATOR);
2459     syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
2460                      AHC_SYNCRATE_DT);
2461     ahc_lock(ahc, &flags);
2462     ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
2463              ppr_options, AHC_TRANS_GOAL, FALSE);
2464     ahc_unlock(ahc, &flags);
2465 }
2466 
2467 static void ahc_linux_set_iu(struct scsi_target *starget, int iu)
2468 {
2469     struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2470     struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
2471     struct ahc_tmode_tstate *tstate;
2472     struct ahc_initiator_tinfo *tinfo
2473         = ahc_fetch_transinfo(ahc,
2474                       starget->channel + 'A',
2475                       shost->this_id, starget->id, &tstate);
2476     struct ahc_devinfo devinfo;
2477     unsigned int ppr_options = tinfo->goal.ppr_options
2478         & ~MSG_EXT_PPR_IU_REQ;
2479     unsigned int period = tinfo->goal.period;
2480     unsigned long flags;
2481     struct ahc_syncrate *syncrate;
2482 
2483     if (iu)
2484         ppr_options |= MSG_EXT_PPR_IU_REQ;
2485 
2486     ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2487                 starget->channel + 'A', ROLE_INITIATOR);
2488     syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
2489                      AHC_SYNCRATE_DT);
2490     ahc_lock(ahc, &flags);
2491     ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
2492              ppr_options, AHC_TRANS_GOAL, FALSE);
2493     ahc_unlock(ahc, &flags);
2494 }
2495 #endif
2496 
2497 static void ahc_linux_get_signalling(struct Scsi_Host *shost)
2498 {
2499     struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata;
2500     unsigned long flags;
2501     u8 mode;
2502 
2503     if (!(ahc->features & AHC_ULTRA2)) {
2504         /* non-LVD chipset, may not have SBLKCTL reg */
2505         spi_signalling(shost) =
2506             ahc->features & AHC_HVD ?
2507             SPI_SIGNAL_HVD :
2508             SPI_SIGNAL_SE;
2509         return;
2510     }
2511 
2512     ahc_lock(ahc, &flags);
2513     ahc_pause(ahc);
2514     mode = ahc_inb(ahc, SBLKCTL);
2515     ahc_unpause(ahc);
2516     ahc_unlock(ahc, &flags);
2517 
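         /*
          * SBLKCTL reports which transceivers are active:
          * ENAB40 for LVD, ENAB20 for single-ended.
          */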
2518     if (mode & ENAB40)
2519         spi_signalling(shost) = SPI_SIGNAL_LVD;
2520     else if (mode & ENAB20)
2521         spi_signalling(shost) = SPI_SIGNAL_SE;
2522     else
2523         spi_signalling(shost) = SPI_SIGNAL_UNKNOWN;
2524 }
2525 
2526 static struct spi_function_template ahc_linux_transport_functions = {
2527     .set_offset = ahc_linux_set_offset,
2528     .show_offset    = 1,
2529     .set_period = ahc_linux_set_period,
2530     .show_period    = 1,
2531     .set_width  = ahc_linux_set_width,
2532     .show_width = 1,
2533     .set_dt     = ahc_linux_set_dt,
2534     .show_dt    = 1,
2535 #if 0
2536     .set_iu     = ahc_linux_set_iu,
2537     .show_iu    = 1,
2538     .set_qas    = ahc_linux_set_qas,
2539     .show_qas   = 1,
2540 #endif
2541     .get_signalling = ahc_linux_get_signalling,
2542 };
2543 
2544 
2545 
2546 static int __init
2547 ahc_linux_init(void)
2548 {
2549     /*
2550      * If we've been passed any parameters, process them now.
2551      */
2552     if (aic7xxx)
2553         aic7xxx_setup(aic7xxx);
2554 
2555     ahc_linux_transport_template =
2556         spi_attach_transport(&ahc_linux_transport_functions);
2557     if (!ahc_linux_transport_template)
2558         return -ENODEV;
2559 
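         /*
          * Reserve room in each scsi_device's transport data for our
          * per-device state (struct ahc_linux_device).
          */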
2560     scsi_transport_reserve_device(ahc_linux_transport_template,
2561                       sizeof(struct ahc_linux_device));
2562 
2563     ahc_linux_pci_init();
2564     ahc_linux_eisa_init();
2565     return 0;
2566 }
2567 
2568 static void
2569 ahc_linux_exit(void)
2570 {
2571     ahc_linux_pci_exit();
2572     ahc_linux_eisa_exit();
2573     spi_release_transport(ahc_linux_transport_template);
2574 }
2575 
2576 module_init(ahc_linux_init);
2577 module_exit(ahc_linux_exit);