Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef _SCSI_SCSI_HOST_H
0003 #define _SCSI_SCSI_HOST_H
0004 
0005 #include <linux/device.h>
0006 #include <linux/list.h>
0007 #include <linux/types.h>
0008 #include <linux/workqueue.h>
0009 #include <linux/mutex.h>
0010 #include <linux/seq_file.h>
0011 #include <linux/blk-mq.h>
0012 #include <scsi/scsi.h>
0013 
0014 struct block_device;
0015 struct completion;
0016 struct module;
0017 struct scsi_cmnd;
0018 struct scsi_device;
0019 struct scsi_target;
0020 struct Scsi_Host;
0021 struct scsi_transport_template;
0022 
0023 
0024 #define SG_ALL  SG_CHUNK_SIZE
0025 
0026 #define MODE_UNKNOWN 0x00
0027 #define MODE_INITIATOR 0x01
0028 #define MODE_TARGET 0x02
0029 
struct scsi_host_template {
	/*
	 * Put fields referenced in IO submission path together in
	 * same cacheline
	 */

	/*
	 * Additional per-command data allocated for the driver.
	 */
	unsigned int cmd_size;

	/*
	 * The queuecommand function is used to queue up a scsi
	 * command block to the LLDD.  When the driver finished
	 * processing the command the done callback is invoked.
	 *
	 * If queuecommand returns 0, then the driver has accepted the
	 * command.  It must also push it to the HBA if the scsi_cmnd
	 * flag SCMD_LAST is set, or if the driver does not implement
	 * commit_rqs.  The done() function must be called on the command
	 * when the driver has finished with it. (you may call done on the
	 * command before queuecommand returns, but in this case you
	 * *must* return 0 from queuecommand).
	 *
	 * Queuecommand may also reject the command, in which case it may
	 * not touch the command and must not call done() for it.
	 *
	 * There are two possible rejection returns:
	 *
	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
	 *   allow commands to other devices serviced by this host.
	 *
	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
	 *   host temporarily.
	 *
	 * For compatibility, any other non-zero return is treated the
	 * same as SCSI_MLQUEUE_HOST_BUSY.
	 *
	 * NOTE: "temporarily" means either until the next command for
	 * this device/host completes, or a period of time determined by
	 * I/O pressure in the system if there are no other outstanding
	 * commands.
	 *
	 * STATUS: REQUIRED
	 */
	int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);

	/*
	 * The commit_rqs function is used to trigger a hardware
	 * doorbell after some requests have been queued with
	 * queuecommand, when an error is encountered before sending
	 * the request with SCMD_LAST set.
	 *
	 * STATUS: OPTIONAL
	 */
	void (*commit_rqs)(struct Scsi_Host *, u16);

	/* Module owning this template, used for module reference counting. */
	struct module *module;

	/* Driver name; also the fallback used when info() is not provided. */
	const char *name;

	/*
	 * The info function will return whatever useful information the
	 * developer sees fit.  If not provided, then the name field will
	 * be used instead.
	 *
	 * Status: OPTIONAL
	 */
	const char *(*info)(struct Scsi_Host *);

	/*
	 * Ioctl interface
	 *
	 * Status: OPTIONAL
	 */
	int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
		     void __user *arg);

#ifdef CONFIG_COMPAT
	/*
	 * Compat handler. Handle 32bit ABI.
	 * When unknown ioctl is passed return -ENOIOCTLCMD.
	 *
	 * Status: OPTIONAL
	 */
	int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg);
#endif

	/*
	 * NOTE(review): presumably set-up/tear-down hooks for the cmd_size
	 * bytes of driver-private per-command data — confirm against the
	 * midlayer (scsi_lib.c) before relying on exact call points.
	 */
	int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
	int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);

	/*
	 * This is an error handling strategy routine.  You don't need to
	 * define one of these if you don't want to - there is a default
	 * routine that is present that should work in most cases.  For those
	 * driver authors that have the inclination and ability to write their
	 * own strategy routine, this is where it is specified.  Note - the
	 * strategy routine is *ALWAYS* run in the context of the kernel eh
	 * thread.  Thus you are guaranteed to *NOT* be in an interrupt
	 * handler when you execute this, and you are also guaranteed to
	 * *NOT* have any other commands being queued while you are in the
	 * strategy routine. When you return from this function, operations
	 * return to normal.
	 *
	 * See scsi_error.c scsi_unjam_host for additional comments about
	 * what this function should and should not be attempting to do.
	 *
	 * Status: REQUIRED (at least one of them)
	 */
	int (* eh_abort_handler)(struct scsi_cmnd *);
	int (* eh_device_reset_handler)(struct scsi_cmnd *);
	int (* eh_target_reset_handler)(struct scsi_cmnd *);
	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
	int (* eh_host_reset_handler)(struct scsi_cmnd *);

	/*
	 * Before the mid layer attempts to scan for a new device where none
	 * currently exists, it will call this entry in your driver.  Should
	 * your driver need to allocate any structs or perform any other init
	 * items in order to send commands to a currently unused target/lun
	 * combo, then this is where you can perform those allocations.  This
	 * is specifically so that drivers won't have to perform any kind of
	 * "is this a new device" checks in their queuecommand routine,
	 * thereby making the hot path a bit quicker.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Deallocation:  If we didn't find any devices at this ID, you will
	 * get an immediate call to slave_destroy().  If we find something
	 * here then you will get a call to slave_configure(), then the
	 * device will be used for however long it is kept around, then when
	 * the device is removed from the system (or possibly at reboot
	 * time), you will then get a call to slave_destroy().  This is
	 * assuming you implement slave_configure and slave_destroy.
	 * However, if you allocate memory and hang it off the device struct,
	 * then you must implement the slave_destroy() routine at a minimum
	 * in order to avoid leaking memory
	 * each time a device is torn down.
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_alloc)(struct scsi_device *);

	/*
	 * Once the device has responded to an INQUIRY and we know the
	 * device is online, we call into the low level driver with the
	 * struct scsi_device *.  If the low level device driver implements
	 * this function, it *must* perform the task of setting the queue
	 * depth on the device.  All other tasks are optional and depend
	 * on what the driver supports and various implementation details.
	 *
	 * Things currently recommended to be handled at this time include:
	 *
	 * 1.  Setting the device queue depth.  Proper setting of this is
	 *     described in the comments for scsi_change_queue_depth.
	 * 2.  Determining if the device supports the various synchronous
	 *     negotiation protocols.  The device struct will already have
	 *     responded to INQUIRY and the results of the standard items
	 *     will have been shoved into the various device flag bits, eg.
	 *     device->sdtr will be true if the device supports SDTR messages.
	 * 3.  Allocating command structs that the device will need.
	 * 4.  Setting the default timeout on this device (if needed).
	 * 5.  Anything else the low level driver might want to do on a device
	 *     specific setup basis...
	 * 6.  Return 0 on success, non-0 on error.  The device will be marked
	 *     as offline on error so that no access will occur.  If you return
	 *     non-0, your slave_destroy routine will never get called for this
	 *     device, so don't leave any loose memory hanging around, clean
	 *     up after yourself before returning non-0
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_configure)(struct scsi_device *);

	/*
	 * Immediately prior to deallocating the device and after all activity
	 * has ceased the mid layer calls this point so that the low level
	 * driver may completely detach itself from the scsi device and vice
	 * versa.  The low level driver is responsible for freeing any memory
	 * it allocated in the slave_alloc or slave_configure calls.
	 *
	 * Status: OPTIONAL
	 */
	void (* slave_destroy)(struct scsi_device *);

	/*
	 * Before the mid layer attempts to scan for a new device attached
	 * to a target where no target currently exists, it will call this
	 * entry in your driver.  Should your driver need to allocate any
	 * structs or perform any other init items in order to send commands
	 * to a currently unused target, then this is where you can perform
	 * those allocations.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Status: OPTIONAL
	 */
	int (* target_alloc)(struct scsi_target *);

	/*
	 * Immediately prior to deallocating the target structure, and
	 * after all activity to attached scsi devices has ceased, the
	 * midlayer calls this point so that the driver may deallocate
	 * and terminate any references to the target.
	 *
	 * Status: OPTIONAL
	 */
	void (* target_destroy)(struct scsi_target *);

	/*
	 * If a host has the ability to discover targets on its own instead
	 * of scanning the entire bus, it can fill in this function and
	 * call scsi_scan_host().  This function will be called periodically
	 * until it returns 1 with the scsi_host and the elapsed time of
	 * the scan in jiffies.
	 *
	 * Status: OPTIONAL
	 */
	int (* scan_finished)(struct Scsi_Host *, unsigned long);

	/*
	 * If the host wants to be called before the scan starts, but
	 * after the midlayer has set up ready for the scan, it can fill
	 * in this function.
	 *
	 * Status: OPTIONAL
	 */
	void (* scan_start)(struct Scsi_Host *);

	/*
	 * Fill in this function to allow the queue depth of this host
	 * to be changeable (on a per device basis).  Returns either
	 * the current queue depth setting (may be different from what
	 * was passed in) or an error.  An error should only be
	 * returned if the requested depth is legal but the driver was
	 * unable to set it.  If the requested depth is illegal, the
	 * driver should set and return the closest legal queue depth.
	 *
	 * Status: OPTIONAL
	 */
	int (* change_queue_depth)(struct scsi_device *, int);

	/*
	 * This functions lets the driver expose the queue mapping
	 * to the block layer.
	 *
	 * Status: OPTIONAL
	 */
	int (* map_queues)(struct Scsi_Host *shost);

	/*
	 * SCSI interface of blk_poll - poll for IO completions.
	 * Only applicable if SCSI LLD exposes multiple h/w queues.
	 *
	 * Return value: Number of completed entries found.
	 *
	 * Status: OPTIONAL
	 */
	int (* mq_poll)(struct Scsi_Host *shost, unsigned int queue_num);

	/*
	 * Check if scatterlists need to be padded for DMA draining.
	 *
	 * Status: OPTIONAL
	 */
	bool (* dma_need_drain)(struct request *rq);

	/*
	 * This function determines the BIOS parameters for a given
	 * harddisk.  These tend to be numbers that are made up by
	 * the host adapter.  Parameters:
	 * size, device, list (heads, sectors, cylinders)
	 *
	 * Status: OPTIONAL
	 */
	int (* bios_param)(struct scsi_device *, struct block_device *,
			sector_t, int []);

	/*
	 * This function is called when one or more partitions on the
	 * device reach beyond the end of the device.
	 *
	 * Status: OPTIONAL
	 */
	void (*unlock_native_capacity)(struct scsi_device *);

	/*
	 * Can be used to export driver statistics and other infos to the
	 * world outside the kernel ie. userspace and it also provides an
	 * interface to feed the driver with information.
	 *
	 * Status: OBSOLETE
	 */
	int (*show_info)(struct seq_file *, struct Scsi_Host *);
	int (*write_info)(struct Scsi_Host *, char *, int);

	/*
	 * This is an optional routine that allows the transport to become
	 * involved when a scsi io timer fires. The return value tells the
	 * timer routine how to finish the io timeout handling.
	 *
	 * Status: OPTIONAL
	 */
	enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
	/*
	 * Optional routine that allows the transport to decide if a cmd
	 * is retryable. Return true if the transport is in a state the
	 * cmd should be retried on.
	 */
	bool (*eh_should_retry_cmd)(struct scsi_cmnd *scmd);

	/* This is an optional routine that allows transport to initiate
	 * LLD adapter or firmware reset using sysfs attribute.
	 *
	 * Return values: 0 on success, -ve value on failure.
	 *
	 * Status: OPTIONAL
	 */

	int (*host_reset)(struct Scsi_Host *shost, int reset_type);
/* reset_type values passed to host_reset() */
#define SCSI_ADAPTER_RESET	1
#define SCSI_FIRMWARE_RESET	2


	/*
	 * Name of proc directory
	 */
	const char *proc_name;

	/*
	 * Used to store the procfs directory if a driver implements the
	 * show_info method.
	 */
	struct proc_dir_entry *proc_dir;

	/*
	 * This determines if we will use a non-interrupt driven
	 * or an interrupt driven scheme.  It is set to the maximum number
	 * of simultaneous commands a single hw queue in HBA will accept.
	 */
	int can_queue;

	/*
	 * In many instances, especially where disconnect / reconnect are
	 * supported, our host also has an ID on the SCSI bus.  If this is
	 * the case, then it must be reserved.  Please set this_id to -1 if
	 * your setup is in single initiator mode, and the host lacks an
	 * ID.
	 */
	int this_id;

	/*
	 * This determines the degree to which the host adapter is capable
	 * of scatter-gather.
	 */
	unsigned short sg_tablesize;
	unsigned short sg_prot_tablesize;

	/*
	 * Set this if the host adapter has limitations beside segment count.
	 */
	unsigned int max_sectors;

	/*
	 * Maximum size in bytes of a single segment.
	 */
	unsigned int max_segment_size;

	/*
	 * DMA scatter gather segment boundary limit. A segment crossing this
	 * boundary will be split in two.
	 */
	unsigned long dma_boundary;

	unsigned long virt_boundary_mask;

	/*
	 * This specifies "machine infinity" for host templates which don't
	 * limit the transfer size.  Note this limit represents an absolute
	 * maximum, and may be over the transfer limits allowed for
	 * individual devices (e.g. 256 for SCSI-1).
	 */
#define SCSI_DEFAULT_MAX_SECTORS	1024

	/*
	 * True if this host adapter can make good use of linked commands.
	 * This will allow more than one command to be queued to a given
	 * unit on a given host.  Set this to the maximum number of command
	 * blocks to be provided for each device.  Set this to 1 for one
	 * command block per lun, 2 for two, etc.  Do not set this to 0.
	 * You should make sure that the host adapter will do the right thing
	 * before you try setting this above 1.
	 */
	short cmd_per_lun;

	/*
	 * present contains counter indicating how many boards of this
	 * type were found when we did the scan.
	 */
	unsigned char present;

	/* If use block layer to manage tags, this is tag allocation policy */
	int tag_alloc_policy;

	/*
	 * Track QUEUE_FULL events and reduce queue depth on demand.
	 */
	unsigned track_queue_depth:1;

	/*
	 * This specifies the mode that a LLD supports.
	 */
	unsigned supported_mode:2;

	/*
	 * True for emulated SCSI host adapters (e.g. ATAPI).
	 */
	unsigned emulated:1;

	/*
	 * True if the low-level driver performs its own reset-settle delays.
	 */
	unsigned skip_settle_delay:1;

	/* True if the controller does not support WRITE SAME */
	unsigned no_write_same:1;

	/* True if the host uses host-wide tagspace */
	unsigned host_tagset:1;

	/*
	 * Countdown for host blocking with no commands outstanding.
	 */
	unsigned int max_host_blocked;

	/*
	 * Default value for the blocking.  If the queue is empty,
	 * host_blocked counts down in the request_fn until it restarts
	 * host operations as zero is reached.
	 *
	 * FIXME: This should probably be a value in the template
	 */
#define SCSI_DEFAULT_HOST_BLOCKED	7

	/*
	 * Pointer to the SCSI host sysfs attribute groups, NULL terminated.
	 */
	const struct attribute_group **shost_groups;

	/*
	 * Pointer to the SCSI device attribute groups for this host,
	 * NULL terminated.
	 */
	const struct attribute_group **sdev_groups;

	/*
	 * Vendor Identifier associated with the host
	 *
	 * Note: When specifying vendor_id, be sure to read the
	 *   Vendor Type and ID formatting requirements specified in
	 *   scsi_netlink.h
	 */
	u64 vendor_id;

	/* Delay for runtime autosuspend */
	int rpm_autosuspend_delay;
};
0498 
/*
 * Temporary #define for host lock push down. Can be removed when all
 * drivers have been updated to take advantage of unlocked
 * queuecommand.
 *
 * Expands to a queuecommand() implementation named func_name that takes
 * the Scsi_Host lock (IRQ-safe) around a call to the driver's legacy
 * locked handler func_name##_lck(cmd), and returns its result.
 */
#define DEF_SCSI_QCMD(func_name) \
	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
	{								\
		unsigned long irq_flags;				\
		int rc;							\
		spin_lock_irqsave(shost->host_lock, irq_flags);		\
		rc = func_name##_lck(cmd);				\
		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
		return rc;						\
	}
0515 
0516 
/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_host_set_state()
 */
enum scsi_host_state {
	SHOST_CREATED = 1,	/* explicitly 1; zeroed memory is not a valid state — TODO confirm intent */
	SHOST_RUNNING,
	SHOST_CANCEL,
	SHOST_DEL,
	SHOST_RECOVERY,		/* error handling; see scsi_host_in_recovery() */
	SHOST_CANCEL_RECOVERY,	/* CANCEL requested while in RECOVERY */
	SHOST_DEL_RECOVERY,	/* DEL requested while in RECOVERY */
};
0531 
struct Scsi_Host {
	/*
	 * __devices is protected by the host_lock, but you should
	 * usually use scsi_device_lookup / shost_for_each_device
	 * to access it and don't care about locking yourself.
	 * In the rare case of being in irq context you can use
	 * their __ prefixed variants with the lock held. NEVER
	 * access this list directly from a driver.
	 */
	struct list_head	__devices;
	struct list_head	__targets;

	struct list_head	starved_list;

	spinlock_t		default_lock;
	spinlock_t		*host_lock;	/* usually points at default_lock — TODO confirm in hosts.c */

	struct mutex		scan_mutex;/* serialize scanning activity */

	struct list_head	eh_abort_list;
	struct list_head	eh_cmd_q;
	struct task_struct    * ehandler;  /* Error recovery thread. */
	struct completion     * eh_action; /* Wait for specific actions on the
					      host. */
	wait_queue_head_t       host_wait;
	struct scsi_host_template *hostt;
	struct scsi_transport_template *transportt;

	/*
	 * NOTE(review): presumably refcount/completion pair guarding
	 * teardown of tag_set — confirm against scsi_host_alloc/put paths.
	 */
	struct kref		tagset_refcnt;
	struct completion	tagset_freed;
	/* Area to keep a shared tag map */
	struct blk_mq_tag_set	tag_set;

	atomic_t host_blocked;

	unsigned int host_failed;	   /* commands that failed.
					      protected by host_lock */
	unsigned int host_eh_scheduled;    /* EH scheduled without command */

	unsigned int host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */

	/* next two fields are used to bound the time spent in error handling */
	int eh_deadline;
	unsigned long last_reset;


	/*
	 * These three parameters can be used to allow for wide scsi,
	 * and for host adapters that support multiple busses
	 * The last two should be set to 1 more than the actual max id
	 * or lun (e.g. 8 for SCSI parallel systems).
	 */
	unsigned int max_channel;
	unsigned int max_id;
	u64 max_lun;

	/*
	 * This is a unique identifier that must be assigned so that we
	 * have some way of identifying each detected host adapter properly
	 * and uniquely.  For hosts that do not support more than one card
	 * in the system at one time, this does not need to be set.  It is
	 * initialized to 0 in scsi_register.
	 */
	unsigned int unique_id;

	/*
	 * The maximum length of SCSI commands that this host can accept.
	 * Probably 12 for most host adapters, but could be 16 for others.
	 * or 260 if the driver supports variable length cdbs.
	 * For drivers that don't set this field, a value of 12 is
	 * assumed.
	 */
	unsigned short max_cmd_len;

	/*
	 * Host limits; see the identically-named, documented fields in
	 * struct scsi_host_template for their meaning.
	 */
	int this_id;
	int can_queue;
	short cmd_per_lun;
	short unsigned int sg_tablesize;
	short unsigned int sg_prot_tablesize;
	unsigned int max_sectors;
	unsigned int opt_sectors;	/* NOTE(review): presumably optimal transfer size in sectors — confirm */
	unsigned int max_segment_size;
	unsigned long dma_boundary;
	unsigned long virt_boundary_mask;
	/*
	 * In scsi-mq mode, the number of hardware queues supported by the LLD.
	 *
	 * Note: it is assumed that each hardware queue has a queue depth of
	 * can_queue. In other words, the total queue depth per host
	 * is nr_hw_queues * can_queue. However, for when host_tagset is set,
	 * the total queue depth is can_queue.
	 */
	unsigned nr_hw_queues;
	unsigned nr_maps;
	unsigned active_mode:2;

	/*
	 * Host has requested that no further requests come through for the
	 * time being.
	 */
	unsigned host_self_blocked:1;

	/*
	 * Host uses correct SCSI ordering not PC ordering. The bit is
	 * set for the minority of drivers whose authors actually read
	 * the spec ;).
	 */
	unsigned reverse_ordering:1;

	/* Task mgmt function in progress */
	unsigned tmf_in_progress:1;

	/* Asynchronous scan in progress */
	unsigned async_scan:1;

	/* Don't resume host in EH */
	unsigned eh_noresume:1;

	/* The controller does not support WRITE SAME */
	unsigned no_write_same:1;

	/* True if the host uses host-wide tagspace */
	unsigned host_tagset:1;

	/* Host responded with short (<36 bytes) INQUIRY result */
	unsigned short_inquiry:1;

	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
	unsigned no_scsi2_lun_in_cdb:1;

	/*
	 * Optional work queue to be utilized by the transport
	 */
	char work_q_name[20];
	struct workqueue_struct *work_q;

	/*
	 * Task management function work queue
	 */
	struct workqueue_struct *tmf_work_q;

	/*
	 * Value host_blocked counts down from
	 */
	unsigned int max_host_blocked;

	/* Protection Information */
	unsigned int prot_capabilities;
	unsigned char prot_guard_type;

	/* legacy crap */
	unsigned long base;
	unsigned long io_port;
	unsigned char n_io_port;
	unsigned char dma_channel;
	unsigned int  irq;


	enum scsi_host_state shost_state;

	/* ldm bits */
	struct device		shost_gendev, shost_dev;

	/*
	 * Points to the transport data (if any) which is allocated
	 * separately
	 */
	void *shost_data;

	/*
	 * Points to the physical bus device we'd use to do DMA
	 * Needed just in case we have virtual hosts.
	 */
	struct device *dma_dev;

	/*
	 * We should ensure that this is aligned, both for better performance
	 * and also because some compilers (m68k) don't automatically force
	 * alignment to a long boundary.
	 */
	unsigned long hostdata[]  /* Used for storage of host specific stuff */
		__attribute__ ((aligned (sizeof(unsigned long))));
};
0715 
/* Map the embedded shost_dev class device back to its containing Scsi_Host. */
#define		class_to_shost(d)	\
	container_of(d, struct Scsi_Host, shost_dev)

/* printk helper: prefix messages with the host's generic device name. */
#define shost_printk(prefix, shost, fmt, a...)	\
	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)
0721 
0722 static inline void *shost_priv(struct Scsi_Host *shost)
0723 {
0724     return (void *)shost->hostdata;
0725 }
0726 
0727 int scsi_is_host_device(const struct device *);
0728 
0729 static inline struct Scsi_Host *dev_to_shost(struct device *dev)
0730 {
0731     while (!scsi_is_host_device(dev)) {
0732         if (!dev->parent)
0733             return NULL;
0734         dev = dev->parent;
0735     }
0736     return container_of(dev, struct Scsi_Host, shost_gendev);
0737 }
0738 
0739 static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
0740 {
0741     return shost->shost_state == SHOST_RECOVERY ||
0742         shost->shost_state == SHOST_CANCEL_RECOVERY ||
0743         shost->shost_state == SHOST_DEL_RECOVERY ||
0744         shost->tmf_in_progress;
0745 }
0746 
0747 extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
0748 extern void scsi_flush_work(struct Scsi_Host *);
0749 
0750 extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
0751 extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
0752                            struct device *,
0753                            struct device *);
0754 extern void scsi_scan_host(struct Scsi_Host *);
0755 extern void scsi_rescan_device(struct device *);
0756 extern void scsi_remove_host(struct Scsi_Host *);
0757 extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
0758 extern int scsi_host_busy(struct Scsi_Host *shost);
0759 extern void scsi_host_put(struct Scsi_Host *t);
0760 extern struct Scsi_Host *scsi_host_lookup(unsigned short);
0761 extern const char *scsi_host_state_name(enum scsi_host_state);
0762 extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
0763                         enum scsi_host_status status);
0764 
0765 static inline int __must_check scsi_add_host(struct Scsi_Host *host,
0766                          struct device *dev)
0767 {
0768     return scsi_add_host_with_dma(host, dev, dev);
0769 }
0770 
0771 static inline struct device *scsi_get_device(struct Scsi_Host *shost)
0772 {
0773         return shost->shost_gendev.parent;
0774 }
0775 
0776 /**
0777  * scsi_host_scan_allowed - Is scanning of this host allowed
0778  * @shost:  Pointer to Scsi_Host.
0779  **/
0780 static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
0781 {
0782     return shost->shost_state == SHOST_RUNNING ||
0783            shost->shost_state == SHOST_RECOVERY;
0784 }
0785 
0786 extern void scsi_unblock_requests(struct Scsi_Host *);
0787 extern void scsi_block_requests(struct Scsi_Host *);
0788 extern int scsi_host_block(struct Scsi_Host *shost);
0789 extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state);
0790 
0791 void scsi_host_busy_iter(struct Scsi_Host *,
0792              bool (*fn)(struct scsi_cmnd *, void *), void *priv);
0793 
0794 struct class_container;
0795 
/*
 * DIF defines the exchange of protection information between
 * initiator and SBC block device.
 *
 * DIX defines the exchange of protection information between OS and
 * initiator.
 *
 * These flags are combined into the mask stored in
 * Scsi_Host.prot_capabilities (see scsi_host_set_prot()).
 */
enum scsi_host_prot_capabilities {
	SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
	SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
	SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */

	SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
	SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
	SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
	SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
};
0813 
/*
 * SCSI hosts which support the Data Integrity Extensions must
 * indicate their capabilities by setting the prot_capabilities using
 * this call.
 */
static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
{
	/* Overwrites (does not OR into) the full capability mask. */
	shost->prot_capabilities = mask;
}
0823 
/* Return the protection capability mask set via scsi_host_set_prot(). */
static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
{
	return shost->prot_capabilities;
}
0828 
0829 static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
0830 {
0831     return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
0832 }
0833 
0834 static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
0835 {
0836     static unsigned char cap[] = { 0,
0837                        SHOST_DIF_TYPE1_PROTECTION,
0838                        SHOST_DIF_TYPE2_PROTECTION,
0839                        SHOST_DIF_TYPE3_PROTECTION };
0840 
0841     if (target_type >= ARRAY_SIZE(cap))
0842         return 0;
0843 
0844     return shost->prot_capabilities & cap[target_type] ? target_type : 0;
0845 }
0846 
/*
 * Return the DIX capability bit matching @target_type if the host
 * advertises it; always 0 when CONFIG_BLK_DEV_INTEGRITY is not enabled
 * or when @target_type is out of range.
 */
static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
				       SHOST_DIX_TYPE1_PROTECTION,
				       SHOST_DIX_TYPE2_PROTECTION,
				       SHOST_DIX_TYPE3_PROTECTION };

	if (target_type < ARRAY_SIZE(cap))
		return shost->prot_capabilities & cap[target_type];
#endif
	return 0;
}
0862 
/*
 * All DIX-capable initiators must support the T10-mandated CRC
 * checksum.  Controllers can optionally implement the IP checksum
 * scheme which has much lower impact on system performance.  Note
 * that the main rationale for the checksum is to match integrity
 * metadata with data.  Detecting bit errors are a job for ECC memory
 * and buses.
 */

enum scsi_host_guard_type {
	SHOST_DIX_GUARD_CRC = 1 << 0,	/* T10-mandated CRC checksum */
	SHOST_DIX_GUARD_IP  = 1 << 1,	/* optional, cheaper IP checksum */
};
0876 
/* Record which guard type(s) (enum scsi_host_guard_type) the host uses. */
static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
{
	shost->prot_guard_type = type;
}
0881 
/* Return the guard type recorded by scsi_host_set_guard(). */
static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
{
	return shost->prot_guard_type;
}
0886 
0887 extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);
0888 
0889 #endif /* _SCSI_SCSI_HOST_H */