// SPDX-License-Identifier: GPL-2.0-only
/*
 * SCSI RDMA (SRP) transport class
 *
 * Copyright (C) 2007 FUJITA Tomonori <tomof@acm.org>
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_srp.h>
#include "scsi_priv.h"

struct srp_host_attrs {
    atomic_t next_port_id;
};
#define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)

#define SRP_HOST_ATTRS 0
#define SRP_RPORT_ATTRS 8

struct srp_internal {
    struct scsi_transport_template t;
    struct srp_function_template *f;

    struct device_attribute *host_attrs[SRP_HOST_ATTRS + 1];

    struct device_attribute *rport_attrs[SRP_RPORT_ATTRS + 1];
    struct transport_container rport_attr_cont;
};

static int scsi_is_srp_rport(const struct device *dev);

#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)

#define dev_to_rport(d) container_of(d, struct srp_rport, dev)
#define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent)
static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
{
    return dev_to_shost(r->dev.parent);
}

static int find_child_rport(struct device *dev, void *data)
{
    struct device **child = data;

    if (scsi_is_srp_rport(dev)) {
        WARN_ON_ONCE(*child);
        *child = dev;
    }
    return 0;
}

static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
{
    struct device *child = NULL;

    WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child,
                       find_child_rport) < 0);
    return child ? dev_to_rport(child) : NULL;
}

/**
 * srp_tmo_valid() - check timeout combination validity
 * @reconnect_delay: Reconnect delay in seconds.
 * @fast_io_fail_tmo: Fast I/O fail timeout in seconds.
 * @dev_loss_tmo: Device loss timeout in seconds.
 *
 * The combination of the timeout parameters must be such that SCSI commands
 * are finished in a reasonable time. Hence do not allow the fast I/O fail
 * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT nor allow dev_loss_tmo to
 * exceed that limit if failing I/O fast has been disabled. Furthermore, these
 * parameters must be such that multipath can detect failed paths timely.
 * Hence do not allow all three parameters to be disabled simultaneously.
 */
int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, long dev_loss_tmo)
{
    if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0)
        return -EINVAL;
    if (reconnect_delay == 0)
        return -EINVAL;
    if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
        return -EINVAL;
    if (fast_io_fail_tmo < 0 &&
        dev_loss_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
        return -EINVAL;
    if (dev_loss_tmo >= LONG_MAX / HZ)
        return -EINVAL;
    if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 &&
        fast_io_fail_tmo >= dev_loss_tmo)
        return -EINVAL;
    return 0;
}
EXPORT_SYMBOL_GPL(srp_tmo_valid);
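
/*
 * Illustrative sketch, not part of the original file: how an SRP LLD might
 * validate timeout values (e.g. from module parameters) before applying
 * them. Only srp_tmo_valid() is assumed; the surrounding function is
 * hypothetical and compiled out.
 */
#if 0   /* example only */
static int example_check_tmo(void)
{
    /* 10 s between reconnects, fail I/O after 15 s, lose port after 600 s */
    int ret = srp_tmo_valid(10, 15, 600);   /* returns 0 */

    /* Disabling all three mechanisms at once is rejected: */
    WARN_ON(srp_tmo_valid(-1, -1, -1) != -EINVAL);
    return ret;
}
#endif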

static int srp_host_setup(struct transport_container *tc, struct device *dev,
              struct device *cdev)
{
    struct Scsi_Host *shost = dev_to_shost(dev);
    struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);

    atomic_set(&srp_host->next_port_id, 0);
    return 0;
}

static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
                   NULL, NULL);

static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
                   NULL, NULL, NULL);

static ssize_t
show_srp_rport_id(struct device *dev, struct device_attribute *attr,
          char *buf)
{
    struct srp_rport *rport = transport_class_to_srp_rport(dev);
    return sprintf(buf, "%16phC\n", rport->port_id);
}

static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL);

static const struct {
    u32 value;
    char *name;
} srp_rport_role_names[] = {
    {SRP_RPORT_ROLE_INITIATOR, "SRP Initiator"},
    {SRP_RPORT_ROLE_TARGET, "SRP Target"},
};

static ssize_t
show_srp_rport_roles(struct device *dev, struct device_attribute *attr,
             char *buf)
{
    struct srp_rport *rport = transport_class_to_srp_rport(dev);
    int i;
    char *name = NULL;

    for (i = 0; i < ARRAY_SIZE(srp_rport_role_names); i++)
        if (srp_rport_role_names[i].value == rport->roles) {
            name = srp_rport_role_names[i].name;
            break;
        }
    return sprintf(buf, "%s\n", name ? : "unknown");
}

static DEVICE_ATTR(roles, S_IRUGO, show_srp_rport_roles, NULL);

static ssize_t store_srp_rport_delete(struct device *dev,
                      struct device_attribute *attr,
                      const char *buf, size_t count)
{
    struct srp_rport *rport = transport_class_to_srp_rport(dev);
    struct Scsi_Host *shost = dev_to_shost(dev);
    struct srp_internal *i = to_srp_internal(shost->transportt);

    if (i->f->rport_delete) {
        i->f->rport_delete(rport);
        return count;
    } else {
        return -ENOSYS;
    }
}

static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete);

static ssize_t show_srp_rport_state(struct device *dev,
                    struct device_attribute *attr,
                    char *buf)
{
    static const char *const state_name[] = {
        [SRP_RPORT_RUNNING] = "running",
        [SRP_RPORT_BLOCKED] = "blocked",
        [SRP_RPORT_FAIL_FAST]   = "fail-fast",
        [SRP_RPORT_LOST]    = "lost",
    };
    struct srp_rport *rport = transport_class_to_srp_rport(dev);
    enum srp_rport_state state = rport->state;

    return sprintf(buf, "%s\n",
               (unsigned)state < ARRAY_SIZE(state_name) ?
               state_name[state] : "???");
}

static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL);

static ssize_t srp_show_tmo(char *buf, int tmo)
{
    return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
}

int srp_parse_tmo(int *tmo, const char *buf)
{
    int res = 0;

    if (strncmp(buf, "off", 3) != 0)
        res = kstrtoint(buf, 0, tmo);
    else
        *tmo = -1;

    return res;
}
EXPORT_SYMBOL(srp_parse_tmo);
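
/*
 * Illustrative sketch, not part of the original file: srp_parse_tmo()
 * stores -1 for the literal string "off" and otherwise accepts anything
 * kstrtoint() can parse.
 */
#if 0   /* example only */
static void example_parse_tmo(void)
{
    int tmo;

    srp_parse_tmo(&tmo, "off");    /* tmo == -1, returns 0 */
    srp_parse_tmo(&tmo, "15");     /* tmo == 15, returns 0 */
}
#endif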

static ssize_t show_reconnect_delay(struct device *dev,
                    struct device_attribute *attr, char *buf)
{
    struct srp_rport *rport = transport_class_to_srp_rport(dev);

    return srp_show_tmo(buf, rport->reconnect_delay);
}

static ssize_t store_reconnect_delay(struct device *dev,
                     struct device_attribute *attr,
                     const char *buf, const size_t count)
{
    struct srp_rport *rport = transport_class_to_srp_rport(dev);
    int res, delay;

    res = srp_parse_tmo(&delay, buf);
    if (res)
        goto out;
    res = srp_tmo_valid(delay, rport->fast_io_fail_tmo,
                rport->dev_loss_tmo);
    if (res)
        goto out;

    if (rport->reconnect_delay <= 0 && delay > 0 &&
        rport->state != SRP_RPORT_RUNNING) {
        queue_delayed_work(system_long_wq, &rport->reconnect_work,
                   delay * HZ);
    } else if (delay <= 0) {
        cancel_delayed_work(&rport->reconnect_work);
    }
    rport->reconnect_delay = delay;
    res = count;

out:
    return res;
}

static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, show_reconnect_delay,
           store_reconnect_delay);

static ssize_t show_failed_reconnects(struct device *dev,
                      struct device_attribute *attr, char *buf)
{
    struct srp_rport *rport = transport_class_to_srp_rport(dev);

    return sprintf(buf, "%d\n", rport->failed_reconnects);
}

static DEVICE_ATTR(failed_reconnects, S_IRUGO, show_failed_reconnects, NULL);

static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
{
    struct srp_rport *rport = transport_class_to_srp_rport(dev);

    return srp_show_tmo(buf, rport->fast_io_fail_tmo);
}

static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev,
                        struct device_attribute *attr,
                        const char *buf, size_t count)
{
    struct srp_rport *rport = transport_class_to_srp_rport(dev);
    int res;
    int fast_io_fail_tmo;

    res = srp_parse_tmo(&fast_io_fail_tmo, buf);
    if (res)
        goto out;
    res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo,
                rport->dev_loss_tmo);
    if (res)
        goto out;
    rport->fast_io_fail_tmo = fast_io_fail_tmo;
    res = count;

out:
    return res;
}

static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
           show_srp_rport_fast_io_fail_tmo,
           store_srp_rport_fast_io_fail_tmo);

static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev,
                       struct device_attribute *attr,
                       char *buf)
{
    struct srp_rport *rport = transport_class_to_srp_rport(dev);

    return srp_show_tmo(buf, rport->dev_loss_tmo);
}

static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev,
                        struct device_attribute *attr,
                        const char *buf, size_t count)
{
    struct srp_rport *rport = transport_class_to_srp_rport(dev);
    int res;
    int dev_loss_tmo;

    res = srp_parse_tmo(&dev_loss_tmo, buf);
    if (res)
        goto out;
    res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo,
                dev_loss_tmo);
    if (res)
        goto out;
    rport->dev_loss_tmo = dev_loss_tmo;
    res = count;

out:
    return res;
}

static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR,
           show_srp_rport_dev_loss_tmo,
           store_srp_rport_dev_loss_tmo);

static int srp_rport_set_state(struct srp_rport *rport,
                   enum srp_rport_state new_state)
{
    enum srp_rport_state old_state = rport->state;

    lockdep_assert_held(&rport->mutex);

    switch (new_state) {
    case SRP_RPORT_RUNNING:
        switch (old_state) {
        case SRP_RPORT_LOST:
            goto invalid;
        default:
            break;
        }
        break;
    case SRP_RPORT_BLOCKED:
        switch (old_state) {
        case SRP_RPORT_RUNNING:
            break;
        default:
            goto invalid;
        }
        break;
    case SRP_RPORT_FAIL_FAST:
        switch (old_state) {
        case SRP_RPORT_LOST:
            goto invalid;
        default:
            break;
        }
        break;
    case SRP_RPORT_LOST:
        break;
    }
    rport->state = new_state;
    return 0;

invalid:
    return -EINVAL;
}
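
/*
 * Summary of the transitions accepted above, derived from the switch
 * statement (added commentary):
 *
 *    new state     allowed from
 *    RUNNING       any state except LOST
 *    BLOCKED       RUNNING only
 *    FAIL_FAST     any state except LOST
 *    LOST          any state
 *
 * In other words, LOST is terminal and blocking only applies to a running
 * rport.
 */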

/**
 * srp_reconnect_work() - reconnect and schedule a new attempt if necessary
 * @work: Work structure used for scheduling this operation.
 */
static void srp_reconnect_work(struct work_struct *work)
{
    struct srp_rport *rport = container_of(to_delayed_work(work),
                    struct srp_rport, reconnect_work);
    struct Scsi_Host *shost = rport_to_shost(rport);
    int delay, res;

    res = srp_reconnect_rport(rport);
    if (res != 0) {
        shost_printk(KERN_ERR, shost,
                 "reconnect attempt %d failed (%d)\n",
                 ++rport->failed_reconnects, res);
        delay = rport->reconnect_delay *
            min(100, max(1, rport->failed_reconnects - 10));
        if (delay > 0)
            queue_delayed_work(system_long_wq,
                       &rport->reconnect_work, delay * HZ);
    }
}
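
/*
 * Note on the back-off above (added commentary): the first eleven failed
 * attempts are retried every reconnect_delay seconds; from the twelfth
 * failure onwards the delay grows linearly, reconnect_delay * (n - 10),
 * capped at 100 * reconnect_delay. With the default delay of 10 s the
 * retry interval therefore ranges from 10 s up to 1000 s.
 */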

/*
 * scsi_target_block() must have been called before this function is
 * called to guarantee that no .queuecommand() calls are in progress.
 */
static void __rport_fail_io_fast(struct srp_rport *rport)
{
    struct Scsi_Host *shost = rport_to_shost(rport);
    struct srp_internal *i;

    lockdep_assert_held(&rport->mutex);

    if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
        return;

    scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);

    /* Involve the LLD if possible to terminate all I/O on the rport. */
    i = to_srp_internal(shost->transportt);
    if (i->f->terminate_rport_io)
        i->f->terminate_rport_io(rport);
}

/**
 * rport_fast_io_fail_timedout() - fast I/O failure timeout handler
 * @work: Work structure used for scheduling this operation.
 */
static void rport_fast_io_fail_timedout(struct work_struct *work)
{
    struct srp_rport *rport = container_of(to_delayed_work(work),
                    struct srp_rport, fast_io_fail_work);
    struct Scsi_Host *shost = rport_to_shost(rport);

    pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n",
        dev_name(&rport->dev), dev_name(&shost->shost_gendev));

    mutex_lock(&rport->mutex);
    if (rport->state == SRP_RPORT_BLOCKED)
        __rport_fail_io_fast(rport);
    mutex_unlock(&rport->mutex);
}

/**
 * rport_dev_loss_timedout() - device loss timeout handler
 * @work: Work structure used for scheduling this operation.
 */
static void rport_dev_loss_timedout(struct work_struct *work)
{
    struct srp_rport *rport = container_of(to_delayed_work(work),
                    struct srp_rport, dev_loss_work);
    struct Scsi_Host *shost = rport_to_shost(rport);
    struct srp_internal *i = to_srp_internal(shost->transportt);

    pr_info("dev_loss_tmo expired for SRP %s / %s.\n",
        dev_name(&rport->dev), dev_name(&shost->shost_gendev));

    mutex_lock(&rport->mutex);
    WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0);
    scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
    mutex_unlock(&rport->mutex);

    i->f->rport_delete(rport);
}

static void __srp_start_tl_fail_timers(struct srp_rport *rport)
{
    struct Scsi_Host *shost = rport_to_shost(rport);
    int delay, fast_io_fail_tmo, dev_loss_tmo;

    lockdep_assert_held(&rport->mutex);

    delay = rport->reconnect_delay;
    fast_io_fail_tmo = rport->fast_io_fail_tmo;
    dev_loss_tmo = rport->dev_loss_tmo;
    pr_debug("%s current state: %d\n", dev_name(&shost->shost_gendev),
         rport->state);

    if (rport->state == SRP_RPORT_LOST)
        return;
    if (delay > 0)
        queue_delayed_work(system_long_wq, &rport->reconnect_work,
                   1UL * delay * HZ);
    if ((fast_io_fail_tmo >= 0 || dev_loss_tmo >= 0) &&
        srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
        pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev),
             rport->state);
        scsi_target_block(&shost->shost_gendev);
        if (fast_io_fail_tmo >= 0)
            queue_delayed_work(system_long_wq,
                       &rport->fast_io_fail_work,
                       1UL * fast_io_fail_tmo * HZ);
        if (dev_loss_tmo >= 0)
            queue_delayed_work(system_long_wq,
                       &rport->dev_loss_work,
                       1UL * dev_loss_tmo * HZ);
    }
}

/**
 * srp_start_tl_fail_timers() - start the transport layer failure timers
 * @rport: SRP target port.
 *
 * Start the transport layer fast I/O failure and device loss timers. Do not
 * modify a timer that was already started.
 */
void srp_start_tl_fail_timers(struct srp_rport *rport)
{
    mutex_lock(&rport->mutex);
    __srp_start_tl_fail_timers(rport);
    mutex_unlock(&rport->mutex);
}
EXPORT_SYMBOL(srp_start_tl_fail_timers);
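
/*
 * Illustrative sketch, not part of the original file: an initiator driver
 * would typically call srp_start_tl_fail_timers() from the code path that
 * detects a transport failure, e.g. an RDMA QP error handler. The target
 * structure is hypothetical.
 */
#if 0   /* example only */
static void example_handle_qp_error(struct example_target *target)
{
    /* Kicks off reconnect, fast_io_fail and dev_loss handling. */
    srp_start_tl_fail_timers(target->rport);
}
#endif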

/**
 * srp_reconnect_rport() - reconnect to an SRP target port
 * @rport: SRP target port.
 *
 * Blocks SCSI command queueing before invoking reconnect() such that
 * queuecommand() won't be invoked concurrently with reconnect() from outside
 * the SCSI EH. This is important since a reconnect() implementation may
 * reallocate resources needed by queuecommand().
 *
 * Notes:
 * - This function neither waits until outstanding requests have finished nor
 *   tries to abort these. It is the responsibility of the reconnect()
 *   function to finish outstanding commands before reconnecting to the target
 *   port.
 * - It is the responsibility of the caller to ensure that the resources
 *   reallocated by the reconnect() function won't be used while this function
 *   is in progress. One possible strategy is to invoke this function from
 *   the context of the SCSI EH thread only. Another possible strategy is to
 *   lock the rport mutex inside each SCSI LLD callback that can be invoked by
 *   the SCSI EH (the scsi_host_template.eh_*() functions and also the
 *   scsi_host_template.queuecommand() function).
 */
int srp_reconnect_rport(struct srp_rport *rport)
{
    struct Scsi_Host *shost = rport_to_shost(rport);
    struct srp_internal *i = to_srp_internal(shost->transportt);
    struct scsi_device *sdev;
    int res;

    pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev));

    res = mutex_lock_interruptible(&rport->mutex);
    if (res)
        goto out;
    if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST)
        /*
         * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
         * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
         * later is ok though, scsi_internal_device_unblock_nowait()
         * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
         */
        scsi_target_block(&shost->shost_gendev);
    res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
    pr_debug("%s (state %d): transport.reconnect() returned %d\n",
         dev_name(&shost->shost_gendev), rport->state, res);
    if (res == 0) {
        cancel_delayed_work(&rport->fast_io_fail_work);
        cancel_delayed_work(&rport->dev_loss_work);

        rport->failed_reconnects = 0;
        srp_rport_set_state(rport, SRP_RPORT_RUNNING);
        scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING);
        /*
         * If the SCSI error handler has offlined one or more devices,
         * invoking scsi_target_unblock() won't change the state of
         * these devices into running so do that explicitly.
         */
        shost_for_each_device(sdev, shost) {
            mutex_lock(&sdev->state_mutex);
            if (sdev->sdev_state == SDEV_OFFLINE)
                sdev->sdev_state = SDEV_RUNNING;
            mutex_unlock(&sdev->state_mutex);
        }
    } else if (rport->state == SRP_RPORT_RUNNING) {
        /*
         * srp_reconnect_rport() has been invoked with fast_io_fail
         * and dev_loss off. Mark the port as failed and start the TL
         * failure timers if these had not yet been started.
         */
        __rport_fail_io_fast(rport);
        __srp_start_tl_fail_timers(rport);
    } else if (rport->state != SRP_RPORT_BLOCKED) {
        scsi_target_unblock(&shost->shost_gendev,
                    SDEV_TRANSPORT_OFFLINE);
    }
    mutex_unlock(&rport->mutex);

out:
    return res;
}
EXPORT_SYMBOL(srp_reconnect_rport);
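
/*
 * Illustrative sketch, not part of the original file: the serialization
 * requirements above make SCSI EH context a natural caller, so an LLD can
 * wire this up as its eh_host_reset_handler. The rport lookup helper is
 * hypothetical.
 */
#if 0   /* example only */
static int example_eh_host_reset(struct scsi_cmnd *scmd)
{
    struct srp_rport *rport = example_shost_to_rport(scmd->device->host);

    return srp_reconnect_rport(rport) == 0 ? SUCCESS : FAILED;
}
#endif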

/**
 * srp_timed_out() - SRP transport intercept of the SCSI timeout EH
 * @scmd: SCSI command.
 *
 * If a timeout occurs while an rport is in the blocked state, ask the SCSI
 * EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core
 * handle the timeout (BLK_EH_DONE).
 *
 * Note: This function is called from soft-IRQ context and with the request
 * queue lock held.
 */
enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
{
    struct scsi_device *sdev = scmd->device;
    struct Scsi_Host *shost = sdev->host;
    struct srp_internal *i = to_srp_internal(shost->transportt);
    struct srp_rport *rport = shost_to_rport(shost);

    pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
    return rport && rport->fast_io_fail_tmo < 0 &&
        rport->dev_loss_tmo < 0 &&
        i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
        BLK_EH_RESET_TIMER : BLK_EH_DONE;
}
EXPORT_SYMBOL(srp_timed_out);
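
/*
 * Added note: initiator drivers hook this into the midlayer timeout path
 * by pointing their SCSI host template at it, e.g.:
 *
 *    .eh_timed_out = srp_timed_out,
 */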

static void srp_rport_release(struct device *dev)
{
    struct srp_rport *rport = dev_to_rport(dev);

    put_device(dev->parent);
    kfree(rport);
}

static int scsi_is_srp_rport(const struct device *dev)
{
    return dev->release == srp_rport_release;
}

static int srp_rport_match(struct attribute_container *cont,
               struct device *dev)
{
    struct Scsi_Host *shost;
    struct srp_internal *i;

    if (!scsi_is_srp_rport(dev))
        return 0;

    shost = dev_to_shost(dev->parent);
    if (!shost->transportt)
        return 0;
    if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
        return 0;

    i = to_srp_internal(shost->transportt);
    return &i->rport_attr_cont.ac == cont;
}

static int srp_host_match(struct attribute_container *cont, struct device *dev)
{
    struct Scsi_Host *shost;
    struct srp_internal *i;

    if (!scsi_is_host_device(dev))
        return 0;

    shost = dev_to_shost(dev);
    if (!shost->transportt)
        return 0;
    if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
        return 0;

    i = to_srp_internal(shost->transportt);
    return &i->t.host_attrs.ac == cont;
}

/**
 * srp_rport_get() - increment rport reference count
 * @rport: SRP target port.
 */
void srp_rport_get(struct srp_rport *rport)
{
    get_device(&rport->dev);
}
EXPORT_SYMBOL(srp_rport_get);

/**
 * srp_rport_put() - decrement rport reference count
 * @rport: SRP target port.
 */
void srp_rport_put(struct srp_rport *rport)
{
    put_device(&rport->dev);
}
EXPORT_SYMBOL(srp_rport_put);

/**
 * srp_rport_add - add an SRP remote port to the device hierarchy
 * @shost:  scsi host the remote port is connected to.
 * @ids:    The port id for the remote port.
 *
 * Publishes a port to the rest of the system.
 */
struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
                struct srp_rport_identifiers *ids)
{
    struct srp_rport *rport;
    struct device *parent = &shost->shost_gendev;
    struct srp_internal *i = to_srp_internal(shost->transportt);
    int id, ret;

    rport = kzalloc(sizeof(*rport), GFP_KERNEL);
    if (!rport)
        return ERR_PTR(-ENOMEM);

    mutex_init(&rport->mutex);

    device_initialize(&rport->dev);

    rport->dev.parent = get_device(parent);
    rport->dev.release = srp_rport_release;

    memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
    rport->roles = ids->roles;

    if (i->f->reconnect)
        rport->reconnect_delay = i->f->reconnect_delay ?
            *i->f->reconnect_delay : 10;
    INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work);
    rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ?
        *i->f->fast_io_fail_tmo : 15;
    rport->dev_loss_tmo = i->f->dev_loss_tmo ? *i->f->dev_loss_tmo : 60;
    INIT_DELAYED_WORK(&rport->fast_io_fail_work,
              rport_fast_io_fail_timedout);
    INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);

    id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
    dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);

    transport_setup_device(&rport->dev);

    ret = device_add(&rport->dev);
    if (ret) {
        transport_destroy_device(&rport->dev);
        put_device(&rport->dev);
        return ERR_PTR(ret);
    }

    transport_add_device(&rport->dev);
    transport_configure_device(&rport->dev);

    return rport;
}
EXPORT_SYMBOL_GPL(srp_rport_add);
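
/*
 * Illustrative sketch, not part of the original file: how an LLD might
 * publish a remote port after a successful login. The caller and the
 * origin of the 16-byte port identifier are hypothetical.
 */
#if 0   /* example only */
static int example_publish_rport(struct Scsi_Host *shost, const u8 *port_id)
{
    struct srp_rport_identifiers ids;
    struct srp_rport *rport;

    memcpy(ids.port_id, port_id, sizeof(ids.port_id));
    ids.roles = SRP_RPORT_ROLE_TARGET;

    rport = srp_rport_add(shost, &ids);
    return PTR_ERR_OR_ZERO(rport);
}
#endif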

/**
 * srp_rport_del  -  remove an SRP remote port
 * @rport:  SRP remote port to remove
 *
 * Removes the specified SRP remote port.
 */
void srp_rport_del(struct srp_rport *rport)
{
    struct device *dev = &rport->dev;

    transport_remove_device(dev);
    device_del(dev);
    transport_destroy_device(dev);

    put_device(dev);
}
EXPORT_SYMBOL_GPL(srp_rport_del);

static int do_srp_rport_del(struct device *dev, void *data)
{
    if (scsi_is_srp_rport(dev))
        srp_rport_del(dev_to_rport(dev));
    return 0;
}

/**
 * srp_remove_host  -  tear down a Scsi_Host's SRP data structures
 * @shost:  Scsi Host that is torn down
 *
 * Removes all SRP remote ports for a given Scsi_Host.
 * Must be called just before scsi_remove_host() for SRP HBAs.
 */
void srp_remove_host(struct Scsi_Host *shost)
{
    device_for_each_child(&shost->shost_gendev, NULL, do_srp_rport_del);
}
EXPORT_SYMBOL_GPL(srp_remove_host);

/**
 * srp_stop_rport_timers - stop the transport layer recovery timers
 * @rport: SRP remote port for which to stop the timers.
 *
 * Must be called after srp_remove_host() and scsi_remove_host(). The caller
 * must hold a reference on the rport (rport->dev) and on the SCSI host
 * (rport->dev.parent).
 */
void srp_stop_rport_timers(struct srp_rport *rport)
{
    mutex_lock(&rport->mutex);
    if (rport->state == SRP_RPORT_BLOCKED)
        __rport_fail_io_fast(rport);
    srp_rport_set_state(rport, SRP_RPORT_LOST);
    mutex_unlock(&rport->mutex);

    cancel_delayed_work_sync(&rport->reconnect_work);
    cancel_delayed_work_sync(&rport->fast_io_fail_work);
    cancel_delayed_work_sync(&rport->dev_loss_work);
}
EXPORT_SYMBOL_GPL(srp_stop_rport_timers);
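
/*
 * Illustrative sketch, not part of the original file: the teardown order
 * implied by the comments above, as it might appear in a hypothetical
 * LLD's target removal path. srp_rport_get()/srp_rport_put() keep the
 * rport alive across srp_stop_rport_timers().
 */
#if 0   /* example only */
static void example_remove_target(struct example_target *target)
{
    srp_rport_get(target->rport);
    srp_remove_host(target->shost);        /* removes the rport(s) */
    scsi_remove_host(target->shost);
    srp_stop_rport_timers(target->rport);  /* needs rport + shost refs */
    srp_rport_put(target->rport);
}
#endif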

/**
 * srp_attach_transport  -  instantiate SRP transport template
 * @ft:     SRP transport class function template
 */
struct scsi_transport_template *
srp_attach_transport(struct srp_function_template *ft)
{
    int count;
    struct srp_internal *i;

    i = kzalloc(sizeof(*i), GFP_KERNEL);
    if (!i)
        return NULL;

    i->t.host_size = sizeof(struct srp_host_attrs);
    i->t.host_attrs.ac.attrs = &i->host_attrs[0];
    i->t.host_attrs.ac.class = &srp_host_class.class;
    i->t.host_attrs.ac.match = srp_host_match;
    i->host_attrs[0] = NULL;
    transport_container_register(&i->t.host_attrs);

    i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
    i->rport_attr_cont.ac.class = &srp_rport_class.class;
    i->rport_attr_cont.ac.match = srp_rport_match;

    count = 0;
    i->rport_attrs[count++] = &dev_attr_port_id;
    i->rport_attrs[count++] = &dev_attr_roles;
    if (ft->has_rport_state) {
        i->rport_attrs[count++] = &dev_attr_state;
        i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo;
        i->rport_attrs[count++] = &dev_attr_dev_loss_tmo;
    }
    if (ft->reconnect) {
        i->rport_attrs[count++] = &dev_attr_reconnect_delay;
        i->rport_attrs[count++] = &dev_attr_failed_reconnects;
    }
    if (ft->rport_delete)
        i->rport_attrs[count++] = &dev_attr_delete;
    i->rport_attrs[count++] = NULL;
    BUG_ON(count > ARRAY_SIZE(i->rport_attrs));

    transport_container_register(&i->rport_attr_cont);

    i->f = ft;

    return &i->t;
}
EXPORT_SYMBOL_GPL(srp_attach_transport);
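
/*
 * Illustrative sketch, not part of the original file: module init code of
 * a hypothetical initiator driver wiring up the function template. Only
 * fields consumed by this file are shown; the example_* callbacks are
 * assumed to exist in the driver.
 */
#if 0   /* example only */
static struct scsi_transport_template *example_scsi_transport;

static struct srp_function_template example_srp_ft = {
    .has_rport_state        = true,
    .reset_timer_if_blocked = true,
    .reconnect              = example_rport_reconnect,
    .terminate_rport_io     = example_terminate_io,
    .rport_delete           = example_rport_delete,
};

static int __init example_init(void)
{
    example_scsi_transport = srp_attach_transport(&example_srp_ft);
    if (!example_scsi_transport)
        return -ENOMEM;
    /* create SCSI hosts with shost->transportt = example_scsi_transport */
    return 0;
}
#endif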

/**
 * srp_release_transport  -  release SRP transport template instance
 * @t:      transport template instance
 */
void srp_release_transport(struct scsi_transport_template *t)
{
    struct srp_internal *i = to_srp_internal(t);

    transport_container_unregister(&i->t.host_attrs);
    transport_container_unregister(&i->rport_attr_cont);

    kfree(i);
}
EXPORT_SYMBOL_GPL(srp_release_transport);

static __init int srp_transport_init(void)
{
    int ret;

    ret = transport_class_register(&srp_host_class);
    if (ret)
        return ret;
    ret = transport_class_register(&srp_rport_class);
    if (ret)
        goto unregister_host_class;

    return 0;
unregister_host_class:
    transport_class_unregister(&srp_host_class);
    return ret;
}

static void __exit srp_transport_exit(void)
{
    transport_class_unregister(&srp_host_class);
    transport_class_unregister(&srp_rport_class);
}

MODULE_AUTHOR("FUJITA Tomonori");
MODULE_DESCRIPTION("SRP Transport Attributes");
MODULE_LICENSE("GPL");

module_init(srp_transport_init);
module_exit(srp_transport_exit);