// SPDX-License-Identifier: GPL-2.0
/*
 * Serial Attached SCSI (SAS) event processing.
 *
 * NOTE(review): the original file header (license/copyright comment block)
 * was garbled to bare line numbers during extraction; reconstructed header
 * above — confirm against the upstream source.
 */
0009 #include <linux/export.h>
0010 #include <scsi/scsi_host.h>
0011 #include "sas_internal.h"
0012
0013 bool sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
0014 {
0015 if (!test_bit(SAS_HA_REGISTERED, &ha->state))
0016 return false;
0017
0018 if (test_bit(SAS_HA_DRAINING, &ha->state)) {
0019
0020 if (list_empty(&sw->drain_node))
0021 list_add_tail(&sw->drain_node, &ha->defer_q);
0022 return true;
0023 }
0024
0025 return queue_work(ha->event_q, &sw->work);
0026 }
0027
0028 static bool sas_queue_event(int event, struct sas_work *work,
0029 struct sas_ha_struct *ha)
0030 {
0031 unsigned long flags;
0032 bool rc;
0033
0034 spin_lock_irqsave(&ha->lock, flags);
0035 rc = sas_queue_work(ha, work);
0036 spin_unlock_irqrestore(&ha->lock, flags);
0037
0038 return rc;
0039 }
0040
0041 void sas_queue_deferred_work(struct sas_ha_struct *ha)
0042 {
0043 struct sas_work *sw, *_sw;
0044
0045 spin_lock_irq(&ha->lock);
0046 list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
0047 list_del_init(&sw->drain_node);
0048
0049 if (!sas_queue_work(ha, sw)) {
0050 pm_runtime_put(ha->dev);
0051 sas_free_event(to_asd_sas_event(&sw->work));
0052 }
0053 }
0054 spin_unlock_irq(&ha->lock);
0055 }
0056
/*
 * __sas_drain_work - drain the HA's event workqueues
 * @ha: the HA to drain
 *
 * While SAS_HA_DRAINING is set, sas_queue_work() parks new items on
 * ha->defer_q instead of the workqueues, so the drain below can
 * terminate.  After the drain the flag is cleared and the parked items
 * are requeued.  Caller is expected to serialize via ha->drain_mutex
 * (see sas_drain_work()).
 */
void __sas_drain_work(struct sas_ha_struct *ha)
{
	set_bit(SAS_HA_DRAINING, &ha->state);

	/*
	 * Empty lock/unlock pair: acts as a barrier so that any
	 * sas_queue_work() caller already inside the ha->lock critical
	 * section completes before we start draining — after this point
	 * every concurrent queuer observes SAS_HA_DRAINING and defers.
	 */
	spin_lock_irq(&ha->lock);
	spin_unlock_irq(&ha->lock);

	drain_workqueue(ha->event_q);
	drain_workqueue(ha->disco_q);

	/* Clear the flag first so the deferred items can actually queue. */
	clear_bit(SAS_HA_DRAINING, &ha->state);
	sas_queue_deferred_work(ha);
}
0070
0071 int sas_drain_work(struct sas_ha_struct *ha)
0072 {
0073 int err;
0074
0075 err = mutex_lock_interruptible(&ha->drain_mutex);
0076 if (err)
0077 return err;
0078 if (test_bit(SAS_HA_REGISTERED, &ha->state))
0079 __sas_drain_work(ha);
0080 mutex_unlock(&ha->drain_mutex);
0081
0082 return 0;
0083 }
0084 EXPORT_SYMBOL_GPL(sas_drain_work);
0085
/*
 * sas_disable_revalidation - suppress domain revalidation on @ha
 * @ha: the HA whose revalidation should be paused
 *
 * Sets SAS_HA_ATA_EH_ACTIVE under ha->disco_mutex; paired with
 * sas_enable_revalidation(), which clears the bit and re-kicks any
 * revalidation that was requested while it was set.
 */
void sas_disable_revalidation(struct sas_ha_struct *ha)
{
	mutex_lock(&ha->disco_mutex);
	set_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
	mutex_unlock(&ha->disco_mutex);
}
0092
0093 void sas_enable_revalidation(struct sas_ha_struct *ha)
0094 {
0095 int i;
0096
0097 mutex_lock(&ha->disco_mutex);
0098 clear_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
0099 for (i = 0; i < ha->num_phys; i++) {
0100 struct asd_sas_port *port = ha->sas_port[i];
0101 const int ev = DISCE_REVALIDATE_DOMAIN;
0102 struct sas_discovery *d = &port->disc;
0103 struct asd_sas_phy *sas_phy;
0104
0105 if (!test_and_clear_bit(ev, &d->pending))
0106 continue;
0107
0108 spin_lock(&port->phy_list_lock);
0109 if (list_empty(&port->phy_list)) {
0110 spin_unlock(&port->phy_list_lock);
0111 continue;
0112 }
0113
0114 sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
0115 port_phy_el);
0116 spin_unlock(&port->phy_list_lock);
0117 sas_notify_port_event(sas_phy,
0118 PORTE_BROADCAST_RCVD, GFP_KERNEL);
0119 }
0120 mutex_unlock(&ha->disco_mutex);
0121 }
0122
0123
0124 static void sas_port_event_worker(struct work_struct *work)
0125 {
0126 struct asd_sas_event *ev = to_asd_sas_event(work);
0127 struct asd_sas_phy *phy = ev->phy;
0128 struct sas_ha_struct *ha = phy->ha;
0129
0130 sas_port_event_fns[ev->event](work);
0131 pm_runtime_put(ha->dev);
0132 sas_free_event(ev);
0133 }
0134
0135 static void sas_phy_event_worker(struct work_struct *work)
0136 {
0137 struct asd_sas_event *ev = to_asd_sas_event(work);
0138 struct asd_sas_phy *phy = ev->phy;
0139 struct sas_ha_struct *ha = phy->ha;
0140
0141 sas_phy_event_fns[ev->event](work);
0142 pm_runtime_put(ha->dev);
0143 sas_free_event(ev);
0144 }
0145
0146
0147 static bool sas_defer_event(struct asd_sas_phy *phy, struct asd_sas_event *ev)
0148 {
0149 struct sas_ha_struct *ha = phy->ha;
0150 unsigned long flags;
0151 bool deferred = false;
0152
0153 spin_lock_irqsave(&ha->lock, flags);
0154 if (test_bit(SAS_HA_RESUMING, &ha->state) && !phy->suspended) {
0155 struct sas_work *sw = &ev->work;
0156
0157 list_add_tail(&sw->drain_node, &ha->defer_q);
0158 deferred = true;
0159 }
0160 spin_unlock_irqrestore(&ha->lock, flags);
0161 return deferred;
0162 }
0163
0164 void sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
0165 gfp_t gfp_flags)
0166 {
0167 struct sas_ha_struct *ha = phy->ha;
0168 struct asd_sas_event *ev;
0169
0170 BUG_ON(event >= PORT_NUM_EVENTS);
0171
0172 ev = sas_alloc_event(phy, gfp_flags);
0173 if (!ev)
0174 return;
0175
0176
0177 pm_runtime_get_noresume(ha->dev);
0178
0179 INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
0180
0181 if (sas_defer_event(phy, ev))
0182 return;
0183
0184 if (!sas_queue_event(event, &ev->work, ha)) {
0185 pm_runtime_put(ha->dev);
0186 sas_free_event(ev);
0187 }
0188 }
0189 EXPORT_SYMBOL_GPL(sas_notify_port_event);
0190
0191 void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
0192 gfp_t gfp_flags)
0193 {
0194 struct sas_ha_struct *ha = phy->ha;
0195 struct asd_sas_event *ev;
0196
0197 BUG_ON(event >= PHY_NUM_EVENTS);
0198
0199 ev = sas_alloc_event(phy, gfp_flags);
0200 if (!ev)
0201 return;
0202
0203
0204 pm_runtime_get_noresume(ha->dev);
0205
0206 INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
0207
0208 if (sas_defer_event(phy, ev))
0209 return;
0210
0211 if (!sas_queue_event(event, &ev->work, ha)) {
0212 pm_runtime_put(ha->dev);
0213 sas_free_event(ev);
0214 }
0215 }
0216 EXPORT_SYMBOL_GPL(sas_notify_phy_event);