// SPDX-License-Identifier: GPL-2.0-only
/*
 * Serial Attached SCSI (SAS) Transport Layer initialization
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <scsi/sas_ata.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>

#include "sas_internal.h"

#include "scsi_sas_internal.h"

static struct kmem_cache *sas_task_cache;
static struct kmem_cache *sas_event_cache;

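/* Allocate a zeroed sas_task from the task cache; new tasks start out in
 * the pending state.
 */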
struct sas_task *sas_alloc_task(gfp_t flags)
{
	struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);

	if (task) {
		spin_lock_init(&task->task_state_lock);
		task->task_state_flags = SAS_TASK_STATE_PENDING;
	}

	return task;
}
EXPORT_SYMBOL_GPL(sas_alloc_task);

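/* A slow task additionally carries a timer and a completion, used by
 * libsas for internal commands (such as TMFs and SMP requests) that are
 * executed synchronously.
 */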
struct sas_task *sas_alloc_slow_task(gfp_t flags)
{
	struct sas_task *task = sas_alloc_task(flags);
	struct sas_task_slow *slow = kmalloc(sizeof(*slow), flags);

	if (!task || !slow) {
		if (task)
			kmem_cache_free(sas_task_cache, task);
		kfree(slow);
		return NULL;
	}

	task->slow_task = slow;
	slow->task = task;
	timer_setup(&slow->timer, NULL, 0);
	init_completion(&slow->completion);

	return task;
}
EXPORT_SYMBOL_GPL(sas_alloc_slow_task);

void sas_free_task(struct sas_task *task)
{
	if (task) {
		kfree(task->slow_task);
		kmem_cache_free(sas_task_cache, task);
	}
}
EXPORT_SYMBOL_GPL(sas_free_task);

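/* Hash the SAS_ADDR_SIZE-byte SAS address down to the 3-byte hashed form
 * (stored in hashed_sas_addr and used in OPEN address frames), using the
 * generator polynomial 0x00DB2777.
 */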
void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
{
	const u32 poly = 0x00DB2777;
	u32 r = 0;
	int i;

	for (i = 0; i < SAS_ADDR_SIZE; i++) {
		int b;

		for (b = (SAS_ADDR_SIZE - 1); b >= 0; b--) {
			r <<= 1;
			if ((1 << b) & sas_addr[i]) {
				if (!(r & 0x01000000))
					r ^= poly;
			} else if (r & 0x01000000) {
				r ^= poly;
			}
		}
	}

	hashed[0] = (r >> 16) & 0xFF;
	hashed[1] = (r >> 8) & 0xFF;
	hashed[2] = r & 0xFF;
}

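/* Initialize a host adapter's locks, lists and workqueues and register its
 * phys and ports with the SAS transport class.  On failure, everything
 * that was set up is unwound before returning the error.
 */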
int sas_register_ha(struct sas_ha_struct *sas_ha)
{
	char name[64];
	int error = 0;

	mutex_init(&sas_ha->disco_mutex);
	spin_lock_init(&sas_ha->phy_port_lock);
	sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);

	set_bit(SAS_HA_REGISTERED, &sas_ha->state);
	spin_lock_init(&sas_ha->lock);
	mutex_init(&sas_ha->drain_mutex);
	init_waitqueue_head(&sas_ha->eh_wait_q);
	INIT_LIST_HEAD(&sas_ha->defer_q);
	INIT_LIST_HEAD(&sas_ha->eh_dev_q);

	sas_ha->event_thres = SAS_PHY_SHUTDOWN_THRES;

	error = sas_register_phys(sas_ha);
	if (error) {
		pr_notice("couldn't register sas phys:%d\n", error);
		return error;
	}

	error = sas_register_ports(sas_ha);
	if (error) {
		pr_notice("couldn't register sas ports:%d\n", error);
		goto Undo_phys;
	}

	error = -ENOMEM;
	snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
	sas_ha->event_q = create_singlethread_workqueue(name);
	if (!sas_ha->event_q)
		goto Undo_ports;

	snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev));
	sas_ha->disco_q = create_singlethread_workqueue(name);
	if (!sas_ha->disco_q)
		goto Undo_event_q;

	INIT_LIST_HEAD(&sas_ha->eh_done_q);
	INIT_LIST_HEAD(&sas_ha->eh_ata_q);

	return 0;

Undo_event_q:
	destroy_workqueue(sas_ha->event_q);
Undo_ports:
	sas_unregister_ports(sas_ha);
Undo_phys:

	return error;
}
EXPORT_SYMBOL_GPL(sas_register_ha);

static void sas_disable_events(struct sas_ha_struct *sas_ha)
{
	/* Mark the HA unregistered so that no new unchained events are
	 * queued, then drain any events that are already in flight.
	 */
	mutex_lock(&sas_ha->drain_mutex);
	spin_lock_irq(&sas_ha->lock);
	clear_bit(SAS_HA_REGISTERED, &sas_ha->state);
	spin_unlock_irq(&sas_ha->lock);
	__sas_drain_work(sas_ha);
	mutex_unlock(&sas_ha->drain_mutex);
}

int sas_unregister_ha(struct sas_ha_struct *sas_ha)
{
	sas_disable_events(sas_ha);
	sas_unregister_ports(sas_ha);

	/* flush any remaining unregistration work */
	mutex_lock(&sas_ha->drain_mutex);
	__sas_drain_work(sas_ha);
	mutex_unlock(&sas_ha->drain_mutex);

	destroy_workqueue(sas_ha->disco_q);
	destroy_workqueue(sas_ha->event_q);

	return 0;
}
EXPORT_SYMBOL_GPL(sas_unregister_ha);

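/* Link error counters for a local phy are read through the LLDD's phy
 * control hook; for an expander-attached phy they are fetched over SMP.
 */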
static int sas_get_linkerrors(struct sas_phy *phy)
{
	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		return i->dft->lldd_control_phy(asd_phy, PHY_FUNC_GET_EVENTS, NULL);
	}

	return sas_smp_get_phy_events(phy);
}

int sas_try_ata_reset(struct asd_sas_phy *asd_phy)
{
	struct domain_device *dev = NULL;

	/* try to route user requested link resets through libata */
	if (asd_phy->port)
		dev = asd_phy->port->port_dev;

	/* validate that dev has been probed */
	if (dev)
		dev = sas_find_dev_by_rphy(dev->rphy);

	if (dev && dev_is_sata(dev)) {
		sas_ata_schedule_reset(dev);
		sas_ata_wait_eh(dev);
		return 0;
	}

	return -ENODEV;
}

/*
 * transport_sas_phy_reset - reset a phy and permit libata to manage the link
 *
 * The reset is requested via sysfs from host workqueue context, so it is
 * safe to block on error handling and to traverse the domain_device
 * topology.  Resets of phys attached to SATA devices are routed through
 * libata so that the ATA layer can recover the link.
 */
static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	enum phy_func reset_type;

	if (hard_reset)
		reset_type = PHY_FUNC_HARD_RESET;
	else
		reset_type = PHY_FUNC_LINK_RESET;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		if (!hard_reset && sas_try_ata_reset(asd_phy) == 0)
			return 0;
		return i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
		struct domain_device *ata_dev = sas_ex_to_ata(ddev, phy->number);

		if (ata_dev && !hard_reset) {
			sas_ata_schedule_reset(ata_dev);
			sas_ata_wait_eh(ata_dev);
			return 0;
		} else
			return sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
	}
}

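/* Enable a phy by issuing a link reset, or disable it via PHY_FUNC_DISABLE
 * (local phys) or SMP PHY CONTROL (expander-attached phys).
 */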
int sas_phy_enable(struct sas_phy *phy, int enable)
{
	int ret;
	enum phy_func cmd;

	if (enable)
		cmd = PHY_FUNC_LINK_RESET;
	else
		cmd = PHY_FUNC_DISABLE;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		if (enable)
			ret = transport_sas_phy_reset(phy, 0);
		else
			ret = i->dft->lldd_control_phy(asd_phy, cmd, NULL);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);

		if (enable)
			ret = transport_sas_phy_reset(phy, 0);
		else
			ret = sas_smp_phy_control(ddev, phy->number, cmd, NULL);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sas_phy_enable);

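/* Reset an enabled phy, either directly through the LLDD for local phys or
 * via SMP PHY CONTROL for expander-attached phys.
 */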
int sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	int ret;
	enum phy_func reset_type;

	if (!phy->enabled)
		return -ENODEV;

	if (hard_reset)
		reset_type = PHY_FUNC_HARD_RESET;
	else
		reset_type = PHY_FUNC_LINK_RESET;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		ret = i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);

		ret = sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sas_phy_reset);

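/* Clamp the requested minimum/maximum link rates to the phy's hardware
 * limits, then program them through the LLDD or, for expander phys, via an
 * SMP PHY CONTROL (LINK RESET) request.
 */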
int sas_set_phy_speed(struct sas_phy *phy,
		      struct sas_phy_linkrates *rates)
{
	int ret;

	if ((rates->minimum_linkrate &&
	     rates->minimum_linkrate > phy->maximum_linkrate) ||
	    (rates->maximum_linkrate &&
	     rates->maximum_linkrate < phy->minimum_linkrate))
		return -EINVAL;

	if (rates->minimum_linkrate &&
	    rates->minimum_linkrate < phy->minimum_linkrate_hw)
		rates->minimum_linkrate = phy->minimum_linkrate_hw;

	if (rates->maximum_linkrate &&
	    rates->maximum_linkrate > phy->maximum_linkrate_hw)
		rates->maximum_linkrate = phy->maximum_linkrate_hw;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		ret = i->dft->lldd_control_phy(asd_phy, PHY_FUNC_SET_LINK_RATE,
					       rates);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);

		ret = sas_smp_phy_control(ddev, phy->number,
					  PHY_FUNC_LINK_RESET, rates);
	}

	return ret;
}

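/* Prepare the HA for resume: mark it registered and resuming, and clear
 * any stale attached-address and frame data left over from before the
 * suspend.
 */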
void sas_prep_resume_ha(struct sas_ha_struct *ha)
{
	int i;

	set_bit(SAS_HA_REGISTERED, &ha->state);
	set_bit(SAS_HA_RESUMING, &ha->state);

	/* clear out any stale link events/data from the suspension path */
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_phy *phy = ha->sas_phy[i];

		memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
		phy->frame_rcvd_size = 0;
	}
}
EXPORT_SYMBOL(sas_prep_resume_ha);

static int phys_suspended(struct sas_ha_struct *ha)
{
	int i, rc = 0;

	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_phy *phy = ha->sas_phy[i];

		if (phy->suspended)
			rc++;
	}

	return rc;
}

static void sas_resume_insert_broadcast_ha(struct sas_ha_struct *ha)
{
	int i;

	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];
		struct domain_device *dev = port->port_dev;

		if (dev && dev_is_expander(dev->dev_type)) {
			struct asd_sas_phy *first_phy;

			spin_lock(&port->phy_list_lock);
			first_phy = list_first_entry_or_null(
				&port->phy_list, struct asd_sas_phy,
				port_phy_el);
			spin_unlock(&port->phy_list_lock);

			if (first_phy)
				sas_notify_port_event(first_phy,
						      PORTE_BROADCAST_RCVD, GFP_KERNEL);
		}
	}
}

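/* Common resume path; @drain selects whether queued libsas work is drained
 * synchronously once I/O has been unblocked.
 */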
static void _sas_resume_ha(struct sas_ha_struct *ha, bool drain)
{
	const unsigned long tmo = msecs_to_jiffies(25000);
	int i;

	/* wait for the LLDD to bring suspended phys back up; any phy that
	 * is still suspended after the timeout is flagged with a
	 * PHYE_RESUME_TIMEOUT event
	 */
	i = phys_suspended(ha);
	if (i)
		dev_info(ha->dev, "waiting up to 25 seconds for %d phy%s to resume\n",
			 i, i > 1 ? "s" : "");
	wait_event_timeout(ha->eh_wait_q, phys_suspended(ha) == 0, tmo);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_phy *phy = ha->sas_phy[i];

		if (phy->suspended) {
			dev_warn(&phy->phy->dev, "resume timeout\n");
			sas_notify_phy_event(phy, PHYE_RESUME_TIMEOUT,
					     GFP_KERNEL);
		}
	}

	/* all phys are back up or timed out, turn on i/o so we can
	 * flush out disks that did not return
	 */
	scsi_unblock_requests(ha->core.shost);
	if (drain)
		sas_drain_work(ha);
	clear_bit(SAS_HA_RESUMING, &ha->state);

	sas_queue_deferred_work(ha);

	/* send a broadcast event so that any disks inserted behind an
	 * expander while the host was suspended get rediscovered
	 */
	sas_resume_insert_broadcast_ha(ha);
}

void sas_resume_ha(struct sas_ha_struct *ha)
{
	_sas_resume_ha(ha, true);
}
EXPORT_SYMBOL(sas_resume_ha);

/* A no-sync variant of sas_resume_ha() which does not drain libsas work. */
void sas_resume_ha_no_sync(struct sas_ha_struct *ha)
{
	_sas_resume_ha(ha, false);
}
EXPORT_SYMBOL(sas_resume_ha_no_sync);

void sas_suspend_ha(struct sas_ha_struct *ha)
{
	int i;

	sas_disable_events(ha);
	scsi_block_requests(ha->core.shost);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];

		sas_discover_event(port, DISCE_SUSPEND);
	}

	/* flush suspend events while unregistered */
	mutex_lock(&ha->drain_mutex);
	__sas_drain_work(ha);
	mutex_unlock(&ha->drain_mutex);
}
EXPORT_SYMBOL(sas_suspend_ha);

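/* Free the per-phy data allocated in sas_phy_setup(). */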
static void sas_phy_release(struct sas_phy *phy)
{
	kfree(phy->hostdata);
	phy->hostdata = NULL;
}

static void phy_reset_work(struct work_struct *work)
{
	struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work);

	d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset);
}

static void phy_enable_work(struct work_struct *work)
{
	struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work);

	d->enable_result = sas_phy_enable(d->phy, d->enable);
}

static int sas_phy_setup(struct sas_phy *phy)
{
	struct sas_phy_data *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;

	mutex_init(&d->event_lock);
	INIT_SAS_WORK(&d->reset_work, phy_reset_work);
	INIT_SAS_WORK(&d->enable_work, phy_enable_work);
	d->phy = phy;
	phy->hostdata = d;

	return 0;
}

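/* Run the phy reset from the HA work queue and wait for the result.  A
 * runtime PM reference is held on the HA device for the duration.
 */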
static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
{
	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_phy_data *d = phy->hostdata;
	int rc;

	if (!d)
		return -ENOMEM;

	pm_runtime_get_sync(ha->dev);

	mutex_lock(&d->event_lock);
	d->reset_result = 0;
	d->hard_reset = hard_reset;

	spin_lock_irq(&ha->lock);
	sas_queue_work(ha, &d->reset_work);
	spin_unlock_irq(&ha->lock);

	rc = sas_drain_work(ha);
	if (rc == 0)
		rc = d->reset_result;
	mutex_unlock(&d->event_lock);
	pm_runtime_put_sync(ha->dev);

	return rc;
}

static int queue_phy_enable(struct sas_phy *phy, int enable)
{
	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_phy_data *d = phy->hostdata;
	int rc;

	if (!d)
		return -ENOMEM;

	pm_runtime_get_sync(ha->dev);

	mutex_lock(&d->event_lock);
	d->enable_result = 0;
	d->enable = enable;

	spin_lock_irq(&ha->lock);
	sas_queue_work(ha, &d->enable_work);
	spin_unlock_irq(&ha->lock);

	rc = sas_drain_work(ha);
	if (rc == 0)
		rc = d->enable_result;
	mutex_unlock(&d->event_lock);
	pm_runtime_put_sync(ha->dev);

	return rc;
}

static struct sas_function_template sft = {
	.phy_enable = queue_phy_enable,
	.phy_reset = queue_phy_reset,
	.phy_setup = sas_phy_setup,
	.phy_release = sas_phy_release,
	.set_phy_speed = sas_set_phy_speed,
	.get_linkerrors = sas_get_linkerrors,
	.smp_handler = sas_smp_handler,
};

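/* sysfs attribute exposing the per-HA phy event threshold consulted by
 * sas_alloc_event().
 */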
static ssize_t phy_event_threshold_show(struct device *dev,
					struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	return scnprintf(buf, PAGE_SIZE, "%u\n", sha->event_thres);
}

static ssize_t phy_event_threshold_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	sha->event_thres = simple_strtol(buf, NULL, 10);

	/* threshold cannot be set too small */
	if (sha->event_thres < 32)
		sha->event_thres = 32;

	return count;
}

DEVICE_ATTR(phy_event_threshold,
	    S_IRUGO|S_IWUSR,
	    phy_event_threshold_show,
	    phy_event_threshold_store);
EXPORT_SYMBOL_GPL(dev_attr_phy_event_threshold);

struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *dft)
{
	struct scsi_transport_template *stt = sas_attach_transport(&sft);
	struct sas_internal *i;

	if (!stt)
		return stt;

	i = to_sas_internal(stt);
	i->dft = dft;
	stt->create_work_queue = 1;
	stt->eh_strategy_handler = sas_scsi_recover_host;

	return stt;
}
EXPORT_SYMBOL_GPL(sas_domain_attach_transport);

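/* Allocate a phy event and account it against the phy's event counter.  If
 * the counter exceeds the HA's threshold, ask the LLDD to shut the phy
 * down, or fail the allocation if phy control is not supported.
 */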
struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy,
				      gfp_t gfp_flags)
{
	struct asd_sas_event *event;
	struct sas_ha_struct *sas_ha = phy->ha;
	struct sas_internal *i =
		to_sas_internal(sas_ha->core.shost->transportt);

	event = kmem_cache_zalloc(sas_event_cache, gfp_flags);
	if (!event)
		return NULL;

	atomic_inc(&phy->event_nr);

	if (atomic_read(&phy->event_nr) > phy->ha->event_thres) {
		if (i->dft->lldd_control_phy) {
			if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
				pr_notice("The phy%d bursting events, shut it down.\n",
					  phy->id);
				sas_notify_phy_event(phy, PHYE_SHUTDOWN,
						     gfp_flags);
			}
		} else {
			/* PHY control is not supported, so the phy cannot be
			 * shut down; stop allocating events instead.
			 */
			WARN_ONCE(1, "PHY control not supported.\n");
			kmem_cache_free(sas_event_cache, event);
			atomic_dec(&phy->event_nr);
			event = NULL;
		}
	}

	return event;
}

void sas_free_event(struct asd_sas_event *event)
{
	struct asd_sas_phy *phy = event->phy;

	kmem_cache_free(sas_event_cache, event);
	atomic_dec(&phy->event_nr);
}

/* module init/exit: create and destroy the sas_task and event caches */

static int __init sas_class_init(void)
{
	sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
	if (!sas_task_cache)
		goto out;

	sas_event_cache = KMEM_CACHE(asd_sas_event, SLAB_HWCACHE_ALIGN);
	if (!sas_event_cache)
		goto free_task_kmem;

	return 0;
free_task_kmem:
	kmem_cache_destroy(sas_task_cache);
out:
	return -ENOMEM;
}

static void __exit sas_class_exit(void)
{
	kmem_cache_destroy(sas_task_cache);
	kmem_cache_destroy(sas_event_cache);
}

MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
MODULE_DESCRIPTION("SAS Transport Layer");
MODULE_LICENSE("GPL v2");

module_init(sas_class_init);
module_exit(sas_class_exit);