0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018 #include <linux/blkdev.h>
0019 #include <linux/bug.h>
0020 #include <linux/completion.h>
0021 #include <linux/delay.h>
0022 #include <linux/device.h>
0023 #include <linux/dma-mapping.h>
0024 #include <linux/firewire.h>
0025 #include <linux/firewire-constants.h>
0026 #include <linux/init.h>
0027 #include <linux/jiffies.h>
0028 #include <linux/kernel.h>
0029 #include <linux/kref.h>
0030 #include <linux/list.h>
0031 #include <linux/mod_devicetable.h>
0032 #include <linux/module.h>
0033 #include <linux/moduleparam.h>
0034 #include <linux/scatterlist.h>
0035 #include <linux/slab.h>
0036 #include <linux/spinlock.h>
0037 #include <linux/string.h>
0038 #include <linux/stringify.h>
0039 #include <linux/workqueue.h>
0040
0041 #include <asm/byteorder.h>
0042
0043 #include <scsi/scsi.h>
0044 #include <scsi/scsi_cmnd.h>
0045 #include <scsi/scsi_device.h>
0046 #include <scsi/scsi_host.h>
0047
0048
0049
0050
0051
0052
0053
0054
/*
 * Module load parameter: when set (the default) the driver logs in to the
 * SBP-2 target exclusively, so no other initiator can use it concurrently.
 */
static bool sbp2_param_exclusive_login = 1;
module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644);
MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
		 "(default = Y, use N for concurrent initiators)");
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
/*
 * Flags for firmware oddities, usable both via the "workarounds" module
 * parameter and via entries in sbp2_workarounds_table[].
 */
#define SBP2_WORKAROUND_128K_MAX_TRANS	0x1	/* cap transfers at 128 kB */
#define SBP2_WORKAROUND_INQUIRY_36	0x2	/* ask for only 36 bytes in INQUIRY */
#define SBP2_WORKAROUND_MODE_SENSE_8	0x4	/* skip mode page 8 */
#define SBP2_WORKAROUND_FIX_CAPACITY	0x8	/* device reports one sector too many */
#define SBP2_WORKAROUND_DELAY_INQUIRY	0x10	/* wait before issuing INQUIRY */
#define SBP2_INQUIRY_DELAY		12	/* seconds waited for DELAY_INQUIRY */
#define SBP2_WORKAROUND_POWER_CONDITION	0x20	/* set power condition in START STOP UNIT */
#define SBP2_WORKAROUND_OVERRIDE	0x100	/* ignore the built-in blacklist */

static int sbp2_param_workarounds;
module_param_named(workarounds, sbp2_param_workarounds, int, 0644);
MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
	", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
	", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36)
	", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
	", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
	", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY)
	", set power condition in start stop unit = "
	__stringify(SBP2_WORKAROUND_POWER_CONDITION)
	", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
	", or a combination)");
0113
0114
0115
0116
0117
/*
 * Per-logical-unit state.  One of these exists for every LUN entry found
 * in the target's config ROM; all of them hang off sbp2_target.lu_list.
 */
struct sbp2_logical_unit {
	struct sbp2_target *tgt;		/* owning target */
	struct list_head link;			/* entry in tgt->lu_list */
	struct fw_address_handler address_handler; /* status FIFO address */
	struct list_head orb_list;		/* in-flight ORBs, under tgt->lock */

	u64 command_block_agent_address;	/* from the login response */
	u16 lun;
	int login_id;				/* 16-bit; INVALID_LOGIN_ID when logged out */

	/*
	 * Bus generation at which the last successful login/reconnect
	 * happened; compared against card->generation to decide whether
	 * the unit needs to be blocked or re-connected after a bus reset.
	 */
	int generation;
	int retries;				/* login/reconnect attempts so far */
	work_func_t workfn;			/* sbp2_login or sbp2_reconnect */
	struct delayed_work work;
	bool has_sdev;				/* scsi_device was added */
	bool blocked;				/* this lu contributes to tgt->blocked */
};
0141
/*
 * Schedule the unit's login/reconnect work (lu->workfn via sbp2_lu_workfn)
 * on the firewire workqueue after @delay jiffies.
 */
static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
{
	queue_delayed_work(fw_workqueue, &lu->work, delay);
}
0146
0147
0148
0149
0150
/*
 * Per-target state, allocated as the hostdata of a Scsi_Host (see
 * sbp2_probe(); the container_of((void *)tgt, ...) casts rely on this).
 */
struct sbp2_target {
	struct fw_unit *unit;
	struct list_head lu_list;		/* sbp2_logical_unit.link */

	u64 management_agent_address;		/* from the unit directory */
	u64 guid;
	int directory_id;
	int node_id;				/* target's node ID, set at login */
	int address_high;			/* local node ID << 16, set at login */
	unsigned int workarounds;		/* SBP2_WORKAROUND_* mask */
	unsigned int mgt_orb_timeout;		/* ms, clamped to [5000, 40000] */
	unsigned int max_payload;		/* log2(max packet payload) - 2 */

	spinlock_t lock;			/* protects blocking state and ORB lists */
	int dont_block;				/* while > 0, never block the host */
	int blocked;				/* number of blocked logical units */
};
0168
/* The fw_device (node) that this target's unit directory belongs to. */
static struct fw_device *target_parent_device(struct sbp2_target *tgt)
{
	return fw_parent_device(tgt->unit);
}
0173
/* Device to use in target-level log messages. */
static const struct device *tgt_dev(const struct sbp2_target *tgt)
{
	return &tgt->unit->device;
}
0178
/* Device to use in logical-unit-level log messages. */
static const struct device *lu_dev(const struct sbp2_logical_unit *lu)
{
	return &lu->tgt->unit->device;
}
0183
0184
/* Login IDs are 16 bit wide, so this value is out of band. */
#define INVALID_LOGIN_ID 0x10000

#define SBP2_ORB_TIMEOUT		2000U	/* timeout in ms */
#define SBP2_ORB_NULL			0x80000000
#define SBP2_RETRY_LIMIT		0xf	/* 15 retries */
#define SBP2_CYCLE_LIMIT		(0xc8 << 12)	/* 200 125us cycles */

/* SBP-2 CDBs are at most 16 bytes (the command_block field size). */
#define SBP2_MAX_CDB_SIZE		16

/*
 * Scatter/gather segment size limit; page-table element lengths are
 * 16 bit, kept 4-byte aligned.
 */
#define SBP2_MAX_SEG_SIZE		0xfffc

/* Unit directory keys */
#define SBP2_CSR_UNIT_CHARACTERISTICS	0x3a
#define SBP2_CSR_FIRMWARE_REVISION	0x3c
#define SBP2_CSR_LOGICAL_UNIT_NUMBER	0x14
#define SBP2_CSR_UNIT_UNIQUE_ID		0x8d
#define SBP2_CSR_LOGICAL_UNIT_DIRECTORY	0xd4

/* Management orb opcodes */
#define SBP2_LOGIN_REQUEST		0x0
#define SBP2_QUERY_LOGINS_REQUEST	0x1
#define SBP2_RECONNECT_REQUEST		0x3
#define SBP2_SET_PASSWORD_REQUEST	0x4
#define SBP2_LOGOUT_REQUEST		0x7
#define SBP2_ABORT_TASK_REQUEST		0xb
#define SBP2_ABORT_TASK_SET		0xc
#define SBP2_LOGICAL_UNIT_RESET		0xe
#define SBP2_TARGET_RESET_REQUEST	0xf

/* Offsets for command block agent registers */
#define SBP2_AGENT_STATE		0x00
#define SBP2_AGENT_RESET		0x04
#define SBP2_ORB_POINTER		0x08
#define SBP2_DOORBELL			0x10
#define SBP2_UNSOLICITED_STATUS_ENABLE	0x14

/* Status write response codes */
#define SBP2_STATUS_REQUEST_COMPLETE	0x0
#define SBP2_STATUS_TRANSPORT_FAILURE	0x1
#define SBP2_STATUS_ILLEGAL_REQUEST	0x2
#define SBP2_STATUS_VENDOR_DEPENDENT	0x3

/* Field accessors for the first quadlet of an SBP-2 status block. */
#define STATUS_GET_ORB_HIGH(v)		((v).status & 0xffff)
#define STATUS_GET_SBP_STATUS(v)	(((v).status >> 16) & 0xff)
#define STATUS_GET_LEN(v)		(((v).status >> 24) & 0x07)
#define STATUS_GET_DEAD(v)		(((v).status >> 27) & 0x01)
#define STATUS_GET_RESPONSE(v)		(((v).status >> 28) & 0x03)
#define STATUS_GET_SOURCE(v)		(((v).status >> 30) & 0x03)
#define STATUS_GET_ORB_LOW(v)		((v).orb_low)
#define STATUS_GET_DATA(v)		((v).data)
0243
/*
 * Host-order copy of an SBP-2 status block: two header quadlets plus up
 * to 24 bytes of status-dependent data (zero-padded on shorter writes).
 */
struct sbp2_status {
	u32 status;
	u32 orb_low;
	u8 data[24];
};
0249
/* 64-bit big-endian address pointer as used inside ORBs. */
struct sbp2_pointer {
	__be32 high;
	__be32 low;
};
0254
/*
 * Common part of an in-flight ORB.  Lifetime is reference-counted via
 * kref (released through free_orb()); link chains it on lu->orb_list,
 * protected by lu->tgt->lock.
 */
struct sbp2_orb {
	struct fw_transaction t;	/* the ORB-pointer write transaction */
	struct kref kref;
	dma_addr_t request_bus;		/* DMA address of the mapped request */
	int rcode;			/* -1 while the transaction is pending */
	void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
	struct sbp2_logical_unit *lu;
	struct list_head link;		/* entry in lu->orb_list */
};
0264
/* Field encoders for the management ORB's misc and length quadlets. */
#define MANAGEMENT_ORB_LUN(v)			((v))
#define MANAGEMENT_ORB_FUNCTION(v)		((v) << 16)
#define MANAGEMENT_ORB_RECONNECT(v)		((v) << 20)
#define MANAGEMENT_ORB_EXCLUSIVE(v)		((v) ? 1 << 28 : 0)
#define MANAGEMENT_ORB_REQUEST_FORMAT(v)	((v) << 29)
#define MANAGEMENT_ORB_NOTIFY			((1) << 31)

#define MANAGEMENT_ORB_RESPONSE_LENGTH(v)	((v))
#define MANAGEMENT_ORB_PASSWORD_LENGTH(v)	((v) << 16)

/*
 * Management ORB (login, logout, reconnect, ...).  The request struct is
 * the on-the-wire layout, DMA-mapped TO_DEVICE; response is DMA-mapped
 * FROM_DEVICE; done is signalled from complete_management_orb().
 */
struct sbp2_management_orb {
	struct sbp2_orb base;
	struct {
		struct sbp2_pointer password;
		struct sbp2_pointer response;
		__be32 misc;
		__be32 length;
		struct sbp2_pointer status_fifo;
	} request;
	__be32 response[4];
	dma_addr_t response_bus;
	struct completion done;
	struct sbp2_status status;
};
0289
/* On-the-wire layout of the response to a LOGIN management ORB. */
struct sbp2_login_response {
	__be32 misc;				/* login ID in the low 16 bits */
	struct sbp2_pointer command_block_agent;
	__be32 reconnect_hold;
};
/* Field encoders for the command ORB's misc quadlet. */
#define COMMAND_ORB_DATA_SIZE(v)	((v))
#define COMMAND_ORB_PAGE_SIZE(v)	((v) << 16)
#define COMMAND_ORB_PAGE_TABLE_PRESENT	((1) << 19)
#define COMMAND_ORB_MAX_PAYLOAD(v)	((v) << 20)
#define COMMAND_ORB_SPEED(v)		((v) << 24)
#define COMMAND_ORB_DIRECTION		((1) << 27)
#define COMMAND_ORB_REQUEST_FORMAT(v)	((v) << 29)
#define COMMAND_ORB_NOTIFY		((1) << 31)

/*
 * Command ORB carrying one SCSI command.  The embedded page table covers
 * up to SG_ALL scatter/gather elements and must stay 8-byte aligned as
 * required for the data descriptor it is pointed at by.
 */
struct sbp2_command_orb {
	struct sbp2_orb base;
	struct {
		struct sbp2_pointer next;
		struct sbp2_pointer data_descriptor;
		__be32 misc;
		u8 command_block[SBP2_MAX_CDB_SIZE];
	} request;
	struct scsi_cmnd *cmd;

	struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
	dma_addr_t page_table_bus;
};
0317
#define SBP2_ROM_VALUE_WILDCARD	~0	/* match any model value */
#define SBP2_ROM_VALUE_MISSING	0xff000000 /* value not present in config ROM */

/*
 * Built-in list of devices with known firmware bugs.  An entry applies
 * when its firmware_revision equals the device's revision masked with
 * 0xffffff00 and its model matches exactly or is the wildcard (see
 * sbp2_init_workarounds()).  The table can be overridden with the
 * "workarounds" module parameter (SBP2_WORKAROUND_OVERRIDE).
 */
static const struct {
	u32 firmware_revision;
	u32 model;
	unsigned int workarounds;
} sbp2_workarounds_table[] = {
	{
		.firmware_revision	= 0x002800,
		.model			= 0x001010,
		.workarounds		= SBP2_WORKAROUND_INQUIRY_36 |
					  SBP2_WORKAROUND_MODE_SENSE_8 |
					  SBP2_WORKAROUND_POWER_CONDITION,
	},
	{
		.firmware_revision	= 0x002800,
		.model			= 0x000000,
		.workarounds		= SBP2_WORKAROUND_POWER_CONDITION,
	},
	{
		.firmware_revision	= 0x000200,
		.model			= SBP2_ROM_VALUE_WILDCARD,
		.workarounds		= SBP2_WORKAROUND_INQUIRY_36,
	},
	{
		.firmware_revision	= 0x012800,
		.model			= SBP2_ROM_VALUE_WILDCARD,
		.workarounds		= SBP2_WORKAROUND_POWER_CONDITION,
	},
	{
		.firmware_revision	= 0xa0b800,
		.model			= SBP2_ROM_VALUE_WILDCARD,
		.workarounds		= SBP2_WORKAROUND_128K_MAX_TRANS,
	},
	{
		.firmware_revision	= 0x002600,
		.model			= SBP2_ROM_VALUE_WILDCARD,
		.workarounds		= SBP2_WORKAROUND_128K_MAX_TRANS,
	},
	/*
	 * The 0x0a2700 family (reportedly Apple iPod models) returns an
	 * off-by-one READ CAPACITY — TODO confirm model labels upstream.
	 */
	{
		.firmware_revision	= 0x0a2700,
		.model			= 0x000000,
		.workarounds		= SBP2_WORKAROUND_128K_MAX_TRANS |
					  SBP2_WORKAROUND_FIX_CAPACITY,
	},
	{
		.firmware_revision	= 0x0a2700,
		.model			= 0x000021,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	},
	{
		.firmware_revision	= 0x0a2700,
		.model			= 0x000022,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	},
	{
		.firmware_revision	= 0x0a2700,
		.model			= 0x000023,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	},
	{
		.firmware_revision	= 0x0a2700,
		.model			= 0x00007e,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	}
};
0397
0398 static void free_orb(struct kref *kref)
0399 {
0400 struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref);
0401
0402 kfree(orb);
0403 }
0404
/*
 * Address handler for the per-lu status FIFO.  The target posts an SBP-2
 * status block here as a block write; we match it to a pending ORB by the
 * ORB's bus address, remove that ORB from the list and run its callback.
 */
static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
			      int tcode, int destination, int source,
			      int generation, unsigned long long offset,
			      void *payload, size_t length, void *callback_data)
{
	struct sbp2_logical_unit *lu = callback_data;
	struct sbp2_orb *orb = NULL, *iter;
	struct sbp2_status status;
	unsigned long flags;

	/* A valid status block is an 8..32 byte block write. */
	if (tcode != TCODE_WRITE_BLOCK_REQUEST ||
	    length < 8 || length > sizeof(status)) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	status.status  = be32_to_cpup(payload);
	status.orb_low = be32_to_cpup(payload + 4);
	memset(status.data, 0, sizeof(status.data));
	if (length > 8)
		memcpy(status.data, payload + 8, length - 8);

	/* src field 2/3 = status not related to an ORB we sent. */
	if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
		dev_notice(lu_dev(lu),
			   "non-ORB related status write, not handled\n");
		fw_send_response(card, request, RCODE_COMPLETE);
		return;
	}

	/*
	 * Look up the ORB corresponding to this status write; ORB bus
	 * addresses fit in 32 bits here, so the high quadlet must be 0.
	 */
	spin_lock_irqsave(&lu->tgt->lock, flags);
	list_for_each_entry(iter, &lu->orb_list, link) {
		if (STATUS_GET_ORB_HIGH(status) == 0 &&
		    STATUS_GET_ORB_LOW(status) == iter->request_bus) {
			iter->rcode = RCODE_COMPLETE;
			list_del(&iter->link);
			orb = iter;
			break;
		}
	}
	spin_unlock_irqrestore(&lu->tgt->lock, flags);

	if (orb) {
		orb->callback(orb, &status);
		/* Drop the orb_list reference taken in sbp2_send_orb(). */
		kref_put(&orb->kref, free_orb);
	} else {
		dev_err(lu_dev(lu), "status write for unknown ORB\n");
	}

	fw_send_response(card, request, RCODE_COMPLETE);
}
0456
/*
 * Completion of the ORB-pointer write transaction started in
 * sbp2_send_orb().  The status write may arrive before or after this
 * runs; whoever still finds the ORB on the list finishes it.  A
 * successful write (RCODE_COMPLETE) leaves the ORB on the list for
 * sbp2_status_write() to complete; any other rcode means no status
 * write will come, so the ORB is taken off and completed here.
 */
static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct sbp2_orb *orb = data;
	unsigned long flags;

	spin_lock_irqsave(&orb->lu->tgt->lock, flags);

	/* rcode is -1 until set; sbp2_cancel_orbs() may have set it first. */
	if (orb->rcode == -1)
		orb->rcode = rcode;
	if (orb->rcode != RCODE_COMPLETE) {
		list_del(&orb->link);
		spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);

		orb->callback(orb, NULL);
		/* Drop the orb_list reference. */
		kref_put(&orb->kref, free_orb);
	} else {
		spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);
	}

	/* Drop the transaction reference taken in sbp2_send_orb(). */
	kref_put(&orb->kref, free_orb);
}
0488
/*
 * Queue an ORB and write its bus address to the agent register at
 * @offset.  Takes two extra references on the ORB: one for its presence
 * on lu->orb_list (dropped by sbp2_status_write() or by
 * complete_transaction() on error) and one for the outstanding
 * transaction (always dropped by complete_transaction()).  The caller's
 * initial reference from kref_init() remains the caller's to drop.
 */
static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
			  int node_id, int generation, u64 offset)
{
	struct fw_device *device = target_parent_device(lu->tgt);
	struct sbp2_pointer orb_pointer;
	unsigned long flags;

	orb_pointer.high = 0;
	orb_pointer.low = cpu_to_be32(orb->request_bus);

	orb->lu = lu;
	spin_lock_irqsave(&lu->tgt->lock, flags);
	list_add_tail(&orb->link, &lu->orb_list);
	spin_unlock_irqrestore(&lu->tgt->lock, flags);

	kref_get(&orb->kref);	/* orb_list reference */
	kref_get(&orb->kref);	/* transaction callback reference */

	fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
			node_id, generation, device->max_speed, offset,
			&orb_pointer, 8, complete_transaction, orb);
}
0511
/*
 * Cancel all ORBs still pending on @lu.  Returns 0 if at least one ORB
 * was pending, -ENOENT if the list was empty (callers use this to tell
 * a timeout from normal completion).  If the firewire core manages to
 * cancel a transaction, its completion callback finishes the ORB;
 * otherwise the ORB is finished here with RCODE_CANCELLED.
 */
static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
{
	struct fw_device *device = target_parent_device(lu->tgt);
	struct sbp2_orb *orb, *next;
	struct list_head list;
	int retval = -ENOENT;

	INIT_LIST_HEAD(&list);
	spin_lock_irq(&lu->tgt->lock);
	list_splice_init(&lu->orb_list, &list);
	spin_unlock_irq(&lu->tgt->lock);

	list_for_each_entry_safe(orb, next, &list, link) {
		retval = 0;
		if (fw_cancel_transaction(device->card, &orb->t) == 0)
			continue;

		orb->rcode = RCODE_CANCELLED;
		orb->callback(orb, NULL);
		/* Drop the orb_list reference. */
		kref_put(&orb->kref, free_orb);
	}

	return retval;
}
0536
0537 static void complete_management_orb(struct sbp2_orb *base_orb,
0538 struct sbp2_status *status)
0539 {
0540 struct sbp2_management_orb *orb =
0541 container_of(base_orb, struct sbp2_management_orb, base);
0542
0543 if (status)
0544 memcpy(&orb->status, status, sizeof(*status));
0545 complete(&orb->done);
0546 }
0547
/*
 * Send a management ORB (login, logout, reconnect, ...) and wait for its
 * status.  @lun_or_login_id is the LUN for logins, the login ID for
 * other functions.  If @response is non-NULL, the raw (big-endian)
 * response buffer is copied out even on failure.  Returns 0 on success,
 * -EIO on timeout/transport/status errors, -ENOMEM on alloc/map failure.
 * Logouts against an already-unplugged device succeed trivially.
 */
static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
				    int generation, int function,
				    int lun_or_login_id, void *response)
{
	struct fw_device *device = target_parent_device(lu->tgt);
	struct sbp2_management_orb *orb;
	unsigned int timeout;
	int retval = -ENOMEM;

	if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device))
		return 0;

	orb = kzalloc(sizeof(*orb), GFP_NOIO);
	if (orb == NULL)
		return -ENOMEM;

	kref_init(&orb->base.kref);
	orb->response_bus =
		dma_map_single(device->card->device, &orb->response,
			       sizeof(orb->response), DMA_FROM_DEVICE);
	if (dma_mapping_error(device->card->device, orb->response_bus))
		goto fail_mapping_response;

	orb->request.response.high = 0;
	orb->request.response.low = cpu_to_be32(orb->response_bus);

	orb->request.misc = cpu_to_be32(
		MANAGEMENT_ORB_NOTIFY |
		MANAGEMENT_ORB_FUNCTION(function) |
		MANAGEMENT_ORB_LUN(lun_or_login_id));
	orb->request.length = cpu_to_be32(
		MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response)));

	/* Status is delivered to this lu's status FIFO address. */
	orb->request.status_fifo.high =
		cpu_to_be32(lu->address_handler.offset >> 32);
	orb->request.status_fifo.low =
		cpu_to_be32(lu->address_handler.offset);

	if (function == SBP2_LOGIN_REQUEST) {
		/* Ask for 2^2 == 4 seconds reconnect grace period. */
		orb->request.misc |= cpu_to_be32(
			MANAGEMENT_ORB_RECONNECT(2) |
			MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login));
		timeout = lu->tgt->mgt_orb_timeout;
	} else {
		timeout = SBP2_ORB_TIMEOUT;
	}

	init_completion(&orb->done);
	orb->base.callback = complete_management_orb;

	orb->base.request_bus =
		dma_map_single(device->card->device, &orb->request,
			       sizeof(orb->request), DMA_TO_DEVICE);
	if (dma_mapping_error(device->card->device, orb->base.request_bus))
		goto fail_mapping_request;

	sbp2_send_orb(&orb->base, lu, node_id, generation,
		      lu->tgt->management_agent_address);

	wait_for_completion_timeout(&orb->done, msecs_to_jiffies(timeout));

	retval = -EIO;
	/* -ENOENT from cancel means nothing was pending: the ORB timed out. */
	if (sbp2_cancel_orbs(lu) == 0) {
		dev_err(lu_dev(lu), "ORB reply timed out, rcode 0x%02x\n",
			orb->base.rcode);
		goto out;
	}

	if (orb->base.rcode != RCODE_COMPLETE) {
		dev_err(lu_dev(lu), "management write failed, rcode 0x%02x\n",
			orb->base.rcode);
		goto out;
	}

	if (STATUS_GET_RESPONSE(orb->status) != 0 ||
	    STATUS_GET_SBP_STATUS(orb->status) != 0) {
		dev_err(lu_dev(lu), "error status: %d:%d\n",
			STATUS_GET_RESPONSE(orb->status),
			STATUS_GET_SBP_STATUS(orb->status));
		goto out;
	}

	retval = 0;
 out:
	dma_unmap_single(device->card->device, orb->base.request_bus,
			 sizeof(orb->request), DMA_TO_DEVICE);
 fail_mapping_request:
	dma_unmap_single(device->card->device, orb->response_bus,
			 sizeof(orb->response), DMA_FROM_DEVICE);
 fail_mapping_response:
	if (response)
		memcpy(response, orb->response, sizeof(orb->response));
	/* Drop the submitter's reference from kref_init(). */
	kref_put(&orb->base.kref, free_orb);

	return retval;
}
0645
/* Synchronously write the command block agent's AGENT_RESET register. */
static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
{
	struct fw_device *device = target_parent_device(lu->tgt);
	__be32 d = 0;

	fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
			   lu->tgt->node_id, lu->generation, device->max_speed,
			   lu->command_block_agent_address + SBP2_AGENT_RESET,
			   &d, 4);
}
0656
/* Completion for the fire-and-forget agent reset: free the transaction. */
static void complete_agent_reset_write_no_wait(struct fw_card *card,
		int rcode, void *payload, size_t length, void *data)
{
	kfree(data);
}
0662
/*
 * Asynchronous variant of sbp2_agent_reset(), safe in atomic context
 * (GFP_ATOMIC, no waiting).  Failure to allocate is silently ignored —
 * the reset is best-effort.  The payload quadlet is static so it stays
 * valid after this function returns.
 */
static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
{
	struct fw_device *device = target_parent_device(lu->tgt);
	struct fw_transaction *t;
	static __be32 d;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (t == NULL)
		return;

	fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
			lu->tgt->node_id, lu->generation, device->max_speed,
			lu->command_block_agent_address + SBP2_AGENT_RESET,
			&d, 4, complete_agent_reset_write_no_wait, t);
}
0678
0679 static inline void sbp2_allow_block(struct sbp2_target *tgt)
0680 {
0681 spin_lock_irq(&tgt->lock);
0682 --tgt->dont_block;
0683 spin_unlock_irq(&tgt->lock);
0684 }
0685
0686
0687
0688
0689
0690
0691
0692
0693
0694
0695
/*
 * Block the SCSI host's request queue if this lu is stale (its login
 * generation differs from the card's current generation), unless blocking
 * is temporarily forbidden (tgt->dont_block) or the lu is already blocked.
 * The first lu to block (tgt->blocked 0 -> 1) blocks the whole host.
 * Safe to call from atomic context.
 */
static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
{
	struct sbp2_target *tgt = lu->tgt;
	struct fw_card *card = target_parent_device(tgt)->card;
	/* tgt is the hostdata of its Scsi_Host, see sbp2_probe(). */
	struct Scsi_Host *shost =
		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
	unsigned long flags;

	spin_lock_irqsave(&tgt->lock, flags);
	if (!tgt->dont_block && !lu->blocked &&
	    lu->generation != card->generation) {
		lu->blocked = true;
		if (++tgt->blocked == 1)
			scsi_block_requests(shost);
	}
	spin_unlock_irqrestore(&tgt->lock, flags);
}
0713
0714
0715
0716
0717
0718
0719
/*
 * Unblock this lu once it is current again (login generation matches the
 * card's); unblock the SCSI host when the last blocked lu goes away.
 * scsi_unblock_requests() is called outside the lock, so this must not
 * be called from atomic context.
 */
static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
{
	struct sbp2_target *tgt = lu->tgt;
	struct fw_card *card = target_parent_device(tgt)->card;
	struct Scsi_Host *shost =
		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
	bool unblock = false;

	spin_lock_irq(&tgt->lock);
	if (lu->blocked && lu->generation == card->generation) {
		lu->blocked = false;
		unblock = --tgt->blocked == 0;
	}
	spin_unlock_irq(&tgt->lock);

	if (unblock)
		scsi_unblock_requests(shost);
}
0738
0739
0740
0741
0742
0743
0744
/*
 * Unconditionally unblock the host and forbid future blocking by taking
 * a "don't block" hold (released again via sbp2_allow_block()).  Used
 * when giving up on a login and at device removal.  Not for atomic
 * context because of scsi_unblock_requests().
 */
static void sbp2_unblock(struct sbp2_target *tgt)
{
	struct Scsi_Host *shost =
		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);

	spin_lock_irq(&tgt->lock);
	++tgt->dont_block;
	spin_unlock_irq(&tgt->lock);

	scsi_unblock_requests(shost);
}
0756
0757 static int sbp2_lun2int(u16 lun)
0758 {
0759 struct scsi_lun eight_bytes_lun;
0760
0761 memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun));
0762 eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff;
0763 eight_bytes_lun.scsi_lun[1] = lun & 0xff;
0764
0765 return scsilun_to_int(&eight_bytes_lun);
0766 }
0767
0768
0769
0770
0771
0772
0773
0774
0775
0776
0777
0778
0779
0780
0781
0782
0783
/*
 * Write the node's BUSY_TIMEOUT CSR so the target keeps retrying busied
 * packets: SBP2_RETRY_LIMIT retries, SBP2_CYCLE_LIMIT retry period
 * (values per the 1394/SBP-2 busy-timeout register layout).
 */
static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
{
	struct fw_device *device = target_parent_device(lu->tgt);
	__be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);

	fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
			   lu->tgt->node_id, lu->generation, device->max_speed,
			   CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, &d, 4);
}
0793
0794 static void sbp2_reconnect(struct work_struct *work);
0795
/*
 * Work function: log in to the logical unit, then (on first login) probe
 * it as a SCSI device.  On success the work function is switched to
 * sbp2_reconnect for subsequent bus resets.  Retries up to 5 times on
 * login failure before giving up and unblocking the host for good.
 */
static void sbp2_login(struct work_struct *work)
{
	struct sbp2_logical_unit *lu =
		container_of(work, struct sbp2_logical_unit, work.work);
	struct sbp2_target *tgt = lu->tgt;
	struct fw_device *device = target_parent_device(tgt);
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct sbp2_login_response response;
	int generation, node_id, local_node_id;

	if (fw_device_is_shutdown(device))
		return;

	generation = device->generation;
	smp_rmb();	/* node IDs must not be older than generation */
	node_id = device->node_id;
	local_node_id = device->card->node_id;

	/* If this is a re-login attempt, log out first, or we may be rejected. */
	if (lu->has_sdev)
		sbp2_send_management_orb(lu, device->node_id, generation,
				SBP2_LOGOUT_REQUEST, lu->login_id, NULL);

	if (sbp2_send_management_orb(lu, node_id, generation,
				SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
		if (lu->retries++ < 5) {
			sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
		} else {
			dev_err(tgt_dev(tgt), "failed to login to LUN %04x\n",
				lu->lun);
			/* Let any waiting I/O fail from now on. */
			sbp2_unblock(lu->tgt);
		}
		return;
	}

	tgt->node_id = node_id;
	tgt->address_high = local_node_id << 16;
	smp_wmb();	/* node IDs must not be older than generation */
	lu->generation = generation;

	lu->command_block_agent_address =
		((u64)(be32_to_cpu(response.command_block_agent.high) & 0xffff)
		      << 32) | be32_to_cpu(response.command_block_agent.low);
	lu->login_id = be32_to_cpu(response.misc) & 0xffff;

	dev_notice(tgt_dev(tgt), "logged in to LUN %04x (%d retries)\n",
		   lu->lun, lu->retries);

	/* Set appropriate retry limits in the BUSY_TIMEOUT register. */
	sbp2_set_busy_timeout(lu);

	lu->workfn = sbp2_reconnect;
	sbp2_agent_reset(lu);

	/* This was a re-login: the sdev already exists, just resume I/O. */
	if (lu->has_sdev) {
		sbp2_cancel_orbs(lu);
		sbp2_conditionally_unblock(lu);

		return;
	}

	if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
		ssleep(SBP2_INQUIRY_DELAY);

	shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
	sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu);
	/*
	 * Reconnects cannot be performed while inside this work item, so
	 * a bus reset during __scsi_add_device() can leave us with a
	 * failed or unusable sdev.  Detect that below and redo the whole
	 * login + SCSI probe.
	 */

	/* Reported error during __scsi_add_device(). */
	if (IS_ERR(sdev))
		goto out_logout_login;

	/* Unreported error: the bus generation changed under us. */
	smp_rmb(); /* get current card generation */
	if (generation != device->card->generation) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
		goto out_logout_login;
	}

	/* Success: keep the sdev and allow blocking again. */
	lu->has_sdev = true;
	scsi_device_put(sdev);
	sbp2_allow_block(tgt);

	return;

 out_logout_login:
	smp_rmb(); /* generation may have changed */
	generation = device->generation;
	smp_rmb(); /* node_id must not be older than generation */

	sbp2_send_management_orb(lu, device->node_id, generation,
				 SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
	/*
	 * If a bus reset happened, sbp2_update() will have requeued
	 * lu->work already.  Reset the work from reconnect to login.
	 */
	lu->workfn = sbp2_login;
}
0905
/*
 * Work function: re-establish an existing login after a bus reset.  If
 * the reconnect fails while the generation is still current, or after 5
 * retries, fall back to a full re-login (lu->workfn = sbp2_login).
 */
static void sbp2_reconnect(struct work_struct *work)
{
	struct sbp2_logical_unit *lu =
		container_of(work, struct sbp2_logical_unit, work.work);
	struct sbp2_target *tgt = lu->tgt;
	struct fw_device *device = target_parent_device(tgt);
	int generation, node_id, local_node_id;

	if (fw_device_is_shutdown(device))
		return;

	generation = device->generation;
	smp_rmb();	/* node IDs must not be older than generation */
	node_id = device->node_id;
	local_node_id = device->card->node_id;

	if (sbp2_send_management_orb(lu, node_id, generation,
				     SBP2_RECONNECT_REQUEST,
				     lu->login_id, NULL) < 0) {
		/*
		 * If the generation we used is still current, the target
		 * genuinely refused the reconnect; in that case, or after
		 * too many retries, switch back to a full login.  Either
		 * way, requeue the work shortly.
		 */
		smp_rmb(); /* get current card generation */
		if (generation == device->card->generation ||
		    lu->retries++ >= 5) {
			dev_err(tgt_dev(tgt), "failed to reconnect\n");
			lu->retries = 0;
			lu->workfn = sbp2_login;
		}
		sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));

		return;
	}

	tgt->node_id = node_id;
	tgt->address_high = local_node_id << 16;
	smp_wmb();	/* node IDs must not be older than generation */
	lu->generation = generation;

	dev_notice(tgt_dev(tgt), "reconnected to LUN %04x (%d retries)\n",
		   lu->lun, lu->retries);

	sbp2_agent_reset(lu);
	sbp2_cancel_orbs(lu);
	sbp2_conditionally_unblock(lu);
}
0956
/*
 * Work item trampoline: dispatch to the unit's current work function,
 * which switches between sbp2_login and sbp2_reconnect at runtime.
 */
static void sbp2_lu_workfn(struct work_struct *work)
{
	struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
						    struct sbp2_logical_unit, work);
	lu->workfn(work);
}
0963
/*
 * Allocate a logical unit for @lun_entry (low 16 bits = LUN), register
 * its status FIFO address handler, and queue it on tgt->lu_list.  The
 * dont_block increment keeps the host unblocked until the first login
 * succeeds (released via sbp2_allow_block()).  Returns 0 or -ENOMEM.
 */
static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
{
	struct sbp2_logical_unit *lu;

	lu = kmalloc(sizeof(*lu), GFP_KERNEL);
	if (!lu)
		return -ENOMEM;

	/* The status FIFO lives in the high memory region. */
	lu->address_handler.length           = 0x100;
	lu->address_handler.address_callback = sbp2_status_write;
	lu->address_handler.callback_data    = lu;

	if (fw_core_add_address_handler(&lu->address_handler,
					&fw_high_memory_region) < 0) {
		kfree(lu);
		return -ENOMEM;
	}

	lu->tgt      = tgt;
	lu->lun      = lun_entry & 0xffff;
	lu->login_id = INVALID_LOGIN_ID;
	lu->retries  = 0;
	lu->has_sdev = false;
	lu->blocked  = false;
	++tgt->dont_block;
	INIT_LIST_HEAD(&lu->orb_list);
	lu->workfn = sbp2_login;
	INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);

	list_add_tail(&lu->link, &tgt->lu_list);
	return 0;
}
0996
0997 static void sbp2_get_unit_unique_id(struct sbp2_target *tgt,
0998 const u32 *leaf)
0999 {
1000 if ((leaf[0] & 0xffff0000) == 0x00020000)
1001 tgt->guid = (u64)leaf[1] << 32 | leaf[2];
1002 }
1003
/*
 * Walk a logical-unit directory and add one logical unit per LUN entry.
 * Returns 0 on success, -ENOMEM if any unit could not be added.
 */
static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt,
				      const u32 *directory)
{
	struct fw_csr_iterator ci;
	int key, value;

	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &key, &value))
		if (key == SBP2_CSR_LOGICAL_UNIT_NUMBER &&
		    sbp2_add_logical_unit(tgt, value) < 0)
			return -ENOMEM;
	return 0;
}
1017
/*
 * Parse the SBP-2 unit directory: record the management agent address,
 * directory ID, management ORB timeout, model and firmware revision
 * (into the caller's variables), and add all logical units found either
 * directly or in sub-directories.  Returns 0 or -ENOMEM.
 */
static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory,
			      u32 *model, u32 *firmware_revision)
{
	struct fw_csr_iterator ci;
	int key, value;

	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		switch (key) {

		case CSR_DEPENDENT_INFO | CSR_OFFSET:
			/* Management agent CSR offset, in quadlets. */
			tgt->management_agent_address =
					CSR_REGISTER_BASE + 4 * value;
			break;

		case CSR_DIRECTORY_ID:
			tgt->directory_id = value;
			break;

		case CSR_MODEL:
			*model = value;
			break;

		case SBP2_CSR_FIRMWARE_REVISION:
			*firmware_revision = value;
			break;

		case SBP2_CSR_UNIT_CHARACTERISTICS:
			/* The timeout is stored in 500ms units in byte 1. */
			tgt->mgt_orb_timeout = (value >> 8 & 0xff) * 500;
			break;

		case SBP2_CSR_LOGICAL_UNIT_NUMBER:
			if (sbp2_add_logical_unit(tgt, value) < 0)
				return -ENOMEM;
			break;

		case SBP2_CSR_UNIT_UNIQUE_ID:
			/* value is a quadlet offset relative to this entry. */
			sbp2_get_unit_unique_id(tgt, ci.p - 1 + value);
			break;

		case SBP2_CSR_LOGICAL_UNIT_DIRECTORY:
			/* Adjust for the increment in the iterator. */
			if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0)
				return -ENOMEM;
			break;
		}
	}
	return 0;
}
1068
1069
1070
1071
1072
1073
1074 static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt)
1075 {
1076 unsigned int timeout = tgt->mgt_orb_timeout;
1077
1078 if (timeout > 40000)
1079 dev_notice(tgt_dev(tgt), "%ds mgt_ORB_timeout limited to 40s\n",
1080 timeout / 1000);
1081
1082 tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000);
1083 }
1084
/*
 * Combine the "workarounds" module parameter with the built-in table:
 * the first table entry whose firmware revision (high 16 bits compared)
 * and model (exact or wildcard) match contributes its flags, unless the
 * user set SBP2_WORKAROUND_OVERRIDE to bypass the table entirely.
 */
static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
				  u32 firmware_revision)
{
	int i;
	unsigned int w = sbp2_param_workarounds;

	if (w)
		dev_notice(tgt_dev(tgt),
			   "Please notify linux1394-devel@lists.sf.net "
			   "if you need the workarounds parameter\n");

	if (w & SBP2_WORKAROUND_OVERRIDE)
		goto out;

	for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {

		if (sbp2_workarounds_table[i].firmware_revision !=
		    (firmware_revision & 0xffffff00))
			continue;

		if (sbp2_workarounds_table[i].model != model &&
		    sbp2_workarounds_table[i].model != SBP2_ROM_VALUE_WILDCARD)
			continue;

		w |= sbp2_workarounds_table[i].workarounds;
		break;
	}
 out:
	if (w)
		dev_notice(tgt_dev(tgt), "workarounds 0x%x "
			   "(firmware_revision 0x%06x, model_id 0x%06x)\n",
			   w, firmware_revision, model);
	tgt->workarounds = w;
}
1119
1120 static struct scsi_host_template scsi_driver_template;
1121 static void sbp2_remove(struct fw_unit *unit);
1122
/*
 * Driver probe: allocate a Scsi_Host whose hostdata is the sbp2_target,
 * parse the unit directory, apply workarounds, and kick off the login
 * work for every logical unit found.
 */
static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
{
	struct fw_device *device = fw_parent_device(unit);
	struct sbp2_target *tgt;
	struct sbp2_logical_unit *lu;
	struct Scsi_Host *shost;
	u32 model, firmware_revision;

	/* Cannot access our own SBP-2 device (loopback). */
	if (device->is_local)
		return -ENODEV;

	shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
	if (shost == NULL)
		return -ENOMEM;

	tgt = (struct sbp2_target *)shost->hostdata;
	dev_set_drvdata(&unit->device, tgt);
	tgt->unit = unit;
	INIT_LIST_HEAD(&tgt->lu_list);
	spin_lock_init(&tgt->lock);
	tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];

	if (fw_device_enable_phys_dma(device) < 0)
		goto fail_shost_put;

	shost->max_cmd_len = SBP2_MAX_CDB_SIZE;

	if (scsi_add_host_with_dma(shost, &unit->device,
				   device->card->device) < 0)
		goto fail_shost_put;

	/* Default if the unit directory has no CSR_DIRECTORY_ID entry. */
	tgt->directory_id = ((unit->directory - device->config_rom) * 4
			     + CSR_CONFIG_ROM) & 0xffffff;

	firmware_revision = SBP2_ROM_VALUE_MISSING;
	model		  = SBP2_ROM_VALUE_MISSING;

	if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
			       &firmware_revision) < 0)
		goto fail_remove;

	sbp2_clamp_management_orb_timeout(tgt);
	sbp2_init_workarounds(tgt, model, firmware_revision);

	/*
	 * Max payload is 2^(max_payload + 2) bytes: bounded by the link
	 * speed (512 bytes at S100, doubling per speed step), by 4096
	 * bytes, and by what the local card can receive.
	 */
	tgt->max_payload = min3(device->max_speed + 7, 10U,
				device->card->max_receive - 1);

	/* Do the login in a workqueue so we can easily reschedule retries. */
	list_for_each_entry(lu, &tgt->lu_list, link)
		sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));

	return 0;

 fail_remove:
	sbp2_remove(unit);
	return -ENOMEM;

 fail_shost_put:
	scsi_host_put(shost);
	return -ENOMEM;
}
1192
/*
 * Bus-reset notification: re-enable physical DMA and get every logical
 * unit reconnected (or re-logged-in) via its work item.  Stale units are
 * blocked until the reconnect succeeds.
 */
static void sbp2_update(struct fw_unit *unit)
{
	struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
	struct sbp2_logical_unit *lu;

	fw_device_enable_phys_dma(fw_parent_device(unit));

	/*
	 * Fresh start: reset the retry counters so the reconnect logic
	 * does not fall back to a re-login prematurely.
	 */
	list_for_each_entry(lu, &tgt->lu_list, link) {
		sbp2_conditionally_block(lu);
		lu->retries = 0;
		sbp2_queue_work(lu, 0);
	}
}
1210
/*
 * Driver removal: stop all per-lu work, remove the SCSI devices, log out
 * of still-logged-in units, tear down status FIFO handlers, and release
 * the Scsi_Host.  Also called from sbp2_probe()'s error path.
 */
static void sbp2_remove(struct fw_unit *unit)
{
	struct fw_device *device = fw_parent_device(unit);
	struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
	struct sbp2_logical_unit *lu, *next;
	struct Scsi_Host *shost =
		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
	struct scsi_device *sdev;

	/* Prevent deadlocks: no more blocking while we tear things down. */
	sbp2_unblock(tgt);

	list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
		cancel_delayed_work_sync(&lu->work);
		sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		}
		if (lu->login_id != INVALID_LOGIN_ID) {
			int generation, node_id;
			/*
			 * tgt->node_id may be obsolete after a bus reset;
			 * use the current values from the fw_device (read
			 * generation before node_id, as at login time).
			 */
			generation = device->generation;
			smp_rmb(); /* node_id vs. generation */
			node_id    = device->node_id;
			sbp2_send_management_orb(lu, node_id, generation,
						 SBP2_LOGOUT_REQUEST,
						 lu->login_id, NULL);
		}
		fw_core_remove_address_handler(&lu->address_handler);
		list_del(&lu->link);
		kfree(lu);
	}
	scsi_remove_host(shost);
	dev_notice(&unit->device, "released target %d:0:0\n", shost->host_no);

	scsi_host_put(shost);
}
1253
/* Config-ROM unit directory values identifying an SBP-2 unit. */
#define SBP2_UNIT_SPEC_ID_ENTRY	0x0000609e
#define SBP2_SW_VERSION_ENTRY	0x00010483

/* Match any unit whose specifier ID and version designate SBP-2. */
static const struct ieee1394_device_id sbp2_id_table[] = {
	{
		.match_flags  = IEEE1394_MATCH_SPECIFIER_ID |
				IEEE1394_MATCH_VERSION,
		.specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
		.version      = SBP2_SW_VERSION_ENTRY,
	},
	{ }	/* terminator */
};
1266
/* FireWire bus driver glue: probe/update/remove for matching SBP-2 units. */
static struct fw_driver sbp2_driver = {
	.driver   = {
		.owner  = THIS_MODULE,
		.name   = KBUILD_MODNAME,
		.bus    = &fw_bus_type,
	},
	.probe    = sbp2_probe,
	.update   = sbp2_update,	/* called on bus topology changes */
	.remove   = sbp2_remove,
	.id_table = sbp2_id_table,
};
1278
1279 static void sbp2_unmap_scatterlist(struct device *card_device,
1280 struct sbp2_command_orb *orb)
1281 {
1282 scsi_dma_unmap(orb->cmd);
1283
1284 if (orb->request.misc & cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT))
1285 dma_unmap_single(card_device, orb->page_table_bus,
1286 sizeof(orb->page_table), DMA_TO_DEVICE);
1287 }
1288
1289 static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
1290 {
1291 int sam_status;
1292 int sfmt = (sbp2_status[0] >> 6) & 0x03;
1293
1294 if (sfmt == 2 || sfmt == 3) {
1295
1296
1297
1298
1299 return DID_ERROR << 16;
1300 }
1301
1302 sense_data[0] = 0x70 | sfmt | (sbp2_status[1] & 0x80);
1303 sense_data[1] = 0x0;
1304 sense_data[2] = ((sbp2_status[1] << 1) & 0xe0) | (sbp2_status[1] & 0x0f);
1305 sense_data[3] = sbp2_status[4];
1306 sense_data[4] = sbp2_status[5];
1307 sense_data[5] = sbp2_status[6];
1308 sense_data[6] = sbp2_status[7];
1309 sense_data[7] = 10;
1310 sense_data[8] = sbp2_status[8];
1311 sense_data[9] = sbp2_status[9];
1312 sense_data[10] = sbp2_status[10];
1313 sense_data[11] = sbp2_status[11];
1314 sense_data[12] = sbp2_status[2];
1315 sense_data[13] = sbp2_status[3];
1316 sense_data[14] = sbp2_status[12];
1317 sense_data[15] = sbp2_status[13];
1318
1319 sam_status = sbp2_status[0] & 0x3f;
1320
1321 switch (sam_status) {
1322 case SAM_STAT_GOOD:
1323 case SAM_STAT_CHECK_CONDITION:
1324 case SAM_STAT_CONDITION_MET:
1325 case SAM_STAT_BUSY:
1326 case SAM_STAT_RESERVATION_CONFLICT:
1327 case SAM_STAT_COMMAND_TERMINATED:
1328 return DID_OK << 16 | sam_status;
1329
1330 default:
1331 return DID_ERROR << 16;
1332 }
1333 }
1334
/*
 * Completion callback for a command ORB: translate the SBP-2 status (if
 * any) into a SCSI result, undo the DMA mappings, and finish the command.
 */
static void complete_command_orb(struct sbp2_orb *base_orb,
				 struct sbp2_status *status)
{
	struct sbp2_command_orb *orb =
		container_of(base_orb, struct sbp2_command_orb, base);
	struct fw_device *device = target_parent_device(base_orb->lu->tgt);
	int result;

	if (status != NULL) {
		/* a DEAD fetch agent must be reset before it accepts ORBs again */
		if (STATUS_GET_DEAD(*status))
			sbp2_agent_reset_no_wait(base_orb->lu);

		switch (STATUS_GET_RESPONSE(*status)) {
		case SBP2_STATUS_REQUEST_COMPLETE:
			result = DID_OK << 16;
			break;
		case SBP2_STATUS_TRANSPORT_FAILURE:
			result = DID_BUS_BUSY << 16;
			break;
		case SBP2_STATUS_ILLEGAL_REQUEST:
		case SBP2_STATUS_VENDOR_DEPENDENT:
		default:
			result = DID_ERROR << 16;
			break;
		}

		/* status blocks longer than one quadlet carry sense data */
		if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1)
			result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status),
							   orb->cmd->sense_buffer);
	} else {
		/*
		 * No status block: the ORB never completed normally —
		 * presumably cancelled, e.g. after a bus reset.  Report
		 * DID_BUS_BUSY so the midlayer retries, and block the unit
		 * until it has reconnected.
		 */
		result = DID_BUS_BUSY << 16;
		sbp2_conditionally_block(base_orb->lu);
	}

	dma_unmap_single(device->card->device, orb->base.request_bus,
			 sizeof(orb->request), DMA_TO_DEVICE);
	sbp2_unmap_scatterlist(device->card->device, orb);

	orb->cmd->result = result;
	scsi_done(orb->cmd);
}
1381
/*
 * Map the command's scatterlist for DMA and describe it in the ORB.
 * Returns 0 on success, -ENOMEM on any mapping failure.
 */
static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
		struct fw_device *device, struct sbp2_logical_unit *lu)
{
	struct scatterlist *sg = scsi_sglist(orb->cmd);
	int i, n;

	n = scsi_dma_map(orb->cmd);
	if (n <= 0)
		goto fail;

	/*
	 * A single mapped segment can be referenced directly from the ORB's
	 * data descriptor; no page table is needed in that case.
	 */
	if (n == 1) {
		orb->request.data_descriptor.high =
			cpu_to_be32(lu->tgt->address_high);
		orb->request.data_descriptor.low  =
			cpu_to_be32(sg_dma_address(sg));
		orb->request.misc |=
			cpu_to_be32(COMMAND_ORB_DATA_SIZE(sg_dma_len(sg)));
		return 0;
	}

	/*
	 * Multiple segments: build a page table, one entry per segment,
	 * each entry holding length in the upper 16 bits of .high and the
	 * DMA address in .low.  Note for_each_sg() reuses 'sg' as cursor.
	 */
	for_each_sg(sg, sg, n, i) {
		orb->page_table[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
		orb->page_table[i].low = cpu_to_be32(sg_dma_address(sg));
	}

	orb->page_table_bus =
		dma_map_single(device->card->device, orb->page_table,
			       sizeof(orb->page_table), DMA_TO_DEVICE);
	if (dma_mapping_error(device->card->device, orb->page_table_bus))
		goto fail_page_table;

	/*
	 * The ORB's data descriptor now points at the page table itself;
	 * PAGE_TABLE_PRESENT tells the target to dereference it, and the
	 * data size field carries the number of table entries.
	 */
	orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high);
	orb->request.data_descriptor.low  = cpu_to_be32(orb->page_table_bus);
	orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT |
					 COMMAND_ORB_DATA_SIZE(n));

	return 0;

 fail_page_table:
	scsi_dma_unmap(orb->cmd);
 fail:
	return -ENOMEM;
}
1439
1440
1441
/*
 * SCSI midlayer entry point: build a command ORB for 'cmd', map its data
 * for DMA, and hand the ORB to the target's command block agent.
 * Returns 0 on success or SCSI_MLQUEUE_HOST_BUSY to make the midlayer retry.
 */
static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
				  struct scsi_cmnd *cmd)
{
	struct sbp2_logical_unit *lu = cmd->device->hostdata;
	struct fw_device *device = target_parent_device(lu->tgt);
	struct sbp2_command_orb *orb;
	int generation, retval = SCSI_MLQUEUE_HOST_BUSY;

	/* atomic context: queuecommand may be called with interrupts off */
	orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
	if (orb == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* preload with a value that is not a valid completed rcode */
	orb->base.rcode = -1;
	kref_init(&orb->base.kref);
	orb->cmd = cmd;
	orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
	orb->request.misc = cpu_to_be32(
		COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) |
		COMMAND_ORB_SPEED(device->max_speed) |
		COMMAND_ORB_NOTIFY);

	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		orb->request.misc |= cpu_to_be32(COMMAND_ORB_DIRECTION);

	/*
	 * Read generation before the ORB is sent; the barrier keeps later
	 * node_id use from being ordered ahead of this load (pairs with the
	 * fw core's update path — NOTE(review): presumed, confirm there).
	 */
	generation = device->generation;
	smp_rmb();

	if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
		goto out;

	memcpy(orb->request.command_block, cmd->cmnd, cmd->cmd_len);

	orb->base.callback = complete_command_orb;
	orb->base.request_bus =
		dma_map_single(device->card->device, &orb->request,
			       sizeof(orb->request), DMA_TO_DEVICE);
	if (dma_mapping_error(device->card->device, orb->base.request_bus)) {
		sbp2_unmap_scatterlist(device->card->device, orb);
		goto out;
	}

	sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation,
		      lu->command_block_agent_address + SBP2_ORB_POINTER);
	retval = 0;
 out:
	/* drop the allocation reference; in-flight ORBs hold their own */
	kref_put(&orb->base.kref, free_orb);
	return retval;
}
1491
1492 static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
1493 {
1494 struct sbp2_logical_unit *lu = sdev->hostdata;
1495
1496
1497 if (!lu)
1498 return -ENOSYS;
1499
1500 sdev->allow_restart = 1;
1501
1502
1503
1504
1505
1506 blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
1507
1508 if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
1509 sdev->inquiry_len = 36;
1510
1511 return 0;
1512 }
1513
1514 static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
1515 {
1516 struct sbp2_logical_unit *lu = sdev->hostdata;
1517
1518 sdev->use_10_for_rw = 1;
1519
1520 if (sbp2_param_exclusive_login)
1521 sdev->manage_start_stop = 1;
1522
1523 if (sdev->type == TYPE_ROM)
1524 sdev->use_10_for_ms = 1;
1525
1526 if (sdev->type == TYPE_DISK &&
1527 lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
1528 sdev->skip_ms_page_8 = 1;
1529
1530 if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
1531 sdev->fix_capacity = 1;
1532
1533 if (lu->tgt->workarounds & SBP2_WORKAROUND_POWER_CONDITION)
1534 sdev->start_stop_pwr_cond = 1;
1535
1536 if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
1537 blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
1538
1539 return 0;
1540 }
1541
1542
1543
1544
1545
1546 static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
1547 {
1548 struct sbp2_logical_unit *lu = cmd->device->hostdata;
1549
1550 dev_notice(lu_dev(lu), "sbp2_scsi_abort\n");
1551 sbp2_agent_reset(lu);
1552 sbp2_cancel_orbs(lu);
1553
1554 return SUCCESS;
1555 }
1556
1557
1558
1559
1560
1561
1562
1563
1564 static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
1565 struct device_attribute *attr, char *buf)
1566 {
1567 struct scsi_device *sdev = to_scsi_device(dev);
1568 struct sbp2_logical_unit *lu;
1569
1570 if (!sdev)
1571 return 0;
1572
1573 lu = sdev->hostdata;
1574
1575 return sprintf(buf, "%016llx:%06x:%04x\n",
1576 (unsigned long long)lu->tgt->guid,
1577 lu->tgt->directory_id, lu->lun);
1578 }
1579
/* Read-only per-sdev attribute exposing the unit's persistent identifier. */
static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);

static struct attribute *sbp2_scsi_sysfs_attrs[] = {
	&dev_attr_ieee1394_id.attr,
	NULL
};

/* Generates sbp2_scsi_sysfs_groups, referenced by the host template below. */
ATTRIBUTE_GROUPS(sbp2_scsi_sysfs);
1588
/* SCSI host template: entry points and limits for the SBP-2 transport. */
static struct scsi_host_template scsi_driver_template = {
	.module			= THIS_MODULE,
	.name			= "SBP-2 IEEE-1394",
	.proc_name		= "sbp2",
	.queuecommand		= sbp2_scsi_queuecommand,
	.slave_alloc		= sbp2_scsi_slave_alloc,
	.slave_configure	= sbp2_scsi_slave_configure,
	.eh_abort_handler	= sbp2_scsi_abort,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_segment_size	= SBP2_MAX_SEG_SIZE,
	.can_queue		= 1,	/* one command in flight per host */
	.sdev_groups		= sbp2_scsi_sysfs_groups,
};
1603
MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("SCSI over IEEE1394");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);

/* extra alias — presumably for compatibility with the old ieee1394 sbp2 module name */
MODULE_ALIAS("sbp2");
1611
/* Module init: register the driver on the FireWire bus. */
static int __init sbp2_init(void)
{
	return driver_register(&sbp2_driver.driver);
}
1616
/* Module exit: unregister the driver; sbp2_remove() runs for bound units. */
static void __exit sbp2_cleanup(void)
{
	driver_unregister(&sbp2_driver.driver);
}
1621
/* Standard module entry/exit hookup. */
module_init(sbp2_init);
module_exit(sbp2_cleanup);